wireless-drivers-next patches for 4.14

The first wireless-drivers-next pull request for 4.14. I'm submitting
 this unusually late in the cycle because my vacation postponed it. But
 even though this is late, there still aren't that many new features;
 it's mostly cleanups and fixes.
 
 Major changes:
 
 ath10k
 
 * preparation for wcn3990 support
 
 iwlwifi
 
 * Reorganization of the code into separate directories continues
 
 qtnfmac
 
 * regulatory support updates
 
 * add get_channel, dump_survey and channel_switch cfg80211 handlers
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQEcBAABAgAGBQJZiHuQAAoJEG4XJFUm622bVSEIAKdausycC6OOZjwTGWnFyxE/
 58n79VTrTbXVLwJ7lSBCGYCTujc7amPxAVlDOLYd+9TKm0fO7gap50Gdl35HO5sp
 9v/augHQSouz52q2vgsTi0JbXsqhJQZ4Ie4P0fo8OyqJMYAvFga2FhFBpJseMYd9
 NX88SMoxAGgDkTC0JfzzLnA/jZ0W6ULai6zmRE1s6lUIynP2kzHgpfbMH3+KEkod
 SUW+yX91MdOkkyFGXyY11uuBqanUpEVSAQXW6J76vw3qS88qIqaL3iIeJ6C4Vozq
 fKNkHN4iZOd9FlKY1IFi4vS0+7hWiq6DQ3c+ngtU6cuq1XdBa6PuanC3I2e0B8E=
 =PKUU
 -----END PGP SIGNATURE-----

Merge tag 'wireless-drivers-next-for-davem-2017-08-07' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for 4.14

The first wireless-drivers-next pull request for 4.14. I'm submitting
this unusually late in the cycle because my vacation postponed it. But
even though this is late, there still aren't that many new features;
it's mostly cleanups and fixes.

Major changes:

ath10k

* preparation for wcn3990 support

iwlwifi

* Reorganization of the code into separate directories continues

qtnfmac

* regulatory support updates

* add get_channel, dump_survey and channel_switch cfg80211 handlers
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2017-08-07 11:34:41 -07:00
Parent 2a32ca138e 9d54619870
Commit 46d4b68f89
162 changed files with 9068 additions and 6302 deletions

View file

@ -191,6 +191,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
case BCMA_CHIP_ID_BCM4707:
case BCMA_CHIP_ID_BCM5357:
case BCMA_CHIP_ID_BCM53572:
case BCMA_CHIP_ID_BCM53573:
case BCMA_CHIP_ID_BCM47094:
chip->ngpio = 32;
break;

View file

@ -787,8 +787,9 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
ar_pci->mem = ar_ahb->mem;
ar_pci->mem_len = ar_ahb->mem_len;
ar_pci->ar = ar;
ar_pci->bus_ops = &ath10k_ahb_bus_ops;
ar_pci->ce.bus_ops = &ath10k_ahb_bus_ops;
ar_pci->targ_cpu_to_ce_addr = ath10k_ahb_qca4019_targ_cpu_to_ce_addr;
ar->ce_priv = &ar_pci->ce;
ret = ath10k_pci_setup_resource(ar);
if (ret) {

View file

@ -16,7 +16,6 @@
*/
#include "hif.h"
#include "pci.h"
#include "ce.h"
#include "debug.h"
@ -33,7 +32,7 @@
* Each ring consists of a number of descriptors which specify
* an address, length, and meta-data.
*
* Typically, one side of the PCIe interconnect (Host or Target)
* Typically, one side of the PCIe/AHB/SNOC interconnect (Host or Target)
* controls one ring and the other side controls the other ring.
* The source side chooses when to initiate a transfer and it
* chooses what to send (buffer address, length). The destination
@ -73,57 +72,71 @@ ath10k_get_ring_byte(unsigned int offset,
return ((offset & addr_map->mask) >> (addr_map->lsb));
}
static inline u32 ath10k_ce_read32(struct ath10k *ar, u32 offset)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
return ce->bus_ops->read32(ar, offset);
}
static inline void ath10k_ce_write32(struct ath10k *ar, u32 offset, u32 value)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
ce->bus_ops->write32(ar, offset, value);
}
static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
ath10k_pci_write32(ar, ce_ctrl_addr +
ar->hw_ce_regs->dst_wr_index_addr, n);
ath10k_ce_write32(ar, ce_ctrl_addr +
ar->hw_ce_regs->dst_wr_index_addr, n);
}
static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
u32 ce_ctrl_addr)
{
return ath10k_pci_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->dst_wr_index_addr);
return ath10k_ce_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->dst_wr_index_addr);
}
static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
ath10k_pci_write32(ar, ce_ctrl_addr +
ar->hw_ce_regs->sr_wr_index_addr, n);
ath10k_ce_write32(ar, ce_ctrl_addr +
ar->hw_ce_regs->sr_wr_index_addr, n);
}
static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
u32 ce_ctrl_addr)
{
return ath10k_pci_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->sr_wr_index_addr);
return ath10k_ce_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->sr_wr_index_addr);
}
static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
u32 ce_ctrl_addr)
{
return ath10k_pci_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->current_srri_addr);
return ath10k_ce_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->current_srri_addr);
}
static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int addr)
{
ath10k_pci_write32(ar, ce_ctrl_addr +
ar->hw_ce_regs->sr_base_addr, addr);
ath10k_ce_write32(ar, ce_ctrl_addr +
ar->hw_ce_regs->sr_base_addr, addr);
}
static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
ath10k_pci_write32(ar, ce_ctrl_addr +
ar->hw_ce_regs->sr_size_addr, n);
ath10k_ce_write32(ar, ce_ctrl_addr +
ar->hw_ce_regs->sr_size_addr, n);
}
static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
@ -131,12 +144,13 @@ static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
unsigned int n)
{
struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
u32 ctrl1_addr = ath10k_pci_read32(ar,
ce_ctrl_addr + ctrl_regs->addr);
ath10k_pci_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
(ctrl1_addr & ~(ctrl_regs->dmax->mask)) |
ath10k_set_ring_byte(n, ctrl_regs->dmax));
u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ctrl_regs->addr);
ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
(ctrl1_addr & ~(ctrl_regs->dmax->mask)) |
ath10k_set_ring_byte(n, ctrl_regs->dmax));
}
static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
@ -144,11 +158,13 @@ static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
unsigned int n)
{
struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + ctrl_regs->addr);
ath10k_pci_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
(ctrl1_addr & ~(ctrl_regs->src_ring->mask)) |
ath10k_set_ring_byte(n, ctrl_regs->src_ring));
u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ctrl_regs->addr);
ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
(ctrl1_addr & ~(ctrl_regs->src_ring->mask)) |
ath10k_set_ring_byte(n, ctrl_regs->src_ring));
}
static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
@ -156,34 +172,36 @@ static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
unsigned int n)
{
struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + ctrl_regs->addr);
ath10k_pci_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
(ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) |
ath10k_set_ring_byte(n, ctrl_regs->dst_ring));
u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ctrl_regs->addr);
ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
(ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) |
ath10k_set_ring_byte(n, ctrl_regs->dst_ring));
}
static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
u32 ce_ctrl_addr)
{
return ath10k_pci_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->current_drri_addr);
return ath10k_ce_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->current_drri_addr);
}
static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
u32 ce_ctrl_addr,
u32 addr)
{
ath10k_pci_write32(ar, ce_ctrl_addr +
ar->hw_ce_regs->dr_base_addr, addr);
ath10k_ce_write32(ar, ce_ctrl_addr +
ar->hw_ce_regs->dr_base_addr, addr);
}
static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
ath10k_pci_write32(ar, ce_ctrl_addr +
ar->hw_ce_regs->dr_size_addr, n);
ath10k_ce_write32(ar, ce_ctrl_addr +
ar->hw_ce_regs->dr_size_addr, n);
}
static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
@ -191,11 +209,11 @@ static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
unsigned int n)
{
struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + srcr_wm->addr);
u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
ath10k_pci_write32(ar, ce_ctrl_addr + srcr_wm->addr,
(addr & ~(srcr_wm->wm_high->mask)) |
(ath10k_set_ring_byte(n, srcr_wm->wm_high)));
ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
(addr & ~(srcr_wm->wm_high->mask)) |
(ath10k_set_ring_byte(n, srcr_wm->wm_high)));
}
static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
@ -203,11 +221,11 @@ static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
unsigned int n)
{
struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + srcr_wm->addr);
u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
ath10k_pci_write32(ar, ce_ctrl_addr + srcr_wm->addr,
(addr & ~(srcr_wm->wm_low->mask)) |
(ath10k_set_ring_byte(n, srcr_wm->wm_low)));
ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
(addr & ~(srcr_wm->wm_low->mask)) |
(ath10k_set_ring_byte(n, srcr_wm->wm_low)));
}
static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
@ -215,11 +233,11 @@ static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
unsigned int n)
{
struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + dstr_wm->addr);
u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
ath10k_pci_write32(ar, ce_ctrl_addr + dstr_wm->addr,
(addr & ~(dstr_wm->wm_high->mask)) |
(ath10k_set_ring_byte(n, dstr_wm->wm_high)));
ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
(addr & ~(dstr_wm->wm_high->mask)) |
(ath10k_set_ring_byte(n, dstr_wm->wm_high)));
}
static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
@ -227,66 +245,73 @@ static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
unsigned int n)
{
struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + dstr_wm->addr);
u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
ath10k_pci_write32(ar, ce_ctrl_addr + dstr_wm->addr,
(addr & ~(dstr_wm->wm_low->mask)) |
(ath10k_set_ring_byte(n, dstr_wm->wm_low)));
ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
(addr & ~(dstr_wm->wm_low->mask)) |
(ath10k_set_ring_byte(n, dstr_wm->wm_low)));
}
static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
u32 host_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->host_ie_addr);
ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
host_ie_addr | host_ie->copy_complete->mask);
u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->host_ie_addr);
ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
host_ie_addr | host_ie->copy_complete->mask);
}
static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
u32 host_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->host_ie_addr);
ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
host_ie_addr & ~(host_ie->copy_complete->mask));
u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->host_ie_addr);
ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
host_ie_addr & ~(host_ie->copy_complete->mask));
}
static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
u32 host_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->host_ie_addr);
ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
host_ie_addr & ~(wm_regs->wm_mask));
u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->host_ie_addr);
ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
host_ie_addr & ~(wm_regs->wm_mask));
}
static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
u32 misc_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->misc_ie_addr);
ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
misc_ie_addr | misc_regs->err_mask);
u32 misc_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->misc_ie_addr);
ath10k_ce_write32(ar,
ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
misc_ie_addr | misc_regs->err_mask);
}
static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
u32 misc_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->misc_ie_addr);
ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
misc_ie_addr & ~(misc_regs->err_mask));
u32 misc_ie_addr = ath10k_ce_read32(ar,
ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr);
ath10k_ce_write32(ar,
ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
misc_ie_addr & ~(misc_regs->err_mask));
}
static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
@ -295,7 +320,7 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
{
struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
ath10k_pci_write32(ar, ce_ctrl_addr + wm_regs->addr, mask);
ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask);
}
/*
@ -362,11 +387,11 @@ exit:
void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
{
struct ath10k *ar = pipe->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_ring *src_ring = pipe->src_ring;
u32 ctrl_addr = pipe->ctrl_addr;
lockdep_assert_held(&ar_pci->ce_lock);
lockdep_assert_held(&ce->ce_lock);
/*
* This function must be called only if there is an incomplete
@ -394,13 +419,13 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
unsigned int flags)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce *ce = ath10k_ce_priv(ar);
int ret;
spin_lock_bh(&ar_pci->ce_lock);
spin_lock_bh(&ce->ce_lock);
ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
buffer, nbytes, transfer_id, flags);
spin_unlock_bh(&ar_pci->ce_lock);
spin_unlock_bh(&ce->ce_lock);
return ret;
}
@ -408,14 +433,14 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
{
struct ath10k *ar = pipe->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce *ce = ath10k_ce_priv(ar);
int delta;
spin_lock_bh(&ar_pci->ce_lock);
spin_lock_bh(&ce->ce_lock);
delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
pipe->src_ring->write_index,
pipe->src_ring->sw_index - 1);
spin_unlock_bh(&ar_pci->ce_lock);
spin_unlock_bh(&ce->ce_lock);
return delta;
}
@ -423,13 +448,13 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
{
struct ath10k *ar = pipe->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
unsigned int write_index = dest_ring->write_index;
unsigned int sw_index = dest_ring->sw_index;
lockdep_assert_held(&ar_pci->ce_lock);
lockdep_assert_held(&ce->ce_lock);
return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}
@ -437,7 +462,7 @@ int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
{
struct ath10k *ar = pipe->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
unsigned int write_index = dest_ring->write_index;
@ -446,7 +471,7 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
u32 ctrl_addr = pipe->ctrl_addr;
lockdep_assert_held(&ar_pci->ce_lock);
lockdep_assert_held(&ce->ce_lock);
if ((pipe->id != 5) &&
CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
@ -486,12 +511,12 @@ void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
{
struct ath10k *ar = pipe->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce *ce = ath10k_ce_priv(ar);
int ret;
spin_lock_bh(&ar_pci->ce_lock);
spin_lock_bh(&ce->ce_lock);
ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr);
spin_unlock_bh(&ar_pci->ce_lock);
spin_unlock_bh(&ce->ce_lock);
return ret;
}
@ -554,14 +579,14 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
unsigned int *nbytesp)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce *ce = ath10k_ce_priv(ar);
int ret;
spin_lock_bh(&ar_pci->ce_lock);
spin_lock_bh(&ce->ce_lock);
ret = ath10k_ce_completed_recv_next_nolock(ce_state,
per_transfer_contextp,
nbytesp);
spin_unlock_bh(&ar_pci->ce_lock);
spin_unlock_bh(&ce->ce_lock);
return ret;
}
@ -576,7 +601,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
unsigned int write_index;
int ret;
struct ath10k *ar;
struct ath10k_pci *ar_pci;
struct ath10k_ce *ce;
dest_ring = ce_state->dest_ring;
@ -584,9 +609,9 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
return -EIO;
ar = ce_state->ar;
ar_pci = ath10k_pci_priv(ar);
ce = ath10k_ce_priv(ar);
spin_lock_bh(&ar_pci->ce_lock);
spin_lock_bh(&ce->ce_lock);
nentries_mask = dest_ring->nentries_mask;
sw_index = dest_ring->sw_index;
@ -614,7 +639,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
ret = -EIO;
}
spin_unlock_bh(&ar_pci->ce_lock);
spin_unlock_bh(&ce->ce_lock);
return ret;
}
@ -686,7 +711,7 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
unsigned int write_index;
int ret;
struct ath10k *ar;
struct ath10k_pci *ar_pci;
struct ath10k_ce *ce;
src_ring = ce_state->src_ring;
@ -694,9 +719,9 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
return -EIO;
ar = ce_state->ar;
ar_pci = ath10k_pci_priv(ar);
ce = ath10k_ce_priv(ar);
spin_lock_bh(&ar_pci->ce_lock);
spin_lock_bh(&ce->ce_lock);
nentries_mask = src_ring->nentries_mask;
sw_index = src_ring->sw_index;
@ -727,7 +752,7 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
ret = -EIO;
}
spin_unlock_bh(&ar_pci->ce_lock);
spin_unlock_bh(&ce->ce_lock);
return ret;
}
@ -736,13 +761,13 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce *ce = ath10k_ce_priv(ar);
int ret;
spin_lock_bh(&ar_pci->ce_lock);
spin_lock_bh(&ce->ce_lock);
ret = ath10k_ce_completed_send_next_nolock(ce_state,
per_transfer_contextp);
spin_unlock_bh(&ar_pci->ce_lock);
spin_unlock_bh(&ce->ce_lock);
return ret;
}
@ -755,17 +780,18 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
*/
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
u32 ctrl_addr = ce_state->ctrl_addr;
spin_lock_bh(&ar_pci->ce_lock);
spin_lock_bh(&ce->ce_lock);
/* Clear the copy-complete interrupts that will be handled here. */
ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->cc_mask);
ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
wm_regs->cc_mask);
spin_unlock_bh(&ar_pci->ce_lock);
spin_unlock_bh(&ce->ce_lock);
if (ce_state->recv_cb)
ce_state->recv_cb(ce_state);
@ -773,7 +799,7 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
if (ce_state->send_cb)
ce_state->send_cb(ce_state);
spin_lock_bh(&ar_pci->ce_lock);
spin_lock_bh(&ce->ce_lock);
/*
* Misc CE interrupts are not being handled, but still need
@ -781,7 +807,7 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
*/
ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->wm_mask);
spin_unlock_bh(&ar_pci->ce_lock);
spin_unlock_bh(&ce->ce_lock);
}
/*
@ -795,7 +821,7 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
int ce_id;
u32 intr_summary;
intr_summary = CE_INTERRUPT_SUMMARY(ar);
intr_summary = ath10k_ce_interrupt_summary(ar);
for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
if (intr_summary & (1 << ce_id))
@ -847,22 +873,25 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar)
void ath10k_ce_enable_interrupts(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce *ce = ath10k_ce_priv(ar);
int ce_id;
struct ath10k_ce_pipe *ce_state;
/* Skip the last copy engine, CE7 the diagnostic window, as that
* uses polling and isn't initialized for interrupts.
*/
for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++)
ath10k_ce_per_engine_handler_adjust(&ar_pci->ce_states[ce_id]);
for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++) {
ce_state = &ce->ce_states[ce_id];
ath10k_ce_per_engine_handler_adjust(ce_state);
}
}
static int ath10k_ce_init_src_ring(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
@ -898,8 +927,8 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
@ -1081,8 +1110,8 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
int ret;
/*
@ -1138,8 +1167,8 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
if (ce_state->src_ring) {
dma_free_coherent(ar->dev,
@ -1168,38 +1197,38 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
void ath10k_ce_dump_registers(struct ath10k *ar,
struct ath10k_fw_crash_data *crash_data)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_crash_data ce;
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_crash_data ce_data;
u32 addr, id;
lockdep_assert_held(&ar->data_lock);
ath10k_err(ar, "Copy Engine register dump:\n");
spin_lock_bh(&ar_pci->ce_lock);
spin_lock_bh(&ce->ce_lock);
for (id = 0; id < CE_COUNT; id++) {
addr = ath10k_ce_base_address(ar, id);
ce.base_addr = cpu_to_le32(addr);
ce_data.base_addr = cpu_to_le32(addr);
ce.src_wr_idx =
ce_data.src_wr_idx =
cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr));
ce.src_r_idx =
ce_data.src_r_idx =
cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr));
ce.dst_wr_idx =
ce_data.dst_wr_idx =
cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr));
ce.dst_r_idx =
ce_data.dst_r_idx =
cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr));
if (crash_data)
crash_data->ce_crash_data[id] = ce;
crash_data->ce_crash_data[id] = ce_data;
ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id,
le32_to_cpu(ce.base_addr),
le32_to_cpu(ce.src_wr_idx),
le32_to_cpu(ce.src_r_idx),
le32_to_cpu(ce.dst_wr_idx),
le32_to_cpu(ce.dst_r_idx));
le32_to_cpu(ce_data.base_addr),
le32_to_cpu(ce_data.src_wr_idx),
le32_to_cpu(ce_data.src_r_idx),
le32_to_cpu(ce_data.dst_wr_idx),
le32_to_cpu(ce_data.dst_r_idx));
}
spin_unlock_bh(&ar_pci->ce_lock);
spin_unlock_bh(&ce->ce_lock);
}

View file

@ -122,6 +122,24 @@ struct ath10k_ce_pipe {
/* Copy Engine settable attributes */
struct ce_attr;
struct ath10k_bus_ops {
u32 (*read32)(struct ath10k *ar, u32 offset);
void (*write32)(struct ath10k *ar, u32 offset, u32 value);
int (*get_num_banks)(struct ath10k *ar);
};
static inline struct ath10k_ce *ath10k_ce_priv(struct ath10k *ar)
{
return (struct ath10k_ce *)ar->ce_priv;
}
struct ath10k_ce {
/* protects CE info */
spinlock_t ce_lock;
const struct ath10k_bus_ops *bus_ops;
struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
};
/*==================Send====================*/
/* ath10k_ce_send flags */
@ -291,9 +309,13 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000
#define CE_INTERRUPT_SUMMARY(ar) \
CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \
ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \
CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS))
static inline u32 ath10k_ce_interrupt_summary(struct ath10k *ar)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
return CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(
ce->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS +
CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
}
#endif /* _CE_H_ */
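The hunks above move the CE state (lock, pipe array, bus_ops) out of struct ath10k_pci into the bus-agnostic struct ath10k_ce reached through ar->ce_priv. A minimal sketch of how a bus backend plugs in, modeled on the PCI and AHB probe hunks in this commit; the foo_* names and the foo_priv() accessor are illustrative, not part of the driver:

/* Sketch only: a hypothetical bus backend for the new CE abstraction.
 * foo_priv() is an assumed accessor returning a per-bus struct that
 * embeds a struct ath10k_ce, like ar_pci->ce does for PCI. */
static u32 foo_bus_read32(struct ath10k *ar, u32 offset)
{
	return ioread32(foo_priv(ar)->mem + offset);
}

static void foo_bus_write32(struct ath10k *ar, u32 offset, u32 value)
{
	iowrite32(value, foo_priv(ar)->mem + offset);
}

static const struct ath10k_bus_ops foo_bus_ops = {
	.read32        = foo_bus_read32,
	.write32       = foo_bus_write32,
	.get_num_banks = foo_get_num_banks,	/* assumed helper */
};

static int foo_probe(struct ath10k *ar)
{
	struct foo_priv *priv = foo_priv(ar);

	spin_lock_init(&priv->ce.ce_lock);
	/* Route all CE register I/O through this bus... */
	priv->ce.bus_ops = &foo_bus_ops;
	/* ...and expose the shared CE state where ath10k_ce_priv()
	 * will find it. */
	ar->ce_priv = &priv->ce;
	return 0;
}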

View file

@ -2516,6 +2516,11 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
ar->hw_ce_regs = &qcax_ce_regs;
ar->hw_values = &qca4019_values;
break;
case ATH10K_HW_WCN3990:
ar->regs = &wcn3990_regs;
ar->hw_ce_regs = &wcn3990_ce_regs;
ar->hw_values = &wcn3990_values;
break;
default:
ath10k_err(ar, "unsupported core hardware revision %d\n",
hw_rev);

View file

@ -993,6 +993,8 @@ struct ath10k {
u32 reg_ack_cts_timeout_orig;
} fw_coverage;
void *ce_priv;
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};

View file

@ -237,7 +237,7 @@ static ssize_t ath10k_read_wmi_services(struct file *file,
{
struct ath10k *ar = file->private_data;
char *buf;
size_t len = 0, buf_len = 4096;
size_t len = 0, buf_len = 8192;
const char *name;
ssize_t ret_cnt;
bool enabled;

View file

@ -192,6 +192,156 @@ const struct ath10k_hw_values qca4019_values = {
.ce_desc_meta_data_lsb = 4,
};
const struct ath10k_hw_regs wcn3990_regs = {
.rtc_soc_base_address = 0x00000000,
.rtc_wmac_base_address = 0x00000000,
.soc_core_base_address = 0x00000000,
.ce_wrapper_base_address = 0x0024C000,
.ce0_base_address = 0x00240000,
.ce1_base_address = 0x00241000,
.ce2_base_address = 0x00242000,
.ce3_base_address = 0x00243000,
.ce4_base_address = 0x00244000,
.ce5_base_address = 0x00245000,
.ce6_base_address = 0x00246000,
.ce7_base_address = 0x00247000,
.ce8_base_address = 0x00248000,
.ce9_base_address = 0x00249000,
.ce10_base_address = 0x0024A000,
.ce11_base_address = 0x0024B000,
.soc_chip_id_address = 0x000000f0,
.soc_reset_control_si0_rst_mask = 0x00000001,
.soc_reset_control_ce_rst_mask = 0x00000100,
.ce_wrap_intr_sum_host_msi_lsb = 0x0000000c,
.ce_wrap_intr_sum_host_msi_mask = 0x00fff000,
.pcie_intr_fw_mask = 0x00100000,
};
static struct ath10k_hw_ce_regs_addr_map wcn3990_src_ring = {
.msb = 0x00000010,
.lsb = 0x00000010,
.mask = GENMASK(17, 17),
};
static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_ring = {
.msb = 0x00000012,
.lsb = 0x00000012,
.mask = GENMASK(18, 18),
};
static struct ath10k_hw_ce_regs_addr_map wcn3990_dmax = {
.msb = 0x00000000,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
static struct ath10k_hw_ce_ctrl1 wcn3990_ctrl1 = {
.addr = 0x00000018,
.src_ring = &wcn3990_src_ring,
.dst_ring = &wcn3990_dst_ring,
.dmax = &wcn3990_dmax,
};
static struct ath10k_hw_ce_regs_addr_map wcn3990_host_ie_cc = {
.mask = GENMASK(0, 0),
};
static struct ath10k_hw_ce_host_ie wcn3990_host_ie = {
.copy_complete = &wcn3990_host_ie_cc,
};
static struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = {
.dstr_lmask = 0x00000010,
.dstr_hmask = 0x00000008,
.srcr_lmask = 0x00000004,
.srcr_hmask = 0x00000002,
.cc_mask = 0x00000001,
.wm_mask = 0x0000001E,
.addr = 0x00000030,
};
static struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = {
.axi_err = 0x00000100,
.dstr_add_err = 0x00000200,
.srcr_len_err = 0x00000100,
.dstr_mlen_vio = 0x00000080,
.dstr_overflow = 0x00000040,
.srcr_overflow = 0x00000020,
.err_mask = 0x000003E0,
.addr = 0x00000038,
};
static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_low = {
.msb = 0x00000000,
.lsb = 0x00000010,
.mask = GENMASK(31, 16),
};
static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_high = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = {
.addr = 0x0000004c,
.low_rst = 0x00000000,
.high_rst = 0x00000000,
.wm_low = &wcn3990_src_wm_low,
.wm_high = &wcn3990_src_wm_high,
};
static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_low = {
.lsb = 0x00000010,
.mask = GENMASK(31, 16),
};
static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_high = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = {
.addr = 0x00000050,
.low_rst = 0x00000000,
.high_rst = 0x00000000,
.wm_low = &wcn3990_dst_wm_low,
.wm_high = &wcn3990_dst_wm_high,
};
struct ath10k_hw_ce_regs wcn3990_ce_regs = {
.sr_base_addr = 0x00000000,
.sr_size_addr = 0x00000008,
.dr_base_addr = 0x0000000c,
.dr_size_addr = 0x00000014,
.misc_ie_addr = 0x00000034,
.sr_wr_index_addr = 0x0000003c,
.dst_wr_index_addr = 0x00000040,
.current_srri_addr = 0x00000044,
.current_drri_addr = 0x00000048,
.ddr_addr_for_rri_low = 0x00000004,
.ddr_addr_for_rri_high = 0x00000008,
.ce_rri_low = 0x0024C004,
.ce_rri_high = 0x0024C008,
.host_ie_addr = 0x0000002c,
.ctrl1_regs = &wcn3990_ctrl1,
.host_ie = &wcn3990_host_ie,
.wm_regs = &wcn3990_wm_reg,
.misc_regs = &wcn3990_misc_reg,
.wm_srcr = &wcn3990_wm_src_ring,
.wm_dstr = &wcn3990_wm_dst_ring,
};
const struct ath10k_hw_values wcn3990_values = {
.rtc_state_val_on = 5,
.ce_count = 12,
.msi_assign_ce_max = 12,
.num_target_ce_config_wlan = 12,
.ce_desc_meta_data_mask = 0xFFF0,
.ce_desc_meta_data_lsb = 4,
};
static struct ath10k_hw_ce_regs_addr_map qcax_src_ring = {
.msb = 0x00000010,
.lsb = 0x00000010,

View file

@ -231,6 +231,7 @@ enum ath10k_hw_rev {
ATH10K_HW_QCA9377,
ATH10K_HW_QCA4019,
ATH10K_HW_QCA9887,
ATH10K_HW_WCN3990,
};
struct ath10k_hw_regs {
@ -247,6 +248,10 @@ struct ath10k_hw_regs {
u32 ce5_base_address;
u32 ce6_base_address;
u32 ce7_base_address;
u32 ce8_base_address;
u32 ce9_base_address;
u32 ce10_base_address;
u32 ce11_base_address;
u32 soc_reset_control_si0_rst_mask;
u32 soc_reset_control_ce_rst_mask;
u32 soc_chip_id_address;
@ -267,6 +272,7 @@ extern const struct ath10k_hw_regs qca988x_regs;
extern const struct ath10k_hw_regs qca6174_regs;
extern const struct ath10k_hw_regs qca99x0_regs;
extern const struct ath10k_hw_regs qca4019_regs;
extern const struct ath10k_hw_regs wcn3990_regs;
struct ath10k_hw_ce_regs_addr_map {
u32 msb;
@ -362,6 +368,8 @@ extern const struct ath10k_hw_values qca6174_values;
extern const struct ath10k_hw_values qca99x0_values;
extern const struct ath10k_hw_values qca9888_values;
extern const struct ath10k_hw_values qca4019_values;
extern const struct ath10k_hw_values wcn3990_values;
extern struct ath10k_hw_ce_regs wcn3990_ce_regs;
extern struct ath10k_hw_ce_regs qcax_ce_regs;
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
@ -375,6 +383,7 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
#define QCA_REV_9984(ar) ((ar)->hw_rev == ATH10K_HW_QCA9984)
#define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377)
#define QCA_REV_40XX(ar) ((ar)->hw_rev == ATH10K_HW_QCA4019)
#define QCA_REV_WCN3990(ar) ((ar)->hw_rev == ATH10K_HW_WCN3990)
/* Known peculiarities:
* - raw appears in nwifi decap, raw and nwifi appear in ethernet decap

View file

@ -672,16 +672,16 @@ static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce *ce = ath10k_ce_priv(ar);
ar_pci->bus_ops->write32(ar, offset, value);
ce->bus_ops->write32(ar, offset, value);
}
inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce *ce = ath10k_ce_priv(ar);
return ar_pci->bus_ops->read32(ar, offset);
return ce->bus_ops->read32(ar, offset);
}
u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
@ -761,7 +761,7 @@ static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
struct ath10k *ar = pipe->hif_ce_state;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
struct sk_buff *skb;
dma_addr_t paddr;
@ -784,9 +784,9 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
ATH10K_SKB_RXCB(skb)->paddr = paddr;
spin_lock_bh(&ar_pci->ce_lock);
spin_lock_bh(&ce->ce_lock);
ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
spin_unlock_bh(&ar_pci->ce_lock);
spin_unlock_bh(&ce->ce_lock);
if (ret) {
dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
@ -801,6 +801,7 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
struct ath10k *ar = pipe->hif_ce_state;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
int ret, num;
@ -810,9 +811,9 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
if (!ce_pipe->dest_ring)
return;
spin_lock_bh(&ar_pci->ce_lock);
spin_lock_bh(&ce->ce_lock);
num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
spin_unlock_bh(&ar_pci->ce_lock);
spin_unlock_bh(&ce->ce_lock);
while (num >= 0) {
ret = __ath10k_pci_rx_post_buf(pipe);
@ -882,6 +883,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
int nbytes)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce *ce = ath10k_ce_priv(ar);
int ret = 0;
u32 *buf;
unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
@ -892,7 +894,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
void *data_buf = NULL;
int i;
spin_lock_bh(&ar_pci->ce_lock);
spin_lock_bh(&ce->ce_lock);
ce_diag = ar_pci->ce_diag;
@ -986,7 +988,7 @@ done:
dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
ce_data_base);
spin_unlock_bh(&ar_pci->ce_lock);
spin_unlock_bh(&ce->ce_lock);
return ret;
}
@ -1034,6 +1036,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
const void *data, int nbytes)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce *ce = ath10k_ce_priv(ar);
int ret = 0;
u32 *buf;
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
@ -1043,7 +1046,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
dma_addr_t ce_data_base = 0;
int i;
spin_lock_bh(&ar_pci->ce_lock);
spin_lock_bh(&ce->ce_lock);
ce_diag = ar_pci->ce_diag;
@ -1147,7 +1150,7 @@ done:
ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
address, ret);
spin_unlock_bh(&ar_pci->ce_lock);
spin_unlock_bh(&ce->ce_lock);
return ret;
}
@ -1342,6 +1345,7 @@ int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
struct ath10k_hif_sg_item *items, int n_items)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
@ -1350,7 +1354,7 @@ int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
unsigned int write_index;
int err, i = 0;
spin_lock_bh(&ar_pci->ce_lock);
spin_lock_bh(&ce->ce_lock);
nentries_mask = src_ring->nentries_mask;
sw_index = src_ring->sw_index;
@ -1396,14 +1400,14 @@ int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
if (err)
goto err;
spin_unlock_bh(&ar_pci->ce_lock);
spin_unlock_bh(&ce->ce_lock);
return 0;
err:
for (; i > 0; i--)
__ath10k_ce_send_revert(ce_pipe);
spin_unlock_bh(&ar_pci->ce_lock);
spin_unlock_bh(&ce->ce_lock);
return err;
}
@ -1593,6 +1597,8 @@ void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
* to mask irq/MSI.
*/
break;
case ATH10K_HW_WCN3990:
break;
}
}
@ -1619,6 +1625,8 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
* to unmask irq/MSI.
*/
break;
case ATH10K_HW_WCN3990:
break;
}
}
@ -2000,9 +2008,9 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
static int ath10k_bus_get_num_banks(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce *ce = ath10k_ce_priv(ar);
return ar_pci->bus_ops->get_num_banks(ar);
return ce->bus_ops->get_num_banks(ar);
}
int ath10k_pci_init_config(struct ath10k *ar)
@ -2173,11 +2181,12 @@ int ath10k_pci_alloc_pipes(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pipe;
struct ath10k_ce *ce = ath10k_ce_priv(ar);
int i, ret;
for (i = 0; i < CE_COUNT; i++) {
pipe = &ar_pci->pipe_info[i];
pipe->ce_hdl = &ar_pci->ce_states[i];
pipe->ce_hdl = &ce->ce_states[i];
pipe->pipe_num = i;
pipe->hif_ce_state = ar;
@ -2825,7 +2834,7 @@ static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
* interrupts safer to check for pending interrupts for
* immediate servicing.
*/
if (CE_INTERRUPT_SUMMARY(ar)) {
if (ath10k_ce_interrupt_summary(ar)) {
napi_reschedule(ctx);
goto out;
}
@ -3142,9 +3151,10 @@ static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
int ath10k_pci_setup_resource(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce *ce = ath10k_ce_priv(ar);
int ret;
spin_lock_init(&ar_pci->ce_lock);
spin_lock_init(&ce->ce_lock);
spin_lock_init(&ar_pci->ps_lock);
setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
@ -3263,10 +3273,11 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar_pci->ar = ar;
ar->dev_id = pci_dev->device;
ar_pci->pci_ps = pci_ps;
ar_pci->bus_ops = &ath10k_pci_bus_ops;
ar_pci->ce.bus_ops = &ath10k_pci_bus_ops;
ar_pci->pci_soft_reset = pci_soft_reset;
ar_pci->pci_hard_reset = pci_hard_reset;
ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
ar->ce_priv = &ar_pci->ce;
ar->id.vendor = pdev->vendor;
ar->id.device = pdev->device;

View file

@ -150,12 +150,6 @@ struct ath10k_pci_supp_chip {
u32 rev_id;
};
struct ath10k_bus_ops {
u32 (*read32)(struct ath10k *ar, u32 offset);
void (*write32)(struct ath10k *ar, u32 offset, u32 value);
int (*get_num_banks)(struct ath10k *ar);
};
enum ath10k_pci_irq_mode {
ATH10K_PCI_IRQ_AUTO = 0,
ATH10K_PCI_IRQ_LEGACY = 1,
@ -177,11 +171,7 @@ struct ath10k_pci {
/* Copy Engine used for Diagnostic Accesses */
struct ath10k_ce_pipe *ce_diag;
/* FIXME: document what this really protects */
spinlock_t ce_lock;
/* Map CE id to ce_state */
struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
struct ath10k_ce ce;
struct timer_list rx_post_retry;
/* Due to HW quirks it is recommended to disable ASPM during device
@ -225,8 +215,6 @@ struct ath10k_pci {
*/
bool pci_ps;
const struct ath10k_bus_ops *bus_ops;
/* Chip specific pci reset routine used to do a safe reset */
int (*pci_soft_reset)(struct ath10k *ar);

View file

@ -3305,7 +3305,7 @@ static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
if (arvif->u.ap.noa_data)
if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
skb_put_data(bcn, arvif->u.ap.noa_data,
arvif->u.ap.noa_len);
arvif->u.ap.noa_len);
}
static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,

View file

@ -1951,7 +1951,7 @@ static const struct dev_pm_ops brcmf_pciedrvr_pm = {
BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
subvend, subdev, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
static struct pci_device_id brcmf_pcie_devid_table[] = {
static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
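The only change in this hunk is const-qualifying the device table; it is read-only data consumed through struct pci_driver's .id_table (a const pointer) and MODULE_DEVICE_TABLE(), so it can live in rodata. The generic pattern, with placeholder IDs rather than Broadcom's:

/* Sketch of a const PCI ID table; vendor/device values are placeholders. */
static const struct pci_device_id example_devid_table[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },
	{ /* terminating all-zero entry */ }
};
MODULE_DEVICE_TABLE(pci, example_devid_table);

static struct pci_driver example_driver = {
	.name     = "example",
	.id_table = example_devid_table,	/* takes a const pointer */
};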

View file

@ -1724,7 +1724,7 @@ static const struct libipw_geo ipw_geos[] = {
static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
{
unsigned long flags;
int rc = 0;
int err = 0;
u32 lock;
u32 ord_len = sizeof(lock);
@ -1757,33 +1757,33 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
if (priv->status & STATUS_POWERED ||
(priv->status & STATUS_RESET_PENDING)) {
/* Power cycle the card ... */
if (ipw2100_power_cycle_adapter(priv)) {
err = ipw2100_power_cycle_adapter(priv);
if (err) {
printk(KERN_WARNING DRV_NAME
": %s: Could not cycle adapter.\n",
priv->net_dev->name);
rc = 1;
goto exit;
}
} else
priv->status |= STATUS_POWERED;
/* Load the firmware, start the clocks, etc. */
if (ipw2100_start_adapter(priv)) {
err = ipw2100_start_adapter(priv);
if (err) {
printk(KERN_ERR DRV_NAME
": %s: Failed to start the firmware.\n",
priv->net_dev->name);
rc = 1;
goto exit;
}
ipw2100_initialize_ordinals(priv);
/* Determine capabilities of this particular HW configuration */
if (ipw2100_get_hw_features(priv)) {
err = ipw2100_get_hw_features(priv);
if (err) {
printk(KERN_ERR DRV_NAME
": %s: Failed to determine HW features.\n",
priv->net_dev->name);
rc = 1;
goto exit;
}
@ -1792,11 +1792,11 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
priv->ieee->freq_band = LIBIPW_24GHZ_BAND;
lock = LOCK_NONE;
if (ipw2100_set_ordinal(priv, IPW_ORD_PERS_DB_LOCK, &lock, &ord_len)) {
err = ipw2100_set_ordinal(priv, IPW_ORD_PERS_DB_LOCK, &lock, &ord_len);
if (err) {
printk(KERN_ERR DRV_NAME
": %s: Failed to clear ordinal lock.\n",
priv->net_dev->name);
rc = 1;
goto exit;
}
@ -1820,21 +1820,21 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
/* Send all of the commands that must be sent prior to
* HOST_COMPLETE */
if (ipw2100_adapter_setup(priv)) {
err = ipw2100_adapter_setup(priv);
if (err) {
printk(KERN_ERR DRV_NAME ": %s: Failed to start the card.\n",
priv->net_dev->name);
rc = 1;
goto exit;
}
if (!deferred) {
/* Enable the adapter - sends HOST_COMPLETE */
if (ipw2100_enable_adapter(priv)) {
err = ipw2100_enable_adapter(priv);
if (err) {
printk(KERN_ERR DRV_NAME ": "
"%s: failed in call to enable adapter.\n",
priv->net_dev->name);
ipw2100_hw_stop_adapter(priv);
rc = 1;
goto exit;
}
@ -1844,7 +1844,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
}
exit:
return rc;
return err;
}
static void ipw2100_down(struct ipw2100_priv *priv)
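The theme of the hunks above: ipw2100_up() used to collapse every failure into rc = 1; now it stores each callee's return value in err and propagates it, so callers see the real error code. The pattern in isolation (a generic sketch, not driver code):

/* Before: the reason for the failure is thrown away. */
if (do_step(priv)) {
	rc = 1;
	goto exit;
}

/* After: the callee's code (-EIO, -ENOMEM, ...) survives to the caller. */
err = do_step(priv);
if (err)
	goto exit;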

View file

@ -11,6 +11,8 @@ iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/a000.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += fw/notif-wait.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o
iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o
iwlwifi-objs += $(iwlwifi-m)

View file

@ -1437,22 +1437,6 @@ struct agg_tx_status {
__le16 sequence;
} __packed;
/*
* definitions for initial rate index field
* bits [3:0] initial rate index
* bits [6:4] rate table color, used for the initial rate
* bit-7 invalid rate indication
* i.e. rate was not chosen from rate table
* or rate table color was changed during frame retries
* refer tlc rate info
*/
#define IWL50_TX_RES_INIT_RATE_INDEX_POS 0
#define IWL50_TX_RES_INIT_RATE_INDEX_MSK 0x0f
#define IWL50_TX_RES_RATE_TABLE_COLOR_POS 4
#define IWL50_TX_RES_RATE_TABLE_COLOR_MSK 0x70
#define IWL50_TX_RES_INV_RATE_INDEX_MSK 0x80
/* refer to ra_tid */
#define IWLAGN_TX_RES_TID_POS 0
#define IWLAGN_TX_RES_TID_MSK 0x0f

View file

@ -0,0 +1,206 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_fw_api_alive_h__
#define __iwl_fw_api_alive_h__
/* alive response is_valid values */
#define ALIVE_RESP_UCODE_OK BIT(0)
#define ALIVE_RESP_RFKILL BIT(1)
/* alive response ver_type values */
enum {
FW_TYPE_HW = 0,
FW_TYPE_PROT = 1,
FW_TYPE_AP = 2,
FW_TYPE_WOWLAN = 3,
FW_TYPE_TIMING = 4,
FW_TYPE_WIPAN = 5
};
/* alive response ver_subtype values */
enum {
FW_SUBTYPE_FULL_FEATURE = 0,
FW_SUBTYPE_BOOTSRAP = 1, /* Not valid */
FW_SUBTYPE_REDUCED = 2,
FW_SUBTYPE_ALIVE_ONLY = 3,
FW_SUBTYPE_WOWLAN = 4,
FW_SUBTYPE_AP_SUBTYPE = 5,
FW_SUBTYPE_WIPAN = 6,
FW_SUBTYPE_INITIALIZE = 9
};
#define IWL_ALIVE_STATUS_ERR 0xDEAD
#define IWL_ALIVE_STATUS_OK 0xCAFE
#define IWL_ALIVE_FLG_RFKILL BIT(0)
struct iwl_lmac_alive {
__le32 ucode_minor;
__le32 ucode_major;
u8 ver_subtype;
u8 ver_type;
u8 mac;
u8 opt;
__le32 timestamp;
__le32 error_event_table_ptr; /* SRAM address for error log */
__le32 log_event_table_ptr; /* SRAM address for LMAC event log */
__le32 cpu_register_ptr;
__le32 dbgm_config_ptr;
__le32 alive_counter_ptr;
__le32 scd_base_ptr; /* SRAM address for SCD */
__le32 st_fwrd_addr; /* pointer to Store and forward */
__le32 st_fwrd_size;
} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */
struct iwl_umac_alive {
__le32 umac_minor; /* UMAC version: minor */
__le32 umac_major; /* UMAC version: major */
__le32 error_info_addr; /* SRAM address for UMAC error log */
__le32 dbg_print_buff_addr;
} __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */
struct mvm_alive_resp_v3 {
__le16 status;
__le16 flags;
struct iwl_lmac_alive lmac_data;
struct iwl_umac_alive umac_data;
} __packed; /* ALIVE_RES_API_S_VER_3 */
struct mvm_alive_resp {
__le16 status;
__le16 flags;
struct iwl_lmac_alive lmac_data[2];
struct iwl_umac_alive umac_data;
} __packed; /* ALIVE_RES_API_S_VER_4 */
/**
* enum iwl_extended_cfg_flags - commands driver may send before
* finishing init flow
* @IWL_INIT_DEBUG_CFG: driver is going to send debug config command
* @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands
* @IWL_INIT_PHY: driver is going to send PHY_DB commands
*/
enum iwl_extended_cfg_flags {
IWL_INIT_DEBUG_CFG,
IWL_INIT_NVM,
IWL_INIT_PHY,
};
/**
* struct iwl_init_extended_cfg_cmd - mark what commands ucode should wait for
* before finishing init flows
* @init_flags: values from iwl_extended_cfg_flags
*/
struct iwl_init_extended_cfg_cmd {
__le32 init_flags;
} __packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */
/**
* struct iwl_radio_version_notif - information on the radio version
* ( RADIO_VERSION_NOTIFICATION = 0x68 )
* @radio_flavor: radio flavor
* @radio_step: radio version step
* @radio_dash: radio version dash
*/
struct iwl_radio_version_notif {
__le32 radio_flavor;
__le32 radio_step;
__le32 radio_dash;
} __packed; /* RADIO_VERSION_NOTOFICATION_S_VER_1 */
enum iwl_card_state_flags {
CARD_ENABLED = 0x00,
HW_CARD_DISABLED = 0x01,
SW_CARD_DISABLED = 0x02,
CT_KILL_CARD_DISABLED = 0x04,
HALT_CARD_DISABLED = 0x08,
CARD_DISABLED_MSK = 0x0f,
CARD_IS_RX_ON = 0x10,
};
/**
* struct iwl_card_state_notif - information on the card state
* ( CARD_STATE_NOTIFICATION = 0xa1 )
* @flags: &enum iwl_card_state_flags
*/
struct iwl_card_state_notif {
__le32 flags;
} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
/**
* struct iwl_fseq_ver_mismatch_ntf - Notification about version
*
* This notification does not have a direct impact on the init flow.
* It means that another core (not WiFi) has initiated the FSEQ flow
* and updated the FSEQ version. The driver only prints an error when
* this occurs.
*
* @aux_read_fseq_ver: auxiliary read FSEQ version
* @wifi_fseq_ver: FSEQ version (embedded in WiFi)
*/
struct iwl_fseq_ver_mismatch_ntf {
__le32 aux_read_fseq_ver;
__le32 wifi_fseq_ver;
} __packed; /* FSEQ_VER_MISMATCH_NTFY_API_S_VER_1 */
#endif /* __iwl_fw_api_alive_h__ */
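As a usage sketch of the status and flag definitions in this new header, a driver-side sanity check of the v4 alive response could look like this; example_alive_ok() is illustrative, not a function from this series:

/* Illustrative consumer of the alive response definitions above. */
static bool example_alive_ok(const struct mvm_alive_resp *resp)
{
	if (le16_to_cpu(resp->status) != IWL_ALIVE_STATUS_OK)
		return false;	/* firmware reported 0xDEAD or garbage */
	if (le16_to_cpu(resp->flags) & IWL_ALIVE_FLG_RFKILL)
		return false;	/* radio is kill-switched */
	return true;
}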

View file

@ -0,0 +1,144 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_fw_api_binding_h__
#define __iwl_fw_api_binding_h__
#define MAX_MACS_IN_BINDING (3)
#define MAX_BINDINGS (4)
/**
* struct iwl_binding_cmd_v1 - configuring bindings
* ( BINDING_CONTEXT_CMD = 0x2b )
* @id_and_color: ID and color of the relevant Binding,
* &enum iwl_ctxt_id_and_color
* @action: action to perform, one of FW_CTXT_ACTION_*
* @macs: array of MAC id and colors which belong to the binding,
* &enum iwl_ctxt_id_and_color
* @phy: PHY id and color which belongs to the binding,
* &enum iwl_ctxt_id_and_color
*/
struct iwl_binding_cmd_v1 {
/* COMMON_INDEX_HDR_API_S_VER_1 */
__le32 id_and_color;
__le32 action;
/* BINDING_DATA_API_S_VER_1 */
__le32 macs[MAX_MACS_IN_BINDING];
__le32 phy;
} __packed; /* BINDING_CMD_API_S_VER_1 */
/**
* struct iwl_binding_cmd - configuring bindings
* ( BINDING_CONTEXT_CMD = 0x2b )
* @id_and_color: ID and color of the relevant Binding,
* &enum iwl_ctxt_id_and_color
* @action: action to perform, one of FW_CTXT_ACTION_*
* @macs: array of MAC id and colors which belong to the binding
* &enum iwl_ctxt_id_and_color
* @phy: PHY id and color which belongs to the binding
* &enum iwl_ctxt_id_and_color
* @lmac_id: the lmac id the binding belongs to
*/
struct iwl_binding_cmd {
/* COMMON_INDEX_HDR_API_S_VER_1 */
__le32 id_and_color;
__le32 action;
/* BINDING_DATA_API_S_VER_1 */
__le32 macs[MAX_MACS_IN_BINDING];
__le32 phy;
__le32 lmac_id;
} __packed; /* BINDING_CMD_API_S_VER_2 */
#define IWL_BINDING_CMD_SIZE_V1 sizeof(struct iwl_binding_cmd_v1)
#define IWL_LMAC_24G_INDEX 0
#define IWL_LMAC_5G_INDEX 1
/* The maximal number of fragments in the FW's schedule session */
#define IWL_MVM_MAX_QUOTA 128
/**
* struct iwl_time_quota_data - configuration of time quota per binding
* @id_and_color: ID and color of the relevant Binding,
* &enum iwl_ctxt_id_and_color
* @quota: absolute time quota in TU. The scheduler will try to divide the
* remaining quota (after Time Events) according to this quota.
* @max_duration: max uninterrupted context duration in TU
*/
struct iwl_time_quota_data {
__le32 id_and_color;
__le32 quota;
__le32 max_duration;
} __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */
/**
* struct iwl_time_quota_cmd - configuration of time quota between bindings
* ( TIME_QUOTA_CMD = 0x2c )
* @quotas: allocations per binding
* Note: on non-CDB the fourth one is the auxiliary mac and is
* essentially zero.
* On CDB the fourth one is a regular binding.
*/
struct iwl_time_quota_cmd {
struct iwl_time_quota_data quotas[MAX_BINDINGS];
} __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */
#endif /* __iwl_fw_api_binding_h__ */
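To make the quota semantics concrete: splitting the schedule session evenly between two active bindings would fill the command roughly as below. This is a hedged sketch; the id_and_color inputs are assumed to come from the caller's binding contexts.

/* Sketch: even split of the IWL_MVM_MAX_QUOTA (128) schedule session
 * between two bindings; unused entries stay zeroed. */
struct iwl_time_quota_cmd cmd = {};

cmd.quotas[0].id_and_color = cpu_to_le32(binding_a_id_and_color);
cmd.quotas[0].quota        = cpu_to_le32(IWL_MVM_MAX_QUOTA / 2);
cmd.quotas[0].max_duration = cpu_to_le32(0);	/* no cap */

cmd.quotas[1].id_and_color = cpu_to_le32(binding_b_id_and_color);
cmd.quotas[1].quota        = cpu_to_le32(IWL_MVM_MAX_QUOTA / 2);
cmd.quotas[1].max_duration = cpu_to_le32(0);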

View file

@ -59,8 +59,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_fw_api_h__
#define __iwl_fw_api_h__
#ifndef __iwl_fw_api_cmdhdr_h__
#define __iwl_fw_api_cmdhdr_h__
/**
* DOC: Host command section
@ -112,15 +112,24 @@ static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
#define IWL_ALWAYS_LONG_GROUP 1
/**
* struct iwl_cmd_header
* struct iwl_cmd_header - (short) command header format
*
* This header format appears at the beginning of each command sent from the
* driver, and each response/notification received from uCode.
*/
struct iwl_cmd_header {
u8 cmd; /* Command ID: REPLY_RXON, etc. */
/**
* @cmd: Command ID: REPLY_RXON, etc.
*/
u8 cmd;
/**
* @group_id: group ID, for commands with groups
*/
u8 group_id;
/*
/**
* @sequence:
* Sequence number for the command.
*
* The driver sets up the sequence number to values of its choosing.
* uCode does not use this value, but passes it back to the driver
* when sending the response to each driver-originated command, so
@ -150,6 +159,13 @@ struct iwl_cmd_header {
* driver, and each response/notification received from uCode.
* This is the wide version, which contains more information about the command,
* such as length, version and command type
*
* @cmd: command ID, like in &struct iwl_cmd_header
* @group_id: group ID, like in &struct iwl_cmd_header
* @sequence: sequence, like in &struct iwl_cmd_header
* @length: length of the command
* @reserved: reserved
* @version: command version
*/
struct iwl_cmd_header_wide {
u8 cmd;
@ -160,48 +176,6 @@ struct iwl_cmd_header_wide {
u8 version;
} __packed;
/**
* iwl_tx_queue_cfg_actions - TXQ config options
* @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue
* @TX_QUEUE_CFG_TFD_SHORT_FORMAT: use short TFD format
*/
enum iwl_tx_queue_cfg_actions {
TX_QUEUE_CFG_ENABLE_QUEUE = BIT(0),
TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1),
};
/**
* struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
* @sta_id: station id
* @tid: tid of the queue
* @flags: see &enum iwl_tx_queue_cfg_actions
* @cb_size: size of TFD cyclic buffer. Value is exponent - 3.
* Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs)
* @byte_cnt_addr: address of byte count table
* @tfdq_addr: address of TFD circular buffer
*/
struct iwl_tx_queue_cfg_cmd {
u8 sta_id;
u8 tid;
__le16 flags;
__le32 cb_size;
__le64 byte_cnt_addr;
__le64 tfdq_addr;
} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */
/**
* struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config
* @queue_number: queue number assigned to this RA -TID
* @flags: set on failure
* @write_pointer: initial value for write pointer
*/
struct iwl_tx_queue_cfg_rsp {
__le16 queue_number;
__le16 flags;
__le16 write_pointer;
__le16 reserved;
} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */
/**
* struct iwl_calib_res_notif_phy_db - Receive phy db chunk after calibrations
* @type: type of the result - mostly ignored
@ -226,4 +200,12 @@ struct iwl_phy_db_cmd {
u8 data[];
} __packed;
#endif /* __iwl_fw_api_h__*/
/**
* struct iwl_cmd_response - generic response struct for most commands
* @status: status of the command asked, changes for each one
*/
struct iwl_cmd_response {
__le32 status;
};
#endif /* __iwl_fw_api_cmdhdr_h__ */


@ -18,11 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@ -64,8 +59,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#ifndef __fw_api_bt_coex_h__
#define __fw_api_bt_coex_h__
#ifndef __iwl_fw_api_coex_h__
#define __iwl_fw_api_coex_h__
#include <linux/types.h>
#include <linux/bitops.h>
@ -254,4 +249,4 @@ struct iwl_bt_coex_profile_notif {
u8 reserved[3];
} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */
#endif /* __fw_api_bt_coex_h__ */
#endif /* __iwl_fw_api_coex_h__ */


@ -0,0 +1,664 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_fw_api_commands_h__
#define __iwl_fw_api_commands_h__
/**
* enum iwl_mvm_command_groups - command groups for the firmware
* @LEGACY_GROUP: legacy group, uses command IDs from &enum iwl_legacy_cmds
* @LONG_GROUP: legacy group with long header, also uses command IDs
* from &enum iwl_legacy_cmds
* @SYSTEM_GROUP: system group, uses command IDs from
* &enum iwl_system_subcmd_ids
* @MAC_CONF_GROUP: MAC configuration group, uses command IDs from
* &enum iwl_mac_conf_subcmd_ids
* @PHY_OPS_GROUP: PHY operations group, uses command IDs from
* &enum iwl_phy_ops_subcmd_ids
* @DATA_PATH_GROUP: data path group, uses command IDs from
* &enum iwl_data_path_subcmd_ids
* @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids
* @TOF_GROUP: TOF group, uses command IDs from &enum iwl_tof_subcmd_ids
* @PROT_OFFLOAD_GROUP: protocol offload group, uses command IDs from
* &enum iwl_prot_offload_subcmd_ids
* @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from
* &enum iwl_regulatory_and_nvm_subcmd_ids
* @DEBUG_GROUP: Debug group, uses command IDs from &enum iwl_debug_cmds
*/
enum iwl_mvm_command_groups {
LEGACY_GROUP = 0x0,
LONG_GROUP = 0x1,
SYSTEM_GROUP = 0x2,
MAC_CONF_GROUP = 0x3,
PHY_OPS_GROUP = 0x4,
DATA_PATH_GROUP = 0x5,
NAN_GROUP = 0x7,
TOF_GROUP = 0x8,
PROT_OFFLOAD_GROUP = 0xb,
REGULATORY_AND_NVM_GROUP = 0xc,
DEBUG_GROUP = 0xf,
};
/**
* enum iwl_legacy_cmds - legacy group command IDs
*/
enum iwl_legacy_cmds {
/**
* @MVM_ALIVE:
* Alive data from the firmware, as described in
* &struct mvm_alive_resp_v3 or &struct mvm_alive_resp.
*/
MVM_ALIVE = 0x1,
/**
* @REPLY_ERROR: Cause an error in the firmware, for testing purposes.
*/
REPLY_ERROR = 0x2,
/**
* @ECHO_CMD: Send data to the device to have it returned immediately.
*/
ECHO_CMD = 0x3,
/**
* @INIT_COMPLETE_NOTIF: Notification that initialization is complete.
*/
INIT_COMPLETE_NOTIF = 0x4,
/**
* @PHY_CONTEXT_CMD:
* Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd.
*/
PHY_CONTEXT_CMD = 0x8,
/**
* @DBG_CFG: Debug configuration command.
*/
DBG_CFG = 0x9,
/**
* @ANTENNA_COUPLING_NOTIFICATION:
* Antenna coupling data, &struct iwl_mvm_antenna_coupling_notif
*/
ANTENNA_COUPLING_NOTIFICATION = 0xa,
/**
* @SCAN_ITERATION_COMPLETE_UMAC:
* Firmware indicates a scan iteration completed, using
* &struct iwl_umac_scan_iter_complete_notif.
*/
SCAN_ITERATION_COMPLETE_UMAC = 0xb5,
/**
* @SCAN_CFG_CMD:
* uses &struct iwl_scan_config_v1 or &struct iwl_scan_config
*/
SCAN_CFG_CMD = 0xc,
/**
* @SCAN_REQ_UMAC: uses &struct iwl_scan_req_umac
*/
SCAN_REQ_UMAC = 0xd,
/**
* @SCAN_ABORT_UMAC: uses &struct iwl_umac_scan_abort
*/
SCAN_ABORT_UMAC = 0xe,
/**
* @SCAN_COMPLETE_UMAC: uses &struct iwl_umac_scan_complete
*/
SCAN_COMPLETE_UMAC = 0xf,
/**
* @BA_WINDOW_STATUS_NOTIFICATION_ID:
* uses &struct iwl_ba_window_status_notif
*/
BA_WINDOW_STATUS_NOTIFICATION_ID = 0x13,
/**
* @ADD_STA_KEY:
* &struct iwl_mvm_add_sta_key_cmd_v1 or
* &struct iwl_mvm_add_sta_key_cmd.
*/
ADD_STA_KEY = 0x17,
/**
* @ADD_STA:
* &struct iwl_mvm_add_sta_cmd or &struct iwl_mvm_add_sta_cmd_v7.
*/
ADD_STA = 0x18,
/**
* @REMOVE_STA: &struct iwl_mvm_rm_sta_cmd
*/
REMOVE_STA = 0x19,
/**
* @FW_GET_ITEM_CMD: uses &struct iwl_fw_get_item_cmd
*/
FW_GET_ITEM_CMD = 0x1a,
/**
* @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2,
* response in &struct iwl_mvm_tx_resp or
* &struct iwl_mvm_tx_resp_v3
*/
TX_CMD = 0x1c,
/**
* @TXPATH_FLUSH: &struct iwl_tx_path_flush_cmd
*/
TXPATH_FLUSH = 0x1e,
/**
* @MGMT_MCAST_KEY:
* &struct iwl_mvm_mgmt_mcast_key_cmd or
* &struct iwl_mvm_mgmt_mcast_key_cmd_v1
*/
MGMT_MCAST_KEY = 0x1f,
/* scheduler config */
/**
* @SCD_QUEUE_CFG: &struct iwl_scd_txq_cfg_cmd for older hardware,
* &struct iwl_tx_queue_cfg_cmd with &struct iwl_tx_queue_cfg_rsp
* for newer (A000) hardware.
*/
SCD_QUEUE_CFG = 0x1d,
/**
* @WEP_KEY: uses &struct iwl_mvm_wep_key_cmd
*/
WEP_KEY = 0x20,
/**
* @SHARED_MEM_CFG:
* retrieve shared memory configuration - response in
* &struct iwl_shared_mem_cfg
*/
SHARED_MEM_CFG = 0x25,
/**
* @TDLS_CHANNEL_SWITCH_CMD: uses &struct iwl_tdls_channel_switch_cmd
*/
TDLS_CHANNEL_SWITCH_CMD = 0x27,
/**
* @TDLS_CHANNEL_SWITCH_NOTIFICATION:
* uses &struct iwl_tdls_channel_switch_notif
*/
TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa,
/**
* @TDLS_CONFIG_CMD:
* &struct iwl_tdls_config_cmd, response in &struct iwl_tdls_config_res
*/
TDLS_CONFIG_CMD = 0xa7,
/**
* @MAC_CONTEXT_CMD: &struct iwl_mac_ctx_cmd
*/
MAC_CONTEXT_CMD = 0x28,
/**
* @TIME_EVENT_CMD:
* &struct iwl_time_event_cmd, response in &struct iwl_time_event_resp
*/
TIME_EVENT_CMD = 0x29, /* both CMD and response */
/**
* @TIME_EVENT_NOTIFICATION: &struct iwl_time_event_notif
*/
TIME_EVENT_NOTIFICATION = 0x2a,
/**
* @BINDING_CONTEXT_CMD:
* &struct iwl_binding_cmd or &struct iwl_binding_cmd_v1
*/
BINDING_CONTEXT_CMD = 0x2b,
/**
* @TIME_QUOTA_CMD: &struct iwl_time_quota_cmd
*/
TIME_QUOTA_CMD = 0x2c,
/**
* @NON_QOS_TX_COUNTER_CMD:
* command is &struct iwl_nonqos_seq_query_cmd
*/
NON_QOS_TX_COUNTER_CMD = 0x2d,
/**
* @LQ_CMD: using &struct iwl_lq_cmd
*/
LQ_CMD = 0x4e,
/**
* @FW_PAGING_BLOCK_CMD:
* &struct iwl_fw_paging_cmd
*/
FW_PAGING_BLOCK_CMD = 0x4f,
/**
* @SCAN_OFFLOAD_REQUEST_CMD: uses &struct iwl_scan_req_lmac
*/
SCAN_OFFLOAD_REQUEST_CMD = 0x51,
/**
* @SCAN_OFFLOAD_ABORT_CMD: abort the scan - no further contents
*/
SCAN_OFFLOAD_ABORT_CMD = 0x52,
/**
* @HOT_SPOT_CMD: uses &struct iwl_hs20_roc_req
*/
HOT_SPOT_CMD = 0x53,
/**
* @SCAN_OFFLOAD_COMPLETE:
* notification, &struct iwl_periodic_scan_complete
*/
SCAN_OFFLOAD_COMPLETE = 0x6D,
/**
* @SCAN_OFFLOAD_UPDATE_PROFILES_CMD:
* update scan offload (scheduled scan) profiles/blacklist/etc.
*/
SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E,
/**
* @MATCH_FOUND_NOTIFICATION: scan match found
*/
MATCH_FOUND_NOTIFICATION = 0xd9,
/**
* @SCAN_ITERATION_COMPLETE:
* uses &struct iwl_lmac_scan_complete_notif
*/
SCAN_ITERATION_COMPLETE = 0xe7,
/* Phy */
/**
* @PHY_CONFIGURATION_CMD: &struct iwl_phy_cfg_cmd
*/
PHY_CONFIGURATION_CMD = 0x6a,
/**
* @CALIB_RES_NOTIF_PHY_DB: &struct iwl_calib_res_notif_phy_db
*/
CALIB_RES_NOTIF_PHY_DB = 0x6b,
/**
* @PHY_DB_CMD: &struct iwl_phy_db_cmd
*/
PHY_DB_CMD = 0x6c,
/**
* @TOF_CMD: &struct iwl_tof_config_cmd
*/
TOF_CMD = 0x10,
/**
* @TOF_NOTIFICATION: &struct iwl_tof_gen_resp_cmd
*/
TOF_NOTIFICATION = 0x11,
/**
* @POWER_TABLE_CMD: &struct iwl_device_power_cmd
*/
POWER_TABLE_CMD = 0x77,
/**
* @PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION:
* &struct iwl_uapsd_misbehaving_ap_notif
*/
PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
/**
* @LTR_CONFIG: &struct iwl_ltr_config_cmd
*/
LTR_CONFIG = 0xee,
/**
* @REPLY_THERMAL_MNG_BACKOFF:
* Thermal throttling command
*/
REPLY_THERMAL_MNG_BACKOFF = 0x7e,
/**
* @DC2DC_CONFIG_CMD:
* Set/Get DC2DC frequency tune
* Command is &struct iwl_dc2dc_config_cmd,
* response is &struct iwl_dc2dc_config_resp
*/
DC2DC_CONFIG_CMD = 0x83,
/**
* @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd
*/
NVM_ACCESS_CMD = 0x88,
/**
* @BEACON_NOTIFICATION: &struct iwl_extended_beacon_notif
*/
BEACON_NOTIFICATION = 0x90,
/**
* @BEACON_TEMPLATE_CMD:
* Uses one of &struct iwl_mac_beacon_cmd_v6,
* &struct iwl_mac_beacon_cmd_v7 or &struct iwl_mac_beacon_cmd
* depending on the device version.
*/
BEACON_TEMPLATE_CMD = 0x91,
/**
* @TX_ANT_CONFIGURATION_CMD: &struct iwl_tx_ant_cfg_cmd
*/
TX_ANT_CONFIGURATION_CMD = 0x98,
/**
* @STATISTICS_CMD: &struct iwl_statistics_cmd
*/
STATISTICS_CMD = 0x9c,
/**
* @STATISTICS_NOTIFICATION:
* one of &struct iwl_notif_statistics_v10,
* &struct iwl_notif_statistics_v11,
* &struct iwl_notif_statistics_cdb
*/
STATISTICS_NOTIFICATION = 0x9d,
/**
* @EOSP_NOTIFICATION:
* Notify that a service period ended,
* &struct iwl_mvm_eosp_notification
*/
EOSP_NOTIFICATION = 0x9e,
/**
* @REDUCE_TX_POWER_CMD:
* &struct iwl_dev_tx_power_cmd_v3 or &struct iwl_dev_tx_power_cmd
*/
REDUCE_TX_POWER_CMD = 0x9f,
/**
* @CARD_STATE_NOTIFICATION:
* Card state (RF/CT kill) notification,
* uses &struct iwl_card_state_notif
*/
CARD_STATE_NOTIFICATION = 0xa1,
/**
* @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif
*/
MISSED_BEACONS_NOTIFICATION = 0xa2,
/**
* @MAC_PM_POWER_TABLE: using &struct iwl_mac_power_cmd
*/
MAC_PM_POWER_TABLE = 0xa9,
/**
* @MFUART_LOAD_NOTIFICATION: &struct iwl_mfuart_load_notif
*/
MFUART_LOAD_NOTIFICATION = 0xb1,
/**
* @RSS_CONFIG_CMD: &struct iwl_rss_config_cmd
*/
RSS_CONFIG_CMD = 0xb3,
/**
* @REPLY_RX_PHY_CMD: &struct iwl_rx_phy_info
*/
REPLY_RX_PHY_CMD = 0xc0,
/**
* @REPLY_RX_MPDU_CMD:
* &struct iwl_rx_mpdu_res_start or &struct iwl_rx_mpdu_desc
*/
REPLY_RX_MPDU_CMD = 0xc1,
/**
* @FRAME_RELEASE:
* Frame release (reorder helper) notification, uses
* &struct iwl_frame_release
*/
FRAME_RELEASE = 0xc3,
/**
* @BA_NOTIF:
* BlockAck notification, uses &struct iwl_mvm_compressed_ba_notif
* or &struct iwl_mvm_ba_notif depending on the HW
*/
BA_NOTIF = 0xc5,
/* Location Aware Regulatory */
/**
* @MCC_UPDATE_CMD: using &struct iwl_mcc_update_cmd
*/
MCC_UPDATE_CMD = 0xc8,
/**
* @MCC_CHUB_UPDATE_CMD: using &struct iwl_mcc_chub_notif
*/
MCC_CHUB_UPDATE_CMD = 0xc9,
/**
* @MARKER_CMD: trace marker command, uses &struct iwl_mvm_marker
*/
MARKER_CMD = 0xcb,
/**
* @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_profile_notif
*/
BT_PROFILE_NOTIFICATION = 0xce,
/**
* @BT_CONFIG: &struct iwl_bt_coex_cmd
*/
BT_CONFIG = 0x9b,
/**
* @BT_COEX_UPDATE_CORUN_LUT:
* &struct iwl_bt_coex_corun_lut_update_cmd
*/
BT_COEX_UPDATE_CORUN_LUT = 0x5b,
/**
* @BT_COEX_UPDATE_REDUCED_TXP:
* &struct iwl_bt_coex_reduced_txp_update_cmd
*/
BT_COEX_UPDATE_REDUCED_TXP = 0x5c,
/**
* @BT_COEX_CI: &struct iwl_bt_coex_ci_cmd
*/
BT_COEX_CI = 0x5d,
/**
* @REPLY_SF_CFG_CMD: &struct iwl_sf_cfg_cmd
*/
REPLY_SF_CFG_CMD = 0xd1,
/**
* @REPLY_BEACON_FILTERING_CMD: &struct iwl_beacon_filter_cmd
*/
REPLY_BEACON_FILTERING_CMD = 0xd2,
/**
* @DTS_MEASUREMENT_NOTIFICATION:
* &struct iwl_dts_measurement_notif_v1 or
* &struct iwl_dts_measurement_notif_v2
*/
DTS_MEASUREMENT_NOTIFICATION = 0xdd,
/**
* @LDBG_CONFIG_CMD: configure continuous trace recording
*/
LDBG_CONFIG_CMD = 0xf6,
/**
* @DEBUG_LOG_MSG: Debugging log data from firmware
*/
DEBUG_LOG_MSG = 0xf7,
/**
* @BCAST_FILTER_CMD: &struct iwl_bcast_filter_cmd
*/
BCAST_FILTER_CMD = 0xcf,
/**
* @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd
*/
MCAST_FILTER_CMD = 0xd0,
/**
* @D3_CONFIG_CMD: &struct iwl_d3_manager_config
*/
D3_CONFIG_CMD = 0xd3,
/**
* @PROT_OFFLOAD_CONFIG_CMD: Depending on firmware, uses one of
* &struct iwl_proto_offload_cmd_v1, &struct iwl_proto_offload_cmd_v2,
* &struct iwl_proto_offload_cmd_v3_small,
* &struct iwl_proto_offload_cmd_v3_large
*/
PROT_OFFLOAD_CONFIG_CMD = 0xd4,
/**
* @OFFLOADS_QUERY_CMD:
* No data in command, response in &struct iwl_wowlan_status
*/
OFFLOADS_QUERY_CMD = 0xd5,
/**
* @REMOTE_WAKE_CONFIG_CMD: &struct iwl_wowlan_remote_wake_config
*/
REMOTE_WAKE_CONFIG_CMD = 0xd6,
/**
* @D0I3_END_CMD: End D0i3/D3 state, no command data
*/
D0I3_END_CMD = 0xed,
/**
* @WOWLAN_PATTERNS: &struct iwl_wowlan_patterns_cmd
*/
WOWLAN_PATTERNS = 0xe0,
/**
* @WOWLAN_CONFIGURATION: &struct iwl_wowlan_config_cmd
*/
WOWLAN_CONFIGURATION = 0xe1,
/**
* @WOWLAN_TSC_RSC_PARAM: &struct iwl_wowlan_rsc_tsc_params_cmd
*/
WOWLAN_TSC_RSC_PARAM = 0xe2,
/**
* @WOWLAN_TKIP_PARAM: &struct iwl_wowlan_tkip_params_cmd
*/
WOWLAN_TKIP_PARAM = 0xe3,
/**
* @WOWLAN_KEK_KCK_MATERIAL: &struct iwl_wowlan_kek_kck_material_cmd
*/
WOWLAN_KEK_KCK_MATERIAL = 0xe4,
/**
* @WOWLAN_GET_STATUSES: response in &struct iwl_wowlan_status
*/
WOWLAN_GET_STATUSES = 0xe5,
/**
* @SCAN_OFFLOAD_PROFILES_QUERY_CMD:
* No command data, response is &struct iwl_scan_offload_profiles_query
*/
SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56,
};
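As an illustration of how a group and an opcode combine (this mirrors the group-in-the-high-byte packing of the iwl_cmd_id() helper referenced earlier; the function name here is made up, and the real helper also carries a version argument):

/* Made-up name; packs the group into the high byte like iwl_cmd_id() */
static inline u16 wide_cmd_id(u8 group_id, u8 opcode)
{
	return ((u16)group_id << 8) | opcode;
}

/* e.g. wide_cmd_id(SYSTEM_GROUP, SHARED_MEM_CFG_CMD), or
 * wide_cmd_id(LEGACY_GROUP, TX_CMD) for a legacy command */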
/**
* enum iwl_system_subcmd_ids - system group command IDs
*/
enum iwl_system_subcmd_ids {
/**
* @SHARED_MEM_CFG_CMD:
* response in &struct iwl_shared_mem_cfg or
* &struct iwl_shared_mem_cfg_v2
*/
SHARED_MEM_CFG_CMD = 0x0,
/**
* @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd
*/
INIT_EXTENDED_CFG_CMD = 0x03,
/**
* @FSEQ_VER_MISMATCH_NTF: Notification about fseq version
* mismatch during init. The format is specified in
* &struct iwl_fseq_ver_mismatch_ntf.
*/
FSEQ_VER_MISMATCH_NTF = 0xFF,
};
#endif /* __iwl_fw_api_commands_h__ */


@ -0,0 +1,192 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_fw_api_config_h__
#define __iwl_fw_api_config_h__
/*
* struct iwl_dqa_enable_cmd
* @cmd_queue: the TXQ number of the command queue
*/
struct iwl_dqa_enable_cmd {
__le32 cmd_queue;
} __packed; /* DQA_CONTROL_CMD_API_S_VER_1 */
/*
* struct iwl_tx_ant_cfg_cmd
* @valid: valid antenna configuration
*/
struct iwl_tx_ant_cfg_cmd {
__le32 valid;
} __packed;
/**
* struct iwl_calib_ctrl - Calibration control struct.
* Sent as part of the phy configuration command.
* @flow_trigger: bitmap for which calibrations to perform according to
* flow triggers, using &enum iwl_calib_cfg
* @event_trigger: bitmap for which calibrations to perform according to
* event triggers, using &enum iwl_calib_cfg
*/
struct iwl_calib_ctrl {
__le32 flow_trigger;
__le32 event_trigger;
} __packed;
/* This enum defines the bitmap of various calibrations to enable in both
* init ucode and runtime ucode through CALIBRATION_CFG_CMD.
*/
enum iwl_calib_cfg {
IWL_CALIB_CFG_XTAL_IDX = BIT(0),
IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(1),
IWL_CALIB_CFG_VOLTAGE_READ_IDX = BIT(2),
IWL_CALIB_CFG_PAPD_IDX = BIT(3),
IWL_CALIB_CFG_TX_PWR_IDX = BIT(4),
IWL_CALIB_CFG_DC_IDX = BIT(5),
IWL_CALIB_CFG_BB_FILTER_IDX = BIT(6),
IWL_CALIB_CFG_LO_LEAKAGE_IDX = BIT(7),
IWL_CALIB_CFG_TX_IQ_IDX = BIT(8),
IWL_CALIB_CFG_TX_IQ_SKEW_IDX = BIT(9),
IWL_CALIB_CFG_RX_IQ_IDX = BIT(10),
IWL_CALIB_CFG_RX_IQ_SKEW_IDX = BIT(11),
IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(12),
IWL_CALIB_CFG_CHAIN_NOISE_IDX = BIT(13),
IWL_CALIB_CFG_DISCONNECTED_ANT_IDX = BIT(14),
IWL_CALIB_CFG_ANT_COUPLING_IDX = BIT(15),
IWL_CALIB_CFG_DAC_IDX = BIT(16),
IWL_CALIB_CFG_ABS_IDX = BIT(17),
IWL_CALIB_CFG_AGC_IDX = BIT(18),
};
/**
* struct iwl_phy_cfg_cmd - Phy configuration command
* @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg
* @calib_control: calibration control data
*/
struct iwl_phy_cfg_cmd {
__le32 phy_cfg;
struct iwl_calib_ctrl calib_control;
} __packed;
#define PHY_CFG_RADIO_TYPE (BIT(0) | BIT(1))
#define PHY_CFG_RADIO_STEP (BIT(2) | BIT(3))
#define PHY_CFG_RADIO_DASH (BIT(4) | BIT(5))
#define PHY_CFG_PRODUCT_NUMBER (BIT(6) | BIT(7))
#define PHY_CFG_TX_CHAIN_A BIT(8)
#define PHY_CFG_TX_CHAIN_B BIT(9)
#define PHY_CFG_TX_CHAIN_C BIT(10)
#define PHY_CFG_RX_CHAIN_A BIT(12)
#define PHY_CFG_RX_CHAIN_B BIT(13)
#define PHY_CFG_RX_CHAIN_C BIT(14)
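A hypothetical example of composing this command from the defines above (the chain masks and calibration triggers are chosen arbitrarily for illustration):

static void fill_phy_cfg(struct iwl_phy_cfg_cmd *cmd)
{
	/* two TX chains, three RX chains; run XTAL and TX power
	 * calibrations on flow triggers - all values illustrative */
	cmd->phy_cfg = cpu_to_le32(PHY_CFG_TX_CHAIN_A | PHY_CFG_TX_CHAIN_B |
				   PHY_CFG_RX_CHAIN_A | PHY_CFG_RX_CHAIN_B |
				   PHY_CFG_RX_CHAIN_C);
	cmd->calib_control.flow_trigger =
		cpu_to_le32(IWL_CALIB_CFG_XTAL_IDX | IWL_CALIB_CFG_TX_PWR_IDX);
	cmd->calib_control.event_trigger = 0;
}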
/*
* enum iwl_dc2dc_config_id - flag ids
*
* Ids of dc2dc configuration flags
*/
enum iwl_dc2dc_config_id {
DCDC_LOW_POWER_MODE_MSK_SET = 0x1, /* not used */
DCDC_FREQ_TUNE_SET = 0x2,
}; /* MARKER_ID_API_E_VER_1 */
/**
* struct iwl_dc2dc_config_cmd - configure dc2dc values
*
* (DC2DC_CONFIG_CMD = 0x83)
*
* Set/Get & configure dc2dc values.
* The command always returns the current dc2dc values.
*
* @flags: set/get dc2dc
* @enable_low_power_mode: not used.
* @dc2dc_freq_tune0: frequency divider - digital domain
* @dc2dc_freq_tune1: frequency divider - analog domain
*/
struct iwl_dc2dc_config_cmd {
__le32 flags;
__le32 enable_low_power_mode; /* not used */
__le32 dc2dc_freq_tune0;
__le32 dc2dc_freq_tune1;
} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */
/**
* struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd
*
* Current dc2dc values returned by the FW.
*
* @dc2dc_freq_tune0: frequency divider - digital domain
* @dc2dc_freq_tune1: frequency divider - analog domain
*/
struct iwl_dc2dc_config_resp {
__le32 dc2dc_freq_tune0;
__le32 dc2dc_freq_tune1;
} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */
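A minimal sketch of a set request (the helper name and values are illustrative; since the firmware always replies with the current tune, sending the command with flags == 0 would act as a pure "get"):

static void fill_dc2dc_set_freq(struct iwl_dc2dc_config_cmd *cmd,
				u32 tune0, u32 tune1)
{
	memset(cmd, 0, sizeof(*cmd));	/* enable_low_power_mode stays 0 */
	cmd->flags = cpu_to_le32(DCDC_FREQ_TUNE_SET);
	cmd->dc2dc_freq_tune0 = cpu_to_le32(tune0);
	cmd->dc2dc_freq_tune1 = cpu_to_le32(tune1);
}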
/**
* struct iwl_mvm_antenna_coupling_notif - antenna coupling notification
* @isolation: antenna isolation value
*/
struct iwl_mvm_antenna_coupling_notif {
__le32 isolation;
} __packed;
#endif /* __iwl_fw_api_config_h__ */


@ -0,0 +1,94 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_fw_api_context_h__
#define __iwl_fw_api_context_h__
/**
* enum iwl_ctxt_id_and_color - ID and color fields in context dword
* @FW_CTXT_ID_POS: position of the ID
* @FW_CTXT_ID_MSK: mask of the ID
* @FW_CTXT_COLOR_POS: position of the color
* @FW_CTXT_COLOR_MSK: mask of the color
* @FW_CTXT_INVALID: value used to indicate unused/invalid
*/
enum iwl_ctxt_id_and_color {
FW_CTXT_ID_POS = 0,
FW_CTXT_ID_MSK = 0xff << FW_CTXT_ID_POS,
FW_CTXT_COLOR_POS = 8,
FW_CTXT_COLOR_MSK = 0xff << FW_CTXT_COLOR_POS,
FW_CTXT_INVALID = 0xffffffff,
};
#define FW_CMD_ID_AND_COLOR(_id, _color) (((_id) << FW_CTXT_ID_POS) |\
((_color) << FW_CTXT_COLOR_POS))
/* Possible actions on PHYs, MACs and Bindings */
enum iwl_ctxt_action {
FW_CTXT_ACTION_STUB = 0,
FW_CTXT_ACTION_ADD,
FW_CTXT_ACTION_MODIFY,
FW_CTXT_ACTION_REMOVE,
FW_CTXT_ACTION_NUM
}; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */
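For illustration, the dword can be unpacked again with the masks above (the helper names are made up):

static inline u32 ctxt_id(u32 id_and_color)
{
	return (id_and_color & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
}

static inline u32 ctxt_color(u32 id_and_color)
{
	return (id_and_color & FW_CTXT_COLOR_MSK) >> FW_CTXT_COLOR_POS;
}

/* round trip: ctxt_id(FW_CMD_ID_AND_COLOR(3, 1)) == 3 */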
#endif /* __iwl_fw_api_context_h__ */


@ -18,11 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@ -64,8 +59,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#ifndef __fw_api_d3_h__
#define __fw_api_d3_h__
#ifndef __iwl_fw_api_d3_h__
#define __iwl_fw_api_d3_h__
/**
* enum iwl_d3_wakeup_flags - D3 manager wakeup flags
@ -468,4 +463,4 @@ struct iwl_wowlan_remote_wake_config {
/* TODO: NetDetect API */
#endif /* __fw_api_d3_h__ */
#endif /* __iwl_fw_api_d3_h__ */


@ -0,0 +1,127 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_fw_api_datapath_h__
#define __iwl_fw_api_datapath_h__
/**
* enum iwl_data_path_subcmd_ids - data path group commands
*/
enum iwl_data_path_subcmd_ids {
/**
* @DQA_ENABLE_CMD: &struct iwl_dqa_enable_cmd
*/
DQA_ENABLE_CMD = 0x0,
/**
* @UPDATE_MU_GROUPS_CMD: &struct iwl_mu_group_mgmt_cmd
*/
UPDATE_MU_GROUPS_CMD = 0x1,
/**
* @TRIGGER_RX_QUEUES_NOTIF_CMD: &struct iwl_rxq_sync_cmd
*/
TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
/**
* @STA_PM_NOTIF: &struct iwl_mvm_pm_state_notification
*/
STA_PM_NOTIF = 0xFD,
/**
* @MU_GROUP_MGMT_NOTIF: &struct iwl_mu_group_mgmt_notif
*/
MU_GROUP_MGMT_NOTIF = 0xFE,
/**
* @RX_QUEUES_NOTIFICATION: &struct iwl_rxq_sync_notification
*/
RX_QUEUES_NOTIFICATION = 0xFF,
};
/**
* struct iwl_mu_group_mgmt_cmd - VHT MU-MIMO group configuration
*
* @reserved: reserved
* @membership_status: a bitmap of MU groups
* @user_position: the position of the station in a group. If the station is in
* the group, the two bits at offset (group * 2) hold the position - 1
*/
struct iwl_mu_group_mgmt_cmd {
__le32 reserved;
__le32 membership_status[2];
__le32 user_position[4];
} __packed; /* MU_GROUP_ID_MNG_TABLE_API_S_VER_1 */
/**
* struct iwl_mu_group_mgmt_notif - VHT MU-MIMO group id notification
*
* @membership_status: a bitmap of MU groups
* @user_position: the position of the station in a group. If the station is in
* the group, the two bits at offset (group * 2) hold the position - 1
*/
struct iwl_mu_group_mgmt_notif {
__le32 membership_status[2];
__le32 user_position[4];
} __packed; /* MU_GROUP_MNG_NTFY_API_S_VER_1 */
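A sketch of decoding the notification, assuming the usual VHT MU-MIMO layout of one membership bit and one two-bit user position per group (64 groups); the helper name is made up:

/* Returns true and fills *pos if the station is a member of 'group'
 * (group < 64); layout assumptions as stated above. */
static bool mu_group_member(const struct iwl_mu_group_mgmt_notif *notif,
			    unsigned int group, u8 *pos)
{
	if (!(le32_to_cpu(notif->membership_status[group / 32]) &
	      BIT(group % 32)))
		return false;

	*pos = (le32_to_cpu(notif->user_position[group / 16]) >>
		((group % 16) * 2)) & 0x3;
	return true;
}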
#endif /* __iwl_fw_api_datapath_h__ */


@ -0,0 +1,345 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_fw_api_debug_h__
#define __iwl_fw_api_debug_h__
/**
* enum iwl_debug_cmds - debug commands
*/
enum iwl_debug_cmds {
/**
* @LMAC_RD_WR:
* LMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and
* &struct iwl_dbg_mem_access_rsp
*/
LMAC_RD_WR = 0x0,
/**
* @UMAC_RD_WR:
* UMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and
* &struct iwl_dbg_mem_access_rsp
*/
UMAC_RD_WR = 0x1,
/**
* @MFU_ASSERT_DUMP_NTF:
* &struct iwl_mfu_assert_dump_notif
*/
MFU_ASSERT_DUMP_NTF = 0xFE,
};
/* Error response/notification */
enum {
FW_ERR_UNKNOWN_CMD = 0x0,
FW_ERR_INVALID_CMD_PARAM = 0x1,
FW_ERR_SERVICE = 0x2,
FW_ERR_ARC_MEMORY = 0x3,
FW_ERR_ARC_CODE = 0x4,
FW_ERR_WATCH_DOG = 0x5,
FW_ERR_WEP_GRP_KEY_INDX = 0x10,
FW_ERR_WEP_KEY_SIZE = 0x11,
FW_ERR_OBSOLETE_FUNC = 0x12,
FW_ERR_UNEXPECTED = 0xFE,
FW_ERR_FATAL = 0xFF
};
/**
* struct iwl_error_resp - FW error indication
* ( REPLY_ERROR = 0x2 )
* @error_type: one of FW_ERR_*
* @cmd_id: the command ID for which the error occurred
* @reserved1: reserved
* @bad_cmd_seq_num: sequence number of the erroneous command
* @error_service: which service created the error, applicable only if
* error_type = 2, otherwise 0
* @timestamp: TSF in usecs.
*/
struct iwl_error_resp {
__le32 error_type;
u8 cmd_id;
u8 reserved1;
__le16 bad_cmd_seq_num;
__le32 error_service;
__le64 timestamp;
} __packed;
#define TX_FIFO_MAX_NUM_9000 8
#define TX_FIFO_MAX_NUM 15
#define RX_FIFO_MAX_NUM 2
#define TX_FIFO_INTERNAL_MAX_NUM 6
/**
* struct iwl_shared_mem_cfg_v2 - Shared memory configuration information
*
* @shared_mem_addr: shared memory addr (pre 8000 HW set to 0x0 as MARBH is not
* accessible)
* @shared_mem_size: shared memory size
* @sample_buff_addr: internal sample (mon/adc) buff addr (pre 8000 HW set to
* 0x0 as accessible only via DBGM RDAT)
* @sample_buff_size: internal sample buff size
* @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB), (pre
* 8000 HW set to 0x0 as not accessible)
* @txfifo_size: size of TXF0 ... TXF7
* @rxfifo_size: RXF1, RXF2 sizes. If there is no RXF2, it'll have a value of 0
* @page_buff_addr: used by UMAC and performance debug (page miss analysis),
* when paging is not supported this should be 0
* @page_buff_size: size of %page_buff_addr
* @rxfifo_addr: Start address of rxFifo
* @internal_txfifo_addr: start address of internalFifo
* @internal_txfifo_size: internal fifos' size
*
* NOTE: on firmware that doesn't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG
* set, the last 3 members don't exist.
*/
struct iwl_shared_mem_cfg_v2 {
__le32 shared_mem_addr;
__le32 shared_mem_size;
__le32 sample_buff_addr;
__le32 sample_buff_size;
__le32 txfifo_addr;
__le32 txfifo_size[TX_FIFO_MAX_NUM_9000];
__le32 rxfifo_size[RX_FIFO_MAX_NUM];
__le32 page_buff_addr;
__le32 page_buff_size;
__le32 rxfifo_addr;
__le32 internal_txfifo_addr;
__le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
/**
* struct iwl_shared_mem_lmac_cfg - LMAC shared memory configuration
*
* @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB)
* @txfifo_size: size of TX FIFOs
* @rxfifo1_addr: RXF1 addr
* @rxfifo1_size: RXF1 size
*/
struct iwl_shared_mem_lmac_cfg {
__le32 txfifo_addr;
__le32 txfifo_size[TX_FIFO_MAX_NUM];
__le32 rxfifo1_addr;
__le32 rxfifo1_size;
} __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */
/**
* struct iwl_shared_mem_cfg - Shared memory configuration information
*
* @shared_mem_addr: shared memory address
* @shared_mem_size: shared memory size
* @sample_buff_addr: internal sample (mon/adc) buff addr
* @sample_buff_size: internal sample buff size
* @rxfifo2_addr: start addr of RXF2
* @rxfifo2_size: size of RXF2
* @page_buff_addr: used by UMAC and performance debug (page miss analysis),
* when paging is not supported this should be 0
* @page_buff_size: size of %page_buff_addr
* @lmac_num: number of LMACs (1 or 2)
* @lmac_smem: per-LMAC smem data
*/
struct iwl_shared_mem_cfg {
__le32 shared_mem_addr;
__le32 shared_mem_size;
__le32 sample_buff_addr;
__le32 sample_buff_size;
__le32 rxfifo2_addr;
__le32 rxfifo2_size;
__le32 page_buff_addr;
__le32 page_buff_size;
__le32 lmac_num;
struct iwl_shared_mem_lmac_cfg lmac_smem[2];
} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */
/**
* struct iwl_mfuart_load_notif - mfuart image version & status
* ( MFUART_LOAD_NOTIFICATION = 0xb1 )
* @installed_ver: installed image version
* @external_ver: external image version
* @status: MFUART loading status
* @duration: MFUART loading time
* @image_size: MFUART image size in bytes
*/
struct iwl_mfuart_load_notif {
__le32 installed_ver;
__le32 external_ver;
__le32 status;
__le32 duration;
/* image size valid only in v2 of the command */
__le32 image_size;
} __packed; /* MFU_LOADER_NTFY_API_S_VER_2 */
/**
* struct iwl_mfu_assert_dump_notif - mfuart dump logs
* ( MFU_ASSERT_DUMP_NTF = 0xfe )
* @assert_id: mfuart assert id that caused the notification
* @curr_reset_num: number of asserts since uptime
* @index_num: current chunk id
* @parts_num: total number of chunks
* @data_size: number of data bytes sent
* @data: data buffer
*/
struct iwl_mfu_assert_dump_notif {
__le32 assert_id;
__le32 curr_reset_num;
__le16 index_num;
__le16 parts_num;
__le32 data_size;
__le32 data[0];
} __packed; /* MFU_DUMP_ASSERT_API_S_VER_1 */
/**
* enum iwl_mvm_marker_id - marker ids
*
* The IDs for the different types of markers to insert into the usniffer logs
*
* @MARKER_ID_TX_FRAME_LATENCY: TX latency marker
*/
enum iwl_mvm_marker_id {
MARKER_ID_TX_FRAME_LATENCY = 1,
}; /* MARKER_ID_API_E_VER_1 */
/**
* struct iwl_mvm_marker - mark info into the usniffer logs
*
* (MARKER_CMD = 0xcb)
*
* Write the UTC time stamp into the usniffer logs together with additional
* metadata, so the usniffer output can be parsed.
* In the command response the ucode will return the GP2 time.
*
* @dw_len: total length of the command in dwords, including the dword that
* holds this field.
* @marker_id: A unique marker id (iwl_mvm_marker_id).
* @reserved: reserved.
* @timestamp: in milliseconds since 1970-01-01 00:00:00 UTC
* @metadata: additional metadata that will be written to the usniffer log
*/
struct iwl_mvm_marker {
u8 dw_len;
u8 marker_id;
__le16 reserved;
__le64 timestamp;
__le32 metadata[0];
} __packed; /* MARKER_API_S_VER_1 */
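An illustrative builder for a latency marker (the helper name is made up); note how dw_len counts the whole command, header included, in dwords:

static size_t fill_latency_marker(struct iwl_mvm_marker *marker,
				  u64 timestamp_ms,
				  const __le32 *meta, u8 n_meta)
{
	size_t len = sizeof(*marker) + n_meta * sizeof(__le32);

	marker->dw_len = len / sizeof(__le32);	/* header is 3 dwords */
	marker->marker_id = MARKER_ID_TX_FRAME_LATENCY;
	marker->reserved = 0;
	marker->timestamp = cpu_to_le64(timestamp_ms);
	memcpy(marker->metadata, meta, n_meta * sizeof(__le32));
	return len;
}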
/* Operation types for the debug mem access */
enum {
DEBUG_MEM_OP_READ = 0,
DEBUG_MEM_OP_WRITE = 1,
DEBUG_MEM_OP_WRITE_BYTES = 2,
};
#define DEBUG_MEM_MAX_SIZE_DWORDS 32
/**
* struct iwl_dbg_mem_access_cmd - Request the device to read/write memory
* @op: DEBUG_MEM_OP_*
* @addr: address to read/write from/to
* @len: in dwords, to read/write
* @data: for write operations, contains the source buffer
*/
struct iwl_dbg_mem_access_cmd {
__le32 op;
__le32 addr;
__le32 len;
__le32 data[];
} __packed; /* DEBUG_(U|L)MAC_RD_WR_CMD_API_S_VER_1 */
/* Status responses for the debug mem access */
enum {
DEBUG_MEM_STATUS_SUCCESS = 0x0,
DEBUG_MEM_STATUS_FAILED = 0x1,
DEBUG_MEM_STATUS_LOCKED = 0x2,
DEBUG_MEM_STATUS_HIDDEN = 0x3,
DEBUG_MEM_STATUS_LENGTH = 0x4,
};
/**
* struct iwl_dbg_mem_access_rsp - Response to debug mem commands
* @status: DEBUG_MEM_STATUS_*
* @len: read dwords (0 for write operations)
* @data: contains the read DWs
*/
struct iwl_dbg_mem_access_rsp {
__le32 status;
__le32 len;
__le32 data[];
} __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */
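A minimal sketch of a read request (the helper name is made up, and capping the length at DEBUG_MEM_MAX_SIZE_DWORDS is an assumption based on the define above):

static void fill_mem_read(struct iwl_dbg_mem_access_cmd *cmd,
			  u32 addr, u32 len_dwords)
{
	if (len_dwords > DEBUG_MEM_MAX_SIZE_DWORDS)	/* assumed cap */
		len_dwords = DEBUG_MEM_MAX_SIZE_DWORDS;

	cmd->op = cpu_to_le32(DEBUG_MEM_OP_READ);
	cmd->addr = cpu_to_le32(addr);
	cmd->len = cpu_to_le32(len_dwords);
}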
#define CONT_REC_COMMAND_SIZE 80
#define ENABLE_CONT_RECORDING 0x15
#define DISABLE_CONT_RECORDING 0x16
/*
* struct iwl_continuous_record_mode - recording mode
*/
struct iwl_continuous_record_mode {
__le16 enable_recording;
} __packed;
/*
* struct iwl_continuous_record_cmd - enable/disable continuous recording
*/
struct iwl_continuous_record_cmd {
struct iwl_continuous_record_mode record_mode;
u8 pad[CONT_REC_COMMAND_SIZE -
sizeof(struct iwl_continuous_record_mode)];
} __packed;
#endif /* __iwl_fw_api_debug_h__ */


@ -0,0 +1,183 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_fw_api_filter_h__
#define __iwl_fw_api_filter_h__
#include "fw/api/mac.h"
#define MAX_PORT_ID_NUM 2
#define MAX_MCAST_FILTERING_ADDRESSES 256
/**
* struct iwl_mcast_filter_cmd - configure multicast filter.
* @filter_own: Set to 1 to filter out multicast packets sent by the station
* itself
* @port_id: Multicast MAC addresses array specifier. This is an unusual way
* of identifying the network interface, adopted in the host-device
* interface. The FW uses it as an index into the array of addresses;
* the array has MAX_PORT_ID_NUM members.
* @count: Number of MAC addresses in the array
* @pass_all: Set to 1 to pass all multicast packets.
* @bssid: current association BSSID.
* @reserved: reserved
* @addr_list: Placeholder for the array of MAC addresses.
* IMPORTANT: add padding if necessary to ensure DWORD alignment.
*/
struct iwl_mcast_filter_cmd {
u8 filter_own;
u8 port_id;
u8 count;
u8 pass_all;
u8 bssid[6];
u8 reserved[2];
u8 addr_list[0];
} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
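Given the padding requirement above, the total command length for a given address count can be computed as in this sketch (ALIGN and ETH_ALEN are standard kernel helpers; the function name is made up):

#include <linux/if_ether.h>	/* ETH_ALEN */
#include <linux/kernel.h>	/* ALIGN */

static size_t mcast_cmd_len(u8 count)
{
	/* fixed header plus 'count' 6-byte addresses, padded to a dword */
	return ALIGN(sizeof(struct iwl_mcast_filter_cmd) +
		     count * ETH_ALEN, 4);
}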
#define MAX_BCAST_FILTERS 8
#define MAX_BCAST_FILTER_ATTRS 2
/**
* enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet
* @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start.
* @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e.
* start of ip payload).
*/
enum iwl_mvm_bcast_filter_attr_offset {
BCAST_FILTER_OFFSET_PAYLOAD_START = 0,
BCAST_FILTER_OFFSET_IP_END = 1,
};
/**
* struct iwl_fw_bcast_filter_attr - broadcast filter attribute
* @offset_type: &enum iwl_mvm_bcast_filter_attr_offset.
* @offset: starting offset of this pattern.
* @reserved1: reserved
* @val: value to match - big endian (MSB is the first
* byte to match from offset pos).
* @mask: mask to match (big endian).
*/
struct iwl_fw_bcast_filter_attr {
u8 offset_type;
u8 offset;
__le16 reserved1;
__be32 val;
__be32 mask;
} __packed; /* BCAST_FILTER_ATT_S_VER_1 */
/**
* enum iwl_mvm_bcast_filter_frame_type - filter frame type
* @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames.
* @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames
*/
enum iwl_mvm_bcast_filter_frame_type {
BCAST_FILTER_FRAME_TYPE_ALL = 0,
BCAST_FILTER_FRAME_TYPE_IPV4 = 1,
};
/**
* struct iwl_fw_bcast_filter - broadcast filter
* @discard: discard frame (1) or let it pass (0).
* @frame_type: &enum iwl_mvm_bcast_filter_frame_type.
* @reserved1: reserved
* @num_attrs: number of valid attributes in this filter.
* @attrs: attributes of this filter. a filter is considered matched
* only when all its attributes are matched (i.e. AND relationship)
*/
struct iwl_fw_bcast_filter {
u8 discard;
u8 frame_type;
u8 num_attrs;
u8 reserved1;
struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
} __packed; /* BCAST_FILTER_S_VER_1 */
/**
* struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
* @default_discard: default action for this mac (discard (1) / pass (0)).
* @reserved1: reserved
* @attached_filters: bitmap of relevant filters for this mac.
*/
struct iwl_fw_bcast_mac {
u8 default_discard;
u8 reserved1;
__le16 attached_filters;
} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */
/**
* struct iwl_bcast_filter_cmd - broadcast filtering configuration
* @disable: enable (0) / disable (1)
* @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS)
* @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER)
* @reserved1: reserved
* @filters: broadcast filters
* @macs: broadcast filtering configuration per-mac
*/
struct iwl_bcast_filter_cmd {
u8 disable;
u8 max_bcast_filters;
u8 max_macs;
u8 reserved1;
struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS];
struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER];
} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */
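A hypothetical single-filter setup tying the pieces together (the matched value and mask are made up): discard IPv4 frames whose first byte after the IP header is 0x44:

static void one_bcast_filter(struct iwl_fw_bcast_filter *f)
{
	memset(f, 0, sizeof(*f));
	f->discard = 1;
	f->frame_type = BCAST_FILTER_FRAME_TYPE_IPV4;
	f->num_attrs = 1;
	f->attrs[0].offset_type = BCAST_FILTER_OFFSET_IP_END;
	f->attrs[0].offset = 0;
	/* big endian: the MSB is the first byte matched from the offset */
	f->attrs[0].val = cpu_to_be32(0x44000000);
	f->attrs[0].mask = cpu_to_be32(0xff000000);
}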
#endif /* __iwl_fw_api_filter_h__ */


@ -0,0 +1,152 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_fw_api_mac_cfg_h__
#define __iwl_fw_api_mac_cfg_h__
/**
* enum iwl_mac_conf_subcmd_ids - mac configuration command IDs
*/
enum iwl_mac_conf_subcmd_ids {
/**
* @LINK_QUALITY_MEASUREMENT_CMD: &struct iwl_link_qual_msrmnt_cmd
*/
LINK_QUALITY_MEASUREMENT_CMD = 0x1,
/**
* @LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF:
* &struct iwl_link_qual_msrmnt_notif
*/
LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF = 0xFE,
/**
* @CHANNEL_SWITCH_NOA_NOTIF: &struct iwl_channel_switch_noa_notif
*/
CHANNEL_SWITCH_NOA_NOTIF = 0xFF,
};
#define LQM_NUMBER_OF_STATIONS_IN_REPORT 16
enum iwl_lqm_cmd_operatrions {
LQM_CMD_OPERATION_START_MEASUREMENT = 0x01,
LQM_CMD_OPERATION_STOP_MEASUREMENT = 0x02,
};
enum iwl_lqm_status {
LQM_STATUS_SUCCESS = 0,
LQM_STATUS_TIMEOUT = 1,
LQM_STATUS_ABORT = 2,
};
/**
* struct iwl_link_qual_msrmnt_cmd - Link Quality Measurement command
* @cmd_operation: command operation to be performed (start or stop)
* as defined above.
* @mac_id: MAC ID the measurement applies to.
* @measurement_time: time of the total measurement to be performed, in uSec.
* @timeout: maximum time allowed until a response is sent, in uSec.
*/
struct iwl_link_qual_msrmnt_cmd {
__le32 cmd_operation;
__le32 mac_id;
__le32 measurement_time;
__le32 timeout;
} __packed /* LQM_CMD_API_S_VER_1 */;
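An illustrative start request (the durations and the helper name are made up; times are in microseconds per the field descriptions, and USEC_PER_MSEC comes from <linux/time.h>):

static void fill_lqm_start(struct iwl_link_qual_msrmnt_cmd *cmd, u32 mac_id)
{
	cmd->cmd_operation =
		cpu_to_le32(LQM_CMD_OPERATION_START_MEASUREMENT);
	cmd->mac_id = cpu_to_le32(mac_id);
	cmd->measurement_time = cpu_to_le32(500 * USEC_PER_MSEC);
	cmd->timeout = cpu_to_le32(750 * USEC_PER_MSEC);
}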
/**
* struct iwl_link_qual_msrmnt_notif - Link Quality Measurement notification
*
* @frequent_stations_air_time: an array containing the total air time
* (in uSec) used by the most frequently transmitting stations.
* @number_of_stations: the number of unique stations included in the array
* (a number between 0 and 16)
* @total_air_time_other_stations: the total air time (uSec) used by all the
* stations which are not included in the above report.
* @time_in_measurement_window: the total time in uSec in which a measurement
* took place.
* @tx_frame_dropped: the number of TX frames dropped due to retry limit during
* measurement
* @mac_id: MAC ID the measurement applies to.
* @status: return status; may be one of the LQM_STATUS_* values defined above.
* @reserved: reserved.
*/
struct iwl_link_qual_msrmnt_notif {
__le32 frequent_stations_air_time[LQM_NUMBER_OF_STATIONS_IN_REPORT];
__le32 number_of_stations;
__le32 total_air_time_other_stations;
__le32 time_in_measurement_window;
__le32 tx_frame_dropped;
__le32 mac_id;
__le32 status;
u8 reserved[12];
} __packed; /* LQM_MEASUREMENT_COMPLETE_NTF_API_S_VER1 */
/**
* struct iwl_channel_switch_noa_notif - Channel switch NOA notification
*
* @id_and_color: ID and color of the MAC
*/
struct iwl_channel_switch_noa_notif {
__le32 id_and_color;
} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */
#endif /* __iwl_fw_api_mac_cfg_h__ */


@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -16,11 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@ -31,6 +27,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -60,8 +57,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#ifndef __fw_api_mac_h__
#define __fw_api_mac_h__
#ifndef __iwl_fw_api_mac_h__
#define __iwl_fw_api_mac_h__
/*
* The first MAC indices (starting from 0) are available to the driver,
@ -76,8 +73,6 @@
#define IWL_MVM_STATION_COUNT 16
#define IWL_MVM_INVALID_STA 0xFF
#define IWL_MVM_TDLS_STA_COUNT 4
enum iwl_ac {
AC_BK,
AC_BE,
@ -393,4 +388,22 @@ struct iwl_nonqos_seq_query_cmd {
__le16 reserved;
} __packed; /* NON_QOS_TX_COUNTER_GET_SET_API_S_VER_1 */
#endif /* __fw_api_mac_h__ */
/**
* struct iwl_missed_beacons_notif - information on missed beacons
* ( MISSED_BEACONS_NOTIFICATION = 0xa2 )
* @mac_id: interface ID
* @consec_missed_beacons_since_last_rx: number of consecutive missed
* beacons since last RX.
* @consec_missed_beacons: number of consecutive missed beacons
* @num_expected_beacons: number of expected beacons
* @num_recvd_beacons: number of received beacons
*/
struct iwl_missed_beacons_notif {
__le32 mac_id;
__le32 consec_missed_beacons_since_last_rx;
__le32 consec_missed_beacons;
__le32 num_expected_beacons;
__le32 num_recvd_beacons;
} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */
#endif /* __iwl_fw_api_mac_h__ */


@ -0,0 +1,386 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_fw_api_nvm_reg_h__
#define __iwl_fw_api_nvm_reg_h__
/**
* enum iwl_regulatory_and_nvm_subcmd_ids - regulatory/NVM commands
*/
enum iwl_regulatory_and_nvm_subcmd_ids {
/**
* @NVM_ACCESS_COMPLETE: &struct iwl_nvm_access_complete_cmd
*/
NVM_ACCESS_COMPLETE = 0x0,
/**
* @NVM_GET_INFO:
* Command is &struct iwl_nvm_get_info,
* response is &struct iwl_nvm_get_info_rsp
*/
NVM_GET_INFO = 0x2,
};
/**
* enum iwl_nvm_access_op - NVM access opcode
* @IWL_NVM_READ: read NVM
* @IWL_NVM_WRITE: write NVM
*/
enum iwl_nvm_access_op {
IWL_NVM_READ = 0,
IWL_NVM_WRITE = 1,
};
/**
* enum iwl_nvm_access_target - target of the NVM_ACCESS_CMD
* @NVM_ACCESS_TARGET_CACHE: access the cache
* @NVM_ACCESS_TARGET_OTP: access the OTP
* @NVM_ACCESS_TARGET_EEPROM: access the EEPROM
*/
enum iwl_nvm_access_target {
NVM_ACCESS_TARGET_CACHE = 0,
NVM_ACCESS_TARGET_OTP = 1,
NVM_ACCESS_TARGET_EEPROM = 2,
};
/**
* enum iwl_nvm_section_type - section types for NVM_ACCESS_CMD
* @NVM_SECTION_TYPE_SW: software section
* @NVM_SECTION_TYPE_REGULATORY: regulatory section
* @NVM_SECTION_TYPE_CALIBRATION: calibration section
* @NVM_SECTION_TYPE_PRODUCTION: production section
* @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section
* @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section
* @NVM_MAX_NUM_SECTIONS: number of sections
*/
enum iwl_nvm_section_type {
NVM_SECTION_TYPE_SW = 1,
NVM_SECTION_TYPE_REGULATORY = 3,
NVM_SECTION_TYPE_CALIBRATION = 4,
NVM_SECTION_TYPE_PRODUCTION = 5,
NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
NVM_SECTION_TYPE_PHY_SKU = 12,
NVM_MAX_NUM_SECTIONS = 13,
};
/**
* struct iwl_nvm_access_cmd - Request the device to send an NVM section
* @op_code: &enum iwl_nvm_access_op
* @target: &enum iwl_nvm_access_target
* @type: &enum iwl_nvm_section_type
* @offset: offset in bytes into the section
* @length: in bytes, to read/write
* @data: on a write operation, the data to write; empty on read
*/
struct iwl_nvm_access_cmd {
u8 op_code;
u8 target;
__le16 type;
__le16 offset;
__le16 length;
u8 data[];
} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */
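To make the read/write split concrete, here is a hedged sketch (not from this commit) of filling a read request against the SW section; the helper name is made up:
/* Hypothetical sketch: build a read of @length bytes at @offset from
 * the SW section, served out of the NVM cache. On read, data[] stays
 * empty, so no payload follows the fixed part of the command.
 */
static void iwl_nvm_build_read(struct iwl_nvm_access_cmd *cmd,
			       u16 offset, u16 length)
{
	cmd->op_code = IWL_NVM_READ;
	cmd->target = NVM_ACCESS_TARGET_CACHE;
	cmd->type = cpu_to_le16(NVM_SECTION_TYPE_SW);
	cmd->offset = cpu_to_le16(offset);
	cmd->length = cpu_to_le16(length);
}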
/**
* struct iwl_nvm_access_resp - response to NVM_ACCESS_CMD
* @offset: offset in bytes into the section
* @length: in bytes, either how much was written or read
* @type: NVM_SECTION_TYPE_*
* @status: 0 for success, non-zero on failure
* @data: if read operation, the data returned. Empty on write.
*/
struct iwl_nvm_access_resp {
__le16 offset;
__le16 length;
__le16 type;
__le16 status;
u8 data[];
} __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_2 */
/*
* struct iwl_nvm_get_info - request to get NVM data
*/
struct iwl_nvm_get_info {
__le32 reserved;
} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_S_VER_1 */
/**
* enum iwl_nvm_info_general_flags - flags in NVM_GET_INFO resp
* @NVM_GENERAL_FLAGS_EMPTY_OTP: 1 if OTP is empty
*/
enum iwl_nvm_info_general_flags {
NVM_GENERAL_FLAGS_EMPTY_OTP = BIT(0),
};
/**
* struct iwl_nvm_get_info_general - general NVM data
* @flags: bit 0: 1 - empty, 0 - non-empty
* @nvm_version: nvm version
* @board_type: board type
* @reserved: reserved
*/
struct iwl_nvm_get_info_general {
__le32 flags;
__le16 nvm_version;
u8 board_type;
u8 reserved;
} __packed; /* GRP_REGULATORY_NVM_GET_INFO_GENERAL_S_VER_1 */
/**
* struct iwl_nvm_get_info_sku - mac information
* @enable_24g: band 2.4G enabled
* @enable_5g: band 5G enabled
* @enable_11n: 11n enabled
* @enable_11ac: 11ac enabled
* @mimo_disable: MIMO disabled
* @ext_crypto: Extended crypto enabled
*/
struct iwl_nvm_get_info_sku {
__le32 enable_24g;
__le32 enable_5g;
__le32 enable_11n;
__le32 enable_11ac;
__le32 mimo_disable;
__le32 ext_crypto;
} __packed; /* GRP_REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_1 */
/**
* struct iwl_nvm_get_info_phy - phy information
* @tx_chains: BIT 0 chain A, BIT 1 chain B
* @rx_chains: BIT 0 chain A, BIT 1 chain B
*/
struct iwl_nvm_get_info_phy {
__le32 tx_chains;
__le32 rx_chains;
} __packed; /* GRP_REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
#define IWL_NUM_CHANNELS (51)
/**
* struct iwl_nvm_get_info_regulatory - regulatory information
* @lar_enabled: is LAR enabled
* @channel_profile: regulatory data of this channel
* @reserved: reserved
*/
struct iwl_nvm_get_info_regulatory {
__le32 lar_enabled;
__le16 channel_profile[IWL_NUM_CHANNELS];
__le16 reserved;
} __packed; /* GRP_REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */
/**
* struct iwl_nvm_get_info_rsp - response to get NVM data
* @general: general NVM data
* @mac_sku: data relating to MAC sku
* @phy_sku: data relating to PHY sku
* @regulatory: regulatory data
*/
struct iwl_nvm_get_info_rsp {
struct iwl_nvm_get_info_general general;
struct iwl_nvm_get_info_sku mac_sku;
struct iwl_nvm_get_info_phy phy_sku;
struct iwl_nvm_get_info_regulatory regulatory;
} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_1 */
/**
* struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed
* @reserved: reserved
*/
struct iwl_nvm_access_complete_cmd {
__le32 reserved;
} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */
/**
* struct iwl_mcc_update_cmd_v1 - Request the device to update geographic
* regulatory profile according to the given MCC (Mobile Country Code).
* The MCC is a two-letter code, ASCII upper case [A-Z], or '00' for the world
* domain. An MCC of 'ZZ' switches to the NVM default profile; in that case,
* the MCC in the cmd response will be the relevant MCC in the NVM.
* @mcc: given mobile country code
* @source_id: the source from where we got the MCC, see iwl_mcc_source
* @reserved: reserved for alignment
*/
struct iwl_mcc_update_cmd_v1 {
__le16 mcc;
u8 source_id;
u8 reserved;
} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_1 */
/**
* struct iwl_mcc_update_cmd - Request the device to update geographic
* regulatory profile according to the given MCC (Mobile Country Code).
* The MCC is a two-letter code, ASCII upper case [A-Z], or '00' for the world
* domain. An MCC of 'ZZ' switches to the NVM default profile; in that case,
* the MCC in the cmd response will be the relevant MCC in the NVM.
* @mcc: given mobile country code
* @source_id: the source from where we got the MCC, see iwl_mcc_source
* @reserved: reserved for alignment
* @key: integrity key for MCC API OEM testing
* @reserved2: reserved
*/
struct iwl_mcc_update_cmd {
__le16 mcc;
u8 source_id;
u8 reserved;
__le32 key;
u8 reserved2[20];
} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */
/**
* struct iwl_mcc_update_resp_v1 - response to MCC_UPDATE_CMD.
* Contains the new channel control profile map, if changed, and the new MCC
* (mobile country code).
* The new MCC may be different than what was requested in MCC_UPDATE_CMD.
* @status: see &enum iwl_mcc_update_status
* @mcc: the new applied MCC
* @cap: capabilities for all channels which matches the MCC
* @source_id: the MCC source, see iwl_mcc_source
* @n_channels: number of channels in @channels (may be 14, 39, 50 or 51
* channels, depending on platform)
* @channels: channel control data map, DWORD for each channel. Only the first
* 16 bits are used.
*/
struct iwl_mcc_update_resp_v1 {
__le32 status;
__le16 mcc;
u8 cap;
u8 source_id;
__le32 n_channels;
__le32 channels[0];
} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_1 */
/**
* struct iwl_mcc_update_resp - response to MCC_UPDATE_CMD.
* Contains the new channel control profile map, if changed, and the new MCC
* (mobile country code).
* The new MCC may be different than what was requested in MCC_UPDATE_CMD.
* @status: see &enum iwl_mcc_update_status
* @mcc: the new applied MCC
* @cap: capabilities for all channels which matches the MCC
* @source_id: the MCC source, see iwl_mcc_source
* @time: time elapsed from the MCC test start (in 30 seconds TU)
* @reserved: reserved.
* @n_channels: number of channels in @channels (may be 14, 39, 50 or 51
* channels, depending on platform)
* @channels: channel control data map, DWORD for each channel. Only the first
* 16 bits are used.
*/
struct iwl_mcc_update_resp {
__le32 status;
__le16 mcc;
u8 cap;
u8 source_id;
__le16 time;
__le16 reserved;
__le32 n_channels;
__le32 channels[0];
} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_2 */
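Since @channels is a flexible array, the response length varies with n_channels; below is a minimal sketch (assumed helper, not in this commit) of the size check a parser would do before touching the array:
/* Hypothetical sketch: validate a variable-length MCC_UPDATE response
 * against its declared channel count. Real code would also bound
 * n_channels against the maximum the platform supports.
 */
static bool iwl_mcc_resp_len_ok(const struct iwl_mcc_update_resp *resp,
				size_t pkt_len)
{
	u32 n = le32_to_cpu(resp->n_channels);

	return pkt_len >= sizeof(*resp) + n * sizeof(__le32);
}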
/**
* struct iwl_mcc_chub_notif - chub notifies of mcc change
* (MCC_CHUB_UPDATE_CMD = 0xc9)
* The Chub (Communication Hub, CommsHUB) is a HW component that connects to
* the cellular and connectivity cores, receives updates of the MCC, and
* notifies the uCode directly of any MCC change.
* The uCode then asks the driver to request the device to update its
* geographic regulatory profile according to the given MCC (Mobile Country
* Code). The MCC is a two-letter code, ASCII upper case [A-Z], or '00' for
* the world domain. An MCC of 'ZZ' switches to the NVM default profile; in
* that case, the MCC in the cmd response will be the relevant MCC in the NVM.
* @mcc: given mobile country code
* @source_id: identity of the change originator, see iwl_mcc_source
* @reserved1: reserved for alignment
*/
struct iwl_mcc_chub_notif {
__le16 mcc;
u8 source_id;
u8 reserved1;
} __packed; /* LAR_MCC_NOTIFY_S */
enum iwl_mcc_update_status {
MCC_RESP_NEW_CHAN_PROFILE,
MCC_RESP_SAME_CHAN_PROFILE,
MCC_RESP_INVALID,
MCC_RESP_NVM_DISABLED,
MCC_RESP_ILLEGAL,
MCC_RESP_LOW_PRIORITY,
MCC_RESP_TEST_MODE_ACTIVE,
MCC_RESP_TEST_MODE_NOT_ACTIVE,
MCC_RESP_TEST_MODE_DENIAL_OF_SERVICE,
};
enum iwl_mcc_source {
MCC_SOURCE_OLD_FW = 0,
MCC_SOURCE_ME = 1,
MCC_SOURCE_BIOS = 2,
MCC_SOURCE_3G_LTE_HOST = 3,
MCC_SOURCE_3G_LTE_DEVICE = 4,
MCC_SOURCE_WIFI = 5,
MCC_SOURCE_RESERVED = 6,
MCC_SOURCE_DEFAULT = 7,
MCC_SOURCE_UNINITIALIZED = 8,
MCC_SOURCE_MCC_API = 9,
MCC_SOURCE_GET_CURRENT = 0x10,
MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11,
};
#endif /* __iwl_fw_api_nvm_reg_h__ */


@ -0,0 +1,101 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_fw_api_offload_h__
#define __iwl_fw_api_offload_h__
/**
* enum iwl_prot_offload_subcmd_ids - protocol offload commands
*/
enum iwl_prot_offload_subcmd_ids {
/**
* @STORED_BEACON_NTF: &struct iwl_stored_beacon_notif
*/
STORED_BEACON_NTF = 0xFF,
};
#define MAX_STORED_BEACON_SIZE 600
/**
* struct iwl_stored_beacon_notif - Stored beacon notification
*
* @system_time: system time on air rise
* @tsf: TSF on air rise
* @beacon_timestamp: beacon on air rise
* @band: band, matches &RX_RES_PHY_FLAGS_BAND_24 definition
* @channel: channel this beacon was received on
* @rates: rate in ucode internal format
* @byte_count: frame's byte count
* @data: beacon data, length in @byte_count
*/
struct iwl_stored_beacon_notif {
__le32 system_time;
__le64 tsf;
__le32 beacon_timestamp;
__le16 band;
__le16 channel;
__le32 rates;
__le32 byte_count;
u8 data[MAX_STORED_BEACON_SIZE];
} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_2 */
#endif /* __iwl_fw_api_offload_h__ */


@ -0,0 +1,108 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_fw_api_paging_h__
#define __iwl_fw_api_paging_h__
#define NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */
/**
* struct iwl_fw_paging_cmd - paging layout
*
* Send to FW the paging layout in the driver.
*
* @flags: various flags for the command
* @block_size: the block size in powers of 2
* @block_num: number of blocks specified in the command.
* @device_phy_addr: virtual addresses from device side
*/
struct iwl_fw_paging_cmd {
__le32 flags;
__le32 block_size;
__le32 block_num;
__le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
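Because @block_size is "in powers of 2", the command carries an exponent rather than a byte count; a hedged sketch follows, where PAGING_BLOCK_SHIFT is an assumed name and 32 KB an assumed block size:
/* Hypothetical sketch: encode 32 KB blocks as the exponent 15
 * (1 << 15 == 32768) and fill the block count; device_phy_addr[]
 * would then be filled from the driver's paging database.
 */
#define PAGING_BLOCK_SHIFT 15	/* assumed value, not from this header */

static void iwl_paging_fill(struct iwl_fw_paging_cmd *cmd, u32 nblocks)
{
	cmd->flags = 0;
	cmd->block_size = cpu_to_le32(PAGING_BLOCK_SHIFT);
	cmd->block_num = cpu_to_le32(nblocks);
}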
/**
* enum iwl_fw_item_id - FW item IDs
*
* @IWL_FW_ITEM_ID_PAGING: address of the pages that the FW will upload /
* download
*/
enum iwl_fw_item_id {
IWL_FW_ITEM_ID_PAGING = 3,
};
/**
* struct iwl_fw_get_item_cmd - get an item from the fw
* @item_id: ID of item to obtain, see &enum iwl_fw_item_id
*/
struct iwl_fw_get_item_cmd {
__le32 item_id;
} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */
struct iwl_fw_get_item_resp {
__le32 item_id;
__le32 item_byte_cnt;
__le32 item_val;
} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */
#endif /* __iwl_fw_api_paging_h__ */


@ -0,0 +1,164 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_fw_api_phy_ctxt_h__
#define __iwl_fw_api_phy_ctxt_h__
/* Supported bands */
#define PHY_BAND_5 (0)
#define PHY_BAND_24 (1)
/* Supported channel width, vary if there is VHT support */
#define PHY_VHT_CHANNEL_MODE20 (0x0)
#define PHY_VHT_CHANNEL_MODE40 (0x1)
#define PHY_VHT_CHANNEL_MODE80 (0x2)
#define PHY_VHT_CHANNEL_MODE160 (0x3)
/*
* Control channel position:
* For legacy, a set bit means the upper channel, otherwise the lower one.
* For VHT, bit 2 marks whether the control channel is below or above the
* center frequency, and bits 1:0 mark the distance from it. For 20 MHz,
* the offset is 0.
* center_freq
* |
* 40Mhz |_______|_______|
* 80Mhz |_______|_______|_______|_______|
* 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______|
* code 011 010 001 000 | 100 101 110 111
*/
#define PHY_VHT_CTRL_POS_1_BELOW (0x0)
#define PHY_VHT_CTRL_POS_2_BELOW (0x1)
#define PHY_VHT_CTRL_POS_3_BELOW (0x2)
#define PHY_VHT_CTRL_POS_4_BELOW (0x3)
#define PHY_VHT_CTRL_POS_1_ABOVE (0x4)
#define PHY_VHT_CTRL_POS_2_ABOVE (0x5)
#define PHY_VHT_CTRL_POS_3_ABOVE (0x6)
#define PHY_VHT_CTRL_POS_4_ABOVE (0x7)
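The diagram above fully determines this encoding; here is a small sketch (assumed helper name, not from this commit) mapping a distance in 20 MHz slots plus an above/below flag to these codes:
/* Hypothetical sketch: bit 2 selects above/below the center frequency,
 * bits 1:0 encode (distance in 20 MHz slots) - 1, matching the diagram.
 * e.g. phy_vht_ctrl_pos(1, false) == PHY_VHT_CTRL_POS_1_BELOW and
 * phy_vht_ctrl_pos(4, true) == PHY_VHT_CTRL_POS_4_ABOVE.
 */
static u8 phy_vht_ctrl_pos(unsigned int slots_from_center, bool above)
{
	u8 pos = (slots_from_center - 1) & 0x3;

	return above ? (0x4 | pos) : pos;
}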
/*
* @band: PHY_BAND_*
* @channel: channel number
* @width: PHY_[VHT|LEGACY]_CHANNEL_*
* @ctrl_pos: PHY_[VHT|LEGACY]_CTRL_*
*/
struct iwl_fw_channel_info {
u8 band;
u8 channel;
u8 width;
u8 ctrl_pos;
} __packed;
#define PHY_RX_CHAIN_DRIVER_FORCE_POS (0)
#define PHY_RX_CHAIN_DRIVER_FORCE_MSK \
(0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS)
#define PHY_RX_CHAIN_VALID_POS (1)
#define PHY_RX_CHAIN_VALID_MSK \
(0x7 << PHY_RX_CHAIN_VALID_POS)
#define PHY_RX_CHAIN_FORCE_SEL_POS (4)
#define PHY_RX_CHAIN_FORCE_SEL_MSK \
(0x7 << PHY_RX_CHAIN_FORCE_SEL_POS)
#define PHY_RX_CHAIN_FORCE_MIMO_SEL_POS (7)
#define PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \
(0x7 << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS)
#define PHY_RX_CHAIN_CNT_POS (10)
#define PHY_RX_CHAIN_CNT_MSK \
(0x3 << PHY_RX_CHAIN_CNT_POS)
#define PHY_RX_CHAIN_MIMO_CNT_POS (12)
#define PHY_RX_CHAIN_MIMO_CNT_MSK \
(0x3 << PHY_RX_CHAIN_MIMO_CNT_POS)
#define PHY_RX_CHAIN_MIMO_FORCE_POS (14)
#define PHY_RX_CHAIN_MIMO_FORCE_MSK \
(0x1 << PHY_RX_CHAIN_MIMO_FORCE_POS)
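A hedged sketch of how these POS/MSK fields compose into the rxchain_info word of the PHY context command below; the chosen chain counts are arbitrary examples:
/* Hypothetical sketch: chains A+B valid, two active chains, one of
 * them used for MIMO. Only the field positions come from this header.
 */
static __le32 phy_rxchain_example(void)
{
	u32 v = 0;

	v |= 0x3 << PHY_RX_CHAIN_VALID_POS;	/* chains A and B valid */
	v |= 2 << PHY_RX_CHAIN_CNT_POS;		/* two active chains */
	v |= 1 << PHY_RX_CHAIN_MIMO_CNT_POS;	/* one MIMO chain */
	return cpu_to_le32(v);
}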
/* TODO: fix the value, make it depend on firmware at runtime? */
#define NUM_PHY_CTX 3
/* TODO: complete missing documentation */
/**
* struct iwl_phy_context_cmd - config of the PHY context
* ( PHY_CONTEXT_CMD = 0x8 )
* @id_and_color: ID and color of the relevant Binding
* @action: action to perform, one of FW_CTXT_ACTION_*
* @apply_time: 0 means immediate apply and context switch;
* any other value means apply the new params after X usecs
* @tx_param_color: ???
* @ci: channel info
* @txchain_info: ???
* @rxchain_info: ???
* @acquisition_data: ???
* @dsp_cfg_flags: set to 0
*/
struct iwl_phy_context_cmd {
/* COMMON_INDEX_HDR_API_S_VER_1 */
__le32 id_and_color;
__le32 action;
/* PHY_CONTEXT_DATA_API_S_VER_1 */
__le32 apply_time;
__le32 tx_param_color;
struct iwl_fw_channel_info ci;
__le32 txchain_info;
__le32 rxchain_info;
__le32 acquisition_data;
__le32 dsp_cfg_flags;
} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */
#endif /* __iwl_fw_api_phy_ctxt_h__ */


@ -0,0 +1,258 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_fw_api_phy_h__
#define __iwl_fw_api_phy_h__
/**
* enum iwl_phy_ops_subcmd_ids - PHY group commands
*/
enum iwl_phy_ops_subcmd_ids {
/**
* @CMD_DTS_MEASUREMENT_TRIGGER_WIDE:
* Uses either &struct iwl_dts_measurement_cmd or
* &struct iwl_ext_dts_measurement_cmd
*/
CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
/**
* @CTDP_CONFIG_CMD: &struct iwl_mvm_ctdp_cmd
*/
CTDP_CONFIG_CMD = 0x03,
/**
* @TEMP_REPORTING_THRESHOLDS_CMD: &struct temp_report_ths_cmd
*/
TEMP_REPORTING_THRESHOLDS_CMD = 0x04,
/**
* @GEO_TX_POWER_LIMIT: &struct iwl_geo_tx_power_profiles_cmd
*/
GEO_TX_POWER_LIMIT = 0x05,
/**
* @CT_KILL_NOTIFICATION: &struct ct_kill_notif
*/
CT_KILL_NOTIFICATION = 0xFE,
/**
* @DTS_MEASUREMENT_NOTIF_WIDE:
* &struct iwl_dts_measurement_notif_v1 or
* &struct iwl_dts_measurement_notif_v2
*/
DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
};
/* DTS measurements */
enum iwl_dts_measurement_flags {
DTS_TRIGGER_CMD_FLAGS_TEMP = BIT(0),
DTS_TRIGGER_CMD_FLAGS_VOLT = BIT(1),
};
/**
* struct iwl_dts_measurement_cmd - request DTS temp and/or voltage measurements
*
* @flags: indicates which measurements we want as specified in
* &enum iwl_dts_measurement_flags
*/
struct iwl_dts_measurement_cmd {
__le32 flags;
} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_CMD_S */
/**
* enum iwl_dts_control_measurement_mode - DTS measurement type
* @DTS_AUTOMATIC: automatic mode (full SW control). Provides the latest
* temperature read back without waiting for a new measurement. Uses the
* automatic SW DTS configuration.
* @DTS_REQUEST_READ: Request DTS read. Configure DTS with manual settings,
* trigger DTS reading and provide read back temperature read
* when available.
* @DTS_OVER_WRITE: over-write the DTS temperatures in the SW until next read
* @DTS_DIRECT_WITHOUT_MEASURE: DTS returns its latest temperature result,
* without measurement trigger.
*/
enum iwl_dts_control_measurement_mode {
DTS_AUTOMATIC = 0,
DTS_REQUEST_READ = 1,
DTS_OVER_WRITE = 2,
DTS_DIRECT_WITHOUT_MEASURE = 3,
};
/**
* enum iwl_dts_used - DTS to use or used for measurement in the DTS request
* @DTS_USE_TOP: Top
* @DTS_USE_CHAIN_A: chain A
* @DTS_USE_CHAIN_B: chain B
* @DTS_USE_CHAIN_C: chain C
* @XTAL_TEMPERATURE: read temperature from xtal
*/
enum iwl_dts_used {
DTS_USE_TOP = 0,
DTS_USE_CHAIN_A = 1,
DTS_USE_CHAIN_B = 2,
DTS_USE_CHAIN_C = 3,
XTAL_TEMPERATURE = 4,
};
/**
* enum iwl_dts_bit_mode - bit-mode to use in DTS request read mode
* @DTS_BIT6_MODE: bit 6 mode
* @DTS_BIT8_MODE: bit 8 mode
*/
enum iwl_dts_bit_mode {
DTS_BIT6_MODE = 0,
DTS_BIT8_MODE = 1,
};
/**
* struct iwl_ext_dts_measurement_cmd - request extended DTS temp measurements
* @control_mode: see &enum iwl_dts_control_measurement_mode
* @temperature: used when the over-write DTS mode is selected
* @sensor: set temperature sensor to use. See &enum iwl_dts_used
* @avg_factor: average factor to DTS in request DTS read mode
* @bit_mode: value defines the DTS bit mode to use. See &enum iwl_dts_bit_mode
* @step_duration: step duration for the DTS
*/
struct iwl_ext_dts_measurement_cmd {
__le32 control_mode;
__le32 temperature;
__le32 sensor;
__le32 avg_factor;
__le32 bit_mode;
__le32 step_duration;
} __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */
/**
* struct iwl_dts_measurement_notif_v1 - measurements notification
*
* @temp: the measured temperature
* @voltage: the measured voltage
*/
struct iwl_dts_measurement_notif_v1 {
__le32 temp;
__le32 voltage;
} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_1*/
/**
* struct iwl_dts_measurement_notif_v2 - measurements notification
*
* @temp: the measured temperature
* @voltage: the measured voltage
* @threshold_idx: the trip index that was crossed
*/
struct iwl_dts_measurement_notif_v2 {
__le32 temp;
__le32 voltage;
__le32 threshold_idx;
} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_2 */
/**
* struct ct_kill_notif - CT-kill entry notification
*
* @temperature: the current temperature in Celsius
* @reserved: reserved
*/
struct ct_kill_notif {
__le16 temperature;
__le16 reserved;
} __packed; /* GRP_PHY_CT_KILL_NTF */
/**
* enum iwl_mvm_ctdp_cmd_operation - CTDP command operations
* @CTDP_CMD_OPERATION_START: update the current budget
* @CTDP_CMD_OPERATION_STOP: stop ctdp
* @CTDP_CMD_OPERATION_REPORT: get the average budget
*/
enum iwl_mvm_ctdp_cmd_operation {
CTDP_CMD_OPERATION_START = 0x1,
CTDP_CMD_OPERATION_STOP = 0x2,
CTDP_CMD_OPERATION_REPORT = 0x4,
}; /* CTDP_CMD_OPERATION_TYPE_E */
/**
* struct iwl_mvm_ctdp_cmd - track and manage the FW power consumption budget
*
* @operation: see &enum iwl_mvm_ctdp_cmd_operation
* @budget: the budget in milliwatt
* @window_size: defined in API but not used
*/
struct iwl_mvm_ctdp_cmd {
__le32 operation;
__le32 budget;
__le32 window_size;
} __packed;
#define IWL_MAX_DTS_TRIPS 8
/**
* struct temp_report_ths_cmd - set temperature thresholds
*
* @num_temps: number of temperature thresholds passed
* @thresholds: array with the thresholds to be configured
*/
struct temp_report_ths_cmd {
__le32 num_temps;
__le16 thresholds[IWL_MAX_DTS_TRIPS];
} __packed; /* GRP_PHY_TEMP_REPORTING_THRESHOLDS_CMD */
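A minimal sketch of filling this command with two trip points; the helper name and threshold values are placeholders, and the header does not state the unit, so the numbers are illustrative only:
/* Hypothetical sketch: report when either of two thresholds is
 * crossed. num_temps must not exceed IWL_MAX_DTS_TRIPS (8).
 */
static void temp_ths_example(struct temp_report_ths_cmd *cmd)
{
	cmd->num_temps = cpu_to_le32(2);
	cmd->thresholds[0] = cpu_to_le16(75);	/* placeholder value */
	cmd->thresholds[1] = cpu_to_le16(95);	/* placeholder value */
}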
#endif /* __iwl_fw_api_phy_h__ */


@ -18,11 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@ -65,8 +60,8 @@
*
*****************************************************************************/
#ifndef __fw_api_power_h__
#define __fw_api_power_h__
#ifndef __iwl_fw_api_power_h__
#define __iwl_fw_api_power_h__
/* Power Management Commands, Responses, Notifications */
@ -224,7 +219,7 @@ struct iwl_device_power_cmd {
/**
* struct iwl_mac_power_cmd - New power command containing uAPSD support
* MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
* @id_and_color: MAC context identifier, &enum iwl_mvm_id_and_color
* @id_and_color: MAC context identifier, &enum iwl_ctxt_id_and_color
* @flags: Power table command flags from POWER_FLAGS_*
* @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec.
* Minimum allowed:- 3 * DTIM. Keep alive period must be
@ -528,4 +523,4 @@ struct iwl_beacon_filter_cmd {
#define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT)
#define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3)
#endif
#endif /* __iwl_fw_api_power_h__ */


@ -17,11 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@ -62,10 +57,10 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#ifndef __fw_api_rs_h__
#define __fw_api_rs_h__
#ifndef __iwl_fw_api_rs_h__
#define __iwl_fw_api_rs_h__
#include "fw-api-mac.h"
#include "mac.h"
/*
* These serve as indexes into
@ -410,4 +405,4 @@ struct iwl_lq_cmd {
__le32 ss_params;
}; /* LINK_QUALITY_CMD_API_S_VER_1 */
#endif /* __fw_api_rs_h__ */
#endif /* __iwl_fw_api_rs_h__ */


@ -18,11 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@ -65,8 +60,8 @@
*
*****************************************************************************/
#ifndef __fw_api_rx_h__
#define __fw_api_rx_h__
#ifndef __iwl_fw_api_rx_h__
#define __iwl_fw_api_rx_h__
/* API for pre-9000 hardware */
@ -571,4 +566,24 @@ struct iwl_mvm_pm_state_notification {
__le16 reserved;
} __packed; /* PEER_PM_NTFY_API_S_VER_1 */
#endif /* __fw_api_rx_h__ */
#define BA_WINDOW_STREAMS_MAX 16
#define BA_WINDOW_STATUS_TID_MSK 0x000F
#define BA_WINDOW_STATUS_STA_ID_POS 4
#define BA_WINDOW_STATUS_STA_ID_MSK 0x01F0
#define BA_WINDOW_STATUS_VALID_MSK BIT(9)
/**
* struct iwl_ba_window_status_notif - reordering window's status notification
* @bitmap: bitmap of received frames [start_seq_num + 0]..[start_seq_num + 63]
* @ra_tid: bit 3:0 - TID, bit 8:4 - STA_ID, bit 9 - valid
* @start_seq_num: the start sequence number of the bitmap
* @mpdu_rx_count: the number of received MPDUs since entering D0i3
*/
struct iwl_ba_window_status_notif {
__le64 bitmap[BA_WINDOW_STREAMS_MAX];
__le16 ra_tid[BA_WINDOW_STREAMS_MAX];
__le32 start_seq_num[BA_WINDOW_STREAMS_MAX];
__le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX];
} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */
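The @ra_tid packing is spelled out by the masks above; a sketch (assumed helper, not from this commit) of unpacking one stream entry:
/* Hypothetical sketch: decode TID, STA_ID and the valid bit from one
 * ra_tid entry, using only the masks defined above.
 */
static void ba_window_decode(__le16 ra_tid_le, u8 *tid, u8 *sta_id,
			     bool *valid)
{
	u16 v = le16_to_cpu(ra_tid_le);

	*tid = v & BA_WINDOW_STATUS_TID_MSK;
	*sta_id = (v & BA_WINDOW_STATUS_STA_ID_MSK) >>
		  BA_WINDOW_STATUS_STA_ID_POS;
	*valid = !!(v & BA_WINDOW_STATUS_VALID_MSK);
}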
#endif /* __iwl_fw_api_rx_h__ */


@ -18,11 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@ -65,8 +60,8 @@
*
*****************************************************************************/
#ifndef __fw_api_scan_h__
#define __fw_api_scan_h__
#ifndef __iwl_fw_api_scan_h__
#define __iwl_fw_api_scan_h__
/* Scan Commands, Responses, Notifications */
@ -789,4 +784,4 @@ struct iwl_umac_scan_iter_complete_notif {
struct iwl_scan_results_notif results[];
} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_2 */
#endif
#endif /* __iwl_fw_api_scan_h__ */


@ -0,0 +1,138 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_fw_api_sf_h__
#define __iwl_fw_api_sf_h__
/* Smart Fifo state */
enum iwl_sf_state {
SF_LONG_DELAY_ON = 0, /* should never be called by driver */
SF_FULL_ON,
SF_UNINIT,
SF_INIT_OFF,
SF_HW_NUM_STATES
};
/* Smart Fifo possible scenario */
enum iwl_sf_scenario {
SF_SCENARIO_SINGLE_UNICAST,
SF_SCENARIO_AGG_UNICAST,
SF_SCENARIO_MULTICAST,
SF_SCENARIO_BA_RESP,
SF_SCENARIO_TX_RESP,
SF_NUM_SCENARIO
};
#define SF_TRANSIENT_STATES_NUMBER 2 /* SF_LONG_DELAY_ON and SF_FULL_ON */
#define SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */
/* smart FIFO default values */
#define SF_W_MARK_SISO 6144
#define SF_W_MARK_MIMO2 8192
#define SF_W_MARK_MIMO3 6144
#define SF_W_MARK_LEGACY 4096
#define SF_W_MARK_SCAN 4096
/* SF Scenarios timers for default configuration (aligned to 32 uSec) */
#define SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */
#define SF_SINGLE_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
#define SF_AGG_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */
#define SF_AGG_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
#define SF_MCAST_IDLE_TIMER_DEF 160 /* 150 uSec */
#define SF_MCAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
#define SF_BA_IDLE_TIMER_DEF 160 /* 150 uSec */
#define SF_BA_AGING_TIMER_DEF 400 /* 0.4 mSec */
#define SF_TX_RE_IDLE_TIMER_DEF 160 /* 150 uSec */
#define SF_TX_RE_AGING_TIMER_DEF 400 /* 0.4 mSec */
/* SF Scenarios timers for BSS MAC configuration (aligned to 32 uSec) */
#define SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */
#define SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */
#define SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */
#define SF_AGG_UNICAST_AGING_TIMER 2016 /* 2 mSec */
#define SF_MCAST_IDLE_TIMER 2016 /* 2 mSec */
#define SF_MCAST_AGING_TIMER 10016 /* 10 mSec */
#define SF_BA_IDLE_TIMER 320 /* 300 uSec */
#define SF_BA_AGING_TIMER 2016 /* 2 mSec */
#define SF_TX_RE_IDLE_TIMER 320 /* 300 uSec */
#define SF_TX_RE_AGING_TIMER 2016 /* 2 mSec */
#define SF_LONG_DELAY_AGING_TIMER 1000000 /* 1 Sec */
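To illustrate the "aligned to 32 uSec" note above, a sketch (assumed helper name) of the rounding that turns the nominal values into the constants above:
/* Hypothetical sketch: round up to the next 32 uSec step, e.g. a
 * nominal 150 uSec idle timer becomes 160 (5 * 32) and a nominal
 * 2 mSec aging timer becomes 2016 (63 * 32).
 */
static u32 sf_align_32(u32 usec)
{
	return (usec + 31) & ~31u;
}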
#define SF_CFG_DUMMY_NOTIF_OFF BIT(16)
/**
* struct iwl_sf_cfg_cmd - Smart Fifo configuration command.
* @state: smart fifo state, types listed in &enum iwl_sf_state.
* @watermark: Minimum allowed available free space in RXF for transient state.
* @long_delay_timeouts: aging and idle timer values for each scenario
* in long delay state.
* @full_on_timeouts: timer values for each scenario in full on state.
*/
struct iwl_sf_cfg_cmd {
__le32 state;
__le32 watermark[SF_TRANSIENT_STATES_NUMBER];
__le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
__le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
} __packed; /* SF_CFG_API_S_VER_2 */
#endif /* __iwl_fw_api_sf_h__ */


@ -18,11 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@ -64,8 +59,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#ifndef __fw_api_sta_h__
#define __fw_api_sta_h__
#ifndef __iwl_fw_api_sta_h__
#define __iwl_fw_api_sta_h__
/**
* enum iwl_sta_flags - flags for the ADD_STA host command
@ -291,7 +286,7 @@ struct iwl_mvm_keyinfo {
* @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
* AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
* @mac_id_n_color: the Mac context this station belongs to,
* see &enum iwl_mvm_id_and_color
* see &enum iwl_ctxt_id_and_color
* @addr: station's MAC address
* @reserved2: reserved
* @sta_id: index of station in uCode's station table
@ -372,7 +367,7 @@ enum iwl_sta_type {
* @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
* AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
* @mac_id_n_color: the Mac context this station belongs to,
* see &enum iwl_mvm_id_and_color
* see &enum iwl_ctxt_id_and_color
* @addr: station's MAC address
* @reserved2: reserved
* @sta_id: index of station in uCode's station table
@ -575,4 +570,4 @@ struct iwl_mvm_eosp_notification {
__le32 sta_id;
} __packed; /* UAPSD_EOSP_NTFY_API_S_VER_1 */
#endif /* __fw_api_sta_h__ */
#endif /* __iwl_fw_api_sta_h__ */


@ -18,11 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@ -64,9 +59,9 @@
*
*****************************************************************************/
#ifndef __fw_api_stats_h__
#define __fw_api_stats_h__
#include "fw-api-mac.h"
#ifndef __iwl_fw_api_stats_h__
#define __iwl_fw_api_stats_h__
#include "mac.h"
struct mvm_statistics_dbg {
__le32 burst_check;
@ -476,4 +471,4 @@ struct iwl_statistics_cmd {
__le32 flags;
} __packed; /* STATISTICS_CMD_API_S_VER_1 */
#endif /* __fw_api_stats_h__ */
#endif /* __iwl_fw_api_stats_h__ */


@ -0,0 +1,208 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_fw_api_tdls_h__
#define __iwl_fw_api_tdls_h__
#include "fw/api/tx.h"
#include "fw/api/phy-ctxt.h"
#define IWL_MVM_TDLS_STA_COUNT 4
/* Type of TDLS request */
enum iwl_tdls_channel_switch_type {
TDLS_SEND_CHAN_SW_REQ = 0,
TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH,
TDLS_MOVE_CH,
}; /* TDLS_STA_CHANNEL_SWITCH_CMD_TYPE_API_E_VER_1 */
/**
* struct iwl_tdls_channel_switch_timing - Switch timing in TDLS channel-switch
* @frame_timestamp: GP2 timestamp of channel-switch request/response packet
* received from peer
* @max_offchan_duration: how many microseconds out of a DTIM period are given
* to the TDLS off-channel communication. For instance, if the DTIM is
* 200 TU and the TDLS peer is to be given 25% of the time, the value
* will be 50 TU, or 50 * 1024 when translated into microseconds.
* @switch_time: switch time the peer sent in its channel switch timing IE
* @switch_timeout: switch timeout the peer sent in its channel switch timing IE
*/
struct iwl_tdls_channel_switch_timing {
__le32 frame_timestamp; /* GP2 time of peer packet Rx */
__le32 max_offchan_duration; /* given in micro-seconds */
__le32 switch_time; /* given in micro-seconds */
__le32 switch_timeout; /* given in micro-seconds */
} __packed; /* TDLS_STA_CHANNEL_SWITCH_TIMING_DATA_API_S_VER_1 */
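The 25%-of-DTIM example in the comment above reduces to one line of arithmetic; a hedged sketch with an assumed helper name:
/* Hypothetical sketch of the comment's example: 25% of a 200 TU DTIM
 * is 50 TU, i.e. 50 * 1024 == 51200 uSec (1 TU == 1024 uSec).
 */
static u32 tdls_offchan_usec(u32 dtim_tu, u32 percent)
{
	return (dtim_tu * percent / 100) * 1024;
}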
#define IWL_TDLS_CH_SW_FRAME_MAX_SIZE 200
/**
* struct iwl_tdls_channel_switch_frame - TDLS channel switch frame template
*
* A template representing a TDLS channel-switch request or response frame
*
* @switch_time_offset: offset to the channel switch timing IE in the template
* @tx_cmd: Tx parameters for the frame
* @data: frame data
*/
struct iwl_tdls_channel_switch_frame {
__le32 switch_time_offset;
struct iwl_tx_cmd tx_cmd;
u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE];
} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */
/**
* struct iwl_tdls_channel_switch_cmd - TDLS channel switch command
*
* The command is sent to initiate a channel switch and also in response to
* incoming TDLS channel-switch request/response packets from remote peers.
*
* @switch_type: see &enum iwl_tdls_channel_switch_type
* @peer_sta_id: station id of TDLS peer
* @ci: channel we switch to
* @timing: timing related data for command
* @frame: channel-switch request/response template, depending on switch_type
*/
struct iwl_tdls_channel_switch_cmd {
u8 switch_type;
__le32 peer_sta_id;
struct iwl_fw_channel_info ci;
struct iwl_tdls_channel_switch_timing timing;
struct iwl_tdls_channel_switch_frame frame;
} __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */
/**
* struct iwl_tdls_channel_switch_notif - TDLS channel switch start notification
*
* @status: non-zero on success
* @offchannel_duration: duration given in microseconds
* @sta_id: peer currently performing the channel-switch with
*/
struct iwl_tdls_channel_switch_notif {
__le32 status;
__le32 offchannel_duration;
__le32 sta_id;
} __packed; /* TDLS_STA_CHANNEL_SWITCH_NTFY_API_S_VER_1 */
/**
* struct iwl_tdls_sta_info - TDLS station info
*
* @sta_id: station id of the TDLS peer
* @tx_to_peer_tid: TID reserved vs. the peer for FW based Tx
* @tx_to_peer_ssn: initial SSN the FW should use for Tx on its TID vs the peer
* @is_initiator: 1 if the peer is the TDLS link initiator, 0 otherwise
*/
struct iwl_tdls_sta_info {
u8 sta_id;
u8 tx_to_peer_tid;
__le16 tx_to_peer_ssn;
__le32 is_initiator;
} __packed; /* TDLS_STA_INFO_VER_1 */
/**
* struct iwl_tdls_config_cmd - TDLS basic config command
*
* @id_and_color: MAC id and color being configured
* @tdls_peer_count: amount of currently connected TDLS peers
* @tx_to_ap_tid: TID reserved vs. the AP for FW based Tx
* @tx_to_ap_ssn: initial SSN the FW should use for Tx on its TID vs. the AP
* @sta_info: per-station info. Only the first tdls_peer_count entries are set
* @pti_req_data_offset: offset of network-level data for the PTI template
* @pti_req_tx_cmd: Tx parameters for PTI request template
* @pti_req_template: PTI request template data
*/
struct iwl_tdls_config_cmd {
__le32 id_and_color; /* mac id and color */
u8 tdls_peer_count;
u8 tx_to_ap_tid;
__le16 tx_to_ap_ssn;
struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT];
__le32 pti_req_data_offset;
struct iwl_tx_cmd pti_req_tx_cmd;
u8 pti_req_template[0];
} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */
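Because @pti_req_template is a zero-length trailing array, the command must be allocated with room for the template appended after the fixed-size fields. A minimal sketch, assuming pti_len holds the template length:

/* Hypothetical helper -- not part of the driver. */
static struct iwl_tdls_config_cmd *alloc_tdls_config(size_t pti_len)
{
	struct iwl_tdls_config_cmd *cmd;

	/* one allocation covers the fixed part plus the template bytes */
	cmd = kzalloc(sizeof(*cmd) + pti_len, GFP_KERNEL);
	if (!cmd)
		return NULL;

	return cmd;
}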
/**
* struct iwl_tdls_config_sta_info_res - TDLS per-station config information
*
* @sta_id: station id of the TDLS peer
* @tx_to_peer_last_seq: last sequence number used by FW during FW-based Tx to
* the peer
*/
struct iwl_tdls_config_sta_info_res {
__le16 sta_id;
__le16 tx_to_peer_last_seq;
} __packed; /* TDLS_STA_INFO_RSP_VER_1 */
/**
* struct iwl_tdls_config_res - TDLS config information from FW
*
* @tx_to_ap_last_seq: last sequence number used by FW during FW-based Tx to AP
* @sta_info: per-station TDLS config information
*/
struct iwl_tdls_config_res {
__le32 tx_to_ap_last_seq;
struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT];
} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */
#endif /* __iwl_fw_api_tdls_h__ */

View file

@ -0,0 +1,386 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_fw_api_time_event_h__
#define __iwl_fw_api_time_event_h__
#include "fw/api/phy-ctxt.h"
/* Time Event types, according to MAC type */
enum iwl_time_event_type {
/* BSS Station Events */
TE_BSS_STA_AGGRESSIVE_ASSOC,
TE_BSS_STA_ASSOC,
TE_BSS_EAP_DHCP_PROT,
TE_BSS_QUIET_PERIOD,
/* P2P Device Events */
TE_P2P_DEVICE_DISCOVERABLE,
TE_P2P_DEVICE_LISTEN,
TE_P2P_DEVICE_ACTION_SCAN,
TE_P2P_DEVICE_FULL_SCAN,
/* P2P Client Events */
TE_P2P_CLIENT_AGGRESSIVE_ASSOC,
TE_P2P_CLIENT_ASSOC,
TE_P2P_CLIENT_QUIET_PERIOD,
/* P2P GO Events */
TE_P2P_GO_ASSOC_PROT,
TE_P2P_GO_REPETITIVET_NOA,
TE_P2P_GO_CT_WINDOW,
/* WiDi Sync Events */
TE_WIDI_TX_SYNC,
/* Channel Switch NoA */
TE_CHANNEL_SWITCH_PERIOD,
TE_MAX
}; /* MAC_EVENT_TYPE_API_E_VER_1 */
/* Time event - defines for command API v1 */
/*
* @TE_V1_FRAG_NONE: fragmentation of the time event is NOT allowed.
* @TE_V1_FRAG_SINGLE: fragmentation of the time event is allowed, but only
* the first fragment is scheduled.
* @TE_V1_FRAG_DUAL: fragmentation of the time event is allowed, but only
* the first 2 fragments are scheduled.
* @TE_V1_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
* number of fragments are valid.
*
* Other than the constants defined above, specifying a fragmentation value 'x'
* means that the event can be fragmented but only the first 'x' fragments will
* be scheduled.
*/
enum {
TE_V1_FRAG_NONE = 0,
TE_V1_FRAG_SINGLE = 1,
TE_V1_FRAG_DUAL = 2,
TE_V1_FRAG_ENDLESS = 0xffffffff
};
/* If a Time Event can be fragmented, this is the max number of fragments */
#define TE_V1_FRAG_MAX_MSK 0x0fffffff
/* Repeat the time event endlessly (until removed) */
#define TE_V1_REPEAT_ENDLESS 0xffffffff
/* If a Time Event has bounded repetitions, this is the maximal value */
#define TE_V1_REPEAT_MAX_MSK_V1 0x0fffffff
/* Time Event dependencies: none, on another TE, or in a specific time */
enum {
TE_V1_INDEPENDENT = 0,
TE_V1_DEP_OTHER = BIT(0),
TE_V1_DEP_TSF = BIT(1),
TE_V1_EVENT_SOCIOPATHIC = BIT(2),
}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */
/*
* @TE_V1_NOTIF_NONE: no notifications
* @TE_V1_NOTIF_HOST_EVENT_START: request/receive notification on event start
* @TE_V1_NOTIF_HOST_EVENT_END: request/receive notification on event end
* @TE_V1_NOTIF_INTERNAL_EVENT_START: internal FW use
* @TE_V1_NOTIF_INTERNAL_EVENT_END: internal FW use.
* @TE_V1_NOTIF_HOST_FRAG_START: request/receive notification on frag start
* @TE_V1_NOTIF_HOST_FRAG_END: request/receive notification on frag end
* @TE_V1_NOTIF_INTERNAL_FRAG_START: internal FW use.
* @TE_V1_NOTIF_INTERNAL_FRAG_END: internal FW use.
*
* Supported Time event notifications configuration.
* A notification (both event and fragment) includes a status indicating whether
* the FW was able to schedule the event or not. For fragment start/end
* notification the status is always success. There is no start/end fragment
* notification for monolithic events.
*/
enum {
TE_V1_NOTIF_NONE = 0,
TE_V1_NOTIF_HOST_EVENT_START = BIT(0),
TE_V1_NOTIF_HOST_EVENT_END = BIT(1),
TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2),
TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3),
TE_V1_NOTIF_HOST_FRAG_START = BIT(4),
TE_V1_NOTIF_HOST_FRAG_END = BIT(5),
TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6),
TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7),
}; /* MAC_EVENT_ACTION_API_E_VER_2 */
/* Time event - defines for command API */
/*
* @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
* @TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only
* the first fragment is scheduled.
* @TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only
* the first 2 fragments are scheduled.
* @TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
* number of fragments are valid.
*
* Other than the constants defined above, specifying a fragmentation value 'x'
* means that the event can be fragmented but only the first 'x' fragments will
* be scheduled.
*/
enum {
TE_V2_FRAG_NONE = 0,
TE_V2_FRAG_SINGLE = 1,
TE_V2_FRAG_DUAL = 2,
TE_V2_FRAG_MAX = 0xfe,
TE_V2_FRAG_ENDLESS = 0xff
};
/* Repeat the time event endlessly (until removed) */
#define TE_V2_REPEAT_ENDLESS 0xff
/* If a Time Event has bounded repetitions, this is the maximal value */
#define TE_V2_REPEAT_MAX 0xfe
#define TE_V2_PLACEMENT_POS 12
#define TE_V2_ABSENCE_POS 15
/**
* enum iwl_time_event_policy - Time event policy values
* A notification (both event and fragment) includes a status indicating whether
* the FW was able to schedule the event or not. For fragment start/end
* notification the status is always success. There is no start/end fragment
* notification for monolithic events.
*
* @TE_V2_DEFAULT_POLICY: independent, social, present, unnoticeable
* @TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start
* @TE_V2_NOTIF_HOST_EVENT_END: request/receive notification on event end
* @TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use
* @TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use.
* @TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start
* @TE_V2_NOTIF_HOST_FRAG_END: request/receive notification on frag end
* @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
* @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
* @T2_V2_START_IMMEDIATELY: start time event immediately
* @TE_V2_DEP_OTHER: depends on another time event
* @TE_V2_DEP_TSF: depends on a specific time
* @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of the same MAC
* @TE_V2_ABSENCE: are we present or absent during the Time Event.
*/
enum iwl_time_event_policy {
TE_V2_DEFAULT_POLICY = 0x0,
/* notifications (event start/stop, fragment start/stop) */
TE_V2_NOTIF_HOST_EVENT_START = BIT(0),
TE_V2_NOTIF_HOST_EVENT_END = BIT(1),
TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2),
TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3),
TE_V2_NOTIF_HOST_FRAG_START = BIT(4),
TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
T2_V2_START_IMMEDIATELY = BIT(11),
/* placement characteristics */
TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS),
TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1),
TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2),
/* are we present or absent during the Time Event. */
TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS),
};
/**
* struct iwl_time_event_cmd - configuring Time Events
* with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also
* version 1; which one is used is determined by IWL_UCODE_TLV_FLAGS)
* ( TIME_EVENT_CMD = 0x29 )
* @id_and_color: ID and color of the relevant MAC,
* &enum iwl_ctxt_id_and_color
* @action: action to perform, one of &enum iwl_ctxt_action
* @id: this field has two meanings, depending on the action:
* If the action is ADD, then it means the type of event to add.
* For all other actions it is the unique event ID assigned when the
* event was added by the FW.
* @apply_time: When to start the Time Event (in GP2)
* @max_delay: maximum delay to event's start (apply time), in TU
* @depends_on: the unique ID of the event we depend on (if any)
* @interval: interval between repetitions, in TU
* @duration: duration of event in TU
* @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
* @max_frags: maximal number of fragments the Time Event can be divided into
* @policy: defines whether uCode shall notify the host or other uCode modules
* on event and/or fragment start and/or end, the event's dependency
* (one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF or
* TE_EVENT_SOCIOPATHIC), its presence (TE_ABSENCE) and its TE_NOTIF_*
* notifications; see &enum iwl_time_event_policy
*/
struct iwl_time_event_cmd {
/* COMMON_INDEX_HDR_API_S_VER_1 */
__le32 id_and_color;
__le32 action;
__le32 id;
/* MAC_TIME_EVENT_DATA_API_S_VER_2 */
__le32 apply_time;
__le32 max_delay;
__le32 depends_on;
__le32 interval;
__le32 duration;
u8 repeat;
u8 max_frags;
__le16 policy;
} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_2 */
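To make the policy bits concrete, here is a hedged, hypothetical example of building the command for a one-shot event that starts immediately and notifies the host on start and end; mac_id_and_color is assumed to be in scope and FW_CTXT_ACTION_ADD comes from &enum iwl_ctxt_action:

struct iwl_time_event_cmd cmd = {
	.id_and_color = cpu_to_le32(mac_id_and_color),
	.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
	.id = cpu_to_le32(TE_BSS_STA_ASSOC),	/* type of event to add */
	.apply_time = cpu_to_le32(0),
	.max_delay = cpu_to_le32(500),		/* TU, illustrative */
	.duration = cpu_to_le32(600),		/* TU, illustrative */
	.repeat = 1,				/* single occurrence */
	.max_frags = TE_V2_FRAG_NONE,
	.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
			      TE_V2_NOTIF_HOST_EVENT_END |
			      T2_V2_START_IMMEDIATELY),
};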
/**
* struct iwl_time_event_resp - response structure to iwl_time_event_cmd
* @status: bit 0 indicates success, all others specify errors
* @id: the Time Event type
* @unique_id: the unique ID assigned (in ADD) or given (others) to the TE
* @id_and_color: ID and color of the relevant MAC,
* &enum iwl_ctxt_id_and_color
*/
struct iwl_time_event_resp {
__le32 status;
__le32 id;
__le32 unique_id;
__le32 id_and_color;
} __packed; /* MAC_TIME_EVENT_RSP_API_S_VER_1 */
/**
* struct iwl_time_event_notif - notifications of time event start/stop
* ( TIME_EVENT_NOTIFICATION = 0x2a )
* @timestamp: action timestamp in GP2
* @session_id: session's unique id
* @unique_id: unique id of the Time Event itself
* @id_and_color: ID and color of the relevant MAC
* @action: &enum iwl_time_event_policy
* @status: true if scheduled, false otherwise (not executed)
*/
struct iwl_time_event_notif {
__le32 timestamp;
__le32 session_id;
__le32 unique_id;
__le32 id_and_color;
__le32 action;
__le32 status;
} __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */
/*
* Aux ROC command
*
* Command requests the firmware to create a time event for a certain duration
* and remain on the given channel. This is done by using the Aux framework in
* the FW.
* The command was first used for Hot Spot issues - but can be used regardless
* of Hot Spot.
*
* ( HOT_SPOT_CMD 0x53 )
*
* @id_and_color: ID and color of the MAC
* @action: action to perform, one of FW_CTXT_ACTION_*
* @event_unique_id: If the action is FW_CTXT_ACTION_REMOVE then the
* event_unique_id should be the id of the time event assigned by ucode.
* Otherwise ignore the event_unique_id.
* @sta_id_and_color: station id and color, resumed during "Remain On Channel"
* activity.
* @channel_info: channel info
* @node_addr: Our MAC Address
* @reserved: reserved for alignment
* @apply_time: GP2 value to start (should always be the current GP2 value)
* @apply_time_max_delay: Maximum apply time delay value in TU. Defines max
* time by which start of the event is allowed to be postponed.
* @duration: event duration in TU. The effective event duration is computed
*	as: timeEventDuration = min(duration, remainingQuota)
*/
struct iwl_hs20_roc_req {
/* COMMON_INDEX_HDR_API_S_VER_1 hdr */
__le32 id_and_color;
__le32 action;
__le32 event_unique_id;
__le32 sta_id_and_color;
struct iwl_fw_channel_info channel_info;
u8 node_addr[ETH_ALEN];
__le16 reserved;
__le32 apply_time;
__le32 apply_time_max_delay;
__le32 duration;
} __packed; /* HOT_SPOT_CMD_API_S_VER_1 */
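The duration rule documented above is a simple clamp against the remaining quota; a one-line sketch:

/* min() is from <linux/minmax.h>; a sketch of the documented rule. */
static u32 aux_roc_effective_duration(u32 requested_tu, u32 remaining_quota_tu)
{
	return min(requested_tu, remaining_quota_tu);
}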
/*
* AUX ROC result values
*/
enum iwl_mvm_hot_spot {
HOT_SPOT_RSP_STATUS_OK,
HOT_SPOT_RSP_STATUS_TOO_MANY_EVENTS,
HOT_SPOT_MAX_NUM_OF_SESSIONS,
};
/*
* Aux ROC command response
*
* In response to iwl_hs20_roc_req the FW sends this command to notify the
* driver of the uid of the time event.
*
* ( HOT_SPOT_CMD 0x53 )
*
* @event_unique_id: Unique ID of time event assigned by ucode
* @status: return status; 0 is success, all other values indicate specific errors
*/
struct iwl_hs20_roc_res {
__le32 event_unique_id;
__le32 status;
} __packed; /* HOT_SPOT_RSP_API_S_VER_1 */
#endif /* __iwl_fw_api_time_event_h__ */

View file

@ -16,11 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@ -60,8 +55,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __fw_api_tof_h__
#define __fw_api_tof_h__
#ifndef __iwl_fw_api_tof_h__
#define __iwl_fw_api_tof_h__
/* ToF sub-group command IDs */
enum iwl_mvm_tof_sub_grp_ids {

View file

@ -17,11 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@ -62,8 +57,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#ifndef __fw_api_tx_h__
#define __fw_api_tx_h__
#ifndef __iwl_fw_api_tx_h__
#define __iwl_fw_api_tx_h__
/**
* enum iwl_tx_flags - bitmasks for tx_flags in TX command
@ -771,7 +766,8 @@ struct iwl_mac_beacon_cmd_v6 {
} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_6 */
/**
* struct iwl_mac_beacon_cmd_data - data of beacon template with offloaded CSA
* struct iwl_mac_beacon_cmd_v7 - beacon template command with offloaded CSA
* @tx: the tx commands associated with the beacon frame
* @template_id: currently equal to the mac context id of the corresponding
* mac.
* @tim_idx: the offset of the tim IE in the beacon
@ -780,23 +776,14 @@ struct iwl_mac_beacon_cmd_v6 {
* @csa_offset: offset to the CSA IE if present
* @frame: the template of the beacon frame
*/
struct iwl_mac_beacon_cmd_data {
struct iwl_mac_beacon_cmd_v7 {
struct iwl_tx_cmd tx;
__le32 template_id;
__le32 tim_idx;
__le32 tim_size;
__le32 ecsa_offset;
__le32 csa_offset;
struct ieee80211_hdr frame[0];
};
/**
* struct iwl_mac_beacon_cmd_v7 - beacon template command with offloaded CSA
* @tx: the tx commands associated with the beacon frame
* @data: see &iwl_mac_beacon_cmd_data
*/
struct iwl_mac_beacon_cmd_v7 {
struct iwl_tx_cmd tx;
struct iwl_mac_beacon_cmd_data data;
} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */
/**
@ -804,13 +791,24 @@ struct iwl_mac_beacon_cmd_v7 {
* @byte_cnt: byte count of the beacon frame
* @flags: for future use
* @reserved: reserved
* @data: see &iwl_mac_beacon_cmd_data
* @template_id: currently equal to the mac context id of the corresponding
* mac.
* @tim_idx: the offset of the tim IE in the beacon
* @tim_size: the length of the tim IE
* @ecsa_offset: offset to the ECSA IE if present
* @csa_offset: offset to the CSA IE if present
* @frame: the template of the beacon frame
*/
struct iwl_mac_beacon_cmd {
__le16 byte_cnt;
__le16 flags;
__le64 reserved;
struct iwl_mac_beacon_cmd_data data;
__le32 template_id;
__le32 tim_idx;
__le32 tim_size;
__le32 ecsa_offset;
__le32 csa_offset;
struct ieee80211_hdr frame[0];
} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_8 */
struct iwl_beacon_notif {
@ -914,4 +912,4 @@ struct iwl_scd_txq_cfg_rsp {
u8 scd_queue;
} __packed; /* SCD_QUEUE_CFG_RSP_API_S_VER_1 */
#endif /* __fw_api_tx_h__ */
#endif /* __iwl_fw_api_tx_h__ */

View file

@ -0,0 +1,163 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_fw_api_txq_h__
#define __iwl_fw_api_txq_h__
/*
* DQA queue numbers
*
* @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW
* @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames
* @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames
* @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames
* @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure
* that we are never left without the possibility to connect to an AP.
* @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames.
* Each MGMT queue is mapped to a single STA.
* MGMT frames are frames that return true on ieee80211_is_mgmt()
* @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames
* @IWL_MVM_DQA_AP_PROBE_RESP_QUEUE: a queue reserved for P2P GO/SoftAP probe
* responses
* @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames.
* DATA frames are intended for !ieee80211_is_mgmt() frames, but if
* the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues
* as well
* @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames
*/
enum iwl_mvm_dqa_txq {
IWL_MVM_DQA_CMD_QUEUE = 0,
IWL_MVM_DQA_AUX_QUEUE = 1,
IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2,
IWL_MVM_DQA_GCAST_QUEUE = 3,
IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
IWL_MVM_DQA_MAX_MGMT_QUEUE = 8,
IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9,
IWL_MVM_DQA_MIN_DATA_QUEUE = 10,
IWL_MVM_DQA_MAX_DATA_QUEUE = 31,
};
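A small hypothetical helper shows how the pool boundaries above would be used to classify a queue number:

/* Hypothetical helper -- not part of the driver API. */
static bool iwl_dqa_is_mgmt_queue(int queue)
{
	return queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE &&
	       queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE;
}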
enum iwl_mvm_tx_fifo {
IWL_MVM_TX_FIFO_BK = 0,
IWL_MVM_TX_FIFO_BE,
IWL_MVM_TX_FIFO_VI,
IWL_MVM_TX_FIFO_VO,
IWL_MVM_TX_FIFO_MCAST = 5,
IWL_MVM_TX_FIFO_CMD = 7,
};
enum iwl_gen2_tx_fifo {
IWL_GEN2_TX_FIFO_CMD = 0,
IWL_GEN2_EDCA_TX_FIFO_BK,
IWL_GEN2_EDCA_TX_FIFO_BE,
IWL_GEN2_EDCA_TX_FIFO_VI,
IWL_GEN2_EDCA_TX_FIFO_VO,
IWL_GEN2_TRIG_TX_FIFO_BK,
IWL_GEN2_TRIG_TX_FIFO_BE,
IWL_GEN2_TRIG_TX_FIFO_VI,
IWL_GEN2_TRIG_TX_FIFO_VO,
};
/**
* enum iwl_tx_queue_cfg_actions - TXQ config options
* @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue
* @TX_QUEUE_CFG_TFD_SHORT_FORMAT: use short TFD format
*/
enum iwl_tx_queue_cfg_actions {
TX_QUEUE_CFG_ENABLE_QUEUE = BIT(0),
TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1),
};
/**
* struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
* @sta_id: station id
* @tid: tid of the queue
* @flags: see &enum iwl_tx_queue_cfg_actions
* @cb_size: size of TFD cyclic buffer, encoded as log2(number of TFDs) - 3.
* Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs)
* @byte_cnt_addr: address of byte count table
* @tfdq_addr: address of TFD circular buffer
*/
struct iwl_tx_queue_cfg_cmd {
u8 sta_id;
u8 tid;
__le16 flags;
__le32 cb_size;
__le64 byte_cnt_addr;
__le64 tfdq_addr;
} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */
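Since @cb_size stores the base-2 logarithm of the TFD count minus 3, the encoding can be sketched as follows, assuming num_tfds is a power of two in [8, 256]:

/* Hypothetical encoder: 8 TFDs -> 0, 256 TFDs -> 5.
 * ilog2() is from <linux/log2.h>.
 */
static __le32 txq_cfg_cb_size(unsigned int num_tfds)
{
	return cpu_to_le32(ilog2(num_tfds) - 3);
}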
/**
* struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config
* @queue_number: queue number assigned to this RA/TID
* @flags: set on failure
* @write_pointer: initial value for write pointer
* @reserved: reserved
*/
struct iwl_tx_queue_cfg_rsp {
__le16 queue_number;
__le16 flags;
__le16 write_pointer;
__le16 reserved;
} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */
#endif /* __iwl_fw_api_txq_h__ */

View file

@ -0,0 +1,88 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include "iwl-drv.h"
#include "runtime.h"
#include "fw/api/commands.h"
#include "fw/api/alive.h"
static void iwl_fwrt_fseq_ver_mismatch(struct iwl_fw_runtime *fwrt,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_fseq_ver_mismatch_ntf *fseq = (void *)pkt->data;
IWL_ERR(fwrt, "FSEQ version mismatch (aux: %d, wifi: %d)\n",
__le32_to_cpu(fseq->aux_read_fseq_ver),
__le32_to_cpu(fseq->wifi_fseq_ver));
}
void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u32 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
switch (cmd) {
case WIDE_ID(SYSTEM_GROUP, FSEQ_VER_MISMATCH_NTF):
iwl_fwrt_fseq_ver_mismatch(fwrt, rxb);
break;
default:
break;
}
}
IWL_EXPORT_SYMBOL(iwl_fwrt_handle_notification);
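A hedged sketch of how an op mode might chain into this shared handler from its own RX path (the surrounding op-mode plumbing is assumed):

/* Hypothetical op-mode hook -- forwards shared notifications first. */
static void my_op_mode_rx_notif(struct iwl_fw_runtime *fwrt,
				struct iwl_rx_cmd_buffer *rxb)
{
	iwl_fwrt_handle_notification(fwrt, rxb);
	/* op-mode-specific dispatch would continue here */
}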

Просмотреть файл

@ -63,22 +63,37 @@
*
*****************************************************************************/
#include <linux/devcoredump.h>
#include "fw-dbg.h"
#include "iwl-drv.h"
#include "runtime.h"
#include "dbg.h"
#include "iwl-io.h"
#include "mvm.h"
#include "iwl-prph.h"
#include "iwl-csr.h"
/**
* struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump
*
* @fwrt_ptr: pointer to the buffer coming from fwrt
* @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the
* transport's data.
* @fwrt_len: length of the valid data in fwrt_ptr
*/
struct iwl_fw_dump_ptrs {
struct iwl_trans_dump_data *trans_ptr;
void *fwrt_ptr;
u32 fwrt_len;
};
#define RADIO_REG_MAX_READ 0x2ad
static void iwl_mvm_read_radio_reg(struct iwl_mvm *mvm,
struct iwl_fw_error_dump_data **dump_data)
static void iwl_read_radio_regs(struct iwl_fw_runtime *fwrt,
struct iwl_fw_error_dump_data **dump_data)
{
u8 *pos = (void *)(*dump_data)->data;
unsigned long flags;
int i;
if (!iwl_trans_grab_nic_access(mvm->trans, &flags))
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
return;
(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG);
@ -88,20 +103,20 @@ static void iwl_mvm_read_radio_reg(struct iwl_mvm *mvm,
u32 rd_cmd = RADIO_RSP_RD_CMD;
rd_cmd |= i << RADIO_RSP_ADDR_POS;
iwl_write_prph_no_grab(mvm->trans, RSP_RADIO_CMD, rd_cmd);
*pos = (u8)iwl_read_prph_no_grab(mvm->trans, RSP_RADIO_RDDAT);
iwl_write_prph_no_grab(fwrt->trans, RSP_RADIO_CMD, rd_cmd);
*pos = (u8)iwl_read_prph_no_grab(fwrt->trans, RSP_RADIO_RDDAT);
pos++;
}
*dump_data = iwl_fw_error_next_data(*dump_data);
iwl_trans_release_nic_access(mvm->trans, &flags);
iwl_trans_release_nic_access(fwrt->trans, &flags);
}
static void iwl_mvm_dump_rxf(struct iwl_mvm *mvm,
struct iwl_fw_error_dump_data **dump_data,
int size, u32 offset, int fifo_num)
static void iwl_fwrt_dump_rxf(struct iwl_fw_runtime *fwrt,
struct iwl_fw_error_dump_data **dump_data,
int size, u32 offset, int fifo_num)
{
struct iwl_fw_error_dump_fifo *fifo_hdr;
u32 *fifo_data;
@ -122,41 +137,41 @@ static void iwl_mvm_dump_rxf(struct iwl_mvm *mvm,
fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
fifo_hdr->available_bytes =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
RXF_RD_D_SPACE + offset));
fifo_hdr->wr_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
RXF_RD_WR_PTR + offset));
fifo_hdr->rd_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
RXF_RD_RD_PTR + offset));
fifo_hdr->fence_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
RXF_RD_FENCE_PTR + offset));
fifo_hdr->fence_mode =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
RXF_SET_FENCE_MODE + offset));
/* Lock fence */
iwl_trans_write_prph(mvm->trans, RXF_SET_FENCE_MODE + offset, 0x1);
iwl_trans_write_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset, 0x1);
/* Set fence pointer to the same place like WR pointer */
iwl_trans_write_prph(mvm->trans, RXF_LD_WR2FENCE + offset, 0x1);
iwl_trans_write_prph(fwrt->trans, RXF_LD_WR2FENCE + offset, 0x1);
/* Set fence offset */
iwl_trans_write_prph(mvm->trans,
iwl_trans_write_prph(fwrt->trans,
RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0);
/* Read FIFO */
fifo_len /= sizeof(u32); /* Size in DWORDS */
for (i = 0; i < fifo_len; i++)
fifo_data[i] = iwl_trans_read_prph(mvm->trans,
fifo_data[i] = iwl_trans_read_prph(fwrt->trans,
RXF_FIFO_RD_FENCE_INC +
offset);
*dump_data = iwl_fw_error_next_data(*dump_data);
}
static void iwl_mvm_dump_txf(struct iwl_mvm *mvm,
struct iwl_fw_error_dump_data **dump_data,
int size, u32 offset, int fifo_num)
static void iwl_fwrt_dump_txf(struct iwl_fw_runtime *fwrt,
struct iwl_fw_error_dump_data **dump_data,
int size, u32 offset, int fifo_num)
{
struct iwl_fw_error_dump_fifo *fifo_hdr;
u32 *fifo_data;
@ -177,91 +192,91 @@ static void iwl_mvm_dump_txf(struct iwl_mvm *mvm,
fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
fifo_hdr->available_bytes =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
TXF_FIFO_ITEM_CNT + offset));
fifo_hdr->wr_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
TXF_WR_PTR + offset));
fifo_hdr->rd_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
TXF_RD_PTR + offset));
fifo_hdr->fence_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
TXF_FENCE_PTR + offset));
fifo_hdr->fence_mode =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
TXF_LOCK_FENCE + offset));
/* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR + offset,
iwl_trans_write_prph(fwrt->trans, TXF_READ_MODIFY_ADDR + offset,
TXF_WR_PTR + offset);
/* Dummy-read to advance the read pointer to the head */
iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA + offset);
iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset);
/* Read FIFO */
fifo_len /= sizeof(u32); /* Size in DWORDS */
for (i = 0; i < fifo_len; i++)
fifo_data[i] = iwl_trans_read_prph(mvm->trans,
fifo_data[i] = iwl_trans_read_prph(fwrt->trans,
TXF_READ_MODIFY_DATA +
offset);
*dump_data = iwl_fw_error_next_data(*dump_data);
}
static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
struct iwl_fw_error_dump_data **dump_data)
static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
struct iwl_fw_error_dump_data **dump_data)
{
struct iwl_fw_error_dump_fifo *fifo_hdr;
struct iwl_mvm_shared_mem_cfg *cfg = &mvm->smem_cfg;
struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
u32 *fifo_data;
u32 fifo_len;
unsigned long flags;
int i, j;
if (!iwl_trans_grab_nic_access(mvm->trans, &flags))
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
return;
/* Pull RXF1 */
iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0);
iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0);
/* Pull RXF2 */
iwl_mvm_dump_rxf(mvm, dump_data, cfg->rxfifo2_size,
RXF_DIFF_FROM_PREV, 1);
iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
RXF_DIFF_FROM_PREV, 1);
/* Pull LMAC2 RXF1 */
if (mvm->smem_cfg.num_lmacs > 1)
iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[1].rxfifo1_size,
LMAC2_PRPH_OFFSET, 2);
if (fwrt->smem_cfg.num_lmacs > 1)
iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[1].rxfifo1_size,
LMAC2_PRPH_OFFSET, 2);
/* Pull TXF data from LMAC1 */
for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) {
for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
/* Mark the number of TXF we're pulling now */
iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i);
iwl_mvm_dump_txf(mvm, dump_data, cfg->lmac[0].txfifo_size[i],
0, i);
iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[0].txfifo_size[i],
0, i);
}
/* Pull TXF data from LMAC2 */
if (mvm->smem_cfg.num_lmacs > 1) {
for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) {
if (fwrt->smem_cfg.num_lmacs > 1) {
for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
/* Mark the number of TXF we're pulling now */
iwl_trans_write_prph(mvm->trans,
iwl_trans_write_prph(fwrt->trans,
TXF_LARC_NUM + LMAC2_PRPH_OFFSET,
i);
iwl_mvm_dump_txf(mvm, dump_data,
cfg->lmac[1].txfifo_size[i],
LMAC2_PRPH_OFFSET,
i + cfg->num_txfifo_entries);
iwl_fwrt_dump_txf(fwrt, dump_data,
cfg->lmac[1].txfifo_size[i],
LMAC2_PRPH_OFFSET,
i + cfg->num_txfifo_entries);
}
}
if (fw_has_capa(&mvm->fw->ucode_capa,
if (fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
/* Pull UMAC internal TXF data from all TXFs */
for (i = 0;
i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size);
i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size);
i++) {
fifo_hdr = (void *)(*dump_data)->data;
fifo_data = (void *)fifo_hdr->data;
fifo_len = mvm->smem_cfg.internal_txfifo_size[i];
fifo_len = fwrt->smem_cfg.internal_txfifo_size[i];
/* No need to try to read the data if the length is 0 */
if (fifo_len == 0)
@ -276,52 +291,45 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
fifo_hdr->fifo_num = cpu_to_le32(i);
/* Mark the number of TXF we're pulling now */
iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i +
mvm->smem_cfg.num_txfifo_entries);
iwl_trans_write_prph(fwrt->trans, TXF_CPU2_NUM, i +
fwrt->smem_cfg.num_txfifo_entries);
fifo_hdr->available_bytes =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
TXF_CPU2_FIFO_ITEM_CNT));
fifo_hdr->wr_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
TXF_CPU2_WR_PTR));
fifo_hdr->rd_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
TXF_CPU2_RD_PTR));
fifo_hdr->fence_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
TXF_CPU2_FENCE_PTR));
fifo_hdr->fence_mode =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
TXF_CPU2_LOCK_FENCE));
/* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */
iwl_trans_write_prph(mvm->trans,
iwl_trans_write_prph(fwrt->trans,
TXF_CPU2_READ_MODIFY_ADDR,
TXF_CPU2_WR_PTR);
/* Dummy-read to advance the read pointer to head */
iwl_trans_read_prph(mvm->trans,
iwl_trans_read_prph(fwrt->trans,
TXF_CPU2_READ_MODIFY_DATA);
/* Read FIFO */
fifo_len /= sizeof(u32); /* Size in DWORDS */
for (j = 0; j < fifo_len; j++)
fifo_data[j] =
iwl_trans_read_prph(mvm->trans,
iwl_trans_read_prph(fwrt->trans,
TXF_CPU2_READ_MODIFY_DATA);
*dump_data = iwl_fw_error_next_data(*dump_data);
}
}
iwl_trans_release_nic_access(mvm->trans, &flags);
}
void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm)
{
if (mvm->fw_dump_desc != &iwl_mvm_dump_desc_assert)
kfree(mvm->fw_dump_desc);
mvm->fw_dump_desc = NULL;
iwl_trans_release_nic_access(fwrt->trans, &flags);
}
#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */
@ -531,37 +539,32 @@ static struct scatterlist *alloc_sgtable(int size)
return table;
}
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
{
struct iwl_fw_error_dump_file *dump_file;
struct iwl_fw_error_dump_data *dump_data;
struct iwl_fw_error_dump_info *dump_info;
struct iwl_fw_error_dump_mem *dump_mem;
struct iwl_fw_error_dump_trigger_desc *dump_trig;
struct iwl_mvm_dump_ptrs *fw_error_dump;
struct iwl_fw_dump_ptrs *fw_error_dump;
struct scatterlist *sg_dump_data;
u32 sram_len, sram_ofs;
const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = mvm->fw->dbg_mem_tlv;
const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg_mem_tlv;
u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0;
u32 smem_len = mvm->fw->n_dbg_mem_tlv ? 0 : mvm->cfg->smem_len;
u32 sram2_len = mvm->fw->n_dbg_mem_tlv ? 0 : mvm->cfg->dccm2_len;
u32 smem_len = fwrt->fw->n_dbg_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
u32 sram2_len = fwrt->fw->n_dbg_mem_tlv ?
0 : fwrt->trans->cfg->dccm2_len;
bool monitor_dump_only = false;
int i;
if (!IWL_MVM_COLLECT_FW_ERR_DUMP &&
!mvm->trans->dbg_dest_tlv)
return;
lockdep_assert_held(&mvm->mutex);
/* there's no point in fw dump if the bus is dead */
if (test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
IWL_ERR(mvm, "Skip fw error dump since bus is dead\n");
if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
goto out;
}
if (mvm->fw_dump_trig &&
mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
if (fwrt->dump.trig &&
fwrt->dump.trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
monitor_dump_only = true;
fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
@ -569,20 +572,20 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
goto out;
/* SRAM - include stack CCM if driver knows the values for it */
if (!mvm->cfg->dccm_offset || !mvm->cfg->dccm_len) {
if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
const struct fw_img *img;
img = &mvm->fw->img[mvm->cur_ucode];
img = &fwrt->fw->img[fwrt->cur_fw_img];
sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
} else {
sram_ofs = mvm->cfg->dccm_offset;
sram_len = mvm->cfg->dccm_len;
sram_ofs = fwrt->trans->cfg->dccm_offset;
sram_len = fwrt->trans->cfg->dccm_len;
}
/* reading RXF/TXF sizes */
if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->smem_cfg;
if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg;
fifo_data_len = 0;
@ -621,7 +624,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
}
}
if (fw_has_capa(&mvm->fw->ucode_capa,
if (fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
for (i = 0;
i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
@ -638,7 +641,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
}
/* Make room for PRPH registers */
if (!mvm->trans->cfg->gen2) {
if (!fwrt->trans->cfg->gen2) {
for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
i++) {
/* The range includes both boundaries */
@ -652,7 +655,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
}
}
if (!mvm->trans->cfg->gen2 && mvm->cfg->mq_rx_supported) {
if (!fwrt->trans->cfg->gen2 &&
fwrt->trans->cfg->mq_rx_supported) {
for (i = 0; i <
ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
/* The range includes both boundaries */
@ -666,7 +670,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
}
}
if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
}
@ -686,16 +690,16 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
/* Make room for MEM segments */
for (i = 0; i < mvm->fw->n_dbg_mem_tlv; i++) {
for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
le32_to_cpu(fw_dbg_mem[i].len);
}
/* Make room for fw's virtual image pages, if it exists */
if (!mvm->trans->cfg->gen2 &&
mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
mvm->fw_paging_db[0].fw_paging_block)
file_len += mvm->num_of_paging_blk *
if (!fwrt->trans->cfg->gen2 &&
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
fwrt->fw_paging_db[0].fw_paging_block)
file_len += fwrt->num_of_paging_blk *
(sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_paging) +
PAGING_BLOCK_SIZE);
@ -706,11 +710,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
sizeof(*dump_info);
}
if (mvm->fw_dump_desc)
if (fwrt->dump.desc)
file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
mvm->fw_dump_desc->len;
fwrt->dump.desc->len;
if (!mvm->fw->n_dbg_mem_tlv)
if (!fwrt->fw->n_dbg_mem_tlv)
file_len += sram_len + sizeof(*dump_mem);
dump_file = vzalloc(file_len);
@ -719,7 +723,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
goto out;
}
fw_error_dump->op_mode_ptr = dump_file;
fw_error_dump->fwrt_ptr = dump_file;
dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
dump_data = (void *)dump_file->data;
@ -728,32 +732,32 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_data->len = cpu_to_le32(sizeof(*dump_info));
dump_info = (void *)dump_data->data;
dump_info->device_family =
mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000 ?
fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ?
cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(mvm->trans->hw_rev));
memcpy(dump_info->fw_human_readable, mvm->fw->human_readable,
dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
sizeof(dump_info->fw_human_readable));
strncpy(dump_info->dev_human_readable, mvm->cfg->name,
strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
sizeof(dump_info->dev_human_readable));
strncpy(dump_info->bus_human_readable, mvm->dev->bus->name,
strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
sizeof(dump_info->bus_human_readable));
dump_data = iwl_fw_error_next_data(dump_data);
/* We only dump the FIFOs if the FW is in error state */
if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
iwl_mvm_dump_fifos(mvm, &dump_data);
if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
iwl_fw_dump_fifos(fwrt, &dump_data);
if (radio_len)
iwl_mvm_read_radio_reg(mvm, &dump_data);
iwl_read_radio_regs(fwrt, &dump_data);
}
if (mvm->fw_dump_desc) {
if (fwrt->dump.desc) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
mvm->fw_dump_desc->len);
fwrt->dump.desc->len);
dump_trig = (void *)dump_data->data;
memcpy(dump_trig, &mvm->fw_dump_desc->trig_desc,
sizeof(*dump_trig) + mvm->fw_dump_desc->len);
memcpy(dump_trig, &fwrt->dump.desc->trig_desc,
sizeof(*dump_trig) + fwrt->dump.desc->len);
dump_data = iwl_fw_error_next_data(dump_data);
}
@ -762,18 +766,18 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
if (monitor_dump_only)
goto dump_trans_data;
if (!mvm->fw->n_dbg_mem_tlv) {
if (!fwrt->fw->n_dbg_mem_tlv) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
dump_mem->offset = cpu_to_le32(sram_ofs);
iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
iwl_trans_read_mem_bytes(fwrt->trans, sram_ofs, dump_mem->data,
sram_len);
dump_data = iwl_fw_error_next_data(dump_data);
}
for (i = 0; i < mvm->fw->n_dbg_mem_tlv; i++) {
for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
u32 len = le32_to_cpu(fw_dbg_mem[i].len);
u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
bool success;
@ -786,13 +790,13 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
switch (dump_mem->type & cpu_to_le32(FW_DBG_MEM_TYPE_MASK)) {
case cpu_to_le32(FW_DBG_MEM_TYPE_REGULAR):
iwl_trans_read_mem_bytes(mvm->trans, ofs,
iwl_trans_read_mem_bytes(fwrt->trans, ofs,
dump_mem->data,
len);
success = true;
break;
case cpu_to_le32(FW_DBG_MEM_TYPE_PRPH):
success = iwl_read_prph_block(mvm->trans, ofs, len,
success = iwl_read_prph_block(fwrt->trans, ofs, len,
(void *)dump_mem->data);
break;
default:
@ -813,8 +817,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM);
dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset);
iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset,
dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->smem_offset);
iwl_trans_read_mem_bytes(fwrt->trans,
fwrt->trans->cfg->smem_offset,
dump_mem->data, smem_len);
dump_data = iwl_fw_error_next_data(dump_data);
}
@ -824,28 +829,29 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset);
iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset,
dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->dccm2_offset);
iwl_trans_read_mem_bytes(fwrt->trans,
fwrt->trans->cfg->dccm2_offset,
dump_mem->data, sram2_len);
dump_data = iwl_fw_error_next_data(dump_data);
}
/* Dump fw's virtual image */
if (!mvm->trans->cfg->gen2 &&
mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
mvm->fw_paging_db[0].fw_paging_block) {
for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
if (!fwrt->trans->cfg->gen2 &&
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
fwrt->fw_paging_db[0].fw_paging_block) {
for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) {
struct iwl_fw_error_dump_paging *paging;
struct page *pages =
mvm->fw_paging_db[i].fw_paging_block;
dma_addr_t addr = mvm->fw_paging_db[i].fw_paging_phys;
fwrt->fw_paging_db[i].fw_paging_block;
dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys;
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
dump_data->len = cpu_to_le32(sizeof(*paging) +
PAGING_BLOCK_SIZE);
paging = (void *)dump_data->data;
paging->index = cpu_to_le32(i);
dma_sync_single_for_cpu(mvm->trans->dev, addr,
dma_sync_single_for_cpu(fwrt->trans->dev, addr,
PAGING_BLOCK_SIZE,
DMA_BIDIRECTIONAL);
memcpy(paging->data, page_address(pages),
@ -855,20 +861,20 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
}
if (prph_len) {
iwl_dump_prph(mvm->trans, &dump_data,
iwl_dump_prph(fwrt->trans, &dump_data,
iwl_prph_dump_addr_comm,
ARRAY_SIZE(iwl_prph_dump_addr_comm));
if (mvm->cfg->mq_rx_supported)
iwl_dump_prph(mvm->trans, &dump_data,
if (fwrt->trans->cfg->mq_rx_supported)
iwl_dump_prph(fwrt->trans, &dump_data,
iwl_prph_dump_addr_9000,
ARRAY_SIZE(iwl_prph_dump_addr_9000));
}
dump_trans_data:
fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
mvm->fw_dump_trig);
fw_error_dump->op_mode_len = file_len;
fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans,
fwrt->dump.trig);
fw_error_dump->fwrt_len = file_len;
if (fw_error_dump->trans_ptr)
file_len += fw_error_dump->trans_ptr->len;
dump_file->file_len = cpu_to_le32(file_len);
@ -877,68 +883,72 @@ dump_trans_data:
if (sg_dump_data) {
sg_pcopy_from_buffer(sg_dump_data,
sg_nents(sg_dump_data),
fw_error_dump->op_mode_ptr,
fw_error_dump->op_mode_len, 0);
fw_error_dump->fwrt_ptr,
fw_error_dump->fwrt_len, 0);
if (fw_error_dump->trans_ptr)
sg_pcopy_from_buffer(sg_dump_data,
sg_nents(sg_dump_data),
fw_error_dump->trans_ptr->data,
fw_error_dump->trans_ptr->len,
fw_error_dump->op_mode_len);
dev_coredumpsg(mvm->trans->dev, sg_dump_data, file_len,
fw_error_dump->fwrt_len);
dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len,
GFP_KERNEL);
}
vfree(fw_error_dump->op_mode_ptr);
vfree(fw_error_dump->fwrt_ptr);
vfree(fw_error_dump->trans_ptr);
kfree(fw_error_dump);
out:
iwl_mvm_free_fw_dump_desc(mvm);
mvm->fw_dump_trig = NULL;
clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
iwl_fw_free_dump_desc(fwrt);
fwrt->dump.trig = NULL;
clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
}
IWL_EXPORT_SYMBOL(iwl_fw_error_dump);
const struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert = {
const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
.trig_desc = {
.type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT),
},
};
IWL_EXPORT_SYMBOL(iwl_dump_desc_assert);
int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
const struct iwl_mvm_dump_desc *desc,
const struct iwl_fw_dbg_trigger_tlv *trigger)
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
const struct iwl_fw_dump_desc *desc,
const struct iwl_fw_dbg_trigger_tlv *trigger)
{
unsigned int delay = 0;
if (trigger)
delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
if (WARN(mvm->trans->state == IWL_TRANS_NO_FW,
if (WARN(fwrt->trans->state == IWL_TRANS_NO_FW,
"Can't collect dbg data when FW isn't alive\n"))
return -EIO;
if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status))
if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
return -EBUSY;
if (WARN_ON(mvm->fw_dump_desc))
iwl_mvm_free_fw_dump_desc(mvm);
if (WARN_ON(fwrt->dump.desc))
iwl_fw_free_dump_desc(fwrt);
IWL_WARN(mvm, "Collecting data: trigger %d fired.\n",
IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
le32_to_cpu(desc->trig_desc.type));
mvm->fw_dump_desc = desc;
mvm->fw_dump_trig = trigger;
fwrt->dump.desc = desc;
fwrt->dump.trig = trigger;
schedule_delayed_work(&mvm->fw_dump_wk, delay);
schedule_delayed_work(&fwrt->dump.wk, delay);
return 0;
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc);
int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
const char *str, size_t len,
const struct iwl_fw_dbg_trigger_tlv *trigger)
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig,
const char *str, size_t len,
const struct iwl_fw_dbg_trigger_tlv *trigger)
{
struct iwl_mvm_dump_desc *desc;
struct iwl_fw_dump_desc *desc;
desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
if (!desc)
@ -948,12 +958,13 @@ int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
desc->trig_desc.type = cpu_to_le32(trig);
memcpy(desc->trig_desc.data, str, len);
return iwl_mvm_fw_dbg_collect_desc(mvm, desc, trigger);
return iwl_fw_dbg_collect_desc(fwrt, desc, trigger);
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trigger,
const char *fmt, ...)
int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_trigger_tlv *trigger,
const char *fmt, ...)
{
u16 occurrences = le16_to_cpu(trigger->occurrences);
int ret, len = 0;
@ -978,8 +989,8 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
len = strlen(buf) + 1;
}
ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, len,
trigger);
ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len,
trigger);
if (ret)
return ret;
@ -987,37 +998,42 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
trigger->occurrences = cpu_to_le16(occurrences - 1);
return 0;
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig);
int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
{
u8 *ptr;
int ret;
int i;
if (WARN_ONCE(conf_id >= ARRAY_SIZE(mvm->fw->dbg_conf_tlv),
if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg_conf_tlv),
"Invalid configuration %d\n", conf_id))
return -EINVAL;
/* EARLY START - firmware's configuration is hard coded */
if ((!mvm->fw->dbg_conf_tlv[conf_id] ||
!mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
if ((!fwrt->fw->dbg_conf_tlv[conf_id] ||
!fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
conf_id == FW_DBG_START_FROM_ALIVE)
return 0;
if (!mvm->fw->dbg_conf_tlv[conf_id])
if (!fwrt->fw->dbg_conf_tlv[conf_id])
return -EINVAL;
if (mvm->fw_dbg_conf != FW_DBG_INVALID)
IWL_WARN(mvm, "FW already configured (%d) - re-configuring\n",
mvm->fw_dbg_conf);
if (fwrt->dump.conf != FW_DBG_INVALID)
IWL_WARN(fwrt, "FW already configured (%d) - re-configuring\n",
fwrt->dump.conf);
/* Send all HCMDs for configuring the FW debug */
ptr = (void *)&mvm->fw->dbg_conf_tlv[conf_id]->hcmd;
for (i = 0; i < mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) {
ptr = (void *)&fwrt->fw->dbg_conf_tlv[conf_id]->hcmd;
for (i = 0; i < fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) {
struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr;
struct iwl_host_cmd hcmd = {
.id = cmd->id,
.len = { le16_to_cpu(cmd->len), },
.data = { cmd->data, },
};
ret = iwl_mvm_send_cmd_pdu(mvm, cmd->id, 0,
le16_to_cpu(cmd->len), cmd->data);
ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
if (ret)
return ret;
@ -1025,7 +1041,59 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
ptr += le16_to_cpu(cmd->len);
}
mvm->fw_dbg_conf = conf_id;
fwrt->dump.conf = conf_id;
return 0;
}
IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
void iwl_fw_error_dump_wk(struct work_struct *work)
{
struct iwl_fw_runtime *fwrt =
container_of(work, struct iwl_fw_runtime, dump.wk.work);
if (fwrt->ops && fwrt->ops->dump_start &&
fwrt->ops->dump_start(fwrt->ops_ctx))
return;
if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
/* stop recording */
iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100);
iwl_fw_error_dump(fwrt);
/* start recording again if the firmware is not crashed */
if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
fwrt->fw->dbg_dest_tlv) {
iwl_clear_bits_prph(fwrt->trans,
MON_BUFF_SAMPLE_CTL, 0x100);
iwl_clear_bits_prph(fwrt->trans,
MON_BUFF_SAMPLE_CTL, 0x1);
iwl_set_bits_prph(fwrt->trans,
MON_BUFF_SAMPLE_CTL, 0x1);
}
} else {
u32 in_sample = iwl_read_prph(fwrt->trans, DBGC_IN_SAMPLE);
u32 out_ctrl = iwl_read_prph(fwrt->trans, DBGC_OUT_CTRL);
/* stop recording */
iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0);
udelay(100);
iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0);
/* wait for the DBGC to stop before we collect the data */
udelay(500);
iwl_fw_error_dump(fwrt);
/* start recording again if the firmware is not crashed */
if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
fwrt->fw->dbg_dest_tlv) {
iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, in_sample);
iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl);
}
}
if (fwrt->ops && fwrt->ops->dump_end)
fwrt->ops->dump_end(fwrt->ops_ctx);
}
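The worker brackets the dump with the op mode's dump_start/dump_end callbacks; a minimal sketch of how an op mode might provide them, assuming nothing beyond the ops structure above (all names here are illustrative):
/* Illustrative op-mode glue: serialize dumping against other work. */
static int example_dump_start(void *ctx)
{
	struct example_opmode *om = ctx;
	mutex_lock(&om->mutex);	/* keep fw restart out while dumping */
	return 0;		/* a non-zero return aborts the dump */
}
static void example_dump_end(void *ctx)
{
	struct example_opmode *om = ctx;
	mutex_unlock(&om->mutex);
}
static const struct iwl_fw_runtime_ops example_fw_runtime_ops = {
	.dump_start = example_dump_start,
	.dump_end = example_dump_end,
};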


@ -7,7 +7,7 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 Intel Deutschland GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -32,7 +32,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 Intel Deutschland GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -63,24 +63,46 @@
*
*****************************************************************************/
#ifndef __mvm_fw_dbg_h__
#define __mvm_fw_dbg_h__
#include "fw/file.h"
#include "fw/error-dump.h"
#include "mvm.h"
#ifndef __iwl_fw_dbg_h__
#define __iwl_fw_dbg_h__
#include <linux/workqueue.h>
#include <net/cfg80211.h>
#include "runtime.h"
#include "file.h"
#include "error-dump.h"
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm);
int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
const struct iwl_mvm_dump_desc *desc,
const struct iwl_fw_dbg_trigger_tlv *trigger);
int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
const char *str, size_t len,
const struct iwl_fw_dbg_trigger_tlv *trigger);
int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trigger,
const char *fmt, ...) __printf(3, 4);
int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id);
/**
* struct iwl_fw_dump_desc - describes the dump
* @len: length of trig_desc->data
* @trig_desc: the description of the dump
*/
struct iwl_fw_dump_desc {
size_t len;
/* must be last */
struct iwl_fw_error_dump_trigger_desc trig_desc;
};
extern const struct iwl_fw_dump_desc iwl_dump_desc_assert;
static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
{
if (fwrt->dump.desc != &iwl_dump_desc_assert)
kfree(fwrt->dump.desc);
fwrt->dump.desc = NULL;
}
void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt);
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
const struct iwl_fw_dump_desc *desc,
const struct iwl_fw_dbg_trigger_tlv *trigger);
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig,
const char *str, size_t len,
const struct iwl_fw_dbg_trigger_tlv *trigger);
int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_trigger_tlv *trigger,
const char *fmt, ...) __printf(3, 4);
int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 id);
#define iwl_fw_dbg_trigger_enabled(fw, id) ({ \
void *__dbg_trigger = (fw)->dbg_trigger_tlv[(id)]; \
@ -101,25 +123,25 @@ _iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, enum iwl_fw_dbg_trigger id)
static inline bool
iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig,
struct ieee80211_vif *vif)
struct wireless_dev *wdev)
{
u32 trig_vif = le32_to_cpu(trig->vif_type);
return trig_vif == IWL_FW_DBG_CONF_VIF_ANY ||
ieee80211_vif_type_p2p(vif) == trig_vif;
wdev->iftype == trig_vif;
}
static inline bool
iwl_fw_dbg_trigger_stop_conf_match(struct iwl_mvm *mvm,
iwl_fw_dbg_trigger_stop_conf_match(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_trigger_tlv *trig)
{
return ((trig->mode & IWL_FW_DBG_TRIGGER_STOP) &&
(mvm->fw_dbg_conf == FW_DBG_INVALID ||
(BIT(mvm->fw_dbg_conf) & le32_to_cpu(trig->stop_conf_ids))));
(fwrt->dump.conf == FW_DBG_INVALID ||
(BIT(fwrt->dump.conf) & le32_to_cpu(trig->stop_conf_ids))));
}
static inline bool
iwl_fw_dbg_no_trig_window(struct iwl_mvm *mvm,
iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_trigger_tlv *trig)
{
unsigned long wind_jiff =
@ -127,49 +149,66 @@ iwl_fw_dbg_no_trig_window(struct iwl_mvm *mvm,
u32 id = le32_to_cpu(trig->id);
/* If this is the first event checked, jump to update start ts */
if (mvm->fw_dbg_non_collect_ts_start[id] &&
(time_after(mvm->fw_dbg_non_collect_ts_start[id] + wind_jiff,
if (fwrt->dump.non_collect_ts_start[id] &&
(time_after(fwrt->dump.non_collect_ts_start[id] + wind_jiff,
jiffies)))
return true;
mvm->fw_dbg_non_collect_ts_start[id] = jiffies;
fwrt->dump.non_collect_ts_start[id] = jiffies;
return false;
}
static inline bool
iwl_fw_dbg_trigger_check_stop(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt,
struct wireless_dev *wdev,
struct iwl_fw_dbg_trigger_tlv *trig)
{
if (vif && !iwl_fw_dbg_trigger_vif_match(trig, vif))
if (wdev && !iwl_fw_dbg_trigger_vif_match(trig, wdev))
return false;
if (iwl_fw_dbg_no_trig_window(mvm, trig)) {
IWL_WARN(mvm, "Trigger %d occurred while no-collect window.\n",
if (iwl_fw_dbg_no_trig_window(fwrt, trig)) {
IWL_WARN(fwrt, "Trigger %d occurred while no-collect window.\n",
trig->id);
return false;
}
return iwl_fw_dbg_trigger_stop_conf_match(mvm, trig);
return iwl_fw_dbg_trigger_stop_conf_match(fwrt, trig);
}
static inline void
_iwl_fw_dbg_trigger_simple_stop(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
_iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
struct wireless_dev *wdev,
struct iwl_fw_dbg_trigger_tlv *trigger)
{
if (!trigger)
return;
if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger))
if (!iwl_fw_dbg_trigger_check_stop(fwrt, wdev, trigger))
return;
iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL);
iwl_fw_dbg_collect_trig(fwrt, trigger, NULL);
}
#define iwl_fw_dbg_trigger_simple_stop(mvm, vif, trig) \
_iwl_fw_dbg_trigger_simple_stop((mvm), (vif), \
iwl_fw_dbg_get_trigger((mvm)->fw,\
#define iwl_fw_dbg_trigger_simple_stop(fwrt, wdev, trig) \
_iwl_fw_dbg_trigger_simple_stop((fwrt), (wdev), \
iwl_fw_dbg_get_trigger((fwrt)->fw,\
(trig)))
#endif /* __mvm_fw_dbg_h__ */
static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
{
fwrt->dump.conf = FW_DBG_INVALID;
}
void iwl_fw_error_dump_wk(struct work_struct *work);
static inline void iwl_fw_flush_dump(struct iwl_fw_runtime *fwrt)
{
flush_delayed_work(&fwrt->dump.wk);
}
static inline void iwl_fw_cancel_dump(struct iwl_fw_runtime *fwrt)
{
cancel_delayed_work_sync(&fwrt->dump.wk);
}
#endif /* __iwl_fw_dbg_h__ */
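As a usage note, an op mode restarting the firmware would typically drop pending dump work and reset the debug configuration with the helpers above; a minimal sketch, not part of this diff:
/* Illustrative restart path. */
static void example_before_fw_restart(struct iwl_fw_runtime *fwrt)
{
	iwl_fw_cancel_dump(fwrt);	/* no dump may run across a restart */
	iwl_fw_dump_conf_clear(fwrt);	/* back to FW_DBG_INVALID */
}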


@ -0,0 +1,75 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include "iwl-drv.h"
#include "runtime.h"
#include "dbg.h"
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
const struct iwl_fw *fw,
const struct iwl_fw_runtime_ops *ops, void *ops_ctx)
{
memset(fwrt, 0, sizeof(*fwrt));
fwrt->trans = trans;
fwrt->fw = fw;
fwrt->dev = trans->dev;
fwrt->dump.conf = FW_DBG_INVALID;
fwrt->ops = ops;
fwrt->ops_ctx = ops_ctx;
INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk);
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
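A minimal sketch of an op mode embedding and initializing the runtime; the containing structure and function are illustrative, not from this diff:
/* Illustrative op-mode glue. */
struct example_opmode {
	struct iwl_fw_runtime fwrt;
	/* ... other op-mode private state ... */
};
static void example_opmode_start(struct example_opmode *om,
				 struct iwl_trans *trans,
				 const struct iwl_fw *fw,
				 const struct iwl_fw_runtime_ops *ops)
{
	/* the runtime keeps om as the opaque context passed to ops */
	iwl_fw_runtime_init(&om->fwrt, trans, fw, ops, om);
}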


@ -0,0 +1,414 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include "iwl-drv.h"
#include "runtime.h"
#include "fw/api/commands.h"
void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt)
{
int i;
if (!fwrt->fw_paging_db[0].fw_paging_block)
return;
for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
struct iwl_fw_paging *paging = &fwrt->fw_paging_db[i];
if (!paging->fw_paging_block) {
IWL_DEBUG_FW(fwrt,
"Paging: block %d already freed, continue to next page\n",
i);
continue;
}
dma_unmap_page(fwrt->trans->dev, paging->fw_paging_phys,
paging->fw_paging_size, DMA_BIDIRECTIONAL);
__free_pages(paging->fw_paging_block,
get_order(paging->fw_paging_size));
paging->fw_paging_block = NULL;
}
kfree(fwrt->trans->paging_download_buf);
fwrt->trans->paging_download_buf = NULL;
fwrt->trans->paging_db = NULL;
memset(fwrt->fw_paging_db, 0, sizeof(fwrt->fw_paging_db));
}
IWL_EXPORT_SYMBOL(iwl_free_fw_paging);
static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt,
const struct fw_img *image)
{
struct page *block;
dma_addr_t phys = 0;
int blk_idx, order, num_of_pages, size, dma_enabled;
if (fwrt->fw_paging_db[0].fw_paging_block)
return 0;
dma_enabled = is_device_dma_capable(fwrt->trans->dev);
/* ensure BLOCK_2_EXP_SIZE is the log2 of PAGING_BLOCK_SIZE */
BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
fwrt->num_of_paging_blk =
DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP);
fwrt->num_of_pages_in_last_blk =
num_of_pages -
NUM_OF_PAGE_PER_GROUP * (fwrt->num_of_paging_blk - 1);
IWL_DEBUG_FW(fwrt,
"Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
fwrt->num_of_paging_blk,
fwrt->num_of_pages_in_last_blk);
/*
* Allocate CSS and paging blocks in dram.
*/
for (blk_idx = 0; blk_idx < fwrt->num_of_paging_blk + 1; blk_idx++) {
/* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */
size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE;
order = get_order(size);
block = alloc_pages(GFP_KERNEL, order);
if (!block) {
/* free all the previous pages since we failed */
iwl_free_fw_paging(fwrt);
return -ENOMEM;
}
fwrt->fw_paging_db[blk_idx].fw_paging_block = block;
fwrt->fw_paging_db[blk_idx].fw_paging_size = size;
if (dma_enabled) {
phys = dma_map_page(fwrt->trans->dev, block, 0,
PAGE_SIZE << order,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(fwrt->trans->dev, phys)) {
/*
* free the previous pages and the current one
* since we failed to map_page.
*/
iwl_free_fw_paging(fwrt);
return -ENOMEM;
}
fwrt->fw_paging_db[blk_idx].fw_paging_phys = phys;
} else {
fwrt->fw_paging_db[blk_idx].fw_paging_phys =
PAGING_ADDR_SIG |
blk_idx << BLOCK_2_EXP_SIZE;
}
if (!blk_idx)
IWL_DEBUG_FW(fwrt,
"Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
order);
else
IWL_DEBUG_FW(fwrt,
"Paging: allocated 32K bytes (order %d) for firmware paging.\n",
order);
}
return 0;
}
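To make the block sizing above concrete, a worked example; the numbers are illustrative, assuming FW_PAGING_SIZE is 4 KiB and NUM_OF_PAGE_PER_GROUP is 8:
/*
 * paging_mem_size = 340 KiB -> num_of_pages = 340K / 4K = 85
 * num_of_paging_blk = DIV_ROUND_UP(85, 8) = 11 blocks (32 KiB each)
 * num_of_pages_in_last_blk = 85 - 8 * (11 - 1) = 5 pages
 * plus one extra 4 KiB allocation at index 0 for the CSS block
 */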
static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
const struct fw_img *image)
{
int sec_idx, idx;
u32 offset = 0;
/*
 * Find where the paging image starts. If CPU2 exists and is in
 * paging format, the image looks like this:
 * CPU1 sections (2 or more)
 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
 * CPU2 sections (not paged)
 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged CPU2
 *	sections from the CPU2 paging section
 * CPU2 paging CSS
 * CPU2 paging image (including instruction and data)
 */
for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) {
if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
sec_idx++;
break;
}
}
/*
* If paging is enabled there should be at least 2 more sections left
* (one for CSS and one for Paging data)
*/
if (sec_idx >= image->num_sec - 1) {
IWL_ERR(fwrt, "Paging: Missing CSS and/or paging sections\n");
iwl_free_fw_paging(fwrt);
return -EINVAL;
}
/* copy the CSS block to the dram */
IWL_DEBUG_FW(fwrt, "Paging: load paging CSS to FW, sec = %d\n",
sec_idx);
memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block),
image->sec[sec_idx].data,
fwrt->fw_paging_db[0].fw_paging_size);
dma_sync_single_for_device(fwrt->trans->dev,
fwrt->fw_paging_db[0].fw_paging_phys,
fwrt->fw_paging_db[0].fw_paging_size,
DMA_BIDIRECTIONAL);
IWL_DEBUG_FW(fwrt,
"Paging: copied %d CSS bytes to first block\n",
fwrt->fw_paging_db[0].fw_paging_size);
sec_idx++;
/*
 * Copy the paging blocks to DRAM. The loop index starts from 1
 * because the CSS block (index 0) was already copied to DRAM, and
 * it stops at num_of_paging_blk because the last block may not be
 * full.
 */
for (idx = 1; idx < fwrt->num_of_paging_blk; idx++) {
struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx];
memcpy(page_address(block->fw_paging_block),
image->sec[sec_idx].data + offset,
block->fw_paging_size);
dma_sync_single_for_device(fwrt->trans->dev,
block->fw_paging_phys,
block->fw_paging_size,
DMA_BIDIRECTIONAL);
IWL_DEBUG_FW(fwrt,
"Paging: copied %d paging bytes to block %d\n",
fwrt->fw_paging_db[idx].fw_paging_size,
idx);
offset += fwrt->fw_paging_db[idx].fw_paging_size;
}
/* copy the last paging block */
if (fwrt->num_of_pages_in_last_blk > 0) {
struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx];
memcpy(page_address(block->fw_paging_block),
image->sec[sec_idx].data + offset,
FW_PAGING_SIZE * fwrt->num_of_pages_in_last_blk);
dma_sync_single_for_device(fwrt->trans->dev,
block->fw_paging_phys,
block->fw_paging_size,
DMA_BIDIRECTIONAL);
IWL_DEBUG_FW(fwrt,
"Paging: copied %d pages in the last block %d\n",
fwrt->num_of_pages_in_last_blk, idx);
}
return 0;
}
static int iwl_save_fw_paging(struct iwl_fw_runtime *fwrt,
const struct fw_img *fw)
{
int ret;
ret = iwl_alloc_fw_paging_mem(fwrt, fw);
if (ret)
return ret;
return iwl_fill_paging_mem(fwrt, fw);
}
/* send the paging cmd to the FW in case CPU2 has a paging image */
static int iwl_send_paging_cmd(struct iwl_fw_runtime *fwrt,
const struct fw_img *fw)
{
struct iwl_fw_paging_cmd paging_cmd = {
.flags = cpu_to_le32(PAGING_CMD_IS_SECURED |
PAGING_CMD_IS_ENABLED |
(fwrt->num_of_pages_in_last_blk <<
PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
.block_num = cpu_to_le32(fwrt->num_of_paging_blk),
};
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(FW_PAGING_BLOCK_CMD, IWL_ALWAYS_LONG_GROUP, 0),
.len = { sizeof(paging_cmd), },
.data = { &paging_cmd, },
};
int blk_idx;
/* loop over all paging blocks + the CSS block */
for (blk_idx = 0; blk_idx < fwrt->num_of_paging_blk + 1; blk_idx++) {
dma_addr_t addr = fwrt->fw_paging_db[blk_idx].fw_paging_phys;
__le32 phy_addr;
addr = addr >> PAGE_2_EXP_SIZE;
phy_addr = cpu_to_le32(addr);
paging_cmd.device_phy_addr[blk_idx] = phy_addr;
}
return iwl_trans_send_cmd(fwrt->trans, &hcmd);
}
/*
 * Send the paging item cmd to the FW in case CPU2 has a paging image
 */
static int iwl_trans_get_paging_item(struct iwl_fw_runtime *fwrt)
{
int ret;
struct iwl_fw_get_item_cmd fw_get_item_cmd = {
.item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
};
struct iwl_fw_get_item_resp *item_resp;
struct iwl_host_cmd cmd = {
.id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
.data = { &fw_get_item_cmd, },
.len = { sizeof(fw_get_item_cmd), },
};
ret = iwl_trans_send_cmd(fwrt->trans, &cmd);
if (ret) {
IWL_ERR(fwrt,
"Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
ret);
return ret;
}
item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
IWL_ERR(fwrt,
"Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
le32_to_cpu(item_resp->item_id));
ret = -EIO;
goto exit;
}
/* Add an extra page for headers */
fwrt->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
FW_PAGING_SIZE,
GFP_KERNEL);
if (!fwrt->trans->paging_download_buf) {
ret = -ENOMEM;
goto exit;
}
fwrt->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
fwrt->trans->paging_db = fwrt->fw_paging_db;
IWL_DEBUG_FW(fwrt,
"Paging: got paging request address (paging_req_addr 0x%08x)\n",
fwrt->trans->paging_req_addr);
exit:
iwl_free_resp(&cmd);
return ret;
}
int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type)
{
const struct fw_img *fw = &fwrt->fw->img[type];
int ret;
if (fwrt->trans->cfg->gen2)
return 0;
/*
* Configure and operate fw paging mechanism.
* The driver configures the paging flow only once.
* The CPU2 paging image is included in the IWL_UCODE_INIT image.
*/
if (!fw->paging_mem_size)
return 0;
/*
 * When DMA is not enabled, the driver needs to copy / write
 * the downloaded / uploaded page to / from the SMEM.
 * This gets the location where the pages are stored.
 */
if (!is_device_dma_capable(fwrt->trans->dev)) {
ret = iwl_trans_get_paging_item(fwrt);
if (ret) {
IWL_ERR(fwrt, "failed to get FW paging item\n");
return ret;
}
}
ret = iwl_save_fw_paging(fwrt, fw);
if (ret) {
IWL_ERR(fwrt, "failed to save the FW paging image\n");
return ret;
}
ret = iwl_send_paging_cmd(fwrt, fw);
if (ret) {
IWL_ERR(fwrt, "failed to send the paging cmd\n");
iwl_free_fw_paging(fwrt);
return ret;
}
return 0;
}
IWL_EXPORT_SYMBOL(iwl_init_paging);
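For context, a sketch of the expected call order after the firmware reports alive; the surrounding function is illustrative, not from this diff:
/* Illustrative post-alive path. */
static int example_post_alive(struct iwl_fw_runtime *fwrt,
			      enum iwl_ucode_type type)
{
	iwl_fw_set_current_image(fwrt, type);
	/*
	 * iwl_init_paging() frees its own allocations on failure;
	 * on teardown, iwl_free_fw_paging(fwrt) releases the blocks.
	 */
	return iwl_init_paging(fwrt, type);
}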


@ -0,0 +1,156 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_fw_runtime_h__
#define __iwl_fw_runtime_h__
#include "iwl-config.h"
#include "iwl-trans.h"
#include "img.h"
#include "fw/api/debug.h"
#include "fw/api/paging.h"
struct iwl_fw_runtime_ops {
int (*dump_start)(void *ctx);
void (*dump_end)(void *ctx);
};
#define MAX_NUM_LMAC 2
struct iwl_fwrt_shared_mem_cfg {
int num_lmacs;
int num_txfifo_entries;
struct {
u32 txfifo_size[TX_FIFO_MAX_NUM];
u32 rxfifo1_size;
} lmac[MAX_NUM_LMAC];
u32 rxfifo2_size;
u32 internal_txfifo_addr;
u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
};
enum iwl_fw_runtime_status {
IWL_FWRT_STATUS_DUMPING = 0,
};
/**
* struct iwl_fw_runtime - runtime data for firmware
* @fw: firmware image
* @trans: transport layer
* @dev: device pointer
* @ops: user ops
* @ops_ctx: user ops context
* @status: status flags
* @fw_paging_db: paging database
* @num_of_paging_blk: number of paging blocks
* @num_of_pages_in_last_blk: number of pages in the last block
* @smem_cfg: saved firmware SMEM configuration
* @cur_fw_img: current firmware image, must be maintained by
* the driver by calling &iwl_fw_set_current_image()
* @dump: debug dump data
*/
struct iwl_fw_runtime {
struct iwl_trans *trans;
const struct iwl_fw *fw;
struct device *dev;
const struct iwl_fw_runtime_ops *ops;
void *ops_ctx;
unsigned long status;
/* Paging */
struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS];
u16 num_of_paging_blk;
u16 num_of_pages_in_last_blk;
enum iwl_ucode_type cur_fw_img;
/* memory configuration */
struct iwl_fwrt_shared_mem_cfg smem_cfg;
/* debug */
struct {
const struct iwl_fw_dump_desc *desc;
const struct iwl_fw_dbg_trigger_tlv *trig;
struct delayed_work wk;
u8 conf;
/* ts of the beginning of a non-collect fw dbg data period */
unsigned long non_collect_ts_start[FW_DBG_TRIGGER_MAX - 1];
} dump;
};
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
const struct iwl_fw *fw,
const struct iwl_fw_runtime_ops *ops, void *ops_ctx);
static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt,
enum iwl_ucode_type cur_fw_img)
{
fwrt->cur_fw_img = cur_fw_img;
}
int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type);
void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt);
void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt);
void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt,
struct iwl_rx_cmd_buffer *rxb);
#endif /* __iwl_fw_runtime_h__ */


@ -0,0 +1,152 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include "iwl-drv.h"
#include "runtime.h"
#include "fw/api/commands.h"
static void iwl_parse_shared_mem_a000(struct iwl_fw_runtime *fwrt,
struct iwl_rx_packet *pkt)
{
struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
int i, lmac;
int lmac_num = le32_to_cpu(mem_cfg->lmac_num);
if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem)))
return;
fwrt->smem_cfg.num_lmacs = lmac_num;
fwrt->smem_cfg.num_txfifo_entries =
ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size);
fwrt->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size);
for (lmac = 0; lmac < lmac_num; lmac++) {
struct iwl_shared_mem_lmac_cfg *lmac_cfg =
&mem_cfg->lmac_smem[lmac];
for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++)
fwrt->smem_cfg.lmac[lmac].txfifo_size[i] =
le32_to_cpu(lmac_cfg->txfifo_size[i]);
fwrt->smem_cfg.lmac[lmac].rxfifo1_size =
le32_to_cpu(lmac_cfg->rxfifo1_size);
}
}
static void iwl_parse_shared_mem(struct iwl_fw_runtime *fwrt,
struct iwl_rx_packet *pkt)
{
struct iwl_shared_mem_cfg_v2 *mem_cfg = (void *)pkt->data;
int i;
fwrt->smem_cfg.num_lmacs = 1;
fwrt->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size);
for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
fwrt->smem_cfg.lmac[0].txfifo_size[i] =
le32_to_cpu(mem_cfg->txfifo_size[i]);
fwrt->smem_cfg.lmac[0].rxfifo1_size =
le32_to_cpu(mem_cfg->rxfifo_size[0]);
fwrt->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]);
/* the new API has more data, from the rxfifo_addr field onward */
if (fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
BUILD_BUG_ON(sizeof(fwrt->smem_cfg.internal_txfifo_size) !=
sizeof(mem_cfg->internal_txfifo_size));
for (i = 0;
i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size);
i++)
fwrt->smem_cfg.internal_txfifo_size[i] =
le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
}
}
void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
{
struct iwl_host_cmd cmd = {
.flags = CMD_WANT_SKB,
.data = { NULL, },
.len = { 0, },
};
struct iwl_rx_packet *pkt;
if (fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
else
cmd.id = SHARED_MEM_CFG;
if (WARN_ON(iwl_trans_send_cmd(fwrt->trans, &cmd)))
return;
pkt = cmd.resp_pkt;
if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000)
iwl_parse_shared_mem_a000(fwrt, pkt);
else
iwl_parse_shared_mem(fwrt, pkt);
IWL_DEBUG_INFO(fwrt, "SHARED MEM CFG: got memory offsets/sizes\n");
iwl_free_resp(&cmd);
}
IWL_EXPORT_SYMBOL(iwl_get_shared_mem_conf);
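A sketch of a consumer of the parsed configuration; the debug printout is illustrative, not from this diff:
/* Illustrative consumer: report TX FIFO sizes after the query. */
static void example_report_smem(struct iwl_fw_runtime *fwrt)
{
	struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
	int lmac, i;
	iwl_get_shared_mem_conf(fwrt);
	for (lmac = 0; lmac < cfg->num_lmacs; lmac++)
		for (i = 0; i < cfg->num_txfifo_entries; i++)
			IWL_DEBUG_INFO(fwrt, "lmac %d txfifo %d: %u\n",
				       lmac, i,
				       cfg->lmac[lmac].txfifo_size[i]);
}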


@ -76,7 +76,8 @@
#include "iwl-config.h"
#include "fw/img.h"
#include "iwl-op-mode.h"
#include "fw/api.h"
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"
/**
* DOC: Transport layer - what is it ?


@ -6,7 +6,7 @@ iwlmvm-y += power.o coex.o
iwlmvm-y += tt.o offloading.o tdls.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwlmvm-y += tof.o fw-dbg.o
iwlmvm-y += tof.o
iwlmvm-$(CONFIG_PM) += d3.o
ccflags-y += -I$(src)/../


@ -67,7 +67,7 @@
#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "fw-api-coex.h"
#include "fw/api/coex.h"
#include "iwl-modparams.h"
#include "mvm.h"
#include "iwl-debug.h"


@ -111,7 +111,6 @@
#define IWL_MVM_SW_TX_CSUM_OFFLOAD 0
#define IWL_MVM_HW_CSUM_DISABLE 0
#define IWL_MVM_PARSE_NVM 0
#define IWL_MVM_COLLECT_FW_ERR_DUMP 1
#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1
@ -141,5 +140,6 @@
#define IWL_MVM_RS_TPC_SR_FORCE_INCREASE 75 /* percent */
#define IWL_MVM_RS_TPC_SR_NO_INCREASE 85 /* percent */
#define IWL_MVM_RS_TPC_TX_POWER_STEP 3
#define IWL_MVM_ENABLE_EBS 1
#endif /* __MVM_CONSTANTS_H */


@ -65,7 +65,7 @@
*
*****************************************************************************/
#include "mvm.h"
#include "fw-api-tof.h"
#include "fw/api/tof.h"
#include "debugfs.h"
static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,


@ -69,7 +69,6 @@
#include <linux/netdevice.h>
#include "mvm.h"
#include "fw-dbg.h"
#include "sta.h"
#include "iwl-io.h"
#include "debugfs.h"
@ -84,7 +83,7 @@ static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file,
int pos, budget;
if (!iwl_mvm_firmware_running(mvm) ||
mvm->cur_ucode != IWL_UCODE_REGULAR)
mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
return -EIO;
mutex_lock(&mvm->mutex);
@ -105,7 +104,7 @@ static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf,
int ret;
if (!iwl_mvm_firmware_running(mvm) ||
mvm->cur_ucode != IWL_UCODE_REGULAR)
mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
return -EIO;
mutex_lock(&mvm->mutex);
@ -122,7 +121,7 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
u32 flush_arg;
if (!iwl_mvm_firmware_running(mvm) ||
mvm->cur_ucode != IWL_UCODE_REGULAR)
mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
return -EIO;
if (kstrtou32(buf, 0, &flush_arg))
@ -155,7 +154,7 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
int sta_id, drain, ret;
if (!iwl_mvm_firmware_running(mvm) ||
mvm->cur_ucode != IWL_UCODE_REGULAR)
mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
return -EIO;
if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
@ -192,7 +191,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
return -EINVAL;
/* default is to dump the entire data segment */
img = &mvm->fw->img[mvm->cur_ucode];
img = &mvm->fw->img[mvm->fwrt.cur_fw_img];
ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
len = img->sec[IWL_UCODE_SECTION_DATA].len;
@ -224,7 +223,7 @@ static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf,
if (!iwl_mvm_firmware_running(mvm))
return -EINVAL;
img = &mvm->fw->img[mvm->cur_ucode];
img = &mvm->fw->img[mvm->fwrt.cur_fw_img];
img_offset = img->sec[IWL_UCODE_SECTION_DATA].offset;
img_len = img->sec[IWL_UCODE_SECTION_DATA].len;
@ -1123,7 +1122,7 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file,
int pos = 0;
mutex_lock(&mvm->mutex);
conf = mvm->fw_dbg_conf;
conf = mvm->fwrt.dump.conf;
mutex_unlock(&mvm->mutex);
pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf);
@ -1190,7 +1189,7 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm,
return -EINVAL;
mutex_lock(&mvm->mutex);
ret = iwl_mvm_start_fw_dbg_conf(mvm, conf_id);
ret = iwl_fw_start_dbg_conf(&mvm->fwrt, conf_id);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@ -1211,8 +1210,8 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
if (count == 0)
return 0;
iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, buf,
(count - 1), NULL);
iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf,
(count - 1), NULL);
iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);

The diff for this file is not shown because of its large size.


@ -78,7 +78,7 @@
#include "iwl-eeprom-parse.h"
#include "mvm.h"
#include "fw-dbg.h"
#include "fw/dbg.h"
#include "iwl-phy-db.h"
#define MVM_UCODE_ALIVE_TIMEOUT HZ
@ -144,134 +144,6 @@ static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
return ret;
}
void iwl_free_fw_paging(struct iwl_mvm *mvm)
{
int i;
if (!mvm->fw_paging_db[0].fw_paging_block)
return;
for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
struct iwl_fw_paging *paging = &mvm->fw_paging_db[i];
if (!paging->fw_paging_block) {
IWL_DEBUG_FW(mvm,
"Paging: block %d already freed, continue to next page\n",
i);
continue;
}
dma_unmap_page(mvm->trans->dev, paging->fw_paging_phys,
paging->fw_paging_size, DMA_BIDIRECTIONAL);
__free_pages(paging->fw_paging_block,
get_order(paging->fw_paging_size));
paging->fw_paging_block = NULL;
}
kfree(mvm->trans->paging_download_buf);
mvm->trans->paging_download_buf = NULL;
mvm->trans->paging_db = NULL;
memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
}
static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
{
int sec_idx, idx;
u32 offset = 0;
/*
* find where is the paging image start point:
* if CPU2 exist and it's in paging format, then the image looks like:
* CPU1 sections (2 or more)
* CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2
* CPU2 sections (not paged)
* PAGING_SEPARATOR_SECTION delimiter - separate between CPU2
* non paged to CPU2 paging sec
* CPU2 paging CSS
* CPU2 paging image (including instruction and data)
*/
for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) {
if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
sec_idx++;
break;
}
}
/*
* If paging is enabled there should be at least 2 more sections left
* (one for CSS and one for Paging data)
*/
if (sec_idx >= image->num_sec - 1) {
IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
iwl_free_fw_paging(mvm);
return -EINVAL;
}
/* copy the CSS block to the dram */
IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
sec_idx);
memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
image->sec[sec_idx].data,
mvm->fw_paging_db[0].fw_paging_size);
dma_sync_single_for_device(mvm->trans->dev,
mvm->fw_paging_db[0].fw_paging_phys,
mvm->fw_paging_db[0].fw_paging_size,
DMA_BIDIRECTIONAL);
IWL_DEBUG_FW(mvm,
"Paging: copied %d CSS bytes to first block\n",
mvm->fw_paging_db[0].fw_paging_size);
sec_idx++;
/*
* copy the paging blocks to the dram
* loop index start from 1 since that CSS block already copied to dram
* and CSS index is 0.
* loop stop at num_of_paging_blk since that last block is not full.
*/
for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];
memcpy(page_address(block->fw_paging_block),
image->sec[sec_idx].data + offset,
block->fw_paging_size);
dma_sync_single_for_device(mvm->trans->dev,
block->fw_paging_phys,
block->fw_paging_size,
DMA_BIDIRECTIONAL);
IWL_DEBUG_FW(mvm,
"Paging: copied %d paging bytes to block %d\n",
mvm->fw_paging_db[idx].fw_paging_size,
idx);
offset += mvm->fw_paging_db[idx].fw_paging_size;
}
/* copy the last paging block */
if (mvm->num_of_pages_in_last_blk > 0) {
struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];
memcpy(page_address(block->fw_paging_block),
image->sec[sec_idx].data + offset,
FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
dma_sync_single_for_device(mvm->trans->dev,
block->fw_paging_phys,
block->fw_paging_size,
DMA_BIDIRECTIONAL);
IWL_DEBUG_FW(mvm,
"Paging: copied %d pages in the last block %d\n",
mvm->num_of_pages_in_last_blk, idx);
}
return 0;
}
void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
@ -293,178 +165,6 @@ void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
le32_to_cpu(dump_data[i]));
}
static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
const struct fw_img *image)
{
struct page *block;
dma_addr_t phys = 0;
int blk_idx, order, num_of_pages, size, dma_enabled;
if (mvm->fw_paging_db[0].fw_paging_block)
return 0;
dma_enabled = is_device_dma_capable(mvm->trans->dev);
/* ensure BLOCK_2_EXP_SIZE is power of 2 of PAGING_BLOCK_SIZE */
BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
mvm->num_of_paging_blk =
DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP);
mvm->num_of_pages_in_last_blk =
num_of_pages -
NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
IWL_DEBUG_FW(mvm,
"Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
mvm->num_of_paging_blk,
mvm->num_of_pages_in_last_blk);
/*
* Allocate CSS and paging blocks in dram.
*/
for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
/* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */
size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE;
order = get_order(size);
block = alloc_pages(GFP_KERNEL, order);
if (!block) {
/* free all the previous pages since we failed */
iwl_free_fw_paging(mvm);
return -ENOMEM;
}
mvm->fw_paging_db[blk_idx].fw_paging_block = block;
mvm->fw_paging_db[blk_idx].fw_paging_size = size;
if (dma_enabled) {
phys = dma_map_page(mvm->trans->dev, block, 0,
PAGE_SIZE << order,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(mvm->trans->dev, phys)) {
/*
* free the previous pages and the current one
* since we failed to map_page.
*/
iwl_free_fw_paging(mvm);
return -ENOMEM;
}
mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
} else {
mvm->fw_paging_db[blk_idx].fw_paging_phys =
PAGING_ADDR_SIG |
blk_idx << BLOCK_2_EXP_SIZE;
}
if (!blk_idx)
IWL_DEBUG_FW(mvm,
"Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
order);
else
IWL_DEBUG_FW(mvm,
"Paging: allocated 32K bytes (order %d) for firmware paging.\n",
order);
}
return 0;
}
static int iwl_save_fw_paging(struct iwl_mvm *mvm,
const struct fw_img *fw)
{
int ret;
ret = iwl_alloc_fw_paging_mem(mvm, fw);
if (ret)
return ret;
return iwl_fill_paging_mem(mvm, fw);
}
/* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
struct iwl_fw_paging_cmd paging_cmd = {
.flags = cpu_to_le32(PAGING_CMD_IS_SECURED |
PAGING_CMD_IS_ENABLED |
(mvm->num_of_pages_in_last_blk <<
PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
.block_num = cpu_to_le32(mvm->num_of_paging_blk),
};
int blk_idx;
/* loop for for all paging blocks + CSS block */
for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
dma_addr_t addr = mvm->fw_paging_db[blk_idx].fw_paging_phys;
__le32 phy_addr;
addr = addr >> PAGE_2_EXP_SIZE;
phy_addr = cpu_to_le32(addr);
paging_cmd.device_phy_addr[blk_idx] = phy_addr;
}
return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
IWL_ALWAYS_LONG_GROUP, 0),
0, sizeof(paging_cmd), &paging_cmd);
}
/*
* Send paging item cmd to FW in case CPU2 has paging image
*/
static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
{
int ret;
struct iwl_fw_get_item_cmd fw_get_item_cmd = {
.item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
};
struct iwl_fw_get_item_resp *item_resp;
struct iwl_host_cmd cmd = {
.id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
.data = { &fw_get_item_cmd, },
};
cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
IWL_ERR(mvm,
"Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
ret);
return ret;
}
item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
IWL_ERR(mvm,
"Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
le32_to_cpu(item_resp->item_id));
ret = -EIO;
goto exit;
}
/* Add an extra page for headers */
mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
FW_PAGING_SIZE,
GFP_KERNEL);
if (!mvm->trans->paging_download_buf) {
ret = -ENOMEM;
goto exit;
}
mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
mvm->trans->paging_db = mvm->fw_paging_db;
IWL_DEBUG_FW(mvm,
"Paging: got paging request address (paging_req_addr 0x%08x)\n",
mvm->trans->paging_req_addr);
exit:
iwl_free_resp(&cmd);
return ret;
}
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data)
{
@ -544,48 +244,6 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
return false;
}
static int iwl_mvm_init_paging(struct iwl_mvm *mvm)
{
const struct fw_img *fw = &mvm->fw->img[mvm->cur_ucode];
int ret;
/*
* Configure and operate fw paging mechanism.
* The driver configures the paging flow only once.
* The CPU2 paging image is included in the IWL_UCODE_INIT image.
*/
if (!fw->paging_mem_size)
return 0;
/*
* When dma is not enabled, the driver needs to copy / write
* the downloaded / uploaded page to / from the smem.
* This gets the location of the place were the pages are
* stored.
*/
if (!is_device_dma_capable(mvm->trans->dev)) {
ret = iwl_trans_get_paging_item(mvm);
if (ret) {
IWL_ERR(mvm, "failed to get FW paging item\n");
return ret;
}
}
ret = iwl_save_fw_paging(mvm, fw);
if (ret) {
IWL_ERR(mvm, "failed to save the FW paging image\n");
return ret;
}
ret = iwl_send_paging_cmd(mvm, fw);
if (ret) {
IWL_ERR(mvm, "failed to send the paging cmd\n");
iwl_free_fw_paging(mvm);
return ret;
}
return 0;
}
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
enum iwl_ucode_type ucode_type)
{
@ -593,7 +251,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
struct iwl_mvm_alive_data alive_data;
const struct fw_img *fw;
int ret, i;
enum iwl_ucode_type old_type = mvm->cur_ucode;
enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
static const u16 alive_cmd[] = { MVM_ALIVE };
struct iwl_sf_region st_fwrd_space;
@ -606,7 +264,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
fw = iwl_get_ucode_image(mvm->fw, ucode_type);
if (WARN_ON(!fw))
return -EINVAL;
mvm->cur_ucode = ucode_type;
iwl_fw_set_current_image(&mvm->fwrt, ucode_type);
clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
@ -615,7 +273,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
if (ret) {
mvm->cur_ucode = old_type;
iwl_fw_set_current_image(&mvm->fwrt, old_type);
iwl_remove_notification(&mvm->notif_wait, &alive_wait);
return ret;
}
@ -639,13 +297,13 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_prph(trans, SB_CPU_1_STATUS),
iwl_read_prph(trans, SB_CPU_2_STATUS));
mvm->cur_ucode = old_type;
iwl_fw_set_current_image(&mvm->fwrt, old_type);
return ret;
}
if (!alive_data.valid) {
IWL_ERR(mvm, "Loaded ucode is not valid!\n");
mvm->cur_ucode = old_type;
iwl_fw_set_current_image(&mvm->fwrt, old_type);
return -EIO;
}
@ -673,10 +331,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
*/
memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
if (iwl_mvm_is_dqa_supported(mvm))
mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
else
mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
@ -774,7 +429,7 @@ error:
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
struct iwl_phy_cfg_cmd phy_cfg_cmd;
enum iwl_ucode_type ucode_type = mvm->cur_ucode;
enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
/* Set parameters */
phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
@ -799,7 +454,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
};
int ret;
if (iwl_mvm_has_new_tx_api(mvm))
if (iwl_mvm_has_unified_ucode(mvm))
return iwl_run_unified_mvm_ucode(mvm, true);
lockdep_assert_held(&mvm->mutex);
@ -910,95 +565,6 @@ out:
return ret;
}
static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
int i, lmac;
int lmac_num = le32_to_cpu(mem_cfg->lmac_num);
if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem)))
return;
mvm->smem_cfg.num_lmacs = lmac_num;
mvm->smem_cfg.num_txfifo_entries =
ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size);
mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size);
for (lmac = 0; lmac < lmac_num; lmac++) {
struct iwl_shared_mem_lmac_cfg *lmac_cfg =
&mem_cfg->lmac_smem[lmac];
for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++)
mvm->smem_cfg.lmac[lmac].txfifo_size[i] =
le32_to_cpu(lmac_cfg->txfifo_size[i]);
mvm->smem_cfg.lmac[lmac].rxfifo1_size =
le32_to_cpu(lmac_cfg->rxfifo1_size);
}
}
static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
struct iwl_shared_mem_cfg_v2 *mem_cfg = (void *)pkt->data;
int i;
mvm->smem_cfg.num_lmacs = 1;
mvm->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size);
for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
mvm->smem_cfg.lmac[0].txfifo_size[i] =
le32_to_cpu(mem_cfg->txfifo_size[i]);
mvm->smem_cfg.lmac[0].rxfifo1_size =
le32_to_cpu(mem_cfg->rxfifo_size[0]);
mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]);
/* new API has more data, from rxfifo_addr field and on */
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
BUILD_BUG_ON(sizeof(mvm->smem_cfg.internal_txfifo_size) !=
sizeof(mem_cfg->internal_txfifo_size));
for (i = 0;
i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size);
i++)
mvm->smem_cfg.internal_txfifo_size[i] =
le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
}
}
static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{
struct iwl_host_cmd cmd = {
.flags = CMD_WANT_SKB,
.data = { NULL, },
.len = { 0, },
};
struct iwl_rx_packet *pkt;
lockdep_assert_held(&mvm->mutex);
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
else
cmd.id = SHARED_MEM_CFG;
if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
return;
pkt = cmd.resp_pkt;
if (iwl_mvm_has_new_tx_api(mvm))
iwl_mvm_parse_shared_mem_a000(mvm, pkt);
else
iwl_mvm_parse_shared_mem(mvm, pkt);
IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
iwl_free_resp(&cmd);
}
static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{
struct iwl_ltr_config_cmd cmd = {
@ -1048,8 +614,8 @@ static union acpi_object *iwl_mvm_sar_find_wifi_pkg(struct iwl_mvm *mvm,
union acpi_object *data,
int data_size)
{
union acpi_object *wifi_pkg = NULL;
int i;
union acpi_object *wifi_pkg;
/*
* We need at least two packages, one for the revision and one
@ -1465,7 +1031,7 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
{
int ret;
if (iwl_mvm_has_new_tx_api(mvm))
if (iwl_mvm_has_unified_ucode(mvm))
return iwl_run_unified_mvm_ucode(mvm, false);
ret = iwl_run_init_mvm_ucode(mvm, false);
@ -1495,7 +1061,7 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
if (ret)
return ret;
return iwl_mvm_init_paging(mvm);
return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img);
}
int iwl_mvm_up(struct iwl_mvm *mvm)
@ -1516,24 +1082,24 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
iwl_mvm_get_shared_mem_conf(mvm);
iwl_get_shared_mem_conf(&mvm->fwrt);
ret = iwl_mvm_sf_update(mvm, NULL, false);
if (ret)
IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
mvm->fw_dbg_conf = FW_DBG_INVALID;
mvm->fwrt.dump.conf = FW_DBG_INVALID;
/* if we have a destination, assume EARLY START */
if (mvm->fw->dbg_dest_tlv)
mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);
mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
if (ret)
goto error;
/* Send phy db control command and then phy db calibration*/
if (!iwl_mvm_has_new_tx_api(mvm)) {
if (!iwl_mvm_has_unified_ucode(mvm)) {
/* Send phy db control command and then phy db calibration */
ret = iwl_send_phy_db_data(mvm->phy_db);
if (ret)
goto error;
@ -1549,7 +1115,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
/* Init RSS configuration */
/* TODO - remove a000 disablement when we have RXQ config API */
if (iwl_mvm_has_new_rx_api(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
if (iwl_mvm_has_new_rx_api(mvm) &&
mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_A000) {
ret = iwl_send_rss_cfg_cmd(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
@ -1567,14 +1134,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
/* reset quota debouncing buffer - 0xff will yield invalid data */
memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
/* Enable DQA-mode if required */
if (iwl_mvm_is_dqa_supported(mvm)) {
ret = iwl_mvm_send_dqa_cmd(mvm);
if (ret)
goto error;
} else {
IWL_DEBUG_FW(mvm, "Working in non-DQA mode\n");
}
ret = iwl_mvm_send_dqa_cmd(mvm);
if (ret)
goto error;
/* Add auxiliary station for scanning */
ret = iwl_mvm_add_aux_sta(mvm);


@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -72,7 +72,6 @@
#include "fw-api.h"
#include "mvm.h"
#include "time-event.h"
#include "fw-dbg.h"
const u8 iwl_mvm_ac_to_tx_fifo[] = {
IWL_MVM_TX_FIFO_VO,
@ -81,6 +80,13 @@ const u8 iwl_mvm_ac_to_tx_fifo[] = {
IWL_MVM_TX_FIFO_BK,
};
const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = {
IWL_GEN2_EDCA_TX_FIFO_VO,
IWL_GEN2_EDCA_TX_FIFO_VI,
IWL_GEN2_EDCA_TX_FIFO_BE,
IWL_GEN2_EDCA_TX_FIFO_BK,
};
struct iwl_mvm_mac_iface_iterator_data {
struct iwl_mvm *mvm;
struct ieee80211_vif *vif;
@ -235,32 +241,17 @@ static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac,
data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif);
}
static void iwl_mvm_mac_sta_hw_queues_iter(void *_data,
struct ieee80211_sta *sta)
{
struct iwl_mvm_hw_queues_iface_iterator_data *data = _data;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
/* Mark the queues used by the sta */
data->used_hw_queues |= mvmsta->tfd_queue_msk;
}
unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
struct ieee80211_vif *exclude_vif)
{
u8 sta_id;
struct iwl_mvm_hw_queues_iface_iterator_data data = {
.exclude_vif = exclude_vif,
.used_hw_queues =
BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
BIT(mvm->aux_queue),
BIT(mvm->aux_queue) |
BIT(IWL_MVM_DQA_GCAST_QUEUE),
};
if (iwl_mvm_is_dqa_supported(mvm))
data.used_hw_queues |= BIT(IWL_MVM_DQA_GCAST_QUEUE);
else
data.used_hw_queues |= BIT(IWL_MVM_CMD_QUEUE);
lockdep_assert_held(&mvm->mutex);
/* mark all VIF used hw queues */
@ -268,26 +259,6 @@ unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
iwl_mvm_iface_hw_queues_iter, &data);
/*
* for DQA, the hw_queue in mac80211 is never really used for
* real traffic (only the few queue IDs covered above), so
* we can reuse the real HW queue IDs the stations use
*/
if (iwl_mvm_is_dqa_supported(mvm))
return data.used_hw_queues;
/* don't assign the same hw queues as TDLS stations */
ieee80211_iterate_stations_atomic(mvm->hw,
iwl_mvm_mac_sta_hw_queues_iter,
&data);
/*
* Some TDLS stations may be removed but are in the process of being
* drained. Don't touch their queues.
*/
for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT)
data.used_hw_queues |= mvm->tfd_drained[sta_id];
return data.used_hw_queues;
}
@ -338,8 +309,7 @@ void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
NUM_TSF_IDS);
}
static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_mac_iface_iterator_data data = {
@ -355,6 +325,8 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
int ret, i, queue_limit;
unsigned long used_hw_queues;
lockdep_assert_held(&mvm->mutex);
/*
* Allocate a MAC ID and a TSF for this MAC, along with the queues
* and other resources.
@ -438,19 +410,14 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
return 0;
}
if (iwl_mvm_is_dqa_supported(mvm)) {
/*
* queues in mac80211 almost entirely independent of
* the ones here - no real limit
*/
queue_limit = IEEE80211_MAX_QUEUES;
BUILD_BUG_ON(IEEE80211_MAX_QUEUES >
BITS_PER_BYTE *
sizeof(mvm->hw_queue_to_mac80211[0]));
} else {
/* need to not use too many in this case */
queue_limit = mvm->first_agg_queue;
}
/*
* queues in mac80211 are almost entirely independent of
* the ones here - no real limit
*/
queue_limit = IEEE80211_MAX_QUEUES;
BUILD_BUG_ON(IEEE80211_MAX_QUEUES >
BITS_PER_BYTE *
sizeof(mvm->hw_queue_to_mac80211[0]));
/*
* Find available queues, and allocate them to the ACs. When in
@ -472,27 +439,12 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
/* Allocate the CAB queue for softAP and GO interfaces */
if (vif->type == NL80211_IFTYPE_AP) {
u8 queue;
if (!iwl_mvm_is_dqa_supported(mvm)) {
queue = find_first_zero_bit(&used_hw_queues,
mvm->first_agg_queue);
if (queue >= mvm->first_agg_queue) {
IWL_ERR(mvm, "Failed to allocate cab queue\n");
ret = -EIO;
goto exit_fail;
}
} else {
queue = IWL_MVM_DQA_GCAST_QUEUE;
}
/*
* For TVQM this will be overwritten later with the FW assigned
* queue value (when queue is enabled).
*/
mvmvif->cab_queue = queue;
vif->cab_queue = queue;
mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
} else {
vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
}
@ -513,78 +465,6 @@ exit_fail:
return ret;
}
int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, vif, false, false);
u32 ac;
int ret;
lockdep_assert_held(&mvm->mutex);
ret = iwl_mvm_mac_ctxt_allocate_resources(mvm, vif);
if (ret)
return ret;
/* If DQA is supported - queues will be enabled when needed */
if (iwl_mvm_is_dqa_supported(mvm))
return 0;
switch (vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MVM_TX_FIFO_VO, 0, wdg_timeout);
break;
case NL80211_IFTYPE_AP:
iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, vif->cab_queue,
IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
/* fall through */
default:
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
vif->hw_queue[ac],
iwl_mvm_ac_to_tx_fifo[ac], 0,
wdg_timeout);
break;
}
return 0;
}
void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
int ac;
lockdep_assert_held(&mvm->mutex);
/*
* If DQA is supported - queues were already disabled, since in
* DQA-mode the queues are a property of the STA and not of the
* vif, and at this point the STA was already deleted
*/
if (iwl_mvm_is_dqa_supported(mvm))
return;
switch (vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MAX_TID_COUNT, 0);
break;
case NL80211_IFTYPE_AP:
iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
IWL_MAX_TID_COUNT, 0);
/* fall through */
default:
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
iwl_mvm_disable_txq(mvm, vif->hw_queue[ac],
vif->hw_queue[ac],
IWL_MAX_TID_COUNT, 0);
}
}
static void iwl_mvm_ack_rates(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
enum nl80211_band band,
@ -775,7 +655,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
u8 txf = iwl_mvm_ac_to_tx_fifo[i];
u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i);
cmd->ac[txf].cw_min =
cpu_to_le16(mvmvif->queue_params[i].cw_min);
@ -908,18 +788,12 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
{
struct iwl_mac_ctx_cmd cmd = {};
u32 tfd_queue_msk = 0;
int ret, i;
int ret;
WARN_ON(vif->type != NL80211_IFTYPE_MONITOR);
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
if (!iwl_mvm_is_dqa_supported(mvm)) {
for (i = 0; i < IEEE80211_NUM_ACS; i++)
if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
tfd_queue_msk |= BIT(vif->hw_queue[i]);
}
cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC |
MAC_FILTER_IN_CONTROL_AND_MGMT |
MAC_FILTER_IN_BEACON |
@ -1049,83 +923,26 @@ static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
return ie - beacon;
}
static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct sk_buff *beacon)
static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct sk_buff *beacon,
struct iwl_tx_cmd *tx)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_host_cmd cmd = {
.id = BEACON_TEMPLATE_CMD,
.flags = CMD_ASYNC,
};
union {
struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6;
struct iwl_mac_beacon_cmd_v7 beacon_cmd;
} u = {};
struct iwl_mac_beacon_cmd beacon_cmd = {};
struct ieee80211_tx_info *info;
u32 beacon_skb_len;
u32 rate, tx_flags;
if (WARN_ON(!beacon))
return -EINVAL;
beacon_skb_len = beacon->len;
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) {
u32 csa_offset, ecsa_offset;
csa_offset = iwl_mvm_find_ie_offset(beacon->data,
WLAN_EID_CHANNEL_SWITCH,
beacon_skb_len);
ecsa_offset =
iwl_mvm_find_ie_offset(beacon->data,
WLAN_EID_EXT_CHANSWITCH_ANN,
beacon_skb_len);
if (iwl_mvm_has_new_tx_api(mvm)) {
beacon_cmd.data.template_id =
cpu_to_le32((u32)mvmvif->id);
beacon_cmd.data.ecsa_offset = cpu_to_le32(ecsa_offset);
beacon_cmd.data.csa_offset = cpu_to_le32(csa_offset);
beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon_skb_len);
if (vif->type == NL80211_IFTYPE_AP)
iwl_mvm_mac_ctxt_set_tim(mvm,
&beacon_cmd.data.tim_idx,
&beacon_cmd.data.tim_size,
beacon->data,
beacon_skb_len);
cmd.len[0] = sizeof(beacon_cmd);
cmd.data[0] = &beacon_cmd;
goto send;
} else {
u.beacon_cmd.data.ecsa_offset =
cpu_to_le32(ecsa_offset);
u.beacon_cmd.data.csa_offset = cpu_to_le32(csa_offset);
cmd.len[0] = sizeof(u.beacon_cmd);
cmd.data[0] = &u;
}
} else {
cmd.len[0] = sizeof(u.beacon_cmd_v6);
cmd.data[0] = &u;
}
/* TODO: for now the beacon template id is set to be the mac context id.
* Might be better to handle it as another resource ... */
u.beacon_cmd_v6.template_id = cpu_to_le32((u32)mvmvif->id);
info = IEEE80211_SKB_CB(beacon);
/* Set up TX command fields */
u.beacon_cmd_v6.tx.len = cpu_to_le16((u16)beacon_skb_len);
u.beacon_cmd_v6.tx.sta_id = mvmvif->bcast_sta.sta_id;
u.beacon_cmd_v6.tx.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
tx->len = cpu_to_le16((u16)beacon->len);
tx->sta_id = mvmvif->bcast_sta.sta_id;
tx->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
tx_flags = TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_TSF;
tx_flags |=
iwl_mvm_bt_coex_tx_prio(mvm, (void *)beacon->data, info, 0) <<
TX_CMD_FLG_BT_PRIO_POS;
u.beacon_cmd_v6.tx.tx_flags = cpu_to_le32(tx_flags);
tx->tx_flags = cpu_to_le32(tx_flags);
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) {
@ -1134,7 +951,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
mvm->mgmt_last_antenna_idx);
}
u.beacon_cmd_v6.tx.rate_n_flags =
tx->rate_n_flags =
cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
RATE_MCS_ANT_POS);
@ -1142,29 +959,126 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
rate = IWL_FIRST_OFDM_RATE;
} else {
rate = IWL_FIRST_CCK_RATE;
u.beacon_cmd_v6.tx.rate_n_flags |=
cpu_to_le32(RATE_MCS_CCK_MSK);
tx->rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK);
}
u.beacon_cmd_v6.tx.rate_n_flags |=
cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate));
/* Set up TX beacon command fields */
if (vif->type == NL80211_IFTYPE_AP)
iwl_mvm_mac_ctxt_set_tim(mvm, &u.beacon_cmd_v6.tim_idx,
&u.beacon_cmd_v6.tim_size,
beacon->data,
beacon_skb_len);
tx->rate_n_flags |= cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate));
}
send:
/* Submit command */
static int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm,
struct sk_buff *beacon,
void *data, int len)
{
struct iwl_host_cmd cmd = {
.id = BEACON_TEMPLATE_CMD,
.flags = CMD_ASYNC,
};
cmd.len[0] = len;
cmd.data[0] = data;
cmd.dataflags[0] = 0;
cmd.len[1] = beacon_skb_len;
cmd.len[1] = beacon->len;
cmd.data[1] = beacon->data;
cmd.dataflags[1] = IWL_HCMD_DFL_DUP;
return iwl_mvm_send_cmd(mvm, &cmd);
}
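
Fragment 0 of the host command carries the version-specific command struct and fragment 1 the raw beacon; IWL_HCMD_DFL_DUP makes the command layer copy the beacon data, so the caller need not keep the skb alive until completion. Each versioned builder below just fills its own struct and delegates, e.g. (v6 path, as above):

	struct iwl_mac_beacon_cmd_v6 beacon_cmd = {};
	/* ... fill tx params, template id, TIM ... */
	ret = iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
					       sizeof(beacon_cmd));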
static int iwl_mvm_mac_ctxt_send_beacon_v6(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct sk_buff *beacon)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mac_beacon_cmd_v6 beacon_cmd = {};
iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx);
beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id);
if (vif->type == NL80211_IFTYPE_AP)
iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx,
&beacon_cmd.tim_size,
beacon->data, beacon->len);
return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
sizeof(beacon_cmd));
}
static int iwl_mvm_mac_ctxt_send_beacon_v7(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct sk_buff *beacon)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mac_beacon_cmd_v7 beacon_cmd = {};
iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx);
beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id);
if (vif->type == NL80211_IFTYPE_AP)
iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx,
&beacon_cmd.tim_size,
beacon->data, beacon->len);
beacon_cmd.csa_offset =
cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
WLAN_EID_CHANNEL_SWITCH,
beacon->len));
beacon_cmd.ecsa_offset =
cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
WLAN_EID_EXT_CHANSWITCH_ANN,
beacon->len));
return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
sizeof(beacon_cmd));
}
static int iwl_mvm_mac_ctxt_send_beacon_v8(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct sk_buff *beacon)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mac_beacon_cmd beacon_cmd = {};
beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len);
beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id);
if (vif->type == NL80211_IFTYPE_AP)
iwl_mvm_mac_ctxt_set_tim(mvm,
&beacon_cmd.tim_idx,
&beacon_cmd.tim_size,
beacon->data, beacon->len);
beacon_cmd.csa_offset =
cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
WLAN_EID_CHANNEL_SWITCH,
beacon->len));
beacon_cmd.ecsa_offset =
cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
WLAN_EID_EXT_CHANSWITCH_ANN,
beacon->len));
return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
sizeof(beacon_cmd));
}
static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct sk_buff *beacon)
{
if (WARN_ON(!beacon))
return -EINVAL;
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD))
return iwl_mvm_mac_ctxt_send_beacon_v6(mvm, vif, beacon);
if (!iwl_mvm_has_new_tx_api(mvm))
return iwl_mvm_mac_ctxt_send_beacon_v7(mvm, vif, beacon);
return iwl_mvm_mac_ctxt_send_beacon_v8(mvm, vif, beacon);
}
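
The dispatcher replaces the old monolithic sender with a capability ladder: firmware without CSA/TBTT offload gets the v6 layout, offload-capable firmware without the new TX API gets v7 (which adds the CSA/eCSA offsets), and new-TX-API firmware gets v8 (which also carries an explicit byte count). A generic, compilable sketch of the pattern, with hypothetical names:

	/* minimal sketch of capability-keyed dispatch; names are hypothetical */
	struct ctx { unsigned int capa; };
	#define CAPA_CSA_TBTT_OFFLOAD	0x1
	#define CAPA_NEW_TX_API		0x2

	static int send_v6(struct ctx *c) { (void)c; return 0; }
	static int send_v7(struct ctx *c) { (void)c; return 0; }
	static int send_v8(struct ctx *c) { (void)c; return 0; }

	static int send_versioned(struct ctx *c)
	{
		if (!(c->capa & CAPA_CSA_TBTT_OFFLOAD))
			return send_v6(c);	/* oldest layout */
		if (!(c->capa & CAPA_NEW_TX_API))
			return send_v7(c);	/* + CSA/eCSA offsets */
		return send_v8(c);		/* + explicit byte count */
	}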
/* The beacon template for the AP/GO/IBSS has changed and needs update */
int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
@ -1559,12 +1473,14 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
/* TODO: implement start trigger */
if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger))
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
ieee80211_vif_to_wdev(vif),
trigger))
return;
if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
rx_missed_bcon >= stop_trig_missed_bcon)
iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL);
iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL);
}
void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,

View file

@ -87,7 +87,6 @@
#include "fw/error-dump.h"
#include "iwl-prph.h"
#include "iwl-nvm-parse.h"
#include "fw-dbg.h"
static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
{
@ -446,8 +445,18 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
if (iwl_mvm_has_new_rx_api(mvm))
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_STA_PM_NOTIF))
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) {
ieee80211_hw_set(hw, AP_LINK_PS);
} else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
/*
* we absolutely need this for the new TX API since that comes
* with many more queues than the current code can deal with
* for station powersave
*/
return -EINVAL;
}
if (mvm->trans->num_rx_queues > 1)
ieee80211_hw_set(hw, USES_RSS);
@ -455,10 +464,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (mvm->trans->max_skb_frags)
hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
if (!iwl_mvm_is_dqa_supported(mvm))
hw->queues = mvm->first_agg_queue;
else
hw->queues = IEEE80211_MAX_QUEUES;
hw->queues = IEEE80211_MAX_QUEUES;
hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
IEEE80211_RADIOTAP_MCS_HAVE_STBC;
@ -799,7 +805,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
goto drop;
}
if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
if (info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
!test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
!test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
goto drop;
@ -807,9 +813,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
/* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
ieee80211_is_mgmt(hdr->frame_control) &&
!ieee80211_is_deauth(hdr->frame_control) &&
!ieee80211_is_disassoc(hdr->frame_control) &&
!ieee80211_is_action(hdr->frame_control)))
!ieee80211_is_bufferable_mmpdu(hdr->frame_control)))
sta = NULL;
if (sta) {
@ -845,11 +849,11 @@ static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
return true;
}
#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
do { \
if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \
break; \
iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \
#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
do { \
if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \
break; \
iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt); \
} while (0)
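
The reindented macro is unchanged in substance: bail out unless the trigger's TID bitmap covers the TID at hand, then collect a dump with a formatted reason. An illustrative invocation, patterned on the AMPDU trigger paths that follow:

	CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
			 "TX AGG START: MAC %pM tid %d ssn %d",
			 sta->addr, tid, ssn);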
static void
@ -866,7 +870,8 @@ iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
ba_trig = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
ieee80211_vif_to_wdev(vif), trig))
return;
switch (action) {
@ -1029,8 +1034,8 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
* on D3->D0 transition
*/
if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert;
iwl_mvm_fw_error_dump(mvm);
mvm->fwrt.dump.desc = &iwl_dump_desc_assert;
iwl_fw_error_dump(&mvm->fwrt);
}
/* cleanup all stale references (scan, roc), but keep the
@ -1059,9 +1064,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
iwl_mvm_reset_phy_ctxts(mvm);
memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
@ -1072,7 +1075,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
mvm->vif_count = 0;
mvm->rx_ba_sessions = 0;
mvm->fw_dbg_conf = FW_DBG_INVALID;
mvm->fwrt.dump.conf = FW_DBG_INVALID;
/* keep statistics ticking */
iwl_mvm_accu_radio_stats(mvm);
@ -1255,16 +1258,16 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
* Lock and clear the firmware running bit here already, so that
* new commands coming in elsewhere, e.g. from debugfs, will not
* be able to proceed. This is important here because one of those
* debugfs files causes the fw_dump_wk to be triggered, and if we
* debugfs files causes the firmware dump to be triggered, and if we
* don't stop debugfs accesses before canceling that it could be
* retriggered after we flush it but before we've cleared the bit.
*/
clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
cancel_delayed_work_sync(&mvm->fw_dump_wk);
iwl_fw_cancel_dump(&mvm->fwrt);
cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
iwl_mvm_free_fw_dump_desc(mvm);
iwl_fw_free_dump_desc(&mvm->fwrt);
mutex_lock(&mvm->mutex);
__iwl_mvm_mac_stop(mvm);
@ -1370,17 +1373,15 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
goto out_release;
}
if (iwl_mvm_is_dqa_supported(mvm)) {
/*
* The only queue for this station is the mcast queue,
* which shouldn't be in the TFD mask anyway
*/
ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta,
0, vif->type,
IWL_STA_MULTICAST);
if (ret)
goto out_release;
}
/*
* The only queue for this station is the mcast queue,
* which shouldn't be in the TFD mask anyway
*/
ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta,
0, vif->type,
IWL_STA_MULTICAST);
if (ret)
goto out_release;
iwl_mvm_vif_dbgfs_register(mvm, vif);
goto out_unlock;
@ -1426,7 +1427,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out_unref_phy;
ret = iwl_mvm_add_bcast_sta(mvm, vif);
ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif);
if (ret)
goto out_unbind;
@ -1454,8 +1455,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
out_release:
if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count--;
iwl_mvm_mac_ctxt_release(mvm, vif);
out_unlock:
mutex_unlock(&mvm->mutex);
@ -1467,40 +1466,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);
if (tfd_msk && !iwl_mvm_is_dqa_supported(mvm)) {
/*
* mac80211 first removes all the stations of the vif and
* then removes the vif. When it removes a station it also
* flushes the AMPDU session. So by now, all the AMPDU sessions
* of all the stations of this vif are closed, and the queues
* of these AMPDU sessions are properly closed.
* We still need to take care of the shared queues of the vif.
* Flush them here.
* For DQA mode there is no need - the broadcast and multicast
* queues are flushed separately.
*/
mutex_lock(&mvm->mutex);
iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
mutex_unlock(&mvm->mutex);
/*
* There are transports that buffer a few frames in the host.
* For these, the flush above isn't enough since while we were
* flushing, the transport might have sent more frames to the
* device. To solve this, wait here until the transport is
* empty. Technically, this could have replaced the flush
* above, but flush is much faster than draining. So flush
* first, and drain to make sure we have no frames in the
* transport anymore.
* If a station still had frames on the shared queues, it is
* already marked as draining, so to complete the draining, we
* just need to wait until the transport is empty.
*/
iwl_trans_wait_tx_queues_empty(mvm->trans, tfd_msk);
}
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
/*
* Flush the ROC worker which will flush the OFFCHANNEL queue.
@ -1508,14 +1473,6 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
* queue are sent in ROC session.
*/
flush_work(&mvm->roc_done_wk);
} else {
/*
* By now, all the AC queues are empty. The AGG queues are
* empty too. We already got all the Tx responses for all the
* packets in the queues. The drain work can have been
* triggered. Flush it.
*/
flush_work(&mvm->sta_drained_wk);
}
}
@ -1556,7 +1513,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
mvm->p2p_device_vif = NULL;
iwl_mvm_rm_bcast_sta(mvm, vif);
iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
iwl_mvm_binding_remove_vif(mvm, vif);
iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
mvmvif->phy_ctxt = NULL;
@ -1569,7 +1526,6 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
iwl_mvm_mac_ctxt_remove(mvm, vif);
out_release:
iwl_mvm_mac_ctxt_release(mvm, vif);
mutex_unlock(&mvm->mutex);
}
@ -2405,15 +2361,18 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
unsigned long txqs = 0, tids = 0;
int tid;
/*
* If we have TVQM then we get too high queue numbers - luckily
* we really shouldn't get here with that because such hardware
* should have firmware supporting buffer station offload.
*/
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return;
spin_lock_bh(&mvmsta->lock);
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
if (!iwl_mvm_is_dqa_supported(mvm) &&
tid_data->state != IWL_AGG_ON &&
tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
continue;
if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE)
continue;
@ -2427,9 +2386,6 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
switch (cmd) {
case STA_NOTIFY_SLEEP:
if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
ieee80211_sta_block_awake(hw, sta, true);
for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
ieee80211_sta_set_buffered(sta, tid, true);
@ -2572,7 +2528,8 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS);
tdls_trig = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
ieee80211_vif_to_wdev(vif), trig))
return;
if (!(tdls_trig->action_bitmap & BIT(action)))
@ -2582,9 +2539,9 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0)
return;
iwl_mvm_fw_dbg_collect_trig(mvm, trig,
"TDLS event occurred, peer %pM, action %d",
peer_addr, action);
iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
"TDLS event occurred, peer %pM, action %d",
peer_addr, action);
}
static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
@ -2621,9 +2578,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
return -EINVAL;
/* if a STA is being removed, reuse its ID */
flush_work(&mvm->sta_drained_wk);
/*
* If we are in a STA removal flow and in DQA mode:
*
@ -2638,8 +2592,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
* make sure the worker is no longer handling frames for this STA.
*/
if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST &&
iwl_mvm_is_dqa_supported(mvm)) {
new_state == IEEE80211_STA_NOTEXIST) {
iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
flush_work(&mvm->add_stream_wk);
@ -3882,7 +3835,9 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
chsw->chandef.center_freq1);
iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH);
iwl_fw_dbg_trigger_simple_stop(&mvm->fwrt,
ieee80211_vif_to_wdev(vif),
FW_DBG_TRIGGER_CHANNEL_SWITCH);
switch (vif->type) {
case NL80211_IFTYPE_AP:
@ -4019,8 +3974,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
return;
/* Make sure we're done with the deferred traffic before flushing */
if (iwl_mvm_is_dqa_supported(mvm))
flush_work(&mvm->add_stream_wk);
flush_work(&mvm->add_stream_wk);
mutex_lock(&mvm->mutex);
mvmvif = iwl_mvm_vif_from_mac80211(vif);
@ -4157,11 +4111,11 @@ static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
const struct ieee80211_event *event)
{
#define CHECK_MLME_TRIGGER(_cnt, _fmt...) \
do { \
if ((trig_mlme->_cnt) && --(trig_mlme->_cnt)) \
break; \
iwl_mvm_fw_dbg_collect_trig(mvm, trig, _fmt); \
#define CHECK_MLME_TRIGGER(_cnt, _fmt...) \
do { \
if ((trig_mlme->_cnt) && --(trig_mlme->_cnt)) \
break; \
iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt); \
} while (0)
struct iwl_fw_dbg_trigger_tlv *trig;
@ -4172,7 +4126,8 @@ static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
trig_mlme = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
ieee80211_vif_to_wdev(vif), trig))
return;
if (event->u.mlme.data == ASSOC_EVENT) {
@ -4213,16 +4168,17 @@ static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
ba_trig = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
ieee80211_vif_to_wdev(vif), trig))
return;
if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
return;
iwl_mvm_fw_dbg_collect_trig(mvm, trig,
"BAR received from %pM, tid %d, ssn %d",
event->u.ba.sta->addr, event->u.ba.tid,
event->u.ba.ssn);
iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
"BAR received from %pM, tid %d, ssn %d",
event->u.ba.sta->addr, event->u.ba.tid,
event->u.ba.ssn);
}
static void
@ -4238,15 +4194,16 @@ iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
ba_trig = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
ieee80211_vif_to_wdev(vif), trig))
return;
if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
return;
iwl_mvm_fw_dbg_collect_trig(mvm, trig,
"Frame from %pM timed out, tid %d",
event->u.ba.sta->addr, event->u.ba.tid);
iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
"Frame from %pM timed out, tid %d",
event->u.ba.sta->addr, event->u.ba.tid);
}
static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
@ -4280,7 +4237,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
/* TODO - remove a000 disablement when we have RXQ config API */
if (!iwl_mvm_has_new_rx_api(mvm) || iwl_mvm_has_new_tx_api(mvm))
if (!iwl_mvm_has_new_rx_api(mvm) ||
mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000)
return;
notif->cookie = mvm->queue_sync_cookie;

View file

@ -87,6 +87,8 @@
#include "fw-api.h"
#include "constants.h"
#include "tof.h"
#include "fw/runtime.h"
#include "fw/dbg.h"
#define IWL_MVM_MAX_ADDRESSES 5
/* RSSI offset for WkP */
@ -119,6 +121,9 @@
*/
#define IWL_MVM_CS_UNBLOCK_TX_TIMEOUT 3
/* offchannel queue towards mac80211 */
#define IWL_MVM_OFFCHANNEL_QUEUE 0
extern const struct ieee80211_ops iwl_mvm_hw_ops;
/**
@ -137,34 +142,6 @@ struct iwl_mvm_mod_params {
};
extern struct iwl_mvm_mod_params iwlmvm_mod_params;
/**
* struct iwl_mvm_dump_ptrs - set of pointers needed for the fw-error-dump
*
* @op_mode_ptr: pointer to the buffer coming from the mvm op_mode
* @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the
* transport's data.
* @trans_len: length of the valid data in trans_ptr
* @op_mode_len: length of the valid data in op_mode_ptr
*/
struct iwl_mvm_dump_ptrs {
struct iwl_trans_dump_data *trans_ptr;
void *op_mode_ptr;
u32 op_mode_len;
};
/**
* struct iwl_mvm_dump_desc - describes the dump
* @len: length of trig_desc->data
* @trig_desc: the description of the dump
*/
struct iwl_mvm_dump_desc {
size_t len;
/* must be last */
struct iwl_fw_error_dump_trigger_desc trig_desc;
};
extern const struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert;
struct iwl_mvm_phy_ctxt {
u16 id;
u16 color;
@ -606,19 +583,6 @@ enum iwl_mvm_tdls_cs_state {
IWL_MVM_TDLS_SW_ACTIVE,
};
#define MAX_NUM_LMAC 2
struct iwl_mvm_shared_mem_cfg {
int num_lmacs;
int num_txfifo_entries;
struct {
u32 txfifo_size[TX_FIFO_MAX_NUM];
u32 rxfifo1_size;
} lmac[MAX_NUM_LMAC];
u32 rxfifo2_size;
u32 internal_txfifo_addr;
u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
};
/**
* struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer
* @head_sn: reorder window head sn
@ -766,7 +730,6 @@ struct iwl_mvm {
*/
struct iwl_mvm_vif *bf_allowed_vif;
enum iwl_ucode_type cur_ucode;
bool hw_registered;
bool calibrating;
u32 error_event_table[2];
@ -815,10 +778,7 @@ struct iwl_mvm {
/* NVM sections */
struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
/* Paging section */
struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS];
u16 num_of_paging_blk;
u16 num_of_pages_in_last_blk;
struct iwl_fw_runtime fwrt;
/* EEPROM MAC addresses */
struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
@ -826,11 +786,7 @@ struct iwl_mvm {
/* data related to data path */
struct iwl_rx_phy_info last_phy_info;
struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
struct work_struct sta_drained_wk;
unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
atomic_t pending_frames[IWL_MVM_STATION_COUNT];
u32 tfd_drained[IWL_MVM_STATION_COUNT];
u8 rx_ba_sessions;
/* configured by mac80211 */
@ -847,9 +803,6 @@ struct iwl_mvm {
/* max number of simultaneous scans the FW supports */
unsigned int max_scans;
/* ts of the beginning of a non-collect fw dbg data period */
unsigned long fw_dbg_non_collect_ts_start[FW_DBG_TRIGGER_MAX - 1];
/* UMAC scan tracking */
u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS];
@ -925,10 +878,6 @@ struct iwl_mvm {
/* -1 for always, 0 for never, >0 for that many times */
s8 fw_restart;
u8 fw_dbg_conf;
struct delayed_work fw_dump_wk;
const struct iwl_mvm_dump_desc *fw_dump_desc;
const struct iwl_fw_dbg_trigger_tlv *fw_dump_trig;
#ifdef CONFIG_IWLWIFI_LEDS
struct led_classdev led;
@ -1010,9 +959,6 @@ struct iwl_mvm {
u16 probe_queue;
u16 p2p_dev_queue;
u8 first_agg_queue;
u8 last_agg_queue;
/* Indicate if device power save is allowed */
u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */
unsigned int max_amsdu_len; /* used for debugfs only */
@ -1055,7 +1001,6 @@ struct iwl_mvm {
} peer;
} tdls_cs;
struct iwl_mvm_shared_mem_cfg smem_cfg;
u32 ciphers[IWL_MVM_NUM_CIPHERS];
struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
@ -1095,7 +1040,6 @@ struct iwl_mvm {
* @IWL_MVM_STATUS_IN_D0I3: NIC is in D0i3
* @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
* @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done
* @IWL_MVM_STATUS_DUMPING_FW_LOG: FW log is being dumped
* @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
*/
enum iwl_mvm_status {
@ -1107,7 +1051,6 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_IN_D0I3,
IWL_MVM_STATUS_ROC_AUX_RUNNING,
IWL_MVM_STATUS_D3_RECONFIG,
IWL_MVM_STATUS_DUMPING_FW_LOG,
IWL_MVM_STATUS_FIRMWARE_RUNNING,
};
@ -1180,12 +1123,6 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
}
static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm)
{
return fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_DQA_SUPPORT);
}
static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
{
/* For now we only use this mode to differentiate between
@ -1287,6 +1224,12 @@ static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
return mvm->trans->cfg->use_tfh;
}
static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm)
{
/* TODO - better define this */
return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_A000;
}
static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
{
/*
@ -1340,6 +1283,14 @@ static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm)
}
extern const u8 iwl_mvm_ac_to_tx_fifo[];
extern const u8 iwl_mvm_ac_to_gen2_tx_fifo[];
static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm,
enum ieee80211_ac_numbers ac)
{
return iwl_mvm_has_new_tx_api(mvm) ?
iwl_mvm_ac_to_gen2_tx_fifo[ac] : iwl_mvm_ac_to_tx_fifo[ac];
}
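
Call sites now ask this helper rather than indexing iwl_mvm_ac_to_tx_fifo directly, e.g. when building a TX queue config (as in the sta.c hunks later in this diff):

	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};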
struct iwl_rate_info {
u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
@ -1510,7 +1461,6 @@ u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef);
/* MAC (virtual interface) programming */
int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool force_assoc_off, const u8 *bssid_override);
@ -1573,9 +1523,6 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
/* Paging */
void iwl_free_fw_paging(struct iwl_mvm *mvm);
/* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
@ -1764,10 +1711,6 @@ bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
u8 sta_id, u8 tid, unsigned int timeout);
/*
* Disable a TXQ.
* Note that in non-DQA mode the %mac80211_queue and %tid params are ignored.
*/
int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u8 tid, u8 flags);
int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
@ -1777,33 +1720,15 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
*/
static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
{
u32 cmd_queue = iwl_mvm_is_dqa_supported(mvm) ? IWL_MVM_DQA_CMD_QUEUE :
IWL_MVM_CMD_QUEUE;
return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) &
~BIT(cmd_queue));
}
static inline
void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u8 fifo, u16 ssn, unsigned int wdg_timeout)
{
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = fifo,
.tid = IWL_MAX_TID_COUNT,
.aggregate = false,
.frame_limit = IWL_FRAME_LIMIT,
};
iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
~BIT(IWL_MVM_DQA_CMD_QUEUE));
}
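
With DQA unconditional, the flushable set is simply every HW queue minus the fixed DQA command queue. A standalone sketch of the mask arithmetic, assuming fewer than 32 queues:

	#include <stdint.h>

	/* all queues below num_queues, minus the command queue */
	static inline uint32_t flushable_mask(unsigned int num_queues,
					      unsigned int cmd_queue)
	{
		return ((UINT32_C(1) << num_queues) - 1) &
		       ~(UINT32_C(1) << cmd_queue);
	}

	/* flushable_mask(31, 9) == 0x7ffffdff */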
static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
if (!iwl_mvm_has_new_tx_api(mvm))
iwl_free_fw_paging(mvm);
iwl_free_fw_paging(&mvm->fwrt);
clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
mvm->fw_dbg_conf = FW_DBG_INVALID;
iwl_fw_dump_conf_clear(&mvm->fwrt);
iwl_trans_stop_device(mvm->trans);
}

View file

@ -576,11 +576,8 @@ int iwl_mvm_nvm_get_from_fw(struct iwl_mvm *mvm)
}
rsp = (void *)hcmd.resp_pkt->data;
if (le32_to_cpu(rsp->general.flags)) {
IWL_ERR(mvm, "Invalid NVM data from FW\n");
ret = -EINVAL;
goto out;
}
if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP)
IWL_INFO(mvm, "OTP is empty\n");
mvm->nvm_data = kzalloc(sizeof(*mvm->nvm_data) +
sizeof(struct ieee80211_channel) *

View file

@ -82,11 +82,10 @@
#include "iwl-io.h"
#include "iwl-prph.h"
#include "rs.h"
#include "fw-api-scan.h"
#include "fw/api/scan.h"
#include "time-event.h"
#include "fw-dbg.h"
#include "fw-api.h"
#include "fw-api-scan.h"
#include "fw/api/scan.h"
#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
@ -510,8 +509,6 @@ static u32 calc_min_backoff(struct iwl_trans *trans, const struct iwl_cfg *cfg)
return 0;
}
static void iwl_mvm_fw_error_dump_wk(struct work_struct *work);
static void iwl_mvm_tx_unblock_dwork(struct work_struct *work)
{
struct iwl_mvm *mvm =
@ -535,6 +532,34 @@ unlock:
mutex_unlock(&mvm->mutex);
}
static int iwl_mvm_fwrt_dump_start(void *ctx)
{
struct iwl_mvm *mvm = ctx;
int ret;
ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
if (ret)
return ret;
mutex_lock(&mvm->mutex);
return 0;
}
static void iwl_mvm_fwrt_dump_end(void *ctx)
{
struct iwl_mvm *mvm = ctx;
mutex_unlock(&mvm->mutex);
iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
}
static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
.dump_start = iwl_mvm_fwrt_dump_start,
.dump_end = iwl_mvm_fwrt_dump_end,
};
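
The runtime takes an ops table plus an opaque context so the shared fw/dbg code can take the op mode's lock and runtime-PM reference without knowing about struct iwl_mvm. A minimal sketch of the consumer side of this pattern, with hypothetical names:

	struct dump_ops {
		int  (*dump_start)(void *ctx);
		void (*dump_end)(void *ctx);
	};

	struct runtime {
		const struct dump_ops *ops;
		void *ctx;	/* here: the owning struct iwl_mvm */
	};

	static void collect_dump(struct runtime *rt)
	{
		if (rt->ops->dump_start && rt->ops->dump_start(rt->ctx))
			return;	/* could not take refs/locks - skip */
		/* ... gather and emit the dump ... */
		if (rt->ops->dump_end)
			rt->ops->dump_end(rt->ctx);
	}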
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
@ -580,6 +605,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->fw = fw;
mvm->hw = hw;
iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm);
mvm->init_status = 0;
if (iwl_mvm_has_new_rx_api(mvm)) {
@ -596,32 +623,15 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
if (!iwl_mvm_is_dqa_supported(mvm)) {
mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
if (mvm->cfg->base_params->num_of_queues == 16) {
mvm->aux_queue = 11;
mvm->first_agg_queue = 12;
BUILD_BUG_ON(BITS_PER_BYTE *
sizeof(mvm->hw_queue_to_mac80211[0]) < 12);
} else {
mvm->aux_queue = 15;
mvm->first_agg_queue = 16;
BUILD_BUG_ON(BITS_PER_BYTE *
sizeof(mvm->hw_queue_to_mac80211[0]) < 16);
}
} else {
mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
mvm->first_agg_queue = IWL_MVM_DQA_MIN_DATA_QUEUE;
mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE;
}
mvm->sf_state = SF_UNINIT;
if (iwl_mvm_has_new_tx_api(mvm))
mvm->cur_ucode = IWL_UCODE_REGULAR;
if (iwl_mvm_has_unified_ucode(mvm))
iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR);
else
mvm->cur_ucode = IWL_UCODE_INIT;
iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT);
mvm->drop_bcn_ap_mode = true;
mutex_init(&mvm->mutex);
@ -635,9 +645,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk);
INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
@ -688,10 +696,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.command_groups = iwl_mvm_groups;
trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
if (iwl_mvm_is_dqa_supported(mvm))
trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
else
trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
trans_cfg.scd_set_active = true;
@ -800,7 +805,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_mvm_leds_exit(mvm);
iwl_mvm_thermal_exit(mvm);
out_free:
flush_delayed_work(&mvm->fw_dump_wk);
iwl_fw_flush_dump(&mvm->fwrt);
if (iwlmvm_mod_params.init_dbg)
return op_mode;
@ -920,7 +925,7 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF);
cmds_trig = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
return;
for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
@ -932,9 +937,9 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
continue;
iwl_mvm_fw_dbg_collect_trig(mvm, trig,
"CMD 0x%02x.%02x received",
pkt->hdr.group_id, pkt->hdr.cmd);
iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
"CMD 0x%02x.%02x received",
pkt->hdr.group_id, pkt->hdr.cmd);
break;
}
}
@ -980,8 +985,10 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
list_add_tail(&entry->list, &mvm->async_handlers_list);
spin_unlock(&mvm->async_handlers_lock);
schedule_work(&mvm->async_handlers_wk);
break;
return;
}
iwl_fwrt_handle_notification(&mvm->fwrt, rxb);
}
static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
@ -1131,7 +1138,7 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
* Stop the device if we run OPERATIONAL firmware or if we are in the
* middle of the calibrations.
*/
return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating);
return state && (mvm->fwrt.cur_fw_img != IWL_UCODE_INIT || calibrating);
}
static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
@ -1160,57 +1167,6 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk)
module_put(THIS_MODULE);
}
static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
{
struct iwl_mvm *mvm =
container_of(work, struct iwl_mvm, fw_dump_wk.work);
if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT))
return;
mutex_lock(&mvm->mutex);
if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
/* stop recording */
iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
iwl_mvm_fw_error_dump(mvm);
/* start recording again if the firmware is not crashed */
if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
mvm->fw->dbg_dest_tlv) {
iwl_clear_bits_prph(mvm->trans,
MON_BUFF_SAMPLE_CTL, 0x100);
iwl_clear_bits_prph(mvm->trans,
MON_BUFF_SAMPLE_CTL, 0x1);
iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x1);
}
} else {
u32 in_sample = iwl_read_prph(mvm->trans, DBGC_IN_SAMPLE);
u32 out_ctrl = iwl_read_prph(mvm->trans, DBGC_OUT_CTRL);
/* stop recording */
iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
udelay(100);
iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0);
/* wait before we collect the data till the DBGC stop */
udelay(500);
iwl_mvm_fw_error_dump(mvm);
/* start recording again if the firmware is not crashed */
if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
mvm->fw->dbg_dest_tlv) {
iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, in_sample);
iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, out_ctrl);
}
}
mutex_unlock(&mvm->mutex);
iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
}
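
This worker's logic moves into the shared fw/dbg code: stop the debug sampler, let in-flight samples land, collect, and re-arm recording only if the firmware has not crashed. Its shape, with hypothetical helpers:

	/* minimal sketch mirroring the removed worker's sequence */
	static void dump_with_recording_paused(struct iwl_trans *trans)
	{
		stop_dbg_recording(trans);	/* e.g. DBGC_IN_SAMPLE = 0 */
		udelay(100);			/* let in-flight samples land */
		collect_fw_dump(trans);
		if (!fw_crashed(trans))
			restart_dbg_recording(trans);	/* restore saved regs */
	}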
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
{
iwl_abort_notification_waits(&mvm->notif_wait);
@ -1234,7 +1190,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
* can't recover this since we're already half suspended.
*/
if (!mvm->fw_restart && fw_error) {
iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert,
iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
NULL);
} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
struct iwl_mvm_reprobe *reprobe;
@ -1260,7 +1216,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
reprobe->dev = mvm->trans->dev;
INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
schedule_work(&reprobe->work);
} else if (mvm->cur_ucode == IWL_UCODE_REGULAR &&
} else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR &&
mvm->hw_registered) {
/* don't let the transport/FW power down */
iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
@ -1439,7 +1395,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
if (WARN_ON_ONCE(mvm->cur_ucode != IWL_UCODE_REGULAR))
if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR))
return -EINVAL;
set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
@ -1665,7 +1621,7 @@ int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm)
IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
if (WARN_ON_ONCE(mvm->cur_ucode != IWL_UCODE_REGULAR))
if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR))
return -EINVAL;
mutex_lock(&mvm->d0i3_suspend_mutex);

View file

@ -251,7 +251,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic)
{
enum iwl_phy_ctxt_action action = FW_CTXT_ACTION_MODIFY;
enum iwl_ctxt_action action = FW_CTXT_ACTION_MODIFY;
lockdep_assert_held(&mvm->mutex);

View file

@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -75,7 +75,7 @@
#include "iwl-debug.h"
#include "mvm.h"
#include "iwl-modparams.h"
#include "fw-api-power.h"
#include "fw/api/power.h"
#define POWER_KEEP_ALIVE_PERIOD_SEC 25
@ -186,7 +186,7 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
if (!mvmvif->queue_params[ac].uapsd)
continue;
if (mvm->cur_ucode != IWL_UCODE_WOWLAN)
if (mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN)
cmd->flags |=
cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
@ -220,14 +220,15 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
BIT(IEEE80211_AC_BK))) {
cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
cmd->snooze_window = (mvm->cur_ucode == IWL_UCODE_WOWLAN) ?
cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
cmd->snooze_window =
(mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ?
cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
}
cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len;
if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN || cmd->flags &
cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
cmd->rx_data_timeout_uapsd =
cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
@ -502,7 +503,7 @@ static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
struct iwl_mac_power_cmd cmd = {};
iwl_mvm_power_build_cmd(mvm, vif, &cmd,
mvm->cur_ucode != IWL_UCODE_WOWLAN);
mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN);
iwl_mvm_power_log(mvm, &cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
@ -525,8 +526,8 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if ((mvm->cur_ucode == IWL_UCODE_WOWLAN) ? mvm->disable_power_off_d3 :
mvm->disable_power_off)
if ((mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ?
mvm->disable_power_off_d3 : mvm->disable_power_off)
cmd.flags &=
cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
#endif
@ -933,7 +934,7 @@ static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm,
if (!mvmvif->bf_data.bf_enabled)
return 0;
if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN)
cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled ||

View file

@ -67,7 +67,6 @@
#include "iwl-trans.h"
#include "mvm.h"
#include "fw-api.h"
#include "fw-dbg.h"
/*
* iwl_mvm_rx_rx_phy_cmd - REPLY_RX_PHY_CMD handler
@ -397,10 +396,12 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
rssi = le32_to_cpu(rssi_trig->rssi);
trig_check =
iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif,
iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
ieee80211_vif_to_wdev(mvmsta->vif),
trig);
if (trig_check && rx_status->signal < rssi)
iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL);
iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
NULL);
}
if (ieee80211_is_data(hdr->frame_control))
@ -624,7 +625,7 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_STATS);
trig_stats = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
return;
trig_offset = le32_to_cpu(trig_stats->stop_offset);
@ -636,7 +637,7 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
if (le32_to_cpup((__le32 *) (pkt->data + trig_offset)) < trig_thold)
return;
iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL);
iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL);
}
void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,

View file

@ -63,7 +63,6 @@
#include "iwl-trans.h"
#include "mvm.h"
#include "fw-api.h"
#include "fw-dbg.h"
static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
int queue, struct ieee80211_sta *sta)
@ -852,7 +851,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rcu_read_lock();
if (le16_to_cpu(desc->status) & IWL_RX_MPDU_STATUS_SRC_STA_FOUND) {
if (desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;
if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
@ -906,10 +905,12 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rssi = le32_to_cpu(rssi_trig->rssi);
trig_check =
iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif,
iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
ieee80211_vif_to_wdev(mvmsta->vif),
trig);
if (trig_check && rx_status->signal < rssi)
iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL);
iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
NULL);
}
if (ieee80211_is_data(hdr->frame_control))

View file

@ -69,7 +69,7 @@
#include <net/mac80211.h>
#include "mvm.h"
#include "fw-api-scan.h"
#include "fw/api/scan.h"
#include "iwl-io.h"
#define IWL_DENSE_EBS_SCAN_RATIO 5
@ -743,7 +743,7 @@ static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
* 4. it's not a p2p find operation.
*/
return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
mvm->last_ebs_successful &&
mvm->last_ebs_successful && IWL_MVM_ENABLE_EBS &&
vif->type != NL80211_IFTYPE_P2P_DEVICE);
}

View file

@ -296,60 +296,6 @@ unlock:
rcu_read_unlock();
}
static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
unsigned long used_hw_queues;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
u32 ac;
lockdep_assert_held(&mvm->mutex);
used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);
/* Find available queues, and allocate them to the ACs */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
u8 queue = find_first_zero_bit(&used_hw_queues,
mvm->first_agg_queue);
if (queue >= mvm->first_agg_queue) {
IWL_ERR(mvm, "Failed to allocate STA queue\n");
return -EBUSY;
}
__set_bit(queue, &used_hw_queues);
mvmsta->hw_queue[ac] = queue;
}
/* Found a place for all queues - enable them */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
mvmsta->hw_queue[ac],
iwl_mvm_ac_to_tx_fifo[ac], 0,
wdg_timeout);
mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
}
return 0;
}
static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
unsigned long sta_msk;
int i;
lockdep_assert_held(&mvm->mutex);
/* disable the TDLS STA-specific queues */
sta_msk = mvmsta->tfd_queue_msk;
for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}
/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
unsigned long disable_agg_tids,
@ -757,7 +703,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = iwl_mvm_ac_to_tx_fifo[ac],
.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
.sta_id = mvmsta->sta_id,
.tid = tid,
.frame_limit = IWL_FRAME_LIMIT,
@ -1315,7 +1261,7 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
cfg.tid = i;
cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
txq_id ==
IWL_MVM_DQA_BSS_CLIENT_QUEUE);
@ -1329,8 +1275,6 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
}
}
atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
}
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
@ -1355,9 +1299,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
spin_lock_init(&mvm_sta->lock);
/* In DQA mode, if this is a HW restart, re-alloc existing queues */
if (iwl_mvm_is_dqa_supported(mvm) &&
test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
/* if this is a HW restart re-alloc existing queues */
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
goto update_fw;
}
@ -1375,33 +1318,15 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
/* HW restart, don't assume the memory has been zeroed */
atomic_set(&mvm->pending_frames[sta_id], 0);
mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
mvm_sta->tfd_queue_msk = 0;
/*
* Allocate new queues for a TDLS station, unless we're in DQA mode,
* and then they'll be allocated dynamically
*/
if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
ret = iwl_mvm_tdls_sta_init(mvm, sta);
if (ret)
return ret;
} else if (!iwl_mvm_is_dqa_supported(mvm)) {
for (i = 0; i < IEEE80211_NUM_ACS; i++)
if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
}
/* for HW restart - reset everything but the sequence number */
for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
u16 seq = mvm_sta->tid_data[i].seq_number;
memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
mvm_sta->tid_data[i].seq_number = seq;
if (!iwl_mvm_is_dqa_supported(mvm))
continue;
/*
* Mark all queues for this STA as unallocated and defer TX
* frames until the queue is allocated
@ -1435,7 +1360,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
mvm_sta->dup_data = dup_data;
}
if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
if (!iwl_mvm_has_new_tx_api(mvm)) {
ret = iwl_mvm_reserve_sta_stream(mvm, sta,
ieee80211_vif_type_p2p(vif));
if (ret)
@ -1461,8 +1386,6 @@ update_fw:
return 0;
err:
if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
iwl_mvm_tdls_sta_deinit(mvm, sta);
return ret;
}
@ -1535,79 +1458,6 @@ static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
return 0;
}
void iwl_mvm_sta_drained_wk(struct work_struct *wk)
{
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
u8 sta_id;
/*
* The mutex is needed because of the SYNC cmd, but not only: if the
* work would run concurrently with iwl_mvm_rm_sta, it would run before
* iwl_mvm_rm_sta sets the station as busy, and exit. Then
* iwl_mvm_rm_sta would set the station as busy, and nobody will clean
* that later.
*/
mutex_lock(&mvm->mutex);
for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
int ret;
struct ieee80211_sta *sta =
rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
/*
* This station is in use or RCU-removed; the latter happens in
* managed mode, where mac80211 removes the station before we
* can remove it from firmware (we can only do that after the
* MAC is marked unassociated), and possibly while the deauth
* frame to disconnect from the AP is still queued. Then, the
* station pointer is -ENOENT when the last skb is reclaimed.
*/
if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
continue;
if (PTR_ERR(sta) == -EINVAL) {
IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
sta_id);
continue;
}
if (!sta) {
IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
sta_id);
continue;
}
WARN_ON(PTR_ERR(sta) != -EBUSY);
/* This station was removed and we waited until it got drained,
* we can now proceed and remove it.
*/
ret = iwl_mvm_rm_sta_common(mvm, sta_id);
if (ret) {
IWL_ERR(mvm,
"Couldn't remove sta %d after it was drained\n",
sta_id);
continue;
}
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
clear_bit(sta_id, mvm->sta_drained);
if (mvm->tfd_drained[sta_id]) {
unsigned long i, msk = mvm->tfd_drained[sta_id];
for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
iwl_mvm_disable_txq(mvm, i, i,
IWL_MAX_TID_COUNT, 0);
mvm->tfd_drained[sta_id] = 0;
IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
sta_id, msk);
}
}
mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mvm_sta *mvm_sta)
@ -1631,10 +1481,11 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta)
{
int i, ret;
int i;
for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
u16 txq_id;
int ret;
spin_lock_bh(&mvm_sta->lock);
txq_id = mvm_sta->tid_data[i].txq_id;
@ -1645,10 +1496,10 @@ int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
if (ret)
break;
return ret;
}
return ret;
return 0;
}
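
Note the subtle fix: ret is now declared per iteration and a failure returns immediately, instead of breaking out and returning whatever the last iteration (or, with no queues assigned at all, an uninitialized variable) left in ret. The corrected shape in general form, with a hypothetical wait helper:

	for (i = 0; i < n; i++) {
		int ret = wait_txq_empty(trans, txq_id[i]);
		if (ret)
			return ret;	/* first failure wins */
	}
	return 0;	/* every queue drained */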
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
@ -1665,79 +1516,65 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
if (iwl_mvm_has_new_rx_api(mvm))
kfree(mvm_sta->dup_data);
if ((vif->type == NL80211_IFTYPE_STATION &&
mvmvif->ap_sta_id == sta_id) ||
iwl_mvm_is_dqa_supported(mvm)){
ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
if (ret)
return ret;
/* flush its queues here since we are freeing mvm_sta */
ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
if (ret)
return ret;
if (iwl_mvm_has_new_tx_api(mvm)) {
ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
} else {
u32 q_mask = mvm_sta->tfd_queue_msk;
ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
if (ret)
return ret;
ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
q_mask);
}
if (ret)
return ret;
ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
/* flush its queues here since we are freeing mvm_sta */
ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
if (ret)
return ret;
if (iwl_mvm_has_new_tx_api(mvm)) {
ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
} else {
u32 q_mask = mvm_sta->tfd_queue_msk;
/* If DQA is supported - the queues can be disabled now */
if (iwl_mvm_is_dqa_supported(mvm)) {
iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
/*
* If pending_frames is set at this point, it must be a driver
* internal logic error, since the queues are empty and were
* removed successfully.
* Warn on it, but set it to 0 anyway to avoid the station not
* being removed later in the function.
*/
WARN_ON(atomic_xchg(&mvm->pending_frames[sta_id], 0));
}
ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
q_mask);
}
if (ret)
return ret;
/* If there is a TXQ still marked as reserved - free it */
if (iwl_mvm_is_dqa_supported(mvm) &&
mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
u8 reserved_txq = mvm_sta->reserved_queue;
enum iwl_mvm_queue_status *status;
ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
/*
* If no traffic has gone through the reserved TXQ - it
* is still marked as IWL_MVM_QUEUE_RESERVED, and
* should be manually marked as free again
*/
spin_lock_bh(&mvm->queue_info_lock);
status = &mvm->queue_info[reserved_txq].status;
if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
(*status != IWL_MVM_QUEUE_FREE),
"sta_id %d reserved txq %d status %d",
sta_id, reserved_txq, *status)) {
spin_unlock_bh(&mvm->queue_info_lock);
return -EINVAL;
}
iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
*status = IWL_MVM_QUEUE_FREE;
/* If there is a TXQ still marked as reserved - free it */
if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
u8 reserved_txq = mvm_sta->reserved_queue;
enum iwl_mvm_queue_status *status;
/*
* If no traffic has gone through the reserved TXQ - it
* is still marked as IWL_MVM_QUEUE_RESERVED, and
* should be manually marked as free again
*/
spin_lock_bh(&mvm->queue_info_lock);
status = &mvm->queue_info[reserved_txq].status;
if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
(*status != IWL_MVM_QUEUE_FREE),
"sta_id %d reserved txq %d status %d",
sta_id, reserved_txq, *status)) {
spin_unlock_bh(&mvm->queue_info_lock);
return -EINVAL;
}
if (vif->type == NL80211_IFTYPE_STATION &&
mvmvif->ap_sta_id == sta_id) {
/* if associated - we can't remove the AP STA now */
if (vif->bss_conf.assoc)
return ret;
*status = IWL_MVM_QUEUE_FREE;
spin_unlock_bh(&mvm->queue_info_lock);
}
/* unassoc - go ahead - remove the AP STA now */
mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
if (vif->type == NL80211_IFTYPE_STATION &&
mvmvif->ap_sta_id == sta_id) {
/* if associated - we can't remove the AP STA now */
if (vif->bss_conf.assoc)
return ret;
/* clear d0i3_ap_sta_id if no longer relevant */
if (mvm->d0i3_ap_sta_id == sta_id)
mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
}
/* unassoc - go ahead - remove the AP STA now */
mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
/* clear d0i3_ap_sta_id if no longer relevant */
if (mvm->d0i3_ap_sta_id == sta_id)
mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
}
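To summarize the reordered removal path above: with DQA now assumed everywhere, station teardown follows one fixed sequence. A standalone sketch with stub helpers standing in for the iwl_mvm_* calls (not a drop-in for the driver):

    #include <stdio.h>

    static int drain(int on)      { printf("drain=%d\n", on); return 0; }
    static int flush_queues(void) { puts("flush queued frames"); return 0; }
    static int wait_empty(void)   { puts("wait for HW to finish"); return 0; }
    static void free_queues(void) { puts("free TXQs, incl. reserved one"); }

    static int rm_sta_sketch(void)
    {
        int ret;

        ret = drain(1);          /* fw starts dropping new frames */
        if (ret)
            return ret;
        ret = flush_queues();    /* empty what is already queued */
        if (ret)
            return ret;
        ret = wait_empty();      /* wait until the queues report empty */
        if (ret)
            return ret;
        drain(0);                /* lift the drain again */
        free_queues();           /* only now disable/free the queues */
        return 0;
    }

    int main(void) { return rm_sta_sketch(); }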
/*
@ -1754,32 +1591,10 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
* calls the drain worker.
*/
spin_lock_bh(&mvm_sta->lock);
spin_unlock_bh(&mvm_sta->lock);
/*
* There are frames pending on the AC queues for this station.
* We need to wait until all the frames are drained...
*/
if (atomic_read(&mvm->pending_frames[sta_id])) {
rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id],
ERR_PTR(-EBUSY));
spin_unlock_bh(&mvm_sta->lock);
/* disable TDLS sta queues on drain complete */
if (sta->tdls) {
mvm->tfd_drained[sta_id] = mvm_sta->tfd_queue_msk;
IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", sta_id);
}
ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
} else {
spin_unlock_bh(&mvm_sta->lock);
if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
iwl_mvm_tdls_sta_deinit(mvm, sta);
ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
}
ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
return ret;
}
@ -1878,7 +1693,7 @@ static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm)
IWL_MAX_TID_COUNT,
wdg_timeout);
mvm->aux_queue = queue;
} else if (iwl_mvm_is_dqa_supported(mvm)) {
} else {
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = IWL_MVM_TX_FIFO_MCAST,
.sta_id = mvm->aux_sta.sta_id,
@ -1889,9 +1704,6 @@ static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm)
iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
wdg_timeout);
} else {
iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
}
}
@ -1991,7 +1803,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
if (!iwl_mvm_has_new_tx_api(mvm)) {
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC)
queue = mvm->probe_queue;
@ -2078,8 +1890,7 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
if (iwl_mvm_is_dqa_supported(mvm))
iwl_mvm_free_bcast_sta_queues(mvm, vif);
iwl_mvm_free_bcast_sta_queues(mvm, vif);
ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
if (ret)
@ -2090,23 +1901,10 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
u32 qmask = 0;
lockdep_assert_held(&mvm->mutex);
if (!iwl_mvm_is_dqa_supported(mvm)) {
qmask = iwl_mvm_mac_get_queues_mask(vif);
/*
* The firmware defines the TFD queue mask to only be relevant
* for *unicast* queues, so the multicast (CAB) queue shouldn't
* be included. This only happens in NL80211_IFTYPE_AP vif type,
* so the next line will only have an effect there.
*/
qmask &= ~BIT(vif->cab_queue);
}
return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
ieee80211_vif_type_p2p(vif),
IWL_STA_GENERAL_PURPOSE);
}
@ -2118,7 +1916,7 @@ int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* @mvm: the mvm component
* @vif: the interface to which the broadcast station is added
* @bsta: the broadcast station to add. */
int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
@ -2149,7 +1947,7 @@ void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* Send the FW a request to remove the station from its internal data
* structures, and in addition remove it from the local data structure.
*/
int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
int ret;
@ -2188,9 +1986,6 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
if (!iwl_mvm_is_dqa_supported(mvm))
return 0;
if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_ADHOC))
return -ENOTSUPP;
@ -2255,9 +2050,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
if (!iwl_mvm_is_dqa_supported(mvm))
return 0;
iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
@ -2507,8 +2299,6 @@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
mvm_sta->tid_disable_agg &= ~BIT(tid);
} else {
/* In DQA-mode the queue isn't removed on agg termination */
if (!iwl_mvm_is_dqa_supported(mvm))
mvm_sta->tfd_queue_msk &= ~BIT(queue);
mvm_sta->tid_disable_agg |= BIT(tid);
}
@ -2611,19 +2401,17 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ret = -ENXIO;
goto release_locks;
}
} else if (iwl_mvm_is_dqa_supported(mvm) &&
unlikely(mvm->queue_info[txq_id].status ==
} else if (unlikely(mvm->queue_info[txq_id].status ==
IWL_MVM_QUEUE_SHARED)) {
ret = -ENXIO;
IWL_DEBUG_TX_QUEUES(mvm,
"Can't start tid %d agg on shared queue!\n",
tid);
goto release_locks;
} else if (!iwl_mvm_is_dqa_supported(mvm) ||
mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
} else if (mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
mvm->first_agg_queue,
mvm->last_agg_queue);
IWL_MVM_DQA_MIN_DATA_QUEUE,
IWL_MVM_DQA_MAX_DATA_QUEUE);
if (txq_id < 0) {
ret = txq_id;
IWL_ERR(mvm, "Failed to allocate agg queue\n");
@ -2741,37 +2529,34 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
queue_status = mvm->queue_info[queue].status;
spin_unlock_bh(&mvm->queue_info_lock);
/* In DQA mode, the existing queue might need to be reconfigured */
if (iwl_mvm_is_dqa_supported(mvm)) {
/* Maybe there is no need to even alloc a queue... */
if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
alloc_queue = false;
/* Maybe there is no need to even alloc a queue... */
if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
alloc_queue = false;
/*
* Only reconfig the SCD for the queue if the window size has
* changed from current (become smaller)
*/
if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
/*
* Only reconfig the SCD for the queue if the window size has
* changed from current (become smaller)
* If reconfiguring an existing queue, it first must be
* drained
*/
if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
/*
* If reconfiguring an existing queue, it first must be
* drained
*/
ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
BIT(queue));
if (ret) {
IWL_ERR(mvm,
"Error draining queue before reconfig\n");
return ret;
}
ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
BIT(queue));
if (ret) {
IWL_ERR(mvm,
"Error draining queue before reconfig\n");
return ret;
}
ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
mvmsta->sta_id, tid,
buf_size, ssn);
if (ret) {
IWL_ERR(mvm,
"Error reconfiguring TXQ #%d\n", queue);
return ret;
}
ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
mvmsta->sta_id, tid,
buf_size, ssn);
if (ret) {
IWL_ERR(mvm,
"Error reconfiguring TXQ #%d\n", queue);
return ret;
}
}
@ -2867,18 +2652,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
"ssn = %d, next_recl = %d\n",
tid_data->ssn, tid_data->next_reclaimed);
/*
* There are still packets for this RA / TID in the HW.
* Not relevant for DQA mode, since there is no need to disable
* the queue.
*/
if (!iwl_mvm_is_dqa_supported(mvm) &&
tid_data->ssn != tid_data->next_reclaimed) {
tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
err = 0;
break;
}
tid_data->ssn = 0xffff;
tid_data->state = IWL_AGG_OFF;
spin_unlock_bh(&mvmsta->lock);
@ -2886,12 +2659,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
if (!iwl_mvm_is_dqa_supported(mvm)) {
int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
}
return 0;
case IWL_AGG_STARTING:
case IWL_EMPTYING_HW_QUEUE_ADDBA:
@ -2961,13 +2728,6 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_drain_sta(mvm, mvmsta, false);
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
if (!iwl_mvm_is_dqa_supported(mvm)) {
int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
tid, 0);
}
}
return 0;
@ -3586,15 +3346,6 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
u16 n_queued;
tid_data = &mvmsta->tid_data[tid];
if (WARN(!iwl_mvm_is_dqa_supported(mvm) &&
tid_data->state != IWL_AGG_ON &&
tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
"TID %d state is %d\n",
tid, tid_data->state)) {
spin_unlock_bh(&mvmsta->lock);
ieee80211_sta_eosp(sta);
return;
}
n_queued = iwl_mvm_tid_queued(mvm, tid_data);
if (n_queued > remaining) {
@ -3688,13 +3439,8 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
mvm_sta->disable_tx = disable;
/*
* Tell mac80211 to start/stop queuing tx for this station,
* but don't stop queuing if there are still pending frames
* for this station.
*/
if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
ieee80211_sta_block_awake(mvm->hw, sta, disable);
/* Tell mac80211 to start/stop queuing tx for this station */
ieee80211_sta_block_awake(mvm->hw, sta, disable);
iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);


@ -222,16 +222,7 @@ struct iwl_mvm_vif;
* we remove the STA of the AP. The flush can be done synchronously against the
* fw.
* Drain means that the fw will drop all the frames sent to a specific station.
* This is useful when a client (if we are IBSS / GO or AP) disassociates. In
* that case, we need to drain all the frames for that client from the AC queues
* that are shared with the other clients. Only then, we can remove the STA in
* the fw. In order to do so, we track the non-AMPDU packets for each station.
* If mac80211 removes a STA and if it still has non-AMPDU packets pending in
* the queues, we mark this station as %EBUSY in %fw_id_to_mac_id, and drop all
* the frames for this STA (%iwl_mvm_rm_sta). When the last frame is dropped
* (we know about it with its Tx response), we remove the station in fw and set
* it as %NULL in %fw_id_to_mac_id: this is the purpose of
* %iwl_mvm_sta_drained_wk.
* This is useful when a client (if we are IBSS / GO or AP) disassociates.
*/
/**
@ -371,7 +362,6 @@ struct iwl_mvm_rxq_dup_data {
* struct iwl_mvm_sta - representation of a station in the driver
* @sta_id: the index of the station in the fw (will be replaced by id_n_color)
* @tfd_queue_msk: the tfd queues used by the station
* @hw_queue: per-AC mapping of the TFD queues used by station
* @mac_id_n_color: the MAC context this station is linked to
* @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
* tid.
@ -409,7 +399,6 @@ struct iwl_mvm_rxq_dup_data {
struct iwl_mvm_sta {
u32 sta_id;
u32 tfd_queue_msk;
u8 hw_queue[IEEE80211_NUM_ACS];
u32 mac_id_n_color;
u16 tid_disable_agg;
u8 max_agg_bufsize;
@ -533,9 +522,9 @@ void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm);
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
@ -548,7 +537,6 @@ int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm);
void iwl_mvm_sta_drained_wk(struct work_struct *wk);
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,


@ -73,7 +73,6 @@
#include "mvm.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "fw-dbg.h"
/*
* For the high priority TE use a time event type that has similar priority to
@ -130,10 +129,7 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
* issue as it will have to complete before the next command is
* executed, and a new time event means a new command.
*/
if (iwl_mvm_is_dqa_supported(mvm))
iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);
else
iwl_mvm_flush_tx_path(mvm, queues, CMD_ASYNC);
iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);
}
static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
@ -248,7 +244,9 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT);
te_trig = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(mvm, te_data->vif, trig))
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
ieee80211_vif_to_wdev(te_data->vif),
trig))
return;
for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
@ -263,11 +261,11 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
!(trig_status_bitmap & BIT(le32_to_cpu(notif->status))))
continue;
iwl_mvm_fw_dbg_collect_trig(mvm, trig,
"Time event %d Action 0x%x received status: %d",
te_data->id,
le32_to_cpu(notif->action),
le32_to_cpu(notif->status));
iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
"Time event %d Action 0x%x received status: %d",
te_data->id,
le32_to_cpu(notif->action),
le32_to_cpu(notif->status));
break;
}
}


@ -61,7 +61,7 @@
*
*****************************************************************************/
#include "mvm.h"
#include "fw-api-tof.h"
#include "fw/api/tof.h"
#define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256


@ -63,7 +63,7 @@
#ifndef __tof_h__
#define __tof_h__
#include "fw-api-tof.h"
#include "fw/api/tof.h"
struct iwl_mvm_tof_data {
struct iwl_tof_config_cmd tof_cfg;


@ -629,7 +629,7 @@ static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device,
mutex_lock(&mvm->mutex);
if (!iwl_mvm_firmware_running(mvm) ||
mvm->cur_ucode != IWL_UCODE_REGULAR) {
mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
ret = -EIO;
goto out;
}
@ -680,7 +680,7 @@ static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device,
mutex_lock(&mvm->mutex);
if (!iwl_mvm_firmware_running(mvm) ||
mvm->cur_ucode != IWL_UCODE_REGULAR) {
mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
ret = -EIO;
goto out;
}
@ -795,7 +795,7 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
mutex_lock(&mvm->mutex);
if (!iwl_mvm_firmware_running(mvm) ||
mvm->cur_ucode != IWL_UCODE_REGULAR) {
mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
ret = -EIO;
goto unlock;
}


@ -74,7 +74,6 @@
#include "iwl-eeprom-parse.h"
#include "mvm.h"
#include "sta.h"
#include "fw-dbg.h"
static void
iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
@ -89,15 +88,15 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
ba_trig = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
return;
if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
return;
iwl_mvm_fw_dbg_collect_trig(mvm, trig,
"BAR sent to %pM, tid %d, ssn %d",
addr, tid, ssn);
iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
"BAR sent to %pM, tid %d, ssn %d",
addr, tid, ssn);
}
#define OPT_HDR(type, skb, off) \
@ -553,9 +552,6 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif;
if (!iwl_mvm_is_dqa_supported(mvm))
return info->hw_queue;
mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif);
switch (info->control.vif->type) {
@ -654,8 +650,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
if (ap_sta_id != IWL_MVM_INVALID_STA)
sta_id = ap_sta_id;
} else if (iwl_mvm_is_dqa_supported(mvm) &&
info.control.vif->type == NL80211_IFTYPE_MONITOR) {
} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
queue = mvm->aux_queue;
}
}
@ -674,17 +669,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
return -1;
}
/*
* Increase the pending frames counter, so that later when a reply comes
* in and the counter is decreased - we don't start getting negative
* values.
* Note that we don't need to make sure it isn't agg'd, since we're
* TXing non-sta
* For DQA mode - we shouldn't increase it though
*/
if (!iwl_mvm_is_dqa_supported(mvm))
atomic_inc(&mvm->pending_frames[sta_id]);
return 0;
}
@ -752,7 +736,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
max_amsdu_len = sta->max_amsdu_len;
/* the Tx FIFO to which this A-MSDU will be routed */
txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, tid_to_mac80211_ac[tid]);
/*
* Don't send an AMSDU that will be longer than the TXF.
@ -761,7 +745,8 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
* fifo to be able to send bursts.
*/
max_amsdu_len = min_t(unsigned int, max_amsdu_len,
mvm->smem_cfg.lmac[0].txfifo_size[txf] - 256);
mvm->fwrt.smem_cfg.lmac[0].txfifo_size[txf] -
256);
if (unlikely(dbg_max_amsdu_len))
max_amsdu_len = min_t(unsigned int, max_amsdu_len,
@ -994,22 +979,13 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
}
}
if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu)
txq_id = mvmsta->tid_data[tid].txq_id;
if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) {
/* default to TID 0 for non-QoS packets */
u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;
txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
}
txq_id = mvmsta->tid_data[tid].txq_id;
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
/* Check if TXQ needs to be allocated or re-activated */
if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE ||
!mvmsta->tid_data[tid].is_tid_active) &&
iwl_mvm_is_dqa_supported(mvm)) {
!mvmsta->tid_data[tid].is_tid_active)) {
/* If TXQ needs to be allocated... */
if (txq_id == IWL_MVM_INVALID_QUEUE) {
iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
@ -1036,7 +1012,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
txq_id);
}
if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
if (!iwl_mvm_has_new_tx_api(mvm)) {
/* Keep track of the time of the last frame for this RA/TID */
mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
@ -1070,10 +1046,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
spin_unlock(&mvmsta->lock);
/* Increase pending frames count if this isn't AMPDU or DQA queue */
if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu)
atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
return 0;
drop_unlock_sta:
@ -1142,8 +1114,7 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
lockdep_assert_held(&mvmsta->lock);
if ((tid_data->state == IWL_AGG_ON ||
tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA ||
iwl_mvm_is_dqa_supported(mvm)) &&
tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
iwl_mvm_tid_queued(mvm, tid_data) == 0) {
/*
* Now that this aggregation or DQA queue is empty tell
@ -1177,13 +1148,6 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
IWL_DEBUG_TX_QUEUES(mvm,
"Can continue DELBA flow ssn = next_recl = %d\n",
tid_data->next_reclaimed);
if (!iwl_mvm_is_dqa_supported(mvm)) {
u8 mac80211_ac = tid_to_mac80211_ac[tid];
iwl_mvm_disable_txq(mvm, tid_data->txq_id,
vif->hw_queue[mac80211_ac], tid,
CMD_ASYNC);
}
tid_data->state = IWL_AGG_OFF;
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
@ -1295,7 +1259,7 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS);
status_trig = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
return;
for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
@ -1306,9 +1270,9 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
continue;
iwl_mvm_fw_dbg_collect_trig(mvm, trig,
"Tx status %d was received",
status & TX_STATUS_MSK);
iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
"Tx status %d was received",
status & TX_STATUS_MSK);
break;
}
}
@ -1381,10 +1345,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
info->flags |= IEEE80211_TX_STAT_ACK;
break;
case TX_STATUS_FAIL_DEST_PS:
/* In DQA, the FW should have stopped the queue and not
/* the FW should have stopped the queue and not
* return this status
*/
WARN_ON(iwl_mvm_is_dqa_supported(mvm));
WARN_ON(1);
info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
break;
default:
@ -1440,26 +1404,21 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
ieee80211_tx_status(mvm->hw, skb);
}
if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) {
/* If this is an aggregation queue, we use the ssn since:
* ssn = wifi seq_num % 256.
* The seq_ctl is the sequence control of the packet to which
* this Tx response relates. But if there is a hole in the
* bitmap of the BA we received, this Tx response may allow to
* reclaim the hole and all the subsequent packets that were
* already acked. In that case, seq_ctl != ssn, and the next
* packet to be reclaimed will be ssn and not seq_ctl. In that
* case, several packets will be reclaimed even if
* frame_count = 1.
*
* The ssn is the index (% 256) of the latest packet that has been
* treated (acked / dropped) + 1.
*/
next_reclaimed = ssn;
} else {
/* The next packet to be reclaimed is the one after this one */
next_reclaimed = IEEE80211_SEQ_TO_SN(seq_ctl + 0x10);
}
/* This is an aggregation queue or might become one, so we use
* the ssn since: ssn = wifi seq_num % 256.
* The seq_ctl is the sequence control of the packet to which
* this Tx response relates. But if there is a hole in the
* bitmap of the BA we received, this Tx response may allow to
* reclaim the hole and all the subsequent packets that were
* already acked. In that case, seq_ctl != ssn, and the next
* packet to be reclaimed will be ssn and not seq_ctl. In that
* case, several packets will be reclaimed even if
* frame_count = 1.
*
* The ssn is the index (% 256) of the latest packet that has been
* treated (acked / dropped) + 1.
*/
next_reclaimed = ssn;
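/* Worked example added for clarity (illustrative numbers only): the
 * BA window holds seqs 10..13 and a BA acked 10, 12 and 13 but not
 * 11. When the Tx response for seq 11 arrives with frame_count = 1,
 * seq_ctl points at 11, but the hole closes and the window moves
 * past 13, so the fw reports ssn = 14. Reclaiming up to ssn frees
 * three frames even though frame_count was 1, which is why
 * next_reclaimed must follow ssn rather than seq_ctl. */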
IWL_DEBUG_TX_REPLY(mvm,
"TXQ %d status %s (0x%08x)\n",
@ -1542,49 +1501,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
mvmsta = NULL;
}
/*
* If the txq is not an AMPDU queue, there is no chance we freed
* several skbs. Check that out...
*/
if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue)
goto out;
/* We can't free more than one frame at once on a shared queue */
WARN_ON(skb_freed > 1);
/* If we still have frames for this STA, there is nothing to do here */
if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
goto out;
if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) {
/*
* If there are no pending frames for this STA and
* the tx to this station is not disabled, notify
* mac80211 that this station can now wake up in its
* STA table.
* If mvmsta is not NULL, sta is valid.
*/
spin_lock_bh(&mvmsta->lock);
if (!mvmsta->disable_tx)
ieee80211_sta_block_awake(mvm->hw, sta, false);
spin_unlock_bh(&mvmsta->lock);
}
if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) {
/*
* We are draining and this was the last packet - pre_rcu_remove
* has already been called, and we may already be past the
* synchronize_net.
* Don't rely on iwl_mvm_rm_sta to see the empty Tx queues.
*/
set_bit(sta_id, mvm->sta_drained);
schedule_work(&mvm->sta_drained_wk);
}
out:
rcu_read_unlock();
}
@ -1648,9 +1564,8 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvmsta;
int queue = SEQ_TO_QUEUE(sequence);
if (WARN_ON_ONCE(queue < mvm->first_agg_queue &&
(!iwl_mvm_is_dqa_supported(mvm) ||
(queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))))
if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE &&
(queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))
return;
if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))


@ -70,9 +70,8 @@
#include "iwl-io.h"
#include "iwl-prph.h"
#include "iwl-csr.h"
#include "fw-dbg.h"
#include "mvm.h"
#include "fw-api-rs.h"
#include "fw/api/rs.h"
/*
* Will return 0 even if the cmd failed when RFKILL is asserted unless
@ -464,8 +463,8 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
IWL_ERR(mvm,
"Not valid error log pointer 0x%08X for %s uCode\n",
base,
(mvm->cur_ucode == IWL_UCODE_INIT)
? "Init" : "RT");
(mvm->fwrt.cur_fw_img == IWL_UCODE_INIT)
? "Init" : "RT");
return;
}
@ -500,7 +499,7 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
struct iwl_error_event_table table;
u32 val;
if (mvm->cur_ucode == IWL_UCODE_INIT) {
if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) {
if (!base)
base = mvm->fw->init_errlog_ptr;
} else {
@ -512,8 +511,8 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
IWL_ERR(mvm,
"Not valid error log pointer 0x%08X for %s uCode\n",
base,
(mvm->cur_ucode == IWL_UCODE_INIT)
? "Init" : "RT");
(mvm->fwrt.cur_fw_img == IWL_UCODE_INIT)
? "Init" : "RT");
return;
}
@ -1190,14 +1189,15 @@ void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
trig_mlme = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
ieee80211_vif_to_wdev(vif), trig))
goto out;
if (trig_mlme->stop_connection_loss &&
--trig_mlme->stop_connection_loss)
goto out;
iwl_mvm_fw_dbg_collect_trig(mvm, trig, "%s", errmsg);
iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg);
out:
ieee80211_connection_loss(vif);


@ -805,11 +805,11 @@ static int iwl_pci_resume(struct device *device)
/*
* Enable rfkill interrupt (in order to keep track of the rfkill
* status). Must be locked to avoid processing a possible rfkill
* interrupt while in iwl_trans_check_hw_rf_kill().
* interrupt while in iwl_pcie_check_hw_rf_kill().
*/
mutex_lock(&trans_pcie->mutex);
iwl_enable_rfkill_int(trans);
iwl_trans_check_hw_rf_kill(trans);
iwl_pcie_check_hw_rf_kill(trans);
mutex_unlock(&trans_pcie->mutex);
return 0;


@ -791,7 +791,7 @@ void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);


@ -307,7 +307,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
mutex_lock(&trans_pcie->mutex);
/* If platform's RF_KILL switch is NOT set to KILL */
hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
if (hw_rfkill && !run_in_rfkill) {
ret = -ERFKILL;
goto out;
@ -340,7 +340,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
goto out;
/* re-check RF-Kill state since we may have missed the interrupt */
hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
if (hw_rfkill && !run_in_rfkill)
ret = -ERFKILL;


@ -986,7 +986,7 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
&first_ucode_section);
}
bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans)
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill = iwl_is_rfkill_set(trans);
@ -1252,7 +1252,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
mutex_lock(&trans_pcie->mutex);
/* If platform's RF_KILL switch is NOT set to KILL */
hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
if (hw_rfkill && !run_in_rfkill) {
ret = -ERFKILL;
goto out;
@ -1300,7 +1300,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
ret = iwl_pcie_load_given_ucode(trans, fw);
/* re-check RF-Kill state since we may have missed the interrupt */
hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
if (hw_rfkill && !run_in_rfkill)
ret = -ERFKILL;
@ -1663,7 +1663,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
trans_pcie->is_down = false;
/* ...rfkill can call stop_device and set it false if needed */
iwl_trans_check_hw_rf_kill(trans);
iwl_pcie_check_hw_rf_kill(trans);
/* Make sure we sync here, because we'll need full access later */
if (low_power)


@ -55,7 +55,7 @@
#include "iwl-csr.h"
#include "iwl-io.h"
#include "internal.h"
#include "mvm/fw-api.h"
#include "fw/api/tx.h"
/*
* iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels
@ -422,9 +422,9 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
hdr_len = ieee80211_hdrlen(hdr->frame_control);
if (amsdu) {
if (!iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
tb1_len + IWL_FIRST_TB_SIZE,
hdr_len, dev_cmd))
if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
tb1_len + IWL_FIRST_TB_SIZE,
hdr_len, dev_cmd))
goto out_err;
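The two-line change above is an inverted error check: like most kernel helpers, iwl_pcie_gen2_build_amsdu() returns 0 on success, so the old 'if (!...)' jumped to the error path precisely when building the A-MSDU had worked. A standalone sketch of the convention:

    #include <stdio.h>

    static int build_amsdu(void)
    {
        return 0;                     /* kernel style: 0 means success */
    }

    int main(void)
    {
        if (build_amsdu()) {          /* non-zero return means failure */
            puts("error path");
            return 1;
        }
        puts("success path");
        return 0;
    }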
/*


@ -43,8 +43,7 @@
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"
#include "fw/api/tx.h"
#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4
@ -2370,7 +2369,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
tb1_len = ALIGN(len, 4);
/* Tell NIC about any 2-byte padding after MAC header */
if (tb1_len != len)
tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
} else {
tb1_len = len;
}


@ -125,8 +125,8 @@ void hostap_remove_interface(struct net_device *dev, int rtnl_locked,
else
unregister_netdev(dev);
/* dev->destructor = free_netdev() will free the device data, including
* private data, when removing the device */
/* 'dev->needs_free_netdev = true' implies device data, including
* private data, will be freed when the device is removed */
}
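The updated comment documents the netdev teardown idiom that replaced dev->destructor in kernel 4.12. A sketch of the setup side under that idiom (kernel-style fragment, not hostap's literal code):

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>

    static void my_iface_setup(struct net_device *dev)
    {
        ether_setup(dev);
        /* core calls free_netdev() for us after unregistration */
        dev->needs_free_netdev = true;
        /* optional hook for extra teardown before the free */
        dev->priv_destructor = NULL;
    }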


@ -572,6 +572,8 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
mwifiex_dbg(priv->adapter, CMD, "cmd: %s: tid %d\n", __func__, tid);
memset(&add_ba_req, 0, sizeof(add_ba_req));
if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
priv->adapter->is_hw_11ac_capable &&


@ -3123,11 +3123,7 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
priv->dfs_chan_sw_workqueue = NULL;
}
/* Clear the priv in adapter */
priv->netdev->ieee80211_ptr = NULL;
priv->netdev = NULL;
priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
priv->media_connected = false;
switch (priv->bss_mode) {
case NL80211_IFTYPE_UNSPECIFIED:
@ -4215,7 +4211,7 @@ int mwifiex_init_channel_scan_gap(struct mwifiex_adapter *adapter)
if (adapter->config_bands & BAND_A)
n_channels_a = mwifiex_band_5ghz.n_channels;
adapter->num_in_chan_stats = max_t(u32, n_channels_bg, n_channels_a);
adapter->num_in_chan_stats = n_channels_bg + n_channels_a;
adapter->chan_stats = vmalloc(sizeof(*adapter->chan_stats) *
adapter->num_in_chan_stats);
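The sizing fix above matters for dual-band scans: results can arrive for every 2.4 GHz channel and every 5 GHz channel in the same scan, so the stats table must hold the sum of both counts, not the larger of the two. A standalone illustration (channel counts are just examples):

    #include <stdio.h>

    int main(void)
    {
        unsigned int n_bg = 14, n_a = 25;    /* example channel counts */
        unsigned int old_size = n_a > n_bg ? n_a : n_bg;  /* max: 25 */
        unsigned int new_size = n_bg + n_a;               /* sum: 39 */

        printf("max-based table: %u entries, sum-based: %u entries\n",
               old_size, new_size);
        return 0;            /* the max-based table could overflow by 14 */
    }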


@ -180,11 +180,9 @@ static struct region_code_mapping region_code_mapping_t[] = {
u8 *mwifiex_11d_code_2_region(u8 code)
{
u8 i;
u8 size = sizeof(region_code_mapping_t)/
sizeof(struct region_code_mapping);
/* Look for code in mapping table */
for (i = 0; i < size; i++)
for (i = 0; i < ARRAY_SIZE(region_code_mapping_t); i++)
if (region_code_mapping_t[i].code == code)
return region_code_mapping_t[i].region;
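ARRAY_SIZE, adopted here in place of the hand-rolled division, is the kernel's standard element-count macro (the in-tree version additionally rejects non-array arguments at compile time). A minimal portable form:

    #include <stdio.h>

    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    int main(void)
    {
        int codes[] = { 0x10, 0x20, 0x30 };
        size_t i;

        for (i = 0; i < ARRAY_SIZE(codes); i++)
            printf("codes[%zu] = 0x%x\n", i, codes[i]);
        return 0;
    }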


@ -26,6 +26,8 @@
#include "11n.h"
#include "11ac.h"
static void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter);
/*
* This function initializes a command node.
*
@ -427,7 +429,7 @@ int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter)
* The function calls the completion callback for all the command
* buffers that still have response buffers associated with them.
*/
int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
void mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_array;
u32 i;
@ -436,7 +438,7 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
if (!adapter->cmd_pool) {
mwifiex_dbg(adapter, FATAL,
"info: FREE_CMD_BUF: cmd_pool is null\n");
return 0;
return;
}
cmd_array = adapter->cmd_pool;
@ -464,8 +466,6 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
kfree(adapter->cmd_pool);
adapter->cmd_pool = NULL;
}
return 0;
}
/*
@ -666,7 +666,7 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
cmd_no == HostCmd_CMD_802_11_SCAN_EXT) {
mwifiex_queue_scan_cmd(priv, cmd_node);
} else {
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node);
queue_work(adapter->workqueue, &adapter->main_work);
if (cmd_node->wait_q_enabled)
ret = mwifiex_wait_queue_complete(adapter, cmd_node);
@ -684,11 +684,12 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
*/
void
mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter,
struct cmd_ctrl_node *cmd_node, u32 add_tail)
struct cmd_ctrl_node *cmd_node)
{
struct host_cmd_ds_command *host_cmd = NULL;
u16 command;
unsigned long flags;
bool add_tail = true;
host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
if (!host_cmd) {
@ -1075,7 +1076,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
* In case of scan commands, all pending commands in scan pending queue
* are cancelled.
*/
void
static void
mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_node = NULL;
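The hunk above shows the usual recipe for privatizing a function: once mwifiex_cancel_pending_ioctl() loses its last external caller it becomes static, and the forward declaration added near the top of the file keeps earlier call sites compiling. Sketched in miniature:

    #include <stdio.h>

    static void helper(void);              /* forward declaration */

    static void caller(void)
    {
        helper();                          /* call site above the body */
    }

    static void helper(void)
    {
        puts("helper");
    }

    int main(void)
    {
        caller();
        return 0;
    }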


@ -940,8 +940,6 @@ mwifiex_reset_write(struct file *file,
if (adapter->if_ops.card_reset) {
dev_info(adapter->dev, "Resetting per request\n");
adapter->hw_status = MWIFIEX_HW_STATUS_RESET;
mwifiex_cancel_all_pending_cmd(adapter);
adapter->if_ops.card_reset(adapter);
}


@ -337,17 +337,9 @@ void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
struct mwifiex_adapter *adapter)
{
unsigned long dev_queue_flags;
unsigned int i;
spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
for (i = 0; i < netdev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(netdev, i);
if (netif_tx_queue_stopped(txq))
netif_tx_wake_queue(txq);
}
netif_tx_wake_all_queues(netdev);
spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
}
@ -358,30 +350,20 @@ void mwifiex_stop_net_dev_queue(struct net_device *netdev,
struct mwifiex_adapter *adapter)
{
unsigned long dev_queue_flags;
unsigned int i;
spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
for (i = 0; i < netdev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(netdev, i);
if (!netif_tx_queue_stopped(txq))
netif_tx_stop_queue(txq);
}
netif_tx_stop_all_queues(netdev);
spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
}
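The replacement helpers already do the per-queue work internally, which is why the open-coded loops and their stopped/awake checks could go. A paraphrase of what netif_tx_stop_all_queues() expands to (per include/linux/netdevice.h; the wake variant is symmetric):

    #include <linux/netdevice.h>

    static inline void stop_all_queues_paraphrase(struct net_device *dev)
    {
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++)
            netif_tx_stop_queue(netdev_get_tx_queue(dev, i));
    }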
/*
* This function releases the lock variables and frees the locks and
* associated locks.
* This function invalidates the list heads.
*/
static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter)
static void mwifiex_invalidate_lists(struct mwifiex_adapter *adapter)
{
struct mwifiex_private *priv;
s32 i, j;
/* Free lists */
list_del(&adapter->cmd_free_q);
list_del(&adapter->cmd_pending_q);
list_del(&adapter->scan_pending_q);
@ -418,9 +400,11 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
mwifiex_cancel_all_pending_cmd(adapter);
wake_up_interruptible(&adapter->cmd_wait_q.wait);
wake_up_interruptible(&adapter->hs_activate_wait_q);
}
/* Free lock variables */
mwifiex_free_lock_list(adapter);
void mwifiex_free_cmd_buffers(struct mwifiex_adapter *adapter)
{
mwifiex_invalidate_lists(adapter);
/* Free command buffer */
mwifiex_dbg(adapter, INFO, "info: free cmd buffer\n");


@ -46,7 +46,7 @@ MODULE_PARM_DESC(mfg_mode, "manufacturing mode enable:1, disable:0");
bool aggr_ctrl;
module_param(aggr_ctrl, bool, 0000);
MODULE_PARM_DESC(aggr_ctrl, "usb tx aggreataon enable:1, disable:0");
MODULE_PARM_DESC(aggr_ctrl, "usb tx aggregation enable:1, disable:0");
/*
* This function registers the device and performs all the necessary
@ -588,7 +588,7 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context)
if (mwifiex_init_channel_scan_gap(adapter)) {
mwifiex_dbg(adapter, ERROR,
"could not init channel stats table\n");
goto err_init_fw;
goto err_init_chan_scan;
}
if (driver_mode) {
@ -636,6 +636,7 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context)
err_add_intf:
vfree(adapter->chan_stats);
err_init_chan_scan:
wiphy_unregister(adapter->wiphy);
wiphy_free(adapter->wiphy);
err_init_fw:
@ -653,6 +654,7 @@ err_dnld_fw:
if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
pr_debug("info: %s: shutdown mwifiex\n", __func__);
mwifiex_shutdown_drv(adapter);
mwifiex_free_cmd_buffers(adapter);
}
init_failed = true;
@ -665,8 +667,11 @@ done:
release_firmware(adapter->firmware);
adapter->firmware = NULL;
}
if (init_failed)
if (init_failed) {
if (adapter->irq_wakeup >= 0)
device_init_wakeup(adapter->dev, false);
mwifiex_free_adapter(adapter);
}
/* Tell all current and future waiters we're finished */
complete_all(fw_done);
@ -1352,26 +1357,12 @@ static void mwifiex_main_work_queue(struct work_struct *work)
mwifiex_main_process(adapter);
}
/*
* This function gets called during PCIe function level reset. Required
* code is extracted from mwifiex_remove_card()
*/
int
mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
/* Common teardown code used for both device removal and reset */
static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter)
{
struct mwifiex_private *priv;
int i;
if (!adapter)
goto exit_return;
wait_for_completion(adapter->fw_done);
/* Caller should ensure we aren't suspending while this happens */
reinit_completion(adapter->fw_done);
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
mwifiex_deauthenticate(priv, NULL);
/* We can no longer handle interrupts once we start doing the teardown
* below.
*/
@ -1380,6 +1371,7 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
adapter->surprise_removed = true;
mwifiex_terminate_workqueue(adapter);
adapter->int_status = 0;
/* Stop data */
for (i = 0; i < adapter->priv_num; i++) {
@ -1393,12 +1385,9 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
}
mwifiex_dbg(adapter, CMD, "cmd: calling mwifiex_shutdown_drv...\n");
mwifiex_shutdown_drv(adapter);
if (adapter->if_ops.down_dev)
adapter->if_ops.down_dev(adapter);
mwifiex_dbg(adapter, CMD, "cmd: mwifiex_shutdown_drv done\n");
if (atomic_read(&adapter->rx_pending) ||
atomic_read(&adapter->tx_pending) ||
atomic_read(&adapter->cmd_pending)) {
@ -1420,10 +1409,37 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev);
rtnl_unlock();
}
vfree(adapter->chan_stats);
mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
exit_return:
wiphy_unregister(adapter->wiphy);
wiphy_free(adapter->wiphy);
adapter->wiphy = NULL;
vfree(adapter->chan_stats);
mwifiex_free_cmd_buffers(adapter);
}
/*
* This function gets called during PCIe function level reset.
*/
int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
{
struct mwifiex_private *priv;
if (!adapter)
return 0;
wait_for_completion(adapter->fw_done);
/* Caller should ensure we aren't suspending while this happens */
reinit_completion(adapter->fw_done);
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
mwifiex_deauthenticate(priv, NULL);
mwifiex_uninit_sw(adapter);
if (adapter->if_ops.down_dev)
adapter->if_ops.down_dev(adapter);
return 0;
}
EXPORT_SYMBOL_GPL(mwifiex_shutdown_sw);
@ -1506,6 +1522,7 @@ err_kmalloc:
mwifiex_dbg(adapter, ERROR,
"info: %s: shutdown mwifiex\n", __func__);
mwifiex_shutdown_drv(adapter);
mwifiex_free_cmd_buffers(adapter);
}
complete_all(adapter->fw_done);
@ -1605,10 +1622,8 @@ mwifiex_add_card(void *card, struct completion *fw_done,
adapter->cmd_wait_q.status = 0;
adapter->scan_wait_q_woken = false;
if ((num_possible_cpus() > 1) || adapter->iface_type == MWIFIEX_USB) {
if ((num_possible_cpus() > 1) || adapter->iface_type == MWIFIEX_USB)
adapter->rx_work_enabled = true;
pr_notice("rx work enabled, cpus %d\n", num_possible_cpus());
}
adapter->workqueue =
alloc_workqueue("MWIFIEX_WORK_QUEUE",
@ -1653,8 +1668,11 @@ err_registerdev:
if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
pr_debug("info: %s: shutdown mwifiex\n", __func__);
mwifiex_shutdown_drv(adapter);
mwifiex_free_cmd_buffers(adapter);
}
err_kmalloc:
if (adapter->irq_wakeup >= 0)
device_init_wakeup(adapter->dev, false);
mwifiex_free_adapter(adapter);
err_init_sw:
@ -1676,64 +1694,10 @@ EXPORT_SYMBOL_GPL(mwifiex_add_card);
*/
int mwifiex_remove_card(struct mwifiex_adapter *adapter)
{
struct mwifiex_private *priv = NULL;
int i;
if (!adapter)
goto exit_remove;
return 0;
/* We can no longer handle interrupts once we start doing the teardown
* below. */
if (adapter->if_ops.disable_int)
adapter->if_ops.disable_int(adapter);
adapter->surprise_removed = true;
mwifiex_terminate_workqueue(adapter);
/* Stop data */
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
if (priv && priv->netdev) {
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
}
}
mwifiex_dbg(adapter, CMD,
"cmd: calling mwifiex_shutdown_drv...\n");
mwifiex_shutdown_drv(adapter);
mwifiex_dbg(adapter, CMD,
"cmd: mwifiex_shutdown_drv done\n");
if (atomic_read(&adapter->rx_pending) ||
atomic_read(&adapter->tx_pending) ||
atomic_read(&adapter->cmd_pending)) {
mwifiex_dbg(adapter, ERROR,
"rx_pending=%d, tx_pending=%d,\t"
"cmd_pending=%d\n",
atomic_read(&adapter->rx_pending),
atomic_read(&adapter->tx_pending),
atomic_read(&adapter->cmd_pending));
}
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
if (!priv)
continue;
rtnl_lock();
if (priv->netdev &&
priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED)
mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev);
rtnl_unlock();
}
vfree(adapter->chan_stats);
wiphy_unregister(adapter->wiphy);
wiphy_free(adapter->wiphy);
mwifiex_uninit_sw(adapter);
if (adapter->irq_wakeup >= 0)
device_init_wakeup(adapter->dev, false);
@ -1748,7 +1712,6 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter)
"info: free adapter\n");
mwifiex_free_adapter(adapter);
exit_remove:
return 0;
}
EXPORT_SYMBOL_GPL(mwifiex_remove_card);


@ -1077,9 +1077,9 @@ int mwifiex_get_debug_info(struct mwifiex_private *,
struct mwifiex_debug_info *);
int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter);
int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter);
void mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter);
void mwifiex_free_cmd_buffers(struct mwifiex_adapter *adapter);
void mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter);
void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter);
void mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter);
void mwifiex_cancel_scan(struct mwifiex_adapter *adapter);
@ -1087,8 +1087,7 @@ void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter,
struct cmd_ctrl_node *cmd_node);
void mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter,
struct cmd_ctrl_node *cmd_node,
u32 addtail);
struct cmd_ctrl_node *cmd_node);
int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter);
int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter);


@ -1043,12 +1043,14 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
mwifiex_unmap_pci_memory(adapter, card->cmdrsp_buf,
PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(card->cmdrsp_buf);
card->cmdrsp_buf = NULL;
}
if (card && card->cmd_buf) {
mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
PCI_DMA_TODEVICE);
dev_kfree_skb_any(card->cmd_buf);
card->cmd_buf = NULL;
}
return 0;
}
@ -1983,7 +1985,8 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
* (3) wifi image.
*
* This function bypasses the header and bluetooth part and returns
* the offset of tail wifi-only part.
* the offset of the tail wifi-only part. If the image is already
* wifi-only, i.e. starts with CMD1, it returns 0.
*/
static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter,
@ -1991,7 +1994,7 @@ static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter,
const struct mwifiex_fw_data *fwdata;
u32 offset = 0, data_len, dnld_cmd;
int ret = 0;
bool cmd7_before = false;
bool cmd7_before = false, first_cmd = false;
while (1) {
/* Check for integer and buffer overflow */
@ -2012,20 +2015,29 @@ static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter,
switch (dnld_cmd) {
case MWIFIEX_FW_DNLD_CMD_1:
if (offset + data_len < data_len) {
mwifiex_dbg(adapter, ERROR, "bad FW parse\n");
ret = -1;
goto done;
}
/* Image starts with cmd1: already a wifi-only firmware */
if (!first_cmd) {
mwifiex_dbg(adapter, MSG,
"input wifi-only firmware\n");
return 0;
}
if (!cmd7_before) {
mwifiex_dbg(adapter, ERROR,
"no cmd7 before cmd1!\n");
ret = -1;
goto done;
}
if (offset + data_len < data_len) {
mwifiex_dbg(adapter, ERROR, "bad FW parse\n");
ret = -1;
goto done;
}
offset += data_len;
break;
case MWIFIEX_FW_DNLD_CMD_5:
first_cmd = true;
/* Check for integer overflow */
if (offset + data_len < data_len) {
mwifiex_dbg(adapter, ERROR, "bad FW parse\n");
@ -2035,6 +2047,7 @@ static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter,
offset += data_len;
break;
case MWIFIEX_FW_DNLD_CMD_6:
first_cmd = true;
/* Check for integer overflow */
if (offset + data_len < data_len) {
mwifiex_dbg(adapter, ERROR, "bad FW parse\n");
@ -2051,6 +2064,7 @@ static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter,
}
goto done;
case MWIFIEX_FW_DNLD_CMD_7:
first_cmd = true;
cmd7_before = true;
break;
default:
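/* Recap of the parse rules implemented above (added for clarity):
 * a combo image is laid out as [header: CMD7/CMD5/CMD6 blocks]
 * [bluetooth part][wifi part], while a wifi-only image begins
 * directly with a CMD1 block. Hence:
 *  - CMD1 as the very first block  -> already wifi-only, return 0;
 *  - CMD1 later, but no CMD7 seen  -> malformed image, fail;
 *  - otherwise keep skipping blocks, accumulating 'offset' with
 *    overflow checks, until the wifi-only tail is reached. */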
@ -2428,7 +2442,7 @@ exit:
* In case of Rx packets received, the packets are uploaded from card to
* host and processed accordingly.
*/
static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter)
static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
{
int ret;
u32 pcie_ireg = 0;
@ -2471,28 +2485,24 @@ static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter)
}
if (pcie_ireg & HOST_INTR_DNLD_DONE) {
pcie_ireg &= ~HOST_INTR_DNLD_DONE;
mwifiex_dbg(adapter, INTR, "info: TX DNLD Done\n");
ret = mwifiex_pcie_send_data_complete(adapter);
if (ret)
return ret;
}
if (pcie_ireg & HOST_INTR_UPLD_RDY) {
pcie_ireg &= ~HOST_INTR_UPLD_RDY;
mwifiex_dbg(adapter, INTR, "info: Rx DATA\n");
ret = mwifiex_pcie_process_recv_data(adapter);
if (ret)
return ret;
}
if (pcie_ireg & HOST_INTR_EVENT_RDY) {
pcie_ireg &= ~HOST_INTR_EVENT_RDY;
mwifiex_dbg(adapter, INTR, "info: Rx EVENT\n");
ret = mwifiex_pcie_process_event_ready(adapter);
if (ret)
return ret;
}
if (pcie_ireg & HOST_INTR_CMD_DONE) {
pcie_ireg &= ~HOST_INTR_CMD_DONE;
if (adapter->cmd_sent) {
mwifiex_dbg(adapter, INTR,
"info: CMD sent Interrupt\n");
@ -2507,75 +2517,13 @@ static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, INTR,
"info: cmd_sent=%d data_sent=%d\n",
adapter->cmd_sent, adapter->data_sent);
if (!card->msi_enable && adapter->ps_state != PS_STATE_SLEEP)
if (!card->msi_enable && !card->msix_enable &&
adapter->ps_state != PS_STATE_SLEEP)
mwifiex_pcie_enable_host_int(adapter);
return 0;
}
static int mwifiex_process_msix_int(struct mwifiex_adapter *adapter)
{
int ret;
u32 pcie_ireg;
unsigned long flags;
spin_lock_irqsave(&adapter->int_lock, flags);
/* Clear out unused interrupts */
pcie_ireg = adapter->int_status;
adapter->int_status = 0;
spin_unlock_irqrestore(&adapter->int_lock, flags);
if (pcie_ireg & HOST_INTR_DNLD_DONE) {
mwifiex_dbg(adapter, INTR,
"info: TX DNLD Done\n");
ret = mwifiex_pcie_send_data_complete(adapter);
if (ret)
return ret;
}
if (pcie_ireg & HOST_INTR_UPLD_RDY) {
mwifiex_dbg(adapter, INTR,
"info: Rx DATA\n");
ret = mwifiex_pcie_process_recv_data(adapter);
if (ret)
return ret;
}
if (pcie_ireg & HOST_INTR_EVENT_RDY) {
mwifiex_dbg(adapter, INTR,
"info: Rx EVENT\n");
ret = mwifiex_pcie_process_event_ready(adapter);
if (ret)
return ret;
}
if (pcie_ireg & HOST_INTR_CMD_DONE) {
if (adapter->cmd_sent) {
mwifiex_dbg(adapter, INTR,
"info: CMD sent Interrupt\n");
adapter->cmd_sent = false;
}
/* Handle command response */
ret = mwifiex_pcie_process_cmd_complete(adapter);
if (ret)
return ret;
}
mwifiex_dbg(adapter, INTR,
"info: cmd_sent=%d data_sent=%d\n",
adapter->cmd_sent, adapter->data_sent);
return 0;
}
static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
if (card->msix_enable)
return mwifiex_process_msix_int(adapter);
else
return mwifiex_process_pcie_int(adapter);
}
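The deleted dispatcher shows why the merge was safe: the MSI-X copy duplicated the status-bit handling and differed only in register housekeeping, so one handler with a condition suffices. A standalone sketch of the consolidated shape (bit names invented for the example):

    #include <stdio.h>

    #define INT_TX_DONE  0x1u
    #define INT_RX_READY 0x2u

    static void process_int_status(unsigned int status, int msi_like)
    {
        if (status & INT_TX_DONE)
            puts("tx complete");
        if (status & INT_RX_READY)
            puts("rx data");
        if (!msi_like)               /* legacy INTx needs re-arming */
            puts("re-enable host interrupts");
    }

    int main(void)
    {
        process_int_status(INT_TX_DONE | INT_RX_READY, 1);
        return 0;
    }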
/*
* This function downloads data from driver to card.
*
@ -2934,7 +2882,6 @@ static void mwifiex_pcie_free_buffers(struct mwifiex_adapter *adapter)
mwifiex_pcie_delete_evtbd_ring(adapter);
mwifiex_pcie_delete_rxbd_ring(adapter);
mwifiex_pcie_delete_txbd_ring(adapter);
card->cmdrsp_buf = NULL;
}
/*
@ -3036,15 +2983,14 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter)
"Failed to write driver not-ready signature\n");
}
mwifiex_pcie_free_buffers(adapter);
pci_disable_device(pdev);
if (pdev) {
pci_iounmap(pdev, card->pci_mmap);
pci_iounmap(pdev, card->pci_mmap1);
pci_disable_device(pdev);
pci_release_region(pdev, 2);
pci_release_region(pdev, 0);
}
pci_iounmap(pdev, card->pci_mmap);
pci_iounmap(pdev, card->pci_mmap1);
pci_release_region(pdev, 2);
pci_release_region(pdev, 0);
mwifiex_pcie_free_buffers(adapter);
}
static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter)
@ -3220,7 +3166,6 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
int ret;
struct pci_dev *pdev = card->dev;
/* tx_buf_size might be changed to 3584 by firmware during
@ -3228,11 +3173,9 @@ static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter)
*/
adapter->tx_buf_size = card->pcie.tx_buf_size;
ret = mwifiex_pcie_alloc_buffers(adapter);
if (!ret)
return;
mwifiex_pcie_alloc_buffers(adapter);
pci_iounmap(pdev, card->pci_mmap1);
pci_set_master(pdev);
}
/* This function cleans up the PCI-E host memory space. */
@ -3240,10 +3183,13 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
struct pci_dev *pdev = card->dev;
if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000))
mwifiex_dbg(adapter, ERROR, "Failed to write driver not-ready signature\n");
pci_clear_master(pdev);
adapter->seq_num = 0;
mwifiex_pcie_free_buffers(adapter);


@ -1534,8 +1534,7 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
list_del(&cmd_node->list);
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
flags);
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
true);
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node);
queue_work(adapter->workqueue, &adapter->main_work);
/* Perform internal scan synchronously */
@ -2033,7 +2032,7 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
struct cmd_ctrl_node, list);
list_del(&cmd_node->list);
spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node);
}
return;
@ -2492,6 +2491,12 @@ mwifiex_update_chan_statistics(struct mwifiex_private *priv,
sizeof(struct mwifiex_chan_stats);
for (i = 0 ; i < num_chan; i++) {
if (adapter->survey_idx >= adapter->num_in_chan_stats) {
mwifiex_dbg(adapter, WARN,
"FW reported too many channel results (max %d)\n",
adapter->num_in_chan_stats);
return;
}
chan_stats.chan_num = fw_chan_stats->chan_num;
chan_stats.bandcfg = fw_chan_stats->bandcfg;
chan_stats.flags = fw_chan_stats->flags;
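The guard added in the hunk above is the standard defense when a length or count comes from the device: clamp it against what the host actually allocated before indexing. A standalone sketch:

    #include <stdio.h>

    #define TABLE_MAX 4u

    int main(void)
    {
        unsigned int fw_reported = 7;    /* untrusted device-side count */
        unsigned int idx;

        for (idx = 0; idx < fw_reported; idx++) {
            if (idx >= TABLE_MAX) {
                fprintf(stderr,
                        "fw reported too many results (max %u)\n",
                        TABLE_MAX);
                break;                   /* never index past the table */
            }
            printf("consume entry %u\n", idx);
        }
        return 0;
    }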


@ -390,7 +390,8 @@ mwifiex_sdio_remove(struct sdio_func *func)
mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num);
ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat);
if (firmware_stat == FIRMWARE_READY_SDIO && !adapter->mfg_mode) {
if (!ret && firmware_stat == FIRMWARE_READY_SDIO &&
!adapter->mfg_mode) {
mwifiex_deauthenticate_all(adapter);
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
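The added '!ret &&' closes a classic hole: an output parameter is only meaningful when the call that fills it succeeded. A standalone sketch of the bug class:

    #include <stdio.h>

    static int read_fw_status(unsigned int *stat)
    {
        return -1;                   /* failure: *stat never written */
    }

    int main(void)
    {
        unsigned int stat;           /* deliberately uninitialized */
        int ret = read_fw_status(&stat);

        /* checking ret first means stat is never read on failure */
        if (!ret && stat == 0xfedcu)
            puts("firmware ready");
        else
            puts("read failed or firmware not ready");
        return 0;
    }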


@ -189,9 +189,7 @@ static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv,
if (pbitmap_rates != NULL) {
rate_scope->hr_dsss_rate_bitmap = cpu_to_le16(pbitmap_rates[0]);
rate_scope->ofdm_rate_bitmap = cpu_to_le16(pbitmap_rates[1]);
for (i = 0;
i < sizeof(rate_scope->ht_mcs_rate_bitmap) / sizeof(u16);
i++)
for (i = 0; i < ARRAY_SIZE(rate_scope->ht_mcs_rate_bitmap); i++)
rate_scope->ht_mcs_rate_bitmap[i] =
cpu_to_le16(pbitmap_rates[2 + i]);
if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) {
@ -206,9 +204,7 @@ static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv,
cpu_to_le16(priv->bitmap_rates[0]);
rate_scope->ofdm_rate_bitmap =
cpu_to_le16(priv->bitmap_rates[1]);
for (i = 0;
i < sizeof(rate_scope->ht_mcs_rate_bitmap) / sizeof(u16);
i++)
for (i = 0; i < ARRAY_SIZE(rate_scope->ht_mcs_rate_bitmap); i++)
rate_scope->ht_mcs_rate_bitmap[i] =
cpu_to_le16(priv->bitmap_rates[2 + i]);
if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) {
@ -1755,7 +1751,7 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
struct mwifiex_ie_types_vhtcap *vht_capab;
struct mwifiex_ie_types_aid *aid;
struct mwifiex_ie_types_tdls_idle_timeout *timeout;
u8 *pos, qos_info;
u8 *pos;
u16 config_len = 0;
struct station_parameters *params = priv->sta_params;
@ -1789,12 +1785,11 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
put_unaligned_le16(params->capability, pos);
config_len += sizeof(params->capability);
qos_info = params->uapsd_queues | (params->max_sp << 5);
wmm_qos_info = (struct mwifiex_ie_types_qos_info *)(pos +
config_len);
wmm_qos_info = (void *)(pos + config_len);
wmm_qos_info->header.type = cpu_to_le16(WLAN_EID_QOS_CAPA);
wmm_qos_info->header.len = cpu_to_le16(sizeof(qos_info));
wmm_qos_info->qos_info = qos_info;
wmm_qos_info->header.len =
cpu_to_le16(sizeof(wmm_qos_info->qos_info));
wmm_qos_info->qos_info = 0;
config_len += sizeof(struct mwifiex_ie_types_qos_info);
if (params->ht_capa) {
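
Both rate-bitmap loops above switch from an open-coded sizeof division to ARRAY_SIZE(). The division silently yields the wrong count if the element type ever stops being u16, while ARRAY_SIZE() derives the count from the array itself and fails to build when handed a plain pointer. A standalone illustration, with made-up array lengths:

	#include <linux/kernel.h>	/* ARRAY_SIZE() */

	__le16 ht_mcs_rate_bitmap[8];	/* illustrative length */
	u16 bitmap_rates[2 + 8] = { 0 };
	size_t i;

	/* the bound tracks the array even if its length or element
	 * type changes later; sizeof(arr)/sizeof(u16) does not, and
	 * neither expression produces a warning when they drift apart
	 */
	for (i = 0; i < ARRAY_SIZE(ht_mcs_rate_bitmap); i++)
		ht_mcs_rate_bitmap[i] = cpu_to_le16(bitmap_rates[2 + i]);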

View file

@ -298,9 +298,8 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
priv->bitmap_rates[1] =
le16_to_cpu(rate_scope->ofdm_rate_bitmap);
for (i = 0;
i <
sizeof(rate_scope->ht_mcs_rate_bitmap) /
sizeof(u16); i++)
i < ARRAY_SIZE(rate_scope->ht_mcs_rate_bitmap);
i++)
priv->bitmap_rates[2 + i] =
le16_to_cpu(rate_scope->
ht_mcs_rate_bitmap[i]);
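
Same ARRAY_SIZE() conversion on the response path, where the byte-order conversion flips direction: rate_scope is parsed out of a little-endian firmware response, so each element goes through le16_to_cpu() before landing in host-order priv->bitmap_rates[]. The new loop, reflowed here purely for readability:

	for (i = 0; i < ARRAY_SIZE(rate_scope->ht_mcs_rate_bitmap); i++)
		priv->bitmap_rates[2 + i] =
			le16_to_cpu(rate_scope->ht_mcs_rate_bitmap[i]);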

View file

@ -654,9 +654,9 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv,
*/
int mwifiex_disable_auto_ds(struct mwifiex_private *priv)
{
struct mwifiex_ds_auto_ds auto_ds;
auto_ds.auto_ds = DEEP_SLEEP_OFF;
struct mwifiex_ds_auto_ds auto_ds = {
.auto_ds = DEEP_SLEEP_OFF,
};
return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
DIS_AUTO_PS, BITMAP_AUTO_DS, &auto_ds, true);
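
The designated initializer is more than style: C guarantees that every member not named in the initializer list is zeroed, so the on-stack struct cannot leak stack garbage into the command payload even if mwifiex_ds_auto_ds later grows more fields. It is equivalent to, but more concise than, the explicit pattern below (sketch only):

	struct mwifiex_ds_auto_ds auto_ds;

	memset(&auto_ds, 0, sizeof(auto_ds));
	auto_ds.auto_ds = DEEP_SLEEP_OFF;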
@ -811,8 +811,8 @@ int mwifiex_drv_set_power(struct mwifiex_private *priv, u32 *ps_mode)
* is checked to determine WPA version. If buffer length is zero, the existing
* WPA IE is reset.
*/
static int mwifiex_set_wpa_ie_helper(struct mwifiex_private *priv,
u8 *ie_data_ptr, u16 ie_len)
static int mwifiex_set_wpa_ie(struct mwifiex_private *priv,
u8 *ie_data_ptr, u16 ie_len)
{
if (ie_len) {
if (ie_len > sizeof(priv->wpa_ie)) {
@ -1351,101 +1351,96 @@ static int
mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
u16 ie_len)
{
int ret = 0;
struct ieee_types_vendor_header *pvendor_ie;
const u8 wpa_oui[] = { 0x00, 0x50, 0xf2, 0x01 };
const u8 wps_oui[] = { 0x00, 0x50, 0xf2, 0x04 };
u16 unparsed_len = ie_len;
int find_wpa_ie = 0;
u16 unparsed_len = ie_len, cur_ie_len;
/* If the passed length is zero, reset the buffer */
if (!ie_len) {
priv->gen_ie_buf_len = 0;
priv->wps.session_enable = false;
return 0;
} else if (!ie_data_ptr) {
} else if (!ie_data_ptr ||
ie_len <= sizeof(struct ieee_types_header)) {
return -1;
}
pvendor_ie = (struct ieee_types_vendor_header *) ie_data_ptr;
while (pvendor_ie) {
if (pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC) {
/* Test to see if it is a WPA IE, if not, then it is a
* gen IE
*/
if (!memcmp(pvendor_ie->oui, wpa_oui,
sizeof(wpa_oui))) {
find_wpa_ie = 1;
break;
}
/* Test to see if it is a WPS IE, if so, enable
* wps session flag
*/
if (!memcmp(pvendor_ie->oui, wps_oui,
sizeof(wps_oui))) {
priv->wps.session_enable = true;
mwifiex_dbg(priv->adapter, MSG,
"info: WPS Session Enabled.\n");
ret = mwifiex_set_wps_ie(priv,
(u8 *)pvendor_ie,
unparsed_len);
}
}
cur_ie_len = pvendor_ie->len + sizeof(struct ieee_types_header);
if (pvendor_ie->element_id == WLAN_EID_RSN) {
find_wpa_ie = 1;
break;
/* IE is a WPA/WPA2 IE so call set_wpa function */
mwifiex_set_wpa_ie(priv, (u8 *)pvendor_ie, cur_ie_len);
priv->wps.session_enable = false;
goto next_ie;
}
if (pvendor_ie->element_id == WLAN_EID_BSS_AC_ACCESS_DELAY) {
/* IE is a WAPI IE so call set_wapi function */
ret = mwifiex_set_wapi_ie(priv, (u8 *)pvendor_ie,
unparsed_len);
return ret;
/* IE is a WAPI IE so call set_wapi function */
mwifiex_set_wapi_ie(priv, (u8 *)pvendor_ie,
cur_ie_len);
goto next_ie;
}
unparsed_len -= (pvendor_ie->len +
sizeof(struct ieee_types_header));
if (pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC) {
/* Test to see if it is a WPA IE, if not, then
* it is a gen IE
*/
if (!memcmp(pvendor_ie->oui, wpa_oui,
sizeof(wpa_oui))) {
/* IE is a WPA/WPA2 IE so call set_wpa function
*/
mwifiex_set_wpa_ie(priv, (u8 *)pvendor_ie,
cur_ie_len);
priv->wps.session_enable = false;
goto next_ie;
}
if (!memcmp(pvendor_ie->oui, wps_oui,
sizeof(wps_oui))) {
/* Test to see if it is a WPS IE,
* if so, enable wps session flag
*/
priv->wps.session_enable = true;
mwifiex_dbg(priv->adapter, MSG,
"WPS Session Enabled.\n");
mwifiex_set_wps_ie(priv, (u8 *)pvendor_ie,
cur_ie_len);
goto next_ie;
}
}
/* Saved in gen_ie, such as P2P IE, etc. */
/* Verify that the passed length is not larger than the
* available space remaining in the buffer
*/
if (cur_ie_len <
(sizeof(priv->gen_ie_buf) - priv->gen_ie_buf_len)) {
/* Append the passed data to the end
* of the genIeBuffer
*/
memcpy(priv->gen_ie_buf + priv->gen_ie_buf_len,
(u8 *)pvendor_ie, cur_ie_len);
/* Increment the stored buffer length by the
* size passed
*/
priv->gen_ie_buf_len += cur_ie_len;
}
next_ie:
unparsed_len -= cur_ie_len;
if (unparsed_len <= sizeof(struct ieee_types_header))
pvendor_ie = NULL;
else
pvendor_ie = (struct ieee_types_vendor_header *)
(((u8 *)pvendor_ie) + pvendor_ie->len +
sizeof(struct ieee_types_header));
(((u8 *)pvendor_ie) + cur_ie_len);
}
if (find_wpa_ie) {
/* IE is a WPA/WPA2 IE so call set_wpa function */
ret = mwifiex_set_wpa_ie_helper(priv, (u8 *)pvendor_ie,
unparsed_len);
priv->wps.session_enable = false;
return ret;
}
/*
* Verify that the passed length is not larger than the
* available space remaining in the buffer
*/
if (ie_len < (sizeof(priv->gen_ie_buf) - priv->gen_ie_buf_len)) {
/* Append the passed data to the end of the
genIeBuffer */
memcpy(priv->gen_ie_buf + priv->gen_ie_buf_len, ie_data_ptr,
ie_len);
/* Increment the stored buffer length by the
size passed */
priv->gen_ie_buf_len += ie_len;
} else {
/* Passed data does not fit in the remaining
buffer space */
ret = -1;
}
/* Return 0, or -1 for error case */
return ret;
return 0;
}
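
The restructured function is a standard 802.11 TLV walk: each element starts with a one-byte id and a one-byte length, the handler dispatches on the id (RSN, WAPI, vendor-specific WPA/WPS, or the generic buffer), and the cursor advances by the *current* element's total size instead of a stale value. Its skeleton, simplified, with an explicit truncation check added and hypothetical type and function names:

	struct sketch_ie_hdr {
		u8 id;
		u8 len;
	} __packed;

	static void sketch_walk_ies(const u8 *buf, size_t buf_len)
	{
		while (buf_len > sizeof(struct sketch_ie_hdr)) {
			const struct sketch_ie_hdr *ie = (const void *)buf;
			size_t cur_len = sizeof(*ie) + ie->len;

			if (cur_len > buf_len)
				break;	/* element runs past the buffer */

			/* dispatch on ie->id here, mirroring the driver's
			 * WLAN_EID_RSN / WLAN_EID_VENDOR_SPECIFIC cases
			 */

			buf += cur_len;
			buf_len -= cur_len;
		}
	}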
/*

Some files were not shown because too many files changed in this diff.