Merge tag 'wireless-next-2022-03-11' of git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next

Johannes Berg says:

====================
brcmfmac
 * add BCM43454/6 support

rtw89
 * add support for 160 MHz channels and 6 GHz band
 * hardware scan support

iwlwifi
 * support UHB TAS enablement via BIOS
 * remove a bunch of W=1 warnings
 * add support for channel switch offload
 * support 32 Rx AMPDU sessions in newer devices
 * add support for a couple of new devices
 * add support for band disablement via BIOS

mt76
 * mt7915 thermal management improvements
 * SAR support for more mt76 drivers
 * mt7986 wmac support on mt7915

ath11k
 * debugfs interface to configure firmware debug log level
 * debugfs interface to test Target Wake Time (TWT)
 * provide 802.11ax High Efficiency (HE) data via radiotap

ath9k
 * use hw_random API instead of directly dumping into random.c

wcn36xx
 * fix wcn3660 to work on 5 GHz band

ath6kl
 * add device ID for WLU5150-D81

cfg80211/mac80211
 * initial EHT (from 802.11be) support
   (EHT rates, 320 MHz, larger block-ack)
 * support disconnect on HW restart

* tag 'wireless-next-2022-03-11' of git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next: (247 commits)
  mac80211: Add support to trigger sta disconnect on hardware restart
  mac80211: fix potential double free on mesh join
  mac80211: correct legacy rates check in ieee80211_calc_rx_airtime
  nl80211: fix typo of NL80211_IF_TYPE_OCB in documentation
  mac80211: Use GFP_KERNEL instead of GFP_ATOMIC when possible
  mac80211: replace DEFINE_SIMPLE_ATTRIBUTE with DEFINE_DEBUGFS_ATTRIBUTE
  rtw89: 8852c: process logic efuse map
  rtw89: 8852c: process efuse of phycap
  rtw89: support DAV efuse reading operation
  rtw89: 8852c: add chip::dle_mem
  rtw89: add page_regs to handle v1 chips
  rtw89: add chip_info::{h2c,c2h}_reg to support more chips
  rtw89: add hci_func_en_addr to support variant generation
  rtw89: add power_{on/off}_func
  rtw89: read chip version depends on chip ID
  rtw89: pci: use a struct to describe all registers address related to DMA channel
  rtw89: pci: add V1 of PCI channel address
  rtw89: pci: add struct rtw89_pci_info
  rtw89: 8852c: add 8852c empty files
  MAINTAINERS: add devicetree bindings entry for mt76
  ...

====================

Link: https://lore.kernel.org/r/20220311124029.213470-1-johannes@sipsolutions.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Committed by Jakub Kicinski on 2022-03-11 13:00:16 -08:00
Parent 59d5923536 7d352ccf1e
Commit 0b3660695e
231 changed files, 13605 additions and 3800 deletions


@ -18,7 +18,7 @@ description: |
wireless device. The node is expected to be specified as a child
node of the PCI controller to which the wireless chip is connected.
Alternatively, it can specify the wireless part of the MT7628/MT7688
or MT7622 SoC.
or MT7622/MT7986 SoC.
allOf:
- $ref: ieee80211.yaml#
@ -29,9 +29,13 @@ properties:
- mediatek,mt76
- mediatek,mt7628-wmac
- mediatek,mt7622-wmac
- mediatek,mt7986-wmac
reg:
maxItems: 1
minItems: 1
maxItems: 3
description:
MT7986 should contain 3 regions consys, dcm, and sku, in this order.
interrupts:
maxItems: 1
@ -39,6 +43,17 @@ properties:
power-domains:
maxItems: 1
memory-region:
maxItems: 1
resets:
maxItems: 1
description:
Specify the consys reset for mt7986.
reset-name:
const: consys
mediatek,infracfg:
$ref: /schemas/types.yaml#/definitions/phandle
description:
@ -174,7 +189,7 @@ required:
- compatible
- reg
additionalProperties: false
unevaluatedProperties: false
examples:
- |
@ -240,3 +255,15 @@ examples:
power-domains = <&scpsys 3>;
};
- |
wifi@18000000 {
compatible = "mediatek,mt7986-wmac";
resets = <&watchdog 23>;
reset-names = "consys";
reg = <0x18000000 0x1000000>,
<0x10003000 0x1000>,
<0x11d10000 0x1000>;
interrupts = <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>;
memory-region = <&wmcpu_emi>;
};


@ -3821,9 +3821,6 @@ BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
M: Arend van Spriel <aspriel@gmail.com>
M: Franky Lin <franky.lin@broadcom.com>
M: Hante Meuleman <hante.meuleman@broadcom.com>
M: Chi-hsien Lin <chi-hsien.lin@infineon.com>
M: Wright Feng <wright.feng@infineon.com>
M: Chung-hsien Hsu <chung-hsien.hsu@infineon.com>
L: linux-wireless@vger.kernel.org
L: brcm80211-dev-list.pdl@broadcom.com
L: SHA-cyfmac-dev-list@infineon.com
@ -12173,6 +12170,7 @@ R: Shayne Chen <shayne.chen@mediatek.com>
R: Sean Wang <sean.wang@mediatek.com>
L: linux-wireless@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml
F: drivers/net/wireless/mediatek/mt76/
MEDIATEK MT7601U WIRELESS LAN DRIVER


@ -303,7 +303,7 @@ u32 bcma_chipco_gpio_outen(struct bcma_drv_cc *cc, u32 mask, u32 value)
EXPORT_SYMBOL_GPL(bcma_chipco_gpio_outen);
/*
* If the bit is set to 0, chipcommon controlls this GPIO,
* If the bit is set to 0, chipcommon controls this GPIO,
* if the bit is set to 1, it is used by some part of the chip and not our code.
*/
u32 bcma_chipco_gpio_control(struct bcma_drv_cc *cc, u32 mask, u32 value)


@ -206,7 +206,7 @@ static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
usleep_range(2000, 2500);
}
/* Disable to allow reading SPROM. Don't know the adventages of enabling it. */
/* Disable to allow reading SPROM. Don't know the advantages of enabling it. */
void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable)
{
struct bcma_bus *bus = cc->core->bus;
@ -234,7 +234,7 @@ static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
switch (bus->chipinfo.id) {
case BCMA_CHIP_ID_BCM4313:
/*
* enable 12 mA drive strenth for 4313 and set chipControl
* enable 12 mA drive strength for 4313 and set chipControl
* register bit 1
*/
bcma_chipco_chipctl_maskset(cc, 0,
@ -249,7 +249,7 @@ static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
case BCMA_CHIP_ID_BCM43224:
case BCMA_CHIP_ID_BCM43421:
/*
* enable 12 mA drive strenth for 43224 and set chipControl
* enable 12 mA drive strength for 43224 and set chipControl
* register bit 15
*/
if (bus->chipinfo.rev == 0) {


@ -181,7 +181,6 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
chip->set = bcma_gpio_set_value;
chip->direction_input = bcma_gpio_direction_input;
chip->direction_output = bcma_gpio_direction_output;
chip->owner = THIS_MODULE;
chip->parent = bus->dev;
#if IS_BUILTIN(CONFIG_OF)
chip->of_node = cc->core->dev.of_node;


@ -61,7 +61,7 @@ static u32 bcma_get_cfgspace_addr(struct bcma_drv_pci *pc, unsigned int dev,
{
u32 addr = 0;
/* Issue config commands only when the data link is up (atleast
/* Issue config commands only when the data link is up (at least
* one external pcie device is present).
*/
if (dev >= 2 || !(bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_LSREG)
@ -295,7 +295,7 @@ static u8 bcma_find_pci_capability(struct bcma_drv_pci *pc, unsigned int dev,
if (cap_ptr == 0x00)
return cap_ptr;
/* loop thr'u the capability list and see if the requested capabilty
/* loop through the capability list and see if the requested capability
* exists */
bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id, sizeof(u8));
while (cap_id != req_cap_id) {
@ -317,7 +317,7 @@ static u8 bcma_find_pci_capability(struct bcma_drv_pci *pc, unsigned int dev,
*buflen = 0;
/* copy the cpability data excluding cap ID and next ptr */
/* copy the capability data excluding cap ID and next ptr */
cap_data = cap_ptr + 2;
if ((bufsize + cap_data) > PCI_CONFIG_SPACE_SIZE)
bufsize = PCI_CONFIG_SPACE_SIZE - cap_data;


@ -293,7 +293,7 @@ static int bcma_register_devices(struct bcma_bus *bus)
int err;
list_for_each_entry(core, &bus->cores, list) {
/* We support that cores ourself */
/* We support that core ourselves */
switch (core->id.id) {
case BCMA_CORE_4706_CHIPCOMMON:
case BCMA_CORE_CHIPCOMMON:
@ -369,7 +369,7 @@ void bcma_unregister_cores(struct bcma_bus *bus)
if (bus->hosttype == BCMA_HOSTTYPE_SOC)
platform_device_unregister(bus->drv_cc.watchdog);
/* Now noone uses internally-handled cores, we can free them */
/* Now no one uses internally-handled cores, we can free them */
list_for_each_entry_safe(core, tmp, &bus->cores, list) {
list_del(&core->list);
put_device(&core->dev);


@ -28,7 +28,7 @@ static int(*get_fallback_sprom)(struct bcma_bus *dev, struct ssb_sprom *out);
* callback handler which fills the SPROM data structure. The fallback is
* used for PCI based BCMA devices, where no valid SPROM can be found
* in the shadow registers and to provide the SPROM for SoCs where BCMA is
* to controll the system bus.
* to control the system bus.
*
* This function is useful for weird architectures that have a half-assed
* BCMA device hardwired to their PCI bus.
@ -281,7 +281,7 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
SPEX(alpha2[0], SSB_SPROM8_CCODE, 0xff00, 8);
SPEX(alpha2[1], SSB_SPROM8_CCODE, 0x00ff, 0);
/* Extract cores power info info */
/* Extract core's power info */
for (i = 0; i < ARRAY_SIZE(pwr_info_offset); i++) {
o = pwr_info_offset[i];
SPEX(core_pwr_info[i].itssi_2g, o + SSB_SROM8_2G_MAXP_ITSSI,


@ -429,7 +429,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
RX_MSDU_END_INFO0_LAST_MSDU;
/* FIXME: why are we skipping the first part of the rx_desc? */
trace_ath10k_htt_rx_desc(ar, rx_desc + sizeof(u32),
trace_ath10k_htt_rx_desc(ar, (void *)rx_desc + sizeof(u32),
hw->rx_desc_ops->rx_desc_size - sizeof(u32));
if (last_msdu)


@ -1551,11 +1551,11 @@ static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size)
node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (node) {
ret = of_address_to_resource(node, 0, &r);
of_node_put(node);
if (ret) {
dev_err(dev, "failed to resolve msa fixed region\n");
return ret;
}
of_node_put(node);
ar->msa.paddr = r.start;
ar->msa.mem_size = resource_size(&r);


@ -17,7 +17,7 @@ struct ath10k_fw_file;
struct ath10k_swap_code_seg_tlv {
__le32 address;
__le32 length;
u8 data[0];
u8 data[];
} __packed;
struct ath10k_swap_code_seg_tail {


@ -391,6 +391,8 @@ static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
for (j = 0; j < irq_grp->num_irq; j++)
free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
netif_napi_del(&irq_grp->napi);
}
}
@ -466,7 +468,7 @@ static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
return IRQ_HANDLED;
}
static int ath11k_ahb_ext_irq_config(struct ath11k_base *ab)
static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
{
struct ath11k_hw_params *hw = &ab->hw_params;
int i, j;
@ -574,7 +576,7 @@ static int ath11k_ahb_config_irq(struct ath11k_base *ab)
}
/* Configure external interrupts */
ret = ath11k_ahb_ext_irq_config(ab);
ret = ath11k_ahb_config_ext_irq(ab);
return ret;
}


@ -145,7 +145,7 @@ struct ath11k_ce_ring {
u32 hal_ring_id;
/* keep last */
struct sk_buff *skb[0];
struct sk_buff *skb[];
};
struct ath11k_ce_pipe {


@ -99,6 +99,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_rssi_stats = false,
.fw_wmi_diag_event = false,
.current_cc_support = false,
.dbr_debug_support = true,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@ -164,6 +165,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_rssi_stats = false,
.fw_wmi_diag_event = false,
.current_cc_support = false,
.dbr_debug_support = true,
},
{
.name = "qca6390 hw2.0",
@ -228,6 +230,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_rssi_stats = true,
.fw_wmi_diag_event = true,
.current_cc_support = true,
.dbr_debug_support = false,
},
{
.name = "qcn9074 hw1.0",
@ -292,6 +295,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_rssi_stats = false,
.fw_wmi_diag_event = false,
.current_cc_support = false,
.dbr_debug_support = true,
},
{
.name = "wcn6855 hw2.0",
@ -356,6 +360,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_rssi_stats = true,
.fw_wmi_diag_event = true,
.current_cc_support = true,
.dbr_debug_support = false,
},
{
.name = "wcn6855 hw2.1",
@ -419,6 +424,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_rssi_stats = true,
.fw_wmi_diag_event = true,
.current_cc_support = true,
.dbr_debug_support = false,
},
};
@ -1411,7 +1417,6 @@ EXPORT_SYMBOL(ath11k_core_deinit);
void ath11k_core_free(struct ath11k_base *ab)
{
flush_workqueue(ab->workqueue);
destroy_workqueue(ab->workqueue);
kfree(ab);


@ -263,6 +263,9 @@ struct ath11k_vif {
bool bcca_zero_sent;
bool do_not_send_tmpl;
struct ieee80211_chanctx_conf chanctx;
#ifdef CONFIG_ATH11K_DEBUGFS
struct dentry *debugfs_twt;
#endif /* CONFIG_ATH11K_DEBUGFS */
};
struct ath11k_vif_iter {
@ -441,6 +444,8 @@ struct ath11k_dbg_htt_stats {
spinlock_t lock;
};
#define MAX_MODULE_ID_BITMAP_WORDS 16
struct ath11k_debug {
struct dentry *debugfs_pdev;
struct ath11k_dbg_htt_stats htt_stats;
@ -454,6 +459,9 @@ struct ath11k_debug {
u32 pktlog_peer_valid;
u8 pktlog_peer_addr[ETH_ALEN];
u32 rx_filter;
u32 mem_offset;
u32 module_id_bitmap[MAX_MODULE_ID_BITMAP_WORDS];
struct ath11k_debug_dbr *dbr_debug[WMI_DIRECT_BUF_MAX];
};
struct ath11k_per_peer_tx_stats {
@ -604,6 +612,7 @@ struct ath11k {
bool pending_11d;
bool regdom_set_by_user;
int hw_rate_code;
u8 twt_enabled;
};
struct ath11k_band_cap {
@ -807,7 +816,7 @@ struct ath11k_base {
} id;
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
u8 drv_priv[] __aligned(sizeof(void *));
};
struct ath11k_fw_stats_pdev {


@ -37,7 +37,8 @@ static void ath11k_dbring_fill_magic_value(struct ath11k *ar,
static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
struct ath11k_dbring *ring,
struct ath11k_dbring_element *buff)
struct ath11k_dbring_element *buff,
enum wmi_direct_buffer_module id)
{
struct ath11k_base *ab = ar->ab;
struct hal_srng *srng;
@ -84,6 +85,7 @@ static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);
ath11k_debugfs_add_dbring_entry(ar, id, ATH11K_DBG_DBR_EVENT_REPLENISH, srng);
ath11k_hal_srng_access_end(ab, srng);
return 0;
@ -101,7 +103,8 @@ err:
}
static int ath11k_dbring_fill_bufs(struct ath11k *ar,
struct ath11k_dbring *ring)
struct ath11k_dbring *ring,
enum wmi_direct_buffer_module id)
{
struct ath11k_dbring_element *buff;
struct hal_srng *srng;
@ -129,7 +132,7 @@ static int ath11k_dbring_fill_bufs(struct ath11k *ar,
kfree(buff);
break;
}
ret = ath11k_dbring_bufs_replenish(ar, ring, buff);
ret = ath11k_dbring_bufs_replenish(ar, ring, buff, id);
if (ret) {
ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
num_remain, req_entries);
@ -210,7 +213,7 @@ int ath11k_dbring_buf_setup(struct ath11k *ar,
ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng);
ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng);
ret = ath11k_dbring_fill_bufs(ar, ring);
ret = ath11k_dbring_fill_bufs(ar, ring, db_cap->id);
return ret;
}
@ -270,7 +273,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
struct ath11k_buffer_addr desc;
u8 *vaddr_unalign;
u32 num_entry, num_buff_reaped;
u8 pdev_idx, rbm;
u8 pdev_idx, rbm, module_id;
u32 cookie;
int buf_id;
int size;
@ -278,6 +281,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
int ret = 0;
pdev_idx = ev->fixed.pdev_id;
module_id = ev->fixed.module_id;
if (pdev_idx >= ab->num_radios) {
ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
@ -346,6 +350,9 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
DMA_FROM_DEVICE);
ath11k_debugfs_add_dbring_entry(ar, module_id,
ATH11K_DBG_DBR_EVENT_RX, srng);
if (ring->handler) {
vaddr_unalign = buff->payload;
handler_data.data = PTR_ALIGN(vaddr_unalign,
@ -357,7 +364,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
buff->paddr = 0;
memset(buff->payload, 0, size);
ath11k_dbring_bufs_replenish(ar, ring, buff);
ath11k_dbring_bufs_replenish(ar, ring, buff, module_id);
}
spin_unlock_bh(&srng->lock);


@ -52,6 +52,45 @@ static const char *htt_bp_lmac_ring[HTT_SW_LMAC_RING_IDX_MAX] = {
"MONITOR_DEST_RING",
};
void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
enum wmi_direct_buffer_module id,
enum ath11k_dbg_dbr_event event,
struct hal_srng *srng)
{
struct ath11k_debug_dbr *dbr_debug;
struct ath11k_dbg_dbr_data *dbr_data;
struct ath11k_dbg_dbr_entry *entry;
if (id >= WMI_DIRECT_BUF_MAX || event >= ATH11K_DBG_DBR_EVENT_MAX)
return;
dbr_debug = ar->debug.dbr_debug[id];
if (!dbr_debug)
return;
if (!dbr_debug->dbr_debug_enabled)
return;
dbr_data = &dbr_debug->dbr_dbg_data;
spin_lock_bh(&dbr_data->lock);
if (dbr_data->entries) {
entry = &dbr_data->entries[dbr_data->dbr_debug_idx];
entry->hp = srng->u.src_ring.hp;
entry->tp = *srng->u.src_ring.tp_addr;
entry->timestamp = jiffies;
entry->event = event;
dbr_data->dbr_debug_idx++;
if (dbr_data->dbr_debug_idx ==
dbr_data->num_ring_debug_entries)
dbr_data->dbr_debug_idx = 0;
}
spin_unlock_bh(&dbr_data->lock);
}
static void ath11k_fw_stats_pdevs_free(struct list_head *head)
{
struct ath11k_fw_stats_pdev *i, *tmp;
@ -876,6 +915,69 @@ static const struct file_operations fops_soc_dp_stats = {
.llseek = default_llseek,
};
static ssize_t ath11k_write_fw_dbglog(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
char buf[128] = {0};
struct ath11k_fw_dbglog dbglog;
unsigned int param, mod_id_index, is_end;
u64 value;
int ret, num;
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
user_buf, count);
if (ret <= 0)
return ret;
num = sscanf(buf, "%u %llx %u %u", &param, &value, &mod_id_index, &is_end);
if (num < 2)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (param == WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP ||
param == WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP) {
if (num != 4 || mod_id_index > (MAX_MODULE_ID_BITMAP_WORDS - 1)) {
ret = -EINVAL;
goto out;
}
ar->debug.module_id_bitmap[mod_id_index] = upper_32_bits(value);
if (!is_end) {
ret = count;
goto out;
}
} else {
if (num != 2) {
ret = -EINVAL;
goto out;
}
}
dbglog.param = param;
dbglog.value = lower_32_bits(value);
ret = ath11k_wmi_fw_dbglog_cfg(ar, ar->debug.module_id_bitmap, &dbglog);
if (ret) {
ath11k_warn(ar->ab, "fw dbglog config failed from debugfs: %d\n",
ret);
goto out;
}
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_fw_dbglog = {
.write = ath11k_write_fw_dbglog,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
{
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
@ -1113,6 +1215,169 @@ static const struct file_operations fops_simulate_radar = {
.open = simple_open
};
static ssize_t ath11k_debug_dump_dbr_entries(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k_dbg_dbr_data *dbr_dbg_data = file->private_data;
static const char * const event_id_to_string[] = {"empty", "Rx", "Replenish"};
int size = ATH11K_DEBUG_DBR_ENTRIES_MAX * 100;
char *buf;
int i, ret;
int len = 0;
buf = kzalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
len += scnprintf(buf + len, size - len,
"-----------------------------------------\n");
len += scnprintf(buf + len, size - len,
"| idx | hp | tp | timestamp | event |\n");
len += scnprintf(buf + len, size - len,
"-----------------------------------------\n");
spin_lock_bh(&dbr_dbg_data->lock);
for (i = 0; i < dbr_dbg_data->num_ring_debug_entries; i++) {
len += scnprintf(buf + len, size - len,
"|%4u|%8u|%8u|%11llu|%8s|\n", i,
dbr_dbg_data->entries[i].hp,
dbr_dbg_data->entries[i].tp,
dbr_dbg_data->entries[i].timestamp,
event_id_to_string[dbr_dbg_data->entries[i].event]);
}
spin_unlock_bh(&dbr_dbg_data->lock);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
return ret;
}
static const struct file_operations fops_debug_dump_dbr_entries = {
.read = ath11k_debug_dump_dbr_entries,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static void ath11k_debugfs_dbr_dbg_destroy(struct ath11k *ar, int dbr_id)
{
struct ath11k_debug_dbr *dbr_debug;
struct ath11k_dbg_dbr_data *dbr_dbg_data;
if (!ar->debug.dbr_debug[dbr_id])
return;
dbr_debug = ar->debug.dbr_debug[dbr_id];
dbr_dbg_data = &dbr_debug->dbr_dbg_data;
debugfs_remove_recursive(dbr_debug->dbr_debugfs);
kfree(dbr_dbg_data->entries);
kfree(dbr_debug);
ar->debug.dbr_debug[dbr_id] = NULL;
}
static int ath11k_debugfs_dbr_dbg_init(struct ath11k *ar, int dbr_id)
{
struct ath11k_debug_dbr *dbr_debug;
struct ath11k_dbg_dbr_data *dbr_dbg_data;
static const char * const dbr_id_to_str[] = {"spectral", "CFR"};
if (ar->debug.dbr_debug[dbr_id])
return 0;
ar->debug.dbr_debug[dbr_id] = kzalloc(sizeof(*dbr_debug),
GFP_KERNEL);
if (!ar->debug.dbr_debug[dbr_id])
return -ENOMEM;
dbr_debug = ar->debug.dbr_debug[dbr_id];
dbr_dbg_data = &dbr_debug->dbr_dbg_data;
if (dbr_debug->dbr_debugfs)
return 0;
dbr_debug->dbr_debugfs = debugfs_create_dir(dbr_id_to_str[dbr_id],
ar->debug.debugfs_pdev);
if (IS_ERR_OR_NULL(dbr_debug->dbr_debugfs)) {
if (IS_ERR(dbr_debug->dbr_debugfs))
return PTR_ERR(dbr_debug->dbr_debugfs);
return -ENOMEM;
}
dbr_debug->dbr_debug_enabled = true;
dbr_dbg_data->num_ring_debug_entries = ATH11K_DEBUG_DBR_ENTRIES_MAX;
dbr_dbg_data->dbr_debug_idx = 0;
dbr_dbg_data->entries = kcalloc(ATH11K_DEBUG_DBR_ENTRIES_MAX,
sizeof(struct ath11k_dbg_dbr_entry),
GFP_KERNEL);
if (!dbr_dbg_data->entries)
return -ENOMEM;
spin_lock_init(&dbr_dbg_data->lock);
debugfs_create_file("dump_dbr_debug", 0444, dbr_debug->dbr_debugfs,
dbr_dbg_data, &fops_debug_dump_dbr_entries);
return 0;
}
static ssize_t ath11k_debugfs_write_enable_dbr_dbg(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
char buf[32] = {0};
u32 dbr_id, enable;
int ret;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON) {
ret = -ENETDOWN;
goto out;
}
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
if (ret < 0)
goto out;
buf[ret] = '\0';
ret = sscanf(buf, "%u %u", &dbr_id, &enable);
if (ret != 2 || dbr_id > 1 || enable > 1) {
ret = -EINVAL;
ath11k_warn(ar->ab, "usage: echo <dbr_id> <val> dbr_id:0-Spectral 1-CFR val:0-disable 1-enable\n");
goto out;
}
if (enable) {
ret = ath11k_debugfs_dbr_dbg_init(ar, dbr_id);
if (ret) {
ath11k_warn(ar->ab, "db ring module debugfs init failed: %d\n",
ret);
goto out;
}
} else {
ath11k_debugfs_dbr_dbg_destroy(ar, dbr_id);
}
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_dbr_debug = {
.write = ath11k_debugfs_write_enable_dbr_dbg,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
int ath11k_debugfs_register(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
@ -1142,6 +1407,9 @@ int ath11k_debugfs_register(struct ath11k *ar)
debugfs_create_file("pktlog_filter", 0644,
ar->debug.debugfs_pdev, ar,
&fops_pktlog_filter);
debugfs_create_file("fw_dbglog_config", 0600,
ar->debug.debugfs_pdev, ar,
&fops_fw_dbglog);
if (ar->hw->wiphy->bands[NL80211_BAND_5GHZ]) {
debugfs_create_file("dfs_simulate_radar", 0200,
@ -1152,9 +1420,250 @@ int ath11k_debugfs_register(struct ath11k *ar)
&ar->dfs_block_radar_events);
}
if (ab->hw_params.dbr_debug_support)
debugfs_create_file("enable_dbr_debug", 0200, ar->debug.debugfs_pdev,
ar, &fops_dbr_debug);
return 0;
}
void ath11k_debugfs_unregister(struct ath11k *ar)
{
struct ath11k_debug_dbr *dbr_debug;
struct ath11k_dbg_dbr_data *dbr_dbg_data;
int i;
for (i = 0; i < WMI_DIRECT_BUF_MAX; i++) {
dbr_debug = ar->debug.dbr_debug[i];
if (!dbr_debug)
continue;
dbr_dbg_data = &dbr_debug->dbr_dbg_data;
kfree(dbr_dbg_data->entries);
debugfs_remove_recursive(dbr_debug->dbr_debugfs);
kfree(dbr_debug);
ar->debug.dbr_debug[i] = NULL;
}
}
static ssize_t ath11k_write_twt_add_dialog(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ath11k_vif *arvif = file->private_data;
struct wmi_twt_add_dialog_params params = { 0 };
u8 buf[128] = {0};
int ret;
if (arvif->ar->twt_enabled == 0) {
ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
return -EOPNOTSUPP;
}
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
if (ret < 0)
return ret;
buf[ret] = '\0';
ret = sscanf(buf,
"%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u %u %u %u %u %hhu %hhu %hhu %hhu %hhu",
&params.peer_macaddr[0],
&params.peer_macaddr[1],
&params.peer_macaddr[2],
&params.peer_macaddr[3],
&params.peer_macaddr[4],
&params.peer_macaddr[5],
&params.dialog_id,
&params.wake_intvl_us,
&params.wake_intvl_mantis,
&params.wake_dura_us,
&params.sp_offset_us,
&params.twt_cmd,
&params.flag_bcast,
&params.flag_trigger,
&params.flag_flow_type,
&params.flag_protection);
if (ret != 16)
return -EINVAL;
params.vdev_id = arvif->vdev_id;
ret = ath11k_wmi_send_twt_add_dialog_cmd(arvif->ar, &params);
if (ret)
return ret;
return count;
}
static ssize_t ath11k_write_twt_del_dialog(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ath11k_vif *arvif = file->private_data;
struct wmi_twt_del_dialog_params params = { 0 };
u8 buf[64] = {0};
int ret;
if (arvif->ar->twt_enabled == 0) {
ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
return -EOPNOTSUPP;
}
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
if (ret < 0)
return ret;
buf[ret] = '\0';
ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u",
&params.peer_macaddr[0],
&params.peer_macaddr[1],
&params.peer_macaddr[2],
&params.peer_macaddr[3],
&params.peer_macaddr[4],
&params.peer_macaddr[5],
&params.dialog_id);
if (ret != 7)
return -EINVAL;
params.vdev_id = arvif->vdev_id;
ret = ath11k_wmi_send_twt_del_dialog_cmd(arvif->ar, &params);
if (ret)
return ret;
return count;
}
static ssize_t ath11k_write_twt_pause_dialog(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ath11k_vif *arvif = file->private_data;
struct wmi_twt_pause_dialog_params params = { 0 };
u8 buf[64] = {0};
int ret;
if (arvif->ar->twt_enabled == 0) {
ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
return -EOPNOTSUPP;
}
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
if (ret < 0)
return ret;
buf[ret] = '\0';
ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u",
&params.peer_macaddr[0],
&params.peer_macaddr[1],
&params.peer_macaddr[2],
&params.peer_macaddr[3],
&params.peer_macaddr[4],
&params.peer_macaddr[5],
&params.dialog_id);
if (ret != 7)
return -EINVAL;
params.vdev_id = arvif->vdev_id;
ret = ath11k_wmi_send_twt_pause_dialog_cmd(arvif->ar, &params);
if (ret)
return ret;
return count;
}
static ssize_t ath11k_write_twt_resume_dialog(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ath11k_vif *arvif = file->private_data;
struct wmi_twt_resume_dialog_params params = { 0 };
u8 buf[64] = {0};
int ret;
if (arvif->ar->twt_enabled == 0) {
ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
return -EOPNOTSUPP;
}
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
if (ret < 0)
return ret;
buf[ret] = '\0';
ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u %u %u",
&params.peer_macaddr[0],
&params.peer_macaddr[1],
&params.peer_macaddr[2],
&params.peer_macaddr[3],
&params.peer_macaddr[4],
&params.peer_macaddr[5],
&params.dialog_id,
&params.sp_offset_us,
&params.next_twt_size);
if (ret != 9)
return -EINVAL;
params.vdev_id = arvif->vdev_id;
ret = ath11k_wmi_send_twt_resume_dialog_cmd(arvif->ar, &params);
if (ret)
return ret;
return count;
}
static const struct file_operations ath11k_fops_twt_add_dialog = {
.write = ath11k_write_twt_add_dialog,
.open = simple_open
};
static const struct file_operations ath11k_fops_twt_del_dialog = {
.write = ath11k_write_twt_del_dialog,
.open = simple_open
};
static const struct file_operations ath11k_fops_twt_pause_dialog = {
.write = ath11k_write_twt_pause_dialog,
.open = simple_open
};
static const struct file_operations ath11k_fops_twt_resume_dialog = {
.write = ath11k_write_twt_resume_dialog,
.open = simple_open
};
int ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
{
if (arvif->vif->type == NL80211_IFTYPE_AP && !arvif->debugfs_twt) {
arvif->debugfs_twt = debugfs_create_dir("twt",
arvif->vif->debugfs_dir);
if (!arvif->debugfs_twt || IS_ERR(arvif->debugfs_twt)) {
ath11k_warn(arvif->ar->ab,
"failed to create directory %p\n",
arvif->debugfs_twt);
arvif->debugfs_twt = NULL;
return -1;
}
debugfs_create_file("add_dialog", 0200, arvif->debugfs_twt,
arvif, &ath11k_fops_twt_add_dialog);
debugfs_create_file("del_dialog", 0200, arvif->debugfs_twt,
arvif, &ath11k_fops_twt_del_dialog);
debugfs_create_file("pause_dialog", 0200, arvif->debugfs_twt,
arvif, &ath11k_fops_twt_pause_dialog);
debugfs_create_file("resume_dialog", 0200, arvif->debugfs_twt,
arvif, &ath11k_fops_twt_resume_dialog);
}
return 0;
}
void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
{
debugfs_remove_recursive(arvif->debugfs_twt);
arvif->debugfs_twt = NULL;
}
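
As an aside on the TWT debugfs files created above: add_dialog expects a single line whose fields mirror the 16-parameter sscanf() in ath11k_write_twt_add_dialog(). The sketch below is a hypothetical user-space helper, not part of the patch; the debugfs path and the numeric values are assumptions chosen only to match the expected format.

/* Hypothetical helper for the new ath11k TWT "add_dialog" debugfs file.
 * Path assumption: the twt/ directory is created under the vif's mac80211
 * debugfs directory (AP interfaces only); adjust phy/netdev names as needed.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/ieee80211/phy0/netdev:wlan0/twt/add_dialog";
	/* peer MAC, dialog_id, wake_intvl_us, wake_intvl_mantis, wake_dura_us,
	 * sp_offset_us, twt_cmd, bcast, trigger, flow_type, protection --
	 * placeholder values, in the order parsed by the driver.
	 */
	const char *cmd = "8c:fd:f0:01:02:03 1 102400 100 8192 0 4 0 1 0 0";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open add_dialog");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write add_dialog");
	close(fd);
	return 0;
}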


@ -47,6 +47,36 @@ enum ath11k_dbg_htt_ext_stats_type {
ATH11K_DBG_HTT_NUM_EXT_STATS,
};
#define ATH11K_DEBUG_DBR_ENTRIES_MAX 512
enum ath11k_dbg_dbr_event {
ATH11K_DBG_DBR_EVENT_INVALID,
ATH11K_DBG_DBR_EVENT_RX,
ATH11K_DBG_DBR_EVENT_REPLENISH,
ATH11K_DBG_DBR_EVENT_MAX,
};
struct ath11k_dbg_dbr_entry {
u32 hp;
u32 tp;
u64 timestamp;
enum ath11k_dbg_dbr_event event;
};
struct ath11k_dbg_dbr_data {
/* protects ath11k_db_ring_debug data */
spinlock_t lock;
struct ath11k_dbg_dbr_entry *entries;
u32 dbr_debug_idx;
u32 num_ring_debug_entries;
};
struct ath11k_debug_dbr {
struct ath11k_dbg_dbr_data dbr_dbg_data;
struct dentry *dbr_debugfs;
bool dbr_debug_enabled;
};
struct debug_htt_stats_req {
bool done;
u8 pdev_id;
@ -88,6 +118,7 @@ enum ath11k_pktlog_mode {
};
enum ath11k_pktlog_enum {
ATH11K_PKTLOG_TYPE_INVALID = 0,
ATH11K_PKTLOG_TYPE_TX_CTRL = 1,
ATH11K_PKTLOG_TYPE_TX_STAT = 2,
ATH11K_PKTLOG_TYPE_TX_MSDU_ID = 3,
@ -107,6 +138,130 @@ enum ath11k_dbg_aggr_mode {
ATH11K_DBG_AGGR_MODE_MAX,
};
enum fw_dbglog_wlan_module_id {
WLAN_MODULE_ID_MIN = 0,
WLAN_MODULE_INF = WLAN_MODULE_ID_MIN,
WLAN_MODULE_WMI,
WLAN_MODULE_STA_PWRSAVE,
WLAN_MODULE_WHAL,
WLAN_MODULE_COEX,
WLAN_MODULE_ROAM,
WLAN_MODULE_RESMGR_CHAN_MANAGER,
WLAN_MODULE_RESMGR,
WLAN_MODULE_VDEV_MGR,
WLAN_MODULE_SCAN,
WLAN_MODULE_RATECTRL,
WLAN_MODULE_AP_PWRSAVE,
WLAN_MODULE_BLOCKACK,
WLAN_MODULE_MGMT_TXRX,
WLAN_MODULE_DATA_TXRX,
WLAN_MODULE_HTT,
WLAN_MODULE_HOST,
WLAN_MODULE_BEACON,
WLAN_MODULE_OFFLOAD,
WLAN_MODULE_WAL,
WLAN_WAL_MODULE_DE,
WLAN_MODULE_PCIELP,
WLAN_MODULE_RTT,
WLAN_MODULE_RESOURCE,
WLAN_MODULE_DCS,
WLAN_MODULE_CACHEMGR,
WLAN_MODULE_ANI,
WLAN_MODULE_P2P,
WLAN_MODULE_CSA,
WLAN_MODULE_NLO,
WLAN_MODULE_CHATTER,
WLAN_MODULE_WOW,
WLAN_MODULE_WAL_VDEV,
WLAN_MODULE_WAL_PDEV,
WLAN_MODULE_TEST,
WLAN_MODULE_STA_SMPS,
WLAN_MODULE_SWBMISS,
WLAN_MODULE_WMMAC,
WLAN_MODULE_TDLS,
WLAN_MODULE_HB,
WLAN_MODULE_TXBF,
WLAN_MODULE_BATCH_SCAN,
WLAN_MODULE_THERMAL_MGR,
WLAN_MODULE_PHYERR_DFS,
WLAN_MODULE_RMC,
WLAN_MODULE_STATS,
WLAN_MODULE_NAN,
WLAN_MODULE_IBSS_PWRSAVE,
WLAN_MODULE_HIF_UART,
WLAN_MODULE_LPI,
WLAN_MODULE_EXTSCAN,
WLAN_MODULE_UNIT_TEST,
WLAN_MODULE_MLME,
WLAN_MODULE_SUPPL,
WLAN_MODULE_ERE,
WLAN_MODULE_OCB,
WLAN_MODULE_RSSI_MONITOR,
WLAN_MODULE_WPM,
WLAN_MODULE_CSS,
WLAN_MODULE_PPS,
WLAN_MODULE_SCAN_CH_PREDICT,
WLAN_MODULE_MAWC,
WLAN_MODULE_CMC_QMIC,
WLAN_MODULE_EGAP,
WLAN_MODULE_NAN20,
WLAN_MODULE_QBOOST,
WLAN_MODULE_P2P_LISTEN_OFFLOAD,
WLAN_MODULE_HALPHY,
WLAN_WAL_MODULE_ENQ,
WLAN_MODULE_GNSS,
WLAN_MODULE_WAL_MEM,
WLAN_MODULE_SCHED_ALGO,
WLAN_MODULE_TX,
WLAN_MODULE_RX,
WLAN_MODULE_WLM,
WLAN_MODULE_RU_ALLOCATOR,
WLAN_MODULE_11K_OFFLOAD,
WLAN_MODULE_STA_TWT,
WLAN_MODULE_AP_TWT,
WLAN_MODULE_UL_OFDMA,
WLAN_MODULE_HPCS_PULSE,
WLAN_MODULE_DTF,
WLAN_MODULE_QUIET_IE,
WLAN_MODULE_SHMEM_MGR,
WLAN_MODULE_CFIR,
WLAN_MODULE_CODE_COVER,
WLAN_MODULE_SHO,
WLAN_MODULE_MLO_MGR,
WLAN_MODULE_PEER_INIT,
WLAN_MODULE_STA_MLO_PS,
WLAN_MODULE_ID_MAX,
WLAN_MODULE_ID_INVALID = WLAN_MODULE_ID_MAX,
};
enum fw_dbglog_log_level {
ATH11K_FW_DBGLOG_ML = 0,
ATH11K_FW_DBGLOG_VERBOSE = 0,
ATH11K_FW_DBGLOG_INFO,
ATH11K_FW_DBGLOG_INFO_LVL_1,
ATH11K_FW_DBGLOG_INFO_LVL_2,
ATH11K_FW_DBGLOG_WARN,
ATH11K_FW_DBGLOG_ERR,
ATH11K_FW_DBGLOG_LVL_MAX
};
struct ath11k_fw_dbglog {
enum wmi_debug_log_param param;
union {
struct {
/* log_level values are given in enum fw_dbglog_log_level */
u16 log_level;
/* module_id values are given in enum fw_dbglog_wlan_module_id */
u16 module_id;
};
/* value is either log_level&module_id/vdev_id/vdev_id_bitmap/log_level
* according to param
*/
u32 value;
};
};
#ifdef CONFIG_ATH11K_DEBUGFS
int ath11k_debugfs_soc_create(struct ath11k_base *ab);
void ath11k_debugfs_soc_destroy(struct ath11k_base *ab);
@ -151,6 +306,13 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
return ar->debug.rx_filter;
}
int ath11k_debugfs_add_interface(struct ath11k_vif *arvif);
void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif);
void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
enum wmi_direct_buffer_module id,
enum ath11k_dbg_dbr_event event,
struct hal_srng *srng);
#else
static inline int ath11k_debugfs_soc_create(struct ath11k_base *ab)
{
@ -224,6 +386,22 @@ static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar,
return 0;
}
#endif /* CONFIG_MAC80211_DEBUGFS*/
static inline int ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
{
return 0;
}
static inline void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
{
}
static inline void
ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
enum wmi_direct_buffer_module id,
enum ath11k_dbg_dbr_event event,
struct hal_srng *srng)
{
}
#endif /* CONFIG_ATH11K_DEBUGFS*/
#endif /* _ATH11K_DEBUGFS_H_ */
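
A brief note on struct ath11k_fw_dbglog introduced in this header: the anonymous union lets the driver either fill log_level and module_id as 16-bit halves or pass a pre-built 32-bit value (vdev_id, bitmap word, log level alone), depending on param. The stand-alone sketch below only illustrates that overlay; the struct and program are illustrative, not kernel code.

/* Illustration of the log_level/module_id overlay in struct
 * ath11k_fw_dbglog; values taken from the enums above (INFO = 1,
 * WLAN_MODULE_WMI = 1). Not part of the patch.
 */
#include <stdint.h>
#include <stdio.h>

struct dbglog_value {
	union {
		struct {
			uint16_t log_level;	/* enum fw_dbglog_log_level */
			uint16_t module_id;	/* enum fw_dbglog_wlan_module_id */
		};
		uint32_t value;			/* combined word handed to firmware */
	};
};

int main(void)
{
	struct dbglog_value v = {
		.log_level = 1,	/* ATH11K_FW_DBGLOG_INFO */
		.module_id = 1,	/* WLAN_MODULE_WMI */
	};

	/* On a little-endian host, log_level lands in the low 16 bits and
	 * module_id in the high 16 bits of the combined value.
	 */
	printf("value = 0x%08x\n", (unsigned int)v.value);
	return 0;
}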


@ -115,6 +115,8 @@ struct ath11k_pdev_mon_stats {
u32 dest_mpdu_drop;
u32 dup_mon_linkdesc_cnt;
u32 dup_mon_buf_cnt;
u32 dest_mon_stuck;
u32 dest_mon_not_reaped;
};
struct dp_full_mon_mpdu {
@ -167,6 +169,7 @@ struct ath11k_mon_data {
struct ath11k_pdev_dp {
u32 mac_id;
u32 mon_dest_ring_stuck_cnt;
atomic_t num_tx_pending;
wait_queue_head_t tx_empty_waitq;
struct dp_rxdma_ring rx_refill_buf_ring;
@ -1170,12 +1173,12 @@ struct ath11k_htt_ppdu_stats_msg {
u32 ppdu_id;
u32 timestamp;
u32 rsvd;
u8 data[0];
u8 data[];
} __packed;
struct htt_tlv {
u32 header;
u8 value[0];
u8 value[];
} __packed;
#define HTT_TLV_TAG GENMASK(11, 0)
@ -1362,7 +1365,7 @@ struct htt_ppdu_stats_usr_cmn_array {
* tx_ppdu_stats_info is variable length, with length =
* number_of_ppdu_stats * sizeof (struct htt_tx_ppdu_stats_info)
*/
struct htt_tx_ppdu_stats_info tx_ppdu_info[0];
struct htt_tx_ppdu_stats_info tx_ppdu_info[];
} __packed;
struct htt_ppdu_user_stats {
@ -1424,7 +1427,7 @@ struct htt_ppdu_stats_info {
*/
struct htt_pktlog_msg {
u32 hdr;
u8 payload[0];
u8 payload[];
};
/**
@ -1645,7 +1648,7 @@ struct ath11k_htt_extd_stats_msg {
u32 info0;
u64 cookie;
u32 info1;
u8 data[0];
u8 data[];
} __packed;
#define HTT_MAC_ADDR_L32_0 GENMASK(7, 0)


@ -2652,9 +2652,9 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
spin_lock_bh(&srng->lock);
try_again:
ath11k_hal_srng_access_begin(ab, srng);
try_again:
while (likely(desc =
(struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
srng))) {
@ -4807,7 +4807,6 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
{
struct ath11k_base *ab = ar->ab;
struct sk_buff *msdu, *prev_buf;
u32 wifi_hdr_len;
struct hal_rx_desc *rx_desc;
char *hdr_desc;
u8 *dest, decap_format;
@ -4849,38 +4848,27 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
__le16 qos_field;
u8 qos_pkt = 0;
rx_desc = (struct hal_rx_desc *)head_msdu->data;
hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
/* Base size */
wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr);
wh = (struct ieee80211_hdr_3addr *)hdr_desc;
if (ieee80211_is_data_qos(wh->frame_control)) {
struct ieee80211_qos_hdr *qwh =
(struct ieee80211_qos_hdr *)hdr_desc;
qos_field = qwh->qos_ctrl;
if (ieee80211_is_data_qos(wh->frame_control))
qos_pkt = 1;
}
msdu = head_msdu;
while (msdu) {
rx_desc = (struct hal_rx_desc *)msdu->data;
hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
ath11k_dp_rx_msdus_set_payload(ar, msdu);
if (qos_pkt) {
dest = skb_push(msdu, sizeof(__le16));
if (!dest)
goto err_merge_fail;
memcpy(dest, hdr_desc, wifi_hdr_len);
memcpy(dest + wifi_hdr_len,
(u8 *)&qos_field, sizeof(__le16));
memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
}
ath11k_dp_rx_msdus_set_payload(ar, msdu);
prev_buf = msdu;
msdu = msdu->next;
}
@ -4904,8 +4892,98 @@ err_merge_fail:
return NULL;
}
static void
ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
u8 *rtap_buf)
{
u32 rtap_len = 0;
put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
}
static void
ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
u8 *rtap_buf)
{
u32 rtap_len = 0;
put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
rtap_len += 2;
rtap_buf[rtap_len] = rx_status->he_RU[0];
rtap_len += 1;
rtap_buf[rtap_len] = rx_status->he_RU[1];
rtap_len += 1;
rtap_buf[rtap_len] = rx_status->he_RU[2];
rtap_len += 1;
rtap_buf[rtap_len] = rx_status->he_RU[3];
}
static void ath11k_update_radiotap(struct ath11k *ar,
struct hal_rx_mon_ppdu_info *ppduinfo,
struct sk_buff *mon_skb,
struct ieee80211_rx_status *rxs)
{
struct ieee80211_supported_band *sband;
u8 *ptr = NULL;
rxs->flag |= RX_FLAG_MACTIME_START;
rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
if (ppduinfo->nss)
rxs->nss = ppduinfo->nss;
if (ppduinfo->he_mu_flags) {
rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
rxs->encoding = RX_ENC_HE;
ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr);
} else if (ppduinfo->he_flags) {
rxs->flag |= RX_FLAG_RADIOTAP_HE;
rxs->encoding = RX_ENC_HE;
ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr);
rxs->rate_idx = ppduinfo->rate;
} else if (ppduinfo->vht_flags) {
rxs->encoding = RX_ENC_VHT;
rxs->rate_idx = ppduinfo->rate;
} else if (ppduinfo->ht_flags) {
rxs->encoding = RX_ENC_HT;
rxs->rate_idx = ppduinfo->rate;
} else {
rxs->encoding = RX_ENC_LEGACY;
sband = &ar->mac.sbands[rxs->band];
rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
ppduinfo->cck_flag);
}
rxs->mactime = ppduinfo->tsft;
}
static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
struct sk_buff *head_msdu,
struct hal_rx_mon_ppdu_info *ppduinfo,
struct sk_buff *tail_msdu,
struct napi_struct *napi)
{
@ -4940,7 +5018,7 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
} else {
rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
}
rxs->flag |= RX_FLAG_ONLY_MONITOR;
ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);
ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
mon_skb = skb_next;
@ -4959,6 +5037,12 @@ mon_deliver_fail:
return -EINVAL;
}
/* The destination ring processing is stuck if the destination is not
* moving while status ring moves 16 PPDU. The destination ring processing
* skips this destination ring PPDU as a workaround.
*/
#define MON_DEST_RING_STUCK_MAX_CNT 16
static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
u32 quota, struct napi_struct *napi)
{
@ -4972,6 +5056,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
u32 ring_id;
struct ath11k_pdev_mon_stats *rx_mon_stats;
u32 npackets = 0;
u32 mpdu_rx_bufs_used;
if (ar->ab->hw_params.rxdma1_enable)
ring_id = dp->rxdma_mon_dst_ring.ring_id;
@ -5001,20 +5086,44 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
head_msdu = NULL;
tail_msdu = NULL;
rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
&head_msdu,
&tail_msdu,
&npackets, &ppdu_id);
mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
&head_msdu,
&tail_msdu,
&npackets, &ppdu_id);
rx_bufs_used += mpdu_rx_bufs_used;
if (mpdu_rx_bufs_used) {
dp->mon_dest_ring_stuck_cnt = 0;
} else {
dp->mon_dest_ring_stuck_cnt++;
rx_mon_stats->dest_mon_not_reaped++;
}
if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
rx_mon_stats->dest_mon_stuck++;
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
"status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
pmon->mon_ppdu_info.ppdu_id, ppdu_id,
dp->mon_dest_ring_stuck_cnt,
rx_mon_stats->dest_mon_not_reaped,
rx_mon_stats->dest_mon_stuck);
pmon->mon_ppdu_info.ppdu_id = ppdu_id;
continue;
}
if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
"dest_rx: new ppdu_id %x != status ppdu_id %x",
ppdu_id, pmon->mon_ppdu_info.ppdu_id);
"dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
ppdu_id, pmon->mon_ppdu_info.ppdu_id,
rx_mon_stats->dest_mon_not_reaped,
rx_mon_stats->dest_mon_stuck);
break;
}
if (head_msdu && tail_msdu) {
ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
&pmon->mon_ppdu_info,
tail_msdu, napi);
rx_mon_stats->dest_mpdu_done++;
}
@ -5054,7 +5163,7 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
struct ath11k_sta *arsta;
int num_buffs_reaped = 0;
u32 rx_buf_sz;
u16 log_type = 0;
u16 log_type;
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data;
struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
@ -5076,11 +5185,15 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
} else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
rx_buf_sz = DP_RX_BUFFER_SIZE;
} else {
log_type = ATH11K_PKTLOG_TYPE_INVALID;
rx_buf_sz = 0;
}
if (log_type)
if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info));
hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);
if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
@ -5341,6 +5454,7 @@ static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,
tail_msdu = mon_mpdu->tail;
if (head_msdu && tail_msdu) {
ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,
&pmon->mon_ppdu_info,
tail_msdu, napi);
rx_mon_stats->dest_mpdu_done++;
ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");


@ -427,7 +427,7 @@ void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts)
struct ath11k_sta *arsta;
struct ieee80211_sta *sta;
u16 rate, ru_tones;
u8 mcs, rate_idx, ofdma;
u8 mcs, rate_idx = 0, ofdma;
int ret;
spin_lock_bh(&ab->base_lock);
@ -519,9 +519,13 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
struct sk_buff *msdu,
struct hal_tx_status *ts)
{
struct ieee80211_tx_status status = { 0 };
struct ath11k_base *ab = ar->ab;
struct ieee80211_tx_info *info;
struct ath11k_skb_cb *skb_cb;
struct ath11k_peer *peer;
struct ath11k_sta *arsta;
struct rate_info rate;
if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
/* Must not happen */
@ -584,12 +588,26 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
ath11k_dp_tx_cache_peer_stats(ar, msdu, ts);
}
/* NOTE: Tx rate status reporting. Tx completion status does not have
* necessary information (for example nss) to build the tx rate.
* Might end up reporting it out-of-band from HTT stats.
*/
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_by_id(ab, ts->peer_id);
if (!peer || !peer->sta) {
ath11k_dbg(ab, ATH11K_DBG_DATA,
"dp_tx: failed to find the peer with peer_id %d\n",
ts->peer_id);
spin_unlock_bh(&ab->base_lock);
dev_kfree_skb_any(msdu);
return;
}
arsta = (struct ath11k_sta *)peer->sta->drv_priv;
status.sta = peer->sta;
status.skb = msdu;
status.info = info;
rate = arsta->last_txrate;
status.rate = &rate;
ieee80211_tx_status(ar->hw, msdu);
spin_unlock_bh(&ab->base_lock);
ieee80211_tx_status_ext(ar->hw, &status);
}
static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,


@ -474,6 +474,7 @@ enum hal_tlv_tag {
#define HAL_TLV_HDR_TAG GENMASK(9, 1)
#define HAL_TLV_HDR_LEN GENMASK(25, 10)
#define HAL_TLV_USR_ID GENMASK(31, 26)
#define HAL_TLV_ALIGN 4


@ -453,10 +453,12 @@ void ath11k_hal_reo_status_queue_stats(struct ath11k_base *ab, u32 *reo_desc,
desc->info0));
ath11k_dbg(ab, ATH11k_DBG_HAL, "pn = [%08x, %08x, %08x, %08x]\n",
desc->pn[0], desc->pn[1], desc->pn[2], desc->pn[3]);
ath11k_dbg(ab, ATH11k_DBG_HAL, "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
ath11k_dbg(ab, ATH11k_DBG_HAL,
"last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
desc->last_rx_enqueue_timestamp,
desc->last_rx_dequeue_timestamp);
ath11k_dbg(ab, ATH11k_DBG_HAL, "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
ath11k_dbg(ab, ATH11k_DBG_HAL,
"rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
desc->rx_bitmap[0], desc->rx_bitmap[1], desc->rx_bitmap[2],
desc->rx_bitmap[3], desc->rx_bitmap[4], desc->rx_bitmap[5],
desc->rx_bitmap[6], desc->rx_bitmap[7]);
@ -802,12 +804,75 @@ void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
}
}
#define HAL_MAX_UL_MU_USERS 37
static inline void
ath11k_hal_rx_handle_ofdma_info(void *rx_tlv,
struct hal_rx_user_status *rx_user_status)
{
struct hal_rx_ppdu_end_user_stats *ppdu_end_user =
(struct hal_rx_ppdu_end_user_stats *)rx_tlv;
rx_user_status->ul_ofdma_user_v0_word0 = __le32_to_cpu(ppdu_end_user->info6);
rx_user_status->ul_ofdma_user_v0_word1 = __le32_to_cpu(ppdu_end_user->rsvd2[10]);
}
static inline void
ath11k_hal_rx_populate_byte_count(void *rx_tlv, void *ppduinfo,
struct hal_rx_user_status *rx_user_status)
{
struct hal_rx_ppdu_end_user_stats *ppdu_end_user =
(struct hal_rx_ppdu_end_user_stats *)rx_tlv;
rx_user_status->mpdu_ok_byte_count =
FIELD_GET(HAL_RX_PPDU_END_USER_STATS_RSVD2_6_MPDU_OK_BYTE_COUNT,
__le32_to_cpu(ppdu_end_user->rsvd2[6]));
rx_user_status->mpdu_err_byte_count =
FIELD_GET(HAL_RX_PPDU_END_USER_STATS_RSVD2_8_MPDU_ERR_BYTE_COUNT,
__le32_to_cpu(ppdu_end_user->rsvd2[8]));
}
static inline void
ath11k_hal_rx_populate_mu_user_info(void *rx_tlv, struct hal_rx_mon_ppdu_info *ppdu_info,
struct hal_rx_user_status *rx_user_status)
{
rx_user_status->ast_index = ppdu_info->ast_index;
rx_user_status->tid = ppdu_info->tid;
rx_user_status->tcp_msdu_count =
ppdu_info->tcp_msdu_count;
rx_user_status->udp_msdu_count =
ppdu_info->udp_msdu_count;
rx_user_status->other_msdu_count =
ppdu_info->other_msdu_count;
rx_user_status->frame_control = ppdu_info->frame_control;
rx_user_status->frame_control_info_valid =
ppdu_info->frame_control_info_valid;
rx_user_status->data_sequence_control_info_valid =
ppdu_info->data_sequence_control_info_valid;
rx_user_status->first_data_seq_ctrl =
ppdu_info->first_data_seq_ctrl;
rx_user_status->preamble_type = ppdu_info->preamble_type;
rx_user_status->ht_flags = ppdu_info->ht_flags;
rx_user_status->vht_flags = ppdu_info->vht_flags;
rx_user_status->he_flags = ppdu_info->he_flags;
rx_user_status->rs_flags = ppdu_info->rs_flags;
rx_user_status->mpdu_cnt_fcs_ok =
ppdu_info->num_mpdu_fcs_ok;
rx_user_status->mpdu_cnt_fcs_err =
ppdu_info->num_mpdu_fcs_err;
ath11k_hal_rx_populate_byte_count(rx_tlv, ppdu_info, rx_user_status);
}
static enum hal_rx_mon_status
ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
struct hal_rx_mon_ppdu_info *ppdu_info,
u32 tlv_tag, u8 *tlv_data)
u32 tlv_tag, u8 *tlv_data, u32 userid)
{
u32 info0, info1;
u32 info0, info1, value;
u8 he_dcm = 0, he_stbc = 0;
u16 he_gi = 0, he_ltf = 0;
switch (tlv_tag) {
case HAL_RX_PPDU_START: {
@ -828,6 +893,9 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
info0 = __le32_to_cpu(eu_stats->info0);
info1 = __le32_to_cpu(eu_stats->info1);
ppdu_info->ast_index =
FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX,
__le32_to_cpu(eu_stats->info2));
ppdu_info->tid =
ffs(FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP,
__le32_to_cpu(eu_stats->info6))) - 1;
@ -851,6 +919,44 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
ppdu_info->num_mpdu_fcs_err =
FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR,
info0);
switch (ppdu_info->preamble_type) {
case HAL_RX_PREAMBLE_11N:
ppdu_info->ht_flags = 1;
break;
case HAL_RX_PREAMBLE_11AC:
ppdu_info->vht_flags = 1;
break;
case HAL_RX_PREAMBLE_11AX:
ppdu_info->he_flags = 1;
break;
default:
break;
}
if (userid < HAL_MAX_UL_MU_USERS) {
struct hal_rx_user_status *rxuser_stats =
&ppdu_info->userstats;
ath11k_hal_rx_handle_ofdma_info(tlv_data, rxuser_stats);
ath11k_hal_rx_populate_mu_user_info(tlv_data, ppdu_info,
rxuser_stats);
}
ppdu_info->userstats.mpdu_fcs_ok_bitmap[0] =
__le32_to_cpu(eu_stats->rsvd1[0]);
ppdu_info->userstats.mpdu_fcs_ok_bitmap[1] =
__le32_to_cpu(eu_stats->rsvd1[1]);
break;
}
case HAL_RX_PPDU_END_USER_STATS_EXT: {
struct hal_rx_ppdu_end_user_stats_ext *eu_stats =
(struct hal_rx_ppdu_end_user_stats_ext *)tlv_data;
ppdu_info->userstats.mpdu_fcs_ok_bitmap[2] = eu_stats->info1;
ppdu_info->userstats.mpdu_fcs_ok_bitmap[3] = eu_stats->info2;
ppdu_info->userstats.mpdu_fcs_ok_bitmap[4] = eu_stats->info3;
ppdu_info->userstats.mpdu_fcs_ok_bitmap[5] = eu_stats->info4;
ppdu_info->userstats.mpdu_fcs_ok_bitmap[6] = eu_stats->info5;
ppdu_info->userstats.mpdu_fcs_ok_bitmap[7] = eu_stats->info6;
break;
}
case HAL_PHYRX_HT_SIG: {
@ -949,50 +1055,151 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
else
ppdu_info->reception_type =
HAL_RX_RECEPTION_TYPE_MU_MIMO;
ppdu_info->vht_flag_values5 = group_id;
ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) |
ppdu_info->nss);
ppdu_info->vht_flag_values2 = ppdu_info->bw;
ppdu_info->vht_flag_values4 =
FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING, info1);
break;
}
case HAL_PHYRX_HE_SIG_A_SU: {
struct hal_rx_he_sig_a_su_info *he_sig_a =
(struct hal_rx_he_sig_a_su_info *)tlv_data;
u32 nsts, cp_ltf, dcm;
ppdu_info->he_flags = 1;
info0 = __le32_to_cpu(he_sig_a->info0);
info1 = __le32_to_cpu(he_sig_a->info1);
ppdu_info->mcs =
FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS,
info0);
ppdu_info->bw =
FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW,
info0);
ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING, info0);
ppdu_info->is_stbc = info1 &
HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC;
ppdu_info->beamformed = info1 &
HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF;
dcm = info0 & HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM;
cp_ltf = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE,
info0);
nsts = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND, info0);
switch (cp_ltf) {
if (value == 0)
ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_TRIG;
else
ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU;
ppdu_info->he_data1 |=
IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN;
ppdu_info->he_data2 |=
IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN;
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR, info0);
ppdu_info->he_data3 =
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE, info0);
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG, info0);
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_UL_DL, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS, info0);
ppdu_info->mcs = value;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, value);
he_dcm = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM, info0);
ppdu_info->dcm = he_dcm;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM, he_dcm);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING, info1);
ppdu_info->ldpc = (value == HAL_RX_SU_MU_CODING_LDPC) ? 1 : 0;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA, info1);
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG, value);
he_stbc = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC, info1);
ppdu_info->is_stbc = he_stbc;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_STBC, he_stbc);
/* data4 */
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE, info0);
ppdu_info->he_data4 =
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE, value);
/* data5 */
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW, info0);
ppdu_info->bw = value;
ppdu_info->he_data5 =
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE, info0);
switch (value) {
case 0:
he_gi = HE_GI_0_8;
he_ltf = HE_LTF_1_X;
break;
case 1:
ppdu_info->gi = HAL_RX_GI_0_8_US;
break;
he_gi = HE_GI_0_8;
he_ltf = HE_LTF_2_X;
break;
case 2:
ppdu_info->gi = HAL_RX_GI_1_6_US;
break;
he_gi = HE_GI_1_6;
he_ltf = HE_LTF_2_X;
break;
case 3:
if (dcm && ppdu_info->is_stbc)
ppdu_info->gi = HAL_RX_GI_0_8_US;
else
ppdu_info->gi = HAL_RX_GI_3_2_US;
break;
if (he_dcm && he_stbc) {
he_gi = HE_GI_0_8;
he_ltf = HE_LTF_4_X;
} else {
he_gi = HE_GI_3_2;
he_ltf = HE_LTF_4_X;
}
break;
}
ppdu_info->gi = he_gi;
he_gi = (he_gi != 0) ? he_gi - 1 : 0;
ppdu_info->he_data5 |= FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_GI, he_gi);
ppdu_info->ltf_size = he_ltf;
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE,
(he_ltf == HE_LTF_4_X) ? he_ltf - 1 : he_ltf);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR, info1);
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF, info1);
ppdu_info->beamformed = value;
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_TXBF, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM, info1);
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG, value);
/* data6 */
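/* NSTS in HE-SIG-A is encoded as the number of space-time streams minus one, hence the increment below */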
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
value++;
ppdu_info->nss = value;
ppdu_info->he_data6 =
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_NSTS, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND, info1);
ppdu_info->he_data6 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_DOPPLER, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION, info1);
ppdu_info->he_data6 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_TXOP, value);
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
break;
}
@ -1000,29 +1207,142 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl =
(struct hal_rx_he_sig_a_mu_dl_info *)tlv_data;
info0 = __le32_to_cpu(he_sig_a_mu_dl->info0);
info1 = __le32_to_cpu(he_sig_a_mu_dl->info1);
ppdu_info->bw =
FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW,
info0);
ppdu_info->he_mu_flags = 1;
ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU;
ppdu_info->he_data1 |=
IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN;
ppdu_info->he_data2 =
IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN;
/*data3*/
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_BSS_COLOR, info0);
ppdu_info->he_data3 =
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_UL_FLAG, info0);
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_UL_DL, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_LDPC_EXTRA, info1);
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC, info1);
he_stbc = value;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_STBC, value);
/*data4*/
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_SPATIAL_REUSE, info0);
ppdu_info->he_data4 =
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE, value);
/*data5*/
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW, info0);
ppdu_info->bw = value;
ppdu_info->he_data5 =
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE, info0);
switch (value) {
case 0:
he_gi = HE_GI_0_8;
he_ltf = HE_LTF_4_X;
break;
case 1:
he_gi = HE_GI_0_8;
he_ltf = HE_LTF_2_X;
break;
case 2:
he_gi = HE_GI_1_6;
he_ltf = HE_LTF_2_X;
break;
case 3:
he_gi = HE_GI_3_2;
he_ltf = HE_LTF_4_X;
break;
}
ppdu_info->gi = he_gi;
he_gi = (he_gi != 0) ? he_gi - 1 : 0;
ppdu_info->he_data5 |= FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_GI, he_gi);
ppdu_info->ltf_size = he_ltf;
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE,
(he_ltf == HE_LTF_4_X) ? he_ltf - 1 : he_ltf);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_NUM_LTF_SYMB, info1);
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_FACTOR,
info1);
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_PE_DISAM,
info1);
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG, value);
/*data6*/
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DOPPLER_INDICATION,
info0);
ppdu_info->he_data6 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_DOPPLER, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXOP_DURATION, info1);
ppdu_info->he_data6 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_TXOP, value);
/* HE-MU Flags */
/* HE-MU-flags1 */
ppdu_info->he_flags1 =
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN |
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN;
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_MCS_OF_SIGB, info0);
ppdu_info->he_flags1 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN,
value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DCM_OF_SIGB, info0);
ppdu_info->he_flags1 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN,
value);
/* HE-MU-flags2 */
ppdu_info->he_flags2 =
IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN;
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW, info0);
ppdu_info->he_flags2 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW,
value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_COMP_MODE_SIGB, info0);
ppdu_info->he_flags2 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_NUM_SIGB_SYMB, info0);
value = value - 1;
ppdu_info->he_flags2 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS,
value);
ppdu_info->is_stbc = info1 &
HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC;
@ -1040,7 +1360,7 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
info0);
ppdu_info->ru_alloc =
ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(ru_tones);
ppdu_info->he_RU[0] = ru_tones;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
break;
}
@ -1050,14 +1370,25 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
info0 = __le32_to_cpu(he_sig_b2_mu->info0);
ppdu_info->he_data1 |= IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN;
ppdu_info->mcs =
FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS, info0);
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, ppdu_info->mcs);
value = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING, info0);
ppdu_info->ldpc = value;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
value = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID, info0);
ppdu_info->he_data4 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID, value);
ppdu_info->nss =
FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS, info0) + 1;
break;
}
case HAL_PHYRX_HE_SIG_B2_OFDMA: {
@ -1066,17 +1397,40 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
info0 = __le32_to_cpu(he_sig_b2_ofdma->info0);
ppdu_info->he_data1 |=
IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN;
/* HE-data2 */
ppdu_info->he_data2 |= IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN;
ppdu_info->mcs =
FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS,
info0);
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, ppdu_info->mcs);
value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM, info0);
he_dcm = value;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM, value);
value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING, info0);
ppdu_info->ldpc = value;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
/* HE-data4 */
value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID, info0);
ppdu_info->he_data4 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID, value);
ppdu_info->nss =
FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS,
info0) + 1;
ppdu_info->beamformed =
info0 & HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
break;
}
@ -1118,6 +1472,9 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
ppdu_info->rx_duration =
FIELD_GET(HAL_RX_PPDU_END_DURATION,
__le32_to_cpu(ppdu_rx_duration->info0));
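/* the two reserved words following the duration carry the 64-bit PPDU end timestamp (TSF), upper half in rsvd0[1] */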
ppdu_info->tsft = __le32_to_cpu(ppdu_rx_duration->rsvd0[1]);
ppdu_info->tsft = (ppdu_info->tsft << 32) |
__le32_to_cpu(ppdu_rx_duration->rsvd0[0]);
break;
}
case HAL_DUMMY:
@ -1141,12 +1498,14 @@ ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
u16 tlv_tag;
u16 tlv_len;
u32 tlv_userid = 0;
u8 *ptr = skb->data;
do {
tlv = (struct hal_tlv_hdr *)ptr;
tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl);
tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
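/* the TLV header also carries a user id, which is now passed through to the per-TLV parser */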
tlv_userid = FIELD_GET(HAL_TLV_USR_ID, tlv->tl);
ptr += sizeof(*tlv);
/* The actual length of PPDU_END is the combined length of many PHY
@ -1158,7 +1517,7 @@ ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview);
hal_status = ath11k_hal_rx_parse_mon_status_tlv(ab, ppdu_info,
tlv_tag, ptr, tlv_userid);
ptr += tlv_len;
ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);


@ -73,6 +73,36 @@ enum hal_rx_mon_status {
HAL_RX_MON_STATUS_BUF_DONE,
};
struct hal_rx_user_status {
u32 mcs:4,
nss:3,
ofdma_info_valid:1,
dl_ofdma_ru_start_index:7,
dl_ofdma_ru_width:7,
dl_ofdma_ru_size:8;
u32 ul_ofdma_user_v0_word0;
u32 ul_ofdma_user_v0_word1;
u32 ast_index;
u32 tid;
u16 tcp_msdu_count;
u16 udp_msdu_count;
u16 other_msdu_count;
u16 frame_control;
u8 frame_control_info_valid;
u8 data_sequence_control_info_valid;
u16 first_data_seq_ctrl;
u32 preamble_type;
u16 ht_flags;
u16 vht_flags;
u16 he_flags;
u8 rs_flags;
u32 mpdu_cnt_fcs_ok;
u32 mpdu_cnt_fcs_err;
u32 mpdu_fcs_ok_bitmap[8];
u32 mpdu_ok_byte_count;
u32 mpdu_err_byte_count;
};
#define HAL_TLV_STATUS_PPDU_NOT_DONE HAL_RX_MON_STATUS_PPDU_NOT_DONE
#define HAL_TLV_STATUS_PPDU_DONE HAL_RX_MON_STATUS_PPDU_DONE
#define HAL_TLV_STATUS_BUF_DONE HAL_RX_MON_STATUS_BUF_DONE
@ -107,6 +137,12 @@ struct hal_rx_mon_ppdu_info {
u8 mcs;
u8 nss;
u8 bw;
u8 vht_flag_values1;
u8 vht_flag_values2;
u8 vht_flag_values3[4];
u8 vht_flag_values4;
u8 vht_flag_values5;
u16 vht_flag_values6;
u8 is_stbc;
u8 gi;
u8 ldpc;
@ -114,10 +150,46 @@ struct hal_rx_mon_ppdu_info {
u8 rssi_comb;
u8 rssi_chain_pri20[HAL_RX_MAX_NSS];
u8 tid;
u16 ht_flags;
u16 vht_flags;
u16 he_flags;
u16 he_mu_flags;
u8 dcm;
u8 ru_alloc;
u8 reception_type;
u64 tsft;
u64 rx_duration;
u16 frame_control;
u32 ast_index;
u8 rs_fcs_err;
u8 rs_flags;
u8 cck_flag;
u8 ofdm_flag;
u8 ulofdma_flag;
u8 frame_control_info_valid;
u16 he_per_user_1;
u16 he_per_user_2;
u8 he_per_user_position;
u8 he_per_user_known;
u16 he_flags1;
u16 he_flags2;
u8 he_RU[4];
u16 he_data1;
u16 he_data2;
u16 he_data3;
u16 he_data4;
u16 he_data5;
u16 he_data6;
u32 ppdu_len;
u32 prev_ppdu_id;
u32 device_id;
u16 first_data_seq_ctrl;
u8 monitor_direct_used;
u8 data_sequence_control_info_valid;
u8 ltf_size;
u8 rxpcu_filter_pass;
char rssi_chain[8][8];
struct hal_rx_user_status userstats;
};
#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0)
@ -150,6 +222,9 @@ struct hal_rx_ppdu_start {
#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP GENMASK(15, 0)
#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_EOSP_BITMAP GENMASK(31, 16)
#define HAL_RX_PPDU_END_USER_STATS_RSVD2_6_MPDU_OK_BYTE_COUNT GENMASK(24, 0)
#define HAL_RX_PPDU_END_USER_STATS_RSVD2_8_MPDU_ERR_BYTE_COUNT GENMASK(24, 0)
struct hal_rx_ppdu_end_user_stats {
__le32 rsvd0[2];
__le32 info0;
@ -164,6 +239,16 @@ struct hal_rx_ppdu_end_user_stats {
__le32 rsvd2[11];
} __packed;
struct hal_rx_ppdu_end_user_stats_ext {
u32 info0;
u32 info1;
u32 info2;
u32 info3;
u32 info4;
u32 info5;
u32 info6;
} __packed;
#define HAL_RX_HT_SIG_INFO_INFO0_MCS GENMASK(6, 0)
#define HAL_RX_HT_SIG_INFO_INFO0_BW BIT(7)
@ -212,25 +297,62 @@ enum hal_rx_vht_sig_a_gi_setting {
HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY = 3,
};
#define HAL_RX_SU_MU_CODING_LDPC 0x01
#define HE_GI_0_8 0
#define HE_GI_0_4 1
#define HE_GI_1_6 2
#define HE_GI_3_2 3
#define HE_LTF_1_X 0
#define HE_LTF_2_X 1
#define HE_LTF_4_X 2
#define HE_LTF_UNKNOWN 3
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS GENMASK(6, 3)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM BIT(7)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW GENMASK(20, 19)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE GENMASK(22, 21)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS GENMASK(25, 23)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR GENMASK(13, 8)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE GENMASK(18, 15)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND BIT(0)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE BIT(1)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG BIT(2)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION GENMASK(6, 0)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING BIT(7)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA BIT(8)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC BIT(9)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF BIT(10)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR GENMASK(12, 11)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM BIT(13)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND BIT(15)
struct hal_rx_he_sig_a_su_info {
__le32 info0;
__le32 info1;
} __packed;
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_UL_FLAG BIT(1)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_MCS_OF_SIGB GENMASK(3, 1)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DCM_OF_SIGB BIT(4)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_BSS_COLOR GENMASK(10, 5)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_SPATIAL_REUSE GENMASK(14, 11)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW GENMASK(17, 15)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_NUM_SIGB_SYMB GENMASK(21, 18)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_COMP_MODE_SIGB BIT(22)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE GENMASK(24, 23)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DOPPLER_INDICATION BIT(25)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXOP_DURATION GENMASK(6, 0)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_CODING BIT(7)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_NUM_LTF_SYMB GENMASK(10, 8)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_LDPC_EXTRA BIT(11)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC BIT(12)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXBF BIT(10)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_FACTOR GENMASK(14, 13)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_PE_DISAM BIT(15)
struct hal_rx_he_sig_a_mu_dl_info {
__le32 info0;
@ -243,6 +365,7 @@ struct hal_rx_he_sig_b1_mu_info {
__le32 info0;
} __packed;
#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID GENMASK(10, 0)
#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS GENMASK(18, 15)
#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING BIT(20)
#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS GENMASK(31, 29)
@ -251,6 +374,7 @@ struct hal_rx_he_sig_b2_mu_info {
__le32 info0;
} __packed;
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID GENMASK(10, 0)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS GENMASK(13, 11)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF BIT(19)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS GENMASK(18, 15)
@ -279,11 +403,14 @@ struct hal_rx_phyrx_rssi_legacy_info {
#define HAL_RX_MPDU_INFO_INFO0_PEERID GENMASK(31, 16)
#define HAL_RX_MPDU_INFO_INFO0_PEERID_WCN6855 GENMASK(15, 0)
#define HAL_RX_MPDU_INFO_INFO1_MPDU_LEN GENMASK(13, 0)
struct hal_rx_mpdu_info {
__le32 rsvd0;
__le32 info0;
__le32 rsvd1[11];
__le32 info1;
__le32 rsvd2[9];
} __packed;
struct hal_rx_mpdu_info_wcn6855 {


@ -813,6 +813,12 @@ static u16 ath11k_hw_wcn6855_mpdu_info_get_peerid(u8 *tlv_data)
return peer_id;
}
static bool ath11k_hw_wcn6855_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
__le32_to_cpu(desc->u.wcn6855.msdu_start.info2));
}
const struct ath11k_hw_ops ipq8074_ops = {
.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
.wmi_init_config = ath11k_init_wmi_config_ipq8074,
@ -983,6 +989,7 @@ const struct ath11k_hw_ops wcn6855_ops = {
.rx_desc_get_encrypt_type = ath11k_hw_wcn6855_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_wcn6855_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_wcn6855_rx_desc_get_mesh_ctl,
.rx_desc_get_ldpc_support = ath11k_hw_wcn6855_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_wcn6855_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_wcn6855_rx_desc_get_mpdu_start_seq_no,


@ -193,6 +193,7 @@ struct ath11k_hw_params {
bool supports_rssi_stats;
bool fw_wmi_diag_event;
bool current_cc_support;
bool dbr_debug_support;
};
struct ath11k_hw_ops {


@ -5579,7 +5579,7 @@ static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb,
skb_queue_tail(q, skb);
atomic_inc(&ar->num_pending_mgmt_tx);
queue_work(ar->ab->workqueue, &ar->wmi_mgmt_tx_work);
return 0;
}
@ -6354,6 +6354,10 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
}
}
ret = ath11k_debugfs_add_interface(arvif);
if (ret)
goto err_peer_del;
mutex_unlock(&ar->conf_mutex);
return 0;
@ -6388,6 +6392,7 @@ err_vdev_del:
spin_unlock_bh(&ar->data_lock);
err:
ath11k_debugfs_remove_interface(arvif);
mutex_unlock(&ar->conf_mutex);
return ret;
@ -6486,6 +6491,8 @@ err_vdev_del:
/* Recalc txpower for remaining vdev */
ath11k_mac_txpower_recalc(ar);
ath11k_debugfs_remove_interface(arvif);
/* TODO: recal traffic pause state based on the available vdevs */
mutex_unlock(&ar->conf_mutex);
@ -6623,12 +6630,13 @@ static void ath11k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
static int
ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
struct ieee80211_chanctx_conf *ctx,
bool restart)
{
struct ath11k *ar = arvif->ar;
struct ath11k_base *ab = ar->ab;
struct wmi_vdev_start_req_arg arg = {};
const struct cfg80211_chan_def *chandef = &ctx->def;
int he_support = arvif->vif->bss_conf.he_support;
int ret = 0;
@ -6663,8 +6671,7 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
arg.channel.chan_radar =
!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
arg.channel.freq2_radar = ctx->radar_enabled;
arg.channel.passive = arg.channel.chan_radar;
@ -6774,15 +6781,15 @@ err:
}
static int ath11k_mac_vdev_start(struct ath11k_vif *arvif,
struct ieee80211_chanctx_conf *ctx)
{
return ath11k_mac_vdev_start_restart(arvif, ctx, false);
}
static int ath11k_mac_vdev_restart(struct ath11k_vif *arvif,
struct ieee80211_chanctx_conf *ctx)
{
return ath11k_mac_vdev_start_restart(arvif, ctx, true);
}
struct ath11k_mac_change_chanctx_arg {
@ -6849,13 +6856,33 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
if (WARN_ON(!arvif->is_started))
continue;
/* change_chanctx can be called even before vdev_up from
* ieee80211_start_ap->ieee80211_vif_use_channel->
* ieee80211_recalc_radar_chanctx.
*
* Firmware expect vdev_restart only if vdev is up.
* If vdev is down then it expect vdev_stop->vdev_start.
*/
if (arvif->is_up) {
ret = ath11k_mac_vdev_restart(arvif, vifs[i].new_ctx);
if (ret) {
ath11k_warn(ab, "failed to restart vdev %d: %d\n",
arvif->vdev_id, ret);
continue;
}
} else {
ret = ath11k_mac_vdev_stop(arvif);
if (ret) {
ath11k_warn(ab, "failed to stop vdev %d: %d\n",
arvif->vdev_id, ret);
continue;
}
ret = ath11k_mac_vdev_start(arvif, vifs[i].new_ctx);
if (ret)
ath11k_warn(ab, "failed to start vdev %d: %d\n",
arvif->vdev_id, ret);
continue;
}
@ -6940,7 +6967,8 @@ static void ath11k_mac_op_change_chanctx(struct ieee80211_hw *hw,
if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
goto unlock;
if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH ||
changed & IEEE80211_CHANCTX_CHANGE_RADAR)
ath11k_mac_update_active_vif_chan(ar, ctx);
/* TODO: Recalc radar detection */
@ -6960,7 +6988,7 @@ static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
if (WARN_ON(arvif->is_started))
return -EBUSY;
ret = ath11k_mac_vdev_start(arvif, &arvif->chanctx);
if (ret) {
ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
arvif->vdev_id, vif->addr,
@ -7054,7 +7082,7 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
ret = ath11k_mac_vdev_start(arvif, ctx);
if (ret) {
ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
arvif->vdev_id, vif->addr,
@ -8448,7 +8476,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
ar->hw->queues = ATH11K_HW_MAX_QUEUES;
ar->hw->wiphy->tx_queue_len = ATH11K_QUEUE_LEN;
ar->hw->offchannel_tx_hw_queue = ATH11K_HW_MAX_QUEUES - 1;
ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
ar->hw->vif_data_size = sizeof(struct ath11k_vif);
ar->hw->sta_data_size = sizeof(struct ath11k_sta);


@ -13,6 +13,7 @@
#include "pci.h"
#define MHI_TIMEOUT_DEFAULT_MS 90000
#define RDDM_DUMP_SIZE 0x420000
static struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
{
@ -382,6 +383,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
mhi_ctrl->iova_stop = 0xFFFFFFFF;
}
mhi_ctrl->rddm_size = RDDM_DUMP_SIZE;
mhi_ctrl->sbl_size = SZ_512K;
mhi_ctrl->seg_len = SZ_512K;
mhi_ctrl->fbc_download = true;
@ -561,7 +563,7 @@ static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci,
ret = 0;
break;
case ATH11K_MHI_POWER_ON:
ret = mhi_sync_power_up(ab_pci->mhi_ctrl);
break;
case ATH11K_MHI_POWER_OFF:
mhi_power_down(ab_pci->mhi_ctrl, true);


@ -1571,6 +1571,11 @@ static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev)
struct ath11k_base *ab = dev_get_drvdata(dev);
int ret;
if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci suspend as qmi is not initialised\n");
return 0;
}
ret = ath11k_core_suspend(ab);
if (ret)
ath11k_warn(ab, "failed to suspend core: %d\n", ret);
@ -1583,6 +1588,11 @@ static __maybe_unused int ath11k_pci_pm_resume(struct device *dev)
struct ath11k_base *ab = dev_get_drvdata(dev);
int ret;
if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci resume as qmi is not initialised\n");
return 0;
}
ret = ath11k_core_resume(ab);
if (ret)
ath11k_warn(ab, "failed to resume core: %d\n", ret);


@ -2342,6 +2342,7 @@ static void ath11k_qmi_m3_free(struct ath11k_base *ab)
dma_free_coherent(ab->dev, m3_mem->size,
m3_mem->vaddr, m3_mem->paddr);
m3_mem->vaddr = NULL;
m3_mem->size = 0;
}
static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
@ -2959,7 +2960,11 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work)
clear_bit(ATH11K_FLAG_CRASH_FLUSH,
&ab->dev_flags);
clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
ret = ath11k_core_qmi_firmware_ready(ab);
if (ret) {
set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
break;
}
set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags);
}
@ -3025,3 +3030,8 @@ void ath11k_qmi_deinit_service(struct ath11k_base *ab)
}
EXPORT_SYMBOL(ath11k_qmi_deinit_service);
void ath11k_qmi_free_resource(struct ath11k_base *ab)
{
ath11k_qmi_free_target_mem_chunk(ab);
ath11k_qmi_m3_free(ab);
}


@ -492,5 +492,6 @@ void ath11k_qmi_event_work(struct work_struct *work);
void ath11k_qmi_msg_recv_work(struct work_struct *work);
void ath11k_qmi_deinit_service(struct ath11k_base *ab);
int ath11k_qmi_init_service(struct ath11k_base *ab);
void ath11k_qmi_free_resource(struct ath11k_base *ab);
#endif


@ -1445,7 +1445,7 @@ struct hal_rx_desc_ipq8074 {
__le32 hdr_status_tag;
__le32 phy_ppdu_id;
u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
u8 msdu_payload[];
} __packed;
struct hal_rx_desc_qcn9074 {
@ -1464,7 +1464,7 @@ struct hal_rx_desc_qcn9074 {
__le32 hdr_status_tag;
__le32 phy_ppdu_id;
u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
u8 msdu_payload[];
} __packed;
struct hal_rx_desc_wcn6855 {
@ -1483,7 +1483,7 @@ struct hal_rx_desc_wcn6855 {
__le32 hdr_status_tag;
__le32 phy_ppdu_id;
u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
u8 msdu_payload[];
} __packed;
struct hal_rx_desc {


@ -107,7 +107,7 @@ struct spectral_search_fft_report {
__le32 info1;
__le32 info2;
__le32 reserve0;
u8 bins[];
} __packed;
struct ath11k_spectral_search_report {


@ -144,6 +144,8 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
.min_len = sizeof(struct wmi_11d_new_cc_ev) },
[WMI_TAG_PER_CHAIN_RSSI_STATS] = {
.min_len = sizeof(struct wmi_per_chain_rssi_stats) },
[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = {
.min_len = sizeof(struct wmi_twt_add_dialog_event) },
};
#define PRIMAP(_hw_mode_) \
@ -3085,11 +3087,12 @@ ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id)
/* TODO add MBSSID support */
cmd->mbss_support = 0;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ENABLE_CMDID);
if (ret) {
ath11k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
dev_kfree_skb(skb);
} else {
ar->twt_enabled = 1;
}
return ret;
}
@ -3114,11 +3117,181 @@ ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id)
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->pdev_id = pdev_id;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DISABLE_CMDID);
if (ret) {
ath11k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
dev_kfree_skb(skb);
} else {
ar->twt_enabled = 0;
}
return ret;
}
int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar,
struct wmi_twt_add_dialog_params *params)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_twt_add_dialog_params_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_twt_add_dialog_params_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ADD_DIALOG_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->vdev_id = params->vdev_id;
ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
cmd->dialog_id = params->dialog_id;
cmd->wake_intvl_us = params->wake_intvl_us;
cmd->wake_intvl_mantis = params->wake_intvl_mantis;
cmd->wake_dura_us = params->wake_dura_us;
cmd->sp_offset_us = params->sp_offset_us;
cmd->flags = params->twt_cmd;
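/* the low bits of flags carry the TWT setup command; the broadcast/trigger/flow type/protection modifiers below live at bits 8-11 */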
if (params->flag_bcast)
cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_BCAST;
if (params->flag_trigger)
cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_TRIGGER;
if (params->flag_flow_type)
cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE;
if (params->flag_protection)
cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_PROTECTION;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"wmi add twt dialog vdev %u dialog id %u wake interval %u mantissa %u wake duration %u service period offset %u flags 0x%x\n",
cmd->vdev_id, cmd->dialog_id, cmd->wake_intvl_us,
cmd->wake_intvl_mantis, cmd->wake_dura_us, cmd->sp_offset_us,
cmd->flags);
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ADD_DIALOG_CMDID);
if (ret) {
ath11k_warn(ab,
"failed to send wmi command to add twt dialog: %d",
ret);
dev_kfree_skb(skb);
}
return ret;
}
int ath11k_wmi_send_twt_del_dialog_cmd(struct ath11k *ar,
struct wmi_twt_del_dialog_params *params)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_twt_del_dialog_params_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_twt_del_dialog_params_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DEL_DIALOG_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->vdev_id = params->vdev_id;
ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
cmd->dialog_id = params->dialog_id;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"wmi delete twt dialog vdev %u dialog id %u\n",
cmd->vdev_id, cmd->dialog_id);
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DEL_DIALOG_CMDID);
if (ret) {
ath11k_warn(ab,
"failed to send wmi command to delete twt dialog: %d",
ret);
dev_kfree_skb(skb);
}
return ret;
}
int ath11k_wmi_send_twt_pause_dialog_cmd(struct ath11k *ar,
struct wmi_twt_pause_dialog_params *params)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_twt_pause_dialog_params_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_twt_pause_dialog_params_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_TWT_PAUSE_DIALOG_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->vdev_id = params->vdev_id;
ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
cmd->dialog_id = params->dialog_id;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"wmi pause twt dialog vdev %u dialog id %u\n",
cmd->vdev_id, cmd->dialog_id);
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_PAUSE_DIALOG_CMDID);
if (ret) {
ath11k_warn(ab,
"failed to send wmi command to pause twt dialog: %d",
ret);
dev_kfree_skb(skb);
}
return ret;
}
int ath11k_wmi_send_twt_resume_dialog_cmd(struct ath11k *ar,
struct wmi_twt_resume_dialog_params *params)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_twt_resume_dialog_params_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_twt_resume_dialog_params_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_TWT_RESUME_DIALOG_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->vdev_id = params->vdev_id;
ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
cmd->dialog_id = params->dialog_id;
cmd->sp_offset_us = params->sp_offset_us;
cmd->next_twt_size = params->next_twt_size;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"wmi resume twt dialog vdev %u dialog id %u service period offset %u next twt subfield size %u\n",
cmd->vdev_id, cmd->dialog_id, cmd->sp_offset_us,
cmd->next_twt_size);
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_RESUME_DIALOG_CMDID);
if (ret) {
ath11k_warn(ab,
"failed to send wmi command to resume twt dialog: %d",
ret);
dev_kfree_skb(skb);
}
return ret;
}
@ -7532,6 +7705,66 @@ ath11k_wmi_diag_event(struct ath11k_base *ab,
trace_ath11k_wmi_diag(ab, skb->data, skb->len);
}
static const char *ath11k_wmi_twt_add_dialog_event_status(u32 status)
{
switch (status) {
case WMI_ADD_TWT_STATUS_OK:
return "ok";
case WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED:
return "twt disabled";
case WMI_ADD_TWT_STATUS_USED_DIALOG_ID:
return "dialog id in use";
case WMI_ADD_TWT_STATUS_INVALID_PARAM:
return "invalid parameters";
case WMI_ADD_TWT_STATUS_NOT_READY:
return "not ready";
case WMI_ADD_TWT_STATUS_NO_RESOURCE:
return "resource unavailable";
case WMI_ADD_TWT_STATUS_NO_ACK:
return "no ack";
case WMI_ADD_TWT_STATUS_NO_RESPONSE:
return "no response";
case WMI_ADD_TWT_STATUS_DENIED:
return "denied";
case WMI_ADD_TWT_STATUS_UNKNOWN_ERROR:
fallthrough;
default:
return "unknown error";
}
}
static void ath11k_wmi_twt_add_dialog_event(struct ath11k_base *ab,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_twt_add_dialog_event *ev;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab,
"failed to parse wmi twt add dialog status event tlv: %d\n",
ret);
return;
}
ev = tb[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch twt add dialog wmi event\n");
goto exit;
}
if (ev->status)
ath11k_warn(ab,
"wmi add twt dialog event vdev %d dialog id %d status %s\n",
ev->vdev_id, ev->dialog_id,
ath11k_wmi_twt_add_dialog_event_status(ev->status));
exit:
kfree(tb);
}
static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@ -7629,11 +7862,17 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
ath11k_wmi_obss_color_collision_event(ab, skb);
break;
case WMI_TWT_ADD_DIALOG_EVENTID:
ath11k_wmi_twt_add_dialog_event(ab, skb);
break;
/* add Unsupported events here */
case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
case WMI_TWT_ENABLE_EVENTID:
case WMI_TWT_DISABLE_EVENTID:
case WMI_TWT_DEL_DIALOG_EVENTID:
case WMI_TWT_PAUSE_DIALOG_EVENTID:
case WMI_TWT_RESUME_DIALOG_EVENTID:
case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
case WMI_PEER_CREATE_CONF_EVENTID:
ath11k_dbg(ab, ATH11K_DBG_WMI,
@ -7798,6 +8037,59 @@ int ath11k_wmi_simulate_radar(struct ath11k *ar)
return ath11k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
}
int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap,
struct ath11k_fw_dbglog *dbglog)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_debug_log_config_cmd_fixed_param *cmd;
struct sk_buff *skb;
struct wmi_tlv *tlv;
int ret, len;
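/* message layout: fixed-param structure followed by a TLV-wrapped u32 array holding the module id bitmap */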
len = sizeof(*cmd) + TLV_HDR_SIZE + (MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_debug_log_config_cmd_fixed_param *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DEBUG_LOG_CONFIG_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->dbg_log_param = dbglog->param;
tlv = (struct wmi_tlv *)((u8 *)cmd + sizeof(*cmd));
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
FIELD_PREP(WMI_TLV_LEN, MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
switch (dbglog->param) {
case WMI_DEBUG_LOG_PARAM_LOG_LEVEL:
case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE:
case WMI_DEBUG_LOG_PARAM_VDEV_DISABLE:
case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP:
cmd->value = dbglog->value;
break;
case WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP:
case WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP:
cmd->value = dbglog->value;
memcpy(tlv->value, module_id_bitmap,
MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
/* clear current config to be used for next user config */
memset(module_id_bitmap, 0,
MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
break;
default:
dev_kfree_skb(skb);
return -EINVAL;
}
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DBGLOG_CFG_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_DBGLOG_CFG_CMDID\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath11k_wmi_connect(struct ath11k_base *ab)
{
u32 i;


@ -12,6 +12,7 @@
struct ath11k_base;
struct ath11k;
struct ath11k_fw_stats;
struct ath11k_fw_dbglog;
#define PSOC_HOST_MAX_NUM_SS (8)
@ -4952,6 +4953,112 @@ struct wmi_twt_disable_params_cmd {
u32 pdev_id;
} __packed;
enum WMI_HOST_TWT_COMMAND {
WMI_HOST_TWT_COMMAND_REQUEST_TWT = 0,
WMI_HOST_TWT_COMMAND_SUGGEST_TWT,
WMI_HOST_TWT_COMMAND_DEMAND_TWT,
WMI_HOST_TWT_COMMAND_TWT_GROUPING,
WMI_HOST_TWT_COMMAND_ACCEPT_TWT,
WMI_HOST_TWT_COMMAND_ALTERNATE_TWT,
WMI_HOST_TWT_COMMAND_DICTATE_TWT,
WMI_HOST_TWT_COMMAND_REJECT_TWT,
};
#define WMI_TWT_ADD_DIALOG_FLAG_BCAST BIT(8)
#define WMI_TWT_ADD_DIALOG_FLAG_TRIGGER BIT(9)
#define WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE BIT(10)
#define WMI_TWT_ADD_DIALOG_FLAG_PROTECTION BIT(11)
struct wmi_twt_add_dialog_params_cmd {
u32 tlv_header;
u32 vdev_id;
struct wmi_mac_addr peer_macaddr;
u32 dialog_id;
u32 wake_intvl_us;
u32 wake_intvl_mantis;
u32 wake_dura_us;
u32 sp_offset_us;
u32 flags;
} __packed;
struct wmi_twt_add_dialog_params {
u32 vdev_id;
u8 peer_macaddr[ETH_ALEN];
u32 dialog_id;
u32 wake_intvl_us;
u32 wake_intvl_mantis;
u32 wake_dura_us;
u32 sp_offset_us;
u8 twt_cmd;
u8 flag_bcast;
u8 flag_trigger;
u8 flag_flow_type;
u8 flag_protection;
} __packed;
enum wmi_twt_add_dialog_status {
WMI_ADD_TWT_STATUS_OK,
WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED,
WMI_ADD_TWT_STATUS_USED_DIALOG_ID,
WMI_ADD_TWT_STATUS_INVALID_PARAM,
WMI_ADD_TWT_STATUS_NOT_READY,
WMI_ADD_TWT_STATUS_NO_RESOURCE,
WMI_ADD_TWT_STATUS_NO_ACK,
WMI_ADD_TWT_STATUS_NO_RESPONSE,
WMI_ADD_TWT_STATUS_DENIED,
WMI_ADD_TWT_STATUS_UNKNOWN_ERROR,
};
struct wmi_twt_add_dialog_event {
u32 vdev_id;
struct wmi_mac_addr peer_macaddr;
u32 dialog_id;
u32 status;
} __packed;
struct wmi_twt_del_dialog_params {
u32 vdev_id;
u8 peer_macaddr[ETH_ALEN];
u32 dialog_id;
} __packed;
struct wmi_twt_del_dialog_params_cmd {
u32 tlv_header;
u32 vdev_id;
struct wmi_mac_addr peer_macaddr;
u32 dialog_id;
} __packed;
struct wmi_twt_pause_dialog_params {
u32 vdev_id;
u8 peer_macaddr[ETH_ALEN];
u32 dialog_id;
} __packed;
struct wmi_twt_pause_dialog_params_cmd {
u32 tlv_header;
u32 vdev_id;
struct wmi_mac_addr peer_macaddr;
u32 dialog_id;
} __packed;
struct wmi_twt_resume_dialog_params {
u32 vdev_id;
u8 peer_macaddr[ETH_ALEN];
u32 dialog_id;
u32 sp_offset_us;
u32 next_twt_size;
} __packed;
struct wmi_twt_resume_dialog_params_cmd {
u32 tlv_header;
u32 vdev_id;
struct wmi_mac_addr peer_macaddr;
u32 dialog_id;
u32 sp_offset_us;
u32 next_twt_size;
} __packed;
struct wmi_obss_spatial_reuse_params_cmd {
u32 tlv_header;
u32 pdev_id;
@ -5240,6 +5347,21 @@ struct wmi_rfkill_state_change_ev {
u32 radio_state;
} __packed;
enum wmi_debug_log_param {
WMI_DEBUG_LOG_PARAM_LOG_LEVEL = 0x1,
WMI_DEBUG_LOG_PARAM_VDEV_ENABLE,
WMI_DEBUG_LOG_PARAM_VDEV_DISABLE,
WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP,
WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP,
WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP,
};
struct wmi_debug_log_config_cmd_fixed_param {
u32 tlv_header;
u32 dbg_log_param;
u32 value;
} __packed;
#define WMI_MAX_MEM_REQS 32
#define MAX_RADIOS 3
@ -5546,6 +5668,14 @@ void ath11k_wmi_fw_stats_fill(struct ath11k *ar,
int ath11k_wmi_simulate_radar(struct ath11k *ar);
int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id);
int ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id);
int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar,
struct wmi_twt_add_dialog_params *params);
int ath11k_wmi_send_twt_del_dialog_cmd(struct ath11k *ar,
struct wmi_twt_del_dialog_params *params);
int ath11k_wmi_send_twt_pause_dialog_cmd(struct ath11k *ar,
struct wmi_twt_pause_dialog_params *params);
int ath11k_wmi_send_twt_resume_dialog_cmd(struct ath11k *ar,
struct wmi_twt_resume_dialog_params *params);
int ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
struct ieee80211_he_obss_pd *he_obss_pd);
int ath11k_wmi_pdev_set_srg_bss_color_bitmap(struct ath11k *ar, u32 *bitmap);
@ -5582,4 +5712,6 @@ int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar);
int ath11k_wmi_wow_enable(struct ath11k *ar);
int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
const u8 mac_addr[ETH_ALEN]);
int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap,
struct ath11k_fw_dbglog *dbglog);
#endif


@ -1217,6 +1217,7 @@ static int ath6kl_usb_pm_resume(struct usb_interface *interface)
static const struct usb_device_id ath6kl_usb_ids[] = {
{USB_DEVICE(0x0cf3, 0x9375)},
{USB_DEVICE(0x0cf3, 0x9374)},
{USB_DEVICE(0x04da, 0x390d)},
{ /* Terminating entry */ },
};


@ -1750,7 +1750,6 @@ static int ath6kl_wmi_snr_threshold_event_rx(struct wmi *wmi, u8 *datap,
static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len)
{
struct wmi_aplist_event *ev = (struct wmi_aplist_event *) datap;
struct wmi_ap_info_v1 *ap_info_v1;
u8 index;
@ -1759,14 +1758,12 @@ static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len)
ev->ap_list_ver != APLIST_VER1)
return -EINVAL;
ap_info_v1 = (struct wmi_ap_info_v1 *) ev->ap_list;
ath6kl_dbg(ATH6KL_DBG_WMI,
"number of APs in aplist event: %d\n", ev->num_ap);
if (len < struct_size(ev, ap_list, ev->num_ap))
return -EINVAL;
/* AP list version 1 contents */
@ -1959,21 +1956,15 @@ static int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
{
struct sk_buff *skb;
struct wmi_start_scan_cmd *sc;
int i, ret;
if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
return -EINVAL;
if (num_chan > WMI_MAX_CHANNELS)
return -EINVAL;
skb = ath6kl_wmi_get_new_buf(struct_size(sc, ch_list, num_chan));
if (!skb)
return -ENOMEM;
@ -2008,7 +1999,7 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
struct ieee80211_supported_band *sband;
struct sk_buff *skb;
struct wmi_begin_scan_cmd *sc;
s8 *supp_rates;
int i, band, ret;
struct ath6kl *ar = wmi->parent_dev;
int num_rates;
@ -2023,18 +2014,13 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
num_chan, ch_list);
}
if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
return -EINVAL;
if (num_chan > WMI_MAX_CHANNELS)
return -EINVAL;
skb = ath6kl_wmi_get_new_buf(struct_size(sc, ch_list, num_chan));
if (!skb)
return -ENOMEM;


@ -863,7 +863,7 @@ struct wmi_begin_scan_cmd {
u8 num_ch;
/* channels in Mhz */
__le16 ch_list[];
} __packed;
/* wmi_start_scan_cmd is to be deprecated. Use
@ -889,7 +889,7 @@ struct wmi_start_scan_cmd {
u8 num_ch;
/* channels in Mhz */
__le16 ch_list[];
} __packed;
/*
@ -1373,7 +1373,7 @@ struct wmi_channel_list_reply {
u8 num_ch;
/* channel in Mhz */
__le16 ch_list[];
} __packed;
/* List of Events (target to host) */
@ -1545,7 +1545,7 @@ struct wmi_connect_event {
u8 beacon_ie_len;
u8 assoc_req_len;
u8 assoc_resp_len;
u8 assoc_info[];
} __packed;
/* Disconnect Event */
@ -1596,7 +1596,7 @@ struct wmi_disconnect_event {
u8 disconn_reason;
u8 assoc_resp_len;
u8 assoc_info[];
} __packed;
/*
@ -1637,7 +1637,7 @@ struct bss_bias {
struct bss_bias_info {
u8 num_bss;
struct bss_bias bss_bias[];
} __packed;
struct low_rssi_scan_params {
@ -1720,7 +1720,7 @@ struct wmi_neighbor_info {
struct wmi_neighbor_report_event {
u8 num_neighbors;
struct wmi_neighbor_info neighbor[];
} __packed;
/* TKIP MIC Error Event */
@ -1957,7 +1957,7 @@ union wmi_ap_info {
struct wmi_aplist_event {
u8 ap_list_ver;
u8 num_ap;
union wmi_ap_info ap_list[];
} __packed;
/* Developer Commands */
@ -2051,7 +2051,7 @@ struct wmi_get_keepalive_cmd {
struct wmi_set_appie_cmd {
u8 mgmt_frm_type; /* enum wmi_mgmt_frame_type */
u8 ie_len;
u8 ie_info[];
} __packed;
struct wmi_set_ie_cmd {
@ -2059,7 +2059,7 @@ struct wmi_set_ie_cmd {
u8 ie_field; /* enum wmi_ie_field_type */
u8 ie_len;
u8 reserved;
u8 ie_info[];
} __packed;
/* Notify the WSC registration status to the target */
@ -2127,7 +2127,7 @@ struct wmi_add_wow_pattern_cmd {
u8 filter_list_id;
u8 filter_size;
u8 filter_offset;
u8 filter[];
} __packed;
struct wmi_del_wow_pattern_cmd {
@ -2360,7 +2360,7 @@ struct wmi_send_action_cmd {
__le32 freq;
__le32 wait;
__le16 len;
u8 data[];
} __packed;
struct wmi_send_mgmt_cmd {
@ -2369,7 +2369,7 @@ struct wmi_send_mgmt_cmd {
__le32 wait;
__le32 no_cck;
__le16 len;
u8 data[];
} __packed;
struct wmi_tx_status_event {
@ -2389,7 +2389,7 @@ struct wmi_set_appie_extended_cmd {
u8 role_id;
u8 mgmt_frm_type;
u8 ie_len;
u8 ie_info[];
} __packed;
struct wmi_remain_on_chnl_event {
@ -2406,18 +2406,18 @@ struct wmi_cancel_remain_on_chnl_event {
struct wmi_rx_action_event {
__le32 freq;
__le16 len;
u8 data[];
} __packed;
struct wmi_p2p_capabilities_event {
__le16 len;
u8 data[];
} __packed;
struct wmi_p2p_rx_probe_req_event {
__le32 freq;
__le16 len;
u8 data[];
} __packed;
#define P2P_FLAG_CAPABILITIES_REQ (0x00000001)
@ -2431,7 +2431,7 @@ struct wmi_get_p2p_info {
struct wmi_p2p_info_event {
__le32 info_req_flags;
__le16 len;
u8 data[];
} __packed;
struct wmi_p2p_capabilities {
@ -2450,7 +2450,7 @@ struct wmi_p2p_probe_response_cmd {
__le32 freq;
u8 destination_addr[ETH_ALEN];
__le16 len;
u8 data[];
} __packed;
/* Extended WMI (WMIX)


@ -1071,8 +1071,9 @@ struct ath_softc {
#endif
#ifdef CONFIG_ATH9K_HWRNG
struct hwrng rng_ops;
u32 rng_last;
char rng_name[sizeof("ath9k_65535")];
#endif
};


@ -43,7 +43,7 @@ static bool ath_mci_add_profile(struct ath_common *common,
struct ath_mci_profile_info *info)
{
struct ath_mci_profile_info *entry;
static const u8 voice_priority[] = { 110, 110, 110, 112, 110, 110, 114, 116, 118 };
if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) &&
(info->type == MCI_GPM_COEX_PROFILE_VOICE))


@ -21,11 +21,6 @@
#include "hw.h"
#include "ar9003_phy.h"
#define ATH9K_RNG_BUF_SIZE 320
#define ATH9K_RNG_ENTROPY(x) (((x) * 8 * 10) >> 5) /* quality: 10/32 */
static DECLARE_WAIT_QUEUE_HEAD(rng_queue);
static int ath9k_rng_data_read(struct ath_softc *sc, u32 *buf, u32 buf_size)
{
int i, j;
@ -71,61 +66,56 @@ static u32 ath9k_rng_delay_get(u32 fail_stats)
return delay;
}
static int ath9k_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
struct ath_softc *sc = container_of(rng, struct ath_softc, rng_ops);
u32 fail_stats = 0, word;
int bytes_read = 0;
for (;;) {
if (max & ~3UL)
bytes_read = ath9k_rng_data_read(sc, buf, max >> 2);
if ((max & 3UL) && ath9k_rng_data_read(sc, &word, 1)) {
memcpy(buf + bytes_read, &word, max & 3UL);
bytes_read += max & 3UL;
memzero_explicit(&word, sizeof(word));
}
if (!wait || !max || likely(bytes_read) || fail_stats > 110)
break;
msleep_interruptible(ath9k_rng_delay_get(++fail_stats));
}
if (wait && !bytes_read && max)
bytes_read = -EIO;
return bytes_read;
}
void ath9k_rng_start(struct ath_softc *sc)
{
static atomic_t serial = ATOMIC_INIT(0);
struct ath_hw *ah = sc->sc_ah;
if (sc->rng_ops.read)
return;
if (!AR_SREV_9300_20_OR_LATER(ah))
return;
snprintf(sc->rng_name, sizeof(sc->rng_name), "ath9k_%u",
(atomic_inc_return(&serial) - 1) & U16_MAX);
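/* give every ath9k instance a unique hwrng name via the serial counter */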
sc->rng_ops.name = sc->rng_name;
sc->rng_ops.read = ath9k_rng_read;
sc->rng_ops.quality = 320;
if (devm_hwrng_register(sc->dev, &sc->rng_ops))
sc->rng_ops.read = NULL;
}
void ath9k_rng_stop(struct ath_softc *sc)
{
if (sc->rng_ops.read) {
devm_hwrng_unregister(sc->dev, &sc->rng_ops);
sc->rng_ops.read = NULL;
}
}


@ -458,7 +458,6 @@ struct ar9170 {
# define CARL9170_HWRNG_CACHE_SIZE CARL9170_MAX_CMD_PAYLOAD_LEN
struct {
struct hwrng rng;
bool initialized;
char name[30 + 1];
u16 cache[CARL9170_HWRNG_CACHE_SIZE / sizeof(u16)];
unsigned int cache_idx;


@ -149,7 +149,7 @@ struct carl9170fw_fix_entry {
struct carl9170fw_fix_desc {
struct carl9170fw_desc_head head;
struct carl9170fw_fix_entry data[];
} __packed;
#define CARL9170FW_FIX_DESC_SIZE \
(sizeof(struct carl9170fw_fix_desc))


@ -1412,7 +1412,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
return -EOPNOTSUPP;
tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
GFP_KERNEL);
if (!tid_info)
return -ENOMEM;
@ -1494,7 +1494,7 @@ static int carl9170_register_wps_button(struct ar9170 *ar)
if (!(ar->features & CARL9170_WPS_BUTTON))
return 0;
input = devm_input_allocate_device(&ar->udev->dev);
if (!input)
return -ENOMEM;
@ -1512,10 +1512,8 @@ static int carl9170_register_wps_button(struct ar9170 *ar)
input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);
err = input_register_device(input);
if (err)
return err;
ar->wps.pbc = input;
return 0;
@ -1539,7 +1537,7 @@ static int carl9170_rng_get(struct ar9170 *ar)
BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);
if (!IS_ACCEPTING_CMD(ar))
return -EAGAIN;
count = ARRAY_SIZE(ar->rng.cache);
@ -1585,14 +1583,6 @@ static int carl9170_rng_read(struct hwrng *rng, u32 *data)
return sizeof(u16);
}
static void carl9170_unregister_hwrng(struct ar9170 *ar)
{
if (ar->rng.initialized) {
hwrng_unregister(&ar->rng.rng);
ar->rng.initialized = false;
}
}
static int carl9170_register_hwrng(struct ar9170 *ar)
{
int err;
@ -1603,25 +1593,14 @@ static int carl9170_register_hwrng(struct ar9170 *ar)
ar->rng.rng.data_read = carl9170_rng_read;
ar->rng.rng.priv = (unsigned long)ar;
if (WARN_ON(ar->rng.initialized))
return -EALREADY;
err = devm_hwrng_register(&ar->udev->dev, &ar->rng.rng);
if (err) {
dev_err(&ar->udev->dev, "Failed to register the random "
"number generator (%d)\n", err);
return err;
}
return carl9170_rng_get(ar);
}
#endif /* CONFIG_CARL9170_HWRNG */
@ -1914,7 +1893,7 @@ static int carl9170_parse_eeprom(struct ar9170 *ar)
WARN_ON(!(tx_streams >= 1 && tx_streams <=
IEEE80211_HT_MCS_TX_MAX_STREAMS));
tx_params |= (tx_streams - 1) <<
IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
@ -1937,7 +1916,8 @@ static int carl9170_parse_eeprom(struct ar9170 *ar)
if (!bands)
return -EINVAL;
ar->survey = devm_kcalloc(&ar->udev->dev, chans,
sizeof(struct survey_info), GFP_KERNEL);
if (!ar->survey)
return -ENOMEM;
ar->num_channels = chans;
@ -1964,11 +1944,7 @@ int carl9170_register(struct ar9170 *ar)
struct ath_regulatory *regulatory = &ar->common.regulatory;
int err = 0, i;
ar->mem_bitmap = devm_bitmap_zalloc(&ar->udev->dev, ar->fw.mem_blocks, GFP_KERNEL);
if (!ar->mem_bitmap)
return -ENOMEM;
@ -2057,17 +2033,6 @@ void carl9170_unregister(struct ar9170 *ar)
carl9170_debugfs_unregister(ar);
#endif /* CONFIG_CARL9170_DEBUGFS */
#ifdef CONFIG_CARL9170_WPC
if (ar->wps.pbc) {
input_unregister_device(ar->wps.pbc);
ar->wps.pbc = NULL;
}
#endif /* CONFIG_CARL9170_WPC */
#ifdef CONFIG_CARL9170_HWRNG
carl9170_unregister_hwrng(ar);
#endif /* CONFIG_CARL9170_HWRNG */
carl9170_cancel_worker(ar);
cancel_work_sync(&ar->restart_work);
@ -2082,12 +2047,6 @@ void carl9170_free(struct ar9170 *ar)
kfree_skb(ar->rx_failover);
ar->rx_failover = NULL;
bitmap_free(ar->mem_bitmap);
ar->mem_bitmap = NULL;
kfree(ar->survey);
ar->survey = NULL;
mutex_destroy(&ar->mutex);
ieee80211_free_hw(ar->hw);


@ -327,7 +327,7 @@ struct _carl9170_tx_superdesc {
struct _carl9170_tx_superframe {
struct _carl9170_tx_superdesc s;
struct _ar9170_tx_hwdesc f;
u8 frame_data[];
} __packed __aligned(4);
#define CARL9170_TX_SUPERDESC_LEN 24


@ -667,14 +667,14 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
/*
* Some users have reported their EEPROM programmed with
* 0x8000 set, this is not a supported regulatory domain
* but since we have more than one user with it we need
* a solution for them. We default to 0x64, which is the
* default Atheros world regulatory domain.
*/
static void ath_regd_sanitize(struct ath_regulatory *reg)
{
if (reg->current_rd != COUNTRY_ERD_FLAG)
return;
printk(KERN_DEBUG "ath: EEPROM regdomain sanitized\n");
reg->current_rd = 0x64;


@ -108,7 +108,7 @@ struct fft_sample_ath10k {
u8 avgpwr_db;
u8 max_exp;
u8 data[];
} __packed;
struct fft_sample_ath11k {
@ -123,7 +123,7 @@ struct fft_sample_ath11k {
__be32 tsf;
__be32 noise;
u8 data[];
} __packed;
#endif /* SPECTRAL_COMMON_H */


@ -394,7 +394,7 @@ static void wcn36xx_change_opchannel(struct wcn36xx *wcn, int ch)
struct ieee80211_vif *vif = NULL;
struct wcn36xx_vif *tmp;
struct ieee80211_supported_band *band;
struct ieee80211_channel *channel = NULL;
unsigned long flags;
int i, j;
@ -1391,11 +1391,11 @@ static int wcn36xx_get_survey(struct ieee80211_hw *hw, int idx,
spin_unlock_irqrestore(&wcn->survey_lock, flags);
wcn36xx_dbg(WCN36XX_DBG_MAC,
"ch %d rssi %d snr %d noise %d filled %x freq %d\n",
HW_VALUE_CHANNEL(survey->channel->hw_value),
chan_survey->rssi, chan_survey->snr, survey->noise,
survey->filled, survey->channel->center_freq);
return 0;
}
@ -1583,6 +1583,9 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
if (iris_node) {
if (of_device_is_compatible(iris_node, "qcom,wcn3620"))
wcn->rf_id = RF_IRIS_WCN3620;
if (of_device_is_compatible(iris_node, "qcom,wcn3660") ||
of_device_is_compatible(iris_node, "qcom,wcn3660b"))
wcn->rf_id = RF_IRIS_WCN3660;
if (of_device_is_compatible(iris_node, "qcom,wcn3680"))
wcn->rf_id = RF_IRIS_WCN3680;
of_node_put(iris_node);


@ -3347,7 +3347,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
case WCN36XX_HAL_PRINT_REG_INFO_IND:
case WCN36XX_HAL_SCAN_OFFLOAD_IND:
msg_ind = kmalloc(struct_size(msg_ind, msg, len), GFP_ATOMIC);
if (!msg_ind) {
wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
msg_header->msg_type);


@ -376,8 +376,8 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
status.freq = WCN36XX_CENTER_FREQ(wcn);
}
wcn36xx_update_survey(wcn, status.signal, get_snr(bd),
status.band, status.freq);
if (bd->rate_id < ARRAY_SIZE(wcn36xx_rate_table)) {
rate = &wcn36xx_rate_table[bd->rate_id];


@ -97,6 +97,7 @@ enum wcn36xx_ampdu_state {
#define RF_UNKNOWN 0x0000
#define RF_IRIS_WCN3620 0x3620
#define RF_IRIS_WCN3660 0x3660
#define RF_IRIS_WCN3680 0x3680
static inline void buff_to_be(u32 *buf, size_t len)


@ -4623,7 +4623,7 @@ exit:
s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif)
{
static const s32 pktflags[] = {
BRCMF_VNDR_IE_PRBREQ_FLAG,
BRCMF_VNDR_IE_PRBRSP_FLAG,
BRCMF_VNDR_IE_BEACON_FLAG


@ -704,6 +704,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
{
switch (ci->pub.chip) {
case BRCM_CC_4345_CHIP_ID:
case BRCM_CC_43454_CHIP_ID:
return 0x198000;
case BRCM_CC_4335_CHIP_ID:
case BRCM_CC_4339_CHIP_ID:
@ -1401,6 +1402,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
case BRCM_CC_4354_CHIP_ID:
case BRCM_CC_4356_CHIP_ID:
case BRCM_CC_4345_CHIP_ID:
case BRCM_CC_43454_CHIP_ID:
/* explicitly check SR engine enable bit */
pmu_cc3_mask = BIT(2);
fallthrough;


@ -248,7 +248,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
brcmf_feat_firmware_capabilities(ifp);
memset(&gscan_cfg, 0, sizeof(gscan_cfg));
if (drvr->bus_if->chip != BRCM_CC_43430_CHIP_ID &&
drvr->bus_if->chip != BRCM_CC_4345_CHIP_ID)
drvr->bus_if->chip != BRCM_CC_4345_CHIP_ID &&
drvr->bus_if->chip != BRCM_CC_43454_CHIP_ID)
brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN,
"pfn_gscan_cfg",
&gscan_cfg, sizeof(gscan_cfg));


@ -1068,7 +1068,7 @@ struct brcmf_mkeep_alive_pkt_le {
__le32 period_msec;
__le16 len_bytes;
u8 keep_alive_id;
u8 data[0];
u8 data[];
} __packed;
#endif /* FWIL_TYPES_H_ */


@ -651,6 +651,7 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFC, 43430B0),
BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0x00000200, 43456),
BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFDC0, 43455),
BRCMF_FW_ENTRY(BRCM_CC_43454_CHIP_ID, 0x00000040, 43455),
BRCMF_FW_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354),
BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),


@ -15,7 +15,7 @@
struct brcmf_xtlv {
u16 id;
u16 len;
u8 data[0];
u8 data[];
};
enum brcmf_xtlv_option {


@ -32,6 +32,7 @@
#define BRCM_CC_4339_CHIP_ID 0x4339
#define BRCM_CC_43430_CHIP_ID 43430
#define BRCM_CC_4345_CHIP_ID 0x4345
#define BRCM_CC_43454_CHIP_ID 43454
#define BRCM_CC_43465_CHIP_ID 43465
#define BRCM_CC_4350_CHIP_ID 0x4350
#define BRCM_CC_43525_CHIP_ID 43525


@ -1,15 +1,16 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
* Copyright (C) 2018-2021 Intel Corporation
* Copyright (C) 2018-2022 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
#include "iwl-prph.h"
#include "fw/api/txq.h"
/* Highest firmware API version supported */
#define IWL_22000_UCODE_API_MAX 69
#define IWL_22000_UCODE_API_MAX 72
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@ -39,6 +40,7 @@
#define IWL_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-"
#define IWL_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-"
#define IWL_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-"
#define IWL_SO_A_MR_A_FW_PRE "iwlwifi-so-a0-mr-a0-"
#define IWL_SNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-"
#define IWL_SNJ_A_GF_A_FW_PRE "iwlwifi-SoSnj-a0-gf-a0-"
#define IWL_SNJ_A_HR_B_FW_PRE "iwlwifi-SoSnj-a0-hr-b0-"
@ -119,8 +121,6 @@
IWL_BZ_A_FM_A_FW_PRE __stringify(api) ".ucode"
#define IWL_GL_A_FM_A_MODULE_FIRMWARE(api) \
IWL_GL_A_FM_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BZ_Z_GF_A_MODULE_FIRMWARE(api) \
IWL_BZ_Z_GF_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_FM_A_MODULE_FIRMWARE(api) \
IWL_BNJ_A_FM_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(api) \
@ -224,7 +224,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.trans.base_params = &iwl_ax210_base_params, \
.min_txq_size = 128, \
.gp2_reg_addr = 0xd02c68, \
.min_256_ba_txq_size = 1024, \
.min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_HE, \
.mon_dram_regs = { \
.write_ptr = { \
.addr = DBGC_CUR_DBGBUF_STATUS, \
@ -285,7 +285,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.trans.base_params = &iwl_ax210_base_params, \
.min_txq_size = 128, \
.gp2_reg_addr = 0xd02c68, \
.min_256_ba_txq_size = 1024, \
.min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \
.mon_dram_regs = { \
.write_ptr = { \
.addr = DBGC_CUR_DBGBUF_STATUS, \
@ -299,6 +299,12 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.addr = DBGC_CUR_DBGBUF_STATUS, \
.mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \
}, \
}, \
.mon_dbgi_regs = { \
.write_ptr = { \
.addr = DBGI_SRAM_FIFO_POINTERS, \
.mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \
}, \
}
const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = {
@ -385,6 +391,21 @@ const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg = {
.mq_rx_supported = true,
.use_tfh = true,
.rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_AX210,
.base_params = &iwl_ax210_base_params,
.umac_prph_offset = 0x300000,
.integrated = true,
.low_latency_xtal = true,
.xtal_latency = 12000,
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
.imr_enabled = true,
};
/*
* If the device doesn't support HE, no need to have that many buffers.
* 22000 devices can split multiple frames into a single RB, so fewer are
@ -476,6 +497,7 @@ const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
const char iwl_ax204_name[] = "Intel(R) Wi-Fi 6 AX204 160MHz";
const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz";
const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz";
@ -816,6 +838,20 @@ const struct iwl_cfg iwl_cfg_ma_a0_mr_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_ma_a0_ms_a0 = {
.fw_name_pre = IWL_MA_A_MR_A_FW_PRE,
.uhb_supported = false,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_so_a0_ms_a0 = {
.fw_name_pre = IWL_SO_A_MR_A_FW_PRE,
.uhb_supported = false,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_ma_a0_fm_a0 = {
.fw_name_pre = IWL_MA_A_FM_A_FW_PRE,
.uhb_supported = true,
@ -830,6 +866,13 @@ const struct iwl_cfg iwl_cfg_snj_a0_mr_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_snj_a0_ms_a0 = {
.fw_name_pre = IWL_SNJ_A_MR_A_FW_PRE,
.uhb_supported = false,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_so_a0_hr_a0 = {
.fw_name_pre = IWL_SO_A_HR_B_FW_PRE,
IWL_DEVICE_AX210,


@ -299,7 +299,7 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw)
priv->is_open = 1;
IWL_DEBUG_MAC80211(priv, "leave\n");
return 0;
return ret;
}
static void iwlagn_mac_stop(struct ieee80211_hw *hw)


@ -48,6 +48,7 @@
#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(IWLWIFI);
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search.


@ -3,7 +3,7 @@
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
* Copyright(c) 2018, 2020 Intel Corporation
* Copyright(c) 2018, 2020-2021 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@ -915,7 +915,7 @@ static void iwlagn_rx_noa_notification(struct iwl_priv *priv,
len += 1 + 2;
copylen += 1 + 2;
new_data = kmalloc(sizeof(*new_data) + len, GFP_ATOMIC);
new_data = kmalloc(struct_size(new_data, data, len), GFP_ATOMIC);
if (new_data) {
new_data->length = len;
new_data->data[0] = WLAN_EID_VENDOR_SPECIFIC;
@ -1015,8 +1015,7 @@ void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
/* No handling needed */
IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
iwl_get_cmd_string(priv->trans,
iwl_cmd_id(pkt->hdr.cmd,
0, 0)),
WIDE_ID(0, pkt->hdr.cmd)),
pkt->hdr.cmd);
}
}


@ -4,6 +4,7 @@
* Copyright (C) 2019-2022 Intel Corporation
*/
#include <linux/uuid.h>
#include <linux/dmi.h>
#include "iwl-drv.h"
#include "iwl-debug.h"
#include "acpi.h"
@ -19,6 +20,30 @@ const guid_t iwl_rfi_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29,
0xDD, 0x26, 0xB5, 0xFD);
IWL_EXPORT_SYMBOL(iwl_rfi_guid);
static const struct dmi_system_id dmi_ppag_approved_list[] = {
{ .ident = "HP",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
},
},
{ .ident = "SAMSUNG",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
},
},
{ .ident = "MSFT",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
},
},
{ .ident = "ASUS",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."),
},
},
{}
};
static int iwl_acpi_get_handle(struct device *dev, acpi_string method,
acpi_handle *ret_handle)
{
@ -242,7 +267,7 @@ found:
IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg_range);
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
struct iwl_tas_config_cmd_v3 *cmd)
union iwl_tas_config_cmd *cmd, int fw_ver)
{
union acpi_object *wifi_pkg, *data;
int ret, tbl_rev, i, block_list_size, enabled;
@ -268,10 +293,18 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
(tas_selection & ACPI_WTAS_OVERRIDE_IEC_MSK) >> ACPI_WTAS_OVERRIDE_IEC_POS;
u16 enabled_iec = (tas_selection & ACPI_WTAS_ENABLE_IEC_MSK) >>
ACPI_WTAS_ENABLE_IEC_POS;
u8 usa_tas_uhb = (tas_selection & ACPI_WTAS_USA_UHB_MSK) >> ACPI_WTAS_USA_UHB_POS;
enabled = tas_selection & ACPI_WTAS_ENABLED_MSK;
cmd->override_tas_iec = cpu_to_le16(override_iec);
cmd->enable_tas_iec = cpu_to_le16(enabled_iec);
if (fw_ver <= 3) {
cmd->v3.override_tas_iec = cpu_to_le16(override_iec);
cmd->v3.enable_tas_iec = cpu_to_le16(enabled_iec);
} else {
cmd->v4.usa_tas_uhb_allowed = usa_tas_uhb;
cmd->v4.override_tas_iec = (u8)override_iec;
cmd->v4.enable_tas_iec = (u8)enabled_iec;
}
} else if (tbl_rev == 0 &&
wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) {
@ -297,7 +330,7 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
goto out_free;
}
block_list_size = wifi_pkg->package.elements[2].integer.value;
cmd->block_list_size = cpu_to_le32(block_list_size);
cmd->v4.block_list_size = cpu_to_le32(block_list_size);
IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", block_list_size);
if (block_list_size > APCI_WTAS_BLACK_LIST_MAX) {
@ -319,7 +352,7 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
}
country = wifi_pkg->package.elements[3 + i].integer.value;
cmd->block_list_array[i] = cpu_to_le32(country);
cmd->v4.block_list_array[i] = cpu_to_le32(country);
IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country);
}
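
Because the ACPI WTAS data is now fed into either the v3 or the v4 command layout (the union iwl_tas_config_cmd added later in this diff), iwl_acpi_get_tas() picks the member based on the firmware command version. A hedged standalone sketch of that selection pattern, using simplified host-endian stand-ins rather than the real __le16/__le32 structs:

#include <stdint.h>
#include <string.h>

struct tas_cmd_v3 { uint16_t override_tas_iec; uint16_t enable_tas_iec; };
struct tas_cmd_v4 { uint8_t override_tas_iec; uint8_t enable_tas_iec;
		    uint8_t usa_tas_uhb_allowed; uint8_t reserved; };

union tas_cmd {
	struct tas_cmd_v3 v3;
	struct tas_cmd_v4 v4;
};

/* fill whichever layout the firmware API version expects */
static void tas_fill(union tas_cmd *cmd, int fw_ver,
		     unsigned int override_iec, unsigned int enable_iec,
		     unsigned int usa_uhb)
{
	memset(cmd, 0, sizeof(*cmd));
	if (fw_ver <= 3) {
		cmd->v3.override_tas_iec = (uint16_t)override_iec;
		cmd->v3.enable_tas_iec = (uint16_t)enable_iec;
	} else {
		cmd->v4.override_tas_iec = (uint8_t)override_iec;
		cmd->v4.enable_tas_iec = (uint8_t)enable_iec;
		cmd->v4.usa_tas_uhb_allowed = (uint8_t)usa_uhb;
	}
}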
@ -529,8 +562,8 @@ IWL_EXPORT_SYMBOL(iwl_sar_select_profile);
int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *table, *data;
bool enabled;
int ret, tbl_rev;
u32 flags;
u8 num_chains, num_sub_bands;
data = iwl_acpi_get_object(fwrt->dev, ACPI_WRDS_METHOD);
@ -596,7 +629,8 @@ read_table:
IWL_DEBUG_RADIO(fwrt, "Reading WRDS tbl_rev=%d\n", tbl_rev);
enabled = !!(wifi_pkg->package.elements[1].integer.value);
flags = wifi_pkg->package.elements[1].integer.value;
fwrt->reduced_power_flags = flags >> IWL_REDUCE_POWER_FLAGS_POS;
/* position of the actual table */
table = &wifi_pkg->package.elements[2];
@ -604,7 +638,8 @@ read_table:
/* The profile from WRDS is officially profile 1, but goes
* into sar_profiles[0] (because we don't have a profile 0).
*/
ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0], enabled,
ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0],
flags & IWL_SAR_ENABLE_MSK,
num_chains, num_sub_bands);
out_free:
kfree(data);
@ -962,3 +997,181 @@ __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
return config_bitmap;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_lari_config_bitmap);
int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *data, *flags;
int i, j, ret, tbl_rev, num_sub_bands = 0;
int idx = 2;
fwrt->ppag_flags = 0;
data = iwl_acpi_get_object(fwrt->dev, ACPI_PPAG_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
/* try to read ppag table rev 2 or 1 (both have the same data size) */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev == 1 || tbl_rev == 2) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
IWL_DEBUG_RADIO(fwrt,
"Reading PPAG table v2 (tbl_rev=%d)\n",
tbl_rev);
goto read_table;
} else {
ret = -EINVAL;
goto out_free;
}
}
/* try to read ppag table revision 0 */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_PPAG_WIFI_DATA_SIZE_V1, &tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 0) {
ret = -EINVAL;
goto out_free;
}
num_sub_bands = IWL_NUM_SUB_BANDS_V1;
IWL_DEBUG_RADIO(fwrt, "Reading PPAG table v1 (tbl_rev=0)\n");
goto read_table;
}
read_table:
fwrt->ppag_ver = tbl_rev;
flags = &wifi_pkg->package.elements[1];
if (flags->type != ACPI_TYPE_INTEGER) {
ret = -EINVAL;
goto out_free;
}
fwrt->ppag_flags = flags->integer.value & ACPI_PPAG_MASK;
if (!fwrt->ppag_flags) {
ret = 0;
goto out_free;
}
/*
* read, verify gain values and save them into the PPAG table.
* first sub-band (j=0) corresponds to Low-Band (2.4GHz), and the
* following sub-bands to High-Band (5GHz).
*/
for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
for (j = 0; j < num_sub_bands; j++) {
union acpi_object *ent;
ent = &wifi_pkg->package.elements[idx++];
if (ent->type != ACPI_TYPE_INTEGER) {
ret = -EINVAL;
goto out_free;
}
fwrt->ppag_chains[i].subbands[j] = ent->integer.value;
if ((j == 0 &&
(fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB ||
fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) ||
(j != 0 &&
(fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB ||
fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) {
fwrt->ppag_flags = 0;
ret = -EINVAL;
goto out_free;
}
}
}
ret = 0;
out_free:
kfree(data);
return ret;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_ppag_table);
int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd,
int *cmd_size)
{
u8 cmd_ver;
int i, j, num_sub_bands;
s8 *gain;
if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
IWL_DEBUG_RADIO(fwrt,
"PPAG capability not supported by FW, command not sent.\n");
return -EINVAL;
}
if (!fwrt->ppag_flags) {
IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n");
return -EINVAL;
}
/* The 'flags' field is the same in v1 and in v2 so we can just
* use v1 to access it.
*/
cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
WIDE_ID(PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD),
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver == 1) {
num_sub_bands = IWL_NUM_SUB_BANDS_V1;
gain = cmd->v1.gain[0];
*cmd_size = sizeof(cmd->v1);
if (fwrt->ppag_ver == 1 || fwrt->ppag_ver == 2) {
IWL_DEBUG_RADIO(fwrt,
"PPAG table rev is %d but FW supports v1, sending truncated table\n",
fwrt->ppag_ver);
cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
}
} else if (cmd_ver == 2 || cmd_ver == 3) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
gain = cmd->v2.gain[0];
*cmd_size = sizeof(cmd->v2);
if (fwrt->ppag_ver == 0) {
IWL_DEBUG_RADIO(fwrt,
"PPAG table is v1 but FW supports v2, sending padded table\n");
} else if (cmd_ver == 2 && fwrt->ppag_ver == 2) {
IWL_DEBUG_RADIO(fwrt,
"PPAG table is v3 but FW supports v2, sending partial bitmap.\n");
cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
}
} else {
IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
return -EINVAL;
}
for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
for (j = 0; j < num_sub_bands; j++) {
gain[i * num_sub_bands + j] =
fwrt->ppag_chains[i].subbands[j];
IWL_DEBUG_RADIO(fwrt,
"PPAG table: chain[%d] band[%d]: gain = %d\n",
i, j, gain[i * num_sub_bands + j]);
}
}
return 0;
}
IWL_EXPORT_SYMBOL(iwl_read_ppag_table);
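
In iwl_read_ppag_table() above, the per-chain/per-sub-band gains are copied into the flat per-version command buffer via gain[i * num_sub_bands + j]. A small standalone illustration of that row-major flattening; the constants and names are examples only, not driver values:

#include <stdio.h>

#define NUM_CHAINS	2
#define NUM_SUBBANDS	5

/* copy a [chain][subband] table into the flat layout a command expects */
static void flatten_gains(const signed char in[NUM_CHAINS][NUM_SUBBANDS],
			  signed char out[NUM_CHAINS * NUM_SUBBANDS])
{
	for (int i = 0; i < NUM_CHAINS; i++)
		for (int j = 0; j < NUM_SUBBANDS; j++)
			out[i * NUM_SUBBANDS + j] = in[i][j];
}

int main(void)
{
	const signed char in[NUM_CHAINS][NUM_SUBBANDS] = {
		{ 10, 12, 12, 14, 14 },
		{  8, 10, 10, 12, 12 },
	};
	signed char out[NUM_CHAINS * NUM_SUBBANDS];

	flatten_gains(in, out);
	/* chain 1, sub-band 0 lands at index 1 * NUM_SUBBANDS + 0 = 5 */
	printf("out[5] = %d\n", out[5]);
	return 0;
}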
bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt)
{
if (!dmi_check_system(dmi_ppag_approved_list)) {
IWL_DEBUG_RADIO(fwrt,
"System vendor '%s' is not in the approved list, disabling PPAG.\n",
dmi_get_system_info(DMI_SYS_VENDOR));
fwrt->ppag_flags = 0;
return false;
}
return true;
}
IWL_EXPORT_SYMBOL(iwl_acpi_is_ppag_approved);


@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
* Copyright (C) 2018-2021 Intel Corporation
* Copyright (C) 2018-2022 Intel Corporation
*/
#ifndef __iwl_fw_acpi__
#define __iwl_fw_acpi__
@ -77,6 +77,8 @@
#define ACPI_WTAS_ENABLE_IEC_MSK 0x4
#define ACPI_WTAS_OVERRIDE_IEC_POS 0x1
#define ACPI_WTAS_ENABLE_IEC_POS 0x2
#define ACPI_WTAS_USA_UHB_MSK BIT(16)
#define ACPI_WTAS_USA_UHB_POS 16
#define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \
@ -89,6 +91,11 @@
#define ACPI_PPAG_MAX_LB 24
#define ACPI_PPAG_MIN_HB -16
#define ACPI_PPAG_MAX_HB 40
#define ACPI_PPAG_MASK 3
#define IWL_PPAG_ETSI_MASK BIT(0)
#define IWL_SAR_ENABLE_MSK BIT(0)
#define IWL_REDUCE_POWER_FLAGS_POS 1
/*
* The profile for revision 2 is a superset of revision 1, which is in
@ -126,7 +133,8 @@ enum iwl_dsm_funcs_rev_0 {
DSM_FUNC_ENABLE_6E = 3,
DSM_FUNC_11AX_ENABLEMENT = 6,
DSM_FUNC_ENABLE_UNII4_CHAN = 7,
DSM_FUNC_ACTIVATE_CHANNEL = 8
DSM_FUNC_ACTIVATE_CHANNEL = 8,
DSM_FUNC_FORCE_DISABLE_CHANNELS = 9
};
enum iwl_dsm_values_srd {
@ -213,10 +221,17 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
u32 n_bands, u32 n_profiles);
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
struct iwl_tas_config_cmd_v3 *cmd);
union iwl_tas_config_cmd *cmd, int fw_ver);
__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt);
int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd,
int *cmd_size);
bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt);
#else /* CONFIG_ACPI */
static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
@ -294,7 +309,7 @@ static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
}
static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
struct iwl_tas_config_cmd_v3 *cmd)
union iwl_tas_config_cmd *cmd, int fw_ver)
{
return -ENOENT;
}
@ -304,6 +319,22 @@ static inline __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt
return 0;
}
static inline int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
{
return -ENOENT;
}
static inline int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt,
union iwl_ppag_table_cmd *cmd, int *cmd_size)
{
return -ENOENT;
}
static inline bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt)
{
return false;
}
#endif /* CONFIG_ACPI */
static inline union acpi_object *


@ -322,14 +322,6 @@ enum iwl_legacy_cmds {
*/
REPLY_THERMAL_MNG_BACKOFF = 0x7e,
/**
* @DC2DC_CONFIG_CMD:
* Set/Get DC2DC frequency tune
* Command is &struct iwl_dc2dc_config_cmd,
* response is &struct iwl_dc2dc_config_resp
*/
DC2DC_CONFIG_CMD = 0x83,
/**
* @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd
*/
@ -608,6 +600,11 @@ enum iwl_system_subcmd_ids {
* @SYSTEM_FEATURES_CONTROL_CMD: &struct iwl_system_features_control_cmd
*/
SYSTEM_FEATURES_CONTROL_CMD = 0xd,
/**
* @RFI_DEACTIVATE_NOTIF: &struct iwl_rfi_deactivate_notif
*/
RFI_DEACTIVATE_NOTIF = 0xff,
};
#endif /* __iwl_fw_api_commands_h__ */


@ -114,37 +114,4 @@ enum iwl_dc2dc_config_id {
DCDC_FREQ_TUNE_SET = 0x2,
}; /* MARKER_ID_API_E_VER_1 */
/**
* struct iwl_dc2dc_config_cmd - configure dc2dc values
*
* (DC2DC_CONFIG_CMD = 0x83)
*
* Set/Get & configure dc2dc values.
* The command always returns the current dc2dc values.
*
* @flags: set/get dc2dc
* @enable_low_power_mode: not used.
* @dc2dc_freq_tune0: frequency divider - digital domain
* @dc2dc_freq_tune1: frequency divider - analog domain
*/
struct iwl_dc2dc_config_cmd {
__le32 flags;
__le32 enable_low_power_mode; /* not used */
__le32 dc2dc_freq_tune0;
__le32 dc2dc_freq_tune1;
} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */
/**
* struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd
*
* Current dc2dc values returned by the FW.
*
* @dc2dc_freq_tune0: frequency divider - digital domain
* @dc2dc_freq_tune1: frequency divider - analog domain
*/
struct iwl_dc2dc_config_resp {
__le32 dc2dc_freq_tune0;
__le32 dc2dc_freq_tune1;
} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */
#endif /* __iwl_fw_api_config_h__ */


@ -42,7 +42,7 @@ enum iwl_data_path_subcmd_ids {
RFH_QUEUE_CONFIG_CMD = 0xD,
/**
* @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
* @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd_v4
*/
TLC_MNG_CONFIG_CMD = 0xF,
@ -57,6 +57,20 @@ enum iwl_data_path_subcmd_ids {
*/
CHEST_COLLECTOR_FILTER_CONFIG_CMD = 0x14,
/**
* @RX_BAID_ALLOCATION_CONFIG_CMD: Allocate/deallocate a BAID for an RX
* blockack session, uses &struct iwl_rx_baid_cfg_cmd for the
* command, and &struct iwl_rx_baid_cfg_resp as a response.
*/
RX_BAID_ALLOCATION_CONFIG_CMD = 0x16,
/**
* @SCD_QUEUE_CONFIG_CMD: new scheduler queue allocation/config/removal
* command, uses &struct iwl_scd_queue_cfg_cmd and the response
* is (same as before) &struct iwl_tx_queue_cfg_rsp.
*/
SCD_QUEUE_CONFIG_CMD = 0x17,
/**
* @MONITOR_NOTIF: Datapath monitoring notification, using
* &struct iwl_datapath_monitor_notif
@ -257,4 +271,136 @@ struct iwl_rlc_config_cmd {
u8 reserved[3];
} __packed; /* RLC_CONFIG_CMD_API_S_VER_2 */
#define IWL_MAX_BAID_OLD 16 /* MAX_IMMEDIATE_BA_API_D_VER_2 */
#define IWL_MAX_BAID 32 /* MAX_IMMEDIATE_BA_API_D_VER_3 */
/**
* enum iwl_rx_baid_action - BAID allocation/config action
* @IWL_RX_BAID_ACTION_ADD: add a new BAID session
* @IWL_RX_BAID_ACTION_MODIFY: modify the BAID session
* @IWL_RX_BAID_ACTION_REMOVE: remove the BAID session
*/
enum iwl_rx_baid_action {
IWL_RX_BAID_ACTION_ADD,
IWL_RX_BAID_ACTION_MODIFY,
IWL_RX_BAID_ACTION_REMOVE,
}; /* RX_BAID_ALLOCATION_ACTION_E_VER_1 */
/**
* struct iwl_rx_baid_cfg_cmd_alloc - BAID allocation data
* @sta_id_mask: station ID mask
* @tid: the TID for this session
* @reserved: reserved
* @ssn: the starting sequence number
* @win_size: RX BA session window size
*/
struct iwl_rx_baid_cfg_cmd_alloc {
__le32 sta_id_mask;
u8 tid;
u8 reserved[3];
__le16 ssn;
__le16 win_size;
} __packed; /* RX_BAID_ALLOCATION_ADD_CMD_API_S_VER_1 */
/**
* struct iwl_rx_baid_cfg_cmd_modify - BAID modification data
* @old_sta_id_mask: old station ID mask
* @new_sta_id_mask: new station ID mask
* @tid: TID of the BAID
*/
struct iwl_rx_baid_cfg_cmd_modify {
__le32 old_sta_id_mask;
__le32 new_sta_id_mask;
__le32 tid;
} __packed; /* RX_BAID_ALLOCATION_MODIFY_CMD_API_S_VER_2 */
/**
* struct iwl_rx_baid_cfg_cmd_remove_v1 - BAID removal data
* @baid: the BAID to remove
*/
struct iwl_rx_baid_cfg_cmd_remove_v1 {
__le32 baid;
} __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_1 */
/**
* struct iwl_rx_baid_cfg_cmd_remove - BAID removal data
* @sta_id_mask: the station mask of the BAID to remove
* @tid: the TID of the BAID to remove
*/
struct iwl_rx_baid_cfg_cmd_remove {
__le32 sta_id_mask;
__le32 tid;
} __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_2 */
/**
* struct iwl_rx_baid_cfg_cmd - BAID allocation/config command
* @action: the action, from &enum iwl_rx_baid_action
*/
struct iwl_rx_baid_cfg_cmd {
__le32 action;
union {
struct iwl_rx_baid_cfg_cmd_alloc alloc;
struct iwl_rx_baid_cfg_cmd_modify modify;
struct iwl_rx_baid_cfg_cmd_remove_v1 remove_v1;
struct iwl_rx_baid_cfg_cmd_remove remove;
}; /* RX_BAID_ALLOCATION_OPERATION_API_U_VER_2 */
} __packed; /* RX_BAID_ALLOCATION_CONFIG_CMD_API_S_VER_2 */
/**
* struct iwl_rx_baid_cfg_resp - BAID allocation response
* @baid: the allocated BAID
*/
struct iwl_rx_baid_cfg_resp {
__le32 baid;
}; /* RX_BAID_ALLOCATION_RESPONSE_API_S_VER_1 */
/**
* enum iwl_scd_queue_cfg_operation - scheduler queue operation
* @IWL_SCD_QUEUE_ADD: allocate a new queue
* @IWL_SCD_QUEUE_REMOVE: remove a queue
* @IWL_SCD_QUEUE_MODIFY: modify a queue
*/
enum iwl_scd_queue_cfg_operation {
IWL_SCD_QUEUE_ADD = 0,
IWL_SCD_QUEUE_REMOVE = 1,
IWL_SCD_QUEUE_MODIFY = 2,
};
/**
* struct iwl_scd_queue_cfg_cmd - scheduler queue allocation command
* @operation: the operation, see &enum iwl_scd_queue_cfg_operation
* @u.add.sta_mask: station mask
* @u.add.tid: TID
* @u.add.reserved: reserved
* @u.add.flags: flags from &enum iwl_tx_queue_cfg_actions, except
* %TX_QUEUE_CFG_ENABLE_QUEUE is not valid
* @u.add.cb_size: size code
* @u.add.bc_dram_addr: byte-count table IOVA
* @u.add.tfdq_dram_addr: TFD queue IOVA
* @u.remove.queue: queue ID for removal
* @u.modify.sta_mask: new station mask for modify
* @u.modify.queue: queue ID to modify
*/
struct iwl_scd_queue_cfg_cmd {
__le32 operation;
union {
struct {
__le32 sta_mask;
u8 tid;
u8 reserved[3];
__le32 flags;
__le32 cb_size;
__le64 bc_dram_addr;
__le64 tfdq_dram_addr;
} __packed add; /* TX_QUEUE_CFG_CMD_ADD_API_S_VER_1 */
struct {
__le32 queue;
} __packed remove; /* TX_QUEUE_CFG_CMD_REMOVE_API_S_VER_1 */
struct {
__le32 sta_mask;
__le32 queue;
} __packed modify; /* TX_QUEUE_CFG_CMD_MODIFY_API_S_VER_1 */
} __packed u; /* TX_QUEUE_CFG_CMD_OPERATION_API_U_VER_1 */
} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_3 */
#endif /* __iwl_fw_api_datapath_h__ */
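
struct iwl_rx_baid_cfg_cmd above is effectively a tagged union: the action word tells the firmware which member of the anonymous union to parse. A hedged sketch of building an "add" request with simplified, host-endian stand-in types (the real command uses __le32/__le16 fields and more members):

#include <stdint.h>
#include <string.h>

enum baid_action { BAID_ADD, BAID_MODIFY, BAID_REMOVE };

struct baid_alloc {
	uint32_t sta_id_mask;
	uint8_t tid;
	uint8_t reserved[3];
	uint16_t ssn;
	uint16_t win_size;
};

struct baid_remove {
	uint32_t sta_id_mask;
	uint32_t tid;
};

/* the action field selects which union member is valid */
struct baid_cfg_cmd {
	uint32_t action;
	union {
		struct baid_alloc alloc;
		struct baid_remove remove;
	};
};

static void baid_build_add(struct baid_cfg_cmd *cmd, uint32_t sta_mask,
			   uint8_t tid, uint16_t ssn, uint16_t win_size)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->action = BAID_ADD;
	cmd->alloc.sta_id_mask = sta_mask;
	cmd->alloc.tid = tid;
	cmd->alloc.ssn = ssn;
	cmd->alloc.win_size = win_size;
}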


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2018-2021 Intel Corporation
* Copyright (C) 2018-2022 Intel Corporation
*/
#ifndef __iwl_fw_dbg_tlv_h__
#define __iwl_fw_dbg_tlv_h__
@ -11,7 +11,8 @@
#define IWL_FW_INI_MAX_NAME 32
#define IWL_FW_INI_MAX_CFG_NAME 64
#define IWL_FW_INI_DOMAIN_ALWAYS_ON 0
#define IWL_FW_INI_REGION_V2_MASK 0x0000FFFF
#define IWL_FW_INI_REGION_ID_MASK GENMASK(15, 0)
#define IWL_FW_INI_REGION_DUMP_POLICY_MASK GENMASK(31, 16)
/**
* struct iwl_fw_ini_hcmd
@ -249,11 +250,10 @@ struct iwl_fw_ini_hcmd_tlv {
} __packed; /* FW_TLV_DEBUG_HCMD_API_S_VER_1 */
/**
* struct iwl_fw_ini_conf_tlv - preset configuration TLV
* struct iwl_fw_ini_addr_val - Address and value to set it to
*
* @address: the base address
* @value: value to set at address
*/
struct iwl_fw_ini_addr_val {
__le32 address;
@ -475,6 +475,7 @@ enum iwl_fw_ini_time_point {
* @IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG: override trigger configuration
* @IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA: override trigger data.
* Append otherwise
* @IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD: send cmd once dump collected
*/
enum iwl_fw_ini_trigger_apply_policy {
IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT = BIT(0),
@ -482,6 +483,7 @@ enum iwl_fw_ini_trigger_apply_policy {
IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS = BIT(8),
IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG = BIT(9),
IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA = BIT(10),
IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD = BIT(16),
};
/**
@ -496,4 +498,31 @@ enum iwl_fw_ini_trigger_reset_fw_policy {
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY,
IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW
};
/**
* enum iwl_fw_ini_dump_policy - Determines how to handle dump based on enabled flags
*
* @IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT: OS has no limit of dump size
* @IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB: mini dump only 600KB region dump
* @IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB: mini dump 5MB size dump
*/
enum iwl_fw_ini_dump_policy {
IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT = BIT(0),
IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB = BIT(1),
IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB = BIT(2),
};
/**
* enum iwl_fw_ini_dump_type - Determines dump type based on size defined by FW.
*
* @IWL_FW_INI_DUMP_BRIEF : only dump the most important regions
* @IWL_FW_INI_DEBUG_MEDIUM: dump more regions than "brief", but not all regions
* @IWL_FW_INI_DUMP_VERBOSE : dump all regions
*/
enum iwl_fw_ini_dump_type {
IWL_FW_INI_DUMP_BRIEF,
IWL_FW_INI_DUMP_MEDIUM,
IWL_FW_INI_DUMP_VERBOSE,
};
#endif
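
The new IWL_FW_INI_REGION_ID_MASK / IWL_FW_INI_REGION_DUMP_POLICY_MASK above split the region id word into a 16-bit region ID (bits 0-15) and a 16-bit dump policy (bits 16-31), which the dump code later extracts with le32_get_bits(). A minimal host-endian sketch of the same mask-and-shift extraction; field_get() here is an illustration, not the kernel's FIELD_GET():

#include <stdint.h>
#include <stdio.h>

#define REGION_ID_MASK		0x0000ffffu	/* GENMASK(15, 0) */
#define REGION_DUMP_POLICY_MASK	0xffff0000u	/* GENMASK(31, 16) */

/* extract a field described by a contiguous mask */
static uint32_t field_get(uint32_t mask, uint32_t word)
{
	return (word & mask) / (mask & -mask);	/* divide by the lowest set bit */
}

int main(void)
{
	uint32_t id_word = (0x3u << 16) | 0x002a;	/* policy 0x3, region 42 */

	printf("region=%u policy=%u\n",
	       (unsigned int)field_get(REGION_ID_MASK, id_word),
	       (unsigned int)field_get(REGION_DUMP_POLICY_MASK, id_word));
	return 0;
}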


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2018-2020 Intel Corporation
* Copyright (C) 2005-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -42,6 +42,12 @@ enum iwl_debug_cmds {
* &struct iwl_buf_alloc_cmd
*/
BUFFER_ALLOCATION = 0x8,
/**
* @FW_DUMP_COMPLETE_CMD:
* sends command to fw once dump collection completed
* &struct iwl_dbg_dump_complete_cmd
*/
FW_DUMP_COMPLETE_CMD = 0xB,
/**
* @MFU_ASSERT_DUMP_NTF:
* &struct iwl_mfu_assert_dump_notif
@ -404,4 +410,15 @@ struct iwl_dbg_host_event_cfg_cmd {
__le32 enabled_severities;
} __packed; /* DEBUG_HOST_EVENT_CFG_CMD_API_S_VER_1 */
/**
* struct iwl_dbg_dump_complete_cmd - dump complete cmd
*
* @tp: timepoint whose dump has completed
* @tp_data: timepoint data
*/
struct iwl_dbg_dump_complete_cmd {
__le32 tp;
__le32 tp_data;
} __packed; /* FW_DUMP_COMPLETE_CMD_API_S_VER_1 */
#endif /* __iwl_fw_api_debug_h__ */


@ -27,6 +27,10 @@ enum iwl_mac_conf_subcmd_ids {
* @SESSION_PROTECTION_CMD: &struct iwl_mvm_session_prot_cmd
*/
SESSION_PROTECTION_CMD = 0x5,
/**
* @CANCEL_CHANNEL_SWITCH_CMD: &struct iwl_cancel_channel_switch_cmd
*/
CANCEL_CHANNEL_SWITCH_CMD = 0x6,
/**
* @SESSION_PROTECTION_NOTIF: &struct iwl_mvm_session_prot_notif
@ -42,6 +46,11 @@ enum iwl_mac_conf_subcmd_ids {
* @CHANNEL_SWITCH_START_NOTIF: &struct iwl_channel_switch_start_notif
*/
CHANNEL_SWITCH_START_NOTIF = 0xFF,
/**
*@CHANNEL_SWITCH_ERROR_NOTIF: &struct iwl_channel_switch_error_notif
*/
CHANNEL_SWITCH_ERROR_NOTIF = 0xF9,
};
#define IWL_P2P_NOA_DESC_COUNT (2)
@ -110,6 +119,31 @@ struct iwl_channel_switch_start_notif {
__le32 id_and_color;
} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */
#define CS_ERR_COUNT_ERROR BIT(0)
#define CS_ERR_LONG_DELAY_AFTER_CS BIT(1)
#define CS_ERR_LONG_TX_BLOCK BIT(2)
#define CS_ERR_TX_BLOCK_TIMER_EXPIRED BIT(3)
/**
* struct iwl_channel_switch_error_notif - Channel switch error notification
*
* @mac_id: the mac for which the ucode sends the notification for
* @csa_err_mask: mask of channel switch error that can occur
*/
struct iwl_channel_switch_error_notif {
__le32 mac_id;
__le32 csa_err_mask;
} __packed; /* CHANNEL_SWITCH_ERROR_NTFY_API_S_VER_1 */
/**
* struct iwl_cancel_channel_switch_cmd - Cancel Channel Switch command
*
* @mac_id: the mac that should cancel the channel switch
*/
struct iwl_cancel_channel_switch_cmd {
__le32 mac_id;
} __packed; /* MAC_CANCEL_CHANNEL_SWITCH_S_VER_1 */
/**
* struct iwl_chan_switch_te_cmd - Channel Switch Time Event command
*


@ -413,10 +413,11 @@ enum iwl_he_pkt_ext_constellations {
};
#define MAX_HE_SUPP_NSS 2
#define MAX_HE_CHANNEL_BW_INDX 4
#define MAX_CHANNEL_BW_INDX_API_D_VER_2 4
#define MAX_CHANNEL_BW_INDX_API_D_VER_3 5
/**
* struct iwl_he_pkt_ext - QAM thresholds
* struct iwl_he_pkt_ext_v1 - QAM thresholds
* The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
* The IE is organized in the following way:
* Support for Nss x BW (or RU) matrix:
@ -435,9 +436,34 @@ enum iwl_he_pkt_ext_constellations {
* Nss (0-siso, 1-mimo2) x BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) x
* (0-low_th, 1-high_th)
*/
struct iwl_he_pkt_ext {
u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_HE_CHANNEL_BW_INDX][2];
} __packed; /* PKT_EXT_DOT11AX_API_S */
struct iwl_he_pkt_ext_v1 {
u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_2][2];
} __packed; /* PKT_EXT_DOT11AX_API_S_VER_1 */
/**
* struct iwl_he_pkt_ext_v2 - QAM thresholds
* The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
* The IE is organized in the following way:
* Support for Nss x BW (or RU) matrix:
* (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz)
* Each entry contains 2 QAM thresholds for 8us and 16us:
* 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6=RES, 7=NONE
* i.e. QAM_th1 < QAM_th2 such if TX uses QAM_tx:
* QAM_tx < QAM_th1 --> PPE=0us
* QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us
* QAM_th2 <= QAM_tx --> PPE=16us
* @pkt_ext_qam_th: QAM thresholds
* For each Nss/Bw define 2 QAM thresholds (0..5)
* For rates below the low_th, no need for PPE
* For rates between low_th and high_th, need 8us PPE
* For rates equal or higher then the high_th, need 16us PPE
* Nss (0-siso, 1-mimo2) x
* BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz, 4-320MHz) x
* (0-low_th, 1-high_th)
*/
struct iwl_he_pkt_ext_v2 {
u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_3][2];
} __packed; /* PKT_EXT_DOT11AX_API_S_VER_2 */
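
The kernel-doc above maps the TX constellation onto a packet-extension (PPE) duration using two thresholds per Nss x BW entry: below the low threshold no PPE, between the thresholds 8 us, at or above the high threshold 16 us. A small hedged sketch of that lookup; the thresholds used are illustrative, not driver values:

#include <stdio.h>

/* constellation codes as used by the thresholds: 0=BPSK ... 5=1024QAM */
enum { QAM_BPSK, QAM_QPSK, QAM_16, QAM_64, QAM_256, QAM_1024 };

/* PPE duration in microseconds for a given TX constellation */
static int ppe_us(int qam_tx, int qam_th_low, int qam_th_high)
{
	if (qam_tx < qam_th_low)
		return 0;	/* no packet extension needed */
	if (qam_tx < qam_th_high)
		return 8;
	return 16;
}

int main(void)
{
	/* e.g. thresholds 16QAM / 256QAM for one Nss x BW entry */
	printf("64QAM   -> %d us\n", ppe_us(QAM_64, QAM_16, QAM_256));
	printf("1024QAM -> %d us\n", ppe_us(QAM_1024, QAM_16, QAM_256));
	return 0;
}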
/**
* enum iwl_he_sta_ctxt_flags - HE STA context flags
@ -464,6 +490,11 @@ struct iwl_he_pkt_ext {
* @STA_CTXT_HE_RU_2MHZ_BLOCK: indicates that 26-tone RU OFDMA transmission are
* not allowed (as there are OBSS that might classify such transmissions as
* radar pulses).
* @STA_CTXT_HE_NDP_FEEDBACK_ENABLED: mark support for NDP feedback and change
* of threshold
* @STA_CTXT_EHT_PUNCTURE_MASK_VALID: indicates the puncture_mask field is valid
* @STA_CTXT_EHT_LONG_PPE_ENABLED: indicates the PPE requirement should be
* extended to 20us for BW > 160Mhz or for MCS w/ 4096-QAM.
*/
enum iwl_he_sta_ctxt_flags {
STA_CTXT_HE_REF_BSSID_VALID = BIT(4),
@ -477,6 +508,9 @@ enum iwl_he_sta_ctxt_flags {
STA_CTXT_HE_MU_EDCA_CW = BIT(12),
STA_CTXT_HE_NIC_NOT_ACK_ENABLED = BIT(13),
STA_CTXT_HE_RU_2MHZ_BLOCK = BIT(14),
STA_CTXT_HE_NDP_FEEDBACK_ENABLED = BIT(15),
STA_CTXT_EHT_PUNCTURE_MASK_VALID = BIT(16),
STA_CTXT_EHT_LONG_PPE_ENABLED = BIT(17),
};
/**
@ -551,7 +585,7 @@ struct iwl_he_sta_context_cmd_v1 {
u8 frag_min_size;
/* The below fields are set via PPE thresholds element */
struct iwl_he_pkt_ext pkt_ext;
struct iwl_he_pkt_ext_v1 pkt_ext;
/* The below fields are set via HE-Operation IE */
u8 bss_color;
@ -568,7 +602,7 @@ struct iwl_he_sta_context_cmd_v1 {
} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_1 */
/**
* struct iwl_he_sta_context_cmd - configure FW to work with HE AP
* struct iwl_he_sta_context_cmd_v2 - configure FW to work with HE AP
* @sta_id: STA id
* @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
* 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
@ -599,7 +633,7 @@ struct iwl_he_sta_context_cmd_v1 {
* @bssid_count: actual number of VAPs in the MultiBSS Set
* @reserved4: alignment
*/
struct iwl_he_sta_context_cmd {
struct iwl_he_sta_context_cmd_v2 {
u8 sta_id;
u8 tid_limit;
u8 reserved1;
@ -619,7 +653,7 @@ struct iwl_he_sta_context_cmd {
u8 frag_min_size;
/* The below fields are set via PPE thresholds element */
struct iwl_he_pkt_ext pkt_ext;
struct iwl_he_pkt_ext_v1 pkt_ext;
/* The below fields are set via HE-Operation IE */
u8 bss_color;
@ -642,6 +676,81 @@ struct iwl_he_sta_context_cmd {
u8 reserved4[3];
} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_2 */
/**
* struct iwl_he_sta_context_cmd_v3 - configure FW to work with HE AP
* @sta_id: STA id
* @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
* 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
* @reserved1: reserved byte for future use
* @reserved2: reserved byte for future use
* @flags: see %iwl_11ax_sta_ctxt_flags
* @ref_bssid_addr: reference BSSID used by the AP
* @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes
* @htc_flags: which features are supported in HTC
* @frag_flags: frag support in A-MSDU
* @frag_level: frag support level
* @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2)
* @frag_min_size: min frag size (except last frag)
* @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa
* @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
* @htc_trig_based_pkt_ext: default PE in 4us units
* @frame_time_rts_th: HE duration RTS threshold, in units of 32us
* @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1
* @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
* @puncture_mask: puncture mask for EHT
* @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
* @max_bssid_indicator: indicator of the max bssid supported on the associated
* bss
* @bssid_index: index of the associated VAP
* @ema_ap: AP supports enhanced Multi BSSID advertisement
* @profile_periodicity: number of Beacon periods that are needed to receive the
* complete VAPs info
* @bssid_count: actual number of VAPs in the MultiBSS Set
* @reserved4: alignment
*/
struct iwl_he_sta_context_cmd_v3 {
u8 sta_id;
u8 tid_limit;
u8 reserved1;
u8 reserved2;
__le32 flags;
/* The below fields are set via Multiple BSSID IE */
u8 ref_bssid_addr[6];
__le16 reserved0;
/* The below fields are set via HE-capabilities IE */
__le32 htc_flags;
u8 frag_flags;
u8 frag_level;
u8 frag_max_num;
u8 frag_min_size;
/* The below fields are set via PPE thresholds element */
struct iwl_he_pkt_ext_v2 pkt_ext;
/* The below fields are set via HE-Operation IE */
u8 bss_color;
u8 htc_trig_based_pkt_ext;
__le16 frame_time_rts_th;
/* Random access parameter set (i.e. RAPS) */
u8 rand_alloc_ecwmin;
u8 rand_alloc_ecwmax;
__le16 puncture_mask;
/* The below fields are set via MU EDCA parameter set element */
struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
u8 max_bssid_indicator;
u8 bssid_index;
u8 ema_ap;
u8 profile_periodicity;
u8 bssid_count;
u8 reserved4[3];
} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_2 */
/**
* struct iwl_he_monitor_cmd - configure air sniffer for HE
* @bssid: the BSSID to sniff for


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -419,6 +419,30 @@ struct iwl_tas_config_cmd_v3 {
__le16 enable_tas_iec;
} __packed; /* TAS_CONFIG_CMD_API_S_VER_3 */
/**
* struct iwl_tas_config_cmd_v4 - configures the TAS
* @block_list_size: size of relevant field in block_list_array
* @block_list_array: list of countries where TAS must be disabled
* @override_tas_iec: indicates whether to override default value of IEC regulatory
* @enable_tas_iec: in case override_tas_iec is set -
* indicates whether IEC regulatory is enabled or disabled
* @usa_tas_uhb_allowed: if set, allow TAS UHB in the USA
* @reserved: reserved
*/
struct iwl_tas_config_cmd_v4 {
__le32 block_list_size;
__le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
u8 override_tas_iec;
u8 enable_tas_iec;
u8 usa_tas_uhb_allowed;
u8 reserved;
} __packed; /* TAS_CONFIG_CMD_API_S_VER_4 */
union iwl_tas_config_cmd {
struct iwl_tas_config_cmd_v2 v2;
struct iwl_tas_config_cmd_v3 v3;
struct iwl_tas_config_cmd_v4 v4;
};
/**
* enum iwl_lari_configs - bit masks for the various LARI config operations
* @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine
@ -514,6 +538,32 @@ struct iwl_lari_config_change_cmd_v5 {
__le32 chan_state_active_bitmap;
} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_5 */
/**
* struct iwl_lari_config_change_cmd_v6 - change LARI configuration
* @config_bitmap: Bitmap of the config commands. Each bit will trigger a
* different predefined FW config operation.
* @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
* @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits
* per country, one to indicate whether to override and the other to
* indicate the value to use.
* @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits
* per country, one to indicate whether to override and the other to
* indicate allow/disallow unii4 channels.
* @chan_state_active_bitmap: Bitmap for overriding channel state to active.
* Each bit represents a country or region to activate, according to the BIOS
* definitions.
* @force_disable_channels_bitmap: Bitmap of disabled bands/channels.
* Each bit represents a set of channels in a specific band that should be disabled
*/
struct iwl_lari_config_change_cmd_v6 {
__le32 config_bitmap;
__le32 oem_uhb_allow_bitmap;
__le32 oem_11ax_allow_bitmap;
__le32 oem_unii4_allow_bitmap;
__le32 chan_state_active_bitmap;
__le32 force_disable_channels_bitmap;
} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_6 */
/**
* struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete
* @status: PNVM image loading status


@ -166,14 +166,24 @@ struct iwl_dts_measurement_resp {
/**
* struct ct_kill_notif - CT-kill entry notification
* This structure represent both versions of this notification.
*
* @temperature: the current temperature in celsius
* @reserved: reserved
* @dts: only in v2: DTS that trigger the CT Kill bitmap:
* bit 0: ToP master
* bit 1: PA chain A master
* bit 2: PA chain B master
* bit 3: ToP slave
* bit 4: PA chain A slave
* bit 5: PA chain B slave)
* bits 6,7: reserved (set to 0)
* @scheme: only for v2: scheme that trigger the CT Kill (0-SW, 1-HW)
*/
struct ct_kill_notif {
__le16 temperature;
__le16 reserved;
} __packed; /* GRP_PHY_CT_KILL_NTF */
u8 dts;
u8 scheme;
} __packed; /* CT_KILL_NOTIFICATION_API_S_VER_1, CT_KILL_NOTIFICATION_API_S_VER_2 */
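
The v2 CT-kill notification above reports which DTS sensors tripped as a bitmap in dts, plus the trigger scheme (0 = SW, 1 = HW). A tiny hedged decoder for that bitmap; the names follow the comment above, not any driver API:

#include <stdint.h>
#include <stdio.h>

static const char * const ct_kill_dts_names[] = {
	"ToP master", "PA chain A master", "PA chain B master",
	"ToP slave", "PA chain A slave", "PA chain B slave",
};

/* print which sensors asserted CT-kill, per the v2 notification layout */
static void ct_kill_print(uint8_t dts, uint8_t scheme)
{
	for (unsigned int i = 0; i < 6; i++)
		if (dts & (1u << i))
			printf("CT-kill from %s\n", ct_kill_dts_names[i]);
	printf("scheme: %s\n", scheme ? "HW" : "SW");
}

int main(void)
{
	ct_kill_print(0x05, 1);	/* ToP master + PA chain B master, HW scheme */
	return 0;
}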
/**
* enum ctdp_cmd_operation - CTDP command operations


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@ -340,7 +340,7 @@ struct iwl_dev_tx_power_cmd_v5 {
} __packed; /* TX_REDUCED_POWER_API_S_VER_5 */
/**
* struct iwl_dev_tx_power_cmd_v5 - TX power reduction command version 5
* struct iwl_dev_tx_power_cmd_v6 - TX power reduction command version 6
* @per_chain: per chain restrictions
* @enable_ack_reduction: enable or disable close range ack TX power
* reduction.
@ -360,6 +360,28 @@ struct iwl_dev_tx_power_cmd_v6 {
__le32 timer_period;
} __packed; /* TX_REDUCED_POWER_API_S_VER_6 */
/**
* struct iwl_dev_tx_power_cmd_v7 - TX power reduction command version 7
* @per_chain: per chain restrictions
* @enable_ack_reduction: enable or disable close range ack TX power
* reduction.
* @per_chain_restriction_changed: is per_chain_restriction has changed
* from last command. used if set_mode is
* IWL_TX_POWER_MODE_SET_SAR_TIMER.
* note: if not changed, the command is used for keep alive only.
* @reserved: reserved (padding)
* @timer_period: timer in milliseconds. if expires FW will change to default
* BIOS values. relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER
* @flags: reduce power flags.
*/
struct iwl_dev_tx_power_cmd_v7 {
__le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
u8 enable_ack_reduction;
u8 per_chain_restriction_changed;
u8 reserved[2];
__le32 timer_period;
__le32 flags;
} __packed; /* TX_REDUCED_POWER_API_S_VER_7 */
/**
* struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion)
* @common: common part of the command
@ -375,6 +397,7 @@ struct iwl_dev_tx_power_cmd {
struct iwl_dev_tx_power_cmd_v4 v4;
struct iwl_dev_tx_power_cmd_v5 v5;
struct iwl_dev_tx_power_cmd_v6 v6;
struct iwl_dev_tx_power_cmd_v7 v7;
};
};


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2020 Intel Corporation
* Copyright (C) 2020-2021 Intel Corporation
*/
#ifndef __iwl_fw_api_rfi_h__
#define __iwl_fw_api_rfi_h__
@ -57,4 +57,12 @@ struct iwl_rfi_freq_table_resp_cmd {
__le32 status;
} __packed; /* RFI_CONFIG_CMD_API_S_VER_1 */
/**
* struct iwl_rfi_deactivate_notif - notification that FW disabled RFIm
*
* @reason: used only for a log message
*/
struct iwl_rfi_deactivate_notif {
__le32 reason;
} __packed; /* RFI_DEACTIVATE_NTF_S_VER_1 */
#endif /* __iwl_fw_api_rfi_h__ */


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_rs_h__
@ -133,7 +133,7 @@ enum IWL_TLC_MCS_PER_BW {
};
/**
* struct tlc_config_cmd - TLC configuration
* struct iwl_tlc_config_cmd_v3 - TLC configuration
* @sta_id: station id
* @reserved1: reserved
* @max_ch_width: max supported channel width from @enum iwl_tlc_mng_cfg_cw
@ -168,7 +168,7 @@ struct iwl_tlc_config_cmd_v3 {
} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_3 */
/**
* struct tlc_config_cmd - TLC configuration
* struct iwl_tlc_config_cmd_v4 - TLC configuration
* @sta_id: station id
* @reserved1: reserved
* @max_ch_width: max supported channel width from &enum iwl_tlc_mng_cfg_cw


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_tx_h__
@ -296,8 +296,7 @@ struct iwl_tx_cmd_gen2 {
* @dram_info: FW internal DRAM storage
* @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
* cleared. Combination of RATE_MCS_*
* @ttl: time to live - packet lifetime limit. The FW should drop if
* passed.
* @reserved: reserved
* @hdr: 802.11 header
*/
struct iwl_tx_cmd_gen3 {
@ -306,7 +305,7 @@ struct iwl_tx_cmd_gen3 {
__le32 offload_assist;
struct iwl_dram_sec_info dram_info;
__le32 rate_n_flags;
__le64 ttl;
u8 reserved[8];
struct ieee80211_hdr hdr[];
} __packed; /* TX_CMD_API_S_VER_8,
TX_CMD_API_S_VER_10 */


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2019-2020 Intel Corporation
* Copyright (C) 2005-2014, 2019-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -76,6 +76,8 @@ enum iwl_tx_queue_cfg_actions {
TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1),
};
#define IWL_DEFAULT_QUEUE_SIZE_EHT (1024 * 4)
#define IWL_DEFAULT_QUEUE_SIZE_HE 1024
#define IWL_DEFAULT_QUEUE_SIZE 256
#define IWL_MGMT_QUEUE_SIZE 16
#define IWL_CMD_QUEUE_SIZE 32


@ -12,7 +12,7 @@
#include "iwl-io.h"
#include "iwl-prph.h"
#include "iwl-csr.h"
#include "iwl-fh.h"
/**
* struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump
*
@ -303,9 +303,6 @@ static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt,
iwl_trans_release_nic_access(fwrt->trans);
}
#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */
#define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */
struct iwl_prph_range {
u32 start, end;
};
@ -1027,7 +1024,7 @@ struct iwl_dump_ini_region_data {
static int
iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@ -1052,7 +1049,7 @@ iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@ -1102,7 +1099,7 @@ iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@ -1121,7 +1118,7 @@ static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_trans *trans = fwrt->trans;
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
@ -1153,7 +1150,7 @@ static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@ -1175,7 +1172,7 @@ static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
}
static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct page *page = fwrt->fw_paging_db[idx].fw_paging_block;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@ -1195,7 +1192,7 @@ static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_error_dump_range *range;
u32 page_size;
@ -1204,7 +1201,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
idx++;
if (!fwrt->trans->trans_cfg->gen2)
return _iwl_dump_ini_paging_iter(fwrt, range_ptr, idx);
return _iwl_dump_ini_paging_iter(fwrt, range_ptr, range_len, idx);
range = range_ptr;
page_size = fwrt->trans->init_dram.paging[idx].size;
@ -1220,7 +1217,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@ -1239,7 +1236,7 @@ iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_mon_smem_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@ -1307,7 +1304,7 @@ static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@ -1442,7 +1439,7 @@ static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@ -1509,7 +1506,7 @@ out:
static int
iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_region_err_table *err_table = &reg->err_table;
@ -1528,7 +1525,7 @@ iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_region_special_device_memory *special_mem =
@ -1549,7 +1546,7 @@ iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@ -1561,8 +1558,6 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
return -EBUSY;
range->range_data_size = reg->dev_addr.size;
iwl_write_prph_no_grab(fwrt->trans, DBGI_SRAM_TARGET_ACCESS_CFG,
DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK);
for (i = 0; i < (le32_to_cpu(reg->dev_addr.size) / 4); i++) {
prph_data = iwl_read_prph_no_grab(fwrt->trans, (i % 2) ?
DBGI_SRAM_TARGET_ACCESS_RDATA_MSB :
@ -1579,7 +1574,7 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_error_dump_range *range = range_ptr;
struct iwl_rx_packet *pkt = reg_data->dump_data->fw_pkt;
@ -1598,10 +1593,37 @@ static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt,
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
static int iwl_dump_ini_imr_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, u32 range_len, int idx)
{
/* read the IMR memory and DMA it to SRAM */
struct iwl_fw_ini_error_dump_range *range = range_ptr;
u64 imr_curr_addr = fwrt->trans->dbg.imr_data.imr_curr_addr;
u32 imr_rem_bytes = fwrt->trans->dbg.imr_data.imr2sram_remainbyte;
u32 sram_addr = fwrt->trans->dbg.imr_data.sram_addr;
u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
u32 size_to_dump = (imr_rem_bytes > sram_size) ? sram_size : imr_rem_bytes;
range->range_data_size = cpu_to_le32(size_to_dump);
if (iwl_trans_write_imr_mem(fwrt->trans, sram_addr,
imr_curr_addr, size_to_dump)) {
IWL_ERR(fwrt, "WRT_DEBUG: IMR Memory transfer failed\n");
return -1;
}
fwrt->trans->dbg.imr_data.imr_curr_addr = imr_curr_addr + size_to_dump;
fwrt->trans->dbg.imr_data.imr2sram_remainbyte -= size_to_dump;
iwl_trans_read_mem_bytes(fwrt->trans, sram_addr, range->data,
size_to_dump);
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
static void *
iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *data)
void *data, u32 data_len)
{
struct iwl_fw_ini_error_dump *dump = data;
@ -1677,7 +1699,7 @@ iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
static void *
iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *data)
void *data, u32 data_len)
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
@ -1688,7 +1710,7 @@ iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
static void *
iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *data)
void *data, u32 data_len)
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
@ -1696,10 +1718,21 @@ iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
&fwrt->trans->cfg->mon_smem_regs);
}
static void *
iwl_dump_ini_mon_dbgi_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *data, u32 data_len)
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
&fwrt->trans->cfg->mon_dbgi_regs);
}
static void *
iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *data)
void *data, u32 data_len)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_err_table_dump *dump = data;
@ -1713,7 +1746,7 @@ iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt,
static void *
iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *data)
void *data, u32 data_len)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_special_device_memory *dump = data;
@ -1725,6 +1758,18 @@ iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt,
return dump->data;
}
static void *
iwl_dump_ini_imr_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *data, u32 data_len)
{
struct iwl_fw_ini_error_dump *dump = data;
dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
return dump->data;
}
static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
@ -1784,6 +1829,26 @@ static u32 iwl_dump_ini_single_range(struct iwl_fw_runtime *fwrt,
return 1;
}
static u32 iwl_dump_ini_imr_ranges(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
/* range is total number of pages need to copied from
*IMR memory to SRAM and later from SRAM to DRAM
*/
u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable;
u32 imr_size = fwrt->trans->dbg.imr_data.imr_size;
u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
if (imr_enable == 0 || imr_size == 0 || sram_size == 0) {
IWL_DEBUG_INFO(fwrt,
"WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n",
imr_enable, imr_size, sram_size);
return 0;
}
return((imr_size % sram_size) ? (imr_size / sram_size + 1) : (imr_size / sram_size));
}
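
iwl_dump_ini_imr_ranges() above computes how many SRAM-sized chunks it takes to cover the IMR region; the conditional expression is just a ceiling division, equivalent to the kernel's DIV_ROUND_UP(). A short standalone sketch:

#include <stdio.h>

/* number of sram_size chunks needed to cover imr_size bytes */
static unsigned int imr_num_ranges(unsigned int imr_size, unsigned int sram_size)
{
	if (!imr_size || !sram_size)
		return 0;
	return (imr_size + sram_size - 1) / sram_size;	/* DIV_ROUND_UP */
}

int main(void)
{
	printf("%u\n", imr_num_ranges(5u << 20, 1u << 20));		/* 5 */
	printf("%u\n", imr_num_ranges((5u << 20) + 1, 1u << 20));	/* 6 */
	return 0;
}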
static u32 iwl_dump_ini_mem_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
@ -1861,6 +1926,20 @@ iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt,
return size;
}
static u32 iwl_dump_ini_mon_dbgi_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
u32 size = le32_to_cpu(reg->dev_addr.size);
u32 ranges = iwl_dump_ini_mem_ranges(fwrt, reg_data);
if (!size || !ranges)
return 0;
return sizeof(struct iwl_fw_ini_monitor_dump) + ranges *
(size + sizeof(struct iwl_fw_ini_error_dump_range));
}
static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
@ -1948,6 +2027,33 @@ iwl_dump_ini_fw_pkt_get_size(struct iwl_fw_runtime *fwrt,
return size;
}
static u32
iwl_dump_ini_imr_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
u32 size = 0;
u32 ranges = 0;
u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable;
u32 imr_size = fwrt->trans->dbg.imr_data.imr_size;
u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
if (imr_enable == 0 || imr_size == 0 || sram_size == 0) {
IWL_DEBUG_INFO(fwrt,
"WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n",
imr_enable, imr_size, sram_size);
return size;
}
size = imr_size;
ranges = iwl_dump_ini_imr_ranges(fwrt, reg_data);
if (!size && !ranges) {
IWL_ERR(fwrt, "WRT: imr_size :=%d, ranges :=%d\n", size, ranges);
return 0;
}
size += sizeof(struct iwl_fw_ini_error_dump) +
ranges * sizeof(struct iwl_fw_ini_error_dump_range);
return size;
}
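/*
 * Illustrative sketch (not part of the patch): the total IMR dump size adds
 * one error-dump header plus one range header per SRAM-sized chunk on top of
 * the raw IMR payload, i.e. for the hypothetical numbers above it is
 * 0x50000 + sizeof(header) + 3 * sizeof(range header).
 */
static u32 demo_imr_dump_size(u32 imr_size, u32 ranges)
{
	return imr_size + sizeof(struct iwl_fw_ini_error_dump) +
	       ranges * sizeof(struct iwl_fw_ini_error_dump_range);
}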
/**
* struct iwl_dump_ini_mem_ops - ini memory dump operations
* @get_num_of_ranges: returns the number of memory ranges in the region.
@ -1964,10 +2070,10 @@ struct iwl_dump_ini_mem_ops {
struct iwl_dump_ini_region_data *reg_data);
void *(*fill_mem_hdr)(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *data);
void *data, u32 data_len);
int (*fill_range)(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range, int idx);
void *range, u32 range_len, int idx);
};
/**
@ -1990,24 +2096,53 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
struct iwl_fw_ini_error_dump_data *tlv;
struct iwl_fw_ini_error_dump_header *header;
u32 type = reg->type;
u32 id = le32_to_cpu(reg->id);
u32 id = le32_get_bits(reg->id, IWL_FW_INI_REGION_ID_MASK);
u32 num_of_ranges, i, size;
void *range;
u8 *range;
u32 free_size;
u64 header_size;
u32 dump_policy = IWL_FW_INI_DUMP_VERBOSE;
/*
* The higher part of the ID from version 2 is irrelevant for
* us, so mask it out.
*/
if (le32_to_cpu(reg->hdr.version) >= 2)
id &= IWL_FW_INI_REGION_V2_MASK;
IWL_DEBUG_FW(fwrt, "WRT: Collecting region: dump type=%d, id=%d, type=%d\n",
dump_policy, id, type);
if (le32_to_cpu(reg->hdr.version) >= 2) {
u32 dp = le32_get_bits(reg->id,
IWL_FW_INI_REGION_DUMP_POLICY_MASK);
if (dump_policy == IWL_FW_INI_DUMP_VERBOSE &&
!(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT)) {
IWL_DEBUG_FW(fwrt,
"WRT: no dump - type %d and policy mismatch=%d\n",
dump_policy, dp);
return 0;
} else if (dump_policy == IWL_FW_INI_DUMP_MEDIUM &&
!(dp & IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB)) {
IWL_DEBUG_FW(fwrt,
"WRT: no dump - type %d and policy mismatch=%d\n",
dump_policy, dp);
return 0;
} else if (dump_policy == IWL_FW_INI_DUMP_BRIEF &&
!(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB)) {
IWL_DEBUG_FW(fwrt,
"WRT: no dump - type %d and policy mismatch=%d\n",
dump_policy, dp);
return 0;
}
}
if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr ||
!ops->fill_range)
!ops->fill_range) {
IWL_DEBUG_FW(fwrt, "WRT: no ops for collecting data\n");
return 0;
}
size = ops->get_size(fwrt, reg_data);
if (!size)
if (size < sizeof(*header)) {
IWL_DEBUG_FW(fwrt, "WRT: size didn't include space for header\n");
return 0;
}
entry = vzalloc(sizeof(*entry) + sizeof(*tlv) + size);
if (!entry)
@ -2022,9 +2157,6 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
tlv->reserved = reg->reserved;
tlv->len = cpu_to_le32(size);
IWL_DEBUG_FW(fwrt, "WRT: Collecting region: id=%d, type=%d\n", id,
type);
num_of_ranges = ops->get_num_of_ranges(fwrt, reg_data);
header = (void *)tlv->data;
@ -2033,7 +2165,8 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
header->name_len = cpu_to_le32(IWL_FW_INI_MAX_NAME);
memcpy(header->name, reg->name, IWL_FW_INI_MAX_NAME);
range = ops->fill_mem_hdr(fwrt, reg_data, header);
free_size = size;
range = ops->fill_mem_hdr(fwrt, reg_data, header, free_size);
if (!range) {
IWL_ERR(fwrt,
"WRT: Failed to fill region header: id=%d, type=%d\n",
@ -2041,8 +2174,21 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
goto out_err;
}
header_size = range - (u8 *)header;
if (WARN(header_size > free_size,
"header size %llu > free_size %d",
header_size, free_size)) {
IWL_ERR(fwrt,
"WRT: fill_mem_hdr used more than given free_size\n");
goto out_err;
}
free_size -= header_size;
for (i = 0; i < num_of_ranges; i++) {
int range_size = ops->fill_range(fwrt, reg_data, range, i);
int range_size = ops->fill_range(fwrt, reg_data, range,
free_size, i);
if (range_size < 0) {
IWL_ERR(fwrt,
@ -2050,6 +2196,15 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
id, type);
goto out_err;
}
if (WARN(range_size > free_size, "range_size %d > free_size %d",
range_size, free_size)) {
IWL_ERR(fwrt,
"WRT: fill_raged used more than given free_size\n");
goto out_err;
}
free_size -= range_size;
range = range + range_size;
}
@ -2240,7 +2395,12 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
.fill_range = iwl_dump_ini_csr_iter,
},
[IWL_FW_INI_REGION_DRAM_IMR] = {},
[IWL_FW_INI_REGION_DRAM_IMR] = {
.get_num_of_ranges = iwl_dump_ini_imr_ranges,
.get_size = iwl_dump_ini_imr_get_size,
.fill_mem_hdr = iwl_dump_ini_imr_fill_header,
.fill_range = iwl_dump_ini_imr_iter,
},
[IWL_FW_INI_REGION_PCI_IOSF_CONFIG] = {
.get_num_of_ranges = iwl_dump_ini_mem_ranges,
.get_size = iwl_dump_ini_mem_get_size,
@ -2255,8 +2415,8 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
},
[IWL_FW_INI_REGION_DBGI_SRAM] = {
.get_num_of_ranges = iwl_dump_ini_mem_ranges,
.get_size = iwl_dump_ini_mem_get_size,
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
.get_size = iwl_dump_ini_mon_dbgi_get_size,
.fill_mem_hdr = iwl_dump_ini_mon_dbgi_fill_header,
.fill_range = iwl_dump_ini_dbgi_sram_iter,
},
};
@ -2270,6 +2430,9 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data reg_data = {
.dump_data = dump_data,
};
struct iwl_dump_ini_region_data imr_reg_data = {
.dump_data = dump_data,
};
int i;
u32 size = 0;
u64 regions_mask = le64_to_cpu(trigger->regions_mask) &
@ -2305,10 +2468,32 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
tp_id);
continue;
}
/*
 * DRAM_IMR can be collected only for the FW/HW error timepoints,
 * when the fw is not alive. In addition, it must be collected
 * last, as it overwrites SRAM that may still contain debug data
 * which also needs to be collected.
 */
if (reg_type == IWL_FW_INI_REGION_DRAM_IMR) {
if (tp_id == IWL_FW_INI_TIME_POINT_FW_ASSERT ||
tp_id == IWL_FW_INI_TIME_POINT_FW_HW_ERROR)
imr_reg_data.reg_tlv = fwrt->trans->dbg.active_regions[i];
else
IWL_INFO(fwrt,
"WRT: trying to collect DRAM_IMR at time point: %d, skipping\n",
tp_id);
/* continue to next region */
continue;
}
size += iwl_dump_ini_mem(fwrt, list, &reg_data,
&iwl_dump_ini_region_ops[reg_type]);
}
/* collect the DRAM_IMR region last */
if (imr_reg_data.reg_tlv)
size += iwl_dump_ini_mem(fwrt, list, &reg_data,
&iwl_dump_ini_region_ops[IWL_FW_INI_REGION_DRAM_IMR]);
if (size)
size += iwl_dump_ini_info(fwrt, trigger, list);
@ -2444,7 +2629,7 @@ static void iwl_fw_error_dump_data_free(struct iwl_fwrt_dump_data *dump_data)
static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_dump_data *dump_data)
{
struct list_head dump_list = LIST_HEAD_INIT(dump_list);
LIST_HEAD(dump_list);
struct scatterlist *sg_dump_data;
u32 file_len = iwl_dump_ini_file_gen(fwrt, dump_data, &dump_list);
@ -2589,7 +2774,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
delay = le32_to_cpu(trigger->stop_delay) * USEC_PER_MSEC;
}
desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
desc = kzalloc(struct_size(desc, trig_desc.data, len), GFP_ATOMIC);
if (!desc)
return -ENOMEM;
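/*
 * Hedged sketch of the struct_size() conversion above (illustrative only; the
 * struct below is a stand-in, not the real iwl_fw_dump_desc layout): for a
 * trailing flexible array the helper computes the header size plus the array
 * payload and saturates instead of wrapping if the addition would overflow.
 */
struct demo_desc {
	u32 type;
	u8 data[];		/* flexible array member, 1-byte elements */
};

static struct demo_desc *demo_desc_alloc(size_t len, gfp_t flags)
{
	struct demo_desc *d;

	/* sizeof(*d) + len * sizeof(d->data[0]), overflow-checked */
	d = kzalloc(struct_size(d, data, len), flags);
	return d;
}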
@ -2685,6 +2870,28 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
}
IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
u32 timepoint,
u32 timepoint_data)
{
struct iwl_dbg_dump_complete_cmd hcmd_data;
struct iwl_host_cmd hcmd = {
.id = WIDE_ID(DEBUG_GROUP, FW_DUMP_COMPLETE_CMD),
.data[0] = &hcmd_data,
.len[0] = sizeof(hcmd_data),
};
if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
return;
if (fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT)) {
hcmd_data.tp = cpu_to_le32(timepoint);
hcmd_data.tp_data = cpu_to_le32(timepoint_data);
iwl_trans_send_cmd(fwrt->trans, &hcmd);
}
}
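/*
 * Hedged sketch of the iwl_cmd_id() -> WIDE_ID() conversions seen throughout
 * this series: a wide command ID packs the command group in the upper byte
 * and the opcode in the lower byte, so iwl_cmd_groupid()/iwl_cmd_opcode()
 * (used in iwl_fw_lookup_cmd_ver() further below) recover the two halves.
 * The macro body here is written from memory, not quoted from the tree.
 */
#define DEMO_WIDE_ID(grp, opcode)	(((grp) << 8) | (opcode))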
/* this function assumes dump_start was called beforehand and dump_end will be
* called afterwards
*/
@ -2693,10 +2900,16 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
struct iwl_fw_dbg_params params = {0};
struct iwl_fwrt_dump_data *dump_data =
&fwrt->dump.wks[wk_idx].dump_data;
u32 policy;
u32 time_point;
if (!test_bit(wk_idx, &fwrt->dump.active_wks))
return;
if (!dump_data->trig) {
IWL_ERR(fwrt, "dump trigger data is not set\n");
goto out;
}
if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) {
IWL_ERR(fwrt, "Device is not enabled - cannot dump error\n");
goto out;
@ -2719,6 +2932,13 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
policy = le32_to_cpu(dump_data->trig->apply_policy);
time_point = le32_to_cpu(dump_data->trig->time_point);
if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) {
IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);
}
if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY)
iwl_force_nmi(fwrt->trans);
@ -2777,10 +2997,10 @@ int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
"WRT: Collecting data: ini trigger %d fired (delay=%dms).\n",
tp_id, (u32)(delay / USEC_PER_MSEC));
schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));
if (sync)
iwl_fw_dbg_collect_sync(fwrt, idx);
else
schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));
return 0;
}
@ -2795,9 +3015,8 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
/* assumes the op mode mutex is locked in dump_start since
* iwl_fw_dbg_collect_sync can't run in parallel
*/
if (fwrt->ops && fwrt->ops->dump_start &&
fwrt->ops->dump_start(fwrt->ops_ctx))
return;
if (fwrt->ops && fwrt->ops->dump_start)
fwrt->ops->dump_start(fwrt->ops_ctx);
iwl_fw_dbg_collect_sync(fwrt, wks->idx);


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2018-2019, 2021 Intel Corporation
* Copyright (C) 2005-2014, 2018-2019, 2021-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@ -324,4 +324,7 @@ static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
}
void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt);
void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
u32 timepoint,
u32 timepoint_data);
#endif /* __iwl_fw_dbg_h__ */


@ -150,7 +150,7 @@ static int iwl_dbgfs_enabled_severities_write(struct iwl_fw_runtime *fwrt,
{
struct iwl_dbg_host_event_cfg_cmd event_cfg;
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(HOST_EVENT_CFG, DEBUG_GROUP, 0),
.id = WIDE_ID(DEBUG_GROUP, HOST_EVENT_CFG),
.flags = CMD_ASYNC,
.data[0] = &event_cfg,
.len[0] = sizeof(event_cfg),
@ -358,7 +358,7 @@ static int iwl_dbgfs_fw_info_seq_show(struct seq_file *seq, void *v)
ver = &fw->ucode_capa.cmd_versions[state->pos];
cmd_id = iwl_cmd_id(ver->cmd, ver->group, 0);
cmd_id = WIDE_ID(ver->group, ver->cmd);
seq_printf(seq, " 0x%04x:\n", cmd_id);
seq_printf(seq, " name: %s\n",


@ -119,7 +119,7 @@ enum iwl_ucode_tlv_type {
struct iwl_ucode_tlv {
__le32 type; /* see above */
__le32 length; /* not including type/length fields */
u8 data[0];
u8 data[];
};
#define IWL_TLV_UCODE_MAGIC 0x0a4c5749
@ -310,7 +310,6 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
* @IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image
* @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
* @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
* @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
* @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
* @IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD: supports U-APSD on p2p interface when it
@ -368,6 +367,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* reset flow
* @IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN: Support for passive scan on 6GHz PSC
* channels even when these are not enabled.
* @IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT: Support for indicating dump collection
* complete to FW.
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@ -386,7 +387,6 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG = (__force iwl_ucode_tlv_capa_t)17,
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18,
IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19,
IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22,
IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD = (__force iwl_ucode_tlv_capa_t)26,
@ -419,6 +419,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_BROADCAST_TWT = (__force iwl_ucode_tlv_capa_t)60,
IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO = (__force iwl_ucode_tlv_capa_t)61,
IWL_UCODE_TLV_CAPA_RFIM_SUPPORT = (__force iwl_ucode_tlv_capa_t)62,
IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT = (__force iwl_ucode_tlv_capa_t)63,
/* set 2 */
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
@ -453,6 +454,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100,
IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT = (__force iwl_ucode_tlv_capa_t)104,
IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT = (__force iwl_ucode_tlv_capa_t)105,
#ifdef __CHECKER__
/* sparse says it cannot increment the previous enum member */
@ -514,34 +516,6 @@ enum iwl_fw_phy_cfg {
FW_PHY_CFG_SHARED_CLK = BIT(31),
};
#define IWL_UCODE_MAX_CS 1
/**
* struct iwl_fw_cipher_scheme - a cipher scheme supported by FW.
* @cipher: a cipher suite selector
* @flags: cipher scheme flags (currently reserved for a future use)
* @hdr_len: a size of MPDU security header
* @pn_len: a size of PN
* @pn_off: an offset of pn from the beginning of the security header
* @key_idx_off: an offset of key index byte in the security header
* @key_idx_mask: a bit mask of key_idx bits
* @key_idx_shift: bit shift needed to get key_idx
* @mic_len: mic length in bytes
* @hw_cipher: a HW cipher index used in host commands
*/
struct iwl_fw_cipher_scheme {
__le32 cipher;
u8 flags;
u8 hdr_len;
u8 pn_len;
u8 pn_off;
u8 key_idx_off;
u8 key_idx_mask;
u8 key_idx_shift;
u8 mic_len;
u8 hw_cipher;
} __packed;
enum iwl_fw_dbg_reg_operator {
CSR_ASSIGN,
CSR_SETBIT,


@ -2,13 +2,16 @@
/*
* Copyright(c) 2019 - 2021 Intel Corporation
*/
#include <fw/api/commands.h>
#include "img.h"
u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def)
u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def)
{
const struct iwl_fw_cmd_version *entry;
unsigned int i;
/* prior to LONG_GROUP, we never used this CMD version API */
u8 grp = iwl_cmd_groupid(cmd_id) ?: LONG_GROUP;
u8 cmd = iwl_cmd_opcode(cmd_id);
if (!fw->ucode_capa.cmd_versions ||
!fw->ucode_capa.n_cmd_versions)


@ -133,16 +133,6 @@ struct iwl_fw_paging {
u32 fw_offs;
};
/**
* struct iwl_fw_cscheme_list - a cipher scheme list
* @size: a number of entries
* @cs: cipher scheme entries
*/
struct iwl_fw_cscheme_list {
u8 size;
struct iwl_fw_cipher_scheme cs[];
} __packed;
/**
* enum iwl_fw_type - iwlwifi firmware type
* @IWL_FW_DVM: DVM firmware
@ -197,7 +187,6 @@ struct iwl_dump_exclude {
* @inst_evtlog_size: event log size for runtime ucode.
* @inst_errlog_ptr: error log offset for runtime ucode.
* @type: firmware type (&enum iwl_fw_type)
* @cipher_scheme: optional external cipher scheme.
* @human_readable: human readable version
* we get the ALIVE from the uCode
* @phy_integration_ver: PHY integration version string
@ -228,7 +217,6 @@ struct iwl_fw {
enum iwl_fw_type type;
struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS];
u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
struct iwl_fw_dbg dbg;
@ -275,7 +263,7 @@ iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
return &fw->img[ucode_type];
}
u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def);
u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
const char *iwl_fw_lookup_assert_desc(u32 num);


@ -58,7 +58,7 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
{
struct iwl_soc_configuration_cmd cmd = {};
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(SOC_CONFIGURATION_CMD, SYSTEM_GROUP, 0),
.id = WIDE_ID(SYSTEM_GROUP, SOC_CONFIGURATION_CMD),
.data[0] = &cmd,
.len[0] = sizeof(cmd),
};
@ -87,8 +87,7 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
cmd.flags |= le32_encode_bits(fwrt->trans->trans_cfg->ltr_delay,
SOC_FLAGS_LTR_APPLY_DELAY_MASK);
if (iwl_fw_lookup_cmd_ver(fwrt->fw, IWL_ALWAYS_LONG_GROUP,
SCAN_REQ_UMAC,
if (iwl_fw_lookup_cmd_ver(fwrt->fw, SCAN_REQ_UMAC,
IWL_FW_CMD_VER_UNKNOWN) >= 2 &&
fwrt->trans->trans_cfg->low_latency_xtal)
cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY);


@ -197,7 +197,7 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
}
memcpy(page_address(block->fw_paging_block),
image->sec[sec_idx].data + offset, len);
(const u8 *)image->sec[sec_idx].data + offset, len);
block->fw_offs = image->sec[sec_idx].offset + offset;
dma_sync_single_for_device(fwrt->trans->dev,
block->fw_paging_phys,
@ -243,7 +243,7 @@ static int iwl_send_paging_cmd(struct iwl_fw_runtime *fwrt,
.block_num = cpu_to_le32(fwrt->num_of_paging_blk),
};
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(FW_PAGING_BLOCK_CMD, IWL_ALWAYS_LONG_GROUP, 0),
.id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, FW_PAGING_BLOCK_CMD),
.len = { sizeof(paging_cmd), },
.data = { &paging_cmd, },
};


@ -33,7 +33,7 @@ static bool iwl_pnvm_complete_fn(struct iwl_notif_wait_data *notif_wait,
static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
size_t len)
{
struct iwl_ucode_tlv *tlv;
const struct iwl_ucode_tlv *tlv;
u32 sha1 = 0;
u16 mac_type = 0, rf_id = 0;
u8 *pnvm_data = NULL, *tmp;
@ -47,7 +47,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
u32 tlv_len, tlv_type;
len -= sizeof(*tlv);
tlv = (void *)data;
tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
@ -70,7 +70,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
break;
}
sha1 = le32_to_cpup((__le32 *)data);
sha1 = le32_to_cpup((const __le32 *)data);
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_PNVM_VERSION %0x\n",
@ -87,8 +87,8 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
if (hw_match)
break;
mac_type = le16_to_cpup((__le16 *)data);
rf_id = le16_to_cpup((__le16 *)(data + sizeof(__le16)));
mac_type = le16_to_cpup((const __le16 *)data);
rf_id = le16_to_cpup((const __le16 *)(data + sizeof(__le16)));
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n",
@ -99,7 +99,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
hw_match = true;
break;
case IWL_UCODE_TLV_SEC_RT: {
struct iwl_pnvm_section *section = (void *)data;
const struct iwl_pnvm_section *section = (const void *)data;
u32 data_len = tlv_len - sizeof(*section);
IWL_DEBUG_FW(trans,
@ -107,7 +107,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
tlv_len);
/* TODO: remove, this is a deprecated separator */
if (le32_to_cpup((__le32 *)data) == 0xddddeeee) {
if (le32_to_cpup((const __le32 *)data) == 0xddddeeee) {
IWL_DEBUG_FW(trans, "Ignoring separator.\n");
break;
}
@ -173,7 +173,7 @@ out:
static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
size_t len)
{
struct iwl_ucode_tlv *tlv;
const struct iwl_ucode_tlv *tlv;
IWL_DEBUG_FW(trans, "Parsing PNVM file\n");
@ -181,7 +181,7 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
u32 tlv_len, tlv_type;
len -= sizeof(*tlv);
tlv = (void *)data;
tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
@ -193,8 +193,8 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
}
if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
struct iwl_sku_id *sku_id =
(void *)(data + sizeof(*tlv));
const struct iwl_sku_id *sku_id =
(const void *)(data + sizeof(*tlv));
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_PNVM_SKU len %d\n",


@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
* Copyright (C) 2018-2020 Intel Corporation
* Copyright (C) 2018-2022 Intel Corporation
*/
#ifndef __iwl_fw_runtime_h__
#define __iwl_fw_runtime_h__
@ -16,7 +16,7 @@
#include "fw/acpi.h"
struct iwl_fw_runtime_ops {
int (*dump_start)(void *ctx);
void (*dump_start)(void *ctx);
void (*dump_end)(void *ctx);
bool (*fw_running)(void *ctx);
int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd);
@ -163,6 +163,7 @@ struct iwl_fw_runtime {
u32 ppag_ver;
struct iwl_sar_offset_mapping_cmd sgom_table;
bool sgom_enabled;
u8 reduced_power_flags;
#endif
};


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -89,7 +89,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
if (fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
cmd.id = WIDE_ID(SYSTEM_GROUP, SHARED_MEM_CFG_CMD);
else
cmd.id = SHARED_MEM_CFG;


@ -69,7 +69,7 @@ out:
static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
const u8 *data, size_t len)
{
struct iwl_ucode_tlv *tlv;
const struct iwl_ucode_tlv *tlv;
u8 *reduce_power_data = NULL, *tmp;
u32 size = 0;
@ -79,7 +79,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
u32 tlv_len, tlv_type;
len -= sizeof(*tlv);
tlv = (void *)data;
tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
@ -154,7 +154,7 @@ out:
static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
const u8 *data, size_t len)
{
struct iwl_ucode_tlv *tlv;
const struct iwl_ucode_tlv *tlv;
void *sec_data;
IWL_DEBUG_FW(trans, "Parsing REDUCE_POWER data\n");
@ -163,7 +163,7 @@ static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
u32 tlv_len, tlv_type;
len -= sizeof(*tlv);
tlv = (void *)data;
tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
@ -175,8 +175,8 @@ static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
}
if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
struct iwl_sku_id *sku_id =
(void *)(data + sizeof(*tlv));
const struct iwl_sku_id *sku_id =
(const void *)(data + sizeof(*tlv));
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_PNVM_SKU len %d\n",


@ -260,6 +260,7 @@ enum iwl_cfg_trans_ltr_delay {
* @integrated: discrete or integrated
* @low_latency_xtal: use the low latency xtal if supported
* @ltr_delay: LTR delay parameter, &enum iwl_cfg_trans_ltr_delay.
* @imr_enabled: use the IMR if supported.
*/
struct iwl_cfg_trans_params {
const struct iwl_base_params *base_params;
@ -274,7 +275,8 @@ struct iwl_cfg_trans_params {
integrated:1,
low_latency_xtal:1,
bisr_workaround:1,
ltr_delay:2;
ltr_delay:2,
imr_enabled:1;
};
/**
@ -343,8 +345,8 @@ struct iwl_fw_mon_regs {
* @bisr_workaround: BISR hardware workaround (for 22260 series devices)
* @min_txq_size: minimum number of slots required in a TX queue
* @uhb_supported: ultra high band channels supported
* @min_256_ba_txq_size: minimum number of slots required in a TX queue which
* supports 256 BA aggregation
* @min_ba_txq_size: minimum number of slots required in a TX queue which
* is based on hardware support (HE - 256, EHT - 1K).
* @num_rbds: number of receive buffer descriptors to use
* (only used for multi-queue capable devices)
* @mac_addr_csr_base: CSR base register for MAC address access, if not set
@ -405,9 +407,10 @@ struct iwl_cfg {
u32 d3_debug_data_length;
u32 min_txq_size;
u32 gp2_reg_addr;
u32 min_256_ba_txq_size;
u32 min_ba_txq_size;
const struct iwl_fw_mon_regs mon_dram_regs;
const struct iwl_fw_mon_regs mon_smem_regs;
const struct iwl_fw_mon_regs mon_dbgi_regs;
};
#define IWL_CFG_ANY (~0)
@ -433,6 +436,7 @@ struct iwl_cfg {
#define IWL_CFG_RF_TYPE_HR1 0x10C
#define IWL_CFG_RF_TYPE_GF 0x10D
#define IWL_CFG_RF_TYPE_MR 0x110
#define IWL_CFG_RF_TYPE_MS 0x111
#define IWL_CFG_RF_TYPE_FM 0x112
#define IWL_CFG_RF_ID_TH 0x1
@ -489,6 +493,7 @@ extern const struct iwl_cfg_trans_params iwl_ax200_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_snj_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_so_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg;
extern const char iwl9162_name[];
@ -509,6 +514,7 @@ extern const char iwl9560_killer_1550i_name[];
extern const char iwl9560_killer_1550s_name[];
extern const char iwl_ax200_name[];
extern const char iwl_ax203_name[];
extern const char iwl_ax204_name[];
extern const char iwl_ax201_name[];
extern const char iwl_ax101_name[];
extern const char iwl_ax200_killer_1650w_name[];
@ -631,9 +637,12 @@ extern const struct iwl_cfg iwl_cfg_ma_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_gf4_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_ms_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_fm_a0;
extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0;
extern const struct iwl_cfg iwl_cfg_snj_a0_ms_a0;
extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
extern const struct iwl_cfg iwl_cfg_so_a0_ms_a0;
extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_bz_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_bz_a0_gf_a0;


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2018, 2020-2021 Intel Corporation
* Copyright (C) 2018, 2020-2022 Intel Corporation
*/
#ifndef __iwl_context_info_file_gen3_h__
#define __iwl_context_info_file_gen3_h__
@ -34,6 +34,7 @@ enum iwl_prph_scratch_mtr_format {
/**
* enum iwl_prph_scratch_flags - PRPH scratch control flags
* @IWL_PRPH_SCRATCH_IMR_DEBUG_EN: IMR support for debug
* @IWL_PRPH_SCRATCH_EARLY_DEBUG_EN: enable early debug conf
* @IWL_PRPH_SCRATCH_EDBG_DEST_DRAM: use DRAM, with size allocated
* in hwm config.
@ -55,6 +56,7 @@ enum iwl_prph_scratch_mtr_format {
* @IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K: 16kB RB size
*/
enum iwl_prph_scratch_flags {
IWL_PRPH_SCRATCH_IMR_DEBUG_EN = BIT(1),
IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4),
IWL_PRPH_SCRATCH_EDBG_DEST_DRAM = BIT(8),
IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL = BIT(9),


@ -534,6 +534,9 @@ enum {
* 11-8: queue selector
*/
#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
/* This register is common for Tx and Rx, Rx queues start from 512 */
#define HBUS_TARG_WRPTR_Q_SHIFT (16)
#define HBUS_TARG_WRPTR_RX_Q(q) (((q) + 512) << HBUS_TARG_WRPTR_Q_SHIFT)
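/*
 * Worked example (illustrative): RX queue 0 maps to hardware queue 512, so
 * HBUS_TARG_WRPTR_RX_Q(0) == (0 + 512) << 16 == 0x02000000, leaving the low
 * 16 bits free for the write-pointer index itself. A hypothetical caller
 * might then do something like:
 *
 *	iwl_write32(trans, HBUS_TARG_WRPTR, HBUS_TARG_WRPTR_RX_Q(0) | 7);
 */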
/**********************************************************
* CSR values


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2018-2021 Intel Corporation
* Copyright (C) 2018-2022 Intel Corporation
*/
#include <linux/firmware.h>
#include "iwl-drv.h"
@ -74,7 +74,8 @@ static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
if (!node)
return -ENOMEM;
memcpy(&node->tlv, tlv, sizeof(node->tlv) + len);
memcpy(&node->tlv, tlv, sizeof(node->tlv));
memcpy(node->tlv.data, tlv->data, len);
list_add_tail(&node->list, list);
return 0;
@ -181,11 +182,11 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);
/*
* The higher part of the ID from version 2 is irrelevant for
* us, so mask it out.
* The higher part of the ID from version 2 is the debug policy;
* only the 16 LSBs carry the region ID, so mask out the rest.
*/
if (le32_to_cpu(reg->hdr.version) >= 2)
id &= IWL_FW_INI_REGION_V2_MASK;
id &= IWL_FW_INI_REGION_ID_MASK;
if (le32_to_cpu(tlv->length) < sizeof(*reg))
return -EINVAL;
@ -211,6 +212,14 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
return -EOPNOTSUPP;
}
if (type == IWL_FW_INI_REGION_INTERNAL_BUFFER) {
trans->dbg.imr_data.sram_addr =
le32_to_cpu(reg->internal_buffer.base_addr);
trans->dbg.imr_data.sram_size =
le32_to_cpu(reg->internal_buffer.size);
}
active_reg = &trans->dbg.active_regions[id];
if (*active_reg) {
IWL_WARN(trans, "WRT: Overriding region id %u\n", id);
@ -271,7 +280,7 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
static int iwl_dbg_tlv_config_set(struct iwl_trans *trans,
const struct iwl_ucode_tlv *tlv)
{
struct iwl_fw_ini_conf_set_tlv *conf_set = (void *)tlv->data;
const struct iwl_fw_ini_conf_set_tlv *conf_set = (const void *)tlv->data;
u32 tp = le32_to_cpu(conf_set->time_point);
u32 type = le32_to_cpu(conf_set->set_type);
@ -460,7 +469,7 @@ static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,
while (len >= sizeof(*tlv)) {
len -= sizeof(*tlv);
tlv = (void *)data;
tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
@ -577,8 +586,7 @@ static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
return 0;
num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
if (!fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP)) {
if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
return -EIO;
num_frags = 1;
@ -762,33 +770,40 @@ static int iwl_dbg_tlv_update_dram(struct iwl_fw_runtime *fwrt,
static void iwl_dbg_tlv_update_drams(struct iwl_fw_runtime *fwrt)
{
int ret, i, dram_alloc = 0;
struct iwl_dram_info dram_info;
int ret, i;
bool dram_alloc = false;
struct iwl_dram_data *frags =
&fwrt->trans->dbg.fw_mon_ini[IWL_FW_INI_ALLOCATION_ID_DBGC1].frags[0];
struct iwl_dram_info *dram_info;
if (!frags || !frags->block)
return;
dram_info = frags->block;
if (!fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT))
return;
dram_info.first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD);
dram_info.second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD);
dram_info->first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD);
dram_info->second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD);
for (i = IWL_FW_INI_ALLOCATION_ID_DBGC1;
i <= IWL_FW_INI_ALLOCATION_ID_DBGC3; i++) {
ret = iwl_dbg_tlv_update_dram(fwrt, i, &dram_info);
ret = iwl_dbg_tlv_update_dram(fwrt, i, dram_info);
if (!ret)
dram_alloc++;
dram_alloc = true;
else
IWL_WARN(fwrt,
"WRT: Failed to set DRAM buffer for alloc id %d, ret=%d\n",
i, ret);
}
if (dram_alloc) {
memcpy(frags->block, &dram_info, sizeof(dram_info));
IWL_DEBUG_FW(fwrt, "block data after %016x\n",
*((int *)fwrt->trans->dbg.fw_mon_ini[1].frags[0].block));
}
if (dram_alloc)
IWL_DEBUG_FW(fwrt, "block data after %08x\n",
dram_info->first_word);
else
memset(frags->block, 0, sizeof(*dram_info));
}
static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
@ -811,11 +826,11 @@ static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
}
static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt,
struct list_head *config_list)
struct list_head *conf_list)
{
struct iwl_dbg_tlv_node *node;
list_for_each_entry(node, config_list, list) {
list_for_each_entry(node, conf_list, list) {
struct iwl_fw_ini_conf_set_tlv *config_list = (void *)node->tlv.data;
u32 count, address, value;
u32 len = (le32_to_cpu(node->tlv.length) - sizeof(*config_list)) / 8;
@ -861,11 +876,18 @@ static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt,
case IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: {
struct iwl_dbgc1_info dram_info = {};
struct iwl_dram_data *frags = &fwrt->trans->dbg.fw_mon_ini[1].frags[0];
__le64 dram_base_addr = cpu_to_le64(frags->physical);
__le32 dram_size = cpu_to_le32(frags->size);
u64 dram_addr = le64_to_cpu(dram_base_addr);
__le64 dram_base_addr;
__le32 dram_size;
u64 dram_addr;
u32 ret;
if (!frags)
break;
dram_base_addr = cpu_to_le64(frags->physical);
dram_size = cpu_to_le32(frags->size);
dram_addr = le64_to_cpu(dram_base_addr);
IWL_DEBUG_FW(fwrt, "WRT: dram_base_addr 0x%016llx, dram_size 0x%x\n",
dram_base_addr, dram_size);
IWL_DEBUG_FW(fwrt, "WRT: config_list->addr_offset: %u\n",


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2018-2021 Intel Corporation
* Copyright (C) 2018-2022 Intel Corporation
*/
#ifndef __iwl_dbg_tlv_h__
#define __iwl_dbg_tlv_h__
@ -10,6 +10,8 @@
#include <fw/file.h>
#include <fw/api/dbg-tlv.h>
#define IWL_DBG_TLV_MAX_PRESET 15
/**
* struct iwl_dbg_tlv_node - debug TLV node
* @list: list of &struct iwl_dbg_tlv_node


@ -243,14 +243,14 @@ struct iwl_firmware_pieces {
/* FW debug data parsed for driver usage */
bool dbg_dest_tlv_init;
u8 *dbg_dest_ver;
const u8 *dbg_dest_ver;
union {
struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1;
const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1;
};
struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
const struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
size_t n_mem_tlv;
@ -324,29 +324,6 @@ static void set_sec_offset(struct iwl_firmware_pieces *pieces,
pieces->img[type].sec[sec].offset = offset;
}
static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
{
int i, j;
struct iwl_fw_cscheme_list *l = (struct iwl_fw_cscheme_list *)data;
struct iwl_fw_cipher_scheme *fwcs;
if (len < sizeof(*l) ||
len < sizeof(l->size) + l->size * sizeof(l->cs[0]))
return -EINVAL;
for (i = 0, j = 0; i < IWL_UCODE_MAX_CS && i < l->size; i++) {
fwcs = &l->cs[j];
/* we skip schemes with zero cipher suite selector */
if (!fwcs->cipher)
continue;
fw->cs[j++] = *fwcs;
}
return 0;
}
/*
* Gets uCode section from tlv.
*/
@ -356,13 +333,13 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
{
struct fw_img_parsing *img;
struct fw_sec *sec;
struct fw_sec_parsing *sec_parse;
const struct fw_sec_parsing *sec_parse;
size_t alloc_size;
if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX))
return -1;
sec_parse = (struct fw_sec_parsing *)data;
sec_parse = (const struct fw_sec_parsing *)data;
img = &pieces->img[type];
@ -385,8 +362,8 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
{
struct iwl_tlv_calib_data *def_calib =
(struct iwl_tlv_calib_data *)data;
const struct iwl_tlv_calib_data *def_calib =
(const struct iwl_tlv_calib_data *)data;
u32 ucode_type = le32_to_cpu(def_calib->ucode_type);
if (ucode_type >= IWL_UCODE_TYPE_MAX) {
IWL_ERR(drv, "Wrong ucode_type %u for default calibration.\n",
@ -404,7 +381,7 @@ static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
struct iwl_ucode_capabilities *capa)
{
const struct iwl_ucode_api *ucode_api = (void *)data;
const struct iwl_ucode_api *ucode_api = (const void *)data;
u32 api_index = le32_to_cpu(ucode_api->api_index);
u32 api_flags = le32_to_cpu(ucode_api->api_flags);
int i;
@ -425,7 +402,7 @@ static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
static void iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
struct iwl_ucode_capabilities *capa)
{
const struct iwl_ucode_capa *ucode_capa = (void *)data;
const struct iwl_ucode_capa *ucode_capa = (const void *)data;
u32 api_index = le32_to_cpu(ucode_capa->api_index);
u32 api_flags = le32_to_cpu(ucode_capa->api_capa);
int i;
@ -457,7 +434,7 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
const struct firmware *ucode_raw,
struct iwl_firmware_pieces *pieces)
{
struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
const struct iwl_ucode_header *ucode = (const void *)ucode_raw->data;
u32 api_ver, hdr_size, build;
char buildstr[25];
const u8 *src;
@ -600,7 +577,7 @@ static void iwl_parse_dbg_tlv_assert_tables(struct iwl_drv *drv,
sizeof(region->special_mem))
return;
region = (void *)tlv->data;
region = (const void *)tlv->data;
addr = le32_to_cpu(region->special_mem.base_addr);
addr += le32_to_cpu(region->special_mem.offset);
addr &= ~FW_ADDR_CACHE_CONTROL;
@ -655,7 +632,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
struct iwl_ucode_capabilities *capa,
bool *usniffer_images)
{
struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
const struct iwl_tlv_ucode_header *ucode = (const void *)ucode_raw->data;
const struct iwl_ucode_tlv *tlv;
size_t len = ucode_raw->size;
const u8 *data;
@ -704,8 +681,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
while (len >= sizeof(*tlv)) {
len -= sizeof(*tlv);
tlv = (void *)data;
tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
tlv_data = tlv->data;
@ -762,7 +739,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
capa->max_probe_length =
le32_to_cpup((__le32 *)tlv_data);
le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_PAN:
if (tlv_len)
@ -783,7 +760,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
* will not work with the new firmware, or
* it'll not take advantage of new features.
*/
capa->flags = le32_to_cpup((__le32 *)tlv_data);
capa->flags = le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_API_CHANGES_SET:
if (tlv_len != sizeof(struct iwl_ucode_api))
@ -799,37 +776,37 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->init_evtlog_ptr =
le32_to_cpup((__le32 *)tlv_data);
le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_INIT_EVTLOG_SIZE:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->init_evtlog_size =
le32_to_cpup((__le32 *)tlv_data);
le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_INIT_ERRLOG_PTR:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->init_errlog_ptr =
le32_to_cpup((__le32 *)tlv_data);
le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_RUNT_EVTLOG_PTR:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->inst_evtlog_ptr =
le32_to_cpup((__le32 *)tlv_data);
le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->inst_evtlog_size =
le32_to_cpup((__le32 *)tlv_data);
le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_RUNT_ERRLOG_PTR:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->inst_errlog_ptr =
le32_to_cpup((__le32 *)tlv_data);
le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_ENHANCE_SENS_TBL:
if (tlv_len)
@ -858,7 +835,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
capa->standard_phy_calibration_size =
le32_to_cpup((__le32 *)tlv_data);
le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_SEC_RT:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
@ -884,7 +861,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
case IWL_UCODE_TLV_PHY_SKU:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data);
drv->fw.phy_config = le32_to_cpup((const __le32 *)tlv_data);
drv->fw.valid_tx_ant = (drv->fw.phy_config &
FW_PHY_CFG_TX_CHAIN) >>
FW_PHY_CFG_TX_CHAIN_POS;
@ -911,7 +888,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
num_of_cpus =
le32_to_cpup((__le32 *)tlv_data);
le32_to_cpup((const __le32 *)tlv_data);
if (num_of_cpus == 2) {
drv->fw.img[IWL_UCODE_REGULAR].is_dual_cpus =
@ -925,18 +902,14 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -EINVAL;
}
break;
case IWL_UCODE_TLV_CSCHEME:
if (iwl_store_cscheme(&drv->fw, tlv_data, tlv_len))
goto invalid_tlv_len;
break;
case IWL_UCODE_TLV_N_SCAN_CHANNELS:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
capa->n_scan_channels =
le32_to_cpup((__le32 *)tlv_data);
le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_FW_VERSION: {
__le32 *ptr = (void *)tlv_data;
const __le32 *ptr = (const void *)tlv_data;
u32 major, minor;
u8 local_comp;
@ -960,15 +933,15 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
case IWL_UCODE_TLV_FW_DBG_DEST: {
struct iwl_fw_dbg_dest_tlv *dest = NULL;
struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
const struct iwl_fw_dbg_dest_tlv *dest = NULL;
const struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
u8 mon_mode;
pieces->dbg_dest_ver = (u8 *)tlv_data;
pieces->dbg_dest_ver = (const u8 *)tlv_data;
if (*pieces->dbg_dest_ver == 1) {
dest = (void *)tlv_data;
dest = (const void *)tlv_data;
} else if (*pieces->dbg_dest_ver == 0) {
dest_v1 = (void *)tlv_data;
dest_v1 = (const void *)tlv_data;
} else {
IWL_ERR(drv,
"The version is %d, and it is invalid\n",
@ -1009,7 +982,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
case IWL_UCODE_TLV_FW_DBG_CONF: {
struct iwl_fw_dbg_conf_tlv *conf = (void *)tlv_data;
const struct iwl_fw_dbg_conf_tlv *conf =
(const void *)tlv_data;
if (!pieces->dbg_dest_tlv_init) {
IWL_ERR(drv,
@ -1043,8 +1017,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
case IWL_UCODE_TLV_FW_DBG_TRIGGER: {
struct iwl_fw_dbg_trigger_tlv *trigger =
(void *)tlv_data;
const struct iwl_fw_dbg_trigger_tlv *trigger =
(const void *)tlv_data;
u32 trigger_id = le32_to_cpu(trigger->id);
if (trigger_id >= ARRAY_SIZE(drv->fw.dbg.trigger_tlv)) {
@ -1075,7 +1049,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
}
drv->fw.dbg.dump_mask =
le32_to_cpup((__le32 *)tlv_data);
le32_to_cpup((const __le32 *)tlv_data);
break;
}
case IWL_UCODE_TLV_SEC_RT_USNIFFER:
@ -1087,7 +1061,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
case IWL_UCODE_TLV_PAGING:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
paging_mem_size = le32_to_cpup((__le32 *)tlv_data);
paging_mem_size = le32_to_cpup((const __le32 *)tlv_data);
IWL_DEBUG_FW(drv,
"Paging: paging enabled (size = %u bytes)\n",
@ -1117,8 +1091,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
/* ignored */
break;
case IWL_UCODE_TLV_FW_MEM_SEG: {
struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
(void *)tlv_data;
const struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
(const void *)tlv_data;
size_t size;
struct iwl_fw_dbg_mem_seg_tlv *n;
@ -1146,10 +1120,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
case IWL_UCODE_TLV_FW_RECOVERY_INFO: {
struct {
const struct {
__le32 buf_addr;
__le32 buf_size;
} *recov_info = (void *)tlv_data;
} *recov_info = (const void *)tlv_data;
if (tlv_len != sizeof(*recov_info))
goto invalid_tlv_len;
@ -1160,10 +1134,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
}
break;
case IWL_UCODE_TLV_FW_FSEQ_VERSION: {
struct {
const struct {
u8 version[32];
u8 sha1[20];
} *fseq_ver = (void *)tlv_data;
} *fseq_ver = (const void *)tlv_data;
if (tlv_len != sizeof(*fseq_ver))
goto invalid_tlv_len;
@ -1174,19 +1148,19 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
case IWL_UCODE_TLV_FW_NUM_STATIONS:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
if (le32_to_cpup((__le32 *)tlv_data) >
if (le32_to_cpup((const __le32 *)tlv_data) >
IWL_MVM_STATION_COUNT_MAX) {
IWL_ERR(drv,
"%d is an invalid number of station\n",
le32_to_cpup((__le32 *)tlv_data));
le32_to_cpup((const __le32 *)tlv_data));
goto tlv_error;
}
capa->num_stations =
le32_to_cpup((__le32 *)tlv_data);
le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_UMAC_DEBUG_ADDRS: {
struct iwl_umac_debug_addrs *dbg_ptrs =
(void *)tlv_data;
const struct iwl_umac_debug_addrs *dbg_ptrs =
(const void *)tlv_data;
if (tlv_len != sizeof(*dbg_ptrs))
goto invalid_tlv_len;
@ -1201,8 +1175,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
case IWL_UCODE_TLV_LMAC_DEBUG_ADDRS: {
struct iwl_lmac_debug_addrs *dbg_ptrs =
(void *)tlv_data;
const struct iwl_lmac_debug_addrs *dbg_ptrs =
(const void *)tlv_data;
if (tlv_len != sizeof(*dbg_ptrs))
goto invalid_tlv_len;
@ -1277,7 +1251,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (len) {
IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len);
iwl_print_hex_dump(drv, IWL_DL_FW, (u8 *)data, len);
iwl_print_hex_dump(drv, IWL_DL_FW, data, len);
return -EINVAL;
}
@ -1418,7 +1392,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
{
struct iwl_drv *drv = context;
struct iwl_fw *fw = &drv->fw;
struct iwl_ucode_header *ucode;
const struct iwl_ucode_header *ucode;
struct iwlwifi_opmode_table *op;
int err;
struct iwl_firmware_pieces *pieces;
@ -1456,7 +1430,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
}
/* Data from ucode file: header followed by uCode images */
ucode = (struct iwl_ucode_header *)ucode_raw->data;
ucode = (const struct iwl_ucode_header *)ucode_raw->data;
if (ucode->ver)
err = iwl_parse_v1_v2_firmware(drv, ucode_raw, pieces);
@ -1645,6 +1619,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
/* We have our copies now, allow OS release its copies */
release_firmware(ucode_raw);
iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);
mutex_lock(&iwlwifi_opmode_table_mtx);
switch (fw->type) {
case IWL_FW_DVM:
@ -1661,8 +1637,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
IWL_INFO(drv, "loaded firmware version %s op_mode %s\n",
drv->fw.fw_version, op->name);
iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);
/* add this device to the list of devices using this op_mode */
list_add_tail(&drv->list, &op->drv);
@ -1796,6 +1770,7 @@ void iwl_drv_stop(struct iwl_drv *drv)
kfree(drv);
}
#define ENABLE_INI (IWL_DBG_TLV_MAX_PRESET + 1)
/* shared module parameters */
struct iwl_mod_params iwlwifi_mod_params = {
@ -1803,7 +1778,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
.bt_coex_active = true,
.power_level = IWL_POWER_INDEX_1,
.uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT,
.enable_ini = true,
.enable_ini = ENABLE_INI,
/* the rest are 0 by default */
};
IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
@ -1915,10 +1890,42 @@ MODULE_PARM_DESC(nvm_file, "NVM file name");
module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
MODULE_PARM_DESC(uapsd_disable,
"disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
module_param_named(enable_ini, iwlwifi_mod_params.enable_ini,
bool, S_IRUGO | S_IWUSR);
static int enable_ini_set(const char *arg, const struct kernel_param *kp)
{
int ret = 0;
bool res;
__u32 new_enable_ini;
/* in case the argument type is a number */
ret = kstrtou32(arg, 0, &new_enable_ini);
if (!ret) {
if (new_enable_ini > ENABLE_INI) {
pr_err("enable_ini cannot be %d, in range 0-16\n", new_enable_ini);
return -EINVAL;
}
goto out;
}
/* in case the argument type is boolean */
ret = kstrtobool(arg, &res);
if (ret)
return ret;
new_enable_ini = (res ? ENABLE_INI : 0);
out:
iwlwifi_mod_params.enable_ini = new_enable_ini;
return 0;
}
static const struct kernel_param_ops enable_ini_ops = {
.set = enable_ini_set
};
module_param_cb(enable_ini, &enable_ini_ops, &iwlwifi_mod_params.enable_ini, 0644);
MODULE_PARM_DESC(enable_ini,
"Enable debug INI TLV FW debug infrastructure (default: true");
"0:disable, 1-15:FW_DBG_PRESET Values, 16:enabled without preset value defined,"
"Debug INI TLV FW debug infrastructure (default: 16)");
/*
* set bt_coex_active to true, uCode will do kill/defer


@ -84,7 +84,7 @@ void iwl_drv_stop(struct iwl_drv *drv);
* everything is built-in, then we can avoid that.
*/
#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
#define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_GPL(sym)
#define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_NS_GPL(sym, IWLWIFI)
#else
#define IWL_EXPORT_SYMBOL(sym)
#endif
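/*
 * Note on the namespaced export above (a sketch, not part of the patch): with
 * EXPORT_SYMBOL_NS_GPL(sym, IWLWIFI), every module that links against these
 * symbols - e.g. the op-mode modules - additionally needs to import the
 * namespace, roughly like this, or modpost complains and the module is
 * rejected at load time:
 *
 *	MODULE_IMPORT_NS(IWLWIFI);
 */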


@ -23,26 +23,22 @@
*/
#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
/*
* The device's EEPROM semaphore prevents conflicts between driver and uCode
* when accessing the EEPROM; each access is a series of pulses to/from the
* EEPROM chip, not a single event, so even reads could conflict if they
* weren't arbitrated by the semaphore.
*/
#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
{
u16 count;
int ret;
for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
for (count = 0; count < IWL_EEPROM_SEM_RETRY_LIMIT; count++) {
/* Request semaphore */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
@ -51,7 +47,7 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
EEPROM_SEM_TIMEOUT);
IWL_EEPROM_SEM_TIMEOUT);
if (ret >= 0) {
IWL_DEBUG_EEPROM(trans->dev,
"Acquired semaphore after %d tries.\n",


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2018-2020 Intel Corporation
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fh_h__
@ -590,11 +590,31 @@ struct iwl_rb_status {
#define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3)
#define TFD_QUEUE_SIZE_BC_DUP (64)
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
#define TFD_QUEUE_BC_SIZE_GEN3 1024
#define TFD_QUEUE_BC_SIZE_GEN3_AX210 1024
#define TFD_QUEUE_BC_SIZE_GEN3_BZ (1024 * 4)
#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS 20
#define IWL_TFH_NUM_TBS 25
/* IMR DMA registers */
#define IMR_TFH_SRV_DMA_CHNL0_CTRL 0x00a0a51c
#define IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR 0x00a0a520
#define IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB 0x00a0a524
#define IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB 0x00a0a528
#define IMR_TFH_SRV_DMA_CHNL0_BC 0x00a0a52c
#define TFH_SRV_DMA_CHNL0_LEFT_BC 0x00a0a530
/* RFH S2D DMA registers */
#define IMR_RFH_GEN_CFG_SERVICE_DMA_RS_MSK 0x0000000c
#define IMR_RFH_GEN_CFG_SERVICE_DMA_SNOOP_MSK 0x00000002
/* TFH D2S DMA registers */
#define IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK 0x80000000
#define IMR_UREG_CHICK 0x00d05c00
#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS 0x00800000
#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK 0x00000030
#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS 0x80000000
static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
{
return (sizeof(addr) > sizeof(u32) ? upper_32_bits(addr) : 0) & 0xF;
@ -707,14 +727,14 @@ struct iwlagn_scd_bc_tbl {
} __packed;
/**
* struct iwl_gen3_bc_tbl scheduler byte count table gen3
* struct iwl_gen3_bc_tbl_entry scheduler byte count table entry gen3
* For AX210 and on:
* @tfd_offset: 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
*/
struct iwl_gen3_bc_tbl {
__le16 tfd_offset[TFD_QUEUE_BC_SIZE_GEN3];
struct iwl_gen3_bc_tbl_entry {
__le16 tfd_offset;
} __packed;
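/*
 * Hedged sketch (not part of the patch): with the byte-count table now
 * described per entry, the allocation size becomes a per-family entry count
 * times the entry size; the helper name below is an assumption, only the
 * defines and the struct come from this file.
 */
static inline size_t demo_gen3_bc_tbl_size(bool is_bz)
{
	size_t n = is_bz ? TFD_QUEUE_BC_SIZE_GEN3_BZ : TFD_QUEUE_BC_SIZE_GEN3_AX210;

	return n * sizeof(struct iwl_gen3_bc_tbl_entry);
}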
#endif /* !__iwl_fh_h__ */


@ -65,14 +65,14 @@ IWL_EXPORT_SYMBOL(iwl_poll_bit);
u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
{
u32 value = 0x5a5a5a5a;
if (iwl_trans_grab_nic_access(trans)) {
value = iwl_read32(trans, reg);
u32 value = iwl_read32(trans, reg);
iwl_trans_release_nic_access(trans);
return value;
}
return value;
return 0x5a5a5a5a;
}
IWL_EXPORT_SYMBOL(iwl_read_direct32);
@ -135,13 +135,15 @@ IWL_EXPORT_SYMBOL(iwl_write_prph64_no_grab);
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
{
u32 val = 0x5a5a5a5a;
if (iwl_trans_grab_nic_access(trans)) {
val = iwl_read_prph_no_grab(trans, ofs);
u32 val = iwl_read_prph_no_grab(trans, ofs);
iwl_trans_release_nic_access(trans);
return val;
}
return val;
return 0x5a5a5a5a;
}
IWL_EXPORT_SYMBOL(iwl_read_prph);
