wireless-drivers-next patches for v5.17

First set of patches for v5.17. The biggest change is the iwlmei
 driver for Intel's AMT devices. Also now WCN6855 support in ath11k
 should be usable.
 
 Major changes:
 
 ath10k
 
 * fetch (pre-)calibration data via nvmem subsystem
 
 ath11k
 
 * enable 802.11 power save mode in station mode for qca6390 and wcn6855
 
 * trace log support
 
 * proper board file detection for WCN6855 based on PCI ids
 
 * BSS color change support
 
 rtw88
 
 * add debugfs file to force lowest basic rate
 
 * add quirk to disable PCI ASPM on HP 250 G7 Notebook PC
 
 mwifiex
 
 * add quirk to disable deep sleep with certain hardware revision in
   Surface Book 2 devices
 
 iwlwifi
 
 * add iwlmei driver for co-operating with Intel's Active Management
   Technology (AMT) devices
 -----BEGIN PGP SIGNATURE-----
 
 iQFFBAABCgAvFiEEiBjanGPFTz4PRfLobhckVSbrbZsFAmGvclcRHGt2YWxvQGtl
 cm5lbC5vcmcACgkQbhckVSbrbZtK6wgAoOoT83JKMreXlXXhVegqlJbC3HyElF5r
 DJlpDDJkJUT9airol2nd0yFfP+5WFyQPrt5shsQmqz43U4jlgfFpFXZIjQufK+gn
 VAGvVGfsanRXEFlsVgFZeSZvAEyEyNSggxADC03Ky0xtiCGc89r2o71jD3HA/ZzO
 1X8gbKH7YLWj4G/GQkrKsvIAwzoZXT7nwQSdW73M8QVzk4OSNhLBLdiqKYq0yViM
 7Ea2Vj27hiyk/RXNUZHy+bKa8vKN5sA91VHJ836aPZBQ4OLotGzF3AgHfgIhIpdr
 hI4BVJbngpjQho1EkCnZZuISPes0YQWJB5hK5xpL98yuIL4wyJRfeQ==
 =I8Dj
 -----END PGP SIGNATURE-----

Merge tag 'wireless-drivers-next-2021-12-07' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for v5.17

First set of patches for v5.17. The biggest change is the iwlmei
driver for Intel's AMT devices. Also now WCN6855 support in ath11k
should be usable.

Major changes:

ath10k
 * fetch (pre-)calibration data via nvmem subsystem

ath11k
 * enable 802.11 power save mode in station mode for qca6390 and wcn6855
 * trace log support
 * proper board file detection for WCN6855 based on PCI ids
 * BSS color change support

rtw88
 * add debugfs file to force lowest basic rate
 * add quirk to disable PCI ASPM on HP 250 G7 Notebook PC

mwifiex
 * add quirk to disable deep sleep with certain hardware revision in
  Surface Book 2 devices

iwlwifi
 * add iwlmei driver for co-operating with Intel's Active Management
   Technology (AMT) devices

* tag 'wireless-drivers-next-2021-12-07' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next: (87 commits)
  iwlwifi: mei: fix linking when tracing is not enabled
  rtlwifi: rtl8192de: Style clean-ups
  mwl8k: Use named struct for memcpy() region
  intersil: Use struct_group() for memcpy() region
  libertas_tf: Use struct_group() for memcpy() region
  libertas: Use struct_group() for memcpy() region
  wlcore: no need to initialise statics to false
  rsi: Fix out-of-bounds read in rsi_read_pkt()
  rsi: Fix use-after-free in rsi_rx_done_handler()
  brcmfmac: Configure keep-alive packet on suspend
  wilc1000: remove '-Wunused-but-set-variable' warning in chip_wakeup()
  iwlwifi: mvm: read the rfkill state and feed it to iwlmei
  iwlwifi: mvm: add vendor commands needed for iwlmei
  iwlwifi: integrate with iwlmei
  iwlwifi: mei: add debugfs hooks
  iwlwifi: mei: add the driver to allow cooperation with CSME
  mei: bus: add client dma interface
  mwifiex: Ignore BTCOEX events from the 88W8897 firmware
  mwifiex: Ensure the version string from the firmware is 0-terminated
  mwifiex: Add quirk to disable deep sleep with certain hardware revision
  ...
====================

Link: https://lore.kernel.org/r/20211207144211.A9949C341C1@smtp.kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski 2021-12-07 21:01:16 -08:00
Parent commits: adc76fc97b fe6db7eda9
Commit: 150791442e
103 changed files: 13385 additions and 5935 deletions

Просмотреть файл

@ -643,6 +643,64 @@ static void mei_cl_bus_vtag_free(struct mei_cl_device *cldev)
kfree(cl_vtag);
}
/**
 * mei_cldev_dma_map - allocate and map a DMA buffer for the client device
 * @cldev: me client device
 * @buffer_id: id of the mapped buffer (must be non-zero)
 * @size: size of the buffer to allocate; must be a multiple of
 *        MEI_FW_PAGE_SIZE
 *
 * Links the client to the bus if it is not linked yet, then allocates
 * and maps a DMA buffer under the device lock.
 *
 * Return: virtual address of the mapped buffer on success,
 *         ERR_PTR() encoded error on failure.
 */
void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int ret;

	/* buffer_id 0 and size 0 are reserved/invalid */
	if (!cldev || !buffer_id || !size)
		return ERR_PTR(-EINVAL);

	/* firmware DMA buffers must be page-size aligned */
	if (!IS_ALIGNED(size, MEI_FW_PAGE_SIZE)) {
		dev_err(&cldev->dev, "Map size should be aligned to %lu\n",
			MEI_FW_PAGE_SIZE);
		return ERR_PTR(-EINVAL);
	}

	cl = cldev->cl;
	bus = cldev->bus;

	mutex_lock(&bus->device_lock);
	if (cl->state == MEI_FILE_UNINITIALIZED) {
		/* first use of this client: link it to the bus */
		ret = mei_cl_link(cl);
		if (ret)
			goto out;
		/* update pointers */
		cl->cldev = cldev;
	}

	ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size);
out:
	mutex_unlock(&bus->device_lock);
	if (ret)
		return ERR_PTR(ret);
	/* on success the mapped virtual address is stored in cl->dma */
	return cl->dma.vaddr;
}
EXPORT_SYMBOL_GPL(mei_cldev_dma_map);
/**
 * mei_cldev_dma_unmap - unmap the DMA buffer of the client device
 * @cldev: me client device
 *
 * Unmaps the client's DMA buffer, flushes the client's queues and
 * unlinks the client from the bus, all under the device lock.
 *
 * Return: 0 on success, negative error code from mei_cl_dma_unmap()
 *         on failure.
 */
int mei_cldev_dma_unmap(struct mei_cl_device *cldev)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int ret;

	if (!cldev)
		return -EINVAL;

	cl = cldev->cl;
	bus = cldev->bus;

	mutex_lock(&bus->device_lock);
	ret = mei_cl_dma_unmap(cl, NULL);

	/* the client is done with DMA: drop queued work and unlink it */
	mei_cl_flush_queues(cl, NULL);
	mei_cl_unlink(cl);
	mutex_unlock(&bus->device_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(mei_cldev_dma_unmap);
/**
* mei_cldev_enable - enable me client device
* create connection with me client
@ -753,9 +811,11 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
dev_err(bus->dev, "Could not disconnect from the ME client\n");
out:
/* Flush queues and remove any pending read */
mei_cl_flush_queues(cl, NULL);
mei_cl_unlink(cl);
/* Flush queues and remove any pending read unless we have mapped DMA */
if (!cl->dma_mapped) {
mei_cl_flush_queues(cl, NULL);
mei_cl_unlink(cl);
}
mutex_unlock(&bus->device_lock);
return err;
@ -1052,6 +1112,7 @@ static void mei_cl_bus_dev_release(struct device *dev)
if (!cldev)
return;
mei_cl_flush_queues(cldev->cl, NULL);
mei_me_cl_put(cldev->me_cl);
mei_dev_bus_put(cldev->bus);
mei_cl_unlink(cldev->cl);

Просмотреть файл

@ -700,6 +700,9 @@ int mei_cl_unlink(struct mei_cl *cl)
cl_dbg(dev, cl, "unlink client");
if (cl->state == MEI_FILE_UNINITIALIZED)
return 0;
if (dev->open_handle_count > 0)
dev->open_handle_count--;

Просмотреть файл

@ -22,6 +22,11 @@
#define MEI_D0I3_TIMEOUT 5 /* D0i3 set/unset max response time */
#define MEI_HBM_TIMEOUT 1 /* 1 second */
/*
* FW page size for DMA allocations
*/
#define MEI_FW_PAGE_SIZE 4096UL
/*
* MEI Version
*/

Просмотреть файл

@ -153,6 +153,10 @@ static void ar5523_cmd_rx_cb(struct urb *urb)
ar5523_err(ar, "Invalid reply to WDCMSG_TARGET_START");
return;
}
if (!cmd->odata) {
ar5523_err(ar, "Unexpected WDCMSG_TARGET_START reply");
return;
}
memcpy(cmd->odata, hdr + 1, sizeof(u32));
cmd->olen = sizeof(u32);
cmd->res = 0;

Просмотреть файл

@ -12,6 +12,7 @@
#include <linux/dmi.h>
#include <linux/ctype.h>
#include <linux/pm_qos.h>
#include <linux/nvmem-consumer.h>
#include <asm/byteorder.h>
#include "core.h"
@ -935,7 +936,8 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
}
if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT ||
ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE)
ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE ||
ar->cal_mode == ATH10K_PRE_CAL_MODE_NVMEM)
bmi_board_id_param = BMI_PARAM_GET_FLASH_BOARD_ID;
else
bmi_board_id_param = BMI_PARAM_GET_EEPROM_BOARD_ID;
@ -1726,7 +1728,8 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
/* As of now pre-cal is valid for 10_4 variants */
if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT ||
ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE)
ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE ||
ar->cal_mode == ATH10K_PRE_CAL_MODE_NVMEM)
bmi_otp_exe_param = BMI_PARAM_FLASH_SECTION_ALL;
ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result);
@ -1853,6 +1856,39 @@ out_free:
return ret;
}
/*
 * Fetch (pre-)calibration data from the nvmem cell named @cell_name and
 * download it to the device as board data.
 *
 * Returns 0 on success, -EMSGSIZE if the cell's size does not match
 * hw_params.cal_data_len, or a negative error from the nvmem/BMI layers.
 * Note: -EPROBE_DEFER from devm_nvmem_cell_get() is passed through so
 * the caller can retry probing once the nvmem provider appears.
 */
static int ath10k_download_cal_nvmem(struct ath10k *ar, const char *cell_name)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;
	int ret;

	/* cell lifetime is device-managed (devm); no explicit put needed */
	cell = devm_nvmem_cell_get(ar->dev, cell_name);
	if (IS_ERR(cell)) {
		ret = PTR_ERR(cell);
		return ret;
	}

	/* nvmem_cell_read() allocates @buf; we own it and must kfree() it */
	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	if (ar->hw_params.cal_data_len != len) {
		kfree(buf);
		ath10k_warn(ar, "invalid calibration data length in nvmem-cell '%s': %zu != %u\n",
			    cell_name, len, ar->hw_params.cal_data_len);
		return -EMSGSIZE;
	}

	ret = ath10k_download_board_data(ar, buf, len);
	kfree(buf);
	if (ret)
		ath10k_warn(ar, "failed to download calibration data from nvmem-cell '%s': %d\n",
			    cell_name, ret);

	return ret;
}
int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
struct ath10k_fw_file *fw_file)
{
@ -2087,6 +2123,18 @@ static int ath10k_core_pre_cal_download(struct ath10k *ar)
{
int ret;
ret = ath10k_download_cal_nvmem(ar, "pre-calibration");
if (ret == 0) {
ar->cal_mode = ATH10K_PRE_CAL_MODE_NVMEM;
goto success;
} else if (ret == -EPROBE_DEFER) {
return ret;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot did not find a pre-calibration nvmem-cell, try file next: %d\n",
ret);
ret = ath10k_download_cal_file(ar, ar->pre_cal_file);
if (ret == 0) {
ar->cal_mode = ATH10K_PRE_CAL_MODE_FILE;
@ -2153,6 +2201,18 @@ static int ath10k_download_cal_data(struct ath10k *ar)
"pre cal download procedure failed, try cal file: %d\n",
ret);
ret = ath10k_download_cal_nvmem(ar, "calibration");
if (ret == 0) {
ar->cal_mode = ATH10K_CAL_MODE_NVMEM;
goto done;
} else if (ret == -EPROBE_DEFER) {
return ret;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot did not find a calibration nvmem-cell, try file next: %d\n",
ret);
ret = ath10k_download_cal_file(ar, ar->cal_file);
if (ret == 0) {
ar->cal_mode = ATH10K_CAL_MODE_FILE;

Просмотреть файл

@ -877,8 +877,10 @@ enum ath10k_cal_mode {
ATH10K_CAL_MODE_FILE,
ATH10K_CAL_MODE_OTP,
ATH10K_CAL_MODE_DT,
ATH10K_CAL_MODE_NVMEM,
ATH10K_PRE_CAL_MODE_FILE,
ATH10K_PRE_CAL_MODE_DT,
ATH10K_PRE_CAL_MODE_NVMEM,
ATH10K_CAL_MODE_EEPROM,
};
@ -898,10 +900,14 @@ static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
return "otp";
case ATH10K_CAL_MODE_DT:
return "dt";
case ATH10K_CAL_MODE_NVMEM:
return "nvmem";
case ATH10K_PRE_CAL_MODE_FILE:
return "pre-cal-file";
case ATH10K_PRE_CAL_MODE_DT:
return "pre-cal-dt";
case ATH10K_PRE_CAL_MODE_NVMEM:
return "pre-cal-nvmem";
case ATH10K_CAL_MODE_EEPROM:
return "eeprom";
}

Просмотреть файл

@ -14,6 +14,7 @@ const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
.src_nentries = 16,
.src_sz_max = 2048,
.dest_nentries = 0,
.send_cb = ath11k_htc_tx_completion_handler,
},
/* CE1: target->host HTT + HTC control */
@ -40,6 +41,7 @@ const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
.send_cb = ath11k_htc_tx_completion_handler,
},
/* CE4: host->target HTT */
@ -73,11 +75,12 @@ const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
.send_cb = ath11k_htc_tx_completion_handler,
},
/* CE8: target autonomous hif_memcpy */
{
.flags = CE_ATTR_FLAGS,
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
@ -89,6 +92,7 @@ const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
.send_cb = ath11k_htc_tx_completion_handler,
},
/* CE10: target->host HTT */
@ -142,6 +146,7 @@ const struct ce_attr ath11k_host_ce_config_qca6390[] = {
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
.send_cb = ath11k_htc_tx_completion_handler,
},
/* CE4: host->target HTT */
@ -175,6 +180,7 @@ const struct ce_attr ath11k_host_ce_config_qca6390[] = {
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
.send_cb = ath11k_htc_tx_completion_handler,
},
/* CE8: target autonomous hif_memcpy */
@ -220,6 +226,7 @@ const struct ce_attr ath11k_host_ce_config_qcn9074[] = {
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
.send_cb = ath11k_htc_tx_completion_handler,
},
/* CE4: host->target HTT */
@ -489,18 +496,32 @@ err_unlock:
return skb;
}
static void ath11k_ce_send_done_cb(struct ath11k_ce_pipe *pipe)
static void ath11k_ce_tx_process_cb(struct ath11k_ce_pipe *pipe)
{
struct ath11k_base *ab = pipe->ab;
struct sk_buff *skb;
struct sk_buff_head list;
__skb_queue_head_init(&list);
while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
if (!skb)
continue;
dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
if ((!pipe->send_cb) || ab->hw_params.credit_flow) {
dev_kfree_skb_any(skb);
continue;
}
__skb_queue_tail(&list, skb);
}
while ((skb = __skb_dequeue(&list))) {
ath11k_dbg(ab, ATH11K_DBG_AHB, "tx ce pipe %d len %d\n",
pipe->pipe_num, skb->len);
pipe->send_cb(ab, skb);
}
}
@ -636,7 +657,7 @@ static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
pipe->attr_flags = attr->flags;
if (attr->src_nentries) {
pipe->send_cb = ath11k_ce_send_done_cb;
pipe->send_cb = attr->send_cb;
nentries = roundup_pow_of_two(attr->src_nentries);
desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
@ -667,9 +688,10 @@ static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
{
struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
if (pipe->send_cb)
pipe->send_cb(pipe);
if (attr->src_nentries)
ath11k_ce_tx_process_cb(pipe);
if (pipe->recv_cb)
ath11k_ce_recv_process_cb(pipe);
@ -678,9 +700,10 @@ void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
{
struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
const struct ce_attr *attr = &ab->hw_params.host_ce_config[pipe_id];
if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
pipe->send_cb(pipe);
if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && attr->src_nentries)
ath11k_ce_tx_process_cb(pipe);
}
EXPORT_SYMBOL(ath11k_ce_per_engine_service);
@ -953,6 +976,7 @@ int ath11k_ce_init_pipes(struct ath11k_base *ab)
void ath11k_ce_free_pipes(struct ath11k_base *ab)
{
struct ath11k_ce_pipe *pipe;
struct ath11k_ce_ring *ce_ring;
int desc_sz;
int i;
@ -964,22 +988,24 @@ void ath11k_ce_free_pipes(struct ath11k_base *ab)
if (pipe->src_ring) {
desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
ce_ring = pipe->src_ring;
dma_free_coherent(ab->dev,
pipe->src_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
pipe->src_ring->base_addr_owner_space,
pipe->src_ring->base_addr_ce_space);
ce_ring->base_addr_owner_space_unaligned,
ce_ring->base_addr_ce_space_unaligned);
kfree(pipe->src_ring);
pipe->src_ring = NULL;
}
if (pipe->dest_ring) {
desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
ce_ring = pipe->dest_ring;
dma_free_coherent(ab->dev,
pipe->dest_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
pipe->dest_ring->base_addr_owner_space,
pipe->dest_ring->base_addr_ce_space);
ce_ring->base_addr_owner_space_unaligned,
ce_ring->base_addr_ce_space_unaligned);
kfree(pipe->dest_ring);
pipe->dest_ring = NULL;
}
@ -987,11 +1013,12 @@ void ath11k_ce_free_pipes(struct ath11k_base *ab)
if (pipe->status_ring) {
desc_sz =
ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
ce_ring = pipe->status_ring;
dma_free_coherent(ab->dev,
pipe->status_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
pipe->status_ring->base_addr_owner_space,
pipe->status_ring->base_addr_ce_space);
ce_ring->base_addr_owner_space_unaligned,
ce_ring->base_addr_ce_space_unaligned);
kfree(pipe->status_ring);
pipe->status_ring = NULL;
}

Просмотреть файл

@ -101,6 +101,7 @@ struct ce_attr {
unsigned int dest_nentries;
void (*recv_cb)(struct ath11k_base *, struct sk_buff *);
void (*send_cb)(struct ath11k_base *, struct sk_buff *);
};
#define CE_DESC_RING_ALIGN 8
@ -154,7 +155,7 @@ struct ath11k_ce_pipe {
unsigned int buf_sz;
unsigned int rx_buf_needed;
void (*send_cb)(struct ath11k_ce_pipe *);
void (*send_cb)(struct ath11k_base *, struct sk_buff *);
void (*recv_cb)(struct ath11k_base *, struct sk_buff *);
struct tasklet_struct intr_tq;

Просмотреть файл

@ -76,12 +76,17 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_monitor = true,
.supports_shadow_regs = false,
.idle_ps = false,
.supports_sta_ps = false,
.cold_boot_calib = true,
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
.fix_l1ss = true,
.credit_flow = false,
.max_tx_ring = DP_TCL_NUM_RING_MAX,
.hal_params = &ath11k_hw_hal_params_ipq8074,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = true,
.wakeup_mhi = false,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@ -125,12 +130,17 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_monitor = true,
.supports_shadow_regs = false,
.idle_ps = false,
.supports_sta_ps = false,
.cold_boot_calib = true,
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
.fix_l1ss = true,
.credit_flow = false,
.max_tx_ring = DP_TCL_NUM_RING_MAX,
.hal_params = &ath11k_hw_hal_params_ipq8074,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = true,
.wakeup_mhi = false,
},
{
.name = "qca6390 hw2.0",
@ -173,12 +183,17 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_monitor = false,
.supports_shadow_regs = true,
.idle_ps = true,
.supports_sta_ps = true,
.cold_boot_calib = false,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
.fix_l1ss = true,
.credit_flow = true,
.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
.hal_params = &ath11k_hw_hal_params_qca6390,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
.wakeup_mhi = true,
},
{
.name = "qcn9074 hw1.0",
@ -221,12 +236,17 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_monitor = true,
.supports_shadow_regs = false,
.idle_ps = false,
.supports_sta_ps = false,
.cold_boot_calib = false,
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
.fix_l1ss = true,
.credit_flow = false,
.max_tx_ring = DP_TCL_NUM_RING_MAX,
.hal_params = &ath11k_hw_hal_params_ipq8074,
.supports_dynamic_smps_6ghz = true,
.alloc_cacheable_memory = true,
.wakeup_mhi = false,
},
{
.name = "wcn6855 hw2.0",
@ -269,12 +289,17 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_monitor = false,
.supports_shadow_regs = true,
.idle_ps = true,
.supports_sta_ps = true,
.cold_boot_calib = false,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
.fix_l1ss = false,
.credit_flow = true,
.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
.hal_params = &ath11k_hw_hal_params_qca6390,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
.wakeup_mhi = true,
},
};
@ -392,11 +417,26 @@ static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
scnprintf(variant, sizeof(variant), ",variant=%s",
ab->qmi.target.bdf_ext);
scnprintf(name, name_len,
"bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
ath11k_bus_str(ab->hif.bus),
ab->qmi.target.chip_id,
ab->qmi.target.board_id, variant);
switch (ab->id.bdf_search) {
case ATH11K_BDF_SEARCH_BUS_AND_BOARD:
scnprintf(name, name_len,
"bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
ath11k_bus_str(ab->hif.bus),
ab->id.vendor, ab->id.device,
ab->id.subsystem_vendor,
ab->id.subsystem_device,
ab->qmi.target.chip_id,
ab->qmi.target.board_id,
variant);
break;
default:
scnprintf(name, name_len,
"bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
ath11k_bus_str(ab->hif.bus),
ab->qmi.target.chip_id,
ab->qmi.target.board_id, variant);
break;
}
ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot using board name '%s'\n", name);
@ -633,7 +673,7 @@ static int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab,
return 0;
}
#define BOARD_NAME_SIZE 100
#define BOARD_NAME_SIZE 200
int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd)
{
char boardname[BOARD_NAME_SIZE];

Просмотреть файл

@ -47,6 +47,11 @@ enum ath11k_supported_bw {
ATH11K_BW_160 = 3,
};
enum ath11k_bdf_search {
ATH11K_BDF_SEARCH_DEFAULT,
ATH11K_BDF_SEARCH_BUS_AND_BOARD,
};
enum wme_ac {
WME_AC_BE,
WME_AC_BK,
@ -240,6 +245,7 @@ struct ath11k_vif {
bool is_started;
bool is_up;
bool spectral_enabled;
bool ps;
u32 aid;
u8 bssid[ETH_ALEN];
struct cfg80211_bitrate_mask bitrate_mask;
@ -249,6 +255,8 @@ struct ath11k_vif {
int txpower;
bool rsnie_present;
bool wpaie_present;
bool bcca_zero_sent;
bool do_not_send_tmpl;
struct ieee80211_chanctx_conf chanctx;
};
@ -759,6 +767,14 @@ struct ath11k_base {
struct completion htc_suspend;
struct {
enum ath11k_bdf_search bdf_search;
u32 vendor;
u32 device;
u32 subsystem_vendor;
u32 subsystem_device;
} id;
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};

Просмотреть файл

@ -87,17 +87,23 @@ static int ath11k_dbring_fill_bufs(struct ath11k *ar,
req_entries = min(num_free, ring->bufs_max);
num_remain = req_entries;
align = ring->buf_align;
size = sizeof(*buff) + ring->buf_sz + align - 1;
size = ring->buf_sz + align - 1;
while (num_remain > 0) {
buff = kzalloc(size, GFP_ATOMIC);
buff = kzalloc(sizeof(*buff), GFP_ATOMIC);
if (!buff)
break;
buff->payload = kzalloc(size, GFP_ATOMIC);
if (!buff->payload) {
kfree(buff);
break;
}
ret = ath11k_dbring_bufs_replenish(ar, ring, buff);
if (ret) {
ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
num_remain, req_entries);
kfree(buff->payload);
kfree(buff);
break;
}
@ -282,7 +288,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
num_entry = ev->fixed.num_buf_release_entry;
size = sizeof(*buff) + ring->buf_sz + ring->buf_align - 1;
size = ring->buf_sz + ring->buf_align - 1;
num_buff_reaped = 0;
spin_lock_bh(&srng->lock);
@ -319,7 +325,8 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
ring->handler(ar, &handler_data);
}
memset(buff, 0, size);
buff->paddr = 0;
memset(buff->payload, 0, size);
ath11k_dbring_bufs_replenish(ar, ring, buff);
}
@ -346,6 +353,7 @@ void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
idr_remove(&ring->bufs_idr, buf_id);
dma_unmap_single(ar->ab->dev, buff->paddr,
ring->buf_sz, DMA_FROM_DEVICE);
kfree(buff->payload);
kfree(buff);
}

Просмотреть файл

@ -13,7 +13,7 @@
struct ath11k_dbring_element {
dma_addr_t paddr;
u8 payload[0];
u8 *payload;
};
struct ath11k_dbring_data {

Просмотреть файл

@ -17,7 +17,7 @@ void ath11k_info(struct ath11k_base *ab, const char *fmt, ...)
va_start(args, fmt);
vaf.va = &args;
dev_info(ab->dev, "%pV", &vaf);
/* TODO: Trace the log */
trace_ath11k_log_info(ab, &vaf);
va_end(args);
}
EXPORT_SYMBOL(ath11k_info);
@ -32,7 +32,7 @@ void ath11k_err(struct ath11k_base *ab, const char *fmt, ...)
va_start(args, fmt);
vaf.va = &args;
dev_err(ab->dev, "%pV", &vaf);
/* TODO: Trace the log */
trace_ath11k_log_err(ab, &vaf);
va_end(args);
}
EXPORT_SYMBOL(ath11k_err);
@ -47,7 +47,7 @@ void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...)
va_start(args, fmt);
vaf.va = &args;
dev_warn_ratelimited(ab->dev, "%pV", &vaf);
/* TODO: Trace the log */
trace_ath11k_log_warn(ab, &vaf);
va_end(args);
}
EXPORT_SYMBOL(ath11k_warn);
@ -68,7 +68,7 @@ void __ath11k_dbg(struct ath11k_base *ab, enum ath11k_debug_mask mask,
if (ath11k_debug_mask & mask)
dev_printk(KERN_DEBUG, ab->dev, "%pV", &vaf);
/* TODO: trace log */
trace_ath11k_log_dbg(ab, mask, &vaf);
va_end(args);
}
@ -100,6 +100,10 @@ void ath11k_dbg_dump(struct ath11k_base *ab,
dev_printk(KERN_DEBUG, ab->dev, "%s\n", linebuf);
}
}
/* tracing code doesn't like null strings */
trace_ath11k_log_dbg_dump(ab, msg ? msg : "", prefix ? prefix : "",
buf, len);
}
EXPORT_SYMBOL(ath11k_dbg_dump);

Просмотреть файл

@ -60,7 +60,8 @@ static inline void ath11k_dbg_dump(struct ath11k_base *ab,
#define ath11k_dbg(ar, dbg_mask, fmt, ...) \
do { \
if (ath11k_debug_mask & dbg_mask) \
if ((ath11k_debug_mask & dbg_mask) || \
trace_ath11k_log_dbg_enabled()) \
__ath11k_dbg(ar, dbg_mask, fmt, ##__VA_ARGS__); \
} while (0)

Просмотреть файл

@ -195,7 +195,7 @@ static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
* received 'update stats' event, we keep a 3 seconds timeout in case,
* fw_stats_done is not marked yet
*/
timeout = jiffies + msecs_to_jiffies(3 * HZ);
timeout = jiffies + msecs_to_jiffies(3 * 1000);
ath11k_debugfs_fw_stats_reset(ar);

Просмотреть файл

@ -101,8 +101,11 @@ void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
if (!ring->vaddr_unaligned)
return;
dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
ring->paddr_unaligned);
if (ring->cached)
kfree(ring->vaddr_unaligned);
else
dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
ring->paddr_unaligned);
ring->vaddr_unaligned = NULL;
}
@ -222,6 +225,7 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
int ret;
bool cached = false;
if (max_entries < 0 || entry_sz < 0)
return -EINVAL;
@ -230,9 +234,29 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
num_entries = max_entries;
ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
&ring->paddr_unaligned,
GFP_KERNEL);
if (ab->hw_params.alloc_cacheable_memory) {
/* Allocate the reo dst and tx completion rings from cacheable memory */
switch (type) {
case HAL_REO_DST:
case HAL_WBM2SW_RELEASE:
cached = true;
break;
default:
cached = false;
}
if (cached) {
ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
}
}
if (!cached)
ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
&ring->paddr_unaligned,
GFP_KERNEL);
if (!ring->vaddr_unaligned)
return -ENOMEM;
@ -292,6 +316,11 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
return -EINVAL;
}
if (cached) {
params.flags |= HAL_SRNG_FLAGS_CACHED;
ring->cached = 1;
}
ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
if (ret < 0) {
ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
@ -742,13 +771,12 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
const struct ath11k_hw_hal_params *hal_params;
int grp_id = irq_grp->grp_id;
int work_done = 0;
int i = 0, j;
int i, j;
int tot_work_done = 0;
while (ab->hw_params.ring_mask->tx[grp_id] >> i) {
if (ab->hw_params.ring_mask->tx[grp_id] & BIT(i))
ath11k_dp_tx_completion_handler(ab, i);
i++;
if (ab->hw_params.ring_mask->tx[grp_id]) {
i = __fls(ab->hw_params.ring_mask->tx[grp_id]);
ath11k_dp_tx_completion_handler(ab, i);
}
if (ab->hw_params.ring_mask->rx_err[grp_id]) {

Просмотреть файл

@ -64,6 +64,7 @@ struct dp_srng {
dma_addr_t paddr;
int size;
u32 ring_id;
u8 cached;
};
struct dp_rxdma_ring {
@ -517,7 +518,8 @@ struct htt_ppdu_stats_cfg_cmd {
} __packed;
#define HTT_PPDU_STATS_CFG_MSG_TYPE GENMASK(7, 0)
#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 8)
#define HTT_PPDU_STATS_CFG_SOC_STATS BIT(8)
#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 9)
#define HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK GENMASK(31, 16)
enum htt_ppdu_stats_tag_type {

Просмотреть файл

@ -20,13 +20,15 @@
#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
static u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
static inline
u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
}
static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
struct hal_rx_desc *desc)
static inline
enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
return HAL_ENCRYPT_TYPE_OPEN;
@ -34,32 +36,34 @@ static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_bas
return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
}
static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
struct hal_rx_desc *desc)
static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
}
static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
struct hal_rx_desc *desc)
static inline
u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
}
static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
struct hal_rx_desc *desc)
static inline
bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}
static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
struct hal_rx_desc *desc)
static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
}
static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
struct sk_buff *skb)
static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
@ -67,8 +71,8 @@ static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
return ieee80211_has_morefrags(hdr->frame_control);
}
static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
struct sk_buff *skb)
static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
@ -76,37 +80,37 @@ static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}
static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
struct hal_rx_desc *desc)
static inline u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
}
static void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
struct hal_rx_desc *desc)
static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
}
static bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
{
return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
__le32_to_cpu(attn->info2));
}
static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
{
return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
__le32_to_cpu(attn->info1));
}
static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
{
return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
__le32_to_cpu(attn->info1));
}
static bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
{
return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
__le32_to_cpu(attn->info2)) ==
@ -154,68 +158,68 @@ static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab,
return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
}
static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
struct hal_rx_desc *desc)
static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
}
static u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
struct hal_rx_desc *desc)
static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
}
static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
struct hal_rx_desc *desc)
static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
}
static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
struct hal_rx_desc *desc)
static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
}
static u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
struct hal_rx_desc *desc)
static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
}
static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
struct hal_rx_desc *desc)
static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
}
static u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
struct hal_rx_desc *desc)
static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
}
static u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
struct hal_rx_desc *desc)
static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
}
static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
struct hal_rx_desc *desc)
static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
}
static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
struct hal_rx_desc *desc)
static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
}
static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
struct hal_rx_desc *desc)
static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
}
@ -233,14 +237,14 @@ static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab,
ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc);
}
static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
{
return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
__le32_to_cpu(attn->info1));
}
static u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
struct hal_rx_desc *rx_desc)
static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
struct hal_rx_desc *rx_desc)
{
u8 *rx_pkt_hdr;
@ -249,8 +253,8 @@ static u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
return rx_pkt_hdr;
}
static bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
struct hal_rx_desc *rx_desc)
static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
struct hal_rx_desc *rx_desc)
{
u32 tlv_tag;
@ -259,15 +263,15 @@ static bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
return tlv_tag == HAL_RX_MPDU_START;
}
static u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
struct hal_rx_desc *rx_desc)
static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
struct hal_rx_desc *rx_desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
}
static void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
struct hal_rx_desc *desc,
u16 len)
static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
struct hal_rx_desc *desc,
u16 len)
{
ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
}
@ -2596,36 +2600,30 @@ free_out:
static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
struct napi_struct *napi,
struct sk_buff_head *msdu_list,
int *quota, int ring_id)
int mac_id)
{
struct ath11k_skb_rxcb *rxcb;
struct sk_buff *msdu;
struct ath11k *ar;
struct ieee80211_rx_status rx_status = {0};
u8 mac_id;
int ret;
if (skb_queue_empty(msdu_list))
return;
rcu_read_lock();
if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) {
__skb_queue_purge(msdu_list);
return;
}
while (*quota && (msdu = __skb_dequeue(msdu_list))) {
rxcb = ATH11K_SKB_RXCB(msdu);
mac_id = rxcb->mac_id;
ar = ab->pdevs[mac_id].ar;
if (!rcu_dereference(ab->pdevs_active[mac_id])) {
dev_kfree_skb_any(msdu);
continue;
}
if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
dev_kfree_skb_any(msdu);
continue;
}
ar = ab->pdevs[mac_id].ar;
if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) {
__skb_queue_purge(msdu_list);
return;
}
while ((msdu = __skb_dequeue(msdu_list))) {
ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
if (ret) {
if (unlikely(ret)) {
ath11k_dbg(ab, ATH11K_DBG_DATA,
"Unable to process msdu %d", ret);
dev_kfree_skb_any(msdu);
@ -2633,10 +2631,7 @@ static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
}
ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
(*quota)--;
}
rcu_read_unlock();
}
int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
@ -2645,19 +2640,21 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
struct ath11k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring;
int num_buffs_reaped[MAX_RADIOS] = {0};
struct sk_buff_head msdu_list;
struct sk_buff_head msdu_list[MAX_RADIOS];
struct ath11k_skb_rxcb *rxcb;
int total_msdu_reaped = 0;
struct hal_srng *srng;
struct sk_buff *msdu;
int quota = budget;
bool done = false;
int buf_id, mac_id;
struct ath11k *ar;
u32 *rx_desc;
struct hal_reo_dest_ring *desc;
enum hal_reo_dest_ring_push_reason push_reason;
u32 cookie;
int i;
__skb_queue_head_init(&msdu_list);
for (i = 0; i < MAX_RADIOS; i++)
__skb_queue_head_init(&msdu_list[i]);
srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
@ -2666,13 +2663,11 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
ath11k_hal_srng_access_begin(ab, srng);
try_again:
while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
struct hal_reo_dest_ring desc = *(struct hal_reo_dest_ring *)rx_desc;
enum hal_reo_dest_ring_push_reason push_reason;
u32 cookie;
while (likely(desc =
(struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
srng))) {
cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
desc.buf_addr_info.info1);
desc->buf_addr_info.info1);
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
cookie);
mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
@ -2681,7 +2676,7 @@ try_again:
rx_ring = &ar->dp.rx_refill_buf_ring;
spin_lock_bh(&rx_ring->idr_lock);
msdu = idr_find(&rx_ring->bufs_idr, buf_id);
if (!msdu) {
if (unlikely(!msdu)) {
ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
@ -2697,37 +2692,41 @@ try_again:
DMA_FROM_DEVICE);
num_buffs_reaped[mac_id]++;
total_msdu_reaped++;
push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
desc.info0);
if (push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
desc->info0);
if (unlikely(push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
dev_kfree_skb_any(msdu);
ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
continue;
}
rxcb->is_first_msdu = !!(desc.rx_msdu_info.info0 &
rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
rxcb->is_last_msdu = !!(desc.rx_msdu_info.info0 &
rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
rxcb->is_continuation = !!(desc.rx_msdu_info.info0 &
rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
desc.rx_mpdu_info.meta_data);
desc->rx_mpdu_info.meta_data);
rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
desc.rx_mpdu_info.info0);
desc->rx_mpdu_info.info0);
rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
desc.info0);
desc->info0);
rxcb->mac_id = mac_id;
__skb_queue_tail(&msdu_list, msdu);
__skb_queue_tail(&msdu_list[mac_id], msdu);
if (total_msdu_reaped >= quota && !rxcb->is_continuation) {
if (rxcb->is_continuation) {
done = false;
} else {
total_msdu_reaped++;
done = true;
break;
}
if (total_msdu_reaped >= budget)
break;
}
/* Hw might have updated the head pointer after we cached it.
@ -2736,7 +2735,7 @@ try_again:
* head pointer so that we can reap complete MPDU in the current
* rx processing.
*/
if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) {
if (unlikely(!done && ath11k_hal_srng_dst_num_free(ab, srng, true))) {
ath11k_hal_srng_access_end(ab, srng);
goto try_again;
}
@ -2745,25 +2744,23 @@ try_again:
spin_unlock_bh(&srng->lock);
if (!total_msdu_reaped)
if (unlikely(!total_msdu_reaped))
goto exit;
for (i = 0; i < ab->num_radios; i++) {
if (!num_buffs_reaped[i])
continue;
ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list[i], i);
ar = ab->pdevs[i].ar;
rx_ring = &ar->dp.rx_refill_buf_ring;
ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
ab->hw_params.hal_params->rx_buf_rbm);
}
ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list,
&quota, ring_id);
exit:
return budget - quota;
return total_msdu_reaped;
}
static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
@ -4829,7 +4826,7 @@ static struct sk_buff *
ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
u32 mac_id, struct sk_buff *head_msdu,
struct sk_buff *last_msdu,
struct ieee80211_rx_status *rxs)
struct ieee80211_rx_status *rxs, bool *fcs_err)
{
struct ath11k_base *ab = ar->ab;
struct sk_buff *msdu, *prev_buf;
@ -4839,12 +4836,17 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
u8 *dest, decap_format;
struct ieee80211_hdr_3addr *wh;
struct rx_attention *rx_attention;
u32 err_bitmap;
if (!head_msdu)
goto err_merge_fail;
rx_desc = (struct hal_rx_desc *)head_msdu->data;
rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc);
err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
if (err_bitmap & DP_RX_MPDU_ERR_FCS)
*fcs_err = true;
if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention))
return NULL;
@ -4933,9 +4935,10 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
struct ath11k_pdev_dp *dp = &ar->dp;
struct sk_buff *mon_skb, *skb_next, *header;
struct ieee80211_rx_status *rxs = &dp->rx_status;
bool fcs_err = false;
mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
tail_msdu, rxs);
tail_msdu, rxs, &fcs_err);
if (!mon_skb)
goto mon_deliver_fail;
@ -4943,6 +4946,10 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
header = mon_skb;
rxs->flag = 0;
if (fcs_err)
rxs->flag = RX_FLAG_FAILED_FCS_CRC;
do {
skb_next = mon_skb->next;
if (!skb_next)

Просмотреть файл

@ -95,11 +95,11 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
u8 ring_selector = 0, ring_map = 0;
bool tcl_ring_retry;
if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
return -ESHUTDOWN;
if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
!ieee80211_is_data(hdr->frame_control))
if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
!ieee80211_is_data(hdr->frame_control)))
return -ENOTSUPP;
pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
@ -127,7 +127,7 @@ tcl_ring_sel:
DP_TX_IDR_SIZE - 1, GFP_ATOMIC);
spin_unlock_bh(&tx_ring->tx_idr_lock);
if (ret < 0) {
if (unlikely(ret < 0)) {
if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1)) {
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
return -ENOSPC;
@ -152,7 +152,7 @@ tcl_ring_sel:
ti.meta_data_flags = arvif->tcl_metadata;
}
if (ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW) {
if (unlikely(ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW)) {
if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) {
ti.encrypt_type =
ath11k_dp_tx_get_encrypt_type(skb_cb->cipher);
@ -173,8 +173,8 @@ tcl_ring_sel:
ti.bss_ast_idx = arvif->ast_idx;
ti.dscp_tid_tbl_idx = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL &&
ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW) {
if (likely(skb->ip_summed == CHECKSUM_PARTIAL &&
ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW)) {
ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
@ -211,7 +211,7 @@ tcl_ring_sel:
}
ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, ti.paddr)) {
if (unlikely(dma_mapping_error(ab->dev, ti.paddr))) {
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
ath11k_warn(ab, "failed to DMA map data Tx buffer\n");
ret = -ENOMEM;
@ -231,7 +231,7 @@ tcl_ring_sel:
ath11k_hal_srng_access_begin(ab, tcl_ring);
hal_tcl_desc = (void *)ath11k_hal_srng_src_get_next_entry(ab, tcl_ring);
if (!hal_tcl_desc) {
if (unlikely(!hal_tcl_desc)) {
/* NOTE: It is highly unlikely we'll be running out of tcl_ring
* desc because the desc is directly enqueued onto hw queue.
*/
@ -245,7 +245,7 @@ tcl_ring_sel:
* checking this ring earlier for each pkt tx.
* Restart ring selection if some rings are not checked yet.
*/
if (ring_map != (BIT(ab->hw_params.max_tx_ring) - 1) &&
if (unlikely(ring_map != (BIT(ab->hw_params.max_tx_ring)) - 1) &&
ab->hw_params.max_tx_ring > 1) {
tcl_ring_retry = true;
ring_selector++;
@ -293,20 +293,18 @@ static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id,
struct sk_buff *msdu;
struct ath11k_skb_cb *skb_cb;
spin_lock_bh(&tx_ring->tx_idr_lock);
msdu = idr_find(&tx_ring->txbuf_idr, msdu_id);
if (!msdu) {
spin_lock(&tx_ring->tx_idr_lock);
msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
spin_unlock(&tx_ring->tx_idr_lock);
if (unlikely(!msdu)) {
ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
msdu_id);
spin_unlock_bh(&tx_ring->tx_idr_lock);
return;
}
skb_cb = ATH11K_SKB_CB(msdu);
idr_remove(&tx_ring->txbuf_idr, msdu_id);
spin_unlock_bh(&tx_ring->tx_idr_lock);
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
dev_kfree_skb_any(msdu);
@ -325,12 +323,13 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
struct ath11k_skb_cb *skb_cb;
struct ath11k *ar;
spin_lock_bh(&tx_ring->tx_idr_lock);
msdu = idr_find(&tx_ring->txbuf_idr, ts->msdu_id);
if (!msdu) {
spin_lock(&tx_ring->tx_idr_lock);
msdu = idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
spin_unlock(&tx_ring->tx_idr_lock);
if (unlikely(!msdu)) {
ath11k_warn(ab, "htt tx completion for unknown msdu_id %d\n",
ts->msdu_id);
spin_unlock_bh(&tx_ring->tx_idr_lock);
return;
}
@ -339,9 +338,6 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
ar = skb_cb->ar;
idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
spin_unlock_bh(&tx_ring->tx_idr_lock);
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
@ -435,16 +431,14 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
rcu_read_lock();
if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
if (unlikely(!rcu_access_pointer(ab->pdevs_active[ar->pdev_idx]))) {
dev_kfree_skb_any(msdu);
goto exit;
return;
}
if (!skb_cb->vif) {
if (unlikely(!skb_cb->vif)) {
dev_kfree_skb_any(msdu);
goto exit;
return;
}
info = IEEE80211_SKB_CB(msdu);
@ -465,7 +459,7 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) {
if (unlikely(ath11k_debugfs_is_extd_tx_stats_enabled(ar))) {
if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) {
if (ar->last_ppdu_id == 0) {
ar->last_ppdu_id = ts->ppdu_id;
@ -494,9 +488,6 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
*/
ieee80211_tx_status(ar->hw, msdu);
exit:
rcu_read_unlock();
}
static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
@ -505,11 +496,11 @@ static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
{
ts->buf_rel_source =
FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0);
if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
if (unlikely(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM))
return;
if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
if (unlikely(ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW))
return;
ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
@ -556,8 +547,9 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head);
}
if ((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
(ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) == tx_ring->tx_status_tail)) {
if (unlikely((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
(ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) ==
tx_ring->tx_status_tail))) {
/* TODO: Process pending tx_status messages when kfifo_is_full() */
ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
}
@ -580,7 +572,7 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id);
msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);
if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
if (unlikely(ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)) {
ath11k_dp_tx_process_htt_tx_complete(ab,
(void *)tx_status,
mac_id, msdu_id,
@ -588,16 +580,16 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
continue;
}
spin_lock_bh(&tx_ring->tx_idr_lock);
msdu = idr_find(&tx_ring->txbuf_idr, msdu_id);
if (!msdu) {
spin_lock(&tx_ring->tx_idr_lock);
msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
if (unlikely(!msdu)) {
ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
msdu_id);
spin_unlock_bh(&tx_ring->tx_idr_lock);
spin_unlock(&tx_ring->tx_idr_lock);
continue;
}
idr_remove(&tx_ring->txbuf_idr, msdu_id);
spin_unlock_bh(&tx_ring->tx_idr_lock);
spin_unlock(&tx_ring->tx_idr_lock);
ar = ab->pdevs[mac_id].ar;
@ -903,7 +895,7 @@ int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask)
cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
pdev_mask = 1 << (i + 1);
pdev_mask = 1 << (ar->pdev_idx + i);
cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);

Просмотреть файл

@ -627,6 +627,21 @@ u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng)
return NULL;
}
static void ath11k_hal_srng_prefetch_desc(struct ath11k_base *ab,
struct hal_srng *srng)
{
u32 *desc;
/* prefetch only if desc is available */
desc = ath11k_hal_srng_dst_peek(ab, srng);
if (likely(desc)) {
dma_sync_single_for_cpu(ab->dev, virt_to_phys(desc),
(srng->entry_size * sizeof(u32)),
DMA_FROM_DEVICE);
prefetch(desc);
}
}
u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
struct hal_srng *srng)
{
@ -639,8 +654,15 @@ u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;
srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
srng->ring_size;
srng->u.dst_ring.tp += srng->entry_size;
/* wrap around to start of ring*/
if (srng->u.dst_ring.tp == srng->ring_size)
srng->u.dst_ring.tp = 0;
/* Try to prefetch the next descriptor in the ring */
if (srng->flags & HAL_SRNG_FLAGS_CACHED)
ath11k_hal_srng_prefetch_desc(ab, srng);
return desc;
}
@ -775,11 +797,16 @@ void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
{
lockdep_assert_held(&srng->lock);
if (srng->ring_dir == HAL_SRNG_DIR_SRC)
if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
srng->u.src_ring.cached_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
else
} else {
srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
/* Try to prefetch the next descriptor in the ring */
if (srng->flags & HAL_SRNG_FLAGS_CACHED)
ath11k_hal_srng_prefetch_desc(ab, srng);
}
}
/* Update cached ring head/tail pointers to HW. ath11k_hal_srng_access_begin()

Просмотреть файл

@ -513,6 +513,7 @@ enum hal_srng_dir {
#define HAL_SRNG_FLAGS_DATA_TLV_SWAP 0x00000020
#define HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN 0x00010000
#define HAL_SRNG_FLAGS_MSI_INTR 0x00020000
#define HAL_SRNG_FLAGS_CACHED 0x20000000
#define HAL_SRNG_FLAGS_LMAC_RING 0x80000000
#define HAL_SRNG_TLV_HDR_TAG GENMASK(9, 1)

Просмотреть файл

@ -81,6 +81,8 @@ int ath11k_htc_send(struct ath11k_htc *htc,
struct ath11k_base *ab = htc->ab;
int credits = 0;
int ret;
bool credit_flow_enabled = (ab->hw_params.credit_flow &&
ep->tx_credit_flow_enabled);
if (eid >= ATH11K_HTC_EP_COUNT) {
ath11k_warn(ab, "Invalid endpoint id: %d\n", eid);
@ -89,7 +91,7 @@ int ath11k_htc_send(struct ath11k_htc *htc,
skb_push(skb, sizeof(struct ath11k_htc_hdr));
if (ep->tx_credit_flow_enabled) {
if (credit_flow_enabled) {
credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
spin_lock_bh(&htc->tx_lock);
if (ep->tx_credits < credits) {
@ -126,7 +128,7 @@ int ath11k_htc_send(struct ath11k_htc *htc,
err_unmap:
dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
if (ep->tx_credit_flow_enabled) {
if (credit_flow_enabled) {
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += credits;
ath11k_dbg(ab, ATH11K_DBG_HTC,
@ -203,23 +205,25 @@ static int ath11k_htc_process_trailer(struct ath11k_htc *htc,
break;
}
switch (record->hdr.id) {
case ATH11K_HTC_RECORD_CREDITS:
len = sizeof(struct ath11k_htc_credit_report);
if (record->hdr.len < len) {
ath11k_warn(ab, "Credit report too long\n");
status = -EINVAL;
if (ab->hw_params.credit_flow) {
switch (record->hdr.id) {
case ATH11K_HTC_RECORD_CREDITS:
len = sizeof(struct ath11k_htc_credit_report);
if (record->hdr.len < len) {
ath11k_warn(ab, "Credit report too long\n");
status = -EINVAL;
break;
}
ath11k_htc_process_credit_report(htc,
record->credit_report,
record->hdr.len,
src_eid);
break;
default:
ath11k_warn(ab, "Unhandled record: id:%d length:%d\n",
record->hdr.id, record->hdr.len);
break;
}
ath11k_htc_process_credit_report(htc,
record->credit_report,
record->hdr.len,
src_eid);
break;
default:
ath11k_warn(ab, "Unhandled record: id:%d length:%d\n",
record->hdr.id, record->hdr.len);
break;
}
if (status)
@ -245,6 +249,29 @@ static void ath11k_htc_suspend_complete(struct ath11k_base *ab, bool ack)
complete(&ab->htc_suspend);
}
void ath11k_htc_tx_completion_handler(struct ath11k_base *ab,
struct sk_buff *skb)
{
struct ath11k_htc *htc = &ab->htc;
struct ath11k_htc_ep *ep;
void (*ep_tx_complete)(struct ath11k_base *, struct sk_buff *);
u8 eid;
eid = ATH11K_SKB_CB(skb)->eid;
if (eid >= ATH11K_HTC_EP_COUNT)
return;
ep = &htc->endpoint[eid];
spin_lock_bh(&htc->tx_lock);
ep_tx_complete = ep->ep_ops.ep_tx_complete;
spin_unlock_bh(&htc->tx_lock);
if (!ep_tx_complete) {
dev_kfree_skb_any(skb);
return;
}
ep_tx_complete(htc->ab, skb);
}
void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
struct sk_buff *skb)
{
@ -607,6 +634,11 @@ int ath11k_htc_connect_service(struct ath11k_htc *htc,
disable_credit_flow_ctrl = true;
}
if (!ab->hw_params.credit_flow) {
flags |= ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
disable_credit_flow_ctrl = true;
}
req_msg->flags_len = FIELD_PREP(HTC_SVC_MSG_CONNECTIONFLAGS, flags);
req_msg->msg_svc_id |= FIELD_PREP(HTC_SVC_MSG_SERVICE_ID,
conn_req->service_id);
@ -732,7 +764,10 @@ int ath11k_htc_start(struct ath11k_htc *htc)
msg->msg_id = FIELD_PREP(HTC_MSG_MESSAGEID,
ATH11K_HTC_MSG_SETUP_COMPLETE_EX_ID);
ath11k_dbg(ab, ATH11K_DBG_HTC, "HTC is using TX credit flow control\n");
if (ab->hw_params.credit_flow)
ath11k_dbg(ab, ATH11K_DBG_HTC, "HTC is using TX credit flow control\n");
else
msg->flags |= ATH11K_GLOBAL_DISABLE_CREDIT_FLOW;
status = ath11k_htc_send(htc, ATH11K_HTC_EP_0, skb);
if (status) {

Просмотреть файл

@ -83,8 +83,8 @@ enum ath11k_htc_conn_flags {
ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF = 0x1,
ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2,
ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY = 0x3,
ATH11K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 1 << 2,
ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 1 << 3
ATH11K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 0x4,
ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 0x8,
};
enum ath11k_htc_conn_svc_status {
@ -116,6 +116,8 @@ struct ath11k_htc_conn_svc_resp {
u32 svc_meta_pad;
} __packed;
#define ATH11K_GLOBAL_DISABLE_CREDIT_FLOW BIT(1)
struct ath11k_htc_setup_complete_extended {
u32 msg_id;
u32 flags;
@ -305,5 +307,6 @@ int ath11k_htc_send(struct ath11k_htc *htc, enum ath11k_htc_ep_id eid,
struct sk_buff *ath11k_htc_alloc_skb(struct ath11k_base *ar, int size);
void ath11k_htc_rx_completion_handler(struct ath11k_base *ar,
struct sk_buff *skb);
void ath11k_htc_tx_completion_handler(struct ath11k_base *ab,
struct sk_buff *skb);
#endif

Просмотреть файл

@ -1061,8 +1061,6 @@ const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074 = {
const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390 = {
.tx = {
ATH11K_TX_RING_MASK_0,
ATH11K_TX_RING_MASK_1,
ATH11K_TX_RING_MASK_2,
},
.rx_mon_status = {
0, 0, 0, 0,

Просмотреть файл

@ -170,12 +170,17 @@ struct ath11k_hw_params {
bool supports_monitor;
bool supports_shadow_regs;
bool idle_ps;
bool supports_sta_ps;
bool cold_boot_calib;
bool supports_suspend;
u32 hal_desc_sz;
bool fix_l1ss;
bool credit_flow;
u8 max_tx_ring;
const struct ath11k_hw_hal_params *hal_params;
bool supports_dynamic_smps_6ghz;
bool alloc_cacheable_memory;
bool wakeup_mhi;
};
struct ath11k_hw_ops {

Просмотреть файл

@ -775,9 +775,9 @@ static int ath11k_mac_monitor_vdev_start(struct ath11k *ar, int vdev_id,
arg.channel.chan_radar = !!(channel->flags & IEEE80211_CHAN_RADAR);
arg.channel.min_power = 0;
arg.channel.max_power = channel->max_power * 2;
arg.channel.max_reg_power = channel->max_reg_power * 2;
arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
arg.channel.max_power = channel->max_power;
arg.channel.max_reg_power = channel->max_reg_power;
arg.channel.max_antenna_gain = channel->max_antenna_gain;
arg.pref_tx_streams = ar->num_tx_chains;
arg.pref_rx_streams = ar->num_rx_chains;
@ -1049,6 +1049,83 @@ static int ath11k_mac_monitor_stop(struct ath11k *ar)
return 0;
}
static int ath11k_mac_vif_setup_ps(struct ath11k_vif *arvif)
{
struct ath11k *ar = arvif->ar;
struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_conf *conf = &ar->hw->conf;
enum wmi_sta_powersave_param param;
enum wmi_sta_ps_mode psmode;
int ret;
int timeout;
bool enable_ps;
lockdep_assert_held(&arvif->ar->conf_mutex);
if (arvif->vif->type != NL80211_IFTYPE_STATION)
return 0;
enable_ps = arvif->ps;
if (!arvif->is_started) {
/* mac80211 can update vif powersave state while disconnected.
* Firmware doesn't behave nicely and consumes more power than
* necessary if PS is disabled on a non-started vdev. Hence
* force-enable PS for non-running vdevs.
*/
psmode = WMI_STA_PS_MODE_ENABLED;
} else if (enable_ps) {
psmode = WMI_STA_PS_MODE_ENABLED;
param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
timeout = conf->dynamic_ps_timeout;
if (timeout == 0) {
/* firmware doesn't like 0 */
timeout = ieee80211_tu_to_usec(vif->bss_conf.beacon_int) / 1000;
}
ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
timeout);
if (ret) {
ath11k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n",
arvif->vdev_id, ret);
return ret;
}
} else {
psmode = WMI_STA_PS_MODE_DISABLED;
}
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev %d psmode %s\n",
arvif->vdev_id, psmode ? "enable" : "disable");
ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode);
if (ret) {
ath11k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n",
psmode, arvif->vdev_id, ret);
return ret;
}
return 0;
}
static int ath11k_mac_config_ps(struct ath11k *ar)
{
struct ath11k_vif *arvif;
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath11k_mac_vif_setup_ps(arvif);
if (ret) {
ath11k_warn(ar->ab, "failed to setup powersave: %d\n", ret);
break;
}
}
return ret;
}
static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath11k *ar = hw->priv;
@ -1137,11 +1214,15 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies)))
arvif->rsnie_present = true;
else
arvif->rsnie_present = false;
if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPA,
ies, (skb_tail_pointer(bcn) - ies)))
arvif->wpaie_present = true;
else
arvif->wpaie_present = false;
ret = ath11k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
@ -1154,6 +1235,26 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
return ret;
}
void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
{
struct ieee80211_vif *vif = arvif->vif;
if (!vif->color_change_active && !arvif->bcca_zero_sent)
return;
if (vif->color_change_active && ieee80211_beacon_cntdwn_is_complete(vif)) {
arvif->bcca_zero_sent = true;
ieee80211_color_change_finish(vif);
return;
}
arvif->bcca_zero_sent = false;
if (vif->color_change_active)
ieee80211_beacon_update_cntdwn(vif);
ath11k_mac_setup_bcn_tmpl(arvif);
}
static void ath11k_control_beaconing(struct ath11k_vif *arvif,
struct ieee80211_bss_conf *info)
{
@ -2397,6 +2498,8 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
struct ath11k_vif *arvif = (void *)vif->drv_priv;
struct peer_assoc_params peer_arg;
struct ieee80211_sta *ap_sta;
struct ath11k_peer *peer;
bool is_auth = false;
int ret;
lockdep_assert_held(&ar->conf_mutex);
@ -2418,6 +2521,7 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
rcu_read_unlock();
peer_arg.is_assoc = true;
ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (ret) {
ath11k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
@ -2458,13 +2562,22 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
"mac vdev %d up (associated) bssid %pM aid %d\n",
arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
/* Authorize BSS Peer */
ret = ath11k_wmi_set_peer_param(ar, arvif->bssid,
arvif->vdev_id,
WMI_PEER_AUTHORIZE,
1);
if (ret)
ath11k_warn(ar->ab, "Unable to authorize BSS peer: %d\n", ret);
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find(ar->ab, arvif->vdev_id, arvif->bssid);
if (peer && peer->is_authorized)
is_auth = true;
spin_unlock_bh(&ar->ab->base_lock);
if (is_auth) {
ret = ath11k_wmi_set_peer_param(ar, arvif->bssid,
arvif->vdev_id,
WMI_PEER_AUTHORIZE,
1);
if (ret)
ath11k_warn(ar->ab, "Unable to authorize BSS peer: %d\n", ret);
}
ret = ath11k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
&bss_conf->he_obss_pd);
@ -2805,10 +2918,17 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
"Set staggered beacon mode for VDEV: %d\n",
arvif->vdev_id);
ret = ath11k_mac_setup_bcn_tmpl(arvif);
if (ret)
ath11k_warn(ar->ab, "failed to update bcn template: %d\n",
ret);
if (!arvif->do_not_send_tmpl || !arvif->bcca_zero_sent) {
ret = ath11k_mac_setup_bcn_tmpl(arvif);
if (ret)
ath11k_warn(ar->ab, "failed to update bcn template: %d\n",
ret);
}
if (arvif->bcca_zero_sent)
arvif->do_not_send_tmpl = true;
else
arvif->do_not_send_tmpl = false;
}
if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
@ -2942,6 +3062,16 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
ath11k_mac_txpower_recalc(ar);
}
if (changed & BSS_CHANGED_PS &&
ar->ab->hw_params.supports_sta_ps) {
arvif->ps = vif->bss_conf.ps;
ret = ath11k_mac_config_ps(ar);
if (ret)
ath11k_warn(ar->ab, "failed to setup ps on vdev %i: %d\n",
arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_MCAST_RATE &&
!ath11k_mac_vif_chan(arvif->vif, &def)) {
band = def.chan->band;
@ -3009,6 +3139,25 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret)
ath11k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
arvif->vdev_id, ret);
param_id = WMI_VDEV_PARAM_BSS_COLOR;
if (info->he_bss_color.enabled)
param_value = info->he_bss_color.color <<
IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET;
else
param_value = IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id,
param_value);
if (ret)
ath11k_warn(ar->ab,
"failed to set bss color param on vdev %i: %d\n",
arvif->vdev_id, ret);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"bss color param 0x%x set on vdev %i\n",
param_value, arvif->vdev_id);
} else if (vif->type == NL80211_IFTYPE_STATION) {
ret = ath11k_wmi_send_bss_color_change_enable_cmd(ar,
arvif->vdev_id,
@ -3316,9 +3465,7 @@ static int ath11k_install_key(struct ath11k_vif *arvif,
return 0;
if (cmd == DISABLE_KEY) {
/* TODO: Check if FW expects value other than NONE for del */
/* arg.key_cipher = WMI_CIPHER_NONE; */
arg.key_len = 0;
arg.key_cipher = WMI_CIPHER_NONE;
arg.key_data = NULL;
goto install;
}
@ -3685,6 +3832,7 @@ static int ath11k_station_assoc(struct ath11k *ar,
ath11k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc);
peer_arg.is_assoc = true;
ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (ret) {
ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
@ -3824,11 +3972,27 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
ath11k_mac_max_he_nss(he_mcs_mask)));
if (changed & IEEE80211_RC_BW_CHANGED) {
err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
WMI_PEER_CHWIDTH, bw);
if (err)
ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n",
sta->addr, bw, err);
/* Send peer assoc command before set peer bandwidth param to
* avoid the mismatch between the peer phymode and the peer
* bandwidth.
*/
ath11k_peer_assoc_prepare(ar, arvif->vif, sta, &peer_arg, true);
peer_arg.is_assoc = false;
err = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (err) {
ath11k_warn(ar->ab, "failed to send peer assoc for STA %pM vdev %i: %d\n",
sta->addr, arvif->vdev_id, err);
} else if (wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
WMI_PEER_CHWIDTH, bw);
if (err)
ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n",
sta->addr, bw, err);
} else {
ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
sta->addr, arvif->vdev_id);
}
}
if (changed & IEEE80211_RC_NSS_CHANGED) {
@ -3896,6 +4060,7 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
ath11k_peer_assoc_prepare(ar, arvif->vif, sta,
&peer_arg, true);
peer_arg.is_assoc = false;
err = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (err)
ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
@ -4095,6 +4260,10 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
new_state == IEEE80211_STA_NOTEXIST)) {
ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
if (ar->ab->hw_params.vdev_start_delay &&
vif->type == NL80211_IFTYPE_STATION)
goto free;
ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
ath11k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
@ -4116,6 +4285,7 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
}
spin_unlock_bh(&ar->ab->base_lock);
free:
kfree(arsta->tx_stats);
arsta->tx_stats = NULL;
@ -4130,6 +4300,34 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
if (ret)
ath11k_warn(ar->ab, "Failed to associate station: %pM\n",
sta->addr);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
if (peer)
peer->is_authorized = true;
spin_unlock_bh(&ar->ab->base_lock);
if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
ret = ath11k_wmi_set_peer_param(ar, sta->addr,
arvif->vdev_id,
WMI_PEER_AUTHORIZE,
1);
if (ret)
ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
sta->addr, arvif->vdev_id, ret);
}
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
if (peer)
peer->is_authorized = false;
spin_unlock_bh(&ar->ab->base_lock);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH &&
(vif->type == NL80211_IFTYPE_AP ||
@ -4561,6 +4759,10 @@ ath11k_create_vht_cap(struct ath11k *ar, u32 rate_cap_tx_chainmask,
vht_cap.vht_supported = 1;
vht_cap.cap = ar->pdev->cap.vht_cap;
if (ar->pdev->cap.nss_ratio_enabled)
vht_cap.vht_mcs.tx_highest |=
cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
ath11k_set_vht_txbf_cap(ar, &vht_cap.cap);
rxmcs_map = 0;
@ -5048,13 +5250,15 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
arvif = ath11k_vif_to_arvif(skb_cb->vif);
if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) &&
arvif->is_started) {
atomic_inc(&ar->num_pending_mgmt_tx);
ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb);
if (ret) {
if (atomic_dec_if_positive(&ar->num_pending_mgmt_tx) < 0)
WARN_ON_ONCE(1);
ath11k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
arvif->vdev_id, ret);
ieee80211_free_txskb(ar->hw, skb);
} else {
atomic_inc(&ar->num_pending_mgmt_tx);
}
} else {
ath11k_warn(ar->ab,
@ -5138,7 +5342,7 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
arsta = (struct ath11k_sta *)control->sta->drv_priv;
ret = ath11k_dp_tx(ar, arvif, arsta, skb);
if (ret) {
if (unlikely(ret)) {
ath11k_warn(ar->ab, "failed to transmit frame %d\n", ret);
ieee80211_free_txskb(ar->hw, skb);
}
@ -5484,7 +5688,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
u32 param_id, param_value;
u16 nss;
int i;
int ret;
int ret, fbret;
int bit;
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
@ -5638,7 +5842,8 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
goto err_peer_del;
}
ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, false);
ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id,
WMI_STA_PS_MODE_DISABLED);
if (ret) {
ath11k_warn(ar->ab, "failed to disable vdev %d ps mode: %d\n",
arvif->vdev_id, ret);
@ -5686,17 +5891,17 @@ err_peer_del:
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
reinit_completion(&ar->peer_delete_done);
ret = ath11k_wmi_send_peer_delete_cmd(ar, vif->addr,
arvif->vdev_id);
if (ret) {
fbret = ath11k_wmi_send_peer_delete_cmd(ar, vif->addr,
arvif->vdev_id);
if (fbret) {
ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
arvif->vdev_id, vif->addr);
goto err;
}
ret = ath11k_wait_for_peer_delete_done(ar, arvif->vdev_id,
vif->addr);
if (ret)
fbret = ath11k_wait_for_peer_delete_done(ar, arvif->vdev_id,
vif->addr);
if (fbret)
goto err;
ar->num_peers--;
@ -5831,7 +6036,6 @@ static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
changed_flags &= SUPPORTED_FILTERS;
*total_flags &= SUPPORTED_FILTERS;
ar->filter_flags = *total_flags;
@ -5969,9 +6173,9 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
ath11k_phymodes[chandef->chan->band][chandef->width];
arg.channel.min_power = 0;
arg.channel.max_power = chandef->chan->max_power * 2;
arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
arg.channel.max_power = chandef->chan->max_power;
arg.channel.max_reg_power = chandef->chan->max_reg_power;
arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain;
arg.pref_tx_streams = ar->num_tx_chains;
arg.pref_rx_streams = ar->num_rx_chains;
@ -6467,6 +6671,19 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
arvif->is_started = false;
if (ab->hw_params.vdev_start_delay &&
arvif->vdev_type == WMI_VDEV_TYPE_STA) {
ret = ath11k_peer_delete(ar, arvif->vdev_id, arvif->bssid);
if (ret)
ath11k_warn(ar->ab,
"failed to delete peer %pM for vdev %d: %d\n",
arvif->bssid, arvif->vdev_id, ret);
else
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"mac removed peer %pM vdev %d after vdev stop\n",
arvif->bssid, arvif->vdev_id);
}
if (ab->hw_params.vdev_start_delay &&
arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
ath11k_wmi_vdev_down(ar, arvif->vdev_id);
@ -7277,21 +7494,20 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
sinfo->tx_duration = arsta->tx_duration;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
if (!arsta->txrate.legacy && !arsta->txrate.nss)
return;
if (arsta->txrate.legacy) {
sinfo->txrate.legacy = arsta->txrate.legacy;
} else {
sinfo->txrate.mcs = arsta->txrate.mcs;
sinfo->txrate.nss = arsta->txrate.nss;
sinfo->txrate.bw = arsta->txrate.bw;
sinfo->txrate.he_gi = arsta->txrate.he_gi;
sinfo->txrate.he_dcm = arsta->txrate.he_dcm;
sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc;
if (arsta->txrate.legacy || arsta->txrate.nss) {
if (arsta->txrate.legacy) {
sinfo->txrate.legacy = arsta->txrate.legacy;
} else {
sinfo->txrate.mcs = arsta->txrate.mcs;
sinfo->txrate.nss = arsta->txrate.nss;
sinfo->txrate.bw = arsta->txrate.bw;
sinfo->txrate.he_gi = arsta->txrate.he_gi;
sinfo->txrate.he_dcm = arsta->txrate.he_dcm;
sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc;
}
sinfo->txrate.flags = arsta->txrate.flags;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}
sinfo->txrate.flags = arsta->txrate.flags;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
/* TODO: Use real NF instead of default one. */
sinfo->signal = arsta->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
@ -7672,7 +7888,8 @@ static int __ath11k_mac_register(struct ath11k *ar)
* for each band for a dual band capable radio. It will be tricky to
* handle it when the ht capability different for each band.
*/
if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS || ar->supports_6ghz)
if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS ||
(ar->supports_6ghz && ab->hw_params.supports_dynamic_smps_6ghz))
ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
@ -7703,6 +7920,9 @@ static int __ath11k_mac_register(struct ath11k *ar)
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
if (test_bit(WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD, ar->ab->wmi_ab.svc_map))
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_BSS_COLOR);
ar->hw->wiphy->cipher_suites = cipher_suites;
ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);

Просмотреть файл

@ -155,4 +155,5 @@ enum ath11k_supported_bw ath11k_mac_mac80211_bw_to_ath11k_bw(enum rate_info_bw b
enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher);
void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb);
void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id);
void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif);
#endif

Просмотреть файл

@ -182,7 +182,8 @@ void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value)
/* for offset beyond BAR + 4K - 32, may
* need to wakeup MHI to access.
*/
if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
if (ab->hw_params.wakeup_mhi &&
test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
offset >= ACCESS_ALWAYS_OFF)
mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
@ -206,7 +207,8 @@ void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value)
}
}
if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
if (ab->hw_params.wakeup_mhi &&
test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
offset >= ACCESS_ALWAYS_OFF)
mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
}
@ -219,7 +221,8 @@ u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset)
/* for offset beyond BAR + 4K - 32, may
* need to wakeup MHI to access.
*/
if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
if (ab->hw_params.wakeup_mhi &&
test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
offset >= ACCESS_ALWAYS_OFF)
mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
@ -243,7 +246,8 @@ u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset)
}
}
if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
if (ab->hw_params.wakeup_mhi &&
test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
offset >= ACCESS_ALWAYS_OFF)
mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
@ -1251,6 +1255,15 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
goto err_free_core;
}
ath11k_dbg(ab, ATH11K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device);
ab->id.vendor = pdev->vendor;
ab->id.device = pdev->device;
ab->id.subsystem_vendor = pdev->subsystem_vendor;
ab->id.subsystem_device = pdev->subsystem_device;
switch (pci_dev->device) {
case QCA6390_DEVICE_ID:
ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
@ -1273,6 +1286,7 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
ab->hw_rev = ATH11K_HW_QCN9074_HW10;
break;
case WCN6855_DEVICE_ID:
ab->id.bdf_search = ATH11K_BDF_SEARCH_BUS_AND_BOARD;
ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
switch (soc_hw_version_major) {

Просмотреть файл

@ -28,6 +28,7 @@ struct ath11k_peer {
u8 ucast_keyidx;
u16 sec_type;
u16 sec_type_grp;
bool is_authorized;
};
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id);

Просмотреть файл

@ -1586,7 +1586,7 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
{
struct qmi_wlanfw_host_cap_req_msg_v01 req;
struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
struct qmi_txn txn = {};
struct qmi_txn txn;
int ret = 0;
memset(&req, 0, sizeof(req));
@ -1640,6 +1640,7 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_host_cap_req_msg_v01_ei, &req);
if (ret < 0) {
qmi_txn_cancel(&txn);
ath11k_warn(ab, "failed to send host capability request: %d\n", ret);
goto out;
}
@ -1705,6 +1706,7 @@ static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab)
QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_ind_register_req_msg_v01_ei, req);
if (ret < 0) {
qmi_txn_cancel(&txn);
ath11k_warn(ab, "failed to send indication register request: %d\n",
ret);
goto out;
@ -1734,7 +1736,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
{
struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
struct qmi_txn txn = {};
struct qmi_txn txn;
int ret = 0, i;
bool delayed;
@ -1783,6 +1785,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_respond_mem_req_msg_v01_ei, req);
if (ret < 0) {
qmi_txn_cancel(&txn);
ath11k_warn(ab, "failed to respond qmi memory request: %d\n",
ret);
goto out;
@ -1911,7 +1914,7 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
{
struct qmi_wlanfw_cap_req_msg_v01 req;
struct qmi_wlanfw_cap_resp_msg_v01 resp;
struct qmi_txn txn = {};
struct qmi_txn txn;
int ret = 0;
int r;
@ -1930,6 +1933,7 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_cap_req_msg_v01_ei, &req);
if (ret < 0) {
qmi_txn_cancel(&txn);
ath11k_warn(ab, "failed to send qmi cap request: %d\n",
ret);
goto out;
@ -2000,7 +2004,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
{
struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
struct qmi_txn txn = {};
struct qmi_txn txn;
const u8 *temp = data;
void __iomem *bdf_addr = NULL;
int ret;
@ -2245,7 +2249,7 @@ static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
struct qmi_wlanfw_m3_info_req_msg_v01 req;
struct qmi_wlanfw_m3_info_resp_msg_v01 resp;
struct qmi_txn txn = {};
struct qmi_txn txn;
int ret = 0;
memset(&req, 0, sizeof(req));
@ -2277,6 +2281,7 @@ static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
qmi_wlanfw_m3_info_req_msg_v01_ei, &req);
if (ret < 0) {
qmi_txn_cancel(&txn);
ath11k_warn(ab, "failed to send m3 information request: %d\n",
ret);
goto out;
@ -2303,7 +2308,7 @@ static int ath11k_qmi_wlanfw_mode_send(struct ath11k_base *ab,
{
struct qmi_wlanfw_wlan_mode_req_msg_v01 req;
struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp;
struct qmi_txn txn = {};
struct qmi_txn txn;
int ret = 0;
memset(&req, 0, sizeof(req));
@ -2325,6 +2330,7 @@ static int ath11k_qmi_wlanfw_mode_send(struct ath11k_base *ab,
QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req);
if (ret < 0) {
qmi_txn_cancel(&txn);
ath11k_warn(ab, "failed to send wlan mode request (mode %d): %d\n",
mode, ret);
goto out;
@ -2358,7 +2364,7 @@ static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab)
struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp;
struct ce_pipe_config *ce_cfg;
struct service_to_pipe *svc_cfg;
struct qmi_txn txn = {};
struct qmi_txn txn;
int ret = 0, pipe_num;
ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce;
@ -2419,6 +2425,7 @@ static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab)
QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req);
if (ret < 0) {
qmi_txn_cancel(&txn);
ath11k_warn(ab, "failed to send wlan config request: %d\n",
ret);
goto out;

Просмотреть файл

@ -456,6 +456,9 @@ ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw)
{
u16 bw;
if (end_freq <= start_freq)
return 0;
bw = end_freq - start_freq;
bw = min_t(u16, bw, max_bw);
@ -463,8 +466,10 @@ ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw)
bw = 80;
else if (bw >= 40 && bw < 80)
bw = 40;
else if (bw < 40)
else if (bw >= 20 && bw < 40)
bw = 20;
else
bw = 0;
return bw;
}
@ -488,73 +493,77 @@ ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
struct cur_reg_rule *reg_rule,
u8 *rule_idx, u32 flags, u16 max_bw)
{
u32 start_freq;
u32 end_freq;
u16 bw;
u8 i;
i = *rule_idx;
/* there might be situations when even the input rule must be dropped */
i--;
/* frequencies below weather radar */
bw = ath11k_reg_adjust_bw(reg_rule->start_freq,
ETSI_WEATHER_RADAR_BAND_LOW, max_bw);
if (bw > 0) {
i++;
ath11k_reg_update_rule(regd->reg_rules + i, reg_rule->start_freq,
ETSI_WEATHER_RADAR_BAND_LOW, bw,
reg_rule->ant_gain, reg_rule->reg_power,
flags);
ath11k_reg_update_rule(regd->reg_rules + i,
reg_rule->start_freq,
ETSI_WEATHER_RADAR_BAND_LOW, bw,
reg_rule->ant_gain, reg_rule->reg_power,
flags);
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, reg_rule->start_freq, ETSI_WEATHER_RADAR_BAND_LOW,
bw, reg_rule->ant_gain, reg_rule->reg_power,
regd->reg_rules[i].dfs_cac_ms,
flags);
if (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_HIGH)
end_freq = ETSI_WEATHER_RADAR_BAND_HIGH;
else
end_freq = reg_rule->end_freq;
bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_LOW, end_freq,
max_bw);
i++;
ath11k_reg_update_rule(regd->reg_rules + i,
ETSI_WEATHER_RADAR_BAND_LOW, end_freq, bw,
reg_rule->ant_gain, reg_rule->reg_power,
flags);
regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT;
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, ETSI_WEATHER_RADAR_BAND_LOW, end_freq,
bw, reg_rule->ant_gain, reg_rule->reg_power,
regd->reg_rules[i].dfs_cac_ms,
flags);
if (end_freq == reg_rule->end_freq) {
regd->n_reg_rules--;
*rule_idx = i;
return;
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, reg_rule->start_freq,
ETSI_WEATHER_RADAR_BAND_LOW, bw, reg_rule->ant_gain,
reg_rule->reg_power, regd->reg_rules[i].dfs_cac_ms,
flags);
}
/* weather radar frequencies */
start_freq = max_t(u32, reg_rule->start_freq,
ETSI_WEATHER_RADAR_BAND_LOW);
end_freq = min_t(u32, reg_rule->end_freq, ETSI_WEATHER_RADAR_BAND_HIGH);
bw = ath11k_reg_adjust_bw(start_freq, end_freq, max_bw);
if (bw > 0) {
i++;
ath11k_reg_update_rule(regd->reg_rules + i, start_freq,
end_freq, bw, reg_rule->ant_gain,
reg_rule->reg_power, flags);
regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT;
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, start_freq, end_freq, bw,
reg_rule->ant_gain, reg_rule->reg_power,
regd->reg_rules[i].dfs_cac_ms, flags);
}
/* frequencies above weather radar */
bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_HIGH,
reg_rule->end_freq, max_bw);
if (bw > 0) {
i++;
i++;
ath11k_reg_update_rule(regd->reg_rules + i,
ETSI_WEATHER_RADAR_BAND_HIGH,
reg_rule->end_freq, bw,
reg_rule->ant_gain, reg_rule->reg_power,
flags);
ath11k_reg_update_rule(regd->reg_rules + i, ETSI_WEATHER_RADAR_BAND_HIGH,
reg_rule->end_freq, bw,
reg_rule->ant_gain, reg_rule->reg_power,
flags);
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, ETSI_WEATHER_RADAR_BAND_HIGH, reg_rule->end_freq,
bw, reg_rule->ant_gain, reg_rule->reg_power,
regd->reg_rules[i].dfs_cac_ms,
flags);
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, ETSI_WEATHER_RADAR_BAND_HIGH,
reg_rule->end_freq, bw, reg_rule->ant_gain,
reg_rule->reg_power, regd->reg_rules[i].dfs_cac_ms,
flags);
}
*rule_idx = i;
}

Просмотреть файл

@ -7,3 +7,4 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
EXPORT_SYMBOL(__tracepoint_ath11k_log_dbg);

Просмотреть файл

@ -14,12 +14,24 @@
#if !defined(CONFIG_ATH11K_TRACING)
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, ...) \
static inline void trace_ ## name(proto) {} \
static inline bool trace_##name##_enabled(void) \
{ \
return false; \
}
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(...)
#undef DEFINE_EVENT
#define DEFINE_EVENT(evt_class, name, proto, ...) \
static inline void trace_ ## name(proto) {}
#endif /* !CONFIG_ATH11K_TRACING || __CHECKER__ */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ath11k
#define ATH11K_MSG_MAX 400
TRACE_EVENT(ath11k_htt_pktlog,
TP_PROTO(struct ath11k *ar, const void *buf, u16 buf_len,
u32 pktlog_checksum),
@ -108,6 +120,166 @@ TRACE_EVENT(ath11k_htt_rxdesc,
)
);
DECLARE_EVENT_CLASS(ath11k_log_event,
TP_PROTO(struct ath11k_base *ab, struct va_format *vaf),
TP_ARGS(ab, vaf),
TP_STRUCT__entry(
__string(device, dev_name(ab->dev))
__string(driver, dev_driver_string(ab->dev))
__dynamic_array(char, msg, ATH11K_MSG_MAX)
),
TP_fast_assign(
__assign_str(device, dev_name(ab->dev));
__assign_str(driver, dev_driver_string(ab->dev));
WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
ATH11K_MSG_MAX,
vaf->fmt,
*vaf->va) >= ATH11K_MSG_MAX);
),
TP_printk(
"%s %s %s",
__get_str(driver),
__get_str(device),
__get_str(msg)
)
);
DEFINE_EVENT(ath11k_log_event, ath11k_log_err,
TP_PROTO(struct ath11k_base *ab, struct va_format *vaf),
TP_ARGS(ab, vaf)
);
DEFINE_EVENT(ath11k_log_event, ath11k_log_warn,
TP_PROTO(struct ath11k_base *ab, struct va_format *vaf),
TP_ARGS(ab, vaf)
);
DEFINE_EVENT(ath11k_log_event, ath11k_log_info,
TP_PROTO(struct ath11k_base *ab, struct va_format *vaf),
TP_ARGS(ab, vaf)
);
TRACE_EVENT(ath11k_wmi_cmd,
TP_PROTO(struct ath11k_base *ab, int id, const void *buf, size_t buf_len),
TP_ARGS(ab, id, buf, buf_len),
TP_STRUCT__entry(
__string(device, dev_name(ab->dev))
__string(driver, dev_driver_string(ab->dev))
__field(unsigned int, id)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
),
TP_fast_assign(
__assign_str(device, dev_name(ab->dev));
__assign_str(driver, dev_driver_string(ab->dev));
__entry->id = id;
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"%s %s id %d len %zu",
__get_str(driver),
__get_str(device),
__entry->id,
__entry->buf_len
)
);
TRACE_EVENT(ath11k_wmi_event,
TP_PROTO(struct ath11k_base *ab, int id, const void *buf, size_t buf_len),
TP_ARGS(ab, id, buf, buf_len),
TP_STRUCT__entry(
__string(device, dev_name(ab->dev))
__string(driver, dev_driver_string(ab->dev))
__field(unsigned int, id)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
),
TP_fast_assign(
__assign_str(device, dev_name(ab->dev));
__assign_str(driver, dev_driver_string(ab->dev));
__entry->id = id;
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"%s %s id %d len %zu",
__get_str(driver),
__get_str(device),
__entry->id,
__entry->buf_len
)
);
TRACE_EVENT(ath11k_log_dbg,
TP_PROTO(struct ath11k_base *ab, unsigned int level, struct va_format *vaf),
TP_ARGS(ab, level, vaf),
TP_STRUCT__entry(
__string(device, dev_name(ab->dev))
__string(driver, dev_driver_string(ab->dev))
__field(unsigned int, level)
__dynamic_array(char, msg, ATH11K_MSG_MAX)
),
TP_fast_assign(
__assign_str(device, dev_name(ab->dev));
__assign_str(driver, dev_driver_string(ab->dev));
__entry->level = level;
WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
ATH11K_MSG_MAX, vaf->fmt,
*vaf->va) >= ATH11K_MSG_MAX);
),
TP_printk(
"%s %s %s",
__get_str(driver),
__get_str(device),
__get_str(msg)
)
);
TRACE_EVENT(ath11k_log_dbg_dump,
TP_PROTO(struct ath11k_base *ab, const char *msg, const char *prefix,
const void *buf, size_t buf_len),
TP_ARGS(ab, msg, prefix, buf, buf_len),
TP_STRUCT__entry(
__string(device, dev_name(ab->dev))
__string(driver, dev_driver_string(ab->dev))
__string(msg, msg)
__string(prefix, prefix)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
),
TP_fast_assign(
__assign_str(device, dev_name(ab->dev));
__assign_str(driver, dev_driver_string(ab->dev));
__assign_str(msg, msg);
__assign_str(prefix, prefix);
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"%s %s %s/%s\n",
__get_str(driver),
__get_str(device),
__get_str(prefix),
__get_str(msg)
)
);
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */

Просмотреть файл

@ -128,6 +128,8 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
[WMI_TAG_OBSS_COLOR_COLLISION_EVT] = {
.min_len = sizeof(struct wmi_obss_color_collision_event) },
};
#define PRIMAP(_hw_mode_) \
@ -249,6 +251,8 @@ static int ath11k_wmi_cmd_send_nowait(struct ath11k_pdev_wmi *wmi, struct sk_buf
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
cmd_hdr->cmd_id = cmd;
trace_ath11k_wmi_cmd(ab, cmd_id, skb->data, skb->len);
memset(skb_cb, 0, sizeof(*skb_cb));
ret = ath11k_htc_send(&ab->htc, wmi->eid, skb);
@ -267,21 +271,39 @@ int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
{
struct ath11k_wmi_base *wmi_sc = wmi->wmi_ab;
int ret = -EOPNOTSUPP;
struct ath11k_base *ab = wmi_sc->ab;
might_sleep();
wait_event_timeout(wmi_sc->tx_credits_wq, ({
ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
if (ab->hw_params.credit_flow) {
wait_event_timeout(wmi_sc->tx_credits_wq, ({
ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, &wmi_sc->ab->dev_flags))
ret = -ESHUTDOWN;
if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH,
&wmi_sc->ab->dev_flags))
ret = -ESHUTDOWN;
(ret != -EAGAIN);
}), WMI_SEND_TIMEOUT_HZ);
(ret != -EAGAIN);
}), WMI_SEND_TIMEOUT_HZ);
} else {
wait_event_timeout(wmi->tx_ce_desc_wq, ({
ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH,
&wmi_sc->ab->dev_flags))
ret = -ESHUTDOWN;
(ret != -ENOBUFS);
}), WMI_SEND_TIMEOUT_HZ);
}
if (ret == -EAGAIN)
ath11k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id);
if (ret == -ENOBUFS)
ath11k_warn(wmi_sc->ab, "ce desc not available for wmi command %d\n",
cmd_id);
return ret;
}
@ -1244,7 +1266,8 @@ int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id,
return ret;
}
int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable)
int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id,
enum wmi_sta_ps_mode psmode)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_pdev_set_ps_mode_cmd *cmd;
@ -1259,7 +1282,7 @@ int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable)
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STA_POWERSAVE_MODE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->sta_ps_mode = enable;
cmd->sta_ps_mode = psmode;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
if (ret) {
@ -1269,7 +1292,7 @@ int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable)
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"WMI vdev set psmode %d vdev id %d\n",
enable, vdev_id);
psmode, vdev_id);
return ret;
}
@ -1612,6 +1635,15 @@ int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
void *ptr;
int ret, len;
size_t aligned_len = roundup(bcn->len, 4);
struct ieee80211_vif *vif;
struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev_id);
if (!arvif) {
ath11k_warn(ar->ab, "failed to find arvif with vdev id %d\n", vdev_id);
return -EINVAL;
}
vif = arvif->vif;
len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
@ -1624,8 +1656,12 @@ int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->tim_ie_offset = offs->tim_offset;
cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0];
cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1];
if (vif->csa_active) {
cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0];
cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1];
}
cmd->buf_len = bcn->len;
ptr = skb->data + sizeof(*cmd);
@ -1689,7 +1725,8 @@ int ath11k_wmi_vdev_install_key(struct ath11k *ar,
tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
FIELD_PREP(WMI_TLV_LEN, key_len_aligned);
memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned);
if (arg->key_data)
memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned);
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
if (ret) {
@ -1762,7 +1799,7 @@ ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
cmd->peer_flags |= WMI_PEER_AUTH;
if (param->need_ptk_4_way) {
cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
if (!hw_crypto_disabled)
if (!hw_crypto_disabled && param->is_assoc)
cmd->peer_flags &= ~WMI_PEER_AUTH;
}
if (param->need_gtk_2_way)
@ -2386,6 +2423,8 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
tchan_info->reg_class_id);
*reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
tchan_info->antennamax);
*reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
tchan_info->maxregpower);
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
@ -3427,6 +3466,53 @@ int ath11k_wmi_fils_discovery(struct ath11k *ar, u32 vdev_id, u32 interval,
return ret;
}
static void
ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *skb)
{
const void **tb;
const struct wmi_obss_color_collision_event *ev;
struct ath11k_vif *arvif;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return;
}
ev = tb[WMI_TAG_OBSS_COLOR_COLLISION_EVT];
if (!ev) {
ath11k_warn(ab, "failed to fetch obss color collision ev");
goto exit;
}
arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
if (!arvif) {
ath11k_warn(ab, "failed to find arvif with vedv id %d in obss_color_collision_event\n",
ev->vdev_id);
goto exit;
}
switch (ev->evt_type) {
case WMI_BSS_COLOR_COLLISION_DETECTION:
ieeee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap);
ath11k_dbg(ab, ATH11K_DBG_WMI,
"OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",
ev->vdev_id, ev->evt_type, ev->obss_color_bitmap);
break;
case WMI_BSS_COLOR_COLLISION_DISABLE:
case WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY:
case WMI_BSS_COLOR_FREE_SLOT_AVAILABLE:
break;
default:
ath11k_warn(ab, "received unknown obss color collision detetction event\n");
}
exit:
kfree(tb);
}
static void
ath11k_fill_band_to_mac_param(struct ath11k_base *soc,
struct wmi_host_pdev_band_to_mac *band_to_mac)
@ -5813,7 +5899,30 @@ static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab)
static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab,
struct sk_buff *skb)
{
struct ath11k_pdev_wmi *wmi = NULL;
u32 i;
u8 wmi_ep_count;
u8 eid;
eid = ATH11K_SKB_CB(skb)->eid;
dev_kfree_skb(skb);
if (eid >= ATH11K_HTC_EP_COUNT)
return;
wmi_ep_count = ab->htc.wmi_ep_count;
if (wmi_ep_count > ab->hw_params.max_radios)
return;
for (i = 0; i < ab->htc.wmi_ep_count; i++) {
if (ab->wmi_ab.wmi[i].eid == eid) {
wmi = &ab->wmi_ab.wmi[i];
break;
}
}
if (wmi)
wake_up(&wmi->tx_ce_desc_wq);
}
static bool ath11k_reg_is_world_alpha(char *alpha)
@ -6111,6 +6220,7 @@ static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff
static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct ath11k_vif *arvif;
u32 vdev_id, tx_status;
if (ath11k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
@ -6118,6 +6228,14 @@ static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *s
ath11k_warn(ab, "failed to extract bcn tx status");
return;
}
arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_id);
if (!arvif) {
ath11k_warn(ab, "invalid vdev id %d in bcn_tx_status",
vdev_id);
return;
}
ath11k_mac_bcn_tx_event(arvif);
}
static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb)
@ -6398,6 +6516,7 @@ static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff
struct ieee80211_sta *sta;
struct ath11k_peer *peer;
struct ath11k *ar;
u32 vdev_id;
if (ath11k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
ath11k_warn(ab, "failed to extract peer sta kickout event");
@ -6413,10 +6532,15 @@ static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff
if (!peer) {
ath11k_warn(ab, "peer not found %pM\n",
arg.mac_addr);
spin_unlock_bh(&ab->base_lock);
goto exit;
}
ar = ath11k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
vdev_id = peer->vdev_id;
spin_unlock_bh(&ab->base_lock);
ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
if (!ar) {
ath11k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
peer->vdev_id);
@ -6437,7 +6561,6 @@ static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff
ieee80211_report_low_ack(sta, 10);
exit:
spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
}
@ -7054,6 +7177,8 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
id = FIELD_GET(WMI_CMD_HDR_CMD_ID, (cmd_hdr->cmd_id));
trace_ath11k_wmi_event(ab, id, skb->data, skb->len);
if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
goto out;
@ -7138,6 +7263,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
ath11k_probe_resp_tx_status_event(ab, skb);
break;
case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
ath11k_wmi_obss_color_collision_event(ab, skb);
break;
/* add Unsupported events here */
case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
@ -7199,6 +7327,7 @@ static int ath11k_connect_pdev_htc_service(struct ath11k_base *ab,
ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
init_waitqueue_head(&ab->wmi_ab.wmi[pdev_idx].tx_ce_desc_wq);
return 0;
}

Просмотреть файл

@ -774,6 +774,8 @@ enum wmi_tlv_event_id {
WMI_MDNS_STATS_EVENTID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
WMI_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
WMI_SAP_OFL_DEL_STA_EVENTID,
WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID =
WMI_EVT_GRP_START_ID(WMI_GRP_OBSS_OFL),
WMI_OCB_SET_CONFIG_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_OCB),
WMI_OCB_GET_TSF_TIMER_RESP_EVENTID,
WMI_DCC_GET_STATS_RESP_EVENTID,
@ -2522,6 +2524,7 @@ struct ath11k_pdev_wmi {
enum ath11k_htc_ep_id eid;
const struct wmi_peer_flags_map *peer_flags;
u32 rx_decap_mode;
wait_queue_head_t tx_ce_desc_wq;
};
struct vdev_create_params {
@ -3617,6 +3620,7 @@ struct peer_assoc_params {
u32 peer_he_tx_mcs_set[WMI_HOST_MAX_HE_RATE_SET];
bool twt_responder;
bool twt_requester;
bool is_assoc;
struct ath11k_ppe_threshold peer_ppet;
};
@ -4914,6 +4918,13 @@ struct wmi_pdev_obss_pd_bitmap_cmd {
#define ATH11K_BSS_COLOR_COLLISION_DETECTION_STA_PERIOD_MS 10000
#define ATH11K_BSS_COLOR_COLLISION_DETECTION_AP_PERIOD_MS 5000
/**
 * enum wmi_bss_color_collision - event types carried in the
 * WMI OBSS color collision event's evt_type field.
 * @WMI_BSS_COLOR_COLLISION_DISABLE: collision detection disabled
 * @WMI_BSS_COLOR_COLLISION_DETECTION: a BSS color collision was detected
 * @WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY: free color slot timer expired
 * @WMI_BSS_COLOR_FREE_SLOT_AVAILABLE: a free BSS color slot is available
 */
enum wmi_bss_color_collision {
WMI_BSS_COLOR_COLLISION_DISABLE = 0,
WMI_BSS_COLOR_COLLISION_DETECTION,
WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY,
WMI_BSS_COLOR_FREE_SLOT_AVAILABLE,
};
struct wmi_obss_color_collision_cfg_params_cmd {
u32 tlv_header;
u32 vdev_id;
@ -4931,6 +4942,12 @@ struct wmi_bss_color_change_enable_params_cmd {
u32 enable;
} __packed;
/**
 * struct wmi_obss_color_collision_event - payload of the OBSS color
 * collision WMI event.
 * @vdev_id: id of the vdev the event applies to
 * @evt_type: event type, one of &enum wmi_bss_color_collision
 * @obss_color_bitmap: bitmap of observed OBSS colors
 */
struct wmi_obss_color_collision_event {
u32 vdev_id;
u32 evt_type;
u64 obss_color_bitmap;
} __packed;
#define ATH11K_IPV4_TH_SEED_SIZE 5
#define ATH11K_IPV6_TH_SEED_SIZE 11
@ -5351,7 +5368,8 @@ int ath11k_wmi_set_peer_param(struct ath11k *ar, const u8 *peer_addr,
u32 vdev_id, u32 param_id, u32 param_val);
int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id,
u32 param_value, u8 pdev_id);
int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable);
int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id,
enum wmi_sta_ps_mode psmode);
int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab);
int ath11k_wmi_cmd_init(struct ath11k_base *ab);
int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab);

Просмотреть файл

@ -120,7 +120,7 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
AR_ISR_TXEOL);
}
ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
ah->intr_txqs = MS(s0_s, AR_ISR_S0_QCU_TXOK);
ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);

Просмотреть файл

@ -1005,24 +1005,20 @@ static void __ar955x_tx_iq_cal_sort(struct ath_hw *ah,
int i, int nmeasurement)
{
struct ath_common *common = ath9k_hw_common(ah);
int im, ix, iy, temp;
int im, ix, iy;
for (im = 0; im < nmeasurement; im++) {
for (ix = 0; ix < MAXIQCAL - 1; ix++) {
for (iy = ix + 1; iy <= MAXIQCAL - 1; iy++) {
if (coeff->mag_coeff[i][im][iy] <
coeff->mag_coeff[i][im][ix]) {
temp = coeff->mag_coeff[i][im][ix];
coeff->mag_coeff[i][im][ix] =
coeff->mag_coeff[i][im][iy];
coeff->mag_coeff[i][im][iy] = temp;
swap(coeff->mag_coeff[i][im][ix],
coeff->mag_coeff[i][im][iy]);
}
if (coeff->phs_coeff[i][im][iy] <
coeff->phs_coeff[i][im][ix]) {
temp = coeff->phs_coeff[i][im][ix];
coeff->phs_coeff[i][im][ix] =
coeff->phs_coeff[i][im][iy];
coeff->phs_coeff[i][im][iy] = temp;
swap(coeff->phs_coeff[i][im][ix],
coeff->phs_coeff[i][im][iy]);
}
}
}

Просмотреть файл

@ -272,6 +272,21 @@ static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
return 0;
}
/* Mask (disable) the DXE interrupt bit(s) for the given channel. */
static void wcn36xx_dxe_disable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
	int mask = 0;

	/* Read-modify-write of the global DXE interrupt mask register. */
	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_MASK_REG, &mask);

	mask &= ~wcn_ch;

	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_INT_MASK_REG, mask);
}
static int wcn36xx_dxe_fill_skb(struct device *dev,
struct wcn36xx_dxe_ctl *ctl,
gfp_t gfp)
@ -834,6 +849,53 @@ unlock:
return ret;
}
/* Check, under the channel lock, whether a TX DXE ring still holds any skb
 * awaiting completion. Returns true when the ring is empty.
 */
static bool _wcn36xx_dxe_tx_channel_is_empty(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *first_skb_ctl, *cur_skb_ctl;
	unsigned long flags;
	bool empty = true;

	spin_lock_irqsave(&ch->lock, flags);

	/* Ring entries alternate BD/skb control blocks; visit each skb
	 * slot exactly once around the ring.
	 */
	first_skb_ctl = ch->head_blk_ctl->next;
	cur_skb_ctl = first_skb_ctl;
	do {
		if (cur_skb_ctl->skb) {
			empty = false;
			break;
		}
		/* Step over the intervening BD entry to the next skb entry. */
		cur_skb_ctl = cur_skb_ctl->next->next;
	} while (cur_skb_ctl != first_skb_ctl);

	spin_unlock_irqrestore(&ch->lock, flags);

	return empty;
}
/* Wait for the hardware TX DXE rings to drain. Called with mac80211 queues
 * stopped; may sleep. Returns 0 once both TX channels are empty, or -EBUSY
 * if they have not drained after ~100 polling intervals.
 */
int wcn36xx_dxe_tx_flush(struct wcn36xx *wcn)
{
	int attempt;

	for (attempt = 0; attempt < 100; attempt++) {
		if (_wcn36xx_dxe_tx_channel_is_empty(&wcn->dxe_tx_l_ch) &&
		    _wcn36xx_dxe_tx_channel_is_empty(&wcn->dxe_tx_h_ch))
			return 0;

		/* This ieee80211_ops callback is specifically allowed to
		 * sleep.
		 */
		usleep_range(1000, 1100);
	}

	return -EBUSY;
}
int wcn36xx_dxe_init(struct wcn36xx *wcn)
{
int reg_data = 0, ret;
@ -869,7 +931,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
WCN36XX_DXE_WQ_TX_L);
wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
/***************************************/
/* Init descriptors for TX HIGH channel */
@ -893,9 +954,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
/* Enable channel interrupts */
wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
/***************************************/
/* Init descriptors for RX LOW channel */
/***************************************/
@ -905,7 +963,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
goto out_err_rxl_ch;
}
/* For RX we need to preallocated buffers */
wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
@ -928,9 +985,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
WCN36XX_DXE_REG_CTL_RX_L,
WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);
/* Enable channel interrupts */
wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
/***************************************/
/* Init descriptors for RX HIGH channel */
/***************************************/
@ -962,15 +1016,18 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
WCN36XX_DXE_REG_CTL_RX_H,
WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);
/* Enable channel interrupts */
wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
ret = wcn36xx_dxe_request_irqs(wcn);
if (ret < 0)
goto out_err_irq;
timer_setup(&wcn->tx_ack_timer, wcn36xx_dxe_tx_timer, 0);
/* Enable channel interrupts */
wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
return 0;
out_err_irq:
@ -987,6 +1044,14 @@ out_err_txh_ch:
void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
int reg_data = 0;
/* Disable channel interrupts */
wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
free_irq(wcn->tx_irq, wcn);
free_irq(wcn->rx_irq, wcn);
del_timer(&wcn->tx_ack_timer);
@ -996,6 +1061,15 @@ void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
wcn->tx_ack_skb = NULL;
}
/* Put the DXE block into reset before freeing memory */
reg_data = WCN36XX_DXE_REG_RESET;
wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);
wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
}

Просмотреть файл

@ -466,5 +466,6 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
struct wcn36xx_tx_bd *bd,
struct sk_buff *skb,
bool is_low);
int wcn36xx_dxe_tx_flush(struct wcn36xx *wcn);
void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status);
#endif /* _DXE_H_ */

Просмотреть файл

@ -402,6 +402,7 @@ static void wcn36xx_change_opchannel(struct wcn36xx *wcn, int ch)
static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
{
struct wcn36xx *wcn = hw->priv;
int ret;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac config changed 0x%08x\n", changed);
@ -417,17 +418,31 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
* want to receive/transmit regular data packets, then
* simply stop the scan session and exit PS mode.
*/
wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN,
wcn->sw_scan_vif);
wcn->sw_scan_channel = 0;
if (wcn->sw_scan_channel)
wcn36xx_smd_end_scan(wcn, wcn->sw_scan_channel);
if (wcn->sw_scan_init) {
wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN,
wcn->sw_scan_vif);
}
} else if (wcn->sw_scan) {
/* A scan is ongoing, do not change the operating
* channel, but start a scan session on the channel.
*/
wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN,
wcn->sw_scan_vif);
if (wcn->sw_scan_channel)
wcn36xx_smd_end_scan(wcn, wcn->sw_scan_channel);
if (!wcn->sw_scan_init) {
/* This can fail if we are unable to notify the
* operating channel.
*/
ret = wcn36xx_smd_init_scan(wcn,
HAL_SYS_MODE_SCAN,
wcn->sw_scan_vif);
if (ret) {
mutex_unlock(&wcn->conf_mutex);
return -EIO;
}
}
wcn36xx_smd_start_scan(wcn, ch);
wcn->sw_scan_channel = ch;
} else {
wcn36xx_change_opchannel(wcn, ch);
}
@ -707,6 +722,8 @@ static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw,
struct wcn36xx *wcn = hw->priv;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
wcn36xx_dbg(WCN36XX_DBG_MAC, "sw_scan_start");
wcn->sw_scan = true;
wcn->sw_scan_vif = vif;
wcn->sw_scan_channel = 0;
@ -721,8 +738,15 @@ static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw,
{
struct wcn36xx *wcn = hw->priv;
wcn36xx_dbg(WCN36XX_DBG_MAC, "sw_scan_complete");
/* ensure that any scan session is finished */
wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN, wcn->sw_scan_vif);
if (wcn->sw_scan_channel)
wcn36xx_smd_end_scan(wcn, wcn->sw_scan_channel);
if (wcn->sw_scan_init) {
wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN,
wcn->sw_scan_vif);
}
wcn->sw_scan = false;
wcn->sw_scan_opchannel = 0;
}
@ -1277,6 +1301,16 @@ static void wcn36xx_ipv6_addr_change(struct ieee80211_hw *hw,
}
#endif
/* mac80211 .flush callback: block until the DXE TX rings have drained. */
static void wcn36xx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			  u32 queues, bool drop)
{
	struct wcn36xx *wcn = hw->priv;

	if (wcn36xx_dxe_tx_flush(wcn))
		wcn36xx_err("Failed to flush hardware tx queues\n");
}
static const struct ieee80211_ops wcn36xx_ops = {
.start = wcn36xx_start,
.stop = wcn36xx_stop,
@ -1304,6 +1338,7 @@ static const struct ieee80211_ops wcn36xx_ops = {
#if IS_ENABLED(CONFIG_IPV6)
.ipv6_addr_change = wcn36xx_ipv6_addr_change,
#endif
.flush = wcn36xx_flush,
CFG80211_TESTMODE_CMD(wcn36xx_tm_cmd)
};

Просмотреть файл

@ -722,6 +722,7 @@ int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode,
wcn36xx_err("hal_init_scan response failed err=%d\n", ret);
goto out;
}
wcn->sw_scan_init = true;
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
@ -752,6 +753,7 @@ int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel)
wcn36xx_err("hal_start_scan response failed err=%d\n", ret);
goto out;
}
wcn->sw_scan_channel = scan_channel;
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
@ -782,6 +784,7 @@ int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel)
wcn36xx_err("hal_end_scan response failed err=%d\n", ret);
goto out;
}
wcn->sw_scan_channel = 0;
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
@ -823,6 +826,7 @@ int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
wcn36xx_err("hal_finish_scan response failed err=%d\n", ret);
goto out;
}
wcn->sw_scan_init = false;
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
@ -2732,7 +2736,7 @@ static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
tmp->bss_index);
vif = wcn36xx_priv_to_vif(tmp);
ieee80211_connection_loss(vif);
ieee80211_beacon_loss(vif);
}
return 0;
}
@ -2747,7 +2751,7 @@ static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
rsp->bss_index);
vif = wcn36xx_priv_to_vif(tmp);
ieee80211_connection_loss(vif);
ieee80211_beacon_loss(vif);
return 0;
}
}

Просмотреть файл

@ -272,7 +272,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
const struct wcn36xx_rate *rate;
struct ieee80211_hdr *hdr;
struct wcn36xx_rx_bd *bd;
struct ieee80211_supported_band *sband;
u16 fc, sn;
/*
@ -314,8 +313,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
fc = __le16_to_cpu(hdr->frame_control);
sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
status.freq = WCN36XX_CENTER_FREQ(wcn);
status.band = WCN36XX_BAND(wcn);
status.mactime = 10;
status.signal = -get_rssi0(bd);
status.antenna = 1;
@ -327,18 +324,36 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag);
if (bd->scan_learn) {
/* If packet originate from hardware scanning, extract the
* band/channel from bd descriptor.
*/
u8 hwch = (bd->reserved0 << 4) + bd->rx_ch;
if (bd->rf_band != 1 && hwch <= sizeof(ab_rx_ch_map) && hwch >= 1) {
status.band = NL80211_BAND_5GHZ;
status.freq = ieee80211_channel_to_frequency(ab_rx_ch_map[hwch - 1],
status.band);
} else {
status.band = NL80211_BAND_2GHZ;
status.freq = ieee80211_channel_to_frequency(hwch, status.band);
}
} else {
status.band = WCN36XX_BAND(wcn);
status.freq = WCN36XX_CENTER_FREQ(wcn);
}
if (bd->rate_id < ARRAY_SIZE(wcn36xx_rate_table)) {
rate = &wcn36xx_rate_table[bd->rate_id];
status.encoding = rate->encoding;
status.enc_flags = rate->encoding_flags;
status.bw = rate->bw;
status.rate_idx = rate->mcs_or_legacy_index;
sband = wcn->hw->wiphy->bands[status.band];
status.nss = 1;
if (status.band == NL80211_BAND_5GHZ &&
status.encoding == RX_ENC_LEGACY &&
status.rate_idx >= sband->n_bitrates) {
status.rate_idx >= 4) {
/* no dsss rates in 5Ghz rates table */
status.rate_idx -= 4;
}
@ -353,22 +368,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
ieee80211_is_probe_resp(hdr->frame_control))
status.boottime_ns = ktime_get_boottime_ns();
if (bd->scan_learn) {
/* If packet originates from hardware scanning, extract the
* band/channel from bd descriptor.
*/
u8 hwch = (bd->reserved0 << 4) + bd->rx_ch;
if (bd->rf_band != 1 && hwch <= sizeof(ab_rx_ch_map) && hwch >= 1) {
status.band = NL80211_BAND_5GHZ;
status.freq = ieee80211_channel_to_frequency(ab_rx_ch_map[hwch - 1],
status.band);
} else {
status.band = NL80211_BAND_2GHZ;
status.freq = ieee80211_channel_to_frequency(hwch, status.band);
}
}
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
if (ieee80211_is_beacon(hdr->frame_control)) {

Просмотреть файл

@ -248,6 +248,7 @@ struct wcn36xx {
struct cfg80211_scan_request *scan_req;
bool sw_scan;
u8 sw_scan_opchannel;
bool sw_scan_init;
u8 sw_scan_channel;
struct ieee80211_vif *sw_scan_vif;
struct mutex scan_lock;

Просмотреть файл

@ -3901,6 +3901,24 @@ static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg,
cfg->wowl.active = true;
}
/**
 * brcmf_keepalive_start() - program a firmware null-data keep-alive.
 *
 * @ifp: interface to configure.
 * @interval: keep-alive period in seconds.
 *
 * Configures mkeep_alive slot 0 with a zero-length (null function/data)
 * frame so the firmware keeps the association alive during suspend.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int brcmf_keepalive_start(struct brcmf_if *ifp, unsigned int interval)
{
	struct brcmf_mkeep_alive_pkt_le kalive = {0};
	int ret = 0;

	/* Configure Null function/data keepalive */
	kalive.version = cpu_to_le16(1);
	/* period_msec is a 32-bit LE field: cpu_to_le16() would truncate any
	 * period above 65535 ms and leave the upper half-word unset.
	 */
	kalive.period_msec = cpu_to_le32(interval * MSEC_PER_SEC);
	kalive.len_bytes = cpu_to_le16(0);
	/* keep_alive_id is a plain u8 (valid range 0-3), not a LE field. */
	kalive.keep_alive_id = 0;

	ret = brcmf_fil_iovar_data_set(ifp, "mkeep_alive", &kalive, sizeof(kalive));
	if (ret)
		brcmf_err("keep-alive packet config failed, ret=%d\n", ret);

	return ret;
}
static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
struct cfg80211_wowlan *wowl)
{
@ -3947,6 +3965,9 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
} else {
/* Configure WOWL parameters */
brcmf_configure_wowl(cfg, ifp, wowl);
/* Prevent disassociation due to inactivity with keep-alive */
brcmf_keepalive_start(ifp, 30);
}
exit:

Просмотреть файл

@ -1052,4 +1052,23 @@ struct brcmf_gscan_config {
struct brcmf_gscan_bucket_config bucket[1];
};
/**
 * struct brcmf_mkeep_alive_pkt_le - configuration data for keep-alive frame.
 *
 * @version: version for mkeep_alive
 * @length: length of fixed parameters in the structure.
 * @period_msec: keep-alive period in milliseconds.
 * @len_bytes: size of the data.
 * @keep_alive_id: ID (0 - 3).
 * @data: keep-alive frame data.
 */
struct brcmf_mkeep_alive_pkt_le {
	__le16 version;
	__le16 length;
	__le32 period_msec;
	__le16 len_bytes;
	u8 keep_alive_id;
	/* C99 flexible array member: zero-length arrays are deprecated in
	 * the kernel and defeat compiler array-bounds checking.
	 */
	u8 data[];
} __packed;
#endif /* FWIL_TYPES_H_ */

Просмотреть файл

@ -92,6 +92,32 @@ config IWLWIFI_BCAST_FILTERING
If unsure, don't enable this option, as some programs might
expect incoming broadcasts for their normal operations.
config IWLMEI
tristate "Intel Management Engine communication over WLAN"
depends on INTEL_MEI
depends on PM
depends on IWLMVM
help
Enables the iwlmei kernel module.
CSME stands for Converged Security and Management Engine. It is a CPU
on the chipset and runs a dedicated firmware. AMT (Active Management
Technology) is one of the applications that run on that CPU. AMT
allows to control the platform remotely.
This kernel module allows to communicate with the Intel Management
Engine over Wifi. This is supported starting from Tiger Lake
platforms and has been tested on 9260 devices only.
If AMT is configured not to use the wireless device, this module is
harmless (and useless).
Enabling this option on a platform that has a different device and
has Wireless enabled on AMT can prevent WiFi from working correctly.
For more information see
<https://software.intel.com/en-us/manageability/>
If unsure, say N.
menu "Debugging Options"
config IWLWIFI_DEBUG

Просмотреть файл

@ -30,5 +30,6 @@ ccflags-y += -I$(src)
obj-$(CONFIG_IWLDVM) += dvm/
obj-$(CONFIG_IWLMVM) += mvm/
obj-$(CONFIG_IWLMEI) += mei/
CFLAGS_iwl-devtrace.o := -I$(src)

Просмотреть файл

@ -22,6 +22,7 @@
#include "fw/api/commands.h"
#include "fw/api/cmdhdr.h"
#include "fw/img.h"
#include "mei/iwl-mei.h"
/* NVM offsets (in words) definitions */
enum nvm_offsets {
@ -1114,6 +1115,66 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg,
return false;
}
struct iwl_nvm_data *
iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_mei_nvm *mei_nvm,
const struct iwl_fw *fw)
{
struct iwl_nvm_data *data;
u32 sbands_flags = 0;
u8 rx_chains = fw->valid_rx_ant;
u8 tx_chains = fw->valid_rx_ant;
if (cfg->uhb_supported)
data = kzalloc(struct_size(data, channels,
IWL_NVM_NUM_CHANNELS_UHB),
GFP_KERNEL);
else
data = kzalloc(struct_size(data, channels,
IWL_NVM_NUM_CHANNELS_EXT),
GFP_KERNEL);
if (!data)
return NULL;
BUILD_BUG_ON(ARRAY_SIZE(mei_nvm->channels) !=
IWL_NVM_NUM_CHANNELS_UHB);
data->nvm_version = mei_nvm->nvm_version;
iwl_set_radio_cfg(cfg, data, mei_nvm->radio_cfg);
if (data->valid_tx_ant)
tx_chains &= data->valid_tx_ant;
if (data->valid_rx_ant)
rx_chains &= data->valid_rx_ant;
data->sku_cap_mimo_disabled = false;
data->sku_cap_band_24ghz_enable = true;
data->sku_cap_band_52ghz_enable = true;
data->sku_cap_11n_enable =
!(iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL);
data->sku_cap_11ac_enable = true;
data->sku_cap_11ax_enable =
mei_nvm->caps & MEI_NVM_CAPS_11AX_SUPPORT;
data->lar_enabled = mei_nvm->caps & MEI_NVM_CAPS_LARI_SUPPORT;
data->n_hw_addrs = mei_nvm->n_hw_addrs;
/* If no valid mac address was found - bail out */
if (iwl_set_hw_address(trans, cfg, data, NULL, NULL)) {
kfree(data);
return NULL;
}
if (data->lar_enabled &&
fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT))
sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
iwl_init_sbands(trans, data, mei_nvm->channels, tx_chains, rx_chains,
sbands_flags, true, fw);
return data;
}
IWL_EXPORT_SYMBOL(iwl_parse_mei_nvm_data);
struct iwl_nvm_data *
iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_fw *fw,

Просмотреть файл

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2015, 2018-2020 Intel Corporation
* Copyright (C) 2005-2015, 2018-2021 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_nvm_parse_h__
@ -8,6 +8,7 @@
#include <net/cfg80211.h>
#include "iwl-eeprom-parse.h"
#include "mei/iwl-mei.h"
/**
* enum iwl_nvm_sbands_flags - modification flags for the channel profiles
@ -81,4 +82,12 @@ void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data,
struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
const struct iwl_fw *fw);
/**
* iwl_parse_mei_nvm_data - parse the mei_nvm_data and get an iwl_nvm_data
*/
struct iwl_nvm_data *
iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_mei_nvm *mei_nvm,
const struct iwl_fw *fw);
#endif /* __iwl_nvm_parse_h__ */

Просмотреть файл

@ -924,6 +924,7 @@ struct iwl_trans_txqs {
/**
* struct iwl_trans - transport common data
*
* @csme_own - true if we couldn't get ownership on the device
* @ops - pointer to iwl_trans_ops
* @op_mode - pointer to the op_mode
* @trans_cfg: the trans-specific configuration part
@ -958,6 +959,7 @@ struct iwl_trans_txqs {
* @iwl_trans_txqs: transport tx queues data.
*/
struct iwl_trans {
bool csme_own;
const struct iwl_trans_ops *ops;
struct iwl_op_mode *op_mode;
const struct iwl_cfg_trans_params *trans_cfg;

Просмотреть файл

@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_IWLMEI) += iwlmei.o
iwlmei-y += main.o
iwlmei-y += net.o
iwlmei-$(CONFIG_IWLWIFI_DEVICE_TRACING) += trace.o
CFLAGS_trace.o := -I$(src)
ccflags-y += -I $(srctree)/$(src)/../

Просмотреть файл

@ -0,0 +1,20 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 Intel Corporation
*/
#ifndef __IWLMEI_INTERNAL_H_
#define __IWLMEI_INTERNAL_H_
#include <uapi/linux/if_ether.h>
#include <linux/netdevice.h>
#include "sap.h"
rx_handler_result_t iwl_mei_rx_filter(struct sk_buff *skb,
const struct iwl_sap_oob_filters *filters,
bool *pass_to_csme);
void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx);
#endif /* __IWLMEI_INTERNAL_H_ */

Просмотреть файл

@ -0,0 +1,505 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 Intel Corporation
*/
#ifndef __iwl_mei_h__
#define __iwl_mei_h__
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/ieee80211.h>
/**
* DOC: Introduction
*
* iwlmei is the kernel module that is in charge of the communication between
* the iwlwifi driver and the CSME firmware's WLAN driver. This communication
* uses the SAP protocol defined in another file.
* iwlwifi can request or release ownership on the WiFi device through iwlmei.
* iwlmei may notify iwlwifi about certain events: what filter iwlwifi should
* use to passthrough inbound packets to the CSME firmware for example. iwlmei
* may also use iwlwifi to send traffic. This means that we need communication
* from iwlmei to iwlwifi and the other way around.
*/
/**
* DOC: Life cycle
*
* iwlmei exports symbols that are needed by iwlwifi so that iwlmei will always
* be loaded when iwlwifi is alive. iwlwifi registers itself to iwlmei and
* provides the pointers to the functions that iwlmei calls whenever needed.
* iwlwifi calls iwlmei through direct and context-free function calls.
* It is assumed that only one device is accessible to the CSME firmware and
* under the scope of iwlmei so that it is valid not to have any context passed
* to iwlmei's functions.
*
* There are cases in which iwlmei can't access the CSME firmware, because the
* CSME firmware is undergoing a reset, or the mei bus decided to unbind the
* device. In those cases, iwlmei will need not to send requests over the mei
* bus. Instead, it needs to cache the requests from iwlwifi and fulfill them
* when the mei bus is available again.
*
* iwlmei can call iwlwifi as long as iwlwifi is registered to iwlmei. When
* iwlwifi goes down (the PCI device is unbound, or the iwlwifi is unloaded)
* iwlwifi needs to unregister from iwlmei.
*/
/**
* DOC: Memory layout
*
* Since iwlwifi calls iwlmei without any context, iwlmei needs to hold a
* global pointer to its data (which is in the mei client device's private
* data area). If there was no bind on the mei bus, this pointer is NULL and
* iwlmei knows not to access the CSME firmware upon requests from iwlwifi.
*
* iwlmei needs to cache requests from iwlwifi when there is no mei client
* device available (when iwlmei has been removed from the mei bus). In this
* case, all iwlmei's data that resides in the mei client device's private data
* area is unavailable. For this specific case, a separate caching area is
* needed.
*/
/**
* DOC: Concurrency
*
* iwlwifi can call iwlmei at any time. iwlmei will take care to synchronize
* the calls from iwlwifi with its internal flows. iwlwifi must not call iwlmei
* in flows that cannot sleep. Moreover, iwlwifi must not call iwlmei in flows
* that originated from iwlmei.
*/
/**
* DOC: Probe and remove from mei bus driver
*
* When the mei bus driver enumerates its devices, it calls the iwlmei's probe
* function which will send the %SAP_ME_MSG_START message. The probe completes
* before the response (%SAP_ME_MSG_START_OK) is received. This response will
* be handled by the Rx path. Once it arrives, the connection to the CSME
* firmware is considered established and iwlwifi's requests can be treated
* against the CSME firmware.
*
* When the mei bus driver removes the device, iwlmei loses all the data that
* was attached to the mei client device. It clears the global pointer to the
* mei client device since it is not available anymore. This will cause all the
* requests coming from iwlwifi to be cached. This flow takes the global mutex
* to be synchronized with all the requests coming from iwlwifi.
*/
/**
* DOC: Driver load when CSME owns the device
*
* When the driver (iwlwifi) is loaded while CSME owns the device,
* it'll ask CSME to release the device through HW registers. CSME
* will release the device only in the case that there is no connection
* through the mei bus. If there is a mei bus connection, CSME will refuse
* to release the ownership on the device through the HW registers. In that
* case, iwlwifi must first request ownership using the SAP protocol.
*
* Once iwlwifi will request ownership through the SAP protocol, CSME will
* grant the ownership on the device through the HW registers as well.
* In order to request ownership over SAP, we first need to have an interface
* which means that we need to register to mac80211.
* This can't happen before we get the NVM that contains all the capabilities
* of the device. Reading the NVM usually requires loading the firmware, but
* this is impossible as long as we don't have ownership on the device.
* In order to solve this chicken and egg problem, the host driver can get
* the NVM through CSME which owns the device. It can send
* %SAP_MSG_NOTIF_GET_NVM, which will be replied by %SAP_MSG_NOTIF_NVM with
* the NVM's content that the host driver needs.
*/
/**
* DOC: CSME behavior regarding the ownership requests
*
* The ownership requests from the host can come in two different ways:
* - the HW registers in iwl_pcie_set_hw_ready
* - using the Software Arbitration Protocol (SAP)
*
* The host can ask CSME who owns the device with %SAP_MSG_NOTIF_WHO_OWNS_NIC,
* and it can request ownership with %SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP.
* The host will first use %SAP_MSG_NOTIF_WHO_OWNS_NIC to know what state
* CSME is in. In case CSME thinks it owns the device, the host can ask for
* ownership with %SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP.
*
* Here the table that describes CSME's behavior upon ownership request:
*
* +-------------------+------------+--------------+-----------------------------+------------+
* | State | HW reg bit | Reply for | Event | HW reg bit |
* | | before | WHO_OWNS_NIC | | after |
* +===================+============+==============+=============================+============+
* | WiAMT not | 0 | Host | HW register or | 0 |
* | operational | Host owner | | HOST_ASKS_FOR_NIC_OWNERSHIP | Host owner |
* +-------------------+------------+--------------+-----------------------------+------------+
* | Operational & | 1 | N/A | HW register | 0 |
* | SAP down & | CSME owner | | | Host owner |
* | no session active | | | | |
* +-------------------+------------+--------------+-----------------------------+------------+
* | Operational & | 1 | CSME | HW register | 1 |
* | SAP up | CSME owner | | | CSME owner |
* +-------------------+------------+--------------+-----------------------------+------------+
* | Operational & | 1 | CSME | HOST_ASKS_FOR_NIC_OWNERSHIP | 0 |
* | SAP up | CSME owner | | | Host owner |
* +-------------------+------------+--------------+-----------------------------+------------+
*/
/**
* DOC: Driver load when CSME is associated and a session is active
*
* A "session" is active when CSME is associated to an access point and the
* link is used to attach a remote driver or to control the system remotely.
* When a session is active, we want to make sure it won't disconnect when we
* take ownership on the device.
* In this case, the driver can get the device, but it'll need to make
* sure that it'll connect to the exact same AP (same BSSID).
* In order to do so, CSME will send the connection parameters through
* SAP and then the host can check if it can connect to this same AP.
* If yes, it can request ownership through SAP and connect quickly without
* scanning all the channels, but just probing the AP on the channel that
* CSME was connected to.
* In order to signal this specific scenario to iwlwifi, iwlmei will
* immediately require iwlwifi to report RF-Kill to the network stack. This
* RF-Kill will prevent the stack from getting the device, and it has a reason
* that tells the userspace that the device is in RF-Kill because it is not
* owned by the host. Once the userspace has configured the right profile,
* it'll be able to let iwlmei know that it can request ownership over SAP
* which will remove the RF-Kill, and finally allow the host to connect.
* The host has then 3 seconds to connect (including DHCP). Had the host
* failed to connect within those 3 seconds, CSME will take the device back.
*/
/**
* DOC: Datapath
*
* CSME can transmit packets, through the netdev that it gets from the wifi
* driver. It'll send packets in the 802.3 format and simply call
* dev_queue_xmit.
*
* For Rx, iwlmei registers a Rx handler that it attaches to the netdev. iwlmei
* may catch packets and send them to CSME, it can then either drop them so
* that they are invisible to user space, or let them go to user space.
*
* Packets transmitted by the user space do not need to be forwarded to CSME
* with the exception of the DHCP request. In order to know what IP is used
* by the user space, CSME needs to get the DHCP request. See
* iwl_mei_tx_copy_to_csme().
*/
/**
 * enum iwl_mei_nvm_caps - capabilities for MEI NVM
 * @MEI_NVM_CAPS_LARI_SUPPORT: LARI is supported
 * @MEI_NVM_CAPS_11AX_SUPPORT: 802.11ax (11AX) is supported
 *
 * Bits reported in the @caps field of &struct iwl_mei_nvm.
 */
enum iwl_mei_nvm_caps {
MEI_NVM_CAPS_LARI_SUPPORT = BIT(0),
MEI_NVM_CAPS_11AX_SUPPORT = BIT(1),
};
/**
 * struct iwl_mei_nvm - used to pass the NVM from CSME
 * @hw_addr: The MAC address
 * @n_hw_addrs: The number of MAC addresses
 * @reserved: For alignment.
 * @radio_cfg: The radio configuration.
 * @caps: See &enum iwl_mei_nvm_caps.
 * @nvm_version: The version of the NVM.
 * @channels: The data for each channel.
 *
 * If a field is added, it must correspond to the SAP structure.
 * NOTE(review): the layout (including the 110-entry channel array) mirrors
 * the SAP NVM structure, so neither fields nor sizes may be reordered or
 * changed independently of the CSME firmware API.
 */
struct iwl_mei_nvm {
u8 hw_addr[ETH_ALEN];
u8 n_hw_addrs;
u8 reserved;
u32 radio_cfg;
u32 caps;
u32 nvm_version;
u32 channels[110];
};
/**
 * enum iwl_mei_pairwise_cipher - cipher for UCAST key
 * @IWL_MEI_CIPHER_NONE: none
 * @IWL_MEI_CIPHER_CCMP: ccmp
 * @IWL_MEI_CIPHER_GCMP: gcmp
 * @IWL_MEI_CIPHER_GCMP_256: gcmp 256
 *
 * Note that those values are dictated by the CSME firmware API (see sap.h),
 * which is why they are sparse (4, 8, 9) and must not be renumbered.
 */
enum iwl_mei_pairwise_cipher {
IWL_MEI_CIPHER_NONE = 0,
IWL_MEI_CIPHER_CCMP = 4,
IWL_MEI_CIPHER_GCMP = 8,
IWL_MEI_CIPHER_GCMP_256 = 9,
};
/**
 * enum iwl_mei_akm_auth - a combination of AKM and AUTH method
 * @IWL_MEI_AKM_AUTH_OPEN: No encryption
 * @IWL_MEI_AKM_AUTH_RSNA: 1X profile
 * @IWL_MEI_AKM_AUTH_RSNA_PSK: PSK profile
 * @IWL_MEI_AKM_AUTH_SAE: SAE profile
 *
 * Note that those values are dictated by the CSME firmware API (see sap.h),
 * which is why they are sparse (0, 6, 7, 9) and must not be renumbered.
 */
enum iwl_mei_akm_auth {
IWL_MEI_AKM_AUTH_OPEN = 0,
IWL_MEI_AKM_AUTH_RSNA = 6,
IWL_MEI_AKM_AUTH_RSNA_PSK = 7,
IWL_MEI_AKM_AUTH_SAE = 9,
};
/**
 * struct iwl_mei_conn_info - connection info
 * @lp_state: link protection state
 * @auth_mode: authentication mode
 * @ssid_len: the length of SSID
 * @channel: the associated channel
 * @band: the associated band
 * @pairwise_cipher: the cipher used for unicast packets
 * @bssid: the BSSID
 * @ssid: the SSID
 */
struct iwl_mei_conn_info {
u8 lp_state;
u8 auth_mode;
u8 ssid_len;
u8 channel;
u8 band;
u8 pairwise_cipher;
u8 bssid[ETH_ALEN];
u8 ssid[IEEE80211_MAX_SSID_LEN];
};
/**
 * struct iwl_mei_colloc_info - collocated AP info
 * @channel: the channel of the collocated AP
 * @bssid: the BSSID of the collocated AP
 *
 * Passed to iwl_mei_host_associated() for UHB APs; NULL otherwise.
 */
struct iwl_mei_colloc_info {
u8 channel;
u8 bssid[ETH_ALEN];
};
/**
 * struct iwl_mei_ops - driver's operations called by iwlmei
 * Operations will not be called more than once concurrently.
 * It's not allowed to call iwlmei functions from this context.
 *
 * @me_conn_status: provide information about CSME's current connection.
 * @rfkill: called when the wifi driver should report a change in the rfkill
 * status.
 * @roaming_forbidden: indicates whether roaming is forbidden.
 * @sap_connected: indicate that SAP is now connected. Will be called in case
 * the wifi driver registered to iwlmei before SAP connection succeeded or
 * when the SAP connection is re-established.
 * @nic_stolen: this means that device is no longer available. The device can
 * still be used until the callback returns.
 */
struct iwl_mei_ops {
void (*me_conn_status)(void *priv,
const struct iwl_mei_conn_info *conn_info);
void (*rfkill)(void *priv, bool blocked);
void (*roaming_forbidden)(void *priv, bool forbidden);
void (*sap_connected)(void *priv);
void (*nic_stolen)(void *priv);
};
#if IS_ENABLED(CONFIG_IWLMEI)
/**
 * iwl_mei_is_connected() - is the connection to the CSME firmware established?
 *
 * Return: true if we have a SAP connection
 */
bool iwl_mei_is_connected(void);
/**
 * iwl_mei_get_nvm() - returns the NVM for the device
 *
 * It is the caller's responsibility to free the memory returned
 * by this function.
 * This function blocks (sleeps) until the NVM is ready.
 *
 * Return: the NVM as received from CSME
 */
struct iwl_mei_nvm *iwl_mei_get_nvm(void);
/**
 * iwl_mei_get_ownership() - request ownership
 *
 * This function blocks until ownership is granted or timeout expired.
 *
 * Return: 0 in case we could get ownership on the device
 */
int iwl_mei_get_ownership(void);
/**
 * iwl_mei_set_rfkill_state() - set SW and HW RF kill states
 * @hw_rfkill: HW RF kill state.
 * @sw_rfkill: SW RF kill state.
 *
 * This function must be called when SW RF kill is issued by the user.
 */
void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill);
/**
 * iwl_mei_set_nic_info() - set mac address
 * @mac_address: mac address to set
 * @nvm_address: NVM mac address to set
 *
 * This function must be called upon mac address change.
 */
void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address);
/**
 * iwl_mei_set_country_code() - set new country code
 * @mcc: the new applied MCC
 *
 * This function must be called upon country code update
 */
void iwl_mei_set_country_code(u16 mcc);
/**
 * iwl_mei_set_power_limit() - set TX power limit
 * @power_limit: pointer to an array of 10 elements (le16) represents the power
 * restrictions per chain.
 *
 * This function must be called upon power restrictions change
 */
void iwl_mei_set_power_limit(const __le16 *power_limit);
/**
 * iwl_mei_register() - register the wifi driver to iwlmei
 * @priv: a pointer to the wifi driver's context. Cannot be NULL.
 * @ops: the ops structure.
 *
 * Return: 0 unless something went wrong. It is illegal to call any
 * other API function before this function is called and succeeds.
 *
 * Only one wifi driver instance (wifi device instance really)
 * can register at a time.
 */
int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops);
/**
 * iwl_mei_start_unregister() - unregister the wifi driver from iwlmei
 *
 * From this point on, iwlmei will not use the callbacks provided by
 * the driver, but the device is still usable.
 */
void iwl_mei_start_unregister(void);
/**
 * iwl_mei_unregister_complete() - complete the unregistration
 *
 * Must be called after iwl_mei_start_unregister. When this function returns,
 * the device is owned by CSME.
 */
void iwl_mei_unregister_complete(void);
/**
 * iwl_mei_set_netdev() - sets the netdev for Tx / Rx.
 * @netdev: the net_device
 *
 * The caller should set the netdev to a non-NULL value when the
 * interface is added. Packets might be sent to the driver immediately
 * afterwards.
 * The caller should set the netdev to NULL when the interface is removed.
 * This function will call synchronize_net() after setting the netdev to NULL.
 * Only when this function returns, can the caller assume that iwlmei will
 * no longer inject packets into the netdev's Tx path.
 *
 * Context: This function can sleep and assumes rtnl_lock is taken.
 * The netdev must be set to NULL before iwl_mei_start_unregister() is called.
 */
void iwl_mei_set_netdev(struct net_device *netdev);
/**
 * iwl_mei_tx_copy_to_csme() - must be called for each packet sent by
 * the wifi driver.
 * @skb: the skb sent
 * @ivlen: the size of the IV that needs to be skipped after the MAC and
 * before the SNAP header.
 *
 * This function doesn't take any lock, it simply tries to catch DHCP
 * packets sent by the wifi driver. If the packet is a DHCP packet, it
 * will send it to CSME. This function must not be called for virtual
 * interfaces that are not monitored by CSME, meaning it must be called
 * only for packets transmitted by the netdevice that was registered
 * with iwl_mei_set_netdev().
 */
void iwl_mei_tx_copy_to_csme(struct sk_buff *skb, unsigned int ivlen);
/**
 * iwl_mei_host_associated() - must be called when iwlwifi associated.
 * @conn_info: pointer to the connection info structure.
 * @colloc_info: pointer to the collocated AP info. This is relevant only in
 * case of UHB associated AP, otherwise set to NULL.
 */
void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info,
const struct iwl_mei_colloc_info *colloc_info);
/**
 * iwl_mei_host_disassociated() - must be called when iwlwifi disassociated.
 */
void iwl_mei_host_disassociated(void);
/**
 * iwl_mei_device_down() - must be called when the device is down
 */
void iwl_mei_device_down(void);
#else
/*
 * Stubs used when CONFIG_IWLMEI is disabled: behave as if no CSME/SAP
 * connection can ever exist, so callers need no #ifdefs of their own.
 */
static inline bool iwl_mei_is_connected(void)
{ return false; }

static inline struct iwl_mei_nvm *iwl_mei_get_nvm(void)
{ return NULL; }

static inline int iwl_mei_get_ownership(void)
{ return 0; }

static inline void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill)
{}

static inline void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address)
{}

static inline void iwl_mei_set_country_code(u16 mcc)
{}

/* const added to match the CONFIG_IWLMEI=y declaration of this function */
static inline void iwl_mei_set_power_limit(const __le16 *power_limit)
{}

static inline int iwl_mei_register(void *priv,
				   const struct iwl_mei_ops *ops)
{ return 0; }

static inline void iwl_mei_start_unregister(void)
{}

static inline void iwl_mei_unregister_complete(void)
{}

static inline void iwl_mei_set_netdev(struct net_device *netdev)
{}

static inline void iwl_mei_tx_copy_to_csme(struct sk_buff *skb,
					   unsigned int ivlen)
{}

static inline void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info,
					   const struct iwl_mei_colloc_info *colloc_info)
{}

static inline void iwl_mei_host_disassociated(void)
{}

static inline void iwl_mei_device_down(void)
{}
#endif /* CONFIG_IWLMEI */
#endif /* __iwl_mei_h__ */

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -0,0 +1,409 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 Intel Corporation
*/
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_arp.h>
#include <uapi/linux/icmp.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
#include <net/ip.h>
#include <linux/if_arp.h>
#include <linux/icmp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/mm.h>
#include "internal.h"
#include "sap.h"
#include "iwl-mei.h"
/*
 * Layer-2 (ethernet destination address) filtering.
 *
 * Returns true if further filtering should be stopped. Only in that case
 * pass_to_csme and rx_handler_res are set. Otherwise, next level of filters
 * should be checked.
 */
static bool iwl_mei_rx_filter_eth(const struct ethhdr *ethhdr,
const struct iwl_sap_oob_filters *filters,
bool *pass_to_csme,
rx_handler_result_t *rx_handler_res)
{
const struct iwl_sap_eth_filter *filt;
/*
 * This filter is not relevant for UCAST packet.
 * Note: broadcast frames also skip these filters, only non-broadcast
 * multicast is matched against the eth filter table below.
 */
if (!is_multicast_ether_addr(ethhdr->h_dest) ||
is_broadcast_ether_addr(ethhdr->h_dest))
return false;
for (filt = &filters->eth_filters[0];
filt < &filters->eth_filters[0] + ARRAY_SIZE(filters->eth_filters);
filt++) {
/* Assume there are no enabled filter after a disabled one */
if (!(filt->flags & SAP_ETH_FILTER_ENABLED))
break;
/* Destination MAC doesn't match this filter: try the next one */
if (compare_ether_header(filt->mac_address, ethhdr->h_dest))
continue;
/* Packet needs to reach the host's stack */
if (filt->flags & SAP_ETH_FILTER_COPY)
*rx_handler_res = RX_HANDLER_PASS;
else
*rx_handler_res = RX_HANDLER_CONSUMED;
/* We have an authoritative answer, stop filtering */
if (filt->flags & SAP_ETH_FILTER_STOP) {
*pass_to_csme = true;
return true;
}
return false;
}
/* MCAST frames that don't match layer 2 filters are not sent to ME */
*pass_to_csme = false;
return true;
}
/*
 * ARP filtering: decide whether an ARP frame must be forwarded to CSME.
 *
 * Returns true iff the frame should be passed to CSME in which case
 * rx_handler_res is set.
 */
static bool iwl_mei_rx_filter_arp(struct sk_buff *skb,
				  const struct iwl_sap_oob_filters *filters,
				  rx_handler_result_t *rx_handler_res)
{
	const struct iwl_sap_ipv4_filter *filt = &filters->ipv4_filter;
	const struct arphdr *arp;
	const __be32 *target_ip;
	u32 flags = le32_to_cpu(filt->flags);

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return false;

	arp = arp_hdr(skb);

	/* Handle only IPv4 over ethernet ARP frames */
	if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
	    arp->ar_pro != htons(ETH_P_IP))
		return false;

	/*
	 * After the ARP header, we have:
	 * src MAC address    - 6 bytes
	 * src IP address     - 4 bytes
	 * target MAC address - 6 bytes
	 */
	target_ip = (const void *)((const u8 *)(arp + 1) +
				   ETH_ALEN + sizeof(__be32) + ETH_ALEN);

	/*
	 * ARP request is forwarded to ME only if IP address match in the
	 * ARP request's target ip field.
	 * Use the already CPU-endian 'flags' here, consistently with the
	 * ARP reply check below (was filt->flags & cpu_to_le32(...)).
	 */
	if (arp->ar_op == htons(ARPOP_REQUEST) &&
	    (flags & SAP_IPV4_FILTER_ARP_REQ_PASS) &&
	    (filt->ipv4_addr == 0 || filt->ipv4_addr == *target_ip)) {
		if (flags & SAP_IPV4_FILTER_ARP_REQ_COPY)
			*rx_handler_res = RX_HANDLER_PASS;
		else
			*rx_handler_res = RX_HANDLER_CONSUMED;

		return true;
	}

	/* ARP reply is always forwarded to ME regardless of the IP */
	if (flags & SAP_IPV4_FILTER_ARP_RESP_PASS &&
	    arp->ar_op == htons(ARPOP_REPLY)) {
		if (flags & SAP_IPV4_FILTER_ARP_RESP_COPY)
			*rx_handler_res = RX_HANDLER_PASS;
		else
			*rx_handler_res = RX_HANDLER_CONSUMED;

		return true;
	}

	return false;
}
/*
 * Flex (TCP/UDP port based) filtering.
 * Returns true iff the frame matched an enabled flex filter, in which case
 * rx_handler_res is set. ip_match tells whether the IP-level address check
 * already succeeded for this frame.
 */
static bool
iwl_mei_rx_filter_tcp_udp(struct sk_buff *skb, bool ip_match,
const struct iwl_sap_oob_filters *filters,
rx_handler_result_t *rx_handler_res)
{
const struct iwl_sap_flex_filter *filt;
for (filt = &filters->flex_filters[0];
filt < &filters->flex_filters[0] + ARRAY_SIZE(filters->flex_filters);
filt++) {
/* Assume there are no enabled filters after a disabled one */
if (!(filt->flags & SAP_FLEX_FILTER_ENABLED))
break;
/*
 * We are required to have a match on the IP level and we didn't
 * have such match.
 */
if ((filt->flags &
(SAP_FLEX_FILTER_IPV4 | SAP_FLEX_FILTER_IPV6)) &&
!ip_match)
continue;
if ((filt->flags & SAP_FLEX_FILTER_UDP) &&
ip_hdr(skb)->protocol != IPPROTO_UDP)
continue;
if ((filt->flags & SAP_FLEX_FILTER_TCP) &&
ip_hdr(skb)->protocol != IPPROTO_TCP)
continue;
/*
 * We must have either a TCP header or a UDP header, both
 * starts with a source port and then a destination port.
 * Both are big endian words.
 * Use a UDP header and that will work for TCP as well.
 */
if ((filt->src_port && filt->src_port != udp_hdr(skb)->source) ||
(filt->dst_port && filt->dst_port != udp_hdr(skb)->dest))
continue;
if (filt->flags & SAP_FLEX_FILTER_COPY)
*rx_handler_res = RX_HANDLER_PASS;
else
*rx_handler_res = RX_HANDLER_CONSUMED;
return true;
}
return false;
}
/*
 * IPv4 filtering: dispatch to the UDP/TCP flex filters or handle
 * ICMP/ICMPv6-over-IPv4 directly.
 * Returns true iff the frame should be passed to CSME, in which case
 * rx_handler_res is set.
 */
static bool iwl_mei_rx_filter_ipv4(struct sk_buff *skb,
				   const struct iwl_sap_oob_filters *filters,
				   rx_handler_result_t *rx_handler_res)
{
	const struct iwl_sap_ipv4_filter *filt = &filters->ipv4_filter;
	const struct iphdr *iphdr;
	unsigned int iphdrlen;
	bool match;

	/*
	 * First make sure the fixed IP header is in the linear area (needed
	 * before ip_hdrlen() can read ihl), then pull the full header
	 * including options.
	 * Fix: the original second check used
	 * sizeof(ip_hdrlen(skb) - sizeof(*iphdr)), which is just
	 * sizeof(size_t) and never validated the options area.
	 */
	if (!pskb_may_pull(skb, skb_network_offset(skb) + sizeof(*iphdr)) ||
	    !pskb_may_pull(skb, skb_network_offset(skb) + ip_hdrlen(skb)))
		return false;

	iphdrlen = ip_hdrlen(skb);
	iphdr = ip_hdr(skb);
	/* 0 in the filter means "any destination address" */
	match = !filters->ipv4_filter.ipv4_addr ||
		filters->ipv4_filter.ipv4_addr == iphdr->daddr;
	skb_set_transport_header(skb, skb_network_offset(skb) + iphdrlen);

	switch (ip_hdr(skb)->protocol) {
	case IPPROTO_UDP:
	case IPPROTO_TCP:
		/*
		 * UDP header is shorter than TCP header and we look at the
		 * first bytes of the header anyway (see below).
		 * If we have a truncated TCP packet, let CSME handle this.
		 */
		if (!pskb_may_pull(skb, skb_transport_offset(skb) +
				   sizeof(struct udphdr)))
			return false;

		return iwl_mei_rx_filter_tcp_udp(skb, match,
						 filters, rx_handler_res);

	case IPPROTO_ICMP: {
		struct icmphdr *icmp;

		if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(*icmp)))
			return false;

		icmp = icmp_hdr(skb);

		/*
		 * Don't pass echo requests to ME even if it wants it as we
		 * want the host to answer.
		 */
		if ((filt->flags & cpu_to_le32(SAP_IPV4_FILTER_ICMP_PASS)) &&
		    match && (icmp->type != ICMP_ECHO || icmp->code != 0)) {
			if (filt->flags & cpu_to_le32(SAP_IPV4_FILTER_ICMP_COPY))
				*rx_handler_res = RX_HANDLER_PASS;
			else
				*rx_handler_res = RX_HANDLER_CONSUMED;

			return true;
		}
		break;
		}
	case IPPROTO_ICMPV6:
		/* TODO: Should we have the same ICMP request logic here too? */
		if ((filters->icmpv6_flags & cpu_to_le32(SAP_ICMPV6_FILTER_ENABLED) &&
		     match)) {
			if (filters->icmpv6_flags &
			    cpu_to_le32(SAP_ICMPV6_FILTER_COPY))
				*rx_handler_res = RX_HANDLER_PASS;
			else
				*rx_handler_res = RX_HANDLER_CONSUMED;

			return true;
		}
		break;
	default:
		return false;
	}

	return false;
}
/*
 * IPv6 filtering is not implemented yet: never hand the frame to CSME and
 * let the host's stack process it normally.
 */
static bool iwl_mei_rx_filter_ipv6(struct sk_buff *skb,
				   const struct iwl_sap_oob_filters *filters,
				   rx_handler_result_t *rx_handler_res)
{
	/* TODO: apply the SAP IPv6 filters once defined */
	*rx_handler_res = RX_HANDLER_PASS;

	return false;
}
/*
 * Run the inbound frame through the L2 filter and then the per-ethertype
 * filter. Sets *pass_to_csme and returns the rx_handler verdict for the
 * host's stack (PASS = stack also sees the frame, CONSUMED = it doesn't).
 */
static rx_handler_result_t
iwl_mei_rx_pass_to_csme(struct sk_buff *skb,
const struct iwl_sap_oob_filters *filters,
bool *pass_to_csme)
{
const struct ethhdr *ethhdr = (void *)skb_mac_header(skb);
rx_handler_result_t rx_handler_res = RX_HANDLER_PASS;
bool (*filt_handler)(struct sk_buff *skb,
const struct iwl_sap_oob_filters *filters,
rx_handler_result_t *rx_handler_res);
/*
 * skb->data points the IP header / ARP header and the ETH header
 * is in the headroom.
 */
skb_reset_network_header(skb);
/*
 * MCAST IP packets sent by us are received again here without
 * an ETH header. Drop them here.
 */
if (!skb_mac_offset(skb))
return RX_HANDLER_PASS;
/* Sanity check: the full ethernet header must be in the headroom */
if (skb_headroom(skb) < sizeof(*ethhdr))
return RX_HANDLER_PASS;
/* L2 filter first; it may give an authoritative answer */
if (iwl_mei_rx_filter_eth(ethhdr, filters,
pass_to_csme, &rx_handler_res))
return rx_handler_res;
/* Then dispatch by ethertype */
switch (skb->protocol) {
case htons(ETH_P_IP):
filt_handler = iwl_mei_rx_filter_ipv4;
break;
case htons(ETH_P_ARP):
filt_handler = iwl_mei_rx_filter_arp;
break;
case htons(ETH_P_IPV6):
filt_handler = iwl_mei_rx_filter_ipv6;
break;
default:
/* Unknown ethertype: never forwarded to CSME */
*pass_to_csme = false;
return rx_handler_res;
}
*pass_to_csme = filt_handler(skb, filters, &rx_handler_res);
return rx_handler_res;
}
/*
 * Entry point of the Rx filtering: decide whether the frame goes to CSME
 * and/or to the host's stack, and queue a copy for CSME when needed.
 * Returns the rx_handler verdict for the host's stack.
 */
rx_handler_result_t iwl_mei_rx_filter(struct sk_buff *orig_skb,
				      const struct iwl_sap_oob_filters *filters,
				      bool *pass_to_csme)
{
	rx_handler_result_t ret;
	struct sk_buff *skb;

	ret = iwl_mei_rx_pass_to_csme(orig_skb, filters, pass_to_csme);

	if (!*pass_to_csme)
		return RX_HANDLER_PASS;

	if (ret == RX_HANDLER_PASS) {
		skb = skb_copy(orig_skb, GFP_ATOMIC);

		/*
		 * Fix: skb_copy() with GFP_ATOMIC can fail; without this
		 * check skb_push() below would dereference NULL. Under
		 * memory pressure just let the stack handle the original.
		 */
		if (!skb)
			return RX_HANDLER_PASS;
	} else {
		skb = orig_skb;
	}

	/* CSME wants the MAC header as well, push it back */
	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 * Add the packet that CSME wants to get to the ring. Don't send the
	 * Check Shared Area HECI message since this is not possible from the
	 * Rx context. The caller will schedule a worker to do just that.
	 */
	iwl_mei_add_data_to_ring(skb, false);

	/*
	 * In case we drop the packet, don't free it, the caller will do that
	 * for us
	 */
	if (ret == RX_HANDLER_PASS)
		dev_kfree_skb(skb);

	return ret;
}
#define DHCP_SERVER_PORT 67
#define DHCP_CLIENT_PORT 68

/*
 * Copy outbound DHCP client packets to CSME so that it can learn the IP
 * address the host obtains. All other packets are ignored.
 */
void iwl_mei_tx_copy_to_csme(struct sk_buff *origskb, unsigned int ivlen)
{
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;
	struct ethhdr ethhdr;
	struct ethhdr *eth;

	/* Catch DHCP packets */
	if (origskb->protocol != htons(ETH_P_IP) ||
	    ip_hdr(origskb)->protocol != IPPROTO_UDP ||
	    udp_hdr(origskb)->source != htons(DHCP_CLIENT_PORT) ||
	    udp_hdr(origskb)->dest != htons(DHCP_SERVER_PORT))
		return;

	/*
	 * We could be a bit less aggressive here and not copy everything, but
	 * this is very rare anyway, so don't bother much.
	 */
	skb = skb_copy(origskb, GFP_ATOMIC);
	if (!skb)
		return;

	skb->protocol = origskb->protocol;

	hdr = (void *)skb->data;

	memcpy(ethhdr.h_dest, ieee80211_get_DA(hdr), ETH_ALEN);
	memcpy(ethhdr.h_source, ieee80211_get_SA(hdr), ETH_ALEN);

	/*
	 * Remove the ieee80211 header + IV + SNAP but leave the ethertype
	 * We still have enough headroom for the sap header.
	 * Fix: pskb_pull() returns NULL when the skb is shorter than the
	 * requested length; bail out instead of queuing an invalid skb.
	 */
	if (!pskb_pull(skb, ieee80211_hdrlen(hdr->frame_control) + ivlen + 6)) {
		dev_kfree_skb(skb);
		return;
	}

	eth = skb_push(skb, sizeof(ethhdr.h_dest) + sizeof(ethhdr.h_source));
	memcpy(eth, &ethhdr, sizeof(ethhdr.h_dest) + sizeof(ethhdr.h_source));

	iwl_mei_add_data_to_ring(skb, true);

	dev_kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(iwl_mei_tx_copy_to_csme);

Просмотреть файл

@ -0,0 +1,733 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 Intel Corporation
*/
#ifndef __sap_h__
#define __sap_h__
#include "mei/iwl-mei.h"
/**
* DOC: Introduction
*
* SAP is the protocol used by the Intel Wireless driver (iwlwifi)
* and the wireless driver implemented in the CSME firmware.
* It allows to do several things:
* 1) Decide who is the owner of the device: CSME or the host
* 2) When the host is the owner of the device, CSME can still
* send and receive packets through iwlwifi.
*
* The protocol uses the ME interface (mei driver) to send
* messages to the CSME firmware. Those messages have a header
* &struct iwl_sap_me_msg_hdr and this header is followed
* by a payload.
*
* Since this messaging system cannot support high amounts of
* traffic, iwlwifi and the CSME firmware's WLAN driver have an
* additional communication pipe to exchange information. The body
* of the message is copied to a shared area and the message that
* goes over the ME interface just signals the other side
* that a new message is waiting in the shared area. The ME
* interface is used only for signaling and not to transfer
* the payload.
*
* This shared area of memory is DMA'able mapped to be
* writable by both the CSME firmware and iwlwifi. It is
* mapped to address space of the device that controls the ME
* interface's DMA engine. Any data that iwlwifi needs to
* send to the CSME firmware needs to be copied to there.
*/
/**
* DOC: Initial Handshake
*
* Once we get a link to the CSME's WLAN driver we start the handshake
* to establish the shared memory that will allow the communication between
* the CSME's WLAN driver and the host.
*
* 1) Host sends %SAP_ME_MSG_START message with the physical address
* of the shared area.
* 2) CSME replies with %SAP_ME_MSG_START_OK which includes the
* protocol versions supported by CSME.
*/
/**
* DOC: Host and driver state messages
*
* In order to let CSME know about the host state and the host driver state,
* the host sends messages that let CSME know about the host's state.
* When the host driver is loaded, the host sends %SAP_MSG_NOTIF_WIFIDR_UP.
* When the host driver is unloaded, the host sends %SAP_MSG_NOTIF_WIFIDR_DOWN.
* When the iwlmei is unloaded, %SAP_MSG_NOTIF_HOST_GOES_DOWN is sent to let
* CSME know not to access the shared memory anymore since it'll be freed.
*
* CSME will reply to SAP_MSG_NOTIF_WIFIDR_UP by
* %SAP_MSG_NOTIF_AMT_STATE to let the host driver know whether CSME can use the
* WiFi device or not followed by %SAP_MSG_NOTIF_CSME_CONN_STATUS to inform
* the host driver on the connection state of CSME.
*
* When host is associated to an AP, it must send %SAP_MSG_NOTIF_HOST_LINK_UP
* and when it disconnects from the AP, it must send
* %SAP_MSG_NOTIF_HOST_LINK_DOWN.
*/
/**
* DOC: Ownership
*
* The device can be controlled either by the CSME firmware or
* by the host driver: iwlwifi. There is a negotiation between
* those two entities to determine who controls (or owns) the
* device. Since the CSME can control the device even when the
* OS is not working or even missing, the CSME can request the
* device if it comes to the conclusion that the OS's host driver
* is not operational. This is why the host driver needs to
* signal CSME that it is up and running. If the driver is
* unloaded, it'll signal CSME that it is going down so that
* CSME can take ownership.
*/
/**
* DOC: Ownership transfer
*
* When the host driver needs the device, it'll send the
* %SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP that will be replied by
* %SAP_MSG_NOTIF_CSME_REPLY_TO_HOST_OWNERSHIP_REQ which will let the
* host know whether the ownership is granted or not. If the ownership is
* granted, the hosts sends %SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED.
*
* When CSME requests ownership, it'll send the
* %SAP_MSG_NOTIF_CSME_TAKING_OWNERSHIP and give some time to host to stop
* accessing the device. The host needs to send
* %SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED to confirm that it won't access
* the device anymore. If the host failed to send this message fast enough,
* CSME will take ownership on the device anyway.
* When CSME is willing to release the ownership, it'll send
* %SAP_MSG_NOTIF_CSME_CAN_RELEASE_OWNERSHIP.
*/
/**
* DOC: Data messages
*
* Data messages must be sent and received on a separate queue in the shared
* memory. Almost all the data messages use the %SAP_MSG_DATA_PACKET for both
* packets sent by CSME to the host to be sent to the AP or for packets
* received from the AP and sent by the host to CSME.
* CSME sends filters to the host to let the host know what inbound packets it
* must
* send to CSME. Those filters are received by the host as a
* %SAP_MSG_NOTIF_CSME_FILTERS command.
* The only outbound packets that must be sent to CSME are the DHCP packets.
* Those packets must use the %SAP_MSG_CB_DATA_PACKET message.
*/
/**
 * enum iwl_sap_me_msg_id - the ID of the ME message
 * @SAP_ME_MSG_START: See &struct iwl_sap_me_msg_start.
 * @SAP_ME_MSG_START_OK: See &struct iwl_sap_me_msg_start_ok.
 * @SAP_ME_MSG_CHECK_SHARED_AREA: This message has no payload.
 *
 * These are the only messages that travel over the ME interface itself;
 * everything else goes through the shared memory area.
 */
enum iwl_sap_me_msg_id {
SAP_ME_MSG_START = 1,
SAP_ME_MSG_START_OK,
SAP_ME_MSG_CHECK_SHARED_AREA,
};
/**
 * struct iwl_sap_me_msg_hdr - the header of the ME message
 * @type: the type of the message, see &enum iwl_sap_me_msg_id.
 * @seq_num: a sequence number used for debug only.
 * @len: the length of the message.
 */
struct iwl_sap_me_msg_hdr {
__le32 type;
__le32 seq_num;
__le32 len;
} __packed;
/**
 * struct iwl_sap_me_msg_start - used for the %SAP_ME_MSG_START message
 * @hdr: See &struct iwl_sap_me_msg_hdr.
 * @shared_mem: physical address of SAP shared memory area.
 * @init_data_seq_num: seq_num of the first data packet HOST -> CSME.
 * @init_notif_seq_num: seq_num of the first notification HOST -> CSME.
 * @supported_versions: The host sends to the CSME a zero-terminated array
 * of versions it supports.
 *
 * This message is sent by the host to CSME and will be answered by the
 * %SAP_ME_MSG_START_OK message.
 */
struct iwl_sap_me_msg_start {
struct iwl_sap_me_msg_hdr hdr;
__le64 shared_mem;
__le16 init_data_seq_num;
__le16 init_notif_seq_num;
u8 supported_versions[64];
} __packed;
/**
 * struct iwl_sap_me_msg_start_ok - used for the %SAP_ME_MSG_START_OK
 * @hdr: See &struct iwl_sap_me_msg_hdr
 * @init_data_seq_num: Not used.
 * @init_notif_seq_num: Not used.
 * @supported_version: The version that will be used.
 * @reserved: For alignment.
 *
 * This message is sent by CSME to the host in response to the
 * %SAP_ME_MSG_START message.
 */
struct iwl_sap_me_msg_start_ok {
struct iwl_sap_me_msg_hdr hdr;
__le16 init_data_seq_num;
__le16 init_notif_seq_num;
u8 supported_version;
u8 reserved[3];
} __packed;
/**
 * enum iwl_sap_msg - SAP messages
 * @SAP_MSG_NOTIF_BOTH_WAYS_MIN: Not used.
 * @SAP_MSG_NOTIF_PING: No payload. Solicits a response message (check-alive).
 * @SAP_MSG_NOTIF_PONG: No payload. The response message.
 * @SAP_MSG_NOTIF_BOTH_WAYS_MAX: Not used.
 *
 * @SAP_MSG_NOTIF_FROM_CSME_MIN: Not used.
 * @SAP_MSG_NOTIF_CSME_FILTERS: TODO
 * @SAP_MSG_NOTIF_AMT_STATE: Payload is a DW. Any non-zero value means
 * that CSME is enabled.
 * @SAP_MSG_NOTIF_CSME_REPLY_TO_HOST_OWNERSHIP_REQ: Payload is a DW. 0 means
 * the host will not get ownership. Any other value means the host is
 * the owner.
 * @SAP_MSG_NOTIF_CSME_TAKING_OWNERSHIP: No payload.
 * @SAP_MSG_NOTIF_TRIGGER_IP_REFRESH: No payload.
 * @SAP_MSG_NOTIF_CSME_CAN_RELEASE_OWNERSHIP: No payload.
 * @SAP_MSG_NOTIF_NIC_OWNER: Payload is a DW. See &enum iwl_sap_nic_owner.
 * @SAP_MSG_NOTIF_CSME_CONN_STATUS: See &struct iwl_sap_notif_conn_status.
 * @SAP_MSG_NOTIF_NVM: See &struct iwl_sap_nvm.
 * @SAP_MSG_NOTIF_FROM_CSME_MAX: Not used.
 *
 * @SAP_MSG_NOTIF_FROM_HOST_MIN: Not used.
 * @SAP_MSG_NOTIF_BAND_SELECTION: TODO
 * @SAP_MSG_NOTIF_RADIO_STATE: Payload is a DW.
 * See &enum iwl_sap_radio_state_bitmap.
 * @SAP_MSG_NOTIF_NIC_INFO: See &struct iwl_sap_notif_host_nic_info.
 * @SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP: No payload.
 * @SAP_MSG_NOTIF_HOST_SUSPENDS: Payload is a DW. Bitmap described in
 * &enum iwl_sap_notif_host_suspends_bitmap.
 * @SAP_MSG_NOTIF_HOST_RESUMES: Payload is a DW. 0 or 1. 1 says that
 * the CSME should re-initialize the init control block.
 * @SAP_MSG_NOTIF_HOST_GOES_DOWN: No payload.
 * @SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED: No payload.
 * @SAP_MSG_NOTIF_COUNTRY_CODE: See &struct iwl_sap_notif_country_code.
 * @SAP_MSG_NOTIF_HOST_LINK_UP: See &struct iwl_sap_notif_host_link_up.
 * @SAP_MSG_NOTIF_HOST_LINK_DOWN: See &struct iwl_sap_notif_host_link_down.
 * @SAP_MSG_NOTIF_WHO_OWNS_NIC: No payload.
 * @SAP_MSG_NOTIF_WIFIDR_DOWN: No payload.
 * @SAP_MSG_NOTIF_WIFIDR_UP: No payload.
 * @SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED: No payload.
 * @SAP_MSG_NOTIF_SAR_LIMITS: See &struct iwl_sap_notif_sar_limits.
 * @SAP_MSG_NOTIF_GET_NVM: No payload. Triggers %SAP_MSG_NOTIF_NVM.
 * @SAP_MSG_NOTIF_FROM_HOST_MAX: Not used.
 *
 * @SAP_MSG_DATA_MIN: Not used.
 * @SAP_MSG_DATA_PACKET: Packets that passed the filters defined by
 * %SAP_MSG_NOTIF_CSME_FILTERS. The payload is &struct iwl_sap_hdr with
 * the payload of the packet immediately afterwards.
 * @SAP_MSG_CB_DATA_PACKET: Indicates to CSME that we transmitted a specific
 * packet. Used only for DHCP transmitted packets. See
 * &struct iwl_sap_cb_data.
 * @SAP_MSG_DATA_MAX: Not used.
 *
 * Values are part of the SAP wire protocol and must not be renumbered;
 * gaps are deprecated message IDs.
 */
enum iwl_sap_msg {
SAP_MSG_NOTIF_BOTH_WAYS_MIN = 0,
SAP_MSG_NOTIF_PING = 1,
SAP_MSG_NOTIF_PONG = 2,
SAP_MSG_NOTIF_BOTH_WAYS_MAX,
SAP_MSG_NOTIF_FROM_CSME_MIN = 500,
SAP_MSG_NOTIF_CSME_FILTERS = SAP_MSG_NOTIF_FROM_CSME_MIN,
/* 501 is deprecated */
SAP_MSG_NOTIF_AMT_STATE = 502,
SAP_MSG_NOTIF_CSME_REPLY_TO_HOST_OWNERSHIP_REQ = 503,
SAP_MSG_NOTIF_CSME_TAKING_OWNERSHIP = 504,
SAP_MSG_NOTIF_TRIGGER_IP_REFRESH = 505,
SAP_MSG_NOTIF_CSME_CAN_RELEASE_OWNERSHIP = 506,
/* 507 is deprecated */
/* 508 is deprecated */
/* 509 is deprecated */
/* 510 is deprecated */
SAP_MSG_NOTIF_NIC_OWNER = 511,
SAP_MSG_NOTIF_CSME_CONN_STATUS = 512,
SAP_MSG_NOTIF_NVM = 513,
SAP_MSG_NOTIF_FROM_CSME_MAX,
SAP_MSG_NOTIF_FROM_HOST_MIN = 1000,
SAP_MSG_NOTIF_BAND_SELECTION = SAP_MSG_NOTIF_FROM_HOST_MIN,
SAP_MSG_NOTIF_RADIO_STATE = 1001,
SAP_MSG_NOTIF_NIC_INFO = 1002,
SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP = 1003,
SAP_MSG_NOTIF_HOST_SUSPENDS = 1004,
SAP_MSG_NOTIF_HOST_RESUMES = 1005,
SAP_MSG_NOTIF_HOST_GOES_DOWN = 1006,
SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED = 1007,
SAP_MSG_NOTIF_COUNTRY_CODE = 1008,
SAP_MSG_NOTIF_HOST_LINK_UP = 1009,
SAP_MSG_NOTIF_HOST_LINK_DOWN = 1010,
SAP_MSG_NOTIF_WHO_OWNS_NIC = 1011,
SAP_MSG_NOTIF_WIFIDR_DOWN = 1012,
SAP_MSG_NOTIF_WIFIDR_UP = 1013,
/* 1014 is deprecated */
SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED = 1015,
SAP_MSG_NOTIF_SAR_LIMITS = 1016,
SAP_MSG_NOTIF_GET_NVM = 1017,
SAP_MSG_NOTIF_FROM_HOST_MAX,
SAP_MSG_DATA_MIN = 2000,
SAP_MSG_DATA_PACKET = SAP_MSG_DATA_MIN,
SAP_MSG_CB_DATA_PACKET = 2001,
SAP_MSG_DATA_MAX,
};
/**
 * struct iwl_sap_hdr - prefixes any SAP message
 * @type: See &enum iwl_sap_msg.
 * @len: The length of the message (header not included).
 * @seq_num: For debug.
 * @payload: The payload of the message.
 */
struct iwl_sap_hdr {
	__le16 type;
	__le16 len;
	__le32 seq_num;
	/* flexible array member instead of the deprecated payload[0] */
	u8 payload[];
};
/**
 * struct iwl_sap_msg_dw - suits any DW long SAP message
 * @hdr: The SAP header
 * @val: The value of the DW.
 *
 * Generic container for every SAP message whose payload is a single DW.
 */
struct iwl_sap_msg_dw {
struct iwl_sap_hdr hdr;
__le32 val;
};
/**
 * enum iwl_sap_nic_owner - used by %SAP_MSG_NOTIF_NIC_OWNER
 * @SAP_NIC_OWNER_UNKNOWN: Not used.
 * @SAP_NIC_OWNER_HOST: The host owns the NIC.
 * @SAP_NIC_OWNER_ME: CSME owns the NIC.
 */
enum iwl_sap_nic_owner {
SAP_NIC_OWNER_UNKNOWN,
SAP_NIC_OWNER_HOST,
SAP_NIC_OWNER_ME,
};
/**
 * enum iwl_sap_wifi_auth_type - AKM/AUTH methods on the SAP wire
 * @SAP_WIFI_AUTH_TYPE_OPEN: maps to %IWL_MEI_AKM_AUTH_OPEN
 * @SAP_WIFI_AUTH_TYPE_RSNA: maps to %IWL_MEI_AKM_AUTH_RSNA
 * @SAP_WIFI_AUTH_TYPE_RSNA_PSK: maps to %IWL_MEI_AKM_AUTH_RSNA_PSK
 * @SAP_WIFI_AUTH_TYPE_SAE: maps to %IWL_MEI_AKM_AUTH_SAE
 * @SAP_WIFI_AUTH_TYPE_MAX: highest valid value + 1
 *
 * Values alias &enum iwl_mei_akm_auth so no conversion is needed.
 */
enum iwl_sap_wifi_auth_type {
SAP_WIFI_AUTH_TYPE_OPEN = IWL_MEI_AKM_AUTH_OPEN,
SAP_WIFI_AUTH_TYPE_RSNA = IWL_MEI_AKM_AUTH_RSNA,
SAP_WIFI_AUTH_TYPE_RSNA_PSK = IWL_MEI_AKM_AUTH_RSNA_PSK,
SAP_WIFI_AUTH_TYPE_SAE = IWL_MEI_AKM_AUTH_SAE,
SAP_WIFI_AUTH_TYPE_MAX,
};
/**
 * enum iwl_sap_wifi_cipher_alg - cipher algorithms on the SAP wire
 * @SAP_WIFI_CIPHER_ALG_NONE: maps to %IWL_MEI_CIPHER_NONE
 * @SAP_WIFI_CIPHER_ALG_CCMP: maps to %IWL_MEI_CIPHER_CCMP
 * @SAP_WIFI_CIPHER_ALG_GCMP: maps to %IWL_MEI_CIPHER_GCMP
 * @SAP_WIFI_CIPHER_ALG_GCMP_256: maps to %IWL_MEI_CIPHER_GCMP_256
 *
 * Values alias &enum iwl_mei_pairwise_cipher so no conversion is needed.
 */
enum iwl_sap_wifi_cipher_alg {
SAP_WIFI_CIPHER_ALG_NONE = IWL_MEI_CIPHER_NONE,
SAP_WIFI_CIPHER_ALG_CCMP = IWL_MEI_CIPHER_CCMP,
SAP_WIFI_CIPHER_ALG_GCMP = IWL_MEI_CIPHER_GCMP,
SAP_WIFI_CIPHER_ALG_GCMP_256 = IWL_MEI_CIPHER_GCMP_256,
};
/**
 * struct iwl_sap_notif_connection_info - nested in other structures
 * @ssid_len: The length of the SSID.
 * @ssid: The SSID.
 * @auth_mode: The authentication mode. See &enum iwl_sap_wifi_auth_type.
 * @pairwise_cipher: The cipher used for unicast packets.
 * See &enum iwl_sap_wifi_cipher_alg.
 * @channel: The channel on which we are associated.
 * @band: The band on which we are associated.
 * @reserved: For alignment.
 * @bssid: The BSSID.
 * @reserved1: For alignment.
 *
 * Wire structure: layout and field sizes must not change.
 */
struct iwl_sap_notif_connection_info {
__le32 ssid_len;
u8 ssid[32];
__le32 auth_mode;
__le32 pairwise_cipher;
u8 channel;
u8 band;
__le16 reserved;
u8 bssid[6];
__le16 reserved1;
} __packed;
/**
 * enum iwl_sap_scan_request - for the scan_request field
 * @SCAN_REQUEST_FILTERING: Filtering is requested.
 * @SCAN_REQUEST_FAST: Fast scan is requested.
 *
 * Bits of the @scan_request field of &struct iwl_sap_notif_conn_status.
 */
enum iwl_sap_scan_request {
SCAN_REQUEST_FILTERING = 1 << 0,
SCAN_REQUEST_FAST = 1 << 1,
};
/**
* struct iwl_sap_notif_conn_status - payload of %SAP_MSG_NOTIF_CSME_CONN_STATUS
* @hdr: The SAP header
* @link_prot_state: Non-zero if link protection is active.
* @scan_request: See &enum iwl_sap_scan_request.
* @conn_info: Information about the connection.
*/
struct iwl_sap_notif_conn_status {
struct iwl_sap_hdr hdr;
__le32 link_prot_state;
__le32 scan_request;
struct iwl_sap_notif_connection_info conn_info;
} __packed;
/**
 * enum iwl_sap_radio_state_bitmap - used for %SAP_MSG_NOTIF_RADIO_STATE
 * @SAP_SW_RFKILL_DEASSERTED: If set, SW RfKill is de-asserted.
 * @SAP_HW_RFKILL_DEASSERTED: If set, HW RfKill is de-asserted.
 *
 * If both bits are set, then the radio is on.
 */
enum iwl_sap_radio_state_bitmap {
SAP_SW_RFKILL_DEASSERTED = 1 << 0,
SAP_HW_RFKILL_DEASSERTED = 1 << 1,
};

/**
 * enum iwl_sap_notif_host_suspends_bitmap - used for %SAP_MSG_NOTIF_HOST_SUSPENDS
 * @SAP_OFFER_NIC: TBD
 * @SAP_FILTER_CONFIGURED: TBD
 * @SAP_NLO_CONFIGURED: TBD
 * @SAP_HOST_OWNS_NIC: TBD
 * @SAP_LINK_PROTECTED: TBD
 *
 * NOTE(review): the per-bit semantics were left undocumented (TBD) upstream;
 * confirm them against the SAP specification before relying on this bitmap.
 */
enum iwl_sap_notif_host_suspends_bitmap {
SAP_OFFER_NIC = 1 << 0,
SAP_FILTER_CONFIGURED = 1 << 1,
SAP_NLO_CONFIGURED = 1 << 2,
SAP_HOST_OWNS_NIC = 1 << 3,
SAP_LINK_PROTECTED = 1 << 4,
};
/**
 * struct iwl_sap_notif_country_code - payload of %SAP_MSG_NOTIF_COUNTRY_CODE
 * @hdr: The SAP header
 * @mcc: The country code (mobile country code).
 * @source_id: The source of the country code. NOTE(review): appears to carry
 * the MCC source id from the FW MCC update response - confirm.
 * @reserved: For alignment.
 * @diff_time: TBD (undocumented upstream).
 */
struct iwl_sap_notif_country_code {
struct iwl_sap_hdr hdr;
__le16 mcc;
u8 source_id;
u8 reserved;
__le32 diff_time;
} __packed;
/**
 * struct iwl_sap_notif_host_link_up - payload of %SAP_MSG_NOTIF_HOST_LINK_UP
 * @hdr: The SAP header
 * @conn_info: Information about the connection.
 * @colloc_channel: The collocated channel.
 * @colloc_band: The band of the collocated channel.
 * @reserved: For alignment.
 * @colloc_bssid: The collocated BSSID.
 * @reserved1: For alignment.
 *
 * Tells CSME about the host's connection; the colloc_* fields describe a
 * collocated AP when relevant.
 */
struct iwl_sap_notif_host_link_up {
struct iwl_sap_hdr hdr;
struct iwl_sap_notif_connection_info conn_info;
u8 colloc_channel;
u8 colloc_band;
__le16 reserved;
u8 colloc_bssid[6];
__le16 reserved1;
} __packed;
/**
 * enum iwl_sap_notif_link_down_type - used in &struct iwl_sap_notif_host_link_down
 * @HOST_LINK_DOWN_TYPE_NONE: TBD
 * @HOST_LINK_DOWN_TYPE_TEMPORARY: TBD
 * @HOST_LINK_DOWN_TYPE_LONG: TBD
 *
 * NOTE(review): the values were left undocumented (TBD) upstream; presumably
 * they distinguish a short disconnection from a long-lasting one - confirm.
 */
enum iwl_sap_notif_link_down_type {
HOST_LINK_DOWN_TYPE_NONE,
HOST_LINK_DOWN_TYPE_TEMPORARY,
HOST_LINK_DOWN_TYPE_LONG,
};

/**
 * struct iwl_sap_notif_host_link_down - payload for %SAP_MSG_NOTIF_HOST_LINK_DOWN
 * @hdr: The SAP header
 * @type: See &enum iwl_sap_notif_link_down_type.
 * @reserved: For alignment.
 * @reason_valid: If 0, ignore the next field.
 * @reason: The reason of the disconnection.
 */
struct iwl_sap_notif_host_link_down {
struct iwl_sap_hdr hdr;
u8 type;
u8 reserved[2];
u8 reason_valid;
__le32 reason;
} __packed;
/**
 * struct iwl_sap_notif_host_nic_info - payload for %SAP_MSG_NOTIF_NIC_INFO
 * @hdr: The SAP header
 * @mac_address: The MAC address as configured to the interface.
 * @nvm_address: The MAC address as configured in the NVM.
 */
struct iwl_sap_notif_host_nic_info {
struct iwl_sap_hdr hdr;
u8 mac_address[6];
u8 nvm_address[6];
} __packed;

/**
 * struct iwl_sap_notif_dw - payload is a single DW
 * @hdr: The SAP header.
 * @dw: The payload.
 *
 * Packed counterpart of &struct iwl_sap_msg_dw.
 */
struct iwl_sap_notif_dw {
struct iwl_sap_hdr hdr;
__le32 dw;
} __packed;
/**
 * struct iwl_sap_notif_sar_limits - payload for %SAP_MSG_NOTIF_SAR_LIMITS
 * @hdr: The SAP header
 * @sar_chain_info_table: Tx power limits, indexed by chain.
 * NOTE(review): the meaning of the 5 entries per chain (presumably
 * sub-bands) is not documented here - confirm.
 */
struct iwl_sap_notif_sar_limits {
struct iwl_sap_hdr hdr;
__le16 sar_chain_info_table[2][5];
} __packed;

/**
 * enum iwl_sap_nvm_caps - capabilities for NVM SAP
 * @SAP_NVM_CAPS_LARI_SUPPORT: LARI is supported.
 * @SAP_NVM_CAPS_11AX_SUPPORT: 11AX (802.11ax) is supported.
 */
enum iwl_sap_nvm_caps {
SAP_NVM_CAPS_LARI_SUPPORT = BIT(0),
SAP_NVM_CAPS_11AX_SUPPORT = BIT(1),
};
/**
 * struct iwl_sap_nvm - payload for %SAP_MSG_NOTIF_NVM
 * @hdr: The SAP header.
 * @hw_addr: The MAC address.
 * @n_hw_addrs: The number of MAC addresses.
 * @reserved: For alignment.
 * @radio_cfg: The radio configuration.
 * @caps: Bitmap, see &enum iwl_sap_nvm_caps.
 * @nvm_version: The version of the NVM.
 * @channels: The data for each channel (110 entries).
 */
struct iwl_sap_nvm {
struct iwl_sap_hdr hdr;
u8 hw_addr[6];
u8 n_hw_addrs;
u8 reserved;
__le32 radio_cfg;
__le32 caps;
__le32 nvm_version;
__le32 channels[110];
} __packed;
/**
 * enum iwl_sap_eth_filter_flags - used in &struct iwl_sap_eth_filter
 * @SAP_ETH_FILTER_STOP: Do not process further filters.
 * @SAP_ETH_FILTER_COPY: Copy the packet to the CSME.
 * @SAP_ETH_FILTER_ENABLED: If false, the filter should be ignored.
 */
enum iwl_sap_eth_filter_flags {
SAP_ETH_FILTER_STOP = BIT(0),
SAP_ETH_FILTER_COPY = BIT(1),
SAP_ETH_FILTER_ENABLED = BIT(2),
};

/**
 * struct iwl_sap_eth_filter - an L2 (Ethernet MAC address) filter
 * @mac_address: Address to filter.
 * @flags: See &enum iwl_sap_eth_filter_flags.
 */
struct iwl_sap_eth_filter {
u8 mac_address[6];
u8 flags;
} __packed;
/**
 * enum iwl_sap_flex_filter_flags - used in &struct iwl_sap_flex_filter
 * @SAP_FLEX_FILTER_COPY: Pass UDP / TCP packets to CSME.
 * @SAP_FLEX_FILTER_ENABLED: If false, the filter should be ignored.
 * @SAP_FLEX_FILTER_IPV6: Filter requires match on the IP address as well.
 * @SAP_FLEX_FILTER_IPV4: Filter requires match on the IP address as well.
 * @SAP_FLEX_FILTER_TCP: Filter should be applied on TCP packets.
 * @SAP_FLEX_FILTER_UDP: Filter should be applied on UDP packets.
 */
enum iwl_sap_flex_filter_flags {
SAP_FLEX_FILTER_COPY = BIT(0),
SAP_FLEX_FILTER_ENABLED = BIT(1),
SAP_FLEX_FILTER_IPV6 = BIT(2),
SAP_FLEX_FILTER_IPV4 = BIT(3),
SAP_FLEX_FILTER_TCP = BIT(4),
SAP_FLEX_FILTER_UDP = BIT(5),
};

/**
 * struct iwl_sap_flex_filter - a UDP / TCP port filter
 * @src_port: Source port in network format.
 * @dst_port: Destination port in network format.
 * @flags: Flags and protocol, see &enum iwl_sap_flex_filter_flags.
 * @reserved: For alignment.
 */
struct iwl_sap_flex_filter {
__be16 src_port;
__be16 dst_port;
u8 flags;
u8 reserved;
} __packed;
/**
 * enum iwl_sap_ipv4_filter_flags - used in &struct iwl_sap_ipv4_filter
 * @SAP_IPV4_FILTER_ICMP_PASS: Pass ICMP packets to CSME.
 * @SAP_IPV4_FILTER_ICMP_COPY: Pass ICMP packets to host.
 * @SAP_IPV4_FILTER_ARP_REQ_PASS: Pass ARP requests to CSME.
 * @SAP_IPV4_FILTER_ARP_REQ_COPY: Pass ARP requests to host.
 * @SAP_IPV4_FILTER_ARP_RESP_PASS: Pass ARP responses to CSME.
 * @SAP_IPV4_FILTER_ARP_RESP_COPY: Pass ARP responses to host.
 */
enum iwl_sap_ipv4_filter_flags {
SAP_IPV4_FILTER_ICMP_PASS = BIT(0),
SAP_IPV4_FILTER_ICMP_COPY = BIT(1),
SAP_IPV4_FILTER_ARP_REQ_PASS = BIT(2),
SAP_IPV4_FILTER_ARP_REQ_COPY = BIT(3),
SAP_IPV4_FILTER_ARP_RESP_PASS = BIT(4),
SAP_IPV4_FILTER_ARP_RESP_COPY = BIT(5),
};

/**
 * struct iwl_sap_ipv4_filter - an IPv4 filter
 * @ipv4_addr: The IP address to filter.
 * @flags: See &enum iwl_sap_ipv4_filter_flags.
 */
struct iwl_sap_ipv4_filter {
__be32 ipv4_addr;
__le32 flags;
} __packed;
/**
 * enum iwl_sap_ipv6_filter_flags - used in &struct iwl_sap_ipv6_filter
 * @SAP_IPV6_ADDR_FILTER_COPY: Pass packets to the host.
 * @SAP_IPV6_ADDR_FILTER_ENABLED: If false, the filter should be ignored.
 */
enum iwl_sap_ipv6_filter_flags {
SAP_IPV6_ADDR_FILTER_COPY = BIT(0),
SAP_IPV6_ADDR_FILTER_ENABLED = BIT(1),
};

/**
 * struct iwl_sap_ipv6_filter - an IPv6 filter
 * @addr_lo24: Lowest 24 bits of the IPv6 address.
 * @flags: See &enum iwl_sap_ipv6_filter_flags.
 */
struct iwl_sap_ipv6_filter {
u8 addr_lo24[3];
u8 flags;
} __packed;
/**
 * enum iwl_sap_icmpv6_filter_flags - ICMPv6 filter flags
 * @SAP_ICMPV6_FILTER_ENABLED: If false, the filter should be ignored.
 * @SAP_ICMPV6_FILTER_COPY: Pass packets to the host.
 */
enum iwl_sap_icmpv6_filter_flags {
SAP_ICMPV6_FILTER_ENABLED = BIT(0),
SAP_ICMPV6_FILTER_COPY = BIT(1),
};

/**
 * enum iwl_sap_vlan_filter_flags - VLAN filter flags
 * @SAP_VLAN_FILTER_VLAN_ID_MSK: Mask of the 12-bit VLAN id.
 * @SAP_VLAN_FILTER_ENABLED: If false, the filter should be ignored.
 */
enum iwl_sap_vlan_filter_flags {
SAP_VLAN_FILTER_VLAN_ID_MSK = 0x0FFF,
SAP_VLAN_FILTER_ENABLED = BIT(15),
};
/**
 * struct iwl_sap_oob_filters - Out of band filters (for RX only)
 * @flex_filters: Array of &struct iwl_sap_flex_filter.
 * @icmpv6_flags: See &enum iwl_sap_icmpv6_filter_flags.
 * @ipv6_filters: Array of &struct iwl_sap_ipv6_filter.
 * @eth_filters: Array of &struct iwl_sap_eth_filter.
 * @reserved: For alignment.
 * @ipv4_filter: &struct iwl_sap_ipv4_filter.
 * @vlan: Array of VLAN filters, see &enum iwl_sap_vlan_filter_flags.
 */
struct iwl_sap_oob_filters {
struct iwl_sap_flex_filter flex_filters[14];
__le32 icmpv6_flags;
struct iwl_sap_ipv6_filter ipv6_filters[4];
struct iwl_sap_eth_filter eth_filters[5];
u8 reserved;
struct iwl_sap_ipv4_filter ipv4_filter;
__le16 vlan[4];
} __packed;
/**
 * struct iwl_sap_csme_filters - payload of %SAP_MSG_NOTIF_CSME_FILTERS
 * @hdr: The SAP header.
 * @mode: Not used.
 * @mac_address: Not used.
 * @reserved: For alignment.
 * @cbfilters: Not used.
 * @filters: Out of band filters.
 *
 * The unused fields are part of the packed wire format and must stay in
 * place to preserve the offset of @filters.
 */
struct iwl_sap_csme_filters {
struct iwl_sap_hdr hdr;
__le32 mode;
u8 mac_address[6];
__le16 reserved;
u8 cbfilters[1728];
struct iwl_sap_oob_filters filters;
} __packed;
/* Index of the DHCP filter bit inside to_me_filt_status (Tx path). */
#define CB_TX_DHCP_FILT_IDX 30
/**
 * struct iwl_sap_cb_data - header to be added for transmitted packets.
 * @hdr: The SAP header.
 * @reserved: Not used.
 * @to_me_filt_status: The filter that matches. Bit %CB_TX_DHCP_FILT_IDX should
 * be set for DHCP (the only packet that uses this header).
 * @reserved2: Not used.
 * @data_len: The length of the payload.
 * @payload: The payload of the transmitted packet.
 */
struct iwl_sap_cb_data {
struct iwl_sap_hdr hdr;
__le32 reserved[7];
__le32 to_me_filt_status;
__le32 reserved2;
__le32 data_len;
u8 payload[];
};
#endif /* __sap_h__ */

View file

@ -0,0 +1,82 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2021 Intel Corporation
*/
#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING)
#define trace_iwlmei_sap_data(...)
#else
#if !defined(__IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_DATA) || defined(TRACE_HEADER_MULTI_READ)
#ifndef __IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_DATA
/* Direction / disposition of a SAP data packet being traced. */
enum iwl_sap_data_trace_type {
IWL_SAP_RX_DATA_TO_AIR,
IWL_SAP_TX_DATA_FROM_AIR,
IWL_SAP_RX_DATA_DROPPED_FROM_AIR,
IWL_SAP_TX_DHCP,
};

/*
 * Return the byte offset at which the interesting payload starts in the
 * traced skb for a given trace type: 0 for Rx data to the air, the SAP
 * header size for (dropped) data from the air, and the full Tx
 * control-block header for DHCP packets. An unknown type triggers a
 * one-time warning and yields offset 0.
 */
static inline size_t
iwlmei_sap_data_offset(enum iwl_sap_data_trace_type trace_type)
{
switch (trace_type) {
case IWL_SAP_RX_DATA_TO_AIR:
return 0;
case IWL_SAP_TX_DATA_FROM_AIR:
case IWL_SAP_RX_DATA_DROPPED_FROM_AIR:
return sizeof(struct iwl_sap_hdr);
case IWL_SAP_TX_DHCP:
return sizeof(struct iwl_sap_cb_data);
default:
WARN_ON_ONCE(1);
}
return 0;
}
#endif
#define __IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_DATA
#include <linux/tracepoint.h>
#include <linux/skbuff.h>
#include "sap.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlmei_sap_data
/*
 * Trace a SAP data skb: capture the packet bytes past the per-type header
 * (see iwlmei_sap_data_offset()) together with the trace type.
 */
TRACE_EVENT(iwlmei_sap_data,
TP_PROTO(const struct sk_buff *skb,
enum iwl_sap_data_trace_type trace_type),
TP_ARGS(skb, trace_type),
TP_STRUCT__entry(
__dynamic_array(u8, data,
skb->len - iwlmei_sap_data_offset(trace_type))
__field(u32, trace_type)
),
TP_fast_assign(
size_t offset = iwlmei_sap_data_offset(trace_type);
__entry->trace_type = trace_type;
skb_copy_bits(skb, offset, __get_dynamic_array(data),
skb->len - offset);
),
TP_printk("sap_data:trace_type %d len %d",
__entry->trace_type, __get_dynamic_array_len(data))
);
/*
* If you add something here, add a stub in case
* !defined(CONFIG_IWLWIFI_DEVICE_TRACING)
*/
#endif /* __IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_DATA */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace-data
#include <trace/define_trace.h>
#endif /* CONFIG_IWLWIFI_DEVICE_TRACING */

View file

@ -0,0 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 Intel Corporation
*/
#include <linux/module.h>
/* sparse doesn't like tracepoint macros */
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-data.h"
#endif /* __CHECKER__ */

View file

@ -0,0 +1,76 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2021 Intel Corporation
*/
#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING)
#define trace_iwlmei_sap_cmd(...)
#define trace_iwlmei_me_msg(...)
#else
#if !defined(__IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_CMD) || defined(TRACE_HEADER_MULTI_READ)
#define __IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_CMD
#include <linux/tracepoint.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlmei_sap_cmd
#include "mei/sap.h"
/*
 * Trace a SAP command, Tx or Rx. The whole command (header + payload) is
 * captured; the len field in the SAP header does not include the header
 * itself, hence the + sizeof(*sap_cmd) in the array size and the memcpy.
 */
TRACE_EVENT(iwlmei_sap_cmd,
TP_PROTO(const struct iwl_sap_hdr *sap_cmd, bool tx),
TP_ARGS(sap_cmd, tx),
TP_STRUCT__entry(
__dynamic_array(u8, cmd,
le16_to_cpu(sap_cmd->len) + sizeof(*sap_cmd))
__field(u8, tx)
__field(u16, type)
__field(u16, len)
__field(u32, seq)
),
TP_fast_assign(
memcpy(__get_dynamic_array(cmd), sap_cmd,
le16_to_cpu(sap_cmd->len) + sizeof(*sap_cmd));
__entry->tx = tx;
__entry->type = le16_to_cpu(sap_cmd->type);
__entry->len = le16_to_cpu(sap_cmd->len);
__entry->seq = le32_to_cpu(sap_cmd->seq_num);
),
TP_printk("sap_cmd %s: type %d len %d seq %d", __entry->tx ? "Tx" : "Rx",
__entry->type, __entry->len, __entry->seq)
);
/*
 * Trace an ME message header, Tx or Rx.
 * NOTE(review): hdr->type is a 32-bit LE field stored into a u8 entry here,
 * so values above 255 would be truncated - confirm the type value range.
 */
TRACE_EVENT(iwlmei_me_msg,
TP_PROTO(const struct iwl_sap_me_msg_hdr *hdr, bool tx),
TP_ARGS(hdr, tx),
TP_STRUCT__entry(
__field(u8, type)
__field(u8, tx)
__field(u32, seq_num)
),
TP_fast_assign(
__entry->type = le32_to_cpu(hdr->type);
__entry->seq_num = le32_to_cpu(hdr->seq_num);
__entry->tx = tx;
),
TP_printk("ME message: %s: type %d seq %d", __entry->tx ? "Tx" : "Rx",
__entry->type, __entry->seq_num)
);
/*
* If you add something here, add a stub in case
* !defined(CONFIG_IWLWIFI_DEVICE_TRACING)
*/
#endif /* __IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_CMD */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>
#endif /* CONFIG_IWLWIFI_DEVICE_TRACING */

View file

@ -10,5 +10,6 @@ iwlmvm-y += rfi.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwlmvm-$(CONFIG_PM) += d3.o
iwlmvm-$(CONFIG_IWLMEI) += vendor-cmd.o
ccflags-y += -I $(srctree)/$(src)/../

View file

@ -757,6 +757,8 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
if (ret)
return ret;
iwl_mei_set_power_limit(per_chain);
IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
@ -1401,7 +1403,6 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
if (iwl_mvm_has_unified_ucode(mvm))
return iwl_run_unified_mvm_ucode(mvm);
WARN_ON(!mvm->nvm_data);
ret = iwl_run_init_mvm_ucode(mvm);
if (ret) {

View file

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -191,6 +191,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
if (IS_ERR_OR_NULL(resp)) {
IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
PTR_ERR_OR_ZERO(resp));
resp = NULL;
goto out;
}
@ -212,7 +213,6 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
__le16_to_cpu(resp->cap), resp_ver);
/* Store the return source id */
src_id = resp->source_id;
kfree(resp);
if (IS_ERR_OR_NULL(regd)) {
IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
PTR_ERR_OR_ZERO(regd));
@ -224,7 +224,10 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
mvm->lar_regdom_set = true;
mvm->mcc_src = src_id;
iwl_mei_set_country_code(__le16_to_cpu(resp->mcc));
out:
kfree(resp);
return regd;
}
@ -718,6 +721,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_PROTECTED_TWT);
iwl_mvm_vendor_cmds_register(mvm);
hw->wiphy->available_antennas_tx = iwl_mvm_get_valid_tx_ant(mvm);
hw->wiphy->available_antennas_rx = iwl_mvm_get_valid_rx_ant(mvm);
@ -1084,6 +1089,27 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
ret = iwl_mvm_mei_get_ownership(mvm);
if (ret)
return ret;
if (mvm->mei_nvm_data) {
/* We got the NIC, we can now free the MEI NVM data */
kfree(mvm->mei_nvm_data);
mvm->mei_nvm_data = NULL;
/*
* We can't free the nvm_data we allocated based on the SAP
* data because we registered to cfg80211 with the channels
* allocated on mvm->nvm_data. Keep a pointer in temp_nvm_data
* just in order to be able free it later.
* NULLify nvm_data so that we will read the NVM from the
* firmware this time.
*/
mvm->temp_nvm_data = mvm->nvm_data;
mvm->nvm_data = NULL;
}
if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
/*
* Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART
@ -1144,6 +1170,8 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
mutex_unlock(&mvm->mutex);
iwl_mvm_mei_set_sw_rfkill_state(mvm);
return ret;
}
@ -1261,6 +1289,8 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
*/
flush_work(&mvm->roc_done_wk);
iwl_mvm_mei_set_sw_rfkill_state(mvm);
mutex_lock(&mvm->mutex);
__iwl_mvm_mac_stop(mvm);
mutex_unlock(&mvm->mutex);
@ -1531,6 +1561,15 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
mvm->monitor_on = true;
iwl_mvm_vif_dbgfs_register(mvm, vif);
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
!mvm->csme_vif && mvm->mei_registered) {
iwl_mei_set_nic_info(vif->addr, mvm->nvm_data->hw_addr);
iwl_mei_set_netdev(ieee80211_vif_to_wdev(vif)->netdev);
mvm->csme_vif = vif;
}
goto out_unlock;
out_unbind:
@ -1583,6 +1622,11 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
if (vif == mvm->csme_vif) {
iwl_mei_set_netdev(NULL);
mvm->csme_vif = NULL;
}
probe_data = rcu_dereference_protected(mvmvif->probe_resp_data,
lockdep_is_held(&mvm->mutex));
RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL);
@ -2393,6 +2437,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IEEE80211_SMPS_DYNAMIC);
}
} else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
iwl_mvm_mei_host_disassociated(mvm);
/*
* If update fails - SF might be running in associated
* mode while disassociated - which is forbidden.
@ -3129,6 +3174,69 @@ static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm,
}
}
/*
 * Forward the parameters of a new host association to the iwlmei driver
 * (CSME), translating mac80211 cipher / AKM constants into their
 * IWL_MEI_* equivalents. Unsupported ciphers or AKMs are silently not
 * reported. No-op when built without CONFIG_IWLMEI, during a HW restart,
 * or when we are not registered to iwlmei.
 */
static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mvm_sta *mvm_sta)
{
#if IS_ENABLED(CONFIG_IWLMEI)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mei_conn_info conn_info = {
.ssid_len = vif->bss_conf.ssid_len,
.channel = vif->bss_conf.chandef.chan->hw_value,
};
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
return;
if (!mvm->mei_registered)
return;
switch (mvm_sta->pairwise_cipher) {
case WLAN_CIPHER_SUITE_CCMP:
conn_info.pairwise_cipher = IWL_MEI_CIPHER_CCMP;
break;
case WLAN_CIPHER_SUITE_GCMP:
conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP;
break;
case WLAN_CIPHER_SUITE_GCMP_256:
conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP_256;
break;
case 0:
/* open profile */
break;
default:
/* cipher not supported, don't send anything to iwlmei */
return;
}
/*
 * NOTE(review): the & 0xff below assumes rekey_data.akm holds only the
 * suite-type byte of the AKM selector - confirm against its setter.
 */
switch (mvmvif->rekey_data.akm) {
case WLAN_AKM_SUITE_SAE & 0xff:
conn_info.auth_mode = IWL_MEI_AKM_AUTH_SAE;
break;
case WLAN_AKM_SUITE_PSK & 0xff:
conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA_PSK;
break;
case WLAN_AKM_SUITE_8021X & 0xff:
conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA;
break;
case 0:
/* open profile */
conn_info.auth_mode = IWL_MEI_AKM_AUTH_OPEN;
break;
default:
/* auth method / AKM not supported */
/* TODO: All the FT versions of these? */
return;
}
memcpy(conn_info.ssid, vif->bss_conf.ssid, vif->bss_conf.ssid_len);
memcpy(conn_info.bssid, vif->bss_conf.bssid, ETH_ALEN);
/* TODO: add support for collocated AP data */
iwl_mei_host_associated(&conn_info, NULL);
#endif
}
static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@ -3273,6 +3381,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
* multicast data frames can be forwarded to the driver
*/
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
iwl_mvm_mei_host_associated(mvm, vif, mvm_sta);
}
iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
@ -3482,6 +3591,8 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
int ret, i;
u8 key_offset;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
if (!mvm->trans->trans_cfg->gen2) {
@ -3590,7 +3701,6 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
struct ieee80211_key_seq seq;
int tid, q;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx]));
ptk_pn = kzalloc(struct_size(ptk_pn, q,
mvm->trans->num_rx_queues),
@ -3617,6 +3727,9 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
else
key_offset = STA_KEY_IDX_INVALID;
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
mvmsta->pairwise_cipher = key->cipher;
IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
if (ret) {
@ -3662,7 +3775,6 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
(key->cipher == WLAN_CIPHER_SUITE_CCMP ||
key->cipher == WLAN_CIPHER_SUITE_GCMP ||
key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
mvmsta = iwl_mvm_sta_from_mac80211(sta);
ptk_pn = rcu_dereference_protected(
mvmsta->ptk_pn[keyidx],
lockdep_is_held(&mvm->mutex));

View file

@ -30,6 +30,7 @@
#include "fw/runtime.h"
#include "fw/dbg.h"
#include "fw/acpi.h"
#include "mei/iwl-mei.h"
#include "iwl-nvm-parse.h"
#include <linux/average.h>
@ -830,6 +831,18 @@ struct iwl_mvm {
const char *nvm_file_name;
struct iwl_nvm_data *nvm_data;
struct iwl_mei_nvm *mei_nvm_data;
struct iwl_mvm_csme_conn_info __rcu *csme_conn_info;
bool mei_rfkill_blocked;
bool mei_registered;
struct work_struct sap_connected_wk;
/*
* NVM built based on the SAP data but that we can't free even after
* we get ownership because it contains the cfg80211's channel.
*/
struct iwl_nvm_data *temp_nvm_data;
/* NVM sections */
struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
@ -1021,6 +1034,8 @@ struct iwl_mvm {
/* Indicate if 32Khz external clock is valid */
u32 ext_clock_valid;
/* This vif used by CSME to send / receive traffic */
struct ieee80211_vif *csme_vif;
struct ieee80211_vif __rcu *csa_vif;
struct ieee80211_vif __rcu *csa_tx_blocked_vif;
u8 csa_tx_block_bcn_timeout;
@ -1139,6 +1154,11 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_STARTING,
};
struct iwl_mvm_csme_conn_info {
struct rcu_head rcu_head;
struct iwl_mei_conn_info conn_info;
};
/* Keep track of completed init configuration */
enum iwl_mvm_init_status {
IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE = BIT(0),
@ -1942,6 +1962,17 @@ void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm);
int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm);
int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget);
#if IS_ENABLED(CONFIG_IWLMEI)
/* vendor commands */
void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm);
#else
static inline void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm) {}
#endif
/* Location Aware Regulatory */
struct iwl_mcc_update_resp *
iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
@ -2161,4 +2192,47 @@ enum iwl_location_cipher iwl_mvm_cipher_to_location_cipher(u32 cipher)
return IWL_LOCATION_CIPHER_INVALID;
}
}
struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm);
/* Ask iwlmei for ownership of the NIC; returns 0 if not registered. */
static inline int iwl_mvm_mei_get_ownership(struct iwl_mvm *mvm)
{
if (mvm->mei_registered)
return iwl_mei_get_ownership();
return 0;
}

/* Mirror a Tx skb to CSME via iwlmei (no-op if not registered). */
static inline void iwl_mvm_mei_tx_copy_to_csme(struct iwl_mvm *mvm,
struct sk_buff *skb,
unsigned int ivlen)
{
if (mvm->mei_registered)
iwl_mei_tx_copy_to_csme(skb, ivlen);
}

/* Tell iwlmei that the host is no longer associated. */
static inline void iwl_mvm_mei_host_disassociated(struct iwl_mvm *mvm)
{
if (mvm->mei_registered)
iwl_mei_host_disassociated();
}

/* Tell iwlmei that the device went down. */
static inline void iwl_mvm_mei_device_down(struct iwl_mvm *mvm)
{
if (mvm->mei_registered)
iwl_mei_device_down();
}

/*
 * Report the current SW and HW rfkill state to iwlmei. The SW rfkill
 * state can only be read from the wiphy once we registered to mac80211,
 * hence the hw_registered check.
 */
static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm)
{
bool sw_rfkill =
mvm->hw_registered ? rfkill_blocked(mvm->hw->wiphy->rfkill) : false;
if (mvm->mei_registered)
iwl_mei_set_rfkill_state(iwl_mvm_is_radio_killed(mvm),
sw_rfkill);
}
void iwl_mvm_send_roaming_forbidden_event(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool forbidden);
#endif /* __IWL_MVM_H__ */

View file

@ -683,14 +683,43 @@ static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
{
struct iwl_trans *trans = mvm->trans;
int ret;
if (trans->csme_own) {
if (WARN(!mvm->mei_registered,
"csme is owner, but we aren't registered to iwlmei\n"))
goto get_nvm_from_fw;
mvm->mei_nvm_data = iwl_mei_get_nvm();
if (mvm->mei_nvm_data) {
/*
* mvm->mei_nvm_data is set and because of that,
* we'll load the NVM from the FW when we'll get
* ownership.
*/
mvm->nvm_data =
iwl_parse_mei_nvm_data(trans, trans->cfg,
mvm->mei_nvm_data, mvm->fw);
return 0;
}
IWL_ERR(mvm,
"Got a NULL NVM from CSME, trying to get it from the device\n");
}
get_nvm_from_fw:
rtnl_lock();
wiphy_lock(mvm->hw->wiphy);
mutex_lock(&mvm->mutex);
ret = iwl_run_init_mvm_ucode(mvm);
ret = iwl_trans_start_hw(mvm->trans);
if (ret) {
mutex_unlock(&mvm->mutex);
return ret;
}
ret = iwl_run_init_mvm_ucode(mvm);
if (ret && ret != -ERFKILL)
iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
if (!ret && iwl_mvm_is_lar_supported(mvm)) {
@ -705,7 +734,7 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
wiphy_unlock(mvm->hw->wiphy);
rtnl_unlock();
if (ret < 0)
if (ret)
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
return ret;
@ -713,6 +742,7 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm)
{
struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
int ret;
iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
@ -720,10 +750,17 @@ static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm)
ret = iwl_mvm_mac_setup_register(mvm);
if (ret)
return ret;
mvm->hw_registered = true;
iwl_mvm_dbgfs_register(mvm);
wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy,
mvm->mei_rfkill_blocked,
RFKILL_HARD_BLOCK_NOT_OWNER);
iwl_mvm_mei_set_sw_rfkill_state(mvm);
return 0;
}
@ -904,6 +941,109 @@ static const struct iwl_dump_sanitize_ops iwl_mvm_sanitize_ops = {
.frob_mem = iwl_mvm_frob_mem,
};
/*
 * iwlmei callback: CSME reported a new connection status. Publish a copy
 * RCU-style in mvm->csme_conn_info and free the previous snapshot after a
 * grace period. On allocation failure the previous snapshot is kept.
 */
static void iwl_mvm_me_conn_status(void *priv, const struct iwl_mei_conn_info *conn_info)
{
struct iwl_mvm *mvm = priv;
struct iwl_mvm_csme_conn_info *prev_conn_info, *curr_conn_info;
/*
 * This is protected by the guarantee that this function will not be
 * called twice on two different threads.
 */
prev_conn_info = rcu_dereference_protected(mvm->csme_conn_info, true);
curr_conn_info = kzalloc(sizeof(*curr_conn_info), GFP_KERNEL);
if (!curr_conn_info)
return;
curr_conn_info->conn_info = *conn_info;
rcu_assign_pointer(mvm->csme_conn_info, curr_conn_info);
if (prev_conn_info)
kfree_rcu(prev_conn_info, rcu_head);
}
/*
 * iwlmei callback: CSME asserted / de-asserted its rfkill. Cache the state
 * (it is also applied when we register to mac80211 later) and, if mac80211
 * is already registered, propagate it to the wiphy immediately.
 */
static void iwl_mvm_mei_rfkill(void *priv, bool blocked)
{
struct iwl_mvm *mvm = priv;
mvm->mei_rfkill_blocked = blocked;
if (!mvm->hw_registered)
return;
wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy,
mvm->mei_rfkill_blocked,
RFKILL_HARD_BLOCK_NOT_OWNER);
}

/*
 * iwlmei callback: CSME (dis)allows roaming. Forwarded to userspace as a
 * vendor event on the CSME vif, when one exists.
 */
static void iwl_mvm_mei_roaming_forbidden(void *priv, bool forbidden)
{
struct iwl_mvm *mvm = priv;
if (!mvm->hw_registered || !mvm->csme_vif)
return;
iwl_mvm_send_roaming_forbidden_event(mvm, mvm->csme_vif, forbidden);
}
/*
 * Deferred start worker: runs once the SAP connection with CSME is
 * established, to retry the NVM read and mac80211 registration that were
 * postponed at op_mode start time. On failure, unregister from iwlmei and
 * tear down everything op_mode start had set up.
 */
static void iwl_mvm_sap_connected_wk(struct work_struct *wk)
{
struct iwl_mvm *mvm =
container_of(wk, struct iwl_mvm, sap_connected_wk);
int ret;
ret = iwl_mvm_start_get_nvm(mvm);
if (ret)
goto out_free;
ret = iwl_mvm_start_post_nvm(mvm);
if (ret)
goto out_free;
return;
out_free:
IWL_ERR(mvm, "Couldn't get started...\n");
iwl_mei_start_unregister();
iwl_mei_unregister_complete();
iwl_fw_flush_dumps(&mvm->fwrt);
iwl_mvm_thermal_exit(mvm);
iwl_fw_runtime_free(&mvm->fwrt);
iwl_phy_db_free(mvm->phy_db);
kfree(mvm->scan_cmd);
iwl_trans_op_mode_leave(mvm->trans);
kfree(mvm->nvm_data);
kfree(mvm->mei_nvm_data);
ieee80211_free_hw(mvm->hw);
}
/*
 * iwlmei callback: SAP connection established. If we could not register to
 * mac80211 at op_mode start, retry via the deferred worker.
 */
static void iwl_mvm_mei_sap_connected(void *priv)
{
struct iwl_mvm *mvm = priv;
if (!mvm->hw_registered)
schedule_work(&mvm->sap_connected_wk);
}

/* iwlmei callback: CSME took the NIC from us - shut down all interfaces. */
static void iwl_mvm_mei_nic_stolen(void *priv)
{
struct iwl_mvm *mvm = priv;
rtnl_lock();
cfg80211_shutdown_all_interfaces(mvm->hw->wiphy);
rtnl_unlock();
}

/* Callbacks iwlmei invokes on us; passed to iwl_mei_register(). */
static const struct iwl_mei_ops mei_ops = {
.me_conn_status = iwl_mvm_me_conn_status,
.rfkill = iwl_mvm_mei_rfkill,
.roaming_forbidden = iwl_mvm_mei_roaming_forbidden,
.sap_connected = iwl_mvm_mei_sap_connected,
.nic_stolen = iwl_mvm_mei_nic_stolen,
};
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
@ -915,9 +1055,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
static const u8 no_reclaim_cmds[] = {
TX_CMD,
};
int err, scan_size;
int scan_size;
u32 min_backoff;
enum iwl_amsdu_size rb_size_default;
struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
/*
* We use IWL_MVM_STATION_COUNT_MAX to check the validity of the station
@ -1017,6 +1158,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
INIT_WORK(&mvm->sap_connected_wk, iwl_mvm_sap_connected_wk);
INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
@ -1139,10 +1281,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
IWL_DEBUG_EEPROM(mvm->trans->dev,
"working without external nvm file\n");
err = iwl_trans_start_hw(mvm->trans);
if (err)
goto out_free;
scan_size = iwl_mvm_scan_size(mvm);
mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
@ -1167,8 +1305,14 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->debugfs_dir = dbgfs_dir;
if (iwl_mvm_start_get_nvm(mvm))
goto out_thermal_exit;
mvm->mei_registered = !iwl_mei_register(mvm, &mei_ops);
/*
* Get NVM failed, but we are registered to MEI, we'll get
* the NVM later when it'll be possible to get it from CSME.
*/
if (iwl_mvm_start_get_nvm(mvm) && mvm->mei_registered)
return op_mode;
if (iwl_mvm_start_post_nvm(mvm))
goto out_thermal_exit;
@ -1177,6 +1321,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
out_thermal_exit:
iwl_mvm_thermal_exit(mvm);
if (mvm->mei_registered) {
iwl_mei_start_unregister();
iwl_mei_unregister_complete();
}
out_free:
iwl_fw_flush_dumps(&mvm->fwrt);
iwl_fw_runtime_free(&mvm->fwrt);
@ -1203,6 +1351,7 @@ void iwl_mvm_stop_device(struct iwl_mvm *mvm)
iwl_trans_stop_device(mvm->trans);
iwl_free_fw_paging(&mvm->fwrt);
iwl_fw_dump_conf_clear(&mvm->fwrt);
iwl_mvm_mei_device_down(mvm);
}
static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
@ -1210,11 +1359,33 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
int i;
if (mvm->mei_registered) {
rtnl_lock();
iwl_mei_set_netdev(NULL);
rtnl_unlock();
iwl_mei_start_unregister();
}
/*
* After we unregister from mei, the worker can't be scheduled
* anymore.
*/
cancel_work_sync(&mvm->sap_connected_wk);
iwl_mvm_leds_exit(mvm);
iwl_mvm_thermal_exit(mvm);
ieee80211_unregister_hw(mvm->hw);
/*
* If we couldn't get ownership on the device and we couldn't
* get the NVM from CSME, we haven't registered to mac80211.
* In that case, we didn't fail op_mode_start, because we are
* waiting for CSME to allow us to get the NVM to register to
* mac80211. If that didn't happen, we haven't registered to
* mac80211, hence the if below.
*/
if (mvm->hw_registered)
ieee80211_unregister_hw(mvm->hw);
kfree(mvm->scan_cmd);
kfree(mvm->mcast_filter_cmd);
@ -1229,6 +1400,9 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
mvm->phy_db = NULL;
kfree(mvm->nvm_data);
kfree(mvm->mei_nvm_data);
kfree(rcu_access_pointer(mvm->csme_conn_info));
kfree(mvm->temp_nvm_data);
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
@ -1237,6 +1411,9 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
iwl_fw_runtime_free(&mvm->fwrt);
mutex_destroy(&mvm->mutex);
if (mvm->mei_registered)
iwl_mei_unregister_complete();
ieee80211_free_hw(mvm->hw);
}
@ -1519,6 +1696,12 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
iwl_mvm_set_rfkill_state(mvm);
}
/*
 * iwl_mvm_get_csme_conn_info - fetch the CSME connection info snapshot
 *
 * Returns the RCU-protected csme_conn_info pointer (may be NULL if CSME
 * never reported a connection).  Must be called with mvm->mutex held,
 * which is what the lockdep expression below asserts.
 */
struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm)
{
return rcu_dereference_protected(mvm->csme_conn_info,
lockdep_is_held(&mvm->mutex));
}
static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

Просмотреть файл

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
@ -373,6 +373,7 @@ struct iwl_mvm_rxq_dup_data {
* @tx_ant: the index of the antenna to use for data tx to this station. Only
* used during connection establishment (e.g. for the 4 way handshake
* exchange).
* @pairwise_cipher: used to feed iwlmei upon authorization
*
* When mac80211 creates a station it reserves some space (hw->sta_data_size)
* in the structure for use by driver. This structure is placed in that
@ -415,6 +416,7 @@ struct iwl_mvm_sta {
u8 sleep_tx_count;
u8 avg_energy;
u8 tx_ant;
u32 pairwise_cipher;
};
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data);

Просмотреть файл

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -1128,6 +1128,11 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
/* From now on, we cannot access info->control */
iwl_mvm_skb_prepare_status(skb, dev_cmd);
if (ieee80211_is_data(fc))
iwl_mvm_mei_tx_copy_to_csme(mvm, skb,
info->control.hw_key ?
info->control.hw_key->iv_len : 0);
if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
goto drop_unlock_sta;

Просмотреть файл

@ -0,0 +1,151 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 Intel Corporation
*/
#include "mvm.h"
#include <linux/nl80211-vnd-intel.h>
/*
 * Netlink attribute validation policy for the Intel (iwlmei) vendor
 * commands.  Each entry constrains the type (and, for binary blobs,
 * the maximum length) of the corresponding IWL_MVM_VENDOR_ATTR_*.
 */
static const struct nla_policy
iwl_mvm_vendor_attr_policy[NUM_IWL_MVM_VENDOR_ATTR] = {
[IWL_MVM_VENDOR_ATTR_ROAMING_FORBIDDEN] = { .type = NLA_U8 },
[IWL_MVM_VENDOR_ATTR_AUTH_MODE] = { .type = NLA_U32 },
[IWL_MVM_VENDOR_ATTR_CHANNEL_NUM] = { .type = NLA_U8 },
[IWL_MVM_VENDOR_ATTR_SSID] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_SSID_LEN },
[IWL_MVM_VENDOR_ATTR_BAND] = { .type = NLA_U8 },
[IWL_MVM_VENDOR_ATTR_COLLOC_CHANNEL] = { .type = NLA_U8 },
[IWL_MVM_VENDOR_ATTR_COLLOC_ADDR] = { .type = NLA_BINARY, .len = ETH_ALEN },
};
static int iwl_mvm_vendor_get_csme_conn_info(struct wiphy *wiphy,
struct wireless_dev *wdev,
const void *data, int data_len)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_csme_conn_info *csme_conn_info;
struct sk_buff *skb;
int err = 0;
mutex_lock(&mvm->mutex);
csme_conn_info = iwl_mvm_get_csme_conn_info(mvm);
if (!csme_conn_info) {
err = -EINVAL;
goto out_unlock;
}
skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 200);
if (!skb) {
err = -ENOMEM;
goto out_unlock;
}
if (nla_put_u32(skb, IWL_MVM_VENDOR_ATTR_AUTH_MODE,
csme_conn_info->conn_info.auth_mode) ||
nla_put(skb, IWL_MVM_VENDOR_ATTR_SSID,
csme_conn_info->conn_info.ssid_len,
csme_conn_info->conn_info.ssid) ||
nla_put_u32(skb, IWL_MVM_VENDOR_ATTR_STA_CIPHER,
csme_conn_info->conn_info.pairwise_cipher) ||
nla_put_u8(skb, IWL_MVM_VENDOR_ATTR_CHANNEL_NUM,
csme_conn_info->conn_info.channel) ||
nla_put(skb, IWL_MVM_VENDOR_ATTR_ADDR, ETH_ALEN,
csme_conn_info->conn_info.bssid)) {
kfree_skb(skb);
err = -ENOBUFS;
}
out_unlock:
mutex_unlock(&mvm->mutex);
if (err)
return err;
return cfg80211_vendor_cmd_reply(skb);
}
/*
 * Vendor command handler: request device ownership back from CSME.
 * The actual handshake is done by iwl_mvm_mei_get_ownership() under
 * the mvm mutex; this command always reports success to userspace.
 */
static int iwl_mvm_vendor_host_get_ownership(struct wiphy *wiphy,
					     struct wireless_dev *wdev,
					     const void *data, int data_len)
{
	struct iwl_mvm *mvm =
		IWL_MAC80211_GET_MVM(wiphy_to_ieee80211_hw(wiphy));

	mutex_lock(&mvm->mutex);
	iwl_mvm_mei_get_ownership(mvm);
	mutex_unlock(&mvm->mutex);

	return 0;
}
/*
 * Intel vendor commands exposed via nl80211.  Both commands require a
 * wdev (WIPHY_VENDOR_CMD_NEED_WDEV) and validate attributes against
 * iwl_mvm_vendor_attr_policy.
 */
static const struct wiphy_vendor_command iwl_mvm_vendor_commands[] = {
{
.info = {
.vendor_id = INTEL_OUI,
.subcmd = IWL_MVM_VENDOR_CMD_GET_CSME_CONN_INFO,
},
.doit = iwl_mvm_vendor_get_csme_conn_info,
.flags = WIPHY_VENDOR_CMD_NEED_WDEV,
.policy = iwl_mvm_vendor_attr_policy,
.maxattr = MAX_IWL_MVM_VENDOR_ATTR,
},
{
.info = {
.vendor_id = INTEL_OUI,
.subcmd = IWL_MVM_VENDOR_CMD_HOST_GET_OWNERSHIP,
},
.doit = iwl_mvm_vendor_host_get_ownership,
.flags = WIPHY_VENDOR_CMD_NEED_WDEV,
.policy = iwl_mvm_vendor_attr_policy,
.maxattr = MAX_IWL_MVM_VENDOR_ATTR,
},
};
/*
 * Indexes into iwl_mvm_vendor_events.  Index 4 is the first valid one;
 * 0x0 - 0x3 were used by older (now deprecated) events and must stay
 * reserved so existing userspace keeps working.
 */
enum iwl_mvm_vendor_events_idx {
/* 0x0 - 0x3 are deprecated */
IWL_MVM_VENDOR_EVENT_IDX_ROAMING_FORBIDDEN = 4,
NUM_IWL_MVM_VENDOR_EVENT_IDX
};

/* Vendor event table advertised to cfg80211; indexed by the enum above. */
static const struct nl80211_vendor_cmd_info
iwl_mvm_vendor_events[NUM_IWL_MVM_VENDOR_EVENT_IDX] = {
[IWL_MVM_VENDOR_EVENT_IDX_ROAMING_FORBIDDEN] = {
.vendor_id = INTEL_OUI,
.subcmd = IWL_MVM_VENDOR_CMD_ROAMING_FORBIDDEN_EVENT,
},
};
void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm)
{
mvm->hw->wiphy->vendor_commands = iwl_mvm_vendor_commands;
mvm->hw->wiphy->n_vendor_commands = ARRAY_SIZE(iwl_mvm_vendor_commands);
mvm->hw->wiphy->vendor_events = iwl_mvm_vendor_events;
mvm->hw->wiphy->n_vendor_events = ARRAY_SIZE(iwl_mvm_vendor_events);
}
/*
 * Send the ROAMING_FORBIDDEN vendor event for @vif to userspace.
 *
 * Fix vs. original: @vif was dereferenced (ieee80211_vif_to_wdev())
 * *before* the WARN_ON(!vif) NULL check, and the early return after
 * that check leaked the already-allocated event skb.  Validate @vif
 * first, then allocate.
 */
void iwl_mvm_send_roaming_forbidden_event(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  bool forbidden)
{
	struct sk_buff *msg;

	/* vif is used below (wdev lookup and vif->addr) - check it first */
	if (WARN_ON(!vif))
		return;

	msg = cfg80211_vendor_event_alloc(mvm->hw->wiphy,
					  ieee80211_vif_to_wdev(vif),
					  200, IWL_MVM_VENDOR_EVENT_IDX_ROAMING_FORBIDDEN,
					  GFP_ATOMIC);
	if (!msg)
		return;

	if (nla_put(msg, IWL_MVM_VENDOR_ATTR_VIF_ADDR,
		    ETH_ALEN, vif->addr) ||
	    nla_put_u8(msg, IWL_MVM_VENDOR_ATTR_ROAMING_FORBIDDEN, forbidden))
		goto nla_put_failure;

	cfg80211_vendor_event(msg, GFP_ATOMIC);
	return;

nla_put_failure:
	kfree_skb(msg);
}

Просмотреть файл

@ -1426,15 +1426,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* first trying to load the firmware etc. and potentially only
* detecting any problems when the first interface is brought up.
*/
ret = iwl_finish_nic_init(iwl_trans);
if (ret)
goto out_free_trans;
if (iwl_trans_grab_nic_access(iwl_trans)) {
/* all good */
iwl_trans_release_nic_access(iwl_trans);
} else {
ret = -EIO;
goto out_free_trans;
ret = iwl_pcie_prepare_card_hw(iwl_trans);
if (!ret) {
ret = iwl_finish_nic_init(iwl_trans);
if (ret)
goto out_free_trans;
if (iwl_trans_grab_nic_access(iwl_trans)) {
/* all good */
iwl_trans_release_nic_access(iwl_trans);
} else {
ret = -EIO;
goto out_free_trans;
}
}
iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID);
@ -1569,6 +1572,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_trans;
pci_set_drvdata(pdev, iwl_trans);
/* try to get ownership so that we'll know if we don't own it */
iwl_pcie_prepare_card_hw(iwl_trans);
iwl_trans->drv = iwl_drv_start(iwl_trans);
if (IS_ERR(iwl_trans->drv)) {

Просмотреть файл

@ -24,6 +24,7 @@
#include "fw/error-dump.h"
#include "fw/dbg.h"
#include "fw/api/tx.h"
#include "mei/iwl-mei.h"
#include "internal.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"
@ -594,8 +595,10 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
ret = iwl_pcie_set_hw_ready(trans);
/* If the card is ready, exit 0 */
if (ret >= 0)
if (ret >= 0) {
trans->csme_own = false;
return 0;
}
iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
@ -608,8 +611,22 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
do {
ret = iwl_pcie_set_hw_ready(trans);
if (ret >= 0)
if (ret >= 0) {
trans->csme_own = false;
return 0;
}
if (iwl_mei_is_connected()) {
IWL_DEBUG_INFO(trans,
"Couldn't prepare the card but SAP is connected\n");
trans->csme_own = true;
if (trans->trans_cfg->device_family !=
IWL_DEVICE_FAMILY_9000)
IWL_ERR(trans,
"SAP not supported for this NIC family\n");
return -EBUSY;
}
usleep_range(200, 1000);
t += 200;

Просмотреть файл

@ -1815,8 +1815,9 @@ static int prism2_tx_80211(struct sk_buff *skb, struct net_device *dev)
memset(&txdesc, 0, sizeof(txdesc));
/* skb->data starts with txdesc->frame_control */
hdr_len = 24;
skb_copy_from_linear_data(skb, &txdesc.frame_control, hdr_len);
hdr_len = sizeof(txdesc.header);
BUILD_BUG_ON(hdr_len != 24);
skb_copy_from_linear_data(skb, &txdesc.header, hdr_len);
if (ieee80211_is_data(txdesc.frame_control) &&
ieee80211_has_a4(txdesc.frame_control) &&
skb->len >= 30) {

Просмотреть файл

@ -115,12 +115,14 @@ struct hfa384x_tx_frame {
__le16 tx_control; /* HFA384X_TX_CTRL_ flags */
/* 802.11 */
__le16 frame_control; /* parts not used */
__le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN]; /* filled by firmware */
u8 addr3[ETH_ALEN];
__le16 seq_ctrl; /* filled by firmware */
struct_group(header,
__le16 frame_control; /* parts not used */
__le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN]; /* filled by firmware */
u8 addr3[ETH_ALEN];
__le16 seq_ctrl; /* filled by firmware */
);
u8 addr4[ETH_ALEN];
__le16 data_len;

Просмотреть файл

@ -308,10 +308,12 @@ struct txpd {
__le32 tx_packet_location;
/* Tx packet length */
__le16 tx_packet_length;
/* First 2 byte of destination MAC address */
u8 tx_dest_addr_high[2];
/* Last 4 byte of destination MAC address */
u8 tx_dest_addr_low[4];
struct_group(tx_dest_addr,
/* First 2 byte of destination MAC address */
u8 tx_dest_addr_high[2];
/* Last 4 byte of destination MAC address */
u8 tx_dest_addr_low[4];
);
/* Pkt Priority */
u8 priority;
/* Pkt Trasnit Power control */

Просмотреть файл

@ -113,6 +113,7 @@ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
p802x_hdr = skb->data;
pkt_len = skb->len;
BUILD_BUG_ON(sizeof(txpd->tx_dest_addr) != ETH_ALEN);
if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) {
struct tx_radiotap_hdr *rtap_hdr = (void *)skb->data;
@ -124,10 +125,10 @@ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
pkt_len -= sizeof(*rtap_hdr);
/* copy destination address from 802.11 header */
memcpy(txpd->tx_dest_addr_high, p802x_hdr + 4, ETH_ALEN);
memcpy(&txpd->tx_dest_addr, p802x_hdr + 4, ETH_ALEN);
} else {
/* copy destination address from 802.3 header */
memcpy(txpd->tx_dest_addr_high, p802x_hdr, ETH_ALEN);
memcpy(&txpd->tx_dest_addr, p802x_hdr, ETH_ALEN);
}
txpd->tx_packet_length = cpu_to_le16(pkt_len);

Просмотреть файл

@ -268,10 +268,12 @@ struct txpd {
__le32 tx_packet_location;
/* Tx packet length */
__le16 tx_packet_length;
/* First 2 byte of destination MAC address */
u8 tx_dest_addr_high[2];
/* Last 4 byte of destination MAC address */
u8 tx_dest_addr_low[4];
struct_group(tx_dest_addr,
/* First 2 byte of destination MAC address */
u8 tx_dest_addr_high[2];
/* Last 4 byte of destination MAC address */
u8 tx_dest_addr_low[4];
);
/* Pkt Priority */
u8 priority;
/* Pkt Trasnit Power control */

Просмотреть файл

@ -232,7 +232,8 @@ static void lbtf_tx_work(struct work_struct *work)
ieee80211_get_tx_rate(priv->hw, info)->hw_value);
/* copy destination address from 802.11 header */
memcpy(txpd->tx_dest_addr_high, skb->data + sizeof(struct txpd) + 4,
BUILD_BUG_ON(sizeof(txpd->tx_dest_addr) != ETH_ALEN);
memcpy(&txpd->tx_dest_addr, skb->data + sizeof(struct txpd) + 4,
ETH_ALEN);
txpd->tx_packet_length = cpu_to_le16(len);
txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd));

Просмотреть файл

@ -2071,9 +2071,11 @@ struct mwifiex_ie_types_robust_coex {
__le32 mode;
} __packed;
#define MWIFIEX_VERSION_STR_LENGTH 128
struct host_cmd_ds_version_ext {
u8 version_str_sel;
char version_str[128];
char version_str[MWIFIEX_VERSION_STR_LENGTH];
} __packed;
struct host_cmd_ds_mgmt_frame_reg {

Просмотреть файл

@ -226,6 +226,23 @@ exit_rx_proc:
return 0;
}
/*
 * Query the extended firmware version so the command-response path can
 * decide whether deep sleep must be disabled for this hardware revision
 * (quirk for certain Surface Book 2 chips - see mwifiex_ret_ver_ext()).
 *
 * The MWIFIEX_IS_REQUESTING_FW_VEREXT flag marks the in-flight query so
 * the response handler knows this VERSION_EXT was issued by the quirk
 * check and not by userspace; test_and_set also makes the probe one-shot.
 */
static void maybe_quirk_fw_disable_ds(struct mwifiex_adapter *adapter)
{
struct mwifiex_private *priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
struct mwifiex_ver_ext ver_ext;
if (test_and_set_bit(MWIFIEX_IS_REQUESTING_FW_VEREXT, &adapter->work_flags))
return;
memset(&ver_ext, 0, sizeof(ver_ext));
ver_ext.version_str_sel = 1;
if (mwifiex_send_cmd(priv, HostCmd_CMD_VERSION_EXT,
HostCmd_ACT_GEN_GET, 0, &ver_ext, false)) {
mwifiex_dbg(priv->adapter, MSG,
"Checking hardware revision failed.\n");
}
}
/*
* The main process.
*
@ -356,6 +373,7 @@ process_start:
if (adapter->hw_status == MWIFIEX_HW_STATUS_INIT_DONE) {
adapter->hw_status = MWIFIEX_HW_STATUS_READY;
mwifiex_init_fw_complete(adapter);
maybe_quirk_fw_disable_ds(adapter);
}
}

Просмотреть файл

@ -524,6 +524,7 @@ enum mwifiex_adapter_work_flags {
MWIFIEX_IS_SUSPENDED,
MWIFIEX_IS_HS_CONFIGURED,
MWIFIEX_IS_HS_ENABLING,
MWIFIEX_IS_REQUESTING_FW_VEREXT,
};
struct mwifiex_band_config {
@ -646,7 +647,7 @@ struct mwifiex_private {
struct wireless_dev wdev;
struct mwifiex_chan_freq_power cfp;
u32 versionstrsel;
char version_str[128];
char version_str[MWIFIEX_VERSION_STR_LENGTH];
#ifdef CONFIG_DEBUG_FS
struct dentry *dfs_dev_dir;
#endif
@ -1055,6 +1056,8 @@ struct mwifiex_adapter {
void *devdump_data;
int devdump_len;
struct timer_list devdump_timer;
bool ignore_btcoex_events;
};
void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);

Просмотреть файл

@ -3152,6 +3152,9 @@ static int mwifiex_init_pcie(struct mwifiex_adapter *adapter)
if (ret)
goto err_alloc_buffers;
if (pdev->device == PCIE_DEVICE_ID_MARVELL_88W8897)
adapter->ignore_btcoex_events = true;
return 0;
err_alloc_buffers:

Просмотреть файл

@ -708,11 +708,35 @@ static int mwifiex_ret_ver_ext(struct mwifiex_private *priv,
{
struct host_cmd_ds_version_ext *ver_ext = &resp->params.verext;
if (test_and_clear_bit(MWIFIEX_IS_REQUESTING_FW_VEREXT, &priv->adapter->work_flags)) {
if (strncmp(ver_ext->version_str, "ChipRev:20, BB:9b(10.00), RF:40(21)",
MWIFIEX_VERSION_STR_LENGTH) == 0) {
struct mwifiex_ds_auto_ds auto_ds = {
.auto_ds = DEEP_SLEEP_OFF,
};
mwifiex_dbg(priv->adapter, MSG,
"Bad HW revision detected, disabling deep sleep\n");
if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
DIS_AUTO_PS, BITMAP_AUTO_DS, &auto_ds, false)) {
mwifiex_dbg(priv->adapter, MSG,
"Disabling deep sleep failed.\n");
}
}
return 0;
}
if (version_ext) {
version_ext->version_str_sel = ver_ext->version_str_sel;
memcpy(version_ext->version_str, ver_ext->version_str,
sizeof(char) * 128);
memcpy(priv->version_str, ver_ext->version_str, 128);
MWIFIEX_VERSION_STR_LENGTH);
memcpy(priv->version_str, ver_ext->version_str,
MWIFIEX_VERSION_STR_LENGTH);
/* Ensure the version string from the firmware is 0-terminated */
priv->version_str[MWIFIEX_VERSION_STR_LENGTH - 1] = '\0';
}
return 0;
}

Просмотреть файл

@ -1058,6 +1058,9 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_BT_COEX_WLAN_PARA_CHANGE:
dev_dbg(adapter->dev, "EVENT: BT coex wlan param update\n");
if (adapter->ignore_btcoex_events)
break;
mwifiex_bt_coex_wlan_param_update_event(priv,
adapter->event_skb);
break;

Просмотреть файл

@ -130,7 +130,8 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
default:
mwifiex_dbg(adapter, ERROR,
"unknown recv_type %#x\n", recv_type);
return -1;
ret = -1;
goto exit_restore_skb;
}
break;
case MWIFIEX_USB_EP_DATA:

Просмотреть файл

@ -4225,9 +4225,11 @@ struct mwl8k_cmd_set_key {
__le32 key_info;
__le32 key_id;
__le16 key_len;
__u8 key_material[MAX_ENCR_KEY_LENGTH];
__u8 tkip_tx_mic_key[MIC_KEY_LENGTH];
__u8 tkip_rx_mic_key[MIC_KEY_LENGTH];
struct {
__u8 key_material[MAX_ENCR_KEY_LENGTH];
__u8 tkip_tx_mic_key[MIC_KEY_LENGTH];
__u8 tkip_rx_mic_key[MIC_KEY_LENGTH];
} tkip;
__le16 tkip_rsc_low;
__le32 tkip_rsc_high;
__le16 tkip_tsc_low;
@ -4375,7 +4377,7 @@ static int mwl8k_cmd_encryption_set_key(struct ieee80211_hw *hw,
goto done;
}
memcpy(cmd->key_material, key->key, keymlen);
memcpy(&cmd->tkip, key->key, keymlen);
cmd->action = cpu_to_le32(action);
rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);

Просмотреть файл

@ -626,7 +626,6 @@ void chip_wakeup(struct wilc *wilc)
u32 clk_status_val = 0, trials = 0;
u32 wakeup_reg, wakeup_bit;
u32 clk_status_reg, clk_status_bit;
u32 to_host_from_fw_reg, to_host_from_fw_bit;
u32 from_host_to_fw_reg, from_host_to_fw_bit;
const struct wilc_hif_func *hif_func = wilc->hif_func;
@ -637,8 +636,6 @@ void chip_wakeup(struct wilc *wilc)
clk_status_bit = WILC_SDIO_CLK_STATUS_BIT;
from_host_to_fw_reg = WILC_SDIO_HOST_TO_FW_REG;
from_host_to_fw_bit = WILC_SDIO_HOST_TO_FW_BIT;
to_host_from_fw_reg = WILC_SDIO_FW_TO_HOST_REG;
to_host_from_fw_bit = WILC_SDIO_FW_TO_HOST_BIT;
} else {
wakeup_reg = WILC_SPI_WAKEUP_REG;
wakeup_bit = WILC_SPI_WAKEUP_BIT;
@ -646,8 +643,6 @@ void chip_wakeup(struct wilc *wilc)
clk_status_bit = WILC_SPI_CLK_STATUS_BIT;
from_host_to_fw_reg = WILC_SPI_HOST_TO_FW_REG;
from_host_to_fw_bit = WILC_SPI_HOST_TO_FW_BIT;
to_host_from_fw_reg = WILC_SPI_FW_TO_HOST_REG;
to_host_from_fw_bit = WILC_SPI_FW_TO_HOST_BIT;
}
/* indicate host wakeup */

Просмотреть файл

@ -899,7 +899,7 @@ static u8 _rtl92c_phy_get_rightchnlplace(u8 chnl)
u8 place = chnl;
if (chnl > 14) {
for (place = 14; place < sizeof(channel5g); place++) {
for (place = 14; place < ARRAY_SIZE(channel5g); place++) {
if (channel5g[place] == chnl) {
place++;
break;
@ -1366,7 +1366,7 @@ u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl)
u8 place;
if (chnl > 14) {
for (place = 14; place < sizeof(channel_all); place++) {
for (place = 14; place < ARRAY_SIZE(channel_all); place++) {
if (channel_all[place] == chnl)
return place - 13;
}
@ -2428,7 +2428,7 @@ static bool _rtl92d_is_legal_5g_channel(struct ieee80211_hw *hw, u8 channel)
int i;
for (i = 0; i < sizeof(channel5g); i++)
for (i = 0; i < ARRAY_SIZE(channel5g); i++)
if (channel == channel5g[i])
return true;
return false;
@ -2692,9 +2692,8 @@ void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw)
u8 i;
rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"settings regs %d default regs %d\n",
(int)(sizeof(rtlphy->iqk_matrix) /
sizeof(struct iqk_matrix_regs)),
"settings regs %zu default regs %d\n",
ARRAY_SIZE(rtlphy->iqk_matrix),
IQK_MATRIX_REG_NUM);
/* 0xe94, 0xe9c, 0xea4, 0xeac, 0xeb4, 0xebc, 0xec4, 0xecc */
for (i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) {
@ -2861,16 +2860,14 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw)
case BAND_ON_5G:
/* Get first channel error when change between
* 5G and 2.4G band. */
if (channel <= 14)
if (WARN_ONCE(channel <= 14, "rtl8192de: 5G but channel<=14\n"))
return 0;
WARN_ONCE((channel <= 14), "rtl8192de: 5G but channel<=14\n");
break;
case BAND_ON_2_4G:
/* Get first channel error when change between
* 5G and 2.4G band. */
if (channel > 14)
if (WARN_ONCE(channel > 14, "rtl8192de: 2G but channel>14\n"))
return 0;
WARN_ONCE((channel > 14), "rtl8192de: 2G but channel>14\n");
break;
default:
WARN_ONCE(true, "rtl8192de: Invalid WirelessMode(%#x)!!\n",

Просмотреть файл

@ -108,7 +108,6 @@
#define CHANNEL_GROUP_IDX_5GM 6
#define CHANNEL_GROUP_IDX_5GH 9
#define CHANNEL_GROUP_MAX_5G 9
#define CHANNEL_MAX_NUMBER_2G 14
#define AVG_THERMAL_NUM 8
#define AVG_THERMAL_NUM_88E 4
#define AVG_THERMAL_NUM_8723BE 4

Просмотреть файл

@ -904,6 +904,39 @@ static int rtw_debugfs_get_fw_crash(struct seq_file *m, void *v)
return 0;
}
/*
 * debugfs write handler: parse a boolean from userspace and set or
 * clear RTW_FLAG_FORCE_LOWEST_RATE accordingly.  Returns @count on
 * success or the kstrtobool_from_user() error code.
 */
static ssize_t rtw_debugfs_set_force_lowest_basic_rate(struct file *filp,
						       const char __user *buffer,
						       size_t count, loff_t *loff)
{
	struct seq_file *seq = filp->private_data;
	struct rtw_debugfs_priv *debugfs_priv = seq->private;
	struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
	bool enable;
	int ret;

	ret = kstrtobool_from_user(buffer, count, &enable);
	if (ret)
		return ret;

	if (enable)
		set_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags);
	else
		clear_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags);

	return count;
}
static int rtw_debugfs_get_force_lowest_basic_rate(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
seq_printf(m, "force lowest basic rate: %d\n",
test_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags));
return 0;
}
static ssize_t rtw_debugfs_set_dm_cap(struct file *filp,
const char __user *buffer,
size_t count, loff_t *loff)
@ -1094,6 +1127,11 @@ static struct rtw_debugfs_priv rtw_debug_priv_fw_crash = {
.cb_read = rtw_debugfs_get_fw_crash,
};
static struct rtw_debugfs_priv rtw_debug_priv_force_lowest_basic_rate = {
.cb_write = rtw_debugfs_set_force_lowest_basic_rate,
.cb_read = rtw_debugfs_get_force_lowest_basic_rate,
};
static struct rtw_debugfs_priv rtw_debug_priv_dm_cap = {
.cb_write = rtw_debugfs_set_dm_cap,
.cb_read = rtw_debugfs_get_dm_cap,
@ -1174,6 +1212,7 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev)
rtw_debugfs_add_r(tx_pwr_tbl);
rtw_debugfs_add_rw(edcca_enable);
rtw_debugfs_add_rw(fw_crash);
rtw_debugfs_add_rw(force_lowest_basic_rate);
rtw_debugfs_add_rw(dm_cap);
}

Просмотреть файл

@ -364,6 +364,7 @@ enum rtw_flags {
RTW_FLAG_WOWLAN,
RTW_FLAG_RESTARTING,
RTW_FLAG_RESTART_TRIGGERING,
RTW_FLAG_FORCE_LOWEST_RATE,
NUM_OF_RTW_FLAGS,
};

Просмотреть файл

@ -1738,6 +1738,15 @@ static const struct dmi_system_id rtw88_pci_quirks[] = {
},
.driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_ASPM),
},
{
.callback = disable_pci_caps,
.ident = "HP HP 250 G7 Notebook PC",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP 250 G7 Notebook PC"),
},
.driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_ASPM),
},
{}
};

Просмотреть файл

@ -233,17 +233,34 @@ void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb, int src)
spin_unlock_irqrestore(&tx_report->q_lock, flags);
}
/*
 * Pick the rate for a management frame: normally the lowest basic rate
 * configured on the vif (offset from @lowest_rate), but fall back to
 * @lowest_rate itself when there is no vif / no basic rates, when the
 * caller asks to ignore them (@ignore_rate), or when the debugfs
 * "force lowest basic rate" flag is set.
 */
static u8 rtw_get_mgmt_rate(struct rtw_dev *rtwdev, struct sk_buff *skb,
			    u8 lowest_rate, bool ignore_rate)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;

	if (ignore_rate ||
	    test_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags) ||
	    !vif || !vif->bss_conf.basic_rates)
		return lowest_rate;

	return __ffs(vif->bss_conf.basic_rates) + lowest_rate;
}
static void rtw_tx_pkt_info_update_rate(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
struct sk_buff *skb)
struct sk_buff *skb,
bool ignore_rate)
{
if (rtwdev->hal.current_band_type == RTW_BAND_2G) {
pkt_info->rate_id = RTW_RATEID_B_20M;
pkt_info->rate = DESC_RATE1M;
pkt_info->rate = rtw_get_mgmt_rate(rtwdev, skb, DESC_RATE1M,
ignore_rate);
} else {
pkt_info->rate_id = RTW_RATEID_G;
pkt_info->rate = DESC_RATE6M;
pkt_info->rate = rtw_get_mgmt_rate(rtwdev, skb, DESC_RATE6M,
ignore_rate);
}
pkt_info->use_rate = true;
pkt_info->dis_rate_fallback = true;
}
@ -280,7 +297,7 @@ static void rtw_tx_mgmt_pkt_info_update(struct rtw_dev *rtwdev,
struct ieee80211_sta *sta,
struct sk_buff *skb)
{
rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb);
rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb, false);
pkt_info->dis_qselseq = true;
pkt_info->en_hwseq = true;
pkt_info->hw_ssn_sel = 0;
@ -404,7 +421,7 @@ void rtw_tx_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
if (type != RSVD_BEACON && type != RSVD_DUMMY)
pkt_info->qsel = TX_DESC_QSEL_MGMT;
rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb);
rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb, true);
bmc = is_broadcast_ether_addr(hdr->addr1) ||
is_multicast_ether_addr(hdr->addr1);

Просмотреть файл

@ -411,12 +411,13 @@ enum rtw89_regulation_type {
RTW89_NA = 4,
RTW89_IC = 5,
RTW89_KCC = 6,
RTW89_NCC = 7,
RTW89_CHILE = 8,
RTW89_ACMA = 9,
RTW89_MEXICO = 10,
RTW89_ACMA = 7,
RTW89_NCC = 8,
RTW89_MEXICO = 9,
RTW89_CHILE = 10,
RTW89_UKRAINE = 11,
RTW89_CN = 12,
RTW89_QATAR = 13,
RTW89_REGD_NUM,
};

Просмотреть файл

@ -723,6 +723,7 @@ rtw89_debug_priv_mac_mem_dump_select(struct file *filp,
}
static const u32 mac_mem_base_addr_table[RTW89_MAC_MEM_MAX] = {
[RTW89_MAC_MEM_AXIDMA] = AXIDMA_BASE_ADDR,
[RTW89_MAC_MEM_SHARED_BUF] = SHARED_BUF_BASE_ADDR,
[RTW89_MAC_MEM_DMAC_TBL] = DMAC_TBL_BASE_ADDR,
[RTW89_MAC_MEM_SHCUT_MACHDR] = SHCUT_MACHDR_BASE_ADDR,
@ -735,6 +736,10 @@ static const u32 mac_mem_base_addr_table[RTW89_MAC_MEM_MAX] = {
[RTW89_MAC_MEM_BA_CAM] = BA_CAM_BASE_ADDR,
[RTW89_MAC_MEM_BCN_IE_CAM0] = BCN_IE_CAM0_BASE_ADDR,
[RTW89_MAC_MEM_BCN_IE_CAM1] = BCN_IE_CAM1_BASE_ADDR,
[RTW89_MAC_MEM_TXD_FIFO_0] = TXD_FIFO_0_BASE_ADDR,
[RTW89_MAC_MEM_TXD_FIFO_1] = TXD_FIFO_1_BASE_ADDR,
[RTW89_MAC_MEM_TXDATA_FIFO_0] = TXDATA_FIFO_0_BASE_ADDR,
[RTW89_MAC_MEM_TXDATA_FIFO_1] = TXDATA_FIFO_1_BASE_ADDR,
};
static void rtw89_debug_dump_mac_mem(struct seq_file *m,
@ -814,7 +819,7 @@ rtw89_debug_priv_mac_dbg_port_dump_select(struct file *filp,
return -EINVAL;
}
enable = set == 0 ? false : true;
enable = set != 0;
switch (sel) {
case 0:
debugfs_priv->dbgpkg_en.ss_dbg = enable;

Просмотреть файл

@ -1093,7 +1093,6 @@ static int cmac_func_en(struct rtw89_dev *rtwdev, u8 mac_idx, bool en)
static int dmac_func_en(struct rtw89_dev *rtwdev)
{
u32 val32;
u32 ret = 0;
val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_MAC_SEC_EN |
B_AX_DISPATCHER_EN | B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN |
@ -1107,7 +1106,7 @@ static int dmac_func_en(struct rtw89_dev *rtwdev)
B_AX_WD_RLS_CLK_EN);
rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val32);
return ret;
return 0;
}
static int chip_func_en(struct rtw89_dev *rtwdev)
@ -3695,7 +3694,7 @@ void _rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev)
{
struct rtw89_traffic_stats *stats = &rtwdev->stats;
struct rtw89_vif *rtwvif;
bool en = stats->tx_tfc_lv > stats->rx_tfc_lv ? false : true;
bool en = stats->tx_tfc_lv <= stats->rx_tfc_lv;
bool old = test_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags);
if (en == old)

Просмотреть файл

@ -227,6 +227,7 @@ enum rtw89_mac_dbg_port_sel {
/* SRAM mem dump */
#define R_AX_INDIR_ACCESS_ENTRY 0x40000
#define AXIDMA_BASE_ADDR 0x18006000
#define STA_SCHED_BASE_ADDR 0x18808000
#define RXPLD_FLTR_CAM_BASE_ADDR 0x18813000
#define SECURITY_CAM_BASE_ADDR 0x18814000
@ -240,10 +241,15 @@ enum rtw89_mac_dbg_port_sel {
#define DMAC_TBL_BASE_ADDR 0x18800000
#define SHCUT_MACHDR_BASE_ADDR 0x18800800
#define BCN_IE_CAM1_BASE_ADDR 0x188A0000
#define TXD_FIFO_0_BASE_ADDR 0x18856200
#define TXD_FIFO_1_BASE_ADDR 0x188A1080
#define TXDATA_FIFO_0_BASE_ADDR 0x18856000
#define TXDATA_FIFO_1_BASE_ADDR 0x188A1000
#define CCTL_INFO_SIZE 32
enum rtw89_mac_mem_sel {
RTW89_MAC_MEM_AXIDMA,
RTW89_MAC_MEM_SHARED_BUF,
RTW89_MAC_MEM_DMAC_TBL,
RTW89_MAC_MEM_SHCUT_MACHDR,
@ -256,6 +262,10 @@ enum rtw89_mac_mem_sel {
RTW89_MAC_MEM_BA_CAM,
RTW89_MAC_MEM_BCN_IE_CAM0,
RTW89_MAC_MEM_BCN_IE_CAM1,
RTW89_MAC_MEM_TXD_FIFO_0,
RTW89_MAC_MEM_TXD_FIFO_1,
RTW89_MAC_MEM_TXDATA_FIFO_0,
RTW89_MAC_MEM_TXDATA_FIFO_1,
/* keep last */
RTW89_MAC_MEM_LAST,

Просмотреть файл

@ -654,6 +654,12 @@ rtw89_phy_cofig_rf_reg_store(struct rtw89_dev *rtwdev,
u16 idx = info->curr_idx % RTW89_H2C_RF_PAGE_SIZE;
u8 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE;
if (page >= RTW89_H2C_RF_PAGE_NUM) {
rtw89_warn(rtwdev, "RF parameters exceed size. path=%d, idx=%d",
rf_path, info->curr_idx);
return;
}
info->rtw89_phy_config_rf_h2c[page][idx] =
cpu_to_le32((reg->addr << 20) | reg->data);
info->curr_idx++;
@ -662,30 +668,29 @@ rtw89_phy_cofig_rf_reg_store(struct rtw89_dev *rtwdev,
static int rtw89_phy_config_rf_reg_fw(struct rtw89_dev *rtwdev,
struct rtw89_fw_h2c_rf_reg_info *info)
{
u16 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE;
u16 len = (info->curr_idx % RTW89_H2C_RF_PAGE_SIZE) * 4;
u16 remain = info->curr_idx;
u16 len = 0;
u8 i;
int ret = 0;
if (page > RTW89_H2C_RF_PAGE_NUM) {
if (remain > RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE) {
rtw89_warn(rtwdev,
"rf reg h2c total page num %d larger than %d (RTW89_H2C_RF_PAGE_NUM)\n",
page, RTW89_H2C_RF_PAGE_NUM);
return -EINVAL;
"rf reg h2c total len %d larger than %d\n",
remain, RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE);
ret = -EINVAL;
goto out;
}
for (i = 0; i < page; i++) {
ret = rtw89_fw_h2c_rf_reg(rtwdev, info,
RTW89_H2C_RF_PAGE_SIZE * 4, i);
for (i = 0; i < RTW89_H2C_RF_PAGE_NUM && remain; i++, remain -= len) {
len = remain > RTW89_H2C_RF_PAGE_SIZE ? RTW89_H2C_RF_PAGE_SIZE : remain;
ret = rtw89_fw_h2c_rf_reg(rtwdev, info, len * 4, i);
if (ret)
return ret;
goto out;
}
ret = rtw89_fw_h2c_rf_reg(rtwdev, info, len, i);
if (ret)
return ret;
out:
info->curr_idx = 0;
return 0;
return ret;
}
static void rtw89_phy_config_rf_reg(struct rtw89_dev *rtwdev,
@ -1099,9 +1104,15 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
switch (band) {
case RTW89_BAND_2G:
lmt = (*chip->txpwr_lmt_2g)[bw][ntx][rs][bf][regd][ch_idx];
if (!lmt)
lmt = (*chip->txpwr_lmt_2g)[bw][ntx][rs][bf]
[RTW89_WW][ch_idx];
break;
case RTW89_BAND_5G:
lmt = (*chip->txpwr_lmt_5g)[bw][ntx][rs][bf][regd][ch_idx];
if (!lmt)
lmt = (*chip->txpwr_lmt_5g)[bw][ntx][rs][bf]
[RTW89_WW][ch_idx];
break;
default:
rtw89_warn(rtwdev, "unknown band type: %d\n", band);
@ -1224,9 +1235,15 @@ static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev,
switch (band) {
case RTW89_BAND_2G:
lmt_ru = (*chip->txpwr_lmt_ru_2g)[ru][ntx][regd][ch_idx];
if (!lmt_ru)
lmt_ru = (*chip->txpwr_lmt_ru_2g)[ru][ntx]
[RTW89_WW][ch_idx];
break;
case RTW89_BAND_5G:
lmt_ru = (*chip->txpwr_lmt_ru_5g)[ru][ntx][regd][ch_idx];
if (!lmt_ru)
lmt_ru = (*chip->txpwr_lmt_ru_5g)[ru][ntx]
[RTW89_WW][ch_idx];
break;
default:
rtw89_warn(rtwdev, "unknown band type: %d\n", band);
@ -1767,7 +1784,7 @@ static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev)
}
rtw89_phy_cfo_crystal_cap_adjust(rtwdev, new_cfo);
cfo->cfo_avg_pre = new_cfo;
x_cap_update = cfo->crystal_cap == pre_x_cap ? false : true;
x_cap_update = cfo->crystal_cap != pre_x_cap;
rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap_up=%d\n", x_cap_update);
rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap: D:%x C:%x->%x, ofst=%d\n",
cfo->def_x_cap, pre_x_cap, cfo->crystal_cap,

Просмотреть файл

@ -15,243 +15,244 @@ static const struct rtw89_regulatory rtw89_ww_regd =
COUNTRY_REGD("00", RTW89_WW, RTW89_WW);
static const struct rtw89_regulatory rtw89_regd_map[] = {
COUNTRY_REGD("AR", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("BO", RTW89_WW, RTW89_FCC),
COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO),
COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("CL", RTW89_WW, RTW89_CHILE),
COUNTRY_REGD("CL", RTW89_CHILE, RTW89_CHILE),
COUNTRY_REGD("CO", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("CR", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("EC", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("SV", RTW89_WW, RTW89_FCC),
COUNTRY_REGD("SV", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("GT", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("HN", RTW89_WW, RTW89_FCC),
COUNTRY_REGD("MX", RTW89_FCC, RTW89_MEXICO),
COUNTRY_REGD("HN", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("MX", RTW89_MEXICO, RTW89_MEXICO),
COUNTRY_REGD("NI", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("PA", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("PY", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("PE", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("US", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("UY", RTW89_WW, RTW89_FCC),
COUNTRY_REGD("VE", RTW89_WW, RTW89_FCC),
COUNTRY_REGD("UY", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("VE", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("PR", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("AT", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("BE", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("CY", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("CZ", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("DK", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("EE", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("FI", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("FR", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("DE", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("GR", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("HU", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("IS", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("IE", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("IT", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("LV", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("LI", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("LT", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("LU", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("MT", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("MC", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("NL", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("NO", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("PL", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("PT", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("SK", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("SI", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("ES", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("SE", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("CH", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("GB", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("AL", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("AZ", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("BH", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("BA", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("BG", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("HR", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("EG", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("GH", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("IQ", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("IL", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("JO", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("KZ", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("KE", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("KW", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("KG", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("LB", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("LS", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("MK", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("MA", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("MZ", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("NA", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("NG", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("OM", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("QA", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("RO", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("RU", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("SA", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("SN", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("RS", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("ME", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("ZA", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("TR", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("UA", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("AE", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("YE", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("ZW", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("BD", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("KH", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("CN", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("HK", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("IN", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("AT", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("BE", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("CY", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("CZ", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("DK", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("EE", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("FI", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("FR", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("DE", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GR", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("HU", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("IS", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("IE", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("IT", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("LV", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("LI", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("LT", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("LU", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MT", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MC", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("NL", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("NO", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("PL", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("PT", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("SK", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("SI", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("ES", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("SE", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("CH", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GB", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("AL", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("AZ", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("BH", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("BA", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("BG", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("HR", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GH", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("IQ", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("IL", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("JO", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("KZ", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("KE", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("KW", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("KG", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MK", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MA", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("NA", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("QA", RTW89_QATAR, RTW89_QATAR),
COUNTRY_REGD("RO", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("RU", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("SA", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("SN", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("RS", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("ME", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("ZA", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("TR", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("UA", RTW89_UKRAINE, RTW89_UKRAINE),
COUNTRY_REGD("AE", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("YE", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("BD", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("KH", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("CN", RTW89_CN, RTW89_CN),
COUNTRY_REGD("HK", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("IN", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("KR", RTW89_KCC, RTW89_KCC),
COUNTRY_REGD("MY", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("PK", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("PH", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("SG", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("LK", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("MY", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("TH", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("VN", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("AU", RTW89_WW, RTW89_ACMA),
COUNTRY_REGD("NZ", RTW89_WW, RTW89_ACMA),
COUNTRY_REGD("PG", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("TH", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA),
COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA),
COUNTRY_REGD("PG", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("CA", RTW89_IC, RTW89_IC),
COUNTRY_REGD("JP", RTW89_MKK, RTW89_MKK),
COUNTRY_REGD("JM", RTW89_WW, RTW89_FCC),
COUNTRY_REGD("JM", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("AN", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("TT", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("TN", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("TN", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("AF", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("DZ", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("DZ", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("AS", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("AD", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("AO", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("AI", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("AQ", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("AD", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("AO", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("AI", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("AQ", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("AG", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("AM", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("AM", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("AW", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("BS", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("BB", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("BY", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("BY", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("BZ", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("BJ", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("BJ", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("BM", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("BT", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("BW", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("BV", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("IO", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("BT", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("BW", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("BV", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("IO", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("VG", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("BN", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("BF", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("MM", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("BI", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("CM", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("CV", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("BN", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("BF", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MM", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("BI", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("CM", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("CV", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("KY", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("CF", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("TD", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("CX", RTW89_WW, RTW89_ACMA),
COUNTRY_REGD("CC", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("KM", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("CG", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("CD", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("CK", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("CF", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("TD", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("CX", RTW89_ACMA, RTW89_ACMA),
COUNTRY_REGD("CC", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("CG", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("CK", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("DJ", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("DJ", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("DM", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("GQ", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("ER", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("ET", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("FK", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("FO", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("GQ", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("ER", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("ET", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("FK", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("FO", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("FJ", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("GF", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("PF", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("TF", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("GA", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("GM", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("GE", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("GI", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("GL", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("GF", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("PF", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("TF", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GA", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GM", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("GP", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("GG", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("GN", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("GW", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("GY", RTW89_FCC, RTW89_NCC),
COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GN", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GW", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GY", RTW89_NCC, RTW89_NCC),
COUNTRY_REGD("HT", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("HM", RTW89_WW, RTW89_ACMA),
COUNTRY_REGD("VA", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("IM", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("JE", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("KI", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("LA", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("LR", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("LY", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("MO", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("MG", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("MW", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("MV", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("ML", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("HM", RTW89_ACMA, RTW89_ACMA),
COUNTRY_REGD("VA", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("IM", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("JE", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("KI", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("LY", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MO", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MG", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MW", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MV", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("ML", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("MQ", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("MR", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("MU", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("YT", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("MQ", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MR", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MU", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("MD", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("MN", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("MS", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("NR", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("NP", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("NC", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("NE", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("NU", RTW89_WW, RTW89_ACMA),
COUNTRY_REGD("NF", RTW89_WW, RTW89_ACMA),
COUNTRY_REGD("MD", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MS", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("NR", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("NP", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("NC", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("NE", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("NU", RTW89_ACMA, RTW89_ACMA),
COUNTRY_REGD("NF", RTW89_ACMA, RTW89_ACMA),
COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("RE", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("RW", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("SH", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("RE", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("RW", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("SH", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("KN", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("LC", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("PM", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("PM", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("VC", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("WS", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("SM", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("SM", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("ST", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("SC", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("SL", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("SB", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("SO", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("GS", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("SL", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("SB", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("SO", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GS", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("SR", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("SJ", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("SZ", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("TJ", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("TZ", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("TG", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("TK", RTW89_WW, RTW89_ACMA),
COUNTRY_REGD("TO", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("TM", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("TC", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("SJ", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("TJ", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("TG", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("TK", RTW89_ACMA, RTW89_ACMA),
COUNTRY_REGD("TO", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("TM", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("TC", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("TV", RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("UG", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("UG", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("UZ", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("VU", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("WF", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("EH", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("ZM", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("IR", RTW89_WW, RTW89_ETSI),
COUNTRY_REGD("UZ", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("VU", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("WF", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("EH", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("ZM", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("IR", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("PS", RTW89_ETSI, RTW89_ETSI),
};
static const struct rtw89_regulatory *rtw89_regd_find_reg_by_name(char *alpha2)

Просмотреть файл

@ -1053,10 +1053,10 @@ static void rtw8852a_set_channel_bb(struct rtw89_dev *rtwdev,
struct rtw89_channel_params *param,
enum rtw89_phy_idx phy_idx)
{
bool cck_en = param->center_chan > 14 ? false : true;
bool cck_en = param->center_chan <= 14;
u8 pri_ch_idx = param->pri_ch_idx;
if (param->center_chan <= 14)
if (cck_en)
rtw8852a_ctrl_sco_cck(rtwdev, param->center_chan,
param->primary_chan, param->bandwidth);

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -23,6 +23,7 @@
#include "rsi_common.h"
#include "rsi_coex.h"
#include "rsi_hal.h"
#include "rsi_usb.h"
u32 rsi_zone_enabled = /* INFO_ZONE |
INIT_ZONE |
@ -168,6 +169,9 @@ int rsi_read_pkt(struct rsi_common *common, u8 *rx_pkt, s32 rcv_pkt_len)
frame_desc = &rx_pkt[index];
actual_length = *(u16 *)&frame_desc[0];
offset = *(u16 *)&frame_desc[2];
if (!rcv_pkt_len && offset >
RSI_MAX_RX_USB_PKT_SIZE - FRAME_DESC_SZ)
goto fail;
queueno = rsi_get_queueno(frame_desc, offset);
length = rsi_get_length(frame_desc, offset);

Просмотреть файл

@ -269,8 +269,12 @@ static void rsi_rx_done_handler(struct urb *urb)
struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)rx_cb->data;
int status = -EINVAL;
if (!rx_cb->rx_skb)
return;
if (urb->status) {
dev_kfree_skb(rx_cb->rx_skb);
rx_cb->rx_skb = NULL;
return;
}
@ -294,8 +298,10 @@ out:
if (rsi_rx_urb_submit(dev->priv, rx_cb->ep_num, GFP_ATOMIC))
rsi_dbg(ERR_ZONE, "%s: Failed in urb submission", __func__);
if (status)
if (status) {
dev_kfree_skb(rx_cb->rx_skb);
rx_cb->rx_skb = NULL;
}
}
static void rsi_rx_urb_kill(struct rsi_hw *adapter, u8 ep_num)
@ -324,7 +330,6 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num, gfp_t mem_flags)
struct sk_buff *skb;
u8 dword_align_bytes = 0;
#define RSI_MAX_RX_USB_PKT_SIZE 3000
skb = dev_alloc_skb(RSI_MAX_RX_USB_PKT_SIZE);
if (!skb)
return -ENOMEM;

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше