Merge tag 'wireless-drivers-next-2021-06-25' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for v5.14

Second, and most likely the last, set of patches for v5.14. mt76 and
iwlwifi have the most patches in this round, but rtw88 also has some new
features. Nothing special really stands out.

mt76

* mt7915 MSI support

* disable ASPM on mt7915

* mt7915 tx status reporting

* mt7921 decap offload

rtw88

* beacon filter support

* path diversity support

* firmware crash information via devcoredump

* quirks for disabling pci capabilities

mt7601u

* add USB ID for a XiaoDu WiFi Dongle

ath11k

* enable support for QCN9074 PCI devices

brcmfmac

* support parsing the country code map from DeviceTree

iwlwifi

* support for new hardware

* support for BIOS control of 11ax enablement in Russia

* support UNII4 band enablement from BIOS
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2021-06-25 11:50:02 -07:00
Parents: ac53c26433 c2a3823dad
Commit: 4e3db44a24
145 changed files: 5558 additions and 3071 deletions


@ -2795,7 +2795,7 @@ void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
switch (ar->scan.state) {
case ATH10K_SCAN_IDLE:
case ATH10K_SCAN_STARTING:
ath10k_warn(ar, "received chan info event without a scan request, ignoring\n");
ath10k_dbg(ar, ATH10K_DBG_WMI, "received chan info event without a scan request, ignoring\n");
goto exit;
case ATH10K_SCAN_RUNNING:
case ATH10K_SCAN_ABORTING:


@ -1314,10 +1314,16 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
arg->he_flag = true;
memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info,
sizeof(arg->peer_he_cap_macinfo));
memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info,
sizeof(arg->peer_he_cap_phyinfo));
memcpy_and_pad(&arg->peer_he_cap_macinfo,
sizeof(arg->peer_he_cap_macinfo),
he_cap->he_cap_elem.mac_cap_info,
sizeof(he_cap->he_cap_elem.mac_cap_info),
0);
memcpy_and_pad(&arg->peer_he_cap_phyinfo,
sizeof(arg->peer_he_cap_phyinfo),
he_cap->he_cap_elem.phy_cap_info,
sizeof(he_cap->he_cap_elem.phy_cap_info),
0);
arg->peer_he_ops = vif->bss_conf.he_oper.params;
/* the top most byte is used to indicate BSS color info */
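
The memcpy_and_pad() switch above matters because the destination fields in the peer-assoc argument can be larger than the HE capability fields being copied; the helper copies only the source length and zero-fills the remainder instead of reading past the source buffer. A minimal userspace sketch of those semantics (not the kernel's implementation; buffer sizes are made up for illustration):

  #include <stdio.h>
  #include <string.h>

  /* sketch of memcpy_and_pad(): copy at most count bytes from src and
   * fill the rest of dest (up to dest_len) with pad */
  static void memcpy_and_pad_sketch(void *dest, size_t dest_len,
                                    const void *src, size_t count, int pad)
  {
      if (dest_len > count) {
          memcpy(dest, src, count);
          memset((char *)dest + count, pad, dest_len - count);
      } else {
          memcpy(dest, src, dest_len);
      }
  }

  int main(void)
  {
      unsigned char mac_cap[6] = { 1, 2, 3, 4, 5, 6 }; /* source */
      unsigned char peer_cap[8];                       /* larger destination */

      /* the two trailing destination bytes are zero-padded rather than
       * read from beyond mac_cap[] */
      memcpy_and_pad_sketch(peer_cap, sizeof(peer_cap),
                            mac_cap, sizeof(mac_cap), 0);

      for (size_t i = 0; i < sizeof(peer_cap); i++)
          printf("%02x ", peer_cap[i]);
      printf("\n");
      return 0;
  }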


@ -41,7 +41,7 @@
static const struct pci_device_id ath11k_pci_id_table[] = {
{ PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
{ PCI_VDEVICE(QCOM, WCN6855_DEVICE_ID) },
/* TODO: add QCN9074_DEVICE_ID) once firmware issues are resolved */
{ PCI_VDEVICE(QCOM, QCN9074_DEVICE_ID) },
{0}
};


@ -445,22 +445,12 @@ out:
return ret;
}
static void init_hal_msg(struct wcn36xx_hal_msg_header *hdr,
enum wcn36xx_hal_host_msg_type msg_type,
size_t msg_size)
{
memset(hdr, 0, msg_size + sizeof(*hdr));
hdr->msg_type = msg_type;
hdr->msg_version = WCN36XX_HAL_MSG_VERSION0;
hdr->len = msg_size + sizeof(*hdr);
}
#define __INIT_HAL_MSG(msg_body, type, version) \
do { \
memset(&msg_body, 0, sizeof(msg_body)); \
msg_body.header.msg_type = type; \
msg_body.header.msg_version = version; \
msg_body.header.len = sizeof(msg_body); \
memset(&(msg_body), 0, sizeof(msg_body)); \
(msg_body).header.msg_type = type; \
(msg_body).header.msg_version = version; \
(msg_body).header.len = sizeof(msg_body); \
} while (0) \
#define INIT_HAL_MSG(msg_body, type) \
@ -2729,8 +2719,7 @@ int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
msg_body = (struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *)
wcn->hal_buf;
init_hal_msg(&msg_body->header, WCN36XX_HAL_8023_MULTICAST_LIST_REQ,
sizeof(msg_body->mc_addr_list));
INIT_HAL_MSG(*msg_body, WCN36XX_HAL_8023_MULTICAST_LIST_REQ);
/* An empty list means all mc traffic will be received */
if (fp)
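
A side note on the added parentheses around msg_body: the multicast-list path above now passes a dereferenced pointer via INIT_HAL_MSG(*msg_body, ...), and because '.' binds tighter than unary '*', the unparenthesized expansion would not even compile. A reduced, hypothetical example (not driver code):

  #include <stdio.h>

  struct hdr { int msg_type; };
  struct msg { struct hdr header; };

  /* unparenthesized: INIT_BAD(*m) expands to *m.header.msg_type = 1,
   * parsed as *(m.header.msg_type), which fails to compile */
  #define INIT_BAD(msg_body)  do { msg_body.header.msg_type = 1; } while (0)
  /* parenthesized, as in the patch: expands to (*m).header.msg_type = 1 */
  #define INIT_GOOD(msg_body) do { (msg_body).header.msg_type = 1; } while (0)

  int main(void)
  {
      struct msg storage = { { 0 } };
      struct msg *msg_body = &storage;

      INIT_GOOD(*msg_body);
      printf("msg_type = %d\n", storage.header.msg_type);
      return 0;
  }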


@ -2895,8 +2895,13 @@ brcmf_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *ndev,
&cfg->assoclist,
sizeof(cfg->assoclist));
if (err) {
bphy_err(drvr, "BRCMF_C_GET_ASSOCLIST unsupported, err=%d\n",
err);
/* GET_ASSOCLIST unsupported by firmware of older chips */
if (err == -EBADE)
bphy_info_once(drvr, "BRCMF_C_GET_ASSOCLIST unsupported\n");
else
bphy_err(drvr, "BRCMF_C_GET_ASSOCLIST failed, err=%d\n",
err);
cfg->assoclist.count = 0;
return -EOPNOTSUPP;
}
@ -6851,7 +6856,12 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg)
err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain);
if (err) {
bphy_err(drvr, "rxchain error (%d)\n", err);
/* rxchain unsupported by firmware of older chips */
if (err == -EBADE)
bphy_info_once(drvr, "rxchain unsupported\n");
else
bphy_err(drvr, "rxchain error (%d)\n", err);
nchain = 1;
} else {
for (nchain = 0; rxchain; nchain++)


@ -188,9 +188,14 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
/*Finally, pick up the PROMISC flag */
cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
if (err < 0)
bphy_err(drvr, "Setting BRCMF_C_SET_PROMISC failed, %d\n",
err);
if (err < 0) {
/* PROMISC unsupported by firmware of older chips */
if (err == -EBADE)
bphy_info_once(drvr, "BRCMF_C_SET_PROMISC unsupported\n");
else
bphy_err(drvr, "Setting BRCMF_C_SET_PROMISC failed, err=%d\n",
err);
}
brcmf_configure_arp_nd_offload(ifp, !cmd_value);
}


@ -60,6 +60,10 @@ void __brcmf_err(struct brcmf_bus *bus, const char *func, const char *fmt, ...);
##__VA_ARGS__); \
} while (0)
#define bphy_info_once(drvr, fmt, ...) \
wiphy_info_once((drvr)->wiphy, "%s: " fmt, __func__, \
##__VA_ARGS__)
#if defined(DEBUG) || defined(CONFIG_BRCM_TRACING)
/* For debug/tracing purposes treat info messages as errors */


@ -12,12 +12,59 @@
#include "common.h"
#include "of.h"
static int brcmf_of_get_country_codes(struct device *dev,
struct brcmf_mp_device *settings)
{
struct device_node *np = dev->of_node;
struct brcmfmac_pd_cc_entry *cce;
struct brcmfmac_pd_cc *cc;
int count;
int i;
count = of_property_count_strings(np, "brcm,ccode-map");
if (count < 0) {
/* The property is optional, so return success if it doesn't
* exist. Otherwise propagate the error code.
*/
return (count == -EINVAL) ? 0 : count;
}
cc = devm_kzalloc(dev, sizeof(*cc) + count * sizeof(*cce), GFP_KERNEL);
if (!cc)
return -ENOMEM;
cc->table_size = count;
for (i = 0; i < count; i++) {
const char *map;
cce = &cc->table[i];
if (of_property_read_string_index(np, "brcm,ccode-map",
i, &map))
continue;
/* String format e.g. US-Q2-86 */
if (sscanf(map, "%2c-%2c-%d", cce->iso3166, cce->cc,
&cce->rev) != 3)
brcmf_err("failed to read country map %s\n", map);
else
brcmf_dbg(INFO, "%s-%s-%d\n", cce->iso3166, cce->cc,
cce->rev);
}
settings->country_codes = cc;
return 0;
}
void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
struct brcmf_mp_device *settings)
{
struct brcmfmac_sdio_pd *sdio = &settings->bus.sdio;
struct device_node *root, *np = dev->of_node;
int irq;
int err;
u32 irqf;
u32 val;
@ -43,8 +90,14 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
of_node_put(root);
}
if (!np || bus_type != BRCMF_BUSTYPE_SDIO ||
!of_device_is_compatible(np, "brcm,bcm4329-fmac"))
if (!np || !of_device_is_compatible(np, "brcm,bcm4329-fmac"))
return;
err = brcmf_of_get_country_codes(dev, settings);
if (err)
brcmf_err("failed to get OF country code map (err=%d)\n", err);
if (bus_type != BRCMF_BUSTYPE_SDIO)
return;
if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
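
For reference (not part of the diff), each "brcm,ccode-map" string follows the ISO3166 code / firmware country code / revision format mentioned in the comment, e.g. "US-Q2-86". A standalone illustration of the sscanf() format used above; note that %2c copies exactly two characters without NUL-terminating, so the demo buffers are pre-zeroed for printing:

  #include <stdio.h>

  int main(void)
  {
      const char *map = "US-Q2-86"; /* example "brcm,ccode-map" entry */
      char iso3166[3] = { 0 };      /* 2 chars + NUL, sized for this demo */
      char cc[3] = { 0 };
      int rev;

      if (sscanf(map, "%2c-%2c-%d", iso3166, cc, &rev) != 3) {
          fprintf(stderr, "failed to read country map %s\n", map);
          return 1;
      }

      printf("%s-%s-%d\n", iso3166, cc, rev);
      return 0;
  }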


@ -16,9 +16,10 @@ iwlwifi-objs += iwl-trans.o
iwlwifi-objs += queue/tx.o
iwlwifi-objs += fw/img.o fw/notif-wait.o
iwlwifi-objs += fw/dbg.o fw/pnvm.o
iwlwifi-objs += fw/dbg.o fw/pnvm.o fw/dump.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-$(CONFIG_EFI) += fw/uefi.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
iwlwifi-objs += $(iwlwifi-m)


@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
* Copyright (C) 2018-2020 Intel Corporation
* Copyright (C) 2018-2021 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@ -9,7 +9,7 @@
#include "iwl-prph.h"
/* Highest firmware API version supported */
#define IWL_22000_UCODE_API_MAX 63
#define IWL_22000_UCODE_API_MAX 64
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@ -47,6 +47,7 @@
#define IWL_MA_A_GF_A_FW_PRE "iwlwifi-ma-a0-gf-a0-"
#define IWL_MA_A_GF4_A_FW_PRE "iwlwifi-ma-a0-gf4-a0-"
#define IWL_MA_A_MR_A_FW_PRE "iwlwifi-ma-a0-mr-a0-"
#define IWL_MA_A_FM_A_FW_PRE "iwlwifi-ma-a0-fm-a0-"
#define IWL_SNJ_A_MR_A_FW_PRE "iwlwifi-SoSnj-a0-mr-a0-"
#define IWL_BZ_A_HR_B_FW_PRE "iwlwifi-bz-a0-hr-b0-"
#define IWL_BZ_A_GF_A_FW_PRE "iwlwifi-bz-a0-gf-a0-"
@ -93,6 +94,8 @@
IWL_MA_A_GF4_A_FW_PRE __stringify(api) ".ucode"
#define IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(api) \
IWL_MA_A_MR_A_FW_PRE __stringify(api) ".ucode"
#define IWL_MA_A_FM_A_FW_MODULE_FIRMWARE(api) \
IWL_MA_A_FM_A_FW_PRE __stringify(api) ".ucode"
#define IWL_SNJ_A_MR_A_MODULE_FIRMWARE(api) \
IWL_SNJ_A_MR_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \
@ -389,6 +392,7 @@ const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz";
const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz";
const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz";
const char iwl_ax200_killer_1650w_name[] =
@ -724,6 +728,13 @@ const struct iwl_cfg iwl_cfg_ma_a0_mr_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_ma_a0_fm_a0 = {
.fw_name_pre = IWL_MA_A_FM_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_snj_a0_mr_a0 = {
.fw_name_pre = IWL_SNJ_A_MR_A_FW_PRE,
.uhb_supported = true,
@ -797,6 +808,7 @@ MODULE_FIRMWARE(IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_FM_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));


@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
* Copyright (C) 2018-2020 Intel Corporation
* Copyright (C) 2018-2021 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@ -171,8 +171,12 @@ const char iwl9260_killer_1550_name[] =
"Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW) 160MHz";
const char iwl9560_killer_1550i_name[] =
"Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)";
const char iwl9560_killer_1550i_160_name[] =
"Killer(R) Wireless-AC 1550i Wireless Network Adapter (9560NGW) 160MHz";
const char iwl9560_killer_1550s_name[] =
"Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)";
const char iwl9560_killer_1550s_160_name[] =
"Killer(R) Wireless-AC 1550s Wireless Network Adapter (9560D2W) 160MHz";
const struct iwl_cfg iwl9260_2ac_cfg = {
.fw_name_pre = IWL9260_FW_PRE,


@ -163,6 +163,27 @@ int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);
/*
* Evaluate a DSM with no arguments and a u32 return value,
*/
int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
const guid_t *guid, u32 *value)
{
int ret;
u64 val;
ret = iwl_acpi_get_dsm_integer(dev, rev, func,
guid, &val, sizeof(u32));
if (ret < 0)
return ret;
/* cast val (u64) to be u32 */
*value = (u32)val;
return 0;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u32);
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
int data_size, int *tbl_rev)
@ -696,68 +717,37 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
}
IWL_EXPORT_SYMBOL(iwl_sar_geo_init);
static u32 iwl_acpi_eval_dsm_func(struct device *dev, enum iwl_dsm_funcs_rev_0 eval_func)
{
union acpi_object *obj;
u32 ret;
obj = iwl_acpi_get_dsm_object(dev, 0,
eval_func, NULL,
&iwl_guid);
if (IS_ERR(obj)) {
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: DSM func '%d': Got Error in obj = %ld\n",
eval_func,
PTR_ERR(obj));
return 0;
}
if (obj->type != ACPI_TYPE_INTEGER) {
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: DSM func '%d' did not return a valid object, type=%d\n",
eval_func,
obj->type);
ret = 0;
goto out;
}
ret = obj->integer.value;
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: DSM method evaluated: func='%d', ret=%d\n",
eval_func,
ret);
out:
ACPI_FREE(obj);
return ret;
}
__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
{
u32 ret;
int ret;
u8 value;
__le32 config_bitmap = 0;
/*
** Evaluate func 'DSM_FUNC_ENABLE_INDONESIA_5G2'
*/
ret = iwl_acpi_eval_dsm_func(fwrt->dev, DSM_FUNC_ENABLE_INDONESIA_5G2);
ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
DSM_FUNC_ENABLE_INDONESIA_5G2,
&iwl_guid, &value);
if (ret == DSM_VALUE_INDONESIA_ENABLE)
if (!ret && value == DSM_VALUE_INDONESIA_ENABLE)
config_bitmap |=
cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
/*
** Evaluate func 'DSM_FUNC_DISABLE_SRD'
*/
ret = iwl_acpi_eval_dsm_func(fwrt->dev, DSM_FUNC_DISABLE_SRD);
if (ret == DSM_VALUE_SRD_PASSIVE)
config_bitmap |=
cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
else if (ret == DSM_VALUE_SRD_DISABLE)
config_bitmap |=
cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
DSM_FUNC_DISABLE_SRD,
&iwl_guid, &value);
if (!ret) {
if (value == DSM_VALUE_SRD_PASSIVE)
config_bitmap |=
cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
else if (value == DSM_VALUE_SRD_DISABLE)
config_bitmap |=
cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
}
return config_bitmap;
}


@ -78,6 +78,7 @@ enum iwl_dsm_funcs_rev_0 {
DSM_FUNC_DISABLE_SRD = 1,
DSM_FUNC_ENABLE_INDONESIA_5G2 = 2,
DSM_FUNC_11AX_ENABLEMENT = 6,
DSM_FUNC_ENABLE_UNII4_CHAN = 7
};
enum iwl_dsm_values_srd {
@ -116,6 +117,9 @@ void *iwl_acpi_get_object(struct device *dev, acpi_string method);
int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
const guid_t *guid, u8 *value);
int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
const guid_t *guid, u32 *value);
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
int data_size, int *tbl_rev);
@ -182,6 +186,12 @@ static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
return -ENOENT;
}
static inline int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
const guid_t *guid, u32 *value)
{
return -ENOENT;
}
static inline union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
int data_size,


@ -534,11 +534,6 @@ enum iwl_legacy_cmds {
*/
OFFLOADS_QUERY_CMD = 0xd5,
/**
* @REMOTE_WAKE_CONFIG_CMD: &struct iwl_wowlan_remote_wake_config
*/
REMOTE_WAKE_CONFIG_CMD = 0xd6,
/**
* @D0I3_END_CMD: End D0i3/D3 state, no command data
*/


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@ -159,6 +159,22 @@ struct iwl_proto_offload_cmd_v3_large {
struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L];
} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */
/**
* struct iwl_proto_offload_cmd_v4 - ARP/NS offload configuration
* @sta_id: station id
* @common: common/IPv4 configuration
* @num_valid_ipv6_addrs: number of valid IPv6 addresses
* @targ_addrs: target IPv6 addresses
* @ns_config: NS offload configurations
*/
struct iwl_proto_offload_cmd_v4 {
__le32 sta_id;
struct iwl_proto_offload_cmd_common common;
__le32 num_valid_ipv6_addrs;
struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L];
struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L];
} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_4 */
/*
* WOWLAN_PATTERNS
*/
@ -302,13 +318,23 @@ struct iwl_wowlan_patterns_cmd {
/**
* @n_patterns: number of patterns
*/
__le32 n_patterns;
u8 n_patterns;
/**
* @n_patterns: sta_id
*/
u8 sta_id;
/**
* @reserved: reserved for alignment
*/
__le16 reserved;
/**
* @patterns: the patterns, array length in @n_patterns
*/
struct iwl_wowlan_pattern_v2 patterns[];
} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_2 */
} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_3 */
enum iwl_wowlan_wakeup_filters {
IWL_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0),
@ -339,9 +365,10 @@ enum iwl_wowlan_flags {
};
/**
* struct iwl_wowlan_config_cmd - WoWLAN configuration
* struct iwl_wowlan_config_cmd - WoWLAN configuration (versions 5 and 6)
* @wakeup_filter: filter from &enum iwl_wowlan_wakeup_filters
* @non_qos_seq: non-QoS sequence counter to use next
* @non_qos_seq: non-QoS sequence counter to use next.
* Reserved if the struct has version >= 6.
* @qos_seq: QoS sequence counters to use next
* @wowlan_ba_teardown_tids: bitmap of BA sessions to tear down
* @is_11n_connection: indicates HT connection
@ -456,6 +483,23 @@ struct iwl_wowlan_kek_kck_material_cmd_v3 {
__le32 bigtk_cipher;
} __packed; /* KEK_KCK_MATERIAL_API_S_VER_3 */
struct iwl_wowlan_kek_kck_material_cmd_v4 {
__le32 sta_id;
u8 kck[IWL_KCK_MAX_SIZE];
u8 kek[IWL_KEK_MAX_SIZE];
__le16 kck_len;
__le16 kek_len;
__le64 replay_ctr;
__le32 akm;
__le32 gtk_cipher;
__le32 igtk_cipher;
__le32 bigtk_cipher;
} __packed; /* KEK_KCK_MATERIAL_API_S_VER_4 */
struct iwl_wowlan_get_status_cmd {
__le32 sta_id;
} __packed; /* WOWLAN_GET_STATUSES_CMD_API_S_VER_1 */
#define RF_KILL_INDICATOR_FOR_WOWLAN 0x87
enum iwl_wowlan_rekey_status {
@ -604,12 +648,13 @@ struct iwl_wowlan_status_v7 {
} __packed; /* WOWLAN_STATUSES_API_S_VER_7 */
/**
* struct iwl_wowlan_status_v9 - WoWLAN status (version 9)
* struct iwl_wowlan_status_v9 - WoWLAN status (versions 9 and 10)
* @gtk: GTK data
* @igtk: IGTK data
* @replay_ctr: GTK rekey replay counter
* @pattern_number: number of the matched pattern
* @non_qos_seq_ctr: non-QoS sequence counter to use next
* @non_qos_seq_ctr: non-QoS sequence counter to use next.
* Reserved if the struct has version >= 10.
* @qos_seq_ctr: QoS sequence counters to use next
* @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
* @num_of_gtk_rekeys: number of GTK rekeys
@ -638,7 +683,7 @@ struct iwl_wowlan_status_v9 {
u8 tid_tear_down;
u8 reserved[3];
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
} __packed; /* WOWLAN_STATUSES_API_S_VER_9 */
} __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_9 */
/**
* struct iwl_wowlan_status - WoWLAN status
@ -683,55 +728,6 @@ static inline u8 iwlmvm_wowlan_gtk_idx(struct iwl_wowlan_gtk_status *gtk)
return gtk->key_flags & IWL_WOWLAN_GTK_IDX_MASK;
}
#define IWL_WOWLAN_TCP_MAX_PACKET_LEN 64
#define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN 128
#define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS 2048
struct iwl_tcp_packet_info {
__le16 tcp_pseudo_header_checksum;
__le16 tcp_payload_length;
} __packed; /* TCP_PACKET_INFO_API_S_VER_2 */
struct iwl_tcp_packet {
struct iwl_tcp_packet_info info;
u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
u8 data[IWL_WOWLAN_TCP_MAX_PACKET_LEN];
} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */
struct iwl_remote_wake_packet {
struct iwl_tcp_packet_info info;
u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
u8 data[IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN];
} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */
struct iwl_wowlan_remote_wake_config {
__le32 connection_max_time; /* unused */
/* TCP_PROTOCOL_CONFIG_API_S_VER_1 */
u8 max_syn_retries;
u8 max_data_retries;
u8 tcp_syn_ack_timeout;
u8 tcp_ack_timeout;
struct iwl_tcp_packet syn_tx;
struct iwl_tcp_packet synack_rx;
struct iwl_tcp_packet keepalive_ack_rx;
struct iwl_tcp_packet fin_tx;
struct iwl_remote_wake_packet keepalive_tx;
struct iwl_remote_wake_packet wake_rx;
/* REMOTE_WAKE_OFFSET_INFO_API_S_VER_1 */
u8 sequence_number_offset;
u8 sequence_number_length;
u8 token_offset;
u8 token_length;
/* REMOTE_WAKE_PROTOCOL_PARAMS_API_S_VER_1 */
__le32 initial_sequence_number;
__le16 keepalive_interval;
__le16 num_tokens;
u8 tokens[IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS];
} __packed; /* REMOTE_WAKE_CONFIG_API_S_VER_2 */
/* TODO: NetDetect API */
#endif /* __iwl_fw_api_d3_h__ */


@ -63,6 +63,12 @@ enum iwl_data_path_subcmd_ids {
*/
RX_NO_DATA_NOTIF = 0xF5,
/**
* @THERMAL_DUAL_CHAIN_DISABLE_REQ: firmware request for SMPS mode,
* &struct iwl_thermal_dual_chain_request
*/
THERMAL_DUAL_CHAIN_REQUEST = 0xF6,
/**
* @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif
*/
@ -169,4 +175,24 @@ struct iwl_datapath_monitor_notif {
u8 reserved[3];
} __packed; /* MONITOR_NTF_API_S_VER_1 */
/**
* enum iwl_thermal_dual_chain_req_events - firmware SMPS request event
* @THERMAL_DUAL_CHAIN_REQ_ENABLE: (re-)enable dual-chain operation
* (subject to other constraints)
* @THERMAL_DUAL_CHAIN_REQ_DISABLE: disable dual-chain operation
* (static SMPS)
*/
enum iwl_thermal_dual_chain_req_events {
THERMAL_DUAL_CHAIN_REQ_ENABLE,
THERMAL_DUAL_CHAIN_REQ_DISABLE,
}; /* THERMAL_DUAL_CHAIN_DISABLE_STATE_API_E_VER_1 */
/**
* struct iwl_thermal_dual_chain_request - SMPS request
* @event: the type of request, see &enum iwl_thermal_dual_chain_req_events
*/
struct iwl_thermal_dual_chain_request {
__le32 event;
} __packed; /* THERMAL_DUAL_CHAIN_DISABLE_REQ_NTFY_API_S_VER_1 */
#endif /* __iwl_fw_api_datapath_h__ */


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2018-2020 Intel Corporation
* Copyright (C) 2018-2021 Intel Corporation
*/
#ifndef __iwl_fw_dbg_tlv_h__
#define __iwl_fw_dbg_tlv_h__
@ -11,6 +11,7 @@
#define IWL_FW_INI_MAX_NAME 32
#define IWL_FW_INI_MAX_CFG_NAME 64
#define IWL_FW_INI_DOMAIN_ALWAYS_ON 0
#define IWL_FW_INI_REGION_V2_MASK 0x0000FFFF
/**
* struct iwl_fw_ini_hcmd


@ -452,6 +452,25 @@ struct iwl_lari_config_change_cmd_v3 {
__le32 oem_11ax_allow_bitmap;
} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_3 */
/**
* struct iwl_lari_config_change_cmd_v4 - change LARI configuration
* @config_bitmap: Bitmap of the config commands. Each bit will trigger a
* different predefined FW config operation.
* @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
* @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits
* per country, one to indicate whether to override and the other to
* indicate the value to use.
* @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits
* per country, one to indicate whether to override and the other to
* indicate allow/disallow unii4 channels.
*/
struct iwl_lari_config_change_cmd_v4 {
__le32 config_bitmap;
__le32 oem_uhb_allow_bitmap;
__le32 oem_11ax_allow_bitmap;
__le32 oem_unii4_allow_bitmap;
} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_4 */
/**
* struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete
* @status: PNVM image loading status


@ -1933,6 +1933,13 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
u32 num_of_ranges, i, size;
void *range;
/*
* The higher part of the ID in version 2 is irrelevant for
* us, so mask it out.
*/
if (le32_to_cpu(reg->hdr.version) == 2)
id &= IWL_FW_INI_REGION_V2_MASK;
if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr ||
!ops->fill_range)
return 0;
@ -1957,7 +1964,7 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
num_of_ranges = ops->get_num_of_ranges(fwrt, reg_data);
header = (void *)tlv->data;
header->region_id = reg->id;
header->region_id = cpu_to_le32(id);
header->num_of_ranges = cpu_to_le32(num_of_ranges);
header->name_len = cpu_to_le32(IWL_FW_INI_MAX_NAME);
memcpy(header->name, reg->name, IWL_FW_INI_MAX_NAME);
@ -2752,44 +2759,6 @@ void iwl_fw_dbg_stop_sync(struct iwl_fw_runtime *fwrt)
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_sync);
#define FSEQ_REG(x) { .addr = (x), .str = #x, }
void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt)
{
struct iwl_trans *trans = fwrt->trans;
int i;
struct {
u32 addr;
const char *str;
} fseq_regs[] = {
FSEQ_REG(FSEQ_ERROR_CODE),
FSEQ_REG(FSEQ_TOP_INIT_VERSION),
FSEQ_REG(FSEQ_CNVIO_INIT_VERSION),
FSEQ_REG(FSEQ_OTP_VERSION),
FSEQ_REG(FSEQ_TOP_CONTENT_VERSION),
FSEQ_REG(FSEQ_ALIVE_TOKEN),
FSEQ_REG(FSEQ_CNVI_ID),
FSEQ_REG(FSEQ_CNVR_ID),
FSEQ_REG(CNVI_AUX_MISC_CHIP),
FSEQ_REG(CNVR_AUX_MISC_CHIP),
FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM),
FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR),
};
if (!iwl_trans_grab_nic_access(trans))
return;
IWL_ERR(fwrt, "Fseq Registers:\n");
for (i = 0; i < ARRAY_SIZE(fseq_regs); i++)
IWL_ERR(fwrt, "0x%08X | %s\n",
iwl_read_prph_no_grab(trans, fseq_regs[i].addr),
fseq_regs[i].str);
iwl_trans_release_nic_access(trans);
}
IWL_EXPORT_SYMBOL(iwl_fw_error_print_fseq_regs);
static int iwl_fw_dbg_suspend_resume_hcmd(struct iwl_trans *trans, bool suspend)
{
struct iwl_dbg_suspend_resume_cmd cmd = {


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2018-2019 Intel Corporation
* Copyright (C) 2005-2014, 2018-2019, 2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@ -321,4 +321,6 @@ static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
fwrt->dump.fw_ver.umac_minor = le32_to_cpu(umac->umac_minor);
}
}
void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt);
#endif /* __iwl_fw_dbg_h__ */


@ -0,0 +1,418 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
#include <linux/devcoredump.h>
#include "iwl-drv.h"
#include "runtime.h"
#include "dbg.h"
#include "debugfs.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "iwl-csr.h"
/*
* Note: This structure is read from the device with IO accesses,
* and the reading already does the endian conversion. As it is
* read with u32-sized accesses, any members with a different size
* need to be ordered correctly though!
*/
struct iwl_error_event_table_v1 {
u32 valid; /* (nonzero) valid, (0) log is empty */
u32 error_id; /* type of error */
u32 pc; /* program counter */
u32 blink1; /* branch link */
u32 blink2; /* branch link */
u32 ilink1; /* interrupt link */
u32 ilink2; /* interrupt link */
u32 data1; /* error-specific data */
u32 data2; /* error-specific data */
u32 data3; /* error-specific data */
u32 bcon_time; /* beacon timer */
u32 tsf_low; /* network timestamp function timer */
u32 tsf_hi; /* network timestamp function timer */
u32 gp1; /* GP1 timer register */
u32 gp2; /* GP2 timer register */
u32 gp3; /* GP3 timer register */
u32 ucode_ver; /* uCode version */
u32 hw_ver; /* HW Silicon version */
u32 brd_ver; /* HW board version */
u32 log_pc; /* log program counter */
u32 frame_ptr; /* frame pointer */
u32 stack_ptr; /* stack pointer */
u32 hcmd; /* last host command header */
u32 isr0; /* isr status register LMPM_NIC_ISR0:
* rxtx_flag */
u32 isr1; /* isr status register LMPM_NIC_ISR1:
* host_flag */
u32 isr2; /* isr status register LMPM_NIC_ISR2:
* enc_flag */
u32 isr3; /* isr status register LMPM_NIC_ISR3:
* time_flag */
u32 isr4; /* isr status register LMPM_NIC_ISR4:
* wico interrupt */
u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
u32 wait_event; /* wait event() caller address */
u32 l2p_control; /* L2pControlField */
u32 l2p_duration; /* L2pDurationField */
u32 l2p_mhvalid; /* L2pMhValidBits */
u32 l2p_addr_match; /* L2pAddrMatchStat */
u32 lmpm_pmg_sel; /* indicate which clocks are turned on
* (LMPM_PMG_SEL) */
u32 u_timestamp; /* indicate when the date and time of the
* compilation */
u32 flow_handler; /* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
struct iwl_error_event_table {
u32 valid; /* (nonzero) valid, (0) log is empty */
u32 error_id; /* type of error */
u32 trm_hw_status0; /* TRM HW status */
u32 trm_hw_status1; /* TRM HW status */
u32 blink2; /* branch link */
u32 ilink1; /* interrupt link */
u32 ilink2; /* interrupt link */
u32 data1; /* error-specific data */
u32 data2; /* error-specific data */
u32 data3; /* error-specific data */
u32 bcon_time; /* beacon timer */
u32 tsf_low; /* network timestamp function timer */
u32 tsf_hi; /* network timestamp function timer */
u32 gp1; /* GP1 timer register */
u32 gp2; /* GP2 timer register */
u32 fw_rev_type; /* firmware revision type */
u32 major; /* uCode version major */
u32 minor; /* uCode version minor */
u32 hw_ver; /* HW Silicon version */
u32 brd_ver; /* HW board version */
u32 log_pc; /* log program counter */
u32 frame_ptr; /* frame pointer */
u32 stack_ptr; /* stack pointer */
u32 hcmd; /* last host command header */
u32 isr0; /* isr status register LMPM_NIC_ISR0:
* rxtx_flag */
u32 isr1; /* isr status register LMPM_NIC_ISR1:
* host_flag */
u32 isr2; /* isr status register LMPM_NIC_ISR2:
* enc_flag */
u32 isr3; /* isr status register LMPM_NIC_ISR3:
* time_flag */
u32 isr4; /* isr status register LMPM_NIC_ISR4:
* wico interrupt */
u32 last_cmd_id; /* last HCMD id handled by the firmware */
u32 wait_event; /* wait event() caller address */
u32 l2p_control; /* L2pControlField */
u32 l2p_duration; /* L2pDurationField */
u32 l2p_mhvalid; /* L2pMhValidBits */
u32 l2p_addr_match; /* L2pAddrMatchStat */
u32 lmpm_pmg_sel; /* indicate which clocks are turned on
* (LMPM_PMG_SEL) */
u32 u_timestamp; /* indicate when the date and time of the
* compilation */
u32 flow_handler; /* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
/*
* UMAC error struct - relevant starting from family 8000 chip.
* Note: This structure is read from the device with IO accesses,
* and the reading already does the endian conversion. As it is
* read with u32-sized accesses, any members with a different size
* need to be ordered correctly though!
*/
struct iwl_umac_error_event_table {
u32 valid; /* (nonzero) valid, (0) log is empty */
u32 error_id; /* type of error */
u32 blink1; /* branch link */
u32 blink2; /* branch link */
u32 ilink1; /* interrupt link */
u32 ilink2; /* interrupt link */
u32 data1; /* error-specific data */
u32 data2; /* error-specific data */
u32 data3; /* error-specific data */
u32 umac_major;
u32 umac_minor;
u32 frame_pointer; /* core register 27*/
u32 stack_pointer; /* core register 28 */
u32 cmd_header; /* latest host cmd sent to UMAC */
u32 nic_isr_pref; /* ISR status register */
} __packed;
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))
static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt)
{
struct iwl_trans *trans = fwrt->trans;
struct iwl_umac_error_event_table table = {};
u32 base = fwrt->trans->dbg.umac_error_event_table;
if (!base &&
!(fwrt->trans->dbg.error_event_table_tlv_status &
IWL_ERROR_EVENT_TABLE_UMAC))
return;
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
if (table.valid)
fwrt->dump.umac_err_id = table.error_id;
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
IWL_ERR(trans, "Transport status: 0x%08lX, valid: %d\n",
fwrt->trans->status, table.valid);
}
IWL_ERR(fwrt, "0x%08X | %s\n", table.error_id,
iwl_fw_lookup_assert_desc(table.error_id));
IWL_ERR(fwrt, "0x%08X | umac branchlink1\n", table.blink1);
IWL_ERR(fwrt, "0x%08X | umac branchlink2\n", table.blink2);
IWL_ERR(fwrt, "0x%08X | umac interruptlink1\n", table.ilink1);
IWL_ERR(fwrt, "0x%08X | umac interruptlink2\n", table.ilink2);
IWL_ERR(fwrt, "0x%08X | umac data1\n", table.data1);
IWL_ERR(fwrt, "0x%08X | umac data2\n", table.data2);
IWL_ERR(fwrt, "0x%08X | umac data3\n", table.data3);
IWL_ERR(fwrt, "0x%08X | umac major\n", table.umac_major);
IWL_ERR(fwrt, "0x%08X | umac minor\n", table.umac_minor);
IWL_ERR(fwrt, "0x%08X | frame pointer\n", table.frame_pointer);
IWL_ERR(fwrt, "0x%08X | stack pointer\n", table.stack_pointer);
IWL_ERR(fwrt, "0x%08X | last host cmd\n", table.cmd_header);
IWL_ERR(fwrt, "0x%08X | isr status reg\n", table.nic_isr_pref);
}
static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_num)
{
struct iwl_trans *trans = fwrt->trans;
struct iwl_error_event_table table = {};
u32 val, base = fwrt->trans->dbg.lmac_error_event_table[lmac_num];
if (fwrt->cur_fw_img == IWL_UCODE_INIT) {
if (!base)
base = fwrt->fw->init_errlog_ptr;
} else {
if (!base)
base = fwrt->fw->inst_errlog_ptr;
}
if (base < 0x400000) {
IWL_ERR(fwrt,
"Not valid error log pointer 0x%08X for %s uCode\n",
base,
(fwrt->cur_fw_img == IWL_UCODE_INIT)
? "Init" : "RT");
return;
}
/* check if there is a HW error */
val = iwl_trans_read_mem32(trans, base);
if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) {
int err;
IWL_ERR(trans, "HW error, resetting before reading\n");
/* reset the device */
iwl_trans_sw_reset(trans);
err = iwl_finish_nic_init(trans, trans->trans_cfg);
if (err)
return;
}
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
if (table.valid)
fwrt->dump.lmac_err_id[lmac_num] = table.error_id;
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
IWL_ERR(trans, "Transport status: 0x%08lX, valid: %d\n",
fwrt->trans->status, table.valid);
}
/* Do not change this output - scripts rely on it */
IWL_ERR(fwrt, "Loaded firmware version: %s\n", fwrt->fw->fw_version);
IWL_ERR(fwrt, "0x%08X | %-28s\n", table.error_id,
iwl_fw_lookup_assert_desc(table.error_id));
IWL_ERR(fwrt, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
IWL_ERR(fwrt, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
IWL_ERR(fwrt, "0x%08X | branchlink2\n", table.blink2);
IWL_ERR(fwrt, "0x%08X | interruptlink1\n", table.ilink1);
IWL_ERR(fwrt, "0x%08X | interruptlink2\n", table.ilink2);
IWL_ERR(fwrt, "0x%08X | data1\n", table.data1);
IWL_ERR(fwrt, "0x%08X | data2\n", table.data2);
IWL_ERR(fwrt, "0x%08X | data3\n", table.data3);
IWL_ERR(fwrt, "0x%08X | beacon time\n", table.bcon_time);
IWL_ERR(fwrt, "0x%08X | tsf low\n", table.tsf_low);
IWL_ERR(fwrt, "0x%08X | tsf hi\n", table.tsf_hi);
IWL_ERR(fwrt, "0x%08X | time gp1\n", table.gp1);
IWL_ERR(fwrt, "0x%08X | time gp2\n", table.gp2);
IWL_ERR(fwrt, "0x%08X | uCode revision type\n", table.fw_rev_type);
IWL_ERR(fwrt, "0x%08X | uCode version major\n", table.major);
IWL_ERR(fwrt, "0x%08X | uCode version minor\n", table.minor);
IWL_ERR(fwrt, "0x%08X | hw version\n", table.hw_ver);
IWL_ERR(fwrt, "0x%08X | board version\n", table.brd_ver);
IWL_ERR(fwrt, "0x%08X | hcmd\n", table.hcmd);
IWL_ERR(fwrt, "0x%08X | isr0\n", table.isr0);
IWL_ERR(fwrt, "0x%08X | isr1\n", table.isr1);
IWL_ERR(fwrt, "0x%08X | isr2\n", table.isr2);
IWL_ERR(fwrt, "0x%08X | isr3\n", table.isr3);
IWL_ERR(fwrt, "0x%08X | isr4\n", table.isr4);
IWL_ERR(fwrt, "0x%08X | last cmd Id\n", table.last_cmd_id);
IWL_ERR(fwrt, "0x%08X | wait_event\n", table.wait_event);
IWL_ERR(fwrt, "0x%08X | l2p_control\n", table.l2p_control);
IWL_ERR(fwrt, "0x%08X | l2p_duration\n", table.l2p_duration);
IWL_ERR(fwrt, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
IWL_ERR(fwrt, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
IWL_ERR(fwrt, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
IWL_ERR(fwrt, "0x%08X | timestamp\n", table.u_timestamp);
IWL_ERR(fwrt, "0x%08X | flow_handler\n", table.flow_handler);
}
/*
* TCM error struct.
* Note: This structure is read from the device with IO accesses,
* and the reading already does the endian conversion. As it is
* read with u32-sized accesses, any members with a different size
* need to be ordered correctly though!
*/
struct iwl_tcm_error_event_table {
u32 valid;
u32 error_id;
u32 blink2;
u32 ilink1;
u32 ilink2;
u32 data1, data2, data3;
u32 logpc;
u32 frame_pointer;
u32 stack_pointer;
u32 msgid;
u32 isr;
u32 hw_status[5];
u32 sw_status[1];
u32 reserved[4];
} __packed; /* TCM_LOG_ERROR_TABLE_API_S_VER_1 */
static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt)
{
struct iwl_trans *trans = fwrt->trans;
struct iwl_tcm_error_event_table table = {};
u32 base = fwrt->trans->dbg.tcm_error_event_table;
int i;
if (!base ||
!(fwrt->trans->dbg.error_event_table_tlv_status &
IWL_ERROR_EVENT_TABLE_TCM))
return;
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
IWL_ERR(fwrt, "TCM status:\n");
IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
IWL_ERR(fwrt, "0x%08X | tcm branchlink2\n", table.blink2);
IWL_ERR(fwrt, "0x%08X | tcm interruptlink1\n", table.ilink1);
IWL_ERR(fwrt, "0x%08X | tcm interruptlink2\n", table.ilink2);
IWL_ERR(fwrt, "0x%08X | tcm data1\n", table.data1);
IWL_ERR(fwrt, "0x%08X | tcm data2\n", table.data2);
IWL_ERR(fwrt, "0x%08X | tcm data3\n", table.data3);
IWL_ERR(fwrt, "0x%08X | tcm log PC\n", table.logpc);
IWL_ERR(fwrt, "0x%08X | tcm frame pointer\n", table.frame_pointer);
IWL_ERR(fwrt, "0x%08X | tcm stack pointer\n", table.stack_pointer);
IWL_ERR(fwrt, "0x%08X | tcm msg ID\n", table.msgid);
IWL_ERR(fwrt, "0x%08X | tcm ISR status\n", table.isr);
for (i = 0; i < ARRAY_SIZE(table.hw_status); i++)
IWL_ERR(fwrt, "0x%08X | tcm HW status[%d]\n",
table.hw_status[i], i);
for (i = 0; i < ARRAY_SIZE(table.sw_status); i++)
IWL_ERR(fwrt, "0x%08X | tcm SW status[%d]\n",
table.sw_status[i], i);
}
static void iwl_fwrt_dump_iml_error_log(struct iwl_fw_runtime *fwrt)
{
struct iwl_trans *trans = fwrt->trans;
u32 error, data1;
if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
error = UMAG_SB_CPU_2_STATUS;
data1 = UMAG_SB_CPU_1_STATUS;
} else if (fwrt->trans->trans_cfg->device_family >=
IWL_DEVICE_FAMILY_8000) {
error = SB_CPU_2_STATUS;
data1 = SB_CPU_1_STATUS;
} else {
return;
}
error = iwl_read_umac_prph(trans, UMAG_SB_CPU_2_STATUS);
IWL_ERR(trans, "IML/ROM dump:\n");
if (error & 0xFFFF0000)
IWL_ERR(trans, "0x%04X | IML/ROM SYSASSERT\n", error >> 16);
IWL_ERR(fwrt, "0x%08X | IML/ROM error/state\n", error);
IWL_ERR(fwrt, "0x%08X | IML/ROM data1\n",
iwl_read_umac_prph(trans, data1));
if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
IWL_ERR(fwrt, "0x%08X | IML/ROM WFPM_AUTH_KEY_0\n",
iwl_read_umac_prph(trans, SB_MODIFY_CFG_FLAG));
}
#define FSEQ_REG(x) { .addr = (x), .str = #x, }
static void iwl_fwrt_dump_fseq_regs(struct iwl_fw_runtime *fwrt)
{
struct iwl_trans *trans = fwrt->trans;
int i;
struct {
u32 addr;
const char *str;
} fseq_regs[] = {
FSEQ_REG(FSEQ_ERROR_CODE),
FSEQ_REG(FSEQ_TOP_INIT_VERSION),
FSEQ_REG(FSEQ_CNVIO_INIT_VERSION),
FSEQ_REG(FSEQ_OTP_VERSION),
FSEQ_REG(FSEQ_TOP_CONTENT_VERSION),
FSEQ_REG(FSEQ_ALIVE_TOKEN),
FSEQ_REG(FSEQ_CNVI_ID),
FSEQ_REG(FSEQ_CNVR_ID),
FSEQ_REG(CNVI_AUX_MISC_CHIP),
FSEQ_REG(CNVR_AUX_MISC_CHIP),
FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM),
FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR),
};
if (!iwl_trans_grab_nic_access(trans))
return;
IWL_ERR(fwrt, "Fseq Registers:\n");
for (i = 0; i < ARRAY_SIZE(fseq_regs); i++)
IWL_ERR(fwrt, "0x%08X | %s\n",
iwl_read_prph_no_grab(trans, fseq_regs[i].addr),
fseq_regs[i].str);
iwl_trans_release_nic_access(trans);
}
void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt)
{
if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) {
IWL_ERR(fwrt,
"DEVICE_ENABLED bit is not set. Aborting dump.\n");
return;
}
iwl_fwrt_dump_lmac_error_log(fwrt, 0);
if (fwrt->trans->dbg.lmac_error_event_table[1])
iwl_fwrt_dump_lmac_error_log(fwrt, 1);
iwl_fwrt_dump_umac_error_log(fwrt);
iwl_fwrt_dump_tcm_error_log(fwrt);
iwl_fwrt_dump_iml_error_log(fwrt);
iwl_fwrt_dump_fseq_regs(fwrt);
}
IWL_EXPORT_SYMBOL(iwl_fwrt_dump_error_logs);


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2008-2014, 2018-2020 Intel Corporation
* Copyright (C) 2008-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -52,7 +52,8 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_INIT_DATA = 4,
IWL_UCODE_TLV_BOOT = 5,
IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */
IWL_UCODE_TLV_PAN = 7,
IWL_UCODE_TLV_PAN = 7, /* deprecated -- only used in DVM */
IWL_UCODE_TLV_MEM_DESC = 7, /* replaces PAN in non-DVM */
IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8,
IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9,
IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10,
@ -97,6 +98,7 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_PNVM_VERSION = 62,
IWL_UCODE_TLV_PNVM_SKU = 64,
IWL_UCODE_TLV_TCM_DEBUG_ADDRS = 65,
IWL_UCODE_TLV_FW_NUM_STATIONS = IWL_UCODE_TLV_CONST_BASE + 0,
@ -277,10 +279,11 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_BAND_IN_RX_DATA = (__force iwl_ucode_tlv_api_t)59,
NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
/* sparse says it cannot increment the previous enum member */
= 128
/* sparse says it cannot increment the previous enum member */
#define NUM_IWL_UCODE_TLV_API 128
#else
NUM_IWL_UCODE_TLV_API
#endif
};
@ -411,6 +414,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_PROTECTED_TWT = (__force iwl_ucode_tlv_capa_t)56,
IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE = (__force iwl_ucode_tlv_capa_t)57,
IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN = (__force iwl_ucode_tlv_capa_t)58,
IWL_UCODE_TLV_CAPA_BROADCAST_TWT = (__force iwl_ucode_tlv_capa_t)60,
/* set 2 */
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
@ -446,10 +450,11 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100,
IWL_UCODE_TLV_CAPA_RFIM_SUPPORT = (__force iwl_ucode_tlv_capa_t)102,
NUM_IWL_UCODE_TLV_CAPA
#ifdef __CHECKER__
/* sparse says it cannot increment the previous enum member */
= 128
/* sparse says it cannot increment the previous enum member */
#define NUM_IWL_UCODE_TLV_CAPA 128
#else
NUM_IWL_UCODE_TLV_CAPA
#endif
};
@ -946,6 +951,10 @@ struct iwl_fw_cmd_version {
u8 notif_ver;
} __packed;
struct iwl_fw_tcm_error_addr {
__le32 addr;
}; /* FW_TLV_TCM_ERROR_INFO_ADDRS_S */
static inline size_t _iwl_tlv_array_len(const struct iwl_ucode_tlv *tlv,
size_t fixed_size, size_t var_size)
{


@ -10,7 +10,7 @@
#include "fw/api/commands.h"
#include "fw/api/nvm-reg.h"
#include "fw/api/alive.h"
#include <linux/efi.h>
#include "fw/uefi.h"
struct iwl_pnvm_section {
__le32 offset;
@ -220,83 +220,6 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
return -ENOENT;
}
#if defined(CONFIG_EFI)
#define IWL_EFI_VAR_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b, \
0xb2, 0xec, 0xf5, 0xa3, \
0x59, 0x4f, 0x4a, 0xea)
#define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm"
#define IWL_HARDCODED_PNVM_SIZE 4096
struct pnvm_sku_package {
u8 rev;
u8 reserved1[3];
u32 total_size;
u8 n_skus;
u8 reserved2[11];
u8 data[];
};
static int iwl_pnvm_get_from_efi(struct iwl_trans *trans,
u8 **data, size_t *len)
{
struct efivar_entry *pnvm_efivar;
struct pnvm_sku_package *package;
unsigned long package_size;
int err;
pnvm_efivar = kzalloc(sizeof(*pnvm_efivar), GFP_KERNEL);
if (!pnvm_efivar)
return -ENOMEM;
memcpy(&pnvm_efivar->var.VariableName, IWL_UEFI_OEM_PNVM_NAME,
sizeof(IWL_UEFI_OEM_PNVM_NAME));
pnvm_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;
/*
* TODO: we hardcode a maximum length here, because reading
* from the UEFI is not working. To implement this properly,
* we have to call efivar_entry_size().
*/
package_size = IWL_HARDCODED_PNVM_SIZE;
package = kmalloc(package_size, GFP_KERNEL);
if (!package) {
err = -ENOMEM;
goto out;
}
err = efivar_entry_get(pnvm_efivar, NULL, &package_size, package);
if (err) {
IWL_DEBUG_FW(trans,
"PNVM UEFI variable not found %d (len %lu)\n",
err, package_size);
goto out;
}
IWL_DEBUG_FW(trans, "Read PNVM fro UEFI with size %lu\n", package_size);
*data = kmemdup(package->data, *len, GFP_KERNEL);
if (!*data)
err = -ENOMEM;
*len = package_size - sizeof(*package);
out:
kfree(package);
kfree(pnvm_efivar);
return err;
}
#else /* CONFIG_EFI */
static inline int iwl_pnvm_get_from_efi(struct iwl_trans *trans,
u8 **data, size_t *len)
{
return -EOPNOTSUPP;
}
#endif /* CONFIG_EFI */
static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
{
const struct firmware *pnvm;
@ -335,6 +258,7 @@ int iwl_pnvm_load(struct iwl_trans *trans,
{
u8 *data;
size_t len;
struct pnvm_sku_package *package;
struct iwl_notification_wait pnvm_wait;
static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP,
PNVM_INIT_COMPLETE_NTFY) };
@ -356,9 +280,19 @@ int iwl_pnvm_load(struct iwl_trans *trans,
}
/* First attempt to get the PNVM from BIOS */
ret = iwl_pnvm_get_from_efi(trans, &data, &len);
if (!ret)
goto parse;
package = iwl_uefi_get_pnvm(trans, &len);
if (!IS_ERR_OR_NULL(package)) {
data = kmemdup(package->data, len, GFP_KERNEL);
/* free package regardless of whether kmemdup succeeded */
kfree(package);
if (data) {
/* we need only the data size */
len -= sizeof(*package);
goto parse;
}
}
/* If it's not available, try from the filesystem */
ret = iwl_pnvm_get_from_fs(trans, &data, &len);
@ -379,6 +313,30 @@ parse:
kfree(data);
skip_parse:
data = NULL;
/* now try to get the reduce power table, if not loaded yet */
if (!trans->reduce_power_loaded) {
data = iwl_uefi_get_reduced_power(trans, &len);
if (IS_ERR_OR_NULL(data)) {
/*
* Pretend we've loaded it - at least we've tried and
* couldn't load it at all, so there's no point in
* trying again over and over.
*/
trans->reduce_power_loaded = true;
goto skip_reduce_power;
}
}
ret = iwl_trans_set_reduce_power(trans, data, len);
if (ret)
IWL_DEBUG_FW(trans,
"Failed to set reduce power table %d\n",
ret);
kfree(data);
skip_reduce_power:
iwl_init_notification_wait(notif_wait, &pnvm_wait,
ntf_cmds, ARRAY_SIZE(ntf_cmds),
iwl_pnvm_complete_fn, trans);


@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/******************************************************************************
*
* Copyright(c) 2020 Intel Corporation
* Copyright(c) 2020-2021 Intel Corporation
*
*****************************************************************************/
@ -10,7 +10,7 @@
#include "fw/notif-wait.h"
#define MVM_UCODE_PNVM_TIMEOUT (HZ / 10)
#define MVM_UCODE_PNVM_TIMEOUT (HZ / 4)
int iwl_pnvm_load(struct iwl_trans *trans,
struct iwl_notif_wait_data *notif_wait);


@ -0,0 +1,262 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2021 Intel Corporation
*/
#include "iwl-drv.h"
#include "pnvm.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "fw/uefi.h"
#include "fw/api/alive.h"
#include <linux/efi.h>
#define IWL_EFI_VAR_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b, \
0xb2, 0xec, 0xf5, 0xa3, \
0x59, 0x4f, 0x4a, 0xea)
void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
{
struct efivar_entry *pnvm_efivar;
void *data;
unsigned long package_size;
int err;
*len = 0;
pnvm_efivar = kzalloc(sizeof(*pnvm_efivar), GFP_KERNEL);
if (!pnvm_efivar)
return ERR_PTR(-ENOMEM);
memcpy(&pnvm_efivar->var.VariableName, IWL_UEFI_OEM_PNVM_NAME,
sizeof(IWL_UEFI_OEM_PNVM_NAME));
pnvm_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;
/*
* TODO: we hardcode a maximum length here, because reading
* from the UEFI is not working. To implement this properly,
* we have to call efivar_entry_size().
*/
package_size = IWL_HARDCODED_PNVM_SIZE;
data = kmalloc(package_size, GFP_KERNEL);
if (!data) {
data = ERR_PTR(-ENOMEM);
goto out;
}
err = efivar_entry_get(pnvm_efivar, NULL, &package_size, data);
if (err) {
IWL_DEBUG_FW(trans,
"PNVM UEFI variable not found %d (len %zd)\n",
err, package_size);
kfree(data);
data = ERR_PTR(err);
goto out;
}
IWL_DEBUG_FW(trans, "Read PNVM from UEFI with size %zd\n", package_size);
*len = package_size;
out:
kfree(pnvm_efivar);
return data;
}
static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
const u8 *data, size_t len)
{
struct iwl_ucode_tlv *tlv;
u8 *reduce_power_data = NULL, *tmp;
u32 size = 0;
IWL_DEBUG_FW(trans, "Handling REDUCE_POWER section\n");
while (len >= sizeof(*tlv)) {
u32 tlv_len, tlv_type;
len -= sizeof(*tlv);
tlv = (void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
if (len < tlv_len) {
IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
len, tlv_len);
reduce_power_data = ERR_PTR(-EINVAL);
goto out;
}
data += sizeof(*tlv);
switch (tlv_type) {
case IWL_UCODE_TLV_MEM_DESC: {
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_MEM_DESC len %d\n",
tlv_len);
IWL_DEBUG_FW(trans, "Adding data (size %d)\n", tlv_len);
tmp = krealloc(reduce_power_data, size + tlv_len, GFP_KERNEL);
if (!tmp) {
IWL_DEBUG_FW(trans,
"Couldn't allocate (more) reduce_power_data\n");
reduce_power_data = ERR_PTR(-ENOMEM);
goto out;
}
reduce_power_data = tmp;
memcpy(reduce_power_data + size, data, tlv_len);
size += tlv_len;
break;
}
case IWL_UCODE_TLV_PNVM_SKU:
IWL_DEBUG_FW(trans,
"New REDUCE_POWER section started, stop parsing.\n");
goto done;
default:
IWL_DEBUG_FW(trans, "Found TLV 0x%0x, len %d\n",
tlv_type, tlv_len);
break;
}
len -= ALIGN(tlv_len, 4);
data += ALIGN(tlv_len, 4);
}
done:
if (!size) {
IWL_DEBUG_FW(trans, "Empty REDUCE_POWER, skipping.\n");
reduce_power_data = ERR_PTR(-ENOENT);
goto out;
}
IWL_INFO(trans, "loaded REDUCE_POWER\n");
out:
return reduce_power_data;
}
static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
const u8 *data, size_t len)
{
struct iwl_ucode_tlv *tlv;
void *sec_data;
IWL_DEBUG_FW(trans, "Parsing REDUCE_POWER data\n");
while (len >= sizeof(*tlv)) {
u32 tlv_len, tlv_type;
len -= sizeof(*tlv);
tlv = (void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
if (len < tlv_len) {
IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
len, tlv_len);
return ERR_PTR(-EINVAL);
}
if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
struct iwl_sku_id *sku_id =
(void *)(data + sizeof(*tlv));
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_PNVM_SKU len %d\n",
tlv_len);
IWL_DEBUG_FW(trans, "sku_id 0x%0x 0x%0x 0x%0x\n",
le32_to_cpu(sku_id->data[0]),
le32_to_cpu(sku_id->data[1]),
le32_to_cpu(sku_id->data[2]));
data += sizeof(*tlv) + ALIGN(tlv_len, 4);
len -= ALIGN(tlv_len, 4);
if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) &&
trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) &&
trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) {
sec_data = iwl_uefi_reduce_power_section(trans,
data,
len);
if (!IS_ERR(sec_data))
return sec_data;
} else {
IWL_DEBUG_FW(trans, "SKU ID didn't match!\n");
}
} else {
data += sizeof(*tlv) + ALIGN(tlv_len, 4);
len -= ALIGN(tlv_len, 4);
}
}
return ERR_PTR(-ENOENT);
}
void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
{
struct efivar_entry *reduce_power_efivar;
struct pnvm_sku_package *package;
void *data = NULL;
unsigned long package_size;
int err;
*len = 0;
reduce_power_efivar = kzalloc(sizeof(*reduce_power_efivar), GFP_KERNEL);
if (!reduce_power_efivar)
return ERR_PTR(-ENOMEM);
memcpy(&reduce_power_efivar->var.VariableName, IWL_UEFI_REDUCED_POWER_NAME,
sizeof(IWL_UEFI_REDUCED_POWER_NAME));
reduce_power_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;
/*
* TODO: we hardcode a maximum length here, because reading
* from the UEFI is not working. To implement this properly,
* we have to call efivar_entry_size().
*/
package_size = IWL_HARDCODED_REDUCE_POWER_SIZE;
package = kmalloc(package_size, GFP_KERNEL);
if (!package) {
package = ERR_PTR(-ENOMEM);
goto out;
}
err = efivar_entry_get(reduce_power_efivar, NULL, &package_size, package);
if (err) {
IWL_DEBUG_FW(trans,
"Reduced Power UEFI variable not found %d (len %lu)\n",
err, package_size);
kfree(package);
data = ERR_PTR(err);
goto out;
}
IWL_DEBUG_FW(trans, "Read reduced power from UEFI with size %lu\n",
package_size);
*len = package_size;
IWL_DEBUG_FW(trans, "rev %d, total_size %d, n_skus %d\n",
package->rev, package->total_size, package->n_skus);
data = iwl_uefi_reduce_power_parse(trans, package->data,
*len - sizeof(*package));
kfree(package);
out:
kfree(reduce_power_efivar);
return data;
}


@ -0,0 +1,42 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2021 Intel Corporation
*/
#define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm"
#define IWL_UEFI_REDUCED_POWER_NAME L"UefiCnvWlanReducedPower"
/*
* TODO: we have these hardcoded values that the caller must pass,
* because reading from the UEFI is not working. To implement this
* properly, we have to change iwl_pnvm_get_from_uefi() to call
* efivar_entry_size() and return the value to the caller instead.
*/
#define IWL_HARDCODED_PNVM_SIZE 4096
#define IWL_HARDCODED_REDUCE_POWER_SIZE 32768
struct pnvm_sku_package {
u8 rev;
u32 total_size;
u8 n_skus;
u32 reserved[2];
u8 data[];
} __packed;
#ifdef CONFIG_EFI
void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len);
void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len);
#else /* CONFIG_EFI */
static inline
void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline
void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
{
return ERR_PTR(-EOPNOTSUPP);
}
#endif /* CONFIG_EFI */
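
Both helpers above return either a buffer or an ERR_PTR()-encoded errno, which is why the pnvm.c caller checks IS_ERR_OR_NULL() before touching the data. A simplified userspace sketch of that kernel convention (not the include/linux/err.h implementation):

  #include <stdio.h>

  #define MAX_ERRNO 4095

  /* error codes are encoded in the topmost page of the pointer range,
   * so one return value can carry either a valid pointer or an errno */
  static void *ERR_PTR(long error) { return (void *)error; }
  static long PTR_ERR(const void *ptr) { return (long)ptr; }
  static int IS_ERR(const void *ptr)
  {
      return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
  }
  static int IS_ERR_OR_NULL(const void *ptr)
  {
      return !ptr || IS_ERR(ptr);
  }

  int main(void)
  {
      void *data = ERR_PTR(-2); /* e.g. no UEFI variable found */

      if (IS_ERR_OR_NULL(data))
          printf("no data, err=%ld\n", data ? PTR_ERR(data) : 0L);
      return 0;
  }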


@ -426,6 +426,7 @@ struct iwl_cfg {
#define IWL_CFG_RF_TYPE_HR1 0x10C
#define IWL_CFG_RF_TYPE_GF 0x10D
#define IWL_CFG_RF_TYPE_MR 0x110
#define IWL_CFG_RF_TYPE_FM 0x112
#define IWL_CFG_RF_ID_TH 0x1
#define IWL_CFG_RF_ID_TH1 0x1
@ -505,8 +506,11 @@ extern const char iwl_ax201_killer_1650s_name[];
extern const char iwl_ax201_killer_1650i_name[];
extern const char iwl_ax210_killer_1675w_name[];
extern const char iwl_ax210_killer_1675x_name[];
extern const char iwl9560_killer_1550i_160_name[];
extern const char iwl9560_killer_1550s_160_name[];
extern const char iwl_ax211_name[];
extern const char iwl_ax221_name[];
extern const char iwl_ax231_name[];
extern const char iwl_ax411_name[];
#if IS_ENABLED(CONFIG_IWLDVM)
extern const struct iwl_cfg iwl5300_agn_cfg;
@ -586,7 +590,6 @@ extern const struct iwl_cfg iwl_qu_b0_hr_b0;
extern const struct iwl_cfg iwl_qu_c0_hr_b0;
extern const struct iwl_cfg iwl_ax200_cfg_cc;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg iwl_ax201_cfg_quz_hr;
extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr;
@ -613,6 +616,7 @@ extern const struct iwl_cfg iwl_cfg_ma_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_gf4_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_fm_a0;
extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0;
extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2018, 2020 Intel Corporation
* Copyright (C) 2018, 2020-2021 Intel Corporation
*/
#ifndef __iwl_context_info_file_gen3_h__
#define __iwl_context_info_file_gen3_h__
@ -127,6 +127,17 @@ struct iwl_prph_scratch_rbd_cfg {
__le32 reserved;
} __packed; /* PERIPH_SCRATCH_RBD_CFG_S */
/*
* struct iwl_prph_scratch_uefi_cfg - prph scratch reduce power table
* @base_addr: reduce power table address
* @size: table size in dwords
*/
struct iwl_prph_scratch_uefi_cfg {
__le64 base_addr;
__le32 size;
__le32 reserved;
} __packed; /* PERIPH_SCRATCH_UEFI_CFG_S */
/*
* struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config
* @version: version information of context info and HW
@ -141,6 +152,7 @@ struct iwl_prph_scratch_ctrl_cfg {
struct iwl_prph_scratch_pnvm_cfg pnvm_cfg;
struct iwl_prph_scratch_hwm_cfg hwm_cfg;
struct iwl_prph_scratch_rbd_cfg rbd_cfg;
struct iwl_prph_scratch_uefi_cfg reduce_power_cfg;
} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
/*
@ -151,7 +163,7 @@ struct iwl_prph_scratch_ctrl_cfg {
*/
struct iwl_prph_scratch {
struct iwl_prph_scratch_ctrl_cfg ctrl_cfg;
__le32 reserved[16];
__le32 reserved[12];
struct iwl_context_info_dram dram;
} __packed; /* PERIPH_SCRATCH_S */
@ -245,9 +257,11 @@ struct iwl_context_info_gen3 {
int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
const struct fw_img *fw);
void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans);
void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive);
int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
const void *data, u32 len);
int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
const void *data, u32 len);
#endif /* __iwl_context_info_file_gen3_h__ */


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2018-2020 Intel Corporation
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@ -325,9 +325,6 @@ enum {
#define CSR_HW_RF_ID_TYPE_GF (0x0010D000)
#define CSR_HW_RF_ID_TYPE_GF4 (0x0010E000)
/* HW_RF CHIP ID */
#define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF)
/* HW_RF CHIP STEP */
#define CSR_HW_RF_STEP(_val) (((_val) >> 8) & 0xF)


@ -57,7 +57,7 @@ dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
[IWL_DBG_TLV_TYPE_DEBUG_INFO] = {.min_ver = 1, .max_ver = 1,},
[IWL_DBG_TLV_TYPE_BUF_ALLOC] = {.min_ver = 1, .max_ver = 1,},
[IWL_DBG_TLV_TYPE_HCMD] = {.min_ver = 1, .max_ver = 1,},
[IWL_DBG_TLV_TYPE_REGION] = {.min_ver = 1, .max_ver = 1,},
[IWL_DBG_TLV_TYPE_REGION] = {.min_ver = 1, .max_ver = 2,},
[IWL_DBG_TLV_TYPE_TRIGGER] = {.min_ver = 1, .max_ver = 1,},
};
@ -178,9 +178,20 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
u32 type = le32_to_cpu(reg->type);
u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);
/*
* The higher part of the ID in version 2 is irrelevant for
* us, so mask it out.
*/
if (le32_to_cpu(reg->hdr.version) == 2)
id &= IWL_FW_INI_REGION_V2_MASK;
if (le32_to_cpu(tlv->length) < sizeof(*reg))
return -EINVAL;
/* for safe use of a string from FW, limit it to IWL_FW_INI_MAX_NAME */
IWL_DEBUG_FW(trans, "WRT: parsing region: %.*s\n",
IWL_FW_INI_MAX_NAME, reg->name);
if (id >= IWL_FW_INI_MAX_REGION_ID) {
IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
return -EINVAL;


@ -1117,6 +1117,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
IWL_ERROR_EVENT_TABLE_LMAC1;
break;
}
case IWL_UCODE_TLV_TCM_DEBUG_ADDRS: {
struct iwl_fw_tcm_error_addr *ptr = (void *)tlv_data;
if (tlv_len != sizeof(*ptr))
goto invalid_tlv_len;
drv->trans->dbg.tcm_error_event_table =
le32_to_cpu(ptr->addr) & ~FW_ADDR_CACHE_CONTROL;
drv->trans->dbg.error_event_table_tlv_status |=
IWL_ERROR_EVENT_TABLE_TCM;
break;
}
case IWL_UCODE_TLV_TYPE_DEBUG_INFO:
case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
case IWL_UCODE_TLV_TYPE_HCMD:
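
The new IWL_UCODE_TLV_TCM_DEBUG_ADDRS case follows the usual TLV discipline: reject the entry if its length doesn't match the expected struct, then extract the address and mask off control bits. A small stand-alone sketch of that validate-then-extract flow is below; the record layout, mask value and names are illustrative only, not the firmware's real format, and it assumes a little-endian host where the driver would use le32_to_cpu().

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ADDR_CACHE_CONTROL 0xC0000000u  /* illustrative mask, not the real FW_ADDR_CACHE_CONTROL */

struct tcm_error_addr {                 /* illustrative TLV payload */
    uint32_t addr;
};

/* Returns 0 on success, -1 if the TLV length doesn't match the struct. */
static int parse_tcm_addr_tlv(const uint8_t *data, uint32_t tlv_len, uint32_t *out)
{
    struct tcm_error_addr ptr;

    if (tlv_len != sizeof(ptr))
        return -1;                      /* same idea as "goto invalid_tlv_len" above */

    memcpy(&ptr, data, sizeof(ptr));    /* little-endian host assumed */
    *out = ptr.addr & ~ADDR_CACHE_CONTROL;
    return 0;
}

int main(void)
{
    uint8_t payload[4] = { 0x78, 0x56, 0x34, 0xd2 };    /* 0xd2345678 little-endian */
    uint32_t addr;

    if (!parse_tcm_addr_tlv(payload, sizeof(payload), &addr))
        printf("tcm error table at 0x%08x\n", (unsigned)addr);  /* 0x12345678 */
    return 0;
}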


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2005-2014, 2018-2020 Intel Corporation
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -549,8 +549,7 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
.mac_cap_info[2] =
IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP,
.mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2,
IEEE80211_HE_MAC_CAP3_OMI_CONTROL,
.mac_cap_info[4] =
IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU |
IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39,
@ -579,25 +578,20 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
.phy_cap_info[5] =
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
.phy_cap_info[6] =
IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB |
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
.phy_cap_info[7] =
IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
IEEE80211_HE_PHY_CAP7_MAX_NC_1,
IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI,
.phy_cap_info[8] =
IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996,
IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242,
.phy_cap_info[9] =
IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED,
@ -632,19 +626,11 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
.mac_cap_info[1] =
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
.mac_cap_info[2] =
IEEE80211_HE_MAC_CAP2_BSR,
.mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2,
.mac_cap_info[4] =
IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
.mac_cap_info[5] =
IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU,
IEEE80211_HE_MAC_CAP3_OMI_CONTROL,
.phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G,
.phy_cap_info[1] =
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD,
.phy_cap_info[2] =
@ -654,27 +640,14 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM |
IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
.phy_cap_info[4] =
IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
.phy_cap_info[5] =
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
.phy_cap_info[6] =
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
.phy_cap_info[7] =
IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
IEEE80211_HE_PHY_CAP7_MAX_NC_1,
IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI,
.phy_cap_info[8] =
IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996,
IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242,
.phy_cap_info[9] =
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED,
},
/*
@ -745,12 +718,72 @@ static void iwl_init_he_6ghz_capa(struct iwl_trans *trans,
iftype_data[i].he_6ghz_capa.capa = cpu_to_le16(he_6ghz_capa);
}
static void
iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
struct ieee80211_supported_band *sband,
struct ieee80211_sband_iftype_data *iftype_data,
u8 tx_chains, u8 rx_chains,
const struct iwl_fw *fw)
{
bool is_ap = iftype_data->types_mask & BIT(NL80211_IFTYPE_AP);
/* Advertise an A-MPDU exponent extension based on
* operating band
*/
if (sband->band != NL80211_BAND_2GHZ)
iftype_data->he_cap.he_cap_elem.mac_cap_info[3] |=
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_1;
else
iftype_data->he_cap.he_cap_elem.mac_cap_info[3] |=
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3;
if (is_ap && iwlwifi_mod_params.nvm_file)
iftype_data->he_cap.he_cap_elem.phy_cap_info[0] |=
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
if ((tx_chains & rx_chains) == ANT_AB) {
iftype_data->he_cap.he_cap_elem.phy_cap_info[5] |=
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2;
if (!is_ap)
iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |=
IEEE80211_HE_PHY_CAP7_MAX_NC_2;
} else if (!is_ap) {
/* If not 2x2, we need to indicate 1x1 in the
* Midamble RX Max NSTS - but not for AP mode
*/
iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &=
~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
iftype_data->he_cap.he_cap_elem.phy_cap_info[2] &=
~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS;
iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |=
IEEE80211_HE_PHY_CAP7_MAX_NC_1;
}
switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
case IWL_CFG_RF_TYPE_GF:
case IWL_CFG_RF_TYPE_MR:
iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |=
IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
if (!is_ap)
iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |=
IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
break;
}
if (fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_BROADCAST_TWT))
iftype_data->he_cap.he_cap_elem.mac_cap_info[2] |=
IEEE80211_HE_MAC_CAP2_BCAST_TWT;
}
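
iwl_nvm_fixup_sband_iftd() decides between 2x2 and 1x1 HE capabilities from the antennas usable for both TX and RX: only when (tx_chains & rx_chains) covers antennas A and B does it advertise two sounding dimensions and MAX_NC_2. A stand-alone sketch of that mask check, assuming the driver's usual ANT_A/ANT_B bit assignment (the helper itself is illustrative):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define ANT_A   0x1     /* BIT(0) */
#define ANT_B   0x2     /* BIT(1) */
#define ANT_AB  (ANT_A | ANT_B)

/* True only if both antennas are usable for TX *and* RX. */
static bool supports_2x2(uint8_t tx_chains, uint8_t rx_chains)
{
    return (tx_chains & rx_chains) == ANT_AB;
}

int main(void)
{
    printf("tx=AB rx=AB -> %d\n", supports_2x2(ANT_AB, ANT_AB)); /* 1: full 2x2 caps */
    printf("tx=A  rx=AB -> %d\n", supports_2x2(ANT_A, ANT_AB));  /* 0: fall back to 1x1 */
    return 0;
}
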
static void iwl_init_he_hw_capab(struct iwl_trans *trans,
struct iwl_nvm_data *data,
struct ieee80211_supported_band *sband,
u8 tx_chains, u8 rx_chains)
u8 tx_chains, u8 rx_chains,
const struct iwl_fw *fw)
{
struct ieee80211_sband_iftype_data *iftype_data;
int i;
/* should only initialize once */
if (WARN_ON(sband->iftype_data))
@ -777,26 +810,18 @@ static void iwl_init_he_hw_capab(struct iwl_trans *trans,
sband->iftype_data = iftype_data;
sband->n_iftype_data = ARRAY_SIZE(iwl_he_capa);
/* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */
if ((tx_chains & rx_chains) != ANT_AB) {
int i;
for (i = 0; i < sband->n_iftype_data; i++)
iwl_nvm_fixup_sband_iftd(trans, sband, &iftype_data[i],
tx_chains, rx_chains, fw);
for (i = 0; i < sband->n_iftype_data; i++) {
iftype_data[i].he_cap.he_cap_elem.phy_cap_info[1] &=
~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
iftype_data[i].he_cap.he_cap_elem.phy_cap_info[2] &=
~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS;
iftype_data[i].he_cap.he_cap_elem.phy_cap_info[7] &=
~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK;
}
}
iwl_init_he_6ghz_capa(trans, data, sband, tx_chains, rx_chains);
}
static void iwl_init_sbands(struct iwl_trans *trans,
struct iwl_nvm_data *data,
const void *nvm_ch_flags, u8 tx_chains,
u8 rx_chains, u32 sbands_flags, bool v4)
u8 rx_chains, u32 sbands_flags, bool v4,
const struct iwl_fw *fw)
{
struct device *dev = trans->dev;
const struct iwl_cfg *cfg = trans->cfg;
@ -816,7 +841,8 @@ static void iwl_init_sbands(struct iwl_trans *trans,
tx_chains, rx_chains);
if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains);
iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains,
fw);
sband = &data->bands[NL80211_BAND_5GHZ];
sband->band = NL80211_BAND_5GHZ;
@ -831,7 +857,8 @@ static void iwl_init_sbands(struct iwl_trans *trans,
tx_chains, rx_chains);
if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains);
iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains,
fw);
/* 6GHz band. */
sband = &data->bands[NL80211_BAND_6GHZ];
@ -843,7 +870,8 @@ static void iwl_init_sbands(struct iwl_trans *trans,
NL80211_BAND_6GHZ);
if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains);
iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains,
fw);
else
sband->n_channels = 0;
if (n_channels != n_used)
@ -1154,7 +1182,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ;
iwl_init_sbands(trans, data, ch_section, tx_chains, rx_chains,
sbands_flags, false);
sbands_flags, false, fw);
data->calib_version = 255;
return data;
@ -1661,7 +1689,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
channel_profile,
nvm->valid_tx_ant & fw->valid_tx_ant,
nvm->valid_rx_ant & fw->valid_rx_ant,
sbands_flags, v4);
sbands_flags, v4, fw);
iwl_free_resp(&hcmd);
return nvm;


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2018-2020 Intel Corporation
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@ -412,6 +412,8 @@ enum {
#define UREG_DOORBELL_TO_ISR6_RESUME BIT(19)
#define UREG_DOORBELL_TO_ISR6_PNVM BIT(20)
#define CNVI_MBOX_C 0xA3400C
#define FSEQ_ERROR_CODE 0xA340C8
#define FSEQ_TOP_INIT_VERSION 0xA34038
#define FSEQ_CNVIO_INIT_VERSION 0xA3403C


@ -193,6 +193,7 @@ enum iwl_error_event_table_status {
IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
IWL_ERROR_EVENT_TABLE_TCM = BIT(3),
};
/**
@ -589,6 +590,8 @@ struct iwl_trans_ops {
void (*debugfs_cleanup)(struct iwl_trans *trans);
void (*sync_nmi)(struct iwl_trans *trans);
int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len);
int (*set_reduce_power)(struct iwl_trans *trans,
const void *data, u32 len);
void (*interrupts)(struct iwl_trans *trans, bool enable);
};
@ -706,6 +709,7 @@ struct iwl_self_init_dram {
* @trigger_tlv: array of pointers to triggers TLVs for debug
* @lmac_error_event_table: addrs of lmacs error tables
* @umac_error_event_table: addr of umac error table
* @tcm_error_event_table: address of TCM error table
* @error_event_table_tlv_status: bitmap that indicates what error table
* pointers were received via TLV. Uses enum &iwl_error_event_table_status
* @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
@ -732,6 +736,7 @@ struct iwl_trans_debug {
u32 lmac_error_event_table[2];
u32 umac_error_event_table;
u32 tcm_error_event_table;
unsigned int error_event_table_tlv_status;
enum iwl_ini_cfg_state internal_ini_cfg;
@ -957,6 +962,7 @@ struct iwl_trans {
bool pm_support;
bool ltr_enabled;
u8 pnvm_loaded:1;
u8 reduce_power_loaded:1;
const struct iwl_hcmd_arr *command_groups;
int command_groups_size;
@ -1420,6 +1426,20 @@ static inline int iwl_trans_set_pnvm(struct iwl_trans *trans,
return 0;
}
static inline int iwl_trans_set_reduce_power(struct iwl_trans *trans,
const void *data, u32 len)
{
if (trans->ops->set_reduce_power) {
int ret = trans->ops->set_reduce_power(trans, data, len);
if (ret)
return ret;
}
trans->reduce_power_loaded = true;
return 0;
}
static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
{
return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||


@ -104,7 +104,7 @@ static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key,
struct wowlan_key_data {
struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
struct iwl_wowlan_tkip_params_cmd *tkip;
struct iwl_wowlan_kek_kck_material_cmd_v3 *kek_kck_cmd;
struct iwl_wowlan_kek_kck_material_cmd_v4 *kek_kck_cmd;
bool error, use_rsc_tsc, use_tkip, configure_keys;
int wep_key_idx;
};
@ -393,14 +393,19 @@ static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm,
}
static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_wowlan *wowlan)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_wowlan_patterns_cmd *pattern_cmd;
struct iwl_host_cmd cmd = {
.id = WOWLAN_PATTERNS,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
};
int i, err;
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
WOWLAN_PATTERNS,
IWL_FW_CMD_VER_UNKNOWN);
if (!wowlan->n_patterns)
return 0;
@ -408,11 +413,13 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
cmd.len[0] = sizeof(*pattern_cmd) +
wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v2);
pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
pattern_cmd = kzalloc(cmd.len[0], GFP_KERNEL);
if (!pattern_cmd)
return -ENOMEM;
pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
pattern_cmd->n_patterns = wowlan->n_patterns;
if (ver >= 3)
pattern_cmd->sta_id = mvmvif->ap_sta_id;
for (i = 0; i < wowlan->n_patterns; i++) {
int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
@ -636,7 +643,6 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
struct ieee80211_sta *ap_sta)
{
int ret;
struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
/* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
@ -646,12 +652,16 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
/* Query the last used seqno and set it */
ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
if (ret < 0)
return ret;
if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
WOWLAN_CONFIGURATION, 0) < 6) {
/* Query the last used seqno and set it */
int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
if (ret < 0)
return ret;
wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
}
iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);
@ -706,7 +716,8 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 cmd_flags)
{
struct iwl_wowlan_kek_kck_material_cmd_v3 kek_kck_cmd = {};
struct iwl_wowlan_kek_kck_material_cmd_v4 kek_kck_cmd = {};
struct iwl_wowlan_kek_kck_material_cmd_v4 *_kek_kck_cmd = &kek_kck_cmd;
struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
bool unified = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
@ -715,7 +726,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
.use_rsc_tsc = false,
.tkip = &tkip_cmd,
.use_tkip = false,
.kek_kck_cmd = &kek_kck_cmd,
.kek_kck_cmd = _kek_kck_cmd,
};
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
@ -809,13 +820,9 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
IWL_ALWAYS_LONG_GROUP,
WOWLAN_KEK_KCK_MATERIAL,
IWL_FW_CMD_VER_UNKNOWN);
if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 &&
if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 && cmd_ver != 4 &&
cmd_ver != IWL_FW_CMD_VER_UNKNOWN))
return -EINVAL;
if (cmd_ver == 3)
cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v3);
else
cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2);
memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
mvmvif->rekey_data.kck_len);
@ -825,6 +832,21 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
kek_kck_cmd.kek_len = cpu_to_le16(mvmvif->rekey_data.kek_len);
kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
kek_kck_cmd.akm = cpu_to_le32(mvmvif->rekey_data.akm);
kek_kck_cmd.sta_id = cpu_to_le32(mvmvif->ap_sta_id);
if (cmd_ver == 4) {
cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v4);
} else {
if (cmd_ver == 3)
cmd_size =
sizeof(struct iwl_wowlan_kek_kck_material_cmd_v3);
else
cmd_size =
sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2);
/* skip the sta_id at the beginning */
_kek_kck_cmd = (void *)
((u8 *)_kek_kck_cmd) + sizeof(kek_kck_cmd.sta_id);
}
IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n",
mvmvif->rekey_data.akm);
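
Version 4 of the KEK/KCK material command adds sta_id at the front of the structure; older firmware expects the same body without it, so the driver fills the v4 struct and, for earlier versions, sends a pointer offset past sta_id with a smaller length. The sketch below shows that send-the-tail trick in user space; the struct layout and names are illustrative, and it collapses the separate v2/v3 sizes into a single "skip sta_id" case.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Illustrative layout: version 4 prepends sta_id to the older body. */
struct kek_kck_cmd_v4 {
    uint32_t sta_id;    /* new in v4 */
    uint8_t  kck[32];
    uint8_t  kek[32];
    uint32_t akm;
};

/* Stand-in for the host-command send: report size and offset within the v4 struct. */
static void send_cmd(const struct kek_kck_cmd_v4 *base, const void *data, size_t len)
{
    printf("payload: %zu bytes, starting %zu bytes into the v4 struct\n",
           len, (size_t)((const uint8_t *)data - (const uint8_t *)base));
}

static void send_kek_kck(int cmd_ver)
{
    struct kek_kck_cmd_v4 cmd = { .sta_id = 3, .akm = 1 };
    const void *payload = &cmd;
    size_t size = sizeof(cmd);

    if (cmd_ver < 4) {
        /* Older firmware: same body, but without the leading sta_id. */
        payload = (const uint8_t *)&cmd + sizeof(cmd.sta_id);
        size -= sizeof(cmd.sta_id);
    }
    send_cmd(&cmd, payload, size);
}

int main(void)
{
    send_kek_kck(4);    /* full struct */
    send_kek_kck(3);    /* tail only, sta_id skipped */
    return 0;
}
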
@ -832,7 +854,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
ret = iwl_mvm_send_cmd_pdu(mvm,
WOWLAN_KEK_KCK_MATERIAL, cmd_flags,
cmd_size,
&kek_kck_cmd);
_kek_kck_cmd);
if (ret)
goto out;
}
@ -884,7 +906,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE))
ret = iwl_mvm_send_patterns(mvm, wowlan);
ret = iwl_mvm_send_patterns(mvm, vif, wowlan);
else
ret = iwl_mvm_send_patterns_v1(mvm, wowlan);
if (ret)
@ -1534,9 +1556,12 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
}
out:
mvmvif->seqno_valid = true;
/* +0x10 because the set API expects next-to-use, not last-used */
mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
WOWLAN_GET_STATUSES, 0) < 10) {
mvmvif->seqno_valid = true;
/* +0x10 because the set API expects next-to-use, not last-used */
mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
}
return true;
}
@ -1587,15 +1612,27 @@ iwl_mvm_parse_wowlan_status_common(v6)
iwl_mvm_parse_wowlan_status_common(v7)
iwl_mvm_parse_wowlan_status_common(v9)
struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
static struct iwl_wowlan_status *
iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
{
struct iwl_wowlan_status *status;
struct iwl_wowlan_get_status_cmd get_status_cmd = {
.sta_id = cpu_to_le32(sta_id),
};
struct iwl_host_cmd cmd = {
.id = WOWLAN_GET_STATUSES,
.flags = CMD_WANT_SKB,
.data = { &get_status_cmd, },
.len = { sizeof(get_status_cmd), },
};
int ret, len;
u8 notif_ver;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
WOWLAN_GET_STATUSES,
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN)
cmd.len[0] = 0;
lockdep_assert_held(&mvm->mutex);
@ -1608,8 +1645,11 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
/* default to 7 (when we have IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL) */
notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
WOWLAN_GET_STATUSES, 7);
notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
WOWLAN_GET_STATUSES, 0);
if (!notif_ver)
notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
WOWLAN_GET_STATUSES, 7);
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) {
@ -1654,7 +1694,7 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
status->gtk[0] = v7->gtk[0];
status->igtk[0] = v7->igtk[0];
} else if (notif_ver == 9) {
} else if (notif_ver == 9 || notif_ver == 10) {
struct iwl_wowlan_status_v9 *v9 = (void *)cmd.resp_pkt->data;
status = iwl_mvm_parse_wowlan_status_common_v9(mvm,
@ -1680,29 +1720,37 @@ out_free_resp:
}
static struct iwl_wowlan_status *
iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm)
iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, u8 sta_id)
{
int ret;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
OFFLOADS_QUERY_CMD,
IWL_FW_CMD_VER_UNKNOWN);
__le32 station_id = cpu_to_le32(sta_id);
u32 cmd_size = cmd_ver != IWL_FW_CMD_VER_UNKNOWN ? sizeof(station_id) : 0;
/* only for tracing for now */
ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
if (ret)
IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
if (!mvm->net_detect) {
/* only for tracing for now */
int ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0,
cmd_size, &station_id);
if (ret)
IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
}
return iwl_mvm_send_wowlan_get_status(mvm);
return iwl_mvm_send_wowlan_get_status(mvm, sta_id);
}
/* releases the MVM mutex */
static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_wowlan_status_data status;
struct iwl_wowlan_status *fw_status;
int i;
bool keep;
struct iwl_mvm_sta *mvm_ap_sta;
fw_status = iwl_mvm_get_wakeup_status(mvm);
fw_status = iwl_mvm_get_wakeup_status(mvm, mvmvif->ap_sta_id);
if (IS_ERR_OR_NULL(fw_status))
goto out_unlock;
@ -1880,7 +1928,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
u32 reasons = 0;
int i, n_matches, ret;
fw_status = iwl_mvm_get_wakeup_status(mvm);
fw_status = iwl_mvm_get_wakeup_status(mvm, IWL_MVM_INVALID_STA);
if (!IS_ERR_OR_NULL(fw_status)) {
reasons = le32_to_cpu(fw_status->wakeup_reasons);
kfree(fw_status);


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -460,7 +460,7 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
int pos = 0;
mutex_lock(&mvm->mutex);
iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2, &curr_os, NULL);
mutex_unlock(&mvm->mutex);
do_div(curr_os, NSEC_PER_USEC);


@ -1023,7 +1023,9 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
mvm->fw_restart++;
/* take the return value to make compiler happy - it will fail anyway */
ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, 0, 0, NULL);
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(LONG_GROUP, REPLY_ERROR),
0, 0, NULL);
mutex_unlock(&mvm->mutex);


@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
* Copyright (C) 2018-2020 Intel Corporation
* Copyright (C) 2018-2021 Intel Corporation
*/
#include <linux/etherdevice.h>
#include <linux/math64.h>
@ -430,6 +430,10 @@ iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
FTM_PUT_FLAG(TB);
else if (peer->ftm.non_trigger_based)
FTM_PUT_FLAG(NON_TB);
if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
peer->ftm.lmr_feedback)
FTM_PUT_FLAG(LMR_FEEDBACK);
}
static int
@ -879,7 +883,8 @@ static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts)
u32 curr_gp2, diff;
u64 now_from_boot_ns;
iwl_mvm_get_sync_time(mvm, &curr_gp2, &now_from_boot_ns);
iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2,
&now_from_boot_ns, NULL);
if (curr_gp2 >= gp2_ts)
diff = curr_gp2 - gp2_ts;


@ -1139,19 +1139,34 @@ static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
int cmd_ret;
struct iwl_lari_config_change_cmd_v3 cmd = {};
int ret;
u32 value;
struct iwl_lari_config_change_cmd_v4 cmd = {};
cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt);
ret = iwl_acpi_get_dsm_u32((&mvm->fwrt)->dev, 0, DSM_FUNC_11AX_ENABLEMENT,
&iwl_guid, &value);
if (!ret)
cmd.oem_11ax_allow_bitmap = cpu_to_le32(value);
/* apply more config masks here */
if (cmd.config_bitmap) {
ret = iwl_acpi_get_dsm_u32((&mvm->fwrt)->dev, 0,
DSM_FUNC_ENABLE_UNII4_CHAN,
&iwl_guid, &value);
if (!ret)
cmd.oem_unii4_allow_bitmap = cpu_to_le32(value);
if (cmd.config_bitmap ||
cmd.oem_11ax_allow_bitmap ||
cmd.oem_unii4_allow_bitmap) {
size_t cmd_size;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE, 1);
if (cmd_ver == 3)
if (cmd_ver == 4)
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
else if (cmd_ver == 3)
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
else if (cmd_ver == 2)
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
@ -1159,16 +1174,21 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
IWL_DEBUG_RADIO(mvm,
"sending LARI_CONFIG_CHANGE, config_bitmap=0x%x\n",
le32_to_cpu(cmd.config_bitmap));
cmd_ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE),
0, cmd_size, &cmd);
if (cmd_ret < 0)
"sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
le32_to_cpu(cmd.config_bitmap),
le32_to_cpu(cmd.oem_11ax_allow_bitmap));
IWL_DEBUG_RADIO(mvm,
"sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, cmd_ver=%d\n",
le32_to_cpu(cmd.oem_unii4_allow_bitmap),
cmd_ver);
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE),
0, cmd_size, &cmd);
if (ret < 0)
IWL_DEBUG_RADIO(mvm,
"Failed to send LARI_CONFIG_CHANGE (%d)\n",
cmd_ret);
ret);
}
}
#else /* CONFIG_ACPI */


@ -3800,6 +3800,7 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct cfg80211_chan_def chandef;
struct iwl_mvm_phy_ctxt *phy_ctxt;
bool band_change_removal;
int ret, i;
IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
@ -3880,19 +3881,30 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
/*
* Change the PHY context configuration as it is currently referenced
* only by the P2P Device MAC
* Check if the remain-on-channel is on a different band and that
* requires context removal, see iwl_mvm_phy_ctxt_changed(). If
* so, we'll need to release and then re-configure here, since we
* must not remove a PHY context that's part of a binding.
*/
if (mvmvif->phy_ctxt->ref == 1) {
band_change_removal =
fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
mvmvif->phy_ctxt->channel->band != chandef.chan->band;
if (mvmvif->phy_ctxt->ref == 1 && !band_change_removal) {
/*
* Change the PHY context configuration as it is currently
* referenced only by the P2P Device MAC (and we can modify it)
*/
ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
&chandef, 1, 1);
if (ret)
goto out_unlock;
} else {
/*
* The PHY context is shared with other MACs. Need to remove the
* P2P Device from the binding, allocate an new PHY context and
* create a new binding
* The PHY context is shared with other MACs (or we're trying to
* switch bands), so remove the P2P Device from the binding,
* allocate a new PHY context and create a new binding.
*/
phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
if (!phy_ctxt) {
@ -4211,7 +4223,6 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
struct ieee80211_vif *disabled_vif = NULL;
lockdep_assert_held(&mvm->mutex);
iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
switch (vif->type) {


@ -16,6 +16,8 @@
#include <linux/thermal.h>
#endif
#include <linux/ktime.h>
#include "iwl-op-mode.h"
#include "iwl-trans.h"
#include "fw/notif-wait.h"
@ -195,6 +197,7 @@ enum iwl_mvm_smps_type_request {
IWL_MVM_SMPS_REQ_BT_COEX,
IWL_MVM_SMPS_REQ_TT,
IWL_MVM_SMPS_REQ_PROT,
IWL_MVM_SMPS_REQ_FW,
NUM_IWL_MVM_SMPS_REQ,
};
@ -991,6 +994,8 @@ struct iwl_mvm {
*/
bool temperature_test; /* Debug test temperature is enabled */
bool fw_static_smps_request;
unsigned long bt_coex_last_tcm_ts;
struct iwl_mvm_tcm tcm;
@ -1447,10 +1452,16 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
struct ieee80211_tx_rate *r);
u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac);
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
static inline void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
{
iwl_fwrt_dump_error_logs(&mvm->fwrt);
}
u8 first_antenna(u8 mask);
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime);
void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type, u32 *gp2,
u64 *boottime, ktime_t *realtime);
u32 iwl_mvm_get_systime(struct iwl_mvm *mvm);
/* Tx / Host Commands */
@ -1769,7 +1780,6 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int idx);
extern const struct file_operations iwl_dbgfs_d3_test_ops;
struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm);
#ifdef CONFIG_PM
void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
@ -1827,7 +1837,9 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum iwl_mvm_smps_type_request req_type,
enum ieee80211_smps_mode smps_request);
bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm);
bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt);
void iwl_mvm_apply_fw_smps_request(struct ieee80211_vif *vif);
/* Low latency */
int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2014 Intel Corporation
* Copyright (C) 2012-2014, 2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 Intel Deutschland GmbH
*/
@ -36,7 +36,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct iwl_proto_offload_cmd_v1 v1;
struct iwl_proto_offload_cmd_v2 v2;
struct iwl_proto_offload_cmd_v3_small v3s;
struct iwl_proto_offload_cmd_v3_large v3l;
struct iwl_proto_offload_cmd_v4 v4;
} cmd = {};
struct iwl_host_cmd hcmd = {
.id = PROT_OFFLOAD_CONFIG_CMD,
@ -47,6 +47,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct iwl_proto_offload_cmd_common *common;
u32 enabled = 0, size;
u32 capa_flags = mvm->fw->ucode_capa.flags;
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
PROT_OFFLOAD_CONFIG_CMD, 0);
#if IS_ENABLED(CONFIG_IPV6)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int i;
@ -72,9 +75,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
addrs = cmd.v3s.targ_addrs;
n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
} else {
nsc = cmd.v3l.ns_config;
nsc = cmd.v4.ns_config;
n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
addrs = cmd.v3l.targ_addrs;
addrs = cmd.v4.targ_addrs;
n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
}
@ -116,7 +119,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
cmd.v3s.num_valid_ipv6_addrs =
cpu_to_le32(i - num_skipped);
else
cmd.v3l.num_valid_ipv6_addrs =
cmd.v4.num_valid_ipv6_addrs =
cpu_to_le32(i - num_skipped);
} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
bool found = false;
@ -171,8 +174,17 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
common = &cmd.v3s.common;
size = sizeof(cmd.v3s);
} else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
common = &cmd.v3l.common;
size = sizeof(cmd.v3l);
common = &cmd.v4.common;
size = sizeof(cmd.v4);
if (ver < 4) {
/*
* This basically uses iwl_proto_offload_cmd_v3_large
* which doesn't have the sta_id parameter before the
* common part.
*/
size -= sizeof(cmd.v4.sta_id);
hcmd.data[0] = common;
}
} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
common = &cmd.v2.common;
size = sizeof(cmd.v2);


@ -210,6 +210,39 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
ieee80211_disconnect(vif, true);
}
void iwl_mvm_apply_fw_smps_request(struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_FW,
mvm->fw_static_smps_request ?
IEEE80211_SMPS_STATIC :
IEEE80211_SMPS_AUTOMATIC);
}
static void iwl_mvm_intf_dual_chain_req(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
iwl_mvm_apply_fw_smps_request(vif);
}
static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_thermal_dual_chain_request *req = (void *)pkt->data;
/*
* We could pass it to the iterator data, but also need to remember
* it for new interfaces that are added while in this state.
*/
mvm->fw_static_smps_request =
req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE);
ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_intf_dual_chain_req, NULL);
}
/**
* enum iwl_rx_handler_context context for Rx handler
* @RX_HANDLER_SYNC : this means that it will be called in the Rx path
@ -358,6 +391,11 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
struct iwl_datapath_monitor_notif),
RX_HANDLER_GRP(DATA_PATH_GROUP, THERMAL_DUAL_CHAIN_REQUEST,
iwl_mvm_rx_thermal_dual_chain_req,
RX_HANDLER_ASYNC_LOCKED,
struct iwl_thermal_dual_chain_request),
};
#undef RX_HANDLER
#undef RX_HANDLER_GRP
@ -445,7 +483,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(D3_CONFIG_CMD),
HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
HCMD_NAME(OFFLOADS_QUERY_CMD),
HCMD_NAME(REMOTE_WAKE_CONFIG_CMD),
HCMD_NAME(MATCH_FOUND_NOTIFICATION),
HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION),
HCMD_NAME(WOWLAN_PATTERNS),
@ -503,6 +540,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(TLC_MNG_CONFIG_CMD),
HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
HCMD_NAME(MONITOR_NOTIF),
HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST),
HCMD_NAME(STA_PM_NOTIF),
HCMD_NAME(MU_GROUP_MGMT_NOTIF),
HCMD_NAME(RX_QUEUES_NOTIFICATION),


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
*/
@ -76,6 +76,7 @@ static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt,
}
static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt,
__le32 *rxchain_info,
u8 chains_static,
u8 chains_dynamic)
@ -93,11 +94,22 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
* between the two antennas is sufficiently different to impact
* performance.
*/
if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm)) {
if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm, ctxt)) {
idle_cnt = 2;
active_cnt = 2;
}
/*
* If the firmware requested it, then we know that it supports
* getting zero for the values to indicate "use one, but pick
* which one yourself", which means it can dynamically pick one
* that e.g. has better RSSI.
*/
if (mvm->fw_static_smps_request && active_cnt == 1 && idle_cnt == 1) {
idle_cnt = 0;
active_cnt = 0;
}
*rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
PHY_RX_CHAIN_VALID_POS);
*rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
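
The rx-chain word packs the valid-antenna mask and the idle/active chain counts into bit fields, and the new fw_static_smps_request handling writes zero counts to let newer firmware pick the better chain itself. A rough user-space sketch of that decision and packing follows; the bit positions are invented for illustration and do not match the real PHY_RX_CHAIN_*_POS definitions.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Illustrative bit positions; the real ones live in the firmware API headers. */
#define RX_CHAIN_VALID_POS      1
#define RX_CHAIN_CNT_POS        10
#define RX_CHAIN_MIMO_CNT_POS   12

static uint32_t build_rxchain_info(uint8_t valid_ant, bool diversity_allowed,
                                   bool fw_wants_dynamic_pick)
{
    unsigned int idle_cnt = 1, active_cnt = 1;

    /* Use both chains when diversity helps (mirrors the "active_cnt == 1" upgrade above). */
    if (diversity_allowed)
        idle_cnt = active_cnt = 2;

    /* Zero tells newer firmware "use one chain, but pick which one yourself". */
    if (fw_wants_dynamic_pick && active_cnt == 1 && idle_cnt == 1)
        idle_cnt = active_cnt = 0;

    return (uint32_t)valid_ant << RX_CHAIN_VALID_POS |
           idle_cnt << RX_CHAIN_CNT_POS |
           active_cnt << RX_CHAIN_MIMO_CNT_POS;
}

int main(void)
{
    printf("0x%08x\n", (unsigned)build_rxchain_info(0x3, true, false));  /* 2/2: diversity */
    printf("0x%08x\n", (unsigned)build_rxchain_info(0x3, false, true));  /* 0/0: firmware picks */
    return 0;
}
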
@ -113,6 +125,7 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
* Add the phy configuration to the PHY context command
*/
static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt,
struct iwl_phy_context_cmd_v1 *cmd,
struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic)
@ -123,7 +136,7 @@ static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
/* Set the channel info data */
iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
iwl_mvm_phy_ctxt_set_rxchain(mvm, &tail->rxchain_info,
iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &tail->rxchain_info,
chains_static, chains_dynamic);
tail->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
@ -133,6 +146,7 @@ static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
* Add the phy configuration to the PHY context command
*/
static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt,
struct iwl_phy_context_cmd *cmd,
struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic)
@ -143,7 +157,7 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
/* Set the channel info data */
iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
iwl_mvm_phy_ctxt_set_rxchain(mvm, &cmd->rxchain_info,
iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info,
chains_static, chains_dynamic);
}
@ -170,7 +184,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action);
/* Set the command data */
iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
iwl_mvm_phy_ctxt_cmd_data(mvm, ctxt, &cmd, chandef,
chains_static,
chains_dynamic);
@ -186,7 +200,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
action);
/* Set the command data */
iwl_mvm_phy_ctxt_cmd_data_v1(mvm, &cmd, chandef,
iwl_mvm_phy_ctxt_cmd_data_v1(mvm, ctxt, &cmd, chandef,
chains_static,
chains_dynamic);
ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD,


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@ -2001,8 +2001,10 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
struct sk_buff *skb;
u8 channel, energy_a, energy_b;
struct iwl_mvm_rx_phy_data phy_data = {
.info_type = le32_get_bits(desc->phy_info[1],
IWL_RX_PHY_DATA1_INFO_TYPE_MASK),
.d0 = desc->phy_info[0],
.info_type = IWL_RX_PHY_INFO_TYPE_NONE,
.d1 = desc->phy_info[1],
};
if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*desc)))
@ -2015,10 +2017,6 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
energy_b = (rssi & RX_NO_DATA_CHAIN_B_MSK) >> RX_NO_DATA_CHAIN_B_POS;
channel = (rssi & RX_NO_DATA_CHANNEL_MSK) >> RX_NO_DATA_CHANNEL_POS;
phy_data.info_type =
le32_get_bits(desc->phy_info[1],
IWL_RX_PHY_DATA1_INFO_TYPE_MASK);
/* Dont use dev_alloc_skb(), we'll have enough headroom once
* ieee80211_hdr pulled.
*/


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -2327,9 +2327,9 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
&scan_p->general_params,
gen_flags);
ret = iwl_mvm_fill_scan_sched_params(params,
scan_p->periodic_params.schedule,
&scan_p->periodic_params.delay);
ret = iwl_mvm_fill_scan_sched_params(params,
scan_p->periodic_params.schedule,
&scan_p->periodic_params.delay);
if (ret)
return ret;
@ -2362,9 +2362,9 @@ static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
&scan_p->general_params,
gen_flags);
ret = iwl_mvm_fill_scan_sched_params(params,
scan_p->periodic_params.schedule,
&scan_p->periodic_params.delay);
ret = iwl_mvm_fill_scan_sched_params(params,
scan_p->periodic_params.schedule,
&scan_p->periodic_params.delay);
if (ret)
return ret;


@ -3794,8 +3794,12 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
mvm_sta->disable_tx = disable;
/* Tell mac80211 to start/stop queuing tx for this station */
ieee80211_sta_block_awake(mvm->hw, sta, disable);
/*
* If sta PS state is handled by mac80211, tell it to start/stop
* queuing tx for this station.
*/
if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS))
ieee80211_sta_block_awake(mvm->hw, sta, disable);
iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);


@ -31,6 +31,13 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
return;
list_del(&te_data->list);
/*
* the list is only used for AUX ROC events so make sure it is always
* initialized
*/
INIT_LIST_HEAD(&te_data->list);
te_data->running = false;
te_data->uid = 0;
te_data->id = TE_MAX;
@ -310,6 +317,8 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
* and know the dtim period.
*/
iwl_mvm_te_check_disconnect(mvm, te_data->vif,
!te_data->vif->bss_conf.assoc ?
"Not associated and the time event is over already..." :
"No beacon heard and the time event is over already...");
break;
default:
@ -607,14 +616,15 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
}
static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif)
struct iwl_mvm_vif *mvmvif,
u32 id)
{
struct iwl_mvm_session_prot_cmd cmd = {
.id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
mvmvif->color)),
.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
.conf_id = cpu_to_le32(mvmvif->time_event_data.id),
.conf_id = cpu_to_le32(id),
};
int ret;
@ -632,6 +642,12 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
{
u32 id;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
enum nl80211_iftype iftype;
if (!te_data->vif)
return false;
iftype = te_data->vif->type;
/*
* It is possible that by the time we got to this point the time
@ -656,8 +672,8 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) {
/* Session protection is still ongoing. Cancel it */
iwl_mvm_cancel_session_protection(mvm, mvmvif);
if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
iwl_mvm_cancel_session_protection(mvm, mvmvif, id);
if (iftype == NL80211_IFTYPE_P2P_DEVICE) {
set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
iwl_mvm_roc_finished(mvm);
}
@ -738,11 +754,6 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
IWL_ERR(mvm, "Couldn't remove the time event\n");
}
/*
* When the firmware supports the session protection API,
* this is not needed since it'll automatically remove the
* session protection after association + beacon reception.
*/
void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
@ -756,7 +767,15 @@ void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
id = te_data->id;
spin_unlock_bh(&mvm->time_event_lock);
if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
if (id != SESSION_PROTECT_CONF_ASSOC) {
IWL_DEBUG_TE(mvm,
"don't remove session protection id=%u\n",
id);
return;
}
} else if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
IWL_DEBUG_TE(mvm,
"don't remove TE with id=%u (not session protection)\n",
id);
@ -808,6 +827,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
* and know the dtim period.
*/
iwl_mvm_te_check_disconnect(mvm, vif,
!vif->bss_conf.assoc ?
"Not associated and the session protection is over already..." :
"No beacon heard and the session protection is over already...");
spin_lock_bh(&mvm->time_event_lock);
iwl_mvm_te_clear_data(mvm, te_data);
@ -981,7 +1002,8 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmvif = iwl_mvm_vif_from_mac80211(vif);
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
iwl_mvm_cancel_session_protection(mvm, mvmvif);
iwl_mvm_cancel_session_protection(mvm, mvmvif,
mvmvif->time_event_data.id);
set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
} else {
iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
@ -1141,6 +1163,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
iwl_mvm_te_clear_data(mvm, te_data);
te_data->duration = le32_to_cpu(cmd.duration_tu);
te_data->vif = vif;
spin_unlock_bh(&mvm->time_event_lock);
IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@ -238,316 +238,6 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
return last_idx;
}
/*
* Note: This structure is read from the device with IO accesses,
* and the reading already does the endian conversion. As it is
* read with u32-sized accesses, any members with a different size
* need to be ordered correctly though!
*/
struct iwl_error_event_table_v1 {
u32 valid; /* (nonzero) valid, (0) log is empty */
u32 error_id; /* type of error */
u32 pc; /* program counter */
u32 blink1; /* branch link */
u32 blink2; /* branch link */
u32 ilink1; /* interrupt link */
u32 ilink2; /* interrupt link */
u32 data1; /* error-specific data */
u32 data2; /* error-specific data */
u32 data3; /* error-specific data */
u32 bcon_time; /* beacon timer */
u32 tsf_low; /* network timestamp function timer */
u32 tsf_hi; /* network timestamp function timer */
u32 gp1; /* GP1 timer register */
u32 gp2; /* GP2 timer register */
u32 gp3; /* GP3 timer register */
u32 ucode_ver; /* uCode version */
u32 hw_ver; /* HW Silicon version */
u32 brd_ver; /* HW board version */
u32 log_pc; /* log program counter */
u32 frame_ptr; /* frame pointer */
u32 stack_ptr; /* stack pointer */
u32 hcmd; /* last host command header */
u32 isr0; /* isr status register LMPM_NIC_ISR0:
* rxtx_flag */
u32 isr1; /* isr status register LMPM_NIC_ISR1:
* host_flag */
u32 isr2; /* isr status register LMPM_NIC_ISR2:
* enc_flag */
u32 isr3; /* isr status register LMPM_NIC_ISR3:
* time_flag */
u32 isr4; /* isr status register LMPM_NIC_ISR4:
* wico interrupt */
u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
u32 wait_event; /* wait event() caller address */
u32 l2p_control; /* L2pControlField */
u32 l2p_duration; /* L2pDurationField */
u32 l2p_mhvalid; /* L2pMhValidBits */
u32 l2p_addr_match; /* L2pAddrMatchStat */
u32 lmpm_pmg_sel; /* indicate which clocks are turned on
* (LMPM_PMG_SEL) */
u32 u_timestamp; /* indicate when the date and time of the
* compilation */
u32 flow_handler; /* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
struct iwl_error_event_table {
u32 valid; /* (nonzero) valid, (0) log is empty */
u32 error_id; /* type of error */
u32 trm_hw_status0; /* TRM HW status */
u32 trm_hw_status1; /* TRM HW status */
u32 blink2; /* branch link */
u32 ilink1; /* interrupt link */
u32 ilink2; /* interrupt link */
u32 data1; /* error-specific data */
u32 data2; /* error-specific data */
u32 data3; /* error-specific data */
u32 bcon_time; /* beacon timer */
u32 tsf_low; /* network timestamp function timer */
u32 tsf_hi; /* network timestamp function timer */
u32 gp1; /* GP1 timer register */
u32 gp2; /* GP2 timer register */
u32 fw_rev_type; /* firmware revision type */
u32 major; /* uCode version major */
u32 minor; /* uCode version minor */
u32 hw_ver; /* HW Silicon version */
u32 brd_ver; /* HW board version */
u32 log_pc; /* log program counter */
u32 frame_ptr; /* frame pointer */
u32 stack_ptr; /* stack pointer */
u32 hcmd; /* last host command header */
u32 isr0; /* isr status register LMPM_NIC_ISR0:
* rxtx_flag */
u32 isr1; /* isr status register LMPM_NIC_ISR1:
* host_flag */
u32 isr2; /* isr status register LMPM_NIC_ISR2:
* enc_flag */
u32 isr3; /* isr status register LMPM_NIC_ISR3:
* time_flag */
u32 isr4; /* isr status register LMPM_NIC_ISR4:
* wico interrupt */
u32 last_cmd_id; /* last HCMD id handled by the firmware */
u32 wait_event; /* wait event() caller address */
u32 l2p_control; /* L2pControlField */
u32 l2p_duration; /* L2pDurationField */
u32 l2p_mhvalid; /* L2pMhValidBits */
u32 l2p_addr_match; /* L2pAddrMatchStat */
u32 lmpm_pmg_sel; /* indicate which clocks are turned on
* (LMPM_PMG_SEL) */
u32 u_timestamp; /* indicate when the date and time of the
* compilation */
u32 flow_handler; /* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
/*
* UMAC error struct - relevant starting from family 8000 chip.
* Note: This structure is read from the device with IO accesses,
* and the reading already does the endian conversion. As it is
* read with u32-sized accesses, any members with a different size
* need to be ordered correctly though!
*/
struct iwl_umac_error_event_table {
u32 valid; /* (nonzero) valid, (0) log is empty */
u32 error_id; /* type of error */
u32 blink1; /* branch link */
u32 blink2; /* branch link */
u32 ilink1; /* interrupt link */
u32 ilink2; /* interrupt link */
u32 data1; /* error-specific data */
u32 data2; /* error-specific data */
u32 data3; /* error-specific data */
u32 umac_major;
u32 umac_minor;
u32 frame_pointer; /* core register 27*/
u32 stack_pointer; /* core register 28 */
u32 cmd_header; /* latest host cmd sent to UMAC */
u32 nic_isr_pref; /* ISR status register */
} __packed;
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))
static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
{
struct iwl_trans *trans = mvm->trans;
struct iwl_umac_error_event_table table = {};
u32 base = mvm->trans->dbg.umac_error_event_table;
if (!base &&
!(mvm->trans->dbg.error_event_table_tlv_status &
IWL_ERROR_EVENT_TABLE_UMAC))
return;
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
if (table.valid)
mvm->fwrt.dump.umac_err_id = table.error_id;
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
mvm->status, table.valid);
}
IWL_ERR(mvm, "0x%08X | %s\n", table.error_id,
iwl_fw_lookup_assert_desc(table.error_id));
IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3);
IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major);
IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor);
IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer);
IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer);
IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header);
IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
}
static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num)
{
struct iwl_trans *trans = mvm->trans;
struct iwl_error_event_table table = {};
u32 val, base = mvm->trans->dbg.lmac_error_event_table[lmac_num];
if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) {
if (!base)
base = mvm->fw->init_errlog_ptr;
} else {
if (!base)
base = mvm->fw->inst_errlog_ptr;
}
if (base < 0x400000) {
IWL_ERR(mvm,
"Not valid error log pointer 0x%08X for %s uCode\n",
base,
(mvm->fwrt.cur_fw_img == IWL_UCODE_INIT)
? "Init" : "RT");
return;
}
/* check if there is a HW error */
val = iwl_trans_read_mem32(trans, base);
if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) {
int err;
IWL_ERR(trans, "HW error, resetting before reading\n");
/* reset the device */
iwl_trans_sw_reset(trans);
err = iwl_finish_nic_init(trans, trans->trans_cfg);
if (err)
return;
}
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
if (table.valid)
mvm->fwrt.dump.lmac_err_id[lmac_num] = table.error_id;
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
mvm->status, table.valid);
}
/* Do not change this output - scripts rely on it */
IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
iwl_fw_lookup_assert_desc(table.error_id));
IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
IWL_ERR(mvm, "0x%08X | uCode revision type\n", table.fw_rev_type);
IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major);
IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor);
IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
IWL_ERR(mvm, "0x%08X | last cmd Id\n", table.last_cmd_id);
IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
}
static void iwl_mvm_dump_iml_error_log(struct iwl_mvm *mvm)
{
struct iwl_trans *trans = mvm->trans;
u32 error, data1;
if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
error = UMAG_SB_CPU_2_STATUS;
data1 = UMAG_SB_CPU_1_STATUS;
} else if (mvm->trans->trans_cfg->device_family >=
IWL_DEVICE_FAMILY_8000) {
error = SB_CPU_2_STATUS;
data1 = SB_CPU_1_STATUS;
} else {
return;
}
error = iwl_read_umac_prph(trans, UMAG_SB_CPU_2_STATUS);
IWL_ERR(trans, "IML/ROM dump:\n");
if (error & 0xFFFF0000)
IWL_ERR(trans, "0x%04X | IML/ROM SYSASSERT\n", error >> 16);
IWL_ERR(mvm, "0x%08X | IML/ROM error/state\n", error);
IWL_ERR(mvm, "0x%08X | IML/ROM data1\n",
iwl_read_umac_prph(trans, data1));
if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
IWL_ERR(mvm, "0x%08X | IML/ROM WFPM_AUTH_KEY_0\n",
iwl_read_umac_prph(trans, SB_MODIFY_CFG_FLAG));
}
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
{
if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
IWL_ERR(mvm,
"DEVICE_ENABLED bit is not set. Aborting dump.\n");
return;
}
iwl_mvm_dump_lmac_error_log(mvm, 0);
if (mvm->trans->dbg.lmac_error_event_table[1])
iwl_mvm_dump_lmac_error_log(mvm, 1);
iwl_mvm_dump_umac_error_log(mvm);
iwl_mvm_dump_iml_error_log(mvm);
iwl_fw_error_print_fseq_regs(&mvm->fwrt);
}
int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
int tid, int frame_limit, u16 ssn)
{
@ -621,7 +311,7 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_smps_mode smps_request)
{
struct iwl_mvm_vif *mvmvif;
enum ieee80211_smps_mode smps_mode;
enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
int i;
lockdep_assert_held(&mvm->mutex);
@ -630,10 +320,8 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
return;
if (vif->type == NL80211_IFTYPE_AP)
smps_mode = IEEE80211_SMPS_OFF;
else
smps_mode = IEEE80211_SMPS_AUTOMATIC;
if (vif->type != NL80211_IFTYPE_STATION)
return;
mvmvif = iwl_mvm_vif_from_mac80211(vif);
mvmvif->smps_requests[req_type] = smps_request;
@ -683,23 +371,37 @@ void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
}
struct iwl_mvm_diversity_iter_data {
struct iwl_mvm_phy_ctxt *ctxt;
bool result;
};
static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
bool *result = _data;
struct iwl_mvm_diversity_iter_data *data = _data;
int i;
if (mvmvif->phy_ctxt != data->ctxt)
return;
for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
*result = false;
mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) {
data->result = false;
break;
}
}
}
bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt)
{
bool result = true;
struct iwl_mvm_diversity_iter_data data = {
.ctxt = ctxt,
.result = true,
};
lockdep_assert_held(&mvm->mutex);
@ -711,9 +413,9 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_diversity_iter, &result);
iwl_mvm_diversity_iter, &data);
return result;
return data.result;
}
void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
@ -1398,7 +1100,8 @@ u32 iwl_mvm_get_systime(struct iwl_mvm *mvm)
return iwl_read_prph(mvm->trans, reg_addr);
}
void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type,
u32 *gp2, u64 *boottime, ktime_t *realtime)
{
bool ps_disabled;
@ -1412,7 +1115,11 @@ void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
}
*gp2 = iwl_mvm_get_systime(mvm);
*boottime = ktime_get_boottime_ns();
if (clock_type == CLOCK_BOOTTIME && boottime)
*boottime = ktime_get_boottime_ns();
else if (clock_type == CLOCK_REALTIME && realtime)
*realtime = ktime_get_real();
if (!ps_disabled) {
mvm->ps_disabled = ps_disabled;


@ -79,7 +79,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
struct iwl_prph_scratch *prph_scratch;
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
struct iwl_prph_info *prph_info;
void *iml_img;
u32 control_flags = 0;
int ret;
int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
@ -138,8 +137,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
/* Allocate prph information
* currently we don't assign to the prph info anything, but it would get
* assigned later */
prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info),
* assigned later
*
* We also use the second half of this page to give the device some
* dummy TR/CR tail pointers - which shouldn't be necessary as we don't
* use this, but the hardware still reads/writes there and we can't let
* it go do that with a NULL pointer.
*/
BUILD_BUG_ON(sizeof(*prph_info) > PAGE_SIZE / 2);
prph_info = dma_alloc_coherent(trans->dev, PAGE_SIZE,
&trans_pcie->prph_info_dma_addr,
GFP_KERNEL);
if (!prph_info) {
@ -166,13 +172,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
ctxt_info_gen3->cr_head_idx_arr_base_addr =
cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
ctxt_info_gen3->tr_tail_idx_arr_base_addr =
cpu_to_le64(trans_pcie->rxq->tr_tail_dma);
cpu_to_le64(trans_pcie->prph_info_dma_addr + PAGE_SIZE / 2);
ctxt_info_gen3->cr_tail_idx_arr_base_addr =
cpu_to_le64(trans_pcie->rxq->cr_tail_dma);
ctxt_info_gen3->cr_idx_arr_size =
cpu_to_le16(IWL_NUM_OF_COMPLETION_RINGS);
ctxt_info_gen3->tr_idx_arr_size =
cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS);
cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4);
ctxt_info_gen3->mtr_base_addr =
cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
ctxt_info_gen3->mcr_base_addr =
@ -187,14 +189,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
trans_pcie->prph_scratch = prph_scratch;
/* Allocate IML */
iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
&trans_pcie->iml_dma_addr, GFP_KERNEL);
if (!iml_img) {
trans_pcie->iml = dma_alloc_coherent(trans->dev, trans->iml_len,
&trans_pcie->iml_dma_addr,
GFP_KERNEL);
if (!trans_pcie->iml) {
ret = -ENOMEM;
goto err_free_ctxt_info;
}
memcpy(iml_img, trans->iml, trans->iml_len);
memcpy(trans_pcie->iml, trans->iml, trans->iml_len);
iwl_enable_fw_load_int_ctx_info(trans);
@ -216,10 +219,8 @@ err_free_ctxt_info:
trans_pcie->ctxt_info_dma_addr);
trans_pcie->ctxt_info_gen3 = NULL;
err_free_prph_info:
dma_free_coherent(trans->dev,
sizeof(*prph_info),
prph_info,
trans_pcie->prph_info_dma_addr);
dma_free_coherent(trans->dev, PAGE_SIZE, prph_info,
trans_pcie->prph_info_dma_addr);
err_free_prph_scratch:
dma_free_coherent(trans->dev,
@ -230,29 +231,40 @@ err_free_prph_scratch:
}
void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (trans_pcie->iml) {
dma_free_coherent(trans->dev, trans->iml_len, trans_pcie->iml,
trans_pcie->iml_dma_addr);
trans_pcie->iml_dma_addr = 0;
trans_pcie->iml = NULL;
}
iwl_pcie_ctxt_info_free_fw_img(trans);
if (alive)
return;
if (!trans_pcie->ctxt_info_gen3)
return;
/* ctxt_info_gen3 and prph_scratch are still needed for PNVM load */
dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
trans_pcie->ctxt_info_gen3,
trans_pcie->ctxt_info_dma_addr);
trans_pcie->ctxt_info_dma_addr = 0;
trans_pcie->ctxt_info_gen3 = NULL;
iwl_pcie_ctxt_info_free_fw_img(trans);
dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
trans_pcie->prph_scratch,
trans_pcie->prph_scratch_dma_addr);
trans_pcie->prph_scratch_dma_addr = 0;
trans_pcie->prph_scratch = NULL;
dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_info),
trans_pcie->prph_info,
/* this is needed for the entire lifetime */
dma_free_coherent(trans->dev, PAGE_SIZE, trans_pcie->prph_info,
trans_pcie->prph_info_dma_addr);
trans_pcie->prph_info_dma_addr = 0;
trans_pcie->prph_info = NULL;
@ -290,3 +302,37 @@ int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
return 0;
}
int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
const void *data, u32 len)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
&trans_pcie->prph_scratch->ctrl_cfg;
int ret;
if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return 0;
/* only allocate the DRAM if not allocated yet */
if (!trans->reduce_power_loaded) {
if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size))
return -EBUSY;
ret = iwl_pcie_ctxt_info_alloc_dma(trans, data, len,
&trans_pcie->reduce_power_dram);
if (ret < 0) {
IWL_DEBUG_FW(trans,
"Failed to allocate reduce power DMA %d.\n",
ret);
return ret;
}
}
prph_sc_ctrl->reduce_power_cfg.base_addr =
cpu_to_le64(trans_pcie->reduce_power_dram.physical);
prph_sc_ctrl->reduce_power_cfg.size =
cpu_to_le32(trans_pcie->reduce_power_dram.size);
return 0;
}


@ -532,6 +532,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x31DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
IWL_DEV_INFO(0xA370, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
IWL_DEV_INFO(0xA370, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
IWL_DEV_INFO(0x51F0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name),
IWL_DEV_INFO(0x51F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name),
IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name),
@ -1029,6 +1031,11 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_ma_a0_mr_a0, iwl_ax221_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_ma_a0_fm_a0, iwl_ax231_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY,
@ -1209,14 +1216,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (cfg == &iwlax210_2ax_cfg_so_hr_a0) {
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_TY) {
iwl_trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
} else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) ==
CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF)) {
iwl_trans->cfg = &iwlax210_2ax_cfg_so_jf_b0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) {
} else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) ==
CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF)) {
iwl_trans->cfg = &iwlax211_2ax_cfg_so_gf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
} else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) ==
CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF4)) {
iwl_trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0;
}
}


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2003-2015, 2018-2020 Intel Corporation
* Copyright (C) 2003-2015, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -109,12 +109,8 @@ struct iwl_rx_completion_desc {
* Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
* In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
* @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
* @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
* @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
* @tr_tail: driver's pointer to the transmission ring tail buffer
* @tr_tail_dma: physical address of the buffer for the transmission ring tail
* @cr_tail: driver's pointer to the completion ring tail buffer
* @cr_tail_dma: physical address of the buffer for the completion ring tail
* @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
* @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
* @read: Shared index to newest available Rx buffer
* @write: Shared index to oldest written Rx packet
* @free_count: Number of pre-allocated buffers in rx_free
@ -142,10 +138,6 @@ struct iwl_rxq {
struct iwl_rx_completion_desc *cd;
};
dma_addr_t used_bd_dma;
__le16 *tr_tail;
dma_addr_t tr_tail_dma;
__le16 *cr_tail;
dma_addr_t cr_tail_dma;
u32 read;
u32 write;
u32 free_count;
@ -279,6 +271,8 @@ struct cont_rec {
* Context information addresses will be taken from here.
* This is driver's local copy for keeping track of size and
* count for allocating and freeing the memory.
* @iml: image loader image virtual address
* @iml_dma_addr: image loader image DMA address
* @trans: pointer to the generic transport area
* @scd_base_addr: scheduler sram base address in SRAM
* @kw: keep warm address
@ -317,6 +311,7 @@ struct cont_rec {
* @alloc_page_lock: spinlock for the page allocator
* @alloc_page: allocated page to still use parts of
* @alloc_page_used: how much of the allocated page was already used (bytes)
* @rf_name: name/version of the CRF, if any
*/
struct iwl_trans_pcie {
struct iwl_rxq *rxq;
@ -329,6 +324,7 @@ struct iwl_trans_pcie {
};
struct iwl_prph_info *prph_info;
struct iwl_prph_scratch *prph_scratch;
void *iml;
dma_addr_t ctxt_info_dma_addr;
dma_addr_t prph_info_dma_addr;
dma_addr_t prph_scratch_dma_addr;
@ -353,6 +349,7 @@ struct iwl_trans_pcie {
struct iwl_dma_ptr kw;
struct iwl_dram_data pnvm_dram;
struct iwl_dram_data reduce_power_dram;
struct iwl_txq *txq_memory;
@ -409,6 +406,8 @@ struct iwl_trans_pcie {
bool fw_reset_handshake;
bool fw_reset_done;
wait_queue_head_t fw_reset_waitq;
char rf_name[32];
};
static inline struct iwl_trans_pcie *
@ -530,9 +529,6 @@ static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
#define IWL_NUM_OF_COMPLETION_RINGS 31
#define IWL_NUM_OF_TRANSFER_RINGS 527
static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
int start)
{


@ -663,7 +663,6 @@ static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
struct device *dev = trans->dev;
bool use_rx_td = (trans->trans_cfg->device_family >=
IWL_DEVICE_FAMILY_AX210);
int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
@ -685,21 +684,6 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
rxq->used_bd, rxq->used_bd_dma);
rxq->used_bd_dma = 0;
rxq->used_bd = NULL;
if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return;
if (rxq->tr_tail)
dma_free_coherent(dev, sizeof(__le16),
rxq->tr_tail, rxq->tr_tail_dma);
rxq->tr_tail_dma = 0;
rxq->tr_tail = NULL;
if (rxq->cr_tail)
dma_free_coherent(dev, sizeof(__le16),
rxq->cr_tail, rxq->cr_tail_dma);
rxq->cr_tail_dma = 0;
rxq->cr_tail = NULL;
}
static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
@ -744,21 +728,6 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
rxq->rb_stts_dma =
trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
if (!use_rx_td)
return 0;
/* Allocate the driver's pointer to TR tail */
rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
&rxq->tr_tail_dma, GFP_KERNEL);
if (!rxq->tr_tail)
goto err;
/* Allocate the driver's pointer to CR tail */
rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
&rxq->cr_tail_dma, GFP_KERNEL);
if (!rxq->cr_tail)
goto err;
return 0;
err:
@ -1590,9 +1559,6 @@ restart:
out:
/* Backtrack one entry */
rxq->read = i;
/* update cr tail with the rxq read pointer */
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
*rxq->cr_tail = cpu_to_le16(r);
spin_unlock(&rxq->lock);
/*


@ -149,7 +149,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
iwl_pcie_ctxt_info_free_paging(trans);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
iwl_pcie_ctxt_info_gen3_free(trans);
iwl_pcie_ctxt_info_gen3_free(trans, false);
else
iwl_pcie_ctxt_info_free(trans);
@ -240,6 +240,75 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
return 0;
}
static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
char *buf = trans_pcie->rf_name;
size_t buflen = sizeof(trans_pcie->rf_name);
size_t pos;
u32 version;
if (buf[0])
return;
switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF):
pos = scnprintf(buf, buflen, "JF");
break;
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF):
pos = scnprintf(buf, buflen, "GF");
break;
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF4):
pos = scnprintf(buf, buflen, "GF4");
break;
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR):
pos = scnprintf(buf, buflen, "HR");
break;
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1):
pos = scnprintf(buf, buflen, "HR1");
break;
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB):
pos = scnprintf(buf, buflen, "HRCDB");
break;
default:
return;
}
switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR):
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1):
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB):
version = iwl_read_prph(trans, CNVI_MBOX_C);
switch (version) {
case 0x20000:
pos += scnprintf(buf + pos, buflen - pos, " B3");
break;
case 0x120000:
pos += scnprintf(buf + pos, buflen - pos, " B5");
break;
default:
pos += scnprintf(buf + pos, buflen - pos,
" (0x%x)", version);
break;
}
break;
default:
break;
}
pos += scnprintf(buf + pos, buflen - pos, ", rfid=0x%x",
trans->hw_rf_id);
IWL_INFO(trans, "Detected RF %s\n", buf);
/*
* also add a \n for debugfs - need to do it after printing
* since our IWL_INFO machinery wants to see a static \n at
* the end of the string
*/
pos += scnprintf(buf + pos, buflen - pos, "\n");
}
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@ -254,7 +323,10 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
/* now that we got alive we can free the fw image & the context info.
* paging memory cannot be freed included since FW will still use it
*/
iwl_pcie_ctxt_info_free(trans);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
iwl_pcie_ctxt_info_gen3_free(trans, true);
else
iwl_pcie_ctxt_info_free(trans);
/*
* Re-enable all the interrupts, including the RF-Kill one, now that
@ -263,6 +335,8 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
iwl_enable_interrupts(trans);
mutex_lock(&trans_pcie->mutex);
iwl_pcie_check_hw_rf_kill(trans);
iwl_pcie_get_rf_name(trans);
mutex_unlock(&trans_pcie->mutex);
}


@ -1648,7 +1648,7 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
if (ret)
IWL_ERR(trans_pcie->trans,
"Failed to set affinity mask for IRQ %d\n",
i);
trans_pcie->msix_entries[i].vector);
}
}
@ -1943,6 +1943,12 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
trans_pcie->pnvm_dram.block,
trans_pcie->pnvm_dram.physical);
if (trans_pcie->reduce_power_dram.size)
dma_free_coherent(trans->dev,
trans_pcie->reduce_power_dram.size,
trans_pcie->reduce_power_dram.block,
trans_pcie->reduce_power_dram.physical);
mutex_destroy(&trans_pcie->mutex);
iwl_trans_free(trans);
}
@ -2848,11 +2854,28 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
return bytes_copied;
}
static ssize_t iwl_dbgfs_rf_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (!trans_pcie->rf_name[0])
return -ENODEV;
return simple_read_from_buffer(user_buf, count, ppos,
trans_pcie->rf_name,
strlen(trans_pcie->rf_name));
}
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
DEBUGFS_READ_FILE_OPS(rf);
static const struct file_operations iwl_dbgfs_tx_queue_ops = {
.owner = THIS_MODULE,
.open = iwl_dbgfs_tx_queue_open,
@ -2879,6 +2902,7 @@ void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
DEBUGFS_ADD_FILE(rfkill, dir, 0600);
DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
DEBUGFS_ADD_FILE(rf, dir, 0400);
}
static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
@ -3400,6 +3424,7 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
.rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
.set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm,
.set_reduce_power = iwl_trans_pcie_ctx_info_gen3_set_reduce_power,
#ifdef CONFIG_IWLWIFI_DEBUGFS
.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
#endif
@ -3413,6 +3438,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
struct iwl_trans *trans;
int ret, addr_size;
const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
void __iomem * const *table;
if (!cfg_trans->gen2)
ops = &trans_ops_pcie;
@ -3485,9 +3511,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
goto out_no_pci;
}
trans_pcie->hw_base = pcim_iomap_table(pdev)[0];
if (!trans_pcie->hw_base) {
table = pcim_iomap_table(pdev);
if (!table) {
dev_err(&pdev->dev, "pcim_iomap_table failed\n");
ret = -ENOMEM;
goto out_no_pci;
}
trans_pcie->hw_base = table[0];
if (!trans_pcie->hw_base) {
dev_err(&pdev->dev, "couldn't find IO mem in first BAR\n");
ret = -ENODEV;
goto out_no_pci;
}


@ -988,15 +988,18 @@ int __orinoco_hw_setup_enc(struct orinoco_private *priv)
* tsc must be NULL or up to 8 bytes
*/
int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
int set_tx, const u8 *key, const u8 *rsc,
size_t rsc_len, const u8 *tsc, size_t tsc_len)
int set_tx, const u8 *key, size_t key_len,
const u8 *rsc, size_t rsc_len,
const u8 *tsc, size_t tsc_len)
{
struct {
__le16 idx;
u8 rsc[ORINOCO_SEQ_LEN];
u8 key[TKIP_KEYLEN];
u8 tx_mic[MIC_KEYLEN];
u8 rx_mic[MIC_KEYLEN];
struct {
u8 key[TKIP_KEYLEN];
u8 tx_mic[MIC_KEYLEN];
u8 rx_mic[MIC_KEYLEN];
} tkip;
u8 tsc[ORINOCO_SEQ_LEN];
} __packed buf;
struct hermes *hw = &priv->hw;
@ -1011,8 +1014,9 @@ int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
key_idx |= 0x8000;
buf.idx = cpu_to_le16(key_idx);
memcpy(buf.key, key,
sizeof(buf.key) + sizeof(buf.tx_mic) + sizeof(buf.rx_mic));
if (key_len != sizeof(buf.tkip))
return -EINVAL;
memcpy(&buf.tkip, key, sizeof(buf.tkip));
if (rsc_len > sizeof(buf.rsc))
rsc_len = sizeof(buf.rsc);


@ -38,8 +38,9 @@ int __orinoco_hw_set_wap(struct orinoco_private *priv);
int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv);
int __orinoco_hw_setup_enc(struct orinoco_private *priv);
int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
int set_tx, const u8 *key, const u8 *rsc,
size_t rsc_len, const u8 *tsc, size_t tsc_len);
int set_tx, const u8 *key, size_t key_len,
const u8 *rsc, size_t rsc_len,
const u8 *tsc, size_t tsc_len);
int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx);
int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
struct net_device *dev,


@ -791,7 +791,7 @@ static int orinoco_ioctl_set_encodeext(struct net_device *dev,
err = __orinoco_hw_set_tkip_key(priv, idx,
ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY,
priv->keys[idx].key,
priv->keys[idx].key, priv->keys[idx].key_len,
tkip_iv, ORINOCO_SEQ_LEN, NULL, 0);
if (err)
printk(KERN_ERR "%s: Error %d setting TKIP key"


@ -995,6 +995,11 @@ struct host_cmd_ds_802_11_key_material {
struct mwifiex_ie_type_key_param_set key_param_set;
} __packed;
struct host_cmd_ds_802_11_key_material_wep {
__le16 action;
struct mwifiex_ie_type_key_param_set key_param_set[NUM_WEP_KEYS];
} __packed;
struct host_cmd_ds_gen {
__le16 command;
__le16 size;
@ -2347,6 +2352,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_wmm_get_status get_wmm_status;
struct host_cmd_ds_802_11_key_material key_material;
struct host_cmd_ds_802_11_key_material_v2 key_material_v2;
struct host_cmd_ds_802_11_key_material_wep key_material_wep;
struct host_cmd_ds_version_ext verext;
struct host_cmd_ds_mgmt_frame_reg reg_mask;
struct host_cmd_ds_remain_on_chan roc_cfg;


@ -840,14 +840,15 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
}
if (!enc_key) {
memset(&key_material->key_param_set, 0,
(NUM_WEP_KEYS *
sizeof(struct mwifiex_ie_type_key_param_set)));
struct host_cmd_ds_802_11_key_material_wep *key_material_wep =
(struct host_cmd_ds_802_11_key_material_wep *)key_material;
memset(key_material_wep->key_param_set, 0,
sizeof(key_material_wep->key_param_set));
ret = mwifiex_set_keyparamset_wep(priv,
&key_material->key_param_set,
&key_material_wep->key_param_set[0],
&key_param_len);
cmd->size = cpu_to_le16(key_param_len +
sizeof(key_material->action) + S_DS_GEN);
sizeof(key_material_wep->action) + S_DS_GEN);
return ret;
} else
memset(&key_material->key_param_set, 0,


@ -4552,7 +4552,7 @@ static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
else
rates = sta->supp_rates[NL80211_BAND_5GHZ] << 5;
legacy_rate_mask_to_array(p->legacy_rates, rates);
memcpy(p->ht_rates, sta->ht_cap.mcs.rx_mask, 16);
memcpy(p->ht_rates, &sta->ht_cap.mcs, 16);
p->interop = 1;
p->amsdu_enabled = 0;
@ -5034,7 +5034,7 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ap_legacy_rates =
ap->supp_rates[NL80211_BAND_5GHZ] << 5;
}
memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
memcpy(ap_mcs_rates, &ap->ht_cap.mcs, 16);
rcu_read_unlock();


@ -191,6 +191,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
q->entry[idx].txwi = txwi;
q->entry[idx].skb = skb;
q->entry[idx].wcid = 0xffff;
return idx;
}
@ -349,6 +350,9 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
{
struct ieee80211_tx_status status = {
.sta = sta,
};
struct mt76_tx_info tx_info = {
.skb = skb,
};
@ -360,11 +364,9 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
u8 *txwi;
t = mt76_get_txwi(dev);
if (!t) {
hw = mt76_tx_status_get_hw(dev, skb);
ieee80211_free_txskb(hw, skb);
return -ENOMEM;
}
if (!t)
goto free_skb;
txwi = mt76_get_txwi_ptr(dev, t);
skb->prev = skb->next = NULL;
@ -427,8 +429,13 @@ free:
}
#endif
dev_kfree_skb(tx_info.skb);
mt76_put_txwi(dev, t);
free_skb:
status.skb = tx_info.skb;
hw = mt76_tx_status_get_hw(dev, tx_info.skb);
ieee80211_tx_status_ext(hw, &status);
return ret;
}


@ -83,6 +83,22 @@ static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
{ .throughput = 300 * 1024, .blink_time = 50 },
};
struct ieee80211_rate mt76_rates[] = {
CCK_RATE(0, 10),
CCK_RATE(1, 20),
CCK_RATE(2, 55),
CCK_RATE(3, 110),
OFDM_RATE(11, 60),
OFDM_RATE(15, 90),
OFDM_RATE(10, 120),
OFDM_RATE(14, 180),
OFDM_RATE(9, 240),
OFDM_RATE(13, 360),
OFDM_RATE(8, 480),
OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);
static int mt76_led_init(struct mt76_dev *dev)
{
struct device_node *np = dev->dev->of_node;
@ -315,17 +331,6 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, AP_LINK_PS);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_ADHOC);
}
struct mt76_phy *
@ -346,6 +351,17 @@ mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
phy->hw = hw;
phy->priv = hw->priv + phy_size;
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_ADHOC);
return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);
@ -428,6 +444,17 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
mutex_init(&dev->mcu.mutex);
dev->tx_worker.fn = mt76_tx_worker;
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_ADHOC);
spin_lock_init(&dev->token_lock);
idr_init(&dev->token);
@ -632,20 +659,19 @@ void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
}
EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
void mt76_update_survey(struct mt76_dev *dev)
void mt76_update_survey(struct mt76_phy *phy)
{
struct mt76_dev *dev = phy->dev;
ktime_t cur_time;
if (dev->drv->update_survey)
dev->drv->update_survey(dev);
dev->drv->update_survey(phy);
cur_time = ktime_get_boottime();
mt76_update_survey_active_time(&dev->phy, cur_time);
if (dev->phy2)
mt76_update_survey_active_time(dev->phy2, cur_time);
mt76_update_survey_active_time(phy, cur_time);
if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
struct mt76_channel_state *state = dev->phy.chan_state;
struct mt76_channel_state *state = phy->chan_state;
spin_lock_bh(&dev->cc_lock);
state->cc_bss_rx += dev->cur_cc_bss_rx;
@ -664,7 +690,7 @@ void mt76_set_channel(struct mt76_phy *phy)
int timeout = HZ / 5;
wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
mt76_update_survey(dev);
mt76_update_survey(phy);
phy->chandef = *chandef;
phy->chan_state = mt76_channel_state(phy, chandef->chan);
@ -689,7 +715,7 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
mutex_lock(&dev->mutex);
if (idx == 0 && dev->drv->update_survey)
mt76_update_survey(dev);
mt76_update_survey(phy);
sband = &phy->sband_2g;
if (idx >= sband->sband.n_channels) {


@ -87,6 +87,22 @@ enum mt76_rxq_id {
__MT_RXQ_MAX
};
enum mt76_cipher_type {
MT_CIPHER_NONE,
MT_CIPHER_WEP40,
MT_CIPHER_TKIP,
MT_CIPHER_TKIP_NO_MIC,
MT_CIPHER_AES_CCMP,
MT_CIPHER_WEP104,
MT_CIPHER_BIP_CMAC_128,
MT_CIPHER_WEP128,
MT_CIPHER_WAPI,
MT_CIPHER_CCMP_CCX,
MT_CIPHER_CCMP_256,
MT_CIPHER_GCMP,
MT_CIPHER_GCMP_256,
};
struct mt76_queue_buf {
dma_addr_t addr;
u16 len;
@ -320,6 +336,7 @@ enum {
struct mt76_hw_cap {
bool has_2ghz;
bool has_5ghz;
bool has_6ghz;
};
#define MT_DRV_TXWI_NO_FREE BIT(0)
@ -336,7 +353,7 @@ struct mt76_driver_ops {
u16 token_size;
u8 mcs_rates;
void (*update_survey)(struct mt76_dev *dev);
void (*update_survey)(struct mt76_phy *phy);
int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
@ -738,6 +755,21 @@ enum mt76_phy_type {
MT_PHY_TYPE_HE_MU,
};
#define CCK_RATE(_idx, _rate) { \
.bitrate = _rate, \
.flags = IEEE80211_RATE_SHORT_PREAMBLE, \
.hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \
.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx), \
}
#define OFDM_RATE(_idx, _rate) { \
.bitrate = _rate, \
.hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
}
extern struct ieee80211_rate mt76_rates[12];
#define __mt76_rr(dev, ...) (dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...) (dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...) (dev)->bus->rmw((dev), __VA_ARGS__)
@ -1031,7 +1063,7 @@ void mt76_release_buffered_frames(struct ieee80211_hw *hw,
bool more_data);
bool mt76_has_tx_pending(struct mt76_phy *phy);
void mt76_set_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_dev *dev);
void mt76_update_survey(struct mt76_phy *phy);
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
@ -1056,7 +1088,14 @@ struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
struct sk_buff_head *list);
void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb);
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb,
struct list_head *free_list);
static inline void
mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb)
{
__mt76_tx_complete_skb(dev, wcid, skb, NULL);
}
void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
bool flush);
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@ -1253,4 +1292,15 @@ mt76_token_put(struct mt76_dev *dev, int token)
return txwi;
}
static inline int
mt76_get_next_pkt_id(struct mt76_wcid *wcid)
{
wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
wcid->packet_id == MT_PACKET_ID_NO_SKB)
wcid->packet_id = MT_PACKET_ID_FIRST;
return wcid->packet_id;
}
#endif


@ -304,34 +304,6 @@ mt7603_init_hardware(struct mt7603_dev *dev)
return 0;
}
#define CCK_RATE(_idx, _rate) { \
.bitrate = _rate, \
.flags = IEEE80211_RATE_SHORT_PREAMBLE, \
.hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \
.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx), \
}
#define OFDM_RATE(_idx, _rate) { \
.bitrate = _rate, \
.hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
}
static struct ieee80211_rate mt7603_rates[] = {
CCK_RATE(0, 10),
CCK_RATE(1, 20),
CCK_RATE(2, 55),
CCK_RATE(3, 110),
OFDM_RATE(11, 60),
OFDM_RATE(15, 90),
OFDM_RATE(10, 120),
OFDM_RATE(14, 180),
OFDM_RATE(9, 240),
OFDM_RATE(13, 360),
OFDM_RATE(8, 480),
OFDM_RATE(12, 540),
};
static const struct ieee80211_iface_limit if_limits[] = {
{
.max = 1,
@ -569,8 +541,8 @@ int mt7603_register_device(struct mt7603_dev *dev)
wiphy->reg_notifier = mt7603_regd_notifier;
ret = mt76_register_device(&dev->mt76, true, mt7603_rates,
ARRAY_SIZE(mt7603_rates));
ret = mt76_register_device(&dev->mt76, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
if (ret)
return ret;


@ -550,14 +550,27 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
u8 *data = (u8 *)rxd;
if (status->flag & RX_FLAG_DECRYPTED) {
status->iv[0] = data[5];
status->iv[1] = data[4];
status->iv[2] = data[3];
status->iv[3] = data[2];
status->iv[4] = data[1];
status->iv[5] = data[0];
insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
case MT_CIPHER_AES_CCMP:
case MT_CIPHER_CCMP_CCX:
case MT_CIPHER_CCMP_256:
insert_ccmp_hdr =
FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
fallthrough;
case MT_CIPHER_TKIP:
case MT_CIPHER_TKIP_NO_MIC:
case MT_CIPHER_GCMP:
case MT_CIPHER_GCMP_256:
status->iv[0] = data[5];
status->iv[1] = data[4];
status->iv[2] = data[3];
status->iv[3] = data[2];
status->iv[4] = data[1];
status->iv[5] = data[0];
break;
default:
break;
}
}
rxd += 4;
@ -831,7 +844,7 @@ void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
}
static enum mt7603_cipher_type
static enum mt76_cipher_type
mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
memset(key_data, 0, 32);
@ -863,7 +876,7 @@ mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
struct ieee80211_key_conf *key)
{
enum mt7603_cipher_type cipher;
enum mt76_cipher_type cipher;
u32 addr = mt7603_wtbl3_addr(wcid);
u8 key_data[32];
int key_len = sizeof(key_data);
@ -1213,7 +1226,7 @@ mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (!mt7603_fill_txs(dev, sta, info, txs_data)) {
ieee80211_tx_info_clear_status(info);
info->status.rates[0].count = 0;
info->status.rates[0].idx = -1;
}
@ -1584,12 +1597,12 @@ trigger:
return true;
}
void mt7603_update_channel(struct mt76_dev *mdev)
void mt7603_update_channel(struct mt76_phy *mphy)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
struct mt7603_dev *dev = container_of(mphy->dev, struct mt7603_dev, mt76);
struct mt76_channel_state *state;
state = mdev->phy.chan_state;
state = mphy->chan_state;
state->cc_busy += mt76_rr(dev, MT_MIB_STAT_CCA);
}
@ -1806,7 +1819,7 @@ void mt7603_mac_work(struct work_struct *work)
mutex_lock(&dev->mt76.mutex);
dev->mphy.mac_work_count++;
mt76_update_survey(&dev->mt76);
mt76_update_survey(&dev->mphy);
mt7603_edcca_check(dev);
for (i = 0, idx = 0; i < 2; i++) {


@ -256,7 +256,7 @@ void mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t);
void mt7603_update_channel(struct mt76_dev *mdev);
void mt7603_update_channel(struct mt76_phy *mphy);
void mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val);
void mt7603_cca_stats_reset(struct mt7603_dev *dev);


@ -765,16 +765,4 @@ enum {
#define MT_WTBL1_OR (MT_WTBL1_BASE + 0x2300)
#define MT_WTBL1_OR_PSM_WRITE BIT(31)
enum mt7603_cipher_type {
MT_CIPHER_NONE,
MT_CIPHER_WEP40,
MT_CIPHER_TKIP,
MT_CIPHER_TKIP_NO_MIC,
MT_CIPHER_AES_CCMP,
MT_CIPHER_WEP104,
MT_CIPHER_BIP_CMAC_128,
MT_CIPHER_WEP128,
MT_CIPHER_WAPI,
};
#endif


@ -1,4 +1,4 @@
#SPDX-License-Identifier: ISC
# SPDX-License-Identifier: ISC
obj-$(CONFIG_MT7615_COMMON) += mt7615-common.o
obj-$(CONFIG_MT7615E) += mt7615e.o


@ -75,7 +75,7 @@ mt7615_pm_set(void *data, u64 val)
if (!mt7615_wait_for_mcu_init(dev))
return 0;
if (!mt7615_firmware_offload(dev) || !mt76_is_mmio(&dev->mt76))
if (!mt7615_firmware_offload(dev) || mt76_is_usb(&dev->mt76))
return -EOPNOTSUPP;
if (val == pm->enable)
@ -319,24 +319,6 @@ mt7615_radio_read(struct seq_file *s, void *data)
return 0;
}
static int mt7615_read_temperature(struct seq_file *s, void *data)
{
struct mt7615_dev *dev = dev_get_drvdata(s->private);
int temp;
if (!mt7615_wait_for_mcu_init(dev))
return 0;
/* cpu */
mt7615_mutex_acquire(dev);
temp = mt7615_mcu_get_temperature(dev, 0);
mt7615_mutex_release(dev);
seq_printf(s, "Temperature: %d\n", temp);
return 0;
}
static int
mt7615_queues_acq(struct seq_file *s, void *data)
{
@ -566,8 +548,6 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
debugfs_create_file("reset_test", 0200, dir, dev,
&fops_reset_test);
debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir,
mt7615_read_temperature);
debugfs_create_file("ext_mac_addr", 0600, dir, dev, &fops_ext_mac_addr);
debugfs_create_u32("rf_wfidx", 0600, dir, &dev->debugfs_rf_wf);


@ -81,7 +81,7 @@ static int mt7615_poll_tx(struct napi_struct *napi, int budget)
if (napi_complete(napi))
mt7615_irq_enable(dev, mt7615_tx_mcu_int_mask(dev));
mt76_connac_pm_unref(&dev->pm);
mt76_connac_pm_unref(&dev->mphy, &dev->pm);
return 0;
}
@ -99,7 +99,7 @@ static int mt7615_poll_rx(struct napi_struct *napi, int budget)
return 0;
}
done = mt76_dma_rx_poll(napi, budget);
mt76_connac_pm_unref(&dev->pm);
mt76_connac_pm_unref(&dev->mphy, &dev->pm);
return done;
}
@ -222,14 +222,9 @@ void mt7615_dma_start(struct mt7615_dev *dev)
int mt7615_dma_init(struct mt7615_dev *dev)
{
int rx_ring_size = MT7615_RX_RING_SIZE;
int rx_buf_size = MT_RX_BUF_SIZE;
u32 mask;
int ret;
/* Increase buffer size to receive large VHT MPDUs */
if (dev->mphy.cap.has_5ghz)
rx_buf_size *= 2;
mt76_dma_attach(&dev->mt76);
mt76_wr(dev, MT_WPDMA_GLO_CFG,
@ -270,7 +265,7 @@ int mt7615_dma_init(struct mt7615_dev *dev)
/* init rx queues */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
MT7615_RX_MCU_RING_SIZE, rx_buf_size,
MT7615_RX_MCU_RING_SIZE, MT_RX_BUF_SIZE,
MT_RX_RING_BASE);
if (ret)
return ret;
@ -279,7 +274,7 @@ int mt7615_dma_init(struct mt7615_dev *dev)
rx_ring_size /= 2;
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
rx_ring_size, rx_buf_size, MT_RX_RING_BASE);
rx_ring_size, MT_RX_BUF_SIZE, MT_RX_RING_BASE);
if (ret)
return ret;


@ -8,11 +8,61 @@
*/
#include <linux/etherdevice.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include "mt7615.h"
#include "mac.h"
#include "mcu.h"
#include "eeprom.h"
static ssize_t mt7615_thermal_show_temp(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct mt7615_dev *mdev = dev_get_drvdata(dev);
int temperature;
if (!mt7615_wait_for_mcu_init(mdev))
return 0;
mt7615_mutex_acquire(mdev);
temperature = mt7615_mcu_get_temperature(mdev);
mt7615_mutex_release(mdev);
if (temperature < 0)
return temperature;
/* display in millidegree celcius */
return sprintf(buf, "%u\n", temperature * 1000);
}
static SENSOR_DEVICE_ATTR(temp1_input, 0444, mt7615_thermal_show_temp,
NULL, 0);
static struct attribute *mt7615_hwmon_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(mt7615_hwmon);
int mt7615_thermal_init(struct mt7615_dev *dev)
{
struct wiphy *wiphy = mt76_hw(dev)->wiphy;
struct device *hwmon;
if (!IS_REACHABLE(CONFIG_HWMON))
return 0;
hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev,
wiphy_name(wiphy), dev,
mt7615_hwmon_groups);
if (IS_ERR(hwmon))
return PTR_ERR(hwmon);
return 0;
}
EXPORT_SYMBOL_GPL(mt7615_thermal_init);
static void
mt7615_phy_init(struct mt7615_dev *dev)
{
@ -174,35 +224,6 @@ bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev)
}
EXPORT_SYMBOL_GPL(mt7615_wait_for_mcu_init);
#define CCK_RATE(_idx, _rate) { \
.bitrate = _rate, \
.flags = IEEE80211_RATE_SHORT_PREAMBLE, \
.hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \
.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + (_idx)), \
}
#define OFDM_RATE(_idx, _rate) { \
.bitrate = _rate, \
.hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
}
struct ieee80211_rate mt7615_rates[] = {
CCK_RATE(0, 10),
CCK_RATE(1, 20),
CCK_RATE(2, 55),
CCK_RATE(3, 110),
OFDM_RATE(11, 60),
OFDM_RATE(15, 90),
OFDM_RATE(10, 120),
OFDM_RATE(14, 180),
OFDM_RATE(9, 240),
OFDM_RATE(13, 360),
OFDM_RATE(8, 480),
OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt7615_rates);
static const struct ieee80211_iface_limit if_limits[] = {
{
.max = 1,
@ -362,7 +383,7 @@ mt7615_init_wiphy(struct ieee80211_hw *hw)
wiphy->reg_notifier = mt7615_regd_notifier;
wiphy->max_sched_scan_plan_interval =
MT76_CONNAC_MAX_SCHED_SCAN_INTERVAL;
MT76_CONNAC_MAX_TIME_SCHED_SCAN_INTERVAL;
wiphy->max_sched_scan_ie_len = IEEE80211_MAX_DATA_LEN;
wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN;
wiphy->max_sched_scan_ssids = MT76_CONNAC_MAX_SCHED_SCAN_SSID;
@ -472,8 +493,8 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
for (i = 0; i <= MT_TXQ_PSD ; i++)
mphy->q_tx[i] = dev->mphy.q_tx[i];
ret = mt76_register_phy(mphy, true, mt7615_rates,
ARRAY_SIZE(mt7615_rates));
ret = mt76_register_phy(mphy, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
if (ret)
ieee80211_free_hw(mphy->hw);


@ -20,7 +20,7 @@
#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
static const struct mt7615_dfs_radar_spec etsi_radar_specs = {
.pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
.radar_pattern = {
[5] = { 1, 0, 6, 32, 28, 0, 17, 990, 5010, 1, 1 },
[6] = { 1, 0, 9, 32, 28, 0, 27, 615, 5010, 1, 1 },
@ -34,7 +34,7 @@ static const struct mt7615_dfs_radar_spec etsi_radar_specs = {
};
static const struct mt7615_dfs_radar_spec fcc_radar_specs = {
.pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
.radar_pattern = {
[0] = { 1, 0, 9, 32, 28, 0, 13, 508, 3076, 1, 1 },
[1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 },
@ -45,7 +45,7 @@ static const struct mt7615_dfs_radar_spec fcc_radar_specs = {
};
static const struct mt7615_dfs_radar_spec jp_radar_specs = {
.pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
.radar_pattern = {
[0] = { 1, 0, 8, 32, 28, 0, 13, 508, 3076, 1, 1 },
[1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 },
@ -57,6 +57,33 @@ static const struct mt7615_dfs_radar_spec jp_radar_specs = {
},
};
static enum mt76_cipher_type
mt7615_mac_get_cipher(int cipher)
{
switch (cipher) {
case WLAN_CIPHER_SUITE_WEP40:
return MT_CIPHER_WEP40;
case WLAN_CIPHER_SUITE_WEP104:
return MT_CIPHER_WEP104;
case WLAN_CIPHER_SUITE_TKIP:
return MT_CIPHER_TKIP;
case WLAN_CIPHER_SUITE_AES_CMAC:
return MT_CIPHER_BIP_CMAC_128;
case WLAN_CIPHER_SUITE_CCMP:
return MT_CIPHER_AES_CCMP;
case WLAN_CIPHER_SUITE_CCMP_256:
return MT_CIPHER_CCMP_256;
case WLAN_CIPHER_SUITE_GCMP:
return MT_CIPHER_GCMP;
case WLAN_CIPHER_SUITE_GCMP_256:
return MT_CIPHER_GCMP_256;
case WLAN_CIPHER_SUITE_SMS4:
return MT_CIPHER_WAPI;
default:
return MT_CIPHER_NONE;
}
}
static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
u8 idx, bool unicast)
{
@ -313,14 +340,27 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
u8 *data = (u8 *)rxd;
if (status->flag & RX_FLAG_DECRYPTED) {
status->iv[0] = data[5];
status->iv[1] = data[4];
status->iv[2] = data[3];
status->iv[3] = data[2];
status->iv[4] = data[1];
status->iv[5] = data[0];
insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
case MT_CIPHER_AES_CCMP:
case MT_CIPHER_CCMP_CCX:
case MT_CIPHER_CCMP_256:
insert_ccmp_hdr =
FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
fallthrough;
case MT_CIPHER_TKIP:
case MT_CIPHER_TKIP_NO_MIC:
case MT_CIPHER_GCMP:
case MT_CIPHER_GCMP_256:
status->iv[0] = data[5];
status->iv[1] = data[4];
status->iv[2] = data[3];
status->iv[3] = data[2];
status->iv[4] = data[1];
status->iv[5] = data[0];
break;
default:
break;
}
}
rxd += 4;
if ((u8 *)rxd - skb->data >= skb->len)
@ -1062,7 +1102,7 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
addr = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */
mt76_rmw(dev, addr, MT_LPON_TCR_MODE, MT_LPON_TCR_READ); /* TSF read */
sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0);
sta->rate_set_tsf |= rd.rateset;
@ -1078,7 +1118,7 @@ EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
static int
mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,
enum mt7615_cipher_type cipher, u16 cipher_mask,
enum mt76_cipher_type cipher, u16 cipher_mask,
enum set_key_cmd cmd)
{
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
@ -1118,7 +1158,7 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
static int
mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
enum mt7615_cipher_type cipher, u16 cipher_mask,
enum mt76_cipher_type cipher, u16 cipher_mask,
int keyidx, enum set_key_cmd cmd)
{
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
@ -1157,7 +1197,7 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
static void
mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
enum mt7615_cipher_type cipher, u16 cipher_mask,
enum mt76_cipher_type cipher, u16 cipher_mask,
enum set_key_cmd cmd)
{
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
@ -1183,7 +1223,7 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd)
{
enum mt7615_cipher_type cipher;
enum mt76_cipher_type cipher;
u16 cipher_mask = wcid->cipher;
int err;
@ -1235,22 +1275,20 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
int first_idx = 0, last_idx;
int i, idx, count;
bool fixed_rate, ack_timeout;
bool probe, ampdu, cck = false;
bool ampdu, cck = false;
bool rs_idx;
u32 rate_set_tsf;
u32 final_rate, final_rate_flags, final_nss, txs;
fixed_rate = info->status.rates[0].count;
probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
txs = le32_to_cpu(txs_data[1]);
ampdu = !fixed_rate && (txs & MT_TXS1_AMPDU);
ampdu = txs & MT_TXS1_AMPDU;
txs = le32_to_cpu(txs_data[3]);
count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs);
txs = le32_to_cpu(txs_data[0]);
fixed_rate = txs & MT_TXS0_FIXED_RATE;
final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;
@ -1272,7 +1310,7 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
first_idx = max_t(int, 0, last_idx - (count - 1) / MT7615_RATE_RETRY);
if (fixed_rate && !probe) {
if (fixed_rate) {
info->status.rates[0].count = count;
i = 0;
goto out;
@ -1391,7 +1429,7 @@ static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (!mt7615_fill_txs(dev, sta, info, txs_data)) {
ieee80211_tx_info_clear_status(info);
info->status.rates[0].count = 0;
info->status.rates[0].idx = -1;
}
@ -1821,43 +1859,41 @@ mt7615_phy_update_channel(struct mt76_phy *mphy, int idx)
state->noise = -(phy->noise >> 4);
}
static void __mt7615_update_channel(struct mt7615_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
mt7615_phy_update_channel(&mdev->phy, 0);
if (mdev->phy2)
mt7615_phy_update_channel(mdev->phy2, 1);
/* reset obss airtime */
mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}
void mt7615_update_channel(struct mt76_dev *mdev)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
if (mt76_connac_pm_wake(&dev->mphy, &dev->pm))
return;
__mt7615_update_channel(dev);
mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
}
EXPORT_SYMBOL_GPL(mt7615_update_channel);
static void mt7615_update_survey(struct mt7615_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
ktime_t cur_time;
__mt7615_update_channel(dev);
/* MT7615 can only update both phys simultaneously
* since some reisters are shared across bands.
*/
mt7615_phy_update_channel(&mdev->phy, 0);
if (mdev->phy2)
mt7615_phy_update_channel(mdev->phy2, 1);
cur_time = ktime_get_boottime();
mt76_update_survey_active_time(&mdev->phy, cur_time);
if (mdev->phy2)
mt76_update_survey_active_time(mdev->phy2, cur_time);
/* reset obss airtime */
mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}
void mt7615_update_channel(struct mt76_phy *mphy)
{
struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76);
if (mt76_connac_pm_wake(&dev->mphy, &dev->pm))
return;
mt7615_update_survey(dev);
mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
}
EXPORT_SYMBOL_GPL(mt7615_update_channel);
static void
mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
{
@ -1906,15 +1942,26 @@ void mt7615_pm_wake_work(struct work_struct *work)
mphy = dev->phy.mt76;
if (!mt7615_mcu_set_drv_ctrl(dev)) {
struct mt76_dev *mdev = &dev->mt76;
int i;
mt76_for_each_q_rx(&dev->mt76, i)
napi_schedule(&dev->mt76.napi[i]);
mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
if (test_bit(MT76_STATE_RUNNING, &mphy->state))
if (mt76_is_sdio(mdev)) {
mt76_worker_schedule(&mdev->sdio.txrx_worker);
} else {
mt76_for_each_q_rx(mdev, i)
napi_schedule(&mdev->napi[i]);
mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
mt76_queue_tx_cleanup(dev, mdev->q_mcu[MT_MCUQ_WM],
false);
}
if (test_bit(MT76_STATE_RUNNING, &mphy->state)) {
unsigned long timeout;
timeout = mt7615_get_macwork_timeout(dev);
ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
MT7615_WATCHDOG_TIME);
timeout);
}
}
ieee80211_wake_queues(mphy->hw);
@ -1949,6 +1996,7 @@ void mt7615_mac_work(struct work_struct *work)
{
struct mt7615_phy *phy;
struct mt76_phy *mphy;
unsigned long timeout;
mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
mac_work.work);
@ -1967,8 +2015,9 @@ void mt7615_mac_work(struct work_struct *work)
mt7615_mutex_release(phy->dev);
mt76_tx_status_check(mphy->dev, NULL, false);
ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
MT7615_WATCHDOG_TIME);
timeout = mt7615_get_macwork_timeout(phy->dev);
ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, timeout);
}
void mt7615_tx_token_put(struct mt7615_dev *dev)
@ -2049,14 +2098,12 @@ mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
{
const struct mt7615_dfs_radar_spec *radar_specs;
struct mt7615_dev *dev = phy->dev;
int err, i;
int err, i, lpn = 500;
switch (dev->mt76.region) {
case NL80211_DFS_FCC:
radar_specs = &fcc_radar_specs;
err = mt7615_mcu_set_fcc5_lpn(dev, 8);
if (err < 0)
return err;
lpn = 8;
break;
case NL80211_DFS_ETSI:
radar_specs = &etsi_radar_specs;
@ -2068,6 +2115,11 @@ mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
return -EINVAL;
}
/* avoid FCC radar detection in non-FCC region */
err = mt7615_mcu_set_fcc5_lpn(dev, lpn);
if (err < 0)
return err;
for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
err = mt7615_mcu_set_radar_th(dev, i,
&radar_specs->radar_pattern[i]);


@ -383,48 +383,6 @@ struct mt7615_dfs_radar_spec {
struct mt7615_dfs_pattern radar_pattern[16];
};
enum mt7615_cipher_type {
MT_CIPHER_NONE,
MT_CIPHER_WEP40,
MT_CIPHER_TKIP,
MT_CIPHER_TKIP_NO_MIC,
MT_CIPHER_AES_CCMP,
MT_CIPHER_WEP104,
MT_CIPHER_BIP_CMAC_128,
MT_CIPHER_WEP128,
MT_CIPHER_WAPI,
MT_CIPHER_CCMP_256 = 10,
MT_CIPHER_GCMP,
MT_CIPHER_GCMP_256,
};
static inline enum mt7615_cipher_type
mt7615_mac_get_cipher(int cipher)
{
switch (cipher) {
case WLAN_CIPHER_SUITE_WEP40:
return MT_CIPHER_WEP40;
case WLAN_CIPHER_SUITE_WEP104:
return MT_CIPHER_WEP104;
case WLAN_CIPHER_SUITE_TKIP:
return MT_CIPHER_TKIP;
case WLAN_CIPHER_SUITE_AES_CMAC:
return MT_CIPHER_BIP_CMAC_128;
case WLAN_CIPHER_SUITE_CCMP:
return MT_CIPHER_AES_CCMP;
case WLAN_CIPHER_SUITE_CCMP_256:
return MT_CIPHER_CCMP_256;
case WLAN_CIPHER_SUITE_GCMP:
return MT_CIPHER_GCMP;
case WLAN_CIPHER_SUITE_GCMP_256:
return MT_CIPHER_GCMP_256;
case WLAN_CIPHER_SUITE_SMS4:
return MT_CIPHER_WAPI;
default:
return MT_CIPHER_NONE;
}
}
static inline struct mt7615_txp_common *
mt7615_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{


@ -28,6 +28,7 @@ static int mt7615_start(struct ieee80211_hw *hw)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
unsigned long timeout;
bool running;
int ret;
@ -78,8 +79,8 @@ static int mt7615_start(struct ieee80211_hw *hw)
set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
MT7615_WATCHDOG_TIME);
timeout = mt7615_get_macwork_timeout(dev);
ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work, timeout);
if (!running)
mt7615_mac_reset_counters(dev);
@ -240,8 +241,6 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
}
ret = mt7615_mcu_add_dev_info(phy, vif, true);
if (ret)
goto out;
out:
mt7615_mutex_release(dev);
@ -352,10 +351,12 @@ out:
mt7615_mutex_release(dev);
mt76_worker_schedule(&dev->mt76.tx_worker);
if (!mt76_testmode_enabled(phy->mt76))
if (!mt76_testmode_enabled(phy->mt76)) {
unsigned long timeout = mt7615_get_macwork_timeout(dev);
ieee80211_queue_delayed_work(phy->mt76->hw,
&phy->mt76->mac_work,
MT7615_WATCHDOG_TIME);
&phy->mt76->mac_work, timeout);
}
return ret;
}
@ -695,7 +696,7 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
msta->n_rates = i;
if (mt76_connac_pm_ref(phy->mt76, &dev->pm)) {
mt7615_mac_set_rates(phy, msta, NULL, msta->rates);
mt76_connac_pm_unref(&dev->pm);
mt76_connac_pm_unref(phy->mt76, &dev->pm);
}
spin_unlock_bh(&dev->mt76.lock);
}
@ -711,7 +712,7 @@ void mt7615_tx_worker(struct mt76_worker *w)
}
mt76_tx_worker_run(&dev->mt76);
mt76_connac_pm_unref(&dev->pm);
mt76_connac_pm_unref(&dev->mphy, &dev->pm);
}
static void mt7615_tx(struct ieee80211_hw *hw,
@ -741,7 +742,7 @@ static void mt7615_tx(struct ieee80211_hw *hw,
if (mt76_connac_pm_ref(mphy, &dev->pm)) {
mt76_tx(mphy, control->sta, wcid, skb);
mt76_connac_pm_unref(&dev->pm);
mt76_connac_pm_unref(mphy, &dev->pm);
return;
}
@ -881,7 +882,8 @@ mt7615_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mt7615_mutex_acquire(dev);
mt76_set(dev, reg, MT_LPON_TCR_MODE); /* TSF read */
/* TSF read */
mt76_rmw(dev, reg, MT_LPON_TCR_MODE, MT_LPON_TCR_READ);
tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0);
tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1);
@ -911,7 +913,33 @@ mt7615_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]);
/* TSF software overwrite */
mt76_set(dev, reg, MT_LPON_TCR_WRITE);
mt76_rmw(dev, reg, MT_LPON_TCR_MODE, MT_LPON_TCR_WRITE);
mt7615_mutex_release(dev);
}
static void
mt7615_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
s64 timestamp)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt7615_dev *dev = mt7615_hw_dev(hw);
union {
u64 t64;
u32 t32[2];
} tsf = { .t64 = timestamp, };
u16 idx = mvif->mt76.omac_idx;
u32 reg;
idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
reg = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
mt7615_mutex_acquire(dev);
mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]);
/* TSF software adjust*/
mt76_rmw(dev, reg, MT_LPON_TCR_MODE, MT_LPON_TCR_ADJUST);
mt7615_mutex_release(dev);
}
@ -1162,7 +1190,7 @@ static void mt7615_sta_set_decap_offload(struct ieee80211_hw *hw,
else
clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
mt7615_mcu_sta_update_hdr_trans(dev, vif, sta);
mt7615_mcu_set_sta_decap_offload(dev, vif, sta);
}
#ifdef CONFIG_PM
@ -1200,6 +1228,7 @@ static int mt7615_resume(struct ieee80211_hw *hw)
{
struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct mt7615_dev *dev = mt7615_hw_dev(hw);
unsigned long timeout;
bool running;
mt7615_mutex_acquire(dev);
@ -1223,8 +1252,8 @@ static int mt7615_resume(struct ieee80211_hw *hw)
mt76_connac_mcu_set_suspend_iter,
phy->mt76);
ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
MT7615_WATCHDOG_TIME);
timeout = mt7615_get_macwork_timeout(dev);
ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work, timeout);
mt7615_mutex_release(dev);
@ -1278,6 +1307,7 @@ const struct ieee80211_ops mt7615_ops = {
.get_stats = mt7615_get_stats,
.get_tsf = mt7615_get_tsf,
.set_tsf = mt7615_set_tsf,
.offset_tsf = mt7615_offset_tsf,
.get_survey = mt76_get_survey,
.get_antenna = mt76_get_antenna,
.set_antenna = mt7615_set_antenna,


@ -411,6 +411,9 @@ mt7615_mcu_rx_csa_notify(struct mt7615_dev *dev, struct sk_buff *skb)
c = (struct mt7615_mcu_csa_notify *)skb->data;
if (c->omac_idx > EXT_BSSID_MAX)
return;
if (ext_phy && ext_phy->omac_mask & BIT_ULL(c->omac_idx))
mphy = dev->mt76.phy2;
@ -427,6 +430,10 @@ mt7615_mcu_rx_radar_detected(struct mt7615_dev *dev, struct sk_buff *skb)
r = (struct mt7615_mcu_rdd_report *)skb->data;
if (!dev->radar_pattern.n_pulses && !r->long_detected &&
!r->constant_prf_detected && !r->staggered_prf_detected)
return;
if (r->band_idx && dev->mt76.phy2)
mphy = dev->mt76.phy2;
@ -1021,9 +1028,10 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
if (IS_ERR(sskb))
return PTR_ERR(sskb);
mt76_connac_mcu_sta_basic_tlv(sskb, vif, sta, enable);
mt76_connac_mcu_sta_basic_tlv(sskb, vif, sta, enable, true);
if (enable && sta)
mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0);
mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0,
MT76_STA_INFO_STATE_ASSOC);
wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
WTBL_RESET_AND_SET, NULL,
@ -1037,8 +1045,8 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
if (sta)
mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, wskb, sta,
NULL, wtbl_hdr);
mt76_connac_mcu_wtbl_hdr_trans_tlv(wskb, &msta->wcid, NULL,
wtbl_hdr);
mt76_connac_mcu_wtbl_hdr_trans_tlv(wskb, vif, &msta->wcid,
NULL, wtbl_hdr);
}
cmd = enable ? MCU_EXT_CMD_WTBL_UPDATE : MCU_EXT_CMD_STA_REC_UPDATE;
@ -1058,6 +1066,26 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
return mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
}
static int
mt7615_mcu_wtbl_update_hdr_trans(struct mt7615_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
struct wtbl_req_hdr *wtbl_hdr;
struct sk_buff *skb = NULL;
wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
WTBL_SET, NULL, &skb);
if (IS_ERR(wtbl_hdr))
return PTR_ERR(wtbl_hdr);
mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, &msta->wcid, NULL,
wtbl_hdr);
return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE,
true);
}
static const struct mt7615_mcu_ops wtbl_update_ops = {
.add_beacon_offload = mt7615_mcu_add_beacon_offload,
.set_pm_state = mt7615_mcu_ctrl_pm_state,
@ -1068,6 +1096,7 @@ static const struct mt7615_mcu_ops wtbl_update_ops = {
.sta_add = mt7615_mcu_wtbl_sta_add,
.set_drv_ctrl = mt7615_mcu_drv_pmctrl,
.set_fw_ctrl = mt7615_mcu_fw_pmctrl,
.set_sta_decap_offload = mt7615_mcu_wtbl_update_hdr_trans,
};
static int
@ -1120,18 +1149,21 @@ mt7615_mcu_sta_rx_ba(struct mt7615_dev *dev,
static int
__mt7615_mcu_add_sta(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable, int cmd)
struct ieee80211_sta *sta, bool enable, int cmd,
bool offload_fw)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt76_sta_cmd_info info = {
.sta = sta,
.vif = vif,
.offload_fw = offload_fw,
.enable = enable,
.newly = true,
.cmd = cmd,
};
info.wcid = sta ? (struct mt76_wcid *)sta->drv_priv : &mvif->sta.wcid;
return mt76_connac_mcu_add_sta_cmd(phy, &info);
return mt76_connac_mcu_sta_cmd(phy, &info);
}
static int
@ -1139,7 +1171,19 @@ mt7615_mcu_add_sta(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable)
{
return __mt7615_mcu_add_sta(phy->mt76, vif, sta, enable,
MCU_EXT_CMD_STA_REC_UPDATE);
MCU_EXT_CMD_STA_REC_UPDATE, false);
}
static int
mt7615_mcu_sta_update_hdr_trans(struct mt7615_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
return mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76,
vif, &msta->wcid,
MCU_EXT_CMD_STA_REC_UPDATE);
}
static const struct mt7615_mcu_ops sta_update_ops = {
@ -1152,27 +1196,9 @@ static const struct mt7615_mcu_ops sta_update_ops = {
.sta_add = mt7615_mcu_add_sta,
.set_drv_ctrl = mt7615_mcu_drv_pmctrl,
.set_fw_ctrl = mt7615_mcu_fw_pmctrl,
.set_sta_decap_offload = mt7615_mcu_sta_update_hdr_trans,
};
int mt7615_mcu_sta_update_hdr_trans(struct mt7615_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
struct wtbl_req_hdr *wtbl_hdr;
struct sk_buff *skb = NULL;
wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
WTBL_SET, NULL, &skb);
if (IS_ERR(wtbl_hdr))
return PTR_ERR(wtbl_hdr);
mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, &msta->wcid, NULL, wtbl_hdr);
return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE,
true);
}
static int
mt7615_mcu_uni_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
{
@ -1280,7 +1306,7 @@ mt7615_mcu_uni_add_sta(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable)
{
return __mt7615_mcu_add_sta(phy->mt76, vif, sta, enable,
MCU_UNI_CMD_STA_REC_UPDATE);
MCU_UNI_CMD_STA_REC_UPDATE, true);
}
static int
@ -1338,6 +1364,18 @@ mt7615_mcu_uni_rx_ba(struct mt7615_dev *dev,
MCU_UNI_CMD_STA_REC_UPDATE, true);
}
static int
mt7615_mcu_sta_uni_update_hdr_trans(struct mt7615_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
return mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76,
vif, &msta->wcid,
MCU_UNI_CMD_STA_REC_UPDATE);
}
static const struct mt7615_mcu_ops uni_update_ops = {
.add_beacon_offload = mt7615_mcu_uni_add_beacon_offload,
.set_pm_state = mt7615_mcu_uni_ctrl_pm_state,
@ -1348,6 +1386,7 @@ static const struct mt7615_mcu_ops uni_update_ops = {
.sta_add = mt7615_mcu_uni_add_sta,
.set_drv_ctrl = mt7615_mcu_lp_drv_pmctrl,
.set_fw_ctrl = mt7615_mcu_fw_pmctrl,
.set_sta_decap_offload = mt7615_mcu_sta_uni_update_hdr_trans,
};
int mt7615_mcu_restart(struct mt76_dev *dev)
@ -2322,14 +2361,12 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
}
int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index)
int mt7615_mcu_get_temperature(struct mt7615_dev *dev)
{
struct {
u8 action;
u8 rsv[3];
} req = {
.action = index,
};
} req = {};
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_GET_TEMP, &req,
sizeof(req), true);


@ -229,7 +229,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
GFP_KERNEL);
if (!bus_ops) {
ret = -ENOMEM;
goto error;
goto err_free_dev;
}
bus_ops->rr = mt7615_rr;
@ -242,17 +242,20 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
ret = devm_request_irq(mdev->dev, irq, mt7615_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
goto error;
goto err_free_dev;
if (is_mt7663(mdev))
mt76_wr(dev, MT_PCIE_IRQ_ENABLE, 1);
ret = mt7615_register_device(dev);
if (ret)
goto error;
goto err_free_irq;
return 0;
error:
err_free_irq:
devm_free_irq(pdev, irq, dev);
err_free_dev:
mt76_free_device(&dev->mt76);
return ret;


@ -20,7 +20,6 @@
MT7615_MAX_INTERFACES)
#define MT7615_PM_TIMEOUT (HZ / 12)
#define MT7615_WATCHDOG_TIME (HZ / 10)
#define MT7615_HW_SCAN_TIMEOUT (HZ / 10)
#define MT7615_RESET_TIMEOUT (30 * HZ)
#define MT7615_RATE_RETRY 2
@ -202,6 +201,7 @@ struct mt7615_phy {
#define mt7615_mcu_set_pm(dev, ...) (dev)->mcu_ops->set_pm_state((dev), __VA_ARGS__)
#define mt7615_mcu_set_drv_ctrl(dev) (dev)->mcu_ops->set_drv_ctrl((dev))
#define mt7615_mcu_set_fw_ctrl(dev) (dev)->mcu_ops->set_fw_ctrl((dev))
#define mt7615_mcu_set_sta_decap_offload(dev, ...) (dev)->mcu_ops->set_sta_decap_offload((dev), __VA_ARGS__)
struct mt7615_mcu_ops {
int (*add_tx_ba)(struct mt7615_dev *dev,
struct ieee80211_ampdu_params *params,
@ -221,6 +221,9 @@ struct mt7615_mcu_ops {
int (*set_pm_state)(struct mt7615_dev *dev, int band, int state);
int (*set_drv_ctrl)(struct mt7615_dev *dev);
int (*set_fw_ctrl)(struct mt7615_dev *dev);
int (*set_sta_decap_offload)(struct mt7615_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
};
struct mt7615_dev {
@ -356,6 +359,7 @@ static inline int mt7622_wmac_init(struct mt7615_dev *dev)
}
#endif
int mt7615_thermal_init(struct mt7615_dev *dev);
int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
int irq, const u32 *map);
u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr);
@ -456,6 +460,12 @@ static inline u32 mt7615_tx_mcu_int_mask(struct mt7615_dev *dev)
return MT_INT_TX_DONE(dev->mt76.q_mcu[MT_MCUQ_WM]->hw_idx);
}
static inline unsigned long
mt7615_get_macwork_timeout(struct mt7615_dev *dev)
{
return dev->pm.enable ? HZ / 3 : HZ / 10;
}
void mt7615_dma_reset(struct mt7615_dev *dev);
void mt7615_scan_work(struct work_struct *work);
void mt7615_roc_work(struct work_struct *work);
@ -466,7 +476,7 @@ int mt7615_set_channel(struct mt7615_phy *phy);
void mt7615_init_work(struct mt7615_dev *dev);
int mt7615_mcu_restart(struct mt76_dev *dev);
void mt7615_update_channel(struct mt76_dev *mdev);
void mt7615_update_channel(struct mt76_phy *mphy);
bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask);
void mt7615_mac_reset_counters(struct mt7615_dev *dev);
void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy);
@ -494,7 +504,7 @@ u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg);
int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val);
int mt7615_mcu_set_dbdc(struct mt7615_dev *dev);
int mt7615_mcu_set_eeprom(struct mt7615_dev *dev);
int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index);
int mt7615_mcu_get_temperature(struct mt7615_dev *dev);
int mt7615_mcu_set_tx_power(struct mt7615_phy *phy);
void mt7615_mcu_exit(struct mt7615_dev *dev);
void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
@ -518,9 +528,6 @@ void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
void mt7615_mac_work(struct work_struct *work);
void mt7615_txp_skb_unmap(struct mt76_dev *dev,
struct mt76_txwi_cache *txwi);
int mt7615_mcu_sta_update_hdr_trans(struct mt7615_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int mt7615_mcu_set_rx_hdr_trans_blacklist(struct mt7615_dev *dev);
int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val);
int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev,


@ -98,7 +98,7 @@ mt7615_led_set_config(struct led_classdev *led_cdev,
addr = mt7615_reg_map(dev, MT_LED_CTRL);
mt76_wr(dev, addr, val);
mt76_connac_pm_unref(&dev->pm);
mt76_connac_pm_unref(&dev->mphy, &dev->pm);
}
static int
@ -147,8 +147,12 @@ int mt7615_register_device(struct mt7615_dev *dev)
if (ret)
return ret;
ret = mt76_register_device(&dev->mt76, true, mt7615_rates,
ARRAY_SIZE(mt7615_rates));
ret = mt76_register_device(&dev->mt76, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
if (ret)
return ret;
ret = mt7615_thermal_init(dev);
if (ret)
return ret;


@ -131,20 +131,21 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt76_tx_info *tx_info)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
struct ieee80211_key_conf *key = info->control.hw_key;
int pid, id;
u8 *txwi = (u8 *)txwi_ptr;
struct mt76_txwi_cache *t;
struct mt7615_sta *msta;
void *txp;
msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
if (!wcid)
wcid = &dev->mt76.global_wcid;
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta) {
struct mt7615_phy *phy = &dev->phy;
if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2)
@ -267,6 +268,7 @@ void mt7615_mac_reset_work(struct work_struct *work)
struct mt7615_phy *phy2;
struct mt76_phy *ext_phy;
struct mt7615_dev *dev;
unsigned long timeout;
dev = container_of(work, struct mt7615_dev, reset_work);
ext_phy = dev->mt76.phy2;
@ -344,11 +346,11 @@ void mt7615_mac_reset_work(struct work_struct *work)
mt7615_mutex_release(dev);
timeout = mt7615_get_macwork_timeout(dev);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
MT7615_WATCHDOG_TIME);
timeout);
if (phy2)
ieee80211_queue_delayed_work(ext_phy->hw,
&phy2->mt76->mac_work,
MT7615_WATCHDOG_TIME);
&phy2->mt76->mac_work, timeout);
}


@ -463,7 +463,9 @@ enum mt7615_reg_base {
#define MT_LPON_TCR0(_n) MT_LPON(0x010 + ((_n) * 4))
#define MT_LPON_TCR2(_n) MT_LPON(0x0f8 + ((_n) - 2) * 4)
#define MT_LPON_TCR_MODE GENMASK(1, 0)
#define MT_LPON_TCR_READ GENMASK(1, 0)
#define MT_LPON_TCR_WRITE BIT(0)
#define MT_LPON_TCR_ADJUST BIT(1)
#define MT_LPON_UTTR0 MT_LPON(0x018)
#define MT_LPON_UTTR1 MT_LPON(0x01c)


@ -1,4 +1,4 @@
// SPDX-License-Identifier: ISC
/* SPDX-License-Identifier: ISC */
/* Copyright (C) 2020 MediaTek Inc.
*
* Author: Sean Wang <sean.wang@mediatek.com>


@ -55,6 +55,7 @@ static int __mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
{
struct sdio_func *func = dev->mt76.sdio.func;
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt76_connac_pm *pm = &dev->pm;
u32 status;
int ret;
@ -66,37 +67,45 @@ static int __mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
if (ret < 0) {
dev_err(dev->mt76.dev, "Cannot get ownership from device");
set_bit(MT76_STATE_PM, &mphy->state);
sdio_release_host(func);
} else {
clear_bit(MT76_STATE_PM, &mphy->state);
return ret;
pm->stats.last_wake_event = jiffies;
pm->stats.doze_time += pm->stats.last_wake_event -
pm->stats.last_doze_event;
}
sdio_release_host(func);
dev->pm.last_activity = jiffies;
return 0;
return ret;
}
static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
{
struct mt76_phy *mphy = &dev->mt76.phy;
int ret = 0;
if (test_and_clear_bit(MT76_STATE_PM, &mphy->state))
return __mt7663s_mcu_drv_pmctrl(dev);
mutex_lock(&dev->pm.mutex);
return 0;
if (test_bit(MT76_STATE_PM, &mphy->state))
ret = __mt7663s_mcu_drv_pmctrl(dev);
mutex_unlock(&dev->pm.mutex);
return ret;
}
static int mt7663s_mcu_fw_pmctrl(struct mt7615_dev *dev)
{
struct sdio_func *func = dev->mt76.sdio.func;
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt76_connac_pm *pm = &dev->pm;
int ret = 0;
u32 status;
int ret;
if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
return 0;
mutex_lock(&pm->mutex);
if (mt76_connac_skip_fw_pmctrl(mphy, pm))
goto out;
sdio_claim_host(func);
@ -107,9 +116,15 @@ static int mt7663s_mcu_fw_pmctrl(struct mt7615_dev *dev)
if (ret < 0) {
dev_err(dev->mt76.dev, "Cannot set ownership to device");
clear_bit(MT76_STATE_PM, &mphy->state);
} else {
pm->stats.last_doze_event = jiffies;
pm->stats.awake_time += pm->stats.last_doze_event -
pm->stats.last_wake_event;
}
sdio_release_host(func);
out:
mutex_unlock(&pm->mutex);
return ret;
}


@ -283,9 +283,15 @@ void mt7663s_txrx_worker(struct mt76_worker *w)
{
struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
txrx_worker);
struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
struct mt76_dev *mdev = container_of(sdio, struct mt76_dev, sdio);
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
int i, nframes, ret;
if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
queue_work(mdev->wq, &dev->pm.wake_work);
return;
}
/* disable interrupt */
sdio_claim_host(sdio->func);
sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
@ -295,16 +301,16 @@ void mt7663s_txrx_worker(struct mt76_worker *w)
/* tx */
for (i = 0; i <= MT_TXQ_PSD; i++) {
ret = mt7663s_tx_run_queue(dev, dev->phy.q_tx[i]);
ret = mt7663s_tx_run_queue(mdev, mdev->phy.q_tx[i]);
if (ret > 0)
nframes += ret;
}
ret = mt7663s_tx_run_queue(dev, dev->q_mcu[MT_MCUQ_WM]);
ret = mt7663s_tx_run_queue(mdev, mdev->q_mcu[MT_MCUQ_WM]);
if (ret > 0)
nframes += ret;
/* rx */
ret = mt7663s_rx_handler(dev);
ret = mt7663s_rx_handler(mdev);
if (ret > 0)
nframes += ret;
} while (nframes > 0);
@ -312,6 +318,8 @@ void mt7663s_txrx_worker(struct mt76_worker *w)
/* enable interrupt */
sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);
sdio_release_host(sdio->func);
mt76_connac_pm_unref(&dev->mphy, &dev->pm);
}
void mt7663s_sdio_irq(struct sdio_func *func)


@ -123,7 +123,7 @@ static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
addr = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */
mt76_rmw(dev, addr, MT_LPON_TCR_MODE, MT_LPON_TCR_READ); /* TSF read */
val = mt76_rr(dev, MT_LPON_UTTR0);
sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset;
@ -191,14 +191,15 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info)
{
struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct sk_buff *skb = tx_info->skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct mt7615_sta *msta;
int pad;
msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) &&
!msta->rate_probe) {
msta && !msta->rate_probe) {
/* request to configure sampling rate */
spin_lock_bh(&dev->mt76.lock);
mt7615_mac_set_rates(&dev->phy, msta, &info->control.rates[0],
@ -323,8 +324,8 @@ int mt7663_usb_sdio_register_device(struct mt7615_dev *dev)
hw->max_tx_fragments = 1;
}
err = mt76_register_device(&dev->mt76, true, mt7615_rates,
ARRAY_SIZE(mt7615_rates));
err = mt76_register_device(&dev->mt76, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
if (err < 0)
return err;


@ -7,12 +7,13 @@
#include "mt76.h"
#define MT76_CONNAC_SCAN_IE_LEN 600
#define MT76_CONNAC_MAX_SCHED_SCAN_INTERVAL 10
#define MT76_CONNAC_MAX_NUM_SCHED_SCAN_INTERVAL 10
#define MT76_CONNAC_MAX_TIME_SCHED_SCAN_INTERVAL U16_MAX
#define MT76_CONNAC_MAX_SCHED_SCAN_SSID 10
#define MT76_CONNAC_MAX_SCAN_MATCH 16
#define MT76_CONNAC_COREDUMP_TIMEOUT (HZ / 20)
#define MT76_CONNAC_COREDUMP_SZ (128 * 1024)
#define MT76_CONNAC_COREDUMP_SZ (1300 * 1024)
enum {
CMD_CBW_20MHZ = IEEE80211_STA_RX_BW_20,
@ -45,6 +46,8 @@ enum {
struct mt76_connac_pm {
bool enable;
bool ds_enable;
bool suspended;
spinlock_t txq_lock;
struct {
@ -116,19 +119,27 @@ out:
}
static inline void
mt76_connac_pm_unref(struct mt76_connac_pm *pm)
mt76_connac_pm_unref(struct mt76_phy *phy, struct mt76_connac_pm *pm)
{
spin_lock_bh(&pm->wake.lock);
pm->wake.count--;
pm->last_activity = jiffies;
if (--pm->wake.count == 0 &&
test_bit(MT76_STATE_MCU_RUNNING, &phy->state))
mt76_connac_power_save_sched(phy, pm);
spin_unlock_bh(&pm->wake.lock);
}
static inline bool
mt76_connac_skip_fw_pmctrl(struct mt76_phy *phy, struct mt76_connac_pm *pm)
{
struct mt76_dev *dev = phy->dev;
bool ret;
if (dev->token_count)
return true;
spin_lock_bh(&pm->wake.lock);
ret = pm->wake.count || test_and_set_bit(MT76_STATE_PM, &phy->state);
spin_unlock_bh(&pm->wake.lock);


@ -10,13 +10,16 @@ int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm)
if (!pm->enable)
return 0;
if (!mt76_is_mmio(dev))
if (mt76_is_usb(dev))
return 0;
cancel_delayed_work_sync(&pm->ps_work);
if (!test_bit(MT76_STATE_PM, &phy->state))
return 0;
if (pm->suspended)
return 0;
queue_work(dev->wq, &pm->wake_work);
if (!wait_event_timeout(pm->wait,
!test_bit(MT76_STATE_PM, &phy->state),
@ -34,12 +37,15 @@ void mt76_connac_power_save_sched(struct mt76_phy *phy,
{
struct mt76_dev *dev = phy->dev;
if (!mt76_is_mmio(dev))
if (mt76_is_usb(dev))
return;
if (!pm->enable)
return;
if (pm->suspended)
return;
pm->last_activity = jiffies;
if (!test_bit(MT76_STATE_PM, &phy->state)) {


@ -304,7 +304,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_alloc_wtbl_req);
void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
bool enable)
bool enable, bool newly)
{
struct sta_rec_basic *basic;
struct tlv *tlv;
@ -316,7 +316,8 @@ void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
basic->extra_info = cpu_to_le16(EXTRA_INFO_VER);
if (enable) {
basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
if (newly)
basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
basic->conn_state = CONN_STATE_PORT_SECURE;
} else {
basic->conn_state = CONN_STATE_DISCONNECT;
@ -393,6 +394,7 @@ mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
}
void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
struct mt76_wcid *wcid,
void *sta_wtbl, void *wtbl_tlv)
{
@ -404,9 +406,46 @@ void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
wtbl_tlv, sta_wtbl);
htr = (struct wtbl_hdr_trans *)tlv;
htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags);
if (vif->type == NL80211_IFTYPE_STATION)
htr->to_ds = true;
else
htr->from_ds = true;
if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) {
htr->to_ds = true;
htr->from_ds = true;
}
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_hdr_trans_tlv);
int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev,
struct ieee80211_vif *vif,
struct mt76_wcid *wcid, int cmd)
{
struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
struct wtbl_req_hdr *wtbl_hdr;
struct tlv *sta_wtbl;
struct sk_buff *skb;
skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
sizeof(struct tlv));
wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid, WTBL_SET,
sta_wtbl, &skb);
if (IS_ERR(wtbl_hdr))
return PTR_ERR(wtbl_hdr);
mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, wcid, sta_wtbl, wtbl_hdr);
return mt76_mcu_skb_send_msg(dev, skb, cmd, true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_update_hdr_trans);
void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
struct sk_buff *skb,
struct ieee80211_vif *vif,
@ -671,7 +710,7 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif,
void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
struct ieee80211_sta *sta,
struct ieee80211_vif *vif,
u8 rcpi)
u8 rcpi, u8 sta_state)
{
struct cfg80211_chan_def *chandef = &mphy->chandef;
enum nl80211_band band = chandef->chan->band;
@ -736,7 +775,7 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_STATE, sizeof(*state));
state = (struct sta_rec_state *)tlv;
state->state = 2;
state->state = sta_state;
if (sta->vht_cap.vht_supported) {
state->vht_opmode = sta->bandwidth;
@ -828,8 +867,8 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_ht_tlv);
int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
struct mt76_sta_cmd_info *info)
int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
struct mt76_sta_cmd_info *info)
{
struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv;
struct mt76_dev *dev = phy->dev;
@ -841,10 +880,13 @@ int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
if (IS_ERR(skb))
return PTR_ERR(skb);
mt76_connac_mcu_sta_basic_tlv(skb, info->vif, info->sta, info->enable);
if (info->enable && info->sta)
mt76_connac_mcu_sta_tlv(phy, skb, info->sta, info->vif,
info->rcpi);
if (info->sta || !info->offload_fw)
mt76_connac_mcu_sta_basic_tlv(skb, info->vif, info->sta,
info->enable, info->newly);
if (info->sta && info->enable)
mt76_connac_mcu_sta_tlv(phy, skb, info->sta,
info->vif, info->rcpi,
info->state);
sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
sizeof(struct tlv));
@ -859,6 +901,8 @@ int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
mt76_connac_mcu_wtbl_generic_tlv(dev, skb, info->vif,
info->sta, sta_wtbl,
wtbl_hdr);
mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, info->vif, info->wcid,
sta_wtbl, wtbl_hdr);
if (info->sta)
mt76_connac_mcu_wtbl_ht_tlv(dev, skb, info->sta,
sta_wtbl, wtbl_hdr);
@ -866,7 +910,7 @@ int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_sta_cmd);
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_cmd);
void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_ampdu_params *params,
@ -895,8 +939,10 @@ void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
ba->rst_ba_sb = 1;
}
if (is_mt7921(dev))
if (is_mt7921(dev)) {
ba->ba_winsize = enable ? cpu_to_le16(params->buf_size) : 0;
return;
}
if (enable && tx) {
u8 ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
@ -1271,6 +1317,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
u8 pad[3];
} __packed hdr;
struct bss_info_uni_he he;
struct bss_info_uni_bss_color bss_color;
} he_req = {
.hdr = {
.bss_idx = mvif->idx,
@ -1279,8 +1326,21 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
.tag = cpu_to_le16(UNI_BSS_INFO_HE_BASIC),
.len = cpu_to_le16(sizeof(struct bss_info_uni_he)),
},
.bss_color = {
.tag = cpu_to_le16(UNI_BSS_INFO_BSS_COLOR),
.len = cpu_to_le16(sizeof(struct bss_info_uni_bss_color)),
.enable = 0,
.bss_color = 0,
},
};
if (enable) {
he_req.bss_color.enable =
vif->bss_conf.he_bss_color.enabled;
he_req.bss_color.bss_color =
vif->bss_conf.he_bss_color.color;
}
mt76_connac_mcu_uni_bss_he_tlv(phy, vif,
(struct tlv *)&he_req.he);
err = mt76_mcu_send_msg(mdev, MCU_UNI_CMD_BSS_INFO_UPDATE,
@ -1463,14 +1523,16 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
req->version = 1;
req->seq_num = mvif->scan_seq_num | ext_phy << 7;
if (is_mt7663(phy->dev) &&
(sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)) {
get_random_mask_addr(req->mt7663.random_mac, sreq->mac_addr,
sreq->mac_addr_mask);
if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
u8 *addr = is_mt7663(phy->dev) ? req->mt7663.random_mac
: req->mt7921.random_mac;
req->scan_func = 1;
} else if (is_mt7921(phy->dev)) {
req->mt7921.bss_idx = mvif->idx;
get_random_mask_addr(addr, sreq->mac_addr,
sreq->mac_addr_mask);
}
if (is_mt7921(phy->dev))
req->mt7921.bss_idx = mvif->idx;
req->ssids_num = sreq->n_ssids;
for (i = 0; i < req->ssids_num; i++) {
@ -1556,6 +1618,26 @@ int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable)
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_deep_sleep);
int mt76_connac_sta_state_dp(struct mt76_dev *dev,
enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state)
{
if ((old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) ||
(old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST))
mt76_connac_mcu_set_deep_sleep(dev, true);
if ((old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) ||
(old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC))
mt76_connac_mcu_set_deep_sleep(dev, false);
return 0;
}
EXPORT_SYMBOL_GPL(mt76_connac_sta_state_dp);
void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,
struct mt76_connac_coredump *coredump)
{
@ -1570,6 +1652,60 @@ void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_coredump_event);
int mt76_connac_mcu_get_nic_capability(struct mt76_phy *phy)
{
struct mt76_connac_cap_hdr {
__le16 n_element;
u8 rsv[2];
} __packed * hdr;
struct sk_buff *skb;
int ret, i;
ret = mt76_mcu_send_and_get_msg(phy->dev, MCU_CMD_GET_NIC_CAPAB, NULL,
0, true, &skb);
if (ret)
return ret;
hdr = (struct mt76_connac_cap_hdr *)skb->data;
if (skb->len < sizeof(*hdr)) {
ret = -EINVAL;
goto out;
}
skb_pull(skb, sizeof(*hdr));
for (i = 0; i < le16_to_cpu(hdr->n_element); i++) {
struct tlv_hdr {
__le32 type;
__le32 len;
} __packed * tlv = (struct tlv_hdr *)skb->data;
int len;
if (skb->len < sizeof(*tlv))
break;
skb_pull(skb, sizeof(*tlv));
len = le32_to_cpu(tlv->len);
if (skb->len < len)
break;
switch (le32_to_cpu(tlv->type)) {
case MT_NIC_CAP_6G:
phy->cap.has_6ghz = skb->data[0];
break;
default:
break;
}
skb_pull(skb, len);
}
out:
dev_kfree_skb(skb);
return ret;
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_get_nic_capability);
static void
mt76_connac_mcu_build_sku(struct mt76_dev *dev, s8 *sku,
struct mt76_power_limits *limits,
@ -1632,12 +1768,15 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
142, 144, 149, 151, 153, 155, 157,
159, 161, 165
};
int i, n_chan, batch_size, idx = 0, tx_power, last_ch;
struct mt76_connac_sku_tlv sku_tlbv;
int i, n_chan, batch_size, idx = 0;
struct mt76_power_limits limits;
const u8 *ch_list;
sku_len = is_mt7921(dev) ? sizeof(sku_tlbv) : sizeof(sku_tlbv) - 92;
tx_power = 2 * phy->hw->conf.power_level;
if (!tx_power)
tx_power = 127;
if (band == NL80211_BAND_2GHZ) {
n_chan = ARRAY_SIZE(chan_list_2ghz);
@ -1648,39 +1787,48 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
}
batch_size = DIV_ROUND_UP(n_chan, batch_len);
if (!phy->cap.has_5ghz)
last_ch = chan_list_2ghz[n_chan - 1];
else
last_ch = chan_list_5ghz[n_chan - 1];
for (i = 0; i < batch_size; i++) {
bool last_msg = i == batch_size - 1;
int num_ch = last_msg ? n_chan % batch_len : batch_len;
struct mt76_connac_tx_power_limit_tlv tx_power_tlv = {
.band = band == NL80211_BAND_2GHZ ? 1 : 2,
.n_chan = num_ch,
.last_msg = last_msg,
};
int j, err, msg_len, num_ch;
struct sk_buff *skb;
int j, err, msg_len;
num_ch = i == batch_size - 1 ? n_chan % batch_len : batch_len;
msg_len = sizeof(tx_power_tlv) + num_ch * sizeof(sku_tlbv);
skb = mt76_mcu_msg_alloc(dev, NULL, msg_len);
if (!skb)
return -ENOMEM;
skb_reserve(skb, sizeof(tx_power_tlv));
BUILD_BUG_ON(sizeof(dev->alpha2) > sizeof(tx_power_tlv.alpha2));
memcpy(tx_power_tlv.alpha2, dev->alpha2, sizeof(dev->alpha2));
tx_power_tlv.n_chan = num_ch;
skb_put_data(skb, &tx_power_tlv, sizeof(tx_power_tlv));
for (j = 0; j < num_ch; j++, idx++) {
struct ieee80211_channel chan = {
.hw_value = ch_list[idx],
.band = band,
};
mt76_get_rate_power_limits(phy, &chan, &limits, 127);
mt76_get_rate_power_limits(phy, &chan, &limits,
tx_power);
tx_power_tlv.last_msg = ch_list[idx] == last_ch;
sku_tlbv.channel = ch_list[idx];
mt76_connac_mcu_build_sku(dev, sku_tlbv.pwr_limit,
&limits, band);
skb_put_data(skb, &sku_tlbv, sku_len);
}
__skb_push(skb, sizeof(tx_power_tlv));
memcpy(skb->data, &tx_power_tlv, sizeof(tx_power_tlv));
err = mt76_mcu_skb_send_msg(dev, skb,
MCU_CMD_SET_RATE_TX_POWER, false);
@ -1695,11 +1843,20 @@ int mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy)
{
int err;
err = mt76_connac_mcu_rate_txpower_band(phy, NL80211_BAND_2GHZ);
if (err < 0)
return err;
if (phy->cap.has_2ghz) {
err = mt76_connac_mcu_rate_txpower_band(phy,
NL80211_BAND_2GHZ);
if (err < 0)
return err;
}
if (phy->cap.has_5ghz) {
err = mt76_connac_mcu_rate_txpower_band(phy,
NL80211_BAND_5GHZ);
if (err < 0)
return err;
}
return mt76_connac_mcu_rate_txpower_band(phy, NL80211_BAND_5GHZ);
return 0;
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_rate_txpower);
@ -1939,7 +2096,7 @@ mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev,
ptlv->index = index;
memcpy(ptlv->pattern, pattern->pattern, pattern->pattern_len);
memcpy(ptlv->mask, pattern->mask, pattern->pattern_len / 8);
memcpy(ptlv->mask, pattern->mask, DIV_ROUND_UP(pattern->pattern_len, 8));
return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_SUSPEND, true);
}
@ -1974,14 +2131,17 @@ mt76_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif,
};
if (wowlan->magic_pkt)
req.wow_ctrl_tlv.trigger |= BIT(0);
req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_MAGIC;
if (wowlan->disconnect)
req.wow_ctrl_tlv.trigger |= BIT(2);
req.wow_ctrl_tlv.trigger |= (UNI_WOW_DETECT_TYPE_DISCONNECT |
UNI_WOW_DETECT_TYPE_BCN_LOST);
if (wowlan->nd_config) {
mt76_connac_mcu_sched_scan_req(phy, vif, wowlan->nd_config);
req.wow_ctrl_tlv.trigger |= BIT(5);
req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_SCH_SCAN_HIT;
mt76_connac_mcu_sched_scan_enable(phy, vif, suspend);
}
if (wowlan->n_patterns)
req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_BITMAP;
if (mt76_is_mmio(dev))
req.wow_ctrl_tlv.wakeup_hif = WOW_PCIE;


@ -559,6 +559,7 @@ enum {
MCU_CMD_SET_RATE_TX_POWER = MCU_CE_PREFIX | 0x5d,
MCU_CMD_SCHED_SCAN_ENABLE = MCU_CE_PREFIX | 0x61,
MCU_CMD_SCHED_SCAN_REQ = MCU_CE_PREFIX | 0x62,
MCU_CMD_GET_NIC_CAPAB = MCU_CE_PREFIX | 0x8a,
MCU_CMD_REG_WRITE = MCU_CE_PREFIX | 0xc0,
MCU_CMD_REG_READ = MCU_CE_PREFIX | MCU_QUERY_MASK | 0xc0,
MCU_CMD_CHIP_CONFIG = MCU_CE_PREFIX | 0xca,
@ -575,6 +576,7 @@ enum {
enum {
UNI_BSS_INFO_BASIC = 0,
UNI_BSS_INFO_RLM = 2,
UNI_BSS_INFO_BSS_COLOR = 4,
UNI_BSS_INFO_HE_BASIC = 5,
UNI_BSS_INFO_BCN_CONTENT = 7,
UNI_BSS_INFO_QBSS = 15,
@ -590,6 +592,36 @@ enum {
UNI_OFFLOAD_OFFLOAD_BMC_RPY_DETECT,
};
enum {
MT_NIC_CAP_TX_RESOURCE,
MT_NIC_CAP_TX_EFUSE_ADDR,
MT_NIC_CAP_COEX,
MT_NIC_CAP_SINGLE_SKU,
MT_NIC_CAP_CSUM_OFFLOAD,
MT_NIC_CAP_HW_VER,
MT_NIC_CAP_SW_VER,
MT_NIC_CAP_MAC_ADDR,
MT_NIC_CAP_PHY,
MT_NIC_CAP_MAC,
MT_NIC_CAP_FRAME_BUF,
MT_NIC_CAP_BEAM_FORM,
MT_NIC_CAP_LOCATION,
MT_NIC_CAP_MUMIMO,
MT_NIC_CAP_BUFFER_MODE_INFO,
MT_NIC_CAP_HW_ADIE_VERSION = 0x14,
MT_NIC_CAP_ANTSWP = 0x16,
MT_NIC_CAP_WFDMA_REALLOC,
MT_NIC_CAP_6G,
};
#define UNI_WOW_DETECT_TYPE_MAGIC BIT(0)
#define UNI_WOW_DETECT_TYPE_ANY BIT(1)
#define UNI_WOW_DETECT_TYPE_DISCONNECT BIT(2)
#define UNI_WOW_DETECT_TYPE_GTK_REKEY_FAIL BIT(3)
#define UNI_WOW_DETECT_TYPE_BCN_LOST BIT(4)
#define UNI_WOW_DETECT_TYPE_SCH_SCAN_HIT BIT(5)
#define UNI_WOW_DETECT_TYPE_BITMAP BIT(6)
enum {
UNI_SUSPEND_MODE_SETTING,
UNI_SUSPEND_WOW_CTRL,
@ -762,7 +794,7 @@ struct mt76_connac_sched_scan_req {
u8 intervals_num;
u8 scan_func; /* MT7663: BIT(0) enable random mac address */
struct mt76_connac_mcu_scan_channel channels[64];
__le16 intervals[MT76_CONNAC_MAX_SCHED_SCAN_INTERVAL];
__le16 intervals[MT76_CONNAC_MAX_NUM_SCHED_SCAN_INTERVAL];
union {
struct {
u8 random_mac[ETH_ALEN];
@ -770,7 +802,9 @@ struct mt76_connac_sched_scan_req {
} mt7663;
struct {
u8 bss_idx;
u8 pad2[63];
u8 pad2[19];
u8 random_mac[ETH_ALEN];
u8 pad3[38];
} mt7921;
};
} __packed;
@ -781,6 +815,14 @@ struct mt76_connac_sched_scan_done {
__le16 pad;
} __packed;
struct bss_info_uni_bss_color {
__le16 tag;
__le16 len;
u8 enable;
u8 bss_color;
u8 rsv[2];
} __packed;
struct bss_info_uni_he {
__le16 tag;
__le16 len;
@ -885,15 +927,24 @@ struct mt76_connac_suspend_tlv {
u8 pad[5];
} __packed;
enum mt76_sta_info_state {
MT76_STA_INFO_STATE_NONE,
MT76_STA_INFO_STATE_AUTH,
MT76_STA_INFO_STATE_ASSOC
};
struct mt76_sta_cmd_info {
struct ieee80211_sta *sta;
struct mt76_wcid *wcid;
struct ieee80211_vif *vif;
bool offload_fw;
bool enable;
bool newly;
int cmd;
u8 rcpi;
u8 state;
};
#define MT_SKU_POWER_LIMIT 161
@ -963,18 +1014,23 @@ int mt76_connac_mcu_set_channel_domain(struct mt76_phy *phy);
int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif);
void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable);
struct ieee80211_sta *sta, bool enable,
bool newly);
void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, void *sta_wtbl,
void *wtbl_tlv);
void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
struct mt76_wcid *wcid,
void *sta_wtbl, void *wtbl_tlv);
int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev,
struct ieee80211_vif *vif,
struct mt76_wcid *wcid, int cmd);
void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
struct ieee80211_sta *sta,
struct ieee80211_vif *vif,
u8 rcpi);
u8 rcpi, u8 state);
void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_sta *sta, void *sta_wtbl,
void *wtbl_tlv);
@ -996,8 +1052,8 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
struct ieee80211_vif *vif,
struct mt76_wcid *wcid,
bool enable);
int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
struct mt76_sta_cmd_info *info);
int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
struct mt76_sta_cmd_info *info);
void mt76_connac_mcu_beacon_loss_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif);
int mt76_connac_mcu_set_rts_thresh(struct mt76_dev *dev, u32 val, u8 band);
@ -1008,6 +1064,7 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
int mt76_connac_mcu_start_patch(struct mt76_dev *dev);
int mt76_connac_mcu_patch_sem_ctrl(struct mt76_dev *dev, bool get);
int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option);
int mt76_connac_mcu_get_nic_capability(struct mt76_phy *phy);
int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_scan_request *scan_req);
@ -1028,6 +1085,9 @@ int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend);
void mt76_connac_mcu_set_suspend_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif);
int mt76_connac_sta_state_dp(struct mt76_dev *dev,
enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state);
int mt76_connac_mcu_chip_config(struct mt76_dev *dev);
int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable);
void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,


@ -68,7 +68,7 @@ static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
nic_conf1 &= 0xff00;
if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL)
dev_err(dev->mt76.dev,
dev_dbg(dev->mt76.dev,
"driver does not support HW RF ctrl\n");
if (!mt76x02_field_valid(nic_conf0 >> 8))


@ -34,24 +34,24 @@ mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
memset(key_data, 0, 32);
if (!key)
return MT_CIPHER_NONE;
return MT76X02_CIPHER_NONE;
if (key->keylen > 32)
return MT_CIPHER_NONE;
return MT76X02_CIPHER_NONE;
memcpy(key_data, key->key, key->keylen);
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
return MT_CIPHER_WEP40;
return MT76X02_CIPHER_WEP40;
case WLAN_CIPHER_SUITE_WEP104:
return MT_CIPHER_WEP104;
return MT76X02_CIPHER_WEP104;
case WLAN_CIPHER_SUITE_TKIP:
return MT_CIPHER_TKIP;
return MT76X02_CIPHER_TKIP;
case WLAN_CIPHER_SUITE_CCMP:
return MT_CIPHER_AES_CCMP;
return MT76X02_CIPHER_AES_CCMP;
default:
return MT_CIPHER_NONE;
return MT76X02_CIPHER_NONE;
}
}
@ -63,7 +63,7 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
u32 val;
cipher = mt76x02_mac_get_key_info(key, key_data);
if (cipher == MT_CIPHER_NONE && key)
if (cipher == MT76X02_CIPHER_NONE && key)
return -EOPNOTSUPP;
val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
@ -91,10 +91,10 @@ void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);
pn = (u64)eiv << 16;
if (cipher == MT_CIPHER_TKIP) {
if (cipher == MT76X02_CIPHER_TKIP) {
pn |= (iv >> 16) & 0xff;
pn |= (iv & 0xff) << 8;
} else if (cipher >= MT_CIPHER_AES_CCMP) {
} else if (cipher >= MT76X02_CIPHER_AES_CCMP) {
pn |= iv & 0xffff;
} else {
return;
@ -112,7 +112,7 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
u64 pn;
cipher = mt76x02_mac_get_key_info(key, key_data);
if (cipher == MT_CIPHER_NONE && key)
if (cipher == MT76X02_CIPHER_NONE && key)
return -EOPNOTSUPP;
mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
@ -126,16 +126,16 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
pn = atomic64_read(&key->tx_pn);
iv_data[3] = key->keyidx << 6;
if (cipher >= MT_CIPHER_TKIP) {
if (cipher >= MT76X02_CIPHER_TKIP) {
iv_data[3] |= 0x20;
put_unaligned_le32(pn >> 16, &iv_data[4]);
}
if (cipher == MT_CIPHER_TKIP) {
if (cipher == MT76X02_CIPHER_TKIP) {
iv_data[0] = (pn >> 8) & 0xff;
iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
iv_data[2] = pn & 0xff;
} else if (cipher >= MT_CIPHER_AES_CCMP) {
} else if (cipher >= MT76X02_CIPHER_AES_CCMP) {
put_unaligned_le16((pn & 0xffff), &iv_data[0]);
}
}
@ -1022,12 +1022,12 @@ void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
mt76_wr(dev, MT_TX_PROT_CFG6 + i * 4, vht_prot[i]);
}
void mt76x02_update_channel(struct mt76_dev *mdev)
void mt76x02_update_channel(struct mt76_phy *mphy)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76x02_dev *dev = container_of(mphy->dev, struct mt76x02_dev, mt76);
struct mt76_channel_state *state;
state = mdev->phy.chan_state;
state = mphy->chan_state;
state->cc_busy += mt76_rr(dev, MT_CH_BUSY);
spin_lock_bh(&dev->mt76.cc_lock);
@ -1169,7 +1169,7 @@ void mt76x02_mac_work(struct work_struct *work)
mutex_lock(&dev->mt76.mutex);
mt76_update_survey(&dev->mt76);
mt76_update_survey(&dev->mphy);
for (i = 0, idx = 0; i < 16; i++) {
u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));


@ -195,7 +195,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
struct ieee80211_sta *sta, int len);
void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq);
void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt76x02_update_channel(struct mt76_dev *mdev);
void mt76x02_update_channel(struct mt76_phy *mphy);
void mt76x02_mac_work(struct work_struct *work);
void mt76x02_mac_cc_reset(struct mt76x02_dev *dev);


@ -692,15 +692,15 @@ struct mt76_wcid_key {
} __packed __aligned(4);
enum mt76x02_cipher_type {
MT_CIPHER_NONE,
MT_CIPHER_WEP40,
MT_CIPHER_WEP104,
MT_CIPHER_TKIP,
MT_CIPHER_AES_CCMP,
MT_CIPHER_CKIP40,
MT_CIPHER_CKIP104,
MT_CIPHER_CKIP128,
MT_CIPHER_WAPI,
MT76X02_CIPHER_NONE,
MT76X02_CIPHER_WEP40,
MT76X02_CIPHER_WEP104,
MT76X02_CIPHER_TKIP,
MT76X02_CIPHER_AES_CCMP,
MT76X02_CIPHER_CKIP40,
MT76X02_CIPHER_CKIP104,
MT76X02_CIPHER_CKIP128,
MT76X02_CIPHER_WAPI,
};
#endif


@ -7,24 +7,18 @@
#include <linux/module.h>
#include "mt76x02.h"
#define CCK_RATE(_idx, _rate) { \
#define MT76x02_CCK_RATE(_idx, _rate) { \
.bitrate = _rate, \
.flags = IEEE80211_RATE_SHORT_PREAMBLE, \
.hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \
.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + (_idx)), \
}
#define OFDM_RATE(_idx, _rate) { \
.bitrate = _rate, \
.hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
}
struct ieee80211_rate mt76x02_rates[] = {
CCK_RATE(0, 10),
CCK_RATE(1, 20),
CCK_RATE(2, 55),
CCK_RATE(3, 110),
MT76x02_CCK_RATE(0, 10),
MT76x02_CCK_RATE(1, 20),
MT76x02_CCK_RATE(2, 55),
MT76x02_CCK_RATE(3, 110),
OFDM_RATE(0, 60),
OFDM_RATE(1, 90),
OFDM_RATE(2, 120),


@ -1,4 +1,4 @@
#SPDX-License-Identifier: ISC
# SPDX-License-Identifier: ISC
obj-$(CONFIG_MT7915E) += mt7915e.o


@ -3,6 +3,7 @@
#include "mt7915.h"
#include "eeprom.h"
#include "mcu.h"
/** global debugfs **/
@ -16,7 +17,7 @@ mt7915_implicit_txbf_set(void *data, u64 val)
dev->ibf = !!val;
return mt7915_mcu_set_txbf_type(dev);
return mt7915_mcu_set_txbf(dev, MT_BF_TYPE_UPDATE);
}
static int
@ -147,6 +148,9 @@ mt7915_txbf_stat_read_phy(struct mt7915_phy *phy, struct seq_file *s)
{
struct mt7915_dev *dev = s->private;
bool ext_phy = phy != &dev->phy;
static const char * const bw[] = {
"BW20", "BW40", "BW80", "BW160"
};
int cnt;
if (!phy)
@ -164,11 +168,16 @@ mt7915_txbf_stat_read_phy(struct mt7915_phy *phy, struct seq_file *s)
seq_puts(s, "Tx Beamformer Rx feedback statistics: ");
cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(ext_phy));
seq_printf(s, "All: %ld, HE: %ld, VHT: %ld, HT: %ld\n",
seq_printf(s, "All: %ld, HE: %ld, VHT: %ld, HT: %ld, ",
FIELD_GET(MT_ETBF_RX_FB_ALL, cnt),
FIELD_GET(MT_ETBF_RX_FB_HE, cnt),
FIELD_GET(MT_ETBF_RX_FB_VHT, cnt),
FIELD_GET(MT_ETBF_RX_FB_HT, cnt));
cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(ext_phy));
seq_printf(s, "%s, NC: %ld, NR: %ld\n",
bw[FIELD_GET(MT_ETBF_RX_FB_BW, cnt)],
FIELD_GET(MT_ETBF_RX_FB_NC, cnt),
FIELD_GET(MT_ETBF_RX_FB_NR, cnt));
/* Tx Beamformee Rx NDPA & Tx feedback report */
cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(ext_phy));
@ -204,7 +213,7 @@ mt7915_tx_stats_show(struct seq_file *file, void *data)
mt7915_txbf_stat_read_phy(mt7915_ext_phy(dev), file);
/* Tx amsdu info */
seq_puts(file, "Tx MSDU stat:\n");
seq_puts(file, "Tx MSDU statistics:\n");
for (i = 0, n = 0; i < ARRAY_SIZE(stat); i++) {
stat[i] = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
n += stat[i];
@ -224,18 +233,6 @@ mt7915_tx_stats_show(struct seq_file *file, void *data)
DEFINE_SHOW_ATTRIBUTE(mt7915_tx_stats);
static int mt7915_read_temperature(struct seq_file *s, void *data)
{
struct mt7915_dev *dev = dev_get_drvdata(s->private);
int temp;
/* cpu */
temp = mt7915_mcu_get_temperature(dev, 0);
seq_printf(s, "Temperature: %d\n", temp);
return 0;
}
static int
mt7915_queues_acq(struct seq_file *s, void *data)
{
@ -307,54 +304,23 @@ mt7915_puts_rate_txpower(struct seq_file *s, struct mt7915_phy *phy)
"RU26", "RU52", "RU106", "RU242/SU20",
"RU484/SU40", "RU996/SU80", "RU2x996/SU160"
};
struct mt7915_dev *dev = dev_get_drvdata(s->private);
bool ext_phy = phy != &dev->phy;
u32 reg_base;
int i, idx = 0;
s8 txpower[MT7915_SKU_RATE_NUM], *buf;
int i;
if (!phy)
return;
reg_base = MT_TMAC_FP0R0(ext_phy);
seq_printf(s, "\nBand %d\n", ext_phy);
seq_printf(s, "\nBand %d\n", phy != &phy->dev->phy);
for (i = 0; i < ARRAY_SIZE(mt7915_sku_group_len); i++) {
u8 cnt, mcs_num = mt7915_sku_group_len[i];
s8 txpower[12];
int j;
mt7915_mcu_get_txpower_sku(phy, txpower, sizeof(txpower));
for (i = 0, buf = txpower; i < ARRAY_SIZE(mt7915_sku_group_len); i++) {
u8 mcs_num = mt7915_sku_group_len[i];
if (i == SKU_HT_BW20 || i == SKU_HT_BW40) {
mcs_num = 8;
} else if (i >= SKU_VHT_BW20 && i <= SKU_VHT_BW160) {
if (i >= SKU_VHT_BW20 && i <= SKU_VHT_BW160)
mcs_num = 10;
} else if (i == SKU_HE_RU26) {
reg_base = MT_TMAC_FP0R18(ext_phy);
idx = 0;
}
for (j = 0, cnt = 0; j < DIV_ROUND_UP(mcs_num, 4); j++) {
u32 val;
if (i == SKU_VHT_BW160 && idx == 60) {
reg_base = MT_TMAC_FP0R15(ext_phy);
idx = 0;
}
val = mt76_rr(dev, reg_base + (idx / 4) * 4);
if (idx && idx % 4)
val >>= (idx % 4) * 8;
while (val > 0 && cnt < mcs_num) {
s8 pwr = FIELD_GET(MT_TMAC_FP_MASK, val);
txpower[cnt++] = pwr;
val >>= 8;
idx++;
}
}
mt76_seq_puts_array(s, sku_group_name[i], txpower, mcs_num);
mt76_seq_puts_array(s, sku_group_name[i], buf, mcs_num);
buf += mt7915_sku_group_len[i];
}
}
@ -390,8 +356,6 @@ int mt7915_init_debugfs(struct mt7915_dev *dev)
debugfs_create_file("radar_trigger", 0200, dir, dev,
&fops_radar_trigger);
debugfs_create_file("ser_trigger", 0200, dir, dev, &fops_ser_trigger);
debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir,
mt7915_read_temperature);
debugfs_create_devm_seqfile(dev->mt76.dev, "txpower_sku", dir,
mt7915_read_rate_txpower);


@ -19,39 +19,6 @@ int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc)
return 0;
}
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
__le32 *rxd = (__le32 *)skb->data;
enum rx_pkt_type type;
type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
switch (type) {
case PKT_TYPE_TXRX_NOTIFY:
mt7915_mac_tx_free(dev, skb);
break;
case PKT_TYPE_RX_EVENT:
mt7915_mcu_rx_event(dev, skb);
break;
#ifdef CONFIG_NL80211_TESTMODE
case PKT_TYPE_TXRXV:
mt7915_mac_fill_rx_vector(dev, skb);
break;
#endif
case PKT_TYPE_NORMAL:
if (!mt7915_mac_fill_rx(dev, skb)) {
mt76_rx(&dev->mt76, q, skb);
return;
}
fallthrough;
default:
dev_kfree_skb(skb);
break;
}
}
static void
mt7915_tx_cleanup(struct mt7915_dev *dev)
{
@ -112,8 +79,6 @@ void mt7915_dma_prefetch(struct mt7915_dev *dev)
int mt7915_dma_init(struct mt7915_dev *dev)
{
/* Increase buffer size to receive large VHT/HE MPDUs */
int rx_buf_size = MT_RX_BUF_SIZE * 2;
u32 hif1_ofs = 0;
int ret;
@ -177,28 +142,28 @@ int mt7915_dma_init(struct mt7915_dev *dev)
/* event from WM */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
MT7915_RXQ_MCU_WM, MT7915_RX_MCU_RING_SIZE,
rx_buf_size, MT_RX_EVENT_RING_BASE);
MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
if (ret)
return ret;
/* event from WA */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
MT7915_RXQ_MCU_WA, MT7915_RX_MCU_RING_SIZE,
rx_buf_size, MT_RX_EVENT_RING_BASE);
MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
if (ret)
return ret;
/* rx data queue */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
MT7915_RXQ_BAND0, MT7915_RX_RING_SIZE,
rx_buf_size, MT_RX_DATA_RING_BASE);
MT_RX_BUF_SIZE, MT_RX_DATA_RING_BASE);
if (ret)
return ret;
if (dev->dbdc_support) {
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT],
MT7915_RXQ_BAND1, MT7915_RX_RING_SIZE,
rx_buf_size,
MT_RX_BUF_SIZE,
MT_RX_DATA_RING_BASE + hif1_ofs);
if (ret)
return ret;
@ -207,7 +172,7 @@ int mt7915_dma_init(struct mt7915_dev *dev)
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT_WA],
MT7915_RXQ_MCU_WA_EXT,
MT7915_RX_MCU_RING_SIZE,
rx_buf_size,
MT_RX_BUF_SIZE,
MT_RX_EVENT_RING_BASE + hif1_ofs);
if (ret)
return ret;


@ -4,22 +4,12 @@
#include "mt7915.h"
#include "eeprom.h"
static u32 mt7915_eeprom_read(struct mt7915_dev *dev, u32 offset)
{
u8 *data = dev->mt76.eeprom.data;
if (data[offset] == 0xff && !dev->flash_mode)
mt7915_mcu_get_eeprom(dev, offset);
return data[offset];
}
static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
u32 val;
u8 *eeprom = mdev->eeprom.data;
u32 val = eeprom[MT_EE_DO_PRE_CAL];
val = mt7915_eeprom_read(dev, MT_EE_DO_PRE_CAL);
if (val != (MT_EE_WIFI_CAL_DPD | MT_EE_WIFI_CAL_GROUP))
return 0;
@ -43,7 +33,13 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
dev->flash_mode = true;
ret = mt7915_eeprom_load_precal(dev);
} else {
memset(dev->mt76.eeprom.data, -1, MT7915_EEPROM_SIZE);
u32 block_num, i;
block_num = DIV_ROUND_UP(MT7915_EEPROM_SIZE,
MT7915_EEPROM_BLOCK_SIZE);
for (i = 0; i < block_num; i++)
mt7915_mcu_get_eeprom(dev,
i * MT7915_EEPROM_BLOCK_SIZE);
}
return ret;
@ -52,10 +48,7 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
static int mt7915_check_eeprom(struct mt7915_dev *dev)
{
u8 *eeprom = dev->mt76.eeprom.data;
u16 val;
mt7915_eeprom_read(dev, MT_EE_CHIP_ID);
val = get_unaligned_le16(eeprom);
u16 val = get_unaligned_le16(eeprom);
switch (val) {
case 0x7915:
@ -69,9 +62,10 @@ void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
u8 *eeprom = dev->mt76.eeprom.data;
u32 val;
val = mt7915_eeprom_read(dev, MT_EE_WIFI_CONF + ext_phy);
val = eeprom[MT_EE_WIFI_CONF + ext_phy];
val = FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, val);
if (val == MT_EE_BAND_SEL_DEFAULT && dev->dbdc_support)
val = ext_phy ? MT_EE_BAND_SEL_5GHZ : MT_EE_BAND_SEL_2GHZ;
@ -143,6 +137,7 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
struct ieee80211_channel *chan,
u8 chain_idx)
{
u8 *eeprom = dev->mt76.eeprom.data;
int index, target_power;
bool tssi_on;
@ -153,18 +148,18 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
if (chan->band == NL80211_BAND_2GHZ) {
index = MT_EE_TX0_POWER_2G + chain_idx * 3;
target_power = mt7915_eeprom_read(dev, index);
target_power = eeprom[index];
if (!tssi_on)
target_power += mt7915_eeprom_read(dev, index + 1);
target_power += eeprom[index + 1];
} else {
int group = mt7915_get_channel_group(chan->hw_value);
index = MT_EE_TX0_POWER_5G + chain_idx * 12;
target_power = mt7915_eeprom_read(dev, index + group);
target_power = eeprom[index + group];
if (!tssi_on)
target_power += mt7915_eeprom_read(dev, index + 8);
target_power += eeprom[index + 8];
}
return target_power;
@ -172,13 +167,14 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band)
{
u8 *eeprom = dev->mt76.eeprom.data;
u32 val;
s8 delta;
if (band == NL80211_BAND_2GHZ)
val = mt7915_eeprom_read(dev, MT_EE_RATE_DELTA_2G);
val = eeprom[MT_EE_RATE_DELTA_2G];
else
val = mt7915_eeprom_read(dev, MT_EE_RATE_DELTA_5G);
val = eeprom[MT_EE_RATE_DELTA_5G];
if (!(val & MT_EE_RATE_DELTA_EN))
return 0;


@ -33,7 +33,7 @@ enum mt7915_eeprom_field {
#define MT_EE_WIFI_CAL_GROUP BIT(0)
#define MT_EE_WIFI_CAL_DPD GENMASK(2, 1)
#define MT_EE_CAL_UNIT 1024
#define MT_EE_CAL_GROUP_SIZE (44 * MT_EE_CAL_UNIT)
#define MT_EE_CAL_GROUP_SIZE (49 * MT_EE_CAL_UNIT + 16)
#define MT_EE_CAL_DPD_SIZE (54 * MT_EE_CAL_UNIT)
#define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0)
@ -99,12 +99,15 @@ static inline bool
mt7915_tssi_enabled(struct mt7915_dev *dev, enum nl80211_band band)
{
u8 *eep = dev->mt76.eeprom.data;
u8 val = eep[MT_EE_WIFI_CONF + 7];
/* TODO: DBDC */
if (band == NL80211_BAND_5GHZ)
return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF7_TSSI0_5G;
if (band == NL80211_BAND_2GHZ)
return val & MT_EE_WIFI_CONF7_TSSI0_2G;
if (dev->dbdc_support)
return val & MT_EE_WIFI_CONF7_TSSI1_5G;
else
return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF7_TSSI0_2G;
return val & MT_EE_WIFI_CONF7_TSSI0_5G;
}
extern const u8 mt7915_sku_group_len[MAX_SKU_RATE_GROUP_NUM];


@ -2,39 +2,14 @@
/* Copyright (C) 2020 MediaTek Inc. */
#include <linux/etherdevice.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/thermal.h>
#include "mt7915.h"
#include "mac.h"
#include "mcu.h"
#include "eeprom.h"
#define CCK_RATE(_idx, _rate) { \
.bitrate = _rate, \
.flags = IEEE80211_RATE_SHORT_PREAMBLE, \
.hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \
.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + (_idx)), \
}
#define OFDM_RATE(_idx, _rate) { \
.bitrate = _rate, \
.hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
}
static struct ieee80211_rate mt7915_rates[] = {
CCK_RATE(0, 10),
CCK_RATE(1, 20),
CCK_RATE(2, 55),
CCK_RATE(3, 110),
OFDM_RATE(11, 60),
OFDM_RATE(15, 90),
OFDM_RATE(10, 120),
OFDM_RATE(14, 180),
OFDM_RATE(9, 240),
OFDM_RATE(13, 360),
OFDM_RATE(8, 480),
OFDM_RATE(12, 540),
};
static const struct ieee80211_iface_limit if_limits[] = {
{
.max = 1,
@ -67,6 +42,117 @@ static const struct ieee80211_iface_combination if_comb[] = {
}
};
static ssize_t mt7915_thermal_show_temp(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct mt7915_phy *phy = dev_get_drvdata(dev);
int temperature;
temperature = mt7915_mcu_get_temperature(phy);
if (temperature < 0)
return temperature;
/* display in millidegree Celsius */
return sprintf(buf, "%u\n", temperature * 1000);
}
static SENSOR_DEVICE_ATTR(temp1_input, 0444, mt7915_thermal_show_temp,
NULL, 0);
static struct attribute *mt7915_hwmon_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(mt7915_hwmon);
static int
mt7915_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
*state = MT7915_THERMAL_THROTTLE_MAX;
return 0;
}
static int
mt7915_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
struct mt7915_phy *phy = cdev->devdata;
*state = phy->throttle_state;
return 0;
}
static int
mt7915_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
struct mt7915_phy *phy = cdev->devdata;
int ret;
if (state > MT7915_THERMAL_THROTTLE_MAX)
return -EINVAL;
if (state == phy->throttle_state)
return 0;
ret = mt7915_mcu_set_thermal_throttling(phy, state);
if (ret)
return ret;
phy->throttle_state = state;
return 0;
}
static const struct thermal_cooling_device_ops mt7915_thermal_ops = {
.get_max_state = mt7915_thermal_get_max_throttle_state,
.get_cur_state = mt7915_thermal_get_cur_throttle_state,
.set_cur_state = mt7915_thermal_set_cur_throttle_state,
};
static void mt7915_unregister_thermal(struct mt7915_phy *phy)
{
struct wiphy *wiphy = phy->mt76->hw->wiphy;
if (!phy->cdev)
return;
sysfs_remove_link(&wiphy->dev.kobj, "cooling_device");
thermal_cooling_device_unregister(phy->cdev);
}
static int mt7915_thermal_init(struct mt7915_phy *phy)
{
struct wiphy *wiphy = phy->mt76->hw->wiphy;
struct thermal_cooling_device *cdev;
struct device *hwmon;
cdev = thermal_cooling_device_register(wiphy_name(wiphy), phy,
&mt7915_thermal_ops);
if (!IS_ERR(cdev)) {
if (sysfs_create_link(&wiphy->dev.kobj, &cdev->device.kobj,
"cooling_device") < 0)
thermal_cooling_device_unregister(cdev);
else
phy->cdev = cdev;
}
if (!IS_REACHABLE(CONFIG_HWMON))
return 0;
hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev,
wiphy_name(wiphy), phy,
mt7915_hwmon_groups);
if (IS_ERR(hwmon))
return PTR_ERR(hwmon);
return 0;
}
static void
mt7915_init_txpower(struct mt7915_dev *dev,
struct ieee80211_supported_band *sband)
@ -201,7 +287,6 @@ mt7915_mac_init_band(struct mt7915_dev *dev, u8 band)
FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_MCAST, MT_MDP_TO_HIF);
mt76_rmw(dev, MT_MDP_BNRCFR1(band), mask, set);
mt76_set(dev, MT_WF_RMAC_MIB_TIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 1536);
@ -228,20 +313,19 @@ static int mt7915_txbf_init(struct mt7915_dev *dev)
{
int ret;
if (dev->dbdc_support) {
ret = mt7915_mcu_set_txbf_module(dev);
ret = mt7915_mcu_set_txbf(dev, MT_BF_MODULE_UPDATE);
if (ret)
return ret;
}
/* trigger sounding packets */
ret = mt7915_mcu_set_txbf_sounding(dev);
ret = mt7915_mcu_set_txbf(dev, MT_BF_SOUNDING_ON);
if (ret)
return ret;
/* enable eBF */
return mt7915_mcu_set_txbf_type(dev);
return mt7915_mcu_set_txbf(dev, MT_BF_TYPE_UPDATE);
}
static int mt7915_register_ext_phy(struct mt7915_dev *dev)
@ -281,8 +365,12 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev)
if (ret)
goto error;
ret = mt76_register_phy(mphy, true, mt7915_rates,
ARRAY_SIZE(mt7915_rates));
ret = mt76_register_phy(mphy, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
if (ret)
goto error;
ret = mt7915_thermal_init(phy);
if (ret)
goto error;
@ -480,6 +568,9 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
if (nss < 2)
return;
/* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
elem->phy_cap_info[7] |= min_t(int, nss - 1, 2) << 3;
if (vif != NL80211_IFTYPE_AP)
return;
@ -493,9 +584,6 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
elem->phy_cap_info[6] |= c;
/* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
elem->phy_cap_info[7] |= min_t(int, nss - 1, 2) << 3;
}
static void
@ -579,8 +667,6 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
switch (i) {
case NL80211_IFTYPE_AP:
he_cap_elem->mac_cap_info[0] |=
IEEE80211_HE_MAC_CAP0_TWT_RES;
he_cap_elem->mac_cap_info[2] |=
IEEE80211_HE_MAC_CAP2_BSR;
he_cap_elem->mac_cap_info[4] |=
@ -594,8 +680,6 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
break;
case NL80211_IFTYPE_STATION:
he_cap_elem->mac_cap_info[0] |=
IEEE80211_HE_MAC_CAP0_TWT_REQ;
he_cap_elem->mac_cap_info[1] |=
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
@ -690,6 +774,7 @@ static void mt7915_unregister_ext_phy(struct mt7915_dev *dev)
if (!phy)
return;
mt7915_unregister_thermal(phy);
mt76_unregister_phy(mphy);
ieee80211_free_hw(mphy->hw);
}
@ -731,8 +816,12 @@ int mt7915_register_device(struct mt7915_dev *dev)
dev->mt76.test_ops = &mt7915_testmode_ops;
#endif
ret = mt76_register_device(&dev->mt76, true, mt7915_rates,
ARRAY_SIZE(mt7915_rates));
ret = mt76_register_device(&dev->mt76, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
if (ret)
return ret;
ret = mt7915_thermal_init(&dev->phy);
if (ret)
return ret;
@ -748,10 +837,12 @@ int mt7915_register_device(struct mt7915_dev *dev)
void mt7915_unregister_device(struct mt7915_dev *dev)
{
mt7915_unregister_ext_phy(dev);
mt7915_unregister_thermal(&dev->phy);
mt76_unregister_device(&dev->mt76);
mt7915_mcu_exit(dev);
mt7915_tx_token_put(dev);
mt7915_dma_cleanup(dev);
tasklet_disable(&dev->irq_tasklet);
mt76_free_device(&dev->mt76);
}

Some files were not shown because too many files changed in this diff.