Merge tag 'iwlwifi-next-for-kalle-2015-10-25' of https://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

* bug fix for TDLS
* fixes and cleanups in scan
* support of several scan plans
* improvements in FTM
* fixes in FW API
* improvements in the failure paths when the bus is dead
* other various small things here and there
Kalle Valo 2015-10-28 20:48:26 +02:00
Parents 6d08f61787 2edb7a3372
Commit b3bcb1b272
20 changed files with 297 additions and 102 deletions
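For context on the "several scan plans" item: cfg80211 hands the driver an array of struct cfg80211_sched_scan_plan entries (an interval plus an iteration count each), and the scan hunks below copy each plan into one firmware schedule entry, encoding "run forever" as 0xff iterations. A minimal sketch of that mapping, assuming the params/cmd naming used in the diffs below (not a drop-in function):

	/* Sketch only: mirrors the plan -> schedule loop added in the scan code.
	 * scan_plans/n_scan_plans come from cfg80211; schedule[] is the
	 * firmware schedule array (at most IWL_MAX_SCHED_SCAN_PLANS entries).
	 */
	for (i = 0; i < params->n_scan_plans; i++) {
		struct cfg80211_sched_scan_plan *scan_plan = &params->scan_plans[i];

		cmd->schedule[i].delay = cpu_to_le16(scan_plan->interval);
		cmd->schedule[i].iterations = scan_plan->iterations;
		cmd->schedule[i].full_scan_mul = 1;
	}

	/* the firmware stores the count in a u8 and reserves 0xff for "infinite",
	 * so a last plan with zero iterations is turned into 0xff
	 */
	if (!cmd->schedule[i - 1].iterations)
		cmd->schedule[i - 1].iterations = 0xff;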


@@ -86,6 +86,8 @@
  *	Structured as &struct iwl_fw_error_dump_trigger_desc.
  * @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as
  *	&struct iwl_fw_error_dump_rb
+ * @IWL_FW_ERROR_PAGING: UMAC's image memory segments which were
+ *	paged to the DRAM.
  */
 enum iwl_fw_error_dump_type {
 	/* 0 is deprecated */
@@ -100,6 +102,7 @@ enum iwl_fw_error_dump_type {
 	IWL_FW_ERROR_DUMP_MEM = 9,
 	IWL_FW_ERROR_DUMP_ERROR_INFO = 10,
 	IWL_FW_ERROR_DUMP_RB = 11,
+	IWL_FW_ERROR_DUMP_PAGING = 12,
 	IWL_FW_ERROR_DUMP_MAX,
 };
@@ -239,6 +242,19 @@ struct iwl_fw_error_dump_rb {
 	u8 data[];
 };
 
+/**
+ * struct iwl_fw_error_dump_paging - content of the UMAC's image page
+ *	block on DRAM
+ * @index: the index of the page block
+ * @reserved:
+ * @data: the content of the page block
+ */
+struct iwl_fw_error_dump_paging {
+	__le32 index;
+	__le32 reserved;
+	u8 data[];
+};
+
 /**
  * iwl_fw_error_next_data - advance fw error dump data pointer
  * @data: previous data block


@@ -306,6 +306,8 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
  *	is supported.
  * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
  * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
+ * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
+ * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
  *
  * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
  */
@@ -330,6 +332,8 @@ enum iwl_ucode_tlv_capa {
 	IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29,
 	IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30,
 	IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31,
+	IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
+	IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65,
 
 	NUM_IWL_UCODE_TLV_CAPA
 #ifdef __CHECKER__


@@ -409,6 +409,7 @@ enum iwl_d3_status {
  * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
  *	are sent
  * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
+ * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
  */
 enum iwl_trans_status {
 	STATUS_SYNC_HCMD_ACTIVE,
@@ -419,6 +420,7 @@ enum iwl_trans_status {
 	STATUS_FW_ERROR,
 	STATUS_TRANS_GOING_IDLE,
 	STATUS_TRANS_IDLE,
+	STATUS_TRANS_DEAD,
 };
 
 /**
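The new STATUS_TRANS_DEAD bit is meant to be tested before issuing any further register or DMA work; the guard pattern used later in this merge (sketched here, with the mvm-specific logging left out) is a plain test_bit() check:

	/* illustrative guard only; the fw-error-dump hunk further down does the
	 * same through mvm->trans before building a dump
	 */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return;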


@@ -71,6 +71,9 @@
 #define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
 #define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
 #define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
+#define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */
+#define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */
+#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 0
 #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
 #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
 #define IWL_MVM_UAPSD_QUEUES (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
@@ -101,7 +104,6 @@
 #define IWL_MVM_FW_BCAST_FILTER_PASS_ALL 0
 #define IWL_MVM_QUOTA_THRESHOLD 4
 #define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0
-#define IWL_MVM_RS_DISABLE_P2P_MIMO 0
 #define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1
 #define IWL_MVM_TOF_IS_RESPONDER 0
 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1


@@ -715,11 +715,30 @@ static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif,
 		goto out;
 	}
 
-	data = iwl_dbgfs_is_match("ctrl_ch_position=", buf);
+	data = iwl_dbgfs_is_match("center_freq=", buf);
 	if (data) {
+		struct iwl_tof_responder_config_cmd *cmd =
+			&mvm->tof_data.responder_cfg;
+
 		ret = kstrtou32(data, 10, &value);
-		if (ret == 0)
-			mvm->tof_data.responder_cfg.ctrl_ch_position = value;
+		if (ret == 0 && value) {
+			enum ieee80211_band band = (cmd->channel_num <= 14) ?
+						   IEEE80211_BAND_2GHZ :
+						   IEEE80211_BAND_5GHZ;
+			struct ieee80211_channel chn = {
+				.band = band,
+				.center_freq = ieee80211_channel_to_frequency(
+					cmd->channel_num, band),
+			};
+			struct cfg80211_chan_def chandef = {
+				.chan = &chn,
+				.center_freq1 =
+					ieee80211_channel_to_frequency(value,
+								       band),
+			};
+
+			cmd->ctrl_ch_position = iwl_mvm_get_ctrl_pos(&chandef);
+		}
+
 		goto out;
 	}


@@ -85,7 +85,7 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
 	IWL_ERR(mvm, "FLUSHING queues: scd_q_msk = 0x%x\n", scd_q_msk);
 
 	mutex_lock(&mvm->mutex);
-	ret = iwl_mvm_flush_tx_path(mvm, scd_q_msk, true) ? : count;
+	ret = iwl_mvm_flush_tx_path(mvm, scd_q_msk, 0) ? : count;
 	mutex_unlock(&mvm->mutex);
 
 	return ret;


@@ -101,6 +101,7 @@ struct iwl_ssid_ie {
 #define IWL_FULL_SCAN_MULTIPLIER 5
 #define IWL_FAST_SCHED_SCAN_ITERATIONS 3
+#define IWL_MAX_SCHED_SCAN_PLANS 2
 
 enum scan_framework_client {
 	SCAN_CLIENT_SCHED_SCAN = BIT(0),
@@ -359,7 +360,7 @@ struct iwl_scan_req_lmac {
 	/* SCAN_REQ_PERIODIC_PARAMS_API_S */
 	__le32 iter_num;
 	__le32 delay;
-	struct iwl_scan_schedule_lmac schedule[2];
+	struct iwl_scan_schedule_lmac schedule[IWL_MAX_SCHED_SCAN_PLANS];
 	struct iwl_scan_channel_opt channel_opt[2];
 	u8 data[];
 } __packed;
@@ -582,7 +583,7 @@ struct iwl_scan_umac_schedule {
  */
 struct iwl_scan_req_umac_tail {
 	/* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
-	struct iwl_scan_umac_schedule schedule[2];
+	struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS];
 	__le16 delay;
 	__le16 reserved;
 	/* SCAN_PROBE_PARAMS_API_S_VER_1 */


@@ -1523,6 +1523,69 @@ struct iwl_dts_measurement_cmd {
 	__le32 flags;
 } __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_CMD_S */
 
+/**
+ * enum iwl_dts_control_measurement_mode - DTS measurement type
+ * @DTS_AUTOMATIC: Automatic mode (full SW control). Provide temperature read
+ *	back (latest value. Not waiting for new value). Use automatic
+ *	SW DTS configuration.
+ * @DTS_REQUEST_READ: Request DTS read. Configure DTS with manual settings,
+ *	trigger DTS reading and provide read back temperature read
+ *	when available.
+ * @DTS_OVER_WRITE: over-write the DTS temperatures in the SW until next read
+ * @DTS_DIRECT_WITHOUT_MEASURE: DTS returns its latest temperature result,
+ *	without measurement trigger.
+ */
+enum iwl_dts_control_measurement_mode {
+	DTS_AUTOMATIC = 0,
+	DTS_REQUEST_READ = 1,
+	DTS_OVER_WRITE = 2,
+	DTS_DIRECT_WITHOUT_MEASURE = 3,
+};
+
+/**
+ * enum iwl_dts_used - DTS to use or used for measurement in the DTS request
+ * @DTS_USE_TOP: Top
+ * @DTS_USE_CHAIN_A: chain A
+ * @DTS_USE_CHAIN_B: chain B
+ * @DTS_USE_CHAIN_C: chain C
+ * @XTAL_TEMPERATURE - read temperature from xtal
+ */
+enum iwl_dts_used {
+	DTS_USE_TOP = 0,
+	DTS_USE_CHAIN_A = 1,
+	DTS_USE_CHAIN_B = 2,
+	DTS_USE_CHAIN_C = 3,
+	XTAL_TEMPERATURE = 4,
+};
+
+/**
+ * enum iwl_dts_bit_mode - bit-mode to use in DTS request read mode
+ * @DTS_BIT6_MODE: bit 6 mode
+ * @DTS_BIT8_MODE: bit 8 mode
+ */
+enum iwl_dts_bit_mode {
+	DTS_BIT6_MODE = 0,
+	DTS_BIT8_MODE = 1,
+};
+
+/**
+ * iwl_ext_dts_measurement_cmd - request extended DTS temperature measurements
+ * @control_mode: see &enum iwl_dts_control_measurement_mode
+ * @temperature: used when over write DTS mode is selected
+ * @sensor: set temperature sensor to use. See &enum iwl_dts_used
+ * @avg_factor: average factor to DTS in request DTS read mode
+ * @bit_mode: value defines the DTS bit mode to use. See &enum iwl_dts_bit_mode
+ * @step_duration: step duration for the DTS
+ */
+struct iwl_ext_dts_measurement_cmd {
+	__le32 control_mode;
+	__le32 temperature;
+	__le32 sensor;
+	__le32 avg_factor;
+	__le32 bit_mode;
+	__le32 step_duration;
+} __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */
+
 /**
  * iwl_dts_measurement_notif - notification received with the measurements
  *
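As a usage illustration (not part of the diff): the extended command above can also describe a manual read. A hypothetical request asking for an 8-bit reading from chain A could be filled like this, reusing only the enums and fields added above; the averaging factor and step duration values are made up for the example:

	struct iwl_ext_dts_measurement_cmd extcmd = {
		.control_mode  = cpu_to_le32(DTS_REQUEST_READ),
		.sensor        = cpu_to_le32(DTS_USE_CHAIN_A),
		.bit_mode      = cpu_to_le32(DTS_BIT8_MODE),
		.avg_factor    = cpu_to_le32(4),	/* assumed value */
		.step_duration = cpu_to_le32(10),	/* assumed value */
	};

The automatic-mode variant actually sent by the driver (control_mode = DTS_AUTOMATIC, everything else zero) appears in the temperature-handling hunk further down.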


@@ -843,6 +843,9 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
 	ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval);
 	ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid);
 
+	if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p)
+		cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+
 	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
 }


@@ -572,6 +572,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 	/* we create the 802.11 header and zero length SSID IE. */
 	hw->wiphy->max_sched_scan_ie_len =
 		SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
+	hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
+	hw->wiphy->max_sched_scan_plan_interval = U16_MAX;
+
+	/*
+	 * the firmware uses u8 for num of iterations, but 0xff is saved for
+	 * infinite loop, so the maximum number of iterations is actually 254.
+	 */
+	hw->wiphy->max_sched_scan_plan_iterations = 254;
 
 	hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
 			       NL80211_FEATURE_LOW_PRIORITY_SCAN |
@@ -1129,6 +1137,12 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 
 	lockdep_assert_held(&mvm->mutex);
 
+	/* there's no point in fw dump if the bus is dead */
+	if (test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
+		IWL_ERR(mvm, "Skip fw error dump since bus is dead\n");
+		return;
+	}
+
 	if (mvm->fw_dump_trig &&
 	    mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
 		monitor_dump_only = true;
@@ -1192,6 +1206,13 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 	if (sram2_len)
 		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
 
+	/* Make room for fw's virtual image pages, if it exists */
+	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size)
+		file_len += mvm->num_of_paging_blk *
+			(sizeof(*dump_data) +
+			 sizeof(struct iwl_fw_error_dump_paging) +
+			 PAGING_BLOCK_SIZE);
+
 	/* If we only want a monitor dump, reset the file length */
 	if (monitor_dump_only) {
 		file_len = sizeof(*dump_file) + sizeof(*dump_data) +
@@ -1302,6 +1323,26 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 				 dump_mem->data, IWL8260_ICCM_LEN);
 	}
 
+	/* Dump fw's virtual image */
+	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) {
+		u32 i;
+
+		for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
+			struct iwl_fw_error_dump_paging *paging;
+			struct page *pages =
+				mvm->fw_paging_db[i].fw_paging_block;
+
+			dump_data = iwl_fw_error_next_data(dump_data);
+			dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
+			dump_data->len = cpu_to_le32(sizeof(*paging) +
+						     PAGING_BLOCK_SIZE);
+			paging = (void *)dump_data->data;
+			paging->index = cpu_to_le32(i);
+			memcpy(paging->data, page_address(pages),
+			       PAGING_BLOCK_SIZE);
+		}
+	}
+
 dump_trans_data:
 	fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
 						       mvm->fw_dump_trig);
@@ -1754,7 +1795,7 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
 	 * Flush them here.
 	 */
 	mutex_lock(&mvm->mutex);
-	iwl_mvm_flush_tx_path(mvm, tfd_msk, true);
+	iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
 	mutex_unlock(&mvm->mutex);
 
 	/*
@@ -1955,6 +1996,27 @@ out:
 	*total_flags = 0;
 }
 
+static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
+					struct ieee80211_vif *vif,
+					unsigned int filter_flags,
+					unsigned int changed_flags)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	/* We support only filter for probe requests */
+	if (!(changed_flags & FIF_PROBE_REQ))
+		return;
+
+	/* Supported only for p2p client interfaces */
+	if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
+	    !vif->p2p)
+		return;
+
+	mutex_lock(&mvm->mutex);
+	iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+	mutex_unlock(&mvm->mutex);
+}
+
 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
 struct iwl_bcast_iter_data {
 	struct iwl_mvm *mvm;
@@ -3898,7 +3960,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
 	}
 
 	if (drop) {
-		if (iwl_mvm_flush_tx_path(mvm, msk, true))
+		if (iwl_mvm_flush_tx_path(mvm, msk, 0))
 			IWL_ERR(mvm, "flush request fail\n");
 		mutex_unlock(&mvm->mutex);
 	} else {
@@ -4135,6 +4197,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
 	.config = iwl_mvm_mac_config,
 	.prepare_multicast = iwl_mvm_prepare_multicast,
 	.configure_filter = iwl_mvm_configure_filter,
+	.config_iface_filter = iwl_mvm_config_iface_filter,
 	.bss_info_changed = iwl_mvm_bss_info_changed,
 	.hw_scan = iwl_mvm_mac_hw_scan,
 	.cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,


@@ -1036,7 +1036,7 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status);
 #else
 static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
 #endif
-int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags);
 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
 
 static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,


@@ -483,6 +483,7 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
 			ret = -ENOMEM;
 			break;
 		}
+		kfree(mvm->nvm_sections[section_id].data);
 		mvm->nvm_sections[section_id].data = temp;
 		mvm->nvm_sections[section_id].length = section_size;


@@ -348,7 +348,8 @@ static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
 
 static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 				    struct ieee80211_vif *vif,
-				    struct iwl_mac_power_cmd *cmd)
+				    struct iwl_mac_power_cmd *cmd,
+				    bool host_awake)
 {
 	int dtimper, bi;
 	int keep_alive;
@@ -376,8 +377,13 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 
 	cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
 
-	if (!vif->bss_conf.ps || !mvmvif->pm_enabled ||
-	    (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p))
+	if (!vif->bss_conf.ps || !mvmvif->pm_enabled)
+		return;
+
+	if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p &&
+	    (!fw_has_capa(&mvm->fw->ucode_capa,
+			  IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS) ||
+	     !IWL_MVM_P2P_LOWLATENCY_PS_ENABLE))
 		return;
 
 	cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -389,19 +395,25 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 		cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
 	}
 
-	iwl_mvm_power_config_skip_dtim(mvm, vif, cmd,
-				       mvm->cur_ucode != IWL_UCODE_WOWLAN);
+	iwl_mvm_power_config_skip_dtim(mvm, vif, cmd, host_awake);
 
-	if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
-		cmd->rx_data_timeout =
-			cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT);
-		cmd->tx_data_timeout =
-			cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT);
-	} else {
+	if (!host_awake) {
 		cmd->rx_data_timeout =
 			cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
 		cmd->tx_data_timeout =
 			cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
+	} else if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p &&
+		   fw_has_capa(&mvm->fw->ucode_capa,
+			       IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS)) {
+		cmd->tx_data_timeout =
+			cpu_to_le32(IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT);
+		cmd->rx_data_timeout =
+			cpu_to_le32(IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT);
+	} else {
+		cmd->rx_data_timeout =
+			cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT);
+		cmd->tx_data_timeout =
+			cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT);
 	}
 
 	if (iwl_mvm_power_allow_uapsd(mvm, vif))
@@ -458,7 +470,8 @@ static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
 {
 	struct iwl_mac_power_cmd cmd = {};
 
-	iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+	iwl_mvm_power_build_cmd(mvm, vif, &cmd,
+				mvm->cur_ucode != IWL_UCODE_WOWLAN);
 	iwl_mvm_power_log(mvm, &cmd);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
@@ -994,11 +1007,7 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
 	if (!vif->bss_conf.assoc)
 		return 0;
 
-	iwl_mvm_power_build_cmd(mvm, vif, &cmd);
-
-	/* when enabling D0i3, override the skip-over-dtim configuration */
-	if (enable)
-		iwl_mvm_power_config_skip_dtim(mvm, vif, &cmd, false);
+	iwl_mvm_power_build_cmd(mvm, vif, &cmd, !enable);
 
 	iwl_mvm_power_log(mvm, &cmd);
 #ifdef CONFIG_IWLWIFI_DEBUGFS


@@ -177,9 +177,6 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
 
-	if (IWL_MVM_RS_DISABLE_P2P_MIMO &&
-	    iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
-		return false;
-
 	if (mvm->nvm_data->sku_cap_mimo_disabled)
 		return false;
 
@@ -3071,9 +3068,6 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	else
 		rs_vht_init(mvm, sta, lq_sta, vht_cap);
 
-	if (IWL_MVM_RS_DISABLE_P2P_MIMO && sta_priv->vif->p2p)
-		lq_sta->active_mimo2_rate = 0;
-
 	lq_sta->max_legacy_rate_idx =
 		rs_get_max_rate_from_mask(lq_sta->active_legacy_rate);
 	lq_sta->max_siso_rate_idx =


@@ -131,7 +131,6 @@ struct iwl_mvm_scan_params {
 	int n_ssids;
 	struct cfg80211_ssid *ssids;
 	struct ieee80211_channel **channels;
-	u16 interval; /* interval between scans (in secs) */
 	u32 flags;
 	u8 *mac_addr;
 	u8 *mac_addr_mask;
@@ -140,7 +139,8 @@ struct iwl_mvm_scan_params {
 	int n_match_sets;
 	struct iwl_scan_probe_req preq;
 	struct cfg80211_match_set *match_sets;
-	u8 iterations[2];
+	int n_scan_plans;
+	struct cfg80211_sched_scan_plan *scan_plans;
 };
 
 static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
@@ -474,7 +474,7 @@ iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
 	int ret;
 
 	if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES))
 		return -EIO;
 
 	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
 		blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
@@ -737,8 +737,7 @@ static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
 }
 
 static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
-					struct ieee80211_vif *vif,
-					int n_iterations)
+					struct ieee80211_vif *vif)
 {
 	const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa;
 
@@ -753,11 +752,6 @@ static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
 		vif->type != NL80211_IFTYPE_P2P_DEVICE);
 }
 
-static int iwl_mvm_scan_total_iterations(struct iwl_mvm_scan_params *params)
-{
-	return params->iterations[0] + params->iterations[1];
-}
-
 static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
 				   struct iwl_mvm_scan_params *params)
 {
@@ -796,12 +790,15 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		(void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
 			 mvm->fw->ucode_capa.n_scan_channels);
 	u32 ssid_bitmap = 0;
-	int n_iterations = iwl_mvm_scan_total_iterations(params);
+	int i;
 
 	lockdep_assert_held(&mvm->mutex);
 
 	memset(cmd, 0, ksize(cmd));
 
+	if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
+		return -EINVAL;
+
 	iwl_mvm_scan_lmac_dwell(mvm, cmd, params);
 
 	cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
@@ -821,14 +818,26 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	/* this API uses bits 1-20 instead of 0-19 */
 	ssid_bitmap <<= 1;
 
-	cmd->schedule[0].delay = cpu_to_le16(params->interval);
-	cmd->schedule[0].iterations = params->iterations[0];
-	cmd->schedule[0].full_scan_mul = 1;
-	cmd->schedule[1].delay = cpu_to_le16(params->interval);
-	cmd->schedule[1].iterations = params->iterations[1];
-	cmd->schedule[1].full_scan_mul = 1;
+	for (i = 0; i < params->n_scan_plans; i++) {
+		struct cfg80211_sched_scan_plan *scan_plan =
+			&params->scan_plans[i];
+
+		cmd->schedule[i].delay =
+			cpu_to_le16(scan_plan->interval);
+		cmd->schedule[i].iterations = scan_plan->iterations;
+		cmd->schedule[i].full_scan_mul = 1;
+	}
 
-	if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) {
+	/*
+	 * If the number of iterations of the last scan plan is set to
+	 * zero, it should run infinitely. However, this is not always the case.
+	 * For example, when regular scan is requested the driver sets one scan
+	 * plan with one iteration.
+	 */
+	if (!cmd->schedule[i - 1].iterations)
+		cmd->schedule[i - 1].iterations = 0xff;
+
+	if (iwl_mvm_scan_use_ebs(mvm, vif)) {
 		cmd->channel_opt[0].flags =
 			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
 				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
@@ -892,7 +901,6 @@ static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
 
 int iwl_mvm_config_scan(struct iwl_mvm *mvm)
 {
 	struct iwl_scan_config *scan_config;
 	struct ieee80211_supported_band *band;
 	int num_channels =
@@ -968,6 +976,12 @@ static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
 	return -ENOENT;
 }
 
+static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
+{
+	return params->n_scan_plans == 1 &&
+		params->scan_plans[0].iterations == 1;
+}
+
 static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
 				    struct iwl_scan_req_umac *cmd,
 				    struct iwl_mvm_scan_params *params)
@@ -980,7 +994,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
 	cmd->scan_priority =
 		iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 
-	if (iwl_mvm_scan_total_iterations(params) == 1)
+	if (iwl_mvm_is_regular_scan(params))
 		cmd->ooc_priority =
 			iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 	else
@@ -1027,7 +1041,7 @@ static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
 	else
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
 
-	if (iwl_mvm_scan_total_iterations(params) > 1)
+	if (!iwl_mvm_is_regular_scan(params))
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1045,12 +1059,14 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
 		sizeof(struct iwl_scan_channel_cfg_umac) *
 			mvm->fw->ucode_capa.n_scan_channels;
-	int uid;
+	int uid, i;
 	u32 ssid_bitmap = 0;
-	int n_iterations = iwl_mvm_scan_total_iterations(params);
 
 	lockdep_assert_held(&mvm->mutex);
 
+	if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
+		return -EINVAL;
+
 	uid = iwl_mvm_scan_uid_by_status(mvm, 0);
 	if (uid < 0)
 		return uid;
@@ -1067,7 +1083,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	if (type == IWL_MVM_SCAN_SCHED)
 		cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
 
-	if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations))
+	if (iwl_mvm_scan_use_ebs(mvm, vif))
 		cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
 				     IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
 				     IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
@@ -1079,12 +1095,23 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
 				       params->n_channels, ssid_bitmap, cmd);
 
-	/* With UMAC we use only one schedule for now, so use the sum
-	 * of the iterations (with a a maximum of 255).
+	for (i = 0; i < params->n_scan_plans; i++) {
+		struct cfg80211_sched_scan_plan *scan_plan =
+			&params->scan_plans[i];
+
+		sec_part->schedule[i].iter_count = scan_plan->iterations;
+		sec_part->schedule[i].interval =
+			cpu_to_le16(scan_plan->interval);
+	}
+
+	/*
+	 * If the number of iterations of the last scan plan is set to
+	 * zero, it should run infinitely. However, this is not always the case.
+	 * For example, when regular scan is requested the driver sets one scan
+	 * plan with one iteration.
 	 */
-	sec_part->schedule[0].iter_count =
-		(n_iterations > 255) ? 255 : n_iterations;
-	sec_part->schedule[0].interval = cpu_to_le16(params->interval);
+	if (!sec_part->schedule[i - 1].iter_count)
+		sec_part->schedule[i - 1].iter_count = 0xff;
 
 	sec_part->delay = cpu_to_le16(params->delay);
 	sec_part->preq = params->preq;
@@ -1150,6 +1177,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	};
 	struct iwl_mvm_scan_params params = {};
 	int ret;
+	struct cfg80211_sched_scan_plan scan_plan = { .iterations = 1 };
 
 	lockdep_assert_held(&mvm->mutex);
 
@@ -1162,8 +1190,6 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	if (ret)
 		return ret;
 
-	iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
-
 	/* we should have failed registration if scan_cmd was NULL */
 	if (WARN_ON(!mvm->scan_cmd))
 		return -ENOMEM;
@@ -1175,7 +1201,6 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	params.flags = req->flags;
 	params.n_channels = req->n_channels;
 	params.delay = 0;
-	params.interval = 0;
 	params.ssids = req->ssids;
 	params.channels = req->channels;
 	params.mac_addr = req->mac_addr;
@@ -1185,8 +1210,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	params.n_match_sets = 0;
 	params.match_sets = NULL;
 
-	params.iterations[0] = 1;
-	params.iterations[1] = 0;
+	params.scan_plans = &scan_plan;
+	params.n_scan_plans = 1;
 
 	params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
 
@@ -1205,21 +1230,20 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		return ret;
 
 	ret = iwl_mvm_send_cmd(mvm, &hcmd);
-
-	if (!ret) {
-		IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
-		mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
-	} else {
+	if (ret) {
 		/* If the scan failed, it usually means that the FW was unable
 		 * to allocate the time events. Warn on it, but maybe we
 		 * should try to send the command again with different params.
 		 */
 		IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
+		return ret;
 	}
 
-	if (ret)
-		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+	IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
+	mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
+	iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
 
-	return ret;
+	return 0;
 }
 
 int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
@@ -1265,20 +1289,14 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
 	params.pass_all = iwl_mvm_scan_pass_all(mvm, req);
 	params.n_match_sets = req->n_match_sets;
 	params.match_sets = req->match_sets;
+	if (!req->n_scan_plans)
+		return -EINVAL;
 
-	params.iterations[0] = 0;
-	params.iterations[1] = 0xff;
+	params.n_scan_plans = req->n_scan_plans;
+	params.scan_plans = req->scan_plans;
 
 	params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
 
-	if (req->scan_plans[0].interval > U16_MAX) {
-		IWL_DEBUG_SCAN(mvm,
-			       "interval value is > 16-bits, set to max possible\n");
-		params.interval = U16_MAX;
-	} else {
-		params.interval = req->scan_plans[0].interval;
-	}
-
 	/* In theory, LMAC scans can handle a 32-bit delay, but since
 	 * waiting for over 18 hours to start the scan is a bit silly
 	 * and to keep it aligned with UMAC scans (which only support


@@ -255,7 +255,7 @@ static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
 	/* disable the TDLS STA-specific queues */
 	sta_msk = mvmsta->tfd_queue_msk;
 	for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
-		iwl_mvm_disable_txq(mvm, i, i, 0, 0);
+		iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
 }
 
 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
@@ -474,7 +474,8 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
 		unsigned long i, msk = mvm->tfd_drained[sta_id];
 
 		for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
-			iwl_mvm_disable_txq(mvm, i, i, 0, 0);
+			iwl_mvm_disable_txq(mvm, i, i,
+					    IWL_MAX_TID_COUNT, 0);
 
 		mvm->tfd_drained[sta_id] = 0;
 		IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
@@ -501,7 +502,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 		if (ret)
 			return ret;
 		/* flush its queues here since we are freeing mvm_sta */
-		ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
+		ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
 		if (ret)
 			return ret;
 		ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
@@ -1155,7 +1156,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	if (old_state >= IWL_AGG_ON) {
 		iwl_mvm_drain_sta(mvm, mvmsta, true);
-		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
+		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
 			IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
 		iwl_trans_wait_tx_queue_empty(mvm->trans,
 					      mvmsta->tfd_queue_msk);


@@ -129,7 +129,7 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
 	 * issue as it will have to complete before the next command is
 	 * executed, and a new time event means a new command.
 	 */
-	iwl_mvm_flush_tx_path(mvm, queues, false);
+	iwl_mvm_flush_tx_path(mvm, queues, CMD_ASYNC);
 }
 
 static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)


@@ -176,6 +176,9 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
 	struct iwl_dts_measurement_cmd cmd = {
 		.flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP),
 	};
+	struct iwl_ext_dts_measurement_cmd extcmd = {
+		.control_mode = cpu_to_le32(DTS_AUTOMATIC),
+	};
 	u32 cmdid;
 
 	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR))
@@ -183,8 +186,12 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
 				   PHY_OPS_GROUP, 0);
 	else
 		cmdid = CMD_DTS_MEASUREMENT_TRIGGER;
-	return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0,
-				    sizeof(cmd), &cmd);
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa,
+			 IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE))
+		return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(cmd), &cmd);
+
+	return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(extcmd), &extcmd);
 }
 
 int iwl_mvm_get_temp(struct iwl_mvm *mvm)


@@ -1099,7 +1099,7 @@ out:
  * 2) flush the Tx path
  * 3) wait for the transport queues to be empty
  */
-int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync)
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
 {
 	int ret;
 	struct iwl_tx_path_flush_cmd flush_cmd = {
@@ -1107,8 +1107,6 @@ int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync)
 		.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
 	};
 
-	u32 flags = sync ? 0 : CMD_ASYNC;
-
 	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
 				   sizeof(flush_cmd), &flush_cmd);
 	if (ret)


@@ -592,10 +592,8 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 	do {
 		ret = iwl_pcie_set_hw_ready(trans);
-		if (ret >= 0) {
-			ret = 0;
-			goto out;
-		}
+		if (ret >= 0)
+			return 0;
 
 		usleep_range(200, 1000);
 		t += 200;
@@ -605,10 +603,6 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 
 	IWL_ERR(trans, "Couldn't prepare the card\n");
 
-out:
-	iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
-		      CSR_RESET_LINK_PWR_MGMT_DISABLED);
-
 	return ret;
} }