Some patches focusing on bugfixes for v4.11:

* Fix 802.11w, which was failing due to an IGTK bug;
* A few more bugzilla bug fixes;
* A channel-switch race condition fix;
* Some fixes related to suspend/resume with new HW;
* The RF-kill saga continues;
* And some other fixes here and there...
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEF3LNfgb2BPWm68smoUecoho8xfoFAlibZVQACgkQoUecoho8
 xfp+EQ/+K1/pgC4XfjifbLVZFHVhTnTVl9Nym2LkLIH7kEQPiTQXUMXlHkQZv546
 ir1neeqKdidqQZlaM0gFpc/DWgiMxWui/e6Va1oegr0ZqcxUNN3+CXo1bTL88kmu
 CTYjPVnjOFRZLxaXt/9wBkJ2ktWNSb8BYeK6Ftj21iX9xyf2xBSCV2En+aB6dhv1
 1UbOLPn8raqubAFn1ZcwEZw2Ems4xyrOlriHNHOzhCsXG+PE0d+OxtxcVcQXF7nu
 lyqVCYgREIz0114MrLTlo5atur2LKLZqfRrkP+fgjsOekgHWMF5yyB9zpoO/TTcQ
 jVpAs2P4q5uDqMRI5BM3qVB7XPt3HDYLpTb1pyf+EL4yR/3NgN4QdB8vzZm32QgN
 0Of97AIlJOkO8FRVhHyI2z6QO6vHBVK+tzyrV1FCTc+ZQYfnj+r3hRYPCCFr/UW8
 2fB3esPiKRD6cXng8fJkFIw79e9erEgOl8hi5IbUG0zVbetDhZrtevjFrDoFzI7E
 X9LSoIcCA6J616T/k6Vi2e40xVq3u5GbYTSg8GvX+TDI0UPfKJrx6AoSImxaPGzh
 ByQ5Ib+xOQo6oZsNy27cw8cDGojWlHp2s0xj7ilgpMOlpmOP5eHlKLE0hkzOhNmg
 Zxv5GD0ar0zEW1jIiR/O0PleUGoLCdID9M2zkeUgcQ6ftB65WVQ=
 =dHzs
 -----END PGP SIGNATURE-----

Merge tag 'iwlwifi-next-for-kalle-2017-02-08' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

Some patches focusing on bugfixes for v4.11:

  * Fix 802.11w, which was failing due to an IGTK bug;
  * A few more bugzilla bug fixes;
  * A channel-switch race condition fix;
  * Some fixes related to suspend/resume with new HW;
  * The RF-kill saga continues;
  * And some other fixes here and there...
Kalle Valo 2017-02-08 21:28:36 +02:00
Parents: 514612fc44 0c8d0a4770
Commit: cbda794cf1
11 changed files with 188 additions and 145 deletions

View file

@@ -90,13 +90,16 @@ config IWLWIFI_BCAST_FILTERING
config IWLWIFI_PCIE_RTPM
bool "Enable runtime power management mode for PCIe devices"
depends on IWLMVM && PM
depends on IWLMVM && PM && EXPERT
default false
help
Say Y here to enable runtime power management for PCIe
devices. If enabled, the device will go into low power mode
when idle for a short period of time, allowing for improved
power saving during runtime.
power saving during runtime. Note that this feature requires
a tight integration with the platform. It is not recommended
to enable this feature without proper validation with the
specific target platform.
If unsure, say N.
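For readers unfamiliar with what this option actually enables: below is a minimal, editor-added sketch of the generic Linux runtime-PM autosuspend idiom a PCIe driver typically uses when such a Kconfig switch is set. The helper name and the 2000 ms delay are illustrative assumptions, not taken from iwlwifi.

#include <linux/pm_runtime.h>

/* Editorial sketch only, not iwlwifi code: opt a device into runtime PM
 * with autosuspend so it drops to low power after a short idle period. */
static void example_enable_runtime_pm(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* idle period before suspend, in ms */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_allow(dev);			/* permit runtime suspend for this device */
	pm_runtime_put_autosuspend(dev);	/* drop our usage count; autosuspend once it hits zero */
}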

View file

@@ -740,7 +740,10 @@ static u16 rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
/* Find the previous rate that is in the rate mask */
i = index - 1;
for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
if (i >= 0)
mask = BIT(i);
for (; i >= 0; i--, mask >>= 1) {
if (rate_mask & mask) {
low = i;
break;
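The hunk above (and its twin in the mvm rate-scaling code later in this commit) guards the mask initialisation: when index is 0, i starts at -1 and the old mask = (1 << i) shifts by a negative count, which is undefined behaviour in C. Below is a standalone, editor-added sketch of the corrected search; the function and parameter names are illustrative, not the driver's.

/* Editorial sketch only: find the closest lower rate present in the mask. */
static int example_prev_rate_in_mask(u16 rate_mask, u8 index)
{
	int i;

	/* Start one below the current rate; never shift by a negative count. */
	for (i = (int)index - 1; i >= 0; i--) {
		if (rate_mask & (1u << i))
			return i;	/* previous rate that is in the rate mask */
	}
	return -1;			/* no lower rate available */
}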

View file

@@ -1396,19 +1396,15 @@ void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
IWL_DEBUG_INFO(mvm,
"MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
le32_to_cpu(mfuart_notif->installed_ver),
le32_to_cpu(mfuart_notif->external_ver),
le32_to_cpu(mfuart_notif->status),
le32_to_cpu(mfuart_notif->duration));
if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
IWL_DEBUG_INFO(mvm,
"MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x, image size: 0x%08x\n",
le32_to_cpu(mfuart_notif->installed_ver),
le32_to_cpu(mfuart_notif->external_ver),
le32_to_cpu(mfuart_notif->status),
le32_to_cpu(mfuart_notif->duration),
"MFUART: image size: 0x%08x\n",
le32_to_cpu(mfuart_notif->image_size));
else
IWL_DEBUG_INFO(mvm,
"MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
le32_to_cpu(mfuart_notif->installed_ver),
le32_to_cpu(mfuart_notif->external_ver),
le32_to_cpu(mfuart_notif->status),
le32_to_cpu(mfuart_notif->duration));
}
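The MFUART rework above follows a common firmware-API pattern: a notification struct gains a trailing field (image_size here), and the driver checks the received payload length before touching it, so older firmware that sends the shorter layout keeps working. An editor-added, generic sketch of that pattern follows; the struct and function names are hypothetical, and the kernel helpers (__le32, le32_to_cpu, pr_info) are assumed available from the usual headers.

/* Editorial sketch only; names are hypothetical, not the driver's. */
struct example_notif_v1 {
	__le32 status;
};

struct example_notif_v2 {
	__le32 status;
	__le32 image_size;	/* field appended by a newer firmware API */
};

static void example_handle_notif(const void *data, size_t len)
{
	const struct example_notif_v2 *n = data;

	if (len >= sizeof(struct example_notif_v2))
		pr_info("status 0x%08x, image size 0x%08x\n",
			le32_to_cpu(n->status), le32_to_cpu(n->image_size));
	else
		pr_info("status 0x%08x\n", le32_to_cpu(n->status));
}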

View file

@@ -2008,16 +2008,16 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_UMAC_SCAN))
iwl_mvm_config_scan(mvm);
} else if (changes & BSS_CHANGED_BEACON_INFO) {
}
if (changes & BSS_CHANGED_BEACON_INFO) {
/*
* We received a beacon _after_ association so
* We received a beacon from the associated AP so
* remove the session protection.
*/
iwl_mvm_remove_time_event(mvm, mvmvif,
&mvmvif->time_event_data);
}
if (changes & BSS_CHANGED_BEACON_INFO) {
iwl_mvm_sf_update(mvm, vif, false);
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
}
@@ -2627,11 +2627,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
mvmvif->ap_assoc_sta_count++;
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
}
iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
true);
ret = iwl_mvm_update_sta(mvm, vif, sta);
if (ret == 0)
iwl_mvm_rs_rate_init(mvm, sta,
mvmvif->phy_ctxt->channel->band,
true);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {

View file

@@ -972,7 +972,9 @@ static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
/* Find the previous rate that is in the rate mask */
i = index - 1;
for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
if (i >= 0)
mask = BIT(i);
for (; i >= 0; i--, mask >>= 1) {
if (rate_mask & mask) {
low = i;
break;
@@ -3616,6 +3618,8 @@ int rs_pretty_print_rate(char *buf, const u32 rate)
} else if (rate & RATE_MCS_HT_MSK) {
type = "HT";
mcs = rate & RATE_HT_MCS_INDEX_MSK;
nss = ((rate & RATE_HT_MCS_NSS_MSK)
>> RATE_HT_MCS_NSS_POS) + 1;
} else {
type = "Unknown"; /* shouldn't happen */
}

View file

@@ -418,7 +418,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
/* ignore nssn smaller than head sn - this can happen due to timeout */
if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
return;
goto set_timer;
while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
int index = ssn % reorder_buf->buf_size;
@@ -441,6 +441,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
}
reorder_buf->head_sn = nssn;
set_timer:
if (reorder_buf->num_stored && !reorder_buf->removed) {
u16 index = reorder_buf->head_sn % reorder_buf->buf_size;

View file

@@ -3047,6 +3047,11 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
/* Get the station from the mvm local station table */
mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
if (!mvm_sta) {
IWL_ERR(mvm, "Failed to find station\n");
return -EINVAL;
}
sta_id = mvm_sta->sta_id;
IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
keyconf->keyidx, sta_id);
@@ -3074,8 +3079,6 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
return 0;
}
sta_id = mvm_sta->sta_id;
ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
if (ret)
return ret;

View file

@@ -506,15 +506,17 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
switch (info->control.vif->type) {
case NL80211_IFTYPE_AP:
/*
* handle legacy hostapd as well, where station may be added
* only after assoc.
* Handle legacy hostapd as well, where station may be added
* only after assoc. Take care of the case where we send a
* deauth to a station that we don't have.
*/
if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc))
if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
ieee80211_is_deauth(fc))
return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
if (info->hw_queue == info->control.vif->cab_queue)
return info->hw_queue;
WARN_ON_ONCE(1);
WARN_ONCE(1, "fc=0x%02x", le16_to_cpu(fc));
return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
case NL80211_IFTYPE_P2P_DEVICE:
if (ieee80211_is_mgmt(fc))
@@ -1274,8 +1276,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
memset(&info->status, 0, sizeof(info->status));
info->flags &= ~IEEE80211_TX_CTL_AMPDU;
/* inform mac80211 about what happened with the frame */
switch (status & TX_STATUS_MSK) {
case TX_STATUS_SUCCESS:
@@ -1298,10 +1298,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
(void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
/* Single frame failure in an AMPDU queue => send BAR */
if (txq_id >= mvm->first_agg_queue &&
if (info->flags & IEEE80211_TX_CTL_AMPDU &&
!(info->flags & IEEE80211_TX_STAT_ACK) &&
!(info->flags & IEEE80211_TX_STAT_TX_FILTERED))
info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
info->flags &= ~IEEE80211_TX_CTL_AMPDU;
/* W/A FW bug: seq_ctl is wrong when the status isn't success */
if (status != TX_STATUS_SUCCESS) {
@@ -1336,7 +1337,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
ieee80211_tx_status(mvm->hw, skb);
}
if (txq_id >= mvm->first_agg_queue) {
if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) {
/* If this is an aggregation queue, we use the ssn since:
* ssn = wifi seq_num % 256.
* The seq_ctl is the sequence control of the packet to which

View file

@@ -279,7 +279,7 @@ struct iwl_txq {
bool frozen;
u8 active;
bool ampdu;
bool block;
int block;
unsigned long wd_timeout;
struct sk_buff_head overflow_q;
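The one-line type change above turns the queue's block flag into a counter, presumably (an inference, not stated in the hunk) so that independent callers can block a TX queue without one caller's unblock cancelling another's. A hypothetical, editor-added sketch of that counted pattern:

/* Editorial sketch only; none of these names come from the driver. */
struct example_txq {
	int block;	/* number of outstanding blockers; >0 means blocked */
};

static void example_txq_block(struct example_txq *q)
{
	q->block++;
}

static void example_txq_unblock(struct example_txq *q)
{
	if (!WARN_ON(q->block <= 0))
		q->block--;
}

static bool example_txq_is_blocked(const struct example_txq *q)
{
	return q->block > 0;
}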

View file

@@ -1609,6 +1609,9 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
mutex_lock(&trans_pcie->mutex);
hw_rfkill = iwl_is_rfkill_set(trans);
if (hw_rfkill)
set_bit(STATUS_RFKILL, &trans->status);
IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
hw_rfkill ? "disable radio" : "enable radio");
@@ -1617,7 +1620,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
mutex_unlock(&trans_pcie->mutex);
if (hw_rfkill) {
set_bit(STATUS_RFKILL, &trans->status);
if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
&trans->status))
IWL_DEBUG_RF_KILL(trans,
@@ -1954,6 +1956,9 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
mutex_lock(&trans_pcie->mutex);
hw_rfkill = iwl_is_rfkill_set(trans);
if (hw_rfkill)
set_bit(STATUS_RFKILL, &trans->status);
IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
hw_rfkill ? "disable radio" : "enable radio");
@@ -1962,7 +1967,6 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
mutex_unlock(&trans_pcie->mutex);
if (hw_rfkill) {
set_bit(STATUS_RFKILL, &trans->status);
if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
&trans->status))
IWL_DEBUG_RF_KILL(trans,

View file

@@ -1076,6 +1076,123 @@ static bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans)
return hw_rfkill;
}
struct iwl_causes_list {
u32 cause_num;
u32 mask_reg;
u8 addr;
};
static struct iwl_causes_list causes_list[] = {
{MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
{MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
{MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
{MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
{MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
{MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11},
{MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
{MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
{MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
{MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29},
{MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
{MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
{MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
{MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
int i;
/*
* Access all non RX causes and map them to the default irq.
* In case we are missing at least one interrupt vector,
* the first interrupt vector will serve non-RX and FBQ causes.
*/
for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
iwl_clear_bit(trans, causes_list[i].mask_reg,
causes_list[i].cause_num);
}
}
static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 offset =
trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
u32 val, idx;
/*
* The first RX queue - fallback queue, which is designated for
* management frame, command responses etc, is always mapped to the
* first interrupt vector. The other RX queues are mapped to
* the other (N - 2) interrupt vectors.
*/
val = BIT(MSIX_FH_INT_CAUSES_Q(0));
for (idx = 1; idx < trans->num_rx_queues; idx++) {
iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
MSIX_FH_INT_CAUSES_Q(idx - offset));
val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
}
iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);
val = MSIX_FH_INT_CAUSES_Q(0);
if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
val |= MSIX_NON_AUTO_CLEAR_CAUSE;
iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);
if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}
static void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
struct iwl_trans *trans = trans_pcie->trans;
if (!trans_pcie->msix_enabled) {
if (trans->cfg->mq_rx_supported &&
test_bit(STATUS_DEVICE_ENABLED, &trans->status))
iwl_write_prph(trans, UREG_CHICK,
UREG_CHICK_MSI_ENABLE);
return;
}
/*
* The IVAR table needs to be configured again after reset,
* but if the device is disabled, we can't write to
* prph.
*/
if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
/*
* Each cause from the causes list above and the RX causes is
* represented as a byte in the IVAR table. The first nibble
* represents the bound interrupt vector of the cause, the second
* represents no auto clear for this cause. This will be set if its
* interrupt vector is bound to serve other causes.
*/
iwl_pcie_map_rx_causes(trans);
iwl_pcie_map_non_rx_causes(trans);
}
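/*
 * Editorial illustration, not part of the patch: composing one IVAR
 * entry as described in the comment above - the low nibble holds the
 * bound MSI-X vector, and MSIX_NON_AUTO_CLEAR_CAUSE (from the driver
 * headers) is OR'ed in when that vector also serves other causes, so
 * the cause is not auto-cleared.  The helper name is hypothetical.
 */
static u8 example_ivar_entry(u8 vector, bool shared_vector)
{
	u8 val = vector & 0xf;		/* bound interrupt vector */

	if (shared_vector)
		val |= MSIX_NON_AUTO_CLEAR_CAUSE;
	return val;
}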
static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
struct iwl_trans *trans = trans_pcie->trans;
iwl_pcie_conf_msix_hw(trans_pcie);
if (!trans_pcie->msix_enabled)
return;
trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
trans_pcie->fh_mask = trans_pcie->fh_init_mask;
trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}
static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1128,6 +1245,15 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
usleep_range(1000, 2000);
/*
* Upon stop, the IVAR table gets erased, so msi-x won't
* work. This causes a bug in RF-KILL flows, since the interrupt
* that enables radio won't fire on the correct irq, and the
* driver won't be able to handle the interrupt.
* Configure the IVAR table again after reset.
*/
iwl_pcie_conf_msix_hw(trans_pcie);
/*
* Upon stop, the APM issues an interrupt if HW RF kill is set.
* This is a bug in certain versions of the hardware.
@@ -1346,6 +1472,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
enum iwl_d3_status *status,
bool test, bool reset)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 val;
int ret;
@@ -1358,11 +1485,15 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
iwl_pcie_enable_rx_wake(trans, true);
/*
* Also enables interrupts - none will happen as the device doesn't
* know we're waking it up, only when the opmode actually tells it
* after this call.
* Reconfigure IVAR table in case of MSIX or reset ict table in
* MSI mode since HW reset erased it.
* Also enables interrupts - none will happen as
* the device doesn't know we're waking it up, only when
* the opmode actually tells it after this call.
*/
iwl_pcie_reset_ict(trans);
iwl_pcie_conf_msix_hw(trans_pcie);
if (!trans_pcie->msix_enabled)
iwl_pcie_reset_ict(trans);
iwl_enable_interrupts(trans);
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -1405,109 +1536,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
return 0;
}
struct iwl_causes_list {
u32 cause_num;
u32 mask_reg;
u8 addr;
};
static struct iwl_causes_list causes_list[] = {
{MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
{MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
{MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
{MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
{MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
{MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11},
{MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
{MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
{MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
{MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29},
{MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
{MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
{MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
{MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
int i;
/*
* Access all non RX causes and map them to the default irq.
* In case we are missing at least one interrupt vector,
* the first interrupt vector will serve non-RX and FBQ causes.
*/
for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
iwl_clear_bit(trans, causes_list[i].mask_reg,
causes_list[i].cause_num);
}
}
static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 offset =
trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
u32 val, idx;
/*
* The first RX queue - fallback queue, which is designated for
* management frame, command responses etc, is always mapped to the
* first interrupt vector. The other RX queues are mapped to
* the other (N - 2) interrupt vectors.
*/
val = BIT(MSIX_FH_INT_CAUSES_Q(0));
for (idx = 1; idx < trans->num_rx_queues; idx++) {
iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
MSIX_FH_INT_CAUSES_Q(idx - offset));
val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
}
iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);
val = MSIX_FH_INT_CAUSES_Q(0);
if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
val |= MSIX_NON_AUTO_CLEAR_CAUSE;
iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);
if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}
static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
struct iwl_trans *trans = trans_pcie->trans;
if (!trans_pcie->msix_enabled) {
if (trans->cfg->mq_rx_supported)
iwl_write_prph(trans, UREG_CHICK,
UREG_CHICK_MSI_ENABLE);
return;
}
iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
/*
* Each cause from the causes list above and the RX causes is
* represented as a byte in the IVAR table. The first nibble
* represents the bound interrupt vector of the cause, the second
* represents no auto clear for this cause. This will be set if its
* interrupt vector is bound to serve other causes.
*/
iwl_pcie_map_rx_causes(trans);
iwl_pcie_map_non_rx_causes(trans);
trans_pcie->fh_init_mask =
~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
trans_pcie->fh_mask = trans_pcie->fh_init_mask;
trans_pcie->hw_init_mask =
~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}
static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
struct iwl_trans *trans)
{
@@ -1675,6 +1703,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
iwl_pcie_apm_init(trans);
iwl_pcie_init_msix(trans_pcie);
/* From now on, the op_mode will be kept updated about RF kill state */
iwl_enable_rfkill_int(trans);