iwlagn: remove most BUG_ON instances
There are a number of things in the driver that may result in a BUG(), which is suboptimal since it's hard to get debugging information out of the driver in that case and the user experience is also not good :-) Almost all BUG_ON instances can be converted to WARN_ON with a few lines of appropriate error handling, so do that instead.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Parent: e79b1ca75b
Commit: 3e41ace5de
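The conversion relies on the fact that the kernel's WARN_ON() macro, unlike BUG_ON(), evaluates to the truth value of its condition: it prints a warning with a backtrace when the condition holds, and the result can guard a recovery path directly. A minimal sketch of the before/after idiom used throughout the patch (the function, struct, and constant names here are illustrative, not taken from the driver):

	/* Before: a failed precondition kills the machine, taking any
	 * debugging opportunity with it. */
	int send_cmd(struct cmd *cmd)
	{
		BUG_ON(cmd->len > MAX_CMD_LEN);
		/* ... build and queue the command ... */
		return 0;
	}

	/* After: WARN_ON() logs a warning plus backtrace and returns the
	 * condition's value, so the driver can fail just this operation
	 * with an error code while the machine keeps running. */
	int send_cmd(struct cmd *cmd)
	{
		if (WARN_ON(cmd->len > MAX_CMD_LEN))
			return -EINVAL;
		/* ... build and queue the command ... */
		return 0;
	}

Where no sensible error path exists the assertions stay, which is why the title says "most"; compile-time checks such as the BUILD_BUG_ON on TFD_QUEUE_SIZE_MAX below are likewise untouched.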
@@ -394,7 +394,9 @@ int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
 		return -EINVAL;
 	}
 
-	BUG_ON(addr & ~DMA_BIT_MASK(36));
+	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
+		return -EINVAL;
+
 	if (unlikely(addr & ~IWL_TX_DMA_MASK))
 		IWL_ERR(priv, "Unaligned address = %llx\n",
 			(unsigned long long)addr);
@@ -718,7 +720,10 @@ static void iwl_rx_handle(struct iwl_priv *priv)
 		/* If an RXB doesn't have a Rx queue slot associated with it,
 		 * then a bug has been introduced in the queue refilling
 		 * routines -- catch it here */
-		BUG_ON(rxb == NULL);
+		if (WARN_ON(rxb == NULL)) {
+			i = (i + 1) & RX_QUEUE_MASK;
+			continue;
+		}
 
 		rxq->queue[i] = NULL;
 
@@ -215,12 +215,6 @@ static int iwlcore_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
 	return nvm_type;
 }
 
-const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
-{
-	BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
-	return &priv->eeprom[offset];
-}
-
 static int iwl_init_otp_access(struct iwl_priv *priv)
 {
 	int ret;
@@ -309,7 +309,6 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv);
 const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
 int iwlcore_eeprom_verify_signature(struct iwl_priv *priv);
 u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset);
-const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
 int iwl_init_channel_map(struct iwl_priv *priv);
 void iwl_free_channel_map(struct iwl_priv *priv);
 const struct iwl_channel_info *iwl_get_channel_info(
@@ -143,10 +143,12 @@ static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 {
 	int ret;
 
-	BUG_ON(!(cmd->flags & CMD_ASYNC));
+	if (WARN_ON(!(cmd->flags & CMD_ASYNC)))
+		return -EINVAL;
 
 	/* An asynchronous command can not expect an SKB to be set. */
-	BUG_ON(cmd->flags & CMD_WANT_SKB);
+	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
+		return -EINVAL;
 
 	/* Assign a generic callback if one is not provided */
 	if (!cmd->callback)
@@ -169,10 +171,12 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	int cmd_idx;
 	int ret;
 
-	lockdep_assert_held(&priv->mutex);
+	if (WARN_ON(cmd->flags & CMD_ASYNC))
+		return -EINVAL;
 
 	/* A synchronous command can not have a callback set. */
-	BUG_ON((cmd->flags & CMD_ASYNC) || cmd->callback);
+	if (WARN_ON(cmd->callback))
+		return -EINVAL;
 
 	IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
 			get_cmd_string(cmd->id));
@@ -188,9 +188,10 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
 		table = range_0;
 	}
 
-	BUG_ON(lvl < 0 || lvl >= IWL_POWER_NUM);
-
-	*cmd = table[lvl].cmd;
+	if (WARN_ON(lvl < 0 || lvl >= IWL_POWER_NUM))
+		memset(cmd, 0, sizeof(*cmd));
+	else
+		*cmd = table[lvl].cmd;
 
 	if (period == 0) {
 		skip = 0;
@@ -494,7 +494,8 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
 
 	priv->num_stations--;
 
-	BUG_ON(priv->num_stations < 0);
+	if (WARN_ON(priv->num_stations < 0))
+		priv->num_stations = 0;
 
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 
@@ -679,7 +680,8 @@ void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
 
 		priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
 		priv->num_stations--;
-		BUG_ON(priv->num_stations < 0);
+		if (WARN_ON(priv->num_stations < 0))
+			priv->num_stations = 0;
 		kfree(priv->stations[i].lq);
 		priv->stations[i].lq = NULL;
 	}
@@ -775,7 +777,8 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 	spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
 
 	iwl_dump_lq_cmd(priv, lq);
-	BUG_ON(init && (cmd.flags & CMD_ASYNC));
+	if (WARN_ON(init && (cmd.flags & CMD_ASYNC)))
+		return -EINVAL;
 
 	if (is_lq_table_valid(priv, ctx, lq))
 		ret = iwl_send_cmd(priv, &cmd);
@@ -263,11 +263,13 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
 
 	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
 	 * and iwl_queue_dec_wrap are broken. */
-	BUG_ON(!is_power_of_2(count));
+	if (WARN_ON(!is_power_of_2(count)))
+		return -EINVAL;
 
 	/* slots_num must be power-of-two size, otherwise
 	 * get_cmd_index is broken. */
-	BUG_ON(!is_power_of_2(slots_num));
+	if (WARN_ON(!is_power_of_2(slots_num)))
+		return -EINVAL;
 
 	q->low_mark = q->n_window / 4;
 	if (q->low_mark < 4)
@@ -384,7 +386,9 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
 
 	/* Initialize queue's high/low-water marks, and head/tail indexes */
-	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+	ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+	if (ret)
+		return ret;
 
 	/* Tell device where to find queue */
 	priv->cfg->ops->lib->txq_init(priv, txq);
@@ -446,14 +450,19 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
 	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
 
-	/* If any of the command structures end up being larger than
+	/*
+	 * If any of the command structures end up being larger than
 	 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
 	 * we will need to increase the size of the TFD entries
 	 * Also, check to see if command buffer should not exceed the size
-	 * of device_cmd and max_cmd_size. */
-	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
-	       !(cmd->flags & CMD_SIZE_HUGE));
-	BUG_ON(fix_size > IWL_MAX_CMD_SIZE);
+	 * of device_cmd and max_cmd_size.
+	 */
+	if (WARN_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
+		    !(cmd->flags & CMD_SIZE_HUGE)))
+		return -EINVAL;
+
+	if (WARN_ON(fix_size > IWL_MAX_CMD_SIZE))
+		return -EINVAL;
 
 	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
 		IWL_WARN(priv, "Not sending command - %s KILL\n",