Merge branch 'master' of git://git.infradead.org/users/linville/wireless-next into for-davem
Commit: b4d3de8ca2
@@ -178,23 +178,29 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry);
void ath_hw_cycle_counters_update(struct ath_common *common);
int32_t ath_hw_get_listen_time(struct ath_common *common);

extern __attribute__ ((format (printf, 3, 4))) int
ath_printk(const char *level, struct ath_common *common, const char *fmt, ...);
extern __attribute__((format (printf, 2, 3)))
void ath_printk(const char *level, const char *fmt, ...);

#define _ath_printk(level, common, fmt, ...) \
do { \
    __always_unused struct ath_common *unused = common; \
    ath_printk(level, fmt, ##__VA_ARGS__); \
} while (0)

#define ath_emerg(common, fmt, ...) \
    ath_printk(KERN_EMERG, common, fmt, ##__VA_ARGS__)
    _ath_printk(KERN_EMERG, common, fmt, ##__VA_ARGS__)
#define ath_alert(common, fmt, ...) \
    ath_printk(KERN_ALERT, common, fmt, ##__VA_ARGS__)
    _ath_printk(KERN_ALERT, common, fmt, ##__VA_ARGS__)
#define ath_crit(common, fmt, ...) \
    ath_printk(KERN_CRIT, common, fmt, ##__VA_ARGS__)
    _ath_printk(KERN_CRIT, common, fmt, ##__VA_ARGS__)
#define ath_err(common, fmt, ...) \
    ath_printk(KERN_ERR, common, fmt, ##__VA_ARGS__)
    _ath_printk(KERN_ERR, common, fmt, ##__VA_ARGS__)
#define ath_warn(common, fmt, ...) \
    ath_printk(KERN_WARNING, common, fmt, ##__VA_ARGS__)
    _ath_printk(KERN_WARNING, common, fmt, ##__VA_ARGS__)
#define ath_notice(common, fmt, ...) \
    ath_printk(KERN_NOTICE, common, fmt, ##__VA_ARGS__)
    _ath_printk(KERN_NOTICE, common, fmt, ##__VA_ARGS__)
#define ath_info(common, fmt, ...) \
    ath_printk(KERN_INFO, common, fmt, ##__VA_ARGS__)
    _ath_printk(KERN_INFO, common, fmt, ##__VA_ARGS__)

/**
 * enum ath_debug_level - atheros wireless debug level
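
Note: the new ath_printk() no longer takes the struct ath_common pointer; _ath_printk() keeps the old two-argument call sites working by evaluating common into an __always_unused local and forwarding only the format arguments, so printf-style format checking is preserved. A minimal standalone sketch of that wrapper pattern (illustrative names only, not driver code):

#include <stdarg.h>
#include <stdio.h>

struct ctx { int debug_mask; };

/* Varargs logger; format checking via the printf attribute (args 1 and 2). */
__attribute__((format(printf, 1, 2)))
static void log_printk(const char *fmt, ...)
{
    va_list args;

    va_start(args, fmt);
    vprintf(fmt, args);
    va_end(args);
}

/*
 * Wrapper that keeps the old two-argument call sites compiling: the context
 * argument is still evaluated (so side effects and type checking are kept)
 * but otherwise unused, mirroring the __always_unused local above.
 */
#define log_dbg(_c, fmt, ...) \
do { \
    struct ctx *__unused_ctx __attribute__((unused)) = (_c); \
    log_printk(fmt, ##__VA_ARGS__); \
} while (0)

int main(void)
{
    struct ctx c = { .debug_mask = 1 };

    log_dbg(&c, "listen time %d\n", 42); /* ctx accepted, then dropped */
    return 0;
}
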
@@ -246,27 +252,21 @@ enum ATH_DEBUG {

#ifdef CONFIG_ATH_DEBUG

#define ath_dbg(common, dbg_mask, fmt, ...) \
({ \
    int rtn; \
    if ((common)->debug_mask & dbg_mask) \
        rtn = ath_printk(KERN_DEBUG, common, fmt, \
                         ##__VA_ARGS__); \
    else \
        rtn = 0; \
    \
    rtn; \
})
#define ath_dbg(common, dbg_mask, fmt, ...) \
do { \
    if ((common)->debug_mask & dbg_mask) \
        _ath_printk(KERN_DEBUG, common, fmt, ##__VA_ARGS__); \
} while (0)

#define ATH_DBG_WARN(foo, arg...) WARN(foo, arg)
#define ATH_DBG_WARN_ON_ONCE(foo) WARN_ON_ONCE(foo)

#else

static inline __attribute__ ((format (printf, 3, 4))) int
ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask,
        const char *fmt, ...)
static inline __attribute__((format (printf, 3, 4)))
void ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask,
             const char *fmt, ...)
{
    return 0;
}
#define ATH_DBG_WARN(foo, arg...) do {} while (0)
#define ATH_DBG_WARN_ON_ONCE(foo) ({ \
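
Converting ath_dbg() from a value-returning statement expression to a do { } while (0) block assumes no caller consumes its return value; the do/while wrapper is what lets a multi-statement macro sit safely inside an unbraced if/else. A small self-contained illustration (generic names, not the driver macro):

#include <stdio.h>

#define DBG_ON 1

/* Without the do/while(0), the 'else' in main() would pair with the macro's
 * internal 'if' and the code would not compile (or would misbehave). */
#define dbg_print(mask, msg) \
do { \
    if ((mask) & DBG_ON) \
        printf("debug: %s\n", (msg)); \
} while (0)

int main(void)
{
    int connected = 0;

    if (connected)
        dbg_print(DBG_ON, "link up");
    else
        printf("link down\n"); /* binds to the outer 'if' as intended */

    return 0;
}
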
@@ -643,7 +643,7 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
    listenTime = ath_hw_get_listen_time(common);

    if (listenTime <= 0) {
        ah->stats.ast_ani_lneg++;
        ah->stats.ast_ani_lneg_or_lzero++;
        ath9k_ani_restart(ah);
        return false;
    }
@@ -148,8 +148,7 @@ struct ar5416Stats {
    u32 ast_ani_ofdmerrs;
    u32 ast_ani_cckerrs;
    u32 ast_ani_reset;
    u32 ast_ani_lzero;
    u32 ast_ani_lneg;
    u32 ast_ani_lneg_or_lzero;
    u32 avgbrssi;
    struct ath9k_mib_stats ast_mibstats;
};
@@ -159,7 +158,5 @@ void ath9k_enable_mib_counters(struct ath_hw *ah);
void ath9k_hw_disable_mib_counters(struct ath_hw *ah);
void ath9k_hw_ani_setup(struct ath_hw *ah);
void ath9k_hw_ani_init(struct ath_hw *ah);
int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
                                 struct ath9k_channel *chan);

#endif /* ANI_H */
@@ -273,7 +273,7 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,

static void ar9002_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
                                    u32 pktLen, enum ath9k_pkt_type type,
                                    u32 txPower, u32 keyIx,
                                    u32 txPower, u8 keyIx,
                                    enum ath9k_key_type keyType, u32 flags)
{
    struct ar5416_desc *ads = AR5416DESC(ds);
@@ -3318,7 +3318,7 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,

    word = kzalloc(2048, GFP_KERNEL);
    if (!word)
        return -1;
        return -ENOMEM;

    memcpy(mptr, &ar9300_default, mdata_size);
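
Returning -ENOMEM instead of -1 propagates a real errno value to callers (a bare -1 would read back as -EPERM). A minimal userspace sketch of the same convention, with a hypothetical helper name:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical allocation step: negative errno on failure, 0 on success. */
static int restore_eeprom(size_t words)
{
    char *word = calloc(words, sizeof(*word));

    if (!word)
        return -ENOMEM; /* not -1, which would read as -EPERM */

    /* ... use the buffer ... */
    free(word);
    return 0;
}

int main(void)
{
    int ret = restore_eeprom(2048);

    if (ret < 0)
        fprintf(stderr, "restore failed: %s\n", strerror(-ret));
    return ret ? EXIT_FAILURE : EXIT_SUCCESS;
}
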
@@ -312,7 +312,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,

static void ar9003_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
        u32 pktlen, enum ath9k_pkt_type type, u32 txpower,
        u32 keyIx, enum ath9k_key_type keyType, u32 flags)
        u8 keyIx, enum ath9k_key_type keyType, u32 flags)
{
    struct ar9003_txc *ads = (struct ar9003_txc *) ds;
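
Narrowing keyIx from u32 to u8 matches the size of the hardware key-cache index, and the same series narrows ATH9K_TXKEYIX_INVALID from ((u32)-1) to ((u8)-1) so the "no key" sentinel still compares equal in the smaller type. A standalone check of that sentinel arithmetic (illustrative constants only):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TXKEYIX_INVALID_32 ((uint32_t)-1) /* old sentinel: 0xffffffff */
#define TXKEYIX_INVALID_8  ((uint8_t)-1)  /* new sentinel: 0xff */

int main(void)
{
    uint8_t keyix = TXKEYIX_INVALID_8;

    /* Storing the 32-bit sentinel in a u8 silently truncates to 0xff, so
     * the comparison only stays meaningful with the matching u8 sentinel. */
    assert(keyix == TXKEYIX_INVALID_8);
    assert((uint8_t)TXKEYIX_INVALID_32 == TXKEYIX_INVALID_8);

    printf("invalid key index as u8: 0x%02x\n", keyix);
    return 0;
}
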
@@ -206,16 +206,17 @@ struct ath_atx_ac {
};

struct ath_frame_info {
    struct ath_buf *bf;
    int framelen;
    u32 keyix;
    enum ath9k_key_type keytype;
    u8 keyix;
    u8 retries;
    u16 seqno;
};

struct ath_buf_state {
    u8 bf_type;
    u8 bfs_paprd;
    u16 seqno;
    unsigned long bfs_paprd_timestamp;
};

@@ -235,7 +236,7 @@ struct ath_buf {

struct ath_atx_tid {
    struct list_head list;
    struct list_head buf_q;
    struct sk_buff_head buf_q;
    struct ath_node *an;
    struct ath_atx_ac *ac;
    unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
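
With tid->buf_q changed from a list_head of ath_buf entries to an sk_buff_head, the software queue holds skbs directly and is driven through the standard skb queue helpers; the __skb_* variants skip the queue's internal lock because the callers hold their own. A usage sketch of those helpers (kernel-only code, simplified and not taken from the driver):

#include <linux/skbuff.h>

/* Drain a queue oldest-first; __skb_dequeue() assumes the caller already
 * serializes access, which is why the driver wraps it in its own lock. */
static void demo_drain_queue(struct sk_buff_head *q)
{
    struct sk_buff *skb;

    while ((skb = __skb_dequeue(q)) != NULL)
        dev_kfree_skb_any(skb);
}

static void demo_queue_usage(struct sk_buff *skb)
{
    struct sk_buff_head q;

    __skb_queue_head_init(&q);  /* local queue, no spinlock needed */
    __skb_queue_tail(&q, skb);  /* replaces list_add_tail() on ath_buf */

    if (!skb_queue_empty(&q))   /* replaces !list_empty(&tid->buf_q) */
        demo_drain_queue(&q);
}
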
@@ -711,7 +711,7 @@ static ssize_t read_file_stations(struct file *file, char __user *user_buf,
                         " tid: %p %s %s %i %p %p\n",
                         tid, tid->sched ? "sched" : "idle",
                         tid->paused ? "paused" : "running",
                         list_empty(&tid->buf_q),
                         skb_queue_empty(&tid->buf_q),
                         tid->an, tid->ac);
        if (len >= size)
            goto done;
@@ -828,6 +828,8 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
                       struct ath_tx_status *ts, struct ath_txq *txq)
{
#define TX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].ts\
                        [sc->debug.tsidx].c)
    int qnum = txq->axq_qnum;

    TX_STAT_INC(qnum, tx_pkts_all);
@@ -857,6 +859,26 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
            TX_STAT_INC(qnum, data_underrun);
        if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
            TX_STAT_INC(qnum, delim_underrun);

    spin_lock(&sc->debug.samp_lock);
    TX_SAMP_DBG(jiffies) = jiffies;
    TX_SAMP_DBG(rssi_ctl0) = ts->ts_rssi_ctl0;
    TX_SAMP_DBG(rssi_ctl1) = ts->ts_rssi_ctl1;
    TX_SAMP_DBG(rssi_ctl2) = ts->ts_rssi_ctl2;
    TX_SAMP_DBG(rssi_ext0) = ts->ts_rssi_ext0;
    TX_SAMP_DBG(rssi_ext1) = ts->ts_rssi_ext1;
    TX_SAMP_DBG(rssi_ext2) = ts->ts_rssi_ext2;
    TX_SAMP_DBG(rateindex) = ts->ts_rateindex;
    TX_SAMP_DBG(isok) = !!(ts->ts_status & ATH9K_TXERR_MASK);
    TX_SAMP_DBG(rts_fail_cnt) = ts->ts_shortretry;
    TX_SAMP_DBG(data_fail_cnt) = ts->ts_longretry;
    TX_SAMP_DBG(rssi) = ts->ts_rssi;
    TX_SAMP_DBG(tid) = ts->tid;
    TX_SAMP_DBG(qid) = ts->qid;
    sc->debug.tsidx = (sc->debug.tsidx + 1) % ATH_DBG_MAX_SAMPLES;
    spin_unlock(&sc->debug.samp_lock);

#undef TX_SAMP_DBG
}

static const struct file_operations fops_xmit = {
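
The new TX/RX sampling keeps the last ATH_DBG_MAX_SAMPLES status records in a fixed array and advances the write index modulo the array size under samp_lock. The ring-buffer idea in isolation, with hypothetical field names and the locking omitted:

#include <stdio.h>

#define MAX_SAMPLES 10

struct tx_sample {
    unsigned long when;
    int rssi;
};

struct sample_ring {
    struct tx_sample ts[MAX_SAMPLES];
    unsigned int tsidx; /* next slot to overwrite */
};

/* Record one sample, overwriting the oldest entry once the ring wraps. */
static void record_sample(struct sample_ring *r, unsigned long when, int rssi)
{
    r->ts[r->tsidx].when = when;
    r->ts[r->tsidx].rssi = rssi;
    r->tsidx = (r->tsidx + 1) % MAX_SAMPLES; /* wrap around */
}

int main(void)
{
    struct sample_ring ring = { .tsidx = 0 };

    for (int i = 0; i < 25; i++) /* 25 inserts keep only the last 10 */
        record_sample(&ring, i, -40 - i);

    for (unsigned int i = 0; i < MAX_SAMPLES; i++)
        printf("slot %u: t=%lu rssi=%d\n", i, ring.ts[i].when,
               ring.ts[i].rssi);
    return 0;
}
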
@ -995,6 +1017,8 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
|
|||
{
|
||||
#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++
|
||||
#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
|
||||
#define RX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].rs\
|
||||
[sc->debug.rsidx].c)
|
||||
|
||||
u32 phyerr;
|
||||
|
||||
|
@ -1030,8 +1054,25 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
|
|||
|
||||
sc->debug.stats.rxstats.rs_antenna = rs->rs_antenna;
|
||||
|
||||
spin_lock(&sc->debug.samp_lock);
|
||||
RX_SAMP_DBG(jiffies) = jiffies;
|
||||
RX_SAMP_DBG(rssi_ctl0) = rs->rs_rssi_ctl0;
|
||||
RX_SAMP_DBG(rssi_ctl1) = rs->rs_rssi_ctl1;
|
||||
RX_SAMP_DBG(rssi_ctl2) = rs->rs_rssi_ctl2;
|
||||
RX_SAMP_DBG(rssi_ext0) = rs->rs_rssi_ext0;
|
||||
RX_SAMP_DBG(rssi_ext1) = rs->rs_rssi_ext1;
|
||||
RX_SAMP_DBG(rssi_ext2) = rs->rs_rssi_ext2;
|
||||
RX_SAMP_DBG(antenna) = rs->rs_antenna;
|
||||
RX_SAMP_DBG(rssi) = rs->rs_rssi;
|
||||
RX_SAMP_DBG(rate) = rs->rs_rate;
|
||||
RX_SAMP_DBG(is_mybeacon) = rs->is_mybeacon;
|
||||
|
||||
sc->debug.rsidx = (sc->debug.rsidx + 1) % ATH_DBG_MAX_SAMPLES;
|
||||
spin_unlock(&sc->debug.samp_lock);
|
||||
|
||||
#undef RX_STAT_INC
|
||||
#undef RX_PHY_ERR_INC
|
||||
#undef RX_SAMP_DBG
|
||||
}
|
||||
|
||||
static const struct file_operations fops_recv = {
|
||||
|
@ -1272,6 +1313,269 @@ static const struct file_operations fops_modal_eeprom = {
|
|||
.llseek = default_llseek,
|
||||
};
|
||||
|
||||
void ath9k_debug_samp_bb_mac(struct ath_softc *sc)
|
||||
{
|
||||
#define ATH_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].c)
|
||||
struct ath_hw *ah = sc->sc_ah;
|
||||
struct ath_common *common = ath9k_hw_common(ah);
|
||||
unsigned long flags;
|
||||
int i;
|
||||
|
||||
ath9k_ps_wakeup(sc);
|
||||
|
||||
spin_lock_irqsave(&common->cc_lock, flags);
|
||||
ath_hw_cycle_counters_update(common);
|
||||
spin_unlock_irqrestore(&common->cc_lock, flags);
|
||||
|
||||
spin_lock_bh(&sc->debug.samp_lock);
|
||||
|
||||
ATH_SAMP_DBG(cc.cycles) = common->cc_ani.cycles;
|
||||
ATH_SAMP_DBG(cc.rx_busy) = common->cc_ani.rx_busy;
|
||||
ATH_SAMP_DBG(cc.rx_frame) = common->cc_ani.rx_frame;
|
||||
ATH_SAMP_DBG(cc.tx_frame) = common->cc_ani.tx_frame;
|
||||
ATH_SAMP_DBG(noise) = ah->noise;
|
||||
|
||||
REG_WRITE_D(ah, AR_MACMISC,
|
||||
((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
|
||||
(AR_MACMISC_MISC_OBS_BUS_1 <<
|
||||
AR_MACMISC_MISC_OBS_BUS_MSB_S)));
|
||||
|
||||
for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++)
|
||||
ATH_SAMP_DBG(dma_dbg_reg_vals[i]) = REG_READ_D(ah,
|
||||
AR_DMADBG_0 + (i * sizeof(u32)));
|
||||
|
||||
ATH_SAMP_DBG(pcu_obs) = REG_READ_D(ah, AR_OBS_BUS_1);
|
||||
ATH_SAMP_DBG(pcu_cr) = REG_READ_D(ah, AR_CR);
|
||||
|
||||
memcpy(ATH_SAMP_DBG(nfCalHist), sc->caldata.nfCalHist,
|
||||
sizeof(ATH_SAMP_DBG(nfCalHist)));
|
||||
|
||||
sc->debug.sampidx = (sc->debug.sampidx + 1) % ATH_DBG_MAX_SAMPLES;
|
||||
spin_unlock_bh(&sc->debug.samp_lock);
|
||||
ath9k_ps_restore(sc);
|
||||
|
||||
#undef ATH_SAMP_DBG
|
||||
}
|
||||
|
||||
static int open_file_bb_mac_samps(struct inode *inode, struct file *file)
|
||||
{
|
||||
#define ATH_SAMP_DBG(c) bb_mac_samp[sampidx].c
|
||||
struct ath_softc *sc = inode->i_private;
|
||||
struct ath_hw *ah = sc->sc_ah;
|
||||
struct ath_common *common = ath9k_hw_common(ah);
|
||||
struct ieee80211_conf *conf = &common->hw->conf;
|
||||
struct ath_dbg_bb_mac_samp *bb_mac_samp;
|
||||
struct ath9k_nfcal_hist *h;
|
||||
int i, j, qcuOffset = 0, dcuOffset = 0;
|
||||
u32 *qcuBase, *dcuBase, size = 30000, len = 0;
|
||||
u32 sampidx = 0;
|
||||
u8 *buf;
|
||||
u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
|
||||
u8 nread;
|
||||
|
||||
buf = vmalloc(size);
|
||||
if (!buf)
|
||||
return -ENOMEM;
|
||||
bb_mac_samp = vmalloc(sizeof(*bb_mac_samp) * ATH_DBG_MAX_SAMPLES);
|
||||
if (!bb_mac_samp) {
|
||||
vfree(buf);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
spin_lock_bh(&sc->debug.samp_lock);
|
||||
memcpy(bb_mac_samp, sc->debug.bb_mac_samp,
|
||||
sizeof(*bb_mac_samp) * ATH_DBG_MAX_SAMPLES);
|
||||
spin_unlock_bh(&sc->debug.samp_lock);
|
||||
|
||||
len += snprintf(buf + len, size - len,
|
||||
"Raw DMA Debug Dump:\n");
|
||||
len += snprintf(buf + len, size - len, "Sample |\t");
|
||||
for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++)
|
||||
len += snprintf(buf + len, size - len, " DMA Reg%d |\t", i);
|
||||
len += snprintf(buf + len, size - len, "\n");
|
||||
|
||||
for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
|
||||
len += snprintf(buf + len, size - len, "%d\t", sampidx);
|
||||
|
||||
for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++)
|
||||
len += snprintf(buf + len, size - len, " %08x\t",
|
||||
ATH_SAMP_DBG(dma_dbg_reg_vals[i]));
|
||||
len += snprintf(buf + len, size - len, "\n");
|
||||
}
|
||||
len += snprintf(buf + len, size - len, "\n");
|
||||
|
||||
len += snprintf(buf + len, size - len,
|
||||
"Sample Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
|
||||
for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
|
||||
qcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[0]);
|
||||
dcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[4]);
|
||||
|
||||
for (i = 0; i < ATH9K_NUM_QUEUES; i++,
|
||||
qcuOffset += 4, dcuOffset += 5) {
|
||||
if (i == 8) {
|
||||
qcuOffset = 0;
|
||||
qcuBase++;
|
||||
}
|
||||
|
||||
if (i == 6) {
|
||||
dcuOffset = 0;
|
||||
dcuBase++;
|
||||
}
|
||||
if (!sc->debug.stats.txstats[i].queued)
|
||||
continue;
|
||||
|
||||
len += snprintf(buf + len, size - len,
|
||||
"%4d %7d %2x %1x %2x %2x\n",
|
||||
sampidx, i,
|
||||
(*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
|
||||
(*qcuBase & (0x8 << qcuOffset)) >>
|
||||
(qcuOffset + 3),
|
||||
ATH_SAMP_DBG(dma_dbg_reg_vals[2]) &
|
||||
(0x7 << (i * 3)) >> (i * 3),
|
||||
(*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
|
||||
}
|
||||
len += snprintf(buf + len, size - len, "\n");
|
||||
}
|
||||
len += snprintf(buf + len, size - len,
|
||||
"samp qcu_sh qcu_fh qcu_comp dcu_comp dcu_arb dcu_fp "
|
||||
"ch_idle_dur ch_idle_dur_val txfifo_val0 txfifo_val1 "
|
||||
"txfifo_dcu0 txfifo_dcu1 pcu_obs AR_CR\n");
|
||||
|
||||
for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
|
||||
qcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[0]);
|
||||
dcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[4]);
|
||||
|
||||
len += snprintf(buf + len, size - len, "%4d %5x %5x ", sampidx,
|
||||
(ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x003c0000) >> 18,
|
||||
(ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x03c00000) >> 22);
|
||||
len += snprintf(buf + len, size - len, "%7x %8x ",
|
||||
(ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x1c000000) >> 26,
|
||||
(ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x3));
|
||||
len += snprintf(buf + len, size - len, "%7x %7x ",
|
||||
(ATH_SAMP_DBG(dma_dbg_reg_vals[5]) & 0x06000000) >> 25,
|
||||
(ATH_SAMP_DBG(dma_dbg_reg_vals[5]) & 0x38000000) >> 27);
|
||||
len += snprintf(buf + len, size - len, "%7d %12d ",
|
||||
(ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x000003fc) >> 2,
|
||||
(ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00000400) >> 10);
|
||||
len += snprintf(buf + len, size - len, "%12d %12d ",
|
||||
(ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00000800) >> 11,
|
||||
(ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00001000) >> 12);
|
||||
len += snprintf(buf + len, size - len, "%12d %12d ",
|
||||
(ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x0001e000) >> 13,
|
||||
(ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x001e0000) >> 17);
|
||||
len += snprintf(buf + len, size - len, "0x%07x 0x%07x\n",
|
||||
ATH_SAMP_DBG(pcu_obs), ATH_SAMP_DBG(pcu_cr));
|
||||
}
|
||||
|
||||
len += snprintf(buf + len, size - len,
|
||||
"Sample ChNoise Chain privNF #Reading Readings\n");
|
||||
for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
|
||||
h = ATH_SAMP_DBG(nfCalHist);
|
||||
if (!ATH_SAMP_DBG(noise))
|
||||
continue;
|
||||
|
||||
for (i = 0; i < NUM_NF_READINGS; i++) {
|
||||
if (!(chainmask & (1 << i)) ||
|
||||
((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
|
||||
continue;
|
||||
|
||||
nread = AR_PHY_CCA_FILTERWINDOW_LENGTH -
|
||||
h[i].invalidNFcount;
|
||||
len += snprintf(buf + len, size - len,
|
||||
"%4d %5d %4d\t %d\t %d\t",
|
||||
sampidx, ATH_SAMP_DBG(noise),
|
||||
i, h[i].privNF, nread);
|
||||
for (j = 0; j < nread; j++)
|
||||
len += snprintf(buf + len, size - len,
|
||||
" %d", h[i].nfCalBuffer[j]);
|
||||
len += snprintf(buf + len, size - len, "\n");
|
||||
}
|
||||
}
|
||||
len += snprintf(buf + len, size - len, "\nCycle counters:\n"
|
||||
"Sample Total Rxbusy Rxframes Txframes\n");
|
||||
for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
|
||||
if (!ATH_SAMP_DBG(cc.cycles))
|
||||
continue;
|
||||
len += snprintf(buf + len, size - len,
|
||||
"%4d %08x %08x %08x %08x\n",
|
||||
sampidx, ATH_SAMP_DBG(cc.cycles),
|
||||
ATH_SAMP_DBG(cc.rx_busy),
|
||||
ATH_SAMP_DBG(cc.rx_frame),
|
||||
ATH_SAMP_DBG(cc.tx_frame));
|
||||
}
|
||||
|
||||
len += snprintf(buf + len, size - len, "Tx status Dump :\n");
|
||||
len += snprintf(buf + len, size - len,
|
||||
"Sample rssi:- ctl0 ctl1 ctl2 ext0 ext1 ext2 comb "
|
||||
"isok rts_fail data_fail rate tid qid tx_before(ms)\n");
|
||||
for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
|
||||
for (i = 0; i < ATH_DBG_MAX_SAMPLES; i++) {
|
||||
if (!ATH_SAMP_DBG(ts[i].jiffies))
|
||||
continue;
|
||||
len += snprintf(buf + len, size - len, "%4d \t"
|
||||
"%8d %4d %4d %4d %4d %4d %4d %4d %4d "
|
||||
"%4d %4d %2d %2d %d\n",
|
||||
sampidx,
|
||||
ATH_SAMP_DBG(ts[i].rssi_ctl0),
|
||||
ATH_SAMP_DBG(ts[i].rssi_ctl1),
|
||||
ATH_SAMP_DBG(ts[i].rssi_ctl2),
|
||||
ATH_SAMP_DBG(ts[i].rssi_ext0),
|
||||
ATH_SAMP_DBG(ts[i].rssi_ext1),
|
||||
ATH_SAMP_DBG(ts[i].rssi_ext2),
|
||||
ATH_SAMP_DBG(ts[i].rssi),
|
||||
ATH_SAMP_DBG(ts[i].isok),
|
||||
ATH_SAMP_DBG(ts[i].rts_fail_cnt),
|
||||
ATH_SAMP_DBG(ts[i].data_fail_cnt),
|
||||
ATH_SAMP_DBG(ts[i].rateindex),
|
||||
ATH_SAMP_DBG(ts[i].tid),
|
||||
ATH_SAMP_DBG(ts[i].qid),
|
||||
jiffies_to_msecs(jiffies -
|
||||
ATH_SAMP_DBG(ts[i].jiffies)));
|
||||
}
|
||||
}
|
||||
|
||||
len += snprintf(buf + len, size - len, "Rx status Dump :\n");
|
||||
len += snprintf(buf + len, size - len, "Sample rssi:- ctl0 ctl1 ctl2 "
|
||||
"ext0 ext1 ext2 comb beacon ant rate rx_before(ms)\n");
|
||||
for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
|
||||
for (i = 0; i < ATH_DBG_MAX_SAMPLES; i++) {
|
||||
if (!ATH_SAMP_DBG(rs[i].jiffies))
|
||||
continue;
|
||||
len += snprintf(buf + len, size - len, "%4d \t"
|
||||
"%8d %4d %4d %4d %4d %4d %4d %s %4d %02x %d\n",
|
||||
sampidx,
|
||||
ATH_SAMP_DBG(rs[i].rssi_ctl0),
|
||||
ATH_SAMP_DBG(rs[i].rssi_ctl1),
|
||||
ATH_SAMP_DBG(rs[i].rssi_ctl2),
|
||||
ATH_SAMP_DBG(rs[i].rssi_ext0),
|
||||
ATH_SAMP_DBG(rs[i].rssi_ext1),
|
||||
ATH_SAMP_DBG(rs[i].rssi_ext2),
|
||||
ATH_SAMP_DBG(rs[i].rssi),
|
||||
ATH_SAMP_DBG(rs[i].is_mybeacon) ?
|
||||
"True" : "False",
|
||||
ATH_SAMP_DBG(rs[i].antenna),
|
||||
ATH_SAMP_DBG(rs[i].rate),
|
||||
jiffies_to_msecs(jiffies -
|
||||
ATH_SAMP_DBG(rs[i].jiffies)));
|
||||
}
|
||||
}
|
||||
|
||||
vfree(bb_mac_samp);
|
||||
file->private_data = buf;
|
||||
|
||||
return 0;
|
||||
#undef ATH_SAMP_DBG
|
||||
}
|
||||
|
||||
static const struct file_operations fops_samps = {
|
||||
.open = open_file_bb_mac_samps,
|
||||
.read = ath9k_debugfs_read_buf,
|
||||
.release = ath9k_debugfs_release_buf,
|
||||
.owner = THIS_MODULE,
|
||||
.llseek = default_llseek,
|
||||
};
|
||||
|
||||
|
||||
int ath9k_init_debug(struct ath_hw *ah)
|
||||
{
|
||||
struct ath_common *common = ath9k_hw_common(ah);
|
||||
|
@ -1321,6 +1625,8 @@ int ath9k_init_debug(struct ath_hw *ah)
|
|||
&fops_base_eeprom);
|
||||
debugfs_create_file("modal_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc,
|
||||
&fops_modal_eeprom);
|
||||
debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc,
|
||||
&fops_samps);
|
||||
|
||||
debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
|
||||
sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
|
||||
|
@ -1329,5 +1635,9 @@ int ath9k_init_debug(struct ath_hw *ah)
|
|||
sc->debug.debugfs_phy, &sc->sc_ah->gpio_val);
|
||||
|
||||
sc->debug.regidx = 0;
|
||||
memset(&sc->debug.bb_mac_samp, 0, sizeof(sc->debug.bb_mac_samp));
|
||||
sc->debug.sampidx = 0;
|
||||
sc->debug.tsidx = 0;
|
||||
sc->debug.rsidx = 0;
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -177,14 +177,57 @@ struct ath_stats {
|
|||
struct ath_rx_stats rxstats;
|
||||
};
|
||||
|
||||
#define ATH_DBG_MAX_SAMPLES 10
|
||||
struct ath_dbg_bb_mac_samp {
|
||||
u32 dma_dbg_reg_vals[ATH9K_NUM_DMA_DEBUG_REGS];
|
||||
u32 pcu_obs, pcu_cr, noise;
|
||||
struct {
|
||||
u64 jiffies;
|
||||
int8_t rssi_ctl0;
|
||||
int8_t rssi_ctl1;
|
||||
int8_t rssi_ctl2;
|
||||
int8_t rssi_ext0;
|
||||
int8_t rssi_ext1;
|
||||
int8_t rssi_ext2;
|
||||
int8_t rssi;
|
||||
bool isok;
|
||||
u8 rts_fail_cnt;
|
||||
u8 data_fail_cnt;
|
||||
u8 rateindex;
|
||||
u8 qid;
|
||||
u8 tid;
|
||||
} ts[ATH_DBG_MAX_SAMPLES];
|
||||
struct {
|
||||
u64 jiffies;
|
||||
int8_t rssi_ctl0;
|
||||
int8_t rssi_ctl1;
|
||||
int8_t rssi_ctl2;
|
||||
int8_t rssi_ext0;
|
||||
int8_t rssi_ext1;
|
||||
int8_t rssi_ext2;
|
||||
int8_t rssi;
|
||||
bool is_mybeacon;
|
||||
u8 antenna;
|
||||
u8 rate;
|
||||
} rs[ATH_DBG_MAX_SAMPLES];
|
||||
struct ath_cycle_counters cc;
|
||||
struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
|
||||
};
|
||||
|
||||
struct ath9k_debug {
|
||||
struct dentry *debugfs_phy;
|
||||
u32 regidx;
|
||||
struct ath_stats stats;
|
||||
spinlock_t samp_lock;
|
||||
struct ath_dbg_bb_mac_samp bb_mac_samp[ATH_DBG_MAX_SAMPLES];
|
||||
u8 sampidx;
|
||||
u8 tsidx;
|
||||
u8 rsidx;
|
||||
};
|
||||
|
||||
int ath9k_init_debug(struct ath_hw *ah);
|
||||
|
||||
void ath9k_debug_samp_bb_mac(struct ath_softc *sc);
|
||||
void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
|
||||
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
|
||||
struct ath_tx_status *ts, struct ath_txq *txq);
|
||||
|
@ -197,6 +240,10 @@ static inline int ath9k_init_debug(struct ath_hw *ah)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline void ath9k_debug_samp_bb_mac(struct ath_softc *sc)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
|
||||
enum ath9k_int status)
|
||||
{
|
||||
|
|
|
@ -1300,6 +1300,7 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
|
|||
if (priv->op_flags & OP_INVALID) {
|
||||
ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_ANY,
|
||||
"Unable to configure filter on invalid state\n");
|
||||
mutex_unlock(&priv->mutex);
|
||||
return;
|
||||
}
|
||||
ath9k_htc_ps_wakeup(priv);
|
||||
|
|
|
@ -440,7 +440,7 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
|
|||
if (AR_SREV_9100(ah))
|
||||
ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX;
|
||||
ah->enable_32kHz_clock = DONT_USE_32KHZ;
|
||||
ah->slottime = 20;
|
||||
ah->slottime = ATH9K_SLOT_TIME_9;
|
||||
ah->globaltxtimeout = (u32) -1;
|
||||
ah->power_mode = ATH9K_PM_UNDEFINED;
|
||||
}
|
||||
|
@ -997,8 +997,14 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
|
|||
slottime = 21;
|
||||
sifstime = 64;
|
||||
} else {
|
||||
eifs = REG_READ(ah, AR_D_GBL_IFS_EIFS)/common->clockrate;
|
||||
reg = REG_READ(ah, AR_USEC);
|
||||
if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) {
|
||||
eifs = AR_D_GBL_IFS_EIFS_ASYNC_FIFO;
|
||||
reg = AR_USEC_ASYNC_FIFO;
|
||||
} else {
|
||||
eifs = REG_READ(ah, AR_D_GBL_IFS_EIFS)/
|
||||
common->clockrate;
|
||||
reg = REG_READ(ah, AR_USEC);
|
||||
}
|
||||
rx_lat = MS(reg, AR_USEC_RX_LAT);
|
||||
tx_lat = MS(reg, AR_USEC_TX_LAT);
|
||||
|
||||
|
@ -2754,6 +2760,7 @@ static struct {
|
|||
{ AR_SREV_VERSION_9271, "9271" },
|
||||
{ AR_SREV_VERSION_9300, "9300" },
|
||||
{ AR_SREV_VERSION_9330, "9330" },
|
||||
{ AR_SREV_VERSION_9340, "9340" },
|
||||
{ AR_SREV_VERSION_9485, "9485" },
|
||||
};
|
||||
|
||||
|
|
|
@ -623,7 +623,7 @@ struct ath_hw_ops {
|
|||
struct ath_tx_status *ts);
|
||||
void (*set11n_txdesc)(struct ath_hw *ah, void *ds,
|
||||
u32 pktLen, enum ath9k_pkt_type type,
|
||||
u32 txPower, u32 keyIx,
|
||||
u32 txPower, u8 keyIx,
|
||||
enum ath9k_key_type keyType,
|
||||
u32 flags);
|
||||
void (*set11n_ratescenario)(struct ath_hw *ah, void *ds,
|
||||
|
|
|
@ -572,6 +572,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
|
|||
mutex_init(&sc->mutex);
|
||||
#ifdef CONFIG_ATH9K_DEBUGFS
|
||||
spin_lock_init(&sc->nodes_lock);
|
||||
spin_lock_init(&sc->debug.samp_lock);
|
||||
INIT_LIST_HEAD(&sc->nodes);
|
||||
#endif
|
||||
tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
|
||||
|
|
|
@ -146,6 +146,7 @@ struct ath_rx_status {
|
|||
u8 rs_moreaggr;
|
||||
u8 rs_num_delims;
|
||||
u8 rs_flags;
|
||||
bool is_mybeacon;
|
||||
u32 evm0;
|
||||
u32 evm1;
|
||||
u32 evm2;
|
||||
|
@ -194,7 +195,7 @@ struct ath_htc_rx_status {
|
|||
#define ATH9K_RX_DECRYPT_BUSY 0x40
|
||||
|
||||
#define ATH9K_RXKEYIX_INVALID ((u8)-1)
|
||||
#define ATH9K_TXKEYIX_INVALID ((u32)-1)
|
||||
#define ATH9K_TXKEYIX_INVALID ((u8)-1)
|
||||
|
||||
enum ath9k_phyerr {
|
||||
ATH9K_PHYERR_UNDERRUN = 0, /* Transmit underrun */
|
||||
|
|
|
@ -546,6 +546,7 @@ set_timer:
|
|||
* The interval must be the shortest necessary to satisfy ANI,
|
||||
* short calibration and long calibration.
|
||||
*/
|
||||
ath9k_debug_samp_bb_mac(sc);
|
||||
cal_interval = ATH_LONG_CALINTERVAL;
|
||||
if (sc->sc_ah->config.enable_ani)
|
||||
cal_interval = min(cal_interval,
|
||||
|
@ -978,6 +979,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
|
|||
|
||||
sc->hw_busy_count = 0;
|
||||
|
||||
ath9k_debug_samp_bb_mac(sc);
|
||||
/* Stop ANI */
|
||||
|
||||
del_timer_sync(&common->ani.timer);
|
||||
|
|
|
@ -937,7 +937,7 @@ static int ath9k_process_rate(struct ath_common *common,
|
|||
* No valid hardware bitrate found -- we should not get here
|
||||
* because hardware has already validated this frame as OK.
|
||||
*/
|
||||
ath_dbg(common, ATH_DBG_XMIT,
|
||||
ath_dbg(common, ATH_DBG_ANY,
|
||||
"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
|
||||
rx_stats->rs_rate);
|
||||
|
||||
|
@ -952,23 +952,12 @@ static void ath9k_process_rssi(struct ath_common *common,
|
|||
struct ath_softc *sc = hw->priv;
|
||||
struct ath_hw *ah = common->ah;
|
||||
int last_rssi;
|
||||
__le16 fc;
|
||||
|
||||
if ((ah->opmode != NL80211_IFTYPE_STATION) &&
|
||||
(ah->opmode != NL80211_IFTYPE_ADHOC))
|
||||
if (!rx_stats->is_mybeacon ||
|
||||
((ah->opmode != NL80211_IFTYPE_STATION) &&
|
||||
(ah->opmode != NL80211_IFTYPE_ADHOC)))
|
||||
return;
|
||||
|
||||
fc = hdr->frame_control;
|
||||
if (!ieee80211_is_beacon(fc) ||
|
||||
compare_ether_addr(hdr->addr3, common->curbssid)) {
|
||||
/* TODO: This doesn't work well if you have stations
|
||||
* associated to two different APs because curbssid
|
||||
* is just the last AP that any of the stations associated
|
||||
* with.
|
||||
*/
|
||||
return;
|
||||
}
|
||||
|
||||
if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
|
||||
ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
|
||||
|
||||
|
@ -1838,6 +1827,11 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
|
|||
|
||||
hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
|
||||
rxs = IEEE80211_SKB_RXCB(hdr_skb);
|
||||
if (ieee80211_is_beacon(hdr->frame_control) &&
|
||||
!compare_ether_addr(hdr->addr3, common->curbssid))
|
||||
rs.is_mybeacon = true;
|
||||
else
|
||||
rs.is_mybeacon = false;
|
||||
|
||||
ath_debug_stat_rx(sc, &rs);
|
||||
|
||||
|
|
|
@ -619,6 +619,7 @@
|
|||
#define AR_D_GBL_IFS_EIFS 0x10b0
|
||||
#define AR_D_GBL_IFS_EIFS_M 0x0000FFFF
|
||||
#define AR_D_GBL_IFS_EIFS_RESV0 0xFFFF0000
|
||||
#define AR_D_GBL_IFS_EIFS_ASYNC_FIFO 363
|
||||
|
||||
#define AR_D_GBL_IFS_MISC 0x10f0
|
||||
#define AR_D_GBL_IFS_MISC_LFSR_SLICE_SEL 0x00000007
|
||||
|
@ -1503,6 +1504,7 @@ enum {
|
|||
#define AR_USEC_TX_LAT_S 14
|
||||
#define AR_USEC_RX_LAT 0x1F800000
|
||||
#define AR_USEC_RX_LAT_S 23
|
||||
#define AR_USEC_ASYNC_FIFO 0x12E00074
|
||||
|
||||
#define AR_RESET_TSF 0x8020
|
||||
#define AR_RESET_TSF_ONCE 0x01000000
|
||||
|
|
|
@ -48,8 +48,9 @@ static u16 bits_per_symbol[][2] = {
|
|||
#define IS_HT_RATE(_rate) ((_rate) & 0x80)
|
||||
|
||||
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
|
||||
struct ath_atx_tid *tid,
|
||||
struct list_head *bf_head);
|
||||
struct ath_atx_tid *tid, struct sk_buff *skb);
|
||||
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
|
||||
int tx_flags, struct ath_txq *txq);
|
||||
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
|
||||
struct ath_txq *txq, struct list_head *bf_q,
|
||||
struct ath_tx_status *ts, int txok, int sendbar);
|
||||
|
@ -61,6 +62,10 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
|
|||
int txok, bool update_rc);
|
||||
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
|
||||
int seqno);
|
||||
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
|
||||
struct ath_txq *txq,
|
||||
struct ath_atx_tid *tid,
|
||||
struct sk_buff *skb);
|
||||
|
||||
enum {
|
||||
MCS_HT20,
|
||||
|
@ -129,7 +134,7 @@ static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
|
|||
spin_lock_bh(&txq->axq_lock);
|
||||
tid->paused = false;
|
||||
|
||||
if (list_empty(&tid->buf_q))
|
||||
if (skb_queue_empty(&tid->buf_q))
|
||||
goto unlock;
|
||||
|
||||
ath_tx_queue_tid(txq, tid);
|
||||
|
@ -149,6 +154,7 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
|
|||
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
|
||||
{
|
||||
struct ath_txq *txq = tid->ac->txq;
|
||||
struct sk_buff *skb;
|
||||
struct ath_buf *bf;
|
||||
struct list_head bf_head;
|
||||
struct ath_tx_status ts;
|
||||
|
@ -159,17 +165,17 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
|
|||
memset(&ts, 0, sizeof(ts));
|
||||
spin_lock_bh(&txq->axq_lock);
|
||||
|
||||
while (!list_empty(&tid->buf_q)) {
|
||||
bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
|
||||
list_move_tail(&bf->list, &bf_head);
|
||||
while ((skb = __skb_dequeue(&tid->buf_q))) {
|
||||
fi = get_frame_info(skb);
|
||||
bf = fi->bf;
|
||||
|
||||
spin_unlock_bh(&txq->axq_lock);
|
||||
fi = get_frame_info(bf->bf_mpdu);
|
||||
if (fi->retries) {
|
||||
ath_tx_update_baw(sc, tid, fi->seqno);
|
||||
if (bf && fi->retries) {
|
||||
list_add_tail(&bf->list, &bf_head);
|
||||
ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
|
||||
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
|
||||
} else {
|
||||
ath_tx_send_normal(sc, txq, NULL, &bf_head);
|
||||
ath_tx_send_normal(sc, txq, NULL, skb);
|
||||
}
|
||||
spin_lock_bh(&txq->axq_lock);
|
||||
}
|
||||
|
@ -219,6 +225,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
|
|||
struct ath_atx_tid *tid)
|
||||
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
struct ath_buf *bf;
|
||||
struct list_head bf_head;
|
||||
struct ath_tx_status ts;
|
||||
|
@ -227,16 +234,21 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
|
|||
memset(&ts, 0, sizeof(ts));
|
||||
INIT_LIST_HEAD(&bf_head);
|
||||
|
||||
for (;;) {
|
||||
if (list_empty(&tid->buf_q))
|
||||
break;
|
||||
while ((skb = __skb_dequeue(&tid->buf_q))) {
|
||||
fi = get_frame_info(skb);
|
||||
bf = fi->bf;
|
||||
|
||||
bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
|
||||
list_move_tail(&bf->list, &bf_head);
|
||||
if (!bf) {
|
||||
spin_unlock(&txq->axq_lock);
|
||||
ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
|
||||
spin_lock(&txq->axq_lock);
|
||||
continue;
|
||||
}
|
||||
|
||||
list_add_tail(&bf->list, &bf_head);
|
||||
|
||||
fi = get_frame_info(bf->bf_mpdu);
|
||||
if (fi->retries)
|
||||
ath_tx_update_baw(sc, tid, fi->seqno);
|
||||
ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
|
||||
|
||||
spin_unlock(&txq->axq_lock);
|
||||
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
|
||||
|
@ -326,7 +338,7 @@ static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
|
|||
|
||||
while (bf) {
|
||||
fi = get_frame_info(bf->bf_mpdu);
|
||||
ba_index = ATH_BA_INDEX(seq_st, fi->seqno);
|
||||
ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);
|
||||
|
||||
(*nframes)++;
|
||||
if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
|
||||
|
@ -349,7 +361,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
|
|||
struct ieee80211_tx_info *tx_info;
|
||||
struct ath_atx_tid *tid = NULL;
|
||||
struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
|
||||
struct list_head bf_head, bf_pending;
|
||||
struct list_head bf_head;
|
||||
struct sk_buff_head bf_pending;
|
||||
u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
|
||||
u32 ba[WME_BA_BMP_SIZE >> 5];
|
||||
int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
|
||||
|
@ -422,11 +435,12 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
|
|||
}
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&bf_pending);
|
||||
INIT_LIST_HEAD(&bf_head);
|
||||
__skb_queue_head_init(&bf_pending);
|
||||
|
||||
ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
|
||||
while (bf) {
|
||||
u16 seqno = bf->bf_state.seqno;
|
||||
|
||||
txfail = txpending = sendbar = 0;
|
||||
bf_next = bf->bf_next;
|
||||
|
||||
|
@ -434,7 +448,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
|
|||
tx_info = IEEE80211_SKB_CB(skb);
|
||||
fi = get_frame_info(skb);
|
||||
|
||||
if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
|
||||
if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
|
||||
/* transmit completion, subframe is
|
||||
* acked by block ack */
|
||||
acked_cnt++;
|
||||
|
@ -467,10 +481,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
|
|||
* Make sure the last desc is reclaimed if it
|
||||
* not a holding desc.
|
||||
*/
|
||||
if (!bf_last->bf_stale || bf_next != NULL)
|
||||
INIT_LIST_HEAD(&bf_head);
|
||||
if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
|
||||
bf_next != NULL || !bf_last->bf_stale)
|
||||
list_move_tail(&bf->list, &bf_head);
|
||||
else
|
||||
INIT_LIST_HEAD(&bf_head);
|
||||
|
||||
if (!txpending || (tid->state & AGGR_CLEANUP)) {
|
||||
/*
|
||||
|
@ -478,7 +492,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
|
|||
* block-ack window
|
||||
*/
|
||||
spin_lock_bh(&txq->axq_lock);
|
||||
ath_tx_update_baw(sc, tid, fi->seqno);
|
||||
ath_tx_update_baw(sc, tid, seqno);
|
||||
spin_unlock_bh(&txq->axq_lock);
|
||||
|
||||
if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
|
||||
|
@ -506,7 +520,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
|
|||
*/
|
||||
if (!tbf) {
|
||||
spin_lock_bh(&txq->axq_lock);
|
||||
ath_tx_update_baw(sc, tid, fi->seqno);
|
||||
ath_tx_update_baw(sc, tid, seqno);
|
||||
spin_unlock_bh(&txq->axq_lock);
|
||||
|
||||
bf->bf_state.bf_type |=
|
||||
|
@ -521,7 +535,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
|
|||
|
||||
ath9k_hw_cleartxdesc(sc->sc_ah,
|
||||
tbf->bf_desc);
|
||||
list_add_tail(&tbf->list, &bf_head);
|
||||
fi->bf = tbf;
|
||||
} else {
|
||||
/*
|
||||
* Clear descriptor status words for
|
||||
|
@ -536,21 +550,21 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
|
|||
* Put this buffer to the temporary pending
|
||||
* queue to retain ordering
|
||||
*/
|
||||
list_splice_tail_init(&bf_head, &bf_pending);
|
||||
__skb_queue_tail(&bf_pending, skb);
|
||||
}
|
||||
|
||||
bf = bf_next;
|
||||
}
|
||||
|
||||
/* prepend un-acked frames to the beginning of the pending frame queue */
|
||||
if (!list_empty(&bf_pending)) {
|
||||
if (!skb_queue_empty(&bf_pending)) {
|
||||
if (an->sleeping)
|
||||
ieee80211_sta_set_tim(sta);
|
||||
|
||||
spin_lock_bh(&txq->axq_lock);
|
||||
if (clear_filter)
|
||||
tid->ac->clear_ps_filter = true;
|
||||
list_splice(&bf_pending, &tid->buf_q);
|
||||
skb_queue_splice(&bf_pending, &tid->buf_q);
|
||||
if (!an->sleeping)
|
||||
ath_tx_queue_tid(txq, tid);
|
||||
spin_unlock_bh(&txq->axq_lock);
|
||||
|
@ -582,7 +596,10 @@ static bool ath_lookup_legacy(struct ath_buf *bf)
|
|||
tx_info = IEEE80211_SKB_CB(skb);
|
||||
rates = tx_info->control.rates;
|
||||
|
||||
for (i = 3; i >= 0; i--) {
|
||||
for (i = 0; i < 4; i++) {
|
||||
if (!rates[i].count || rates[i].idx < 0)
|
||||
break;
|
||||
|
||||
if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
|
||||
return true;
|
||||
}
|
||||
|
@ -740,22 +757,33 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
|
|||
int *aggr_len)
|
||||
{
|
||||
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
|
||||
struct ath_buf *bf, *bf_first, *bf_prev = NULL;
|
||||
struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
|
||||
int rl = 0, nframes = 0, ndelim, prev_al = 0;
|
||||
u16 aggr_limit = 0, al = 0, bpad = 0,
|
||||
al_delta, h_baw = tid->baw_size / 2;
|
||||
enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
|
||||
struct ieee80211_tx_info *tx_info;
|
||||
struct ath_frame_info *fi;
|
||||
|
||||
bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
|
||||
struct sk_buff *skb;
|
||||
u16 seqno;
|
||||
|
||||
do {
|
||||
bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
|
||||
fi = get_frame_info(bf->bf_mpdu);
|
||||
skb = skb_peek(&tid->buf_q);
|
||||
fi = get_frame_info(skb);
|
||||
bf = fi->bf;
|
||||
if (!fi->bf)
|
||||
bf = ath_tx_setup_buffer(sc, txq, tid, skb);
|
||||
|
||||
if (!bf)
|
||||
continue;
|
||||
|
||||
bf->bf_state.bf_type |= BUF_AMPDU;
|
||||
seqno = bf->bf_state.seqno;
|
||||
if (!bf_first)
|
||||
bf_first = bf;
|
||||
|
||||
/* do not step over block-ack window */
|
||||
if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
|
||||
if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
|
||||
status = ATH_AGGR_BAW_CLOSED;
|
||||
break;
|
||||
}
|
||||
|
@ -803,9 +831,11 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
|
|||
|
||||
/* link buffers of this frame to the aggregate */
|
||||
if (!fi->retries)
|
||||
ath_tx_addto_baw(sc, tid, fi->seqno);
|
||||
ath_tx_addto_baw(sc, tid, seqno);
|
||||
ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
|
||||
list_move_tail(&bf->list, bf_q);
|
||||
|
||||
__skb_unlink(skb, &tid->buf_q);
|
||||
list_add_tail(&bf->list, bf_q);
|
||||
if (bf_prev) {
|
||||
bf_prev->bf_next = bf;
|
||||
ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
|
||||
|
@ -813,7 +843,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
|
|||
}
|
||||
bf_prev = bf;
|
||||
|
||||
} while (!list_empty(&tid->buf_q));
|
||||
} while (!skb_queue_empty(&tid->buf_q));
|
||||
|
||||
*aggr_len = al;
|
||||
|
||||
|
@ -831,7 +861,7 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
|
|||
int aggr_len;
|
||||
|
||||
do {
|
||||
if (list_empty(&tid->buf_q))
|
||||
if (skb_queue_empty(&tid->buf_q))
|
||||
return;
|
||||
|
||||
INIT_LIST_HEAD(&bf_q);
|
||||
|
@ -952,7 +982,7 @@ bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
|
|||
|
||||
spin_lock_bh(&txq->axq_lock);
|
||||
|
||||
if (!list_empty(&tid->buf_q))
|
||||
if (!skb_queue_empty(&tid->buf_q))
|
||||
buffered = true;
|
||||
|
||||
tid->sched = false;
|
||||
|
@ -985,7 +1015,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
|
|||
spin_lock_bh(&txq->axq_lock);
|
||||
ac->clear_ps_filter = true;
|
||||
|
||||
if (!list_empty(&tid->buf_q) && !tid->paused) {
|
||||
if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
|
||||
ath_tx_queue_tid(txq, tid);
|
||||
ath_txq_schedule(sc, txq);
|
||||
}
|
||||
|
@ -1329,7 +1359,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
|
|||
* add tid to round-robin queue if more frames
|
||||
* are pending for the tid
|
||||
*/
|
||||
if (!list_empty(&tid->buf_q))
|
||||
if (!skb_queue_empty(&tid->buf_q))
|
||||
ath_tx_queue_tid(txq, tid);
|
||||
|
||||
if (tid == last_tid ||
|
||||
|
@ -1421,12 +1451,11 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
|
|||
}
|
||||
|
||||
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
|
||||
struct ath_buf *bf, struct ath_tx_control *txctl)
|
||||
struct sk_buff *skb, struct ath_tx_control *txctl)
|
||||
{
|
||||
struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
|
||||
struct ath_frame_info *fi = get_frame_info(skb);
|
||||
struct list_head bf_head;
|
||||
|
||||
bf->bf_state.bf_type |= BUF_AMPDU;
|
||||
struct ath_buf *bf;
|
||||
|
||||
/*
|
||||
* Do not queue to h/w when any of the following conditions is true:
|
||||
|
@ -1435,26 +1464,30 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
|
|||
* - seqno is not within block-ack window
|
||||
* - h/w queue depth exceeds low water mark
|
||||
*/
|
||||
if (!list_empty(&tid->buf_q) || tid->paused ||
|
||||
!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
|
||||
if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
|
||||
!BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
|
||||
txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
|
||||
/*
|
||||
* Add this frame to software queue for scheduling later
|
||||
* for aggregation.
|
||||
*/
|
||||
TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
|
||||
list_add_tail(&bf->list, &tid->buf_q);
|
||||
__skb_queue_tail(&tid->buf_q, skb);
|
||||
if (!txctl->an || !txctl->an->sleeping)
|
||||
ath_tx_queue_tid(txctl->txq, tid);
|
||||
return;
|
||||
}
|
||||
|
||||
bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
|
||||
if (!bf)
|
||||
return;
|
||||
|
||||
bf->bf_state.bf_type |= BUF_AMPDU;
|
||||
INIT_LIST_HEAD(&bf_head);
|
||||
list_add(&bf->list, &bf_head);
|
||||
|
||||
/* Add sub-frame to BAW */
|
||||
if (!fi->retries)
|
||||
ath_tx_addto_baw(sc, tid, fi->seqno);
|
||||
ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
|
||||
|
||||
/* Queue to h/w without aggregation */
|
||||
TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
|
||||
|
@ -1464,13 +1497,21 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
|
|||
}
|
||||
|
||||
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
|
||||
struct ath_atx_tid *tid,
|
||||
struct list_head *bf_head)
|
||||
struct ath_atx_tid *tid, struct sk_buff *skb)
|
||||
{
|
||||
struct ath_frame_info *fi;
|
||||
struct ath_frame_info *fi = get_frame_info(skb);
|
||||
struct list_head bf_head;
|
||||
struct ath_buf *bf;
|
||||
|
||||
bf = list_first_entry(bf_head, struct ath_buf, list);
|
||||
bf = fi->bf;
|
||||
if (!bf)
|
||||
bf = ath_tx_setup_buffer(sc, txq, tid, skb);
|
||||
|
||||
if (!bf)
|
||||
return;
|
||||
|
||||
INIT_LIST_HEAD(&bf_head);
|
||||
list_add_tail(&bf->list, &bf_head);
|
||||
bf->bf_state.bf_type &= ~BUF_AMPDU;
|
||||
|
||||
/* update starting sequence number for subsequent ADDBA request */
|
||||
|
@ -1478,9 +1519,8 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
|
|||
INCR(tid->seq_start, IEEE80211_SEQ_MAX);
|
||||
|
||||
bf->bf_lastbf = bf;
|
||||
fi = get_frame_info(bf->bf_mpdu);
|
||||
ath_buf_set_rate(sc, bf, fi->framelen);
|
||||
ath_tx_txqaddbuf(sc, txq, bf_head, false);
|
||||
ath_tx_txqaddbuf(sc, txq, &bf_head, false);
|
||||
TX_STAT_INC(txq->axq_qnum, queued);
|
||||
}
|
||||
|
||||
|
@ -1510,39 +1550,19 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
|
|||
static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
|
||||
int framelen)
|
||||
{
|
||||
struct ath_softc *sc = hw->priv;
|
||||
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
|
||||
struct ieee80211_sta *sta = tx_info->control.sta;
|
||||
struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
|
||||
struct ieee80211_hdr *hdr;
|
||||
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
||||
struct ath_frame_info *fi = get_frame_info(skb);
|
||||
struct ath_node *an = NULL;
|
||||
struct ath_atx_tid *tid;
|
||||
enum ath9k_key_type keytype;
|
||||
u16 seqno = 0;
|
||||
u8 tidno;
|
||||
|
||||
keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
|
||||
|
||||
if (sta)
|
||||
an = (struct ath_node *) sta->drv_priv;
|
||||
|
||||
hdr = (struct ieee80211_hdr *)skb->data;
|
||||
if (an && ieee80211_is_data_qos(hdr->frame_control) &&
|
||||
conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
|
||||
|
||||
tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
|
||||
|
||||
/*
|
||||
* Override seqno set by upper layer with the one
|
||||
* in tx aggregation state.
|
||||
*/
|
||||
tid = ATH_AN_2_TID(an, tidno);
|
||||
seqno = tid->seq_next;
|
||||
hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
|
||||
INCR(tid->seq_next, IEEE80211_SEQ_MAX);
|
||||
}
|
||||
|
||||
memset(fi, 0, sizeof(*fi));
|
||||
if (hw_key)
|
||||
fi->keyix = hw_key->hw_key_idx;
|
||||
|
@ -1552,7 +1572,6 @@ static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
|
|||
fi->keyix = ATH9K_TXKEYIX_INVALID;
|
||||
fi->keytype = keytype;
|
||||
fi->framelen = framelen;
|
||||
fi->seqno = seqno;
|
||||
}
|
||||
|
||||
static int setup_tx_flags(struct sk_buff *skb)
|
||||
|
@ -1724,26 +1743,39 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
|
|||
|
||||
}
|
||||
|
||||
static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
|
||||
/*
|
||||
* Assign a descriptor (and sequence number if necessary,
|
||||
* and map buffer for DMA. Frees skb on error
|
||||
*/
|
||||
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
|
||||
struct ath_txq *txq,
|
||||
struct ath_atx_tid *tid,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct ath_softc *sc = hw->priv;
|
||||
struct ath_hw *ah = sc->sc_ah;
|
||||
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
|
||||
struct ath_frame_info *fi = get_frame_info(skb);
|
||||
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
||||
struct ath_buf *bf;
|
||||
struct ath_desc *ds;
|
||||
int frm_type;
|
||||
u16 seqno;
|
||||
|
||||
bf = ath_tx_get_buffer(sc);
|
||||
if (!bf) {
|
||||
ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
|
||||
return NULL;
|
||||
goto error;
|
||||
}
|
||||
|
||||
ATH_TXBUF_RESET(bf);
|
||||
|
||||
if (tid) {
|
||||
seqno = tid->seq_next;
|
||||
hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
|
||||
INCR(tid->seq_next, IEEE80211_SEQ_MAX);
|
||||
bf->bf_state.seqno = seqno;
|
||||
}
|
||||
|
||||
bf->bf_flags = setup_tx_flags(skb);
|
||||
bf->bf_mpdu = skb;
|
||||
|
||||
|
@ -1755,7 +1787,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
|
|||
ath_err(ath9k_hw_common(sc->sc_ah),
|
||||
"dma_mapping_error() on TX\n");
|
||||
ath_tx_return_buffer(sc, bf);
|
||||
return NULL;
|
||||
goto error;
|
||||
}
|
||||
|
||||
frm_type = get_hw_packet_type(skb);
|
||||
|
@ -1774,19 +1806,23 @@ static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
|
|||
bf->bf_buf_addr,
|
||||
txq->axq_qnum);
|
||||
|
||||
fi->bf = bf;
|
||||
|
||||
return bf;
|
||||
|
||||
error:
|
||||
dev_kfree_skb_any(skb);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* FIXME: tx power */
|
||||
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
|
||||
static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
|
||||
struct ath_tx_control *txctl)
|
||||
{
|
||||
struct sk_buff *skb = bf->bf_mpdu;
|
||||
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
|
||||
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
||||
struct list_head bf_head;
|
||||
struct ath_atx_tid *tid = NULL;
|
||||
struct ath_buf *bf;
|
||||
u8 tidno;
|
||||
|
||||
spin_lock_bh(&txctl->txq->axq_lock);
|
||||
|
@ -1804,10 +1840,11 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
|
|||
* Try aggregation if it's a unicast data frame
|
||||
* and the destination is HT capable.
|
||||
*/
|
||||
ath_tx_send_ampdu(sc, tid, bf, txctl);
|
||||
ath_tx_send_ampdu(sc, tid, skb, txctl);
|
||||
} else {
|
||||
INIT_LIST_HEAD(&bf_head);
|
||||
list_add_tail(&bf->list, &bf_head);
|
||||
bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
|
||||
if (!bf)
|
||||
goto out;
|
||||
|
||||
bf->bf_state.bfs_paprd = txctl->paprd;
|
||||
|
||||
|
@ -1821,9 +1858,10 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
|
|||
if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
|
||||
ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
|
||||
|
||||
ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
|
||||
ath_tx_send_normal(sc, txctl->txq, tid, skb);
|
||||
}
|
||||
|
||||
out:
|
||||
spin_unlock_bh(&txctl->txq->axq_lock);
|
||||
}
|
||||
|
||||
|
@ -1837,7 +1875,6 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
|
|||
struct ieee80211_vif *vif = info->control.vif;
|
||||
struct ath_softc *sc = hw->priv;
|
||||
struct ath_txq *txq = txctl->txq;
|
||||
struct ath_buf *bf;
|
||||
int padpos, padsize;
|
||||
int frmlen = skb->len + FCS_LEN;
|
||||
int q;
|
||||
|
@ -1884,10 +1921,6 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
|
|||
* info are no longer valid (overwritten by the ath_frame_info data.
|
||||
*/
|
||||
|
||||
bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
|
||||
if (unlikely(!bf))
|
||||
return -ENOMEM;
|
||||
|
||||
q = skb_get_queue_mapping(skb);
|
||||
spin_lock_bh(&txq->axq_lock);
|
||||
if (txq == sc->tx.txq_map[q] &&
|
||||
|
@ -1897,8 +1930,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
|
|||
}
|
||||
spin_unlock_bh(&txq->axq_lock);
|
||||
|
||||
ath_tx_start_dma(sc, bf, txctl);
|
||||
|
||||
ath_tx_start_dma(sc, skb, txctl);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2391,7 +2423,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
|
|||
tid->sched = false;
|
||||
tid->paused = false;
|
||||
tid->state &= ~AGGR_CLEANUP;
|
||||
INIT_LIST_HEAD(&tid->buf_q);
|
||||
__skb_queue_head_init(&tid->buf_q);
|
||||
acno = TID_TO_WME_AC(tidno);
|
||||
tid->ac = &an->ac[acno];
|
||||
tid->state &= ~AGGR_ADDBA_COMPLETE;
|
||||
|
|
|
@ -57,22 +57,18 @@ struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
|
|||
}
|
||||
EXPORT_SYMBOL(ath_rxbuf_alloc);
|
||||
|
||||
int ath_printk(const char *level, struct ath_common *common,
|
||||
const char *fmt, ...)
|
||||
void ath_printk(const char *level, const char *fmt, ...)
|
||||
{
|
||||
struct va_format vaf;
|
||||
va_list args;
|
||||
int rtn;
|
||||
|
||||
va_start(args, fmt);
|
||||
|
||||
vaf.fmt = fmt;
|
||||
vaf.va = &args;
|
||||
|
||||
rtn = printk("%sath: %pV", level, &vaf);
|
||||
printk("%sath: %pV", level, &vaf);
|
||||
|
||||
va_end(args);
|
||||
|
||||
return rtn;
|
||||
}
|
||||
EXPORT_SYMBOL(ath_printk);
|
||||
|
|
|
@ -124,12 +124,12 @@ config B43_PHY_LP
|
|||
(802.11a support is optional, and currently disabled).
|
||||
|
||||
config B43_PHY_HT
|
||||
bool "Support for HT-PHY devices (BROKEN)"
|
||||
depends on B43 && BROKEN
|
||||
bool "Support for HT-PHY (high throughput) devices (EXPERIMENTAL)"
|
||||
depends on B43 && EXPERIMENTAL
|
||||
---help---
|
||||
Support for the HT-PHY.
|
||||
|
||||
Say N, this is BROKEN and crashes driver.
|
||||
Enables support for BCM4331 and possibly other chipsets with that PHY.
|
||||
|
||||
config B43_PHY_LCN
|
||||
bool "Support for LCN-PHY devices (BROKEN)"
|
||||
|
|
|
@ -419,33 +419,34 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
|
|||
gfp_t flags = GFP_KERNEL;
|
||||
|
||||
/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
|
||||
* alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
|
||||
* has shown that 4K is sufficient for the latter as long as the buffer
|
||||
* does not cross an 8K boundary.
|
||||
*
|
||||
* For unknown reasons - possibly a hardware error - the BCM4311 rev
|
||||
* 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
|
||||
* which accounts for the GFP_DMA flag below.
|
||||
*
|
||||
* The flags here must match the flags in free_ringmemory below!
|
||||
* alignment and 8K buffers for 64-bit DMA with 8K alignment.
|
||||
* In practice we could use smaller buffers for the latter, but the
|
||||
* alignment is really important because of the hardware bug. If bit
|
||||
* 0x00001000 is used in DMA address, some hardware (like BCM4331)
|
||||
* copies that bit into B43_DMA64_RXSTATUS and we get false values from
|
||||
* B43_DMA64_RXSTATDPTR. Let's just use 8K buffers even if we don't use
|
||||
* more than 256 slots for ring.
|
||||
*/
|
||||
if (ring->type == B43_DMA_64BIT)
|
||||
flags |= GFP_DMA;
|
||||
u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
|
||||
B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
|
||||
|
||||
ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
|
||||
B43_DMA_RINGMEMSIZE,
|
||||
&(ring->dmabase), flags);
|
||||
ring_mem_size, &(ring->dmabase),
|
||||
flags);
|
||||
if (!ring->descbase) {
|
||||
b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);
|
||||
memset(ring->descbase, 0, ring_mem_size);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void free_ringmemory(struct b43_dmaring *ring)
|
||||
{
|
||||
dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
|
||||
u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
|
||||
B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
|
||||
dma_free_coherent(ring->dev->dev->dma_dev, ring_mem_size,
|
||||
ring->descbase, ring->dmabase);
|
||||
}
|
||||
|
||||
|
|
|
@ -161,7 +161,8 @@ struct b43_dmadesc_generic {
|
|||
} __packed;
|
||||
|
||||
/* Misc DMA constants */
|
||||
#define B43_DMA_RINGMEMSIZE PAGE_SIZE
|
||||
#define B43_DMA32_RINGMEMSIZE 4096
|
||||
#define B43_DMA64_RINGMEMSIZE 8192
|
||||
/* Offset of frame with actual data */
|
||||
#define B43_DMA0_RX_FW598_FO 38
|
||||
#define B43_DMA0_RX_FW351_FO 30
|
||||
|
|
|
@ -4131,10 +4131,13 @@ out_unlock:
|
|||
* because the core might be gone away while we unlocked the mutex. */
|
||||
static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev)
|
||||
{
|
||||
struct b43_wl *wl = dev->wl;
|
||||
struct b43_wl *wl;
|
||||
struct b43_wldev *orig_dev;
|
||||
u32 mask;
|
||||
|
||||
if (!dev)
|
||||
return NULL;
|
||||
wl = dev->wl;
|
||||
redo:
|
||||
if (!dev || b43_status(dev) < B43_STAT_STARTED)
|
||||
return dev;
|
||||
|
|
|
@ -43,6 +43,8 @@
|
|||
#include "iwl-agn.h"
|
||||
#include "iwl-helpers.h"
|
||||
#include "iwl-agn-hw.h"
|
||||
#include "iwl-shared.h"
|
||||
#include "iwl-pci.h"
|
||||
|
||||
/* Highest firmware API version supported */
|
||||
#define IWL1000_UCODE_API_MAX 6
|
||||
|
@ -76,21 +78,21 @@
|
|||
static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
|
||||
{
|
||||
/* want Celsius */
|
||||
priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
|
||||
priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
|
||||
hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
|
||||
hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
|
||||
}
|
||||
|
||||
/* NIC configuration for 1000 series */
|
||||
static void iwl1000_nic_config(struct iwl_priv *priv)
|
||||
{
|
||||
/* set CSR_HW_CONFIG_REG for uCode use */
|
||||
iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
|
||||
iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
|
||||
CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
|
||||
CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
|
||||
|
||||
/* Setting digital SVR for 1000 card to 1.32V */
|
||||
/* locking is acquired in iwl_set_bits_mask_prph() function */
|
||||
iwl_set_bits_mask_prph(priv, APMG_DIGITAL_SVR_REG,
|
||||
iwl_set_bits_mask_prph(bus(priv), APMG_DIGITAL_SVR_REG,
|
||||
APMG_SVR_DIGITAL_VOLTAGE_1_32,
|
||||
~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
|
||||
}
|
||||
|
@@ -127,43 +129,39 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
priv->cfg->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;

priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
priv->hw_params.scd_bc_tbls_size =
priv->cfg->base_params->num_of_queues *
sizeof(struct iwlagn_scd_bc_tbl);
priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
hw_params(priv).max_stations = IWLAGN_STATION_COUNT;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;

priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;

priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ);
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);

priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
if (priv->cfg->rx_with_siso_diversity)
priv->hw_params.rx_chains_num = 1;
hw_params(priv).rx_chains_num = 1;
else
priv->hw_params.rx_chains_num =
hw_params(priv).rx_chains_num =
num_of_ant(priv->cfg->valid_rx_ant);
priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;

iwl1000_set_ct_threshold(priv);

/* Set initial sensitivity parameters */
/* Set initial calibration set */
priv->hw_params.sens = &iwl1000_sensitivity;
priv->hw_params.calib_init_cfg =
hw_params(priv).sens = &iwl1000_sensitivity;
hw_params(priv).calib_init_cfg =
BIT(IWL_CALIB_XTAL) |
BIT(IWL_CALIB_LO) |
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_TX_IQ_PERD) |
BIT(IWL_CALIB_BASE_BAND);
if (priv->cfg->need_dc_calib)
priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC);
hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);

priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
hw_params(priv).beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;

return 0;
}
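Most of the churn in the hunk above is mechanical: fields that used to be read through priv->hw_params are now reached through an hw_params(priv) accessor. The diff does not show the accessor itself; the sketch below is one plausible shape for such a macro, built on toy structure names, just to show why call sites can be rewritten this way without changing what they assign.

#include <stdio.h>

/* Toy stand-ins: the real iwlwifi structures are much larger and the
 * accessor would live in a shared header, not here. */
struct toy_hw_params {
	int max_txq_num;
	int max_stations;
};

struct toy_shared {
	struct toy_hw_params hw_params;
};

struct toy_priv {
	struct toy_shared *shrd;
};

/* Accessor macro: call sites write hw_params(priv).field and keep
 * working even if the backing storage moves again later. */
#define hw_params(priv) ((priv)->shrd->hw_params)

int main(void)
{
	struct toy_shared shrd = { { 0, 0 } };
	struct toy_priv priv = { &shrd };

	hw_params(&priv).max_txq_num = 20;
	hw_params(&priv).max_stations = 16;

	printf("txq=%d stations=%d\n",
	       hw_params(&priv).max_txq_num,
	       hw_params(&priv).max_stations);
	return 0;
}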
@@ -44,6 +44,8 @@
#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-6000-hw.h"
#include "iwl-shared.h"
#include "iwl-pci.h"

/* Highest firmware API version supported */
#define IWL2030_UCODE_API_MAX 6
@@ -78,8 +80,8 @@
static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
{
/* want Celsius */
priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD;
hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
}

/* NIC configuration for 2000 series */
@@ -88,7 +90,7 @@ static void iwl2000_nic_config(struct iwl_priv *priv)
iwl_rf_config(priv);

if (priv->cfg->iq_invert)
iwl_set_bit(priv, CSR_GP_DRIVER_REG,
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
}
@@ -124,44 +126,40 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
priv->cfg->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;

priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
priv->hw_params.scd_bc_tbls_size =
priv->cfg->base_params->num_of_queues *
sizeof(struct iwlagn_scd_bc_tbl);
priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
hw_params(priv).max_stations = IWLAGN_STATION_COUNT;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;

priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
hw_params(priv).max_inst_size = IWL60_RTC_INST_SIZE;

priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ);
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);

priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
if (priv->cfg->rx_with_siso_diversity)
priv->hw_params.rx_chains_num = 1;
hw_params(priv).rx_chains_num = 1;
else
priv->hw_params.rx_chains_num =
hw_params(priv).rx_chains_num =
num_of_ant(priv->cfg->valid_rx_ant);
priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;

iwl2000_set_ct_threshold(priv);

/* Set initial sensitivity parameters */
/* Set initial calibration set */
priv->hw_params.sens = &iwl2000_sensitivity;
priv->hw_params.calib_init_cfg =
hw_params(priv).sens = &iwl2000_sensitivity;
hw_params(priv).calib_init_cfg =
BIT(IWL_CALIB_XTAL) |
BIT(IWL_CALIB_LO) |
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_BASE_BAND);
if (priv->cfg->need_dc_calib)
priv->hw_params.calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
if (priv->cfg->need_temp_offset_calib)
priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);

priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
hw_params(priv).beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;

return 0;
}
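The calibration setup in the hunk above ORs a fixed set of BIT() flags into calib_init_cfg and then conditionally adds extra calibrations (DC, temperature offset). A self-contained sketch of that bitmask construction follows; the bit indices here are arbitrary placeholders, not the driver's real calibration enum.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

/* Placeholder calibration indices for illustration only. */
enum {
	CAL_XTAL,
	CAL_LO,
	CAL_TX_IQ,
	CAL_BASE_BAND,
	CAL_TEMP_OFFSET,
};

/* Build the "which calibrations to run" mask the same way the hunk does:
 * start from the always-on set, then OR in optional ones. */
static uint32_t build_calib_cfg(bool need_temp_offset_calib)
{
	uint32_t cfg = BIT(CAL_XTAL) |
		       BIT(CAL_LO) |
		       BIT(CAL_TX_IQ) |
		       BIT(CAL_BASE_BAND);

	if (need_temp_offset_calib)
		cfg |= BIT(CAL_TEMP_OFFSET);

	return cfg;
}

int main(void)
{
	printf("cfg without temp offset: 0x%02x\n", (unsigned)build_calib_cfg(false));
	printf("cfg with temp offset:    0x%02x\n", (unsigned)build_calib_cfg(true));
	return 0;
}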
@@ -179,7 +177,7 @@ static struct iwl_lib_ops iwl2000_lib = {
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REGULATORY_BAND_NO_HT40,
},
.update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
.update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
},
.temperature = iwlagn_temperature,
};
@@ -200,7 +198,7 @@ static struct iwl_lib_ops iwl2030_lib = {
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REGULATORY_BAND_NO_HT40,
},
.update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
.update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
},
.temperature = iwlagn_temperature,
};
@@ -284,6 +282,11 @@ struct iwl_cfg iwl2000_2bg_cfg = {
IWL_DEVICE_2000,
};

struct iwl_cfg iwl2000_2bgn_d_cfg = {
.name = "2000D Series 2x2 BGN",
IWL_DEVICE_2000,
};

#define IWL_DEVICE_2030 \
.fw_name_pre = IWL2030_FW_PRE, \
.ucode_api_max = IWL2030_UCODE_API_MAX, \
@ -46,6 +46,8 @@
|
|||
#include "iwl-agn-hw.h"
|
||||
#include "iwl-5000-hw.h"
|
||||
#include "iwl-trans.h"
|
||||
#include "iwl-shared.h"
|
||||
#include "iwl-pci.h"
|
||||
|
||||
/* Highest firmware API version supported */
|
||||
#define IWL5000_UCODE_API_MAX 5
|
||||
|
@ -68,18 +70,18 @@ static void iwl5000_nic_config(struct iwl_priv *priv)
|
|||
|
||||
iwl_rf_config(priv);
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->lock, flags);
|
||||
|
||||
/* W/A : NIC is stuck in a reset state after Early PCIe power off
|
||||
* (PCIe power is lost before PERST# is asserted),
|
||||
* causing ME FW to lose ownership and not being able to obtain it back.
|
||||
*/
|
||||
iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
|
||||
iwl_set_bits_mask_prph(bus(priv), APMG_PS_CTRL_REG,
|
||||
APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
|
||||
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
|
||||
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->lock, flags);
|
||||
}
|
||||
|
||||
static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
|
||||
|
@ -139,13 +141,13 @@ static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
|
|||
s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
|
||||
iwl_temp_calib_to_offset(priv);
|
||||
|
||||
priv->hw_params.ct_kill_threshold = threshold * volt2temp_coef;
|
||||
hw_params(priv).ct_kill_threshold = threshold * volt2temp_coef;
|
||||
}
|
||||
|
||||
static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
|
||||
{
|
||||
/* want Celsius */
|
||||
priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
|
||||
hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
|
||||
}
|
||||
|
||||
static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
|
||||
|
@ -155,38 +157,34 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
|
|||
priv->cfg->base_params->num_of_queues =
|
||||
iwlagn_mod_params.num_of_queues;
|
||||
|
||||
priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
|
||||
priv->hw_params.scd_bc_tbls_size =
|
||||
priv->cfg->base_params->num_of_queues *
|
||||
sizeof(struct iwlagn_scd_bc_tbl);
|
||||
priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
|
||||
priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
|
||||
hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
|
||||
hw_params(priv).max_stations = IWLAGN_STATION_COUNT;
|
||||
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
|
||||
|
||||
priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
|
||||
priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
|
||||
hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
|
||||
hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;
|
||||
|
||||
priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
|
||||
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
|
||||
BIT(IEEE80211_BAND_5GHZ);
|
||||
|
||||
priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
|
||||
priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
|
||||
priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
|
||||
priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
|
||||
hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
|
||||
hw_params(priv).rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
|
||||
hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
|
||||
hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
|
||||
|
||||
iwl5000_set_ct_threshold(priv);
|
||||
|
||||
/* Set initial sensitivity parameters */
|
||||
/* Set initial calibration set */
|
||||
priv->hw_params.sens = &iwl5000_sensitivity;
|
||||
priv->hw_params.calib_init_cfg =
|
||||
hw_params(priv).sens = &iwl5000_sensitivity;
|
||||
hw_params(priv).calib_init_cfg =
|
||||
BIT(IWL_CALIB_XTAL) |
|
||||
BIT(IWL_CALIB_LO) |
|
||||
BIT(IWL_CALIB_TX_IQ) |
|
||||
BIT(IWL_CALIB_TX_IQ_PERD) |
|
||||
BIT(IWL_CALIB_BASE_BAND);
|
||||
|
||||
priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
|
||||
hw_params(priv).beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -198,38 +196,34 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
|
|||
priv->cfg->base_params->num_of_queues =
|
||||
iwlagn_mod_params.num_of_queues;
|
||||
|
||||
priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
|
||||
priv->hw_params.scd_bc_tbls_size =
|
||||
priv->cfg->base_params->num_of_queues *
|
||||
sizeof(struct iwlagn_scd_bc_tbl);
|
||||
priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
|
||||
priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
|
||||
hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
|
||||
hw_params(priv).max_stations = IWLAGN_STATION_COUNT;
|
||||
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
|
||||
|
||||
priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
|
||||
priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
|
||||
hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
|
||||
hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;
|
||||
|
||||
priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
|
||||
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
|
||||
BIT(IEEE80211_BAND_5GHZ);
|
||||
|
||||
priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
|
||||
priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
|
||||
priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
|
||||
priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
|
||||
hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
|
||||
hw_params(priv).rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
|
||||
hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
|
||||
hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
|
||||
|
||||
iwl5150_set_ct_threshold(priv);
|
||||
|
||||
/* Set initial sensitivity parameters */
|
||||
/* Set initial calibration set */
|
||||
priv->hw_params.sens = &iwl5150_sensitivity;
|
||||
priv->hw_params.calib_init_cfg =
|
||||
hw_params(priv).sens = &iwl5150_sensitivity;
|
||||
hw_params(priv).calib_init_cfg =
|
||||
BIT(IWL_CALIB_LO) |
|
||||
BIT(IWL_CALIB_TX_IQ) |
|
||||
BIT(IWL_CALIB_BASE_BAND);
|
||||
if (priv->cfg->need_dc_calib)
|
||||
priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC);
|
||||
hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
|
||||
|
||||
priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
|
||||
hw_params(priv).beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -314,7 +308,7 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
|
|||
return -EFAULT;
|
||||
}
|
||||
|
||||
return trans_send_cmd(&priv->trans, &hcmd);
|
||||
return iwl_trans_send_cmd(trans(priv), &hcmd);
|
||||
}
|
||||
|
||||
static struct iwl_lib_ops iwl5000_lib = {
|
||||
|
|
|
@ -45,6 +45,8 @@
|
|||
#include "iwl-agn-hw.h"
|
||||
#include "iwl-6000-hw.h"
|
||||
#include "iwl-trans.h"
|
||||
#include "iwl-shared.h"
|
||||
#include "iwl-pci.h"
|
||||
|
||||
/* Highest firmware API version supported */
|
||||
#define IWL6000_UCODE_API_MAX 4
|
||||
|
@ -74,15 +76,15 @@
|
|||
static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
|
||||
{
|
||||
/* want Celsius */
|
||||
priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
|
||||
priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
|
||||
hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD;
|
||||
hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
|
||||
}
|
||||
|
||||
static void iwl6050_additional_nic_config(struct iwl_priv *priv)
|
||||
{
|
||||
/* Indicate calibration version to uCode. */
|
||||
if (iwlagn_eeprom_calib_version(priv) >= 6)
|
||||
iwl_set_bit(priv, CSR_GP_DRIVER_REG,
|
||||
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
|
||||
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
|
||||
}
|
||||
|
||||
|
@ -90,9 +92,9 @@ static void iwl6150_additional_nic_config(struct iwl_priv *priv)
|
|||
{
|
||||
/* Indicate calibration version to uCode. */
|
||||
if (iwlagn_eeprom_calib_version(priv) >= 6)
|
||||
iwl_set_bit(priv, CSR_GP_DRIVER_REG,
|
||||
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
|
||||
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
|
||||
iwl_set_bit(priv, CSR_GP_DRIVER_REG,
|
||||
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
|
||||
CSR_GP_DRIVER_REG_BIT_6050_1x2);
|
||||
}
|
||||
|
||||
|
@ -104,7 +106,7 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
|
|||
/* no locking required for register write */
|
||||
if (priv->cfg->pa_type == IWL_PA_INTERNAL) {
|
||||
/* 2x2 IPA phy type */
|
||||
iwl_write32(priv, CSR_GP_DRIVER_REG,
|
||||
iwl_write32(bus(priv), CSR_GP_DRIVER_REG,
|
||||
CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
|
||||
}
|
||||
/* do additional nic configuration if needed */
|
||||
|
@ -144,45 +146,41 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
|
|||
priv->cfg->base_params->num_of_queues =
|
||||
iwlagn_mod_params.num_of_queues;
|
||||
|
||||
priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
|
||||
priv->hw_params.scd_bc_tbls_size =
|
||||
priv->cfg->base_params->num_of_queues *
|
||||
sizeof(struct iwlagn_scd_bc_tbl);
|
||||
priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
|
||||
priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
|
||||
hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
|
||||
hw_params(priv).max_stations = IWLAGN_STATION_COUNT;
|
||||
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
|
||||
|
||||
priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
|
||||
priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
|
||||
hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
|
||||
hw_params(priv).max_inst_size = IWL60_RTC_INST_SIZE;
|
||||
|
||||
priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
|
||||
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
|
||||
BIT(IEEE80211_BAND_5GHZ);
|
||||
|
||||
priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
|
||||
hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
|
||||
if (priv->cfg->rx_with_siso_diversity)
|
||||
priv->hw_params.rx_chains_num = 1;
|
||||
hw_params(priv).rx_chains_num = 1;
|
||||
else
|
||||
priv->hw_params.rx_chains_num =
|
||||
hw_params(priv).rx_chains_num =
|
||||
num_of_ant(priv->cfg->valid_rx_ant);
|
||||
priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
|
||||
priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
|
||||
hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
|
||||
hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
|
||||
|
||||
iwl6000_set_ct_threshold(priv);
|
||||
|
||||
/* Set initial sensitivity parameters */
|
||||
/* Set initial calibration set */
|
||||
priv->hw_params.sens = &iwl6000_sensitivity;
|
||||
priv->hw_params.calib_init_cfg =
|
||||
hw_params(priv).sens = &iwl6000_sensitivity;
|
||||
hw_params(priv).calib_init_cfg =
|
||||
BIT(IWL_CALIB_XTAL) |
|
||||
BIT(IWL_CALIB_LO) |
|
||||
BIT(IWL_CALIB_TX_IQ) |
|
||||
BIT(IWL_CALIB_BASE_BAND);
|
||||
if (priv->cfg->need_dc_calib)
|
||||
priv->hw_params.calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
|
||||
hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
|
||||
if (priv->cfg->need_temp_offset_calib)
|
||||
priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
|
||||
hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
|
||||
|
||||
priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
|
||||
hw_params(priv).beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -255,7 +253,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
|
|||
return -EFAULT;
|
||||
}
|
||||
|
||||
return trans_send_cmd(&priv->trans, &hcmd);
|
||||
return iwl_trans_send_cmd(trans(priv), &hcmd);
|
||||
}
|
||||
|
||||
static struct iwl_lib_ops iwl6000_lib = {
|
||||
|
@ -272,7 +270,7 @@ static struct iwl_lib_ops iwl6000_lib = {
|
|||
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
|
||||
EEPROM_REG_BAND_52_HT40_CHANNELS
|
||||
},
|
||||
.update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
|
||||
.update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
|
||||
},
|
||||
.temperature = iwlagn_temperature,
|
||||
};
|
||||
|
@ -294,7 +292,7 @@ static struct iwl_lib_ops iwl6030_lib = {
|
|||
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
|
||||
EEPROM_REG_BAND_52_HT40_CHANNELS
|
||||
},
|
||||
.update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
|
||||
.update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
|
||||
},
|
||||
.temperature = iwlagn_temperature,
|
||||
};
|
||||
|
@ -395,6 +393,12 @@ struct iwl_cfg iwl6005_2bg_cfg = {
|
|||
IWL_DEVICE_6005,
|
||||
};
|
||||
|
||||
struct iwl_cfg iwl6005_2agn_sff_cfg = {
|
||||
.name = "Intel(R) Centrino(R) Advanced-N 6205S AGN",
|
||||
IWL_DEVICE_6005,
|
||||
.ht_params = &iwl6000_ht_params,
|
||||
};
|
||||
|
||||
#define IWL_DEVICE_6030 \
|
||||
.fw_name_pre = IWL6030_FW_PRE, \
|
||||
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
|
||||
|
|
|
@ -93,12 +93,12 @@ int iwl_send_calib_results(struct iwl_priv *priv)
|
|||
};
|
||||
|
||||
for (i = 0; i < IWL_CALIB_MAX; i++) {
|
||||
if ((BIT(i) & priv->hw_params.calib_init_cfg) &&
|
||||
if ((BIT(i) & hw_params(priv).calib_init_cfg) &&
|
||||
priv->calib_results[i].buf) {
|
||||
hcmd.len[0] = priv->calib_results[i].buf_len;
|
||||
hcmd.data[0] = priv->calib_results[i].buf;
|
||||
hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
|
||||
ret = trans_send_cmd(&priv->trans, &hcmd);
|
||||
ret = iwl_trans_send_cmd(trans(priv), &hcmd);
|
||||
if (ret) {
|
||||
IWL_ERR(priv, "Error %d iteration %d\n",
|
||||
ret, i);
|
||||
|
@ -174,7 +174,7 @@ static int iwl_sens_energy_cck(struct iwl_priv *priv,
|
|||
u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
|
||||
u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
|
||||
struct iwl_sensitivity_data *data = NULL;
|
||||
const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
|
||||
const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens;
|
||||
|
||||
data = &(priv->sensitivity_data);
|
||||
|
||||
|
@ -357,7 +357,7 @@ static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
|
|||
u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
|
||||
u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
|
||||
struct iwl_sensitivity_data *data = NULL;
|
||||
const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
|
||||
const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens;
|
||||
|
||||
data = &(priv->sensitivity_data);
|
||||
|
||||
|
@ -484,7 +484,7 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
|
|||
memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
|
||||
sizeof(u16)*HD_TABLE_SIZE);
|
||||
|
||||
return trans_send_cmd(&priv->trans, &cmd_out);
|
||||
return iwl_trans_send_cmd(trans(priv), &cmd_out);
|
||||
}
|
||||
|
||||
/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
|
||||
|
@ -573,7 +573,7 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
|
|||
&(cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX]),
|
||||
sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES);
|
||||
|
||||
return trans_send_cmd(&priv->trans, &cmd_out);
|
||||
return iwl_trans_send_cmd(trans(priv), &cmd_out);
|
||||
}
|
||||
|
||||
void iwl_init_sensitivity(struct iwl_priv *priv)
|
||||
|
@ -581,7 +581,7 @@ void iwl_init_sensitivity(struct iwl_priv *priv)
|
|||
int ret = 0;
|
||||
int i;
|
||||
struct iwl_sensitivity_data *data = NULL;
|
||||
const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
|
||||
const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens;
|
||||
|
||||
if (priv->disable_sens_cal)
|
||||
return;
|
||||
|
@ -658,13 +658,13 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv)
|
|||
return;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->lock, flags);
|
||||
rx_info = &priv->statistics.rx_non_phy;
|
||||
ofdm = &priv->statistics.rx_ofdm;
|
||||
cck = &priv->statistics.rx_cck;
|
||||
if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
|
||||
IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -688,7 +688,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv)
|
|||
statis.beacon_energy_c =
|
||||
le32_to_cpu(rx_info->beacon_energy_c);
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->lock, flags);
|
||||
|
||||
IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
|
||||
|
||||
|
@ -821,21 +821,21 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
|
|||
* To be safe, simply mask out any chains that we know
|
||||
* are not on the device.
|
||||
*/
|
||||
active_chains &= priv->hw_params.valid_rx_ant;
|
||||
active_chains &= hw_params(priv).valid_rx_ant;
|
||||
|
||||
num_tx_chains = 0;
|
||||
for (i = 0; i < NUM_RX_CHAINS; i++) {
|
||||
/* loops on all the bits of
|
||||
* priv->hw_setting.valid_tx_ant */
|
||||
u8 ant_msk = (1 << i);
|
||||
if (!(priv->hw_params.valid_tx_ant & ant_msk))
|
||||
if (!(hw_params(priv).valid_tx_ant & ant_msk))
|
||||
continue;
|
||||
|
||||
num_tx_chains++;
|
||||
if (data->disconn_array[i] == 0)
|
||||
/* there is a Tx antenna connected */
|
||||
break;
|
||||
if (num_tx_chains == priv->hw_params.tx_chains_num &&
|
||||
if (num_tx_chains == hw_params(priv).tx_chains_num &&
|
||||
data->disconn_array[i]) {
|
||||
/*
|
||||
* If all chains are disconnected
|
||||
|
@ -852,12 +852,13 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
|
|||
}
|
||||
}
|
||||
|
||||
if (active_chains != priv->hw_params.valid_rx_ant &&
|
||||
if (active_chains != hw_params(priv).valid_rx_ant &&
|
||||
active_chains != priv->chain_noise_data.active_chains)
|
||||
IWL_DEBUG_CALIB(priv,
|
||||
"Detected that not all antennas are connected! "
|
||||
"Connected: %#x, valid: %#x.\n",
|
||||
active_chains, priv->hw_params.valid_rx_ant);
|
||||
active_chains,
|
||||
hw_params(priv).valid_rx_ant);
|
||||
|
||||
/* Save for use within RXON, TX, SCAN commands, etc. */
|
||||
data->active_chains = active_chains;
|
||||
|
@ -917,7 +918,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
|
|||
priv->phy_calib_chain_noise_gain_cmd);
|
||||
cmd.delta_gain_1 = data->delta_gain_code[1];
|
||||
cmd.delta_gain_2 = data->delta_gain_code[2];
|
||||
trans_send_cmd_pdu(&priv->trans, REPLY_PHY_CALIBRATION_CMD,
|
||||
iwl_trans_send_cmd_pdu(trans(priv), REPLY_PHY_CALIBRATION_CMD,
|
||||
CMD_ASYNC, sizeof(cmd), &cmd);
|
||||
|
||||
data->radio_write = 1;
|
||||
|
@ -975,13 +976,13 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
|
|||
return;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->lock, flags);
|
||||
|
||||
rx_info = &priv->statistics.rx_non_phy;
|
||||
|
||||
if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
|
||||
IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -996,7 +997,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
|
|||
if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
|
||||
IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
|
||||
rxon_chnum, rxon_band24);
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1015,7 +1016,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
|
|||
chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
|
||||
chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->lock, flags);
|
||||
|
||||
data->beacon_count++;
|
||||
|
||||
|
@ -1046,7 +1047,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
|
|||
priv->cfg->bt_params->advanced_bt_coexist) {
|
||||
/* Disable disconnected antenna algorithm for advanced
|
||||
bt coex, assuming valid antennas are connected */
|
||||
data->active_chains = priv->hw_params.valid_rx_ant;
|
||||
data->active_chains = hw_params(priv).valid_rx_ant;
|
||||
for (i = 0; i < NUM_RX_CHAINS; i++)
|
||||
if (!(data->active_chains & (1<<i)))
|
||||
data->disconn_array[i] = 1;
|
||||
|
|
|
@ -195,7 +195,7 @@ static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
|
|||
}
|
||||
|
||||
static void
|
||||
iwlcore_eeprom_enh_txp_read_element(struct iwl_priv *priv,
|
||||
iwl_eeprom_enh_txp_read_element(struct iwl_priv *priv,
|
||||
struct iwl_eeprom_enhanced_txpwr *txp,
|
||||
s8 max_txpower_avg)
|
||||
{
|
||||
|
@ -235,7 +235,7 @@ iwlcore_eeprom_enh_txp_read_element(struct iwl_priv *priv,
|
|||
#define TXP_CHECK_AND_PRINT(x) ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) \
|
||||
? # x " " : "")
|
||||
|
||||
void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv)
|
||||
void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
|
||||
{
|
||||
struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
|
||||
int idx, entries;
|
||||
|
@ -294,6 +294,6 @@ void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv)
|
|||
if (max_txp_avg_halfdbm > priv->tx_power_lmt_in_half_dbm)
|
||||
priv->tx_power_lmt_in_half_dbm = max_txp_avg_halfdbm;
|
||||
|
||||
iwlcore_eeprom_enh_txp_read_element(priv, txp, max_txp_avg);
|
||||
iwl_eeprom_enh_txp_read_element(priv, txp, max_txp_avg);
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -95,17 +95,4 @@
#define IWLAGN_NUM_AMPDU_QUEUES 9
#define IWLAGN_FIRST_AMPDU_QUEUE 11

/* Fixed (non-configurable) rx data from phy */

/**
* struct iwlagn_schedq_bc_tbl scheduler byte count table
* base physical address provided by SCD_DRAM_BASE_ADDR
* @tfd_offset 0-12 - tx command byte count
* 12-16 - station index
*/
struct iwlagn_scd_bc_tbl {
__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
} __packed;

#endif /* __iwl_agn_hw_h__ */
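Per the struct comment above, each scheduler byte-count entry is a 16-bit word whose low bits carry the TX command byte count and whose upper bits carry a station index. The sketch below shows that packing with plain host-order integers; it is an illustration only, not the driver's code, which uses __le16 values in DMA-visible memory.

#include <stdio.h>
#include <stdint.h>

/* Illustrative packing of one byte-count entry: bits 0-11 hold the TX
 * command byte count, bits 12-15 a station index. */
static uint16_t pack_bc_entry(uint16_t byte_cnt, uint8_t sta_id)
{
	return (uint16_t)((byte_cnt & 0x0FFF) | ((sta_id & 0x0F) << 12));
}

static void unpack_bc_entry(uint16_t entry, uint16_t *byte_cnt, uint8_t *sta_id)
{
	*byte_cnt = entry & 0x0FFF;
	*sta_id = (entry >> 12) & 0x0F;
}

int main(void)
{
	uint16_t cnt;
	uint8_t sta;
	uint16_t e = pack_bc_entry(1500, 3);

	unpack_bc_entry(e, &cnt, &sta);
	printf("entry=0x%04x byte_cnt=%u sta=%u\n",
	       (unsigned)e, (unsigned)cnt, (unsigned)sta);
	return 0;
}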
@ -40,449 +40,7 @@
|
|||
#include "iwl-agn.h"
|
||||
#include "iwl-sta.h"
|
||||
#include "iwl-trans.h"
|
||||
|
||||
static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
|
||||
{
|
||||
return le32_to_cpup((__le32 *)&tx_resp->status +
|
||||
tx_resp->frame_count) & MAX_SN;
|
||||
}
|
||||
|
||||
static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
|
||||
{
|
||||
status &= TX_STATUS_MSK;
|
||||
|
||||
switch (status) {
|
||||
case TX_STATUS_POSTPONE_DELAY:
|
||||
priv->reply_tx_stats.pp_delay++;
|
||||
break;
|
||||
case TX_STATUS_POSTPONE_FEW_BYTES:
|
||||
priv->reply_tx_stats.pp_few_bytes++;
|
||||
break;
|
||||
case TX_STATUS_POSTPONE_BT_PRIO:
|
||||
priv->reply_tx_stats.pp_bt_prio++;
|
||||
break;
|
||||
case TX_STATUS_POSTPONE_QUIET_PERIOD:
|
||||
priv->reply_tx_stats.pp_quiet_period++;
|
||||
break;
|
||||
case TX_STATUS_POSTPONE_CALC_TTAK:
|
||||
priv->reply_tx_stats.pp_calc_ttak++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
|
||||
priv->reply_tx_stats.int_crossed_retry++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_SHORT_LIMIT:
|
||||
priv->reply_tx_stats.short_limit++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_LONG_LIMIT:
|
||||
priv->reply_tx_stats.long_limit++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_FIFO_UNDERRUN:
|
||||
priv->reply_tx_stats.fifo_underrun++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_DRAIN_FLOW:
|
||||
priv->reply_tx_stats.drain_flow++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_RFKILL_FLUSH:
|
||||
priv->reply_tx_stats.rfkill_flush++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_LIFE_EXPIRE:
|
||||
priv->reply_tx_stats.life_expire++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_DEST_PS:
|
||||
priv->reply_tx_stats.dest_ps++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_HOST_ABORTED:
|
||||
priv->reply_tx_stats.host_abort++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_BT_RETRY:
|
||||
priv->reply_tx_stats.bt_retry++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_STA_INVALID:
|
||||
priv->reply_tx_stats.sta_invalid++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_FRAG_DROPPED:
|
||||
priv->reply_tx_stats.frag_drop++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_TID_DISABLE:
|
||||
priv->reply_tx_stats.tid_disable++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_FIFO_FLUSHED:
|
||||
priv->reply_tx_stats.fifo_flush++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
|
||||
priv->reply_tx_stats.insuff_cf_poll++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_PASSIVE_NO_RX:
|
||||
priv->reply_tx_stats.fail_hw_drop++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
|
||||
priv->reply_tx_stats.sta_color_mismatch++;
|
||||
break;
|
||||
default:
|
||||
priv->reply_tx_stats.unknown++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
|
||||
{
|
||||
status &= AGG_TX_STATUS_MSK;
|
||||
|
||||
switch (status) {
|
||||
case AGG_TX_STATE_UNDERRUN_MSK:
|
||||
priv->reply_agg_tx_stats.underrun++;
|
||||
break;
|
||||
case AGG_TX_STATE_BT_PRIO_MSK:
|
||||
priv->reply_agg_tx_stats.bt_prio++;
|
||||
break;
|
||||
case AGG_TX_STATE_FEW_BYTES_MSK:
|
||||
priv->reply_agg_tx_stats.few_bytes++;
|
||||
break;
|
||||
case AGG_TX_STATE_ABORT_MSK:
|
||||
priv->reply_agg_tx_stats.abort++;
|
||||
break;
|
||||
case AGG_TX_STATE_LAST_SENT_TTL_MSK:
|
||||
priv->reply_agg_tx_stats.last_sent_ttl++;
|
||||
break;
|
||||
case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
|
||||
priv->reply_agg_tx_stats.last_sent_try++;
|
||||
break;
|
||||
case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
|
||||
priv->reply_agg_tx_stats.last_sent_bt_kill++;
|
||||
break;
|
||||
case AGG_TX_STATE_SCD_QUERY_MSK:
|
||||
priv->reply_agg_tx_stats.scd_query++;
|
||||
break;
|
||||
case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
|
||||
priv->reply_agg_tx_stats.bad_crc32++;
|
||||
break;
|
||||
case AGG_TX_STATE_RESPONSE_MSK:
|
||||
priv->reply_agg_tx_stats.response++;
|
||||
break;
|
||||
case AGG_TX_STATE_DUMP_TX_MSK:
|
||||
priv->reply_agg_tx_stats.dump_tx++;
|
||||
break;
|
||||
case AGG_TX_STATE_DELAY_TX_MSK:
|
||||
priv->reply_agg_tx_stats.delay_tx++;
|
||||
break;
|
||||
default:
|
||||
priv->reply_agg_tx_stats.unknown++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void iwlagn_set_tx_status(struct iwl_priv *priv,
|
||||
struct ieee80211_tx_info *info,
|
||||
struct iwl_rxon_context *ctx,
|
||||
struct iwlagn_tx_resp *tx_resp,
|
||||
int txq_id, bool is_agg)
|
||||
{
|
||||
u16 status = le16_to_cpu(tx_resp->status.status);
|
||||
|
||||
info->status.rates[0].count = tx_resp->failure_frame + 1;
|
||||
if (is_agg)
|
||||
info->flags &= ~IEEE80211_TX_CTL_AMPDU;
|
||||
info->flags |= iwl_tx_status_to_mac80211(status);
|
||||
iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
|
||||
info);
|
||||
if (!iwl_is_tx_success(status))
|
||||
iwlagn_count_tx_err_status(priv, status);
|
||||
|
||||
if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
|
||||
iwl_is_associated_ctx(ctx) && ctx->vif &&
|
||||
ctx->vif->type == NL80211_IFTYPE_STATION) {
|
||||
ctx->last_tx_rejected = true;
|
||||
iwl_stop_queue(priv, &priv->txq[txq_id]);
|
||||
}
|
||||
|
||||
IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
|
||||
"0x%x retries %d\n",
|
||||
txq_id,
|
||||
iwl_get_tx_fail_reason(status), status,
|
||||
le32_to_cpu(tx_resp->rate_n_flags),
|
||||
tx_resp->failure_frame);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUG
|
||||
#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x
|
||||
|
||||
const char *iwl_get_agg_tx_fail_reason(u16 status)
|
||||
{
|
||||
status &= AGG_TX_STATUS_MSK;
|
||||
switch (status) {
|
||||
case AGG_TX_STATE_TRANSMITTED:
|
||||
return "SUCCESS";
|
||||
AGG_TX_STATE_FAIL(UNDERRUN_MSK);
|
||||
AGG_TX_STATE_FAIL(BT_PRIO_MSK);
|
||||
AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
|
||||
AGG_TX_STATE_FAIL(ABORT_MSK);
|
||||
AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
|
||||
AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
|
||||
AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
|
||||
AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
|
||||
AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
|
||||
AGG_TX_STATE_FAIL(RESPONSE_MSK);
|
||||
AGG_TX_STATE_FAIL(DUMP_TX_MSK);
|
||||
AGG_TX_STATE_FAIL(DELAY_TX_MSK);
|
||||
}
|
||||
|
||||
return "UNKNOWN";
|
||||
}
|
||||
#endif /* CONFIG_IWLWIFI_DEBUG */
|
||||
|
||||
static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
|
||||
struct iwl_ht_agg *agg,
|
||||
struct iwlagn_tx_resp *tx_resp,
|
||||
int txq_id, u16 start_idx)
|
||||
{
|
||||
u16 status;
|
||||
struct agg_tx_status *frame_status = &tx_resp->status;
|
||||
struct ieee80211_hdr *hdr = NULL;
|
||||
int i, sh, idx;
|
||||
u16 seq;
|
||||
|
||||
if (agg->wait_for_ba)
|
||||
IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
|
||||
|
||||
agg->frame_count = tx_resp->frame_count;
|
||||
agg->start_idx = start_idx;
|
||||
agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
|
||||
agg->bitmap = 0;
|
||||
|
||||
/* # frames attempted by Tx command */
|
||||
if (agg->frame_count == 1) {
|
||||
struct iwl_tx_info *txb;
|
||||
|
||||
/* Only one frame was attempted; no block-ack will arrive */
|
||||
idx = start_idx;
|
||||
|
||||
IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
|
||||
agg->frame_count, agg->start_idx, idx);
|
||||
txb = &priv->txq[txq_id].txb[idx];
|
||||
iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(txb->skb),
|
||||
txb->ctx, tx_resp, txq_id, true);
|
||||
agg->wait_for_ba = 0;
|
||||
} else {
|
||||
/* Two or more frames were attempted; expect block-ack */
|
||||
u64 bitmap = 0;
|
||||
|
||||
/*
|
||||
* Start is the lowest frame sent. It may not be the first
|
||||
* frame in the batch; we figure this out dynamically during
|
||||
* the following loop.
|
||||
*/
|
||||
int start = agg->start_idx;
|
||||
|
||||
/* Construct bit-map of pending frames within Tx window */
|
||||
for (i = 0; i < agg->frame_count; i++) {
|
||||
u16 sc;
|
||||
status = le16_to_cpu(frame_status[i].status);
|
||||
seq = le16_to_cpu(frame_status[i].sequence);
|
||||
idx = SEQ_TO_INDEX(seq);
|
||||
txq_id = SEQ_TO_QUEUE(seq);
|
||||
|
||||
if (status & AGG_TX_STATUS_MSK)
|
||||
iwlagn_count_agg_tx_err_status(priv, status);
|
||||
|
||||
if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
|
||||
AGG_TX_STATE_ABORT_MSK))
|
||||
continue;
|
||||
|
||||
IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
|
||||
agg->frame_count, txq_id, idx);
|
||||
IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
|
||||
"try-count (0x%08x)\n",
|
||||
iwl_get_agg_tx_fail_reason(status),
|
||||
status & AGG_TX_STATUS_MSK,
|
||||
status & AGG_TX_TRY_MSK);
|
||||
|
||||
hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
|
||||
if (!hdr) {
|
||||
IWL_ERR(priv,
|
||||
"BUG_ON idx doesn't point to valid skb"
|
||||
" idx=%d, txq_id=%d\n", idx, txq_id);
|
||||
return -1;
|
||||
}
|
||||
|
||||
sc = le16_to_cpu(hdr->seq_ctrl);
|
||||
if (idx != (SEQ_TO_SN(sc) & 0xff)) {
|
||||
IWL_ERR(priv,
|
||||
"BUG_ON idx doesn't match seq control"
|
||||
" idx=%d, seq_idx=%d, seq=%d\n",
|
||||
idx, SEQ_TO_SN(sc),
|
||||
hdr->seq_ctrl);
|
||||
return -1;
|
||||
}
|
||||
|
||||
IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
|
||||
i, idx, SEQ_TO_SN(sc));
|
||||
|
||||
/*
|
||||
* sh -> how many frames ahead of the starting frame is
|
||||
* the current one?
|
||||
*
|
||||
* Note that all frames sent in the batch must be in a
|
||||
* 64-frame window, so this number should be in [0,63].
|
||||
* If outside of this window, then we've found a new
|
||||
* "first" frame in the batch and need to change start.
|
||||
*/
|
||||
sh = idx - start;
|
||||
|
||||
/*
|
||||
* If >= 64, out of window. start must be at the front
|
||||
* of the circular buffer, idx must be near the end of
|
||||
* the buffer, and idx is the new "first" frame. Shift
|
||||
* the indices around.
|
||||
*/
|
||||
if (sh >= 64) {
|
||||
/* Shift bitmap by start - idx, wrapped */
|
||||
sh = 0x100 - idx + start;
|
||||
bitmap = bitmap << sh;
|
||||
/* Now idx is the new start so sh = 0 */
|
||||
sh = 0;
|
||||
start = idx;
|
||||
/*
|
||||
* If <= -64 then wraps the 256-pkt circular buffer
|
||||
* (e.g., start = 255 and idx = 0, sh should be 1)
|
||||
*/
|
||||
} else if (sh <= -64) {
|
||||
sh = 0x100 - start + idx;
|
||||
/*
|
||||
* If < 0 but > -64, out of window. idx is before start
|
||||
* but not wrapped. Shift the indices around.
|
||||
*/
|
||||
} else if (sh < 0) {
|
||||
/* Shift by how far start is ahead of idx */
|
||||
sh = start - idx;
|
||||
bitmap = bitmap << sh;
|
||||
/* Now idx is the new start so sh = 0 */
|
||||
start = idx;
|
||||
sh = 0;
|
||||
}
|
||||
/* Sequence number start + sh was sent in this batch */
|
||||
bitmap |= 1ULL << sh;
|
||||
IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
|
||||
start, (unsigned long long)bitmap);
|
||||
}
|
||||
|
||||
/*
|
||||
* Store the bitmap and possibly the new start, if we wrapped
|
||||
* the buffer above
|
||||
*/
|
||||
agg->bitmap = bitmap;
|
||||
agg->start_idx = start;
|
||||
IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
|
||||
agg->frame_count, agg->start_idx,
|
||||
(unsigned long long)agg->bitmap);
|
||||
|
||||
if (bitmap)
|
||||
agg->wait_for_ba = 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void iwl_check_abort_status(struct iwl_priv *priv,
|
||||
u8 frame_count, u32 status)
|
||||
{
|
||||
if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
|
||||
IWL_ERR(priv, "Tx flush command to flush out all frames\n");
|
||||
if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
|
||||
queue_work(priv->workqueue, &priv->tx_flush);
|
||||
}
|
||||
}
|
||||
|
||||
void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
|
||||
{
|
||||
struct iwl_rx_packet *pkt = rxb_addr(rxb);
|
||||
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
|
||||
int txq_id = SEQ_TO_QUEUE(sequence);
|
||||
int index = SEQ_TO_INDEX(sequence);
|
||||
struct iwl_tx_queue *txq = &priv->txq[txq_id];
|
||||
struct ieee80211_tx_info *info;
|
||||
struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
|
||||
struct ieee80211_hdr *hdr;
|
||||
struct iwl_tx_info *txb;
|
||||
u32 status = le16_to_cpu(tx_resp->status.status);
|
||||
int tid;
|
||||
int sta_id;
|
||||
int freed;
|
||||
unsigned long flags;
|
||||
|
||||
if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
|
||||
IWL_ERR(priv, "%s: Read index for DMA queue txq_id (%d) "
|
||||
"index %d is out of range [0-%d] %d %d\n", __func__,
|
||||
txq_id, index, txq->q.n_bd, txq->q.write_ptr,
|
||||
txq->q.read_ptr);
|
||||
return;
|
||||
}
|
||||
|
||||
txq->time_stamp = jiffies;
|
||||
txb = &txq->txb[txq->q.read_ptr];
|
||||
info = IEEE80211_SKB_CB(txb->skb);
|
||||
memset(&info->status, 0, sizeof(info->status));
|
||||
|
||||
tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
|
||||
IWLAGN_TX_RES_TID_POS;
|
||||
sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
|
||||
IWLAGN_TX_RES_RA_POS;
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
|
||||
hdr = (void *)txb->skb->data;
|
||||
if (!ieee80211_is_data_qos(hdr->frame_control))
|
||||
priv->last_seq_ctl = tx_resp->seq_ctl;
|
||||
|
||||
if (txq->sched_retry) {
|
||||
const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
|
||||
struct iwl_ht_agg *agg;
|
||||
|
||||
agg = &priv->stations[sta_id].tid[tid].agg;
|
||||
/*
|
||||
* If the BT kill count is non-zero, we'll get this
|
||||
* notification again.
|
||||
*/
|
||||
if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
|
||||
priv->cfg->bt_params &&
|
||||
priv->cfg->bt_params->advanced_bt_coexist) {
|
||||
IWL_DEBUG_COEX(priv, "receive reply tx with bt_kill\n");
|
||||
}
|
||||
iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
|
||||
|
||||
/* check if BAR is needed */
|
||||
if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
|
||||
info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
|
||||
|
||||
if (txq->q.read_ptr != (scd_ssn & 0xff)) {
|
||||
index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
|
||||
IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
|
||||
"scd_ssn=%d idx=%d txq=%d swq=%d\n",
|
||||
scd_ssn , index, txq_id, txq->swq_id);
|
||||
|
||||
freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
|
||||
iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
|
||||
|
||||
if (priv->mac80211_registered &&
|
||||
(iwl_queue_space(&txq->q) > txq->q.low_mark) &&
|
||||
(agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
|
||||
iwl_wake_queue(priv, txq);
|
||||
}
|
||||
} else {
|
||||
iwlagn_set_tx_status(priv, info, txb->ctx, tx_resp,
|
||||
txq_id, false);
|
||||
freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
|
||||
iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
|
||||
|
||||
if (priv->mac80211_registered &&
|
||||
iwl_queue_space(&txq->q) > txq->q.low_mark &&
|
||||
status != TX_STATUS_FAIL_PASSIVE_NO_RX)
|
||||
iwl_wake_queue(priv, txq);
|
||||
}
|
||||
|
||||
iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
|
||||
|
||||
iwl_check_abort_status(priv, tx_resp->frame_count, status);
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
}
|
||||
#include "iwl-shared.h"
|
||||
|
||||
int iwlagn_hw_valid_rtc_data_addr(u32 addr)
|
||||
{
|
||||
|
@ -495,7 +53,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
|
|||
struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
|
||||
u8 tx_ant_cfg_cmd;
|
||||
|
||||
if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
|
||||
if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->shrd->status),
|
||||
"TX Power requested while scanning!\n"))
|
||||
return -EAGAIN;
|
||||
|
||||
|
@ -525,7 +83,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
|
|||
else
|
||||
tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
|
||||
|
||||
return trans_send_cmd_pdu(&priv->trans, tx_ant_cfg_cmd, CMD_SYNC,
|
||||
return iwl_trans_send_cmd_pdu(trans(priv), tx_ant_cfg_cmd, CMD_SYNC,
|
||||
sizeof(tx_power_cmd), &tx_power_cmd);
|
||||
}
|
||||
|
||||
|
@ -609,6 +167,9 @@ struct iwl_mod_params iwlagn_mod_params = {
|
|||
.bt_coex_active = true,
|
||||
.no_sleep_autoadjust = true,
|
||||
.power_level = IWL_POWER_INDEX_1,
|
||||
.bt_ch_announce = true,
|
||||
.wanted_ucode_alternative = 1,
|
||||
.auto_agg = true,
|
||||
/* the rest are 0 by default */
|
||||
};
|
||||
|
||||
|
@ -767,15 +328,15 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
|
|||
u16 rx_chain = 0;
|
||||
enum ieee80211_band band;
|
||||
u8 n_probes = 0;
|
||||
u8 rx_ant = priv->hw_params.valid_rx_ant;
|
||||
u8 rx_ant = hw_params(priv).valid_rx_ant;
|
||||
u8 rate;
|
||||
bool is_active = false;
|
||||
int chan_mod;
|
||||
u8 active_chains;
|
||||
u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
|
||||
u8 scan_tx_antennas = hw_params(priv).valid_tx_ant;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
if (vif)
|
||||
ctx = iwl_rxon_ctx_from_vif(vif);
|
||||
|
@ -942,7 +503,7 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
|
|||
scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
|
||||
|
||||
/* In power save mode use one chain, otherwise use all chains */
|
||||
if (test_bit(STATUS_POWER_PMI, &priv->status)) {
|
||||
if (test_bit(STATUS_POWER_PMI, &priv->shrd->status)) {
|
||||
/* rx_ant has been set to all valid chains previously */
|
||||
active_chains = rx_ant &
|
||||
((u8)(priv->chain_noise_data.active_chains));
|
||||
|
@ -962,7 +523,8 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
|
|||
}
|
||||
|
||||
/* MIMO is not used here, but value is required */
|
||||
rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
|
||||
rx_chain |=
|
||||
hw_params(priv).valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
|
||||
rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
|
||||
rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
|
||||
rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
|
||||
|
@ -1044,15 +606,15 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
|
|||
scan->len = cpu_to_le16(cmd.len[0]);
|
||||
|
||||
/* set scan bit here for PAN params */
|
||||
set_bit(STATUS_SCAN_HW, &priv->status);
|
||||
set_bit(STATUS_SCAN_HW, &priv->shrd->status);
|
||||
|
||||
ret = iwlagn_set_pan_params(priv);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = trans_send_cmd(&priv->trans, &cmd);
|
||||
ret = iwl_trans_send_cmd(trans(priv), &cmd);
|
||||
if (ret) {
|
||||
clear_bit(STATUS_SCAN_HW, &priv->status);
|
||||
clear_bit(STATUS_SCAN_HW, &priv->shrd->status);
|
||||
iwlagn_set_pan_params(priv);
|
||||
}
|
||||
|
||||
|
@ -1072,52 +634,6 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
|
|||
vif->bss_conf.bssid);
|
||||
}
|
||||
|
||||
void iwl_free_tfds_in_queue(struct iwl_priv *priv,
|
||||
int sta_id, int tid, int freed)
|
||||
{
|
||||
lockdep_assert_held(&priv->sta_lock);
|
||||
|
||||
if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
|
||||
priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
|
||||
else {
|
||||
IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
|
||||
priv->stations[sta_id].tid[tid].tfds_in_queue,
|
||||
freed);
|
||||
priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
|
||||
}
|
||||
}
|
||||
|
||||
#define IWL_FLUSH_WAIT_MS 2000
|
||||
|
||||
int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv)
|
||||
{
|
||||
struct iwl_tx_queue *txq;
|
||||
struct iwl_queue *q;
|
||||
int cnt;
|
||||
unsigned long now = jiffies;
|
||||
int ret = 0;
|
||||
|
||||
/* waiting for all the tx frames complete might take a while */
|
||||
for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
|
||||
if (cnt == priv->cmd_queue)
|
||||
continue;
|
||||
txq = &priv->txq[cnt];
|
||||
q = &txq->q;
|
||||
while (q->read_ptr != q->write_ptr && !time_after(jiffies,
|
||||
now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
|
||||
msleep(1);
|
||||
|
||||
if (q->read_ptr != q->write_ptr) {
|
||||
IWL_ERR(priv, "fail to flush all tx fifo queues\n");
|
||||
ret = -ETIMEDOUT;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define IWL_TX_QUEUE_MSK 0xfffff
|
||||
|
||||
/**
|
||||
* iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode
|
||||
*
|
||||
|
@ -1156,22 +672,22 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
|
|||
flush_cmd.fifo_control);
|
||||
flush_cmd.flush_control = cpu_to_le16(flush_control);
|
||||
|
||||
return trans_send_cmd(&priv->trans, &cmd);
|
||||
return iwl_trans_send_cmd(trans(priv), &cmd);
|
||||
}
|
||||
|
||||
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
|
||||
{
|
||||
mutex_lock(&priv->mutex);
|
||||
mutex_lock(&priv->shrd->mutex);
|
||||
ieee80211_stop_queues(priv->hw);
|
||||
if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
|
||||
IWL_ERR(priv, "flush request fail\n");
|
||||
goto done;
|
||||
}
|
||||
IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
|
||||
iwlagn_wait_tx_queue_empty(priv);
|
||||
iwl_trans_wait_tx_queue_empty(trans(priv));
|
||||
done:
|
||||
ieee80211_wake_queues(priv->hw);
|
||||
mutex_unlock(&priv->mutex);
|
||||
mutex_unlock(&priv->shrd->mutex);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1350,12 +866,12 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
|
|||
if (priv->cfg->bt_params->bt_session_2) {
|
||||
memcpy(&bt_cmd_2000.basic, &basic,
|
||||
sizeof(basic));
|
||||
ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
|
||||
ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
|
||||
CMD_SYNC, sizeof(bt_cmd_2000), &bt_cmd_2000);
|
||||
} else {
|
||||
memcpy(&bt_cmd_6000.basic, &basic,
|
||||
sizeof(basic));
|
||||
ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
|
||||
ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
|
||||
CMD_SYNC, sizeof(bt_cmd_6000), &bt_cmd_6000);
|
||||
}
|
||||
if (ret)
|
||||
|
@ -1368,7 +884,7 @@ void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena)
|
|||
struct iwl_rxon_context *ctx, *found_ctx = NULL;
|
||||
bool found_ap = false;
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
/* Check whether AP or GO mode is active. */
|
||||
if (rssi_ena) {
|
||||
|
@ -1481,7 +997,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
|
|||
break;
|
||||
}
|
||||
|
||||
mutex_lock(&priv->mutex);
|
||||
mutex_lock(&priv->shrd->mutex);
|
||||
|
||||
/*
|
||||
* We can not send command to firmware while scanning. When the scan
|
||||
|
@ -1490,7 +1006,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
|
|||
* STATUS_SCANNING to avoid race when queue_work two times from
|
||||
* different notifications, but quit and not perform any work at all.
|
||||
*/
|
||||
if (test_bit(STATUS_SCAN_HW, &priv->status))
|
||||
if (test_bit(STATUS_SCAN_HW, &priv->shrd->status))
|
||||
goto out;
|
||||
|
||||
iwl_update_chain_flags(priv);
|
||||
|
@ -1509,7 +1025,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
|
|||
*/
|
||||
iwlagn_bt_coex_rssi_monitor(priv);
|
||||
out:
|
||||
mutex_unlock(&priv->mutex);
|
||||
mutex_unlock(&priv->shrd->mutex);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1616,7 +1132,7 @@ static void iwlagn_set_kill_msk(struct iwl_priv *priv,
|
|||
priv->kill_cts_mask = bt_kill_cts_msg[kill_msk];
|
||||
|
||||
/* schedule to send runtime bt_config */
|
||||
queue_work(priv->workqueue, &priv->bt_runtime_config);
|
||||
queue_work(priv->shrd->workqueue, &priv->bt_runtime_config);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1660,7 +1176,7 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
|
|||
IWL_BT_COEX_TRAFFIC_LOAD_NONE;
|
||||
}
|
||||
priv->bt_status = coex->bt_status;
|
||||
queue_work(priv->workqueue,
|
||||
queue_work(priv->shrd->workqueue,
|
||||
&priv->bt_traffic_change_work);
|
||||
}
|
||||
}
|
||||
|
@ -1669,9 +1185,9 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
|
|||
|
||||
/* FIXME: based on notification, adjust the prio_boost */
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->lock, flags);
|
||||
priv->bt_ci_compliance = coex->bt_ci_compliance;
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->lock, flags);
|
||||
}
|
||||
|
||||
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
|
||||
|
@ -1771,7 +1287,7 @@ static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
|
|||
void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
|
||||
{
|
||||
	bool is_single = is_single_rx_stream(priv);
	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->shrd->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

@@ -1783,7 +1299,7 @@ void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
	if (priv->chain_noise_data.active_chains)
		active_chains = priv->chain_noise_data.active_chains;
	else
		active_chains = priv->hw_params.valid_rx_ant;
		active_chains = hw_params(priv).valid_rx_ant;

	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist &&

@@ -1848,136 +1364,6 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
	return ant;
}

static const char *get_csr_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
}

void iwl_dump_csr(struct iwl_priv *priv)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(priv, "CSR values:\n");
	IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(priv, " %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(priv, csr_tbl[i]));
	}
}

static const char *get_fh_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}

int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLWIFI_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLWIFI_DEBUG
	if (display) {
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos += scnprintf(*buf + pos, bufsz - pos,
				"FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos += scnprintf(*buf + pos, bufsz - pos,
				" %34s: 0X%08x\n",
				get_fh_string(fh_tbl[i]),
				iwl_read_direct32(priv, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IWL_ERR(priv, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IWL_ERR(priv, " %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(priv, fh_tbl[i]));
	}
	return 0;
}

/* notification wait support */
void iwlagn_init_notification_wait(struct iwl_priv *priv,
				   struct iwl_notification_wait *wait_entry,
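The get_csr_string() and get_fh_string() helpers above rely on an IWL_CMD() macro that is not shown in this hunk; presumably it is the usual case-label stringify pattern. The standalone sketch below illustrates that assumed pattern with hypothetical DEMO_* names; it is not code from this patch.

#include <stdio.h>

/* assumed definition: a case label that returns its own name as a string */
#define IWL_CMD(x) case x: return #x

enum { DEMO_REG_A = 0x10, DEMO_REG_B = 0x14 };

static const char *demo_reg_string(int reg)
{
	switch (reg) {
	IWL_CMD(DEMO_REG_A);
	IWL_CMD(DEMO_REG_B);
	default:
		return "UNKNOWN";
	}
}

int main(void)
{
	/* prints "DEMO_REG_A" */
	printf("%s\n", demo_reg_string(DEMO_REG_A));
	return 0;
}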
@ -297,10 +297,10 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
|
|||
u8 *qc = ieee80211_get_qos_ctl(hdr);
|
||||
tid = qc[0] & 0xf;
|
||||
} else
|
||||
return MAX_TID_COUNT;
|
||||
return IWL_MAX_TID_COUNT;
|
||||
|
||||
if (unlikely(tid >= TID_MAX_LOAD_COUNT))
|
||||
return MAX_TID_COUNT;
|
||||
return IWL_MAX_TID_COUNT;
|
||||
|
||||
tl = &lq_data->load[tid];
|
||||
|
||||
|
@ -313,7 +313,7 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
|
|||
tl->queue_count = 1;
|
||||
tl->head = 0;
|
||||
tl->packet_count[0] = 1;
|
||||
return MAX_TID_COUNT;
|
||||
return IWL_MAX_TID_COUNT;
|
||||
}
|
||||
|
||||
time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
|
||||
|
@ -420,7 +420,7 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
|
|||
|
||||
load = rs_tl_get_load(lq_data, tid);
|
||||
|
||||
if (load > IWL_AGG_LOAD_THRESHOLD) {
|
||||
if ((iwlagn_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) {
|
||||
IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
|
||||
sta->addr, tid);
|
||||
ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
|
||||
|
@ -819,7 +819,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
|
|||
|
||||
if (num_of_ant(tbl->ant_type) > 1)
|
||||
tbl->ant_type =
|
||||
first_antenna(priv->hw_params.valid_tx_ant);
|
||||
first_antenna(hw_params(priv).valid_tx_ant);
|
||||
|
||||
tbl->is_ht40 = 0;
|
||||
tbl->is_SGI = 0;
|
||||
|
@ -877,12 +877,12 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
|
|||
* Is there a need to switch between
|
||||
* full concurrency and 3-wire?
|
||||
*/
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->lock, flags);
|
||||
if (priv->bt_ci_compliance && priv->bt_ant_couple_ok)
|
||||
full_concurrent = true;
|
||||
else
|
||||
full_concurrent = false;
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->lock, flags);
|
||||
}
|
||||
if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
|
||||
(priv->bt_full_concurrent != full_concurrent)) {
|
||||
|
@ -893,7 +893,7 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
|
|||
rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
|
||||
iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
|
||||
|
||||
queue_work(priv->workqueue, &priv->bt_full_concurrency);
|
||||
queue_work(priv->shrd->workqueue, &priv->bt_full_concurrency);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1293,7 +1293,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
|
|||
return -1;
|
||||
|
||||
/* Need both Tx chains/antennas to support MIMO */
|
||||
if (priv->hw_params.tx_chains_num < 2)
|
||||
if (hw_params(priv).tx_chains_num < 2)
|
||||
return -1;
|
||||
|
||||
IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
|
||||
|
@ -1349,7 +1349,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
|
|||
return -1;
|
||||
|
||||
/* Need both Tx chains/antennas to support MIMO */
|
||||
if (priv->hw_params.tx_chains_num < 3)
|
||||
if (hw_params(priv).tx_chains_num < 3)
|
||||
return -1;
|
||||
|
||||
IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO3\n");
|
||||
|
@ -1448,8 +1448,8 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
|
|||
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
|
||||
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
|
||||
u8 start_action;
|
||||
u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
|
||||
u8 tx_chains_num = priv->hw_params.tx_chains_num;
|
||||
u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
|
||||
u8 tx_chains_num = hw_params(priv).tx_chains_num;
|
||||
int ret = 0;
|
||||
u8 update_search_tbl_counter = 0;
|
||||
|
||||
|
@ -1459,14 +1459,16 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
|
|||
break;
|
||||
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
|
||||
/* avoid antenna B unless MIMO */
|
||||
valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
|
||||
valid_tx_ant =
|
||||
first_antenna(hw_params(priv).valid_tx_ant);
|
||||
if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2)
|
||||
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
|
||||
break;
|
||||
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
|
||||
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
|
||||
/* avoid antenna B and MIMO */
|
||||
valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
|
||||
valid_tx_ant =
|
||||
first_antenna(hw_params(priv).valid_tx_ant);
|
||||
if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 &&
|
||||
tbl->action != IWL_LEGACY_SWITCH_SISO)
|
||||
tbl->action = IWL_LEGACY_SWITCH_SISO;
|
||||
|
@ -1489,7 +1491,8 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
|
|||
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
|
||||
else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
|
||||
tbl->action = IWL_LEGACY_SWITCH_SISO;
|
||||
valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
|
||||
valid_tx_ant =
|
||||
first_antenna(hw_params(priv).valid_tx_ant);
|
||||
}
|
||||
|
||||
start_action = tbl->action;
|
||||
|
@ -1623,8 +1626,8 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
|
|||
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
|
||||
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
|
||||
u8 start_action;
|
||||
u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
|
||||
u8 tx_chains_num = priv->hw_params.tx_chains_num;
|
||||
u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
|
||||
u8 tx_chains_num = hw_params(priv).tx_chains_num;
|
||||
u8 update_search_tbl_counter = 0;
|
||||
int ret;
|
||||
|
||||
|
@ -1634,14 +1637,16 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
|
|||
break;
|
||||
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
|
||||
/* avoid antenna B unless MIMO */
|
||||
valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
|
||||
valid_tx_ant =
|
||||
first_antenna(hw_params(priv).valid_tx_ant);
|
||||
if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
|
||||
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
|
||||
break;
|
||||
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
|
||||
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
|
||||
/* avoid antenna B and MIMO */
|
||||
valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
|
||||
valid_tx_ant =
|
||||
first_antenna(hw_params(priv).valid_tx_ant);
|
||||
if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
|
||||
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
|
||||
break;
|
||||
|
@ -1658,7 +1663,8 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
|
|||
|
||||
/* configure as 1x1 if bt full concurrency */
|
||||
if (priv->bt_full_concurrent) {
|
||||
valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
|
||||
valid_tx_ant =
|
||||
first_antenna(hw_params(priv).valid_tx_ant);
|
||||
if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
|
||||
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
|
||||
}
|
||||
|
@ -1794,8 +1800,8 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
|
|||
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
|
||||
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
|
||||
u8 start_action;
|
||||
u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
|
||||
u8 tx_chains_num = priv->hw_params.tx_chains_num;
|
||||
u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
|
||||
u8 tx_chains_num = hw_params(priv).tx_chains_num;
|
||||
u8 update_search_tbl_counter = 0;
|
||||
int ret;
|
||||
|
||||
|
@ -1964,8 +1970,8 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
|
|||
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
|
||||
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
|
||||
u8 start_action;
|
||||
u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
|
||||
u8 tx_chains_num = priv->hw_params.tx_chains_num;
|
||||
u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
|
||||
u8 tx_chains_num = hw_params(priv).tx_chains_num;
|
||||
int ret;
|
||||
u8 update_search_tbl_counter = 0;
|
||||
|
||||
|
@ -2208,7 +2214,6 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
|
|||
|
||||
/*
|
||||
* setup rate table in uCode
|
||||
* return rate_n_flags as used in the table
|
||||
*/
|
||||
static void rs_update_rate_tbl(struct iwl_priv *priv,
|
||||
struct iwl_rxon_context *ctx,
|
||||
|
@ -2255,7 +2260,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
|
|||
u8 done_search = 0;
|
||||
u16 high_low;
|
||||
s32 sr;
|
||||
u8 tid = MAX_TID_COUNT;
|
||||
u8 tid = IWL_MAX_TID_COUNT;
|
||||
struct iwl_tid_data *tid_data;
|
||||
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
|
||||
struct iwl_rxon_context *ctx = sta_priv->common.ctx;
|
||||
|
@ -2274,8 +2279,9 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
|
|||
lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
|
||||
|
||||
tid = rs_tl_add_packet(lq_sta, hdr);
|
||||
if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
|
||||
tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
|
||||
if ((tid != IWL_MAX_TID_COUNT) &&
|
||||
(lq_sta->tx_agg_tid_en & (1 << tid))) {
|
||||
tid_data = &priv->shrd->tid_data[lq_sta->lq.sta_id][tid];
|
||||
if (tid_data->agg.state == IWL_AGG_OFF)
|
||||
lq_sta->is_agg = 0;
|
||||
else
|
||||
|
@ -2645,9 +2651,10 @@ lq_update:
|
|||
iwl_ht_enabled(priv)) {
|
||||
if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
|
||||
(lq_sta->tx_agg_tid_en & (1 << tid)) &&
|
||||
(tid != MAX_TID_COUNT)) {
|
||||
(tid != IWL_MAX_TID_COUNT)) {
|
||||
u8 sta_id = lq_sta->lq.sta_id;
|
||||
tid_data =
|
||||
&priv->stations[lq_sta->lq.sta_id].tid[tid];
|
||||
&priv->shrd->tid_data[sta_id][tid];
|
||||
if (tid_data->agg.state == IWL_AGG_OFF) {
|
||||
IWL_DEBUG_RATE(priv,
|
||||
"try to aggregate tid %d\n",
|
||||
|
@ -2703,7 +2710,7 @@ static void rs_initialize_lq(struct iwl_priv *priv,
|
|||
|
||||
i = lq_sta->last_txrate_idx;
|
||||
|
||||
valid_tx_ant = priv->hw_params.valid_tx_ant;
|
||||
valid_tx_ant = hw_params(priv).valid_tx_ant;
|
||||
|
||||
if (!lq_sta->search_better_tbl)
|
||||
active_tbl = lq_sta->active_tbl;
|
||||
|
@ -2886,15 +2893,15 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
|
|||
|
||||
/* These values will be overridden later */
|
||||
lq_sta->lq.general_params.single_stream_ant_msk =
|
||||
first_antenna(priv->hw_params.valid_tx_ant);
|
||||
first_antenna(hw_params(priv).valid_tx_ant);
|
||||
lq_sta->lq.general_params.dual_stream_ant_msk =
|
||||
priv->hw_params.valid_tx_ant &
|
||||
~first_antenna(priv->hw_params.valid_tx_ant);
|
||||
hw_params(priv).valid_tx_ant &
|
||||
~first_antenna(hw_params(priv).valid_tx_ant);
|
||||
if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
|
||||
lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
|
||||
} else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
|
||||
} else if (num_of_ant(hw_params(priv).valid_tx_ant) == 2) {
|
||||
lq_sta->lq.general_params.dual_stream_ant_msk =
|
||||
priv->hw_params.valid_tx_ant;
|
||||
hw_params(priv).valid_tx_ant;
|
||||
}
|
||||
|
||||
/* as default allow aggregation for all tids */
|
||||
|
@ -2940,7 +2947,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
|
|||
if (priv && priv->bt_full_concurrent) {
|
||||
/* 1x1 only */
|
||||
tbl_type.ant_type =
|
||||
first_antenna(priv->hw_params.valid_tx_ant);
|
||||
first_antenna(hw_params(priv).valid_tx_ant);
|
||||
}
|
||||
|
||||
/* How many times should we repeat the initial rate? */
|
||||
|
@ -2972,7 +2979,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
|
|||
if (priv->bt_full_concurrent)
|
||||
valid_tx_ant = ANT_A;
|
||||
else
|
||||
valid_tx_ant = priv->hw_params.valid_tx_ant;
|
||||
valid_tx_ant = hw_params(priv).valid_tx_ant;
|
||||
}
|
||||
|
||||
/* Fill rest of rate table */
|
||||
|
@ -3006,7 +3013,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
|
|||
if (priv && priv->bt_full_concurrent) {
|
||||
/* 1x1 only */
|
||||
tbl_type.ant_type =
|
||||
first_antenna(priv->hw_params.valid_tx_ant);
|
||||
first_antenna(hw_params(priv).valid_tx_ant);
|
||||
}
|
||||
|
||||
/* Indicate to uCode which entries might be MIMO.
|
||||
|
@ -3097,7 +3104,7 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
|
|||
u8 ant_sel_tx;
|
||||
|
||||
priv = lq_sta->drv;
|
||||
valid_tx_ant = priv->hw_params.valid_tx_ant;
|
||||
valid_tx_ant = hw_params(priv).valid_tx_ant;
|
||||
if (lq_sta->dbg_fixed_rate) {
|
||||
ant_sel_tx =
|
||||
((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
|
||||
|
@ -3168,9 +3175,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
|
|||
desc += sprintf(buff+desc, "fixed rate 0x%X\n",
|
||||
lq_sta->dbg_fixed_rate);
|
||||
desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
|
||||
(priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
|
||||
(priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
|
||||
(priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
|
||||
(hw_params(priv).valid_tx_ant & ANT_A) ? "ANT_A," : "",
|
||||
(hw_params(priv).valid_tx_ant & ANT_B) ? "ANT_B," : "",
|
||||
(hw_params(priv).valid_tx_ant & ANT_C) ? "ANT_C" : "");
|
||||
desc += sprintf(buff+desc, "lq type %s\n",
|
||||
(is_legacy(tbl->lq_type)) ? "legacy" : "HT");
|
||||
if (is_Ht(tbl->lq_type)) {
|
||||
|
|
|
@ -31,6 +31,7 @@
|
|||
#include "iwl-agn-calib.h"
|
||||
#include "iwl-helpers.h"
|
||||
#include "iwl-trans.h"
|
||||
#include "iwl-shared.h"
|
||||
|
||||
static int iwlagn_disable_bss(struct iwl_priv *priv,
|
||||
struct iwl_rxon_context *ctx,
|
||||
|
@ -40,7 +41,7 @@ static int iwlagn_disable_bss(struct iwl_priv *priv,
|
|||
int ret;
|
||||
|
||||
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
||||
ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd,
|
||||
ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd,
|
||||
CMD_SYNC, sizeof(*send), send);
|
||||
|
||||
send->filter_flags = old_filter;
|
||||
|
@ -66,7 +67,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
|
|||
|
||||
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
||||
send->dev_type = RXON_DEV_TYPE_P2P;
|
||||
ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd,
|
||||
ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd,
|
||||
CMD_SYNC, sizeof(*send), send);
|
||||
|
||||
send->filter_flags = old_filter;
|
||||
|
@ -92,7 +93,7 @@ static int iwlagn_disconn_pan(struct iwl_priv *priv,
|
|||
int ret;
|
||||
|
||||
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
||||
ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, CMD_SYNC,
|
||||
ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd, CMD_SYNC,
|
||||
sizeof(*send), send);
|
||||
|
||||
send->filter_flags = old_filter;
|
||||
|
@ -121,7 +122,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
|
|||
ctx->qos_data.qos_active,
|
||||
ctx->qos_data.def_qos_parm.qos_flags);
|
||||
|
||||
ret = trans_send_cmd_pdu(&priv->trans, ctx->qos_cmd, CMD_SYNC,
|
||||
ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->qos_cmd, CMD_SYNC,
|
||||
sizeof(struct iwl_qosparam_cmd),
|
||||
&ctx->qos_data.def_qos_parm);
|
||||
if (ret)
|
||||
|
@ -131,7 +132,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
|
|||
static int iwlagn_update_beacon(struct iwl_priv *priv,
|
||||
struct ieee80211_vif *vif)
|
||||
{
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
dev_kfree_skb(priv->beacon_skb);
|
||||
priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif);
|
||||
|
@ -180,7 +181,7 @@ static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
|
|||
ctx->staging.ofdm_ht_triple_stream_basic_rates;
|
||||
rxon_assoc.acquisition_data = ctx->staging.acquisition_data;
|
||||
|
||||
ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_assoc_cmd,
|
||||
ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_assoc_cmd,
|
||||
CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc);
|
||||
return ret;
|
||||
}
|
||||
|
@ -266,7 +267,7 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
|
|||
* Associated RXON doesn't clear the station table in uCode,
|
||||
* so we don't need to restore stations etc. after this.
|
||||
*/
|
||||
ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, CMD_SYNC,
|
||||
ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd, CMD_SYNC,
|
||||
sizeof(struct iwl_rxon_cmd), &ctx->staging);
|
||||
if (ret) {
|
||||
IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
|
||||
|
@ -315,7 +316,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
|
|||
|
||||
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
|
||||
ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];
|
||||
|
@ -362,7 +363,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
|
|||
slot0 = bcnint / 2;
|
||||
slot1 = bcnint - slot0;
|
||||
|
||||
if (test_bit(STATUS_SCAN_HW, &priv->status) ||
|
||||
if (test_bit(STATUS_SCAN_HW, &priv->shrd->status) ||
|
||||
(!ctx_bss->vif->bss_conf.idle &&
|
||||
!ctx_bss->vif->bss_conf.assoc)) {
|
||||
slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
|
||||
|
@ -378,7 +379,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
|
|||
ctx_pan->beacon_int;
|
||||
slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
|
||||
|
||||
if (test_bit(STATUS_SCAN_HW, &priv->status)) {
|
||||
if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
|
||||
slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
|
||||
slot1 = IWL_MIN_SLOT_TIME;
|
||||
}
|
||||
|
@ -387,7 +388,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
|
|||
cmd.slots[0].width = cpu_to_le16(slot0);
|
||||
cmd.slots[1].width = cpu_to_le16(slot1);
|
||||
|
||||
ret = trans_send_cmd_pdu(&priv->trans, REPLY_WIPAN_PARAMS, CMD_SYNC,
|
||||
ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WIPAN_PARAMS, CMD_SYNC,
|
||||
sizeof(cmd), &cmd);
|
||||
if (ret)
|
||||
IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
|
||||
|
@ -420,12 +421,12 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
|
|||
bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
|
||||
if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
|
||||
return -EINVAL;
|
||||
|
||||
if (!iwl_is_alive(priv))
|
||||
if (!iwl_is_alive(priv->shrd))
|
||||
return -EBUSY;
|
||||
|
||||
/* This function hardcodes a bunch of dual-mode assumptions */
|
||||
|
@ -434,6 +435,10 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
|
|||
if (!ctx->is_active)
|
||||
return 0;
|
||||
|
||||
/* override BSSID if necessary due to preauth */
|
||||
if (ctx->preauth_bssid)
|
||||
memcpy(ctx->staging.bssid_addr, ctx->bssid, ETH_ALEN);
|
||||
|
||||
/* always get timestamp with Rx frame */
|
||||
ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
|
||||
|
||||
|
@ -462,7 +467,7 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
|
|||
* receive commit_rxon request
|
||||
* abort any previous channel switch if still in process
|
||||
*/
|
||||
if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
|
||||
if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status) &&
|
||||
(priv->switch_channel != ctx->staging.channel)) {
|
||||
IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
|
||||
le16_to_cpu(priv->switch_channel));
|
||||
|
@ -536,14 +541,14 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
|
|||
|
||||
IWL_DEBUG_MAC80211(priv, "changed %#x", changed);
|
||||
|
||||
mutex_lock(&priv->mutex);
|
||||
mutex_lock(&priv->shrd->mutex);
|
||||
|
||||
if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
|
||||
if (unlikely(test_bit(STATUS_SCANNING, &priv->shrd->status))) {
|
||||
IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!iwl_is_ready(priv)) {
|
||||
if (!iwl_is_ready(priv->shrd)) {
|
||||
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
|
||||
goto out;
|
||||
}
|
||||
|
@ -575,7 +580,7 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
|
|||
goto out;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->lock, flags);
|
||||
|
||||
for_each_context(priv, ctx) {
|
||||
/* Configure HT40 channels */
|
||||
|
@ -619,7 +624,7 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
|
|||
ctx->vif);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->lock, flags);
|
||||
|
||||
iwl_update_bcast_stations(priv);
|
||||
|
||||
|
@ -651,7 +656,7 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
|
|||
iwlagn_commit_rxon(priv, ctx);
|
||||
}
|
||||
out:
|
||||
mutex_unlock(&priv->mutex);
|
||||
mutex_unlock(&priv->shrd->mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -666,7 +671,7 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
|
|||
struct ieee80211_sta_ht_cap *ht_cap;
|
||||
bool need_multiple;
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
switch (vif->type) {
|
||||
case NL80211_IFTYPE_STATION:
|
||||
|
@ -770,7 +775,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
|
|||
memset(&cmd, 0, sizeof(cmd));
|
||||
iwl_set_calib_hdr(&cmd.hdr,
|
||||
priv->phy_calib_chain_noise_reset_cmd);
|
||||
ret = trans_send_cmd_pdu(&priv->trans,
|
||||
ret = iwl_trans_send_cmd_pdu(trans(priv),
|
||||
REPLY_PHY_CALIBRATION_CMD,
|
||||
CMD_SYNC, sizeof(cmd), &cmd);
|
||||
if (ret)
|
||||
|
@ -791,17 +796,17 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
|
|||
int ret;
|
||||
bool force = false;
|
||||
|
||||
mutex_lock(&priv->mutex);
|
||||
mutex_lock(&priv->shrd->mutex);
|
||||
|
||||
if (unlikely(!iwl_is_ready(priv))) {
|
||||
if (unlikely(!iwl_is_ready(priv->shrd))) {
|
||||
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
|
||||
mutex_unlock(&priv->mutex);
|
||||
mutex_unlock(&priv->shrd->mutex);
|
||||
return;
|
||||
}
|
||||
|
||||
if (unlikely(!ctx->vif)) {
|
||||
IWL_DEBUG_MAC80211(priv, "leave - vif is NULL\n");
|
||||
mutex_unlock(&priv->mutex);
|
||||
mutex_unlock(&priv->shrd->mutex);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -834,7 +839,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
|
|||
*/
|
||||
if (ctx->last_tx_rejected) {
|
||||
ctx->last_tx_rejected = false;
|
||||
iwl_wake_any_queue(priv, ctx);
|
||||
iwl_trans_wake_any_queue(trans(priv),
|
||||
ctx->ctxid);
|
||||
}
|
||||
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
||||
|
||||
|
@ -895,6 +901,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
|
|||
if (!priv->disable_chain_noise_cal)
|
||||
iwlagn_chain_noise_reset(priv);
|
||||
priv->start_calib = 1;
|
||||
WARN_ON(ctx->preauth_bssid);
|
||||
}
|
||||
|
||||
if (changes & BSS_CHANGED_IBSS) {
|
||||
|
@ -912,7 +919,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
|
|||
IWL_ERR(priv, "Error sending IBSS beacon\n");
|
||||
}
|
||||
|
||||
mutex_unlock(&priv->mutex);
|
||||
mutex_unlock(&priv->shrd->mutex);
|
||||
}
|
||||
|
||||
void iwlagn_post_scan(struct iwl_priv *priv)
|
||||
|
|
|
@ -49,7 +49,7 @@ iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, u8 sta_id)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
/* Set up the rate scaling to start at selected rate, fall back
|
||||
* all the way down to 1M in IEEE order, and then spin on 1M */
|
||||
|
@ -63,23 +63,23 @@ iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, u8 sta_id)
|
|||
if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
|
||||
rate_flags |= RATE_MCS_CCK_MSK;
|
||||
|
||||
rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
|
||||
rate_flags |= first_antenna(hw_params(priv).valid_tx_ant) <<
|
||||
RATE_MCS_ANT_POS;
|
||||
rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
|
||||
for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
|
||||
link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
|
||||
|
||||
link_cmd->general_params.single_stream_ant_msk =
|
||||
first_antenna(priv->hw_params.valid_tx_ant);
|
||||
first_antenna(hw_params(priv).valid_tx_ant);
|
||||
|
||||
link_cmd->general_params.dual_stream_ant_msk =
|
||||
priv->hw_params.valid_tx_ant &
|
||||
~first_antenna(priv->hw_params.valid_tx_ant);
|
||||
hw_params(priv).valid_tx_ant &
|
||||
~first_antenna(hw_params(priv).valid_tx_ant);
|
||||
if (!link_cmd->general_params.dual_stream_ant_msk) {
|
||||
link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
|
||||
} else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
|
||||
} else if (num_of_ant(hw_params(priv).valid_tx_ant) == 2) {
|
||||
link_cmd->general_params.dual_stream_ant_msk =
|
||||
priv->hw_params.valid_tx_ant;
|
||||
hw_params(priv).valid_tx_ant;
|
||||
}
|
||||
|
||||
link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
|
||||
|
@ -116,9 +116,9 @@ int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx
|
|||
if (sta_id_r)
|
||||
*sta_id_r = sta_id;
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
|
||||
priv->stations[sta_id].used |= IWL_STA_LOCAL;
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
||||
|
||||
/* Set up default rate scaling table in device's station table */
|
||||
link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
|
||||
|
@ -132,9 +132,9 @@ int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx
|
|||
if (ret)
|
||||
IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
|
||||
priv->stations[sta_id].lq = link_cmd;
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -189,7 +189,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
|
|||
cmd.len[0] = cmd_size;
|
||||
|
||||
if (not_empty || send_if_empty)
|
||||
return trans_send_cmd(&priv->trans, &cmd);
|
||||
return iwl_trans_send_cmd(trans(priv), &cmd);
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
@ -197,7 +197,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
|
|||
int iwl_restore_default_wep_keys(struct iwl_priv *priv,
|
||||
struct iwl_rxon_context *ctx)
|
||||
{
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
return iwl_send_static_wepkey_cmd(priv, ctx, false);
|
||||
}
|
||||
|
@ -208,13 +208,13 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
|
|||
{
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
|
||||
keyconf->keyidx);
|
||||
|
||||
memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
|
||||
if (iwl_is_rfkill(priv)) {
|
||||
if (iwl_is_rfkill(priv->shrd)) {
|
||||
IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n");
|
||||
/* but keys in device are clear anyway so return success */
|
||||
return 0;
|
||||
|
@ -232,7 +232,7 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
|
|||
{
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
if (keyconf->keylen != WEP_KEY_LEN_128 &&
|
||||
keyconf->keylen != WEP_KEY_LEN_64) {
|
||||
|
@ -311,9 +311,9 @@ static int iwlagn_send_sta_key(struct iwl_priv *priv,
|
|||
struct iwl_addsta_cmd sta_cmd;
|
||||
int i;
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
|
||||
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
||||
|
||||
key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
|
||||
key_flags |= STA_KEY_FLG_MAP_KEY_MSK;
|
||||
|
@ -388,16 +388,16 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
|
|||
if (sta_id == IWL_INVALID_STATION)
|
||||
return -ENOENT;
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
|
||||
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
|
||||
if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE))
|
||||
sta_id = IWL_INVALID_STATION;
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
||||
|
||||
if (sta_id == IWL_INVALID_STATION)
|
||||
return 0;
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
ctx->key_mapping_keys--;
|
||||
|
||||
|
@ -430,7 +430,7 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
|
|||
if (sta_id == IWL_INVALID_STATION)
|
||||
return -EINVAL;
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
keyconf->hw_key_idx = iwl_get_free_ucode_key_offset(priv);
|
||||
if (keyconf->hw_key_idx == WEP_INVALID_OFFSET)
|
||||
|
@ -493,18 +493,18 @@ int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
|
|||
unsigned long flags;
|
||||
u8 sta_id;
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
|
||||
sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL);
|
||||
if (sta_id == IWL_INVALID_STATION) {
|
||||
IWL_ERR(priv, "Unable to prepare broadcast station\n");
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
|
||||
priv->stations[sta_id].used |= IWL_STA_BCAST;
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
||||
|
||||
link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
|
||||
if (!link_cmd) {
|
||||
|
@ -513,9 +513,9 @@ int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
|
||||
priv->stations[sta_id].lq = link_cmd;
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -539,13 +539,13 @@ int iwl_update_bcast_station(struct iwl_priv *priv,
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
|
||||
if (priv->stations[sta_id].lq)
|
||||
kfree(priv->stations[sta_id].lq);
|
||||
else
|
||||
IWL_DEBUG_INFO(priv, "Bcast station rate scaling has not been initialized yet.\n");
|
||||
priv->stations[sta_id].lq = link_cmd;
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -572,15 +572,15 @@ int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
|
|||
unsigned long flags;
|
||||
struct iwl_addsta_cmd sta_cmd;
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
/* Remove "disable" flag, to enable Tx for this TID */
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
|
||||
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
|
||||
priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
|
||||
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
|
||||
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
||||
|
||||
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
|
||||
}
|
||||
|
@ -592,20 +592,20 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
|
|||
int sta_id;
|
||||
struct iwl_addsta_cmd sta_cmd;
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
sta_id = iwl_sta_id(sta);
|
||||
if (sta_id == IWL_INVALID_STATION)
|
||||
return -ENXIO;
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
|
||||
priv->stations[sta_id].sta.station_flags_msk = 0;
|
||||
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
|
||||
priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
|
||||
priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
|
||||
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
|
||||
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
||||
|
||||
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
|
||||
}
|
||||
|
@ -617,7 +617,7 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
|
|||
int sta_id;
|
||||
struct iwl_addsta_cmd sta_cmd;
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
sta_id = iwl_sta_id(sta);
|
||||
if (sta_id == IWL_INVALID_STATION) {
|
||||
|
@ -625,13 +625,13 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
|
|||
return -ENXIO;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
|
||||
priv->stations[sta_id].sta.station_flags_msk = 0;
|
||||
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
|
||||
priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
|
||||
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
|
||||
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
||||
|
||||
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
|
||||
}
|
||||
|
@ -640,14 +640,14 @@ static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
|
|||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
|
||||
priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
|
||||
priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
|
||||
priv->stations[sta_id].sta.sta.modify_mask = 0;
|
||||
priv->stations[sta_id].sta.sleep_tx_count = 0;
|
||||
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
|
||||
iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
||||
|
||||
}
|
||||
|
||||
|
@ -655,7 +655,7 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
|
|||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
|
||||
priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
|
||||
priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
|
||||
priv->stations[sta_id].sta.sta.modify_mask =
|
||||
|
@ -663,7 +663,7 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
|
|||
priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
|
||||
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
|
||||
iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
||||
|
||||
}
|
||||
|
||||
|
|
|
@@ -176,24 +176,24 @@ static void iwl_tt_check_exit_ct_kill(unsigned long data)
	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
	unsigned long flags;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
	if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
		return;

	if (tt->state == IWL_TI_CT_KILL) {
		if (priv->thermal_throttle.ct_kill_toggle) {
			iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
			iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
				    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
			priv->thermal_throttle.ct_kill_toggle = false;
		} else {
			iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
			iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
				    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
			priv->thermal_throttle.ct_kill_toggle = true;
		}
		iwl_read32(priv, CSR_UCODE_DRV_GP1);
		spin_lock_irqsave(&priv->reg_lock, flags);
		if (!iwl_grab_nic_access(priv))
			iwl_release_nic_access(priv);
		spin_unlock_irqrestore(&priv->reg_lock, flags);
		iwl_read32(bus(priv), CSR_UCODE_DRV_GP1);
		spin_lock_irqsave(&bus(priv)->reg_lock, flags);
		if (!iwl_grab_nic_access(bus(priv)))
			iwl_release_nic_access(bus(priv));
		spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);

		/* Reschedule the ct_kill timer to occur in
		 * CT_KILL_EXIT_DURATION seconds to ensure we get a

@@ -209,7 +209,7 @@ static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
{
	if (stop) {
		IWL_DEBUG_TEMP(priv, "Stop all queues\n");
		if (priv->mac80211_registered)
		if (priv->shrd->mac80211_registered)
			ieee80211_stop_queues(priv->hw);
		IWL_DEBUG_TEMP(priv,
			"Schedule 5 seconds CT_KILL Timer\n");

@@ -217,7 +217,7 @@ static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
			  jiffies + CT_KILL_EXIT_DURATION * HZ);
	} else {
		IWL_DEBUG_TEMP(priv, "Wake all queues\n");
		if (priv->mac80211_registered)
		if (priv->shrd->mac80211_registered)
			ieee80211_wake_queues(priv->hw);
	}
}

@@ -227,7 +227,7 @@ static void iwl_tt_ready_for_ct_kill(unsigned long data)
	struct iwl_priv *priv = (struct iwl_priv *)data;
	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
	if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
		return;

	/* temperature timer expired, ready to go into CT_KILL state */

@@ -235,7 +235,7 @@ static void iwl_tt_ready_for_ct_kill(unsigned long data)
		IWL_DEBUG_TEMP(priv, "entering CT_KILL state when "
			"temperature timer expired\n");
		tt->state = IWL_TI_CT_KILL;
		set_bit(STATUS_CT_KILL, &priv->status);
		set_bit(STATUS_CT_KILL, &priv->shrd->status);
		iwl_perform_ct_kill_task(priv, true);
	}
}

@@ -313,23 +313,24 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
			tt->tt_power_mode = IWL_POWER_INDEX_5;
			break;
		}
		mutex_lock(&priv->mutex);
		mutex_lock(&priv->shrd->mutex);
		if (old_state == IWL_TI_CT_KILL)
			clear_bit(STATUS_CT_KILL, &priv->status);
			clear_bit(STATUS_CT_KILL, &priv->shrd->status);
		if (tt->state != IWL_TI_CT_KILL &&
		    iwl_power_update_mode(priv, true)) {
			/* TT state not updated
			 * try again during next temperature read
			 */
			if (old_state == IWL_TI_CT_KILL)
				set_bit(STATUS_CT_KILL, &priv->status);
				set_bit(STATUS_CT_KILL, &priv->shrd->status);
			tt->state = old_state;
			IWL_ERR(priv, "Cannot update power mode, "
				"TT state not updated\n");
		} else {
			if (tt->state == IWL_TI_CT_KILL) {
				if (force) {
					set_bit(STATUS_CT_KILL, &priv->status);
					set_bit(STATUS_CT_KILL,
						&priv->shrd->status);
					iwl_perform_ct_kill_task(priv, true);
				} else {
					iwl_prepare_ct_kill_task(priv);

@@ -343,7 +344,7 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
			IWL_DEBUG_TEMP(priv, "Power Index change to %u\n",
					tt->tt_power_mode);
		}
		mutex_unlock(&priv->mutex);
		mutex_unlock(&priv->shrd->mutex);
	}
}


@@ -453,9 +454,9 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
			 * in case get disabled before */
			iwl_set_rxon_ht(priv, &priv->current_ht_config);
		}
		mutex_lock(&priv->mutex);
		mutex_lock(&priv->shrd->mutex);
		if (old_state == IWL_TI_CT_KILL)
			clear_bit(STATUS_CT_KILL, &priv->status);
			clear_bit(STATUS_CT_KILL, &priv->shrd->status);
		if (tt->state != IWL_TI_CT_KILL &&
		    iwl_power_update_mode(priv, true)) {
			/* TT state not updated

@@ -464,7 +465,7 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
			IWL_ERR(priv, "Cannot update power mode, "
				"TT state not updated\n");
			if (old_state == IWL_TI_CT_KILL)
				set_bit(STATUS_CT_KILL, &priv->status);
				set_bit(STATUS_CT_KILL, &priv->shrd->status);
			tt->state = old_state;
		} else {
			IWL_DEBUG_TEMP(priv,

@@ -475,7 +476,8 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
				if (force) {
					IWL_DEBUG_TEMP(priv,
						"Enter IWL_TI_CT_KILL\n");
					set_bit(STATUS_CT_KILL, &priv->status);
					set_bit(STATUS_CT_KILL,
						&priv->shrd->status);
					iwl_perform_ct_kill_task(priv, true);
				} else {
					iwl_prepare_ct_kill_task(priv);

@@ -487,7 +489,7 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
				iwl_perform_ct_kill_task(priv, false);
			}
		}
		mutex_unlock(&priv->mutex);
		mutex_unlock(&priv->shrd->mutex);
	}
}


@@ -506,10 +508,10 @@ static void iwl_bg_ct_enter(struct work_struct *work)
	struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter);
	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
	if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
		return;

	if (!iwl_is_ready(priv))
	if (!iwl_is_ready(priv->shrd))
		return;

	if (tt->state != IWL_TI_CT_KILL) {

@@ -535,10 +537,10 @@ static void iwl_bg_ct_exit(struct work_struct *work)
	struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit);
	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
	if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
		return;

	if (!iwl_is_ready(priv))
	if (!iwl_is_ready(priv->shrd))
		return;

	/* stop ct_kill_exit_tm timer */

@@ -565,20 +567,20 @@ static void iwl_bg_ct_exit(struct work_struct *work)

void iwl_tt_enter_ct_kill(struct iwl_priv *priv)
{
	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
	if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
		return;

	IWL_DEBUG_TEMP(priv, "Queueing critical temperature enter.\n");
	queue_work(priv->workqueue, &priv->ct_enter);
	queue_work(priv->shrd->workqueue, &priv->ct_enter);
}

void iwl_tt_exit_ct_kill(struct iwl_priv *priv)
{
	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
	if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
		return;

	IWL_DEBUG_TEMP(priv, "Queueing critical temperature exit.\n");
	queue_work(priv->workqueue, &priv->ct_exit);
	queue_work(priv->shrd->workqueue, &priv->ct_exit);
}

static void iwl_bg_tt_work(struct work_struct *work)

@@ -586,7 +588,7 @@ static void iwl_bg_tt_work(struct work_struct *work)
	struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
	s32 temp = priv->temperature; /* degrees CELSIUS except specified */

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
	if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
		return;

	if (priv->cfg->base_params->temperature_kelvin)

@@ -600,11 +602,11 @@ static void iwl_bg_tt_work(struct work_struct *work)

void iwl_tt_handler(struct iwl_priv *priv)
{
	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
	if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
		return;

	IWL_DEBUG_TEMP(priv, "Queueing thermal throttling work.\n");
	queue_work(priv->workqueue, &priv->tt_work);
	queue_work(priv->shrd->workqueue, &priv->tt_work);
}

/* Thermal throttling initialization
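The recurring change in this file is that the status bits, the work queue and the driver locks are now reached through priv->shrd. A minimal sketch of the assumed shape of that shared structure follows; the *_demo names are hypothetical and the real definition presumably lives in iwl-shared.h, which this patch includes elsewhere.

#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* hypothetical sketch of the shared-data split assumed by these hunks */
struct iwl_shared_demo {
	unsigned long status;                 /* STATUS_* bit flags */
	struct workqueue_struct *workqueue;   /* driver work items */
	struct mutex mutex;                   /* main driver mutex */
	spinlock_t sta_lock;                  /* protects the station table */
};

struct iwl_priv_demo {
	struct iwl_shared_demo *shrd;         /* data shared with the transport layer */
};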
[File diff not shown because it is too large]
@@ -40,6 +40,7 @@
#include "iwl-agn.h"
#include "iwl-agn-calib.h"
#include "iwl-trans.h"
#include "iwl-fh.h"

static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
	{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,

@@ -84,29 +85,29 @@ static int iwlagn_load_section(struct iwl_priv *priv, const char *name,

	priv->ucode_write_complete = 0;

	iwl_write_direct32(priv,
	iwl_write_direct32(bus(priv),
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(priv,
	iwl_write_direct32(bus(priv),
		FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);

	iwl_write_direct32(priv,
	iwl_write_direct32(bus(priv),
		FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(priv,
	iwl_write_direct32(bus(priv),
		FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		(iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(priv,
	iwl_write_direct32(bus(priv),
		FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
		FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(priv,
	iwl_write_direct32(bus(priv),
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |

@@ -193,7 +194,7 @@ static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
	calib_cfg_cmd.ucd_calib_cfg.flags =
		IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;

	return trans_send_cmd(&priv->trans, &cmd);
	return iwl_trans_send_cmd(trans(priv), &cmd);
}

void iwlagn_rx_calib_result(struct iwl_priv *priv,

@@ -291,7 +292,7 @@ static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
		/* coexistence is disabled */
		memset(&coex_cmd, 0, sizeof(coex_cmd));
	}
	return trans_send_cmd_pdu(&priv->trans,
	return iwl_trans_send_cmd_pdu(trans(priv),
				COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
				sizeof(coex_cmd), &coex_cmd);
}

@@ -324,7 +325,7 @@ void iwlagn_send_prio_tbl(struct iwl_priv *priv)

	memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl,
		sizeof(iwlagn_bt_prio_tbl));
	if (trans_send_cmd_pdu(&priv->trans,
	if (iwl_trans_send_cmd_pdu(trans(priv),
				REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
				sizeof(prio_tbl_cmd), &prio_tbl_cmd))
		IWL_ERR(priv, "failed to send BT prio tbl command\n");

@@ -337,7 +338,7 @@ int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)

	env_cmd.action = action;
	env_cmd.type = type;
	ret = trans_send_cmd_pdu(&priv->trans,
	ret = iwl_trans_send_cmd_pdu(trans(priv),
			REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
			sizeof(env_cmd), &env_cmd);
	if (ret)

@@ -350,7 +351,16 @@ static int iwlagn_alive_notify(struct iwl_priv *priv)
{
	int ret;

	trans_tx_start(&priv->trans);
	if (!priv->tx_cmd_pool)
		priv->tx_cmd_pool =
			kmem_cache_create("iwlagn_dev_cmd",
					  sizeof(struct iwl_device_cmd),
					  sizeof(void *), 0, NULL);

	if (!priv->tx_cmd_pool)
		return -ENOMEM;

	iwl_trans_tx_start(trans(priv));

	ret = iwlagn_send_wimax_coex(priv);
	if (ret)

@@ -369,7 +379,7 @@ static int iwlagn_alive_notify(struct iwl_priv *priv)
 * using sample data 100 bytes apart. If these sample points are good,
 * it's a pretty good bet that everything between them is good, too.
 */
static int iwlcore_verify_inst_sparse(struct iwl_priv *priv,
static int iwl_verify_inst_sparse(struct iwl_priv *priv,
				  struct fw_desc *fw_desc)
{
	__le32 *image = (__le32 *)fw_desc->v_addr;

@@ -383,9 +393,9 @@ static int iwlcore_verify_inst_sparse(struct iwl_priv *priv,
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
		iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR,
			i + IWLAGN_RTC_INST_LOWER_BOUND);
		val = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
		val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image))
			return -EIO;
	}

@@ -404,14 +414,14 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv,

	IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);

	iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
	iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR,
			   IWLAGN_RTC_INST_LOWER_BOUND);

	for (offs = 0;
	     offs < len && errors < 20;
	     offs += sizeof(u32), image++) {
		/* read data comes through single port, auto-incr addr */
		val = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
		val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			IWL_ERR(priv, "uCode INST section at "
				"offset 0x%x, is 0x%x, s/b 0x%x\n",

@@ -427,7 +437,7 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv,
 */
static int iwl_verify_ucode(struct iwl_priv *priv, struct fw_img *img)
{
	if (!iwlcore_verify_inst_sparse(priv, &img->code)) {
	if (!iwl_verify_inst_sparse(priv, &img->code)) {
		IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n");
		return 0;
	}

@@ -478,7 +488,7 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
	int ret;
	enum iwlagn_ucode_type old_type;

	ret = trans_start_device(&priv->trans);
	ret = iwl_trans_start_device(trans(priv));
	if (ret)
		return ret;


@@ -495,7 +505,7 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
		return ret;
	}

	trans_kick_nic(&priv->trans);
	iwl_trans_kick_nic(trans(priv));

	/*
	 * Some things may run in the background now, but we

@@ -545,7 +555,7 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
	struct iwl_notification_wait calib_wait;
	int ret;

	lockdep_assert_held(&priv->mutex);
	lockdep_assert_held(&priv->shrd->mutex);

	/* No init ucode required? Curious, but maybe ok */
	if (!priv->ucode_init.code.len)

@@ -580,6 +590,6 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
	iwlagn_remove_notification(priv, &calib_wait);
 out:
	/* Whatever happened, stop the device */
	trans_stop_device(&priv->trans);
	iwl_trans_stop_device(trans(priv));
	return ret;
}
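The new tx_cmd_pool above is a slab cache created once when the firmware reports alive. A hedged usage sketch follows; iwlagn_demo_alloc_cmd() and iwlagn_demo_free_cmd() are hypothetical helpers, not functions from this patch.

#include <linux/slab.h>

/* allocate one fixed-size device command from the pool (sketch only) */
static struct iwl_device_cmd *iwlagn_demo_alloc_cmd(struct iwl_priv *priv)
{
	return kmem_cache_alloc(priv->tx_cmd_pool, GFP_KERNEL);
}

/* return the command to the pool once the hardware is done with it */
static void iwlagn_demo_free_cmd(struct iwl_priv *priv,
				 struct iwl_device_cmd *cmd)
{
	kmem_cache_free(priv->tx_cmd_pool, cmd);
}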
[File diff not shown because it is too large]
@@ -65,54 +65,9 @@

#include "iwl-dev.h"

/* configuration for the _agn devices */
extern struct iwl_cfg iwl5300_agn_cfg;
extern struct iwl_cfg iwl5100_agn_cfg;
extern struct iwl_cfg iwl5350_agn_cfg;
extern struct iwl_cfg iwl5100_bgn_cfg;
extern struct iwl_cfg iwl5100_abg_cfg;
extern struct iwl_cfg iwl5150_agn_cfg;
extern struct iwl_cfg iwl5150_abg_cfg;
extern struct iwl_cfg iwl6005_2agn_cfg;
extern struct iwl_cfg iwl6005_2abg_cfg;
extern struct iwl_cfg iwl6005_2bg_cfg;
extern struct iwl_cfg iwl1030_bgn_cfg;
extern struct iwl_cfg iwl1030_bg_cfg;
extern struct iwl_cfg iwl6030_2agn_cfg;
extern struct iwl_cfg iwl6030_2abg_cfg;
extern struct iwl_cfg iwl6030_2bgn_cfg;
extern struct iwl_cfg iwl6030_2bg_cfg;
extern struct iwl_cfg iwl6000i_2agn_cfg;
extern struct iwl_cfg iwl6000i_2abg_cfg;
extern struct iwl_cfg iwl6000i_2bg_cfg;
extern struct iwl_cfg iwl6000_3agn_cfg;
extern struct iwl_cfg iwl6050_2agn_cfg;
extern struct iwl_cfg iwl6050_2abg_cfg;
extern struct iwl_cfg iwl6150_bgn_cfg;
extern struct iwl_cfg iwl6150_bg_cfg;
extern struct iwl_cfg iwl1000_bgn_cfg;
extern struct iwl_cfg iwl1000_bg_cfg;
extern struct iwl_cfg iwl100_bgn_cfg;
extern struct iwl_cfg iwl100_bg_cfg;
extern struct iwl_cfg iwl130_bgn_cfg;
extern struct iwl_cfg iwl130_bg_cfg;
extern struct iwl_cfg iwl2000_2bgn_cfg;
extern struct iwl_cfg iwl2000_2bg_cfg;
extern struct iwl_cfg iwl2030_2bgn_cfg;
extern struct iwl_cfg iwl2030_2bg_cfg;
extern struct iwl_cfg iwl6035_2agn_cfg;
extern struct iwl_cfg iwl6035_2abg_cfg;
extern struct iwl_cfg iwl6035_2bg_cfg;
extern struct iwl_cfg iwl105_bg_cfg;
extern struct iwl_cfg iwl105_bgn_cfg;
extern struct iwl_cfg iwl135_bg_cfg;
extern struct iwl_cfg iwl135_bgn_cfg;

extern struct iwl_mod_params iwlagn_mod_params;

extern struct ieee80211_ops iwlagn_hw_ops;

int iwl_reset_ict(struct iwl_priv *priv);
int iwl_reset_ict(struct iwl_trans *trans);

static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
{

@@ -122,10 +77,6 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
	hdr->data_valid = 1;
}

/* tx queue */
void iwl_free_tfds_in_queue(struct iwl_priv *priv,
			    int sta_id, int tid, int freed);

/* RXON */
int iwlagn_set_pan_params(struct iwl_priv *priv);
int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);

@@ -147,13 +98,10 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
			     enum iwlagn_ucode_type ucode_type);

/* lib */
void iwl_check_abort_status(struct iwl_priv *priv,
			    u8 frame_count, u32 status);
int iwlagn_hw_valid_rtc_data_addr(u32 addr);
int iwlagn_send_tx_power(struct iwl_priv *priv);
void iwlagn_temperature(struct iwl_priv *priv);
u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv);
int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv);
int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
int iwlagn_send_beacon_cmd(struct iwl_priv *priv);

@@ -165,21 +113,14 @@ void iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);


/* tx */
void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			 int index);
void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
				 struct ieee80211_tx_info *info);
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u16 *ssn);
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid);
int iwlagn_txq_check_empty(struct iwl_priv *priv,
			   int sta_id, u8 tid, int txq_id);
void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
				   struct iwl_rx_mem_buffer *rxb);
void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);

static inline u32 iwl_tx_status_to_mac80211(u32 status)
{

@@ -287,7 +228,7 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
}

/* eeprom */
void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv);
void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv);
void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);

/* notification wait support */
@@ -60,16 +60,22 @@
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_pci_h__
#define __iwl_pci_h__
#ifndef __iwl_bus_h__
#define __iwl_bus_h__

/*This file includes the declaration that are exported from the bus layer */

#include <linux/types.h>
#include <linux/spinlock.h>

struct iwl_shared;
struct iwl_bus;

/**
 * struct iwl_bus_ops - bus specific operations
 * @get_pm_support: must returns true if the bus can go to sleep
 * @apm_config: will be called during the config of the APM configuration
 * @set_drv_data: set the drv_data pointer to the bus layer
 * @set_drv_data: set the shared data pointer to the bus layer
 * @get_hw_id: prints the hw_id in the provided buffer
 * @write8: write a byte to register at offset ofs
 * @write32: write a dword to register at offset ofs

@@ -78,20 +84,29 @@ struct iwl_bus;
struct iwl_bus_ops {
	bool (*get_pm_support)(struct iwl_bus *bus);
	void (*apm_config)(struct iwl_bus *bus);
	void (*set_drv_data)(struct iwl_bus *bus, void *drv_data);
	void (*set_drv_data)(struct iwl_bus *bus, struct iwl_shared *shrd);
	void (*get_hw_id)(struct iwl_bus *bus, char buf[], int buf_len);
	void (*write8)(struct iwl_bus *bus, u32 ofs, u8 val);
	void (*write32)(struct iwl_bus *bus, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_bus *bus, u32 ofs);
};

/**
 * struct iwl_bus - bus common data
 * @dev - pointer to struct device * that represent the device
 * @ops - pointer to iwl_bus_ops
 * @shrd - pointer to iwl_shared which holds shared data from the upper layer
 * @irq - the irq number for the device
 * @reg_lock - protect hw register access
 */
struct iwl_bus {
	/* Common data to all buses */
	void *drv_data; /* driver's context */
	struct device *dev;
	struct iwl_bus_ops *ops;
	const struct iwl_bus_ops *ops;
	struct iwl_shared *shrd;

	unsigned int irq;
	spinlock_t reg_lock;

	/* pointer to bus specific struct */
	/*Ensure that this pointer will always be aligned to sizeof pointer */

@@ -108,9 +123,10 @@ static inline void bus_apm_config(struct iwl_bus *bus)
	bus->ops->apm_config(bus);
}

static inline void bus_set_drv_data(struct iwl_bus *bus, void *drv_data)
static inline void bus_set_drv_data(struct iwl_bus *bus,
				    struct iwl_shared *shrd)
{
	bus->ops->set_drv_data(bus, drv_data);
	bus->ops->set_drv_data(bus, shrd);
}

static inline void bus_get_hw_id(struct iwl_bus *bus, char buf[], int buf_len)

@@ -136,4 +152,4 @@ static inline u32 bus_read32(struct iwl_bus *bus, u32 ofs)
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);

#endif
#endif /* __iwl_bus_h__ */
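For reference, the bus_*() helpers in the hunk above are thin wrappers that dispatch through the iwl_bus_ops table, so the generic driver layer never touches the PCI backend directly. A minimal standalone sketch of that pattern, based only on the hunk itself (plain C; every example_* name here is invented for illustration and is not driver code):

/* Illustrative only: ops-table + inline-wrapper dispatch, as in iwl_bus_ops. */
#include <stdio.h>

struct example_bus;

struct example_bus_ops {
	/* one bus-specific hook, mirroring write32 in iwl_bus_ops */
	void (*write32)(struct example_bus *bus, unsigned int ofs, unsigned int val);
};

struct example_bus {
	const struct example_bus_ops *ops;	/* filled in by the bus backend */
};

/* the generic layer only ever calls the wrapper */
static inline void example_bus_write32(struct example_bus *bus,
				       unsigned int ofs, unsigned int val)
{
	bus->ops->write32(bus, ofs, val);
}

/* a pretend PCI backend */
static void example_pci_write32(struct example_bus *bus,
				unsigned int ofs, unsigned int val)
{
	printf("pci: write32 ofs=0x%x val=0x%x\n", ofs, val);
}

static const struct example_bus_ops example_pci_ops = {
	.write32 = example_pci_write32,
};

int main(void)
{
	struct example_bus bus = { .ops = &example_pci_ops };

	example_bus_write32(&bus, 0x24, 0x1);	/* dispatches to the PCI backend */
	return 0;
}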
@@ -69,6 +69,9 @@
#ifndef __iwl_commands_h__
#define __iwl_commands_h__

#include <linux/etherdevice.h>
#include <linux/ieee80211.h>

struct iwl_priv;

/* uCode version contains 4 values: Major/Minor/API/Serial */

@@ -670,7 +673,6 @@ struct iwl_rxon_assoc_cmd {

#define IWL_CONN_MAX_LISTEN_INTERVAL	10
#define IWL_MAX_UCODE_BEACON_INTERVAL	4 /* 4096 */
#define IWL39_MAX_UCODE_BEACON_INTERVAL	1 /* 1024 */

/*
 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)

@@ -806,6 +808,7 @@ struct iwl_qosparam_cmd {
#define IWLAGN_STATION_COUNT	16

#define IWL_INVALID_STATION	255
#define IWL_MAX_TID_COUNT	9

#define STA_FLG_TX_RATE_MSK		cpu_to_le32(1 << 2)
#define STA_FLG_PWR_SAVE_MSK		cpu_to_le32(1 << 8)

@@ -3909,6 +3912,7 @@ struct iwlagn_wowlan_kek_kck_material_cmd {
 * Union of all expected notifications/responses:
 *
 *****************************************************************************/
#define FH_RSCSR_FRAME_SIZE_MSK	(0x00003FFF)	/* bits 0-13 */

struct iwl_rx_packet {
	/*
@@ -42,22 +42,21 @@
#include "iwl-sta.h"
#include "iwl-agn.h"
#include "iwl-helpers.h"
#include "iwl-shared.h"
#include "iwl-agn.h"
#include "iwl-trans.h"

u32 iwl_debug_level;

const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
static void iwl_init_ht_hw_capab(const struct iwl_priv *priv,
				 struct ieee80211_sta_ht_cap *ht_info,
				 enum ieee80211_band band)
{
	u16 max_bit_rate = 0;
	u8 rx_chains_num = priv->hw_params.rx_chains_num;
	u8 tx_chains_num = priv->hw_params.tx_chains_num;
	u8 rx_chains_num = hw_params(priv).rx_chains_num;
	u8 tx_chains_num = hw_params(priv).tx_chains_num;

	ht_info->cap = 0;
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));

@@ -69,7 +68,7 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
	ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
	max_bit_rate = MAX_BIT_RATE_20_MHZ;
	if (priv->hw_params.ht40_channel & BIT(band)) {
	if (hw_params(priv).ht40_channel & BIT(band)) {
		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
		ht_info->mcs.rx_mask[4] = 0x01;

@@ -107,9 +106,9 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
}

/**
 * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom
 * iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom
 */
int iwlcore_init_geos(struct iwl_priv *priv)
int iwl_init_geos(struct iwl_priv *priv)
{
	struct iwl_channel_info *ch;
	struct ieee80211_supported_band *sband;

@@ -122,7 +121,7 @@ int iwlcore_init_geos(struct iwl_priv *priv)
	if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
	    priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
		IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
		set_bit(STATUS_GEO_CONFIGURED, &priv->status);
		set_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status);
		return 0;
	}

@@ -146,7 +145,7 @@ int iwlcore_init_geos(struct iwl_priv *priv)
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;

	if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
		iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
		iwl_init_ht_hw_capab(priv, &sband->ht_cap,
					 IEEE80211_BAND_5GHZ);

	sband = &priv->bands[IEEE80211_BAND_2GHZ];

@@ -156,7 +155,7 @@ int iwlcore_init_geos(struct iwl_priv *priv)
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY;

	if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
		iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
		iwl_init_ht_hw_capab(priv, &sband->ht_cap,
					 IEEE80211_BAND_2GHZ);

	priv->ieee_channels = channels;

@@ -222,19 +221,19 @@ int iwlcore_init_geos(struct iwl_priv *priv)
			priv->bands[IEEE80211_BAND_2GHZ].n_channels,
			priv->bands[IEEE80211_BAND_5GHZ].n_channels);

	set_bit(STATUS_GEO_CONFIGURED, &priv->status);
	set_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status);

	return 0;
}

/*
 * iwlcore_free_geos - undo allocations in iwlcore_init_geos
 * iwl_free_geos - undo allocations in iwl_init_geos
 */
void iwlcore_free_geos(struct iwl_priv *priv)
void iwl_free_geos(struct iwl_priv *priv)
{
	kfree(priv->ieee_channels);
	kfree(priv->ieee_rates);
	clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
	clear_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status);
}

static bool iwl_is_channel_extension(struct iwl_priv *priv,
@ -326,7 +325,7 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
|
|||
|
||||
conf = ieee80211_get_hw_conf(priv->hw);
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
|
||||
|
||||
|
@ -360,7 +359,7 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
|
|||
beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
|
||||
} else {
|
||||
beacon_int = iwl_adjust_beacon_interval(beacon_int,
|
||||
priv->hw_params.max_beacon_itrvl * TIME_UNIT);
|
||||
IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT);
|
||||
ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
|
||||
}
|
||||
|
||||
|
@ -379,7 +378,7 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
|
|||
le32_to_cpu(ctx->timing.beacon_init_val),
|
||||
le16_to_cpu(ctx->timing.atim_window));
|
||||
|
||||
return trans_send_cmd_pdu(&priv->trans, ctx->rxon_timing_cmd,
|
||||
return iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_timing_cmd,
|
||||
CMD_SYNC, sizeof(ctx->timing), &ctx->timing);
|
||||
}
|
||||
|
||||
|
@ -809,10 +808,11 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
|
|||
*/
|
||||
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
|
||||
|
||||
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
|
||||
if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
|
||||
return;
|
||||
|
||||
if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
|
||||
if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
|
||||
&priv->shrd->status))
|
||||
ieee80211_chswitch_done(ctx->vif, is_success);
|
||||
}
|
||||
|
||||
|
@ -857,16 +857,16 @@ void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
|
|||
unsigned long reload_jiffies;
|
||||
|
||||
/* Set the FW error flag -- cleared on iwl_down */
|
||||
set_bit(STATUS_FW_ERROR, &priv->status);
|
||||
set_bit(STATUS_FW_ERROR, &priv->shrd->status);
|
||||
|
||||
/* Cancel currently queued command. */
|
||||
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
|
||||
clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
|
||||
|
||||
iwlagn_abort_notification_waits(priv);
|
||||
|
||||
/* Keep the restart process from trying to send host
|
||||
* commands by clearing the ready bit */
|
||||
clear_bit(STATUS_READY, &priv->status);
|
||||
clear_bit(STATUS_READY, &priv->shrd->status);
|
||||
|
||||
wake_up_interruptible(&priv->wait_command_queue);
|
||||
|
||||
|
@ -891,63 +891,26 @@ void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
|
|||
priv->reload_count = 0;
|
||||
}
|
||||
|
||||
if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
|
||||
if (!test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
|
||||
if (iwlagn_mod_params.restart_fw) {
|
||||
IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
|
||||
IWL_DEBUG_FW_ERRORS(priv,
|
||||
"Restarting adapter due to uCode error.\n");
|
||||
queue_work(priv->workqueue, &priv->restart);
|
||||
queue_work(priv->shrd->workqueue, &priv->restart);
|
||||
} else
|
||||
IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
|
||||
IWL_DEBUG_FW_ERRORS(priv,
|
||||
"Detected FW error, but not restarting\n");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* iwl_irq_handle_error - called for HW or SW error interrupt from card
|
||||
*/
|
||||
void iwl_irq_handle_error(struct iwl_priv *priv)
|
||||
{
|
||||
/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
|
||||
if (priv->cfg->internal_wimax_coex &&
|
||||
(!(iwl_read_prph(priv, APMG_CLK_CTRL_REG) &
|
||||
APMS_CLK_VAL_MRB_FUNC_MODE) ||
|
||||
(iwl_read_prph(priv, APMG_PS_CTRL_REG) &
|
||||
APMG_PS_CTRL_VAL_RESET_REQ))) {
|
||||
/*
|
||||
* Keep the restart process from trying to send host
|
||||
* commands by clearing the ready bit.
|
||||
*/
|
||||
clear_bit(STATUS_READY, &priv->status);
|
||||
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
|
||||
wake_up_interruptible(&priv->wait_command_queue);
|
||||
IWL_ERR(priv, "RF is used by WiMAX\n");
|
||||
return;
|
||||
}
|
||||
|
||||
IWL_ERR(priv, "Loaded firmware version: %s\n",
|
||||
priv->hw->wiphy->fw_version);
|
||||
|
||||
iwl_dump_nic_error_log(priv);
|
||||
iwl_dump_csr(priv);
|
||||
iwl_dump_fh(priv, NULL, false);
|
||||
iwl_dump_nic_event_log(priv, false, NULL, false);
|
||||
#ifdef CONFIG_IWLWIFI_DEBUG
|
||||
if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
|
||||
iwl_print_rx_config_cmd(priv,
|
||||
&priv->contexts[IWL_RXON_CTX_BSS]);
|
||||
#endif
|
||||
|
||||
iwlagn_fw_error(priv, false);
|
||||
}
|
||||
|
||||
static int iwl_apm_stop_master(struct iwl_priv *priv)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
/* stop device's busmaster DMA activity */
|
||||
iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
|
||||
iwl_set_bit(bus(priv), CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
|
||||
|
||||
ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
|
||||
ret = iwl_poll_bit(bus(priv), CSR_RESET,
|
||||
CSR_RESET_REG_FLAG_MASTER_DISABLED,
|
||||
CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
|
||||
if (ret)
|
||||
IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
|
||||
|
@ -961,13 +924,13 @@ void iwl_apm_stop(struct iwl_priv *priv)
|
|||
{
|
||||
IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
|
||||
|
||||
clear_bit(STATUS_DEVICE_ENABLED, &priv->status);
|
||||
clear_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status);
|
||||
|
||||
/* Stop device's DMA activity */
|
||||
iwl_apm_stop_master(priv);
|
||||
|
||||
/* Reset the entire device */
|
||||
iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
|
||||
iwl_set_bit(bus(priv), CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
|
||||
|
||||
udelay(10);
|
||||
|
||||
|
@ -975,7 +938,7 @@ void iwl_apm_stop(struct iwl_priv *priv)
|
|||
* Clear "initialization complete" bit to move adapter from
|
||||
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
|
||||
*/
|
||||
iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
|
||||
iwl_clear_bit(bus(priv), CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
|
||||
}
|
||||
|
||||
|
||||
|
@ -995,45 +958,45 @@ int iwl_apm_init(struct iwl_priv *priv)
|
|||
*/
|
||||
|
||||
/* Disable L0S exit timer (platform NMI Work/Around) */
|
||||
iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
|
||||
iwl_set_bit(bus(priv), CSR_GIO_CHICKEN_BITS,
|
||||
CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
|
||||
|
||||
/*
|
||||
* Disable L0s without affecting L1;
|
||||
* don't wait for ICH L0s (ICH bug W/A)
|
||||
*/
|
||||
iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
|
||||
iwl_set_bit(bus(priv), CSR_GIO_CHICKEN_BITS,
|
||||
CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
|
||||
|
||||
/* Set FH wait threshold to maximum (HW error during stress W/A) */
|
||||
iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
|
||||
iwl_set_bit(bus(priv), CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
|
||||
|
||||
/*
|
||||
* Enable HAP INTA (interrupt from management bus) to
|
||||
* wake device's PCI Express link L1a -> L0s
|
||||
*/
|
||||
iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
|
||||
iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
|
||||
CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
|
||||
|
||||
bus_apm_config(priv->bus);
|
||||
|
||||
/* Configure analog phase-lock-loop before activating to D0A */
|
||||
if (priv->cfg->base_params->pll_cfg_val)
|
||||
iwl_set_bit(priv, CSR_ANA_PLL_CFG,
|
||||
iwl_set_bit(bus(priv), CSR_ANA_PLL_CFG,
|
||||
priv->cfg->base_params->pll_cfg_val);
|
||||
|
||||
/*
|
||||
* Set "initialization complete" bit to move adapter from
|
||||
* D0U* --> D0A* (powered-up active) state.
|
||||
*/
|
||||
iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
|
||||
iwl_set_bit(bus(priv), CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
|
||||
|
||||
/*
|
||||
* Wait for clock stabilization; once stabilized, access to
|
||||
* device-internal resources is supported, e.g. iwl_write_prph()
|
||||
* and accesses to uCode SRAM.
|
||||
*/
|
||||
ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
|
||||
ret = iwl_poll_bit(bus(priv), CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
|
||||
if (ret < 0) {
|
||||
|
@ -1048,14 +1011,14 @@ int iwl_apm_init(struct iwl_priv *priv)
|
|||
* do not disable clocks. This preserves any hardware bits already
|
||||
* set by default in "CLK_CTRL_REG" after reset.
|
||||
*/
|
||||
iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
|
||||
iwl_write_prph(bus(priv), APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
|
||||
udelay(20);
|
||||
|
||||
/* Disable L1-Active */
|
||||
iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
|
||||
iwl_set_bits_prph(bus(priv), APMG_PCIDEV_STT_REG,
|
||||
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
|
||||
|
||||
set_bit(STATUS_DEVICE_ENABLED, &priv->status);
|
||||
set_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
|
@ -1069,7 +1032,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
|
|||
bool defer;
|
||||
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
if (priv->tx_power_user_lmt == tx_power && !force)
|
||||
return 0;
|
||||
|
@ -1089,7 +1052,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!iwl_is_ready_rf(priv))
|
||||
if (!iwl_is_ready_rf(priv->shrd))
|
||||
return -EIO;
|
||||
|
||||
/* scan complete and commit_rxon use tx_power_next value,
|
||||
|
@ -1097,7 +1060,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
|
|||
priv->tx_power_next = tx_power;
|
||||
|
||||
/* do not set tx power when scanning or channel changing */
|
||||
defer = test_bit(STATUS_SCANNING, &priv->status) ||
|
||||
defer = test_bit(STATUS_SCANNING, &priv->shrd->status) ||
|
||||
memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
|
||||
if (defer && !force) {
|
||||
IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
|
||||
|
@ -1135,7 +1098,7 @@ void iwl_send_bt_config(struct iwl_priv *priv)
|
|||
IWL_DEBUG_INFO(priv, "BT coex %s\n",
|
||||
(bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
|
||||
|
||||
if (trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
|
||||
if (iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
|
||||
CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd))
|
||||
IWL_ERR(priv, "failed to send BT Coex Config\n");
|
||||
}
|
||||
|
@ -1148,22 +1111,17 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
|
|||
};
|
||||
|
||||
if (flags & CMD_ASYNC)
|
||||
return trans_send_cmd_pdu(&priv->trans, REPLY_STATISTICS_CMD,
|
||||
return iwl_trans_send_cmd_pdu(trans(priv), REPLY_STATISTICS_CMD,
|
||||
CMD_ASYNC,
|
||||
sizeof(struct iwl_statistics_cmd),
|
||||
&statistics_cmd);
|
||||
else
|
||||
return trans_send_cmd_pdu(&priv->trans, REPLY_STATISTICS_CMD,
|
||||
return iwl_trans_send_cmd_pdu(trans(priv), REPLY_STATISTICS_CMD,
|
||||
CMD_SYNC,
|
||||
sizeof(struct iwl_statistics_cmd),
|
||||
&statistics_cmd);
|
||||
}
|
||||
|
||||
void iwl_clear_isr_stats(struct iwl_priv *priv)
|
||||
{
|
||||
memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
|
||||
}
|
||||
|
||||
int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
|
||||
const struct ieee80211_tx_queue_params *params)
|
||||
{
|
||||
|
@ -1174,7 +1132,7 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
|
|||
|
||||
IWL_DEBUG_MAC80211(priv, "enter\n");
|
||||
|
||||
if (!iwl_is_ready_rf(priv)) {
|
||||
if (!iwl_is_ready_rf(priv->shrd)) {
|
||||
IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
@ -1186,7 +1144,7 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
|
|||
|
||||
q = AC_NUM - 1 - queue;
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->lock, flags);
|
||||
|
||||
/*
|
||||
* MULTI-FIXME
|
||||
|
@ -1204,7 +1162,7 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
|
|||
ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->lock, flags);
|
||||
|
||||
IWL_DEBUG_MAC80211(priv, "leave\n");
|
||||
return 0;
|
||||
|
@ -1232,7 +1190,7 @@ static int iwl_setup_interface(struct iwl_priv *priv,
|
|||
struct ieee80211_vif *vif = ctx->vif;
|
||||
int err;
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
/*
|
||||
* This variable will be correct only when there's just
|
||||
|
@ -1276,11 +1234,11 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
|
|||
|
||||
cancel_delayed_work_sync(&priv->hw_roc_disable_work);
|
||||
|
||||
mutex_lock(&priv->mutex);
|
||||
mutex_lock(&priv->shrd->mutex);
|
||||
|
||||
iwlagn_disable_roc(priv);
|
||||
|
||||
if (!iwl_is_ready_rf(priv)) {
|
||||
if (!iwl_is_ready_rf(priv->shrd)) {
|
||||
IWL_WARN(priv, "Try to add interface when device not ready\n");
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
|
@ -1323,7 +1281,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
|
|||
ctx->vif = NULL;
|
||||
priv->iw_mode = NL80211_IFTYPE_STATION;
|
||||
out:
|
||||
mutex_unlock(&priv->mutex);
|
||||
mutex_unlock(&priv->shrd->mutex);
|
||||
|
||||
IWL_DEBUG_MAC80211(priv, "leave\n");
|
||||
return err;
|
||||
|
@ -1335,7 +1293,7 @@ static void iwl_teardown_interface(struct iwl_priv *priv,
|
|||
{
|
||||
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
if (priv->scan_vif == vif) {
|
||||
iwl_scan_cancel_timeout(priv, 200);
|
||||
|
@ -1367,14 +1325,14 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
|
|||
|
||||
IWL_DEBUG_MAC80211(priv, "enter\n");
|
||||
|
||||
mutex_lock(&priv->mutex);
|
||||
mutex_lock(&priv->shrd->mutex);
|
||||
|
||||
WARN_ON(ctx->vif != vif);
|
||||
ctx->vif = NULL;
|
||||
|
||||
iwl_teardown_interface(priv, vif, false);
|
||||
|
||||
mutex_unlock(&priv->mutex);
|
||||
mutex_unlock(&priv->shrd->mutex);
|
||||
|
||||
IWL_DEBUG_MAC80211(priv, "leave\n");
|
||||
|
||||
|
@ -1398,7 +1356,7 @@ int iwl_alloc_traffic_mem(struct iwl_priv *priv)
|
|||
{
|
||||
u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
|
||||
|
||||
if (iwl_debug_level & IWL_DL_TX) {
|
||||
if (iwl_get_debug_level(priv->shrd) & IWL_DL_TX) {
|
||||
if (!priv->tx_traffic) {
|
||||
priv->tx_traffic =
|
||||
kzalloc(traffic_size, GFP_KERNEL);
|
||||
|
@ -1406,7 +1364,7 @@ int iwl_alloc_traffic_mem(struct iwl_priv *priv)
|
|||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
if (iwl_debug_level & IWL_DL_RX) {
|
||||
if (iwl_get_debug_level(priv->shrd) & IWL_DL_RX) {
|
||||
if (!priv->rx_traffic) {
|
||||
priv->rx_traffic =
|
||||
kzalloc(traffic_size, GFP_KERNEL);
|
||||
|
@ -1433,7 +1391,7 @@ void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
|
|||
__le16 fc;
|
||||
u16 len;
|
||||
|
||||
if (likely(!(iwl_debug_level & IWL_DL_TX)))
|
||||
if (likely(!(iwl_get_debug_level(priv->shrd) & IWL_DL_TX)))
|
||||
return;
|
||||
|
||||
if (!priv->tx_traffic)
|
||||
|
@ -1457,7 +1415,7 @@ void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
|
|||
__le16 fc;
|
||||
u16 len;
|
||||
|
||||
if (likely(!(iwl_debug_level & IWL_DL_RX)))
|
||||
if (likely(!(iwl_get_debug_level(priv->shrd) & IWL_DL_RX)))
|
||||
return;
|
||||
|
||||
if (!priv->rx_traffic)
|
||||
|
@ -1614,7 +1572,7 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
|
|||
|
||||
static void iwl_force_rf_reset(struct iwl_priv *priv)
|
||||
{
|
||||
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
|
||||
if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
|
||||
return;
|
||||
|
||||
if (!iwl_is_any_associated(priv)) {
|
||||
|
@ -1639,7 +1597,7 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
|
|||
{
|
||||
struct iwl_force_reset *force_reset;
|
||||
|
||||
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
|
||||
if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
|
||||
return -EINVAL;
|
||||
|
||||
if (mode >= IWL_MAX_FORCE_RESET) {
|
||||
|
@ -1698,9 +1656,9 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
|||
|
||||
newtype = ieee80211_iftype_p2p(newtype, newp2p);
|
||||
|
||||
mutex_lock(&priv->mutex);
|
||||
mutex_lock(&priv->shrd->mutex);
|
||||
|
||||
if (!ctx->vif || !iwl_is_ready_rf(priv)) {
|
||||
if (!ctx->vif || !iwl_is_ready_rf(priv->shrd)) {
|
||||
/*
|
||||
* Huh? But wait ... this can maybe happen when
|
||||
* we're in the middle of a firmware restart!
|
||||
|
@ -1762,36 +1720,16 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
|||
err = 0;
|
||||
|
||||
out:
|
||||
mutex_unlock(&priv->mutex);
|
||||
mutex_unlock(&priv->shrd->mutex);
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
* On every watchdog tick we check (latest) time stamp. If it does not
|
||||
* change during timeout period and queue is not empty we reset firmware.
|
||||
*/
|
||||
static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt)
|
||||
static inline int iwl_check_stuck_queue(struct iwl_priv *priv, int txq)
|
||||
{
|
||||
struct iwl_tx_queue *txq = &priv->txq[cnt];
|
||||
struct iwl_queue *q = &txq->q;
|
||||
unsigned long timeout;
|
||||
int ret;
|
||||
|
||||
if (q->read_ptr == q->write_ptr) {
|
||||
txq->time_stamp = jiffies;
|
||||
return 0;
|
||||
}
|
||||
|
||||
timeout = txq->time_stamp +
|
||||
msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
|
||||
|
||||
if (time_after(jiffies, timeout)) {
|
||||
IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
|
||||
q->id, priv->cfg->base_params->wd_timeout);
|
||||
ret = iwl_force_reset(priv, IWL_FW_RESET, false);
|
||||
if (iwl_trans_check_stuck_queue(trans(priv), txq)) {
|
||||
int ret = iwl_force_reset(priv, IWL_FW_RESET, false);
|
||||
return (ret == -EAGAIN) ? 0 : 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1811,7 +1749,7 @@ void iwl_bg_watchdog(unsigned long data)
|
|||
int cnt;
|
||||
unsigned long timeout;
|
||||
|
||||
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
|
||||
if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
|
||||
return;
|
||||
|
||||
timeout = priv->cfg->base_params->wd_timeout;
|
||||
|
@ -1819,14 +1757,14 @@ void iwl_bg_watchdog(unsigned long data)
|
|||
return;
|
||||
|
||||
/* monitor and check for stuck cmd queue */
|
||||
if (iwl_check_stuck_queue(priv, priv->cmd_queue))
|
||||
if (iwl_check_stuck_queue(priv, priv->shrd->cmd_queue))
|
||||
return;
|
||||
|
||||
/* monitor and check for other stuck queues */
|
||||
if (iwl_is_any_associated(priv)) {
|
||||
for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
|
||||
for (cnt = 0; cnt < hw_params(priv).max_txq_num; cnt++) {
|
||||
/* skip as we already checked the command queue */
|
||||
if (cnt == priv->cmd_queue)
|
||||
if (cnt == priv->shrd->cmd_queue)
|
||||
continue;
|
||||
if (iwl_check_stuck_queue(priv, cnt))
|
||||
return;
|
||||
|
@@ -1865,12 +1803,12 @@ u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval)

	quot = (usec / interval) &
		(iwl_beacon_time_mask_high(priv,
		priv->hw_params.beacon_time_tsf_bits) >>
		priv->hw_params.beacon_time_tsf_bits);
		hw_params(priv).beacon_time_tsf_bits) >>
		hw_params(priv).beacon_time_tsf_bits);
	rem = (usec % interval) & iwl_beacon_time_mask_low(priv,
				priv->hw_params.beacon_time_tsf_bits);
				hw_params(priv).beacon_time_tsf_bits);

	return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
	return (quot << hw_params(priv).beacon_time_tsf_bits) + rem;
}

/* base is usually what we get from ucode with each received frame,

@@ -1880,64 +1818,50 @@ __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
			   u32 addon, u32 beacon_interval)
{
	u32 base_low = base & iwl_beacon_time_mask_low(priv,
				priv->hw_params.beacon_time_tsf_bits);
				hw_params(priv).beacon_time_tsf_bits);
	u32 addon_low = addon & iwl_beacon_time_mask_low(priv,
				priv->hw_params.beacon_time_tsf_bits);
				hw_params(priv).beacon_time_tsf_bits);
	u32 interval = beacon_interval * TIME_UNIT;
	u32 res = (base & iwl_beacon_time_mask_high(priv,
				priv->hw_params.beacon_time_tsf_bits)) +
				hw_params(priv).beacon_time_tsf_bits)) +
				(addon & iwl_beacon_time_mask_high(priv,
				priv->hw_params.beacon_time_tsf_bits));
				hw_params(priv).beacon_time_tsf_bits));

	if (base_low > addon_low)
		res += base_low - addon_low;
	else if (base_low < addon_low) {
		res += interval + base_low - addon_low;
		res += (1 << priv->hw_params.beacon_time_tsf_bits);
		res += (1 << hw_params(priv).beacon_time_tsf_bits);
	} else
		res += (1 << priv->hw_params.beacon_time_tsf_bits);
		res += (1 << hw_params(priv).beacon_time_tsf_bits);

	return cpu_to_le32(res);
}

#ifdef CONFIG_PM

int iwl_suspend(struct iwl_priv *priv)
void iwl_start_tx_ba_trans_ready(struct iwl_priv *priv,
				 enum iwl_rxon_context_id ctx,
				 u8 sta_id, u8 tid)
{
	/*
	 * This function is called when system goes into suspend state
	 * mac80211 will call iwl_mac_stop() from the mac80211 suspend function
	 * first but since iwl_mac_stop() has no knowledge of who the caller is,
	 * it will not call apm_ops.stop() to stop the DMA operation.
	 * Calling apm_ops.stop here to make sure we stop the DMA.
	 *
	 * But of course ... if we have configured WoWLAN then we did other
	 * things already :-)
	 */
	if (!priv->wowlan)
		iwl_apm_stop(priv);
	struct ieee80211_vif *vif = priv->contexts[ctx].vif;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;

	return 0;
	if (ctx == NUM_IWL_RXON_CTX)
		ctx = priv->stations[sta_id].ctxid;
	vif = priv->contexts[ctx].vif;

	ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
}

int iwl_resume(struct iwl_priv *priv)
void iwl_stop_tx_ba_trans_ready(struct iwl_priv *priv,
				enum iwl_rxon_context_id ctx,
				u8 sta_id, u8 tid)
{
	bool hw_rfkill = false;
	struct ieee80211_vif *vif;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;

	iwl_enable_interrupts(priv);
	if (ctx == NUM_IWL_RXON_CTX)
		ctx = priv->stations[sta_id].ctxid;
	vif = priv->contexts[ctx].vif;

	if (!(iwl_read32(priv, CSR_GP_CNTRL) &
	      CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
		hw_rfkill = true;

	if (hw_rfkill)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);

	wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);

	return 0;
	ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
}

#endif /* CONFIG_PM */
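The iwl_usecs_to_beacons()/iwl_add_beacon_time() pair above works on a packed value whose high bits count whole beacon intervals and whose low beacon_time_tsf_bits hold the microsecond remainder. A standalone restatement of that arithmetic, assuming a made-up 22-bit low field purely for illustration (the real width comes from hw_params(priv) and may differ per device; ex_* names are invented):

/* Illustrative only: the split "beacons | usec remainder" arithmetic above. */
#include <stdint.h>
#include <stdio.h>

#define EX_TSF_BITS	22			/* assumed low-field width */
#define EX_LOW_MASK	((1u << EX_TSF_BITS) - 1)
#define EX_TIME_UNIT	1024			/* usec per TU, as in TIME_UNIT */

static uint32_t ex_usecs_to_beacons(uint32_t usec, uint32_t beacon_interval)
{
	uint32_t interval = beacon_interval * EX_TIME_UNIT;
	uint32_t quot = usec / interval;		/* whole beacon intervals */
	uint32_t rem = (usec % interval) & EX_LOW_MASK;	/* leftover microseconds */

	return (quot << EX_TSF_BITS) + rem;
}

static uint32_t ex_add_beacon_time(uint32_t base, uint32_t addon,
				   uint32_t beacon_interval)
{
	uint32_t interval = beacon_interval * EX_TIME_UNIT;
	uint32_t base_low = base & EX_LOW_MASK;
	uint32_t addon_low = addon & EX_LOW_MASK;
	uint32_t res = (base & ~EX_LOW_MASK) + (addon & ~EX_LOW_MASK);

	/* same low-field handling as iwl_add_beacon_time() above */
	if (base_low > addon_low)
		res += base_low - addon_low;
	else if (base_low < addon_low)
		res += interval + base_low - addon_low + (1u << EX_TSF_BITS);
	else
		res += 1u << EX_TSF_BITS;

	return res;
}

int main(void)
{
	/* 250000 usec at a 100 TU beacon interval: 2 full beacons + remainder */
	uint32_t base = ex_usecs_to_beacons(250000, 100);
	uint32_t addon = ex_usecs_to_beacons(50000, 100);

	printf("base=0x%08x combined=0x%08x\n",
	       base, ex_add_beacon_time(base, addon, 100));
	return 0;
}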
@@ -71,11 +71,6 @@
struct iwl_host_cmd;
struct iwl_cmd;


#define IWLWIFI_VERSION "in-tree:"
#define DRV_COPYRIGHT	"Copyright(c) 2003-2011 Intel Corporation"
#define DRV_AUTHOR  "<ilw@linux.intel.com>"

#define TIME_UNIT		1024

#define IWL_CMD(x) case x: return #x

@@ -101,23 +96,6 @@ struct iwl_lib_ops {
	void (*temperature)(struct iwl_priv *priv);
};

struct iwl_mod_params {
	int sw_crypto;		/* def: 0 = using hardware encryption */
	int num_of_queues;	/* def: HW dependent */
	int disable_11n;	/* def: 0 = 11n capabilities enabled */
	int amsdu_size_8K;	/* def: 1 = enable 8K amsdu size */
	int antenna;		/* def: 0 = both antennas (use diversity) */
	int restart_fw;		/* def: 1 = restart firmware */
	bool plcp_check;	/* def: true = enable plcp health check */
	bool ack_check;		/* def: false = disable ack health check */
	bool wd_disable;	/* def: false = enable stuck queue check */
	bool bt_coex_active;	/* def: true = enable bt coex */
	int led_mode;		/* def: 0 = system default */
	bool no_sleep_autoadjust; /* def: true = disable autoadjust */
	bool power_save;	/* def: false = disable power save */
	int power_level;	/* def: 1 = power level */
};

/*
 * @max_ll_items: max number of OTP blocks
 * @shadow_ram_support: shadow support for OTP memory

@@ -222,16 +200,7 @@ struct iwl_ht_params {
 * We enable the driver to be backward compatible wrt API version. The
 * driver specifies which APIs it supports (with @ucode_api_max being the
 * highest and @ucode_api_min the lowest). Firmware will only be loaded if
 * it has a supported API version. The firmware's API version will be
 * stored in @iwl_priv, enabling the driver to make runtime changes based
 * on firmware version used.
 *
 * For example,
 * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
 *	Driver interacts with Firmware API version >= 2.
 * } else {
 *	Driver interacts with Firmware API version 1.
 * }
 * it has a supported API version.
 *
 * The ideal usage of this infrastructure is to treat a new ucode API
 * release as a new hardware revision.

@@ -292,7 +261,6 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
void iwl_connection_init_rx_config(struct iwl_priv *priv,
				   struct iwl_rxon_context *ctx);
void iwl_set_rate(struct iwl_priv *priv);
void iwl_irq_handle_error(struct iwl_priv *priv);
int iwl_mac_add_interface(struct ieee80211_hw *hw,
			  struct ieee80211_vif *vif);
void iwl_mac_remove_interface(struct ieee80211_hw *hw,

@@ -398,22 +366,10 @@ u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval);
__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
			   u32 addon, u32 beacon_interval);

#ifdef CONFIG_PM
int iwl_suspend(struct iwl_priv *priv);
int iwl_resume(struct iwl_priv *priv);
#endif /* !CONFIG_PM */

int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg);
void __devexit iwl_remove(struct iwl_priv * priv);

/*****************************************************
 * Error Handling Debugging
 ******************************************************/
void iwl_dump_nic_error_log(struct iwl_priv *priv);
int iwl_dump_nic_event_log(struct iwl_priv *priv,
			   bool full_log, char **buf, bool display);
void iwl_dump_csr(struct iwl_priv *priv);
int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display);
#ifdef CONFIG_IWLWIFI_DEBUG
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
			     struct iwl_rxon_context *ctx);

@@ -424,79 +380,11 @@ static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv,
}
#endif

void iwl_clear_isr_stats(struct iwl_priv *priv);

/*****************************************************
 * GEOS
 ******************************************************/
int iwlcore_init_geos(struct iwl_priv *priv);
void iwlcore_free_geos(struct iwl_priv *priv);

/*************** DRIVER STATUS FUNCTIONS *****/

#define STATUS_HCMD_ACTIVE	0	/* host command in progress */
/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
#define STATUS_INT_ENABLED	2
#define STATUS_RF_KILL_HW	3
#define STATUS_CT_KILL		4
#define STATUS_INIT		5
#define STATUS_ALIVE		6
#define STATUS_READY		7
#define STATUS_TEMPERATURE	8
#define STATUS_GEO_CONFIGURED	9
#define STATUS_EXIT_PENDING	10
#define STATUS_STATISTICS	12
#define STATUS_SCANNING		13
#define STATUS_SCAN_ABORTING	14
#define STATUS_SCAN_HW		15
#define STATUS_POWER_PMI	16
#define STATUS_FW_ERROR		17
#define STATUS_DEVICE_ENABLED	18
#define STATUS_CHANNEL_SWITCH_PENDING 19


static inline int iwl_is_ready(struct iwl_priv *priv)
{
	/* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
	 * set but EXIT_PENDING is not */
	return test_bit(STATUS_READY, &priv->status) &&
	       test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
	       !test_bit(STATUS_EXIT_PENDING, &priv->status);
}

static inline int iwl_is_alive(struct iwl_priv *priv)
{
	return test_bit(STATUS_ALIVE, &priv->status);
}

static inline int iwl_is_init(struct iwl_priv *priv)
{
	return test_bit(STATUS_INIT, &priv->status);
}

static inline int iwl_is_rfkill_hw(struct iwl_priv *priv)
{
	return test_bit(STATUS_RF_KILL_HW, &priv->status);
}

static inline int iwl_is_rfkill(struct iwl_priv *priv)
{
	return iwl_is_rfkill_hw(priv);
}

static inline int iwl_is_ctkill(struct iwl_priv *priv)
{
	return test_bit(STATUS_CT_KILL, &priv->status);
}

static inline int iwl_is_ready_rf(struct iwl_priv *priv)
{

	if (iwl_is_rfkill(priv))
		return 0;

	return iwl_is_ready(priv);
}
int iwl_init_geos(struct iwl_priv *priv);
void iwl_free_geos(struct iwl_priv *priv);

extern void iwl_send_bt_config(struct iwl_priv *priv);
extern int iwl_send_statistics_request(struct iwl_priv *priv,
@@ -29,50 +29,51 @@
#ifndef __iwl_debug_h__
#define __iwl_debug_h__

#include "iwl-bus.h"
#include "iwl-shared.h"

struct iwl_priv;
extern u32 iwl_debug_level;

#define IWL_ERR(p, f, a...) dev_err(p->bus->dev, f, ## a)
#define IWL_WARN(p, f, a...) dev_warn(p->bus->dev, f, ## a)
#define IWL_INFO(p, f, a...) dev_info(p->bus->dev, f, ## a)
#define IWL_CRIT(p, f, a...) dev_crit(p->bus->dev, f, ## a)
/*No matter what is m (priv, bus, trans), this will work */
#define IWL_ERR(m, f, a...) dev_err(bus(m)->dev, f, ## a)
#define IWL_WARN(m, f, a...) dev_warn(bus(m)->dev, f, ## a)
#define IWL_INFO(m, f, a...) dev_info(bus(m)->dev, f, ## a)
#define IWL_CRIT(m, f, a...) dev_crit(bus(m)->dev, f, ## a)

#define iwl_print_hex_error(priv, p, len) \
#define iwl_print_hex_error(m, p, len) \
do { \
	print_hex_dump(KERN_ERR, "iwl data: ", \
		       DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)

#ifdef CONFIG_IWLWIFI_DEBUG
#define IWL_DEBUG(__priv, level, fmt, args...) \
#define IWL_DEBUG(m, level, fmt, args...) \
do { \
	if (iwl_get_debug_level(__priv) & (level)) \
		dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
	if (iwl_get_debug_level((m)->shrd) & (level)) \
		dev_printk(KERN_ERR, bus(m)->dev, \
			 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
			__func__ , ## args); \
} while (0)

#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \
#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \
do { \
	if ((iwl_get_debug_level(__priv) & (level)) && net_ratelimit()) \
		dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
	if (iwl_get_debug_level((m)->shrd) & (level) && net_ratelimit())\
		dev_printk(KERN_ERR, bus(m)->dev, \
			"%c %s " fmt, in_interrupt() ? 'I' : 'U', \
			__func__ , ## args); \
} while (0)

#define iwl_print_hex_dump(priv, level, p, len) \
#define iwl_print_hex_dump(m, level, p, len) \
do { \
	if (iwl_get_debug_level(priv) & level) \
	if (iwl_get_debug_level((m)->shrd) & level) \
		print_hex_dump(KERN_DEBUG, "iwl data: ", \
			       DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)

#else
#define IWL_DEBUG(__priv, level, fmt, args...)
#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
				      const void *p, u32 len)
{}
#define IWL_DEBUG(m, level, fmt, args...)
#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
#define iwl_print_hex_dump(m, level, p, len)
#endif /* CONFIG_IWLWIFI_DEBUG */

#ifdef CONFIG_IWLWIFI_DEBUGFS

@@ -166,6 +167,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
#define IWL_DEBUG_CALIB(p, f, a...)	IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
#define IWL_DEBUG_FW(p, f, a...)	IWL_DEBUG(p, IWL_DL_FW, f, ## a)
#define IWL_DEBUG_RF_KILL(p, f, a...)	IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
#define IWL_DEBUG_FW_ERRORS(p, f, a...)	IWL_DEBUG(p, IWL_DL_FW_ERRORS, f, ## a)
#define IWL_DEBUG_DROP(p, f, a...)	IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
#define IWL_DEBUG_DROP_LIMIT(p, f, a...)	\
	IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
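The IWL_DEBUG()/IWL_DEBUG_LIMIT() rework above keeps the same basic shape: a per-category bitmask checked at run time before anything is formatted or printed. A reduced standalone sketch of that gating, with invented ex_* names standing in for the driver's debug_level plumbing (GNU-C variadic macros assumed, as in the kernel):

/* Illustrative only: bitmask-gated debug printing in the style of IWL_DEBUG(). */
#include <stdio.h>

#define EX_DL_TX	(1u << 0)
#define EX_DL_RX	(1u << 1)

static unsigned int ex_debug_level = EX_DL_TX;	/* runtime-selectable mask */

#define EX_DEBUG(level, fmt, ...) \
do { \
	if (ex_debug_level & (level)) \
		fprintf(stderr, "%s: " fmt, __func__, ##__VA_ARGS__); \
} while (0)

int main(void)
{
	EX_DEBUG(EX_DL_TX, "queued %d frames\n", 3);	/* printed */
	EX_DEBUG(EX_DL_RX, "dropped %d frames\n", 1);	/* filtered out by the mask */
	return 0;
}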
@ -254,7 +254,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
|
|||
sram = priv->dbgfs_sram_offset & ~0x3;
|
||||
|
||||
/* read the first u32 from sram */
|
||||
val = iwl_read_targ_mem(priv, sram);
|
||||
val = iwl_read_targ_mem(bus(priv), sram);
|
||||
|
||||
for (; len; len--) {
|
||||
/* put the address at the start of every line */
|
||||
|
@ -273,7 +273,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
|
|||
if (++offset == 4) {
|
||||
sram += 4;
|
||||
offset = 0;
|
||||
val = iwl_read_targ_mem(priv, sram);
|
||||
val = iwl_read_targ_mem(bus(priv), sram);
|
||||
}
|
||||
|
||||
/* put in extra spaces and split lines for human readability */
|
||||
|
@ -340,7 +340,8 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
|
|||
{
|
||||
struct iwl_priv *priv = file->private_data;
|
||||
struct iwl_station_entry *station;
|
||||
int max_sta = priv->hw_params.max_stations;
|
||||
struct iwl_tid_data *tid_data;
|
||||
int max_sta = hw_params(priv).max_stations;
|
||||
char *buf;
|
||||
int i, j, pos = 0;
|
||||
ssize_t ret;
|
||||
|
@ -363,22 +364,18 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
|
|||
i, station->sta.sta.addr,
|
||||
station->sta.station_flags_msk);
|
||||
pos += scnprintf(buf + pos, bufsz - pos,
|
||||
"TID\tseq_num\ttxq_id\tframes\ttfds\t");
|
||||
pos += scnprintf(buf + pos, bufsz - pos,
|
||||
"start_idx\tbitmap\t\t\trate_n_flags\n");
|
||||
"TID\tseq_num\ttxq_id\ttfds\trate_n_flags\n");
|
||||
|
||||
for (j = 0; j < MAX_TID_COUNT; j++) {
|
||||
for (j = 0; j < IWL_MAX_TID_COUNT; j++) {
|
||||
tid_data = &priv->shrd->tid_data[i][j];
|
||||
pos += scnprintf(buf + pos, bufsz - pos,
|
||||
"%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
|
||||
j, station->tid[j].seq_number,
|
||||
station->tid[j].agg.txq_id,
|
||||
station->tid[j].agg.frame_count,
|
||||
station->tid[j].tfds_in_queue,
|
||||
station->tid[j].agg.start_idx,
|
||||
station->tid[j].agg.bitmap,
|
||||
station->tid[j].agg.rate_n_flags);
|
||||
"%d:\t%#x\t%#x\t%u\t%#x",
|
||||
j, tid_data->seq_number,
|
||||
tid_data->agg.txq_id,
|
||||
tid_data->tfds_in_queue,
|
||||
tid_data->agg.rate_n_flags);
|
||||
|
||||
if (station->tid[j].agg.wait_for_ba)
|
||||
if (tid_data->agg.wait_for_ba)
|
||||
pos += scnprintf(buf + pos, bufsz - pos,
|
||||
" - waitforba");
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "\n");
|
||||
|
@ -442,46 +439,6 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t iwl_dbgfs_log_event_read(struct file *file,
|
||||
char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct iwl_priv *priv = file->private_data;
|
||||
char *buf;
|
||||
int pos = 0;
|
||||
ssize_t ret = -ENOMEM;
|
||||
|
||||
ret = pos = iwl_dump_nic_event_log(priv, true, &buf, true);
|
||||
if (buf) {
|
||||
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
|
||||
kfree(buf);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t iwl_dbgfs_log_event_write(struct file *file,
|
||||
const char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct iwl_priv *priv = file->private_data;
|
||||
u32 event_log_flag;
|
||||
char buf[8];
|
||||
int buf_size;
|
||||
|
||||
memset(buf, 0, sizeof(buf));
|
||||
buf_size = min(count, sizeof(buf) - 1);
|
||||
if (copy_from_user(buf, user_buf, buf_size))
|
||||
return -EFAULT;
|
||||
if (sscanf(buf, "%d", &event_log_flag) != 1)
|
||||
return -EFAULT;
|
||||
if (event_log_flag == 1)
|
||||
iwl_dump_nic_event_log(priv, true, NULL, false);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
|
||||
|
||||
static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
|
@ -492,7 +449,7 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
|
|||
char *buf;
|
||||
ssize_t ret;
|
||||
|
||||
if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
|
||||
if (!test_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status))
|
||||
return -EAGAIN;
|
||||
|
||||
buf = kzalloc(bufsz, GFP_KERNEL);
|
||||
|
@ -562,45 +519,46 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
|
|||
const size_t bufsz = sizeof(buf);
|
||||
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
|
||||
test_bit(STATUS_HCMD_ACTIVE, &priv->status));
|
||||
test_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status));
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
|
||||
test_bit(STATUS_INT_ENABLED, &priv->status));
|
||||
test_bit(STATUS_INT_ENABLED, &priv->shrd->status));
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
|
||||
test_bit(STATUS_RF_KILL_HW, &priv->status));
|
||||
test_bit(STATUS_RF_KILL_HW, &priv->shrd->status));
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
|
||||
test_bit(STATUS_CT_KILL, &priv->status));
|
||||
test_bit(STATUS_CT_KILL, &priv->shrd->status));
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
|
||||
test_bit(STATUS_INIT, &priv->status));
|
||||
test_bit(STATUS_INIT, &priv->shrd->status));
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
|
||||
test_bit(STATUS_ALIVE, &priv->status));
|
||||
test_bit(STATUS_ALIVE, &priv->shrd->status));
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
|
||||
test_bit(STATUS_READY, &priv->status));
|
||||
test_bit(STATUS_READY, &priv->shrd->status));
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
|
||||
test_bit(STATUS_TEMPERATURE, &priv->status));
|
||||
test_bit(STATUS_TEMPERATURE, &priv->shrd->status));
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
|
||||
test_bit(STATUS_GEO_CONFIGURED, &priv->status));
|
||||
test_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status));
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
|
||||
test_bit(STATUS_EXIT_PENDING, &priv->status));
|
||||
test_bit(STATUS_EXIT_PENDING, &priv->shrd->status));
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
|
||||
test_bit(STATUS_STATISTICS, &priv->status));
|
||||
test_bit(STATUS_STATISTICS, &priv->shrd->status));
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
|
||||
test_bit(STATUS_SCANNING, &priv->status));
|
||||
test_bit(STATUS_SCANNING, &priv->shrd->status));
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
|
||||
test_bit(STATUS_SCAN_ABORTING, &priv->status));
|
||||
test_bit(STATUS_SCAN_ABORTING, &priv->shrd->status));
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
|
||||
test_bit(STATUS_SCAN_HW, &priv->status));
|
||||
test_bit(STATUS_SCAN_HW, &priv->shrd->status));
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
|
||||
test_bit(STATUS_POWER_PMI, &priv->status));
|
||||
test_bit(STATUS_POWER_PMI, &priv->shrd->status));
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
|
||||
test_bit(STATUS_FW_ERROR, &priv->status));
|
||||
test_bit(STATUS_FW_ERROR, &priv->shrd->status));
|
||||
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
|
||||
}
|
||||
|
||||
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
|
||||
static ssize_t iwl_dbgfs_rx_handlers_read(struct file *file,
|
||||
char __user *user_buf,
|
||||
size_t count, loff_t *ppos) {
|
||||
|
||||
struct iwl_priv *priv = file->private_data;
|
||||
|
||||
int pos = 0;
|
||||
int cnt = 0;
|
||||
char *buf;
|
||||
|
@ -613,61 +571,25 @@ static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
pos += scnprintf(buf + pos, bufsz - pos,
|
||||
"Interrupt Statistics Report:\n");
|
||||
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
|
||||
priv->isr_stats.hw);
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
|
||||
priv->isr_stats.sw);
|
||||
if (priv->isr_stats.sw || priv->isr_stats.hw) {
|
||||
pos += scnprintf(buf + pos, bufsz - pos,
|
||||
"\tLast Restarting Code: 0x%X\n",
|
||||
priv->isr_stats.err_code);
|
||||
}
|
||||
#ifdef CONFIG_IWLWIFI_DEBUG
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
|
||||
priv->isr_stats.sch);
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
|
||||
priv->isr_stats.alive);
|
||||
#endif
|
||||
pos += scnprintf(buf + pos, bufsz - pos,
|
||||
"HW RF KILL switch toggled:\t %u\n",
|
||||
priv->isr_stats.rfkill);
|
||||
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
|
||||
priv->isr_stats.ctkill);
|
||||
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
|
||||
priv->isr_stats.wakeup);
|
||||
|
||||
pos += scnprintf(buf + pos, bufsz - pos,
|
||||
"Rx command responses:\t\t %u\n",
|
||||
priv->isr_stats.rx);
|
||||
for (cnt = 0; cnt < REPLY_MAX; cnt++) {
|
||||
if (priv->isr_stats.rx_handlers[cnt] > 0)
|
||||
if (priv->rx_handlers_stats[cnt] > 0)
|
||||
pos += scnprintf(buf + pos, bufsz - pos,
|
||||
"\tRx handler[%36s]:\t\t %u\n",
|
||||
get_cmd_string(cnt),
|
||||
priv->isr_stats.rx_handlers[cnt]);
|
||||
priv->rx_handlers_stats[cnt]);
|
||||
}
|
||||
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
|
||||
priv->isr_stats.tx);
|
||||
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
|
||||
priv->isr_stats.unhandled);
|
||||
|
||||
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
|
||||
kfree(buf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
|
||||
static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
|
||||
const char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct iwl_priv *priv = file->private_data;
|
||||
|
||||
char buf[8];
|
||||
int buf_size;
|
||||
u32 reset_flag;
|
||||
|
@ -679,7 +601,8 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
|
|||
if (sscanf(buf, "%x", &reset_flag) != 1)
|
||||
return -EFAULT;
|
||||
if (reset_flag == 0)
|
||||
iwl_clear_isr_stats(priv);
|
||||
memset(&priv->rx_handlers_stats[0], 0,
|
||||
sizeof(priv->rx_handlers_stats));
|
||||
|
||||
return count;
|
||||
}
|
||||
|
@ -814,14 +737,14 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
|
|||
if (value != -1 && (value < 0 || value >= IWL_POWER_NUM))
|
||||
return -EINVAL;
|
||||
|
||||
if (!iwl_is_ready_rf(priv))
|
||||
if (!iwl_is_ready_rf(priv->shrd))
|
||||
return -EAGAIN;
|
||||
|
||||
priv->power_data.debug_sleep_level_override = value;
|
||||
|
||||
mutex_lock(&priv->mutex);
|
||||
mutex_lock(&priv->shrd->mutex);
|
||||
iwl_power_update_mode(priv, true);
|
||||
mutex_unlock(&priv->mutex);
|
||||
mutex_unlock(&priv->shrd->mutex);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
@ -870,188 +793,17 @@ static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
|
|||
|
||||
DEBUGFS_READ_WRITE_FILE_OPS(sram);
|
||||
DEBUGFS_READ_FILE_OPS(wowlan_sram);
|
||||
DEBUGFS_READ_WRITE_FILE_OPS(log_event);
|
||||
DEBUGFS_READ_FILE_OPS(nvm);
|
||||
DEBUGFS_READ_FILE_OPS(stations);
|
||||
DEBUGFS_READ_FILE_OPS(channels);
|
||||
DEBUGFS_READ_FILE_OPS(status);
|
||||
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
|
||||
DEBUGFS_READ_WRITE_FILE_OPS(rx_handlers);
|
||||
DEBUGFS_READ_FILE_OPS(qos);
|
||||
DEBUGFS_READ_FILE_OPS(thermal_throttling);
|
||||
DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
|
||||
DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
|
||||
DEBUGFS_READ_FILE_OPS(current_sleep_command);
|
||||
|
||||
static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
|
||||
char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct iwl_priv *priv = file->private_data;
|
||||
int pos = 0, ofs = 0;
|
||||
int cnt = 0, entry;
|
||||
struct iwl_tx_queue *txq;
|
||||
struct iwl_queue *q;
|
||||
struct iwl_rx_queue *rxq = &priv->rxq;
|
||||
char *buf;
|
||||
int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
|
||||
(priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
|
||||
const u8 *ptr;
|
||||
ssize_t ret;
|
||||
|
||||
if (!priv->txq) {
|
||||
IWL_ERR(priv, "txq not ready\n");
|
||||
return -EAGAIN;
|
||||
}
|
||||
buf = kzalloc(bufsz, GFP_KERNEL);
|
||||
if (!buf) {
|
||||
IWL_ERR(priv, "Can not allocate buffer\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
|
||||
for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
|
||||
txq = &priv->txq[cnt];
|
||||
q = &txq->q;
|
||||
pos += scnprintf(buf + pos, bufsz - pos,
|
||||
"q[%d]: read_ptr: %u, write_ptr: %u\n",
|
||||
cnt, q->read_ptr, q->write_ptr);
|
||||
}
|
||||
if (priv->tx_traffic && (iwl_debug_level & IWL_DL_TX)) {
|
||||
ptr = priv->tx_traffic;
|
||||
pos += scnprintf(buf + pos, bufsz - pos,
|
||||
"Tx Traffic idx: %u\n", priv->tx_traffic_idx);
|
||||
for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
|
||||
for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
|
||||
entry++, ofs += 16) {
|
||||
pos += scnprintf(buf + pos, bufsz - pos,
|
||||
"0x%.4x ", ofs);
|
||||
hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
|
||||
buf + pos, bufsz - pos, 0);
|
||||
pos += strlen(buf + pos);
|
||||
if (bufsz - pos > 0)
|
||||
buf[pos++] = '\n';
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
|
||||
pos += scnprintf(buf + pos, bufsz - pos,
|
||||
"read: %u, write: %u\n",
|
||||
rxq->read, rxq->write);
|
||||
|
||||
if (priv->rx_traffic && (iwl_debug_level & IWL_DL_RX)) {
|
||||
ptr = priv->rx_traffic;
|
||||
pos += scnprintf(buf + pos, bufsz - pos,
|
||||
"Rx Traffic idx: %u\n", priv->rx_traffic_idx);
|
||||
for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
|
||||
for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
|
||||
entry++, ofs += 16) {
|
||||
pos += scnprintf(buf + pos, bufsz - pos,
|
||||
"0x%.4x ", ofs);
|
||||
hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
|
||||
buf + pos, bufsz - pos, 0);
|
||||
pos += strlen(buf + pos);
|
||||
if (bufsz - pos > 0)
|
||||
buf[pos++] = '\n';
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
|
||||
kfree(buf);
|
||||
return ret;
|
||||
}
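The function above is the template every debugfs read handler in this file follows: build the whole report into a kernel buffer with scnprintf(), which never overruns and returns the number of bytes added, then let simple_read_from_buffer() handle the user copy and the *ppos bookkeeping. A minimal sketch of that pattern — the handler name is illustrative, only the rxq counters it prints are real fields:

static ssize_t iwl_dbgfs_example_read(struct file *file,
                                      char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
    struct iwl_priv *priv = file->private_data;
    const size_t bufsz = 256;   /* assumed large enough for this report */
    char *buf;
    int pos = 0;
    ssize_t ret;

    buf = kzalloc(bufsz, GFP_KERNEL);
    if (!buf)
        return -ENOMEM;

    pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", priv->rxq.read);
    pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", priv->rxq.write);

    /* copies at most count bytes, advances *ppos, returns bytes copied */
    ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
    kfree(buf);
    return ret;
}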
static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
                                           const char __user *user_buf,
                                           size_t count, loff_t *ppos)
{
    struct iwl_priv *priv = file->private_data;
    char buf[8];
    int buf_size;
    int traffic_log;

    memset(buf, 0, sizeof(buf));
    buf_size = min(count, sizeof(buf) - 1);
    if (copy_from_user(buf, user_buf, buf_size))
        return -EFAULT;
    if (sscanf(buf, "%d", &traffic_log) != 1)
        return -EFAULT;
    if (traffic_log == 0)
        iwl_reset_traffic_log(priv);

    return count;
}
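The write handler above is equally formulaic: copy at most sizeof(buf) - 1 bytes from user space into a small zeroed stack buffer, parse one decimal value, and act on it. The same parse could also be written with the kstrto*() helpers; a sketch of that equivalent variant (the handler name is illustrative, and this is not what the patch itself does):

static ssize_t iwl_dbgfs_example_write(struct file *file,
                                       const char __user *user_buf,
                                       size_t count, loff_t *ppos)
{
    struct iwl_priv *priv = file->private_data;
    char buf[8] = {};   /* zeroed so the copied bytes stay NUL terminated */
    int val, ret;

    if (copy_from_user(buf, user_buf, min(count, sizeof(buf) - 1)))
        return -EFAULT;
    ret = kstrtoint(strim(buf), 0, &val);
    if (ret)
        return ret;

    if (val == 0)       /* "echo 0 > traffic_log" resets the traffic log */
        iwl_reset_traffic_log(priv);

    return count;
}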
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
|
||||
char __user *user_buf,
|
||||
size_t count, loff_t *ppos) {
|
||||
|
||||
struct iwl_priv *priv = file->private_data;
|
||||
struct iwl_tx_queue *txq;
|
||||
struct iwl_queue *q;
|
||||
char *buf;
|
||||
int pos = 0;
|
||||
int cnt;
|
||||
int ret;
|
||||
const size_t bufsz = sizeof(char) * 64 *
|
||||
priv->cfg->base_params->num_of_queues;
|
||||
|
||||
if (!priv->txq) {
|
||||
IWL_ERR(priv, "txq not ready\n");
|
||||
return -EAGAIN;
|
||||
}
|
||||
buf = kzalloc(bufsz, GFP_KERNEL);
|
||||
if (!buf)
|
||||
return -ENOMEM;
|
||||
|
||||
for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
|
||||
txq = &priv->txq[cnt];
|
||||
q = &txq->q;
|
||||
pos += scnprintf(buf + pos, bufsz - pos,
|
||||
"hwq %.2d: read=%u write=%u stop=%d"
|
||||
" swq_id=%#.2x (ac %d/hwq %d)\n",
|
||||
cnt, q->read_ptr, q->write_ptr,
|
||||
!!test_bit(cnt, priv->queue_stopped),
|
||||
txq->swq_id, txq->swq_id & 3,
|
||||
(txq->swq_id >> 2) & 0x1f);
|
||||
if (cnt >= 4)
|
||||
continue;
|
||||
/* for the ACs, display the stop count too */
|
||||
pos += scnprintf(buf + pos, bufsz - pos,
|
||||
" stop-count: %d\n",
|
||||
atomic_read(&priv->queue_stop_count[cnt]));
|
||||
}
|
||||
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
|
||||
kfree(buf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
|
||||
char __user *user_buf,
|
||||
size_t count, loff_t *ppos) {
|
||||
|
||||
struct iwl_priv *priv = file->private_data;
|
||||
struct iwl_rx_queue *rxq = &priv->rxq;
|
||||
char buf[256];
|
||||
int pos = 0;
|
||||
const size_t bufsz = sizeof(buf);
|
||||
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
|
||||
rxq->read);
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
|
||||
rxq->write);
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
|
||||
rxq->free_count);
|
||||
if (rxq->rb_stts) {
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
|
||||
le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
|
||||
} else {
|
||||
pos += scnprintf(buf + pos, bufsz - pos,
|
||||
"closed_rb_num: Not Allocated\n");
|
||||
}
|
||||
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
|
||||
}
|
||||
|
||||
static const char *fmt_value = " %-30s %10u\n";
|
||||
static const char *fmt_hex = " %-30s 0x%02X\n";
|
||||
static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
|
||||
|
@ -1096,7 +848,7 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
|
|||
struct statistics_rx_non_phy *delta_general, *max_general;
|
||||
struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
|
||||
|
||||
if (!iwl_is_alive(priv))
|
||||
if (!iwl_is_alive(priv->shrd))
|
||||
return -EAGAIN;
|
||||
|
||||
buf = kzalloc(bufsz, GFP_KERNEL);
|
||||
|
@ -1522,7 +1274,7 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
|
|||
ssize_t ret;
|
||||
struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
|
||||
|
||||
if (!iwl_is_alive(priv))
|
||||
if (!iwl_is_alive(priv->shrd))
|
||||
return -EAGAIN;
|
||||
|
||||
buf = kzalloc(bufsz, GFP_KERNEL);
|
||||
|
@ -1716,7 +1468,7 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
|
|||
struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
|
||||
struct statistics_div *div, *accum_div, *delta_div, *max_div;
|
||||
|
||||
if (!iwl_is_alive(priv))
|
||||
if (!iwl_is_alive(priv->shrd))
|
||||
return -EAGAIN;
|
||||
|
||||
buf = kzalloc(bufsz, GFP_KERNEL);
|
||||
|
@ -1829,16 +1581,16 @@ static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
|
|||
ssize_t ret;
|
||||
struct statistics_bt_activity *bt, *accum_bt;
|
||||
|
||||
if (!iwl_is_alive(priv))
|
||||
if (!iwl_is_alive(priv->shrd))
|
||||
return -EAGAIN;
|
||||
|
||||
if (!priv->bt_enable_flag)
|
||||
return -EINVAL;
|
||||
|
||||
/* make request to uCode to retrieve statistics information */
|
||||
mutex_lock(&priv->mutex);
|
||||
mutex_lock(&priv->shrd->mutex);
|
||||
ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
|
||||
mutex_unlock(&priv->mutex);
|
||||
mutex_unlock(&priv->shrd->mutex);
|
||||
|
||||
if (ret) {
|
||||
IWL_ERR(priv,
|
||||
|
@ -1917,7 +1669,7 @@ static ssize_t iwl_dbgfs_reply_tx_error_read(struct file *file,
|
|||
(sizeof(struct reply_agg_tx_error_statistics) * 24) + 200;
|
||||
ssize_t ret;
|
||||
|
||||
if (!iwl_is_alive(priv))
|
||||
if (!iwl_is_alive(priv->shrd))
|
||||
return -EAGAIN;
|
||||
|
||||
buf = kzalloc(bufsz, GFP_KERNEL);
|
||||
|
@ -2199,7 +1951,7 @@ static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
|
|||
const size_t bufsz = sizeof(buf);
|
||||
u32 pwrsave_status;
|
||||
|
||||
pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
|
||||
pwrsave_status = iwl_read32(bus(priv), CSR_GP_CNTRL) &
|
||||
CSR_GP_REG_POWER_SAVE_STATUS_MSK;
|
||||
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
|
||||
|
@ -2229,30 +1981,9 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
|
|||
return -EFAULT;
|
||||
|
||||
/* make request to uCode to retrieve statistics information */
|
||||
mutex_lock(&priv->mutex);
|
||||
mutex_lock(&priv->shrd->mutex);
|
||||
iwl_send_statistics_request(priv, CMD_SYNC, true);
|
||||
mutex_unlock(&priv->mutex);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static ssize_t iwl_dbgfs_csr_write(struct file *file,
|
||||
const char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct iwl_priv *priv = file->private_data;
|
||||
char buf[8];
|
||||
int buf_size;
|
||||
int csr;
|
||||
|
||||
memset(buf, 0, sizeof(buf));
|
||||
buf_size = min(count, sizeof(buf) - 1);
|
||||
if (copy_from_user(buf, user_buf, buf_size))
|
||||
return -EFAULT;
|
||||
if (sscanf(buf, "%d", &csr) != 1)
|
||||
return -EFAULT;
|
||||
|
||||
iwl_dump_csr(priv);
|
||||
mutex_unlock(&priv->shrd->mutex);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
@ -2333,25 +2064,6 @@ static ssize_t iwl_dbgfs_rxon_filter_flags_read(struct file *file,
|
|||
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
|
||||
}
|
||||
|
||||
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
                                     char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
    struct iwl_priv *priv = file->private_data;
    char *buf;
    int pos = 0;
    ssize_t ret = -EFAULT;

    ret = pos = iwl_dump_fh(priv, &buf, true);
    if (buf) {
        ret = simple_read_from_buffer(user_buf,
                                      count, ppos, buf, pos);
        kfree(buf);
    }

    return ret;
}
|
||||
|
||||
static ssize_t iwl_dbgfs_missed_beacon_read(struct file *file,
|
||||
char __user *user_buf,
|
||||
size_t count, loff_t *ppos) {
|
||||
|
@ -2504,7 +2216,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
|
|||
if (sscanf(buf, "%d", &flush) != 1)
|
||||
return -EINVAL;
|
||||
|
||||
if (iwl_is_rfkill(priv))
|
||||
if (iwl_is_rfkill(priv->shrd))
|
||||
return -EFAULT;
|
||||
|
||||
iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
|
||||
|
@ -2628,9 +2340,6 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
|
|||
|
||||
DEBUGFS_READ_FILE_OPS(rx_statistics);
|
||||
DEBUGFS_READ_FILE_OPS(tx_statistics);
|
||||
DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
|
||||
DEBUGFS_READ_FILE_OPS(rx_queue);
|
||||
DEBUGFS_READ_FILE_OPS(tx_queue);
|
||||
DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
|
||||
DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
|
||||
DEBUGFS_READ_FILE_OPS(ucode_general_stats);
|
||||
|
@ -2639,9 +2348,7 @@ DEBUGFS_READ_FILE_OPS(chain_noise);
|
|||
DEBUGFS_READ_FILE_OPS(power_save_status);
|
||||
DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
|
||||
DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
|
||||
DEBUGFS_WRITE_FILE_OPS(csr);
|
||||
DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
|
||||
DEBUGFS_READ_FILE_OPS(fh_reg);
|
||||
DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
|
||||
DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
|
||||
DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
|
||||
|
@ -2682,11 +2389,10 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
|
|||
DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
|
||||
DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
|
||||
DEBUGFS_ADD_FILE(wowlan_sram, dir_data, S_IRUSR);
|
||||
DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR);
|
||||
DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
|
||||
DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
|
||||
DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
|
||||
DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
|
||||
DEBUGFS_ADD_FILE(rx_handlers, dir_data, S_IWUSR | S_IRUSR);
|
||||
DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
|
||||
DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR);
|
||||
DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR);
|
||||
|
@ -2694,14 +2400,9 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
|
|||
DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
|
||||
DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
|
||||
DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
|
||||
DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
|
||||
DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
|
||||
DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
|
||||
DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
|
||||
DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
|
||||
DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
|
||||
DEBUGFS_ADD_FILE(csr, dir_debug, S_IWUSR);
|
||||
DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
|
||||
DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
|
||||
DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
|
||||
DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
|
||||
|
@ -2725,6 +2426,9 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
|
|||
&priv->disable_sens_cal);
|
||||
DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
|
||||
&priv->disable_chain_noise_cal);
|
||||
|
||||
if (iwl_trans_dbgfs_register(trans(priv), dir_debug))
|
||||
goto err;
|
||||
return 0;
|
||||
|
||||
err:
|
||||
|
|
|
@ -36,12 +36,12 @@
|
|||
#include <linux/kernel.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/leds.h>
|
||||
#include <linux/slab.h>
|
||||
#include <net/ieee80211_radiotap.h>
|
||||
|
||||
#include "iwl-eeprom.h"
|
||||
#include "iwl-csr.h"
|
||||
#include "iwl-prph.h"
|
||||
#include "iwl-fh.h"
|
||||
#include "iwl-debug.h"
|
||||
#include "iwl-agn-hw.h"
|
||||
#include "iwl-led.h"
|
||||
|
@ -50,8 +50,7 @@
|
|||
#include "iwl-agn-tt.h"
|
||||
#include "iwl-bus.h"
|
||||
#include "iwl-trans.h"
|
||||
|
||||
#define DRV_NAME "iwlagn"
|
||||
#include "iwl-shared.h"
|
||||
|
||||
struct iwl_tx_queue;
|
||||
|
||||
|
@ -90,14 +89,6 @@ struct iwl_tx_queue;
|
|||
#define DEFAULT_SHORT_RETRY_LIMIT 7U
|
||||
#define DEFAULT_LONG_RETRY_LIMIT 4U
|
||||
|
||||
struct iwl_rx_mem_buffer {
|
||||
dma_addr_t page_dma;
|
||||
struct page *page;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
#define rxb_addr(r) page_address(r->page)
|
||||
|
||||
/* defined below */
|
||||
struct iwl_device_cmd;
|
||||
|
||||
|
@ -156,12 +147,6 @@ struct iwl_queue {
|
|||
* space less than this */
|
||||
};
|
||||
|
||||
/* One for each TFD */
|
||||
struct iwl_tx_info {
|
||||
struct sk_buff *skb;
|
||||
struct iwl_rxon_context *ctx;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_tx_queue - Tx Queue for DMA
|
||||
* @q: generic Rx/Tx queue descriptor
|
||||
|
@ -173,6 +158,8 @@ struct iwl_tx_info {
|
|||
* @time_stamp: time (in jiffies) of last read_ptr change
|
||||
* @need_update: indicates need to update read/write index
|
||||
* @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
|
||||
* @sta_id: valid if sched_retry is set
|
||||
* @tid: valid if sched_retry is set
|
||||
*
|
||||
* A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
|
||||
* descriptors) and required locking structures.
|
||||
|
@ -185,12 +172,15 @@ struct iwl_tx_queue {
|
|||
struct iwl_tfd *tfds;
|
||||
struct iwl_device_cmd **cmd;
|
||||
struct iwl_cmd_meta *meta;
|
||||
struct iwl_tx_info *txb;
|
||||
struct sk_buff **skbs;
|
||||
unsigned long time_stamp;
|
||||
u8 need_update;
|
||||
u8 sched_retry;
|
||||
u8 active;
|
||||
u8 swq_id;
|
||||
|
||||
u16 sta_id;
|
||||
u16 tid;
|
||||
};
|
||||
|
||||
#define IWL_NUM_SCAN_RATES (2)
|
||||
|
@ -254,13 +244,6 @@ struct iwl_channel_info {
|
|||
#define IWL_DEFAULT_CMD_QUEUE_NUM 4
|
||||
#define IWL_IPAN_CMD_QUEUE_NUM 9
|
||||
|
||||
/*
|
||||
* This queue number is required for proper operation
|
||||
* because the ucode will stop/start the scheduler as
|
||||
* required.
|
||||
*/
|
||||
#define IWL_IPAN_MCAST_QUEUE 8
|
||||
|
||||
#define IEEE80211_DATA_LEN 2304
|
||||
#define IEEE80211_4ADDR_LEN 30
|
||||
#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
|
||||
|
@ -334,81 +317,11 @@ struct iwl_host_cmd {
|
|||
#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
|
||||
#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
|
||||
|
||||
/**
|
||||
* struct iwl_rx_queue - Rx queue
|
||||
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
|
||||
* @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
|
||||
* @read: Shared index to newest available Rx buffer
|
||||
* @write: Shared index to oldest written Rx packet
|
||||
* @free_count: Number of pre-allocated buffers in rx_free
|
||||
* @rx_free: list of free SKBs for use
|
||||
* @rx_used: List of Rx buffers with no SKB
|
||||
* @need_update: flag to indicate we need to update read/write index
|
||||
* @rb_stts: driver's pointer to receive buffer status
|
||||
* @rb_stts_dma: bus address of receive buffer status
|
||||
*
|
||||
* NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
|
||||
*/
|
||||
struct iwl_rx_queue {
|
||||
__le32 *bd;
|
||||
dma_addr_t bd_dma;
|
||||
struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
|
||||
struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
|
||||
u32 read;
|
||||
u32 write;
|
||||
u32 free_count;
|
||||
u32 write_actual;
|
||||
struct list_head rx_free;
|
||||
struct list_head rx_used;
|
||||
int need_update;
|
||||
struct iwl_rb_status *rb_stts;
|
||||
dma_addr_t rb_stts_dma;
|
||||
spinlock_t lock;
|
||||
};
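The NOTE above is the working invariant for this structure: rx_free and rx_used are two list_head FIFOs of iwl_rx_mem_buffer, and buffers migrate between them under rxq->lock as their pages are handed to the stack and later replenished. A minimal sketch of one direction of that movement (the function name and the exact call site are assumptions for illustration):

static void example_rx_buffer_consumed(struct iwl_rx_queue *rxq,
                                       struct iwl_rx_mem_buffer *rxb)
{
    unsigned long flags;

    spin_lock_irqsave(&rxq->lock, flags);
    /* the page went up the stack, so this buffer owns no page any more:
     * park it on rx_used until the replenish path gives it a new one */
    rxb->page = NULL;
    list_add_tail(&rxb->list, &rxq->rx_used);
    spin_unlock_irqrestore(&rxq->lock, flags);
}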
#define IWL_SUPPORTED_RATES_IE_LEN 8
|
||||
|
||||
#define MAX_TID_COUNT 9
|
||||
|
||||
#define IWL_INVALID_RATE 0xFF
|
||||
#define IWL_INVALID_VALUE -1
|
||||
|
||||
/**
|
||||
* struct iwl_ht_agg -- aggregation status while waiting for block-ack
|
||||
* @txq_id: Tx queue used for Tx attempt
|
||||
* @frame_count: # frames attempted by Tx command
|
||||
* @wait_for_ba: Expect block-ack before next Tx reply
|
||||
* @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window
|
||||
* @bitmap0: Low order bitmap, one bit for each frame pending ACK in Tx window
|
||||
* @bitmap1: High order, one bit for each frame pending ACK in Tx window
|
||||
* @rate_n_flags: Rate at which Tx was attempted
|
||||
*
|
||||
* If REPLY_TX indicates that aggregation was attempted, driver must wait
|
||||
* for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
|
||||
* until block ack arrives.
|
||||
*/
|
||||
struct iwl_ht_agg {
|
||||
u16 txq_id;
|
||||
u16 frame_count;
|
||||
u16 wait_for_ba;
|
||||
u16 start_idx;
|
||||
u64 bitmap;
|
||||
u32 rate_n_flags;
|
||||
#define IWL_AGG_OFF 0
|
||||
#define IWL_AGG_ON 1
|
||||
#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
|
||||
#define IWL_EMPTYING_HW_QUEUE_DELBA 3
|
||||
u8 state;
|
||||
u8 tx_fifo;
|
||||
};
|
||||
|
||||
|
||||
struct iwl_tid_data {
|
||||
u16 seq_number; /* agn only */
|
||||
u16 tfds_in_queue;
|
||||
struct iwl_ht_agg agg;
|
||||
};
|
||||
|
||||
union iwl_ht_rate_supp {
|
||||
u16 rates;
|
||||
struct {
|
||||
|
@ -459,7 +372,6 @@ struct iwl_qos_info {
|
|||
*/
|
||||
struct iwl_station_entry {
|
||||
struct iwl_addsta_cmd sta;
|
||||
struct iwl_tid_data tid[MAX_TID_COUNT];
|
||||
u8 used, ctxid;
|
||||
struct iwl_link_quality_cmd *lq;
|
||||
};
|
||||
|
@ -647,54 +559,6 @@ struct iwl_sensitivity_ranges {
|
|||
#define CELSIUS_TO_KELVIN(x) ((x)+273)
|
||||
|
||||
|
||||
/**
|
||||
* struct iwl_hw_params
|
||||
* @max_txq_num: Max # Tx queues supported
|
||||
* @scd_bc_tbls_size: size of scheduler byte count tables
|
||||
* @tfd_size: TFD size
|
||||
* @tx/rx_chains_num: Number of TX/RX chains
|
||||
* @valid_tx/rx_ant: usable antennas
|
||||
* @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
|
||||
* @max_rxq_log: Log-base-2 of max_rxq_size
|
||||
* @rx_page_order: Rx buffer page order
|
||||
* @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
|
||||
* @max_stations:
|
||||
* @ht40_channel: is 40MHz width possible in band 2.4
|
||||
* BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ)
|
||||
* @sw_crypto: 0 for hw, 1 for sw
|
||||
* @max_xxx_size: for ucode uses
|
||||
* @ct_kill_threshold: temperature threshold
|
||||
* @beacon_time_tsf_bits: number of valid tsf bits for beacon time
|
||||
* @calib_init_cfg: setup initial calibrations for the hw
|
||||
* @calib_rt_cfg: setup runtime calibrations for the hw
|
||||
* @struct iwl_sensitivity_ranges: range of sensitivity values
|
||||
*/
|
||||
struct iwl_hw_params {
|
||||
u8 max_txq_num;
|
||||
u16 scd_bc_tbls_size;
|
||||
u32 tfd_size;
|
||||
u8 tx_chains_num;
|
||||
u8 rx_chains_num;
|
||||
u8 valid_tx_ant;
|
||||
u8 valid_rx_ant;
|
||||
u16 max_rxq_size;
|
||||
u16 max_rxq_log;
|
||||
u32 rx_page_order;
|
||||
u8 max_stations;
|
||||
u8 ht40_channel;
|
||||
u8 max_beacon_itrvl; /* in 1024 ms */
|
||||
u32 max_inst_size;
|
||||
u32 max_data_size;
|
||||
u32 ct_kill_threshold; /* value in hw-dependent units */
|
||||
u32 ct_kill_exit_threshold; /* value in hw-dependent units */
|
||||
/* for 1000, 6000 series and up */
|
||||
u16 beacon_time_tsf_bits;
|
||||
u32 calib_init_cfg;
|
||||
u32 calib_rt_cfg;
|
||||
const struct iwl_sensitivity_ranges *sens;
|
||||
};
|
||||
|
||||
|
||||
/******************************************************************************
|
||||
*
|
||||
* Functions implemented in core module which are forward declared here
|
||||
|
@ -710,26 +574,6 @@ struct iwl_hw_params {
|
|||
****************************************************************************/
|
||||
extern void iwl_update_chain_flags(struct iwl_priv *priv);
|
||||
extern const u8 iwl_bcast_addr[ETH_ALEN];
|
||||
extern int iwl_queue_space(const struct iwl_queue *q);
|
||||
static inline int iwl_queue_used(const struct iwl_queue *q, int i)
{
    return q->write_ptr >= q->read_ptr ?
        (i >= q->read_ptr && i < q->write_ptr) :
        !(i < q->read_ptr && i >= q->write_ptr);
}

static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
{
    return index & (q->n_window - 1);
}
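Both helpers lean on the queue sizes being powers of two. A concrete reading, with n_bd = 256 and n_window = 32 assumed purely for illustration:

    /* wrapped ring: read_ptr = 250, write_ptr = 4:
     *   used slots are 250..255 and 0..3, free slots are 4..249
     *   iwl_queue_used(q, 252) -> true   (!(252 < 250 && 252 >= 4))
     *   iwl_queue_used(q, 100) -> false  (100 < 250 && 100 >= 4)
     * command window: get_cmd_index(q, 252) = 252 & (32 - 1) = 28
     */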
struct iwl_dma_ptr {
|
||||
dma_addr_t dma;
|
||||
void *addr;
|
||||
size_t size;
|
||||
};
|
||||
|
||||
#define IWL_OPERATION_MODE_AUTO 0
|
||||
#define IWL_OPERATION_MODE_HT_ONLY 1
|
||||
|
@ -897,22 +741,6 @@ enum iwl_pa_type {
|
|||
IWL_PA_INTERNAL = 1,
|
||||
};
|
||||
|
||||
/* interrupt statistics */
|
||||
struct isr_statistics {
|
||||
u32 hw;
|
||||
u32 sw;
|
||||
u32 err_code;
|
||||
u32 sch;
|
||||
u32 alive;
|
||||
u32 rfkill;
|
||||
u32 ctkill;
|
||||
u32 wakeup;
|
||||
u32 rx;
|
||||
u32 rx_handlers[REPLY_MAX];
|
||||
u32 tx;
|
||||
u32 unhandled;
|
||||
};
|
||||
|
||||
/* reply_tx_statistics (for _agn devices) */
|
||||
struct reply_tx_error_statistics {
|
||||
u32 pp_delay;
|
||||
|
@ -1114,20 +942,9 @@ struct iwl_notification_wait {
|
|||
bool triggered, aborted;
|
||||
};
|
||||
|
||||
enum iwl_rxon_context_id {
|
||||
IWL_RXON_CTX_BSS,
|
||||
IWL_RXON_CTX_PAN,
|
||||
|
||||
NUM_IWL_RXON_CTX
|
||||
};
|
||||
|
||||
struct iwl_rxon_context {
|
||||
struct ieee80211_vif *vif;
|
||||
|
||||
const u8 *ac_to_fifo;
|
||||
const u8 *ac_to_queue;
|
||||
u8 mcast_queue;
|
||||
|
||||
/*
|
||||
* We could use the vif to indicate active, but we
|
||||
* also need it to be active during disabling when
|
||||
|
@ -1175,6 +992,9 @@ struct iwl_rxon_context {
|
|||
u8 extension_chan_offset;
|
||||
} ht;
|
||||
|
||||
u8 bssid[ETH_ALEN];
|
||||
bool preauth_bssid;
|
||||
|
||||
bool last_tx_rejected;
|
||||
};
|
||||
|
||||
|
@ -1203,16 +1023,17 @@ struct iwl_testmode_trace {
|
|||
};
|
||||
#endif
|
||||
|
||||
/* uCode ownership */
|
||||
#define IWL_OWNERSHIP_DRIVER 0
|
||||
#define IWL_OWNERSHIP_TM 1
|
||||
|
||||
struct iwl_priv {
|
||||
|
||||
/*data shared among all the driver's layers */
|
||||
struct iwl_shared _shrd;
|
||||
struct iwl_shared *shrd;
|
||||
|
||||
/* ieee device used by generic ieee processing code */
|
||||
struct ieee80211_hw *hw;
|
||||
struct ieee80211_channel *ieee_channels;
|
||||
struct ieee80211_rate *ieee_rates;
|
||||
struct kmem_cache *tx_cmd_pool;
|
||||
struct iwl_cfg *cfg;
|
||||
|
||||
enum ieee80211_band band;
|
||||
|
@ -1238,6 +1059,9 @@ struct iwl_priv {
|
|||
/* jiffies when last recovery from statistics was performed */
|
||||
unsigned long rx_statistics_jiffies;
|
||||
|
||||
/*counters */
|
||||
u32 rx_handlers_stats[REPLY_MAX];
|
||||
|
||||
/* force reset */
|
||||
struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
|
||||
|
||||
|
@ -1268,21 +1092,12 @@ struct iwl_priv {
|
|||
u8 scan_tx_ant[IEEE80211_NUM_BANDS];
|
||||
u8 mgmt_tx_ant;
|
||||
|
||||
/* spinlock */
|
||||
spinlock_t lock; /* protect general shared data */
|
||||
spinlock_t hcmd_lock; /* protect hcmd */
|
||||
spinlock_t reg_lock; /* protect hw register access */
|
||||
struct mutex mutex;
|
||||
|
||||
/*TODO: remove these pointers - use bus(priv) instead */
|
||||
struct iwl_bus *bus; /* bus specific data */
|
||||
struct iwl_trans trans;
|
||||
|
||||
/* microcode/device supports multiple contexts */
|
||||
u8 valid_contexts;
|
||||
|
||||
/* command queue number */
|
||||
u8 cmd_queue;
|
||||
|
||||
/* max number of station keys */
|
||||
u8 sta_key_max_num;
|
||||
|
||||
|
@ -1296,9 +1111,6 @@ struct iwl_priv {
|
|||
u32 ucode_ver; /* version of ucode, copy of
|
||||
iwl_ucode.ver */
|
||||
|
||||
/* uCode owner: default: IWL_OWNERSHIP_DRIVER */
|
||||
u8 ucode_owner;
|
||||
|
||||
struct fw_img ucode_rt;
|
||||
struct fw_img ucode_init;
|
||||
struct fw_img ucode_wowlan;
|
||||
|
@ -1334,48 +1146,21 @@ struct iwl_priv {
|
|||
|
||||
int activity_timer_active;
|
||||
|
||||
/* Rx and Tx DMA processing queues */
|
||||
struct iwl_rx_queue rxq;
|
||||
struct iwl_tx_queue *txq;
|
||||
unsigned long txq_ctx_active_msk;
|
||||
struct iwl_dma_ptr kw; /* keep warm address */
|
||||
struct iwl_dma_ptr scd_bc_tbls;
|
||||
|
||||
u32 scd_base_addr; /* scheduler sram base address */
|
||||
|
||||
unsigned long status;
|
||||
|
||||
/* counts mgmt, ctl, and data packets */
|
||||
struct traffic_stats tx_stats;
|
||||
struct traffic_stats rx_stats;
|
||||
|
||||
/* counts interrupts */
|
||||
struct isr_statistics isr_stats;
|
||||
|
||||
struct iwl_power_mgr power_data;
|
||||
struct iwl_tt_mgmt thermal_throttle;
|
||||
|
||||
/* station table variables */
|
||||
|
||||
/* Note: if lock and sta_lock are needed, lock must be acquired first */
|
||||
spinlock_t sta_lock;
|
||||
int num_stations;
|
||||
struct iwl_station_entry stations[IWLAGN_STATION_COUNT];
|
||||
unsigned long ucode_key_table;
|
||||
|
||||
/* queue refcounts */
|
||||
#define IWL_MAX_HW_QUEUES 32
|
||||
unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
|
||||
/* for each AC */
|
||||
atomic_t queue_stop_count[4];
|
||||
|
||||
/* Indication if ieee80211_ops->open has been called */
|
||||
u8 is_open;
|
||||
|
||||
u8 mac80211_registered;
|
||||
|
||||
bool wowlan;
|
||||
|
||||
/* eeprom -- this is in the card's little endian byte order */
|
||||
u8 *eeprom;
|
||||
int nvm_device_type;
|
||||
|
@ -1411,14 +1196,6 @@ struct iwl_priv {
|
|||
} accum_stats, delta_stats, max_delta_stats;
|
||||
#endif
|
||||
|
||||
/* INT ICT Table */
|
||||
__le32 *ict_tbl;
|
||||
void *ict_tbl_vir;
|
||||
dma_addr_t ict_tbl_dma;
|
||||
dma_addr_t aligned_ict_tbl_dma;
|
||||
int ict_index;
|
||||
u32 inta;
|
||||
bool use_ict;
|
||||
/*
|
||||
* reporting the number of tids has AGG on. 0 means
|
||||
* no AGGREGATION
|
||||
|
@ -1475,15 +1252,8 @@ struct iwl_priv {
|
|||
struct iwl_rxon_context *cur_rssi_ctx;
|
||||
bool bt_is_sco;
|
||||
|
||||
struct iwl_hw_params hw_params;
|
||||
|
||||
u32 inta_mask;
|
||||
|
||||
struct workqueue_struct *workqueue;
|
||||
|
||||
struct work_struct restart;
|
||||
struct work_struct scan_completed;
|
||||
struct work_struct rx_replenish;
|
||||
struct work_struct abort_scan;
|
||||
|
||||
struct work_struct beacon_update;
|
||||
|
@ -1499,8 +1269,6 @@ struct iwl_priv {
|
|||
struct work_struct bt_full_concurrency;
|
||||
struct work_struct bt_runtime_config;
|
||||
|
||||
struct tasklet_struct irq_tasklet;
|
||||
|
||||
struct delayed_work scan_check;
|
||||
|
||||
/* TX Power */
|
||||
|
@ -1509,12 +1277,6 @@ struct iwl_priv {
|
|||
s8 tx_power_lmt_in_half_dbm; /* max tx power in half-dBm format */
|
||||
s8 tx_power_next;
|
||||
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUG
|
||||
/* debugging info */
|
||||
u32 debug_level; /* per device debugging will override global
|
||||
iwl_debug_level if set */
|
||||
#endif /* CONFIG_IWLWIFI_DEBUG */
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
/* debugfs */
|
||||
u16 tx_traffic_idx;
|
||||
|
@ -1552,47 +1314,7 @@ struct iwl_priv {
|
|||
bool have_rekey_data;
|
||||
}; /*iwl_priv */
|
||||
|
||||
static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
|
||||
{
|
||||
set_bit(txq_id, &priv->txq_ctx_active_msk);
|
||||
}
|
||||
|
||||
static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
|
||||
{
|
||||
clear_bit(txq_id, &priv->txq_ctx_active_msk);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUG
|
||||
/*
|
||||
* iwl_get_debug_level: Return active debug level for device
|
||||
*
|
||||
* Using sysfs it is possible to set per device debug level. This debug
|
||||
* level will be used if set, otherwise the global debug level which can be
|
||||
* set via module parameter is used.
|
||||
*/
|
||||
static inline u32 iwl_get_debug_level(struct iwl_priv *priv)
|
||||
{
|
||||
if (priv->debug_level)
|
||||
return priv->debug_level;
|
||||
else
|
||||
return iwl_debug_level;
|
||||
}
|
||||
#else
|
||||
static inline u32 iwl_get_debug_level(struct iwl_priv *priv)
|
||||
{
|
||||
return iwl_debug_level;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
static inline struct ieee80211_hdr *iwl_tx_queue_get_hdr(struct iwl_priv *priv,
|
||||
int txq_id, int idx)
|
||||
{
|
||||
if (priv->txq[txq_id].txb[idx].skb)
|
||||
return (struct ieee80211_hdr *)priv->txq[txq_id].
|
||||
txb[idx].skb->data;
|
||||
return NULL;
|
||||
}
|
||||
extern struct iwl_mod_params iwlagn_mod_params;
|
||||
|
||||
static inline struct iwl_rxon_context *
|
||||
iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
|
||||
|
@ -1659,13 +1381,4 @@ static inline int is_channel_ibss(const struct iwl_channel_info *ch)
|
|||
return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
|
||||
}
|
||||
|
||||
static inline void __iwl_free_pages(struct iwl_priv *priv, struct page *page)
|
||||
{
|
||||
__free_pages(page, priv->hw_params.rx_page_order);
|
||||
}
|
||||
|
||||
static inline void iwl_free_pages(struct iwl_priv *priv, unsigned long page)
|
||||
{
|
||||
free_pages(page, priv->hw_params.rx_page_order);
|
||||
}
|
||||
#endif /* __iwl_dev_h__ */
|
||||
|
|
|
@ -29,6 +29,8 @@
|
|||
|
||||
#include <linux/tracepoint.h>
|
||||
|
||||
struct iwl_priv;
|
||||
|
||||
#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
|
||||
#undef TRACE_EVENT
|
||||
#define TRACE_EVENT(name, proto, ...) \
|
||||
|
|
|
@ -155,11 +155,11 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv)
|
|||
|
||||
for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
|
||||
/* Request semaphore */
|
||||
iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
|
||||
iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
|
||||
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
|
||||
|
||||
/* See if we got it */
|
||||
ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
|
||||
ret = iwl_poll_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
|
||||
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
|
||||
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
|
||||
EEPROM_SEM_TIMEOUT);
|
||||
|
@ -176,14 +176,14 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv)
|
|||
|
||||
static void iwl_eeprom_release_semaphore(struct iwl_priv *priv)
|
||||
{
|
||||
iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
|
||||
iwl_clear_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
|
||||
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
|
||||
|
||||
}
|
||||
|
||||
static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
|
||||
{
|
||||
u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
|
||||
u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
|
||||
int ret = 0;
|
||||
|
||||
IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
|
||||
|
@ -216,17 +216,17 @@ static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
|
|||
|
||||
static void iwl_set_otp_access(struct iwl_priv *priv, enum iwl_access_mode mode)
|
||||
{
|
||||
iwl_read32(priv, CSR_OTP_GP_REG);
|
||||
iwl_read32(bus(priv), CSR_OTP_GP_REG);
|
||||
|
||||
if (mode == IWL_OTP_ACCESS_ABSOLUTE)
|
||||
iwl_clear_bit(priv, CSR_OTP_GP_REG,
|
||||
iwl_clear_bit(bus(priv), CSR_OTP_GP_REG,
|
||||
CSR_OTP_GP_REG_OTP_ACCESS_MODE);
|
||||
else
|
||||
iwl_set_bit(priv, CSR_OTP_GP_REG,
|
||||
iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
|
||||
CSR_OTP_GP_REG_OTP_ACCESS_MODE);
|
||||
}
|
||||
|
||||
static int iwlcore_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
|
||||
static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
|
||||
{
|
||||
u32 otpgp;
|
||||
int nvm_type;
|
||||
|
@ -243,7 +243,7 @@ static int iwlcore_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
|
|||
nvm_type = NVM_DEVICE_TYPE_EEPROM;
|
||||
break;
|
||||
default:
|
||||
otpgp = iwl_read32(priv, CSR_OTP_GP_REG);
|
||||
otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG);
|
||||
if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
|
||||
nvm_type = NVM_DEVICE_TYPE_OTP;
|
||||
else
|
||||
|
@ -258,22 +258,22 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
|
|||
int ret;
|
||||
|
||||
/* Enable 40MHz radio clock */
|
||||
iwl_write32(priv, CSR_GP_CNTRL,
|
||||
iwl_read32(priv, CSR_GP_CNTRL) |
|
||||
iwl_write32(bus(priv), CSR_GP_CNTRL,
|
||||
iwl_read32(bus(priv), CSR_GP_CNTRL) |
|
||||
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
|
||||
|
||||
/* wait for clock to be ready */
|
||||
ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
|
||||
ret = iwl_poll_bit(bus(priv), CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
|
||||
25000);
|
||||
if (ret < 0)
|
||||
IWL_ERR(priv, "Time out access OTP\n");
|
||||
else {
|
||||
iwl_set_bits_prph(priv, APMG_PS_CTRL_REG,
|
||||
iwl_set_bits_prph(bus(priv), APMG_PS_CTRL_REG,
|
||||
APMG_PS_CTRL_VAL_RESET_REQ);
|
||||
udelay(5);
|
||||
iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
|
||||
iwl_clear_bits_prph(bus(priv), APMG_PS_CTRL_REG,
|
||||
APMG_PS_CTRL_VAL_RESET_REQ);
|
||||
|
||||
/*
|
||||
|
@ -281,7 +281,7 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
|
|||
* this is only applicable for HW with OTP shadow RAM
|
||||
*/
|
||||
if (priv->cfg->base_params->shadow_ram_support)
|
||||
iwl_set_bit(priv, CSR_DBG_LINK_PWR_MGMT_REG,
|
||||
iwl_set_bit(bus(priv), CSR_DBG_LINK_PWR_MGMT_REG,
|
||||
CSR_RESET_LINK_PWR_MGMT_DISABLED);
|
||||
}
|
||||
return ret;
|
||||
|
@ -293,9 +293,9 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat
|
|||
u32 r;
|
||||
u32 otpgp;
|
||||
|
||||
iwl_write32(priv, CSR_EEPROM_REG,
|
||||
iwl_write32(bus(priv), CSR_EEPROM_REG,
|
||||
CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
|
||||
ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
|
||||
ret = iwl_poll_bit(bus(priv), CSR_EEPROM_REG,
|
||||
CSR_EEPROM_REG_READ_VALID_MSK,
|
||||
CSR_EEPROM_REG_READ_VALID_MSK,
|
||||
IWL_EEPROM_ACCESS_TIMEOUT);
|
||||
|
@ -303,13 +303,13 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat
|
|||
IWL_ERR(priv, "Time out reading OTP[%d]\n", addr);
|
||||
return ret;
|
||||
}
|
||||
r = iwl_read32(priv, CSR_EEPROM_REG);
|
||||
r = iwl_read32(bus(priv), CSR_EEPROM_REG);
|
||||
/* check for ECC errors: */
|
||||
otpgp = iwl_read32(priv, CSR_OTP_GP_REG);
|
||||
otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG);
|
||||
if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
|
||||
/* stop in this case */
|
||||
/* set the uncorrectable OTP ECC bit for acknowledgement */
|
||||
iwl_set_bit(priv, CSR_OTP_GP_REG,
|
||||
iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
|
||||
CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
|
||||
IWL_ERR(priv, "Uncorrectable OTP ECC error, abort OTP read\n");
|
||||
return -EINVAL;
|
||||
|
@ -317,7 +317,7 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat
|
|||
if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
|
||||
/* continue in this case */
|
||||
/* set the correctable OTP ECC bit for acknowledgement */
|
||||
iwl_set_bit(priv, CSR_OTP_GP_REG,
|
||||
iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
|
||||
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
|
||||
IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
|
||||
}
|
||||
|
@ -424,14 +424,14 @@ u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
|
|||
int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
|
||||
{
|
||||
__le16 *e;
|
||||
u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
|
||||
u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP);
|
||||
int sz;
|
||||
int ret;
|
||||
u16 addr;
|
||||
u16 validblockaddr = 0;
|
||||
u16 cache_addr = 0;
|
||||
|
||||
priv->nvm_device_type = iwlcore_get_nvm_type(priv, hw_rev);
|
||||
priv->nvm_device_type = iwl_get_nvm_type(priv, hw_rev);
|
||||
if (priv->nvm_device_type == -ENOENT)
|
||||
return -ENOENT;
|
||||
/* allocate eeprom */
|
||||
|
@ -469,11 +469,11 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
|
|||
ret = -ENOENT;
|
||||
goto done;
|
||||
}
|
||||
iwl_write32(priv, CSR_EEPROM_GP,
|
||||
iwl_read32(priv, CSR_EEPROM_GP) &
|
||||
iwl_write32(bus(priv), CSR_EEPROM_GP,
|
||||
iwl_read32(bus(priv), CSR_EEPROM_GP) &
|
||||
~CSR_EEPROM_GP_IF_OWNER_MSK);
|
||||
|
||||
iwl_set_bit(priv, CSR_OTP_GP_REG,
|
||||
iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
|
||||
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
|
||||
CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
|
||||
/* traversing the linked list if no shadow ram supported */
|
||||
|
@ -498,10 +498,10 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
|
|||
for (addr = 0; addr < sz; addr += sizeof(u16)) {
|
||||
u32 r;
|
||||
|
||||
iwl_write32(priv, CSR_EEPROM_REG,
|
||||
iwl_write32(bus(priv), CSR_EEPROM_REG,
|
||||
CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
|
||||
|
||||
ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
|
||||
ret = iwl_poll_bit(bus(priv), CSR_EEPROM_REG,
|
||||
CSR_EEPROM_REG_READ_VALID_MSK,
|
||||
CSR_EEPROM_REG_READ_VALID_MSK,
|
||||
IWL_EEPROM_ACCESS_TIMEOUT);
|
||||
|
@ -509,7 +509,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
|
|||
IWL_ERR(priv, "Time out reading EEPROM[%d]\n", addr);
|
||||
goto done;
|
||||
}
|
||||
r = iwl_read32(priv, CSR_EEPROM_REG);
|
||||
r = iwl_read32(bus(priv), CSR_EEPROM_REG);
|
||||
e[addr / 2] = cpu_to_le16(r >> 16);
|
||||
}
|
||||
}
|
||||
|
@ -838,7 +838,7 @@ void iwl_rf_config(struct iwl_priv *priv)
|
|||
|
||||
/* write radio config values to register */
|
||||
if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
|
||||
iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
|
||||
iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
|
||||
EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
|
||||
EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
|
||||
EEPROM_RF_CFG_DASH_MSK(radio_cfg));
|
||||
|
@ -850,7 +850,7 @@ void iwl_rf_config(struct iwl_priv *priv)
|
|||
WARN_ON(1);
|
||||
|
||||
/* set CSR_HW_CONFIG_REG for uCode use */
|
||||
iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
|
||||
iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
|
||||
CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
|
||||
CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
|
||||
}
|
||||
|
|
|
@ -301,7 +301,6 @@ void iwl_eeprom_free(struct iwl_priv *priv);
|
|||
int iwl_eeprom_check_version(struct iwl_priv *priv);
|
||||
int iwl_eeprom_check_sku(struct iwl_priv *priv);
|
||||
const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
|
||||
int iwlcore_eeprom_verify_signature(struct iwl_priv *priv);
|
||||
u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset);
|
||||
int iwl_init_channel_map(struct iwl_priv *priv);
|
||||
void iwl_free_channel_map(struct iwl_priv *priv);
|
||||
|
|
|
@ -63,6 +63,8 @@
|
|||
#ifndef __iwl_fh_h__
|
||||
#define __iwl_fh_h__
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
/****************************/
|
||||
/* Flow Handler Definitions */
|
||||
/****************************/
|
||||
|
@ -266,8 +268,6 @@
|
|||
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
|
||||
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
|
||||
|
||||
#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
|
||||
|
||||
/**
|
||||
* Rx Shared Status Registers (RSSR)
|
||||
*
|
||||
|
@ -422,10 +422,6 @@
|
|||
#define RX_FREE_BUFFERS 64
|
||||
#define RX_LOW_WATERMARK 8
|
||||
|
||||
/* Size of one Rx buffer in host DRAM */
|
||||
#define IWL_RX_BUF_SIZE_4K (4 * 1024)
|
||||
#define IWL_RX_BUF_SIZE_8K (8 * 1024)
|
||||
|
||||
/**
|
||||
* struct iwl_rb_status - reseve buffer status
|
||||
* host memory mapped FH registers
|
||||
|
@ -508,4 +504,16 @@ struct iwl_tfd {
|
|||
/* Keep Warm Size */
|
||||
#define IWL_KW_SIZE 0x1000 /* 4k */
|
||||
|
||||
/* Fixed (non-configurable) rx data from phy */
|
||||
|
||||
/**
 * struct iwlagn_scd_bc_tbl scheduler byte count table
 *	base physical address provided by SCD_DRAM_BASE_ADDR
 * @tfd_offset  0-12 - tx command byte count
 *	       12-16 - station index
 */
struct iwlagn_scd_bc_tbl {
    __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
} __packed;
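Read literally, each tfd_offset entry packs the TX command byte count into the low bits and the station index into the high bits of a little-endian 16-bit word. A sketch of how one entry could be composed, assuming the 12-bit/4-bit split described in the comment above (the helper name is illustrative, not part of the driver):

static inline __le16 iwlagn_scd_bc_entry(u16 byte_cnt, u8 sta_id)
{
    /* bits 0-11: tx command byte count, bits 12-15: station index */
    return cpu_to_le16((byte_cnt & 0xfff) | ((u16)sta_id << 12));
}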
#endif /* !__iwl_fh_h__ */
|
||||
|
|
|
@ -64,99 +64,10 @@ static inline int iwl_queue_dec_wrap(int index, int n_bd)
|
|||
return --index & (n_bd - 1);
|
||||
}
|
||||
|
||||
/*
 * we have 8 bits used like this:
 *
 * 7 6 5 4 3 2 1 0
 * | | | | | | | |
 * | | | | | | +-+-------- AC queue (0-3)
 * | | | | | |
 * | +-+-+-+-+------------ HW queue ID
 * |
 * +---------------------- unused
 */
static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
{
    BUG_ON(ac > 3);   /* only have 2 bits */
    BUG_ON(hwq > 31); /* only use 5 bits */

    txq->swq_id = (hwq << 2) | ac;
}
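A worked example of the packing (the ac and hwq values are picked arbitrarily):

    /* ac = 2, hwq = 10:  swq_id = (10 << 2) | 2 = 0x2a
     * decoding, as iwl_wake_queue()/iwl_stop_queue() below do:
     *   ac  =  0x2a & 3          = 2
     *   hwq = (0x2a >> 2) & 0x1f = 10
     */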
static inline void iwl_wake_queue(struct iwl_priv *priv,
|
||||
struct iwl_tx_queue *txq)
|
||||
{
|
||||
u8 queue = txq->swq_id;
|
||||
u8 ac = queue & 3;
|
||||
u8 hwq = (queue >> 2) & 0x1f;
|
||||
|
||||
if (test_and_clear_bit(hwq, priv->queue_stopped))
|
||||
if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
|
||||
ieee80211_wake_queue(priv->hw, ac);
|
||||
}
|
||||
|
||||
static inline void iwl_stop_queue(struct iwl_priv *priv,
|
||||
struct iwl_tx_queue *txq)
|
||||
{
|
||||
u8 queue = txq->swq_id;
|
||||
u8 ac = queue & 3;
|
||||
u8 hwq = (queue >> 2) & 0x1f;
|
||||
|
||||
if (!test_and_set_bit(hwq, priv->queue_stopped))
|
||||
if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
|
||||
ieee80211_stop_queue(priv->hw, ac);
|
||||
}
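Because several HW queues can feed the same mac80211 AC, these two helpers keep a per-AC refcount: iwl_stop_queue() only bumps queue_stop_count[ac] the first time a given hwq is stopped (guarded by test_and_set_bit()), and iwl_wake_queue() only calls ieee80211_wake_queue() once the count for that AC drops back to zero. A worked example, with the queue numbers invented for illustration:

    /* hwq 4 and hwq 12 both map to ac 2:
     *   stop hwq 4:   bit 4 newly set,   count[2] 0 -> 1, AC 2 stopped
     *   stop hwq 12:  bit 12 newly set,  count[2] 1 -> 2
     *   wake hwq 4:   bit 4 cleared,     count[2] 2 -> 1, AC 2 stays stopped
     *   wake hwq 12:  bit 12 cleared,    count[2] 1 -> 0, AC 2 woken
     */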
static inline void iwl_wake_any_queue(struct iwl_priv *priv,
|
||||
struct iwl_rxon_context *ctx)
|
||||
{
|
||||
u8 ac;
|
||||
|
||||
for (ac = 0; ac < AC_NUM; ac++) {
|
||||
IWL_DEBUG_INFO(priv, "Queue Status: Q[%d] %s\n",
|
||||
ac, (atomic_read(&priv->queue_stop_count[ac]) > 0)
|
||||
? "stopped" : "awake");
|
||||
iwl_wake_queue(priv, &priv->txq[ctx->ac_to_queue[ac]]);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef ieee80211_stop_queue
|
||||
#undef ieee80211_stop_queue
|
||||
#endif
|
||||
|
||||
#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
|
||||
|
||||
#ifdef ieee80211_wake_queue
|
||||
#undef ieee80211_wake_queue
|
||||
#endif
|
||||
|
||||
#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
|
||||
|
||||
static inline void iwl_disable_interrupts(struct iwl_priv *priv)
|
||||
{
|
||||
clear_bit(STATUS_INT_ENABLED, &priv->status);
|
||||
|
||||
/* disable interrupts from uCode/NIC to host */
|
||||
iwl_write32(priv, CSR_INT_MASK, 0x00000000);
|
||||
|
||||
/* acknowledge/clear/reset any interrupts still pending
|
||||
* from uCode or flow handler (Rx/Tx DMA) */
|
||||
iwl_write32(priv, CSR_INT, 0xffffffff);
|
||||
iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
|
||||
IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
|
||||
}
|
||||
|
||||
static inline void iwl_enable_rfkill_int(struct iwl_priv *priv)
|
||||
{
|
||||
IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
|
||||
iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
|
||||
}
|
||||
|
||||
static inline void iwl_enable_interrupts(struct iwl_priv *priv)
|
||||
{
|
||||
IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
|
||||
set_bit(STATUS_INT_ENABLED, &priv->status);
|
||||
iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
|
||||
iwl_write32(bus(priv), CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -25,46 +25,50 @@
|
|||
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
*
|
||||
*****************************************************************************/
|
||||
#include <linux/delay.h>
|
||||
#include <linux/device.h>
|
||||
|
||||
#include "iwl-io.h"
|
||||
#include"iwl-csr.h"
|
||||
#include "iwl-debug.h"
|
||||
|
||||
#define IWL_POLL_INTERVAL 10 /* microseconds */
|
||||
|
||||
static inline void __iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
|
||||
static inline void __iwl_set_bit(struct iwl_bus *bus, u32 reg, u32 mask)
|
||||
{
|
||||
iwl_write32(priv, reg, iwl_read32(priv, reg) | mask);
|
||||
iwl_write32(bus, reg, iwl_read32(bus, reg) | mask);
|
||||
}
|
||||
|
||||
static inline void __iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
|
||||
static inline void __iwl_clear_bit(struct iwl_bus *bus, u32 reg, u32 mask)
|
||||
{
|
||||
iwl_write32(priv, reg, iwl_read32(priv, reg) & ~mask);
|
||||
iwl_write32(bus, reg, iwl_read32(bus, reg) & ~mask);
|
||||
}
|
||||
|
||||
void iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
|
||||
void iwl_set_bit(struct iwl_bus *bus, u32 reg, u32 mask)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&priv->reg_lock, flags);
|
||||
__iwl_set_bit(priv, reg, mask);
|
||||
spin_unlock_irqrestore(&priv->reg_lock, flags);
|
||||
spin_lock_irqsave(&bus->reg_lock, flags);
|
||||
__iwl_set_bit(bus, reg, mask);
|
||||
spin_unlock_irqrestore(&bus->reg_lock, flags);
|
||||
}
|
||||
|
||||
void iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
|
||||
void iwl_clear_bit(struct iwl_bus *bus, u32 reg, u32 mask)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&priv->reg_lock, flags);
|
||||
__iwl_clear_bit(priv, reg, mask);
|
||||
spin_unlock_irqrestore(&priv->reg_lock, flags);
|
||||
spin_lock_irqsave(&bus->reg_lock, flags);
|
||||
__iwl_clear_bit(bus, reg, mask);
|
||||
spin_unlock_irqrestore(&bus->reg_lock, flags);
|
||||
}
|
||||
|
||||
int iwl_poll_bit(struct iwl_priv *priv, u32 addr,
|
||||
int iwl_poll_bit(struct iwl_bus *bus, u32 addr,
|
||||
u32 bits, u32 mask, int timeout)
|
||||
{
|
||||
int t = 0;
|
||||
|
||||
do {
|
||||
if ((iwl_read32(priv, addr) & mask) == (bits & mask))
|
||||
if ((iwl_read32(bus, addr) & mask) == (bits & mask))
|
||||
return t;
|
||||
udelay(IWL_POLL_INTERVAL);
|
||||
t += IWL_POLL_INTERVAL;
|
||||
|
@ -73,14 +77,14 @@ int iwl_poll_bit(struct iwl_priv *priv, u32 addr,
|
|||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
int iwl_grab_nic_access_silent(struct iwl_priv *priv)
|
||||
int iwl_grab_nic_access_silent(struct iwl_bus *bus)
|
||||
{
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&priv->reg_lock);
|
||||
lockdep_assert_held(&bus->reg_lock);
|
||||
|
||||
/* this bit wakes up the NIC */
|
||||
__iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
|
||||
__iwl_set_bit(bus, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
|
||||
|
||||
/*
|
||||
* These bits say the device is running, and should keep running for
|
||||
|
@ -101,70 +105,70 @@ int iwl_grab_nic_access_silent(struct iwl_priv *priv)
|
|||
* 5000 series and later (including 1000 series) have non-volatile SRAM,
|
||||
* and do not save/restore SRAM when power cycling.
|
||||
*/
|
||||
ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
|
||||
ret = iwl_poll_bit(bus, CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
|
||||
(CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
|
||||
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
|
||||
if (ret < 0) {
|
||||
iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
|
||||
iwl_write32(bus, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int iwl_grab_nic_access(struct iwl_priv *priv)
|
||||
int iwl_grab_nic_access(struct iwl_bus *bus)
|
||||
{
|
||||
int ret = iwl_grab_nic_access_silent(priv);
|
||||
int ret = iwl_grab_nic_access_silent(bus);
|
||||
if (ret) {
|
||||
u32 val = iwl_read32(priv, CSR_GP_CNTRL);
|
||||
IWL_ERR(priv,
|
||||
u32 val = iwl_read32(bus, CSR_GP_CNTRL);
|
||||
IWL_ERR(bus,
|
||||
"MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void iwl_release_nic_access(struct iwl_priv *priv)
|
||||
void iwl_release_nic_access(struct iwl_bus *bus)
|
||||
{
|
||||
lockdep_assert_held(&priv->reg_lock);
|
||||
__iwl_clear_bit(priv, CSR_GP_CNTRL,
|
||||
lockdep_assert_held(&bus->reg_lock);
|
||||
__iwl_clear_bit(bus, CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
|
||||
}
|
||||
|
||||
u32 iwl_read_direct32(struct iwl_priv *priv, u32 reg)
u32 iwl_read_direct32(struct iwl_bus *bus, u32 reg)
{
    u32 value;
    unsigned long flags;

    spin_lock_irqsave(&priv->reg_lock, flags);
    iwl_grab_nic_access(priv);
    value = iwl_read32(priv, reg);
    iwl_release_nic_access(priv);
    spin_unlock_irqrestore(&priv->reg_lock, flags);
    spin_lock_irqsave(&bus->reg_lock, flags);
    iwl_grab_nic_access(bus);
    value = iwl_read32(bus(bus), reg);
    iwl_release_nic_access(bus);
    spin_unlock_irqrestore(&bus->reg_lock, flags);

    return value;
}
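The point of these "direct" accessors is that a caller gets the whole wake/access/sleep sequence in one call: reg_lock is taken, iwl_grab_nic_access() asserts the MAC access request so the NIC wakes up, the single register access is performed, and the access is released again. A hypothetical use, with the register chosen only as an illustration:

    u32 gp = iwl_read_direct32(bus(priv), CSR_GP_CNTRL);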
void iwl_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
|
||||
void iwl_write_direct32(struct iwl_bus *bus, u32 reg, u32 value)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&priv->reg_lock, flags);
|
||||
if (!iwl_grab_nic_access(priv)) {
|
||||
iwl_write32(priv, reg, value);
|
||||
iwl_release_nic_access(priv);
|
||||
spin_lock_irqsave(&bus->reg_lock, flags);
|
||||
if (!iwl_grab_nic_access(bus)) {
|
||||
iwl_write32(bus, reg, value);
|
||||
iwl_release_nic_access(bus);
|
||||
}
|
||||
spin_unlock_irqrestore(&priv->reg_lock, flags);
|
||||
spin_unlock_irqrestore(&bus->reg_lock, flags);
|
||||
}
|
||||
|
||||
int iwl_poll_direct_bit(struct iwl_priv *priv, u32 addr, u32 mask,
|
||||
int iwl_poll_direct_bit(struct iwl_bus *bus, u32 addr, u32 mask,
|
||||
int timeout)
|
||||
{
|
||||
int t = 0;
|
||||
|
||||
do {
|
||||
if ((iwl_read_direct32(priv, addr) & mask) == mask)
|
||||
if ((iwl_read_direct32(bus, addr) & mask) == mask)
|
||||
return t;
|
||||
udelay(IWL_POLL_INTERVAL);
|
||||
t += IWL_POLL_INTERVAL;
|
||||
|
@ -173,122 +177,122 @@ int iwl_poll_direct_bit(struct iwl_priv *priv, u32 addr, u32 mask,
|
|||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
static inline u32 __iwl_read_prph(struct iwl_priv *priv, u32 reg)
|
||||
static inline u32 __iwl_read_prph(struct iwl_bus *bus, u32 reg)
|
||||
{
|
||||
iwl_write32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
|
||||
iwl_write32(bus, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
|
||||
rmb();
|
||||
return iwl_read32(priv, HBUS_TARG_PRPH_RDAT);
|
||||
return iwl_read32(bus, HBUS_TARG_PRPH_RDAT);
|
||||
}
|
||||
|
||||
static inline void __iwl_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
|
||||
static inline void __iwl_write_prph(struct iwl_bus *bus, u32 addr, u32 val)
|
||||
{
|
||||
iwl_write32(priv, HBUS_TARG_PRPH_WADDR,
|
||||
iwl_write32(bus, HBUS_TARG_PRPH_WADDR,
|
||||
((addr & 0x0000FFFF) | (3 << 24)));
|
||||
wmb();
|
||||
iwl_write32(priv, HBUS_TARG_PRPH_WDAT, val);
|
||||
iwl_write32(bus, HBUS_TARG_PRPH_WDAT, val);
|
||||
}
|
||||
|
||||
u32 iwl_read_prph(struct iwl_priv *priv, u32 reg)
|
||||
u32 iwl_read_prph(struct iwl_bus *bus, u32 reg)
|
||||
{
|
||||
unsigned long flags;
|
||||
u32 val;
|
||||
|
||||
spin_lock_irqsave(&priv->reg_lock, flags);
|
||||
iwl_grab_nic_access(priv);
|
||||
val = __iwl_read_prph(priv, reg);
|
||||
iwl_release_nic_access(priv);
|
||||
spin_unlock_irqrestore(&priv->reg_lock, flags);
|
||||
spin_lock_irqsave(&bus->reg_lock, flags);
|
||||
iwl_grab_nic_access(bus);
|
||||
val = __iwl_read_prph(bus, reg);
|
||||
iwl_release_nic_access(bus);
|
||||
spin_unlock_irqrestore(&bus->reg_lock, flags);
|
||||
return val;
|
||||
}
|
||||
|
||||
void iwl_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
|
||||
void iwl_write_prph(struct iwl_bus *bus, u32 addr, u32 val)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&priv->reg_lock, flags);
|
||||
if (!iwl_grab_nic_access(priv)) {
|
||||
__iwl_write_prph(priv, addr, val);
|
||||
iwl_release_nic_access(priv);
|
||||
spin_lock_irqsave(&bus->reg_lock, flags);
|
||||
if (!iwl_grab_nic_access(bus)) {
|
||||
__iwl_write_prph(bus, addr, val);
|
||||
iwl_release_nic_access(bus);
|
||||
}
|
||||
spin_unlock_irqrestore(&priv->reg_lock, flags);
|
||||
spin_unlock_irqrestore(&bus->reg_lock, flags);
|
||||
}
|
||||
|
||||
void iwl_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
|
||||
void iwl_set_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&priv->reg_lock, flags);
|
||||
iwl_grab_nic_access(priv);
|
||||
__iwl_write_prph(priv, reg, __iwl_read_prph(priv, reg) | mask);
|
||||
iwl_release_nic_access(priv);
|
||||
spin_unlock_irqrestore(&priv->reg_lock, flags);
|
||||
spin_lock_irqsave(&bus->reg_lock, flags);
|
||||
iwl_grab_nic_access(bus);
|
||||
__iwl_write_prph(bus, reg, __iwl_read_prph(bus, reg) | mask);
|
||||
iwl_release_nic_access(bus);
|
||||
spin_unlock_irqrestore(&bus->reg_lock, flags);
|
||||
}
|
||||
|
||||
void iwl_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
|
||||
void iwl_set_bits_mask_prph(struct iwl_bus *bus, u32 reg,
|
||||
u32 bits, u32 mask)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&priv->reg_lock, flags);
|
||||
iwl_grab_nic_access(priv);
|
||||
__iwl_write_prph(priv, reg,
|
||||
(__iwl_read_prph(priv, reg) & mask) | bits);
|
||||
iwl_release_nic_access(priv);
|
||||
spin_unlock_irqrestore(&priv->reg_lock, flags);
|
||||
spin_lock_irqsave(&bus->reg_lock, flags);
|
||||
iwl_grab_nic_access(bus);
|
||||
__iwl_write_prph(bus, reg,
|
||||
(__iwl_read_prph(bus, reg) & mask) | bits);
|
||||
iwl_release_nic_access(bus);
|
||||
spin_unlock_irqrestore(&bus->reg_lock, flags);
|
||||
}
|
||||
|
||||
void iwl_clear_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
|
||||
void iwl_clear_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask)
|
||||
{
|
||||
unsigned long flags;
|
||||
u32 val;
|
||||
|
||||
spin_lock_irqsave(&priv->reg_lock, flags);
|
||||
iwl_grab_nic_access(priv);
|
||||
val = __iwl_read_prph(priv, reg);
|
||||
__iwl_write_prph(priv, reg, (val & ~mask));
|
||||
iwl_release_nic_access(priv);
|
||||
spin_unlock_irqrestore(&priv->reg_lock, flags);
|
||||
spin_lock_irqsave(&bus->reg_lock, flags);
|
||||
iwl_grab_nic_access(bus);
|
||||
val = __iwl_read_prph(bus, reg);
|
||||
__iwl_write_prph(bus, reg, (val & ~mask));
|
||||
iwl_release_nic_access(bus);
|
||||
spin_unlock_irqrestore(&bus->reg_lock, flags);
|
||||
}
|
||||
|
||||
void _iwl_read_targ_mem_words(struct iwl_priv *priv, u32 addr,
|
||||
void _iwl_read_targ_mem_words(struct iwl_bus *bus, u32 addr,
|
||||
void *buf, int words)
|
||||
{
|
||||
unsigned long flags;
|
||||
int offs;
|
||||
u32 *vals = buf;
|
||||
|
||||
spin_lock_irqsave(&priv->reg_lock, flags);
|
||||
iwl_grab_nic_access(priv);
|
||||
spin_lock_irqsave(&bus->reg_lock, flags);
|
||||
iwl_grab_nic_access(bus);
|
||||
|
||||
iwl_write32(priv, HBUS_TARG_MEM_RADDR, addr);
|
||||
iwl_write32(bus, HBUS_TARG_MEM_RADDR, addr);
|
||||
rmb();
|
||||
|
||||
for (offs = 0; offs < words; offs++)
|
||||
vals[offs] = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
|
||||
vals[offs] = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
|
||||
|
||||
iwl_release_nic_access(priv);
|
||||
spin_unlock_irqrestore(&priv->reg_lock, flags);
|
||||
iwl_release_nic_access(bus);
|
||||
spin_unlock_irqrestore(&bus->reg_lock, flags);
|
||||
}
|
||||
|
||||
u32 iwl_read_targ_mem(struct iwl_priv *priv, u32 addr)
|
||||
u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr)
|
||||
{
|
||||
u32 value;
|
||||
|
||||
_iwl_read_targ_mem_words(priv, addr, &value, 1);
|
||||
_iwl_read_targ_mem_words(bus, addr, &value, 1);
|
||||
|
||||
return value;
|
||||
}
|
||||
|
||||
void iwl_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
|
||||
void iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&priv->reg_lock, flags);
|
||||
if (!iwl_grab_nic_access(priv)) {
|
||||
iwl_write32(priv, HBUS_TARG_MEM_WADDR, addr);
|
||||
spin_lock_irqsave(&bus->reg_lock, flags);
|
||||
if (!iwl_grab_nic_access(bus)) {
|
||||
iwl_write32(bus, HBUS_TARG_MEM_WADDR, addr);
|
||||
wmb();
|
||||
iwl_write32(priv, HBUS_TARG_MEM_WDAT, val);
|
||||
iwl_release_nic_access(priv);
|
||||
iwl_write32(bus, HBUS_TARG_MEM_WDAT, val);
|
||||
iwl_release_nic_access(bus);
|
||||
}
|
||||
spin_unlock_irqrestore(&priv->reg_lock, flags);
|
||||
spin_unlock_irqrestore(&bus->reg_lock, flags);
|
||||
}
|
||||
|
|
|
@ -29,65 +29,62 @@
|
|||
#ifndef __iwl_io_h__
#define __iwl_io_h__

#include <linux/io.h>

#include "iwl-dev.h"
#include "iwl-debug.h"
#include "iwl-devtrace.h"
#include "iwl-shared.h"
#include "iwl-bus.h"

static inline void iwl_write8(struct iwl_priv *priv, u32 ofs, u8 val)
static inline void iwl_write8(struct iwl_bus *bus, u32 ofs, u8 val)
{
trace_iwlwifi_dev_iowrite8(priv, ofs, val);
bus_write8(priv->bus, ofs, val);
trace_iwlwifi_dev_iowrite8(priv(bus), ofs, val);
bus_write8(bus, ofs, val);
}

static inline void iwl_write32(struct iwl_priv *priv, u32 ofs, u32 val)
static inline void iwl_write32(struct iwl_bus *bus, u32 ofs, u32 val)
{
trace_iwlwifi_dev_iowrite32(priv, ofs, val);
bus_write32(priv->bus, ofs, val);
trace_iwlwifi_dev_iowrite32(priv(bus), ofs, val);
bus_write32(bus, ofs, val);
}

static inline u32 iwl_read32(struct iwl_priv *priv, u32 ofs)
static inline u32 iwl_read32(struct iwl_bus *bus, u32 ofs)
{
u32 val = bus_read32(priv->bus, ofs);
trace_iwlwifi_dev_ioread32(priv, ofs, val);
u32 val = bus_read32(bus, ofs);
trace_iwlwifi_dev_ioread32(priv(bus), ofs, val);
return val;
}

void iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask);
void iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask);
void iwl_set_bit(struct iwl_bus *bus, u32 reg, u32 mask);
void iwl_clear_bit(struct iwl_bus *bus, u32 reg, u32 mask);

int iwl_poll_bit(struct iwl_priv *priv, u32 addr,
int iwl_poll_bit(struct iwl_bus *bus, u32 addr,
u32 bits, u32 mask, int timeout);
int iwl_poll_direct_bit(struct iwl_priv *priv, u32 addr, u32 mask,
int iwl_poll_direct_bit(struct iwl_bus *bus, u32 addr, u32 mask,
int timeout);

int iwl_grab_nic_access_silent(struct iwl_priv *priv);
int iwl_grab_nic_access(struct iwl_priv *priv);
void iwl_release_nic_access(struct iwl_priv *priv);
int iwl_grab_nic_access_silent(struct iwl_bus *bus);
int iwl_grab_nic_access(struct iwl_bus *bus);
void iwl_release_nic_access(struct iwl_bus *bus);

u32 iwl_read_direct32(struct iwl_priv *priv, u32 reg);
void iwl_write_direct32(struct iwl_priv *priv, u32 reg, u32 value);
u32 iwl_read_direct32(struct iwl_bus *bus, u32 reg);
void iwl_write_direct32(struct iwl_bus *bus, u32 reg, u32 value);


u32 iwl_read_prph(struct iwl_priv *priv, u32 reg);
void iwl_write_prph(struct iwl_priv *priv, u32 addr, u32 val);
void iwl_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask);
void iwl_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
u32 iwl_read_prph(struct iwl_bus *bus, u32 reg);
void iwl_write_prph(struct iwl_bus *bus, u32 addr, u32 val);
void iwl_set_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask);
void iwl_set_bits_mask_prph(struct iwl_bus *bus, u32 reg,
u32 bits, u32 mask);
void iwl_clear_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask);
void iwl_clear_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask);

void _iwl_read_targ_mem_words(struct iwl_priv *priv, u32 addr,
void _iwl_read_targ_mem_words(struct iwl_bus *bus, u32 addr,
void *buf, int words);

#define iwl_read_targ_mem_words(priv, addr, buf, bufsize) \
#define iwl_read_targ_mem_words(bus, addr, buf, bufsize) \
do { \
BUILD_BUG_ON((bufsize) % sizeof(u32)); \
_iwl_read_targ_mem_words(priv, addr, buf, \
_iwl_read_targ_mem_words(bus, addr, buf, \
(bufsize) / sizeof(u32));\
} while (0)

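For context, the reworked macro above is invoked with a buffer whose size is a compile-time multiple of sizeof(u32); the BUILD_BUG_ON() enforces that. The following is only an illustrative sketch of a caller (not part of this commit); the function name and the base address parameter are hypothetical:

/* Illustrative sketch only: read a block of target SRAM through the
 * bus-based helper. "example_dump_sram" and "base_addr" are hypothetical;
 * the buffer size (64 bytes) is a multiple of sizeof(u32), so the
 * BUILD_BUG_ON() in the macro is satisfied.
 */
static void example_dump_sram(struct iwl_bus *bus, u32 base_addr)
{
	u32 buf[16];

	iwl_read_targ_mem_words(bus, base_addr, buf, sizeof(buf));
}
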
u32 iwl_read_targ_mem(struct iwl_priv *priv, u32 addr);
void iwl_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val);
u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr);
void iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val);
#endif

@ -40,6 +40,7 @@
|
|||
#include "iwl-agn.h"
|
||||
#include "iwl-io.h"
|
||||
#include "iwl-trans.h"
|
||||
#include "iwl-shared.h"
|
||||
|
||||
/* Throughput OFF time(ms) ON time (ms)
|
||||
* >300 25 25
|
||||
|
@ -70,7 +71,7 @@ static const struct ieee80211_tpt_blink iwl_blink[] = {
|
|||
/* Set led register off */
|
||||
void iwlagn_led_enable(struct iwl_priv *priv)
|
||||
{
|
||||
iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
|
||||
iwl_write32(bus(priv), CSR_LED_REG, CSR_LED_REG_TRUN_ON);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -107,11 +108,11 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
|
|||
};
|
||||
u32 reg;
|
||||
|
||||
reg = iwl_read32(priv, CSR_LED_REG);
|
||||
reg = iwl_read32(bus(priv), CSR_LED_REG);
|
||||
if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
|
||||
iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
|
||||
iwl_write32(bus(priv), CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
|
||||
|
||||
return trans_send_cmd(&priv->trans, &cmd);
|
||||
return iwl_trans_send_cmd(trans(priv), &cmd);
|
||||
}
|
||||
|
||||
/* Set led pattern command */
|
||||
|
@ -125,7 +126,7 @@ static int iwl_led_cmd(struct iwl_priv *priv,
|
|||
};
|
||||
int ret;
|
||||
|
||||
if (!test_bit(STATUS_READY, &priv->status))
|
||||
if (!test_bit(STATUS_READY, &priv->shrd->status))
|
||||
return -EBUSY;
|
||||
|
||||
if (priv->blink_on == on && priv->blink_off == off)
|
||||
|
|
|
@ -64,9 +64,11 @@
|
|||
#include <linux/pci-aspm.h>
|
||||
|
||||
#include "iwl-bus.h"
|
||||
#include "iwl-agn.h"
|
||||
#include "iwl-core.h"
|
||||
#include "iwl-io.h"
|
||||
#include "iwl-shared.h"
|
||||
#include "iwl-trans.h"
|
||||
#include "iwl-csr.h"
|
||||
#include "iwl-pci.h"
|
||||
|
||||
/* PCI registers */
|
||||
#define PCI_CFG_RETRY_TIMEOUT 0x041
|
||||
|
@ -91,6 +93,7 @@ static u16 iwl_pciexp_link_ctrl(struct iwl_bus *bus)
|
|||
{
|
||||
int pos;
|
||||
u16 pci_lnk_ctl;
|
||||
|
||||
struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
|
||||
|
||||
pos = pci_pcie_cap(pci_dev);
|
||||
|
@ -120,21 +123,21 @@ static void iwl_pci_apm_config(struct iwl_bus *bus)
|
|||
if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
|
||||
PCI_CFG_LINK_CTRL_VAL_L1_EN) {
|
||||
/* L1-ASPM enabled; disable(!) L0S */
|
||||
iwl_set_bit(bus->drv_data, CSR_GIO_REG,
|
||||
iwl_set_bit(bus, CSR_GIO_REG,
|
||||
CSR_GIO_REG_VAL_L0S_ENABLED);
|
||||
dev_printk(KERN_INFO, bus->dev, "L1 Enabled; Disabling L0S\n");
|
||||
} else {
|
||||
/* L1-ASPM disabled; enable(!) L0S */
|
||||
iwl_clear_bit(bus->drv_data, CSR_GIO_REG,
|
||||
iwl_clear_bit(bus, CSR_GIO_REG,
|
||||
CSR_GIO_REG_VAL_L0S_ENABLED);
|
||||
dev_printk(KERN_INFO, bus->dev, "L1 Disabled; Enabling L0S\n");
|
||||
}
|
||||
}
|
||||
|
||||
static void iwl_pci_set_drv_data(struct iwl_bus *bus, void *drv_data)
|
||||
static void iwl_pci_set_drv_data(struct iwl_bus *bus, struct iwl_shared *shrd)
|
||||
{
|
||||
bus->drv_data = drv_data;
|
||||
pci_set_drvdata(IWL_BUS_GET_PCI_DEV(bus), drv_data);
|
||||
bus->shrd = shrd;
|
||||
pci_set_drvdata(IWL_BUS_GET_PCI_DEV(bus), shrd);
|
||||
}
|
||||
|
||||
static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[],
|
||||
|
@ -162,7 +165,7 @@ static u32 iwl_pci_read32(struct iwl_bus *bus, u32 ofs)
|
|||
return val;
|
||||
}
|
||||
|
||||
static struct iwl_bus_ops pci_ops = {
|
||||
static const struct iwl_bus_ops bus_ops_pci = {
|
||||
.get_pm_support = iwl_pci_is_pm_supported,
|
||||
.apm_config = iwl_pci_apm_config,
|
||||
.set_drv_data = iwl_pci_set_drv_data,
|
||||
|
@ -256,6 +259,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
|
|||
{IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
|
||||
{IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
|
||||
{IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
|
||||
{IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
|
||||
|
||||
/* 6x30 Series */
|
||||
{IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
|
||||
|
@ -328,6 +332,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
|
|||
{IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
|
||||
{IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
|
||||
{IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
|
||||
{IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)},
|
||||
|
||||
/* 2x30 Series */
|
||||
{IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
|
||||
|
@ -457,9 +462,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
|
||||
bus->dev = &pdev->dev;
|
||||
bus->irq = pdev->irq;
|
||||
bus->ops = &pci_ops;
|
||||
bus->ops = &bus_ops_pci;
|
||||
|
||||
err = iwl_probe(bus, cfg);
|
||||
err = iwl_probe(bus, &trans_ops_pcie, cfg);
|
||||
if (err)
|
||||
goto out_disable_msi;
|
||||
return 0;
|
||||
|
@ -493,33 +498,33 @@ static void iwl_pci_down(struct iwl_bus *bus)
|
|||
|
||||
static void __devexit iwl_pci_remove(struct pci_dev *pdev)
|
||||
{
|
||||
struct iwl_priv *priv = pci_get_drvdata(pdev);
|
||||
void *bus_specific = priv->bus->bus_specific;
|
||||
struct iwl_shared *shrd = pci_get_drvdata(pdev);
|
||||
struct iwl_bus *bus = shrd->bus;
|
||||
|
||||
iwl_remove(priv);
|
||||
iwl_remove(shrd->priv);
|
||||
|
||||
iwl_pci_down(bus_specific);
|
||||
iwl_pci_down(bus);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
|
||||
static int iwl_pci_suspend(struct device *device)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(device);
|
||||
struct iwl_priv *priv = pci_get_drvdata(pdev);
|
||||
struct iwl_shared *shrd = pci_get_drvdata(pdev);
|
||||
|
||||
/* Before you put code here, think about WoWLAN. You cannot check here
|
||||
* whether WoWLAN is enabled or not, and your code will run even if
|
||||
* WoWLAN is enabled - don't kill the NIC, someone may need it in Sx.
|
||||
*/
|
||||
|
||||
return iwl_suspend(priv);
|
||||
return iwl_trans_suspend(shrd->trans);
|
||||
}
|
||||
|
||||
static int iwl_pci_resume(struct device *device)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(device);
|
||||
struct iwl_priv *priv = pci_get_drvdata(pdev);
|
||||
struct iwl_shared *shrd = pci_get_drvdata(pdev);
|
||||
|
||||
/* Before you put code here, think about WoWLAN. You cannot check here
|
||||
* whether WoWLAN is enabled or not, and your code will run even if
|
||||
|
@ -532,7 +537,7 @@ static int iwl_pci_resume(struct device *device)
|
|||
*/
|
||||
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
|
||||
|
||||
return iwl_resume(priv);
|
||||
return iwl_trans_resume(shrd->trans);
|
||||
}
|
||||
|
||||
static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
|
||||
|
|
|
@ -0,0 +1,116 @@
|
|||
/******************************************************************************
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
* redistributing this file, you may do so under either license.
|
||||
*
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
|
||||
* USA
|
||||
*
|
||||
* The full GNU General Public License is included in this distribution
|
||||
* in the file called LICENSE.GPL.
|
||||
*
|
||||
* Contact Information:
|
||||
* Intel Linux Wireless <ilw@linux.intel.com>
|
||||
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
*
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*****************************************************************************/
|
||||
#ifndef __iwl_pci_h__
|
||||
#define __iwl_pci_h__
|
||||
|
||||
|
||||
/* This file includes the declaration that are internal to the PCI
|
||||
* implementation of the bus layer
|
||||
*/
|
||||
|
||||
/* configuration for the _agn devices */
|
||||
extern struct iwl_cfg iwl5300_agn_cfg;
|
||||
extern struct iwl_cfg iwl5100_agn_cfg;
|
||||
extern struct iwl_cfg iwl5350_agn_cfg;
|
||||
extern struct iwl_cfg iwl5100_bgn_cfg;
|
||||
extern struct iwl_cfg iwl5100_abg_cfg;
|
||||
extern struct iwl_cfg iwl5150_agn_cfg;
|
||||
extern struct iwl_cfg iwl5150_abg_cfg;
|
||||
extern struct iwl_cfg iwl6005_2agn_cfg;
|
||||
extern struct iwl_cfg iwl6005_2abg_cfg;
|
||||
extern struct iwl_cfg iwl6005_2bg_cfg;
|
||||
extern struct iwl_cfg iwl6005_2agn_sff_cfg;
|
||||
extern struct iwl_cfg iwl1030_bgn_cfg;
|
||||
extern struct iwl_cfg iwl1030_bg_cfg;
|
||||
extern struct iwl_cfg iwl6030_2agn_cfg;
|
||||
extern struct iwl_cfg iwl6030_2abg_cfg;
|
||||
extern struct iwl_cfg iwl6030_2bgn_cfg;
|
||||
extern struct iwl_cfg iwl6030_2bg_cfg;
|
||||
extern struct iwl_cfg iwl6000i_2agn_cfg;
|
||||
extern struct iwl_cfg iwl6000i_2abg_cfg;
|
||||
extern struct iwl_cfg iwl6000i_2bg_cfg;
|
||||
extern struct iwl_cfg iwl6000_3agn_cfg;
|
||||
extern struct iwl_cfg iwl6050_2agn_cfg;
|
||||
extern struct iwl_cfg iwl6050_2abg_cfg;
|
||||
extern struct iwl_cfg iwl6150_bgn_cfg;
|
||||
extern struct iwl_cfg iwl6150_bg_cfg;
|
||||
extern struct iwl_cfg iwl1000_bgn_cfg;
|
||||
extern struct iwl_cfg iwl1000_bg_cfg;
|
||||
extern struct iwl_cfg iwl100_bgn_cfg;
|
||||
extern struct iwl_cfg iwl100_bg_cfg;
|
||||
extern struct iwl_cfg iwl130_bgn_cfg;
|
||||
extern struct iwl_cfg iwl130_bg_cfg;
|
||||
extern struct iwl_cfg iwl2000_2bgn_cfg;
|
||||
extern struct iwl_cfg iwl2000_2bg_cfg;
|
||||
extern struct iwl_cfg iwl2000_2bgn_d_cfg;
|
||||
extern struct iwl_cfg iwl2030_2bgn_cfg;
|
||||
extern struct iwl_cfg iwl2030_2bg_cfg;
|
||||
extern struct iwl_cfg iwl6035_2agn_cfg;
|
||||
extern struct iwl_cfg iwl6035_2abg_cfg;
|
||||
extern struct iwl_cfg iwl6035_2bg_cfg;
|
||||
extern struct iwl_cfg iwl105_bg_cfg;
|
||||
extern struct iwl_cfg iwl105_bgn_cfg;
|
||||
extern struct iwl_cfg iwl135_bg_cfg;
|
||||
extern struct iwl_cfg iwl135_bgn_cfg;
|
||||
|
||||
#endif /* __iwl_pci_h__ */
|
|
@ -43,6 +43,7 @@
|
|||
#include "iwl-debug.h"
|
||||
#include "iwl-power.h"
|
||||
#include "iwl-trans.h"
|
||||
#include "iwl-shared.h"
|
||||
|
||||
/*
|
||||
* Setting power level allows the card to go to sleep when not busy.
|
||||
|
@ -214,7 +215,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
|
|||
else
|
||||
cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
|
||||
|
||||
if (priv->cfg->base_params->shadow_reg_enable)
|
||||
if (hw_params(priv).shadow_reg_enable)
|
||||
cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
|
||||
else
|
||||
cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
|
||||
|
@ -300,7 +301,7 @@ static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
|
|||
if (priv->power_data.bus_pm)
|
||||
cmd->flags |= IWL_POWER_PCI_PM_MSK;
|
||||
|
||||
if (priv->cfg->base_params->shadow_reg_enable)
|
||||
if (hw_params(priv).shadow_reg_enable)
|
||||
cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
|
||||
else
|
||||
cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
|
||||
|
@ -335,7 +336,7 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
|
|||
le32_to_cpu(cmd->sleep_interval[3]),
|
||||
le32_to_cpu(cmd->sleep_interval[4]));
|
||||
|
||||
return trans_send_cmd_pdu(&priv->trans, POWER_TABLE_CMD, CMD_SYNC,
|
||||
return iwl_trans_send_cmd_pdu(trans(priv), POWER_TABLE_CMD, CMD_SYNC,
|
||||
sizeof(struct iwl_powertable_cmd), cmd);
|
||||
}
|
||||
|
||||
|
@ -347,7 +348,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
|
|||
|
||||
dtimper = priv->hw->conf.ps_dtim_period ?: 1;
|
||||
|
||||
if (priv->wowlan)
|
||||
if (priv->shrd->wowlan)
|
||||
iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
|
||||
else if (!priv->cfg->base_params->no_idle_support &&
|
||||
priv->hw->conf.flags & IEEE80211_CONF_IDLE)
|
||||
|
@ -382,7 +383,7 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
|
|||
int ret;
|
||||
bool update_chains;
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
/* Don't update the RX chain when chain noise calibration is running */
|
||||
update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
|
||||
|
@ -391,23 +392,23 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
|
|||
if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
|
||||
return 0;
|
||||
|
||||
if (!iwl_is_ready_rf(priv))
|
||||
if (!iwl_is_ready_rf(priv->shrd))
|
||||
return -EIO;
|
||||
|
||||
/* scan complete use sleep_power_next, need to be updated */
|
||||
memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
|
||||
if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
|
||||
if (test_bit(STATUS_SCANNING, &priv->shrd->status) && !force) {
|
||||
IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
|
||||
set_bit(STATUS_POWER_PMI, &priv->status);
|
||||
set_bit(STATUS_POWER_PMI, &priv->shrd->status);
|
||||
|
||||
ret = iwl_set_power(priv, cmd);
|
||||
if (!ret) {
|
||||
if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
|
||||
clear_bit(STATUS_POWER_PMI, &priv->status);
|
||||
clear_bit(STATUS_POWER_PMI, &priv->shrd->status);
|
||||
|
||||
if (update_chains)
|
||||
iwl_update_chain_flags(priv);
|
||||
|
|
|
@ -217,8 +217,8 @@
|
|||
((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc)
|
||||
|
||||
#define SCD_QUEUECHAIN_SEL_ALL(priv) \
|
||||
(((1<<(priv)->hw_params.max_txq_num) - 1) &\
|
||||
(~(1<<(priv)->cmd_queue)))
|
||||
(((1<<hw_params(priv).max_txq_num) - 1) &\
|
||||
(~(1<<(priv)->shrd->cmd_queue)))
|
||||
|
||||
#define SCD_BASE (PRPH_BASE + 0xa02c00)
|
||||
|
||||
|
|
|
@ -40,6 +40,7 @@
|
|||
#include "iwl-helpers.h"
|
||||
#include "iwl-agn-calib.h"
|
||||
#include "iwl-agn.h"
|
||||
#include "iwl-shared.h"
|
||||
|
||||
|
||||
/******************************************************************************
|
||||
|
@ -73,7 +74,7 @@ static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
|
|||
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
|
||||
struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
|
||||
|
||||
if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
|
||||
if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status))
|
||||
return;
|
||||
|
||||
if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
|
||||
|
@ -121,7 +122,8 @@ static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
|
|||
struct iwl_rx_mem_buffer *rxb)
|
||||
{
|
||||
struct iwl_rx_packet *pkt = rxb_addr(rxb);
|
||||
u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
|
||||
u32 __maybe_unused len =
|
||||
le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
|
||||
IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
|
||||
"notification for %s:\n", len,
|
||||
get_cmd_string(pkt->hdr.cmd));
|
||||
|
@ -148,8 +150,8 @@ static void iwl_rx_beacon_notif(struct iwl_priv *priv,
|
|||
|
||||
priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
|
||||
|
||||
if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
|
||||
queue_work(priv->workqueue, &priv->beacon_update);
|
||||
if (!test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
|
||||
queue_work(priv->shrd->workqueue, &priv->beacon_update);
|
||||
}
|
||||
|
||||
/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
|
||||
|
@ -258,7 +260,7 @@ static void iwl_recover_from_statistics(struct iwl_priv *priv,
|
|||
{
|
||||
unsigned int msecs;
|
||||
|
||||
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
|
||||
if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
|
||||
return;
|
||||
|
||||
msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies);
|
||||
|
@ -474,7 +476,7 @@ static void iwl_rx_statistics(struct iwl_priv *priv,
|
|||
|
||||
priv->rx_statistics_jiffies = stamp;
|
||||
|
||||
set_bit(STATUS_STATISTICS, &priv->status);
|
||||
set_bit(STATUS_STATISTICS, &priv->shrd->status);
|
||||
|
||||
/* Reschedule the statistics timer to occur in
|
||||
* reg_recalib_period seconds to ensure we get a
|
||||
|
@ -483,10 +485,10 @@ static void iwl_rx_statistics(struct iwl_priv *priv,
|
|||
mod_timer(&priv->statistics_periodic, jiffies +
|
||||
msecs_to_jiffies(reg_recalib_period * 1000));
|
||||
|
||||
if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
|
||||
if (unlikely(!test_bit(STATUS_SCANNING, &priv->shrd->status)) &&
|
||||
(pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
|
||||
iwl_rx_calc_noise(priv);
|
||||
queue_work(priv->workqueue, &priv->run_time_calib_work);
|
||||
queue_work(priv->shrd->workqueue, &priv->run_time_calib_work);
|
||||
}
|
||||
if (priv->cfg->lib->temperature && change)
|
||||
priv->cfg->lib->temperature(priv);
|
||||
|
@ -518,7 +520,7 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
|
|||
{
|
||||
struct iwl_rx_packet *pkt = rxb_addr(rxb);
|
||||
u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
|
||||
unsigned long status = priv->status;
|
||||
unsigned long status = priv->shrd->status;
|
||||
|
||||
IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
|
||||
(flags & HW_CARD_DISABLED) ? "Kill" : "On",
|
||||
|
@ -529,16 +531,16 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
|
|||
if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
|
||||
CT_CARD_DISABLED)) {
|
||||
|
||||
iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
|
||||
iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
|
||||
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
|
||||
|
||||
iwl_write_direct32(priv, HBUS_TARG_MBX_C,
|
||||
iwl_write_direct32(bus(priv), HBUS_TARG_MBX_C,
|
||||
HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
|
||||
|
||||
if (!(flags & RXON_CARD_DISABLED)) {
|
||||
iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
|
||||
iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
|
||||
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
|
||||
iwl_write_direct32(priv, HBUS_TARG_MBX_C,
|
||||
iwl_write_direct32(bus(priv), HBUS_TARG_MBX_C,
|
||||
HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
|
||||
}
|
||||
if (flags & CT_CARD_DISABLED)
|
||||
|
@ -548,18 +550,18 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
|
|||
iwl_tt_exit_ct_kill(priv);
|
||||
|
||||
if (flags & HW_CARD_DISABLED)
|
||||
set_bit(STATUS_RF_KILL_HW, &priv->status);
|
||||
set_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
|
||||
else
|
||||
clear_bit(STATUS_RF_KILL_HW, &priv->status);
|
||||
clear_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
|
||||
|
||||
|
||||
if (!(flags & RXON_CARD_DISABLED))
|
||||
iwl_scan_cancel(priv);
|
||||
|
||||
if ((test_bit(STATUS_RF_KILL_HW, &status) !=
|
||||
test_bit(STATUS_RF_KILL_HW, &priv->status)))
|
||||
test_bit(STATUS_RF_KILL_HW, &priv->shrd->status)))
|
||||
wiphy_rfkill_set_hw_state(priv->hw->wiphy,
|
||||
test_bit(STATUS_RF_KILL_HW, &priv->status));
|
||||
test_bit(STATUS_RF_KILL_HW, &priv->shrd->status));
|
||||
else
|
||||
wake_up_interruptible(&priv->wait_command_queue);
|
||||
}
|
||||
|
@ -580,7 +582,7 @@ static void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
|
|||
le32_to_cpu(missed_beacon->total_missed_becons),
|
||||
le32_to_cpu(missed_beacon->num_recvd_beacons),
|
||||
le32_to_cpu(missed_beacon->num_expected_beacons));
|
||||
if (!test_bit(STATUS_SCANNING, &priv->status))
|
||||
if (!test_bit(STATUS_SCANNING, &priv->shrd->status))
|
||||
iwl_init_sensitivity(priv);
|
||||
}
|
||||
}
|
||||
|
@ -697,7 +699,7 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
|
|||
ctx->active.bssid_addr))
|
||||
continue;
|
||||
ctx->last_tx_rejected = false;
|
||||
iwl_wake_any_queue(priv, ctx);
|
||||
iwl_trans_wake_any_queue(trans(priv), ctx->ctxid);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1018,7 +1020,7 @@ void iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
|
|||
* handle those that need handling via function in
|
||||
* rx_handlers table. See iwl_setup_rx_handlers() */
|
||||
if (priv->rx_handlers[pkt->hdr.cmd]) {
|
||||
priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
|
||||
priv->rx_handlers_stats[pkt->hdr.cmd]++;
|
||||
priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
|
||||
} else {
|
||||
/* No handling needed */
|
||||
|
|
|
@ -68,14 +68,14 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
|
|||
/* Exit instantly with error when device is not ready
|
||||
* to receive scan abort command or it does not perform
|
||||
* hardware scan currently */
|
||||
if (!test_bit(STATUS_READY, &priv->status) ||
|
||||
!test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
|
||||
!test_bit(STATUS_SCAN_HW, &priv->status) ||
|
||||
test_bit(STATUS_FW_ERROR, &priv->status) ||
|
||||
test_bit(STATUS_EXIT_PENDING, &priv->status))
|
||||
if (!test_bit(STATUS_READY, &priv->shrd->status) ||
|
||||
!test_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status) ||
|
||||
!test_bit(STATUS_SCAN_HW, &priv->shrd->status) ||
|
||||
test_bit(STATUS_FW_ERROR, &priv->shrd->status) ||
|
||||
test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
|
||||
return -EIO;
|
||||
|
||||
ret = trans_send_cmd(&priv->trans, &cmd);
|
||||
ret = iwl_trans_send_cmd(trans(priv), &cmd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -91,7 +91,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
|
|||
ret = -EIO;
|
||||
}
|
||||
|
||||
iwl_free_pages(priv, cmd.reply_page);
|
||||
iwl_free_pages(priv->shrd, cmd.reply_page);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -116,17 +116,17 @@ static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
|
|||
|
||||
void iwl_force_scan_end(struct iwl_priv *priv)
|
||||
{
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
if (!test_bit(STATUS_SCANNING, &priv->status)) {
|
||||
if (!test_bit(STATUS_SCANNING, &priv->shrd->status)) {
|
||||
IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
|
||||
return;
|
||||
}
|
||||
|
||||
IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
|
||||
clear_bit(STATUS_SCANNING, &priv->status);
|
||||
clear_bit(STATUS_SCAN_HW, &priv->status);
|
||||
clear_bit(STATUS_SCAN_ABORTING, &priv->status);
|
||||
clear_bit(STATUS_SCANNING, &priv->shrd->status);
|
||||
clear_bit(STATUS_SCAN_HW, &priv->shrd->status);
|
||||
clear_bit(STATUS_SCAN_ABORTING, &priv->shrd->status);
|
||||
iwl_complete_scan(priv, true);
|
||||
}
|
||||
|
||||
|
@ -134,14 +134,14 @@ static void iwl_do_scan_abort(struct iwl_priv *priv)
|
|||
{
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
if (!test_bit(STATUS_SCANNING, &priv->status)) {
|
||||
if (!test_bit(STATUS_SCANNING, &priv->shrd->status)) {
|
||||
IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
|
||||
if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->shrd->status)) {
|
||||
IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
|
||||
return;
|
||||
}
|
||||
|
@ -160,7 +160,7 @@ static void iwl_do_scan_abort(struct iwl_priv *priv)
|
|||
int iwl_scan_cancel(struct iwl_priv *priv)
|
||||
{
|
||||
IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
|
||||
queue_work(priv->workqueue, &priv->abort_scan);
|
||||
queue_work(priv->shrd->workqueue, &priv->abort_scan);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -173,19 +173,19 @@ int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
|
|||
{
|
||||
unsigned long timeout = jiffies + msecs_to_jiffies(ms);
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
|
||||
|
||||
iwl_do_scan_abort(priv);
|
||||
|
||||
while (time_before_eq(jiffies, timeout)) {
|
||||
if (!test_bit(STATUS_SCAN_HW, &priv->status))
|
||||
if (!test_bit(STATUS_SCAN_HW, &priv->shrd->status))
|
||||
break;
|
||||
msleep(20);
|
||||
}
|
||||
|
||||
return test_bit(STATUS_SCAN_HW, &priv->status);
|
||||
return test_bit(STATUS_SCAN_HW, &priv->shrd->status);
|
||||
}
|
||||
|
||||
/* Service response to REPLY_SCAN_CMD (0x80) */
|
||||
|
@ -257,13 +257,13 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
|
|||
scan_notif->tsf_high, scan_notif->status);
|
||||
|
||||
/* The HW is no longer scanning */
|
||||
clear_bit(STATUS_SCAN_HW, &priv->status);
|
||||
clear_bit(STATUS_SCAN_HW, &priv->shrd->status);
|
||||
|
||||
IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
|
||||
(priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
|
||||
jiffies_to_msecs(jiffies - priv->scan_start));
|
||||
|
||||
queue_work(priv->workqueue, &priv->scan_completed);
|
||||
queue_work(priv->shrd->workqueue, &priv->scan_completed);
|
||||
|
||||
if (priv->iw_mode != NL80211_IFTYPE_ADHOC &&
|
||||
iwl_advanced_bt_coexist(priv) &&
|
||||
|
@ -283,7 +283,8 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
|
|||
IWL_BT_COEX_TRAFFIC_LOAD_NONE;
|
||||
}
|
||||
priv->bt_status = scan_notif->bt_status;
|
||||
queue_work(priv->workqueue, &priv->bt_traffic_change_work);
|
||||
queue_work(priv->shrd->workqueue,
|
||||
&priv->bt_traffic_change_work);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -343,7 +344,7 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
|
|||
|
||||
void iwl_init_scan_params(struct iwl_priv *priv)
|
||||
{
|
||||
u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
|
||||
u8 ant_idx = fls(hw_params(priv).valid_tx_ant) - 1;
|
||||
if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
|
||||
priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
|
||||
if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
|
||||
|
@ -357,22 +358,22 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
|
|||
{
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&priv->shrd->mutex);
|
||||
|
||||
cancel_delayed_work(&priv->scan_check);
|
||||
|
||||
if (!iwl_is_ready_rf(priv)) {
|
||||
if (!iwl_is_ready_rf(priv->shrd)) {
|
||||
IWL_WARN(priv, "Request scan called when driver not ready.\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (test_bit(STATUS_SCAN_HW, &priv->status)) {
|
||||
if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
|
||||
IWL_DEBUG_SCAN(priv,
|
||||
"Multiple concurrent scan requests in parallel.\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
|
||||
if (test_bit(STATUS_SCAN_ABORTING, &priv->shrd->status)) {
|
||||
IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
@ -382,19 +383,19 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
|
|||
scan_type == IWL_SCAN_ROC ? "remain-on-channel " :
|
||||
"internal short ");
|
||||
|
||||
set_bit(STATUS_SCANNING, &priv->status);
|
||||
set_bit(STATUS_SCANNING, &priv->shrd->status);
|
||||
priv->scan_type = scan_type;
|
||||
priv->scan_start = jiffies;
|
||||
priv->scan_band = band;
|
||||
|
||||
ret = iwlagn_request_scan(priv, vif);
|
||||
if (ret) {
|
||||
clear_bit(STATUS_SCANNING, &priv->status);
|
||||
clear_bit(STATUS_SCANNING, &priv->shrd->status);
|
||||
priv->scan_type = IWL_SCAN_NORMAL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
queue_delayed_work(priv->workqueue, &priv->scan_check,
|
||||
queue_delayed_work(priv->shrd->workqueue, &priv->scan_check,
|
||||
IWL_SCAN_CHECK_WATCHDOG);
|
||||
|
||||
return 0;
|
||||
|
@ -412,9 +413,9 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
|
|||
if (req->n_channels == 0)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&priv->mutex);
|
||||
mutex_lock(&priv->shrd->mutex);
|
||||
|
||||
if (test_bit(STATUS_SCANNING, &priv->status) &&
|
||||
if (test_bit(STATUS_SCANNING, &priv->shrd->status) &&
|
||||
priv->scan_type != IWL_SCAN_NORMAL) {
|
||||
IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
|
||||
ret = -EAGAIN;
|
||||
|
@ -439,7 +440,7 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
|
|||
IWL_DEBUG_MAC80211(priv, "leave\n");
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&priv->mutex);
|
||||
mutex_unlock(&priv->shrd->mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -450,7 +451,7 @@ out_unlock:
|
|||
*/
|
||||
void iwl_internal_short_hw_scan(struct iwl_priv *priv)
|
||||
{
|
||||
queue_work(priv->workqueue, &priv->start_internal_scan);
|
||||
queue_work(priv->shrd->workqueue, &priv->start_internal_scan);
|
||||
}
|
||||
|
||||
static void iwl_bg_start_internal_scan(struct work_struct *work)
|
||||
|
@ -460,14 +461,14 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
|
|||
|
||||
IWL_DEBUG_SCAN(priv, "Start internal scan\n");
|
||||
|
||||
mutex_lock(&priv->mutex);
|
||||
mutex_lock(&priv->shrd->mutex);
|
||||
|
||||
if (priv->scan_type == IWL_SCAN_RADIO_RESET) {
|
||||
IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
if (test_bit(STATUS_SCANNING, &priv->status)) {
|
||||
if (test_bit(STATUS_SCANNING, &priv->shrd->status)) {
|
||||
IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
|
||||
goto unlock;
|
||||
}
|
||||
|
@ -475,7 +476,7 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
|
|||
if (iwl_scan_initiate(priv, NULL, IWL_SCAN_RADIO_RESET, priv->band))
|
||||
IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
|
||||
unlock:
|
||||
mutex_unlock(&priv->mutex);
|
||||
mutex_unlock(&priv->shrd->mutex);
|
||||
}
|
||||
|
||||
static void iwl_bg_scan_check(struct work_struct *data)
|
||||
|
@ -488,9 +489,9 @@ static void iwl_bg_scan_check(struct work_struct *data)
|
|||
/* Since we are here firmware does not finish scan and
|
||||
* most likely is in bad shape, so we don't bother to
|
||||
* send abort command, just force scan complete to mac80211 */
|
||||
mutex_lock(&priv->mutex);
|
||||
mutex_lock(&priv->shrd->mutex);
|
||||
iwl_force_scan_end(priv);
|
||||
mutex_unlock(&priv->mutex);
|
||||
mutex_unlock(&priv->shrd->mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -548,9 +549,9 @@ static void iwl_bg_abort_scan(struct work_struct *work)
|
|||
|
||||
/* We keep scan_check work queued in case when firmware will not
|
||||
* report back scan completed notification */
|
||||
mutex_lock(&priv->mutex);
|
||||
mutex_lock(&priv->shrd->mutex);
|
||||
iwl_scan_cancel_timeout(priv, 200);
|
||||
mutex_unlock(&priv->mutex);
|
||||
mutex_unlock(&priv->shrd->mutex);
|
||||
}
|
||||
|
||||
static void iwl_bg_scan_completed(struct work_struct *work)
|
||||
|
@ -563,13 +564,13 @@ static void iwl_bg_scan_completed(struct work_struct *work)
|
|||
|
||||
cancel_delayed_work(&priv->scan_check);
|
||||
|
||||
mutex_lock(&priv->mutex);
|
||||
mutex_lock(&priv->shrd->mutex);
|
||||
|
||||
aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
|
||||
aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->shrd->status);
|
||||
if (aborted)
|
||||
IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
|
||||
|
||||
if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
|
||||
if (!test_and_clear_bit(STATUS_SCANNING, &priv->shrd->status)) {
|
||||
IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
|
||||
goto out_settings;
|
||||
}
|
||||
|
@ -605,13 +606,13 @@ out_complete:
|
|||
|
||||
out_settings:
|
||||
/* Can we still talk to firmware ? */
|
||||
if (!iwl_is_ready_rf(priv))
|
||||
if (!iwl_is_ready_rf(priv->shrd))
|
||||
goto out;
|
||||
|
||||
iwlagn_post_scan(priv);
|
||||
|
||||
out:
|
||||
mutex_unlock(&priv->mutex);
|
||||
mutex_unlock(&priv->shrd->mutex);
|
||||
}
|
||||
|
||||
void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
|
||||
|
@ -629,8 +630,8 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
|
|||
cancel_work_sync(&priv->scan_completed);
|
||||
|
||||
if (cancel_delayed_work_sync(&priv->scan_check)) {
|
||||
mutex_lock(&priv->mutex);
|
||||
mutex_lock(&priv->shrd->mutex);
|
||||
iwl_force_scan_end(priv);
|
||||
mutex_unlock(&priv->mutex);
|
||||
mutex_unlock(&priv->shrd->mutex);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,430 @@
|
|||
/******************************************************************************
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
* redistributing this file, you may do so under either license.
|
||||
*
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
|
||||
* USA
|
||||
*
|
||||
* The full GNU General Public License is included in this distribution
|
||||
* in the file called LICENSE.GPL.
|
||||
*
|
||||
* Contact Information:
|
||||
* Intel Linux Wireless <ilw@linux.intel.com>
|
||||
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
*
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*****************************************************************************/
|
||||
#ifndef __iwl_shared_h__
|
||||
#define __iwl_shared_h__
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <net/mac80211.h>
|
||||
|
||||
#include "iwl-commands.h"
|
||||
|
||||
/*This files includes all the types / functions that are exported by the
|
||||
* upper layer to the bus and transport layer */
|
||||
|
||||
struct iwl_cfg;
|
||||
struct iwl_bus;
|
||||
struct iwl_priv;
|
||||
struct iwl_sensitivity_ranges;
|
||||
struct iwl_trans_ops;
|
||||
|
||||
#define DRV_NAME "iwlagn"
|
||||
#define IWLWIFI_VERSION "in-tree:"
|
||||
#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
|
||||
#define DRV_AUTHOR "<ilw@linux.intel.com>"
|
||||
|
||||
extern struct iwl_mod_params iwlagn_mod_params;
|
||||
|
||||
/**
|
||||
* struct iwl_mod_params
|
||||
* @sw_crypto: using hardware encryption, default = 0
|
||||
* @num_of_queues: number of tx queue, HW dependent
|
||||
* @disable_11n: 11n capabilities enabled, default = 0
|
||||
* @amsdu_size_8K: enable 8K amsdu size, default = 1
|
||||
* @antenna: both antennas (use diversity), default = 0
|
||||
* @restart_fw: restart firmware, default = 1
|
||||
* @plcp_check: enable plcp health check, default = true
|
||||
* @ack_check: disable ack health check, default = false
|
||||
* @wd_disable: enable stuck queue check, default = false
|
||||
* @bt_coex_active: enable bt coex, default = true
|
||||
* @led_mode: system default, default = 0
|
||||
* @no_sleep_autoadjust: disable autoadjust, default = true
|
||||
* @power_save: disable power save, default = false
|
||||
* @power_level: power level, default = 1
|
||||
* @debug_level: levels are IWL_DL_*
|
||||
* @ant_coupling: antenna coupling in dB, default = 0
|
||||
* @bt_ch_announce: BT channel inhibition, default = enable
|
||||
* @wanted_ucode_alternative: ucode alternative to use, default = 1
|
||||
* @auto_agg: enable agg. without check, default = true
|
||||
*/
|
||||
struct iwl_mod_params {
|
||||
int sw_crypto;
|
||||
int num_of_queues;
|
||||
int disable_11n;
|
||||
int amsdu_size_8K;
|
||||
int antenna;
|
||||
int restart_fw;
|
||||
bool plcp_check;
|
||||
bool ack_check;
|
||||
bool wd_disable;
|
||||
bool bt_coex_active;
|
||||
int led_mode;
|
||||
bool no_sleep_autoadjust;
|
||||
bool power_save;
|
||||
int power_level;
|
||||
u32 debug_level;
|
||||
int ant_coupling;
|
||||
bool bt_ch_announce;
|
||||
int wanted_ucode_alternative;
|
||||
bool auto_agg;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_hw_params
|
||||
* @max_txq_num: Max # Tx queues supported
|
||||
* @num_ampdu_queues: num of ampdu queues
|
||||
* @tx/rx_chains_num: Number of TX/RX chains
|
||||
* @valid_tx/rx_ant: usable antennas
|
||||
* @max_stations:
|
||||
* @ht40_channel: is 40MHz width possible in band 2.4
|
||||
* @beacon_time_tsf_bits: number of valid tsf bits for beacon time
|
||||
* @sku:
|
||||
* @rx_page_order: Rx buffer page order
|
||||
* @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
|
||||
* BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ)
|
||||
* @sw_crypto: 0 for hw, 1 for sw
|
||||
* @max_xxx_size: for ucode uses
|
||||
* @ct_kill_threshold: temperature threshold
|
||||
* @wd_timeout: TX queues watchdog timeout
|
||||
* @calib_init_cfg: setup initial calibrations for the hw
|
||||
* @calib_rt_cfg: setup runtime calibrations for the hw
|
||||
* @struct iwl_sensitivity_ranges: range of sensitivity values
|
||||
*/
|
||||
struct iwl_hw_params {
|
||||
u8 max_txq_num;
|
||||
u8 num_ampdu_queues;
|
||||
u8 tx_chains_num;
|
||||
u8 rx_chains_num;
|
||||
u8 valid_tx_ant;
|
||||
u8 valid_rx_ant;
|
||||
u8 max_stations;
|
||||
u8 ht40_channel;
|
||||
bool shadow_reg_enable;
|
||||
u16 beacon_time_tsf_bits;
|
||||
u16 sku;
|
||||
u32 rx_page_order;
|
||||
u32 max_inst_size;
|
||||
u32 max_data_size;
|
||||
u32 ct_kill_threshold; /* value in hw-dependent units */
|
||||
u32 ct_kill_exit_threshold; /* value in hw-dependent units */
|
||||
/* for 1000, 6000 series and up */
|
||||
unsigned int wd_timeout;
|
||||
|
||||
u32 calib_init_cfg;
|
||||
u32 calib_rt_cfg;
|
||||
const struct iwl_sensitivity_ranges *sens;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_ht_agg - aggregation status while waiting for block-ack
|
||||
* @txq_id: Tx queue used for Tx attempt
|
||||
* @wait_for_ba: Expect block-ack before next Tx reply
|
||||
* @rate_n_flags: Rate at which Tx was attempted
|
||||
*
|
||||
* If REPLY_TX indicates that aggregation was attempted, driver must wait
|
||||
* for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
|
||||
* until block ack arrives.
|
||||
*/
|
||||
struct iwl_ht_agg {
|
||||
u16 txq_id;
|
||||
u16 wait_for_ba;
|
||||
u32 rate_n_flags;
|
||||
#define IWL_AGG_OFF 0
|
||||
#define IWL_AGG_ON 1
|
||||
#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
|
||||
#define IWL_EMPTYING_HW_QUEUE_DELBA 3
|
||||
u8 state;
|
||||
};
|
||||
|
||||
struct iwl_tid_data {
|
||||
u16 seq_number; /* agn only */
|
||||
u16 tfds_in_queue;
|
||||
struct iwl_ht_agg agg;
|
||||
};
|
||||
|
||||
/**
 * struct iwl_shared - shared fields for all the layers of the driver
 *
 * @dbg_level_dev: dbg level set per device. Prevails on
 *	iwlagn_mod_params.debug_level if set (!= 0)
 * @ucode_owner: IWL_OWNERSHIP_*
 * @cmd_queue: command queue number
 * @status: STATUS_*
 * @bus: pointer to the bus layer data
 * @priv: pointer to the upper layer data
 * @hw_params: see struct iwl_hw_params
 * @workqueue: the workqueue used by all the layers of the driver
 * @lock: protect general shared data
 * @sta_lock: protects the station table.
 *	If lock and sta_lock are needed, lock must be acquired first.
 * @mutex:
 */
struct iwl_shared {
#ifdef CONFIG_IWLWIFI_DEBUG
u32 dbg_level_dev;
#endif /* CONFIG_IWLWIFI_DEBUG */

#define IWL_OWNERSHIP_DRIVER 0
#define IWL_OWNERSHIP_TM 1
u8 ucode_owner;
u8 cmd_queue;
unsigned long status;
bool wowlan;

struct iwl_bus *bus;
struct iwl_priv *priv;
struct iwl_trans *trans;
struct iwl_hw_params hw_params;

struct workqueue_struct *workqueue;
spinlock_t lock;
spinlock_t sta_lock;
struct mutex mutex;

/*these 2 shouldn't really be here, but they are needed for
 * iwl_queue_stop, which is called from the upper layer too
 */
u8 mac80211_registered;
struct ieee80211_hw *hw;

struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
};

/*Whatever _m is (iwl_trans, iwl_priv, iwl_bus, these macros will work */
#define priv(_m) ((_m)->shrd->priv)
#define bus(_m) ((_m)->shrd->bus)
#define trans(_m) ((_m)->shrd->trans)
#define hw_params(_m) ((_m)->shrd->hw_params)

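These accessors are what the rest of the diff converts call sites to: a layer holding only a struct iwl_priv * (or iwl_bus *, iwl_trans *) reaches the other layers through the shared area. A minimal sketch of the pattern, not part of the commit (the helper name is hypothetical; the register write mirrors what iwlagn_led_enable() now does elsewhere in this diff):

/* Illustrative sketch only: "example_led_on" is hypothetical.
 * It shows how upper-layer code reaches the bus layer via bus(priv),
 * matching the converted call sites in this commit.
 */
static void example_led_on(struct iwl_priv *priv)
{
	/* bus-level register write through the shared area */
	iwl_write32(bus(priv), CSR_LED_REG, CSR_LED_REG_TRUN_ON);
}
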
#ifdef CONFIG_IWLWIFI_DEBUG
/*
 * iwl_get_debug_level: Return active debug level for device
 *
 * Using sysfs it is possible to set per device debug level. This debug
 * level will be used if set, otherwise the global debug level which can be
 * set via module parameter is used.
 */
static inline u32 iwl_get_debug_level(struct iwl_shared *shrd)
{
if (shrd->dbg_level_dev)
return shrd->dbg_level_dev;
else
return iwlagn_mod_params.debug_level;
}
#else
static inline u32 iwl_get_debug_level(struct iwl_shared *shrd)
{
return iwlagn_mod_params.debug_level;
}
#endif

static inline void iwl_free_pages(struct iwl_shared *shrd, unsigned long page)
{
free_pages(page, shrd->hw_params.rx_page_order);
}

struct iwl_rx_mem_buffer {
dma_addr_t page_dma;
struct page *page;
struct list_head list;
};

#define rxb_addr(r) page_address(r->page)

/*
 * mac80211 queues, ACs, hardware queues, FIFOs.
 *
 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
 *
 * Mac80211 uses the following numbers, which we get as from it
 * by way of skb_get_queue_mapping(skb):
 *
 *	VO	0
 *	VI	1
 *	BE	2
 *	BK	3
 *
 *
 * Regular (not A-MPDU) frames are put into hardware queues corresponding
 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
 * own queue per aggregation session (RA/TID combination), such queues are
 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
 * order to map frames to the right queue, we also need an AC->hw queue
 * mapping. This is implemented here.
 *
 * Due to the way hw queues are set up (by the hw specific modules like
 * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
 * mapping.
 */

static const u8 tid_to_ac[] = {
IEEE80211_AC_BE,
IEEE80211_AC_BK,
IEEE80211_AC_BK,
IEEE80211_AC_BE,
IEEE80211_AC_VI,
IEEE80211_AC_VI,
IEEE80211_AC_VO,
IEEE80211_AC_VO
};

static inline int get_ac_from_tid(u16 tid)
{
if (likely(tid < ARRAY_SIZE(tid_to_ac)))
return tid_to_ac[tid];

/* no support for TIDs 8-15 yet */
return -EINVAL;
}

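To make the mapping concrete, a hedged sketch of a caller (hypothetical helper name, not part of the commit) that resolves a frame's TID to an access category and falls back to best effort for the unsupported TIDs 8-15:

/* Illustrative sketch only: "example_ac_for_frame" is hypothetical. */
static int example_ac_for_frame(u16 tid)
{
	int ac = get_ac_from_tid(tid);	/* IEEE80211_AC_* or -EINVAL */

	if (ac < 0)
		return IEEE80211_AC_BE;	/* TIDs 8-15: fall back to best effort */
	return ac;
}
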
enum iwl_rxon_context_id {
IWL_RXON_CTX_BSS,
IWL_RXON_CTX_PAN,

NUM_IWL_RXON_CTX
};

#ifdef CONFIG_PM
int iwl_suspend(struct iwl_priv *priv);
int iwl_resume(struct iwl_priv *priv);
#endif /* !CONFIG_PM */

int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
struct iwl_cfg *cfg);
void __devexit iwl_remove(struct iwl_priv * priv);

void iwl_start_tx_ba_trans_ready(struct iwl_priv *priv,
enum iwl_rxon_context_id ctx,
u8 sta_id, u8 tid);
void iwl_stop_tx_ba_trans_ready(struct iwl_priv *priv,
enum iwl_rxon_context_id ctx,
u8 sta_id, u8 tid);

/*****************************************************
 * DRIVER STATUS FUNCTIONS
 ******************************************************/
#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
#define STATUS_INT_ENABLED 2
#define STATUS_RF_KILL_HW 3
#define STATUS_CT_KILL 4
#define STATUS_INIT 5
#define STATUS_ALIVE 6
#define STATUS_READY 7
#define STATUS_TEMPERATURE 8
#define STATUS_GEO_CONFIGURED 9
#define STATUS_EXIT_PENDING 10
#define STATUS_STATISTICS 12
#define STATUS_SCANNING 13
#define STATUS_SCAN_ABORTING 14
#define STATUS_SCAN_HW 15
#define STATUS_POWER_PMI 16
#define STATUS_FW_ERROR 17
#define STATUS_DEVICE_ENABLED 18
#define STATUS_CHANNEL_SWITCH_PENDING 19

static inline int iwl_is_ready(struct iwl_shared *shrd)
{
/* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
 * set but EXIT_PENDING is not */
return test_bit(STATUS_READY, &shrd->status) &&
test_bit(STATUS_GEO_CONFIGURED, &shrd->status) &&
!test_bit(STATUS_EXIT_PENDING, &shrd->status);
}

static inline int iwl_is_alive(struct iwl_shared *shrd)
{
return test_bit(STATUS_ALIVE, &shrd->status);
}

static inline int iwl_is_init(struct iwl_shared *shrd)
{
return test_bit(STATUS_INIT, &shrd->status);
}

static inline int iwl_is_rfkill_hw(struct iwl_shared *shrd)
{
return test_bit(STATUS_RF_KILL_HW, &shrd->status);
}

static inline int iwl_is_rfkill(struct iwl_shared *shrd)
{
return iwl_is_rfkill_hw(shrd);
}

static inline int iwl_is_ctkill(struct iwl_shared *shrd)
{
return test_bit(STATUS_CT_KILL, &shrd->status);
}

static inline int iwl_is_ready_rf(struct iwl_shared *shrd)
{
if (iwl_is_rfkill(shrd))
return 0;

return iwl_is_ready(shrd);
}

#endif /* #__iwl_shared_h__ */

@ -38,7 +38,7 @@
|
|||
#include "iwl-trans.h"
|
||||
#include "iwl-agn.h"
|
||||
|
||||
/* priv->sta_lock must be held */
|
||||
/* priv->shrd->sta_lock must be held */
|
||||
static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
|
||||
{
|
||||
|
||||
|
@ -75,7 +75,7 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
|
|||
IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
|
||||
sta_id);
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
|
||||
|
||||
switch (pkt->u.add_sta.status) {
|
||||
case ADD_STA_SUCCESS_MSK:
|
||||
|
@ -118,7 +118,7 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
|
|||
priv->stations[sta_id].sta.mode ==
|
||||
STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
|
||||
addsta->sta.addr);
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -168,7 +168,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
|
|||
}
|
||||
|
||||
cmd.len[0] = iwlagn_build_addsta_hcmd(sta, data);
|
||||
ret = trans_send_cmd(&priv->trans, &cmd);
|
||||
ret = iwl_trans_send_cmd(trans(priv), &cmd);
|
||||
|
||||
if (ret || (flags & CMD_ASYNC))
|
||||
return ret;
|
||||
|
@ -177,7 +177,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
|
|||
pkt = (struct iwl_rx_packet *)cmd.reply_page;
|
||||
ret = iwl_process_add_sta_resp(priv, sta, pkt, true);
|
||||
}
|
||||
iwl_free_pages(priv, cmd.reply_page);
|
||||
iwl_free_pages(priv->shrd, cmd.reply_page);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -251,7 +251,8 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
|
|||
else if (is_broadcast_ether_addr(addr))
|
||||
sta_id = ctx->bcast_sta_id;
|
||||
else
|
||||
for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
|
||||
for (i = IWL_STA_ID;
|
||||
i < hw_params(priv).max_stations; i++) {
|
||||
if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
|
||||
addr)) {
|
||||
sta_id = i;
|
||||
|
@ -336,12 +337,12 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
|
|||
struct iwl_addsta_cmd sta_cmd;
|
||||
|
||||
*sta_id_r = 0;
|
||||
spin_lock_irqsave(&priv->sta_lock, flags_spin);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
|
||||
sta_id = iwl_prep_station(priv, ctx, addr, is_ap, sta);
|
||||
if (sta_id == IWL_INVALID_STATION) {
|
||||
IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
|
||||
addr);
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -353,7 +354,7 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
|
|||
if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
|
||||
IWL_DEBUG_INFO(priv, "STA %d already in process of being added.\n",
|
||||
sta_id);
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
|
||||
return -EEXIST;
|
||||
}
|
||||
|
||||
|
@ -361,23 +362,23 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
|
|||
(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
|
||||
IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not adding again.\n",
|
||||
sta_id, addr);
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
|
||||
return -EEXIST;
|
||||
}
|
||||
|
||||
priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
|
||||
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
|
||||
|
||||
/* Add station to device's station table */
|
||||
ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
|
||||
if (ret) {
|
||||
spin_lock_irqsave(&priv->sta_lock, flags_spin);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
|
||||
IWL_ERR(priv, "Adding station %pM failed.\n",
|
||||
priv->stations[sta_id].sta.sta.addr);
|
||||
priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
|
||||
priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
|
||||
}
|
||||
*sta_id_r = sta_id;
|
||||
return ret;
|
||||
|
@ -386,7 +387,7 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
|
|||
/**
|
||||
* iwl_sta_ucode_deactivate - deactivate ucode status for a station
|
||||
*
|
||||
* priv->sta_lock must be held
|
||||
* priv->shrd->sta_lock must be held
|
||||
*/
|
||||
static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
|
||||
{
|
||||
|
@ -424,7 +425,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
|
|||
|
||||
cmd.flags |= CMD_WANT_SKB;
|
||||
|
||||
ret = trans_send_cmd(&priv->trans, &cmd);
|
||||
ret = iwl_trans_send_cmd(trans(priv), &cmd);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -440,9 +441,11 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
|
|||
switch (pkt->u.rem_sta.status) {
|
||||
case REM_STA_SUCCESS_MSK:
|
||||
if (!temporary) {
|
||||
spin_lock_irqsave(&priv->sta_lock, flags_spin);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock,
|
||||
flags_spin);
|
||||
iwl_sta_ucode_deactivate(priv, sta_id);
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock,
|
||||
flags_spin);
|
||||
}
|
||||
IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
|
||||
break;
|
||||
|
@ -452,7 +455,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
|
|||
break;
|
||||
}
|
||||
}
|
||||
iwl_free_pages(priv, cmd.reply_page);
|
||||
iwl_free_pages(priv->shrd, cmd.reply_page);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -465,7 +468,7 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
|
|||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (!iwl_is_ready(priv)) {
|
||||
if (!iwl_is_ready(priv->shrd)) {
|
||||
IWL_DEBUG_INFO(priv,
|
||||
"Unable to remove station %pM, device not ready.\n",
|
||||
addr);
|
||||
|
@ -483,7 +486,7 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
|
|||
if (WARN_ON(sta_id == IWL_INVALID_STATION))
|
||||
return -EINVAL;
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
|
||||
|
||||
if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
|
||||
IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
|
||||
|
@ -509,11 +512,11 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
|
|||
if (WARN_ON(priv->num_stations < 0))
|
||||
priv->num_stations = 0;
|
||||
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
||||
|
||||
return iwl_send_remove_station(priv, addr, sta_id, false);
|
||||
out_err:
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -534,8 +537,8 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv,
|
|||
|
||||
IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags_spin);
|
||||
for (i = 0; i < priv->hw_params.max_stations; i++) {
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
|
||||
for (i = 0; i < hw_params(priv).max_stations; i++) {
|
||||
if (ctx && ctx->ctxid != priv->stations[i].ctxid)
|
||||
continue;
|
||||
|
||||
|
@ -545,7 +548,7 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv,
|
|||
cleared = true;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
|
||||
|
||||
if (!cleared)
|
||||
IWL_DEBUG_INFO(priv, "No active stations found to be cleared\n");
|
||||
|
@ -569,14 +572,14 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
|
|||
int ret;
|
||||
bool send_lq;
|
||||
|
||||
if (!iwl_is_ready(priv)) {
|
||||
if (!iwl_is_ready(priv->shrd)) {
|
||||
IWL_DEBUG_INFO(priv, "Not ready yet, not restoring any stations.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
|
||||
spin_lock_irqsave(&priv->sta_lock, flags_spin);
|
||||
for (i = 0; i < priv->hw_params.max_stations; i++) {
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
|
||||
for (i = 0; i < hw_params(priv).max_stations; i++) {
|
||||
if (ctx->ctxid != priv->stations[i].ctxid)
|
||||
continue;
|
||||
if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
|
||||
|
@ -589,7 +592,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
|
|||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < priv->hw_params.max_stations; i++) {
|
||||
for (i = 0; i < hw_params(priv).max_stations; i++) {
|
||||
if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
|
||||
memcpy(&sta_cmd, &priv->stations[i].sta,
|
||||
sizeof(struct iwl_addsta_cmd));
|
||||
|
@ -599,15 +602,18 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
|
|||
sizeof(struct iwl_link_quality_cmd));
|
||||
send_lq = true;
|
||||
}
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock,
|
||||
flags_spin);
|
||||
ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
|
||||
if (ret) {
|
||||
spin_lock_irqsave(&priv->sta_lock, flags_spin);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock,
|
||||
flags_spin);
|
||||
IWL_ERR(priv, "Adding station %pM failed.\n",
|
||||
priv->stations[i].sta.sta.addr);
|
||||
priv->stations[i].used &= ~IWL_STA_DRIVER_ACTIVE;
|
||||
priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock,
|
||||
flags_spin);
|
||||
}
|
||||
/*
|
||||
* Rate scaling has already been initialized, send
|
||||
|
@ -615,12 +621,12 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
|
|||
*/
|
||||
if (send_lq)
|
||||
iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
|
||||
spin_lock_irqsave(&priv->sta_lock, flags_spin);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
|
||||
priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
|
||||
if (!found)
|
||||
IWL_DEBUG_INFO(priv, "Restoring all known stations .... no stations to be restored.\n");
|
||||
else
|
||||
|
@ -636,9 +642,9 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
|
|||
struct iwl_link_quality_cmd lq;
|
||||
bool active;
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
|
||||
if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -648,7 +654,7 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
|
|||
|
||||
active = priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE;
|
||||
priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
||||
|
||||
if (active) {
|
||||
ret = iwl_send_remove_station(
|
||||
|
@ -658,9 +664,9 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
|
|||
IWL_ERR(priv, "failed to remove STA %pM (%d)\n",
|
||||
priv->stations[sta_id].sta.sta.addr, ret);
|
||||
}
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
|
||||
priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
||||
|
||||
ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
|
||||
if (ret)
|
||||
|
@ -685,8 +691,8 @@ void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
|
|||
unsigned long flags;
|
||||
int i;
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
for (i = 0; i < priv->hw_params.max_stations; i++) {
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
|
||||
for (i = 0; i < hw_params(priv).max_stations; i++) {
|
||||
if (!(priv->stations[i].used & IWL_STA_BCAST))
|
||||
continue;
|
||||
|
||||
|
@ -697,7 +703,7 @@ void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
|
|||
kfree(priv->stations[i].lq);
|
||||
priv->stations[i].lq = NULL;
|
||||
}
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUG
|
||||
|
@ -781,19 +787,19 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
|
|||
return -EINVAL;
|
||||
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags_spin);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
|
||||
if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
|
||||
return -EINVAL;
|
||||
}
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
|
||||
|
||||
iwl_dump_lq_cmd(priv, lq);
|
||||
if (WARN_ON(init && (cmd.flags & CMD_ASYNC)))
|
||||
return -EINVAL;
|
||||
|
||||
if (is_lq_table_valid(priv, ctx, lq))
|
||||
ret = trans_send_cmd(&priv->trans, &cmd);
|
||||
ret = iwl_trans_send_cmd(trans(priv), &cmd);
|
||||
else
|
||||
ret = -EINVAL;
|
||||
|
||||
|
@ -803,9 +809,9 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
|
|||
if (init) {
|
||||
IWL_DEBUG_INFO(priv, "init LQ command complete, clearing sta addition status for sta %d\n",
|
||||
lq->sta_id);
|
||||
spin_lock_irqsave(&priv->sta_lock, flags_spin);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
|
||||
priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
@ -820,13 +826,13 @@ int iwl_mac_sta_remove(struct ieee80211_hw *hw,
|
|||
|
||||
IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
|
||||
sta->addr);
|
||||
mutex_lock(&priv->mutex);
|
||||
mutex_lock(&priv->shrd->mutex);
|
||||
IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
|
||||
sta->addr);
|
||||
ret = iwl_remove_station(priv, sta_common->sta_id, sta->addr);
|
||||
if (ret)
|
||||
IWL_ERR(priv, "Error removing station %pM\n",
|
||||
sta->addr);
|
||||
mutex_unlock(&priv->mutex);
|
||||
mutex_unlock(&priv->shrd->mutex);
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -76,7 +76,7 @@ static inline void iwl_clear_driver_stations(struct iwl_priv *priv)
|
|||
unsigned long flags;
|
||||
struct iwl_rxon_context *ctx;
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
|
||||
memset(priv->stations, 0, sizeof(priv->stations));
|
||||
priv->num_stations = 0;
|
||||
|
||||
|
@ -94,7 +94,7 @@ static inline void iwl_clear_driver_stations(struct iwl_priv *priv)
|
|||
ctx->key_mapping_keys = 0;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
||||
}
|
||||
|
||||
static inline int iwl_sta_id(struct ieee80211_sta *sta)
|
||||
|
|
|
@ -72,7 +72,6 @@
|
|||
#include "iwl-dev.h"
|
||||
#include "iwl-core.h"
|
||||
#include "iwl-debug.h"
|
||||
#include "iwl-fh.h"
|
||||
#include "iwl-io.h"
|
||||
#include "iwl-agn.h"
|
||||
#include "iwl-testmode.h"
|
||||
|
@ -239,7 +238,7 @@ static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
|
|||
IWL_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x,"
|
||||
" len %d\n", cmd.id, cmd.flags, cmd.len[0]);
|
||||
/* ok, let's submit the command to ucode */
|
||||
return trans_send_cmd(&priv->trans, &cmd);
|
||||
return iwl_trans_send_cmd(trans(priv), &cmd);
|
||||
}
|
||||
|
||||
|
||||
|
@ -277,7 +276,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
|
|||
|
||||
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
|
||||
case IWL_TM_CMD_APP2DEV_REG_READ32:
|
||||
val32 = iwl_read32(priv, ofs);
|
||||
val32 = iwl_read32(bus(priv), ofs);
|
||||
IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
|
||||
|
||||
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
|
||||
|
@ -299,7 +298,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
|
|||
} else {
|
||||
val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
|
||||
IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
|
||||
iwl_write32(priv, ofs, val32);
|
||||
iwl_write32(bus(priv), ofs, val32);
|
||||
}
|
||||
break;
|
||||
case IWL_TM_CMD_APP2DEV_REG_WRITE8:
|
||||
|
@ -309,7 +308,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
|
|||
} else {
|
||||
val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
|
||||
IWL_INFO(priv, "8bit value to write 0x%x\n", val8);
|
||||
iwl_write8(priv, ofs, val8);
|
||||
iwl_write8(bus(priv), ofs, val8);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
|
@ -405,7 +404,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
|
|||
|
||||
case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
|
||||
iwl_testmode_cfg_init_calib(priv);
|
||||
trans_stop_device(&priv->trans);
|
||||
iwl_trans_stop_device(trans(priv));
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
|
||||
|
@ -613,7 +612,7 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
|
|||
|
||||
owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
|
||||
if ((owner == IWL_OWNERSHIP_DRIVER) || (owner == IWL_OWNERSHIP_TM))
|
||||
priv->ucode_owner = owner;
|
||||
priv->shrd->ucode_owner = owner;
|
||||
else {
|
||||
IWL_DEBUG_INFO(priv, "Invalid owner\n");
|
||||
return -EINVAL;
|
||||
|
@ -661,7 +660,7 @@ int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
|
|||
return -ENOMSG;
|
||||
}
|
||||
/* in case multiple accesses to the device happens */
|
||||
mutex_lock(&priv->mutex);
|
||||
mutex_lock(&priv->shrd->mutex);
|
||||
|
||||
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
|
||||
case IWL_TM_CMD_APP2DEV_UCODE:
|
||||
|
@ -702,7 +701,7 @@ int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
|
|||
break;
|
||||
}
|
||||
|
||||
mutex_unlock(&priv->mutex);
|
||||
mutex_unlock(&priv->shrd->mutex);
|
||||
return result;
|
||||
}
|
||||
|
||||
|
@ -738,7 +737,7 @@ int iwl_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
|
|||
}
|
||||
|
||||
/* in case multiple accesses to the device happens */
|
||||
mutex_lock(&priv->mutex);
|
||||
mutex_lock(&priv->shrd->mutex);
|
||||
switch (cmd) {
|
||||
case IWL_TM_CMD_APP2DEV_READ_TRACE:
|
||||
IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n");
|
||||
|
@ -749,6 +748,6 @@ int iwl_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
|
|||
break;
|
||||
}
|
||||
|
||||
mutex_unlock(&priv->mutex);
|
||||
mutex_unlock(&priv->shrd->mutex);
|
||||
return result;
|
||||
}
|
||||
|
|
|
@ -29,54 +29,318 @@
|
|||
#ifndef __iwl_trans_int_pcie_h__
|
||||
#define __iwl_trans_int_pcie_h__
|
||||
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/skbuff.h>
|
||||
|
||||
#include "iwl-fh.h"
|
||||
#include "iwl-csr.h"
|
||||
#include "iwl-shared.h"
|
||||
#include "iwl-trans.h"
|
||||
#include "iwl-debug.h"
|
||||
#include "iwl-io.h"
|
||||
|
||||
struct iwl_tx_queue;
|
||||
struct iwl_queue;
|
||||
struct iwl_host_cmd;
|
||||
|
||||
/* This file includes declarations that are internal to the
 * trans_pcie layer */
|
||||
|
||||
/**
 * struct isr_statistics - interrupt statistics
 *
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};
|
||||
|
||||
/**
|
||||
* struct iwl_rx_queue - Rx queue
|
||||
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
|
||||
* @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
|
||||
* @pool:
|
||||
* @queue:
|
||||
* @read: Shared index to newest available Rx buffer
|
||||
* @write: Shared index to oldest written Rx packet
|
||||
* @free_count: Number of pre-allocated buffers in rx_free
|
||||
* @write_actual:
|
||||
* @rx_free: list of free SKBs for use
|
||||
* @rx_used: List of Rx buffers with no SKB
|
||||
* @need_update: flag to indicate we need to update read/write index
|
||||
* @rb_stts: driver's pointer to receive buffer status
|
||||
* @rb_stts_dma: bus address of receive buffer status
|
||||
* @lock:
|
||||
*
|
||||
* NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
|
||||
*/
|
||||
struct iwl_rx_queue {
|
||||
__le32 *bd;
|
||||
dma_addr_t bd_dma;
|
||||
struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
|
||||
struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
|
||||
u32 read;
|
||||
u32 write;
|
||||
u32 free_count;
|
||||
u32 write_actual;
|
||||
struct list_head rx_free;
|
||||
struct list_head rx_used;
|
||||
int need_update;
|
||||
struct iwl_rb_status *rb_stts;
|
||||
dma_addr_t rb_stts_dma;
|
||||
spinlock_t lock;
|
||||
};
|
||||
|
||||
struct iwl_dma_ptr {
|
||||
dma_addr_t dma;
|
||||
void *addr;
|
||||
size_t size;
|
||||
};
|
||||
|
||||
/*
|
||||
* This queue number is required for proper operation
|
||||
* because the ucode will stop/start the scheduler as
|
||||
* required.
|
||||
*/
|
||||
#define IWL_IPAN_MCAST_QUEUE 8
|
||||
|
||||
/**
|
||||
* struct iwl_trans_pcie - PCIe transport specific data
|
||||
* @rxq: all the RX queue data
|
||||
* @rx_replenish: work that will be called when buffers need to be allocated
|
||||
* @trans: pointer to the generic transport area
|
||||
* @scd_base_addr: scheduler sram base address in SRAM
|
||||
* @scd_bc_tbls: pointer to the byte count table of the scheduler
|
||||
* @kw: keep warm address
|
||||
 * @ac_to_fifo: to what fifo is a specific AC mapped?
|
||||
 * @ac_to_queue: to what tx queue is a specific AC mapped?
|
||||
* @mcast_queue:
|
||||
* @txq: Tx DMA processing queues
|
||||
* @txq_ctx_active_msk: what queue is active
|
||||
 * @queue_stopped: tracks what queue is stopped
 * @queue_stop_count: tracks what SW queue is stopped
|
||||
*/
|
||||
struct iwl_trans_pcie {
|
||||
struct iwl_rx_queue rxq;
|
||||
struct work_struct rx_replenish;
|
||||
struct iwl_trans *trans;
|
||||
|
||||
/* INT ICT Table */
|
||||
__le32 *ict_tbl;
|
||||
void *ict_tbl_vir;
|
||||
dma_addr_t ict_tbl_dma;
|
||||
dma_addr_t aligned_ict_tbl_dma;
|
||||
int ict_index;
|
||||
u32 inta;
|
||||
bool use_ict;
|
||||
struct tasklet_struct irq_tasklet;
|
||||
struct isr_statistics isr_stats;
|
||||
|
||||
u32 inta_mask;
|
||||
u32 scd_base_addr;
|
||||
struct iwl_dma_ptr scd_bc_tbls;
|
||||
struct iwl_dma_ptr kw;
|
||||
|
||||
const u8 *ac_to_fifo[NUM_IWL_RXON_CTX];
|
||||
const u8 *ac_to_queue[NUM_IWL_RXON_CTX];
|
||||
u8 mcast_queue[NUM_IWL_RXON_CTX];
|
||||
|
||||
struct iwl_tx_queue *txq;
|
||||
unsigned long txq_ctx_active_msk;
|
||||
#define IWL_MAX_HW_QUEUES 32
|
||||
unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
|
||||
atomic_t queue_stop_count[4];
|
||||
};
|
||||
|
||||
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
|
||||
((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))
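The accessor above simply casts the transport's opaque trans_specific area to the PCIe-specific struct. The following is a standalone sketch, not part of the commit, showing that pattern with hypothetical names (demo_trans, demo_trans_pcie):

/* Standalone sketch: models how a backend-specific struct can live in an
 * opaque area hung off a generic struct, which is what
 * IWL_TRANS_GET_PCIE_TRANS() does via trans_specific.  Names are
 * illustrative only. */
#include <stdio.h>

struct demo_trans {
	void *trans_specific;	/* opaque, owned by the backend */
};

struct demo_trans_pcie {
	int inta_mask;
};

#define DEMO_GET_PCIE(t) ((struct demo_trans_pcie *)((t)->trans_specific))

int main(void)
{
	struct demo_trans trans;
	struct demo_trans_pcie pcie = { .inta_mask = 0xff };

	trans.trans_specific = &pcie;
	printf("inta_mask = 0x%x\n", DEMO_GET_PCIE(&trans)->inta_mask);
	return 0;
}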
|
||||
|
||||
/*****************************************************
|
||||
* RX
|
||||
******************************************************/
|
||||
void iwl_bg_rx_replenish(struct work_struct *data);
|
||||
void iwl_irq_tasklet(struct iwl_priv *priv);
|
||||
void iwlagn_rx_replenish(struct iwl_priv *priv);
|
||||
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
|
||||
void iwl_irq_tasklet(struct iwl_trans *trans);
|
||||
void iwlagn_rx_replenish(struct iwl_trans *trans);
|
||||
void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
|
||||
struct iwl_rx_queue *q);
|
||||
|
||||
/*****************************************************
|
||||
* ICT
|
||||
******************************************************/
|
||||
int iwl_reset_ict(struct iwl_priv *priv);
|
||||
void iwl_disable_ict(struct iwl_priv *priv);
|
||||
int iwl_alloc_isr_ict(struct iwl_priv *priv);
|
||||
void iwl_free_isr_ict(struct iwl_priv *priv);
|
||||
int iwl_reset_ict(struct iwl_trans *trans);
|
||||
void iwl_disable_ict(struct iwl_trans *trans);
|
||||
int iwl_alloc_isr_ict(struct iwl_trans *trans);
|
||||
void iwl_free_isr_ict(struct iwl_trans *trans);
|
||||
irqreturn_t iwl_isr_ict(int irq, void *data);
|
||||
|
||||
|
||||
/*****************************************************
|
||||
* TX / HCMD
|
||||
******************************************************/
|
||||
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
|
||||
void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
|
||||
int index);
|
||||
int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
|
||||
void iwl_txq_update_write_ptr(struct iwl_trans *trans,
|
||||
struct iwl_tx_queue *txq);
|
||||
int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
|
||||
struct iwl_tx_queue *txq,
|
||||
dma_addr_t addr, u16 len, u8 reset);
|
||||
int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
|
||||
int count, int slots_num, u32 id);
|
||||
int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
|
||||
int __must_check iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags,
|
||||
u16 len, const void *data);
|
||||
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
|
||||
int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
|
||||
int __must_check iwl_trans_pcie_send_cmd_pdu(struct iwl_trans *trans, u8 id,
|
||||
u32 flags, u16 len, const void *data);
|
||||
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
|
||||
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
|
||||
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
|
||||
struct iwl_tx_queue *txq,
|
||||
u16 byte_cnt);
|
||||
int iwl_trans_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
|
||||
u16 ssn_idx, u8 tx_fifo);
|
||||
void iwl_trans_set_wr_ptrs(struct iwl_priv *priv,
|
||||
int txq_id, u32 index);
|
||||
void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
|
||||
void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id);
|
||||
int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
|
||||
enum iwl_rxon_context_id ctx, int sta_id,
|
||||
int tid);
|
||||
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
|
||||
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
|
||||
struct iwl_tx_queue *txq,
|
||||
int tx_fifo_id, int scd_retry);
|
||||
void iwl_trans_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
|
||||
int frame_limit);
|
||||
int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
|
||||
enum iwl_rxon_context_id ctx, int sta_id,
|
||||
int tid, u16 *ssn);
|
||||
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
|
||||
enum iwl_rxon_context_id ctx,
|
||||
int sta_id, int tid, int frame_limit);
|
||||
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
|
||||
int index);
|
||||
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
|
||||
struct sk_buff_head *skbs);
|
||||
int iwl_queue_space(const struct iwl_queue *q);
|
||||
|
||||
/*****************************************************
|
||||
* Error handling
|
||||
******************************************************/
|
||||
int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
|
||||
char **buf, bool display);
|
||||
int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
|
||||
void iwl_dump_csr(struct iwl_trans *trans);
|
||||
|
||||
/*****************************************************
|
||||
* Helpers
|
||||
******************************************************/
|
||||
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
|
||||
{
|
||||
clear_bit(STATUS_INT_ENABLED, &trans->shrd->status);
|
||||
|
||||
/* disable interrupts from uCode/NIC to host */
|
||||
iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);
|
||||
|
||||
/* acknowledge/clear/reset any interrupts still pending
|
||||
* from uCode or flow handler (Rx/Tx DMA) */
|
||||
iwl_write32(bus(trans), CSR_INT, 0xffffffff);
|
||||
iwl_write32(bus(trans), CSR_FH_INT_STATUS, 0xffffffff);
|
||||
IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
|
||||
}
|
||||
|
||||
static inline void iwl_enable_interrupts(struct iwl_trans *trans)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie =
|
||||
IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
|
||||
IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
|
||||
set_bit(STATUS_INT_ENABLED, &trans->shrd->status);
|
||||
iwl_write32(bus(trans), CSR_INT_MASK, trans_pcie->inta_mask);
|
||||
}
|
||||
|
||||
/*
 * we have 8 bits used like this:
 *
 * 7 6 5 4 3 2 1 0
 * | | | | | | | |
 * | | | | | | +-+-------- AC queue (0-3)
 * | | | | | |
 * | +-+-+-+-+------------ HW queue ID
 * |
 * +---------------------- unused
 */
static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
{
	BUG_ON(ac > 3);   /* only have 2 bits */
	BUG_ON(hwq > 31); /* only use 5 bits */

	txq->swq_id = (hwq << 2) | ac;
}
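As the diagram documents, the software queue id packs the AC into bits 0-1 and the HW queue into bits 2-6. A standalone sketch (not from the commit) that round-trips this encoding, mirroring iwl_set_swq_id() on the encode side and the decode done in iwl_wake_queue()/iwl_stop_queue():

/* Standalone sketch: pack/unpack the swq_id layout described above. */
#include <assert.h>
#include <stdio.h>

typedef unsigned char u8;

static u8 pack_swq_id(u8 ac, u8 hwq)
{
	/* 2 bits of AC, 5 bits of HW queue, as in iwl_set_swq_id() */
	return (u8)((hwq << 2) | ac);
}

int main(void)
{
	u8 swq = pack_swq_id(2, 17);
	u8 ac = swq & 3;		/* decode as iwl_wake_queue() does */
	u8 hwq = (swq >> 2) & 0x1f;

	assert(ac == 2 && hwq == 17);
	printf("swq_id=0x%02x ac=%d hwq=%d\n", (unsigned)swq, ac, hwq);
	return 0;
}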
|
||||
|
||||
static inline void iwl_wake_queue(struct iwl_trans *trans,
|
||||
struct iwl_tx_queue *txq)
|
||||
{
|
||||
u8 queue = txq->swq_id;
|
||||
u8 ac = queue & 3;
|
||||
u8 hwq = (queue >> 2) & 0x1f;
|
||||
struct iwl_trans_pcie *trans_pcie =
|
||||
IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
|
||||
if (unlikely(!trans->shrd->mac80211_registered))
|
||||
return;
|
||||
|
||||
if (test_and_clear_bit(hwq, trans_pcie->queue_stopped))
|
||||
if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0)
|
||||
ieee80211_wake_queue(trans->shrd->hw, ac);
|
||||
}
|
||||
|
||||
static inline void iwl_stop_queue(struct iwl_trans *trans,
|
||||
struct iwl_tx_queue *txq)
|
||||
{
|
||||
u8 queue = txq->swq_id;
|
||||
u8 ac = queue & 3;
|
||||
u8 hwq = (queue >> 2) & 0x1f;
|
||||
struct iwl_trans_pcie *trans_pcie =
|
||||
IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
|
||||
if (unlikely(!trans->shrd->mac80211_registered))
|
||||
return;
|
||||
|
||||
if (!test_and_set_bit(hwq, trans_pcie->queue_stopped))
|
||||
if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0)
|
||||
ieee80211_stop_queue(trans->shrd->hw, ac);
|
||||
}
|
||||
|
||||
#ifdef ieee80211_stop_queue
|
||||
#undef ieee80211_stop_queue
|
||||
#endif
|
||||
|
||||
#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
|
||||
|
||||
#ifdef ieee80211_wake_queue
|
||||
#undef ieee80211_wake_queue
|
||||
#endif
|
||||
|
||||
#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
|
||||
|
||||
static inline void iwl_txq_ctx_activate(struct iwl_trans_pcie *trans_pcie,
|
||||
int txq_id)
|
||||
{
|
||||
set_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
|
||||
}
|
||||
|
||||
static inline void iwl_txq_ctx_deactivate(struct iwl_trans_pcie *trans_pcie,
|
||||
int txq_id)
|
||||
{
|
||||
clear_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
|
||||
}
|
||||
|
||||
static inline int iwl_queue_used(const struct iwl_queue *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}

static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
{
	return index & (q->n_window - 1);
}
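iwl_queue_used() handles both the wrapped and non-wrapped state of the circular buffer, and get_cmd_index() relies on n_window being a power of two so a mask can replace a modulo. A standalone sketch, not from the commit, exercising the same logic with made-up values:

/* Standalone sketch: occupancy test and index masking as in
 * iwl_queue_used()/get_cmd_index(); n_window must be a power of two. */
#include <stdio.h>

struct demo_queue {
	int read_ptr;
	int write_ptr;
	int n_window;	/* power of two, so (index & (n_window - 1)) works */
};

static int demo_queue_used(const struct demo_queue *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}

int main(void)
{
	/* wrapped case: slots 250..255 and 0..3 are in use */
	struct demo_queue q = { .read_ptr = 250, .write_ptr = 4, .n_window = 64 };

	printf("slot 252 used: %d\n", demo_queue_used(&q, 252));	/* 1 */
	printf("slot 100 used: %d\n", demo_queue_used(&q, 100));	/* 0 */
	printf("cmd index for 252: %d\n", 252 & (q.n_window - 1));	/* 60 */
	return 0;
}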
|
||||
|
||||
#endif /* __iwl_trans_int_pcie_h__ */
|
||||
|
|
The diff for this file is not shown because it is too large.
|
@ -29,7 +29,6 @@
|
|||
#include <linux/etherdevice.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/sched.h>
|
||||
#include <net/mac80211.h>
|
||||
|
||||
#include "iwl-agn.h"
|
||||
#include "iwl-dev.h"
|
||||
|
@ -41,11 +40,13 @@
|
|||
/**
|
||||
* iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
|
||||
*/
|
||||
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
|
||||
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
|
||||
struct iwl_tx_queue *txq,
|
||||
u16 byte_cnt)
|
||||
{
|
||||
struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
|
||||
struct iwlagn_scd_bc_tbl *scd_bc_tbl;
|
||||
struct iwl_trans_pcie *trans_pcie =
|
||||
IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
int write_ptr = txq->q.write_ptr;
|
||||
int txq_id = txq->q.id;
|
||||
u8 sec_ctl = 0;
|
||||
|
@ -53,6 +54,8 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
|
|||
u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
|
||||
__le16 bc_ent;
|
||||
|
||||
scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
|
||||
|
||||
WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
|
||||
|
||||
sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
|
||||
|
@ -82,7 +85,7 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
|
|||
/**
|
||||
* iwl_txq_update_write_ptr - Send new write index to hardware
|
||||
*/
|
||||
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
|
||||
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
|
||||
{
|
||||
u32 reg = 0;
|
||||
int txq_id = txq->q.id;
|
||||
|
@ -90,28 +93,28 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
|
|||
if (txq->need_update == 0)
|
||||
return;
|
||||
|
||||
if (priv->cfg->base_params->shadow_reg_enable) {
|
||||
if (hw_params(trans).shadow_reg_enable) {
|
||||
/* shadow register enabled */
|
||||
iwl_write32(priv, HBUS_TARG_WRPTR,
|
||||
iwl_write32(bus(trans), HBUS_TARG_WRPTR,
|
||||
txq->q.write_ptr | (txq_id << 8));
|
||||
} else {
|
||||
/* if we're trying to save power */
|
||||
if (test_bit(STATUS_POWER_PMI, &priv->status)) {
|
||||
if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
|
||||
/* wake up nic if it's powered down ...
|
||||
* uCode will wake up, and interrupt us again, so next
|
||||
* time we'll skip this part. */
|
||||
reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
|
||||
reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);
|
||||
|
||||
if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
|
||||
IWL_DEBUG_INFO(priv,
|
||||
IWL_DEBUG_INFO(trans,
|
||||
"Tx queue %d requesting wakeup,"
|
||||
" GP1 = 0x%x\n", txq_id, reg);
|
||||
iwl_set_bit(priv, CSR_GP_CNTRL,
|
||||
iwl_set_bit(bus(trans), CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
|
||||
return;
|
||||
}
|
||||
|
||||
iwl_write_direct32(priv, HBUS_TARG_WRPTR,
|
||||
iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
|
||||
txq->q.write_ptr | (txq_id << 8));
|
||||
|
||||
/*
|
||||
|
@ -120,7 +123,7 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
|
|||
* trying to tx (during RFKILL, we're not trying to tx).
|
||||
*/
|
||||
} else
|
||||
iwl_write32(priv, HBUS_TARG_WRPTR,
|
||||
iwl_write32(bus(trans), HBUS_TARG_WRPTR,
|
||||
txq->q.write_ptr | (txq_id << 8));
|
||||
}
|
||||
txq->need_update = 0;
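The hunk above picks one of three paths when publishing a new TX write pointer: with shadow registers the write is always safe; in power-save (PMI) mode the driver first checks GP1 and, if the MAC is asleep, requests a wakeup and retries later; otherwise it writes directly. A standalone sketch, not from the commit, of that decision flow — the register I/O is stubbed and only the control flow mirrors the driver:

/* Standalone sketch: three-way write-pointer update decision. */
#include <stdbool.h>
#include <stdio.h>

#define GP1_MAC_SLEEP 0x1	/* stand-in for CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP */

static unsigned int read_gp1(void)           { return GP1_MAC_SLEEP; }
static void write_wrptr(int txq_id, int ptr) { printf("WRPTR q%d=%d\n", txq_id, ptr); }
static void request_mac_wakeup(void)         { printf("requesting MAC wakeup\n"); }

static void update_write_ptr(bool shadow_reg, bool power_save_pmi,
			     int txq_id, int write_ptr)
{
	if (shadow_reg) {
		write_wrptr(txq_id, write_ptr);	/* shadow regs: always safe */
	} else if (power_save_pmi) {
		if (read_gp1() & GP1_MAC_SLEEP) {
			request_mac_wakeup();	/* retry after the wakeup IRQ */
			return;
		}
		write_wrptr(txq_id, write_ptr);
	} else {
		write_wrptr(txq_id, write_ptr);	/* NIC awake: plain write */
	}
}

int main(void)
{
	update_write_ptr(false, true, 5, 42);
	update_write_ptr(true, false, 5, 43);
	return 0;
}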
|
||||
|
@ -165,7 +168,7 @@ static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
|
|||
return tfd->num_tbs & 0x1f;
|
||||
}
|
||||
|
||||
static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
|
||||
static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
|
||||
struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
|
||||
{
|
||||
int i;
|
||||
|
@ -175,56 +178,56 @@ static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
|
|||
num_tbs = iwl_tfd_get_num_tbs(tfd);
|
||||
|
||||
if (num_tbs >= IWL_NUM_OF_TBS) {
|
||||
IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
|
||||
IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
|
||||
/* @todo issue fatal error, it is quite serious situation */
|
||||
return;
|
||||
}
|
||||
|
||||
/* Unmap tx_cmd */
|
||||
if (num_tbs)
|
||||
dma_unmap_single(priv->bus->dev,
|
||||
dma_unmap_single(bus(trans)->dev,
|
||||
dma_unmap_addr(meta, mapping),
|
||||
dma_unmap_len(meta, len),
|
||||
DMA_BIDIRECTIONAL);
|
||||
|
||||
/* Unmap chunks, if any. */
|
||||
for (i = 1; i < num_tbs; i++)
|
||||
dma_unmap_single(priv->bus->dev, iwl_tfd_tb_get_addr(tfd, i),
|
||||
dma_unmap_single(bus(trans)->dev, iwl_tfd_tb_get_addr(tfd, i),
|
||||
iwl_tfd_tb_get_len(tfd, i), dma_dir);
|
||||
}
|
||||
|
||||
/**
|
||||
* iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
|
||||
* @priv - driver private data
|
||||
* @trans - transport private data
|
||||
* @txq - tx queue
|
||||
* @index - the index of the TFD to be freed
|
||||
*
|
||||
* Does NOT advance any TFD circular buffer read/write indexes
|
||||
* Does NOT free the TFD itself (which is within circular buffer)
|
||||
*/
|
||||
void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
|
||||
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
|
||||
int index)
|
||||
{
|
||||
struct iwl_tfd *tfd_tmp = txq->tfds;
|
||||
|
||||
iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index],
|
||||
iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index],
|
||||
DMA_TO_DEVICE);
|
||||
|
||||
/* free SKB */
|
||||
if (txq->txb) {
|
||||
if (txq->skbs) {
|
||||
struct sk_buff *skb;
|
||||
|
||||
skb = txq->txb[index].skb;
|
||||
skb = txq->skbs[index];
|
||||
|
||||
/* can be called from irqs-disabled context */
|
||||
if (skb) {
|
||||
dev_kfree_skb_any(skb);
|
||||
txq->txb[index].skb = NULL;
|
||||
txq->skbs[index] = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
|
||||
int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
|
||||
struct iwl_tx_queue *txq,
|
||||
dma_addr_t addr, u16 len,
|
||||
u8 reset)
|
||||
|
@ -244,7 +247,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
|
|||
|
||||
	/* Each TFD can point to a maximum of 20 Tx buffers */
|
||||
if (num_tbs >= IWL_NUM_OF_TBS) {
|
||||
IWL_ERR(priv, "Error can not send more than %d chunks\n",
|
||||
IWL_ERR(trans, "Error can not send more than %d chunks\n",
|
||||
IWL_NUM_OF_TBS);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -253,7 +256,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
|
|||
return -EINVAL;
|
||||
|
||||
if (unlikely(addr & ~IWL_TX_DMA_MASK))
|
||||
IWL_ERR(priv, "Unaligned address = %llx\n",
|
||||
IWL_ERR(trans, "Unaligned address = %llx\n",
|
||||
(unsigned long long)addr);
|
||||
|
||||
iwl_tfd_set_tb(tfd, num_tbs, addr, len);
|
||||
|
@ -302,8 +305,7 @@ int iwl_queue_space(const struct iwl_queue *q)
|
|||
/**
|
||||
* iwl_queue_init - Initialize queue's high/low-water and read/write indexes
|
||||
*/
|
||||
int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
|
||||
int count, int slots_num, u32 id)
|
||||
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
|
||||
{
|
||||
q->n_bd = count;
|
||||
q->n_window = slots_num;
|
||||
|
@ -332,16 +334,12 @@ int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*TODO: this functions should NOT be exported from trans module - export it
|
||||
* until the reclaim flow will be brought to the transport module too.
|
||||
* Add a declaration to make sparse happy */
|
||||
void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
|
||||
struct iwl_tx_queue *txq);
|
||||
|
||||
void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
|
||||
static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
|
||||
struct iwl_tx_queue *txq)
|
||||
{
|
||||
struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
|
||||
struct iwl_trans_pcie *trans_pcie =
|
||||
IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
|
||||
int txq_id = txq->q.id;
|
||||
int read_ptr = txq->q.read_ptr;
|
||||
u8 sta_id = 0;
|
||||
|
@ -349,7 +347,7 @@ void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
|
|||
|
||||
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
|
||||
|
||||
if (txq_id != priv->cmd_queue)
|
||||
if (txq_id != trans->shrd->cmd_queue)
|
||||
sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
|
||||
|
||||
bc_ent = cpu_to_le16(1 | (sta_id << 12));
|
||||
|
@ -360,56 +358,61 @@ void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
|
|||
tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
|
||||
}
|
||||
|
||||
static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
|
||||
static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
|
||||
u16 txq_id)
|
||||
{
|
||||
u32 tbl_dw_addr;
|
||||
u32 tbl_dw;
|
||||
u16 scd_q2ratid;
|
||||
|
||||
struct iwl_trans_pcie *trans_pcie =
|
||||
IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
|
||||
scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
|
||||
|
||||
tbl_dw_addr = priv->scd_base_addr +
|
||||
tbl_dw_addr = trans_pcie->scd_base_addr +
|
||||
SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
|
||||
|
||||
tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
|
||||
tbl_dw = iwl_read_targ_mem(bus(trans), tbl_dw_addr);
|
||||
|
||||
if (txq_id & 0x1)
|
||||
tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
|
||||
else
|
||||
tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
|
||||
|
||||
iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
|
||||
iwl_write_targ_mem(bus(trans), tbl_dw_addr, tbl_dw);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
|
||||
static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
|
||||
{
|
||||
/* Simply stop the queue, but don't change any configuration;
|
||||
* the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
|
||||
iwl_write_prph(priv,
|
||||
iwl_write_prph(bus(trans),
|
||||
SCD_QUEUE_STATUS_BITS(txq_id),
|
||||
(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
|
||||
(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
|
||||
}
|
||||
|
||||
void iwl_trans_set_wr_ptrs(struct iwl_priv *priv,
|
||||
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
|
||||
int txq_id, u32 index)
|
||||
{
|
||||
iwl_write_direct32(priv, HBUS_TARG_WRPTR,
|
||||
iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
|
||||
(index & 0xff) | (txq_id << 8));
|
||||
iwl_write_prph(priv, SCD_QUEUE_RDPTR(txq_id), index);
|
||||
iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
|
||||
}
|
||||
|
||||
void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
|
||||
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
|
||||
struct iwl_tx_queue *txq,
|
||||
int tx_fifo_id, int scd_retry)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
int txq_id = txq->q.id;
|
||||
int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
|
||||
int active =
|
||||
test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;
|
||||
|
||||
iwl_write_prph(priv, SCD_QUEUE_STATUS_BITS(txq_id),
|
||||
iwl_write_prph(bus(trans), SCD_QUEUE_STATUS_BITS(txq_id),
|
||||
(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
|
||||
(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
|
||||
(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
|
||||
|
@ -417,55 +420,75 @@ void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
|
|||
|
||||
txq->sched_retry = scd_retry;
|
||||
|
||||
IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
|
||||
IWL_DEBUG_INFO(trans, "%s %s Queue %d on FIFO %d\n",
|
||||
active ? "Activate" : "Deactivate",
|
||||
scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
|
||||
}
|
||||
|
||||
void iwl_trans_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
|
||||
int frame_limit)
|
||||
static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
				    u8 ctx, u16 tid)
{
	const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return ac_to_fifo[tid_to_ac[tid]];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}
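get_fifo_from_tid() resolves a TID in two steps: TID to access class via tid_to_ac, then AC to hardware FIFO via the per-context table. A standalone sketch, not from the commit, with made-up table contents (only the lookup shape matches the driver):

/* Standalone sketch: two-step TID -> AC -> FIFO lookup. */
#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

static const unsigned char tid_to_ac[] = { 3, 2, 2, 3, 1, 1, 0, 0 };	/* illustrative */
static const unsigned char ac_to_fifo[] = { 3, 2, 1, 0 };		/* illustrative */

static int fifo_from_tid(unsigned int tid)
{
	if (tid < ARRAY_SIZE(tid_to_ac))
		return ac_to_fifo[tid_to_ac[tid]];
	return -1;	/* TIDs 8-15 unsupported, as in the driver */
}

int main(void)
{
	printf("tid 5 -> fifo %d\n", fifo_from_tid(5));
	printf("tid 9 -> fifo %d\n", fifo_from_tid(9));
	return 0;
}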
|
||||
|
||||
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
|
||||
enum iwl_rxon_context_id ctx, int sta_id,
|
||||
int tid, int frame_limit)
|
||||
{
|
||||
int tx_fifo, txq_id, ssn_idx;
|
||||
u16 ra_tid;
|
||||
unsigned long flags;
|
||||
struct iwl_tid_data *tid_data;
|
||||
|
||||
struct iwl_trans_pcie *trans_pcie =
|
||||
IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
|
||||
if (WARN_ON(sta_id == IWL_INVALID_STATION))
|
||||
return;
|
||||
if (WARN_ON(tid >= MAX_TID_COUNT))
|
||||
if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
tid_data = &priv->stations[sta_id].tid[tid];
|
||||
tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
|
||||
if (WARN_ON(tx_fifo < 0)) {
|
||||
IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&trans->shrd->sta_lock, flags);
|
||||
tid_data = &trans->shrd->tid_data[sta_id][tid];
|
||||
ssn_idx = SEQ_TO_SN(tid_data->seq_number);
|
||||
txq_id = tid_data->agg.txq_id;
|
||||
tx_fifo = tid_data->agg.tx_fifo;
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
|
||||
|
||||
ra_tid = BUILD_RAxTID(sta_id, tid);
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
spin_lock_irqsave(&trans->shrd->lock, flags);
|
||||
|
||||
/* Stop this Tx queue before configuring it */
|
||||
iwlagn_tx_queue_stop_scheduler(priv, txq_id);
|
||||
iwlagn_tx_queue_stop_scheduler(trans, txq_id);
|
||||
|
||||
/* Map receiver-address / traffic-ID to this queue */
|
||||
iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
|
||||
iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
|
||||
|
||||
/* Set this queue as a chain-building queue */
|
||||
iwl_set_bits_prph(priv, SCD_QUEUECHAIN_SEL, (1<<txq_id));
|
||||
iwl_set_bits_prph(bus(trans), SCD_QUEUECHAIN_SEL, (1<<txq_id));
|
||||
|
||||
/* enable aggregations for the queue */
|
||||
iwl_set_bits_prph(priv, SCD_AGGR_SEL, (1<<txq_id));
|
||||
iwl_set_bits_prph(bus(trans), SCD_AGGR_SEL, (1<<txq_id));
|
||||
|
||||
/* Place first TFD at index corresponding to start sequence number.
|
||||
* Assumes that ssn_idx is valid (!= 0xFFF) */
|
||||
priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
|
||||
priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
|
||||
iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx);
|
||||
trans_pcie->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
|
||||
trans_pcie->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
|
||||
iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);
|
||||
|
||||
/* Set up Tx window size and frame limit for this queue */
|
||||
iwl_write_targ_mem(priv, priv->scd_base_addr +
|
||||
iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
|
||||
SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
|
||||
sizeof(u32),
|
||||
((frame_limit <<
|
||||
|
@ -475,40 +498,159 @@ void iwl_trans_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
|
|||
SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
|
||||
SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
|
||||
|
||||
iwl_set_bits_prph(priv, SCD_INTERRUPT_MASK, (1 << txq_id));
|
||||
iwl_set_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
|
||||
|
||||
/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
|
||||
iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
|
||||
iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
|
||||
tx_fifo, 1);
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
trans_pcie->txq[txq_id].sta_id = sta_id;
|
||||
trans_pcie->txq[txq_id].tid = tid;
|
||||
|
||||
spin_unlock_irqrestore(&trans->shrd->lock, flags);
|
||||
}
|
||||
|
||||
int iwl_trans_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
|
||||
u16 ssn_idx, u8 tx_fifo)
|
||||
/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
 */
static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id,
					&trans_pcie->txq_ctx_active_msk))
			return txq_id;
	return -1;
}
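The helper above claims the lowest free queue by atomically setting its bit in txq_ctx_active_msk. A standalone sketch, not from the commit, of the same lowest-free-bit scan; test_and_set_bit() is modelled with a plain bitmask, so the atomicity of the real helper is not captured:

/* Standalone sketch: claim the lowest unused queue id from a bitmask. */
#include <stdio.h>

static int claim_free_queue(unsigned long *active_msk, int max_txq_num)
{
	int txq_id;

	for (txq_id = 0; txq_id < max_txq_num; txq_id++) {
		if (!(*active_msk & (1UL << txq_id))) {
			*active_msk |= 1UL << txq_id;	/* mark it "active" */
			return txq_id;
		}
	}
	return -1;	/* no free aggregation queue */
}

int main(void)
{
	unsigned long msk = 0x7fUL;	/* queues 0-6 already in use */

	printf("claimed %d\n", claim_free_queue(&msk, 20));	/* 7 */
	printf("claimed %d\n", claim_free_queue(&msk, 20));	/* 8 */
	return 0;
}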
|
||||
|
||||
int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
|
||||
enum iwl_rxon_context_id ctx, int sta_id,
|
||||
int tid, u16 *ssn)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_tid_data *tid_data;
|
||||
unsigned long flags;
|
||||
u16 txq_id;
|
||||
struct iwl_priv *priv = priv(trans);
|
||||
|
||||
txq_id = iwlagn_txq_ctx_activate_free(trans);
|
||||
if (txq_id == -1) {
|
||||
IWL_ERR(trans, "No free aggregation queue available\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&trans->shrd->sta_lock, flags);
|
||||
tid_data = &trans->shrd->tid_data[sta_id][tid];
|
||||
*ssn = SEQ_TO_SN(tid_data->seq_number);
|
||||
tid_data->agg.txq_id = txq_id;
|
||||
iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);
|
||||
|
||||
tid_data = &trans->shrd->tid_data[sta_id][tid];
|
||||
if (tid_data->tfds_in_queue == 0) {
|
||||
IWL_DEBUG_HT(trans, "HW queue is empty\n");
|
||||
tid_data->agg.state = IWL_AGG_ON;
|
||||
iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
|
||||
} else {
|
||||
IWL_DEBUG_HT(trans, "HW queue is NOT empty: %d packets in HW"
|
||||
"queue\n", tid_data->tfds_in_queue);
|
||||
tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
|
||||
}
|
||||
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
iwlagn_tx_queue_stop_scheduler(trans, txq_id);
|
||||
|
||||
iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
|
||||
|
||||
trans_pcie->txq[txq_id].q.read_ptr = 0;
|
||||
trans_pcie->txq[txq_id].q.write_ptr = 0;
|
||||
/* supposes that ssn_idx is valid (!= 0xFFF) */
|
||||
iwl_trans_set_wr_ptrs(trans, txq_id, 0);
|
||||
|
||||
iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
|
||||
iwl_txq_ctx_deactivate(trans_pcie, txq_id);
|
||||
iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
|
||||
}
|
||||
|
||||
int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
|
||||
enum iwl_rxon_context_id ctx, int sta_id,
|
||||
int tid)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
unsigned long flags;
|
||||
int read_ptr, write_ptr;
|
||||
struct iwl_tid_data *tid_data;
|
||||
int txq_id;
|
||||
|
||||
spin_lock_irqsave(&trans->shrd->sta_lock, flags);
|
||||
|
||||
tid_data = &trans->shrd->tid_data[sta_id][tid];
|
||||
txq_id = tid_data->agg.txq_id;
|
||||
|
||||
if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
|
||||
(IWLAGN_FIRST_AMPDU_QUEUE +
|
||||
priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
|
||||
IWL_ERR(priv,
|
||||
hw_params(trans).num_ampdu_queues <= txq_id)) {
|
||||
IWL_ERR(trans,
|
||||
"queue number out of range: %d, must be %d to %d\n",
|
||||
txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
|
||||
IWLAGN_FIRST_AMPDU_QUEUE +
|
||||
priv->cfg->base_params->num_of_ampdu_queues - 1);
|
||||
hw_params(trans).num_ampdu_queues - 1);
|
||||
spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
iwlagn_tx_queue_stop_scheduler(priv, txq_id);
|
||||
switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
|
||||
case IWL_EMPTYING_HW_QUEUE_ADDBA:
|
||||
/*
|
||||
* This can happen if the peer stops aggregation
|
||||
* again before we've had a chance to drain the
|
||||
* queue we selected previously, i.e. before the
|
||||
* session was really started completely.
|
||||
*/
|
||||
IWL_DEBUG_HT(trans, "AGG stop before setup done\n");
|
||||
goto turn_off;
|
||||
case IWL_AGG_ON:
|
||||
break;
|
||||
default:
|
||||
IWL_WARN(trans, "Stopping AGG while state not ON"
|
||||
"or starting\n");
|
||||
}
|
||||
|
||||
iwl_clear_bits_prph(priv, SCD_AGGR_SEL, (1 << txq_id));
|
||||
write_ptr = trans_pcie->txq[txq_id].q.write_ptr;
|
||||
read_ptr = trans_pcie->txq[txq_id].q.read_ptr;
|
||||
|
||||
priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
|
||||
priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
|
||||
/* supposes that ssn_idx is valid (!= 0xFFF) */
|
||||
iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx);
|
||||
/* The queue is not empty */
|
||||
if (write_ptr != read_ptr) {
|
||||
IWL_DEBUG_HT(trans, "Stopping a non empty AGG HW QUEUE\n");
|
||||
trans->shrd->tid_data[sta_id][tid].agg.state =
|
||||
IWL_EMPTYING_HW_QUEUE_DELBA;
|
||||
spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
iwl_clear_bits_prph(priv, SCD_INTERRUPT_MASK, (1 << txq_id));
|
||||
iwl_txq_ctx_deactivate(priv, txq_id);
|
||||
iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
|
||||
IWL_DEBUG_HT(trans, "HW queue is empty\n");
|
||||
turn_off:
|
||||
trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
|
||||
|
||||
/* do not restore/save irqs */
|
||||
spin_unlock(&trans->shrd->sta_lock);
|
||||
spin_lock(&trans->shrd->lock);
|
||||
|
||||
iwl_trans_pcie_txq_agg_disable(trans, txq_id);
|
||||
|
||||
spin_unlock_irqrestore(&trans->shrd->lock, flags);
|
||||
|
||||
iwl_stop_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -524,9 +666,10 @@ int iwl_trans_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
|
|||
 * failed. On success, it returns the index (> 0) of the command in the
|
||||
* command queue.
|
||||
*/
|
||||
static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
|
||||
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
|
||||
{
|
||||
struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
|
||||
struct iwl_queue *q = &txq->q;
|
||||
struct iwl_device_cmd *out_cmd;
|
||||
struct iwl_cmd_meta *out_meta;
|
||||
|
@ -544,14 +687,14 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
|
|||
int trace_idx;
|
||||
#endif
|
||||
|
||||
if (test_bit(STATUS_FW_ERROR, &priv->status)) {
|
||||
IWL_WARN(priv, "fw recovery, no hcmd send\n");
|
||||
if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
|
||||
IWL_WARN(trans, "fw recovery, no hcmd send\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if ((priv->ucode_owner == IWL_OWNERSHIP_TM) &&
|
||||
if ((trans->shrd->ucode_owner == IWL_OWNERSHIP_TM) &&
|
||||
!(cmd->flags & CMD_ON_DEMAND)) {
|
||||
IWL_DEBUG_HC(priv, "tm own the uCode, no regular hcmd send\n");
|
||||
IWL_DEBUG_HC(trans, "tm own the uCode, no regular hcmd send\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
|
@ -584,22 +727,22 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
|
|||
if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
|
||||
return -EINVAL;
|
||||
|
||||
if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
|
||||
IWL_WARN(priv, "Not sending command - %s KILL\n",
|
||||
iwl_is_rfkill(priv) ? "RF" : "CT");
|
||||
if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
|
||||
IWL_WARN(trans, "Not sending command - %s KILL\n",
|
||||
iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&priv->hcmd_lock, flags);
|
||||
spin_lock_irqsave(&trans->hcmd_lock, flags);
|
||||
|
||||
if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
|
||||
spin_unlock_irqrestore(&priv->hcmd_lock, flags);
|
||||
spin_unlock_irqrestore(&trans->hcmd_lock, flags);
|
||||
|
||||
IWL_ERR(priv, "No space in command queue\n");
|
||||
is_ct_kill = iwl_check_for_ct_kill(priv);
|
||||
IWL_ERR(trans, "No space in command queue\n");
|
||||
is_ct_kill = iwl_check_for_ct_kill(priv(trans));
|
||||
if (!is_ct_kill) {
|
||||
IWL_ERR(priv, "Restarting adapter due to queue full\n");
|
||||
iwlagn_fw_error(priv, false);
|
||||
IWL_ERR(trans, "Restarting adapter queue is full\n");
|
||||
iwlagn_fw_error(priv(trans), false);
|
||||
}
|
||||
return -ENOSPC;
|
||||
}
|
||||
|
@ -618,8 +761,9 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
|
|||
|
||||
out_cmd->hdr.cmd = cmd->id;
|
||||
out_cmd->hdr.flags = 0;
|
||||
out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
|
||||
INDEX_TO_SEQ(q->write_ptr));
|
||||
out_cmd->hdr.sequence =
|
||||
cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
|
||||
INDEX_TO_SEQ(q->write_ptr));
|
||||
|
||||
/* and copy the data that needs to be copied */
|
||||
|
||||
|
@ -633,16 +777,16 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
|
|||
cmd_dest += cmd->len[i];
|
||||
}
|
||||
|
||||
IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
|
||||
IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
|
||||
"%d bytes at %d[%d]:%d\n",
|
||||
get_cmd_string(out_cmd->hdr.cmd),
|
||||
out_cmd->hdr.cmd,
|
||||
le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
|
||||
q->write_ptr, idx, priv->cmd_queue);
|
||||
q->write_ptr, idx, trans->shrd->cmd_queue);
|
||||
|
||||
phys_addr = dma_map_single(priv->bus->dev, &out_cmd->hdr, copy_size,
|
||||
phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, copy_size,
|
||||
DMA_BIDIRECTIONAL);
|
||||
if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
|
||||
if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
|
||||
idx = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
@ -650,7 +794,8 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
|
|||
dma_unmap_addr_set(out_meta, mapping, phys_addr);
|
||||
dma_unmap_len_set(out_meta, len, copy_size);
|
||||
|
||||
iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
|
||||
iwlagn_txq_attach_buf_to_tfd(trans, txq,
|
||||
phys_addr, copy_size, 1);
|
||||
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
|
||||
trace_bufs[0] = &out_cmd->hdr;
|
||||
trace_lens[0] = copy_size;
|
||||
|
@ -662,17 +807,18 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
|
|||
continue;
|
||||
if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
|
||||
continue;
|
||||
phys_addr = dma_map_single(priv->bus->dev, (void *)cmd->data[i],
|
||||
phys_addr = dma_map_single(bus(trans)->dev,
|
||||
(void *)cmd->data[i],
|
||||
cmd->len[i], DMA_BIDIRECTIONAL);
|
||||
if (dma_mapping_error(priv->bus->dev, phys_addr)) {
|
||||
iwlagn_unmap_tfd(priv, out_meta,
|
||||
if (dma_mapping_error(bus(trans)->dev, phys_addr)) {
|
||||
iwlagn_unmap_tfd(trans, out_meta,
|
||||
&txq->tfds[q->write_ptr],
|
||||
DMA_BIDIRECTIONAL);
|
||||
idx = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
|
||||
iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
|
||||
cmd->len[i], 0);
|
||||
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
|
||||
trace_bufs[trace_idx] = cmd->data[i];
|
||||
|
@ -688,7 +834,7 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
|
|||
/* check that tracing gets all possible blocks */
|
||||
BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
|
||||
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
|
||||
trace_iwlwifi_dev_hcmd(priv, cmd->flags,
|
||||
trace_iwlwifi_dev_hcmd(priv(trans), cmd->flags,
|
||||
trace_bufs[0], trace_lens[0],
|
||||
trace_bufs[1], trace_lens[1],
|
||||
trace_bufs[2], trace_lens[2]);
|
||||
|
@ -696,10 +842,10 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
|
|||
|
||||
/* Increment and update queue's write index */
|
||||
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
|
||||
iwl_txq_update_write_ptr(priv, txq);
|
||||
iwl_txq_update_write_ptr(trans, txq);
|
||||
|
||||
out:
|
||||
spin_unlock_irqrestore(&priv->hcmd_lock, flags);
|
||||
spin_unlock_irqrestore(&trans->hcmd_lock, flags);
|
||||
return idx;
|
||||
}
|
||||
|
||||
|
@ -712,7 +858,9 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
|
|||
*/
|
||||
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
|
||||
{
|
||||
struct iwl_tx_queue *txq = &priv->txq[txq_id];
|
||||
struct iwl_trans_pcie *trans_pcie =
|
||||
IWL_TRANS_GET_PCIE_TRANS(trans(priv));
|
||||
struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
|
||||
struct iwl_queue *q = &txq->q;
|
||||
int nfreed = 0;
|
||||
|
||||
|
@ -752,17 +900,19 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
|
|||
int cmd_index;
|
||||
struct iwl_device_cmd *cmd;
|
||||
struct iwl_cmd_meta *meta;
|
||||
struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
|
||||
struct iwl_trans *trans = trans(priv);
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
|
||||
unsigned long flags;
|
||||
|
||||
/* If a Tx command is being handled and it isn't in the actual
|
||||
* command queue then there a command routing bug has been introduced
|
||||
* in the queue management code. */
|
||||
if (WARN(txq_id != priv->cmd_queue,
|
||||
if (WARN(txq_id != trans->shrd->cmd_queue,
|
||||
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
|
||||
txq_id, priv->cmd_queue, sequence,
|
||||
priv->txq[priv->cmd_queue].q.read_ptr,
|
||||
priv->txq[priv->cmd_queue].q.write_ptr)) {
|
||||
txq_id, trans->shrd->cmd_queue, sequence,
|
||||
trans_pcie->txq[trans->shrd->cmd_queue].q.read_ptr,
|
||||
trans_pcie->txq[trans->shrd->cmd_queue].q.write_ptr)) {
|
||||
iwl_print_hex_error(priv, pkt, 32);
|
||||
return;
|
||||
}
|
||||
|
@ -771,7 +921,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
|
|||
cmd = txq->cmd[cmd_index];
|
||||
meta = &txq->meta[cmd_index];
|
||||
|
||||
iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
|
||||
iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
|
||||
DMA_BIDIRECTIONAL);
|
||||
|
||||
/* Input error checking is done when commands are added to queue. */
|
||||
if (meta->flags & CMD_WANT_SKB) {
|
||||
|
@ -780,20 +931,20 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
|
|||
} else if (meta->callback)
|
||||
meta->callback(priv, cmd, pkt);
|
||||
|
||||
spin_lock_irqsave(&priv->hcmd_lock, flags);
|
||||
spin_lock_irqsave(&trans->hcmd_lock, flags);
|
||||
|
||||
iwl_hcmd_queue_reclaim(priv, txq_id, index);
|
||||
|
||||
if (!(meta->flags & CMD_ASYNC)) {
|
||||
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
|
||||
IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
|
||||
clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
|
||||
IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
|
||||
get_cmd_string(cmd->hdr.cmd));
|
||||
wake_up_interruptible(&priv->wait_command_queue);
|
||||
}
|
||||
|
||||
meta->flags = 0;
|
||||
|
||||
spin_unlock_irqrestore(&priv->hcmd_lock, flags);
|
||||
spin_unlock_irqrestore(&trans->hcmd_lock, flags);
|
||||
}
|
||||
|
||||
const char *get_cmd_string(u8 cmd)
|
||||
|
@ -904,7 +1055,7 @@ static void iwl_generic_cmd_callback(struct iwl_priv *priv,
|
|||
#endif
|
||||
}
|
||||
|
||||
static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
|
||||
static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
|
||||
{
|
||||
int ret;
|
||||
|
||||
|
@ -916,77 +1067,78 @@ static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
|
|||
if (!cmd->callback)
|
||||
cmd->callback = iwl_generic_cmd_callback;
|
||||
|
||||
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
|
||||
if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
|
||||
return -EBUSY;
|
||||
|
||||
ret = iwl_enqueue_hcmd(priv, cmd);
|
||||
ret = iwl_enqueue_hcmd(trans, cmd);
|
||||
if (ret < 0) {
|
||||
IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
|
||||
IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
|
||||
get_cmd_string(cmd->id), ret);
|
||||
return ret;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
|
||||
static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
int cmd_idx;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
lockdep_assert_held(&trans->shrd->mutex);
|
||||
|
||||
/* A synchronous command can not have a callback set. */
|
||||
if (WARN_ON(cmd->callback))
|
||||
return -EINVAL;
|
||||
|
||||
IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
|
||||
IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
|
||||
get_cmd_string(cmd->id));
|
||||
|
||||
set_bit(STATUS_HCMD_ACTIVE, &priv->status);
|
||||
IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
|
||||
set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
|
||||
IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
|
||||
get_cmd_string(cmd->id));
|
||||
|
||||
cmd_idx = iwl_enqueue_hcmd(priv, cmd);
|
||||
cmd_idx = iwl_enqueue_hcmd(trans, cmd);
|
||||
if (cmd_idx < 0) {
|
||||
ret = cmd_idx;
|
||||
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
|
||||
IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
|
||||
clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
|
||||
IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
|
||||
get_cmd_string(cmd->id), ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = wait_event_interruptible_timeout(priv->wait_command_queue,
|
||||
!test_bit(STATUS_HCMD_ACTIVE, &priv->status),
|
||||
ret = wait_event_interruptible_timeout(priv(trans)->wait_command_queue,
|
||||
!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
|
||||
HOST_COMPLETE_TIMEOUT);
|
||||
if (!ret) {
|
||||
if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
|
||||
IWL_ERR(priv,
|
||||
if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
|
||||
IWL_ERR(trans,
|
||||
"Error sending %s: time out after %dms.\n",
|
||||
get_cmd_string(cmd->id),
|
||||
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
|
||||
|
||||
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
|
||||
IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command"
|
||||
clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
|
||||
IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command"
|
||||
"%s\n", get_cmd_string(cmd->id));
|
||||
ret = -ETIMEDOUT;
|
||||
goto cancel;
|
||||
}
|
||||
}
|
||||
|
||||
if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
|
||||
IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
|
||||
if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
|
||||
IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
|
||||
get_cmd_string(cmd->id));
|
||||
ret = -ECANCELED;
|
||||
goto fail;
|
||||
}
|
||||
if (test_bit(STATUS_FW_ERROR, &priv->status)) {
|
||||
IWL_ERR(priv, "Command %s failed: FW Error\n",
|
||||
if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
|
||||
IWL_ERR(trans, "Command %s failed: FW Error\n",
|
||||
get_cmd_string(cmd->id));
|
||||
ret = -EIO;
|
||||
goto fail;
|
||||
}
|
||||
if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
|
||||
IWL_ERR(priv, "Error: Response NULL in '%s'\n",
|
||||
IWL_ERR(trans, "Error: Response NULL in '%s'\n",
|
||||
get_cmd_string(cmd->id));
|
||||
ret = -EIO;
|
||||
goto cancel;
|
||||
|
@ -1002,28 +1154,28 @@ cancel:
|
|||
* in later, it will possibly set an invalid
|
||||
* address (cmd->meta.source).
|
||||
*/
|
||||
priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
|
||||
trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
|
||||
~CMD_WANT_SKB;
|
||||
}
|
||||
fail:
|
||||
if (cmd->reply_page) {
|
||||
iwl_free_pages(priv, cmd->reply_page);
|
||||
iwl_free_pages(trans->shrd, cmd->reply_page);
|
||||
cmd->reply_page = 0;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
|
||||
int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
|
||||
{
|
||||
if (cmd->flags & CMD_ASYNC)
|
||||
return iwl_send_cmd_async(priv, cmd);
|
||||
return iwl_send_cmd_async(trans, cmd);
|
||||
|
||||
return iwl_send_cmd_sync(priv, cmd);
|
||||
return iwl_send_cmd_sync(trans, cmd);
|
||||
}
|
||||
|
||||
int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags, u16 len,
|
||||
const void *data)
|
||||
int iwl_trans_pcie_send_cmd_pdu(struct iwl_trans *trans, u8 id, u32 flags,
|
||||
u16 len, const void *data)
|
||||
{
|
||||
struct iwl_host_cmd cmd = {
|
||||
.id = id,
|
||||
|
@ -1032,5 +1184,53 @@ int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags, u16 len,
|
|||
.flags = flags,
|
||||
};
|
||||
|
||||
return iwl_send_cmd(priv, &cmd);
|
||||
return iwl_trans_pcie_send_cmd(trans, &cmd);
|
||||
}
|
||||
|
||||
/* Frees buffers until index _not_ inclusive */
|
||||
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
|
||||
struct sk_buff_head *skbs)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
|
||||
struct iwl_queue *q = &txq->q;
|
||||
int last_to_free;
|
||||
int freed = 0;
|
||||
|
||||
/*Since we free until index _not_ inclusive, the one before index is
|
||||
* the last we will free. This one must be used */
|
||||
last_to_free = iwl_queue_dec_wrap(index, q->n_bd);
|
||||
|
||||
if ((index >= q->n_bd) ||
|
||||
(iwl_queue_used(q, last_to_free) == 0)) {
|
||||
IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
|
||||
"last_to_free %d is out of range [0-%d] %d %d.\n",
|
||||
__func__, txq_id, last_to_free, q->n_bd,
|
||||
q->write_ptr, q->read_ptr);
|
||||
return 0;
|
||||
}
|
||||
|
||||
IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
|
||||
q->read_ptr, index);
|
||||
|
||||
if (WARN_ON(!skb_queue_empty(skbs)))
|
||||
return 0;
|
||||
|
||||
for (;
|
||||
q->read_ptr != index;
|
||||
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
|
||||
|
||||
if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
|
||||
continue;
|
||||
|
||||
__skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);
|
||||
|
||||
txq->skbs[txq->q.read_ptr] = NULL;
|
||||
|
||||
iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
|
||||
|
||||
iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr);
|
||||
freed++;
|
||||
}
|
||||
return freed;
|
||||
}
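iwl_tx_queue_reclaim() above does not free the reclaimed frames itself; it hands them back through the skb list so the caller can report Tx status. A minimal caller sketch, assuming a hypothetical completion handler handle_tx_status() (the reclaim signature is the one declared in this patch, everything else is illustrative):

static void example_reclaim(struct iwl_trans *trans, int txq_id, int ssn)
{
	/* Illustrative only: consume the skb list that
	 * iwl_tx_queue_reclaim() fills in. */
	struct sk_buff_head skbs;
	struct sk_buff *skb;
	int freed;

	__skb_queue_head_init(&skbs);	/* must be empty on entry */
	freed = iwl_tx_queue_reclaim(trans, txq_id, ssn, &skbs);

	while ((skb = __skb_dequeue(&skbs)) != NULL)
		handle_tx_status(trans, skb);	/* hypothetical hand-off */
}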
|
||||
The diff for this file is not shown because it is too large.
@ -63,163 +63,235 @@
|
|||
#ifndef __iwl_trans_h__
|
||||
#define __iwl_trans_h__
|
||||
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/skbuff.h>
|
||||
|
||||
#include "iwl-shared.h"
|
||||
#include "iwl-commands.h"
|
||||
|
||||
/*This file includes the declaration that are exported from the transport
|
||||
* layer */
|
||||
|
||||
struct iwl_priv;
|
||||
struct iwl_rxon_context;
|
||||
struct iwl_host_cmd;
|
||||
struct iwl_shared;
|
||||
struct iwl_device_cmd;
|
||||
|
||||
/**
* struct iwl_trans_ops - transport specific operations
* @alloc: allocates the meta data (not the queues themselves)
* @request_irq: requests IRQ - will be called before the FW load in probe flow
* @start_device: allocates and inits all the resources for the transport
* layer.
* @prepare_card_hw: claim the ownership on the HW. Will be called during
* probe.
* @tx_start: starts and configures all the Tx fifo - usually done once the fw
* is alive.
* @wake_any_queue: wake all the queues of a specific context IWL_RXON_CTX_*
* @stop_device: stops the whole device (embedded CPU put to reset)
* @rx_free: frees the rx memory
* @tx_free: frees the tx memory
* @send_cmd: send a host command
* @send_cmd_pdu: send a host command: flags can be CMD_*
* @get_tx_cmd: returns a pointer to a new Tx cmd for the upper layer use
* @tx: send an skb
* @txq_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
* @reclaim: free packet until ssn. Returns a list of freed packets.
* @tx_agg_alloc: allocate resources for a TX BA session
* @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
* ready and a successful ADDBA response has been received.
* @txq_agg_disable: de-configure a Tx queue to send AMPDUs
* @tx_agg_disable: de-configure a Tx queue to send AMPDUs
* @kick_nic: remove the RESET from the embedded CPU and let it run
* @sync_irq: the upper layer will typically disable interrupt and call this
* handler. After this handler returns, it is guaranteed that all
* the ISR / tasklet etc... have finished running and the transport
* layer shall not pass any Rx.
* @free: release all the resources for the transport layer itself such as
* irq, tasklet etc...
* @stop_queue: stop a specific queue
* @check_stuck_queue: check if a specific queue is stuck
* @wait_tx_queue_empty: wait until all tx queues are empty
* @dbgfs_register: add the dbgfs files under this directory. Files will be
* automatically deleted.
* @suspend: stop the device unless WoWLAN is configured
* @resume: resume activity of the device
*/
|
||||
struct iwl_trans_ops {
|
||||
|
||||
int (*start_device)(struct iwl_priv *priv);
|
||||
int (*prepare_card_hw)(struct iwl_priv *priv);
|
||||
void (*stop_device)(struct iwl_priv *priv);
|
||||
void (*tx_start)(struct iwl_priv *priv);
|
||||
void (*tx_free)(struct iwl_priv *priv);
|
||||
void (*rx_free)(struct iwl_priv *priv);
|
||||
struct iwl_trans *(*alloc)(struct iwl_shared *shrd);
|
||||
int (*request_irq)(struct iwl_trans *iwl_trans);
|
||||
int (*start_device)(struct iwl_trans *trans);
|
||||
int (*prepare_card_hw)(struct iwl_trans *trans);
|
||||
void (*stop_device)(struct iwl_trans *trans);
|
||||
void (*tx_start)(struct iwl_trans *trans);
|
||||
|
||||
int (*send_cmd)(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
|
||||
void (*wake_any_queue)(struct iwl_trans *trans, u8 ctx);
|
||||
|
||||
int (*send_cmd_pdu)(struct iwl_priv *priv, u8 id, u32 flags, u16 len,
|
||||
int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
|
||||
|
||||
int (*send_cmd_pdu)(struct iwl_trans *trans, u8 id, u32 flags, u16 len,
|
||||
const void *data);
|
||||
struct iwl_tx_cmd * (*get_tx_cmd)(struct iwl_priv *priv, int txq_id);
|
||||
int (*tx)(struct iwl_priv *priv, struct sk_buff *skb,
|
||||
struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
|
||||
struct iwl_rxon_context *ctx);
|
||||
int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
|
||||
struct iwl_device_cmd *dev_cmd, u8 ctx, u8 sta_id);
|
||||
void (*reclaim)(struct iwl_trans *trans, int sta_id, int tid,
|
||||
int txq_id, int ssn, u32 status,
|
||||
struct sk_buff_head *skbs);
|
||||
|
||||
int (*txq_agg_disable)(struct iwl_priv *priv, u16 txq_id,
|
||||
u16 ssn_idx, u8 tx_fifo);
|
||||
void (*txq_agg_setup)(struct iwl_priv *priv, int sta_id, int tid,
|
||||
int frame_limit);
|
||||
int (*tx_agg_disable)(struct iwl_trans *trans,
|
||||
enum iwl_rxon_context_id ctx, int sta_id,
|
||||
int tid);
|
||||
int (*tx_agg_alloc)(struct iwl_trans *trans,
|
||||
enum iwl_rxon_context_id ctx, int sta_id, int tid,
|
||||
u16 *ssn);
|
||||
void (*tx_agg_setup)(struct iwl_trans *trans,
|
||||
enum iwl_rxon_context_id ctx, int sta_id, int tid,
|
||||
int frame_limit);
|
||||
|
||||
void (*kick_nic)(struct iwl_priv *priv);
|
||||
void (*kick_nic)(struct iwl_trans *trans);
|
||||
|
||||
void (*sync_irq)(struct iwl_priv *priv);
|
||||
void (*free)(struct iwl_priv *priv);
|
||||
void (*free)(struct iwl_trans *trans);
|
||||
|
||||
void (*stop_queue)(struct iwl_trans *trans, int q);
|
||||
|
||||
int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
|
||||
int (*check_stuck_queue)(struct iwl_trans *trans, int q);
|
||||
int (*wait_tx_queue_empty)(struct iwl_trans *trans);
|
||||
|
||||
int (*suspend)(struct iwl_trans *trans);
|
||||
int (*resume)(struct iwl_trans *trans);
|
||||
};
|
||||
|
||||
/**
* struct iwl_trans - transport common data
* @ops - pointer to iwl_trans_ops
* @shrd - pointer to iwl_shared which holds shared data from the upper layer
* @hcmd_lock: protects HCMD
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
struct iwl_priv *priv;
struct iwl_shared *shrd;
spinlock_t hcmd_lock;

/* pointer to trans specific struct */
/* Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __attribute__((__aligned__(sizeof(void *))));
};
|
||||
|
||||
static inline int trans_start_device(struct iwl_trans *trans)
|
||||
static inline int iwl_trans_request_irq(struct iwl_trans *trans)
|
||||
{
|
||||
return trans->ops->start_device(trans->priv);
|
||||
return trans->ops->request_irq(trans);
|
||||
}
|
||||
|
||||
static inline int trans_prepare_card_hw(struct iwl_trans *trans)
|
||||
static inline int iwl_trans_start_device(struct iwl_trans *trans)
|
||||
{
|
||||
return trans->ops->prepare_card_hw(trans->priv);
|
||||
return trans->ops->start_device(trans);
|
||||
}
|
||||
|
||||
static inline void trans_stop_device(struct iwl_trans *trans)
|
||||
static inline int iwl_trans_prepare_card_hw(struct iwl_trans *trans)
|
||||
{
|
||||
trans->ops->stop_device(trans->priv);
|
||||
return trans->ops->prepare_card_hw(trans);
|
||||
}
|
||||
|
||||
static inline void trans_tx_start(struct iwl_trans *trans)
|
||||
static inline void iwl_trans_stop_device(struct iwl_trans *trans)
|
||||
{
|
||||
trans->ops->tx_start(trans->priv);
|
||||
trans->ops->stop_device(trans);
|
||||
}
|
||||
|
||||
static inline void trans_rx_free(struct iwl_trans *trans)
|
||||
static inline void iwl_trans_tx_start(struct iwl_trans *trans)
|
||||
{
|
||||
trans->ops->rx_free(trans->priv);
|
||||
trans->ops->tx_start(trans);
|
||||
}
|
||||
|
||||
static inline void trans_tx_free(struct iwl_trans *trans)
|
||||
static inline void iwl_trans_wake_any_queue(struct iwl_trans *trans, u8 ctx)
|
||||
{
|
||||
trans->ops->tx_free(trans->priv);
|
||||
trans->ops->wake_any_queue(trans, ctx);
|
||||
}
|
||||
|
||||
static inline int trans_send_cmd(struct iwl_trans *trans,
|
||||
|
||||
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
|
||||
struct iwl_host_cmd *cmd)
|
||||
{
|
||||
return trans->ops->send_cmd(trans->priv, cmd);
|
||||
return trans->ops->send_cmd(trans, cmd);
|
||||
}
|
||||
|
||||
static inline int trans_send_cmd_pdu(struct iwl_trans *trans, u8 id, u32 flags,
|
||||
u16 len, const void *data)
|
||||
static inline int iwl_trans_send_cmd_pdu(struct iwl_trans *trans, u8 id,
|
||||
u32 flags, u16 len, const void *data)
|
||||
{
|
||||
return trans->ops->send_cmd_pdu(trans->priv, id, flags, len, data);
|
||||
return trans->ops->send_cmd_pdu(trans, id, flags, len, data);
|
||||
}
|
||||
|
||||
static inline struct iwl_tx_cmd *trans_get_tx_cmd(struct iwl_trans *trans,
|
||||
int txq_id)
|
||||
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
|
||||
struct iwl_device_cmd *dev_cmd, u8 ctx, u8 sta_id)
|
||||
{
|
||||
return trans->ops->get_tx_cmd(trans->priv, txq_id);
|
||||
return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id);
|
||||
}
|
||||
|
||||
static inline int trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
|
||||
struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
|
||||
struct iwl_rxon_context *ctx)
|
||||
static inline void iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
|
||||
int tid, int txq_id, int ssn, u32 status,
|
||||
struct sk_buff_head *skbs)
|
||||
{
|
||||
return trans->ops->tx(trans->priv, skb, tx_cmd, txq_id, fc, ampdu, ctx);
|
||||
trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, status, skbs);
|
||||
}
|
||||
|
||||
static inline int trans_txq_agg_disable(struct iwl_trans *trans, u16 txq_id,
|
||||
u16 ssn_idx, u8 tx_fifo)
|
||||
static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans,
|
||||
enum iwl_rxon_context_id ctx,
|
||||
int sta_id, int tid)
|
||||
{
|
||||
return trans->ops->txq_agg_disable(trans->priv, txq_id,
|
||||
ssn_idx, tx_fifo);
|
||||
return trans->ops->tx_agg_disable(trans, ctx, sta_id, tid);
|
||||
}
|
||||
|
||||
static inline void trans_txq_agg_setup(struct iwl_trans *trans, int sta_id,
|
||||
int tid, int frame_limit)
|
||||
static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans,
|
||||
enum iwl_rxon_context_id ctx,
|
||||
int sta_id, int tid, u16 *ssn)
|
||||
{
|
||||
trans->ops->txq_agg_setup(trans->priv, sta_id, tid, frame_limit);
|
||||
return trans->ops->tx_agg_alloc(trans, ctx, sta_id, tid, ssn);
|
||||
}
|
||||
|
||||
static inline void trans_kick_nic(struct iwl_trans *trans)
|
||||
|
||||
static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans,
|
||||
enum iwl_rxon_context_id ctx,
|
||||
int sta_id, int tid,
|
||||
int frame_limit)
|
||||
{
|
||||
trans->ops->kick_nic(trans->priv);
|
||||
trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit);
|
||||
}
|
||||
|
||||
static inline void trans_sync_irq(struct iwl_trans *trans)
|
||||
static inline void iwl_trans_kick_nic(struct iwl_trans *trans)
|
||||
{
|
||||
trans->ops->sync_irq(trans->priv);
|
||||
trans->ops->kick_nic(trans);
|
||||
}
|
||||
|
||||
static inline void trans_free(struct iwl_trans *trans)
|
||||
static inline void iwl_trans_free(struct iwl_trans *trans)
|
||||
{
|
||||
trans->ops->free(trans->priv);
|
||||
trans->ops->free(trans);
|
||||
}
|
||||
|
||||
int iwl_trans_register(struct iwl_trans *trans, struct iwl_priv *priv);
|
||||
static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q)
|
||||
{
|
||||
trans->ops->stop_queue(trans, q);
|
||||
}
|
||||
|
||||
/*TODO: this functions should NOT be exported from trans module - export it
|
||||
* until the reclaim flow will be brought to the transport module too */
|
||||
static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
|
||||
{
|
||||
return trans->ops->wait_tx_queue_empty(trans);
|
||||
}
|
||||
|
||||
struct iwl_tx_queue;
|
||||
void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
|
||||
struct iwl_tx_queue *txq);
|
||||
static inline int iwl_trans_check_stuck_queue(struct iwl_trans *trans, int q)
|
||||
{
|
||||
return trans->ops->check_stuck_queue(trans, q);
|
||||
}
|
||||
static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
|
||||
struct dentry *dir)
|
||||
{
|
||||
return trans->ops->dbgfs_register(trans, dir);
|
||||
}
|
||||
|
||||
static inline int iwl_trans_suspend(struct iwl_trans *trans)
|
||||
{
|
||||
return trans->ops->suspend(trans);
|
||||
}
|
||||
|
||||
static inline int iwl_trans_resume(struct iwl_trans *trans)
|
||||
{
|
||||
return trans->ops->resume(trans);
|
||||
}
|
||||
|
||||
/*****************************************************
|
||||
* Transport layers implementations
|
||||
******************************************************/
|
||||
extern const struct iwl_trans_ops trans_ops_pcie;
|
||||
|
||||
#endif /* __iwl_trans_h__ */
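For orientation only: the inline wrappers above exist so that upper-layer code talks to struct iwl_trans and never to the PCIe backend directly. A minimal sketch of that calling pattern, using a hypothetical command id REPLY_FOO and payload struct foo_cmd (neither is defined by this patch; iwl_trans_send_cmd_pdu() and IWL_ERR() are taken from the code above):

static int example_send_foo(struct iwl_trans *trans)
{
	/* Illustrative only: one synchronous host command through the
	 * transport vtable, regardless of which backend implements it. */
	struct foo_cmd cmd_data = {};	/* hypothetical payload */
	int ret;

	ret = iwl_trans_send_cmd_pdu(trans, REPLY_FOO, CMD_SYNC,
				     sizeof(cmd_data), &cmd_data);
	if (ret)
		IWL_ERR(trans, "REPLY_FOO failed: %d\n", ret);
	return ret;
}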
|
||||
|
|
|
@ -41,7 +41,6 @@
#endif /* CONFIG_P54_SPI_DEFAULT_EEPROM */

MODULE_FIRMWARE("3826.arm");
MODULE_ALIAS("stlc45xx");

/*
* gpios should be handled in board files and provided via platform data,

@ -738,3 +737,4 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
MODULE_ALIAS("spi:cx3110x");
MODULE_ALIAS("spi:p54spi");
MODULE_ALIAS("spi:stlc45xx");
|
|
|
@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <asm/div64.h>

#include <net/mac80211.h>

@ -582,10 +583,13 @@ static void p54_rx_stats(struct p54_common *priv, struct sk_buff *skb)
if (chan) {
struct survey_info *survey = &priv->survey[chan->hw_value];
survey->noise = clamp_t(s8, priv->noise, -128, 127);
survey->channel_time = priv->survey_raw.active / 1024;
survey->channel_time_tx = priv->survey_raw.tx / 1024;
survey->channel_time_busy = priv->survey_raw.cca / 1024 +
survey->channel_time_tx;
survey->channel_time = priv->survey_raw.active;
survey->channel_time_tx = priv->survey_raw.tx;
survey->channel_time_busy = priv->survey_raw.tx +
priv->survey_raw.cca;
do_div(survey->channel_time, 1024);
do_div(survey->channel_time_tx, 1024);
do_div(survey->channel_time_busy, 1024);
}

tmp = p54_find_and_unlink_skb(priv, hdr->req_id);
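The switch from a plain `/ 1024` to do_div() is needed because the survey counters are 64-bit and a direct 64-bit division is not available on all 32-bit targets; do_div() (from asm/div64.h, included above) divides its u64 argument in place and returns the 32-bit remainder. A tiny illustration of the idiom, with made-up numbers:

	/* Illustrative only: do_div() modifies the u64 in place and
	 * returns the remainder as a u32. */
	u64 active = 123456789ULL;
	u32 rem;

	rem = do_div(active, 1024);	/* active == 120563, rem == 277 */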
|
||||
|
|
|
@ -46,6 +46,7 @@ int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
|
|||
goto out;
|
||||
}
|
||||
|
||||
wake_up->role_id = wl->role_id;
|
||||
wake_up->wake_up_event = wl->conf.conn.wake_up_event;
|
||||
wake_up->listen_interval = wl->conf.conn.listen_interval;
|
||||
|
||||
|
@ -101,6 +102,7 @@ int wl1271_acx_tx_power(struct wl1271 *wl, int power)
|
|||
goto out;
|
||||
}
|
||||
|
||||
acx->role_id = wl->role_id;
|
||||
acx->current_tx_power = power * 10;
|
||||
|
||||
ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx));
|
||||
|
@ -128,6 +130,7 @@ int wl1271_acx_feature_cfg(struct wl1271 *wl)
|
|||
}
|
||||
|
||||
/* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */
|
||||
feature->role_id = wl->role_id;
|
||||
feature->data_flow_options = 0;
|
||||
feature->options = 0;
|
||||
|
||||
|
@ -183,34 +186,6 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
int wl1271_acx_rx_config(struct wl1271 *wl, u32 config, u32 filter)
|
||||
{
|
||||
struct acx_rx_config *rx_config;
|
||||
int ret;
|
||||
|
||||
wl1271_debug(DEBUG_ACX, "acx rx config");
|
||||
|
||||
rx_config = kzalloc(sizeof(*rx_config), GFP_KERNEL);
|
||||
if (!rx_config) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
rx_config->config_options = cpu_to_le32(config);
|
||||
rx_config->filter_options = cpu_to_le32(filter);
|
||||
|
||||
ret = wl1271_cmd_configure(wl, ACX_RX_CFG,
|
||||
rx_config, sizeof(*rx_config));
|
||||
if (ret < 0) {
|
||||
wl1271_warning("failed to set rx config: %d", ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
kfree(rx_config);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int wl1271_acx_pd_threshold(struct wl1271 *wl)
|
||||
{
|
||||
struct acx_packet_detection *pd;
|
||||
|
@ -250,6 +225,7 @@ int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time)
|
|||
goto out;
|
||||
}
|
||||
|
||||
slot->role_id = wl->role_id;
|
||||
slot->wone_index = STATION_WONE_INDEX;
|
||||
slot->slot_time = slot_time;
|
||||
|
||||
|
@ -279,6 +255,7 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
|
|||
}
|
||||
|
||||
/* MAC filtering */
|
||||
acx->role_id = wl->role_id;
|
||||
acx->enabled = enable;
|
||||
acx->num_groups = mc_list_len;
|
||||
memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN);
|
||||
|
@ -308,6 +285,7 @@ int wl1271_acx_service_period_timeout(struct wl1271 *wl)
|
|||
|
||||
wl1271_debug(DEBUG_ACX, "acx service period timeout");
|
||||
|
||||
rx_timeout->role_id = wl->role_id;
|
||||
rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout);
|
||||
rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout);
|
||||
|
||||
|
@ -344,6 +322,7 @@ int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold)
|
|||
goto out;
|
||||
}
|
||||
|
||||
rts->role_id = wl->role_id;
|
||||
rts->threshold = cpu_to_le16((u16)rts_threshold);
|
||||
|
||||
ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts));
|
||||
|
@ -403,6 +382,7 @@ int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
|
|||
goto out;
|
||||
}
|
||||
|
||||
beacon_filter->role_id = wl->role_id;
|
||||
beacon_filter->enable = enable_filter;
|
||||
|
||||
/*
|
||||
|
@ -439,6 +419,7 @@ int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
|
|||
}
|
||||
|
||||
/* configure default beacon pass-through rules */
|
||||
ie_table->role_id = wl->role_id;
|
||||
ie_table->num_ie = 0;
|
||||
for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) {
|
||||
struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]);
|
||||
|
@ -500,6 +481,7 @@ int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable)
|
|||
timeout = wl->conf.conn.bss_lose_timeout;
|
||||
}
|
||||
|
||||
acx->role_id = wl->role_id;
|
||||
acx->synch_fail_thold = cpu_to_le32(threshold);
|
||||
acx->bss_lose_timeout = cpu_to_le32(timeout);
|
||||
|
||||
|
@ -546,13 +528,13 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
int wl1271_acx_sta_sg_cfg(struct wl1271 *wl)
|
||||
int wl12xx_acx_sg_cfg(struct wl1271 *wl)
|
||||
{
|
||||
struct acx_sta_bt_wlan_coex_param *param;
|
||||
struct acx_bt_wlan_coex_param *param;
|
||||
struct conf_sg_settings *c = &wl->conf.sg;
|
||||
int i, ret;
|
||||
|
||||
wl1271_debug(DEBUG_ACX, "acx sg sta cfg");
|
||||
wl1271_debug(DEBUG_ACX, "acx sg cfg");
|
||||
|
||||
param = kzalloc(sizeof(*param), GFP_KERNEL);
|
||||
if (!param) {
|
||||
|
@ -561,38 +543,8 @@ int wl1271_acx_sta_sg_cfg(struct wl1271 *wl)
|
|||
}
|
||||
|
||||
/* BT-WLAN coext parameters */
|
||||
for (i = 0; i < CONF_SG_STA_PARAMS_MAX; i++)
|
||||
param->params[i] = cpu_to_le32(c->sta_params[i]);
|
||||
param->param_idx = CONF_SG_PARAMS_ALL;
|
||||
|
||||
ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param));
|
||||
if (ret < 0) {
|
||||
wl1271_warning("failed to set sg config: %d", ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
kfree(param);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int wl1271_acx_ap_sg_cfg(struct wl1271 *wl)
|
||||
{
|
||||
struct acx_ap_bt_wlan_coex_param *param;
|
||||
struct conf_sg_settings *c = &wl->conf.sg;
|
||||
int i, ret;
|
||||
|
||||
wl1271_debug(DEBUG_ACX, "acx sg ap cfg");
|
||||
|
||||
param = kzalloc(sizeof(*param), GFP_KERNEL);
|
||||
if (!param) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* BT-WLAN coext parameters */
|
||||
for (i = 0; i < CONF_SG_AP_PARAMS_MAX; i++)
|
||||
param->params[i] = cpu_to_le32(c->ap_params[i]);
|
||||
for (i = 0; i < CONF_SG_PARAMS_MAX; i++)
|
||||
param->params[i] = cpu_to_le32(c->params[i]);
|
||||
param->param_idx = CONF_SG_PARAMS_ALL;
|
||||
|
||||
ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param));
|
||||
|
@ -647,6 +599,7 @@ int wl1271_acx_bcn_dtim_options(struct wl1271 *wl)
|
|||
goto out;
|
||||
}
|
||||
|
||||
bb->role_id = wl->role_id;
|
||||
bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout);
|
||||
bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout);
|
||||
bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps;
|
||||
|
@ -676,6 +629,7 @@ int wl1271_acx_aid(struct wl1271 *wl, u16 aid)
|
|||
goto out;
|
||||
}
|
||||
|
||||
acx_aid->role_id = wl->role_id;
|
||||
acx_aid->aid = cpu_to_le16(aid);
|
||||
|
||||
ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid));
|
||||
|
@ -731,6 +685,7 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble)
|
|||
goto out;
|
||||
}
|
||||
|
||||
acx->role_id = wl->role_id;
|
||||
acx->preamble = preamble;
|
||||
|
||||
ret = wl1271_cmd_configure(wl, ACX_PREAMBLE_TYPE, acx, sizeof(*acx));
|
||||
|
@ -758,6 +713,7 @@ int wl1271_acx_cts_protect(struct wl1271 *wl,
|
|||
goto out;
|
||||
}
|
||||
|
||||
acx->role_id = wl->role_id;
|
||||
acx->ctsprotect = ctsprotect;
|
||||
|
||||
ret = wl1271_cmd_configure(wl, ACX_CTS_PROTECTION, acx, sizeof(*acx));
|
||||
|
@ -789,9 +745,8 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
|
|||
|
||||
int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
|
||||
{
|
||||
struct acx_sta_rate_policy *acx;
|
||||
struct acx_rate_policy *acx;
|
||||
struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf;
|
||||
int idx = 0;
|
||||
int ret = 0;
|
||||
|
||||
wl1271_debug(DEBUG_ACX, "acx rate policies");
|
||||
|
@ -803,25 +758,30 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
|
|||
goto out;
|
||||
}
|
||||
|
||||
wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x",
|
||||
wl->basic_rate, wl->rate_set);
|
||||
|
||||
/* configure one basic rate class */
|
||||
idx = ACX_TX_BASIC_RATE;
|
||||
acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->basic_rate);
|
||||
acx->rate_class[idx].short_retry_limit = c->short_retry_limit;
|
||||
acx->rate_class[idx].long_retry_limit = c->long_retry_limit;
|
||||
acx->rate_class[idx].aflags = c->aflags;
|
||||
acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE);
|
||||
acx->rate_policy.enabled_rates = cpu_to_le32(wl->basic_rate);
|
||||
acx->rate_policy.short_retry_limit = c->short_retry_limit;
|
||||
acx->rate_policy.long_retry_limit = c->long_retry_limit;
|
||||
acx->rate_policy.aflags = c->aflags;
|
||||
|
||||
ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
|
||||
if (ret < 0) {
|
||||
wl1271_warning("Setting of rate policies failed: %d", ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* configure one AP supported rate class */
|
||||
idx = ACX_TX_AP_FULL_RATE;
|
||||
acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->rate_set);
|
||||
acx->rate_class[idx].short_retry_limit = c->short_retry_limit;
|
||||
acx->rate_class[idx].long_retry_limit = c->long_retry_limit;
|
||||
acx->rate_class[idx].aflags = c->aflags;
|
||||
acx->rate_policy_idx = cpu_to_le32(ACX_TX_AP_FULL_RATE);
|
||||
acx->rate_policy.enabled_rates = cpu_to_le32(wl->rate_set);
|
||||
acx->rate_policy.short_retry_limit = c->short_retry_limit;
|
||||
acx->rate_policy.long_retry_limit = c->long_retry_limit;
|
||||
acx->rate_policy.aflags = c->aflags;
|
||||
|
||||
acx->rate_class_cnt = cpu_to_le32(ACX_TX_RATE_POLICY_CNT);
|
||||
|
||||
wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x",
|
||||
acx->rate_class[ACX_TX_BASIC_RATE].enabled_rates,
|
||||
acx->rate_class[ACX_TX_AP_FULL_RATE].enabled_rates);
|
||||
|
||||
ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
|
||||
if (ret < 0) {
|
||||
|
@ -837,7 +797,7 @@ out:
|
|||
int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
|
||||
u8 idx)
|
||||
{
|
||||
struct acx_ap_rate_policy *acx;
|
||||
struct acx_rate_policy *acx;
|
||||
int ret = 0;
|
||||
|
||||
wl1271_debug(DEBUG_ACX, "acx ap rate policy %d rates 0x%x",
|
||||
|
@ -883,6 +843,7 @@ int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
|
|||
goto out;
|
||||
}
|
||||
|
||||
acx->role_id = wl->role_id;
|
||||
acx->ac = ac;
|
||||
acx->cw_min = cw_min;
|
||||
acx->cw_max = cpu_to_le16(cw_max);
|
||||
|
@ -916,6 +877,7 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
|
|||
goto out;
|
||||
}
|
||||
|
||||
acx->role_id = wl->role_id;
|
||||
acx->queue_id = queue_id;
|
||||
acx->channel_type = channel_type;
|
||||
acx->tsid = tsid;
|
||||
|
@ -995,52 +957,9 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
int wl1271_acx_ap_mem_cfg(struct wl1271 *wl)
|
||||
int wl12xx_acx_mem_cfg(struct wl1271 *wl)
|
||||
{
|
||||
struct wl1271_acx_ap_config_memory *mem_conf;
|
||||
struct conf_memory_settings *mem;
|
||||
int ret;
|
||||
|
||||
wl1271_debug(DEBUG_ACX, "wl1271 mem cfg");
|
||||
|
||||
mem_conf = kzalloc(sizeof(*mem_conf), GFP_KERNEL);
|
||||
if (!mem_conf) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (wl->chip.id == CHIP_ID_1283_PG20)
|
||||
/*
|
||||
* FIXME: The 128x AP FW does not yet support dynamic memory.
|
||||
* Use the base memory configuration for 128x for now. This
|
||||
* should be fine tuned in the future.
|
||||
*/
|
||||
mem = &wl->conf.mem_wl128x;
|
||||
else
|
||||
mem = &wl->conf.mem_wl127x;
|
||||
|
||||
/* memory config */
|
||||
mem_conf->num_stations = mem->num_stations;
|
||||
mem_conf->rx_mem_block_num = mem->rx_block_num;
|
||||
mem_conf->tx_min_mem_block_num = mem->tx_min_block_num;
|
||||
mem_conf->num_ssid_profiles = mem->ssid_profiles;
|
||||
mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
|
||||
|
||||
ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
|
||||
sizeof(*mem_conf));
|
||||
if (ret < 0) {
|
||||
wl1271_warning("wl1271 mem config failed: %d", ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
kfree(mem_conf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int wl1271_acx_sta_mem_cfg(struct wl1271 *wl)
|
||||
{
|
||||
struct wl1271_acx_sta_config_memory *mem_conf;
|
||||
struct wl12xx_acx_config_memory *mem_conf;
|
||||
struct conf_memory_settings *mem;
|
||||
int ret;
|
||||
|
||||
|
@ -1183,6 +1102,7 @@ int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
|
|||
goto out;
|
||||
}
|
||||
|
||||
acx->role_id = wl->role_id;
|
||||
acx->enable = enable ? CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE;
|
||||
acx->max_consecutive = wl->conf.conn.bet_max_consecutive;
|
||||
|
||||
|
@ -1210,6 +1130,7 @@ int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address)
|
|||
goto out;
|
||||
}
|
||||
|
||||
acx->role_id = wl->role_id;
|
||||
acx->version = ACX_IPV4_VERSION;
|
||||
acx->enable = enable;
|
||||
|
||||
|
@ -1269,6 +1190,7 @@ int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable)
|
|||
goto out;
|
||||
}
|
||||
|
||||
acx->role_id = wl->role_id;
|
||||
acx->enabled = enable;
|
||||
|
||||
ret = wl1271_cmd_configure(wl, ACX_KEEP_ALIVE_MODE, acx, sizeof(*acx));
|
||||
|
@ -1295,6 +1217,7 @@ int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid)
|
|||
goto out;
|
||||
}
|
||||
|
||||
acx->role_id = wl->role_id;
|
||||
acx->period = cpu_to_le32(wl->conf.conn.keep_alive_interval);
|
||||
acx->index = index;
|
||||
acx->tpl_validation = tpl_valid;
|
||||
|
@ -1328,6 +1251,7 @@ int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
|
|||
|
||||
wl->last_rssi_event = -1;
|
||||
|
||||
acx->role_id = wl->role_id;
|
||||
acx->pacing = cpu_to_le16(wl->conf.roam_trigger.trigger_pacing);
|
||||
acx->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON;
|
||||
acx->type = WL1271_ACX_TRIG_TYPE_EDGE;
|
||||
|
@ -1366,6 +1290,7 @@ int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl)
|
|||
goto out;
|
||||
}
|
||||
|
||||
acx->role_id = wl->role_id;
|
||||
acx->rssi_beacon = c->avg_weight_rssi_beacon;
|
||||
acx->rssi_data = c->avg_weight_rssi_data;
|
||||
acx->snr_beacon = c->avg_weight_snr_beacon;
|
||||
|
@ -1384,14 +1309,15 @@ out:
|
|||
|
||||
int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
|
||||
struct ieee80211_sta_ht_cap *ht_cap,
|
||||
bool allow_ht_operation)
|
||||
bool allow_ht_operation, u8 hlid)
|
||||
{
|
||||
struct wl1271_acx_ht_capabilities *acx;
|
||||
u8 mac_address[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
|
||||
int ret = 0;
|
||||
u32 ht_capabilites = 0;
|
||||
|
||||
wl1271_debug(DEBUG_ACX, "acx ht capabilities setting");
|
||||
wl1271_debug(DEBUG_ACX, "acx ht capabilities setting "
|
||||
"sta supp: %d sta cap: %d", ht_cap->ht_supported,
|
||||
ht_cap->cap);
|
||||
|
||||
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
|
||||
if (!acx) {
|
||||
|
@ -1399,26 +1325,22 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
|
|||
goto out;
|
||||
}
|
||||
|
||||
/* Allow HT Operation ? */
|
||||
if (allow_ht_operation) {
|
||||
ht_capabilites =
|
||||
WL1271_ACX_FW_CAP_HT_OPERATION;
|
||||
if (ht_cap->cap & IEEE80211_HT_CAP_GRN_FLD)
|
||||
ht_capabilites |=
|
||||
WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT;
|
||||
if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
|
||||
ht_capabilites |=
|
||||
WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS;
|
||||
if (ht_cap->cap & IEEE80211_HT_CAP_LSIG_TXOP_PROT)
|
||||
ht_capabilites |=
|
||||
WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION;
|
||||
if (allow_ht_operation && ht_cap->ht_supported) {
|
||||
/* no need to translate capabilities - use the spec values */
|
||||
ht_capabilites = ht_cap->cap;
|
||||
|
||||
/*
|
||||
* this bit is not employed by the spec but only by FW to
|
||||
* indicate peer HT support
|
||||
*/
|
||||
ht_capabilites |= WL12XX_HT_CAP_HT_OPERATION;
|
||||
|
||||
/* get data from A-MPDU parameters field */
|
||||
acx->ampdu_max_length = ht_cap->ampdu_factor;
|
||||
acx->ampdu_min_spacing = ht_cap->ampdu_density;
|
||||
}
|
||||
|
||||
memcpy(acx->mac_address, mac_address, ETH_ALEN);
|
||||
acx->hlid = hlid;
|
||||
acx->ht_capabilites = cpu_to_le32(ht_capabilites);
|
||||
|
||||
ret = wl1271_cmd_configure(wl, ACX_PEER_HT_CAP, acx, sizeof(*acx));
|
||||
|
@ -1446,6 +1368,7 @@ int wl1271_acx_set_ht_information(struct wl1271 *wl,
|
|||
goto out;
|
||||
}
|
||||
|
||||
acx->role_id = wl->role_id;
|
||||
acx->ht_protection =
|
||||
(u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
|
||||
acx->rifs_mode = 0;
|
||||
|
@ -1467,14 +1390,12 @@ out:
|
|||
}
|
||||
|
||||
/* Configure BA session initiator/receiver parameters setting in the FW. */
|
||||
int wl1271_acx_set_ba_session(struct wl1271 *wl,
|
||||
enum ieee80211_back_parties direction,
|
||||
u8 tid_index, u8 policy)
|
||||
int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl)
|
||||
{
|
||||
struct wl1271_acx_ba_session_policy *acx;
|
||||
struct wl1271_acx_ba_initiator_policy *acx;
|
||||
int ret;
|
||||
|
||||
wl1271_debug(DEBUG_ACX, "acx ba session setting");
|
||||
wl1271_debug(DEBUG_ACX, "acx ba initiator policy");
|
||||
|
||||
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
|
||||
if (!acx) {
|
||||
|
@ -1482,33 +1403,18 @@ int wl1271_acx_set_ba_session(struct wl1271 *wl,
|
|||
goto out;
|
||||
}
|
||||
|
||||
/* ANY role */
|
||||
acx->role_id = 0xff;
|
||||
acx->tid = tid_index;
|
||||
acx->enable = policy;
|
||||
acx->ba_direction = direction;
|
||||
|
||||
switch (direction) {
|
||||
case WLAN_BACK_INITIATOR:
|
||||
acx->win_size = wl->conf.ht.tx_ba_win_size;
|
||||
acx->inactivity_timeout = wl->conf.ht.inactivity_timeout;
|
||||
break;
|
||||
case WLAN_BACK_RECIPIENT:
|
||||
acx->win_size = RX_BA_WIN_SIZE;
|
||||
acx->inactivity_timeout = 0;
|
||||
break;
|
||||
default:
|
||||
wl1271_error("Incorrect acx command id=%x\n", direction);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
/* set for the current role */
|
||||
acx->role_id = wl->role_id;
|
||||
acx->tid_bitmap = wl->conf.ht.tx_ba_tid_bitmap;
|
||||
acx->win_size = wl->conf.ht.tx_ba_win_size;
|
||||
acx->inactivity_timeout = wl->conf.ht.inactivity_timeout;
|
||||
|
||||
ret = wl1271_cmd_configure(wl,
|
||||
ACX_BA_SESSION_POLICY_CFG,
|
||||
ACX_BA_SESSION_INIT_POLICY,
|
||||
acx,
|
||||
sizeof(*acx));
|
||||
if (ret < 0) {
|
||||
wl1271_warning("acx ba session setting failed: %d", ret);
|
||||
wl1271_warning("acx ba initiator policy failed: %d", ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -1518,8 +1424,8 @@ out:
|
|||
}
|
||||
|
||||
/* setup BA session receiver setting in the FW. */
|
||||
int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
|
||||
bool enable)
|
||||
int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
|
||||
u16 ssn, bool enable, u8 peer_hlid)
|
||||
{
|
||||
struct wl1271_acx_ba_receiver_setup *acx;
|
||||
int ret;
|
||||
|
@ -1532,11 +1438,10 @@ int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
|
|||
goto out;
|
||||
}
|
||||
|
||||
/* Single link for now */
|
||||
acx->link_id = 1;
|
||||
acx->hlid = peer_hlid;
|
||||
acx->tid = tid_index;
|
||||
acx->enable = enable;
|
||||
acx->win_size = 0;
|
||||
acx->win_size = wl->conf.ht.rx_ba_win_size;
|
||||
acx->ssn = ssn;
|
||||
|
||||
ret = wl1271_cmd_configure(wl, ACX_BA_SESSION_RX_SETUP, acx,
|
||||
|
@ -1606,6 +1511,7 @@ int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable)
|
|||
if (!(conf_queues & BIT(i)))
|
||||
continue;
|
||||
|
||||
rx_streaming->role_id = wl->role_id;
|
||||
rx_streaming->tid = i;
|
||||
rx_streaming->enable = enable_queues & BIT(i);
|
||||
rx_streaming->period = wl->conf.rx_streaming.interval;
|
||||
|
@ -1635,6 +1541,7 @@ int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl)
|
|||
if (!acx)
|
||||
return -ENOMEM;
|
||||
|
||||
acx->role_id = wl->role_id;
|
||||
acx->max_tx_retry = cpu_to_le16(wl->conf.tx.max_tx_retries);
|
||||
|
||||
ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx));
|
||||
|
@ -1703,31 +1610,6 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
int wl1271_acx_set_ap_beacon_filter(struct wl1271 *wl, bool enable)
|
||||
{
|
||||
struct acx_ap_beacon_filter *acx = NULL;
|
||||
int ret;
|
||||
|
||||
wl1271_debug(DEBUG_ACX, "acx set ap beacon filter: %d", enable);
|
||||
|
||||
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
|
||||
if (!acx)
|
||||
return -ENOMEM;
|
||||
|
||||
acx->enable = enable ? 1 : 0;
|
||||
|
||||
ret = wl1271_cmd_configure(wl, ACX_AP_BEACON_FILTER_OPT,
|
||||
acx, sizeof(*acx));
|
||||
if (ret < 0) {
|
||||
wl1271_warning("acx set ap beacon filter failed: %d", ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
kfree(acx);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int wl1271_acx_fm_coex(struct wl1271 *wl)
|
||||
{
|
||||
struct wl1271_acx_fm_coex *acx;
|
||||
|
@ -1767,3 +1649,45 @@ out:
|
|||
kfree(acx);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl)
|
||||
{
|
||||
struct wl12xx_acx_set_rate_mgmt_params *acx = NULL;
|
||||
struct conf_rate_policy_settings *conf = &wl->conf.rate;
|
||||
int ret;
|
||||
|
||||
wl1271_debug(DEBUG_ACX, "acx set rate mgmt params");
|
||||
|
||||
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
|
||||
if (!acx)
|
||||
return -ENOMEM;
|
||||
|
||||
acx->index = ACX_RATE_MGMT_ALL_PARAMS;
|
||||
acx->rate_retry_score = cpu_to_le16(conf->rate_retry_score);
|
||||
acx->per_add = cpu_to_le16(conf->per_add);
|
||||
acx->per_th1 = cpu_to_le16(conf->per_th1);
|
||||
acx->per_th2 = cpu_to_le16(conf->per_th2);
|
||||
acx->max_per = cpu_to_le16(conf->max_per);
|
||||
acx->inverse_curiosity_factor = conf->inverse_curiosity_factor;
|
||||
acx->tx_fail_low_th = conf->tx_fail_low_th;
|
||||
acx->tx_fail_high_th = conf->tx_fail_high_th;
|
||||
acx->per_alpha_shift = conf->per_alpha_shift;
|
||||
acx->per_add_shift = conf->per_add_shift;
|
||||
acx->per_beta1_shift = conf->per_beta1_shift;
|
||||
acx->per_beta2_shift = conf->per_beta2_shift;
|
||||
acx->rate_check_up = conf->rate_check_up;
|
||||
acx->rate_check_down = conf->rate_check_down;
|
||||
memcpy(acx->rate_retry_policy, conf->rate_retry_policy,
|
||||
sizeof(acx->rate_retry_policy));
|
||||
|
||||
ret = wl1271_cmd_configure(wl, ACX_SET_RATE_MGMT_PARAMS,
|
||||
acx, sizeof(*acx));
|
||||
if (ret < 0) {
|
||||
wl1271_warning("acx set rate mgmt params failed: %d", ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
kfree(acx);
|
||||
return ret;
|
||||
}
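Almost every helper in this file follows the same shape: allocate the ACX payload, stamp the role id, fill in the fields, push it to the firmware with wl1271_cmd_configure(), and free it. A condensed sketch of that recurring pattern, where struct acx_example, ACX_EXAMPLE and some_field are stand-in names that do not exist in the driver:

static int wl12xx_acx_example(struct wl1271 *wl, u16 value)
{
	/* Illustrative only: the alloc/fill/configure/free shape used by
	 * the ACX helpers above. */
	struct acx_example *acx;
	int ret;

	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
	if (!acx)
		return -ENOMEM;

	acx->role_id = wl->role_id;		/* per-role addressing */
	acx->some_field = cpu_to_le16(value);	/* hypothetical field */

	ret = wl1271_cmd_configure(wl, ACX_EXAMPLE, acx, sizeof(*acx));
	if (ret < 0)
		wl1271_warning("acx example config failed: %d", ret);

	kfree(acx);
	return ret;
}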
|
||||
|
|
|
@ -101,6 +101,17 @@ struct acx_error_counter {
|
|||
__le32 seq_num_miss;
|
||||
} __packed;
|
||||
|
||||
enum wl12xx_role {
|
||||
WL1271_ROLE_STA = 0,
|
||||
WL1271_ROLE_IBSS,
|
||||
WL1271_ROLE_AP,
|
||||
WL1271_ROLE_DEVICE,
|
||||
WL1271_ROLE_P2P_CL,
|
||||
WL1271_ROLE_P2P_GO,
|
||||
|
||||
WL12XX_INVALID_ROLE_TYPE = 0xff
|
||||
};
|
||||
|
||||
enum wl1271_psm_mode {
|
||||
/* Active mode */
|
||||
WL1271_PSM_CAM = 0,
|
||||
|
@ -160,94 +171,6 @@ struct acx_rx_msdu_lifetime {
|
|||
__le32 lifetime;
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
* RX Config Options Table
|
||||
* Bit Definition
|
||||
* === ==========
|
||||
* 31:14 Reserved
|
||||
* 13 Copy RX Status - when set, write three receive status words
|
||||
* to top of rx'd MPDUs.
|
||||
* When cleared, do not write three status words (added rev 1.5)
|
||||
* 12 Reserved
|
||||
* 11 RX Complete upon FCS error - when set, give rx complete
|
||||
* interrupt for FCS errors, after the rx filtering, e.g. unicast
|
||||
* frames not to us with FCS error will not generate an interrupt.
|
||||
* 10 SSID Filter Enable - When set, the WiLink discards all beacon,
|
||||
* probe request, and probe response frames with an SSID that does
|
||||
* not match the SSID specified by the host in the START/JOIN
|
||||
* command.
|
||||
* When clear, the WiLink receives frames with any SSID.
|
||||
* 9 Broadcast Filter Enable - When set, the WiLink discards all
|
||||
* broadcast frames. When clear, the WiLink receives all received
|
||||
* broadcast frames.
|
||||
* 8:6 Reserved
|
||||
* 5 BSSID Filter Enable - When set, the WiLink discards any frames
|
||||
* with a BSSID that does not match the BSSID specified by the
|
||||
* host.
|
||||
* When clear, the WiLink receives frames from any BSSID.
|
||||
* 4 MAC Addr Filter - When set, the WiLink discards any frames
|
||||
* with a destination address that does not match the MAC address
|
||||
* of the adaptor.
|
||||
* When clear, the WiLink receives frames destined to any MAC
|
||||
* address.
|
||||
* 3 Promiscuous - When set, the WiLink receives all valid frames
|
||||
* (i.e., all frames that pass the FCS check).
|
||||
* When clear, only frames that pass the other filters specified
|
||||
* are received.
|
||||
* 2 FCS - When set, the WiLink includes the FCS with the received
|
||||
* frame.
|
||||
* When cleared, the FCS is discarded.
|
||||
* 1 PLCP header - When set, write all data from baseband to frame
|
||||
* buffer including PHY header.
|
||||
* 0 Reserved - Always equal to 0.
|
||||
*
|
||||
* RX Filter Options Table
|
||||
* Bit Definition
|
||||
* === ==========
|
||||
* 31:12 Reserved - Always equal to 0.
|
||||
* 11 Association - When set, the WiLink receives all association
|
||||
* related frames (association request/response, reassocation
|
||||
* request/response, and disassociation). When clear, these frames
|
||||
* are discarded.
|
||||
* 10 Auth/De auth - When set, the WiLink receives all authentication
|
||||
* and de-authentication frames. When clear, these frames are
|
||||
* discarded.
|
||||
* 9 Beacon - When set, the WiLink receives all beacon frames.
|
||||
* When clear, these frames are discarded.
|
||||
* 8 Contention Free - When set, the WiLink receives all contention
|
||||
* free frames.
|
||||
* When clear, these frames are discarded.
|
||||
* 7 Control - When set, the WiLink receives all control frames.
|
||||
* When clear, these frames are discarded.
|
||||
* 6 Data - When set, the WiLink receives all data frames.
|
||||
* When clear, these frames are discarded.
|
||||
* 5 FCS Error - When set, the WiLink receives frames that have FCS
|
||||
* errors.
|
||||
* When clear, these frames are discarded.
|
||||
* 4 Management - When set, the WiLink receives all management
|
||||
* frames.
|
||||
* When clear, these frames are discarded.
|
||||
* 3 Probe Request - When set, the WiLink receives all probe request
|
||||
* frames.
|
||||
* When clear, these frames are discarded.
|
||||
* 2 Probe Response - When set, the WiLink receives all probe
|
||||
* response frames.
|
||||
* When clear, these frames are discarded.
|
||||
* 1 RTS/CTS/ACK - When set, the WiLink receives all RTS, CTS and ACK
|
||||
* frames.
|
||||
* When clear, these frames are discarded.
|
||||
* 0 Rsvd Type/Sub Type - When set, the WiLink receives all frames
|
||||
* that have reserved frame types and sub types as defined by the
|
||||
* 802.11 specification.
|
||||
* When clear, these frames are discarded.
|
||||
*/
|
||||
struct acx_rx_config {
|
||||
struct acx_header header;
|
||||
|
||||
__le32 config_options;
|
||||
__le32 filter_options;
|
||||
} __packed;
|
||||
|
||||
struct acx_packet_detection {
|
||||
struct acx_header header;
|
||||
|
||||
|
@ -267,9 +190,10 @@ enum acx_slot_type {
|
|||
struct acx_slot {
|
||||
struct acx_header header;
|
||||
|
||||
u8 role_id;
|
||||
u8 wone_index; /* Reserved */
|
||||
u8 slot_time;
|
||||
u8 reserved[6];
|
||||
u8 reserved[5];
|
||||
} __packed;
|
||||
|
||||
|
||||
|
@ -279,29 +203,35 @@ struct acx_slot {
|
|||
struct acx_dot11_grp_addr_tbl {
|
||||
struct acx_header header;
|
||||
|
||||
u8 role_id;
|
||||
u8 enabled;
|
||||
u8 num_groups;
|
||||
u8 pad[2];
|
||||
u8 pad[1];
|
||||
u8 mac_table[ADDRESS_GROUP_MAX_LEN];
|
||||
} __packed;
|
||||
|
||||
struct acx_rx_timeout {
|
||||
struct acx_header header;
|
||||
|
||||
u8 role_id;
|
||||
u8 reserved;
|
||||
__le16 ps_poll_timeout;
|
||||
__le16 upsd_timeout;
|
||||
u8 padding[2];
|
||||
} __packed;
|
||||
|
||||
struct acx_rts_threshold {
|
||||
struct acx_header header;
|
||||
|
||||
u8 role_id;
|
||||
u8 reserved;
|
||||
__le16 threshold;
|
||||
u8 pad[2];
|
||||
} __packed;
|
||||
|
||||
struct acx_beacon_filter_option {
|
||||
struct acx_header header;
|
||||
|
||||
u8 role_id;
|
||||
u8 enable;
|
||||
/*
|
||||
* The number of beacons without the unicast TIM
|
||||
|
@ -311,7 +241,7 @@ struct acx_beacon_filter_option {
|
|||
* without the unicast TIM bit set are dropped.
|
||||
*/
|
||||
u8 max_num_beacons;
|
||||
u8 pad[2];
|
||||
u8 pad[1];
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
|
@ -350,14 +280,17 @@ struct acx_beacon_filter_option {
|
|||
struct acx_beacon_filter_ie_table {
|
||||
struct acx_header header;
|
||||
|
||||
u8 role_id;
|
||||
u8 num_ie;
|
||||
u8 pad[3];
|
||||
u8 pad[2];
|
||||
u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
|
||||
} __packed;
|
||||
|
||||
struct acx_conn_monit_params {
|
||||
struct acx_header header;
|
||||
|
||||
u8 role_id;
|
||||
u8 padding[3];
|
||||
__le32 synch_fail_thold; /* number of beacons missed */
|
||||
__le32 bss_lose_timeout; /* number of TU's from synch fail */
|
||||
} __packed;
|
||||
|
@ -369,23 +302,14 @@ struct acx_bt_wlan_coex {
|
|||
u8 pad[3];
|
||||
} __packed;
|
||||
|
||||
struct acx_sta_bt_wlan_coex_param {
|
||||
struct acx_bt_wlan_coex_param {
|
||||
struct acx_header header;
|
||||
|
||||
__le32 params[CONF_SG_STA_PARAMS_MAX];
|
||||
__le32 params[CONF_SG_PARAMS_MAX];
|
||||
u8 param_idx;
|
||||
u8 padding[3];
|
||||
} __packed;
|
||||
|
||||
struct acx_ap_bt_wlan_coex_param {
|
||||
struct acx_header header;
|
||||
|
||||
__le32 params[CONF_SG_AP_PARAMS_MAX];
|
||||
u8 param_idx;
|
||||
u8 padding[3];
|
||||
} __packed;
|
||||
|
||||
|
||||
struct acx_dco_itrim_params {
|
||||
struct acx_header header;
|
||||
|
||||
|
@ -406,15 +330,16 @@ struct acx_energy_detection {
|
|||
struct acx_beacon_broadcast {
|
||||
struct acx_header header;
|
||||
|
||||
__le16 beacon_rx_timeout;
|
||||
__le16 broadcast_timeout;
|
||||
|
||||
u8 role_id;
|
||||
/* Enables receiving of broadcast packets in PS mode */
|
||||
u8 rx_broadcast_in_ps;
|
||||
|
||||
__le16 beacon_rx_timeout;
|
||||
__le16 broadcast_timeout;
|
||||
|
||||
/* Consecutive PS Poll failures before updating the host */
|
||||
u8 ps_poll_threshold;
|
||||
u8 pad[2];
|
||||
u8 pad[1];
|
||||
} __packed;
|
||||
|
||||
struct acx_event_mask {
|
||||
|
@ -424,35 +349,6 @@ struct acx_event_mask {
|
|||
__le32 high_event_mask; /* Unused */
|
||||
} __packed;
|
||||
|
||||
#define CFG_RX_FCS BIT(2)
|
||||
#define CFG_RX_ALL_GOOD BIT(3)
|
||||
#define CFG_UNI_FILTER_EN BIT(4)
|
||||
#define CFG_BSSID_FILTER_EN BIT(5)
|
||||
#define CFG_MC_FILTER_EN BIT(6)
|
||||
#define CFG_MC_ADDR0_EN BIT(7)
|
||||
#define CFG_MC_ADDR1_EN BIT(8)
|
||||
#define CFG_BC_REJECT_EN BIT(9)
|
||||
#define CFG_SSID_FILTER_EN BIT(10)
|
||||
#define CFG_RX_INT_FCS_ERROR BIT(11)
|
||||
#define CFG_RX_INT_ENCRYPTED BIT(12)
|
||||
#define CFG_RX_WR_RX_STATUS BIT(13)
|
||||
#define CFG_RX_FILTER_NULTI BIT(14)
|
||||
#define CFG_RX_RESERVE BIT(15)
|
||||
#define CFG_RX_TIMESTAMP_TSF BIT(16)
|
||||
|
||||
#define CFG_RX_RSV_EN BIT(0)
|
||||
#define CFG_RX_RCTS_ACK BIT(1)
|
||||
#define CFG_RX_PRSP_EN BIT(2)
|
||||
#define CFG_RX_PREQ_EN BIT(3)
|
||||
#define CFG_RX_MGMT_EN BIT(4)
|
||||
#define CFG_RX_FCS_ERROR BIT(5)
|
||||
#define CFG_RX_DATA_EN BIT(6)
|
||||
#define CFG_RX_CTL_EN BIT(7)
|
||||
#define CFG_RX_CF_EN BIT(8)
|
||||
#define CFG_RX_BCN_EN BIT(9)
|
||||
#define CFG_RX_AUTH_EN BIT(10)
|
||||
#define CFG_RX_ASSOC_EN BIT(11)
|
||||
|
||||
#define SCAN_PASSIVE BIT(0)
|
||||
#define SCAN_5GHZ_BAND BIT(1)
|
||||
#define SCAN_TRIGGERED BIT(2)
|
||||
|
@ -465,6 +361,8 @@ struct acx_event_mask {
|
|||
struct acx_feature_config {
|
||||
struct acx_header header;
|
||||
|
||||
u8 role_id;
|
||||
u8 padding[3];
|
||||
__le32 options;
|
||||
__le32 data_flow_options;
|
||||
} __packed;
|
||||
|
@ -472,16 +370,18 @@ struct acx_feature_config {
|
|||
struct acx_current_tx_power {
|
||||
struct acx_header header;
|
||||
|
||||
u8 role_id;
|
||||
u8 current_tx_power;
|
||||
u8 padding[3];
|
||||
u8 padding[2];
|
||||
} __packed;
|
||||
|
||||
struct acx_wake_up_condition {
|
||||
struct acx_header header;
|
||||
|
||||
u8 role_id;
|
||||
u8 wake_up_event; /* Only one bit can be set */
|
||||
u8 listen_interval;
|
||||
u8 pad[2];
|
||||
u8 pad[1];
|
||||
} __packed;
|
||||
|
||||
struct acx_aid {
|
||||
|
@ -490,8 +390,9 @@ struct acx_aid {
|
|||
/*
|
||||
* To be set when associated with an AP.
|
||||
*/
|
||||
u8 role_id;
|
||||
u8 reserved;
|
||||
__le16 aid;
|
||||
u8 pad[2];
|
||||
} __packed;
|
||||
|
||||
enum acx_preamble_type {
|
||||
|
@ -506,8 +407,9 @@ struct acx_preamble {
|
|||
* When set, the WiLink transmits the frames with a short preamble and
|
||||
* when cleared, the WiLink transmits the frames with a long preamble.
|
||||
*/
|
||||
u8 role_id;
|
||||
u8 preamble;
|
||||
u8 padding[3];
|
||||
u8 padding[2];
|
||||
} __packed;
|
||||
|
||||
enum acx_ctsprotect_type {
|
||||
|
@@ -517,8 +419,9 @@ enum acx_ctsprotect_type {
|
|||
|
||||
struct acx_ctsprotect {
|
||||
struct acx_header header;
|
||||
u8 role_id;
|
||||
u8 ctsprotect;
|
||||
u8 padding[3];
|
||||
u8 padding[2];
|
||||
} __packed;
|
||||
|
||||
struct acx_tx_statistics {
|
||||
|
@@ -753,18 +656,9 @@ struct acx_rate_class {
|
|||
|
||||
#define ACX_TX_BASIC_RATE 0
|
||||
#define ACX_TX_AP_FULL_RATE 1
|
||||
#define ACX_TX_RATE_POLICY_CNT 2
|
||||
struct acx_sta_rate_policy {
|
||||
struct acx_header header;
|
||||
|
||||
__le32 rate_class_cnt;
|
||||
struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES];
|
||||
} __packed;
|
||||
|
||||
|
||||
#define ACX_TX_AP_MODE_MGMT_RATE 4
|
||||
#define ACX_TX_AP_MODE_BCST_RATE 5
|
||||
struct acx_ap_rate_policy {
|
||||
struct acx_rate_policy {
|
||||
struct acx_header header;
|
||||
|
||||
__le32 rate_policy_idx;
|
||||
|
@@ -773,22 +667,23 @@ struct acx_ap_rate_policy {
|
|||
|
||||
struct acx_ac_cfg {
|
||||
struct acx_header header;
|
||||
u8 role_id;
|
||||
u8 ac;
|
||||
u8 aifsn;
|
||||
u8 cw_min;
|
||||
__le16 cw_max;
|
||||
u8 aifsn;
|
||||
u8 reserved;
|
||||
__le16 tx_op_limit;
|
||||
} __packed;
|
||||
|
||||
struct acx_tid_config {
|
||||
struct acx_header header;
|
||||
u8 role_id;
|
||||
u8 queue_id;
|
||||
u8 channel_type;
|
||||
u8 tsid;
|
||||
u8 ps_scheme;
|
||||
u8 ack_policy;
|
||||
u8 padding[3];
|
||||
u8 padding[2];
|
||||
__le32 apsd_conf[2];
|
||||
} __packed;
|
||||
|
||||
|
@@ -804,19 +699,7 @@ struct acx_tx_config_options {
|
|||
__le16 tx_compl_threshold; /* number of packets */
|
||||
} __packed;
|
||||
|
||||
#define ACX_TX_DESCRIPTORS 32
|
||||
|
||||
struct wl1271_acx_ap_config_memory {
|
||||
struct acx_header header;
|
||||
|
||||
u8 rx_mem_block_num;
|
||||
u8 tx_min_mem_block_num;
|
||||
u8 num_stations;
|
||||
u8 num_ssid_profiles;
|
||||
__le32 total_tx_descriptors;
|
||||
} __packed;
|
||||
|
||||
struct wl1271_acx_sta_config_memory {
|
||||
struct wl12xx_acx_config_memory {
|
||||
struct acx_header header;
|
||||
|
||||
u8 rx_mem_block_num;
|
||||
|
@@ -890,9 +773,10 @@ struct wl1271_acx_rx_config_opt {
|
|||
struct wl1271_acx_bet_enable {
|
||||
struct acx_header header;
|
||||
|
||||
u8 role_id;
|
||||
u8 enable;
|
||||
u8 max_consecutive;
|
||||
u8 padding[2];
|
||||
u8 padding[1];
|
||||
} __packed;
|
||||
|
||||
#define ACX_IPV4_VERSION 4
|
||||
|
@@ -905,9 +789,10 @@ struct wl1271_acx_bet_enable {
|
|||
|
||||
struct wl1271_acx_arp_filter {
|
||||
struct acx_header header;
|
||||
u8 role_id;
|
||||
u8 version; /* ACX_IPV4_VERSION, ACX_IPV6_VERSION */
|
||||
u8 enable; /* bitmap of enabled ARP filtering features */
|
||||
u8 padding[2];
|
||||
u8 padding[1];
|
||||
u8 address[16]; /* The configured device IP address - all ARP
|
||||
requests directed to this IP address will pass
|
||||
through. For IPv4, the first four bytes are
|
||||
|
@@ -925,8 +810,9 @@ struct wl1271_acx_pm_config {
|
|||
struct wl1271_acx_keep_alive_mode {
|
||||
struct acx_header header;
|
||||
|
||||
u8 role_id;
|
||||
u8 enabled;
|
||||
u8 padding[3];
|
||||
u8 padding[2];
|
||||
} __packed;
|
||||
|
||||
enum {
|
||||
|
@@ -942,11 +828,11 @@ enum {
|
|||
struct wl1271_acx_keep_alive_config {
|
||||
struct acx_header header;
|
||||
|
||||
__le32 period;
|
||||
u8 role_id;
|
||||
u8 index;
|
||||
u8 tpl_validation;
|
||||
u8 trigger;
|
||||
u8 padding;
|
||||
__le32 period;
|
||||
} __packed;
|
||||
|
||||
#define HOST_IF_CFG_RX_FIFO_ENABLE BIT(0)
|
||||
|
@@ -990,26 +876,33 @@ enum {
|
|||
struct wl1271_acx_rssi_snr_trigger {
|
||||
struct acx_header header;
|
||||
|
||||
__le16 threshold;
|
||||
__le16 pacing; /* 0 - 60000 ms */
|
||||
u8 role_id;
|
||||
u8 metric;
|
||||
u8 type;
|
||||
u8 dir;
|
||||
__le16 threshold;
|
||||
__le16 pacing; /* 0 - 60000 ms */
|
||||
u8 hysteresis;
|
||||
u8 index;
|
||||
u8 enable;
|
||||
u8 padding[2];
|
||||
u8 padding[1];
|
||||
};
|
||||
|
||||
struct wl1271_acx_rssi_snr_avg_weights {
|
||||
struct acx_header header;
|
||||
|
||||
u8 role_id;
|
||||
u8 padding[3];
|
||||
u8 rssi_beacon;
|
||||
u8 rssi_data;
|
||||
u8 snr_beacon;
|
||||
u8 snr_data;
|
||||
};
|
||||
|
||||
|
||||
/* special capability bit (not employed by the 802.11n spec) */
|
||||
#define WL12XX_HT_CAP_HT_OPERATION BIT(16)
|
||||
|
||||
/*
|
||||
* ACX_PEER_HT_CAP
|
||||
* Configure HT capabilities - declare the capabilities of the peer
|
||||
|
@@ -1018,28 +911,11 @@ struct wl1271_acx_rssi_snr_avg_weights {
|
|||
struct wl1271_acx_ht_capabilities {
|
||||
struct acx_header header;
|
||||
|
||||
/*
|
||||
* bit 0 - Allow HT Operation
|
||||
* bit 1 - Allow Greenfield format in TX
|
||||
* bit 2 - Allow Short GI in TX
|
||||
* bit 3 - Allow L-SIG TXOP Protection in TX
|
||||
* bit 4 - Allow HT Control fields in TX.
|
||||
* Note, driver will still leave space for HT control in packets
|
||||
* regardless of the value of this field. FW will be responsible
|
||||
* to drop the HT field from any frame when this Bit set to 0.
|
||||
* bit 5 - Allow RD initiation in TXOP. FW is allowed to initate RD.
|
||||
* Exact policy setting for this feature is TBD.
|
||||
* Note, this bit can only be set to 1 if bit 3 is set to 1.
|
||||
*/
|
||||
/* bitmask of capability bits supported by the peer */
|
||||
__le32 ht_capabilites;
|
||||
|
||||
/*
|
||||
* Indicates to which peer these capabilities apply.
|
||||
* For infrastructure use ff:ff:ff:ff:ff:ff that indicates relevance
|
||||
* for all peers.
|
||||
* Only valid for IBSS/DLS operation.
|
||||
*/
|
||||
u8 mac_address[ETH_ALEN];
|
||||
/* Indicates to which link these capabilities apply. */
|
||||
u8 hlid;
|
||||
|
||||
/*
|
||||
* This is the maximum A-MPDU length supported by the AP. The FW may not
|
||||
|
@@ -1049,17 +925,10 @@ struct wl1271_acx_ht_capabilities {
|
|||
|
||||
/* This is the minimal spacing required when sending A-MPDUs to the AP*/
|
||||
u8 ampdu_min_spacing;
|
||||
|
||||
u8 padding;
|
||||
} __packed;
|
||||
|
||||
/* HT Capabilites Fw Bit Mask Mapping */
|
||||
#define WL1271_ACX_FW_CAP_HT_OPERATION BIT(0)
|
||||
#define WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT BIT(1)
|
||||
#define WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS BIT(2)
|
||||
#define WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION BIT(3)
|
||||
#define WL1271_ACX_FW_CAP_HT_CONTROL_FIELDS BIT(4)
|
||||
#define WL1271_ACX_FW_CAP_RD_INITIATION BIT(5)
|
||||
|
||||
|
||||
/*
|
||||
* ACX_HT_BSS_OPERATION
|
||||
* Configure HT capabilities - AP rules for behavior in the BSS.
|
||||
|
@@ -1067,6 +936,8 @@ struct wl1271_acx_ht_capabilities {
|
|||
struct wl1271_acx_ht_information {
|
||||
struct acx_header header;
|
||||
|
||||
u8 role_id;
|
||||
|
||||
/* Values: 0 - RIFS not allowed, 1 - RIFS allowed */
|
||||
u8 rifs_mode;
|
||||
|
||||
|
@@ -1088,60 +959,51 @@ struct wl1271_acx_ht_information {
|
|||
*/
|
||||
u8 dual_cts_protection;
|
||||
|
||||
u8 padding[3];
|
||||
u8 padding[2];
|
||||
} __packed;
|
||||
|
||||
#define RX_BA_WIN_SIZE 8
|
||||
#define RX_BA_MAX_SESSIONS 2
|
||||
|
||||
struct wl1271_acx_ba_session_policy {
|
||||
struct wl1271_acx_ba_initiator_policy {
|
||||
struct acx_header header;
|
||||
/*
|
||||
* Specifies role Id, Range 0-7, 0xFF means ANY role.
|
||||
* Future use. For now this field is irrelevant
|
||||
*/
|
||||
|
||||
/* Specifies role Id, Range 0-7, 0xFF means ANY role. */
|
||||
u8 role_id;
|
||||
|
||||
/*
|
||||
* Specifies Link Id, Range 0-31, 0xFF means ANY Link Id.
|
||||
* Not applicable if Role Id is set to ANY.
|
||||
* Per TID setting for allowing TX BA. Set a bit to 1 to allow
|
||||
* TX BA sessions for the corresponding TID.
|
||||
*/
|
||||
u8 link_id;
|
||||
|
||||
u8 tid;
|
||||
|
||||
u8 enable;
|
||||
u8 tid_bitmap;
|
||||
|
||||
/* Window size in number of packets */
|
||||
u16 win_size;
|
||||
u8 win_size;
|
||||
|
||||
/*
|
||||
* As initiator inactivity timeout in time units(TU) of 1024us.
|
||||
* As receiver reserved
|
||||
*/
|
||||
u8 padding1[1];
|
||||
|
||||
/* As initiator, inactivity timeout in time units (TU) of 1024us */
|
||||
u16 inactivity_timeout;
|
||||
|
||||
/* Initiator = 1/Receiver = 0 */
|
||||
u8 ba_direction;
|
||||
|
||||
u8 padding[3];
|
||||
u8 padding[2];
|
||||
} __packed;
|
||||
|
||||
struct wl1271_acx_ba_receiver_setup {
|
||||
struct acx_header header;
|
||||
|
||||
/* Specifies Link Id, Range 0-31, 0xFF means ANY Link Id */
|
||||
u8 link_id;
|
||||
/* Specifies link id, range 0-31 */
|
||||
u8 hlid;
|
||||
|
||||
u8 tid;
|
||||
|
||||
u8 enable;
|
||||
|
||||
u8 padding[1];
|
||||
|
||||
/* Window size in number of packets */
|
||||
u16 win_size;
|
||||
u8 win_size;
|
||||
|
||||
/* BA session starting sequence number. RANGE 0-FFF */
|
||||
u16 ssn;
|
||||
|
||||
u8 padding[2];
|
||||
} __packed;
|
||||
|
||||
struct wl1271_acx_fw_tsf_information {
|
||||
|
@@ -1158,6 +1020,7 @@ struct wl1271_acx_fw_tsf_information {
|
|||
struct wl1271_acx_ps_rx_streaming {
|
||||
struct acx_header header;
|
||||
|
||||
u8 role_id;
|
||||
u8 tid;
|
||||
u8 enable;
|
||||
|
||||
|
@@ -1166,17 +1029,20 @@ struct wl1271_acx_ps_rx_streaming {
|
|||
|
||||
/* timeout before first trigger (0-200 msec) */
|
||||
u8 timeout;
|
||||
u8 padding[3];
|
||||
} __packed;
|
||||
|
||||
struct wl1271_acx_ap_max_tx_retry {
|
||||
struct acx_header header;
|
||||
|
||||
u8 role_id;
|
||||
u8 padding_1;
|
||||
|
||||
/*
|
||||
* the number of frames transmission failures before
|
||||
* issuing the aging event.
|
||||
*/
|
||||
__le16 max_tx_retry;
|
||||
u8 padding_1[2];
|
||||
} __packed;
|
||||
|
||||
struct wl1271_acx_config_ps {
|
||||
|
@@ -1195,13 +1061,6 @@ struct wl1271_acx_inconnection_sta {
|
|||
u8 padding1[2];
|
||||
} __packed;
|
||||
|
||||
struct acx_ap_beacon_filter {
|
||||
struct acx_header header;
|
||||
|
||||
u8 enable;
|
||||
u8 pad[3];
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
* ACX_FM_COEX_CFG
|
||||
* set the FM co-existence parameters.
|
||||
|
@@ -1261,6 +1120,30 @@ struct wl1271_acx_fm_coex {
|
|||
u8 swallow_clk_diff;
|
||||
} __packed;
|
||||
|
||||
#define ACX_RATE_MGMT_ALL_PARAMS 0xff
|
||||
struct wl12xx_acx_set_rate_mgmt_params {
|
||||
struct acx_header header;
|
||||
|
||||
u8 index; /* 0xff to configure all params */
|
||||
u8 padding1;
|
||||
__le16 rate_retry_score;
|
||||
__le16 per_add;
|
||||
__le16 per_th1;
|
||||
__le16 per_th2;
|
||||
__le16 max_per;
|
||||
u8 inverse_curiosity_factor;
|
||||
u8 tx_fail_low_th;
|
||||
u8 tx_fail_high_th;
|
||||
u8 per_alpha_shift;
|
||||
u8 per_add_shift;
|
||||
u8 per_beta1_shift;
|
||||
u8 per_beta2_shift;
|
||||
u8 rate_check_up;
|
||||
u8 rate_check_down;
|
||||
u8 rate_retry_policy[ACX_RATE_MGMT_NUM_OF_RATES];
|
||||
u8 padding2[2];
|
||||
} __packed;
|
||||
|
||||
enum {
|
||||
ACX_WAKE_UP_CONDITIONS = 0x0002,
|
||||
ACX_MEM_CFG = 0x0003,
|
||||
|
@@ -1268,10 +1151,7 @@ enum {
|
|||
ACX_AC_CFG = 0x0007,
|
||||
ACX_MEM_MAP = 0x0008,
|
||||
ACX_AID = 0x000A,
|
||||
/* ACX_FW_REV is missing in the ref driver, but seems to work */
|
||||
ACX_FW_REV = 0x000D,
|
||||
ACX_MEDIUM_USAGE = 0x000F,
|
||||
ACX_RX_CFG = 0x0010,
|
||||
ACX_TX_QUEUE_CFG = 0x0011, /* FIXME: only used by wl1251 */
|
||||
ACX_STATISTICS = 0x0013, /* Debug API */
|
||||
ACX_PWR_CONSUMPTION_STATISTICS = 0x0014,
|
||||
|
@@ -1279,7 +1159,6 @@ enum {
|
|||
ACX_TID_CFG = 0x001A,
|
||||
ACX_PS_RX_STREAMING = 0x001B,
|
||||
ACX_BEACON_FILTER_OPT = 0x001F,
|
||||
ACX_AP_BEACON_FILTER_OPT = 0x0020,
|
||||
ACX_NOISE_HIST = 0x0021,
|
||||
ACX_HDK_VERSION = 0x0022, /* ??? */
|
||||
ACX_PD_THRESHOLD = 0x0023,
|
||||
|
@@ -1287,7 +1166,6 @@ enum {
|
|||
ACX_CCA_THRESHOLD = 0x0025,
|
||||
ACX_EVENT_MBOX_MASK = 0x0026,
|
||||
ACX_CONN_MONIT_PARAMS = 0x002D,
|
||||
ACX_CONS_TX_FAILURE = 0x002F,
|
||||
ACX_BCN_DTIM_OPTIONS = 0x0031,
|
||||
ACX_SG_ENABLE = 0x0032,
|
||||
ACX_SG_CFG = 0x0033,
|
||||
|
@@ -1314,11 +1192,14 @@ enum {
|
|||
ACX_RSSI_SNR_WEIGHTS = 0x0052,
|
||||
ACX_KEEP_ALIVE_MODE = 0x0053,
|
||||
ACX_SET_KEEP_ALIVE_CONFIG = 0x0054,
|
||||
ACX_BA_SESSION_POLICY_CFG = 0x0055,
|
||||
ACX_BA_SESSION_INIT_POLICY = 0x0055,
|
||||
ACX_BA_SESSION_RX_SETUP = 0x0056,
|
||||
ACX_PEER_HT_CAP = 0x0057,
|
||||
ACX_HT_BSS_OPERATION = 0x0058,
|
||||
ACX_COEX_ACTIVITY = 0x0059,
|
||||
ACX_BURST_MODE = 0x005C,
|
||||
ACX_SET_RATE_MGMT_PARAMS = 0x005D,
|
||||
ACX_SET_RATE_ADAPT_PARAMS = 0x0060,
|
||||
ACX_SET_DCO_ITRIM_PARAMS = 0x0061,
|
||||
ACX_GEN_FW_CMD = 0x0070,
|
||||
ACX_HOST_IF_CFG_BITMAP = 0x0071,
|
||||
|
@@ -1342,7 +1223,6 @@ int wl1271_acx_feature_cfg(struct wl1271 *wl);
|
|||
int wl1271_acx_mem_map(struct wl1271 *wl,
|
||||
struct acx_header *mem_map, size_t len);
|
||||
int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl);
|
||||
int wl1271_acx_rx_config(struct wl1271 *wl, u32 config, u32 filter);
|
||||
int wl1271_acx_pd_threshold(struct wl1271 *wl);
|
||||
int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time);
|
||||
int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
|
||||
|
@@ -1354,8 +1234,7 @@ int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
|
|||
int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
|
||||
int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable);
|
||||
int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable);
|
||||
int wl1271_acx_sta_sg_cfg(struct wl1271 *wl);
|
||||
int wl1271_acx_ap_sg_cfg(struct wl1271 *wl);
|
||||
int wl12xx_acx_sg_cfg(struct wl1271 *wl);
|
||||
int wl1271_acx_cca_threshold(struct wl1271 *wl);
|
||||
int wl1271_acx_bcn_dtim_options(struct wl1271 *wl);
|
||||
int wl1271_acx_aid(struct wl1271 *wl, u16 aid);
|
||||
|
@@ -1374,8 +1253,7 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
|
|||
u32 apsd_conf0, u32 apsd_conf1);
|
||||
int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold);
|
||||
int wl1271_acx_tx_config_options(struct wl1271 *wl);
|
||||
int wl1271_acx_ap_mem_cfg(struct wl1271 *wl);
|
||||
int wl1271_acx_sta_mem_cfg(struct wl1271 *wl);
|
||||
int wl12xx_acx_mem_cfg(struct wl1271 *wl);
|
||||
int wl1271_acx_init_mem_config(struct wl1271 *wl);
|
||||
int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap);
|
||||
int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
|
||||
|
@@ -1390,20 +1268,18 @@ int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
|
|||
int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl);
|
||||
int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
|
||||
struct ieee80211_sta_ht_cap *ht_cap,
|
||||
bool allow_ht_operation);
|
||||
bool allow_ht_operation, u8 hlid);
|
||||
int wl1271_acx_set_ht_information(struct wl1271 *wl,
|
||||
u16 ht_operation_mode);
|
||||
int wl1271_acx_set_ba_session(struct wl1271 *wl,
|
||||
enum ieee80211_back_parties direction,
|
||||
u8 tid_index, u8 policy);
|
||||
int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
|
||||
bool enable);
|
||||
int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl);
|
||||
int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
|
||||
u16 ssn, bool enable, u8 peer_hlid);
|
||||
int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
|
||||
int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable);
|
||||
int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl);
|
||||
int wl1271_acx_config_ps(struct wl1271 *wl);
|
||||
int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
|
||||
int wl1271_acx_set_ap_beacon_filter(struct wl1271 *wl, bool enable);
|
||||
int wl1271_acx_fm_coex(struct wl1271 *wl);
|
||||
int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
|
||||
|
||||
#endif /* __WL1271_ACX_H__ */
|
||||
|
|
|
@@ -107,16 +107,6 @@ static unsigned int wl12xx_get_fw_ver_quirks(struct wl1271 *wl)
|
|||
unsigned int quirks = 0;
|
||||
unsigned int *fw_ver = wl->chip.fw_ver;
|
||||
|
||||
/* Only for wl127x */
|
||||
if ((fw_ver[FW_VER_CHIP] == FW_VER_CHIP_WL127X) &&
|
||||
/* Check STA version */
|
||||
(((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) &&
|
||||
(fw_ver[FW_VER_MINOR] < FW_VER_MINOR_1_SPARE_STA_MIN)) ||
|
||||
/* Check AP version */
|
||||
((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP) &&
|
||||
(fw_ver[FW_VER_MINOR] < FW_VER_MINOR_1_SPARE_AP_MIN))))
|
||||
quirks |= WL12XX_QUIRK_USE_2_SPARE_BLOCKS;
|
||||
|
||||
/* Only new station firmwares support routing fw logs to the host */
|
||||
if ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) &&
|
||||
(fw_ver[FW_VER_MINOR] < FW_VER_MINOR_FWLOG_STA_MIN))
|
||||
|
@@ -504,21 +494,18 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
|
|||
wl->event_mask = BSS_LOSE_EVENT_ID |
|
||||
SCAN_COMPLETE_EVENT_ID |
|
||||
PS_REPORT_EVENT_ID |
|
||||
JOIN_EVENT_COMPLETE_ID |
|
||||
DISCONNECT_EVENT_COMPLETE_ID |
|
||||
RSSI_SNR_TRIGGER_0_EVENT_ID |
|
||||
PSPOLL_DELIVERY_FAILURE_EVENT_ID |
|
||||
SOFT_GEMINI_SENSE_EVENT_ID |
|
||||
PERIODIC_SCAN_REPORT_EVENT_ID |
|
||||
PERIODIC_SCAN_COMPLETE_EVENT_ID;
|
||||
|
||||
if (wl->bss_type == BSS_TYPE_AP_BSS)
|
||||
wl->event_mask |= STA_REMOVE_COMPLETE_EVENT_ID |
|
||||
INACTIVE_STA_EVENT_ID |
|
||||
MAX_TX_RETRY_EVENT_ID;
|
||||
else
|
||||
wl->event_mask |= DUMMY_PACKET_EVENT_ID |
|
||||
BA_SESSION_RX_CONSTRAINT_EVENT_ID;
|
||||
PERIODIC_SCAN_COMPLETE_EVENT_ID |
|
||||
DUMMY_PACKET_EVENT_ID |
|
||||
PEER_REMOVE_COMPLETE_EVENT_ID |
|
||||
BA_SESSION_RX_CONSTRAINT_EVENT_ID |
|
||||
REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
|
||||
INACTIVE_STA_EVENT_ID |
|
||||
MAX_TX_RETRY_EVENT_ID;
|
||||
|
||||
ret = wl1271_event_unmask(wl);
|
||||
if (ret < 0) {
|
||||
|
@@ -549,13 +536,13 @@ static void wl1271_boot_hw_version(struct wl1271 *wl)
|
|||
{
|
||||
u32 fuse;
|
||||
|
||||
fuse = wl1271_top_reg_read(wl, REG_FUSE_DATA_2_1);
|
||||
if (wl->chip.id == CHIP_ID_1283_PG20)
|
||||
fuse = wl1271_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1);
|
||||
else
|
||||
fuse = wl1271_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1);
|
||||
fuse = (fuse & PG_VER_MASK) >> PG_VER_OFFSET;
|
||||
|
||||
wl->hw_pg_ver = (s8)fuse;
|
||||
|
||||
if (((wl->hw_pg_ver & PG_MAJOR_VER_MASK) >> PG_MAJOR_VER_OFFSET) < 3)
|
||||
wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION;
|
||||
}
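
wl1271_boot_hw_version() now picks the fuse register by chip id and then extracts the PG version with the PG_VER_MASK/PG_VER_OFFSET pair defined in the boot header hunk further down. A standalone sketch of that extraction (plain C; the raw fuse value is made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define PG_VER_MASK   0x3c  /* same values as the boot header hunk below */
#define PG_VER_OFFSET 2

/* extract the hardware PG version field from a raw fuse register value */
static int8_t pg_ver_from_fuse(uint32_t fuse)
{
	return (int8_t)((fuse & PG_VER_MASK) >> PG_VER_OFFSET);
}

int main(void)
{
	uint32_t fuse = 0x2a;  /* made-up raw register value for illustration */

	printf("hw_pg_ver = %d\n", pg_ver_from_fuse(fuse));  /* (0x2a & 0x3c) >> 2 = 10 */
	return 0;
}
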
|
||||
|
||||
static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl)
|
||||
|
@@ -696,7 +683,8 @@ static int wl127x_boot_clk(struct wl1271 *wl)
|
|||
u32 pause;
|
||||
u32 clk;
|
||||
|
||||
wl1271_boot_hw_version(wl);
|
||||
if (((wl->hw_pg_ver & PG_MAJOR_VER_MASK) >> PG_MAJOR_VER_OFFSET) < 3)
|
||||
wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION;
|
||||
|
||||
if (wl->ref_clock == CONF_REF_CLK_19_2_E ||
|
||||
wl->ref_clock == CONF_REF_CLK_38_4_E ||
|
||||
|
@@ -750,6 +738,8 @@ int wl1271_load_firmware(struct wl1271 *wl)
|
|||
u32 tmp, clk;
|
||||
int selected_clock = -1;
|
||||
|
||||
wl1271_boot_hw_version(wl);
|
||||
|
||||
if (wl->chip.id == CHIP_ID_1283_PG20) {
|
||||
ret = wl128x_boot_clk(wl, &selected_clock);
|
||||
if (ret < 0)
|
||||
|
@@ -852,9 +842,6 @@ int wl1271_boot(struct wl1271 *wl)
|
|||
/* Enable firmware interrupts now */
|
||||
wl1271_boot_enable_interrupts(wl);
|
||||
|
||||
/* set the wl1271 default filters */
|
||||
wl1271_set_default_filters(wl);
|
||||
|
||||
wl1271_event_mbox_config(wl);
|
||||
|
||||
out:
|
||||
|
|
|
@@ -55,7 +55,8 @@ struct wl1271_static_data {
|
|||
#define OCP_REG_CLK_POLARITY 0x0cb2
|
||||
#define OCP_REG_CLK_PULL 0x0cb4
|
||||
|
||||
#define REG_FUSE_DATA_2_1 0x050a
|
||||
#define WL127X_REG_FUSE_DATA_2_1 0x050a
|
||||
#define WL128X_REG_FUSE_DATA_2_1 0x2152
|
||||
#define PG_VER_MASK 0x3c
|
||||
#define PG_VER_OFFSET 2
|
||||
|
||||
|
|
|
@@ -363,63 +363,470 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
|
||||
int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id)
|
||||
{
|
||||
struct wl1271_cmd_join *join;
|
||||
int ret, i;
|
||||
u8 *bssid;
|
||||
struct wl12xx_cmd_role_enable *cmd;
|
||||
int ret;
|
||||
|
||||
join = kzalloc(sizeof(*join), GFP_KERNEL);
|
||||
if (!join) {
|
||||
wl1271_debug(DEBUG_CMD, "cmd role enable");
|
||||
|
||||
if (WARN_ON(*role_id != WL12XX_INVALID_ROLE_ID))
|
||||
return -EBUSY;
|
||||
|
||||
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
|
||||
if (!cmd) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "cmd join");
|
||||
|
||||
/* Reverse order BSSID */
|
||||
bssid = (u8 *) &join->bssid_lsb;
|
||||
for (i = 0; i < ETH_ALEN; i++)
|
||||
bssid[i] = wl->bssid[ETH_ALEN - i - 1];
|
||||
|
||||
join->rx_config_options = cpu_to_le32(wl->rx_config);
|
||||
join->rx_filter_options = cpu_to_le32(wl->rx_filter);
|
||||
join->bss_type = bss_type;
|
||||
join->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
|
||||
join->supported_rate_set = cpu_to_le32(wl->rate_set);
|
||||
|
||||
if (wl->band == IEEE80211_BAND_5GHZ)
|
||||
join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ;
|
||||
|
||||
join->beacon_interval = cpu_to_le16(wl->beacon_int);
|
||||
join->dtim_interval = WL1271_DEFAULT_DTIM_PERIOD;
|
||||
|
||||
join->channel = wl->channel;
|
||||
join->ssid_len = wl->ssid_len;
|
||||
memcpy(join->ssid, wl->ssid, wl->ssid_len);
|
||||
|
||||
join->ctrl |= wl->session_counter << WL1271_JOIN_CMD_TX_SESSION_OFFSET;
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "cmd join: basic_rate_set=0x%x, rate_set=0x%x",
|
||||
join->basic_rate_set, join->supported_rate_set);
|
||||
|
||||
ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0);
|
||||
if (ret < 0) {
|
||||
wl1271_error("failed to initiate cmd join");
|
||||
/* get role id */
|
||||
cmd->role_id = find_first_zero_bit(wl->roles_map, WL12XX_MAX_ROLES);
|
||||
if (cmd->role_id >= WL12XX_MAX_ROLES) {
|
||||
ret = -EBUSY;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
ret = wl1271_cmd_wait_for_event(wl, JOIN_EVENT_COMPLETE_ID);
|
||||
if (ret < 0)
|
||||
wl1271_error("cmd join event completion error");
|
||||
memcpy(cmd->mac_address, wl->mac_addr, ETH_ALEN);
|
||||
cmd->role_type = role_type;
|
||||
|
||||
ret = wl1271_cmd_send(wl, CMD_ROLE_ENABLE, cmd, sizeof(*cmd), 0);
|
||||
if (ret < 0) {
|
||||
wl1271_error("failed to initiate cmd role enable");
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
__set_bit(cmd->role_id, wl->roles_map);
|
||||
*role_id = cmd->role_id;
|
||||
|
||||
out_free:
|
||||
kfree(join);
|
||||
kfree(cmd);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id)
|
||||
{
|
||||
struct wl12xx_cmd_role_disable *cmd;
|
||||
int ret;
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "cmd role disable");
|
||||
|
||||
if (WARN_ON(*role_id == WL12XX_INVALID_ROLE_ID))
|
||||
return -ENOENT;
|
||||
|
||||
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
|
||||
if (!cmd) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
cmd->role_id = *role_id;
|
||||
|
||||
ret = wl1271_cmd_send(wl, CMD_ROLE_DISABLE, cmd, sizeof(*cmd), 0);
|
||||
if (ret < 0) {
|
||||
wl1271_error("failed to initiate cmd role disable");
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
__clear_bit(*role_id, wl->roles_map);
|
||||
*role_id = WL12XX_INVALID_ROLE_ID;
|
||||
|
||||
out_free:
|
||||
kfree(cmd);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
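
The enable/disable pair manages role ids through the wl->roles_map bitmap: enable claims the first clear bit and hands it back through *role_id, disable clears the bit and resets the id to the invalid marker. A standalone sketch of that allocation pattern (user-space C, not kernel code; MAX_ROLES, INVALID_ROLE_ID and the open-coded bit operations stand in for the WL12XX constants and the kernel's find_first_zero_bit()/__set_bit() helpers):

#include <stdint.h>
#include <stdio.h>

#define MAX_ROLES       8     /* stand-in for WL12XX_MAX_ROLES */
#define INVALID_ROLE_ID 0xff  /* stand-in for WL12XX_INVALID_ROLE_ID */

/* claim the lowest free role id, or INVALID_ROLE_ID if the map is full */
static uint8_t role_alloc(uint8_t *roles_map)
{
	for (uint8_t id = 0; id < MAX_ROLES; id++) {
		if (!(*roles_map & (1u << id))) {
			*roles_map |= 1u << id;      /* __set_bit() equivalent */
			return id;
		}
	}
	return INVALID_ROLE_ID;
}

/* release a previously claimed role id and mark it invalid again */
static void role_free(uint8_t *roles_map, uint8_t *role_id)
{
	if (*role_id == INVALID_ROLE_ID)
		return;
	*roles_map &= ~(1u << *role_id);             /* __clear_bit() equivalent */
	*role_id = INVALID_ROLE_ID;
}

int main(void)
{
	uint8_t roles_map = 0;
	uint8_t sta_role = role_alloc(&roles_map);
	uint8_t ap_role = role_alloc(&roles_map);

	printf("sta=%u ap=%u map=0x%02x\n",
	       (unsigned)sta_role, (unsigned)ap_role, (unsigned)roles_map);
	role_free(&roles_map, &sta_role);
	printf("after free: sta=0x%02x map=0x%02x\n",
	       (unsigned)sta_role, (unsigned)roles_map);
	return 0;
}
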
|
||||
|
||||
static int wl12xx_allocate_link(struct wl1271 *wl, u8 *hlid)
|
||||
{
|
||||
u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS);
|
||||
if (link >= WL12XX_MAX_LINKS)
|
||||
return -EBUSY;
|
||||
|
||||
__set_bit(link, wl->links_map);
|
||||
*hlid = link;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void wl12xx_free_link(struct wl1271 *wl, u8 *hlid)
|
||||
{
|
||||
if (*hlid == WL12XX_INVALID_LINK_ID)
|
||||
return;
|
||||
|
||||
__clear_bit(*hlid, wl->links_map);
|
||||
*hlid = WL12XX_INVALID_LINK_ID;
|
||||
}
|
||||
|
||||
static int wl12xx_get_new_session_id(struct wl1271 *wl)
|
||||
{
|
||||
if (wl->session_counter >= SESSION_COUNTER_MAX)
|
||||
wl->session_counter = 0;
|
||||
|
||||
wl->session_counter++;
|
||||
|
||||
return wl->session_counter;
|
||||
}
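
wl12xx_get_new_session_id() wraps the per-device session counter so the value handed to the firmware always stays in the range 1..SESSION_COUNTER_MAX. A standalone sketch of the same wrap logic (plain C; the limit of 7 is a stand-in, the driver defines its own SESSION_COUNTER_MAX):

#include <stdio.h>

#define SESSION_COUNTER_MAX 7  /* stand-in; the driver defines its own limit */

static int session_counter;    /* models wl->session_counter */

/* same wrap rule as above: returned ids stay in 1..SESSION_COUNTER_MAX */
static int get_new_session_id(void)
{
	if (session_counter >= SESSION_COUNTER_MAX)
		session_counter = 0;
	session_counter++;
	return session_counter;
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		printf("%d ", get_new_session_id());
	printf("\n");  /* prints: 1 2 3 4 5 6 7 1 2 3 */
	return 0;
}
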
|
||||
|
||||
int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
|
||||
{
|
||||
struct wl12xx_cmd_role_start *cmd;
|
||||
int ret;
|
||||
|
||||
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
|
||||
if (!cmd) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wl->dev_role_id);
|
||||
|
||||
cmd->role_id = wl->dev_role_id;
|
||||
if (wl->band == IEEE80211_BAND_5GHZ)
|
||||
cmd->band = WL12XX_BAND_5GHZ;
|
||||
cmd->channel = wl->channel;
|
||||
|
||||
if (wl->dev_hlid == WL12XX_INVALID_LINK_ID) {
|
||||
ret = wl12xx_allocate_link(wl, &wl->dev_hlid);
|
||||
if (ret)
|
||||
goto out_free;
|
||||
}
|
||||
cmd->device.hlid = wl->dev_hlid;
|
||||
cmd->device.session = wl->session_counter;
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d",
|
||||
cmd->role_id, cmd->device.hlid, cmd->device.session);
|
||||
|
||||
ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
|
||||
if (ret < 0) {
|
||||
wl1271_error("failed to initiate cmd role enable");
|
||||
goto err_hlid;
|
||||
}
|
||||
|
||||
goto out_free;
|
||||
|
||||
err_hlid:
|
||||
/* clear links on error */
|
||||
__clear_bit(wl->dev_hlid, wl->links_map);
|
||||
wl->dev_hlid = WL12XX_INVALID_LINK_ID;
|
||||
|
||||
|
||||
out_free:
|
||||
kfree(cmd);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
|
||||
{
|
||||
struct wl12xx_cmd_role_stop *cmd;
|
||||
int ret;
|
||||
|
||||
if (WARN_ON(wl->dev_hlid == WL12XX_INVALID_LINK_ID))
|
||||
return -EINVAL;
|
||||
|
||||
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
|
||||
if (!cmd) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "cmd role stop dev");
|
||||
|
||||
cmd->role_id = wl->dev_role_id;
|
||||
cmd->disc_type = DISCONNECT_IMMEDIATE;
|
||||
cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
|
||||
|
||||
ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0);
|
||||
if (ret < 0) {
|
||||
wl1271_error("failed to initiate cmd role stop");
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
ret = wl1271_cmd_wait_for_event(wl, DISCONNECT_EVENT_COMPLETE_ID);
|
||||
if (ret < 0) {
|
||||
wl1271_error("cmd role stop dev event completion error");
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
wl12xx_free_link(wl, &wl->dev_hlid);
|
||||
|
||||
out_free:
|
||||
kfree(cmd);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
|
||||
{
|
||||
struct wl12xx_cmd_role_start *cmd;
|
||||
int ret;
|
||||
|
||||
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
|
||||
if (!cmd) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wl->role_id);
|
||||
|
||||
cmd->role_id = wl->role_id;
|
||||
if (wl->band == IEEE80211_BAND_5GHZ)
|
||||
cmd->band = WL12XX_BAND_5GHZ;
|
||||
cmd->channel = wl->channel;
|
||||
cmd->sta.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
|
||||
cmd->sta.beacon_interval = cpu_to_le16(wl->beacon_int);
|
||||
cmd->sta.ssid_type = WL12XX_SSID_TYPE_ANY;
|
||||
cmd->sta.ssid_len = wl->ssid_len;
|
||||
memcpy(cmd->sta.ssid, wl->ssid, wl->ssid_len);
|
||||
memcpy(cmd->sta.bssid, wl->bssid, ETH_ALEN);
|
||||
cmd->sta.local_rates = cpu_to_le32(wl->rate_set);
|
||||
|
||||
if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) {
|
||||
ret = wl12xx_allocate_link(wl, &wl->sta_hlid);
|
||||
if (ret)
|
||||
goto out_free;
|
||||
}
|
||||
cmd->sta.hlid = wl->sta_hlid;
|
||||
cmd->sta.session = wl12xx_get_new_session_id(wl);
|
||||
cmd->sta.remote_rates = cpu_to_le32(wl->rate_set);
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
|
||||
"basic_rate_set: 0x%x, remote_rates: 0x%x",
|
||||
wl->role_id, cmd->sta.hlid, cmd->sta.session,
|
||||
wl->basic_rate_set, wl->rate_set);
|
||||
|
||||
ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
|
||||
if (ret < 0) {
|
||||
wl1271_error("failed to initiate cmd role start sta");
|
||||
goto err_hlid;
|
||||
}
|
||||
|
||||
goto out_free;
|
||||
|
||||
err_hlid:
|
||||
/* clear links on error. */
|
||||
wl12xx_free_link(wl, &wl->sta_hlid);
|
||||
|
||||
out_free:
|
||||
kfree(cmd);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* use this function to stop ibss as well */
|
||||
int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
|
||||
{
|
||||
struct wl12xx_cmd_role_stop *cmd;
|
||||
int ret;
|
||||
|
||||
if (WARN_ON(wl->sta_hlid == WL12XX_INVALID_LINK_ID))
|
||||
return -EINVAL;
|
||||
|
||||
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
|
||||
if (!cmd) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wl->role_id);
|
||||
|
||||
cmd->role_id = wl->role_id;
|
||||
cmd->disc_type = DISCONNECT_IMMEDIATE;
|
||||
cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
|
||||
|
||||
ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0);
|
||||
if (ret < 0) {
|
||||
wl1271_error("failed to initiate cmd role stop sta");
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
wl12xx_free_link(wl, &wl->sta_hlid);
|
||||
|
||||
out_free:
|
||||
kfree(cmd);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
|
||||
{
|
||||
struct wl12xx_cmd_role_start *cmd;
|
||||
struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
|
||||
int ret;
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wl->role_id);
|
||||
|
||||
/*
|
||||
* We currently do not support hidden SSID. The real SSID
|
||||
* should be fetched from mac80211 first.
|
||||
*/
|
||||
if (wl->ssid_len == 0) {
|
||||
wl1271_warning("Hidden SSID currently not supported for AP");
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
|
||||
if (!cmd) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = wl12xx_allocate_link(wl, &wl->ap_global_hlid);
|
||||
if (ret < 0)
|
||||
goto out_free;
|
||||
|
||||
ret = wl12xx_allocate_link(wl, &wl->ap_bcast_hlid);
|
||||
if (ret < 0)
|
||||
goto out_free_global;
|
||||
|
||||
cmd->role_id = wl->role_id;
|
||||
cmd->ap.aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period);
|
||||
cmd->ap.bss_index = WL1271_AP_BSS_INDEX;
|
||||
cmd->ap.global_hlid = wl->ap_global_hlid;
|
||||
cmd->ap.broadcast_hlid = wl->ap_bcast_hlid;
|
||||
cmd->ap.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
|
||||
cmd->ap.beacon_interval = cpu_to_le16(wl->beacon_int);
|
||||
cmd->ap.dtim_interval = bss_conf->dtim_period;
|
||||
cmd->ap.beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
|
||||
cmd->channel = wl->channel;
|
||||
cmd->ap.ssid_len = wl->ssid_len;
|
||||
cmd->ap.ssid_type = WL12XX_SSID_TYPE_PUBLIC;
|
||||
memcpy(cmd->ap.ssid, wl->ssid, wl->ssid_len);
|
||||
cmd->ap.local_rates = cpu_to_le32(0xffffffff);
|
||||
|
||||
switch (wl->band) {
|
||||
case IEEE80211_BAND_2GHZ:
|
||||
cmd->band = RADIO_BAND_2_4GHZ;
|
||||
break;
|
||||
case IEEE80211_BAND_5GHZ:
|
||||
cmd->band = RADIO_BAND_5GHZ;
|
||||
break;
|
||||
default:
|
||||
wl1271_warning("ap start - unknown band: %d", (int)wl->band);
|
||||
cmd->band = RADIO_BAND_2_4GHZ;
|
||||
break;
|
||||
}
|
||||
|
||||
ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
|
||||
if (ret < 0) {
|
||||
wl1271_error("failed to initiate cmd role start ap");
|
||||
goto out_free_bcast;
|
||||
}
|
||||
|
||||
goto out_free;
|
||||
|
||||
out_free_bcast:
|
||||
wl12xx_free_link(wl, &wl->ap_bcast_hlid);
|
||||
|
||||
out_free_global:
|
||||
wl12xx_free_link(wl, &wl->ap_global_hlid);
|
||||
|
||||
out_free:
|
||||
kfree(cmd);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
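
role_start_ap acquires two links (global, then broadcast) and, when the ROLE_START command fails, releases them in reverse order through the staggered goto labels. A standalone sketch of that acquire-in-order / release-in-reverse idiom (plain C; the resource names and helpers are illustrative, not driver symbols):

#include <stdio.h>

static int acquire(const char *name)  { printf("acquire %s\n", name); return 0; }
static void release(const char *name) { printf("release %s\n", name); }
static int send_start_cmd(void)       { return -1; /* pretend the firmware rejected it */ }

static int start_role(void)
{
	int ret;

	ret = acquire("global link");
	if (ret < 0)
		goto out;

	ret = acquire("broadcast link");
	if (ret < 0)
		goto out_free_global;

	ret = send_start_cmd();
	if (ret < 0)
		goto out_free_bcast;    /* unwind in reverse acquisition order */

	return 0;

out_free_bcast:
	release("broadcast link");
out_free_global:
	release("global link");
out:
	return ret;
}

int main(void)
{
	printf("start_role() = %d\n", start_role());
	return 0;
}
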
|
||||
|
||||
int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
|
||||
{
|
||||
struct wl12xx_cmd_role_stop *cmd;
|
||||
int ret;
|
||||
|
||||
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
|
||||
if (!cmd) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wl->role_id);
|
||||
|
||||
cmd->role_id = wl->role_id;
|
||||
|
||||
ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0);
|
||||
if (ret < 0) {
|
||||
wl1271_error("failed to initiate cmd role stop ap");
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
wl12xx_free_link(wl, &wl->ap_bcast_hlid);
|
||||
wl12xx_free_link(wl, &wl->ap_global_hlid);
|
||||
|
||||
out_free:
|
||||
kfree(cmd);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
|
||||
{
|
||||
struct wl12xx_cmd_role_start *cmd;
|
||||
struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
|
||||
int ret;
|
||||
|
||||
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
|
||||
if (!cmd) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wl->role_id);
|
||||
|
||||
cmd->role_id = wl->role_id;
|
||||
if (wl->band == IEEE80211_BAND_5GHZ)
|
||||
cmd->band = WL12XX_BAND_5GHZ;
|
||||
cmd->channel = wl->channel;
|
||||
cmd->ibss.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
|
||||
cmd->ibss.beacon_interval = cpu_to_le16(wl->beacon_int);
|
||||
cmd->ibss.dtim_interval = bss_conf->dtim_period;
|
||||
cmd->ibss.ssid_type = WL12XX_SSID_TYPE_ANY;
|
||||
cmd->ibss.ssid_len = wl->ssid_len;
|
||||
memcpy(cmd->ibss.ssid, wl->ssid, wl->ssid_len);
|
||||
memcpy(cmd->ibss.bssid, wl->bssid, ETH_ALEN);
|
||||
cmd->sta.local_rates = cpu_to_le32(wl->rate_set);
|
||||
|
||||
if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) {
|
||||
ret = wl12xx_allocate_link(wl, &wl->sta_hlid);
|
||||
if (ret)
|
||||
goto out_free;
|
||||
}
|
||||
cmd->ibss.hlid = wl->sta_hlid;
|
||||
cmd->ibss.remote_rates = cpu_to_le32(wl->rate_set);
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
|
||||
"basic_rate_set: 0x%x, remote_rates: 0x%x",
|
||||
wl->role_id, cmd->sta.hlid, cmd->sta.session,
|
||||
wl->basic_rate_set, wl->rate_set);
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "wl->bssid = %pM", wl->bssid);
|
||||
|
||||
ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
|
||||
if (ret < 0) {
|
||||
wl1271_error("failed to initiate cmd role enable");
|
||||
goto err_hlid;
|
||||
}
|
||||
|
||||
goto out_free;
|
||||
|
||||
err_hlid:
|
||||
/* clear links on error. */
|
||||
wl12xx_free_link(wl, &wl->sta_hlid);
|
||||
|
||||
out_free:
|
||||
kfree(cmd);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* send test command to firmware
|
||||
*
|
||||
|
@@ -567,6 +974,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
|
|||
goto out;
|
||||
}
|
||||
|
||||
ps_params->role_id = wl->role_id;
|
||||
ps_params->ps_mode = ps_mode;
|
||||
|
||||
ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
|
||||
|
@@ -813,9 +1221,9 @@ int wl1271_build_qos_null_data(struct wl1271 *wl)
|
|||
wl->basic_rate);
|
||||
}
|
||||
|
||||
int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id)
|
||||
int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid)
|
||||
{
|
||||
struct wl1271_cmd_set_sta_keys *cmd;
|
||||
struct wl1271_cmd_set_keys *cmd;
|
||||
int ret = 0;
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "cmd set_default_wep_key %d", id);
|
||||
|
@@ -826,7 +1234,9 @@ int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id)
|
|||
goto out;
|
||||
}
|
||||
|
||||
cmd->id = id;
|
||||
cmd->hlid = hlid;
|
||||
cmd->key_id = id;
|
||||
cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
|
||||
cmd->key_action = cpu_to_le16(KEY_SET_ID);
|
||||
cmd->key_type = KEY_WEP;
|
||||
|
||||
|
@@ -842,52 +1252,31 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id)
|
||||
{
|
||||
struct wl1271_cmd_set_ap_keys *cmd;
|
||||
int ret = 0;
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "cmd set_ap_default_wep_key %d", id);
|
||||
|
||||
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
|
||||
if (!cmd) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
cmd->hlid = WL1271_AP_BROADCAST_HLID;
|
||||
cmd->key_id = id;
|
||||
cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
|
||||
cmd->key_action = cpu_to_le16(KEY_SET_ID);
|
||||
cmd->key_type = KEY_WEP;
|
||||
|
||||
ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
|
||||
if (ret < 0) {
|
||||
wl1271_warning("cmd set_ap_default_wep_key failed: %d", ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
kfree(cmd);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
|
||||
u8 key_size, const u8 *key, const u8 *addr,
|
||||
u32 tx_seq_32, u16 tx_seq_16)
|
||||
{
|
||||
struct wl1271_cmd_set_sta_keys *cmd;
|
||||
struct wl1271_cmd_set_keys *cmd;
|
||||
int ret = 0;
|
||||
|
||||
/* hlid might have already been deleted */
|
||||
if (wl->sta_hlid == WL12XX_INVALID_LINK_ID)
|
||||
return 0;
|
||||
|
||||
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
|
||||
if (!cmd) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (key_type != KEY_WEP)
|
||||
memcpy(cmd->addr, addr, ETH_ALEN);
|
||||
cmd->hlid = wl->sta_hlid;
|
||||
|
||||
if (key_type == KEY_WEP)
|
||||
cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
|
||||
else if (is_broadcast_ether_addr(addr))
|
||||
cmd->lid_key_type = BROADCAST_LID_TYPE;
|
||||
else
|
||||
cmd->lid_key_type = UNICAST_LID_TYPE;
|
||||
|
||||
cmd->key_action = cpu_to_le16(action);
|
||||
cmd->key_size = key_size;
|
||||
|
@@ -896,10 +1285,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
|
|||
cmd->ac_seq_num16[0] = cpu_to_le16(tx_seq_16);
|
||||
cmd->ac_seq_num32[0] = cpu_to_le32(tx_seq_32);
|
||||
|
||||
/* we have only one SSID profile */
|
||||
cmd->ssid_profile = 0;
|
||||
|
||||
cmd->id = id;
|
||||
cmd->key_id = id;
|
||||
|
||||
if (key_type == KEY_TKIP) {
|
||||
/*
|
||||
|
@@ -930,11 +1316,15 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* TODO: merge with sta/ibss into 1 set_key function.
|
||||
* note there are slight diffs
|
||||
*/
|
||||
int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
|
||||
u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
|
||||
u16 tx_seq_16)
|
||||
{
|
||||
struct wl1271_cmd_set_ap_keys *cmd;
|
||||
struct wl1271_cmd_set_keys *cmd;
|
||||
int ret = 0;
|
||||
u8 lid_type;
|
||||
|
||||
|
@@ -942,7 +1332,7 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
|
|||
if (!cmd)
|
||||
return -ENOMEM;
|
||||
|
||||
if (hlid == WL1271_AP_BROADCAST_HLID) {
|
||||
if (hlid == wl->ap_bcast_hlid) {
|
||||
if (key_type == KEY_WEP)
|
||||
lid_type = WEP_DEFAULT_LID_TYPE;
|
||||
else
|
||||
|
@@ -991,47 +1381,12 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
int wl1271_cmd_disconnect(struct wl1271 *wl)
|
||||
int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid)
|
||||
{
|
||||
struct wl1271_cmd_disconnect *cmd;
|
||||
struct wl12xx_cmd_set_peer_state *cmd;
|
||||
int ret = 0;
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "cmd disconnect");
|
||||
|
||||
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
|
||||
if (!cmd) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
cmd->rx_config_options = cpu_to_le32(wl->rx_config);
|
||||
cmd->rx_filter_options = cpu_to_le32(wl->rx_filter);
|
||||
/* disconnect reason is not used in immediate disconnections */
|
||||
cmd->type = DISCONNECT_IMMEDIATE;
|
||||
|
||||
ret = wl1271_cmd_send(wl, CMD_DISCONNECT, cmd, sizeof(*cmd), 0);
|
||||
if (ret < 0) {
|
||||
wl1271_error("failed to send disconnect command");
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
ret = wl1271_cmd_wait_for_event(wl, DISCONNECT_EVENT_COMPLETE_ID);
|
||||
if (ret < 0)
|
||||
wl1271_error("cmd disconnect event completion error");
|
||||
|
||||
out_free:
|
||||
kfree(cmd);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int wl1271_cmd_set_sta_state(struct wl1271 *wl)
|
||||
{
|
||||
struct wl1271_cmd_set_sta_state *cmd;
|
||||
int ret = 0;
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "cmd set sta state");
|
||||
wl1271_debug(DEBUG_CMD, "cmd set peer state (hlid=%d)", hlid);
|
||||
|
||||
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
|
||||
if (!cmd) {
|
||||
|
@@ -1039,11 +1394,12 @@ int wl1271_cmd_set_sta_state(struct wl1271 *wl)
|
|||
goto out;
|
||||
}
|
||||
|
||||
cmd->hlid = hlid;
|
||||
cmd->state = WL1271_CMD_STA_STATE_CONNECTED;
|
||||
|
||||
ret = wl1271_cmd_send(wl, CMD_SET_STA_STATE, cmd, sizeof(*cmd), 0);
|
||||
ret = wl1271_cmd_send(wl, CMD_SET_PEER_STATE, cmd, sizeof(*cmd), 0);
|
||||
if (ret < 0) {
|
||||
wl1271_error("failed to send set STA state command");
|
||||
wl1271_error("failed to send set peer state command");
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
|
@@ -1054,105 +1410,13 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
int wl1271_cmd_start_bss(struct wl1271 *wl)
|
||||
int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
|
||||
{
|
||||
struct wl1271_cmd_bss_start *cmd;
|
||||
struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
|
||||
struct wl12xx_cmd_add_peer *cmd;
|
||||
int ret;
|
||||
u32 sta_rates;
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "cmd start bss");
|
||||
|
||||
/*
|
||||
* FIXME: We currently do not support hidden SSID. The real SSID
|
||||
* should be fetched from mac80211 first.
|
||||
*/
|
||||
if (wl->ssid_len == 0) {
|
||||
wl1271_warning("Hidden SSID currently not supported for AP");
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
|
||||
if (!cmd) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
memcpy(cmd->bssid, bss_conf->bssid, ETH_ALEN);
|
||||
|
||||
cmd->aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period);
|
||||
cmd->bss_index = WL1271_AP_BSS_INDEX;
|
||||
cmd->global_hlid = WL1271_AP_GLOBAL_HLID;
|
||||
cmd->broadcast_hlid = WL1271_AP_BROADCAST_HLID;
|
||||
cmd->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
|
||||
cmd->beacon_interval = cpu_to_le16(wl->beacon_int);
|
||||
cmd->dtim_interval = bss_conf->dtim_period;
|
||||
cmd->beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
|
||||
cmd->channel = wl->channel;
|
||||
cmd->ssid_len = wl->ssid_len;
|
||||
cmd->ssid_type = SSID_TYPE_PUBLIC;
|
||||
memcpy(cmd->ssid, wl->ssid, wl->ssid_len);
|
||||
|
||||
switch (wl->band) {
|
||||
case IEEE80211_BAND_2GHZ:
|
||||
cmd->band = RADIO_BAND_2_4GHZ;
|
||||
break;
|
||||
case IEEE80211_BAND_5GHZ:
|
||||
cmd->band = RADIO_BAND_5GHZ;
|
||||
break;
|
||||
default:
|
||||
wl1271_warning("bss start - unknown band: %d", (int)wl->band);
|
||||
cmd->band = RADIO_BAND_2_4GHZ;
|
||||
break;
|
||||
}
|
||||
|
||||
ret = wl1271_cmd_send(wl, CMD_BSS_START, cmd, sizeof(*cmd), 0);
|
||||
if (ret < 0) {
|
||||
wl1271_error("failed to initiate cmd start bss");
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
out_free:
|
||||
kfree(cmd);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int wl1271_cmd_stop_bss(struct wl1271 *wl)
|
||||
{
|
||||
struct wl1271_cmd_bss_start *cmd;
|
||||
int ret;
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "cmd stop bss");
|
||||
|
||||
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
|
||||
if (!cmd) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
cmd->bss_index = WL1271_AP_BSS_INDEX;
|
||||
|
||||
ret = wl1271_cmd_send(wl, CMD_BSS_STOP, cmd, sizeof(*cmd), 0);
|
||||
if (ret < 0) {
|
||||
wl1271_error("failed to initiate cmd stop bss");
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
out_free:
|
||||
kfree(cmd);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
|
||||
{
|
||||
struct wl1271_cmd_add_sta *cmd;
|
||||
int ret;
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "cmd add sta %d", (int)hlid);
|
||||
wl1271_debug(DEBUG_CMD, "cmd add peer %d", (int)hlid);
|
||||
|
||||
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
|
||||
if (!cmd) {
|
||||
|
@@ -1169,14 +1433,18 @@ int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
|
|||
cmd->hlid = hlid;
|
||||
cmd->wmm = sta->wme ? 1 : 0;
|
||||
|
||||
cmd->supported_rates = cpu_to_le32(wl1271_tx_enabled_rates_get(wl,
|
||||
sta->supp_rates[wl->band]));
|
||||
sta_rates = sta->supp_rates[wl->band];
|
||||
if (sta->ht_cap.ht_supported)
|
||||
sta_rates |= sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET;
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "new sta rates: 0x%x", cmd->supported_rates);
|
||||
cmd->supported_rates =
|
||||
cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates));
|
||||
|
||||
ret = wl1271_cmd_send(wl, CMD_ADD_STA, cmd, sizeof(*cmd), 0);
|
||||
wl1271_debug(DEBUG_CMD, "new peer rates: 0x%x", cmd->supported_rates);
|
||||
|
||||
ret = wl1271_cmd_send(wl, CMD_ADD_PEER, cmd, sizeof(*cmd), 0);
|
||||
if (ret < 0) {
|
||||
wl1271_error("failed to initiate cmd add sta");
|
||||
wl1271_error("failed to initiate cmd add peer");
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
|
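
The add-peer hunk above folds the peer's HT MCS rx mask into its legacy rate bitmap before converting the result to a firmware rate set. A standalone sketch of that composition (plain C; the 16-bit shift is only a stand-in for the driver's HW_HT_RATES_OFFSET):

#include <stdint.h>
#include <stdio.h>

#define HT_RATES_OFFSET 16  /* stand-in for the driver's HW_HT_RATES_OFFSET */

/* fold the peer's first HT MCS rx mask into its legacy rate bitmap */
static uint32_t build_peer_rates(uint32_t legacy_rates, uint8_t mcs_rx_mask0,
				 int ht_supported)
{
	uint32_t rates = legacy_rates;

	if (ht_supported)
		rates |= (uint32_t)mcs_rx_mask0 << HT_RATES_OFFSET;
	return rates;
}

int main(void)
{
	/* legacy CCK/OFDM bits 0..11 set, MCS 0-7 supported */
	uint32_t rates = build_peer_rates(0x0fff, 0xff, 1);

	printf("peer rates = 0x%08x\n", (unsigned)rates);  /* 0x00ff0fff */
	return 0;
}
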
@@ -1187,12 +1455,12 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid)
|
||||
int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
|
||||
{
|
||||
struct wl1271_cmd_remove_sta *cmd;
|
||||
struct wl12xx_cmd_remove_peer *cmd;
|
||||
int ret;
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "cmd remove sta %d", (int)hlid);
|
||||
wl1271_debug(DEBUG_CMD, "cmd remove peer %d", (int)hlid);
|
||||
|
||||
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
|
||||
if (!cmd) {
|
||||
|
@@ -1205,9 +1473,9 @@ int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid)
|
|||
cmd->reason_opcode = 0;
|
||||
cmd->send_deauth_flag = 0;
|
||||
|
||||
ret = wl1271_cmd_send(wl, CMD_REMOVE_STA, cmd, sizeof(*cmd), 0);
|
||||
ret = wl1271_cmd_send(wl, CMD_REMOVE_PEER, cmd, sizeof(*cmd), 0);
|
||||
if (ret < 0) {
|
||||
wl1271_error("failed to initiate cmd remove sta");
|
||||
wl1271_error("failed to initiate cmd remove peer");
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
|
@@ -1215,7 +1483,8 @@ int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid)
|
|||
* We are ok with a timeout here. The event is sometimes not sent
|
||||
* due to a firmware bug.
|
||||
*/
|
||||
wl1271_cmd_wait_for_event_or_timeout(wl, STA_REMOVE_COMPLETE_EVENT_ID);
|
||||
wl1271_cmd_wait_for_event_or_timeout(wl,
|
||||
PEER_REMOVE_COMPLETE_EVENT_ID);
|
||||
|
||||
out_free:
|
||||
kfree(cmd);
|
||||
|
@@ -1307,3 +1576,115 @@ out_free:
|
|||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
|
||||
{
|
||||
struct wl12xx_cmd_roc *cmd;
|
||||
int ret = 0;
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wl->channel, role_id);
|
||||
|
||||
if (WARN_ON(role_id == WL12XX_INVALID_ROLE_ID))
|
||||
return -EINVAL;
|
||||
|
||||
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
|
||||
if (!cmd) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
cmd->role_id = role_id;
|
||||
cmd->channel = wl->channel;
|
||||
switch (wl->band) {
|
||||
case IEEE80211_BAND_2GHZ:
|
||||
cmd->band = RADIO_BAND_2_4GHZ;
|
||||
break;
|
||||
case IEEE80211_BAND_5GHZ:
|
||||
cmd->band = RADIO_BAND_5GHZ;
|
||||
break;
|
||||
default:
|
||||
wl1271_error("roc - unknown band: %d", (int)wl->band);
|
||||
ret = -EINVAL;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
|
||||
ret = wl1271_cmd_send(wl, CMD_REMAIN_ON_CHANNEL, cmd, sizeof(*cmd), 0);
|
||||
if (ret < 0) {
|
||||
wl1271_error("failed to send ROC command");
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
out_free:
|
||||
kfree(cmd);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int wl12xx_cmd_croc(struct wl1271 *wl, u8 role_id)
|
||||
{
|
||||
struct wl12xx_cmd_croc *cmd;
|
||||
int ret = 0;
|
||||
|
||||
wl1271_debug(DEBUG_CMD, "cmd croc (%d)", role_id);
|
||||
|
||||
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
|
||||
if (!cmd) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
cmd->role_id = role_id;
|
||||
|
||||
ret = wl1271_cmd_send(wl, CMD_CANCEL_REMAIN_ON_CHANNEL, cmd,
|
||||
sizeof(*cmd), 0);
|
||||
if (ret < 0) {
|
||||
wl1271_error("failed to send ROC command");
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
out_free:
|
||||
kfree(cmd);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int wl12xx_roc(struct wl1271 *wl, u8 role_id)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (WARN_ON(test_bit(role_id, wl->roc_map)))
|
||||
return 0;
|
||||
|
||||
ret = wl12xx_cmd_roc(wl, role_id);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = wl1271_cmd_wait_for_event(wl,
|
||||
REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID);
|
||||
if (ret < 0) {
|
||||
wl1271_error("cmd roc event completion error");
|
||||
goto out;
|
||||
}
|
||||
|
||||
__set_bit(role_id, wl->roc_map);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int wl12xx_croc(struct wl1271 *wl, u8 role_id)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (WARN_ON(!test_bit(role_id, wl->roc_map)))
|
||||
return 0;
|
||||
|
||||
ret = wl12xx_cmd_croc(wl, role_id);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
__clear_bit(role_id, wl->roc_map);
|
||||
out:
|
||||
return ret;
|
||||
}
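
wl12xx_roc()/wl12xx_croc() guard the raw (C)ROC commands with the roc_map bitmap so a role is never put on channel twice and never cancelled while off channel (the driver additionally WARNs in those cases). A standalone sketch of the guard pattern (plain C; send_roc()/send_croc() stand in for the firmware commands):

#include <stdint.h>
#include <stdio.h>

static uint32_t roc_map;  /* one bit per role currently on channel */

static int send_roc(int role_id)  { printf("ROC role %d\n", role_id);  return 0; }
static int send_croc(int role_id) { printf("CROC role %d\n", role_id); return 0; }

static int roc(int role_id)
{
	int ret;

	if (roc_map & (1u << role_id))        /* already on channel: nothing to do */
		return 0;
	ret = send_roc(role_id);
	if (ret < 0)
		return ret;
	roc_map |= 1u << role_id;
	return 0;
}

static int croc(int role_id)
{
	int ret;

	if (!(roc_map & (1u << role_id)))     /* not on channel: nothing to cancel */
		return 0;
	ret = send_croc(role_id);
	if (ret < 0)
		return ret;
	roc_map &= ~(1u << role_id);
	return 0;
}

int main(void)
{
	roc(2);
	roc(2);   /* no-op, bit already set */
	croc(2);
	croc(2);  /* no-op, bit already clear */
	return 0;
}
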
|
||||
|
|
|
@@ -36,7 +36,15 @@ int wl128x_cmd_general_parms(struct wl1271 *wl);
|
|||
int wl1271_cmd_radio_parms(struct wl1271 *wl);
|
||||
int wl128x_cmd_radio_parms(struct wl1271 *wl);
|
||||
int wl1271_cmd_ext_radio_parms(struct wl1271 *wl);
|
||||
int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type);
|
||||
int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id);
|
||||
int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id);
|
||||
int wl12xx_cmd_role_start_dev(struct wl1271 *wl);
|
||||
int wl12xx_cmd_role_stop_dev(struct wl1271 *wl);
|
||||
int wl12xx_cmd_role_start_sta(struct wl1271 *wl);
|
||||
int wl12xx_cmd_role_stop_sta(struct wl1271 *wl);
|
||||
int wl12xx_cmd_role_start_ap(struct wl1271 *wl);
|
||||
int wl12xx_cmd_role_stop_ap(struct wl1271 *wl);
|
||||
int wl12xx_cmd_role_start_ibss(struct wl1271 *wl);
|
||||
int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
|
||||
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
|
||||
int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
|
||||
|
@@ -56,20 +64,18 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
|
|||
int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr);
|
||||
int wl1271_build_qos_null_data(struct wl1271 *wl);
|
||||
int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
|
||||
int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id);
|
||||
int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id);
|
||||
int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid);
|
||||
int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
|
||||
u8 key_size, const u8 *key, const u8 *addr,
|
||||
u32 tx_seq_32, u16 tx_seq_16);
|
||||
int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
|
||||
u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
|
||||
u16 tx_seq_16);
|
||||
int wl1271_cmd_disconnect(struct wl1271 *wl);
|
||||
int wl1271_cmd_set_sta_state(struct wl1271 *wl);
|
||||
int wl1271_cmd_start_bss(struct wl1271 *wl);
|
||||
int wl1271_cmd_stop_bss(struct wl1271 *wl);
|
||||
int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid);
|
||||
int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid);
|
||||
int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid);
|
||||
int wl12xx_roc(struct wl1271 *wl, u8 role_id);
|
||||
int wl12xx_croc(struct wl1271 *wl, u8 role_id);
|
||||
int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid);
|
||||
int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid);
|
||||
int wl12xx_cmd_config_fwlog(struct wl1271 *wl);
|
||||
int wl12xx_cmd_start_fwlog(struct wl1271 *wl);
|
||||
int wl12xx_cmd_stop_fwlog(struct wl1271 *wl);
|
||||
|
@@ -83,25 +89,21 @@ enum wl1271_commands {
|
|||
CMD_DISABLE_TX = 6,
|
||||
CMD_SCAN = 8,
|
||||
CMD_STOP_SCAN = 9,
|
||||
CMD_START_JOIN = 11,
|
||||
CMD_SET_KEYS = 12,
|
||||
CMD_READ_MEMORY = 13,
|
||||
CMD_WRITE_MEMORY = 14,
|
||||
CMD_SET_TEMPLATE = 19,
|
||||
CMD_TEST = 23,
|
||||
CMD_NOISE_HIST = 28,
|
||||
CMD_LNA_CONTROL = 32,
|
||||
CMD_QUIET_ELEMENT_SET_STATE = 29,
|
||||
CMD_SET_BCN_MODE = 33,
|
||||
CMD_MEASUREMENT = 34,
|
||||
CMD_STOP_MEASUREMENT = 35,
|
||||
CMD_DISCONNECT = 36,
|
||||
CMD_SET_PS_MODE = 37,
|
||||
CMD_CHANNEL_SWITCH = 38,
|
||||
CMD_STOP_CHANNEL_SWICTH = 39,
|
||||
CMD_AP_DISCOVERY = 40,
|
||||
CMD_STOP_AP_DISCOVERY = 41,
|
||||
CMD_SPS_SCAN = 42,
|
||||
CMD_STOP_SPS_SCAN = 43,
|
||||
CMD_HEALTH_CHECK = 45,
|
||||
CMD_DEBUG = 46,
|
||||
CMD_TRIGGER_SCAN_TO = 47,
|
||||
|
@@ -109,16 +111,30 @@ enum wl1271_commands {
|
|||
CMD_CONNECTION_SCAN_SSID_CFG = 49,
|
||||
CMD_START_PERIODIC_SCAN = 50,
|
||||
CMD_STOP_PERIODIC_SCAN = 51,
|
||||
CMD_SET_STA_STATE = 52,
|
||||
CMD_CONFIG_FWLOGGER = 53,
|
||||
CMD_START_FWLOGGER = 54,
|
||||
CMD_STOP_FWLOGGER = 55,
|
||||
CMD_SET_PEER_STATE = 52,
|
||||
CMD_REMAIN_ON_CHANNEL = 53,
|
||||
CMD_CANCEL_REMAIN_ON_CHANNEL = 54,
|
||||
|
||||
/* AP mode commands */
|
||||
CMD_BSS_START = 60,
|
||||
CMD_BSS_STOP = 61,
|
||||
CMD_ADD_STA = 62,
|
||||
CMD_REMOVE_STA = 63,
|
||||
CMD_CONFIG_FWLOGGER = 55,
|
||||
CMD_START_FWLOGGER = 56,
|
||||
CMD_STOP_FWLOGGER = 57,
|
||||
|
||||
/* AP commands */
|
||||
CMD_ADD_PEER = 62,
|
||||
CMD_REMOVE_PEER = 63,
|
||||
|
||||
/* Role API */
|
||||
CMD_ROLE_ENABLE = 70,
|
||||
CMD_ROLE_DISABLE = 71,
|
||||
CMD_ROLE_START = 72,
|
||||
CMD_ROLE_STOP = 73,
|
||||
|
||||
/* WIFI Direct */
|
||||
CMD_WFD_START_DISCOVERY = 80,
|
||||
CMD_WFD_STOP_DISCOVERY = 81,
|
||||
CMD_WFD_ATTRIBUTE_CONFIG = 82,
|
||||
|
||||
CMD_NOP = 100,
|
||||
|
||||
NUM_COMMANDS,
|
||||
MAX_COMMAND_ID = 0xFFFF,
|
||||
|
@@ -147,21 +163,20 @@ enum cmd_templ {
|
|||
CMD_TEMPL_CTS, /*
|
||||
* For CTS-to-self (FastCTS) mechanism
|
||||
* for BT/WLAN coexistence (SoftGemini). */
|
||||
CMD_TEMPL_ARP_RSP,
|
||||
CMD_TEMPL_LINK_MEASUREMENT_REPORT,
|
||||
|
||||
/* AP-mode specific */
|
||||
CMD_TEMPL_AP_BEACON = 13,
|
||||
CMD_TEMPL_AP_BEACON,
|
||||
CMD_TEMPL_AP_PROBE_RESPONSE,
|
||||
CMD_TEMPL_AP_ARP_RSP,
|
||||
CMD_TEMPL_ARP_RSP,
|
||||
CMD_TEMPL_DEAUTH_AP,
|
||||
CMD_TEMPL_TEMPORARY,
|
||||
CMD_TEMPL_LINK_MEASUREMENT_REPORT,
|
||||
|
||||
CMD_TEMPL_MAX = 0xff
|
||||
};
|
||||
|
||||
/* unit ms */
|
||||
#define WL1271_COMMAND_TIMEOUT 2000
|
||||
#define WL1271_CMD_TEMPL_MAX_SIZE 252
|
||||
#define WL1271_CMD_TEMPL_DFLT_SIZE 252
|
||||
#define WL1271_CMD_TEMPL_MAX_SIZE 548
|
||||
#define WL1271_EVENT_TIMEOUT 750
|
||||
|
||||
struct wl1271_cmd_header {
|
||||
|
@@ -193,6 +208,8 @@ enum {
|
|||
CMD_STATUS_WRONG_NESTING = 19,
|
||||
CMD_STATUS_TIMEOUT = 21, /* Driver internal use.*/
|
||||
CMD_STATUS_FW_RESET = 22, /* Driver internal use.*/
|
||||
CMD_STATUS_TEMPLATE_OOM = 23,
|
||||
CMD_STATUS_NO_RX_BA_SESSION = 24,
|
||||
MAX_COMMAND_STATUS = 0xff
|
||||
};
|
||||
|
||||
|
@@ -210,38 +227,114 @@ enum {
|
|||
#define WL1271_JOIN_CMD_TX_SESSION_OFFSET 1
|
||||
#define WL1271_JOIN_CMD_BSS_TYPE_5GHZ 0x10
|
||||
|
||||
struct wl1271_cmd_join {
|
||||
struct wl12xx_cmd_role_enable {
|
||||
struct wl1271_cmd_header header;
|
||||
|
||||
__le32 bssid_lsb;
|
||||
__le16 bssid_msb;
|
||||
__le16 beacon_interval; /* in TBTTs */
|
||||
__le32 rx_config_options;
|
||||
__le32 rx_filter_options;
|
||||
u8 role_id;
|
||||
u8 role_type;
|
||||
u8 mac_address[ETH_ALEN];
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
* The target uses this field to determine the rate at
|
||||
* which to transmit control frame responses (such as
|
||||
* ACK or CTS frames).
|
||||
*/
|
||||
__le32 basic_rate_set;
|
||||
__le32 supported_rate_set;
|
||||
u8 dtim_interval;
|
||||
/*
|
||||
* bits 0-2: This bitwise field specifies the type
|
||||
* of BSS to start or join (BSS_TYPE_*).
|
||||
* bit 4: Band - The radio band in which to join
|
||||
* or start.
|
||||
* 0 - 2.4GHz band
|
||||
* 1 - 5GHz band
|
||||
* bits 3, 5-7: Reserved
|
||||
*/
|
||||
u8 bss_type;
|
||||
struct wl12xx_cmd_role_disable {
|
||||
struct wl1271_cmd_header header;
|
||||
|
||||
u8 role_id;
|
||||
u8 padding[3];
|
||||
} __packed;
|
||||
|
||||
enum wl12xx_band {
|
||||
WL12XX_BAND_2_4GHZ = 0,
|
||||
WL12XX_BAND_5GHZ = 1,
|
||||
WL12XX_BAND_JAPAN_4_9_GHZ = 2,
|
||||
WL12XX_BAND_DEFAULT = WL12XX_BAND_2_4GHZ,
|
||||
WL12XX_BAND_INVALID = 0x7E,
|
||||
WL12XX_BAND_MAX_RADIO = 0x7F,
|
||||
};
|
||||
|
||||
struct wl12xx_cmd_role_start {
|
||||
struct wl1271_cmd_header header;
|
||||
|
||||
u8 role_id;
|
||||
u8 band;
|
||||
u8 channel;
|
||||
u8 ssid_len;
|
||||
u8 ssid[IEEE80211_MAX_SSID_LEN];
|
||||
u8 ctrl; /* JOIN_CMD_CTRL_* */
|
||||
u8 reserved[3];
|
||||
u8 padding;
|
||||
|
||||
union {
|
||||
struct {
|
||||
u8 hlid;
|
||||
u8 session;
|
||||
u8 padding_1[54];
|
||||
} __packed device;
|
||||
/* sta & p2p_cli use the same struct */
|
||||
struct {
|
||||
u8 bssid[ETH_ALEN];
|
||||
u8 hlid; /* data hlid */
|
||||
u8 session;
|
||||
__le32 remote_rates; /* remote supported rates */
|
||||
|
||||
/*
|
||||
* The target uses this field to determine the rate at
|
||||
* which to transmit control frame responses (such as
|
||||
* ACK or CTS frames).
|
||||
*/
|
||||
__le32 basic_rate_set;
|
||||
__le32 local_rates; /* local supported rates */
|
||||
|
||||
u8 ssid_type;
|
||||
u8 ssid_len;
|
||||
u8 ssid[IEEE80211_MAX_SSID_LEN];
|
||||
|
||||
__le16 beacon_interval; /* in TBTTs */
|
||||
} __packed sta;
|
||||
struct {
|
||||
u8 bssid[ETH_ALEN];
|
||||
u8 hlid; /* data hlid */
|
||||
u8 dtim_interval;
|
||||
__le32 remote_rates; /* remote supported rates */
|
||||
|
||||
__le32 basic_rate_set;
|
||||
__le32 local_rates; /* local supported rates */
|
||||
|
||||
u8 ssid_type;
|
||||
u8 ssid_len;
|
||||
u8 ssid[IEEE80211_MAX_SSID_LEN];
|
||||
|
||||
__le16 beacon_interval; /* in TBTTs */
|
||||
|
||||
u8 padding_1[4];
|
||||
} __packed ibss;
|
||||
/* ap & p2p_go use the same struct */
|
||||
struct {
|
||||
__le16 aging_period; /* in secs */
|
||||
u8 beacon_expiry; /* in ms */
|
||||
u8 bss_index;
|
||||
/* The host link id for the AP's global queue */
|
||||
u8 global_hlid;
|
||||
/* The host link id for the AP's broadcast queue */
|
||||
u8 broadcast_hlid;
|
||||
|
||||
__le16 beacon_interval; /* in TBTTs */
|
||||
|
||||
__le32 basic_rate_set;
|
||||
__le32 local_rates; /* local supported rates */
|
||||
|
||||
u8 dtim_interval;

u8 ssid_type;
u8 ssid_len;
u8 ssid[IEEE80211_MAX_SSID_LEN];

u8 padding_1[5];
} __packed ap;
};
} __packed;
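For illustration only, a minimal hypothetical sketch of how the sta variant of the wl12xx_cmd_role_start structure above might be populated before being handed to the firmware. The helper wl12xx_send_cmd() and all numeric values are assumptions made up for this example; they are not part of this patch.

/* Hypothetical example; field names follow struct wl12xx_cmd_role_start above. */
static int example_start_sta_role(struct wl1271 *wl, u8 role_id,
				  const u8 *bssid, const u8 *ssid, u8 ssid_len)
{
	struct wl12xx_cmd_role_start *cmd;
	int ret;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->role_id = role_id;
	cmd->band = WL12XX_BAND_2_4GHZ;
	cmd->channel = 6;			/* assumed example channel */

	memcpy(cmd->sta.bssid, bssid, ETH_ALEN);
	cmd->sta.hlid = 0;			/* assumed data hlid */
	cmd->sta.session = 1;
	cmd->sta.basic_rate_set = cpu_to_le32(0x0000000f);	/* example rates */
	cmd->sta.local_rates = cpu_to_le32(0x0000ffff);		/* example rates */
	cmd->sta.ssid_type = WL12XX_SSID_TYPE_PUBLIC;
	cmd->sta.ssid_len = ssid_len;
	memcpy(cmd->sta.ssid, ssid, ssid_len);
	cmd->sta.beacon_interval = cpu_to_le16(100);		/* in TBTTs */

	/* wl12xx_send_cmd() is a stand-in for the driver's command helper. */
	ret = wl12xx_send_cmd(wl, CMD_ROLE_START, cmd, sizeof(*cmd));
	kfree(cmd);
	return ret;
}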

struct wl12xx_cmd_role_stop {
struct wl1271_cmd_header header;

u8 role_id;
u8 disc_type; /* only STA and P2P_CLI */
__le16 reason; /* only STA and P2P_CLI */
} __packed;
|
||||
|
||||
struct cmd_enabledisable_path {
|
||||
|
@ -287,8 +380,9 @@ enum wl1271_cmd_ps_mode {
|
|||
struct wl1271_cmd_ps_params {
|
||||
struct wl1271_cmd_header header;
|
||||
|
||||
u8 role_id;
|
||||
u8 ps_mode; /* STATION_* */
|
||||
u8 padding[3];
|
||||
u8 padding[2];
|
||||
} __packed;
|
||||
|
||||
/* HW encryption keys */
|
||||
|
@ -301,6 +395,12 @@ enum wl1271_cmd_key_action {
|
|||
MAX_KEY_ACTION = 0xffff,
|
||||
};
|
||||
|
||||
enum wl1271_cmd_lid_key_type {
|
||||
UNICAST_LID_TYPE = 0,
|
||||
BROADCAST_LID_TYPE = 1,
|
||||
WEP_DEFAULT_LID_TYPE = 2
|
||||
};
|
||||
|
||||
enum wl1271_cmd_key_type {
|
||||
KEY_NONE = 0,
|
||||
KEY_WEP = 1,
|
||||
|
@ -309,44 +409,7 @@ enum wl1271_cmd_key_type {
|
|||
KEY_GEM = 4,
|
||||
};
|
||||
|
||||
/* FIXME: Add description for key-types */
|
||||
|
||||
struct wl1271_cmd_set_sta_keys {
|
||||
struct wl1271_cmd_header header;
|
||||
|
||||
/* Ignored for default WEP key */
|
||||
u8 addr[ETH_ALEN];
|
||||
|
||||
/* key_action_e */
|
||||
__le16 key_action;
|
||||
|
||||
__le16 reserved_1;
|
||||
|
||||
/* key size in bytes */
|
||||
u8 key_size;
|
||||
|
||||
/* key_type_e */
|
||||
u8 key_type;
|
||||
u8 ssid_profile;
|
||||
|
||||
/*
|
||||
* TKIP, AES: frame's key id field.
|
||||
* For WEP default key: key id;
|
||||
*/
|
||||
u8 id;
|
||||
u8 reserved_2[6];
|
||||
u8 key[MAX_KEY_SIZE];
|
||||
__le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
|
||||
__le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
|
||||
} __packed;
|
||||
|
||||
enum wl1271_cmd_lid_key_type {
|
||||
UNICAST_LID_TYPE = 0,
|
||||
BROADCAST_LID_TYPE = 1,
|
||||
WEP_DEFAULT_LID_TYPE = 2
|
||||
};
|
||||
|
||||
struct wl1271_cmd_set_ap_keys {
|
||||
struct wl1271_cmd_set_keys {
|
||||
struct wl1271_cmd_header header;
|
||||
|
||||
/*
|
||||
|
@ -496,69 +559,39 @@ enum wl1271_disconnect_type {
|
|||
DISCONNECT_DISASSOC
|
||||
};
|
||||
|
||||
struct wl1271_cmd_disconnect {
|
||||
struct wl1271_cmd_header header;
|
||||
|
||||
__le32 rx_config_options;
|
||||
__le32 rx_filter_options;
|
||||
|
||||
__le16 reason;
|
||||
u8 type;
|
||||
|
||||
u8 padding;
|
||||
} __packed;
|
||||
|
||||
#define WL1271_CMD_STA_STATE_CONNECTED 1
|
||||
|
||||
struct wl1271_cmd_set_sta_state {
|
||||
struct wl12xx_cmd_set_peer_state {
|
||||
struct wl1271_cmd_header header;
|
||||
|
||||
u8 hlid;
|
||||
u8 state;
|
||||
u8 padding[3];
|
||||
u8 padding[2];
|
||||
} __packed;
|
||||
|
||||
enum wl1271_ssid_type {
|
||||
SSID_TYPE_PUBLIC = 0,
|
||||
SSID_TYPE_HIDDEN = 1
|
||||
struct wl12xx_cmd_roc {
|
||||
struct wl1271_cmd_header header;
|
||||
|
||||
u8 role_id;
|
||||
u8 channel;
|
||||
u8 band;
|
||||
u8 padding;
|
||||
};
|
||||
|
||||
struct wl1271_cmd_bss_start {
|
||||
struct wl12xx_cmd_croc {
|
||||
struct wl1271_cmd_header header;
|
||||
|
||||
/* wl1271_ssid_type */
|
||||
u8 ssid_type;
|
||||
u8 ssid_len;
|
||||
u8 ssid[IEEE80211_MAX_SSID_LEN];
|
||||
u8 padding_1[2];
|
||||
u8 role_id;
|
||||
u8 padding[3];
|
||||
};
|
||||
|
||||
/* Basic rate set */
|
||||
__le32 basic_rate_set;
|
||||
/* Aging period in seconds*/
|
||||
__le16 aging_period;
|
||||
enum wl12xx_ssid_type {
|
||||
WL12XX_SSID_TYPE_PUBLIC = 0,
|
||||
WL12XX_SSID_TYPE_HIDDEN = 1,
|
||||
WL12XX_SSID_TYPE_ANY = 2,
|
||||
};
|
||||
|
||||
/*
|
||||
* This field specifies the time between target beacon
|
||||
* transmission times (TBTTs), in time units (TUs).
|
||||
* Valid values are 1 to 1024.
|
||||
*/
|
||||
__le16 beacon_interval;
|
||||
u8 bssid[ETH_ALEN];
|
||||
u8 bss_index;
|
||||
/* Radio band */
|
||||
u8 band;
|
||||
u8 channel;
|
||||
/* The host link id for the AP's global queue */
|
||||
u8 global_hlid;
|
||||
/* The host link id for the AP's broadcast queue */
|
||||
u8 broadcast_hlid;
|
||||
/* DTIM count */
|
||||
u8 dtim_interval;
|
||||
/* Beacon expiry time in ms */
|
||||
u8 beacon_expiry;
|
||||
u8 padding_2[3];
|
||||
} __packed;
|
||||
|
||||
struct wl1271_cmd_add_sta {
|
||||
struct wl12xx_cmd_add_peer {
|
||||
struct wl1271_cmd_header header;
|
||||
|
||||
u8 addr[ETH_ALEN];
|
||||
|
@ -572,7 +605,7 @@ struct wl1271_cmd_add_sta {
|
|||
u8 padding1;
|
||||
} __packed;
|
||||
|
||||
struct wl1271_cmd_remove_sta {
|
||||
struct wl12xx_cmd_remove_peer {
|
||||
struct wl1271_cmd_header header;
|
||||
|
||||
u8 hlid;
|
||||
|
|
|
@ -99,40 +99,75 @@ enum {
|
|||
|
||||
enum {
|
||||
/*
|
||||
* PER threshold in PPM of the BT voice
|
||||
* Configure the min and max time BT gains the antenna
|
||||
* in WLAN / BT master basic rate
|
||||
*
|
||||
* Range: 0 - 10000000
|
||||
* Range: 0 - 255 (ms)
|
||||
*/
|
||||
CONF_SG_BT_PER_THRESHOLD = 0,
|
||||
CONF_SG_ACL_BT_MASTER_MIN_BR = 0,
|
||||
CONF_SG_ACL_BT_MASTER_MAX_BR,
|
||||
|
||||
/*
|
||||
* Number of consequent RX_ACTIVE activities to override BT voice
|
||||
* frames to ensure WLAN connection
|
||||
* Configure the min and max time BT gains the antenna
|
||||
* in WLAN / BT slave basic rate
|
||||
*
|
||||
* Range: 0 - 100
|
||||
* Range: 0 - 255 (ms)
|
||||
*/
|
||||
CONF_SG_HV3_MAX_OVERRIDE,
|
||||
CONF_SG_ACL_BT_SLAVE_MIN_BR,
|
||||
CONF_SG_ACL_BT_SLAVE_MAX_BR,
|
||||
|
||||
/*
|
||||
* Defines the PER threshold of the BT voice
|
||||
* Configure the min and max time BT gains the antenna
|
||||
* in WLAN / BT master EDR
|
||||
*
|
||||
* Range: 0 - 65000
|
||||
* Range: 0 - 255 (ms)
|
||||
*/
|
||||
CONF_SG_BT_NFS_SAMPLE_INTERVAL,
|
||||
CONF_SG_ACL_BT_MASTER_MIN_EDR,
|
||||
CONF_SG_ACL_BT_MASTER_MAX_EDR,
|
||||
|
||||
/*
|
||||
* Defines the load ratio of BT
|
||||
* Configure the min and max time BT gains the antenna
|
||||
* in WLAN / BT slave EDR
|
||||
*
|
||||
* Range: 0 - 100 (%)
|
||||
* Range: 0 - 255 (ms)
|
||||
*/
|
||||
CONF_SG_BT_LOAD_RATIO,
|
||||
CONF_SG_ACL_BT_SLAVE_MIN_EDR,
|
||||
CONF_SG_ACL_BT_SLAVE_MAX_EDR,
|
||||
|
||||
/*
|
||||
* Defines whether the SG will force WLAN host to enter/exit PSM
|
||||
* The maximum time WLAN can gain the antenna
|
||||
* in WLAN PSM / BT master/slave BR
|
||||
*
|
||||
* Range: 1 - SG can force, 0 - host handles PSM
|
||||
* Range: 0 - 255 (ms)
|
||||
*/
|
||||
CONF_SG_AUTO_PS_MODE,
|
||||
CONF_SG_ACL_WLAN_PS_MASTER_BR,
|
||||
CONF_SG_ACL_WLAN_PS_SLAVE_BR,
|
||||
|
||||
/*
|
||||
* The maximum time WLAN can gain the antenna
|
||||
* in WLAN PSM / BT master/slave EDR
|
||||
*
|
||||
* Range: 0 - 255 (ms)
|
||||
*/
|
||||
CONF_SG_ACL_WLAN_PS_MASTER_EDR,
|
||||
CONF_SG_ACL_WLAN_PS_SLAVE_EDR,
|
||||
|
||||
/* TODO: explain these values */
|
||||
CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR,
|
||||
CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR,
|
||||
CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR,
|
||||
CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR,
|
||||
CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR,
|
||||
CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR,
|
||||
CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR,
|
||||
CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR,
|
||||
|
||||
CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR,
|
||||
CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR,
|
||||
CONF_SG_ACL_PASSIVE_SCAN_BT_BR,
|
||||
CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR,
|
||||
CONF_SG_ACL_PASSIVE_SCAN_BT_EDR,
|
||||
CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR,
|
||||
|
||||
/*
|
||||
* Compensation percentage of probe requests when scan initiated
|
||||
|
@ -150,6 +185,50 @@ enum {
|
|||
*/
|
||||
CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3,
|
||||
|
||||
/*
|
||||
* Compensation percentage of WLAN active scan window if initiated
|
||||
* during BT A2DP
|
||||
*
|
||||
* Range: 0 - 1000 (%)
|
||||
*/
|
||||
CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP,
|
||||
|
||||
/*
|
||||
* Compensation percentage of WLAN passive scan window if initiated
|
||||
* during BT A2DP BR
|
||||
*
|
||||
* Range: 0 - 1000 (%)
|
||||
*/
|
||||
CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR,
|
||||
|
||||
/*
|
||||
* Compensation percentage of WLAN passive scan window if initiated
|
||||
* during BT A2DP EDR
|
||||
*
|
||||
* Range: 0 - 1000 (%)
|
||||
*/
|
||||
CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR,
|
||||
|
||||
/*
|
||||
* Compensation percentage of WLAN passive scan window if initiated
|
||||
* during BT voice
|
||||
*
|
||||
* Range: 0 - 1000 (%)
|
||||
*/
|
||||
CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3,
|
||||
|
||||
/* TODO: explain these values */
|
||||
CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN,
|
||||
CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN,
|
||||
CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN,
|
||||
|
||||
/*
|
||||
* Defines whether the SG will force WLAN host to enter/exit PSM
|
||||
*
|
||||
* Range: 1 - SG can force, 0 - host handles PSM
|
||||
*/
|
||||
CONF_SG_STA_FORCE_PS_IN_BT_SCO,
|
||||
|
||||
/*
|
||||
* Defines antenna configuration (single/dual antenna)
|
||||
*
|
||||
|
@ -158,7 +237,7 @@ enum {
|
|||
CONF_SG_ANTENNA_CONFIGURATION,
|
||||
|
||||
/*
|
||||
* The threshold (percent) of max consequtive beacon misses before
|
||||
* The threshold (percent) of max consecutive beacon misses before
|
||||
* increasing priority of beacon reception.
|
||||
*
|
||||
* Range: 0 - 100 (%)
|
||||
|
@ -166,87 +245,11 @@ enum {
|
|||
CONF_SG_BEACON_MISS_PERCENT,
|
||||
|
||||
/*
|
||||
* The rate threshold below which receiving a data frame from the AP
|
||||
* will increase the priority of the data frame above BT traffic.
|
||||
* Protection time of the DHCP procedure.
|
||||
*
|
||||
* Range: 0,2, 5(=5.5), 6, 9, 11, 12, 18, 24, 36, 48, 54
|
||||
* Range: 0 - 100000 (ms)
|
||||
*/
|
||||
CONF_SG_RATE_ADAPT_THRESH,
|
||||
|
||||
/*
|
||||
* Not used currently.
|
||||
*
|
||||
* Range: 0
|
||||
*/
|
||||
CONF_SG_RATE_ADAPT_SNR,
|
||||
|
||||
/*
|
||||
* Configure the min and max time BT gains the antenna
|
||||
* in WLAN PSM / BT master basic rate
|
||||
*
|
||||
* Range: 0 - 255 (ms)
|
||||
*/
|
||||
CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_BR,
|
||||
CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_BR,
|
||||
|
||||
/*
|
||||
* The time after it expires no new WLAN trigger frame is trasmitted
|
||||
* in WLAN PSM / BT master basic rate
|
||||
*
|
||||
* Range: 0 - 255 (ms)
|
||||
*/
|
||||
CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_BR,
|
||||
|
||||
/*
|
||||
* Configure the min and max time BT gains the antenna
|
||||
* in WLAN PSM / BT slave basic rate
|
||||
*
|
||||
* Range: 0 - 255 (ms)
|
||||
*/
|
||||
CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_BR,
|
||||
CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_BR,
|
||||
|
||||
/*
|
||||
* The time after it expires no new WLAN trigger frame is trasmitted
|
||||
* in WLAN PSM / BT slave basic rate
|
||||
*
|
||||
* Range: 0 - 255 (ms)
|
||||
*/
|
||||
CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_BR,
|
||||
|
||||
/*
|
||||
* Configure the min and max time BT gains the antenna
|
||||
* in WLAN PSM / BT master EDR
|
||||
*
|
||||
* Range: 0 - 255 (ms)
|
||||
*/
|
||||
CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_EDR,
|
||||
CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_EDR,
|
||||
|
||||
/*
|
||||
* The time after it expires no new WLAN trigger frame is trasmitted
|
||||
* in WLAN PSM / BT master EDR
|
||||
*
|
||||
* Range: 0 - 255 (ms)
|
||||
*/
|
||||
CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_EDR,
|
||||
|
||||
/*
|
||||
* Configure the min and max time BT gains the antenna
|
||||
* in WLAN PSM / BT slave EDR
|
||||
*
|
||||
* Range: 0 - 255 (ms)
|
||||
*/
|
||||
CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_EDR,
|
||||
CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_EDR,
|
||||
|
||||
/*
|
||||
* The time after it expires no new WLAN trigger frame is trasmitted
|
||||
* in WLAN PSM / BT slave EDR
|
||||
*
|
||||
* Range: 0 - 255 (ms)
|
||||
*/
|
||||
CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_EDR,
|
||||
CONF_SG_DHCP_TIME,
|
||||
|
||||
/*
|
||||
* RX guard time before the beginning of a new BT voice frame during
|
||||
|
@ -273,6 +276,16 @@ enum {
|
|||
*/
|
||||
CONF_SG_ADAPTIVE_RXT_TXT,
|
||||
|
||||
/* TODO: explain this value */
|
||||
CONF_SG_GENERAL_USAGE_BIT_MAP,
|
||||
|
||||
/*
|
||||
* Number of consecutive BT voice frames not interrupted by WLAN
|
||||
*
|
||||
* Range: 0 - 100
|
||||
*/
|
||||
CONF_SG_HV3_MAX_SERVED,
|
||||
|
||||
/*
|
||||
* The used WLAN legacy service period during active BT ACL link
|
||||
*
|
||||
|
@ -287,152 +300,35 @@ enum {
|
|||
*/
|
||||
CONF_SG_UPSD_TIMEOUT,
|
||||
|
||||
/*
|
||||
* Configure the min and max time BT gains the antenna
|
||||
* in WLAN Active / BT master EDR
|
||||
*
|
||||
* Range: 0 - 255 (ms)
|
||||
*/
|
||||
CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MIN_EDR,
|
||||
CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MAX_EDR,
|
||||
CONF_SG_CONSECUTIVE_CTS_THRESHOLD,
|
||||
CONF_SG_STA_RX_WINDOW_AFTER_DTIM,
|
||||
CONF_SG_STA_CONNECTION_PROTECTION_TIME,
|
||||
|
||||
/*
|
||||
* The maximum time WLAN can gain the antenna for
|
||||
* in WLAN Active / BT master EDR
|
||||
*
|
||||
* Range: 0 - 255 (ms)
|
||||
*/
|
||||
CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_MASTER_EDR,
|
||||
/* AP params */
|
||||
CONF_AP_BEACON_MISS_TX,
|
||||
CONF_AP_RX_WINDOW_AFTER_BEACON,
|
||||
CONF_AP_BEACON_WINDOW_INTERVAL,
|
||||
CONF_AP_CONNECTION_PROTECTION_TIME,
|
||||
CONF_AP_BT_ACL_VAL_BT_SERVE_TIME,
|
||||
CONF_AP_BT_ACL_VAL_WL_SERVE_TIME,
|
||||
|
||||
/*
|
||||
* Configure the min and max time BT gains the antenna
|
||||
* in WLAN Active / BT slave EDR
|
||||
*
|
||||
* Range: 0 - 255 (ms)
|
||||
*/
|
||||
CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MIN_EDR,
|
||||
CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MAX_EDR,
|
||||
|
||||
/*
|
||||
* The maximum time WLAN can gain the antenna for
|
||||
* in WLAN Active / BT slave EDR
|
||||
*
|
||||
* Range: 0 - 255 (ms)
|
||||
*/
|
||||
CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_SLAVE_EDR,
|
||||
|
||||
/*
|
||||
* Configure the min and max time BT gains the antenna
|
||||
* in WLAN Active / BT basic rate
|
||||
*
|
||||
* Range: 0 - 255 (ms)
|
||||
*/
|
||||
CONF_SG_WLAN_ACTIVE_BT_ACL_MIN_BR,
|
||||
CONF_SG_WLAN_ACTIVE_BT_ACL_MAX_BR,
|
||||
|
||||
/*
|
||||
* The maximum time WLAN can gain the antenna for
|
||||
* in WLAN Active / BT basic rate
|
||||
*
|
||||
* Range: 0 - 255 (ms)
|
||||
*/
|
||||
CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_BR,
|
||||
|
||||
/*
|
||||
* Compensation percentage of WLAN passive scan window if initiated
|
||||
* during BT voice
|
||||
*
|
||||
* Range: 0 - 1000 (%)
|
||||
*/
|
||||
CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3,
|
||||
|
||||
/*
|
||||
* Compensation percentage of WLAN passive scan window if initiated
|
||||
* during BT A2DP
|
||||
*
|
||||
* Range: 0 - 1000 (%)
|
||||
*/
|
||||
CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP,
|
||||
|
||||
/*
|
||||
* Fixed time ensured for BT traffic to gain the antenna during WLAN
|
||||
* passive scan.
|
||||
*
|
||||
* Range: 0 - 1000 ms
|
||||
*/
|
||||
CONF_SG_PASSIVE_SCAN_A2DP_BT_TIME,
|
||||
|
||||
/*
|
||||
* Fixed time ensured for WLAN traffic to gain the antenna during WLAN
|
||||
* passive scan.
|
||||
*
|
||||
* Range: 0 - 1000 ms
|
||||
*/
|
||||
CONF_SG_PASSIVE_SCAN_A2DP_WLAN_TIME,
|
||||
|
||||
/*
|
||||
* Number of consequent BT voice frames not interrupted by WLAN
|
||||
*
|
||||
* Range: 0 - 100
|
||||
*/
|
||||
CONF_SG_HV3_MAX_SERVED,
|
||||
|
||||
/*
|
||||
* Protection time of the DHCP procedure.
|
||||
*
|
||||
* Range: 0 - 100000 (ms)
|
||||
*/
|
||||
CONF_SG_DHCP_TIME,
|
||||
|
||||
/*
|
||||
* Compensation percentage of WLAN active scan window if initiated
|
||||
* during BT A2DP
|
||||
*
|
||||
* Range: 0 - 1000 (%)
|
||||
*/
|
||||
CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP,
|
||||
CONF_SG_TEMP_PARAM_1,
|
||||
CONF_SG_TEMP_PARAM_2,
|
||||
CONF_SG_TEMP_PARAM_3,
|
||||
CONF_SG_TEMP_PARAM_4,
|
||||
CONF_SG_TEMP_PARAM_5,
|
||||
|
||||
/*
|
||||
* AP beacon miss
|
||||
*
|
||||
* Range: 0 - 255
|
||||
*/
|
||||
CONF_SG_AP_BEACON_MISS_TX,
|
||||
|
||||
/*
|
||||
* AP RX window length
|
||||
*
|
||||
* Range: 0 - 50
|
||||
*/
|
||||
CONF_SG_RX_WINDOW_LENGTH,
|
||||
|
||||
/*
|
||||
* AP connection protection time
|
||||
*
|
||||
* Range: 0 - 5000
|
||||
*/
|
||||
CONF_SG_AP_CONNECTION_PROTECTION_TIME,
|
||||
|
||||
CONF_SG_TEMP_PARAM_6,
|
||||
CONF_SG_TEMP_PARAM_7,
|
||||
CONF_SG_TEMP_PARAM_8,
|
||||
CONF_SG_TEMP_PARAM_9,
|
||||
CONF_SG_TEMP_PARAM_10,
|
||||
|
||||
CONF_SG_STA_PARAMS_MAX = CONF_SG_TEMP_PARAM_5 + 1,
|
||||
CONF_SG_AP_PARAMS_MAX = CONF_SG_TEMP_PARAM_10 + 1,
|
||||
|
||||
CONF_SG_PARAMS_MAX,
|
||||
CONF_SG_PARAMS_ALL = 0xff
|
||||
};
|
||||
|
||||
struct conf_sg_settings {
u32 sta_params[CONF_SG_STA_PARAMS_MAX];
u32 ap_params[CONF_SG_AP_PARAMS_MAX];
u32 params[CONF_SG_PARAMS_MAX];
u8 state;
};
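As a rough illustration of the new flattened layout, here is a hypothetical defaults table indexed by the CONF_SG_* enum above. The numeric values are invented placeholders, not the driver's real defaults, and the helper function is made up for this sketch.

/* Hypothetical SG defaults; indices come from the CONF_SG_* enum above. */
static const u32 example_sg_params[CONF_SG_PARAMS_MAX] = {
	[CONF_SG_ACL_BT_MASTER_MIN_BR]	= 10,	/* ms, placeholder */
	[CONF_SG_ACL_BT_MASTER_MAX_BR]	= 25,	/* ms, placeholder */
	[CONF_SG_ACL_WLAN_PS_MASTER_BR]	= 30,	/* ms, placeholder */
	[CONF_SG_DHCP_TIME]		= 5000,	/* ms, DHCP protection, placeholder */
	[CONF_SG_BEACON_MISS_PERCENT]	= 60,	/* %, placeholder */
};

static void example_apply_sg_defaults(struct conf_sg_settings *sg)
{
	/* One u32 per CONF_SG_* index replaces the old sta/ap split. */
	memcpy(sg->params, example_sg_params, sizeof(example_sg_params));
	sg->state = 1;	/* assumed: coexistence enabled */
}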
|
||||
|
||||
|
@ -545,6 +441,11 @@ struct conf_rx_settings {
|
|||
CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \
|
||||
CONF_HW_BIT_RATE_54MBPS)
|
||||
|
||||
#define CONF_TX_MCS_RATES (CONF_HW_BIT_RATE_MCS_0 | \
|
||||
CONF_HW_BIT_RATE_MCS_1 | CONF_HW_BIT_RATE_MCS_2 | \
|
||||
CONF_HW_BIT_RATE_MCS_3 | CONF_HW_BIT_RATE_MCS_4 | \
|
||||
CONF_HW_BIT_RATE_MCS_5 | CONF_HW_BIT_RATE_MCS_6 | \
|
||||
CONF_HW_BIT_RATE_MCS_7)
|
||||
|
||||
/*
|
||||
* Default rates for management traffic when operating in AP mode. This
|
||||
|
@ -661,6 +562,9 @@ struct conf_tx_ac_category {
|
|||
|
||||
#define CONF_TX_MAX_TID_COUNT 8

/* Allow TX BA on all TIDs but 6,7. These are currently reserved in the FW */
#define CONF_TX_BA_ENABLED_TID_BITMAP 0x3F
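A small, non-authoritative sketch of how such a TID bitmap can be consulted; the helper name is an assumption made up for this example.

/* Hypothetical helper: true if a TX block-ack session may be started on this TID. */
static inline bool example_tx_ba_allowed_on_tid(u8 tid)
{
	if (tid >= CONF_TX_MAX_TID_COUNT)
		return false;
	/* 0x3F sets bits 0-5, leaving TIDs 6 and 7 (reserved in the FW) clear. */
	return !!(CONF_TX_BA_ENABLED_TID_BITMAP & BIT(tid));
}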
|
||||
|
||||
enum {
|
||||
CONF_CHANNEL_TYPE_DCF = 0, /* DC/LEGACY*/
|
||||
CONF_CHANNEL_TYPE_EDCF = 1, /* EDCA*/
|
||||
|
@ -913,7 +817,7 @@ struct conf_conn_settings {
|
|||
struct conf_bcn_filt_rule bcn_filt_ie[CONF_MAX_BCN_FILT_IE_COUNT];
|
||||
|
||||
/*
|
||||
* The number of consequtive beacons to lose, before the firmware
|
||||
* The number of consecutive beacons to lose, before the firmware
|
||||
* becomes out of synch.
|
||||
*
|
||||
* Range: u32
|
||||
|
@ -951,7 +855,7 @@ struct conf_conn_settings {
|
|||
u8 rx_broadcast_in_ps;
|
||||
|
||||
/*
|
||||
* Consequtive PS Poll failures before sending event to driver
|
||||
* Consecutive PS Poll failures before sending event to driver
|
||||
*
|
||||
* Range: u8
|
||||
*/
|
||||
|
@ -1199,8 +1103,12 @@ struct conf_rf_settings {
|
|||
};
|
||||
|
||||
struct conf_ht_setting {
|
||||
u16 tx_ba_win_size;
|
||||
u8 rx_ba_win_size;
|
||||
u8 tx_ba_win_size;
|
||||
u16 inactivity_timeout;
|
||||
|
||||
/* bitmap of enabled TIDs for TX BA sessions */
|
||||
u8 tx_ba_tid_bitmap;
|
||||
};
|
||||
|
||||
struct conf_memory_settings {
|
||||
|
@ -1309,6 +1217,25 @@ struct conf_fwlog {
|
|||
u8 threshold;
|
||||
};
|
||||
|
||||
#define ACX_RATE_MGMT_NUM_OF_RATES 13
struct conf_rate_policy_settings {
u16 rate_retry_score;
u16 per_add;
u16 per_th1;
u16 per_th2;
u16 max_per;
u8 inverse_curiosity_factor;
u8 tx_fail_low_th;
u8 tx_fail_high_th;
u8 per_alpha_shift;
u8 per_add_shift;
u8 per_beta1_shift;
u8 per_beta2_shift;
u8 rate_check_up;
u8 rate_check_down;
u8 rate_retry_policy[ACX_RATE_MGMT_NUM_OF_RATES];
};
|
||||
|
||||
struct conf_drv_settings {
|
||||
struct conf_sg_settings sg;
|
||||
struct conf_rx_settings rx;
|
||||
|
@ -1326,6 +1253,7 @@ struct conf_drv_settings {
|
|||
struct conf_fm_coex fm_coex;
|
||||
struct conf_rx_streaming_settings rx_streaming;
|
||||
struct conf_fwlog fwlog;
|
||||
struct conf_rate_policy_settings rate;
|
||||
u8 hci_io_ds;
|
||||
};
|
||||
|
||||
|
|
|
@ -339,10 +339,11 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
|
|||
#define DRIVER_STATE_PRINT_HEX(x) DRIVER_STATE_PRINT(x, "0x%x")
|
||||
|
||||
DRIVER_STATE_PRINT_INT(tx_blocks_available);
|
||||
DRIVER_STATE_PRINT_INT(tx_allocated_blocks[0]);
|
||||
DRIVER_STATE_PRINT_INT(tx_allocated_blocks[1]);
|
||||
DRIVER_STATE_PRINT_INT(tx_allocated_blocks[2]);
|
||||
DRIVER_STATE_PRINT_INT(tx_allocated_blocks[3]);
|
||||
DRIVER_STATE_PRINT_INT(tx_allocated_blocks);
|
||||
DRIVER_STATE_PRINT_INT(tx_allocated_pkts[0]);
|
||||
DRIVER_STATE_PRINT_INT(tx_allocated_pkts[1]);
|
||||
DRIVER_STATE_PRINT_INT(tx_allocated_pkts[2]);
|
||||
DRIVER_STATE_PRINT_INT(tx_allocated_pkts[3]);
|
||||
DRIVER_STATE_PRINT_INT(tx_frames_cnt);
|
||||
DRIVER_STATE_PRINT_LHEX(tx_frames_map[0]);
|
||||
DRIVER_STATE_PRINT_INT(tx_queue_count[0]);
|
||||
|
@ -352,10 +353,7 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
|
|||
DRIVER_STATE_PRINT_INT(tx_packets_count);
|
||||
DRIVER_STATE_PRINT_INT(tx_results_count);
|
||||
DRIVER_STATE_PRINT_LHEX(flags);
|
||||
DRIVER_STATE_PRINT_INT(tx_blocks_freed[0]);
|
||||
DRIVER_STATE_PRINT_INT(tx_blocks_freed[1]);
|
||||
DRIVER_STATE_PRINT_INT(tx_blocks_freed[2]);
|
||||
DRIVER_STATE_PRINT_INT(tx_blocks_freed[3]);
|
||||
DRIVER_STATE_PRINT_INT(tx_blocks_freed);
|
||||
DRIVER_STATE_PRINT_INT(tx_security_last_seq_lsb);
|
||||
DRIVER_STATE_PRINT_INT(rx_counter);
|
||||
DRIVER_STATE_PRINT_INT(session_counter);
|
||||
|
@ -369,9 +367,6 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
|
|||
DRIVER_STATE_PRINT_INT(beacon_int);
|
||||
DRIVER_STATE_PRINT_INT(psm_entry_retry);
|
||||
DRIVER_STATE_PRINT_INT(ps_poll_failures);
|
||||
DRIVER_STATE_PRINT_HEX(filters);
|
||||
DRIVER_STATE_PRINT_HEX(rx_config);
|
||||
DRIVER_STATE_PRINT_HEX(rx_filter);
|
||||
DRIVER_STATE_PRINT_INT(power_level);
|
||||
DRIVER_STATE_PRINT_INT(rssi_thold);
|
||||
DRIVER_STATE_PRINT_INT(last_rssi_event);
|
||||
|
|
|
@@ -285,13 +285,13 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)

if ((vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID) && !is_ap) {
wl1271_debug(DEBUG_EVENT, "BA_SESSION_RX_CONSTRAINT_EVENT_ID. "
"ba_allowed = 0x%x", mbox->ba_allowed);
"ba_allowed = 0x%x", mbox->rx_ba_allowed);

if (wl->vif)
wl1271_stop_ba_event(wl, mbox->ba_allowed);
wl1271_stop_ba_event(wl, mbox->rx_ba_allowed);
}

if ((vector & DUMMY_PACKET_EVENT_ID) && !is_ap) {
if ((vector & DUMMY_PACKET_EVENT_ID)) {
wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
if (wl->vif)
wl1271_tx_dummy_packet(wl);
|
||||
|
|
|
@ -49,32 +49,27 @@ enum {
|
|||
MEASUREMENT_START_EVENT_ID = BIT(8),
|
||||
MEASUREMENT_COMPLETE_EVENT_ID = BIT(9),
|
||||
SCAN_COMPLETE_EVENT_ID = BIT(10),
|
||||
SCHEDULED_SCAN_COMPLETE_EVENT_ID = BIT(11),
|
||||
WFD_DISCOVERY_COMPLETE_EVENT_ID = BIT(11),
|
||||
AP_DISCOVERY_COMPLETE_EVENT_ID = BIT(12),
|
||||
PS_REPORT_EVENT_ID = BIT(13),
|
||||
PSPOLL_DELIVERY_FAILURE_EVENT_ID = BIT(14),
|
||||
DISCONNECT_EVENT_COMPLETE_ID = BIT(15),
|
||||
JOIN_EVENT_COMPLETE_ID = BIT(16),
|
||||
/* BIT(16) is reserved */
|
||||
CHANNEL_SWITCH_COMPLETE_EVENT_ID = BIT(17),
|
||||
BSS_LOSE_EVENT_ID = BIT(18),
|
||||
REGAINED_BSS_EVENT_ID = BIT(19),
|
||||
MAX_TX_RETRY_EVENT_ID = BIT(20),
|
||||
/* STA: dummy paket for dynamic mem blocks */
|
||||
DUMMY_PACKET_EVENT_ID = BIT(21),
|
||||
/* AP: STA remove complete */
|
||||
STA_REMOVE_COMPLETE_EVENT_ID = BIT(21),
|
||||
DUMMY_PACKET_EVENT_ID = BIT(21),
|
||||
SOFT_GEMINI_SENSE_EVENT_ID = BIT(22),
|
||||
/* STA: SG prediction */
|
||||
SOFT_GEMINI_PREDICTION_EVENT_ID = BIT(23),
|
||||
/* AP: Inactive STA */
|
||||
INACTIVE_STA_EVENT_ID = BIT(23),
|
||||
CHANGE_AUTO_MODE_TIMEOUT_EVENT_ID = BIT(23),
|
||||
SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24),
|
||||
PLT_RX_CALIBRATION_COMPLETE_EVENT_ID = BIT(25),
|
||||
DBG_EVENT_ID = BIT(26),
|
||||
HEALTH_CHECK_REPLY_EVENT_ID = BIT(27),
|
||||
INACTIVE_STA_EVENT_ID = BIT(26),
|
||||
PEER_REMOVE_COMPLETE_EVENT_ID = BIT(27),
|
||||
PERIODIC_SCAN_COMPLETE_EVENT_ID = BIT(28),
|
||||
PERIODIC_SCAN_REPORT_EVENT_ID = BIT(29),
|
||||
BA_SESSION_RX_CONSTRAINT_EVENT_ID = BIT(30),
|
||||
REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID = BIT(31),
|
||||
EVENT_MBOX_ALL_EVENT_ID = 0x7fffffff,
|
||||
};
|
||||
|
||||
|
@ -83,15 +78,6 @@ enum {
|
|||
EVENT_ENTER_POWER_SAVE_SUCCESS,
|
||||
};
|
||||
|
||||
struct event_debug_report {
|
||||
u8 debug_event_id;
|
||||
u8 num_params;
|
||||
__le16 pad;
|
||||
__le32 report_1;
|
||||
__le32 report_2;
|
||||
__le32 report_3;
|
||||
} __packed;
|
||||
|
||||
#define NUM_OF_RSSI_SNR_TRIGGERS 8
|
||||
|
||||
struct event_mailbox {
|
||||
|
@ -100,49 +86,45 @@ struct event_mailbox {
|
|||
__le32 reserved_1;
|
||||
__le32 reserved_2;
|
||||
|
||||
u8 dbg_event_id;
|
||||
u8 num_relevant_params;
|
||||
__le16 reserved_3;
|
||||
__le32 event_report_p1;
|
||||
__le32 event_report_p2;
|
||||
__le32 event_report_p3;
|
||||
|
||||
u8 number_of_scan_results;
|
||||
u8 scan_tag;
|
||||
u8 reserved_4[2];
|
||||
__le32 compl_scheduled_scan_status;
|
||||
u8 completed_scan_status;
|
||||
u8 reserved_3;
|
||||
|
||||
__le16 scheduled_scan_attended_channels;
|
||||
u8 soft_gemini_sense_info;
|
||||
u8 soft_gemini_protective_info;
|
||||
s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS];
|
||||
u8 channel_switch_status;
|
||||
u8 scheduled_scan_status;
|
||||
u8 ps_status;
|
||||
/* tuned channel (roc) */
|
||||
u8 roc_channel;
|
||||
|
||||
/* AP FW only */
|
||||
u8 hlid_removed;
|
||||
__le16 hlid_removed_bitmap;
|
||||
|
||||
/* a bitmap of hlids for stations that have been inactive too long */
|
||||
/* bitmap of aged stations (by HLID) */
|
||||
__le16 sta_aging_status;
|
||||
|
||||
/* a bitmap of hlids for stations which didn't respond to TX */
|
||||
/* bitmap of stations (by HLID) which exceeded max tx retries */
|
||||
__le16 sta_tx_retry_exceeded;
|
||||
|
||||
/*
|
||||
* Bitmap, Each bit set represents the Role ID for which this constraint
|
||||
* is set. Range: 0 - FF, FF means ANY role
|
||||
*/
|
||||
u8 ba_role_id;
|
||||
/*
|
||||
* Bitmap, Each bit set represents the Link ID for which this constraint
|
||||
* is set. Not applicable if ba_role_id is set to ANY role (FF).
|
||||
* Range: 0 - FFFF, FFFF means ANY link in that role
|
||||
*/
|
||||
u8 ba_link_id;
u8 ba_allowed;
/* discovery completed results */
u8 discovery_tag;
u8 number_of_preq_results;
u8 number_of_prsp_results;
u8 reserved_5;

u8 reserved_5[21];
/* rx ba constraint */
u8 role_id; /* 0xFF means any role. */
u8 rx_ba_allowed;
u8 reserved_6[2];

u8 ps_poll_delivery_failure_role_ids;
u8 stopped_role_ids;
u8 started_role_ids;
u8 change_auto_mode_timeout;

u8 reserved_7[12];
} __packed;
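For clarity, a minimal sketch of how a consumer might walk the per-HLID bitmaps carried in this mailbox (sta_aging_status, sta_tx_retry_exceeded). The handler name and the link count of 16 are assumptions made for this example only.

/* Hypothetical consumer of the 16-bit HLID bitmaps in struct event_mailbox. */
static void example_handle_aged_stations(struct wl1271 *wl,
					 struct event_mailbox *mbox)
{
	unsigned long map = le16_to_cpu(mbox->sta_aging_status);
	int hlid;

	/* Each set bit is the HLID of a station that has been inactive too long. */
	for_each_set_bit(hlid, &map, 16)
		wl1271_debug(DEBUG_EVENT, "aging out station on hlid %d", hlid);
}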
|
||||
|
||||
int wl1271_event_unmask(struct wl1271 *wl);
|
||||
|
|
|
@ -39,13 +39,13 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
|
|||
|
||||
/* send empty templates for fw memory reservation */
|
||||
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL,
|
||||
WL1271_CMD_TEMPL_MAX_SIZE,
|
||||
WL1271_CMD_TEMPL_DFLT_SIZE,
|
||||
0, WL1271_RATE_AUTOMATIC);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
|
||||
NULL, WL1271_CMD_TEMPL_MAX_SIZE, 0,
|
||||
NULL, WL1271_CMD_TEMPL_DFLT_SIZE, 0,
|
||||
WL1271_RATE_AUTOMATIC);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
@ -70,15 +70,13 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
|
|||
return ret;
|
||||
|
||||
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PROBE_RESPONSE, NULL,
|
||||
sizeof
|
||||
(struct wl12xx_probe_resp_template),
|
||||
WL1271_CMD_TEMPL_DFLT_SIZE,
|
||||
0, WL1271_RATE_AUTOMATIC);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, NULL,
|
||||
sizeof
|
||||
(struct wl12xx_beacon_template),
|
||||
WL1271_CMD_TEMPL_DFLT_SIZE,
|
||||
0, WL1271_RATE_AUTOMATIC);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
@ -92,7 +90,7 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
|
|||
|
||||
for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
|
||||
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL,
|
||||
WL1271_CMD_TEMPL_MAX_SIZE, i,
|
||||
WL1271_CMD_TEMPL_DFLT_SIZE, i,
|
||||
WL1271_RATE_AUTOMATIC);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
@ -191,15 +189,13 @@ static int wl1271_ap_init_templates_config(struct wl1271 *wl)
|
|||
* reserve memory for later.
|
||||
*/
|
||||
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
|
||||
sizeof
|
||||
(struct wl12xx_probe_resp_template),
|
||||
WL1271_CMD_TEMPL_MAX_SIZE,
|
||||
0, WL1271_RATE_AUTOMATIC);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
|
||||
sizeof
|
||||
(struct wl12xx_beacon_template),
|
||||
WL1271_CMD_TEMPL_MAX_SIZE,
|
||||
0, WL1271_RATE_AUTOMATIC);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
@ -227,7 +223,7 @@ static int wl1271_ap_init_templates_config(struct wl1271 *wl)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter)
|
||||
static int wl12xx_init_rx_config(struct wl1271 *wl)
|
||||
{
|
||||
int ret;
|
||||
|
||||
|
@ -235,10 +231,6 @@ static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter)
|
|||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = wl1271_acx_rx_config(wl, config, filter);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -285,10 +277,7 @@ int wl1271_init_pta(struct wl1271 *wl)
|
|||
{
|
||||
int ret;
|
||||
|
||||
if (wl->bss_type == BSS_TYPE_AP_BSS)
|
||||
ret = wl1271_acx_ap_sg_cfg(wl);
|
||||
else
|
||||
ret = wl1271_acx_sta_sg_cfg(wl);
|
||||
ret = wl12xx_acx_sg_cfg(wl);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
@ -392,7 +381,7 @@ static int wl1271_sta_hw_init(struct wl1271 *wl)
|
|||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = wl1271_acx_sta_mem_cfg(wl);
|
||||
ret = wl12xx_acx_mem_cfg(wl);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
@ -408,12 +397,6 @@ static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl)
|
|||
{
|
||||
int ret, i;
|
||||
|
||||
ret = wl1271_cmd_set_sta_default_wep_key(wl, wl->default_key);
|
||||
if (ret < 0) {
|
||||
wl1271_warning("couldn't set default key");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* disable all keep-alive templates */
|
||||
for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
|
||||
ret = wl1271_acx_keep_alive_config(wl, i,
|
||||
|
@ -451,7 +434,7 @@ static int wl1271_ap_hw_init(struct wl1271 *wl)
|
|||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = wl1271_acx_ap_mem_cfg(wl);
|
||||
ret = wl12xx_acx_mem_cfg(wl);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
@ -483,7 +466,7 @@ int wl1271_ap_init_templates(struct wl1271 *wl)
|
|||
* when operating as AP we want to receive external beacons for
|
||||
* configuring ERP protection.
|
||||
*/
|
||||
ret = wl1271_acx_set_ap_beacon_filter(wl, false);
|
||||
ret = wl1271_acx_beacon_filter_opt(wl, false);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
@ -532,6 +515,9 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
|
|||
else
|
||||
supported_rates = CONF_TX_AP_ENABLED_RATES;
|
||||
|
||||
/* unconditionally enable HT rates */
|
||||
supported_rates |= CONF_TX_MCS_RATES;
|
||||
|
||||
/* configure unicast TX rate classes */
|
||||
for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
|
||||
rc.enabled_rates = supported_rates;
|
||||
|
@ -546,41 +532,24 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void wl1271_check_ba_support(struct wl1271 *wl)
|
||||
{
|
||||
/* validate FW cose ver x.x.x.50-60.x */
|
||||
if ((wl->chip.fw_ver[3] >= WL12XX_BA_SUPPORT_FW_COST_VER2_START) &&
|
||||
(wl->chip.fw_ver[3] < WL12XX_BA_SUPPORT_FW_COST_VER2_END)) {
|
||||
wl->ba_support = true;
|
||||
return;
|
||||
}
|
||||
|
||||
wl->ba_support = false;
|
||||
}
|
||||
|
||||
static int wl1271_set_ba_policies(struct wl1271 *wl)
|
||||
{
|
||||
u8 tid_index;
|
||||
int ret = 0;
|
||||
|
||||
/* Reset the BA RX indicators */
|
||||
wl->ba_rx_bitmap = 0;
|
||||
wl->ba_allowed = true;
|
||||
wl->ba_rx_session_count = 0;
|
||||
|
||||
/* validate that FW support BA */
|
||||
wl1271_check_ba_support(wl);
|
||||
/* BA is supported in STA/AP modes */
|
||||
if (wl->bss_type != BSS_TYPE_AP_BSS &&
|
||||
wl->bss_type != BSS_TYPE_STA_BSS) {
|
||||
wl->ba_support = false;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (wl->ba_support)
|
||||
/* 802.11n initiator BA session setting */
|
||||
for (tid_index = 0; tid_index < CONF_TX_MAX_TID_COUNT;
|
||||
++tid_index) {
|
||||
ret = wl1271_acx_set_ba_session(wl, WLAN_BACK_INITIATOR,
|
||||
tid_index, true);
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
wl->ba_support = true;
|
||||
|
||||
return ret;
|
||||
/* 802.11n initiator BA session setting */
|
||||
return wl12xx_acx_set_ba_initiator_policy(wl);
|
||||
}
|
||||
|
||||
int wl1271_chip_specific_init(struct wl1271 *wl)
|
||||
|
@ -650,11 +619,7 @@ int wl1271_hw_init(struct wl1271 *wl)
|
|||
return ret;
|
||||
|
||||
/* RX config */
|
||||
ret = wl1271_init_rx_config(wl,
|
||||
RX_CFG_PROMISCUOUS | RX_CFG_TSF,
|
||||
RX_FILTER_OPTION_DEF);
|
||||
/* RX_CONFIG_OPTION_ANY_DST_ANY_BSS,
|
||||
RX_FILTER_OPTION_FILTER_ALL); */
|
||||
ret = wl12xx_init_rx_config(wl);
|
||||
if (ret < 0)
|
||||
goto out_free_memmap;
|
||||
|
||||
|
@ -733,6 +698,10 @@ int wl1271_hw_init(struct wl1271 *wl)
|
|||
if (ret < 0)
|
||||
goto out_free_memmap;
|
||||
|
||||
ret = wl12xx_acx_set_rate_mgmt_params(wl);
|
||||
if (ret < 0)
|
||||
goto out_free_memmap;
|
||||
|
||||
/* Configure initiator BA sessions policies */
|
||||
ret = wl1271_set_ba_policies(wl);
|
||||
if (ret < 0)
|
||||
|
|
|
@ -186,6 +186,5 @@ int wl1271_free_hw(struct wl1271 *wl);
|
|||
irqreturn_t wl1271_irq(int irq, void *data);
|
||||
bool wl1271_set_block_size(struct wl1271 *wl);
|
||||
int wl1271_tx_dummy_packet(struct wl1271 *wl);
|
||||
void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters);
|
||||
|
||||
#endif
|
||||
|
|
(Diff for one file is not shown here because of its large size.)
|
@ -226,8 +226,8 @@ void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
|
|||
if (test_bit(hlid, &wl->ap_ps_map))
|
||||
return;
|
||||
|
||||
wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d blks %d "
|
||||
"clean_queues %d", hlid, wl->links[hlid].allocated_blks,
|
||||
wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d pkts %d "
|
||||
"clean_queues %d", hlid, wl->links[hlid].allocated_pkts,
|
||||
clean_queues);
|
||||
|
||||
rcu_read_lock();
|
||||
|
|
|
@ -296,81 +296,6 @@
|
|||
===============================================*/
|
||||
#define REG_EVENT_MAILBOX_PTR (SCR_PAD1)
|
||||
|
||||
|
||||
/* Misc */
|
||||
|
||||
#define REG_ENABLE_TX_RX (ENABLE)
|
||||
/*
|
||||
* Rx configuration (filter) information element
|
||||
* ---------------------------------------------
|
||||
*/
|
||||
#define REG_RX_CONFIG (RX_CFG)
|
||||
#define REG_RX_FILTER (RX_FILTER_CFG)
|
||||
|
||||
|
||||
#define RX_CFG_ENABLE_PHY_HEADER_PLCP 0x0002
|
||||
|
||||
/* promiscuous - receives all valid frames */
|
||||
#define RX_CFG_PROMISCUOUS 0x0008
|
||||
|
||||
/* receives frames from any BSSID */
|
||||
#define RX_CFG_BSSID 0x0020
|
||||
|
||||
/* receives frames destined to any MAC address */
|
||||
#define RX_CFG_MAC 0x0010
|
||||
|
||||
#define RX_CFG_ENABLE_ONLY_MY_DEST_MAC 0x0010
|
||||
#define RX_CFG_ENABLE_ANY_DEST_MAC 0x0000
|
||||
#define RX_CFG_ENABLE_ONLY_MY_BSSID 0x0020
|
||||
#define RX_CFG_ENABLE_ANY_BSSID 0x0000
|
||||
|
||||
/* discards all broadcast frames */
|
||||
#define RX_CFG_DISABLE_BCAST 0x0200
|
||||
|
||||
#define RX_CFG_ENABLE_ONLY_MY_SSID 0x0400
|
||||
#define RX_CFG_ENABLE_RX_CMPLT_FCS_ERROR 0x0800
|
||||
#define RX_CFG_COPY_RX_STATUS 0x2000
|
||||
#define RX_CFG_TSF 0x10000
|
||||
|
||||
#define RX_CONFIG_OPTION_ANY_DST_MY_BSS (RX_CFG_ENABLE_ANY_DEST_MAC | \
|
||||
RX_CFG_ENABLE_ONLY_MY_BSSID)
|
||||
|
||||
#define RX_CONFIG_OPTION_MY_DST_ANY_BSS (RX_CFG_ENABLE_ONLY_MY_DEST_MAC\
|
||||
| RX_CFG_ENABLE_ANY_BSSID)
|
||||
|
||||
#define RX_CONFIG_OPTION_ANY_DST_ANY_BSS (RX_CFG_ENABLE_ANY_DEST_MAC | \
|
||||
RX_CFG_ENABLE_ANY_BSSID)
|
||||
|
||||
#define RX_CONFIG_OPTION_MY_DST_MY_BSS (RX_CFG_ENABLE_ONLY_MY_DEST_MAC\
|
||||
| RX_CFG_ENABLE_ONLY_MY_BSSID)
|
||||
|
||||
#define RX_CONFIG_OPTION_FOR_SCAN (RX_CFG_ENABLE_PHY_HEADER_PLCP \
|
||||
| RX_CFG_ENABLE_RX_CMPLT_FCS_ERROR \
|
||||
| RX_CFG_COPY_RX_STATUS | RX_CFG_TSF)
|
||||
|
||||
#define RX_CONFIG_OPTION_FOR_MEASUREMENT (RX_CFG_ENABLE_ANY_DEST_MAC)
|
||||
|
||||
#define RX_CONFIG_OPTION_FOR_JOIN (RX_CFG_ENABLE_ONLY_MY_BSSID | \
|
||||
RX_CFG_ENABLE_ONLY_MY_DEST_MAC)
|
||||
|
||||
#define RX_CONFIG_OPTION_FOR_IBSS_JOIN (RX_CFG_ENABLE_ONLY_MY_SSID | \
|
||||
RX_CFG_ENABLE_ONLY_MY_DEST_MAC)
|
||||
|
||||
#define RX_FILTER_OPTION_DEF (CFG_RX_MGMT_EN | CFG_RX_DATA_EN\
|
||||
| CFG_RX_CTL_EN | CFG_RX_BCN_EN\
|
||||
| CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
|
||||
|
||||
#define RX_FILTER_OPTION_FILTER_ALL 0
|
||||
|
||||
#define RX_FILTER_OPTION_DEF_PRSP_BCN (CFG_RX_PRSP_EN | CFG_RX_MGMT_EN\
|
||||
| CFG_RX_RCTS_ACK | CFG_RX_BCN_EN)
|
||||
|
||||
#define RX_FILTER_OPTION_JOIN (CFG_RX_MGMT_EN | CFG_RX_DATA_EN\
|
||||
| CFG_RX_BCN_EN | CFG_RX_AUTH_EN\
|
||||
| CFG_RX_ASSOC_EN | CFG_RX_RCTS_ACK\
|
||||
| CFG_RX_PRSP_EN)
|
||||
|
||||
|
||||
/*===============================================
|
||||
EEPROM Read/Write Request 32bit RW
|
||||
------------------------------------------
|
||||
|
|
|
@ -30,20 +30,28 @@
|
|||
#include "rx.h"
|
||||
#include "io.h"
|
||||
|
||||
static u8 wl1271_rx_get_mem_block(struct wl1271_fw_common_status *status,
|
||||
static u8 wl12xx_rx_get_mem_block(struct wl12xx_fw_status *status,
|
||||
u32 drv_rx_counter)
|
||||
{
|
||||
return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
|
||||
RX_MEM_BLOCK_MASK;
|
||||
}
|
||||
|
||||
static u32 wl1271_rx_get_buf_size(struct wl1271_fw_common_status *status,
|
||||
static u32 wl12xx_rx_get_buf_size(struct wl12xx_fw_status *status,
|
||||
u32 drv_rx_counter)
|
||||
{
|
||||
return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
|
||||
RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
|
||||
}
|
||||
|
||||
static bool wl12xx_rx_get_unaligned(struct wl12xx_fw_status *status,
|
||||
u32 drv_rx_counter)
|
||||
{
|
||||
/* Convert the value to bool */
|
||||
return !!(le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
|
||||
RX_BUF_UNALIGNED_PAYLOAD);
|
||||
}
|
||||
|
||||
static void wl1271_rx_status(struct wl1271 *wl,
|
||||
struct wl1271_rx_descriptor *desc,
|
||||
struct ieee80211_rx_status *status,
|
||||
|
@ -89,7 +97,8 @@ static void wl1271_rx_status(struct wl1271 *wl,
|
|||
}
|
||||
}
|
||||
|
||||
static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
|
||||
static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
|
||||
bool unaligned)
|
||||
{
|
||||
struct wl1271_rx_descriptor *desc;
|
||||
struct sk_buff *skb;
|
||||
|
@ -97,6 +106,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
|
|||
u8 *buf;
|
||||
u8 beacon = 0;
|
||||
u8 is_data = 0;
|
||||
u8 reserved = unaligned ? NET_IP_ALIGN : 0;
|
||||
|
||||
/*
|
||||
* In PLT mode we seem to get frames and mac80211 warns about them,
|
||||
|
@ -131,17 +141,25 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
skb = __dev_alloc_skb(length, GFP_KERNEL);
|
||||
/* skb length not included rx descriptor */
|
||||
skb = __dev_alloc_skb(length + reserved - sizeof(*desc), GFP_KERNEL);
|
||||
if (!skb) {
|
||||
wl1271_error("Couldn't allocate RX frame");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
buf = skb_put(skb, length);
|
||||
memcpy(buf, data, length);
|
||||
/* reserve the unaligned payload(if any) */
|
||||
skb_reserve(skb, reserved);
|
||||
|
||||
/* now we pull the descriptor out of the buffer */
|
||||
skb_pull(skb, sizeof(*desc));
|
||||
buf = skb_put(skb, length - sizeof(*desc));
|
||||
|
||||
/*
 * Copy packets from aggregation buffer to the skbs without rx
 * descriptor and with packet payload aligned care. In case of unaligned
 * packets copy the packets in offset of 2 bytes guarantee IP header
 * payload aligned to 4 bytes.
 */
memcpy(buf, data + sizeof(*desc), length - sizeof(*desc));

hdr = (struct ieee80211_hdr *)skb->data;
if (ieee80211_is_beacon(hdr->frame_control))

@@ -163,7 +181,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
return is_data;
}
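The change above reserves headroom so that a frame flagged as unaligned still ends up with its IP header on a 4-byte boundary once the descriptor is stripped. A stripped-down sketch of the same idea, with invented function and variable names:

/*
 * Toy illustration of the alignment trick used above: reserve
 * NET_IP_ALIGN (2) bytes of headroom before copying a frame whose
 * payload is known to be unaligned, so the IP header lands on a
 * 4-byte boundary after the preceding headers are pulled.
 */
static struct sk_buff *example_copy_rx_frame(const u8 *data, u32 len,
					     bool unaligned)
{
	unsigned int reserved = unaligned ? NET_IP_ALIGN : 0;
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len + reserved, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, reserved);		/* shift payload by 2 bytes */
	memcpy(skb_put(skb, len), data, len);	/* copy frame after the headroom */
	return skb;
}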
|
||||
|
||||
void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
|
||||
void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
|
||||
{
|
||||
struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
|
||||
u32 buf_size;
|
||||
|
@ -175,12 +193,13 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
|
|||
u32 pkt_offset;
|
||||
bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
|
||||
bool had_data = false;
|
||||
bool unaligned = false;
|
||||
|
||||
while (drv_rx_counter != fw_rx_counter) {
|
||||
buf_size = 0;
|
||||
rx_counter = drv_rx_counter;
|
||||
while (rx_counter != fw_rx_counter) {
|
||||
pkt_length = wl1271_rx_get_buf_size(status, rx_counter);
|
||||
pkt_length = wl12xx_rx_get_buf_size(status, rx_counter);
|
||||
if (buf_size + pkt_length > WL1271_AGGR_BUFFER_SIZE)
|
||||
break;
|
||||
buf_size += pkt_length;
|
||||
|
@ -199,7 +218,7 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
|
|||
* For aggregated packets, only the first memory block
|
||||
* should be retrieved. The FW takes care of the rest.
|
||||
*/
|
||||
mem_block = wl1271_rx_get_mem_block(status,
|
||||
mem_block = wl12xx_rx_get_mem_block(status,
|
||||
drv_rx_counter);
|
||||
|
||||
wl->rx_mem_pool_addr.addr = (mem_block << 8) +
|
||||
|
@ -220,8 +239,12 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
|
|||
/* Split data into separate packets */
|
||||
pkt_offset = 0;
|
||||
while (pkt_offset < buf_size) {
|
||||
pkt_length = wl1271_rx_get_buf_size(status,
|
||||
pkt_length = wl12xx_rx_get_buf_size(status,
|
||||
drv_rx_counter);
|
||||
|
||||
unaligned = wl12xx_rx_get_unaligned(status,
|
||||
drv_rx_counter);
|
||||
|
||||
/*
|
||||
* the handle data call can only fail in memory-outage
|
||||
* conditions, in that case the received frame will just
|
||||
|
@ -229,7 +252,7 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
|
|||
*/
|
||||
if (wl1271_rx_handle_data(wl,
|
||||
wl->aggr_buf + pkt_offset,
|
||||
pkt_length) == 1)
|
||||
pkt_length, unaligned) == 1)
|
||||
had_data = true;
|
||||
|
||||
wl->rx_counter++;
|
||||
|
@ -260,14 +283,3 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
|
|||
jiffies + msecs_to_jiffies(timeout));
|
||||
}
|
||||
}
|
||||
|
||||
void wl1271_set_default_filters(struct wl1271 *wl)
|
||||
{
|
||||
if (wl->bss_type == BSS_TYPE_AP_BSS) {
|
||||
wl->rx_config = WL1271_DEFAULT_AP_RX_CONFIG;
|
||||
wl->rx_filter = WL1271_DEFAULT_AP_RX_FILTER;
|
||||
} else {
|
||||
wl->rx_config = WL1271_DEFAULT_STA_RX_CONFIG;
|
||||
wl->rx_filter = WL1271_DEFAULT_STA_RX_FILTER;
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -86,16 +86,18 @@
 * Bits 3-5 - process_id tag (AP mode FW)
 * Bits 6-7 - reserved
 */
#define WL1271_RX_DESC_STATUS_MASK 0x07
#define WL1271_RX_DESC_STATUS_MASK 0x03

#define WL1271_RX_DESC_SUCCESS 0x00
#define WL1271_RX_DESC_DECRYPT_FAIL 0x01
#define WL1271_RX_DESC_MIC_FAIL 0x02
#define WL1271_RX_DESC_DRIVER_RX_Q_FAIL 0x03

#define RX_MEM_BLOCK_MASK 0xFF
#define RX_BUF_SIZE_MASK 0xFFF00
#define RX_BUF_SIZE_SHIFT_DIV 6
#define RX_MEM_BLOCK_MASK 0xFF
#define RX_BUF_SIZE_MASK 0xFFF00
#define RX_BUF_SIZE_SHIFT_DIV 6
/* If set, the start of IP payload is not 4 bytes aligned */
#define RX_BUF_UNALIGNED_PAYLOAD BIT(20)
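A brief worked example of how these masks decode one packed rx_pkt_descs word, mirroring the wl12xx_rx_get_mem_block() / wl12xx_rx_get_buf_size() / wl12xx_rx_get_unaligned() helpers earlier in this diff; the sample value and function name are made up.

/* Decode an example packed descriptor word using the masks above. */
static void example_decode_rx_desc_word(u32 desc_word)
{
	u8 mem_block = desc_word & RX_MEM_BLOCK_MASK;			/* bits 0-7 */
	u32 buf_size = (desc_word & RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
	bool unaligned = !!(desc_word & RX_BUF_UNALIGNED_PAYLOAD);	/* bit 20 */

	/* e.g. desc_word = 0x00112233 -> mem_block 0x33, buf_size 1160, unaligned true */
	pr_debug("mem_block %u buf_size %u unaligned %d\n",
		 mem_block, buf_size, unaligned);
}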
|
||||
|
||||
enum {
|
||||
WL12XX_RX_CLASS_UNKNOWN,
|
||||
|
@ -119,16 +121,12 @@ struct wl1271_rx_descriptor {
|
|||
u8 snr;
|
||||
__le32 timestamp;
|
||||
u8 packet_class;
|
||||
union {
|
||||
u8 process_id; /* STA FW */
|
||||
u8 hlid; /* AP FW */
|
||||
} __packed;
|
||||
u8 hlid;
|
||||
u8 pad_len;
|
||||
u8 reserved;
|
||||
} __packed;
|
||||
|
||||
void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status);
|
||||
void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status);
|
||||
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
|
||||
void wl1271_set_default_filters(struct wl1271 *wl);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -33,6 +33,8 @@ void wl1271_scan_complete_work(struct work_struct *work)
|
|||
{
|
||||
struct delayed_work *dwork;
|
||||
struct wl1271 *wl;
|
||||
int ret;
|
||||
bool is_sta, is_ibss;
|
||||
|
||||
dwork = container_of(work, struct delayed_work, work);
|
||||
wl = container_of(dwork, struct wl1271, scan_complete_work);
|
||||
|
@ -50,21 +52,34 @@ void wl1271_scan_complete_work(struct work_struct *work)
|
|||
wl->scan.state = WL1271_SCAN_STATE_IDLE;
|
||||
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
|
||||
wl->scan.req = NULL;
|
||||
ieee80211_scan_completed(wl->hw, false);
|
||||
|
||||
/* restore hardware connection monitoring template */
|
||||
ret = wl1271_ps_elp_wakeup(wl);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
|
||||
if (wl1271_ps_elp_wakeup(wl) == 0) {
|
||||
wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
|
||||
wl1271_ps_elp_sleep(wl);
|
||||
}
|
||||
/* restore hardware connection monitoring template */
|
||||
wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
|
||||
}
|
||||
|
||||
/* return to ROC if needed */
|
||||
is_sta = (wl->bss_type == BSS_TYPE_STA_BSS);
|
||||
is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
|
||||
if ((is_sta && !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) ||
|
||||
(is_ibss && !test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags))) {
|
||||
/* restore remain on channel */
|
||||
wl12xx_cmd_role_start_dev(wl);
|
||||
wl12xx_roc(wl, wl->dev_role_id);
|
||||
}
|
||||
wl1271_ps_elp_sleep(wl);
|
||||
|
||||
if (wl->scan.failed) {
|
||||
wl1271_info("Scan completed due to error.");
|
||||
wl12xx_queue_recovery_work(wl);
|
||||
}
|
||||
|
||||
ieee80211_scan_completed(wl->hw, false);
|
||||
|
||||
out:
|
||||
mutex_unlock(&wl->mutex);
|
||||
|
||||
|
@ -156,6 +171,11 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
|
|||
if (passive || wl->scan.req->n_ssids == 0)
|
||||
scan_options |= WL1271_SCAN_OPT_PASSIVE;
|
||||
|
||||
if (WARN_ON(wl->role_id == WL12XX_INVALID_ROLE_ID)) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
cmd->params.role_id = wl->role_id;
|
||||
cmd->params.scan_options = cpu_to_le16(scan_options);
|
||||
|
||||
cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req,
|
||||
|
@ -167,10 +187,6 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
|
|||
}
|
||||
|
||||
cmd->params.tx_rate = cpu_to_le32(basic_rate);
|
||||
cmd->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
|
||||
cmd->params.rx_filter_options =
|
||||
cpu_to_le32(CFG_RX_PRSP_EN | CFG_RX_MGMT_EN | CFG_RX_BCN_EN);
|
||||
|
||||
cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs;
|
||||
cmd->params.tx_rate = cpu_to_le32(basic_rate);
|
||||
cmd->params.tid_trigger = 0;
|
||||
|
@ -186,6 +202,8 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
|
|||
memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
|
||||
}
|
||||
|
||||
memcpy(cmd->addr, wl->mac_addr, ETH_ALEN);
|
||||
|
||||
ret = wl1271_cmd_build_probe_req(wl, wl->scan.ssid, wl->scan.ssid_len,
|
||||
wl->scan.req->ie, wl->scan.req->ie_len,
|
||||
band);
|
||||
|
|
|
@ -46,7 +46,10 @@ void wl1271_scan_sched_scan_results(struct wl1271 *wl);
|
|||
#define WL1271_SCAN_CURRENT_TX_PWR 0
|
||||
#define WL1271_SCAN_OPT_ACTIVE 0
|
||||
#define WL1271_SCAN_OPT_PASSIVE 1
|
||||
#define WL1271_SCAN_OPT_TRIGGERED_SCAN 2
|
||||
#define WL1271_SCAN_OPT_PRIORITY_HIGH 4
|
||||
/* scan even if we fail to enter psm */
|
||||
#define WL1271_SCAN_OPT_FORCE 8
|
||||
#define WL1271_SCAN_BAND_2_4_GHZ 0
|
||||
#define WL1271_SCAN_BAND_5_GHZ 1
|
||||
|
||||
|
@ -62,27 +65,27 @@ enum {
|
|||
};
|
||||
|
||||
struct basic_scan_params {
|
||||
__le32 rx_config_options;
|
||||
__le32 rx_filter_options;
|
||||
/* Scan option flags (WL1271_SCAN_OPT_*) */
|
||||
__le16 scan_options;
|
||||
u8 role_id;
|
||||
/* Number of scan channels in the list (maximum 30) */
|
||||
u8 n_ch;
|
||||
/* This field indicates the number of probe requests to send
|
||||
per channel for an active scan */
|
||||
u8 n_probe_reqs;
|
||||
/* Rate bit field for sending the probes */
|
||||
__le32 tx_rate;
|
||||
u8 tid_trigger;
|
||||
u8 ssid_len;
|
||||
/* in order to align */
|
||||
u8 padding1[2];
|
||||
u8 use_ssid_list;
|
||||
|
||||
/* Rate bit field for sending the probes */
|
||||
__le32 tx_rate;
|
||||
|
||||
u8 ssid[IEEE80211_MAX_SSID_LEN];
|
||||
/* Band to scan */
|
||||
u8 band;
|
||||
u8 use_ssid_list;
|
||||
|
||||
u8 scan_tag;
|
||||
u8 padding2;
|
||||
u8 padding2[2];
|
||||
} __packed;
|
||||
|
||||
struct basic_scan_channel_params {
|
||||
|
@ -105,6 +108,10 @@ struct wl1271_cmd_scan {
|
|||
|
||||
struct basic_scan_params params;
|
||||
struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS];
|
||||
|
||||
/* src mac address */
|
||||
u8 addr[ETH_ALEN];
|
||||
u8 padding[2];
|
||||
} __packed;
|
||||
|
||||
struct wl1271_cmd_trigger_scan_to {
|
||||
|
@ -184,7 +191,7 @@ struct wl1271_cmd_sched_scan_config {
|
|||
} __packed;
|
||||
|
||||
|
||||
#define SCHED_SCAN_MAX_SSIDS 8
|
||||
#define SCHED_SCAN_MAX_SSIDS 16
|
||||
|
||||
enum {
|
||||
SCAN_SSID_TYPE_PUBLIC = 0,
|
||||
|
|
|
@ -412,7 +412,5 @@ module_exit(wl1271_exit);
|
|||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
|
||||
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
|
||||
MODULE_FIRMWARE(WL1271_FW_NAME);
|
||||
MODULE_FIRMWARE(WL127X_FW_NAME);
|
||||
MODULE_FIRMWARE(WL128X_FW_NAME);
|
||||
MODULE_FIRMWARE(WL127X_AP_FW_NAME);
|
||||
MODULE_FIRMWARE(WL128X_AP_FW_NAME);
|
||||
|
|
|
@ -193,7 +193,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
|
|||
ret = request_firmware(&fw, WL128X_FW_NAME,
|
||||
wl1271_wl_to_dev(wl));
|
||||
else
|
||||
ret = request_firmware(&fw, WL1271_FW_NAME,
|
||||
ret = request_firmware(&fw, WL127X_FW_NAME,
|
||||
wl1271_wl_to_dev(wl));
|
||||
|
||||
if (ret < 0) {
|
||||
|
|
|
@ -486,8 +486,6 @@ module_exit(wl1271_exit);
|
|||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
|
||||
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
|
||||
MODULE_FIRMWARE(WL1271_FW_NAME);
|
||||
MODULE_FIRMWARE(WL127X_FW_NAME);
|
||||
MODULE_FIRMWARE(WL128X_FW_NAME);
|
||||
MODULE_FIRMWARE(WL127X_AP_FW_NAME);
|
||||
MODULE_FIRMWARE(WL128X_AP_FW_NAME);
|
||||
MODULE_ALIAS("spi:wl1271");
|
||||
|
|
|
@ -37,9 +37,10 @@ static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id)
|
|||
bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
|
||||
|
||||
if (is_ap)
|
||||
ret = wl1271_cmd_set_ap_default_wep_key(wl, id);
|
||||
ret = wl12xx_cmd_set_default_wep_key(wl, id,
|
||||
wl->ap_bcast_hlid);
|
||||
else
|
||||
ret = wl1271_cmd_set_sta_default_wep_key(wl, id);
|
||||
ret = wl12xx_cmd_set_default_wep_key(wl, id, wl->sta_hlid);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
@ -77,6 +78,7 @@ static int wl1271_tx_update_filters(struct wl1271 *wl,
|
|||
struct sk_buff *skb)
|
||||
{
|
||||
struct ieee80211_hdr *hdr;
|
||||
int ret;
|
||||
|
||||
hdr = (struct ieee80211_hdr *)(skb->data +
|
||||
sizeof(struct wl1271_tx_hw_descr));
|
||||
|
@ -90,9 +92,19 @@ static int wl1271_tx_update_filters(struct wl1271 *wl,
|
|||
if (!ieee80211_is_auth(hdr->frame_control))
|
||||
return 0;
|
||||
|
||||
wl1271_configure_filters(wl, FIF_OTHER_BSS);
|
||||
if (wl->dev_hlid != WL12XX_INVALID_LINK_ID)
|
||||
goto out;
|
||||
|
||||
return wl1271_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
|
||||
wl1271_debug(DEBUG_CMD, "starting device role for roaming");
|
||||
ret = wl12xx_cmd_role_start_dev(wl);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = wl12xx_roc(wl, wl->dev_role_id);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
out:
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
|
||||
|
@ -114,24 +126,29 @@ static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
|
|||
static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
|
||||
{
|
||||
bool fw_ps;
|
||||
u8 tx_blks;
|
||||
u8 tx_pkts;
|
||||
|
||||
/* only regulate station links */
|
||||
if (hlid < WL1271_AP_STA_HLID_START)
|
||||
return;
|
||||
|
||||
fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
|
||||
tx_blks = wl->links[hlid].allocated_blks;
|
||||
tx_pkts = wl->links[hlid].allocated_pkts;
|
||||
|
||||
/*
|
||||
* if in FW PS and there is enough data in FW we can put the link
|
||||
* into high-level PS and clean out its TX queues.
|
||||
*/
|
||||
if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS)
|
||||
if (fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
|
||||
wl1271_ps_link_start(wl, hlid, true);
|
||||
}
|
||||
|
||||
u8 wl1271_tx_get_hlid(struct sk_buff *skb)
|
||||
static bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
|
||||
{
|
||||
return wl->dummy_packet == skb;
|
||||
}
|
||||
|
||||
u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb)
|
||||
{
|
||||
struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
|
||||
|
||||
|
@ -144,14 +161,32 @@ u8 wl1271_tx_get_hlid(struct sk_buff *skb)
|
|||
} else {
|
||||
struct ieee80211_hdr *hdr;
|
||||
|
||||
if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
|
||||
return wl->system_hlid;
|
||||
|
||||
hdr = (struct ieee80211_hdr *)skb->data;
|
||||
if (ieee80211_is_mgmt(hdr->frame_control))
|
||||
return WL1271_AP_GLOBAL_HLID;
|
||||
return wl->ap_global_hlid;
|
||||
else
|
||||
return WL1271_AP_BROADCAST_HLID;
|
||||
return wl->ap_bcast_hlid;
|
||||
}
|
||||
}
|
||||
|
||||
static u8 wl1271_tx_get_hlid(struct wl1271 *wl, struct sk_buff *skb)
|
||||
{
|
||||
if (wl12xx_is_dummy_packet(wl, skb))
|
||||
return wl->system_hlid;
|
||||
|
||||
if (wl->bss_type == BSS_TYPE_AP_BSS)
|
||||
return wl12xx_tx_get_hlid_ap(wl, skb);
|
||||
|
||||
if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
|
||||
test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags))
|
||||
return wl->sta_hlid;
|
||||
else
|
||||
return wl->dev_hlid;
|
||||
}

static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl,
						 unsigned int packet_length)
{

@@ -169,12 +204,9 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
	u32 len;
	u32 total_blocks;
	int id, ret = -EBUSY, ac;
	u32 spare_blocks;

	if (unlikely(wl->quirks & WL12XX_QUIRK_USE_2_SPARE_BLOCKS))
		spare_blocks = 2;
	else
		spare_blocks = 1;
	/* we use 1 spare block */
	u32 spare_blocks = 1;

	if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
		return -EAGAIN;

@@ -206,12 +238,14 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
	desc->id = id;

	wl->tx_blocks_available -= total_blocks;
	wl->tx_allocated_blocks += total_blocks;

	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
	wl->tx_allocated_blocks[ac] += total_blocks;
	wl->tx_allocated_pkts[ac]++;

	if (wl->bss_type == BSS_TYPE_AP_BSS)
		wl->links[hlid].allocated_blks += total_blocks;
	if (wl->bss_type == BSS_TYPE_AP_BSS &&
	    hlid >= WL1271_AP_STA_HLID_START)
		wl->links[hlid].allocated_pkts++;

	ret = 0;

@@ -225,11 +259,6 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
	return ret;
}

static bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
{
	return wl->dummy_packet == skb;
}

static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
			       u32 extra, struct ieee80211_tx_info *control,
			       u8 hlid)

@@ -280,9 +309,9 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
			wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
	}

	if (wl->bss_type != BSS_TYPE_AP_BSS) {
		desc->aid = hlid;
	desc->hlid = hlid;

	if (wl->bss_type != BSS_TYPE_AP_BSS) {
		/* if the packets are destined for AP (have a STA entry)
		   send them with AP rate policies, otherwise use default
		   basic rates */

@@ -291,18 +320,12 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
		else
			rate_idx = ACX_TX_BASIC_RATE;
	} else {
		desc->hlid = hlid;
		switch (hlid) {
		case WL1271_AP_GLOBAL_HLID:
		if (hlid == wl->ap_global_hlid)
			rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
			break;
		case WL1271_AP_BROADCAST_HLID:
		else if (hlid == wl->ap_bcast_hlid)
			rate_idx = ACX_TX_AP_MODE_BCST_RATE;
			break;
		default:
		else
			rate_idx = ac;
			break;
		}
	}

	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;

@@ -376,10 +399,11 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
		}
	}

	if (wl->bss_type == BSS_TYPE_AP_BSS)
		hlid = wl1271_tx_get_hlid(skb);
	else
		hlid = TX_HW_DEFAULT_AID;
	hlid = wl1271_tx_get_hlid(wl, skb);
	if (hlid == WL12XX_INVALID_LINK_ID) {
		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
		return -EINVAL;
	}

	ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid);
	if (ret < 0)

@@ -462,20 +486,24 @@ void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
						struct sk_buff_head *queues)
{
	int i, q = -1;
	u32 min_blks = 0xffffffff;
	int i, q = -1, ac;
	u32 min_pkts = 0xffffffff;

	/*
	 * Find a non-empty ac where:
	 * 1. There are packets to transmit
	 * 2. The FW has the least allocated blocks
	 *
	 * We prioritize the ACs according to VO>VI>BE>BK
	 */
	for (i = 0; i < NUM_TX_QUEUES; i++)
		if (!skb_queue_empty(&queues[i]) &&
		    (wl->tx_allocated_blocks[i] < min_blks)) {
			q = i;
			min_blks = wl->tx_allocated_blocks[q];
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		ac = wl1271_tx_get_queue(i);
		if (!skb_queue_empty(&queues[ac]) &&
		    (wl->tx_allocated_pkts[ac] < min_pkts)) {
			q = ac;
			min_pkts = wl->tx_allocated_pkts[q];
		}
	}

	if (q == -1)
		return NULL;
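
Illustrative sketch (not from the patch): the comment above states the new selection rule — among ACs that still have queued frames, pick the one with the fewest packets already handed to the firmware, walking the ACs in VO>VI>BE>BK order so ties go to the higher-priority AC. A self-contained model follows; the queue-index-to-AC mapping in prio[] is assumed for illustration (the driver derives it via wl1271_tx_get_queue()).

/* Model only -- not driver code. */
#include <stdint.h>
#include <stdio.h>

#define NUM_TX_QUEUES 4

static int select_ac(const uint32_t queued[NUM_TX_QUEUES],
		     const uint32_t allocated[NUM_TX_QUEUES])
{
	/* Assumed priority order of AC indices, highest first. */
	static const int prio[NUM_TX_QUEUES] = { 3 /*VO*/, 2 /*VI*/,
						 0 /*BE*/, 1 /*BK*/ };
	uint32_t min_pkts = UINT32_MAX;
	int q = -1;

	for (int i = 0; i < NUM_TX_QUEUES; i++) {
		int ac = prio[i];

		/* Strict '<' means an equally loaded, lower-priority AC
		 * never displaces an earlier (higher-priority) pick. */
		if (queued[ac] && allocated[ac] < min_pkts) {
			q = ac;
			min_pkts = allocated[ac];
		}
	}
	return q;	/* -1 when every AC is empty */
}

int main(void)
{
	uint32_t queued[NUM_TX_QUEUES]    = { 5, 0, 2, 2 };
	uint32_t allocated[NUM_TX_QUEUES] = { 9, 0, 4, 4 };

	printf("chosen AC = %d\n", select_ac(queued, allocated));	/* 3 (VO) */
	return 0;
}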

@@ -579,7 +607,7 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
	if (wl12xx_is_dummy_packet(wl, skb)) {
		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	} else if (wl->bss_type == BSS_TYPE_AP_BSS) {
		u8 hlid = wl1271_tx_get_hlid(skb);
		u8 hlid = wl1271_tx_get_hlid(wl, skb);
		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);

		/* make sure we dequeue the same packet next time */

@@ -826,10 +854,14 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
		total[i] = 0;
		while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
			info = IEEE80211_SKB_CB(skb);
			info->status.rates[0].idx = -1;
			info->status.rates[0].count = 0;
			ieee80211_tx_status_ni(wl->hw, skb);

			if (!wl12xx_is_dummy_packet(wl, skb)) {
				info = IEEE80211_SKB_CB(skb);
				info->status.rates[0].idx = -1;
				info->status.rates[0].count = 0;
				ieee80211_tx_status_ni(wl->hw, skb);
			}

			total[i]++;
		}
	}

@@ -853,8 +885,8 @@ void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
	if (wl->bss_type == BSS_TYPE_AP_BSS) {
		for (i = 0; i < AP_MAX_LINKS; i++) {
			wl1271_tx_reset_link_queues(wl, i);
			wl->links[i].allocated_blks = 0;
			wl->links[i].prev_freed_blks = 0;
			wl->links[i].allocated_pkts = 0;
			wl->links[i].prev_freed_pkts = 0;
		}

		wl->last_tx_hlid = 0;

@@ -29,9 +29,6 @@

#define TX_HW_MGMT_PKT_LIFETIME_TU 2000
#define TX_HW_AP_MODE_PKT_LIFETIME_TU 8000
/* The chipset reference driver states, that the "aid" value 1
 * is for infra-BSS, but is still always used */
#define TX_HW_DEFAULT_AID 1

#define TX_HW_ATTR_SAVE_RETRIES BIT(0)
#define TX_HW_ATTR_HEADER_PAD BIT(1)

@@ -116,12 +113,8 @@ struct wl1271_tx_hw_descr {
	u8 id;
	/* The packet TID value (as User-Priority) */
	u8 tid;
	union {
		/* STA - Identifier of the remote STA in IBSS, 1 in infra-BSS */
		u8 aid;
		/* AP - host link ID (HLID) */
		u8 hlid;
	} __packed;
	/* host link ID (HLID) */
	u8 hlid;
	u8 reserved;
} __packed;

@@ -133,7 +126,8 @@ enum wl1271_tx_hw_res_status {
	TX_TIMEOUT = 4,
	TX_KEY_NOT_FOUND = 5,
	TX_PEER_NOT_FOUND = 6,
	TX_SESSION_MISMATCH = 7
	TX_SESSION_MISMATCH = 7,
	TX_LINK_NOT_VALID = 8,
};

struct wl1271_tx_hw_res_descr {

@@ -216,7 +210,7 @@ void wl1271_tx_flush(struct wl1271 *wl);
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
u32 wl1271_tx_min_rate_get(struct wl1271 *wl);
u8 wl1271_tx_get_hlid(struct sk_buff *skb);
u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb);
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
void wl1271_handle_tx_low_watermark(struct wl1271 *wl);

@@ -112,28 +112,8 @@ extern u32 wl12xx_debug_level;
			       true); \
	} while (0)

#define WL1271_DEFAULT_STA_RX_CONFIG (CFG_UNI_FILTER_EN | \
				      CFG_BSSID_FILTER_EN | \
				      CFG_MC_FILTER_EN)

#define WL1271_DEFAULT_STA_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \
				      CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \
				      CFG_RX_CTL_EN | CFG_RX_BCN_EN | \
				      CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)

#define WL1271_DEFAULT_AP_RX_CONFIG 0

#define WL1271_DEFAULT_AP_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PREQ_EN | \
				     CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \
				     CFG_RX_CTL_EN | CFG_RX_AUTH_EN | \
				     CFG_RX_ASSOC_EN)


#define WL1271_FW_NAME "ti-connectivity/wl1271-fw-2.bin"
#define WL128X_FW_NAME "ti-connectivity/wl128x-fw.bin"
#define WL127X_AP_FW_NAME "ti-connectivity/wl1271-fw-ap.bin"
#define WL128X_AP_FW_NAME "ti-connectivity/wl128x-fw-ap.bin"
#define WL127X_FW_NAME "ti-connectivity/wl127x-fw-3.bin"
#define WL128X_FW_NAME "ti-connectivity/wl128x-fw-3.bin"

/*
 * wl127x and wl128x are using the same NVS file name. However, the

@@ -157,25 +137,34 @@ extern u32 wl12xx_debug_level;
#define WL1271_DEFAULT_BEACON_INT 100
#define WL1271_DEFAULT_DTIM_PERIOD 1

#define WL1271_AP_GLOBAL_HLID 0
#define WL1271_AP_BROADCAST_HLID 1
#define WL1271_AP_STA_HLID_START 2
#define WL12XX_MAX_ROLES 4
#define WL12XX_MAX_LINKS 8
#define WL12XX_INVALID_ROLE_ID 0xff
#define WL12XX_INVALID_LINK_ID 0xff

/* Defined by FW as 0. Will not be freed or allocated. */
#define WL12XX_SYSTEM_HLID 0

/*
 * When in AP-mode, we allow (at least) this number of mem-blocks
 * TODO: we currently don't support multirole. remove
 * this constant from the code when we do.
 */
#define WL1271_AP_STA_HLID_START 3

/*
 * When in AP-mode, we allow (at least) this number of packets
 * to be transmitted to FW for a STA in PS-mode. Only when packets are
 * present in the FW buffers it will wake the sleeping STA. We want to put
 * enough packets for the driver to transmit all of its buffered data before
 * the STA goes to sleep again. But we don't want to take too much mem-blocks
 * the STA goes to sleep again. But we don't want to take too much memory
 * as it might hurt the throughput of active STAs.
 * The number of blocks (18) is enough for 2 large packets.
 */
#define WL1271_PS_STA_MAX_BLOCKS (2 * 9)
#define WL1271_PS_STA_MAX_PACKETS 2

#define WL1271_AP_BSS_INDEX 0
#define WL1271_AP_DEF_BEACON_EXP 20

#define ACX_TX_DESCRIPTORS 32
#define ACX_TX_DESCRIPTORS 16

#define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)

@@ -247,26 +236,22 @@ struct wl1271_stats {

#define AP_MAX_STATIONS 5

/* Broadcast and Global links + links to stations */
#define AP_MAX_LINKS (AP_MAX_STATIONS + 2)
/* Broadcast and Global links + system link + links to stations */
/*
 * TODO: when WL1271_AP_STA_HLID_START is no longer constant, change all
 * the places that use this.
 */
#define AP_MAX_LINKS (AP_MAX_STATIONS + 3)

/* FW status registers common for AP/STA */
struct wl1271_fw_common_status {
/* FW status registers */
struct wl12xx_fw_status {
	__le32 intr;
	u8 fw_rx_counter;
	u8 drv_rx_counter;
	u8 reserved;
	u8 tx_results_counter;
	__le32 rx_pkt_descs[NUM_RX_PKT_DESC];
	__le32 tx_released_blks[NUM_TX_QUEUES];
	__le32 fw_localtime;
} __packed;

/* FW status registers for AP */
struct wl1271_fw_ap_status {
	struct wl1271_fw_common_status common;

	/* Next fields valid only in AP FW */

	/*
	 * A bitmap (where each bit represents a single HLID)

@@ -274,30 +259,30 @@ struct wl1271_fw_ap_status {
	 */
	__le32 link_ps_bitmap;

	/* Number of freed MBs per HLID */
	u8 tx_lnk_free_blks[AP_MAX_LINKS];
	u8 padding_1[1];
} __packed;
	/*
	 * A bitmap (where each bit represents a single HLID) to indicate
	 * if the station is in Fast mode
	 */
	__le32 link_fast_bitmap;

/* FW status registers for STA */
struct wl1271_fw_sta_status {
	struct wl1271_fw_common_status common;
	/* Cumulative counter of total released mem blocks since FW-reset */
	__le32 total_released_blks;

	u8 tx_total;
	u8 reserved1;
	__le16 reserved2;
	/* Size (in Memory Blocks) of TX pool */
	__le32 tx_total;

	/* Cumulative counter of released packets per AC */
	u8 tx_released_pkts[NUM_TX_QUEUES];

	/* Cumulative counter of freed packets per HLID */
	u8 tx_lnk_free_pkts[WL12XX_MAX_LINKS];

	/* Cumulative counter of released Voice memory blocks */
	u8 tx_voice_released_blks;
	u8 padding_1[7];
	__le32 log_start_addr;
} __packed;

struct wl1271_fw_full_status {
	union {
		struct wl1271_fw_common_status common;
		struct wl1271_fw_sta_status sta;
		struct wl1271_fw_ap_status ap;
	};
} __packed;


struct wl1271_rx_mem_pool_addr {
	u32 addr;
	u32 addr_extra;

@@ -342,7 +327,7 @@ struct wl1271_ap_key {

enum wl12xx_flags {
	WL1271_FLAG_STA_ASSOCIATED,
	WL1271_FLAG_JOINED,
	WL1271_FLAG_IBSS_JOINED,
	WL1271_FLAG_GPIO_POWER,
	WL1271_FLAG_TX_QUEUE_STOPPED,
	WL1271_FLAG_TX_PENDING,

@@ -369,11 +354,14 @@ struct wl1271_link {
	/* AP-mode - TX queue per AC in link */
	struct sk_buff_head tx_queue[NUM_TX_QUEUES];

	/* accounting for allocated / available TX blocks in FW */
	u8 allocated_blks;
	u8 prev_freed_blks;
	/* accounting for allocated / freed packets in FW */
	u8 allocated_pkts;
	u8 prev_freed_pkts;

	u8 addr[ETH_ALEN];

	/* bitmap of TIDs where RX BA sessions are active for this link */
	u8 ba_bitmap;
};

struct wl1271 {

@@ -405,7 +393,6 @@ struct wl1271 {

	u8 *fw;
	size_t fw_len;
	u8 fw_bss_type;
	void *nvs;
	size_t nvs_len;

@@ -418,15 +405,30 @@ struct wl1271 {
	u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
	u8 ssid_len;
	int channel;
	u8 role_id;
	u8 dev_role_id;
	u8 system_hlid;
	u8 sta_hlid;
	u8 dev_hlid;
	u8 ap_global_hlid;
	u8 ap_bcast_hlid;

	unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
	unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
	unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];

	struct wl1271_acx_mem_map *target_mem_map;

	/* Accounting for allocated / available TX blocks on HW */
	u32 tx_blocks_freed[NUM_TX_QUEUES];
	u32 tx_blocks_freed;
	u32 tx_blocks_available;
	u32 tx_allocated_blocks[NUM_TX_QUEUES];
	u32 tx_allocated_blocks;
	u32 tx_results_count;

	/* Accounting for allocated / available Tx packets in HW */
	u32 tx_pkts_freed[NUM_TX_QUEUES];
	u32 tx_allocated_pkts[NUM_TX_QUEUES];

	/* Transmitted TX packets counter for chipset interface */
	u32 tx_packets_count;

@@ -535,10 +537,6 @@ struct wl1271 {
	struct work_struct rx_streaming_disable_work;
	struct timer_list rx_streaming_timer;

	unsigned int filters;
	unsigned int rx_config;
	unsigned int rx_filter;

	struct completion *elp_compl;
	struct completion *ps_compl;
	struct delayed_work elp_work;

@@ -562,7 +560,7 @@ struct wl1271 {
	u32 buffer_cmd;
	u32 buffer_busyword[WL1271_BUSY_WORD_CNT];

	struct wl1271_fw_full_status *fw_status;
	struct wl12xx_fw_status *fw_status;
	struct wl1271_tx_hw_res_if *tx_res_if;

	struct ieee80211_vif *vif;

@@ -622,6 +620,9 @@ struct wl1271 {

	/* Platform limitations */
	unsigned int platform_quirks;

	/* number of currently active RX BA sessions */
	int ba_rx_session_count;
};

struct wl1271_station {

@@ -659,12 +660,6 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
/* Each RX/TX transaction requires an end-of-transaction transfer */
#define WL12XX_QUIRK_END_OF_TRANSACTION BIT(0)

/*
 * Older firmwares use 2 spare TX blocks
 * (for STA < 6.1.3.50.58 or for AP < 6.2.0.0.47)
 */
#define WL12XX_QUIRK_USE_2_SPARE_BLOCKS BIT(1)

/* WL128X requires aggregated packets to be aligned to the SDIO block size */
#define WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT BIT(2)

@@ -105,18 +105,6 @@ struct wl12xx_ie_country {

/* Templates */

struct wl12xx_beacon_template {
	struct ieee80211_header header;
	__le32 time_stamp[2];
	__le16 beacon_interval;
	__le16 capability;
	struct wl12xx_ie_ssid ssid;
	struct wl12xx_ie_rates rates;
	struct wl12xx_ie_rates ext_rates;
	struct wl12xx_ie_ds_params ds_params;
	struct wl12xx_ie_country country;
} __packed;

struct wl12xx_null_data_template {
	struct ieee80211_header header;
} __packed;

@@ -146,19 +134,6 @@ struct wl12xx_arp_rsp_template {
	__be32 target_ip;
} __packed;


struct wl12xx_probe_resp_template {
	struct ieee80211_header header;
	__le32 time_stamp[2];
	__le16 beacon_interval;
	__le16 capability;
	struct wl12xx_ie_ssid ssid;
	struct wl12xx_ie_rates rates;
	struct wl12xx_ie_rates ext_rates;
	struct wl12xx_ie_ds_params ds_params;
	struct wl12xx_ie_country country;
} __packed;

struct wl12xx_disconn_template {
	struct ieee80211_header header;
	__le16 disconn_reason;

@@ -1792,6 +1792,7 @@ struct wiphy_wowlan_support {
 * @debugfsdir: debugfs directory used for this wiphy, will be renamed
 *	automatically on wiphy renames
 * @dev: (virtual) struct device for this wiphy
 * @registered: helps synchronize suspend/resume with wiphy unregister
 * @wext: wireless extension handlers
 * @priv: driver private data (sized according to wiphy_new() parameter)
 * @interface_modes: bitmask of interfaces types valid for this wiphy,

@@ -195,20 +195,12 @@ static ssize_t uapsd_queues_write(struct file *file,
				  size_t count, loff_t *ppos)
{
	struct ieee80211_local *local = file->private_data;
	unsigned long val;
	char buf[10];
	size_t len;
	u8 val;
	int ret;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;
	buf[len] = '\0';

	ret = strict_strtoul(buf, 0, &val);

	ret = kstrtou8_from_user(user_buf, count, 0, &val);
	if (ret)
		return -EINVAL;
		return ret;

	if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
		return -ERANGE;
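
Illustrative sketch (not from the patch): the hunk above drops the hand-rolled copy_from_user()/strict_strtoul() sequence in favour of kstrtou8_from_user(), which copies, NUL-terminates and parses the user buffer in one call (base 0 auto-detects 0x/0 prefixes) and also propagates the parser's own error code. The snippet below shows the general shape of such a debugfs write handler; the handler name and example_value are hypothetical.

/* Sketch of the kstrtou8_from_user() pattern; not code from this series. */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

static u8 example_value;

static ssize_t example_write(struct file *file, const char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	u8 val;
	int ret;

	/* Reads at most `count` bytes of user memory and parses them as
	 * an unsigned 8-bit integer. */
	ret = kstrtou8_from_user(user_buf, count, 0, &val);
	if (ret)
		return ret;

	example_value = val;
	return count;
}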

@@ -307,14 +307,14 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,

	while (num_skbs--) {
		skb = __skb_dequeue(&failq);
		if (copy)
		if (copy) {
			cp_skb = skb_copy(skb, GFP_ATOMIC);
			if (cp_skb)
				__skb_queue_tail(&failq, cp_skb);
		}

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		__skb_queue_tail(&gateq, skb);

		if (copy && cp_skb)
			__skb_queue_tail(&failq, cp_skb);
	}

	spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);