iwl3945: use iwl_rx_queue in iwl3945

The patch replaces iwl3945_rx_queue with iwl_rx_queue.

Signed-off-by: Abhijeet Kolekar <abhijeet.kolekar@intel.com>
Signed-off-by: Zhu Yi <yi.zhu@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Abhijeet Kolekar 2008-12-19 10:37:25 +08:00, committed by John W. Linville
Parent 6100b58806
Commit cc2f362c36
3 changed files with 13 additions and 40 deletions
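For orientation, the shared Rx queue type that the 3945 code switches to is expected to mirror, field for field, the structure deleted below. A minimal sketch, assuming the common definition lives in the iwlwifi core header (iwl-dev.h) rather than the 3945-private header:

struct iwl_rx_queue {
	__le32 *bd;			/* receive buffer descriptor ring */
	dma_addr_t dma_addr;		/* bus address of the bd ring */
	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
	u32 processed;			/* internal index of last handled Rx packet */
	u32 read;			/* shared index to newest available Rx buffer */
	u32 write;			/* shared index to oldest written Rx packet */
	u32 free_count;			/* buffers currently on the rx_free list */
	struct list_head rx_free;	/* buffers with an SKB, ready for hardware */
	struct list_head rx_used;	/* buffers with no SKB attached */
	int need_update;		/* read/write index needs pushing to hardware */
	spinlock_t lock;
};

Because the layouts match, the patch is effectively a type rename: every hunk below only swaps the type name, and the Rx path logic is untouched.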

View file

@@ -961,7 +961,7 @@ static int iwl3945_nic_set_pwr_src(struct iwl3945_priv *priv, int pwr_max)
return rc;
}
-static int iwl3945_rx_init(struct iwl3945_priv *priv, struct iwl3945_rx_queue *rxq)
+static int iwl3945_rx_init(struct iwl3945_priv *priv, struct iwl_rx_queue *rxq)
{
int rc;
unsigned long flags;
@@ -1082,7 +1082,7 @@ int iwl3945_hw_nic_init(struct iwl3945_priv *priv)
u8 rev_id;
int rc;
unsigned long flags;
-struct iwl3945_rx_queue *rxq = &priv->rxq;
+struct iwl_rx_queue *rxq = &priv->rxq;
iwl3945_power_init_handle(priv);

View file

@@ -213,33 +213,6 @@ struct iwl3945_host_cmd {
#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
-/**
-* struct iwl3945_rx_queue - Rx queue
-* @processed: Internal index to last handled Rx packet
-* @read: Shared index to newest available Rx buffer
-* @write: Shared index to oldest written Rx packet
-* @free_count: Number of pre-allocated buffers in rx_free
-* @rx_free: list of free SKBs for use
-* @rx_used: List of Rx buffers with no SKB
-* @need_update: flag to indicate we need to update read/write index
-*
-* NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
-*/
-struct iwl3945_rx_queue {
-__le32 *bd;
-dma_addr_t dma_addr;
-struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
-struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
-u32 processed;
-u32 read;
-u32 write;
-u32 free_count;
-struct list_head rx_free;
-struct list_head rx_used;
-int need_update;
-spinlock_t lock;
-};
#define IWL_SUPPORTED_RATES_IE_LEN 8
#define SCAN_INTERVAL 100
@@ -333,7 +306,7 @@ extern int iwl3945_power_init_handle(struct iwl3945_priv *priv);
extern int iwl3945_eeprom_init(struct iwl3945_priv *priv);
extern int iwl3945_rx_queue_alloc(struct iwl3945_priv *priv);
extern void iwl3945_rx_queue_reset(struct iwl3945_priv *priv,
-struct iwl3945_rx_queue *rxq);
+struct iwl_rx_queue *rxq);
extern int iwl3945_calc_db_from_ratio(int sig_ratio);
extern int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm);
extern int iwl3945_tx_queue_init(struct iwl3945_priv *priv,
@@ -347,7 +320,7 @@ extern int __must_check iwl3945_send_cmd(struct iwl3945_priv *priv,
extern unsigned int iwl3945_fill_beacon_frame(struct iwl3945_priv *priv,
struct ieee80211_hdr *hdr, int left);
extern int iwl3945_rx_queue_update_write_ptr(struct iwl3945_priv *priv,
-struct iwl3945_rx_queue *q);
+struct iwl_rx_queue *q);
extern int iwl3945_send_statistics_request(struct iwl3945_priv *priv);
extern void iwl3945_set_decrypted_flag(struct iwl3945_priv *priv, struct sk_buff *skb,
u32 decrypt_res,
@@ -564,7 +537,7 @@ struct iwl3945_priv {
int activity_timer_active;
/* Rx and Tx DMA processing queues */
-struct iwl3945_rx_queue rxq;
+struct iwl_rx_queue rxq;
struct iwl3945_tx_queue txq[IWL39_MAX_NUM_QUEUES];
unsigned long status;
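With priv->rxq retyped, the 3945 helpers declared above take the shared queue type directly. A hypothetical call site (illustrative only, not part of this patch), using the prototype shown earlier which returns int:

	struct iwl_rx_queue *rxq = &priv->rxq;
	int rc = iwl3945_rx_queue_update_write_ptr(priv, rxq);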

View file

@@ -3358,7 +3358,7 @@ static void iwl3945_tx_cmd_complete(struct iwl3945_priv *priv,
/**
* iwl3945_rx_queue_space - Return number of free slots available in queue.
*/
-static int iwl3945_rx_queue_space(const struct iwl3945_rx_queue *q)
+static int iwl3945_rx_queue_space(const struct iwl_rx_queue *q)
{
int s = q->read - q->write;
if (s <= 0)
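The hunk shows only the start of the free-slot calculation. The rest of the helper, sketched here from the visible context (not quoted verbatim from the driver), wraps the read/write difference around the ring size and holds back two slots so a completely full ring is never mistaken for an empty one:

static int iwl3945_rx_queue_space(const struct iwl_rx_queue *q)
{
	int s = q->read - q->write;

	if (s <= 0)
		s += RX_QUEUE_SIZE;	/* indices wrapped around the ring */
	s -= 2;				/* guard slots: keep full distinguishable from empty */
	if (s < 0)
		s = 0;
	return s;
}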
@@ -3373,7 +3373,7 @@ static int iwl3945_rx_queue_space(const struct iwl3945_rx_queue *q)
/**
* iwl3945_rx_queue_update_write_ptr - Update the write pointer for the RX queue
*/
-int iwl3945_rx_queue_update_write_ptr(struct iwl3945_priv *priv, struct iwl3945_rx_queue *q)
+int iwl3945_rx_queue_update_write_ptr(struct iwl3945_priv *priv, struct iwl_rx_queue *q)
{
u32 reg = 0;
int rc = 0;
@@ -3438,7 +3438,7 @@ static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl3945_priv *priv,
*/
static int iwl3945_rx_queue_restock(struct iwl3945_priv *priv)
{
-struct iwl3945_rx_queue *rxq = &priv->rxq;
+struct iwl_rx_queue *rxq = &priv->rxq;
struct list_head *element;
struct iwl_rx_mem_buffer *rxb;
unsigned long flags;
@@ -3490,7 +3490,7 @@ static int iwl3945_rx_queue_restock(struct iwl3945_priv *priv)
*/
static void iwl3945_rx_allocate(struct iwl3945_priv *priv)
{
-struct iwl3945_rx_queue *rxq = &priv->rxq;
+struct iwl_rx_queue *rxq = &priv->rxq;
struct list_head *element;
struct iwl_rx_mem_buffer *rxb;
unsigned long flags;
@@ -3562,7 +3562,7 @@ void iwl3945_rx_replenish(void *data)
* This free routine walks the list of POOL entries and if SKB is set to
* non NULL it is unmapped and freed
*/
-static void iwl3945_rx_queue_free(struct iwl3945_priv *priv, struct iwl3945_rx_queue *rxq)
+static void iwl3945_rx_queue_free(struct iwl3945_priv *priv, struct iwl_rx_queue *rxq)
{
int i;
for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
@@ -3581,7 +3581,7 @@ static void iwl3945_rx_queue_free(struct iwl3945_priv *priv, struct iwl3945_rx_q
int iwl3945_rx_queue_alloc(struct iwl3945_priv *priv)
{
-struct iwl3945_rx_queue *rxq = &priv->rxq;
+struct iwl_rx_queue *rxq = &priv->rxq;
struct pci_dev *dev = priv->pci_dev;
int i;
@@ -3606,7 +3606,7 @@ int iwl3945_rx_queue_alloc(struct iwl3945_priv *priv)
return 0;
}
-void iwl3945_rx_queue_reset(struct iwl3945_priv *priv, struct iwl3945_rx_queue *rxq)
+void iwl3945_rx_queue_reset(struct iwl3945_priv *priv, struct iwl_rx_queue *rxq)
{
unsigned long flags;
int i;
@@ -3724,7 +3724,7 @@ static void iwl3945_rx_handle(struct iwl3945_priv *priv)
{
struct iwl_rx_mem_buffer *rxb;
struct iwl_rx_packet *pkt;
-struct iwl3945_rx_queue *rxq = &priv->rxq;
+struct iwl_rx_queue *rxq = &priv->rxq;
u32 r, i;
int reclaim;
unsigned long flags;