bna: Add Callback to Fix RXQ Stop

Change details:
 - Add a callback in the BNA, which is called before sending the FW command to
   stop RxQs. After this callback is called, the driver should not post any more
   Rx buffers to the RxQ. This addresses a small window where the driver posts Rx
   buffers while FW is stopping/has stopped the RxQ.
 - Registering callback function, rx_stall_cbfn, during bna_rx_create.
   Invoking callback function, rx_stall_cbfn, before sending rx_cfg_clr
   command to FW
 - Bnad_cb_rx_stall implementation - set a flag in the Rxq to mark buffer
   posting disabled state. While posting buffers check for the above flag.

Signed-off-by: Gurunatha Karaje <gkaraje@brocade.com>
Signed-off-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Rasesh Mody 2011-09-27 10:39:10 +00:00 committed by David S. Miller
Parent aafd5c2c3c
Commit 5bcf6ac036
4 changed files with 38 additions and 1 deletion

View file

@ -1335,6 +1335,12 @@ do { \
} \ } \
} while (0) } while (0)
/*
 * Invoke the optional rx_stall_cbfn callback (registered by the driver in
 * bna_rx_create) so the driver stops posting Rx buffers before the RxQ is
 * stopped in FW. No-op when no callback was registered.
 */
#define call_rx_stall_cbfn(rx) \
do { \
if ((rx)->rx_stall_cbfn) \
(rx)->rx_stall_cbfn((rx)->bna->bnad, (rx)); \
} while (0)
#define bfi_enet_datapath_q_init(bfi_q, bna_qpt) \ #define bfi_enet_datapath_q_init(bfi_q, bna_qpt) \
do { \ do { \
struct bna_dma_addr cur_q_addr = \ struct bna_dma_addr cur_q_addr = \
@ -1467,6 +1473,7 @@ bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
case RX_E_FAIL: case RX_E_FAIL:
bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait); bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
bna_rxf_fail(&rx->rxf); bna_rxf_fail(&rx->rxf);
call_rx_stall_cbfn(rx);
rx->rx_cleanup_cbfn(rx->bna->bnad, rx); rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
break; break;
@ -1476,6 +1483,7 @@ bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
case RX_E_RXF_STOPPED: case RX_E_RXF_STOPPED:
bfa_fsm_set_state(rx, bna_rx_sm_stop_wait); bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
call_rx_stall_cbfn(rx);
bna_rx_enet_stop(rx); bna_rx_enet_stop(rx);
break; break;
@ -1516,6 +1524,7 @@ bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
bfa_fsm_set_state(rx, bna_rx_sm_failed); bfa_fsm_set_state(rx, bna_rx_sm_failed);
bna_ethport_cb_rx_stopped(&rx->bna->ethport); bna_ethport_cb_rx_stopped(&rx->bna->ethport);
bna_rxf_fail(&rx->rxf); bna_rxf_fail(&rx->rxf);
call_rx_stall_cbfn(rx);
rx->rx_cleanup_cbfn(rx->bna->bnad, rx); rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
break; break;
@ -1536,6 +1545,7 @@ static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
case RX_E_FAIL: case RX_E_FAIL:
bfa_fsm_set_state(rx, bna_rx_sm_failed); bfa_fsm_set_state(rx, bna_rx_sm_failed);
bna_rxf_fail(&rx->rxf); bna_rxf_fail(&rx->rxf);
call_rx_stall_cbfn(rx);
rx->rx_cleanup_cbfn(rx->bna->bnad, rx); rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
break; break;
@ -2369,6 +2379,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn; rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn; rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn; rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
/* Following callbacks are mandatory */ /* Following callbacks are mandatory */
rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn; rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn; rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;

View file

@ -847,6 +847,7 @@ struct bna_rx {
void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *); void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *); void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *); void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
void (*rx_stall_cbfn)(struct bnad *, struct bna_rx *);
void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *); void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *);
void (*rx_post_cbfn)(struct bnad *, struct bna_rx *); void (*rx_post_cbfn)(struct bnad *, struct bna_rx *);
@ -864,6 +865,7 @@ struct bna_rx_event_cbfn {
void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *); void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *); void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *); void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
void (*rx_stall_cbfn)(struct bnad *, struct bna_rx *);
/* Mandatory */ /* Mandatory */
void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *); void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *);
void (*rx_post_cbfn)(struct bnad *, struct bna_rx *); void (*rx_post_cbfn)(struct bnad *, struct bna_rx *);

View file

@ -396,7 +396,7 @@ finishing:
unmap_q->producer_index = unmap_prod; unmap_q->producer_index = unmap_prod;
rcb->producer_index = unmap_prod; rcb->producer_index = unmap_prod;
smp_mb(); smp_mb();
if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags))) if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
bna_rxq_prod_indx_doorbell(rcb); bna_rxq_prod_indx_doorbell(rcb);
} }
} }
@ -955,6 +955,27 @@ bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
bna_tx_cleanup_complete(tx); bna_tx_cleanup_complete(tx);
} }
/*
 * bnad_cb_rx_stall - BNA rx-stall callback, registered as rx_stall_cbfn in
 * bnad_setup_rx. Invoked by the BNA before it sends the rx_cfg_clr (RxQ
 * stop) command to FW. Clears BNAD_RXQ_POST_OK on each RCB of every CCB in
 * this Rx; the buffer-posting path tests this flag before ringing the
 * doorbell, so clearing it disables further Rx buffer posting.
 * Note: the bnad parameter is unused here; the Rx state lives in rx->priv.
 */
static void
bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
{
struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
struct bna_ccb *ccb;
struct bnad_rx_ctrl *rx_ctrl;
int i;
/* Walk all Rx paths of this Rx object */
for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
rx_ctrl = &rx_info->rx_ctrl[i];
ccb = rx_ctrl->ccb;
/* Skip Rx paths whose CCB was never set up */
if (!ccb)
continue;
clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
/* Second RCB exists only for multi-RxQ configurations */
if (ccb->rcb[1])
clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
}
}
static void static void
bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx) bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
{ {
@ -1009,6 +1030,7 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
bnad_free_all_rxbufs(bnad, rcb); bnad_free_all_rxbufs(bnad, rcb);
set_bit(BNAD_RXQ_STARTED, &rcb->flags); set_bit(BNAD_RXQ_STARTED, &rcb->flags);
set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
unmap_q = rcb->unmap_q; unmap_q = rcb->unmap_q;
/* Now allocate & post buffers for this RCB */ /* Now allocate & post buffers for this RCB */
@ -1898,6 +1920,7 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
.rcb_destroy_cbfn = bnad_cb_rcb_destroy, .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
.ccb_setup_cbfn = bnad_cb_ccb_setup, .ccb_setup_cbfn = bnad_cb_ccb_setup,
.ccb_destroy_cbfn = bnad_cb_ccb_destroy, .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
.rx_stall_cbfn = bnad_cb_rx_stall,
.rx_cleanup_cbfn = bnad_cb_rx_cleanup, .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
.rx_post_cbfn = bnad_cb_rx_post, .rx_post_cbfn = bnad_cb_rx_post,
}; };

View file

@ -103,6 +103,7 @@ struct bnad_rx_ctrl {
/* Bit positions for rcb->flags */ /* Bit positions for rcb->flags */
#define BNAD_RXQ_REFILL 0 #define BNAD_RXQ_REFILL 0
#define BNAD_RXQ_STARTED 1 #define BNAD_RXQ_STARTED 1
#define BNAD_RXQ_POST_OK 2
/* Resource limits */ /* Resource limits */
#define BNAD_NUM_TXQ (bnad->num_tx * bnad->num_txq_per_tx) #define BNAD_NUM_TXQ (bnad->num_tx * bnad->num_txq_per_tx)