crypto: qat - remove intermediate tasklet for vf2pf
The PF driver uses the tasklet vf2pf_bh_tasklet to schedule a workqueue (pf2vf_resp_wq) that handles the vf2pf protocol. Since the tasklet is only used to schedule the workqueue, remove it and queue work on pf2vf_resp_wq directly from the top half.

Signed-off-by: Svyatoslav Pankratov <svyatoslav.pankratov@intel.com>
Co-developed-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Marco Chiappero <marco.chiappero@intel.com>
Reviewed-by: Fiona Trahe <fiona.trahe@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Parent: 506a166429
Commit: e6eefd12dd
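To illustrate the deferral pattern being simplified here: before the patch the hard IRQ handler scheduled a tasklet whose only job was to queue work on a workqueue; afterwards the hard IRQ handler queues the work item directly, which queue_work() permits from interrupt context. The sketch below is illustrative only and not part of this patch; the names resp_wq, resp_work, isr_old and isr_new are made up, and setup calls such as alloc_workqueue(), INIT_WORK() and tasklet_init() are omitted for brevity.

#include <linux/interrupt.h>
#include <linux/workqueue.h>

/* Stand-ins for the driver's pf2vf_resp_wq and its per-response work item
 * (illustrative names only; setup and teardown omitted).
 */
static struct workqueue_struct *resp_wq;
static struct work_struct resp_work;
static struct tasklet_struct vf2pf_bh_tasklet;

/* Old scheme: hard IRQ -> tasklet -> workqueue. The tasklet body does
 * nothing except queue the work item, i.e. it is a pure extra hop.
 */
static void vf2pf_bh_handler(unsigned long data)
{
	queue_work(resp_wq, &resp_work);
}

static irqreturn_t isr_old(int irq, void *dev_ptr)
{
	tasklet_hi_schedule(&vf2pf_bh_tasklet);
	return IRQ_HANDLED;
}

/* New scheme: hard IRQ -> workqueue. queue_work() may be called from hard
 * interrupt context, so the intermediate tasklet can simply be dropped.
 */
static irqreturn_t isr_new(int irq, void *dev_ptr)
{
	queue_work(resp_wq, &resp_work);
	return IRQ_HANDLED;
}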
@@ -225,7 +225,6 @@ struct adf_fw_loader_data {
 struct adf_accel_vf_info {
 	struct adf_accel_dev *accel_dev;
-	struct tasklet_struct vf2pf_bh_tasklet;
 	struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
 	struct ratelimit_state vf2pf_ratelimit;
 	u32 vf_nr;
@@ -197,6 +197,7 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
 				 u32 vf_mask);
 void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
 void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info);
 int adf_vf2pf_init(struct adf_accel_dev *accel_dev);
 void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev);
@@ -106,9 +106,8 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
 		adf_disable_vf2pf_interrupts(accel_dev, vf_mask);

 		/*
-		 * Schedule tasklets to handle VF2PF interrupt BHs
-		 * unless the VF is malicious and is attempting to
-		 * flood the host OS with VF2PF interrupts.
+		 * Handle VF2PF interrupt unless the VF is malicious and
+		 * is attempting to flood the host OS with VF2PF interrupts.
		 */
 		for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
 			vf_info = accel_dev->pf.vf_info + i;
@@ -120,8 +119,7 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
 				continue;
 			}

-			/* Tasklet will re-enable ints from this VF */
-			tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet);
+			adf_schedule_vf2pf_handler(vf_info);
 			irq_handled = true;
 		}
@@ -24,9 +24,8 @@ static void adf_iov_send_resp(struct work_struct *work)
 	kfree(pf2vf_resp);
 }

-static void adf_vf2pf_bh_handler(void *data)
+void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info)
 {
-	struct adf_accel_vf_info *vf_info = (struct adf_accel_vf_info *)data;
 	struct adf_pf2vf_resp *pf2vf_resp;

 	pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
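For reference, after this hunk the handler plausibly ends up looking like the sketch below. It is reconstructed from the visible context lines; the struct adf_pf2vf_resp field names (vf_info, pf2vf_resp_work) and the adf_iov_send_resp() work function wiring are assumptions based on this file, not quoted from the patch.

void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info)
{
	struct adf_pf2vf_resp *pf2vf_resp;

	/* Called from the top half, hence the atomic allocation. */
	pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
	if (!pf2vf_resp)
		return;

	/* Field names assumed: the work item is queued on pf2vf_resp_wq
	 * and processed by adf_iov_send_resp() in process context.
	 */
	pf2vf_resp->vf_info = vf_info;
	INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
	queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
}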
@@ -52,9 +51,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
 		vf_info->accel_dev = accel_dev;
 		vf_info->vf_nr = i;

-		tasklet_init(&vf_info->vf2pf_bh_tasklet,
-			     (void *)adf_vf2pf_bh_handler,
-			     (unsigned long)vf_info);
 		mutex_init(&vf_info->pf2vf_lock);
 		ratelimit_state_init(&vf_info->vf2pf_ratelimit,
 				     DEFAULT_RATELIMIT_INTERVAL,
@@ -110,8 +106,6 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
 	hw_data->configure_iov_threads(accel_dev, false);

 	for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
-		tasklet_disable(&vf->vf2pf_bh_tasklet);
-		tasklet_kill(&vf->vf2pf_bh_tasklet);
 		mutex_destroy(&vf->pf2vf_lock);
 	}