IB/mlx4: Optimize do_slave_init
Our memory allocations are unlikely to fail, so combine initializing the work structs with allocating them, instead of looping through them once to allocate and again to initialize. Then, when we need to find out whether the device is up or in the process of going down, we already have all of the work structs batched up: take the spin_lock once and only once, and queue (or free) the whole batch under that single spin_lock invocation, instead of paying the locked-memory-cycle cost of taking and releasing the spin_lock over and over again.

Signed-off-by: Doug Ledford <dledford@redhat.com>
This commit is contained in:
Parent: 9bbf282da8
Commit: d9a047aeff
@@ -2681,20 +2681,22 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
                         kfree(dm[i]);
                         goto out;
                 }
-        }
-        /* initialize or tear down tunnel QPs for the slave */
-        for (i = 0; i < ports; i++) {
                 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
                 dm[i]->port = first_port + i + 1;
                 dm[i]->slave = slave;
                 dm[i]->do_init = do_init;
                 dm[i]->dev = ibdev;
-                spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
-                if (!ibdev->sriov.is_going_down)
+        }
+        /* initialize or tear down tunnel QPs for the slave */
+        spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
+        if (!ibdev->sriov.is_going_down) {
+                for (i = 0; i < ports; i++)
                         queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
-                else
-                        kfree(dm[i]);
                 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
+        } else {
+                spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
+                for (i = 0; i < ports; i++)
+                        kfree(dm[i]);
         }
 out:
         kfree(dm);
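For context, below is a minimal, self-contained userspace sketch of the pattern this change adopts; it is not the mlx4 driver code itself. Each work item is allocated and fully initialized in a single pass, and the lock guarding the "going down" state is then taken exactly once for the whole batch. The names (work_item, queue_item, going_down, slave_init) and the use of a pthread spinlock in place of the kernel's spin_lock_irqsave() are illustrative assumptions, and queue_item() merely stands in for queue_work().

/*
 * Sketch of the "allocate+initialize in one loop, then lock once for the
 * whole batch" pattern.  Compile with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work_item {
        int port;
        int slave;
        int do_init;
};

static pthread_spinlock_t going_down_lock;
static int going_down;  /* a teardown path would set this under the lock */

static void queue_item(struct work_item *w)
{
        /*
         * Stand-in for queue_work(): report what would be queued.  In the
         * real driver the queued work handler owns and later frees the
         * item; freeing it here keeps the sketch leak-free.
         */
        printf("queued work for port %d (slave %d, do_init %d)\n",
               w->port, w->slave, w->do_init);
        free(w);
}

static void slave_init(int slave, int do_init, unsigned int ports,
                       unsigned int first_port)
{
        struct work_item **dm;
        unsigned int i;

        dm = calloc(ports, sizeof(*dm));
        if (!dm)
                return;

        /* one pass: allocate and fully initialize each work item */
        for (i = 0; i < ports; i++) {
                dm[i] = malloc(sizeof(*dm[i]));
                if (!dm[i]) {
                        /* free the items already allocated, then bail out */
                        while (i--)
                                free(dm[i]);
                        goto out;
                }
                dm[i]->port = first_port + i + 1;
                dm[i]->slave = slave;
                dm[i]->do_init = do_init;
        }

        /* one lock acquisition for the whole batch */
        pthread_spin_lock(&going_down_lock);
        if (!going_down) {
                for (i = 0; i < ports; i++)
                        queue_item(dm[i]);
                pthread_spin_unlock(&going_down_lock);
        } else {
                pthread_spin_unlock(&going_down_lock);
                for (i = 0; i < ports; i++)
                        free(dm[i]);
        }
out:
        free(dm);
}

int main(void)
{
        pthread_spin_init(&going_down_lock, PTHREAD_PROCESS_PRIVATE);
        slave_init(1, 1, 2, 0);
        pthread_spin_destroy(&going_down_lock);
        return 0;
}

One observable consequence of the batching, visible in the diff above, is that the is_going_down check becomes all-or-nothing for the batch rather than per item; in exchange, the lock is taken and released once instead of once per port.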