scsi: scsi_debug: mq_poll support
Add support of the mq_poll interface to scsi_debug. This feature requires shared host tag support in kernel and driver. Signed-off-by: Kashyap Desai <kashyap.desai@broadcom.com> Acked-by: Douglas Gilbert <dgilbert@interlog.com> Tested-by: Douglas Gilbert <dgilbert@interlog.com> Reviewed-by: Hannes Reinecke <hare@suse.de> Link: https://lore.kernel.org/r/20210215074048.19424-4-kashyap.desai@broadcom.com Cc: dgilbert@interlog.com Cc: linux-block@vger.kernel.org Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
This commit is contained in:
Parent
9e4bec5b2a
Commit
c4b57d89ba
|
@ -829,6 +829,7 @@ static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
|
|||
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
|
||||
|
||||
static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
|
||||
static int poll_queues; /* iouring iopoll interface.*/
|
||||
static struct sdebug_queue *sdebug_q_arr; /* ptr to array of submit queues */
|
||||
|
||||
static DEFINE_RWLOCK(atomic_rw);
|
||||
|
@ -5432,6 +5433,14 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
|
|||
cmnd->host_scribble = (unsigned char *)sqcp;
|
||||
sd_dp = sqcp->sd_dp;
|
||||
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
|
||||
|
||||
/* Do not complete IO from default completion path.
|
||||
* Let it to be on queue.
|
||||
* Completion should happen from mq_poll interface.
|
||||
*/
|
||||
if ((sqp - sdebug_q_arr) >= (submit_queues - poll_queues))
|
||||
return 0;
|
||||
|
||||
if (!sd_dp) {
|
||||
sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
|
||||
if (!sd_dp) {
|
||||
|
@ -5615,6 +5624,7 @@ module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
|
|||
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
|
||||
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
|
||||
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
|
||||
module_param_named(poll_queues, poll_queues, int, S_IRUGO);
|
||||
module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
|
||||
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
|
||||
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
|
||||
|
@ -5677,6 +5687,7 @@ MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent
|
|||
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
|
||||
MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
|
||||
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
|
||||
/* Fixed: description string had an unbalanced ')' — "max(submit_queues - 1" was never closed. */
MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
|
||||
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
|
||||
MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
|
||||
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
|
||||
|
@ -7202,6 +7213,104 @@ static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
|
|||
return check_condition_result;
|
||||
}
|
||||
|
||||
static int sdebug_map_queues(struct Scsi_Host *shost)
|
||||
{
|
||||
int i, qoff;
|
||||
|
||||
if (shost->nr_hw_queues == 1)
|
||||
return 0;
|
||||
|
||||
for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
|
||||
struct blk_mq_queue_map *map = &shost->tag_set.map[i];
|
||||
|
||||
map->nr_queues = 0;
|
||||
|
||||
if (i == HCTX_TYPE_DEFAULT)
|
||||
map->nr_queues = submit_queues - poll_queues;
|
||||
else if (i == HCTX_TYPE_POLL)
|
||||
map->nr_queues = poll_queues;
|
||||
|
||||
if (!map->nr_queues) {
|
||||
BUG_ON(i == HCTX_TYPE_DEFAULT);
|
||||
continue;
|
||||
}
|
||||
|
||||
map->queue_offset = qoff;
|
||||
blk_mq_map_queues(map);
|
||||
|
||||
qoff += map->nr_queues;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
/*
 * .mq_poll handler: complete commands parked on the poll queue
 * identified by @queue_num.  schedule_resp() leaves commands for
 * poll-type hw queues on the submit queue instead of completing them;
 * this routine drains them, invoking scsi_done() for each.
 * Returns the number of commands completed in this invocation.
 *
 * NOTE(review): every command found in in_use_bm is completed
 * immediately, regardless of any simulated delay — presumably
 * intentional for the iopoll path; confirm against schedule_resp().
 */
static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;
	int num_entries = 0;	/* completions delivered; returned to caller */

	sqp = sdebug_q_arr + queue_num;

	do {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		/*
		 * find_first_bit() returns sdebug_max_queue when no bit is
		 * set, so the ">=" test is the live exit condition; the
		 * "< 0" test can never fire (unsigned-style contract) and
		 * looks like defensive leftover.
		 */
		qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		if (unlikely((qc_idx < 0) || (qc_idx >= sdebug_max_queue)))
			goto out;	/* queue drained; unlock at out: */

		sqcp = &sqp->qc_arr[qc_idx];
		scp = sqcp->a_cmnd;
		if (unlikely(scp == NULL)) {
			/* in_use bit set but no command attached: internal inconsistency */
			pr_err("scp is NULL, queue_num=%d, qc_idx=%d from %s\n",
				queue_num, qc_idx, __func__);
			goto out;
		}
		devip = (struct sdebug_dev_info *)scp->device->hostdata;
		if (likely(devip))
			atomic_dec(&devip->num_in_q);	/* per-device in-flight count */
		else
			pr_err("devip=NULL from %s\n", __func__);
		/* Nonzero retired_max_queue means the user shrank max_queue. */
		if (unlikely(atomic_read(&retired_max_queue) > 0))
			retiring = 1;

		sqcp->a_cmnd = NULL;
		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
			/* someone else already cleared the slot — bail out */
			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%d from %s\n",
				sqp, queue_num, qc_idx, __func__);
			goto out;
		}

		if (unlikely(retiring)) {	/* user has reduced max_queue */
			int k, retval;

			retval = atomic_read(&retired_max_queue);
			if (qc_idx >= retval) {
				pr_err("index %d too large\n", retval);
				goto out;
			}
			/*
			 * Shrink (or clear) retired_max_queue down to just
			 * past the highest still-busy slot.
			 */
			k = find_last_bit(sqp->in_use_bm, retval);
			if ((k < sdebug_max_queue) || (k == retval))
				atomic_set(&retired_max_queue, 0);
			else
				atomic_set(&retired_max_queue, k + 1);
		}
		/* Drop the lock before calling back into the midlayer. */
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		scp->scsi_done(scp);	/* callback to mid level */
		num_entries++;
	} while (1);	/* loop re-takes the lock and looks for the next bit */

out:
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	return num_entries;
}
|
||||
|
||||
|
||||
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
|
||||
struct scsi_cmnd *scp)
|
||||
{
|
||||
|
@ -7381,6 +7490,8 @@ static struct scsi_host_template sdebug_driver_template = {
|
|||
.ioctl = scsi_debug_ioctl,
|
||||
.queuecommand = scsi_debug_queuecommand,
|
||||
.change_queue_depth = sdebug_change_qdepth,
|
||||
.map_queues = sdebug_map_queues,
|
||||
.mq_poll = sdebug_blk_mq_poll,
|
||||
.eh_abort_handler = scsi_debug_abort,
|
||||
.eh_device_reset_handler = scsi_debug_device_reset,
|
||||
.eh_target_reset_handler = scsi_debug_target_reset,
|
||||
|
@ -7428,6 +7539,25 @@ static int sdebug_driver_probe(struct device *dev)
|
|||
if (sdebug_host_max_queue)
|
||||
hpnt->host_tagset = 1;
|
||||
|
||||
/* poll queues are possible for nr_hw_queues > 1 */
|
||||
if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
|
||||
pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
|
||||
my_name, poll_queues, hpnt->nr_hw_queues);
|
||||
poll_queues = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Poll queues don't need interrupts, but we need at least one I/O queue
|
||||
* left over for non-polled I/O.
|
||||
* If condition not met, trim poll_queues to 1 (just for simplicity).
|
||||
*/
|
||||
if (poll_queues >= submit_queues) {
|
||||
pr_warn("%s: trim poll_queues to 1\n", my_name);
|
||||
poll_queues = 1;
|
||||
}
|
||||
if (poll_queues)
|
||||
hpnt->nr_maps = 3;
|
||||
|
||||
sdbg_host->shost = hpnt;
|
||||
*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
|
||||
if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
|
||||
|
|
Loading…
Link in new issue