target: remove struct se_task
We can use struct se_cmd for everything it did.  Make sure to pass the S/G
list and data direction to the execution function to ease adding back BIDI
support later on.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Parent: cf572a9627
Commit: 5787cacd0b
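The heart of the change is the backend API: instead of allocating a struct se_task per command and dispatching it through ->do_task(), a backend now gets the command itself plus its S/G list and data direction in a single call. Condensed from the target_core_backend.h hunk below:

	/* before: per-task allocate/dispatch/free */
	struct se_task *(*alloc_task)(unsigned char *cdb);
	int (*do_task)(struct se_task *);
	void (*free_task)(struct se_task *);

	/* after: one entry point, fed straight from the se_cmd */
	int (*execute_cmd)(struct se_cmd *, struct scatterlist *, u32,
			   enum dma_data_direction);

Backends that need per-command state (iblock, pscsi) allocate it inside their execute_cmd() and park it in the new cmd->priv pointer.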
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -244,53 +244,33 @@ static void fd_free_device(void *p)
 	kfree(fd_dev);
 }
 
-static inline struct fd_request *FILE_REQ(struct se_task *task)
+static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
+		u32 sgl_nents)
 {
-	return container_of(task, struct fd_request, fd_task);
-}
-
-
-static struct se_task *
-fd_alloc_task(unsigned char *cdb)
-{
-	struct fd_request *fd_req;
-
-	fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
-	if (!fd_req) {
-		pr_err("Unable to allocate struct fd_request\n");
-		return NULL;
-	}
-
-	return &fd_req->fd_task;
-}
-
-static int fd_do_readv(struct se_task *task)
-{
-	struct fd_request *req = FILE_REQ(task);
-	struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
+	struct se_device *se_dev = cmd->se_dev;
 	struct fd_dev *dev = se_dev->dev_ptr;
 	struct file *fd = dev->fd_file;
-	struct scatterlist *sg = task->task_sg;
+	struct scatterlist *sg;
 	struct iovec *iov;
 	mm_segment_t old_fs;
-	loff_t pos = (task->task_se_cmd->t_task_lba *
+	loff_t pos = (cmd->t_task_lba *
 		      se_dev->se_sub_dev->se_dev_attrib.block_size);
 	int ret = 0, i;
 
-	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
+	iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
 	if (!iov) {
 		pr_err("Unable to allocate fd_do_readv iov[]\n");
 		return -ENOMEM;
 	}
 
-	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+	for_each_sg(sgl, sg, sgl_nents, i) {
 		iov[i].iov_len = sg->length;
 		iov[i].iov_base = sg_virt(sg);
 	}
 
 	old_fs = get_fs();
 	set_fs(get_ds());
-	ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
+	ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
 	set_fs(old_fs);
 
 	kfree(iov);
 
@@ -300,10 +280,10 @@ static int fd_do_readv(struct se_task *task)
 	 * block_device.
 	 */
 	if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
-		if (ret < 0 || ret != task->task_se_cmd->data_length) {
+		if (ret < 0 || ret != cmd->data_length) {
 			pr_err("vfs_readv() returned %d,"
 				" expecting %d for S_ISBLK\n", ret,
-				(int)task->task_se_cmd->data_length);
+				(int)cmd->data_length);
 			return (ret < 0 ? ret : -EINVAL);
 		}
 	} else {
@@ -317,38 +297,38 @@ static int fd_do_readv(struct se_task *task)
 	return 1;
 }
 
-static int fd_do_writev(struct se_task *task)
+static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
+		u32 sgl_nents)
 {
-	struct fd_request *req = FILE_REQ(task);
-	struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
+	struct se_device *se_dev = cmd->se_dev;
 	struct fd_dev *dev = se_dev->dev_ptr;
 	struct file *fd = dev->fd_file;
-	struct scatterlist *sg = task->task_sg;
+	struct scatterlist *sg;
 	struct iovec *iov;
 	mm_segment_t old_fs;
-	loff_t pos = (task->task_se_cmd->t_task_lba *
+	loff_t pos = (cmd->t_task_lba *
 		      se_dev->se_sub_dev->se_dev_attrib.block_size);
 	int ret, i = 0;
 
-	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
+	iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
 	if (!iov) {
 		pr_err("Unable to allocate fd_do_writev iov[]\n");
 		return -ENOMEM;
 	}
 
-	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+	for_each_sg(sgl, sg, sgl_nents, i) {
 		iov[i].iov_len = sg->length;
 		iov[i].iov_base = sg_virt(sg);
 	}
 
 	old_fs = get_fs();
 	set_fs(get_ds());
-	ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
+	ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
 	set_fs(old_fs);
 
 	kfree(iov);
 
-	if (ret < 0 || ret != task->task_se_cmd->data_length) {
+	if (ret < 0 || ret != cmd->data_length) {
 		pr_err("vfs_writev() returned %d\n", ret);
 		return (ret < 0 ? ret : -EINVAL);
 	}
 
@@ -369,7 +349,7 @@ static void fd_emulate_sync_cache(struct se_cmd *cmd)
 	 * for this SYNCHRONIZE_CACHE op
 	 */
 	if (immed)
-		transport_complete_sync_cache(cmd, 1);
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
 
 	/*
 	 * Determine if we will be flushing the entire device.
@@ -389,35 +369,37 @@ static void fd_emulate_sync_cache(struct se_cmd *cmd)
 	if (ret != 0)
 		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
 
-	if (!immed)
-		transport_complete_sync_cache(cmd, ret == 0);
+	if (immed)
+		return;
+
+	if (ret) {
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
+	} else {
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
+	}
 }
 
 /*
  * WRITE Force Unit Access (FUA) emulation on a per struct se_task
  * LBA range basis..
  */
-static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
+static void fd_emulate_write_fua(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct fd_dev *fd_dev = dev->dev_ptr;
-	loff_t start = task->task_se_cmd->t_task_lba *
+	loff_t start = cmd->t_task_lba *
 		dev->se_sub_dev->se_dev_attrib.block_size;
-	loff_t end = start + task->task_se_cmd->data_length;
+	loff_t end = start + cmd->data_length;
 	int ret;
 
 	pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
-		task->task_se_cmd->t_task_lba,
-		task->task_se_cmd->data_length);
+		cmd->t_task_lba, cmd->data_length);
 
 	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
 	if (ret != 0)
 		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
 }
 
-static int fd_do_task(struct se_task *task)
+static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
+		u32 sgl_nents, enum dma_data_direction data_direction)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
 	int ret = 0;
 
@@ -425,10 +407,10 @@ static int fd_do_task(struct se_task *task)
 	 * Call vectorized fileio functions to map struct scatterlist
 	 * physical memory addresses to struct iovec virtual memory.
 	 */
-	if (task->task_data_direction == DMA_FROM_DEVICE) {
-		ret = fd_do_readv(task);
+	if (data_direction == DMA_FROM_DEVICE) {
+		ret = fd_do_readv(cmd, sgl, sgl_nents);
 	} else {
-		ret = fd_do_writev(task);
+		ret = fd_do_writev(cmd, sgl, sgl_nents);
 
 		if (ret > 0 &&
 		    dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
@@ -439,7 +421,7 @@ static int fd_do_task(struct se_task *task)
 			 * and return some sense data to let the initiator
 			 * know the FUA WRITE cache sync failed..?
 			 */
-			fd_emulate_write_fua(cmd, task);
+			fd_emulate_write_fua(cmd);
 		}
 
 	}
@@ -448,24 +430,11 @@ static int fd_do_task(struct se_task *task)
 		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 		return ret;
 	}
-	if (ret) {
-		task->task_scsi_status = GOOD;
-		transport_complete_task(task, 1);
-	}
+	if (ret)
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
 	return 0;
 }
 
-/* fd_free_task(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static void fd_free_task(struct se_task *task)
-{
-	struct fd_request *req = FILE_REQ(task);
-
-	kfree(req);
-}
-
 enum {
 	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
 };
@@ -618,10 +587,8 @@ static struct se_subsystem_api fileio_template = {
 	.allocate_virtdevice	= fd_allocate_virtdevice,
 	.create_virtdevice	= fd_create_virtdevice,
 	.free_device		= fd_free_device,
-	.alloc_task		= fd_alloc_task,
-	.do_task		= fd_do_task,
+	.execute_cmd		= fd_execute_cmd,
 	.do_sync_cache		= fd_emulate_sync_cache,
-	.free_task		= fd_free_task,
 	.check_configfs_dev_params = fd_check_configfs_dev_params,
 	.set_configfs_dev_params = fd_set_configfs_dev_params,
 	.show_configfs_dev_params = fd_show_configfs_dev_params,
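The pattern every converted backend follows: do the I/O against the passed-in sgl/sgl_nents, report success by calling target_complete_cmd() with a SAM status, and report setup failures by setting cmd->scsi_sense_reason and returning a negative value. A minimal sketch mirroring fd_execute_cmd() above (my_execute_cmd and its my_read/my_write helpers are hypothetical, not part of the patch):

	static int my_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
			u32 sgl_nents, enum dma_data_direction data_direction)
	{
		int ret;

		/* the direction argument replaces task->task_data_direction */
		if (data_direction == DMA_FROM_DEVICE)
			ret = my_read(cmd, sgl, sgl_nents);
		else
			ret = my_write(cmd, sgl, sgl_nents);

		if (ret < 0) {
			cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			return ret;
		}
		if (ret)
			target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}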
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -12,10 +12,6 @@
 #define RRF_EMULATE_CDB		0x01
 #define RRF_GOT_LBA		0x02
 
-struct fd_request {
-	struct se_task	fd_task;
-};
-
 #define FBDF_HAS_PATH		0x01
 #define FBDF_HAS_SIZE		0x02
 #define FDBD_USE_BUFFERED_IO	0x04
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -189,26 +189,6 @@ static void iblock_free_device(void *p)
 	kfree(ib_dev);
 }
 
-static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
-{
-	return container_of(task, struct iblock_req, ib_task);
-}
-
-static struct se_task *
-iblock_alloc_task(unsigned char *cdb)
-{
-	struct iblock_req *ib_req;
-
-	ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
-	if (!ib_req) {
-		pr_err("Unable to allocate memory for struct iblock_req\n");
-		return NULL;
-	}
-
-	atomic_set(&ib_req->pending, 1);
-	return &ib_req->ib_task;
-}
-
 static unsigned long long iblock_emulate_read_cap_with_block_size(
 	struct se_device *dev,
 	struct block_device *bd,
@@ -295,8 +275,16 @@ static void iblock_end_io_flush(struct bio *bio, int err)
 	if (err)
 		pr_err("IBLOCK: cache flush failed: %d\n", err);
 
-	if (cmd)
-		transport_complete_sync_cache(cmd, err == 0);
+	if (cmd) {
+		if (err) {
+			cmd->scsi_sense_reason =
+				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
+		} else {
+			target_complete_cmd(cmd, SAM_STAT_GOOD);
+		}
+	}
 
 	bio_put(bio);
 }
 
@@ -315,7 +303,7 @@ static void iblock_emulate_sync_cache(struct se_cmd *cmd)
 	 * for this SYNCHRONIZE_CACHE op.
 	 */
 	if (immed)
-		transport_complete_sync_cache(cmd, 1);
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
 
 	bio = bio_alloc(GFP_KERNEL, 0);
 	bio->bi_end_io = iblock_end_io_flush;
@@ -334,11 +322,6 @@ static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
 	return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
 }
 
-static void iblock_free_task(struct se_task *task)
-{
-	kfree(IBLOCK_REQ(task));
-}
-
 enum {
 	Opt_udev_path, Opt_force, Opt_err
 };
@@ -447,19 +430,35 @@ static ssize_t iblock_show_configfs_dev_params(
 	return bl;
 }
 
+static void iblock_complete_cmd(struct se_cmd *cmd)
+{
+	struct iblock_req *ibr = cmd->priv;
+	u8 status;
+
+	if (!atomic_dec_and_test(&ibr->pending))
+		return;
+
+	if (atomic_read(&ibr->ib_bio_err_cnt))
+		status = SAM_STAT_CHECK_CONDITION;
+	else
+		status = SAM_STAT_GOOD;
+
+	target_complete_cmd(cmd, status);
+	kfree(ibr);
+}
+
 static void iblock_bio_destructor(struct bio *bio)
 {
-	struct se_task *task = bio->bi_private;
-	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
+	struct se_cmd *cmd = bio->bi_private;
+	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
 
 	bio_free(bio, ib_dev->ibd_bio_set);
 }
 
 static struct bio *
-iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
+iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
 {
-	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
-	struct iblock_req *ib_req = IBLOCK_REQ(task);
+	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
 	struct bio *bio;
 
 	/*
@@ -475,20 +474,11 @@ iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
 		return NULL;
 	}
 
-	pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
-		" %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
-	pr_debug("Allocated bio: %p task_size: %u\n", bio,
-		task->task_se_cmd->data_length);
-
 	bio->bi_bdev = ib_dev->ibd_bd;
-	bio->bi_private = task;
+	bio->bi_private = cmd;
 	bio->bi_destructor = iblock_bio_destructor;
 	bio->bi_end_io = &iblock_bio_done;
 	bio->bi_sector = lba;
-	atomic_inc(&ib_req->pending);
 
-	pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
-	pr_debug("Set ib_req->pending: %d\n", atomic_read(&ib_req->pending));
 	return bio;
 }
 
@@ -503,20 +493,21 @@ static void iblock_submit_bios(struct bio_list *list, int rw)
 	blk_finish_plug(&plug);
 }
 
-static int iblock_do_task(struct se_task *task)
+static int iblock_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
+		u32 sgl_nents, enum dma_data_direction data_direction)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
-	struct iblock_req *ibr = IBLOCK_REQ(task);
+	struct iblock_req *ibr;
 	struct bio *bio;
 	struct bio_list list;
 	struct scatterlist *sg;
-	u32 i, sg_num = task->task_sg_nents;
+	u32 sg_num = sgl_nents;
 	sector_t block_lba;
 	unsigned bio_cnt;
 	int rw;
+	int i;
 
-	if (task->task_data_direction == DMA_TO_DEVICE) {
+	if (data_direction == DMA_TO_DEVICE) {
 		/*
 		 * Force data to disk if we pretend to not have a volatile
 		 * write cache, or the initiator set the Force Unit Access bit.
@@ -532,8 +523,8 @@ static int iblock_do_task(struct se_task *task)
 	}
 
 	/*
-	 * Do starting conversion up from non 512-byte blocksize with
-	 * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
+	 * Convert the blocksize advertised to the initiator to the 512 byte
+	 * units unconditionally used by the Linux block layer.
 	 */
 	if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
 		block_lba = (cmd->t_task_lba << 3);
@@ -550,17 +541,22 @@ static int iblock_do_task(struct se_task *task)
 		return -ENOSYS;
 	}
 
-	bio = iblock_get_bio(task, block_lba, sg_num);
-	if (!bio) {
-		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-		return -ENOMEM;
-	}
+	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
+	if (!ibr)
+		goto fail;
+	cmd->priv = ibr;
+
+	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
+	if (!bio)
+		goto fail_free_ibr;
 
 	bio_list_init(&list);
 	bio_list_add(&list, bio);
+
+	atomic_set(&ibr->pending, 2);
 	bio_cnt = 1;
 
-	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+	for_each_sg(sgl, sg, sgl_nents, i) {
 		/*
 		 * XXX: if the length the device accepts is shorter than the
 		 * length of the S/G list entry this will cause and
@@ -573,9 +569,11 @@ static int iblock_do_task(struct se_task *task)
 			bio_cnt = 0;
 		}
 
-		bio = iblock_get_bio(task, block_lba, sg_num);
+		bio = iblock_get_bio(cmd, block_lba, sg_num);
 		if (!bio)
-			goto fail;
+			goto fail_put_bios;
+
+		atomic_inc(&ibr->pending);
 		bio_list_add(&list, bio);
 		bio_cnt++;
 	}
@@ -586,17 +584,16 @@ static int iblock_do_task(struct se_task *task)
 	}
 
 	iblock_submit_bios(&list, rw);
-
-	if (atomic_dec_and_test(&ibr->pending)) {
-		transport_complete_task(task,
-				!atomic_read(&ibr->ib_bio_err_cnt));
-	}
+	iblock_complete_cmd(cmd);
 	return 0;
 
-fail:
+fail_put_bios:
 	while ((bio = bio_list_pop(&list)))
 		bio_put(bio);
+fail_free_ibr:
+	kfree(ibr);
+	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+fail:
 	return -ENOMEM;
 }
 
@@ -621,8 +618,8 @@ static sector_t iblock_get_blocks(struct se_device *dev)
 
 static void iblock_bio_done(struct bio *bio, int err)
 {
-	struct se_task *task = bio->bi_private;
-	struct iblock_req *ibr = IBLOCK_REQ(task);
+	struct se_cmd *cmd = bio->bi_private;
+	struct iblock_req *ibr = cmd->priv;
 
 	/*
 	 * Set -EIO if !BIO_UPTODATE and the passed is still err=0
@@ -642,14 +639,7 @@ static void iblock_bio_done(struct bio *bio, int err)
 
 	bio_put(bio);
 
-	if (!atomic_dec_and_test(&ibr->pending))
-		return;
-
-	pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
-		task, bio, task->task_se_cmd->t_task_lba,
-		(unsigned long long)bio->bi_sector, err);
-
-	transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));
+	iblock_complete_cmd(cmd);
 }
 
 static struct se_subsystem_api iblock_template = {
@@ -663,11 +653,9 @@ static struct se_subsystem_api iblock_template = {
 	.allocate_virtdevice	= iblock_allocate_virtdevice,
 	.create_virtdevice	= iblock_create_virtdevice,
 	.free_device		= iblock_free_device,
-	.alloc_task		= iblock_alloc_task,
-	.do_task		= iblock_do_task,
+	.execute_cmd		= iblock_execute_cmd,
 	.do_discard		= iblock_do_discard,
 	.do_sync_cache		= iblock_emulate_sync_cache,
-	.free_task		= iblock_free_task,
 	.check_configfs_dev_params = iblock_check_configfs_dev_params,
 	.set_configfs_dev_params = iblock_set_configfs_dev_params,
 	.show_configfs_dev_params = iblock_show_configfs_dev_params,
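iblock is the trickiest conversion because one command fans out into many bios. The per-command bookkeeping that used to live in the embedded se_task moves into a small iblock_req hung off cmd->priv, and completion becomes plain reference counting: pending starts at 2 (one reference held by the submission path itself, one for the first bio), every additional bio takes another reference, and whoever drops the count to zero completes the command. The submitter's own reference is what keeps the command from completing, and ibr from being freed, while bios are still being allocated and queued. The whole completion path reduces to this shape (a condensed restatement of iblock_complete_cmd() above):

	static void iblock_complete_cmd(struct se_cmd *cmd)
	{
		struct iblock_req *ibr = cmd->priv;

		/* last reference down (submitter or a bio's end_io) finishes it */
		if (!atomic_dec_and_test(&ibr->pending))
			return;

		target_complete_cmd(cmd, atomic_read(&ibr->ib_bio_err_cnt) ?
				SAM_STAT_CHECK_CONDITION : SAM_STAT_GOOD);
		kfree(ibr);
	}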
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -7,7 +7,6 @@
 #define IBLOCK_LBA_SHIFT	9
 
 struct iblock_req {
-	struct se_task ib_task;
 	atomic_t pending;
 	atomic_t ib_bio_err_cnt;
 } ____cacheline_aligned;
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -663,22 +663,12 @@ static void pscsi_free_device(void *p)
 	kfree(pdv);
 }
 
-static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task)
+static int pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg)
 {
-	return container_of(task, struct pscsi_plugin_task, pscsi_task);
-}
-
-
-/* pscsi_transport_complete():
- *
- *
- */
-static int pscsi_transport_complete(struct se_task *task)
-{
-	struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
+	struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
 	struct scsi_device *sd = pdv->pdv_sd;
 	int result;
-	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	struct pscsi_plugin_task *pt = cmd->priv;
 	unsigned char *cdb = &pt->pscsi_cdb[0];
 
 	result = pt->pscsi_result;
@@ -688,12 +678,11 @@ static int pscsi_transport_complete(struct se_task *task)
 	 */
 	if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
 	    (status_byte(result) << 1) == SAM_STAT_GOOD) {
-		if (!task->task_se_cmd->se_deve)
+		if (!cmd->se_deve)
 			goto after_mode_sense;
 
-		if (task->task_se_cmd->se_deve->lun_flags &
-				TRANSPORT_LUNFLAGS_READ_ONLY) {
-			unsigned char *buf = transport_kmap_data_sg(task->task_se_cmd);
+		if (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) {
+			unsigned char *buf = transport_kmap_data_sg(cmd);
 
 			if (cdb[0] == MODE_SENSE_10) {
 				if (!(buf[3] & 0x80))
@@ -703,7 +692,7 @@ static int pscsi_transport_complete(struct se_task *task)
 					buf[2] |= 0x80;
 			}
 
-			transport_kunmap_data_sg(task->task_se_cmd);
+			transport_kunmap_data_sg(cmd);
 		}
 	}
 after_mode_sense:
@@ -722,7 +711,6 @@ after_mode_sense:
 	if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
 	    (status_byte(result) << 1) == SAM_STAT_GOOD) {
 		unsigned char *buf;
-		struct scatterlist *sg = task->task_sg;
 		u16 bdl;
 		u32 blocksize;
 
@@ -757,35 +745,6 @@ after_mode_select:
 	return 0;
 }
 
-static struct se_task *
-pscsi_alloc_task(unsigned char *cdb)
-{
-	struct pscsi_plugin_task *pt;
-
-	/*
-	 * Dynamically alloc cdb space, since it may be larger than
-	 * TCM_MAX_COMMAND_SIZE
-	 */
-	pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL);
-	if (!pt) {
-		pr_err("Unable to allocate struct pscsi_plugin_task\n");
-		return NULL;
-	}
-
-	return &pt->pscsi_task;
-}
-
-static void pscsi_free_task(struct se_task *task)
-{
-	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
-
-	/*
-	 * We do not release the bio(s) here associated with this task, as
-	 * this is handled by bio_put() and pscsi_bi_endio().
-	 */
-	kfree(pt);
-}
-
 enum {
 	Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id,
 	Opt_scsi_lun_id, Opt_err
@@ -958,26 +917,25 @@ static inline struct bio *pscsi_get_bio(int sg_num)
 	return bio;
 }
 
-static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
+static int pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl,
+	u32 sgl_nents, enum dma_data_direction data_direction,
 	struct bio **hbio)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
-	struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
-	u32 task_sg_num = task->task_sg_nents;
+	struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
	struct bio *bio = NULL, *tbio = NULL;
 	struct page *page;
 	struct scatterlist *sg;
 	u32 data_len = cmd->data_length, i, len, bytes, off;
-	int nr_pages = (cmd->data_length + task_sg[0].offset +
+	int nr_pages = (cmd->data_length + sgl[0].offset +
 			PAGE_SIZE - 1) >> PAGE_SHIFT;
 	int nr_vecs = 0, rc;
-	int rw = (task->task_data_direction == DMA_TO_DEVICE);
+	int rw = (data_direction == DMA_TO_DEVICE);
 
 	*hbio = NULL;
 
 	pr_debug("PSCSI: nr_pages: %d\n", nr_pages);
 
-	for_each_sg(task_sg, sg, task_sg_num, i) {
+	for_each_sg(sgl, sg, sgl_nents, i) {
 		page = sg_page(sg);
 		off = sg->offset;
 		len = sg->length;
@@ -1009,7 +967,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
 				 * Set *hbio pointer to handle the case:
 				 * nr_pages > BIO_MAX_PAGES, where additional
 				 * bios need to be added to complete a given
-				 * struct se_task
+				 * command.
 				 */
 				if (!*hbio)
 					*hbio = tbio = bio;
@@ -1049,7 +1007,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
 		}
 	}
 
-	return task->task_sg_nents;
+	return sgl_nents;
 fail:
 	while (*hbio) {
 		bio = *hbio;
@@ -1061,53 +1019,61 @@ fail:
 	return -ENOMEM;
 }
 
-static int pscsi_do_task(struct se_task *task)
+static int pscsi_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
+		u32 sgl_nents, enum dma_data_direction data_direction)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
-	struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
-	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
+	struct pscsi_plugin_task *pt;
 	struct request *req;
 	struct bio *hbio;
 	int ret;
 
-	if (task->task_se_cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
+	/*
+	 * Dynamically alloc cdb space, since it may be larger than
+	 * TCM_MAX_COMMAND_SIZE
+	 */
+	pt = kzalloc(sizeof(*pt) + scsi_command_size(cmd->t_task_cdb), GFP_KERNEL);
+	if (!pt) {
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return -ENOMEM;
+	}
+	cmd->priv = pt;
+
+	memcpy(pt->pscsi_cdb, cmd->t_task_cdb,
+		scsi_command_size(cmd->t_task_cdb));
+
+	if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
 		req = blk_get_request(pdv->pdv_sd->request_queue,
-				(task->task_data_direction == DMA_TO_DEVICE),
+				(data_direction == DMA_TO_DEVICE),
 				GFP_KERNEL);
 		if (!req || IS_ERR(req)) {
 			pr_err("PSCSI: blk_get_request() failed: %ld\n",
				req ? IS_ERR(req) : -ENOMEM);
 			cmd->scsi_sense_reason =
 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-			return -ENODEV;
+			goto fail;
 		}
 	} else {
 		BUG_ON(!cmd->data_length);
 
-		/*
-		 * Setup the main struct request for the task->task_sg[] payload
-		 */
-		ret = pscsi_map_sg(task, task->task_sg, &hbio);
+		ret = pscsi_map_sg(cmd, sgl, sgl_nents, data_direction, &hbio);
 		if (ret < 0) {
 			cmd->scsi_sense_reason =
 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-			return ret;
+			goto fail;
 		}
 
 		req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
 				       GFP_KERNEL);
 		if (IS_ERR(req)) {
 			pr_err("pSCSI: blk_make_request() failed\n");
-			goto fail;
+			goto fail_free_bio;
 		}
 	}
 
 	req->cmd_type = REQ_TYPE_BLOCK_PC;
 	req->end_io = pscsi_req_done;
-	req->end_io_data = task;
+	req->end_io_data = cmd;
 	req->cmd_len = scsi_command_size(pt->pscsi_cdb);
 	req->cmd = &pt->pscsi_cdb[0];
 	req->sense = &pt->pscsi_sense[0];
@@ -1119,12 +1085,12 @@ static int pscsi_do_task(struct se_task *task)
 	req->retries = PS_RETRY;
 
 	blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req,
-			(task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
+			(cmd->sam_task_attr == MSG_HEAD_TAG),
 			pscsi_req_done);
 
 	return 0;
 
-fail:
+fail_free_bio:
 	while (hbio) {
 		struct bio *bio = hbio;
 		hbio = hbio->bi_next;
@@ -1132,16 +1098,14 @@ fail:
 		bio_endio(bio, 0);	/* XXX: should be error */
 	}
+	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+fail:
+	kfree(pt);
 	return -ENOMEM;
 }
 
-/* pscsi_get_sense_buffer():
- *
- *
- */
-static unsigned char *pscsi_get_sense_buffer(struct se_task *task)
+static unsigned char *pscsi_get_sense_buffer(struct se_cmd *cmd)
 {
-	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	struct pscsi_plugin_task *pt = cmd->priv;
 
 	return pt->pscsi_sense;
 }
@@ -1181,48 +1145,36 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
 	return 0;
 }
 
-/* pscsi_handle_SAM_STATUS_failures():
- *
- *
- */
-static inline void pscsi_process_SAM_status(
-	struct se_task *task,
-	struct pscsi_plugin_task *pt)
+static void pscsi_req_done(struct request *req, int uptodate)
 {
-	task->task_scsi_status = status_byte(pt->pscsi_result);
-	if (task->task_scsi_status) {
-		task->task_scsi_status <<= 1;
-		pr_debug("PSCSI Status Byte exception at task: %p CDB:"
-			" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
+	struct se_cmd *cmd = req->end_io_data;
+	struct pscsi_plugin_task *pt = cmd->priv;
+
+	pt->pscsi_result = req->errors;
+	pt->pscsi_resid = req->resid_len;
+
+	cmd->scsi_status = status_byte(pt->pscsi_result) << 1;
+	if (cmd->scsi_status) {
+		pr_debug("PSCSI Status Byte exception at cmd: %p CDB:"
+			" 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
 			pt->pscsi_result);
 	}
 
 	switch (host_byte(pt->pscsi_result)) {
 	case DID_OK:
-		transport_complete_task(task, (!task->task_scsi_status));
+		target_complete_cmd(cmd, cmd->scsi_status);
 		break;
 	default:
-		pr_debug("PSCSI Host Byte exception at task: %p CDB:"
-			" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
+		pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
+			" 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
 			pt->pscsi_result);
-		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
-		task->task_se_cmd->scsi_sense_reason =
-			TCM_UNSUPPORTED_SCSI_OPCODE;
-		transport_complete_task(task, 0);
+		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
 		break;
 	}
-}
-
-static void pscsi_req_done(struct request *req, int uptodate)
-{
-	struct se_task *task = req->end_io_data;
-	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
-
-	pt->pscsi_result = req->errors;
-	pt->pscsi_resid = req->resid_len;
-
-	pscsi_process_SAM_status(task, pt);
+
 	__blk_put_request(req->q, req);
+	kfree(pt);
 }
 
 static struct se_subsystem_api pscsi_template = {
@@ -1236,9 +1188,7 @@ static struct se_subsystem_api pscsi_template = {
 	.create_virtdevice	= pscsi_create_virtdevice,
 	.free_device		= pscsi_free_device,
 	.transport_complete	= pscsi_transport_complete,
-	.alloc_task		= pscsi_alloc_task,
-	.do_task		= pscsi_do_task,
-	.free_task		= pscsi_free_task,
+	.execute_cmd		= pscsi_execute_cmd,
 	.check_configfs_dev_params = pscsi_check_configfs_dev_params,
 	.set_configfs_dev_params = pscsi_set_configfs_dev_params,
 	.show_configfs_dev_params = pscsi_show_configfs_dev_params,
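pscsi used alloc_task mainly to size the CDB buffer, which can exceed TCM_MAX_COMMAND_SIZE. That allocation simply moves into pscsi_execute_cmd(), and cmd->priv carries the pscsi_plugin_task across the asynchronous block-layer round trip. The lifetime, annotated (names as in the patch above, statements abridged):

	pt = kzalloc(sizeof(*pt) + scsi_command_size(cmd->t_task_cdb), GFP_KERNEL);
	cmd->priv = pt;			/* completion paths find it here */
	req->end_io_data = cmd;		/* block layer hands the cmd back */
	/* ... later, in pscsi_req_done(): */
	pt = cmd->priv;
	target_complete_cmd(cmd, cmd->scsi_status);
	__blk_put_request(req->q, req);
	kfree(pt);			/* state dies with the request */

pscsi_transport_complete() and pscsi_get_sense_buffer() read the same cmd->priv pointer, which is why the PSCSI_TASK() container_of() helper can go away entirely.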
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -22,7 +22,6 @@
 #include <linux/kobject.h>
 
 struct pscsi_plugin_task {
-	struct se_task pscsi_task;
 	unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
 	int	pscsi_direction;
 	int	pscsi_result;
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -266,12 +266,6 @@ static void rd_free_device(void *p)
 	kfree(rd_dev);
 }
 
-static struct se_task *
-rd_alloc_task(unsigned char *cdb)
-{
-	return kzalloc(sizeof(struct se_task), GFP_KERNEL);
-}
-
 static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
 {
 	u32 i;
@@ -290,9 +284,10 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
 	return NULL;
 }
 
-static int rd_do_task(struct se_task *task)
+static int rd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
+		u32 sgl_nents, enum dma_data_direction data_direction)
 {
-	struct se_device *se_dev = task->task_se_cmd->se_dev;
+	struct se_device *se_dev = cmd->se_dev;
 	struct rd_dev *dev = se_dev->dev_ptr;
 	struct rd_dev_sg_table *table;
 	struct scatterlist *rd_sg;
@@ -303,11 +298,10 @@ static int rd_do_task(struct se_task *task)
 	u32 src_len;
 	u64 tmp;
 
-	tmp = task->task_se_cmd->t_task_lba *
-	      se_dev->se_sub_dev->se_dev_attrib.block_size;
+	tmp = cmd->t_task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size;
 	rd_offset = do_div(tmp, PAGE_SIZE);
 	rd_page = tmp;
-	rd_size = task->task_se_cmd->data_length;
+	rd_size = cmd->data_length;
 
 	table = rd_get_sg_table(dev, rd_page);
 	if (!table)
@@ -317,14 +311,12 @@ static int rd_do_task(struct se_task *task)
 
 	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
 			dev->rd_dev_id,
-			task->task_data_direction == DMA_FROM_DEVICE ?
-			"Read" : "Write",
-			task->task_se_cmd->t_task_lba,
-			rd_size, rd_page, rd_offset);
+			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
+			cmd->t_task_lba, rd_size, rd_page, rd_offset);
 
 	src_len = PAGE_SIZE - rd_offset;
-	sg_miter_start(&m, task->task_sg, task->task_sg_nents,
-			task->task_data_direction == DMA_FROM_DEVICE ?
+	sg_miter_start(&m, sgl, sgl_nents,
+			data_direction == DMA_FROM_DEVICE ?
 			SG_MITER_TO_SG : SG_MITER_FROM_SG);
 	while (rd_size) {
 		u32 len;
@@ -336,7 +328,7 @@ static int rd_do_task(struct se_task *task)
 
 		rd_addr = sg_virt(rd_sg) + rd_offset;
 
-		if (task->task_data_direction == DMA_FROM_DEVICE)
+		if (data_direction == DMA_FROM_DEVICE)
 			memcpy(m.addr, rd_addr, len);
 		else
 			memcpy(rd_addr, m.addr, len);
@@ -371,16 +363,10 @@ static int rd_do_task(struct se_task *task)
 	}
 	sg_miter_stop(&m);
 
-	task->task_scsi_status = GOOD;
-	transport_complete_task(task, 1);
+	target_complete_cmd(cmd, SAM_STAT_GOOD);
 	return 0;
 }
 
-static void rd_free_task(struct se_task *task)
-{
-	kfree(task);
-}
-
 enum {
 	Opt_rd_pages, Opt_err
 };
@@ -482,9 +468,7 @@ static struct se_subsystem_api rd_mcp_template = {
 	.allocate_virtdevice	= rd_allocate_virtdevice,
 	.create_virtdevice	= rd_create_virtdevice,
 	.free_device		= rd_free_device,
-	.alloc_task		= rd_alloc_task,
-	.do_task		= rd_do_task,
-	.free_task		= rd_free_task,
+	.execute_cmd		= rd_execute_cmd,
 	.check_configfs_dev_params = rd_check_configfs_dev_params,
 	.set_configfs_dev_params = rd_set_configfs_dev_params,
 	.show_configfs_dev_params = rd_show_configfs_dev_params,
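rd needs no private state at all once the se_task wrapper is gone: rd_execute_cmd() walks the caller's S/G list with the sg mapping iterator and memcpy()s against the ramdisk's backing pages, with the direction argument picking the copy orientation. Condensed from the hunk above:

	sg_miter_start(&m, sgl, sgl_nents,
			data_direction == DMA_FROM_DEVICE ?
			SG_MITER_TO_SG : SG_MITER_FROM_SG);
	/* READ fills the initiator's sgl; WRITE drains it into rd pages */

Since the old rd_alloc_task() allocated nothing but a bare struct se_task and rd_free_task() merely kfree()d it, both callbacks simply disappear.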
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -374,13 +374,11 @@ static void core_tmr_drain_cmd_list(
 	struct se_queue_obj *qobj = &dev->dev_queue_obj;
 	struct se_cmd *cmd, *tcmd;
 	unsigned long flags;
 
 	/*
-	 * Release all commands remaining in the struct se_device cmd queue.
+	 * Release all commands remaining in the per-device command queue.
 	 *
-	 * This follows the same logic as above for the struct se_device
-	 * struct se_task state list, where commands are returned with
-	 * TASK_ABORTED status, if there is an outstanding $FABRIC_MOD
-	 * reference, otherwise the struct se_cmd is released.
+	 * This follows the same logic as above for the state list.
 	 */
 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
 	list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) {
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -72,7 +72,6 @@ static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
 static void transport_complete_task_attr(struct se_cmd *cmd);
 static void transport_handle_queue_full(struct se_cmd *cmd,
 		struct se_device *dev);
-static void transport_free_dev_tasks(struct se_cmd *cmd);
 static int transport_generic_get_mem(struct se_cmd *cmd);
 static void transport_put_cmd(struct se_cmd *cmd);
 static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
@@ -662,28 +661,6 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 }
 
-/*
- * Completion function used by TCM subsystem plugins (such as FILEIO)
- * for queueing up response from struct se_subsystem_api->do_task()
- */
-void transport_complete_sync_cache(struct se_cmd *cmd, int good)
-{
-	struct se_task *task = cmd->t_task;
-
-	if (good) {
-		cmd->scsi_status = SAM_STAT_GOOD;
-		task->task_scsi_status = GOOD;
-	} else {
-		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
-		task->task_se_cmd->scsi_sense_reason =
-			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
-	}
-
-	transport_complete_task(task, good);
-}
-EXPORT_SYMBOL(transport_complete_sync_cache);
-
 static void target_complete_failure_work(struct work_struct *work)
 {
 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -691,35 +668,28 @@ static void target_complete_failure_work(struct work_struct *work)
 	transport_generic_request_failure(cmd);
 }
 
-/* transport_complete_task():
- *
- * Called from interrupt and non interrupt context depending
- * on the transport plugin.
- */
-void transport_complete_task(struct se_task *task, int success)
+void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
+	int success = scsi_status == GOOD;
 	unsigned long flags;
 
+	cmd->scsi_status = scsi_status;
+
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	cmd->transport_state &= ~CMD_T_BUSY;
 
 	/*
 	 * See if any sense data exists, if so set the TASK_SENSE flag.
 	 * Also check for any other post completion work that needs to be
 	 * done by the plugins.
 	 */
 	if (dev && dev->transport->transport_complete) {
-		if (dev->transport->transport_complete(task) != 0) {
+		if (dev->transport->transport_complete(cmd,
+				cmd->t_data_sg) != 0) {
 			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
 			success = 1;
 		}
 	}
 
 	/*
-	 * See if we are waiting for outstanding struct se_task
-	 * to complete for an exception condition
+	 * See if we are waiting to complete for an exception condition.
 	 */
 	if (cmd->transport_state & CMD_T_REQUEST_STOP) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -730,15 +700,11 @@ void transport_complete_task(struct se_task *task, int success)
 	if (!success)
 		cmd->transport_state |= CMD_T_FAILED;
 
-	/*
-	 * Decrement the outstanding t_task_cdbs_left count.  The last
-	 * struct se_task from struct se_cmd will complete itself into the
-	 * device queue depending upon int success.
-	 */
-	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		return;
-	}
-
 	/*
 	 * Check for case where an explict ABORT_TASK has been received
 	 * and transport_wait_for_tasks() will be waiting for completion..
@@ -761,15 +727,6 @@ void transport_complete_task(struct se_task *task, int success)
 
 	queue_work(target_completion_wq, &cmd->work);
 }
-EXPORT_SYMBOL(transport_complete_task);
-
-void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
-{
-	struct se_task *task = cmd->t_task;
-
-	task->task_scsi_status = scsi_status;
-	transport_complete_task(task, scsi_status == GOOD);
-}
 EXPORT_SYMBOL(target_complete_cmd);
 
 static void target_add_to_state_list(struct se_cmd *cmd)
@@ -2076,8 +2033,10 @@ check_depth:
 
 	if (cmd->execute_cmd)
 		error = cmd->execute_cmd(cmd);
-	else
-		error = dev->transport->do_task(cmd->t_task);
+	else {
+		error = dev->transport->execute_cmd(cmd, cmd->t_data_sg,
+				cmd->t_data_nents, cmd->data_direction);
+	}
 
 	if (error != 0) {
 		spin_lock_irqsave(&cmd->t_state_lock, flags);
@@ -2312,7 +2271,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
 {
 	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
 	struct se_device *dev = cmd->se_dev;
-	struct se_task *task = NULL;
 	unsigned long flags;
 	u32 offset = 0;
 
@@ -2327,9 +2285,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
 		return 0;
 	}
 
-	if (!cmd->t_task)
-		goto out;
-
 	if (!(cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
 		goto out;
 
@@ -2338,19 +2293,19 @@ static int transport_get_sense_data(struct se_cmd *cmd)
 		goto out;
 	}
 
-	sense_buffer = dev->transport->get_sense_buffer(task);
+	sense_buffer = dev->transport->get_sense_buffer(cmd);
 	if (!sense_buffer) {
-		pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"
+		pr_err("ITT 0x%08x cmd %p: Unable to locate"
 			" sense buffer for task with sense\n",
-			cmd->se_tfo->get_task_tag(cmd), task);
+			cmd->se_tfo->get_task_tag(cmd), cmd);
 		goto out;
 	}
 
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER);
 
 	memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER);
-	cmd->scsi_status = task->task_scsi_status;
 
 	/* Automatically padded */
 	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
@@ -3199,10 +3154,6 @@ static void target_complete_ok_work(struct work_struct *work)
 	if (transport_get_sense_data(cmd) < 0)
 		reason = TCM_NON_EXISTENT_LUN;
 
-	/*
-	 * Only set when an struct se_task->task_scsi_status returned
-	 * a non GOOD status.
-	 */
 	if (cmd->scsi_status) {
 		ret = transport_send_check_condition_and_sense(
 				cmd, reason, 1);
@@ -3277,15 +3228,6 @@ queue_full:
 	transport_handle_queue_full(cmd, cmd->se_dev);
 }
 
-static void transport_free_dev_tasks(struct se_cmd *cmd)
-{
-	struct se_task *task;
-
-	task = cmd->t_task;
-	if (task && !(cmd->transport_state & CMD_T_BUSY))
-		cmd->se_dev->transport->free_task(task);
-}
-
 static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
 {
 	struct scatterlist *sg;
@@ -3346,7 +3288,6 @@ static void transport_release_cmd(struct se_cmd *cmd)
 static void transport_put_cmd(struct se_cmd *cmd)
 {
 	unsigned long flags;
-	int free_tasks = 0;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if (atomic_read(&cmd->t_fe_count)) {
@@ -3362,13 +3303,9 @@ static void transport_put_cmd(struct se_cmd *cmd)
 	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
 		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
 		target_remove_from_state_list(cmd);
-		free_tasks = 1;
 	}
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-	if (free_tasks != 0)
-		transport_free_dev_tasks(cmd);
-
 	transport_free_pages(cmd);
 	transport_release_cmd(cmd);
 	return;
@@ -3526,7 +3463,6 @@ out:
 int transport_generic_new_cmd(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	struct se_task *task;
 	int ret = 0;
 
 	/*
@@ -3572,19 +3508,6 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
 				attr->max_sectors);
 	}
 
-	task = dev->transport->alloc_task(cmd->t_task_cdb);
-	if (!task) {
-		pr_err("Unable to allocate struct se_task\n");
-		goto out_fail;
-	}
-
-	task->task_se_cmd = cmd;
-	task->task_data_direction = cmd->data_direction;
-	task->task_sg = cmd->t_data_sg;
-	task->task_sg_nents = cmd->t_data_nents;
-
-	cmd->t_task = task;
-
 	atomic_inc(&cmd->t_fe_count);
 	atomic_inc(&cmd->t_se_count);
 
@@ -3592,19 +3515,17 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
 	atomic_set(&cmd->t_task_cdbs_ex_left, 1);
 
 	/*
-	 * For WRITEs, let the fabric know its buffer is ready..
-	 * This WRITE struct se_cmd (and all of its associated struct se_task's)
-	 * will be added to the struct se_device execution queue after its WRITE
-	 * data has arrived. (ie: It gets handled by the transport processing
-	 * thread a second time)
+	 * For WRITEs, let the fabric know its buffer is ready.
+	 *
+	 * The command will be added to the execution queue after its write
+	 * data has arrived.
 	 */
 	if (cmd->data_direction == DMA_TO_DEVICE) {
 		target_add_to_state_list(cmd);
 		return transport_generic_write_pending(cmd);
 	}
 	/*
-	 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
-	 * to the execution queue.
+	 * Everything else but a WRITE, add the command to the execution queue.
 	 */
 	transport_execute_tasks(cmd);
 	return 0;
@@ -3691,8 +3612,6 @@ void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
 		if (cmd->se_lun)
 			transport_lun_remove_cmd(cmd);
 
-		transport_free_dev_tasks(cmd);
-
 		transport_put_cmd(cmd);
 	}
 }
@@ -3832,7 +3751,6 @@ EXPORT_SYMBOL(target_wait_for_sess_cmds);
  */
 static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
 {
-	struct se_task *task = cmd->t_task;
 	unsigned long flags;
 	int ret = 0;
 
@@ -3944,7 +3862,6 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
 			target_remove_from_state_list(cmd);
 			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
 
-			transport_free_dev_tasks(cmd);
 			/*
 			 * The Storage engine stopped this struct se_cmd before it was
 			 * send to the fabric frontend for delivery back to the
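Note how the dispatch site now spells out the S/G list and direction instead of letting backends dig them out of the command. That is the hook for re-adding BIDI support: a bidirectional command could drive the same backend entry twice, once per phase, along these lines (hypothetical, not in this patch; it assumes the existing t_bidi_data_sg/t_bidi_data_nents fields of struct se_cmd):

	error = dev->transport->execute_cmd(cmd, cmd->t_data_sg,
			cmd->t_data_nents, DMA_TO_DEVICE);
	if (!error)
		error = dev->transport->execute_cmd(cmd, cmd->t_bidi_data_sg,
				cmd->t_bidi_data_nents, DMA_FROM_DEVICE);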
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -23,12 +23,11 @@ struct se_subsystem_api {
 	struct se_device *(*create_virtdevice)(struct se_hba *,
 				struct se_subsystem_dev *, void *);
 	void (*free_device)(void *);
-	int (*transport_complete)(struct se_task *task);
-	struct se_task *(*alloc_task)(unsigned char *cdb);
-	int (*do_task)(struct se_task *);
+	int (*transport_complete)(struct se_cmd *cmd, struct scatterlist *);
+	int (*execute_cmd)(struct se_cmd *, struct scatterlist *, u32,
+			enum dma_data_direction);
 	int (*do_discard)(struct se_device *, sector_t, u32);
 	void (*do_sync_cache)(struct se_cmd *);
-	void (*free_task)(struct se_task *);
 	ssize_t (*check_configfs_dev_params)(struct se_hba *,
			struct se_subsystem_dev *);
 	ssize_t (*set_configfs_dev_params)(struct se_hba *,
@@ -38,7 +37,7 @@ struct se_subsystem_api {
 	u32 (*get_device_rev)(struct se_device *);
 	u32 (*get_device_type)(struct se_device *);
 	sector_t (*get_blocks)(struct se_device *);
-	unsigned char *(*get_sense_buffer)(struct se_task *);
+	unsigned char *(*get_sense_buffer)(struct se_cmd *);
 };
 
 int	transport_subsystem_register(struct se_subsystem_api *);
@@ -48,8 +47,6 @@ struct se_device *transport_add_device_to_core_hba(struct se_hba *,
 		struct se_subsystem_api *, struct se_subsystem_dev *, u32,
 		void *, struct se_dev_limits *, const char *, const char *);
 
-void	transport_complete_sync_cache(struct se_cmd *, int);
-void	transport_complete_task(struct se_task *, int);
 void	target_complete_cmd(struct se_cmd *, u8);
 
 void	transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -477,14 +477,6 @@ struct se_queue_obj {
 	wait_queue_head_t	thread_wq;
 };
 
-struct se_task {
-	struct se_cmd		*task_se_cmd;
-	struct scatterlist	*task_sg;
-	u32			task_sg_nents;
-	u8			task_scsi_status;
-	enum dma_data_direction	task_data_direction;
-};
-
 struct se_tmr_req {
 	/* Task Management function to be performed */
 	u8			function;
@@ -592,7 +584,8 @@ struct se_cmd {
 	/* old task stop completion, consider merging with some of the above */
 	struct completion	task_stop_comp;
 
-	struct se_task		*t_task;
+	/* backend private data */
+	void			*priv;
 };
 
 struct se_ua {