Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target updates from Nicholas Bellinger:
 "The highlights this round include:

   - add support for SCSI Referrals (Hannes)
   - add support for T10 DIF into target core (nab + mkp)
   - add support for T10 DIF emulation in FILEIO + RAMDISK backends (Sagi + nab)
   - add support for T10 DIF -> bio_integrity passthrough in IBLOCK backend (nab)
   - prep changes to iser-target for >= v3.15 T10 DIF support (Sagi)
   - add support for qla2xxx N_Port ID Virtualization - NPIV (Saurav + Quinn)
   - allow percpu_ida_alloc() to receive task state bitmask (Kent)
   - fix >= v3.12 iscsi-target session reset hung task regression (nab)
   - fix >= v3.13 percpu_ref se_lun->lun_ref_active race (nab)
   - fix a long-standing network portal creation race (Andy)"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (51 commits)
  target: Fix percpu_ref_put race in transport_lun_remove_cmd
  target/iscsi: Fix network portal creation race
  target: Report bad sector in sense data for DIF errors
  iscsi-target: Convert gfp_t parameter to task state bitmask
  iscsi-target: Fix connection reset hang with percpu_ida_alloc
  percpu_ida: Make percpu_ida_alloc + callers accept task state bitmask
  iscsi-target: Pre-allocate more tags to avoid ack starvation
  qla2xxx: Configure NPIV fc_vport via tcm_qla2xxx_npiv_make_lport
  qla2xxx: Enhancements to enable NPIV support for QLOGIC ISPs with TCM/LIO.
  qla2xxx: Fix scsi_host leak on qlt_lport_register callback failure
  IB/isert: pass scatterlist instead of cmd to fast_reg_mr routine
  IB/isert: Move fastreg descriptor creation to a function
  IB/isert: Avoid frwr notation, user fastreg
  IB/isert: seperate connection protection domains and dma MRs
  tcm_loop: Enable DIF/DIX modes in SCSI host LLD
  target/rd: Add DIF protection into rd_execute_rw
  target/rd: Add support for protection SGL setup + release
  target/rd: Refactor rd_build_device_space + rd_release_device_space
  target/file: Add DIF protection support to fd_execute_rw
  target/file: Add DIF protection init/format support
  ...
This commit is contained in: commit 4e13c5d021
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -36,7 +36,8 @@ static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp)
 {
 	int tag;
 
-	tag = percpu_ida_alloc(&tags->free_tags, gfp);
+	tag = percpu_ida_alloc(&tags->free_tags, (gfp & __GFP_WAIT) ?
+			       TASK_UNINTERRUPTIBLE : TASK_RUNNING);
 	if (tag < 0)
 		return BLK_MQ_TAG_FAIL;
 	return tag + tags->nr_reserved_tags;
@@ -52,7 +53,8 @@ static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
 		return BLK_MQ_TAG_FAIL;
 	}
 
-	tag = percpu_ida_alloc(&tags->reserved_tags, gfp);
+	tag = percpu_ida_alloc(&tags->reserved_tags, (gfp & __GFP_WAIT) ?
+			       TASK_UNINTERRUPTIBLE : TASK_RUNNING);
 	if (tag < 0)
 		return BLK_MQ_TAG_FAIL;
 	return tag;
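The two hunks above show the new percpu_ida_alloc() calling convention from
"percpu_ida: Make percpu_ida_alloc + callers accept task state bitmask":
callers pass a task state instead of a gfp_t, so the allocator either sleeps
in that state when the pool is empty or, with TASK_RUNNING, fails
immediately. A minimal caller-side sketch (my_get_tag and the can_sleep flag
are illustrative, not from the patch):

    #include <linux/percpu_ida.h>
    #include <linux/sched.h>

    /* Illustrative wrapper: a caller that used to pass GFP_KERNEL
     * (__GFP_WAIT set) now requests TASK_UNINTERRUPTIBLE and blocks until
     * a tag frees up; a caller that used GFP_NOWAIT passes TASK_RUNNING
     * and gets a negative return instead of sleeping.
     */
    static int my_get_tag(struct percpu_ida *pool, bool can_sleep)
    {
    	int tag = percpu_ida_alloc(pool, can_sleep ?
    				   TASK_UNINTERRUPTIBLE : TASK_RUNNING);
    	if (tag < 0)
    		return -ENOSPC;	/* only reachable in the TASK_RUNNING case */
    	return tag;
    }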
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -47,10 +47,10 @@ static int
 isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	       struct isert_rdma_wr *wr);
 static void
-isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
+isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
 static int
-isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
-		    struct isert_rdma_wr *wr);
+isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+	       struct isert_rdma_wr *wr);
 
 static void
 isert_qp_event_callback(struct ib_event *e, void *context)
@@ -227,11 +227,11 @@ isert_create_device_ib_res(struct isert_device *device)
 
 	/* asign function handlers */
 	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
-		device->use_frwr = 1;
-		device->reg_rdma_mem = isert_reg_rdma_frwr;
-		device->unreg_rdma_mem = isert_unreg_rdma_frwr;
+		device->use_fastreg = 1;
+		device->reg_rdma_mem = isert_reg_rdma;
+		device->unreg_rdma_mem = isert_unreg_rdma;
 	} else {
-		device->use_frwr = 0;
+		device->use_fastreg = 0;
 		device->reg_rdma_mem = isert_map_rdma;
 		device->unreg_rdma_mem = isert_unmap_cmd;
 	}
@@ -239,9 +239,10 @@ isert_create_device_ib_res(struct isert_device *device)
 	device->cqs_used = min_t(int, num_online_cpus(),
 				 device->ib_device->num_comp_vectors);
 	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
-	pr_debug("Using %d CQs, device %s supports %d vectors support FRWR %d\n",
+	pr_debug("Using %d CQs, device %s supports %d vectors support "
+		 "Fast registration %d\n",
 		 device->cqs_used, device->ib_device->name,
-		 device->ib_device->num_comp_vectors, device->use_frwr);
+		 device->ib_device->num_comp_vectors, device->use_fastreg);
 	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
 				device->cqs_used, GFP_KERNEL);
 	if (!device->cq_desc) {
@@ -250,13 +251,6 @@ isert_create_device_ib_res(struct isert_device *device)
 	}
 	cq_desc = device->cq_desc;
 
-	device->dev_pd = ib_alloc_pd(ib_dev);
-	if (IS_ERR(device->dev_pd)) {
-		ret = PTR_ERR(device->dev_pd);
-		pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret);
-		goto out_cq_desc;
-	}
-
 	for (i = 0; i < device->cqs_used; i++) {
 		cq_desc[i].device = device;
 		cq_desc[i].cq_index = i;
@@ -294,13 +288,6 @@ isert_create_device_ib_res(struct isert_device *device)
 		goto out_cq;
 	}
 
-	device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE);
-	if (IS_ERR(device->dev_mr)) {
-		ret = PTR_ERR(device->dev_mr);
-		pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret);
-		goto out_cq;
-	}
-
 	return 0;
 
 out_cq:
@@ -316,9 +303,6 @@ out_cq:
 			ib_destroy_cq(device->dev_tx_cq[j]);
 		}
 	}
-	ib_dealloc_pd(device->dev_pd);
-
-out_cq_desc:
 	kfree(device->cq_desc);
 
 	return ret;
@@ -341,8 +325,6 @@ isert_free_device_ib_res(struct isert_device *device)
 		device->dev_tx_cq[i] = NULL;
 	}
 
-	ib_dereg_mr(device->dev_mr);
-	ib_dealloc_pd(device->dev_pd);
 	kfree(device->cq_desc);
 }
 
@@ -398,18 +380,18 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
 }
 
 static void
-isert_conn_free_frwr_pool(struct isert_conn *isert_conn)
+isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
 {
 	struct fast_reg_descriptor *fr_desc, *tmp;
 	int i = 0;
 
-	if (list_empty(&isert_conn->conn_frwr_pool))
+	if (list_empty(&isert_conn->conn_fr_pool))
 		return;
 
-	pr_debug("Freeing conn %p frwr pool", isert_conn);
+	pr_debug("Freeing conn %p fastreg pool", isert_conn);
 
 	list_for_each_entry_safe(fr_desc, tmp,
-				 &isert_conn->conn_frwr_pool, list) {
+				 &isert_conn->conn_fr_pool, list) {
 		list_del(&fr_desc->list);
 		ib_free_fast_reg_page_list(fr_desc->data_frpl);
 		ib_dereg_mr(fr_desc->data_mr);
@@ -417,20 +399,47 @@ isert_conn_free_frwr_pool(struct isert_conn *isert_conn)
 		++i;
 	}
 
-	if (i < isert_conn->conn_frwr_pool_size)
+	if (i < isert_conn->conn_fr_pool_size)
 		pr_warn("Pool still has %d regions registered\n",
-			isert_conn->conn_frwr_pool_size - i);
+			isert_conn->conn_fr_pool_size - i);
 }
 
 static int
-isert_conn_create_frwr_pool(struct isert_conn *isert_conn)
+isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
+		     struct fast_reg_descriptor *fr_desc)
+{
+	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
+							 ISCSI_ISER_SG_TABLESIZE);
+	if (IS_ERR(fr_desc->data_frpl)) {
+		pr_err("Failed to allocate data frpl err=%ld\n",
+		       PTR_ERR(fr_desc->data_frpl));
+		return PTR_ERR(fr_desc->data_frpl);
+	}
+
+	fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
+	if (IS_ERR(fr_desc->data_mr)) {
+		pr_err("Failed to allocate data frmr err=%ld\n",
+		       PTR_ERR(fr_desc->data_mr));
+		ib_free_fast_reg_page_list(fr_desc->data_frpl);
+		return PTR_ERR(fr_desc->data_mr);
+	}
+	pr_debug("Create fr_desc %p page_list %p\n",
+		 fr_desc, fr_desc->data_frpl->page_list);
+
+	fr_desc->valid = true;
+
+	return 0;
+}
+
+static int
+isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
 {
 	struct fast_reg_descriptor *fr_desc;
 	struct isert_device *device = isert_conn->conn_device;
 	int i, ret;
 
-	INIT_LIST_HEAD(&isert_conn->conn_frwr_pool);
-	isert_conn->conn_frwr_pool_size = 0;
+	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
+	isert_conn->conn_fr_pool_size = 0;
 	for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
 		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
 		if (!fr_desc) {
@@ -439,40 +448,25 @@ isert_conn_create_frwr_pool(struct isert_conn *isert_conn)
 			goto err;
 		}
 
-		fr_desc->data_frpl =
-			ib_alloc_fast_reg_page_list(device->ib_device,
-						    ISCSI_ISER_SG_TABLESIZE);
-		if (IS_ERR(fr_desc->data_frpl)) {
-			pr_err("Failed to allocate fr_pg_list err=%ld\n",
-			       PTR_ERR(fr_desc->data_frpl));
-			ret = PTR_ERR(fr_desc->data_frpl);
+		ret = isert_create_fr_desc(device->ib_device,
+					   isert_conn->conn_pd, fr_desc);
+		if (ret) {
+			pr_err("Failed to create fastreg descriptor err=%d\n",
+			       ret);
 			goto err;
 		}
 
-		fr_desc->data_mr = ib_alloc_fast_reg_mr(device->dev_pd,
-							ISCSI_ISER_SG_TABLESIZE);
-		if (IS_ERR(fr_desc->data_mr)) {
-			pr_err("Failed to allocate frmr err=%ld\n",
-			       PTR_ERR(fr_desc->data_mr));
-			ret = PTR_ERR(fr_desc->data_mr);
-			ib_free_fast_reg_page_list(fr_desc->data_frpl);
-			goto err;
-		}
-		pr_debug("Create fr_desc %p page_list %p\n",
-			 fr_desc, fr_desc->data_frpl->page_list);
-
-		fr_desc->valid = true;
-		list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
-		isert_conn->conn_frwr_pool_size++;
+		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
+		isert_conn->conn_fr_pool_size++;
 	}
 
-	pr_debug("Creating conn %p frwr pool size=%d",
-		 isert_conn, isert_conn->conn_frwr_pool_size);
+	pr_debug("Creating conn %p fastreg pool size=%d",
+		 isert_conn, isert_conn->conn_fr_pool_size);
 
 	return 0;
 
 err:
-	isert_conn_free_frwr_pool(isert_conn);
+	isert_conn_free_fastreg_pool(isert_conn);
 	return ret;
 }
 
@@ -558,14 +552,29 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	}
 
 	isert_conn->conn_device = device;
-	isert_conn->conn_pd = device->dev_pd;
-	isert_conn->conn_mr = device->dev_mr;
+	isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
+	if (IS_ERR(isert_conn->conn_pd)) {
+		ret = PTR_ERR(isert_conn->conn_pd);
+		pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
+		       isert_conn, ret);
+		goto out_pd;
+	}
 
-	if (device->use_frwr) {
-		ret = isert_conn_create_frwr_pool(isert_conn);
+	isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
+					    IB_ACCESS_LOCAL_WRITE);
+	if (IS_ERR(isert_conn->conn_mr)) {
+		ret = PTR_ERR(isert_conn->conn_mr);
+		pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
+		       isert_conn, ret);
+		goto out_mr;
+	}
+
+	if (device->use_fastreg) {
+		ret = isert_conn_create_fastreg_pool(isert_conn);
 		if (ret) {
-			pr_err("Conn: %p failed to create frwr_pool\n", isert_conn);
-			goto out_frwr;
+			pr_err("Conn: %p failed to create fastreg pool\n",
+			       isert_conn);
+			goto out_fastreg;
 		}
 	}
 
@@ -582,9 +591,13 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	return 0;
 
 out_conn_dev:
-	if (device->use_frwr)
-		isert_conn_free_frwr_pool(isert_conn);
-out_frwr:
+	if (device->use_fastreg)
+		isert_conn_free_fastreg_pool(isert_conn);
+out_fastreg:
+	ib_dereg_mr(isert_conn->conn_mr);
+out_mr:
+	ib_dealloc_pd(isert_conn->conn_pd);
+out_pd:
 	isert_device_try_release(device);
 out_rsp_dma_map:
 	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
@@ -608,8 +621,8 @@ isert_connect_release(struct isert_conn *isert_conn)
 
 	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
 
-	if (device && device->use_frwr)
-		isert_conn_free_frwr_pool(isert_conn);
+	if (device && device->use_fastreg)
+		isert_conn_free_fastreg_pool(isert_conn);
 
 	if (isert_conn->conn_qp) {
 		cq_index = ((struct isert_cq_desc *)
@@ -623,6 +636,9 @@ isert_connect_release(struct isert_conn *isert_conn)
 	isert_free_rx_descriptors(isert_conn);
 	rdma_destroy_id(isert_conn->conn_cm_id);
 
+	ib_dereg_mr(isert_conn->conn_mr);
+	ib_dealloc_pd(isert_conn->conn_pd);
+
 	if (isert_conn->login_buf) {
 		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
 				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
@@ -1024,13 +1040,13 @@ isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
 }
 
 static struct iscsi_cmd
-*isert_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp)
+*isert_allocate_cmd(struct iscsi_conn *conn)
 {
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
 	struct isert_cmd *isert_cmd;
 	struct iscsi_cmd *cmd;
 
-	cmd = iscsit_allocate_cmd(conn, gfp);
+	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 	if (!cmd) {
 		pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
 		return NULL;
@@ -1219,7 +1235,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 
 	switch (opcode) {
 	case ISCSI_OP_SCSI_CMD:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
 
@@ -1233,7 +1249,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 					rx_desc, (unsigned char *)hdr);
 		break;
 	case ISCSI_OP_NOOP_OUT:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
 
@@ -1246,7 +1262,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 					   (unsigned char *)hdr);
 		break;
 	case ISCSI_OP_SCSI_TMFUNC:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
 
@@ -1254,7 +1270,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 						(unsigned char *)hdr);
 		break;
 	case ISCSI_OP_LOGOUT:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
 
@@ -1265,7 +1281,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 						    HZ);
 		break;
 	case ISCSI_OP_TEXT:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
 
@@ -1404,25 +1420,25 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 }
 
 static void
-isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
+isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 	LIST_HEAD(unmap_list);
 
-	pr_debug("unreg_frwr_cmd: %p\n", isert_cmd);
+	pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
 
 	if (wr->fr_desc) {
-		pr_debug("unreg_frwr_cmd: %p free fr_desc %p\n",
+		pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
 			 isert_cmd, wr->fr_desc);
 		spin_lock_bh(&isert_conn->conn_lock);
-		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_frwr_pool);
+		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
 		spin_unlock_bh(&isert_conn->conn_lock);
 		wr->fr_desc = NULL;
 	}
 
 	if (wr->sge) {
-		pr_debug("unreg_frwr_cmd: %p unmap_sg op\n", isert_cmd);
+		pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
 		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
 				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
 				DMA_TO_DEVICE : DMA_FROM_DEVICE);
@@ -2163,26 +2179,22 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
 
 static int
 isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
-		  struct isert_cmd *isert_cmd, struct isert_conn *isert_conn,
-		  struct ib_sge *ib_sge, u32 offset, unsigned int data_len)
+		  struct isert_conn *isert_conn, struct scatterlist *sg_start,
+		  struct ib_sge *ib_sge, u32 sg_nents, u32 offset,
+		  unsigned int data_len)
 {
-	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
-	struct scatterlist *sg_start;
-	u32 sg_off, page_off;
 	struct ib_send_wr fr_wr, inv_wr;
 	struct ib_send_wr *bad_wr, *wr = NULL;
+	int ret, pagelist_len;
+	u32 page_off;
 	u8 key;
-	int ret, sg_nents, pagelist_len;
 
-	sg_off = offset / PAGE_SIZE;
-	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-	sg_nents = min_t(unsigned int, cmd->se_cmd.t_data_nents - sg_off,
-			 ISCSI_ISER_SG_TABLESIZE);
+	sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE);
 	page_off = offset % PAGE_SIZE;
 
-	pr_debug("Cmd: %p use fr_desc %p sg_nents %d sg_off %d offset %u\n",
-		 isert_cmd, fr_desc, sg_nents, sg_off, offset);
+	pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
+		 fr_desc, sg_nents, offset);
 
 	pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
 					     &fr_desc->data_frpl->page_list[0]);
@@ -2232,8 +2244,8 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
 }
 
 static int
-isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
-		    struct isert_rdma_wr *wr)
+isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+	       struct isert_rdma_wr *wr)
 {
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
@@ -2251,9 +2263,9 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
 	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
 		data_left = se_cmd->data_length;
 	} else {
-		sg_off = cmd->write_data_done / PAGE_SIZE;
-		data_left = se_cmd->data_length - cmd->write_data_done;
+		offset = cmd->write_data_done;
+		sg_off = offset / PAGE_SIZE;
+		data_left = se_cmd->data_length - cmd->write_data_done;
 		isert_cmd->tx_desc.isert_cmd = isert_cmd;
 	}
 
@@ -2311,16 +2323,16 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 		wr->fr_desc = NULL;
 	} else {
 		spin_lock_irqsave(&isert_conn->conn_lock, flags);
-		fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
+		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
 					   struct fast_reg_descriptor, list);
 		list_del(&fr_desc->list);
 		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
 		wr->fr_desc = fr_desc;
 
-		ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
-					ib_sge, offset, data_len);
+		ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start,
+					ib_sge, sg_nents, offset, data_len);
 		if (ret) {
-			list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
+			list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
 			goto unmap_sg;
 		}
 	}
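Taken together, the ib_isert.c hunks do three things: rename the frwr naming
to fastreg, split descriptor creation out into isert_create_fr_desc(), and
move the protection domain and DMA MR from the shared isert_device into each
isert_conn so fastreg MRs are registered against a per-connection PD. A
condensed sketch of the setup ordering the diff establishes
(example_conn_ib_setup is a hypothetical condensation, not a function in the
patch; error unwinding is abbreviated):

    static int example_conn_ib_setup(struct isert_conn *isert_conn,
    				 struct isert_device *device)
    {
    	/* per-connection PD, formerly the device-wide dev_pd */
    	isert_conn->conn_pd = ib_alloc_pd(device->ib_device);
    	if (IS_ERR(isert_conn->conn_pd))
    		return PTR_ERR(isert_conn->conn_pd);

    	/* per-connection DMA MR, formerly the device-wide dev_mr */
    	isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
    					    IB_ACCESS_LOCAL_WRITE);
    	if (IS_ERR(isert_conn->conn_mr)) {
    		ib_dealloc_pd(isert_conn->conn_pd);
    		return PTR_ERR(isert_conn->conn_mr);
    	}

    	/* fastreg descriptors now take conn_pd; see isert_create_fr_desc() */
    	if (device->use_fastreg)
    		return isert_conn_create_fastreg_pool(isert_conn);
    	return 0;
    }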
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -119,9 +119,9 @@ struct isert_conn {
 	wait_queue_head_t	conn_wait;
 	wait_queue_head_t	conn_wait_comp_err;
 	struct kref		conn_kref;
-	struct list_head	conn_frwr_pool;
-	int			conn_frwr_pool_size;
-	/* lock to protect frwr_pool */
+	struct list_head	conn_fr_pool;
+	int			conn_fr_pool_size;
+	/* lock to protect fastreg pool */
 	spinlock_t		conn_lock;
 #define ISERT_COMP_BATCH_COUNT	8
 	int			conn_comp_batch;
@@ -139,13 +139,11 @@ struct isert_cq_desc {
 };
 
 struct isert_device {
-	int			use_frwr;
+	int			use_fastreg;
 	int			cqs_used;
 	int			refcount;
 	int			cq_active_qps[ISERT_MAX_CQ];
 	struct ib_device	*ib_device;
-	struct ib_pd		*dev_pd;
-	struct ib_mr		*dev_mr;
 	struct ib_cq		*dev_rx_cq[ISERT_MAX_CQ];
 	struct ib_cq		*dev_tx_cq[ISERT_MAX_CQ];
 	struct isert_cq_desc	*cq_desc;
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1990,6 +1990,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
 
 	vha->flags.delete_progress = 1;
 
+	qlt_remove_target(ha, vha);
+
 	fc_remove_host(vha->host);
 
 	scsi_remove_host(vha->host);
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2750,6 +2750,13 @@ struct qlfc_fw {
 	uint32_t len;
 };
 
+struct scsi_qlt_host {
+	void *target_lport_ptr;
+	struct mutex tgt_mutex;
+	struct mutex tgt_host_action_mutex;
+	struct qla_tgt *qla_tgt;
+};
+
 struct qlt_hw_data {
 	/* Protected by hw lock */
 	uint32_t enable_class_2:1;
@@ -2765,15 +2772,11 @@ struct qlt_hw_data {
 	uint32_t __iomem *atio_q_in;
 	uint32_t __iomem *atio_q_out;
 
-	void *target_lport_ptr;
 	struct qla_tgt_func_tmpl *tgt_ops;
-	struct qla_tgt *qla_tgt;
 	struct qla_tgt_cmd *cmds[DEFAULT_OUTSTANDING_COMMANDS];
 	uint16_t current_handle;
 
 	struct qla_tgt_vp_map *tgt_vp_map;
-	struct mutex tgt_mutex;
-	struct mutex tgt_host_action_mutex;
 
 	int saved_set;
 	uint16_t saved_exchange_count;
@@ -3435,6 +3438,7 @@ typedef struct scsi_qla_host {
 #define VP_ERR_FAB_LOGOUT	4
 #define VP_ERR_ADAP_NORESOURCES	5
 	struct qla_hw_data *hw;
+	struct scsi_qlt_host vha_tgt;
 	struct req_que *req;
 	int		fw_heartbeat_counter;
 	int		seconds_since_last_heartbeat;
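The qla_def.h hunks introduce struct scsi_qlt_host and embed it in
scsi_qla_host as vha_tgt, relocating target_lport_ptr, the two target-mode
mutexes, and the qla_tgt pointer out of the HBA-wide qlt_hw_data so that
each NPIV vport carries its own target state. The resulting access-pattern
change, as a hypothetical helper (not in the patch):

    /* Hypothetical helper; it shows the relocation that the qla_target.c
     * hunks below apply mechanically: per-vport target state is reached
     * through vha->vha_tgt instead of vha->hw->tgt.
     */
    static struct qla_tgt *example_tgt_of(struct scsi_qla_host *vha)
    {
    	/* old: return vha->hw->tgt.qla_tgt;  (single tgt per HBA) */
    	return vha->vha_tgt.qla_tgt;	/* one tgt per (NPIV) vport */
    }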
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -590,7 +590,7 @@ static struct qla_tgt_sess *qlt_create_sess(
 
 	/* Check to avoid double sessions */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
-	list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list,
+	list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
 				sess_list_entry) {
 		if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
@@ -627,7 +627,7 @@ static struct qla_tgt_sess *qlt_create_sess(
 
 		return NULL;
 	}
-	sess->tgt = ha->tgt.qla_tgt;
+	sess->tgt = vha->vha_tgt.qla_tgt;
 	sess->vha = vha;
 	sess->s_id = fcport->d_id;
 	sess->loop_id = fcport->loop_id;
@@ -635,7 +635,7 @@ static struct qla_tgt_sess *qlt_create_sess(
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
 	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
-	    sess, ha->tgt.qla_tgt);
+	    sess, vha->vha_tgt.qla_tgt);
 
 	be_sid[0] = sess->s_id.b.domain;
 	be_sid[1] = sess->s_id.b.area;
@@ -662,8 +662,8 @@ static struct qla_tgt_sess *qlt_create_sess(
 	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
-	list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list);
-	ha->tgt.qla_tgt->sess_count++;
+	list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
+	vha->vha_tgt.qla_tgt->sess_count++;
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
@@ -682,7 +682,7 @@ static struct qla_tgt_sess *qlt_create_sess(
 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	struct qla_tgt_sess *sess;
 	unsigned long flags;
 
@@ -692,6 +692,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
 	if (!tgt || (fcport->port_type != FCT_INITIATOR))
 		return;
 
+	if (qla_ini_mode_enabled(vha))
+		return;
+
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	if (tgt->tgt_stop) {
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -701,9 +704,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
 	if (!sess) {
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-		mutex_lock(&ha->tgt.tgt_mutex);
+		mutex_lock(&vha->vha_tgt.tgt_mutex);
 		sess = qlt_create_sess(vha, fcport, false);
-		mutex_unlock(&ha->tgt.tgt_mutex);
+		mutex_unlock(&vha->vha_tgt.tgt_mutex);
 
 		spin_lock_irqsave(&ha->hardware_lock, flags);
 	} else {
@@ -739,7 +742,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
 void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	struct qla_tgt_sess *sess;
 	unsigned long flags;
 
@@ -806,12 +809,12 @@ void qlt_stop_phase1(struct qla_tgt *tgt)
 	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
 	 * Lock is needed, because we still can get an incoming packet.
 	 */
-	mutex_lock(&ha->tgt.tgt_mutex);
+	mutex_lock(&vha->vha_tgt.tgt_mutex);
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	tgt->tgt_stop = 1;
 	qlt_clear_tgt_db(tgt, true);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-	mutex_unlock(&ha->tgt.tgt_mutex);
+	mutex_unlock(&vha->vha_tgt.tgt_mutex);
 
 	flush_delayed_work(&tgt->sess_del_work);
 
@@ -845,20 +848,21 @@ EXPORT_SYMBOL(qlt_stop_phase1);
 void qlt_stop_phase2(struct qla_tgt *tgt)
 {
 	struct qla_hw_data *ha = tgt->ha;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 	unsigned long flags;
 
 	if (tgt->tgt_stopped) {
-		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f,
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
 		    "Already in tgt->tgt_stopped state\n");
 		dump_stack();
 		return;
 	}
 
-	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b,
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
 	    "Waiting for %d IRQ commands to complete (tgt %p)",
 	    tgt->irq_cmd_count, tgt);
 
-	mutex_lock(&ha->tgt.tgt_mutex);
+	mutex_lock(&vha->vha_tgt.tgt_mutex);
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	while (tgt->irq_cmd_count != 0) {
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -868,9 +872,9 @@ void qlt_stop_phase2(struct qla_tgt *tgt)
 	tgt->tgt_stop = 0;
 	tgt->tgt_stopped = 1;
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-	mutex_unlock(&ha->tgt.tgt_mutex);
+	mutex_unlock(&vha->vha_tgt.tgt_mutex);
 
-	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished",
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
 	    tgt);
 }
 EXPORT_SYMBOL(qlt_stop_phase2);
@@ -878,14 +882,14 @@ EXPORT_SYMBOL(qlt_stop_phase2);
 /* Called from qlt_remove_target() -> qla2x00_remove_one() */
 static void qlt_release(struct qla_tgt *tgt)
 {
-	struct qla_hw_data *ha = tgt->ha;
+	scsi_qla_host_t *vha = tgt->vha;
 
-	if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
+	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
 		qlt_stop_phase2(tgt);
 
-	ha->tgt.qla_tgt = NULL;
+	vha->vha_tgt.qla_tgt = NULL;
 
-	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d,
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
 	    "Release of tgt %p finished\n", tgt);
 
 	kfree(tgt);
@@ -949,8 +953,8 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
 		return;
 	}
 
-	if (ha->tgt.qla_tgt != NULL)
-		ha->tgt.qla_tgt->notify_ack_expected++;
+	if (vha->vha_tgt.qla_tgt != NULL)
+		vha->vha_tgt.qla_tgt->notify_ack_expected++;
 
 	pkt->entry_type = NOTIFY_ACK_TYPE;
 	pkt->entry_count = 1;
@@ -1054,7 +1058,7 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
 		/* Other bytes are zero */
 	}
 
-	ha->tgt.qla_tgt->abts_resp_expected++;
+	vha->vha_tgt.qla_tgt->abts_resp_expected++;
 
 	qla2x00_start_iocbs(vha, vha->req);
 }
@@ -1206,7 +1210,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
 		    "qla_target(%d): task abort for non-existant session\n",
 		    vha->vp_idx);
-		rc = qlt_sched_sess_work(ha->tgt.qla_tgt,
+		rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
 		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
 		if (rc != 0) {
 			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
@@ -2157,8 +2161,7 @@ static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
 	struct qla_tgt_cmd *cmd, void *ctio)
 {
 	struct qla_tgt_srr_ctio *sc;
-	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	struct qla_tgt_srr_imm *imm;
 
 	tgt->ctio_srr_id++;
@@ -2474,7 +2477,7 @@ static void qlt_do_work(struct work_struct *work)
 	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
 	scsi_qla_host_t *vha = cmd->vha;
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	struct qla_tgt_sess *sess = NULL;
 	struct atio_from_isp *atio = &cmd->atio;
 	unsigned char *cdb;
@@ -2507,10 +2510,10 @@ static void qlt_do_work(struct work_struct *work)
 			goto out_term;
 		}
 
-		mutex_lock(&ha->tgt.tgt_mutex);
+		mutex_lock(&vha->vha_tgt.tgt_mutex);
 		sess = qlt_make_local_sess(vha, s_id);
 		/* sess has an extra creation ref. */
-		mutex_unlock(&ha->tgt.tgt_mutex);
+		mutex_unlock(&vha->vha_tgt.tgt_mutex);
 
 		if (!sess)
 			goto out_term;
@@ -2576,8 +2579,7 @@ out_term:
 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
 	struct atio_from_isp *atio)
 {
-	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	struct qla_tgt_cmd *cmd;
 
 	if (unlikely(tgt->tgt_stop)) {
@@ -2597,7 +2599,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
 
 	memcpy(&cmd->atio, atio, sizeof(*atio));
 	cmd->state = QLA_TGT_STATE_NEW;
-	cmd->tgt = ha->tgt.qla_tgt;
+	cmd->tgt = vha->vha_tgt.qla_tgt;
 	cmd->vha = vha;
 
 	INIT_WORK(&cmd->work, qlt_do_work);
@@ -2723,7 +2725,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
 	uint32_t lun, unpacked_lun;
 	int lun_size, fn;
 
-	tgt = ha->tgt.qla_tgt;
+	tgt = vha->vha_tgt.qla_tgt;
 
 	lun = a->u.isp24.fcp_cmnd.lun;
 	lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
@@ -2797,7 +2799,7 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
 		    "qla_target(%d): task abort for unexisting "
 		    "session\n", vha->vp_idx);
-		return qlt_sched_sess_work(ha->tgt.qla_tgt,
+		return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
 		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
 	}
 
@@ -2810,7 +2812,6 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
 	struct imm_ntfy_from_isp *iocb)
 {
-	struct qla_hw_data *ha = vha->hw;
 	int res = 0;
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
@@ -2828,7 +2829,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
 	case ELS_PDISC:
 	case ELS_ADISC:
 	{
-		struct qla_tgt *tgt = ha->tgt.qla_tgt;
+		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 		if (tgt->link_reinit_iocb_pending) {
 			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
 			    0, 0, 0, 0, 0, 0);
@@ -3202,8 +3203,7 @@ static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
 	struct imm_ntfy_from_isp *iocb)
 {
 	struct qla_tgt_srr_imm *imm;
-	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	struct qla_tgt_srr_ctio *sctio;
 
 	tgt->imm_srr_id++;
@@ -3313,7 +3313,7 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
 
 	case IMM_NTFY_LIP_LINK_REINIT:
 	{
-		struct qla_tgt *tgt = ha->tgt.qla_tgt;
+		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
 		    "qla_target(%d): LINK REINIT (loop %#x, "
 		    "subcode %x)\n", vha->vp_idx,
@@ -3489,7 +3489,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
 	struct atio_from_isp *atio)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	int rc;
 
 	if (unlikely(tgt == NULL)) {
@@ -3591,7 +3591,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
 static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 
 	if (unlikely(tgt == NULL)) {
 		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
@@ -3794,7 +3794,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
 	uint16_t *mailbox)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	int login_code;
 
 	ql_dbg(ql_dbg_tgt, vha, 0xe039,
@@ -3924,14 +3924,14 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
 	uint8_t *s_id)
 {
-	struct qla_hw_data *ha = vha->hw;
 	struct qla_tgt_sess *sess = NULL;
 	fc_port_t *fcport = NULL;
 	int rc, global_resets;
 	uint16_t loop_id = 0;
 
 retry:
-	global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
+	global_resets =
+	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
 
 	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
 	if (rc != 0) {
@@ -3958,12 +3958,13 @@ retry:
 		return NULL;
 
 	if (global_resets !=
-	    atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
+	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
 		    "qla_target(%d): global reset during session discovery "
 		    "(counter was %d, new %d), retrying", vha->vp_idx,
 		    global_resets,
-		    atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
+		    atomic_read(&vha->vha_tgt.
+			qla_tgt->tgt_global_resets_count));
 		goto retry;
 	}
 
@@ -3998,10 +3999,10 @@ static void qlt_abort_work(struct qla_tgt *tgt,
 	if (!sess) {
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-		mutex_lock(&ha->tgt.tgt_mutex);
+		mutex_lock(&vha->vha_tgt.tgt_mutex);
 		sess = qlt_make_local_sess(vha, s_id);
 		/* sess has got an extra creation ref */
-		mutex_unlock(&ha->tgt.tgt_mutex);
+		mutex_unlock(&vha->vha_tgt.tgt_mutex);
 
 		spin_lock_irqsave(&ha->hardware_lock, flags);
 		if (!sess)
@@ -4052,10 +4053,10 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
 	if (!sess) {
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-		mutex_lock(&ha->tgt.tgt_mutex);
+		mutex_lock(&vha->vha_tgt.tgt_mutex);
 		sess = qlt_make_local_sess(vha, s_id);
 		/* sess has got an extra creation ref */
-		mutex_unlock(&ha->tgt.tgt_mutex);
+		mutex_unlock(&vha->vha_tgt.tgt_mutex);
 
 		spin_lock_irqsave(&ha->hardware_lock, flags);
 		if (!sess)
@@ -4141,9 +4142,9 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
 	}
 
 	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
-	    "Registering target for host %ld(%p)", base_vha->host_no, ha);
+	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
 
-	BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL));
+	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);
 
 	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
 	if (!tgt) {
@@ -4171,7 +4172,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
 	INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
 	atomic_set(&tgt->tgt_global_resets_count, 0);
 
-	ha->tgt.qla_tgt = tgt;
+	base_vha->vha_tgt.qla_tgt = tgt;
 
 	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
 	    "qla_target(%d): using 64 Bit PCI addressing",
@@ -4192,16 +4193,16 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
 /* Must be called under tgt_host_action_mutex */
 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
 {
-	if (!ha->tgt.qla_tgt)
+	if (!vha->vha_tgt.qla_tgt)
 		return 0;
 
 	mutex_lock(&qla_tgt_mutex);
-	list_del(&ha->tgt.qla_tgt->tgt_list_entry);
+	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
 	mutex_unlock(&qla_tgt_mutex);
 
 	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
 	    vha->host_no, ha);
-	qlt_release(ha->tgt.qla_tgt);
+	qlt_release(vha->vha_tgt.qla_tgt);
 
 	return 0;
 }
@@ -4235,8 +4236,9 @@ static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
  * @callback:  lport initialization callback for tcm_qla2xxx code
  * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
  */
-int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
-	int (*callback)(struct scsi_qla_host *), void *target_lport_ptr)
+int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
+		       u64 npiv_wwpn, u64 npiv_wwnn,
+		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
 {
 	struct qla_tgt *tgt;
 	struct scsi_qla_host *vha;
@@ -4255,14 +4257,11 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
 		if (!host)
 			continue;
 
-		if (ha->tgt.tgt_ops != NULL)
-			continue;
-
 		if (!(host->hostt->supported_mode & MODE_TARGET))
 			continue;
 
 		spin_lock_irqsave(&ha->hardware_lock, flags);
-		if (host->active_mode & MODE_TARGET) {
+		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
 			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
 			    host->host_no);
 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -4276,24 +4275,18 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
 			    " qla2xxx scsi_host\n");
 			continue;
 		}
-		qlt_lport_dump(vha, wwpn, b);
+		qlt_lport_dump(vha, phys_wwpn, b);
 
 		if (memcmp(vha->port_name, b, WWN_SIZE)) {
 			scsi_host_put(host);
 			continue;
 		}
-		/*
-		 * Setup passed parameters ahead of invoking callback
-		 */
-		ha->tgt.tgt_ops = qla_tgt_ops;
-		ha->tgt.target_lport_ptr = target_lport_ptr;
-		rc = (*callback)(vha);
-		if (rc != 0) {
-			ha->tgt.tgt_ops = NULL;
-			ha->tgt.target_lport_ptr = NULL;
-			scsi_host_put(host);
-		}
 		mutex_unlock(&qla_tgt_mutex);
+
+		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
+		if (rc != 0)
+			scsi_host_put(host);
+
 		return rc;
 	}
 	mutex_unlock(&qla_tgt_mutex);
@@ -4314,7 +4307,7 @@ void qlt_lport_deregister(struct scsi_qla_host *vha)
 	/*
 	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
 	 */
-	ha->tgt.target_lport_ptr = NULL;
+	vha->vha_tgt.target_lport_ptr = NULL;
 	ha->tgt.tgt_ops = NULL;
 	/*
 	 * Release the Scsi_Host reference for the underlying qla2xxx host
@@ -4376,8 +4369,9 @@ void
 qlt_enable_vha(struct scsi_qla_host *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	unsigned long flags;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
 	if (!tgt) {
 		ql_dbg(ql_dbg_tgt, vha, 0xe069,
@@ -4392,9 +4386,14 @@ qlt_enable_vha(struct scsi_qla_host *vha)
 	qlt_set_mode(vha);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
-	qla2xxx_wake_dpc(vha);
-	qla2x00_wait_for_hba_online(vha);
+	if (vha->vp_idx) {
+		qla24xx_disable_vp(vha);
+		qla24xx_enable_vp(vha);
+	} else {
+		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+		qla2xxx_wake_dpc(base_vha);
+		qla2x00_wait_for_hba_online(base_vha);
+	}
 }
 EXPORT_SYMBOL(qlt_enable_vha);
 
@@ -4407,7 +4406,7 @@ void
 qlt_disable_vha(struct scsi_qla_host *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	unsigned long flags;
 
 	if (!tgt) {
@@ -4438,8 +4437,10 @@ qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
 	if (!qla_tgt_mode_enabled(vha))
 		return;
 
-	mutex_init(&ha->tgt.tgt_mutex);
-	mutex_init(&ha->tgt.tgt_host_action_mutex);
+	vha->vha_tgt.qla_tgt = NULL;
+
+	mutex_init(&vha->vha_tgt.tgt_mutex);
+	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
 
 	qlt_clear_mode(vha);
 
@@ -4450,6 +4451,8 @@ qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
 	 * assigning the value appropriately.
 	 */
 	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+
+	qlt_add_target(ha, vha);
 }
 
 void
@@ -4768,8 +4771,8 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
 		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
 	}
 
-	mutex_init(&ha->tgt.tgt_mutex);
-	mutex_init(&ha->tgt.tgt_host_action_mutex);
+	mutex_init(&base_vha->vha_tgt.tgt_mutex);
+	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
 	qlt_clear_mode(base_vha);
 }
 
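The qlt_lport_register() rework above also changes its contract: the fabric
module now passes its lport as target_lport_ptr together with the physical
WWPN and optional NPIV WWPN/WWNN, and the callback receives all of them
back. Zero NPIV WWNs select the physical-port path; non-zero values make
the callback responsible for creating the NPIV fc_vport, as
tcm_qla2xxx_lport_register_npiv_cb does later in this series. A hedged
caller sketch (my_cb and my_register_physical_port are placeholders, not
from the patch):

    static int my_cb(struct scsi_qla_host *vha, void *target_lport_ptr,
    		 u64 npiv_wwpn, u64 npiv_wwnn)
    {
    	/* stash the fabric lport on the matched (base or NPIV) vha */
    	vha->vha_tgt.target_lport_ptr = target_lport_ptr;
    	return 0;
    }

    static int my_register_physical_port(void *my_lport, u64 phys_wwpn)
    {
    	/* npiv_wwpn == npiv_wwnn == 0 -> register the physical port */
    	return qlt_lport_register(my_lport, phys_wwpn, 0, 0, my_cb);
    }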
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -932,8 +932,8 @@ void qlt_disable_vha(struct scsi_qla_host *);
  */
 extern int qlt_add_target(struct qla_hw_data *, struct scsi_qla_host *);
 extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *);
-extern int qlt_lport_register(struct qla_tgt_func_tmpl *, u64,
-		int (*callback)(struct scsi_qla_host *), void *);
+extern int qlt_lport_register(void *, u64, u64, u64,
+		int (*callback)(struct scsi_qla_host *, void *, u64, u64));
 extern void qlt_lport_deregister(struct scsi_qla_host *);
 extern void qlt_unreg_sess(struct qla_tgt_sess *);
 extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -53,16 +53,6 @@
 struct workqueue_struct *tcm_qla2xxx_free_wq;
 struct workqueue_struct *tcm_qla2xxx_cmd_wq;
 
-static int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg)
-{
-	return 1;
-}
-
-static int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg)
-{
-	return 0;
-}
-
 /*
  * Parse WWN.
  * If strict, we require lower-case hex and colon separators to be sure
@@ -174,7 +164,7 @@ static int tcm_qla2xxx_npiv_parse_wwn(
 	*wwnn = 0;
 
 	/* count may include a LF at end of string */
-	if (name[cnt-1] == '\n')
+	if (name[cnt-1] == '\n' || name[cnt-1] == 0)
 		cnt--;
 
 	/* validate we have enough characters for WWPN */
@@ -777,6 +767,9 @@ static void tcm_qla2xxx_put_session(struct se_session *se_sess)
 
 static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
 {
+	if (!sess)
+		return;
+
 	assert_spin_locked(&sess->vha->hw->hardware_lock);
 	kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session);
 }
@@ -957,7 +950,6 @@ static ssize_t tcm_qla2xxx_tpg_store_enable(
 	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
 			struct tcm_qla2xxx_lport, lport_wwn);
 	struct scsi_qla_host *vha = lport->qla_vha;
-	struct qla_hw_data *ha = vha->hw;
 	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
 			struct tcm_qla2xxx_tpg, se_tpg);
 	unsigned long op;
@@ -977,12 +969,12 @@ static ssize_t tcm_qla2xxx_tpg_store_enable(
 		atomic_set(&tpg->lport_tpg_enabled, 1);
 		qlt_enable_vha(vha);
 	} else {
-		if (!ha->tgt.qla_tgt) {
-			pr_err("truct qla_hw_data *ha->tgt.qla_tgt is NULL\n");
+		if (!vha->vha_tgt.qla_tgt) {
+			pr_err("struct qla_hw_data *vha->vha_tgt.qla_tgt is NULL\n");
 			return -ENODEV;
 		}
 		atomic_set(&tpg->lport_tpg_enabled, 0);
-		qlt_stop_phase1(ha->tgt.qla_tgt);
+		qlt_stop_phase1(vha->vha_tgt.qla_tgt);
 	}
 
 	return count;
@@ -1011,7 +1003,7 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
 	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
 		return ERR_PTR(-EINVAL);
 
-	if (!lport->qla_npiv_vp && (tpgt != 1)) {
+	if ((tpgt != 1)) {
 		pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
 		return ERR_PTR(-ENOSYS);
 	}
@@ -1038,11 +1030,8 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
 		kfree(tpg);
 		return NULL;
 	}
-	/*
-	 * Setup local TPG=1 pointer for non NPIV mode.
-	 */
-	if (lport->qla_npiv_vp == NULL)
-		lport->tpg_1 = tpg;
+
+	lport->tpg_1 = tpg;
 
 	return &tpg->se_tpg;
 }
@@ -1053,19 +1042,17 @@ static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
 			struct tcm_qla2xxx_tpg, se_tpg);
 	struct tcm_qla2xxx_lport *lport = tpg->lport;
 	struct scsi_qla_host *vha = lport->qla_vha;
-	struct qla_hw_data *ha = vha->hw;
 	/*
 	 * Call into qla2x_target.c LLD logic to shutdown the active
 	 * FC Nexuses and disable target mode operation for this qla_hw_data
 	 */
-	if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop)
-		qlt_stop_phase1(ha->tgt.qla_tgt);
+	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stop)
+		qlt_stop_phase1(vha->vha_tgt.qla_tgt);
 
 	core_tpg_deregister(se_tpg);
 	/*
 	 * Clear local TPG=1 pointer for non NPIV mode.
 	 */
-	if (lport->qla_npiv_vp == NULL)
-		lport->tpg_1 = NULL;
+	lport->tpg_1 = NULL;
 
 	kfree(tpg);
@@ -1095,12 +1082,22 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
 	tpg->lport = lport;
 	tpg->lport_tpgt = tpgt;
 
+	/*
+	 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
+	 * NodeACLs
+	 */
+	tpg->tpg_attrib.generate_node_acls = 1;
+	tpg->tpg_attrib.demo_mode_write_protect = 1;
+	tpg->tpg_attrib.cache_dynamic_acls = 1;
+	tpg->tpg_attrib.demo_mode_login_only = 1;
+
 	ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
 				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
 	if (ret < 0) {
 		kfree(tpg);
 		return NULL;
 	}
+	lport->tpg_1 = tpg;
 	return &tpg->se_tpg;
 }
 
@@ -1111,13 +1108,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
 	scsi_qla_host_t *vha,
 	const uint8_t *s_id)
 {
-	struct qla_hw_data *ha = vha->hw;
 	struct tcm_qla2xxx_lport *lport;
 	struct se_node_acl *se_nacl;
 	struct tcm_qla2xxx_nacl *nacl;
 	u32 key;
 
-	lport = ha->tgt.target_lport_ptr;
+	lport = vha->vha_tgt.target_lport_ptr;
 	if (!lport) {
 		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
 		dump_stack();
@@ -1221,13 +1217,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
 	scsi_qla_host_t *vha,
 	const uint16_t loop_id)
 {
-	struct qla_hw_data *ha = vha->hw;
 	struct tcm_qla2xxx_lport *lport;
 	struct se_node_acl *se_nacl;
 	struct tcm_qla2xxx_nacl *nacl;
 	struct tcm_qla2xxx_fc_loopid *fc_loopid;
 
-	lport = ha->tgt.target_lport_ptr;
+	lport = vha->vha_tgt.target_lport_ptr;
 	if (!lport) {
 		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
 		dump_stack();
@@ -1341,6 +1336,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
 {
 	struct qla_tgt *tgt = sess->tgt;
 	struct qla_hw_data *ha = tgt->ha;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 	struct se_session *se_sess;
 	struct se_node_acl *se_nacl;
 	struct tcm_qla2xxx_lport *lport;
@@ -1357,7 +1353,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
 	se_nacl = se_sess->se_node_acl;
 	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
 
-	lport = ha->tgt.target_lport_ptr;
+	lport = vha->vha_tgt.target_lport_ptr;
 	if (!lport) {
 		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
 		dump_stack();
@@ -1391,7 +1387,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
 	unsigned char port_name[36];
 	unsigned long flags;
 
-	lport = ha->tgt.target_lport_ptr;
+	lport = vha->vha_tgt.target_lport_ptr;
 	if (!lport) {
 		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
 		dump_stack();
@@ -1455,7 +1451,8 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
 {
 	struct qla_tgt *tgt = sess->tgt;
 	struct qla_hw_data *ha = tgt->ha;
-	struct tcm_qla2xxx_lport *lport = ha->tgt.target_lport_ptr;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+	struct tcm_qla2xxx_lport *lport = vha->vha_tgt.target_lport_ptr;
 	struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
 	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
 			struct tcm_qla2xxx_nacl, se_node_acl);
@@ -1562,15 +1559,18 @@ static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
 	return 0;
 }
 
-static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha)
+static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha,
+					 void *target_lport_ptr,
+					 u64 npiv_wwpn, u64 npiv_wwnn)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct tcm_qla2xxx_lport *lport;
+	struct tcm_qla2xxx_lport *lport =
+			(struct tcm_qla2xxx_lport *)target_lport_ptr;
 	/*
-	 * Setup local pointer to vha, NPIV VP pointer (if present) and
-	 * vha->tcm_lport pointer
+	 * Setup tgt_ops, local pointer to vha and target_lport_ptr
 	 */
-	lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr;
+	ha->tgt.tgt_ops = &tcm_qla2xxx_template;
+	vha->vha_tgt.target_lport_ptr = target_lport_ptr;
 	lport->qla_vha = vha;
 
 	return 0;
@@ -1602,8 +1602,8 @@ static struct se_wwn *tcm_qla2xxx_make_lport(
 	if (ret != 0)
 		goto out;
 
-	ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn,
-				tcm_qla2xxx_lport_register_cb, lport);
+	ret = qlt_lport_register(lport, wwpn, 0, 0,
+				 tcm_qla2xxx_lport_register_cb);
 	if (ret != 0)
 		goto out_lport;
 
@@ -1621,7 +1621,6 @@ static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
 	struct tcm_qla2xxx_lport *lport = container_of(wwn,
 			struct tcm_qla2xxx_lport, lport_wwn);
 	struct scsi_qla_host *vha = lport->qla_vha;
-	struct qla_hw_data *ha = vha->hw;
 	struct se_node_acl *node;
 	u32 key = 0;
 
@@ -1630,8 +1629,8 @@ static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
 	 * shutdown of struct qla_tgt after the call to
 	 * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above..
 	 */
-	if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped)
-		qlt_stop_phase2(ha->tgt.qla_tgt);
+	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stopped)
+		qlt_stop_phase2(vha->vha_tgt.qla_tgt);
 
 	qlt_lport_deregister(vha);
 
@@ -1642,17 +1641,70 @@ static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
 	kfree(lport);
 }
 
+static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
+					      void *target_lport_ptr,
+					      u64 npiv_wwpn, u64 npiv_wwnn)
+{
+	struct fc_vport *vport;
+	struct Scsi_Host *sh = base_vha->host;
+	struct scsi_qla_host *npiv_vha;
+	struct tcm_qla2xxx_lport *lport =
+			(struct tcm_qla2xxx_lport *)target_lport_ptr;
+	struct fc_vport_identifiers vport_id;
+
+	if (!qla_tgt_mode_enabled(base_vha)) {
+		pr_err("qla2xxx base_vha not enabled for target mode\n");
+		return -EPERM;
+	}
+
+	memset(&vport_id, 0, sizeof(vport_id));
+	vport_id.port_name = npiv_wwpn;
+	vport_id.node_name = npiv_wwnn;
+	vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
+	vport_id.vport_type = FC_PORTTYPE_NPIV;
+	vport_id.disable = false;
+
+	vport = fc_vport_create(sh, 0, &vport_id);
+	if (!vport) {
+		pr_err("fc_vport_create failed for qla2xxx_npiv\n");
+		return -ENODEV;
+	}
+	/*
+	 * Setup local pointer to NPIV vhba + target_lport_ptr
+	 */
+	npiv_vha = (struct scsi_qla_host *)vport->dd_data;
+	npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr;
+	lport->qla_vha = npiv_vha;
+
+	scsi_host_get(npiv_vha->host);
+	return 0;
+}
+
+
 static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
 	struct target_fabric_configfs *tf,
 	struct config_group *group,
 	const char *name)
 {
 	struct tcm_qla2xxx_lport *lport;
-	u64 npiv_wwpn, npiv_wwnn;
+	u64 phys_wwpn, npiv_wwpn, npiv_wwnn;
+	char *p, tmp[128];
 	int ret;
 
-	if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1,
-				&npiv_wwpn, &npiv_wwnn) < 0)
+	snprintf(tmp, 128, "%s", name);
+
+	p = strchr(tmp, '@');
+	if (!p) {
+		pr_err("Unable to locate NPIV '@' seperator\n");
+		return ERR_PTR(-EINVAL);
+	}
+	*p++ = '\0';
+
+	if (tcm_qla2xxx_parse_wwn(tmp, &phys_wwpn, 1) < 0)
+		return ERR_PTR(-EINVAL);
+
+	if (tcm_qla2xxx_npiv_parse_wwn(p, strlen(p)+1,
+				       &npiv_wwpn, &npiv_wwnn) < 0)
 		return ERR_PTR(-EINVAL);
 
 	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
@@ -1666,12 +1718,19 @@ static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
 		       TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
 	sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn);
 
-	/* FIXME: tcm_qla2xxx_npiv_make_lport */
-	ret = -ENOSYS;
+	ret = tcm_qla2xxx_init_lport(lport);
 	if (ret != 0)
 		goto out;
 
+	ret = qlt_lport_register(lport, phys_wwpn, npiv_wwpn, npiv_wwnn,
+				 tcm_qla2xxx_lport_register_npiv_cb);
+	if (ret != 0)
+		goto out_lport;
+
 	return &lport->lport_wwn;
+out_lport:
+	vfree(lport->lport_loopid_map);
+	btree_destroy32(&lport->lport_fcport_map);
 out:
 	kfree(lport);
 	return ERR_PTR(ret);
@@ -1681,14 +1740,16 @@ static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
 {
 	struct tcm_qla2xxx_lport *lport = container_of(wwn,
 			struct tcm_qla2xxx_lport, lport_wwn);
-	struct scsi_qla_host *vha = lport->qla_vha;
-	struct Scsi_Host *sh = vha->host;
-	/*
-	 * Notify libfc that we want to release the lport->npiv_vport
-	 */
-	fc_vport_terminate(lport->npiv_vport);
+	struct scsi_qla_host *npiv_vha = lport->qla_vha;
+	struct qla_hw_data *ha = npiv_vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
-	scsi_host_put(sh);
+	scsi_host_put(npiv_vha->host);
+	/*
+	 * Notify libfc that we want to release the vha->fc_vport
+	 */
+	fc_vport_terminate(npiv_vha->fc_vport);
+	scsi_host_put(base_vha->host);
 	kfree(lport);
 }
 
@@ -1769,14 +1830,16 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
 	.tpg_get_pr_transport_id	= tcm_qla2xxx_get_pr_transport_id,
 	.tpg_get_pr_transport_id_len	= tcm_qla2xxx_get_pr_transport_id_len,
 	.tpg_parse_pr_out_transport_id	= tcm_qla2xxx_parse_pr_out_transport_id,
-	.tpg_check_demo_mode		= tcm_qla2xxx_check_false,
-	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_true,
-	.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true,
-	.tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false,
+	.tpg_check_demo_mode		= tcm_qla2xxx_check_demo_mode,
+	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_demo_mode_cache,
+	.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode,
+	.tpg_check_prod_mode_write_protect =
+	    tcm_qla2xxx_check_prod_write_protect,
 	.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
 	.tpg_alloc_fabric_acl		= tcm_qla2xxx_alloc_fabric_acl,
 	.tpg_release_fabric_acl		= tcm_qla2xxx_release_fabric_acl,
 	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
 	.check_stop_free		= tcm_qla2xxx_check_stop_free,
 	.release_cmd			= tcm_qla2xxx_release_cmd,
 	.put_session			= tcm_qla2xxx_put_session,
 	.shutdown_session		= tcm_qla2xxx_shutdown_session,
@@ -1871,7 +1934,8 @@ static int tcm_qla2xxx_register_configfs(void)
 	 * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
 	 */
 	npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
-	npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;
+	npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs =
+	    tcm_qla2xxx_tpg_attrs;
 	npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
 	npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
 	npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
@@ -70,12 +70,8 @@ struct tcm_qla2xxx_lport {
struct tcm_qla2xxx_fc_loopid *lport_loopid_map;
/* Pointer to struct scsi_qla_host from qla2xxx LLD */
struct scsi_qla_host *qla_vha;
/* Pointer to struct scsi_qla_host for NPIV VP from qla2xxx LLD */
struct scsi_qla_host *qla_npiv_vp;
/* Pointer to struct qla_tgt pointer */
struct qla_tgt lport_qla_tgt;
/* Pointer to struct fc_vport for NPIV vport from libfc */
struct fc_vport *npiv_vport;
/* Pointer to TPG=1 for non NPIV mode */
struct tcm_qla2xxx_tpg *tpg_1;
/* Returned by tcm_qla2xxx_make_lport() */

@@ -3,6 +3,7 @@ menuconfig TARGET_CORE
tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
depends on SCSI && BLOCK
select CONFIGFS_FS
select CRC_T10DIF
default n
help
Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled

@@ -13,6 +14,7 @@ if TARGET_CORE

config TCM_IBLOCK
tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK"
select BLK_DEV_INTEGRITY
help
Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered
access to Linux/Block devices using BIO

@@ -52,7 +52,7 @@
static LIST_HEAD(g_tiqn_list);
static LIST_HEAD(g_np_list);
static DEFINE_SPINLOCK(tiqn_lock);
static DEFINE_SPINLOCK(np_lock);
static DEFINE_MUTEX(np_lock);

static struct idr tiqn_idr;
struct idr sess_idr;

@@ -307,6 +307,9 @@ bool iscsit_check_np_match(
return false;
}

/*
* Called with mutex np_lock held
*/
static struct iscsi_np *iscsit_get_np(
struct __kernel_sockaddr_storage *sockaddr,
int network_transport)

@@ -314,11 +317,10 @@ static struct iscsi_np *iscsit_get_np(
struct iscsi_np *np;
bool match;

spin_lock_bh(&np_lock);
list_for_each_entry(np, &g_np_list, np_list) {
spin_lock(&np->np_thread_lock);
spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
spin_unlock(&np->np_thread_lock);
spin_unlock_bh(&np->np_thread_lock);
continue;
}

@@ -330,13 +332,11 @@ static struct iscsi_np *iscsit_get_np(
* while iscsi_tpg_add_network_portal() is called.
*/
np->np_exports++;
spin_unlock(&np->np_thread_lock);
spin_unlock_bh(&np_lock);
spin_unlock_bh(&np->np_thread_lock);
return np;
}
spin_unlock(&np->np_thread_lock);
spin_unlock_bh(&np->np_thread_lock);
}
spin_unlock_bh(&np_lock);

return NULL;
}

@@ -350,16 +350,22 @@ struct iscsi_np *iscsit_add_np(
struct sockaddr_in6 *sock_in6;
struct iscsi_np *np;
int ret;

mutex_lock(&np_lock);

/*
* Locate the existing struct iscsi_np if already active..
*/
np = iscsit_get_np(sockaddr, network_transport);
if (np)
if (np) {
mutex_unlock(&np_lock);
return np;
}

np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL);
if (!np) {
pr_err("Unable to allocate memory for struct iscsi_np\n");
mutex_unlock(&np_lock);
return ERR_PTR(-ENOMEM);
}

@@ -382,6 +388,7 @@ struct iscsi_np *iscsit_add_np(
ret = iscsi_target_setup_login_socket(np, sockaddr);
if (ret != 0) {
kfree(np);
mutex_unlock(&np_lock);
return ERR_PTR(ret);
}

@@ -390,6 +397,7 @@ struct iscsi_np *iscsit_add_np(
pr_err("Unable to create kthread: iscsi_np\n");
ret = PTR_ERR(np->np_thread);
kfree(np);
mutex_unlock(&np_lock);
return ERR_PTR(ret);
}
/*

@@ -400,10 +408,10 @@ struct iscsi_np *iscsit_add_np(
* point because iscsi_np has not been added to g_np_list yet.
*/
np->np_exports = 1;
np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;

spin_lock_bh(&np_lock);
list_add_tail(&np->np_list, &g_np_list);
spin_unlock_bh(&np_lock);
mutex_unlock(&np_lock);

pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
np->np_ip, np->np_port, np->np_transport->name);

@@ -470,9 +478,9 @@ int iscsit_del_np(struct iscsi_np *np)

np->np_transport->iscsit_free_np(np);

spin_lock_bh(&np_lock);
mutex_lock(&np_lock);
list_del(&np->np_list);
spin_unlock_bh(&np_lock);
mutex_unlock(&np_lock);

pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
np->np_ip, np->np_port, np->np_transport->name);

@@ -622,7 +630,7 @@ static int iscsit_add_reject(
{
struct iscsi_cmd *cmd;

cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
return -1;

@@ -2475,7 +2483,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
if (!conn_p)
return;

cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
if (!cmd) {
iscsit_dec_conn_usage_count(conn_p);
return;

@@ -3951,7 +3959,7 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)

switch (hdr->opcode & ISCSI_OPCODE_MASK) {
case ISCSI_OP_SCSI_CMD:
cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;

@@ -3963,28 +3971,28 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
case ISCSI_OP_NOOP_OUT:
cmd = NULL;
if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;
}
ret = iscsit_handle_nop_out(conn, cmd, buf);
break;
case ISCSI_OP_SCSI_TMFUNC:
cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;

ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
break;
case ISCSI_OP_TEXT:
cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;

ret = iscsit_handle_text_cmd(conn, cmd, buf);
break;
case ISCSI_OP_LOGOUT:
cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;

@@ -1192,7 +1192,7 @@ get_target:
*/
alloc_tags:
tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth);
tag_num += (tag_num / 2) + ISCSIT_EXTRA_TAGS;
tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;

ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size);

@@ -152,13 +152,16 @@ void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
* May be called from software interrupt (timer) context for allocating
* iSCSI NopINs.
*/
struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
{
struct iscsi_cmd *cmd;
struct se_session *se_sess = conn->sess->se_sess;
int size, tag;

tag = percpu_ida_alloc(&se_sess->sess_tag_pool, gfp_mask);
tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
if (tag < 0)
return NULL;

size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));
memset(cmd, 0, size);

@@ -926,7 +929,7 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
u8 state;
struct iscsi_cmd *cmd;

cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC);
cmd = iscsit_allocate_cmd(conn, TASK_RUNNING);
if (!cmd)
return -1;

@@ -9,7 +9,7 @@ extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *);
extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *);
extern struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *, gfp_t);
extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int);
extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);

@@ -217,7 +217,8 @@ static void tcm_loop_submission_work(struct work_struct *work)
scsi_bufflen(sc), tcm_loop_sam_attr(sc),
sc->sc_data_direction, 0,
scsi_sglist(sc), scsi_sg_count(sc),
sgl_bidi, sgl_bidi_count);
sgl_bidi, sgl_bidi_count,
scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
if (rc < 0) {
set_host_byte(sc, DID_NO_CONNECT);
goto out_done;

@@ -462,7 +463,7 @@ static int tcm_loop_driver_probe(struct device *dev)
{
struct tcm_loop_hba *tl_hba;
struct Scsi_Host *sh;
int error;
int error, host_prot;

tl_hba = to_tcm_loop_hba(dev);

@@ -486,6 +487,13 @@ static int tcm_loop_driver_probe(struct device *dev)
sh->max_channel = 0;
sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;

host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

scsi_host_set_prot(sh, host_prot);
scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);

error = scsi_add_host(sh, &tl_hba->dev);
if (error) {
pr_err("%s: scsi_add_host failed\n", __func__);

@@ -1228,7 +1236,7 @@ static struct configfs_attribute *tcm_loop_tpg_attrs[] = {

/* Start items for tcm_loop_naa_cit */

struct se_portal_group *tcm_loop_make_naa_tpg(
static struct se_portal_group *tcm_loop_make_naa_tpg(
struct se_wwn *wwn,
struct config_group *group,
const char *name)

@@ -1273,7 +1281,7 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
return &tl_tpg->tl_se_tpg;
}

void tcm_loop_drop_naa_tpg(
static void tcm_loop_drop_naa_tpg(
struct se_portal_group *se_tpg)
{
struct se_wwn *wwn = se_tpg->se_tpg_wwn;

@@ -1305,7 +1313,7 @@ void tcm_loop_drop_naa_tpg(

/* Start items for tcm_loop_cit */

struct se_wwn *tcm_loop_make_scsi_hba(
static struct se_wwn *tcm_loop_make_scsi_hba(
struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)

@@ -1375,7 +1383,7 @@ out:
return ERR_PTR(ret);
}

void tcm_loop_drop_scsi_hba(
static void tcm_loop_drop_scsi_hba(
struct se_wwn *wwn)
{
struct tcm_loop_hba *tl_hba = container_of(wwn,

@@ -41,11 +41,14 @@
#include "target_core_alua.h"
#include "target_core_ua.h"

static sense_reason_t core_alua_check_transition(int state, int *primary);
static sense_reason_t core_alua_check_transition(int state, int valid,
int *primary);
static int core_alua_set_tg_pt_secondary_state(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
struct se_port *port, int explicit, int offline);

static char *core_alua_dump_state(int state);

static u16 alua_lu_gps_counter;
static u32 alua_lu_gps_count;

@@ -54,6 +57,86 @@ static LIST_HEAD(lu_gps_list);

struct t10_alua_lu_gp *default_lu_gp;

/*
* REPORT REFERRALS
*
* See sbc3r35 section 5.23
*/
sense_reason_t
target_emulate_report_referrals(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct t10_alua_lba_map *map;
struct t10_alua_lba_map_member *map_mem;
unsigned char *buf;
u32 rd_len = 0, off;

if (cmd->data_length < 4) {
pr_warn("REPORT REFERRALS allocation length %u too"
" small\n", cmd->data_length);
return TCM_INVALID_CDB_FIELD;
}

buf = transport_kmap_data_sg(cmd);
if (!buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

off = 4;
spin_lock(&dev->t10_alua.lba_map_lock);
if (list_empty(&dev->t10_alua.lba_map_list)) {
spin_unlock(&dev->t10_alua.lba_map_lock);
transport_kunmap_data_sg(cmd);

return TCM_UNSUPPORTED_SCSI_OPCODE;
}

list_for_each_entry(map, &dev->t10_alua.lba_map_list,
lba_map_list) {
int desc_num = off + 3;
int pg_num;

off += 4;
if (cmd->data_length > off)
put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
off += 8;
if (cmd->data_length > off)
put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
off += 8;
rd_len += 20;
pg_num = 0;
list_for_each_entry(map_mem, &map->lba_map_mem_list,
lba_map_mem_list) {
int alua_state = map_mem->lba_map_mem_alua_state;
int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;

if (cmd->data_length > off)
buf[off] = alua_state & 0x0f;
off += 2;
if (cmd->data_length > off)
buf[off] = (alua_pg_id >> 8) & 0xff;
off++;
if (cmd->data_length > off)
buf[off] = (alua_pg_id & 0xff);
off++;
rd_len += 4;
pg_num++;
}
if (cmd->data_length > desc_num)
buf[desc_num] = pg_num;
}
spin_unlock(&dev->t10_alua.lba_map_lock);

/*
* Set the RETURN DATA LENGTH set in the header of the DataIN Payload
*/
put_unaligned_be16(rd_len, &buf[2]);

transport_kunmap_data_sg(cmd);

target_complete_cmd(cmd, GOOD);
return 0;
}

/*
* REPORT_TARGET_PORT_GROUPS
*

@@ -210,7 +293,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
unsigned char *ptr;
sense_reason_t rc = TCM_NO_SENSE;
u32 len = 4; /* Skip over RESERVED area in header */
int alua_access_state, primary = 0;
int alua_access_state, primary = 0, valid_states;
u16 tg_pt_id, rtpi;

if (!l_port)

@@ -252,6 +335,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;

ptr = &buf[4]; /* Skip over RESERVED area in header */

@@ -263,7 +347,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
* the state is a primary or secondary target port asymmetric
* access state.
*/
rc = core_alua_check_transition(alua_access_state, &primary);
rc = core_alua_check_transition(alua_access_state,
valid_states, &primary);
if (rc) {
/*
* If the SET TARGET PORT GROUPS attempts to establish

@@ -386,6 +471,81 @@ static inline int core_alua_state_nonoptimized(
return 0;
}

static inline int core_alua_state_lba_dependent(
struct se_cmd *cmd,
struct t10_alua_tg_pt_gp *tg_pt_gp,
u8 *alua_ascq)
{
struct se_device *dev = cmd->se_dev;
u64 segment_size, segment_mult, sectors, lba;

/* Only need to check for cdb actually containing LBAs */
if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
return 0;

spin_lock(&dev->t10_alua.lba_map_lock);
segment_size = dev->t10_alua.lba_map_segment_size;
segment_mult = dev->t10_alua.lba_map_segment_multiplier;
sectors = cmd->data_length / dev->dev_attrib.block_size;

lba = cmd->t_task_lba;
while (lba < cmd->t_task_lba + sectors) {
struct t10_alua_lba_map *cur_map = NULL, *map;
struct t10_alua_lba_map_member *map_mem;

list_for_each_entry(map, &dev->t10_alua.lba_map_list,
lba_map_list) {
u64 start_lba, last_lba;
u64 first_lba = map->lba_map_first_lba;

if (segment_mult) {
u64 tmp = lba;
start_lba = sector_div(tmp, segment_size * segment_mult);

last_lba = first_lba + segment_size - 1;
if (start_lba >= first_lba &&
start_lba <= last_lba) {
lba += segment_size;
cur_map = map;
break;
}
} else {
last_lba = map->lba_map_last_lba;
if (lba >= first_lba && lba <= last_lba) {
lba = last_lba + 1;
cur_map = map;
break;
}
}
}
if (!cur_map) {
spin_unlock(&dev->t10_alua.lba_map_lock);
*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
return 1;
}
list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
lba_map_mem_list) {
if (map_mem->lba_map_mem_alua_pg_id !=
tg_pt_gp->tg_pt_gp_id)
continue;
switch(map_mem->lba_map_mem_alua_state) {
case ALUA_ACCESS_STATE_STANDBY:
spin_unlock(&dev->t10_alua.lba_map_lock);
*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
return 1;
case ALUA_ACCESS_STATE_UNAVAILABLE:
spin_unlock(&dev->t10_alua.lba_map_lock);
*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
return 1;
default:
break;
}
}
}
spin_unlock(&dev->t10_alua.lba_map_lock);
return 0;
}

static inline int core_alua_state_standby(
struct se_cmd *cmd,
unsigned char *cdb,

@@ -583,6 +743,9 @@ target_alua_state_check(struct se_cmd *cmd)
case ALUA_ACCESS_STATE_TRANSITION:
ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
break;
case ALUA_ACCESS_STATE_LBA_DEPENDENT:
ret = core_alua_state_lba_dependent(cmd, tg_pt_gp, &alua_ascq);
break;
/*
* OFFLINE is a secondary ALUA target port group access state, that is
* handled above with struct se_port->sep_tg_pt_secondary_offline=1

@@ -618,17 +781,36 @@ out:
* Check implicit and explicit ALUA state change request.
*/
static sense_reason_t
core_alua_check_transition(int state, int *primary)
core_alua_check_transition(int state, int valid, int *primary)
{
/*
* OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
* defined as primary target port asymmetric access states.
*/
switch (state) {
case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
if (!(valid & ALUA_AO_SUP))
goto not_supported;
*primary = 1;
break;
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
if (!(valid & ALUA_AN_SUP))
goto not_supported;
*primary = 1;
break;
case ALUA_ACCESS_STATE_STANDBY:
if (!(valid & ALUA_S_SUP))
goto not_supported;
*primary = 1;
break;
case ALUA_ACCESS_STATE_UNAVAILABLE:
/*
* OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
* defined as primary target port asymmetric access states.
*/
if (!(valid & ALUA_U_SUP))
goto not_supported;
*primary = 1;
break;
case ALUA_ACCESS_STATE_LBA_DEPENDENT:
if (!(valid & ALUA_LBD_SUP))
goto not_supported;
*primary = 1;
break;
case ALUA_ACCESS_STATE_OFFLINE:

@@ -636,14 +818,27 @@ core_alua_check_transition(int state, int *primary)
* OFFLINE state is defined as a secondary target port
* asymmetric access state.
*/
if (!(valid & ALUA_O_SUP))
goto not_supported;
*primary = 0;
break;
case ALUA_ACCESS_STATE_TRANSITION:
/*
* Transitioning is set internally, and
* cannot be selected manually.
*/
goto not_supported;
default:
pr_err("Unknown ALUA access state: 0x%02x\n", state);
return TCM_INVALID_PARAMETER_LIST;
}

return 0;

not_supported:
pr_err("ALUA access state %s not supported",
core_alua_dump_state(state));
return TCM_INVALID_PARAMETER_LIST;
}

static char *core_alua_dump_state(int state)

@@ -653,12 +848,16 @@ static char *core_alua_dump_state(int state)
return "Active/Optimized";
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
return "Active/NonOptimized";
case ALUA_ACCESS_STATE_LBA_DEPENDENT:
return "LBA Dependent";
case ALUA_ACCESS_STATE_STANDBY:
return "Standby";
case ALUA_ACCESS_STATE_UNAVAILABLE:
return "Unavailable";
case ALUA_ACCESS_STATE_OFFLINE:
return "Offline";
case ALUA_ACCESS_STATE_TRANSITION:
return "Transitioning";
default:
return "Unknown";
}

@@ -735,58 +934,49 @@ static int core_alua_write_tpg_metadata(
* Called with tg_pt_gp->tg_pt_gp_md_mutex held
*/
static int core_alua_update_tpg_primary_metadata(
struct t10_alua_tg_pt_gp *tg_pt_gp,
int primary_state,
unsigned char *md_buf)
struct t10_alua_tg_pt_gp *tg_pt_gp)
{
unsigned char *md_buf;
struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
char path[ALUA_METADATA_PATH_LEN];
int len;
int len, rc;

md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
if (!md_buf) {
pr_err("Unable to allocate buf for ALUA metadata\n");
return -ENOMEM;
}

memset(path, 0, ALUA_METADATA_PATH_LEN);

len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
len = snprintf(md_buf, ALUA_MD_BUF_LEN,
"tg_pt_gp_id=%hu\n"
"alua_access_state=0x%02x\n"
"alua_access_status=0x%02x\n",
tg_pt_gp->tg_pt_gp_id, primary_state,
tg_pt_gp->tg_pt_gp_id,
tg_pt_gp->tg_pt_gp_alua_pending_state,
tg_pt_gp->tg_pt_gp_alua_access_status);

snprintf(path, ALUA_METADATA_PATH_LEN,
"/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));

return core_alua_write_tpg_metadata(path, md_buf, len);
rc = core_alua_write_tpg_metadata(path, md_buf, len);
kfree(md_buf);
return rc;
}

static int core_alua_do_transition_tg_pt(
struct t10_alua_tg_pt_gp *tg_pt_gp,
struct se_port *l_port,
struct se_node_acl *nacl,
unsigned char *md_buf,
int new_state,
int explicit)
static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
{
struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
struct se_dev_entry *se_deve;
struct se_lun_acl *lacl;
struct se_port *port;
struct t10_alua_tg_pt_gp_member *mem;
int old_state = 0;
/*
* Save the old primary ALUA access state, and set the current state
* to ALUA_ACCESS_STATE_TRANSITION.
*/
old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
ALUA_ACCESS_STATE_TRANSITION);
tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
/*
* Check for the optional ALUA primary state transition delay
*/
if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);

spin_lock(&tg_pt_gp->tg_pt_gp_lock);
list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,

@@ -821,9 +1011,12 @@ static int core_alua_do_transition_tg_pt(
if (!lacl)
continue;

if (explicit &&
(nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
(l_port != NULL) && (l_port == port))
if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
(tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
(tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) &&
(tg_pt_gp->tg_pt_gp_alua_port != NULL) &&
(tg_pt_gp->tg_pt_gp_alua_port == port))
continue;

core_scsi3_ua_allocate(lacl->se_lun_nacl,

@@ -851,20 +1044,102 @@ static int core_alua_do_transition_tg_pt(
*/
if (tg_pt_gp->tg_pt_gp_write_metadata) {
mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
core_alua_update_tpg_primary_metadata(tg_pt_gp,
new_state, md_buf);
core_alua_update_tpg_primary_metadata(tg_pt_gp);
mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
}
/*
* Set the current primary ALUA access state to the requested new state
*/
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
tg_pt_gp->tg_pt_gp_alua_pending_state);

pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
" from primary access state %s to %s\n", (explicit) ? "explicit" :
"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
core_alua_dump_state(new_state));
tg_pt_gp->tg_pt_gp_id,
core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_dec();
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

if (tg_pt_gp->tg_pt_gp_transition_complete)
complete(tg_pt_gp->tg_pt_gp_transition_complete);
}

static int core_alua_do_transition_tg_pt(
struct t10_alua_tg_pt_gp *tg_pt_gp,
int new_state,
int explicit)
{
struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
DECLARE_COMPLETION_ONSTACK(wait);

/* Nothing to be done here */
if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
return 0;

if (new_state == ALUA_ACCESS_STATE_TRANSITION)
return -EAGAIN;

/*
* Flush any pending transitions
*/
if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
ALUA_ACCESS_STATE_TRANSITION) {
/* Just in case */
tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
tg_pt_gp->tg_pt_gp_transition_complete = &wait;
flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
wait_for_completion(&wait);
tg_pt_gp->tg_pt_gp_transition_complete = NULL;
return 0;
}

/*
* Save the old primary ALUA access state, and set the current state
* to ALUA_ACCESS_STATE_TRANSITION.
*/
tg_pt_gp->tg_pt_gp_alua_previous_state =
atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;

atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
ALUA_ACCESS_STATE_TRANSITION);
tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;

/*
* Check for the optional ALUA primary state transition delay
*/
if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);

/*
* Take a reference for workqueue item
*/
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_inc();
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
unsigned long transition_tmo;

transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
&tg_pt_gp->tg_pt_gp_transition_work,
transition_tmo);
} else {
tg_pt_gp->tg_pt_gp_transition_complete = &wait;
queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
&tg_pt_gp->tg_pt_gp_transition_work, 0);
wait_for_completion(&wait);
tg_pt_gp->tg_pt_gp_transition_complete = NULL;
}

return 0;
}

@@ -878,23 +1153,15 @@ int core_alua_do_port_transition(
int explicit)
{
struct se_device *dev;
struct se_port *port;
struct se_node_acl *nacl;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
struct t10_alua_tg_pt_gp *tg_pt_gp;
unsigned char *md_buf;
int primary;
int primary, valid_states, rc = 0;

if (core_alua_check_transition(new_state, &primary) != 0)
valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
return -EINVAL;

md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
if (!md_buf) {
pr_err("Unable to allocate buf for ALUA metadata\n");
return -ENOMEM;
}

local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
lu_gp = local_lu_gp_mem->lu_gp;

@@ -911,12 +1178,13 @@ int core_alua_do_port_transition(
* core_alua_do_transition_tg_pt() will always return
* success.
*/
core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
md_buf, new_state, explicit);
l_tg_pt_gp->tg_pt_gp_alua_port = l_port;
l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
new_state, explicit);
atomic_dec(&lu_gp->lu_gp_ref_cnt);
smp_mb__after_atomic_dec();
kfree(md_buf);
return 0;
return rc;
}
/*
* For all other LU groups aside from 'default_lu_gp', walk all of

@@ -951,11 +1219,11 @@ int core_alua_do_port_transition(
continue;

if (l_tg_pt_gp == tg_pt_gp) {
port = l_port;
nacl = l_nacl;
tg_pt_gp->tg_pt_gp_alua_port = l_port;
tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
} else {
port = NULL;
nacl = NULL;
tg_pt_gp->tg_pt_gp_alua_port = NULL;
tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
}
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_inc();

@@ -964,12 +1232,14 @@ int core_alua_do_port_transition(
* core_alua_do_transition_tg_pt() will always return
* success.
*/
core_alua_do_transition_tg_pt(tg_pt_gp, port,
nacl, md_buf, new_state, explicit);
rc = core_alua_do_transition_tg_pt(tg_pt_gp,
new_state, explicit);

spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_dec();
if (rc)
break;
}
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

@@ -979,16 +1249,18 @@ int core_alua_do_port_transition(
}
spin_unlock(&lu_gp->lu_gp_lock);

pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
" Group IDs: %hu %s transition to primary state: %s\n",
config_item_name(&lu_gp->lu_gp_group.cg_item),
l_tg_pt_gp->tg_pt_gp_id, (explicit) ? "explicit" : "implicit",
core_alua_dump_state(new_state));
if (!rc) {
pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
" Group IDs: %hu %s transition to primary state: %s\n",
config_item_name(&lu_gp->lu_gp_group.cg_item),
l_tg_pt_gp->tg_pt_gp_id,
(explicit) ? "explicit" : "implicit",
core_alua_dump_state(new_state));
}

atomic_dec(&lu_gp->lu_gp_ref_cnt);
smp_mb__after_atomic_dec();
kfree(md_buf);
return 0;
return rc;
}

/*

@@ -996,13 +1268,18 @@ int core_alua_do_port_transition(
*/
static int core_alua_update_tpg_secondary_metadata(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
struct se_port *port,
unsigned char *md_buf,
u32 md_buf_len)
struct se_port *port)
{
unsigned char *md_buf;
struct se_portal_group *se_tpg = port->sep_tpg;
char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
int len;
int len, rc;

md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
if (!md_buf) {
pr_err("Unable to allocate buf for ALUA metadata\n");
return -ENOMEM;
}

memset(path, 0, ALUA_METADATA_PATH_LEN);
memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);

@@ -1014,7 +1291,7 @@ static int core_alua_update_tpg_secondary_metadata(
snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
"alua_tg_pt_status=0x%02x\n",
atomic_read(&port->sep_tg_pt_secondary_offline),
port->sep_tg_pt_secondary_stat);

@@ -1023,7 +1300,10 @@ static int core_alua_update_tpg_secondary_metadata(
se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
port->sep_lun->unpacked_lun);

return core_alua_write_tpg_metadata(path, md_buf, len);
rc = core_alua_write_tpg_metadata(path, md_buf, len);
kfree(md_buf);

return rc;
}

static int core_alua_set_tg_pt_secondary_state(

@@ -1033,8 +1313,6 @@ static int core_alua_set_tg_pt_secondary_state(
int offline)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
unsigned char *md_buf;
u32 md_buf_len;
int trans_delay_msecs;

spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

@@ -1055,7 +1333,6 @@ static int core_alua_set_tg_pt_secondary_state(
else
atomic_set(&port->sep_tg_pt_secondary_offline, 0);

md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
port->sep_tg_pt_secondary_stat = (explicit) ?
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;

@@ -1077,23 +1354,115 @@ static int core_alua_set_tg_pt_secondary_state(
* secondary state and status
*/
if (port->sep_tg_pt_secondary_write_md) {
md_buf = kzalloc(md_buf_len, GFP_KERNEL);
if (!md_buf) {
pr_err("Unable to allocate md_buf for"
" secondary ALUA access metadata\n");
return -ENOMEM;
}
mutex_lock(&port->sep_tg_pt_md_mutex);
core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
md_buf, md_buf_len);
core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port);
mutex_unlock(&port->sep_tg_pt_md_mutex);

kfree(md_buf);
}

return 0;
}

struct t10_alua_lba_map *
core_alua_allocate_lba_map(struct list_head *list,
u64 first_lba, u64 last_lba)
{
struct t10_alua_lba_map *lba_map;

lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
if (!lba_map) {
pr_err("Unable to allocate struct t10_alua_lba_map\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
lba_map->lba_map_first_lba = first_lba;
lba_map->lba_map_last_lba = last_lba;

list_add_tail(&lba_map->lba_map_list, list);
return lba_map;
}

int
core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
int pg_id, int state)
{
struct t10_alua_lba_map_member *lba_map_mem;

list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
lba_map_mem_list) {
if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
return -EINVAL;
}
}

lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
if (!lba_map_mem) {
pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
return -ENOMEM;
}
lba_map_mem->lba_map_mem_alua_state = state;
lba_map_mem->lba_map_mem_alua_pg_id = pg_id;

list_add_tail(&lba_map_mem->lba_map_mem_list,
&lba_map->lba_map_mem_list);
return 0;
}

void
core_alua_free_lba_map(struct list_head *lba_list)
{
struct t10_alua_lba_map *lba_map, *lba_map_tmp;
struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;

list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
lba_map_list) {
list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
&lba_map->lba_map_mem_list,
lba_map_mem_list) {
list_del(&lba_map_mem->lba_map_mem_list);
kmem_cache_free(t10_alua_lba_map_mem_cache,
lba_map_mem);
}
list_del(&lba_map->lba_map_list);
kmem_cache_free(t10_alua_lba_map_cache, lba_map);
}
}

void
core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
int segment_size, int segment_mult)
{
struct list_head old_lba_map_list;
struct t10_alua_tg_pt_gp *tg_pt_gp;
int activate = 0, supported;

INIT_LIST_HEAD(&old_lba_map_list);
spin_lock(&dev->t10_alua.lba_map_lock);
dev->t10_alua.lba_map_segment_size = segment_size;
dev->t10_alua.lba_map_segment_multiplier = segment_mult;
list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
if (lba_map_list) {
list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
activate = 1;
}
spin_unlock(&dev->t10_alua.lba_map_lock);
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {

if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
if (activate)
supported |= ALUA_LBD_SUP;
else
supported &= ~ALUA_LBD_SUP;
tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
}
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
core_alua_free_lba_map(&old_lba_map_list);
}

struct t10_alua_lu_gp *
core_alua_allocate_lu_gp(const char *name, int def_group)
{

@@ -1346,8 +1715,9 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
core_alua_do_transition_tg_pt_work);
tg_pt_gp->tg_pt_gp_dev = dev;
tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
/*

@@ -1475,6 +1845,8 @@ void core_alua_free_tg_pt_gp(
dev->t10_alua.alua_tg_pt_gps_counter--;
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);

/*
* Allow a struct t10_alua_tg_pt_gp_member * referenced by
* core_alua_get_tg_pt_gp_by_name() in

@@ -13,12 +13,13 @@
/*
* ASYMMETRIC ACCESS STATE field
*
* from spc4r17 section 6.27 Table 245
* from spc4r36j section 6.37 Table 307
*/
#define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED 0x0
#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1
#define ALUA_ACCESS_STATE_STANDBY 0x2
#define ALUA_ACCESS_STATE_UNAVAILABLE 0x3
#define ALUA_ACCESS_STATE_LBA_DEPENDENT 0x4
#define ALUA_ACCESS_STATE_OFFLINE 0xe
#define ALUA_ACCESS_STATE_TRANSITION 0xf

@@ -78,18 +79,30 @@
*/
#define ALUA_SECONDARY_METADATA_WWN_LEN 256

/* Used by core_alua_update_tpg_(primary,secondary)_metadata */
#define ALUA_MD_BUF_LEN 1024

extern struct kmem_cache *t10_alua_lu_gp_cache;
extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
extern struct kmem_cache *t10_alua_lba_map_cache;
extern struct kmem_cache *t10_alua_lba_map_mem_cache;

extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);
extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
extern sense_reason_t target_emulate_report_referrals(struct se_cmd *);
extern int core_alua_check_nonop_delay(struct se_cmd *);
extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
struct se_device *, struct se_port *,
struct se_node_acl *, int, int);
extern char *core_alua_dump_status(int);
extern struct t10_alua_lba_map *core_alua_allocate_lba_map(
struct list_head *, u64, u64);
extern int core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *, int, int);
extern void core_alua_free_lba_map(struct list_head *);
extern void core_alua_set_lba_map(struct se_device *, struct list_head *,
int, int);
extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int);
extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16);
extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);

@@ -643,6 +643,15 @@ SE_DEV_ATTR(emulate_caw, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(emulate_3pc);
SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(pi_prot_type);
SE_DEV_ATTR(pi_prot_type, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB_RO(hw_pi_prot_type);
SE_DEV_ATTR_RO(hw_pi_prot_type);

DEF_DEV_ATTRIB(pi_prot_format);
SE_DEV_ATTR(pi_prot_format, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(enforce_pr_isids);
SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);

@@ -702,6 +711,9 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
&target_core_dev_attrib_emulate_tpws.attr,
&target_core_dev_attrib_emulate_caw.attr,
&target_core_dev_attrib_emulate_3pc.attr,
&target_core_dev_attrib_pi_prot_type.attr,
&target_core_dev_attrib_hw_pi_prot_type.attr,
&target_core_dev_attrib_pi_prot_format.attr,
&target_core_dev_attrib_enforce_pr_isids.attr,
&target_core_dev_attrib_is_nonrot.attr,
&target_core_dev_attrib_emulate_rest_reord.attr,

@@ -1741,6 +1753,176 @@ static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
.store = target_core_store_alua_lu_gp,
};

static ssize_t target_core_show_dev_lba_map(void *p, char *page)
{
struct se_device *dev = p;
struct t10_alua_lba_map *map;
struct t10_alua_lba_map_member *mem;
char *b = page;
int bl = 0;
char state;

spin_lock(&dev->t10_alua.lba_map_lock);
if (!list_empty(&dev->t10_alua.lba_map_list))
bl += sprintf(b + bl, "%u %u\n",
dev->t10_alua.lba_map_segment_size,
dev->t10_alua.lba_map_segment_multiplier);
list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
bl += sprintf(b + bl, "%llu %llu",
map->lba_map_first_lba, map->lba_map_last_lba);
list_for_each_entry(mem, &map->lba_map_mem_list,
lba_map_mem_list) {
switch (mem->lba_map_mem_alua_state) {
case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
state = 'O';
break;
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
state = 'A';
break;
case ALUA_ACCESS_STATE_STANDBY:
state = 'S';
break;
case ALUA_ACCESS_STATE_UNAVAILABLE:
state = 'U';
break;
default:
state = '.';
break;
}
bl += sprintf(b + bl, " %d:%c",
mem->lba_map_mem_alua_pg_id, state);
}
bl += sprintf(b + bl, "\n");
}
spin_unlock(&dev->t10_alua.lba_map_lock);
return bl;
}

static ssize_t target_core_store_dev_lba_map(
void *p,
const char *page,
size_t count)
{
struct se_device *dev = p;
struct t10_alua_lba_map *lba_map = NULL;
struct list_head lba_list;
char *map_entries, *ptr;
char state;
int pg_num = -1, pg;
int ret = 0, num = 0, pg_id, alua_state;
unsigned long start_lba = -1, end_lba = -1;
unsigned long segment_size = -1, segment_mult = -1;

map_entries = kstrdup(page, GFP_KERNEL);
if (!map_entries)
return -ENOMEM;

INIT_LIST_HEAD(&lba_list);
while ((ptr = strsep(&map_entries, "\n")) != NULL) {
if (!*ptr)
continue;

if (num == 0) {
if (sscanf(ptr, "%lu %lu\n",
&segment_size, &segment_mult) != 2) {
pr_err("Invalid line %d\n", num);
ret = -EINVAL;
break;
}
num++;
continue;
}
if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
pr_err("Invalid line %d\n", num);
ret = -EINVAL;
break;
}
ptr = strchr(ptr, ' ');
if (!ptr) {
pr_err("Invalid line %d, missing end lba\n", num);
ret = -EINVAL;
break;
}
ptr++;
ptr = strchr(ptr, ' ');
if (!ptr) {
pr_err("Invalid line %d, missing state definitions\n",
num);
ret = -EINVAL;
break;
}
ptr++;
lba_map = core_alua_allocate_lba_map(&lba_list,
start_lba, end_lba);
if (IS_ERR(lba_map)) {
ret = PTR_ERR(lba_map);
break;
}
pg = 0;
while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
switch (state) {
case 'O':
alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
break;
case 'A':
alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
break;
case 'S':
alua_state = ALUA_ACCESS_STATE_STANDBY;
break;
case 'U':
alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
break;
default:
pr_err("Invalid ALUA state '%c'\n", state);
ret = -EINVAL;
goto out;
}

ret = core_alua_allocate_lba_map_mem(lba_map,
pg_id, alua_state);
if (ret) {
pr_err("Invalid target descriptor %d:%c "
"at line %d\n",
pg_id, state, num);
break;
}
pg++;
ptr = strchr(ptr, ' ');
if (ptr)
ptr++;
else
break;
}
if (pg_num == -1)
pg_num = pg;
else if (pg != pg_num) {
pr_err("Only %d from %d port groups definitions "
"at line %d\n", pg, pg_num, num);
ret = -EINVAL;
break;
}
num++;
}
out:
if (ret) {
core_alua_free_lba_map(&lba_list);
count = ret;
} else
core_alua_set_lba_map(dev, &lba_list,
segment_size, segment_mult);
kfree(map_entries);
return count;
}

static struct target_core_configfs_attribute target_core_attr_dev_lba_map = {
.attr = { .ca_owner = THIS_MODULE,
.ca_name = "lba_map",
.ca_mode = S_IRUGO | S_IWUSR },
.show = target_core_show_dev_lba_map,
.store = target_core_store_dev_lba_map,
};

static struct configfs_attribute *lio_core_dev_attrs[] = {
&target_core_attr_dev_info.attr,
&target_core_attr_dev_control.attr,

@@ -1748,6 +1930,7 @@ static struct configfs_attribute *lio_core_dev_attrs[] = {
&target_core_attr_dev_udev_path.attr,
&target_core_attr_dev_enable.attr,
&target_core_attr_dev_alua_lu_gp.attr,
&target_core_attr_dev_lba_map.attr,
NULL,
};

@@ -2054,6 +2237,13 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
" transition while TPGS_IMPLICIT_ALUA is disabled\n");
return -EINVAL;
}
if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
/* LBA DEPENDENT is only allowed with implicit ALUA */
pr_err("Unable to process implicit configfs ALUA transition"
" while explicit ALUA management is enabled\n");
return -EINVAL;
}

ret = core_alua_do_port_transition(tg_pt_gp, dev,
NULL, NULL, new_state, 0);

@@ -2188,7 +2378,7 @@ SE_DEV_ALUA_SUPPORT_STATE_SHOW(lba_dependent,
tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent,
tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO | S_IWUSR);
SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO);

SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable,
tg_pt_gp_alua_supported_states, ALUA_U_SUP);

@@ -2937,7 +3127,7 @@ static int __init target_core_init_configfs(void)
* and ALUA Logical Unit Group and Target Port Group infrastructure.
*/
target_cg = &subsys->su_group;
target_cg->default_groups = kmalloc(sizeof(struct config_group) * 2,
target_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!target_cg->default_groups) {
pr_err("Unable to allocate target_cg->default_groups\n");

@@ -918,6 +918,90 @@ int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
return 0;
}

int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
{
int rc, old_prot = dev->dev_attrib.pi_prot_type;

if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
pr_err("Illegal value %d for pi_prot_type\n", flag);
return -EINVAL;
}
if (flag == 2) {
pr_err("DIF TYPE2 protection currently not supported\n");
return -ENOSYS;
}
if (dev->dev_attrib.hw_pi_prot_type) {
pr_warn("DIF protection enabled on underlying hardware,"
" ignoring\n");
return 0;
}
if (!dev->transport->init_prot || !dev->transport->free_prot) {
pr_err("DIF protection not supported by backend: %s\n",
dev->transport->name);
return -ENOSYS;
}
if (!(dev->dev_flags & DF_CONFIGURED)) {
pr_err("DIF protection requires device to be configured\n");
return -ENODEV;
}
if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device PROT type while"
" export_count is %d\n", dev, dev->export_count);
return -EINVAL;
}

dev->dev_attrib.pi_prot_type = flag;

if (flag && !old_prot) {
rc = dev->transport->init_prot(dev);
if (rc) {
dev->dev_attrib.pi_prot_type = old_prot;
return rc;
}

} else if (!flag && old_prot) {
dev->transport->free_prot(dev);
}
pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);

return 0;
}

int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
{
int rc;

if (!flag)
return 0;

if (flag != 1) {
pr_err("Illegal value %d for pi_prot_format\n", flag);
return -EINVAL;
}
if (!dev->transport->format_prot) {
pr_err("DIF protection format not supported by backend %s\n",
dev->transport->name);
return -ENOSYS;
}
if (!(dev->dev_flags & DF_CONFIGURED)) {
pr_err("DIF protection format requires device to be configured\n");
return -ENODEV;
}
if (dev->export_count) {
pr_err("dev[%p]: Unable to format SE Device PROT type while"
" export_count is %d\n", dev, dev->export_count);
return -EINVAL;
}

rc = dev->transport->format_prot(dev);
if (rc)
return rc;

pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);

return 0;
}

int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {

@ -1117,23 +1201,23 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
|
|||
struct se_lun *core_dev_add_lun(
|
||||
struct se_portal_group *tpg,
|
||||
struct se_device *dev,
|
||||
u32 lun)
|
||||
u32 unpacked_lun)
|
||||
{
|
||||
struct se_lun *lun_p;
|
||||
struct se_lun *lun;
|
||||
int rc;
|
||||
|
||||
lun_p = core_tpg_pre_addlun(tpg, lun);
|
||||
if (IS_ERR(lun_p))
|
||||
return lun_p;
|
||||
lun = core_tpg_alloc_lun(tpg, unpacked_lun);
|
||||
if (IS_ERR(lun))
|
||||
return lun;
|
||||
|
||||
rc = core_tpg_post_addlun(tpg, lun_p,
|
||||
rc = core_tpg_add_lun(tpg, lun,
|
||||
TRANSPORT_LUNFLAGS_READ_WRITE, dev);
|
||||
if (rc < 0)
|
||||
return ERR_PTR(rc);
|
||||
|
||||
pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
|
||||
" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
|
||||
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
|
||||
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
|
||||
tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
|
||||
/*
|
||||
* Update LUN maps for dynamically added initiators when
|
||||
|
@ -1154,7 +1238,7 @@ struct se_lun *core_dev_add_lun(
|
|||
spin_unlock_irq(&tpg->acl_node_lock);
|
||||
}
|
||||
|
||||
return lun_p;
|
||||
return lun;
|
||||
}
|
||||
|
||||
/* core_dev_del_lun():
|
||||
|
@ -1420,6 +1504,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
|
|||
dev->dev_link_magic = SE_DEV_LINK_MAGIC;
|
||||
dev->se_hba = hba;
|
||||
dev->transport = hba->transport;
|
||||
dev->prot_length = sizeof(struct se_dif_v1_tuple);
|
||||
|
||||
INIT_LIST_HEAD(&dev->dev_list);
|
||||
INIT_LIST_HEAD(&dev->dev_sep_list);
|
||||
|
@ -1444,6 +1529,8 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
|
|||
spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
|
||||
INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
|
||||
spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
|
||||
INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
|
||||
spin_lock_init(&dev->t10_alua.lba_map_lock);
|
||||
|
||||
dev->t10_wwn.t10_dev = dev;
|
||||
dev->t10_alua.t10_dev = dev;
|
||||
|
@ -1460,6 +1547,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
|
|||
dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
|
||||
dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
|
||||
dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
|
||||
dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
|
||||
dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
|
||||
dev->dev_attrib.is_nonrot = DA_IS_NONROT;
|
||||
dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
|
||||
|
@ -1588,9 +1676,13 @@ void target_free_device(struct se_device *dev)
|
|||
}
|
||||
|
||||
core_alua_free_lu_gp_mem(dev);
|
||||
core_alua_set_lba_map(dev, NULL, 0, 0);
|
||||
core_scsi3_free_all_registrations(dev);
|
||||
se_release_vpd_for_dev(dev);
|
||||
|
||||
if (dev->transport->free_prot)
|
||||
dev->transport->free_prot(dev);
|
||||
|
||||
dev->transport->free_device(dev);
|
||||
}
|
||||
|
||||
|
|
|
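A note on the attribute plumbing above: se_dev_set_pi_prot_type() only validates and records the DIF type; allocating and tearing down the protection store is delegated to the backend through the new init_prot/format_prot/free_prot methods of struct se_subsystem_api. A minimal sketch of that contract, with invented stub names purely for illustration (the real FILEIO and RAMDISK implementations appear below):

	/* Hypothetical backend stubs illustrating the protection-ops contract. */
	static int null_init_prot(struct se_device *dev)
	{
		/* set up PI storage: dev->prot_length bytes per logical block */
		return 0;
	}

	static int null_format_prot(struct se_device *dev)
	{
		/* stamp the PI area with an escape pattern (cf. fd_format_prot) */
		return 0;
	}

	static void null_free_prot(struct se_device *dev)
	{
		/* release whatever init_prot set up */
	}
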
@@ -906,7 +906,7 @@ static struct config_group *target_fabric_make_lun(
	lun_cg->default_groups[1] = NULL;

	port_stat_grp = &lun->port_stat_grps.stat_group;
	port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
	port_stat_grp->default_groups = kzalloc(sizeof(struct config_group *) * 4,
			GFP_KERNEL);
	if (!port_stat_grp->default_groups) {
		pr_err("Unable to allocate port_stat_grp->default_groups\n");

@@ -257,6 +257,72 @@ static void fd_free_device(struct se_device *dev)
	kfree(fd_dev);
}

static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
			 int is_write)
{
	struct se_device *se_dev = cmd->se_dev;
	struct fd_dev *dev = FD_DEV(se_dev);
	struct file *prot_fd = dev->fd_prot_file;
	struct scatterlist *sg;
	loff_t pos = (cmd->t_task_lba * se_dev->prot_length);
	unsigned char *buf;
	u32 prot_size, len, size;
	int rc, ret = 1, i;

	prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *
		     se_dev->prot_length;

	if (!is_write) {
		fd_prot->prot_buf = vzalloc(prot_size);
		if (!fd_prot->prot_buf) {
			pr_err("Unable to allocate fd_prot->prot_buf\n");
			return -ENOMEM;
		}
		buf = fd_prot->prot_buf;

		fd_prot->prot_sg_nents = cmd->t_prot_nents;
		fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) *
					   fd_prot->prot_sg_nents, GFP_KERNEL);
		if (!fd_prot->prot_sg) {
			pr_err("Unable to allocate fd_prot->prot_sg\n");
			vfree(fd_prot->prot_buf);
			return -ENOMEM;
		}
		size = prot_size;

		for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) {

			len = min_t(u32, PAGE_SIZE, size);
			sg_set_buf(sg, buf, len);
			size -= len;
			buf += len;
		}
	}

	if (is_write) {
		rc = kernel_write(prot_fd, fd_prot->prot_buf, prot_size, pos);
		if (rc < 0 || prot_size != rc) {
			pr_err("kernel_write() for fd_do_prot_rw failed:"
			       " %d\n", rc);
			ret = -EINVAL;
		}
	} else {
		rc = kernel_read(prot_fd, pos, fd_prot->prot_buf, prot_size);
		if (rc < 0) {
			pr_err("kernel_read() for fd_do_prot_rw failed:"
			       " %d\n", rc);
			ret = -EINVAL;
		}
	}

	if (is_write || ret < 0) {
		kfree(fd_prot->prot_sg);
		vfree(fd_prot->prot_buf);
	}

	return ret;
}

static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_nents, int is_write)
{
@@ -551,6 +617,8 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_prot fd_prot;
	sense_reason_t rc;
	int ret = 0;

	/*
@@ -558,8 +626,48 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	 * physical memory addresses to struct iovec virtual memory.
	 */
	if (data_direction == DMA_FROM_DEVICE) {
		memset(&fd_prot, 0, sizeof(struct fd_prot));

		if (cmd->prot_type) {
			ret = fd_do_prot_rw(cmd, &fd_prot, false);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		ret = fd_do_rw(cmd, sgl, sgl_nents, 0);

		if (ret > 0 && cmd->prot_type) {
			u32 sectors = cmd->data_length / dev->dev_attrib.block_size;

			rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors,
						 0, fd_prot.prot_sg, 0);
			if (rc) {
				kfree(fd_prot.prot_sg);
				vfree(fd_prot.prot_buf);
				return rc;
			}
			kfree(fd_prot.prot_sg);
			vfree(fd_prot.prot_buf);
		}
	} else {
		memset(&fd_prot, 0, sizeof(struct fd_prot));

		if (cmd->prot_type) {
			u32 sectors = cmd->data_length / dev->dev_attrib.block_size;

			ret = fd_do_prot_rw(cmd, &fd_prot, false);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

			rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors,
						  0, fd_prot.prot_sg, 0);
			if (rc) {
				kfree(fd_prot.prot_sg);
				vfree(fd_prot.prot_buf);
				return rc;
			}
		}

		ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
		/*
		 * Perform implicit vfs_fsync_range() for fd_do_writev() ops
@@ -576,10 +684,19 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,

			vfs_fsync_range(fd_dev->fd_file, start, end, 1);
		}

		if (ret > 0 && cmd->prot_type) {
			ret = fd_do_prot_rw(cmd, &fd_prot, true);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	}

	if (ret < 0)
	if (ret < 0) {
		kfree(fd_prot.prot_sg);
		vfree(fd_prot.prot_buf);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (ret)
		target_complete_cmd(cmd, SAM_STAT_GOOD);
@@ -700,6 +817,140 @@ static sector_t fd_get_blocks(struct se_device *dev)
			dev->dev_attrib.block_size);
}

static int fd_init_prot(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *prot_file, *file = fd_dev->fd_file;
	struct inode *inode;
	int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
	char buf[FD_MAX_DEV_PROT_NAME];

	if (!file) {
		pr_err("Unable to locate fd_dev->fd_file\n");
		return -ENODEV;
	}

	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		pr_err("FILEIO Protection emulation only supported on"
		       " !S_ISBLK\n");
		return -ENOSYS;
	}

	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE)
		flags &= ~O_DSYNC;

	snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection",
		 fd_dev->fd_dev_name);

	prot_file = filp_open(buf, flags, 0600);
	if (IS_ERR(prot_file)) {
		pr_err("filp_open(%s) failed\n", buf);
		ret = PTR_ERR(prot_file);
		return ret;
	}
	fd_dev->fd_prot_file = prot_file;

	return 0;
}

static void fd_init_format_buf(struct se_device *dev, unsigned char *buf,
			       u32 unit_size, u32 *ref_tag, u16 app_tag,
			       bool inc_reftag)
{
	unsigned char *p = buf;
	int i;

	for (i = 0; i < unit_size; i += dev->prot_length) {
		*((u16 *)&p[0]) = 0xffff;
		*((__be16 *)&p[2]) = cpu_to_be16(app_tag);
		*((__be32 *)&p[4]) = cpu_to_be32(*ref_tag);

		if (inc_reftag)
			(*ref_tag)++;

		p += dev->prot_length;
	}
}

static int fd_format_prot(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *prot_fd = fd_dev->fd_prot_file;
	sector_t prot_length, prot;
	unsigned char *buf;
	loff_t pos = 0;
	u32 ref_tag = 0;
	int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
	int rc, ret = 0, size, len;
	bool inc_reftag = false;

	if (!dev->dev_attrib.pi_prot_type) {
		pr_err("Unable to format_prot while pi_prot_type == 0\n");
		return -ENODEV;
	}
	if (!prot_fd) {
		pr_err("Unable to locate fd_dev->fd_prot_file\n");
		return -ENODEV;
	}

	switch (dev->dev_attrib.pi_prot_type) {
	case TARGET_DIF_TYPE3_PROT:
		ref_tag = 0xffffffff;
		break;
	case TARGET_DIF_TYPE2_PROT:
	case TARGET_DIF_TYPE1_PROT:
		inc_reftag = true;
		break;
	default:
		break;
	}

	buf = vzalloc(unit_size);
	if (!buf) {
		pr_err("Unable to allocate FILEIO prot buf\n");
		return -ENOMEM;
	}

	prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length;
	size = prot_length;

	pr_debug("Using FILEIO prot_length: %llu\n",
		 (unsigned long long)prot_length);

	for (prot = 0; prot < prot_length; prot += unit_size) {

		fd_init_format_buf(dev, buf, unit_size, &ref_tag, 0xffff,
				   inc_reftag);

		len = min(unit_size, size);

		rc = kernel_write(prot_fd, buf, len, pos);
		if (rc != len) {
			pr_err("vfs_write to prot file failed: %d\n", rc);
			ret = -ENODEV;
			goto out;
		}
		pos += len;
		size -= len;
	}

out:
	vfree(buf);
	return ret;
}

static void fd_free_prot(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);

	if (!fd_dev->fd_prot_file)
		return;

	filp_close(fd_dev->fd_prot_file, NULL);
	fd_dev->fd_prot_file = NULL;
}

static struct sbc_ops fd_sbc_ops = {
	.execute_rw		= fd_execute_rw,
	.execute_sync_cache	= fd_execute_sync_cache,
@@ -730,6 +981,9 @@ static struct se_subsystem_api fileio_template = {
	.show_configfs_dev_params = fd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= fd_get_blocks,
	.init_prot		= fd_init_prot,
	.format_prot		= fd_format_prot,
	.free_prot		= fd_free_prot,
};

static int __init fileio_module_init(void)

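For reference, the 8-byte record fd_init_format_buf() stamps per block is the DIF v1 tuple layout introduced by this series' target-core DIF support; a sketch of the structure and the sizing math (512-byte blocks assumed for the arithmetic):

	/* DIF v1 tuple, one per logical block (cf. se_dif_v1_tuple): */
	struct dif_v1_tuple_sketch {
		__be16 guard_tag;	/* CRC16-T10DIF over the block data  */
		__be16 app_tag;		/* 0xffff == escape: skip verifying  */
		__be32 ref_tag;		/* LBA (Type 1/2), 0xffffffff (T3)   */
	};

	/* Formatting stamps guard and app tag with 0xffff, so freshly
	 * formatted, never-written blocks are skipped on verify.  Sizing: a
	 * 1 MiB I/O on 512-byte blocks carries (1048576 / 512) * 8 = 16384
	 * bytes of PI, at byte offset t_task_lba * 8 in the .protection file. */
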
@@ -4,6 +4,7 @@
#define FD_VERSION		"4.0"

#define FD_MAX_DEV_NAME		256
#define FD_MAX_DEV_PROT_NAME	(FD_MAX_DEV_NAME + 16)
#define FD_DEVICE_QUEUE_DEPTH	32
#define FD_MAX_DEVICE_QUEUE_DEPTH 128
#define FD_BLOCKSIZE		512
@@ -18,6 +19,13 @@
#define FBDF_HAS_PATH		0x01
#define FBDF_HAS_SIZE		0x02
#define FDBD_HAS_BUFFERED_IO_WCE 0x04
#define FDBD_FORMAT_UNIT_SIZE	2048

struct fd_prot {
	unsigned char	*prot_buf;
	struct scatterlist *prot_sg;
	u32		prot_sg_nents;
};

struct fd_dev {
	struct se_device dev;
@@ -32,6 +40,7 @@ struct fd_dev {
	u32		fd_block_size;
	unsigned long long fd_dev_size;
	struct file	*fd_file;
	struct file	*fd_prot_file;
	/* FILEIO HBA device is connected to */
	struct fd_host *fd_host;
} ____cacheline_aligned;

@@ -91,6 +91,7 @@ static int iblock_configure_device(struct se_device *dev)
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct block_device *bd = NULL;
	struct blk_integrity *bi;
	fmode_t mode;
	int ret = -ENOMEM;

@@ -155,8 +156,40 @@ static int iblock_configure_device(struct se_device *dev)
	if (blk_queue_nonrot(q))
		dev->dev_attrib.is_nonrot = 1;

	bi = bdev_get_integrity(bd);
	if (bi) {
		struct bio_set *bs = ib_dev->ibd_bio_set;

		if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") ||
		    !strcmp(bi->name, "T10-DIF-TYPE1-IP")) {
			pr_err("IBLOCK export of blk_integrity: %s not"
			       " supported\n", bi->name);
			ret = -ENOSYS;
			goto out_blkdev_put;
		}

		if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
		} else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
		}

		if (dev->dev_attrib.pi_prot_type) {
			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
				pr_err("Unable to allocate bioset for PI\n");
				ret = -ENOMEM;
				goto out_blkdev_put;
			}
			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
				 bs->bio_integrity_pool);
		}
		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
	}

	return 0;

out_blkdev_put:
	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
	bioset_free(ib_dev->ibd_bio_set);
	ib_dev->ibd_bio_set = NULL;
@@ -170,8 +203,10 @@ static void iblock_free_device(struct se_device *dev)

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set != NULL)
	if (ib_dev->ibd_bio_set != NULL) {
		bioset_integrity_free(ib_dev->ibd_bio_set);
		bioset_free(ib_dev->ibd_bio_set);
	}
	kfree(ib_dev);
}

@@ -586,13 +621,58 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
	return bl;
}

static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
{
	struct se_device *dev = cmd->se_dev;
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct scatterlist *sg;
	int i, rc;

	bi = bdev_get_integrity(ib_dev->ibd_bd);
	if (!bi) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
	if (!bip) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return -ENOMEM;
	}

	bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
				 dev->prot_length;
	bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;

	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
		 (unsigned long long)bip->bip_iter.bi_sector);

	for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {

		rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
					    sg->offset);
		if (rc != sg->length) {
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
			return -ENOMEM;
		}

		pr_debug("Added bio integrity page: %p length: %d offset; %d\n",
			 sg_page(sg), sg->length, sg->offset);
	}

	return 0;
}

static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		  enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_req *ibr;
	struct bio *bio;
	struct bio *bio, *bio_start;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
@@ -655,6 +735,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	if (!bio)
		goto fail_free_ibr;

	bio_start = bio;
	bio_list_init(&list);
	bio_list_add(&list, bio);

@@ -688,6 +769,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		sg_num--;
	}

	if (cmd->prot_type) {
		int rc = iblock_alloc_bip(cmd, bio_start);
		if (rc)
			goto fail_put_bios;
	}

	iblock_submit_bios(&list, rw);
	iblock_complete_cmd(cmd);
	return 0;
@@ -763,7 +850,7 @@ iblock_parse_cdb(struct se_cmd *cmd)
	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

bool iblock_get_write_cache(struct se_device *dev)
static bool iblock_get_write_cache(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

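Two details above are easy to miss: the -IP integrity profiles are rejected because target core generates and checks CRC guard tags (crc_t10dif), not IP checksums, and the BIP size handed to the block layer is plain per-block arithmetic. A sketch of that sizing, assuming a 512-byte-block device:

	/* bip_iter.bi_size = blocks * tuple size; e.g. a 64 KiB WRITE carries
	 * (65536 / 512) * 8 = 1024 bytes of PI in cmd->t_prot_sg. */
	u32 bip_size = (cmd->data_length / dev->dev_attrib.block_size) *
			dev->prot_length;
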
@@ -35,6 +35,8 @@ int se_dev_set_emulate_tpu(struct se_device *, int);
int se_dev_set_emulate_tpws(struct se_device *, int);
int se_dev_set_emulate_caw(struct se_device *, int);
int se_dev_set_emulate_3pc(struct se_device *, int);
int se_dev_set_pi_prot_type(struct se_device *, int);
int se_dev_set_pi_prot_format(struct se_device *, int);
int se_dev_set_enforce_pr_isids(struct se_device *, int);
int se_dev_set_is_nonrot(struct se_device *, int);
int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
@@ -77,9 +79,9 @@ struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tp
		const char *);
void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *,
		u32, void *);
struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32);
int core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
		u32, struct se_device *);
struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun);
int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);

@@ -43,6 +43,11 @@
#define PR_APTPL_MAX_IPORT_LEN	256
#define PR_APTPL_MAX_TPORT_LEN	256

/*
 * Function defined in target_core_spc.c
 */
void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *);

extern struct kmem_cache *t10_pr_reg_cache;

extern void core_pr_dump_initiator_port(struct t10_pr_registration *,

@@ -78,23 +78,14 @@ static void rd_detach_hba(struct se_hba *hba)
	hba->hba_ptr = NULL;
}

/* rd_release_device_space():
 *
 *
 */
static void rd_release_device_space(struct rd_dev *rd_dev)
static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				u32 sg_table_count)
{
	u32 i, j, page_count = 0, sg_per_table;
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;
	u32 i, j, page_count = 0, sg_per_table;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	sg_table = rd_dev->sg_table_array;

	for (i = 0; i < rd_dev->sg_table_count; i++) {
	for (i = 0; i < sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

@@ -105,16 +96,28 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
				page_count++;
			}
		}

		kfree(sg);
	}

	kfree(sg_table);
	return page_count;
}

static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
					  rd_dev->sg_table_count);

	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	kfree(sg_table);
	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}

@@ -124,38 +127,15 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
 *
 *
 */
static int rd_build_device_space(struct rd_dev *rd_dev)
static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				 u32 total_sg_needed, unsigned char init_payload)
{
	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
	u32 i = 0, j, page_offset = 0, sg_per_table;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;

	if (rd_dev->rd_page_count <= 0) {
		pr_err("Illegal page count: %u for Ramdisk device\n",
			rd_dev->rd_page_count);
		return -EINVAL;
	}

	/* Don't need backing pages for NULLIO */
	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;

	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk"
			" scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;
	unsigned char *p;

	while (total_sg_needed) {
		sg_per_table = (total_sg_needed > max_sg_per_table) ?
@@ -186,16 +166,114 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;

			p = kmap(pg);
			memset(p, init_payload, PAGE_SIZE);
			kunmap(pg);
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	return 0;
}

static int rd_build_device_space(struct rd_dev *rd_dev)
{
	struct rd_dev_sg_table *sg_table;
	u32 sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_page_count <= 0) {
		pr_err("Illegal page count: %u for Ramdisk device\n",
		       rd_dev->rd_page_count);
		return -EINVAL;
	}

	/* Don't need backing pages for NULLIO */
	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;

	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, rd_dev->rd_page_count,
		 rd_dev->sg_table_count);
		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count);

	return 0;
}

static void rd_release_prot_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
					  rd_dev->sg_prot_count);

	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
		 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
		 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_prot_array = NULL;
	rd_dev->sg_prot_count = 0;
}

static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length)
{
	struct rd_dev_sg_table *sg_table;
	u32 total_sg_needed, sg_tables;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;

	total_sg_needed = rd_dev->rd_page_count / prot_length;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk protection"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_prot_array = sg_table;
	rd_dev->sg_prot_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);

	return 0;
}

@@ -278,6 +356,26 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
	return NULL;
}

static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_prot_count) {
		sg_table = &rd_dev->sg_prot_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
	       page);

	return NULL;
}

static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
@@ -292,6 +390,7 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	u32 rd_page;
	u32 src_len;
	u64 tmp;
	sense_reason_t rc;

	if (dev->rd_flags & RDF_NULLIO) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
@@ -314,6 +413,28 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
		cmd->t_task_lba, rd_size, rd_page, rd_offset);

	if (cmd->prot_type && data_direction == DMA_TO_DEVICE) {
		struct rd_dev_sg_table *prot_table;
		struct scatterlist *prot_sg;
		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
		u32 prot_offset, prot_page;

		tmp = cmd->t_task_lba * se_dev->prot_length;
		prot_offset = do_div(tmp, PAGE_SIZE);
		prot_page = tmp;

		prot_table = rd_get_prot_table(dev, prot_page);
		if (!prot_table)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];

		rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0,
					  prot_sg, prot_offset);
		if (rc)
			return rc;
	}

	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
		       data_direction == DMA_FROM_DEVICE ?
@@ -375,6 +496,28 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	}
	sg_miter_stop(&m);

	if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) {
		struct rd_dev_sg_table *prot_table;
		struct scatterlist *prot_sg;
		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
		u32 prot_offset, prot_page;

		tmp = cmd->t_task_lba * se_dev->prot_length;
		prot_offset = do_div(tmp, PAGE_SIZE);
		prot_page = tmp;

		prot_table = rd_get_prot_table(dev, prot_page);
		if (!prot_table)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];

		rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
					 prot_sg, prot_offset);
		if (rc)
			return rc;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}
@@ -456,6 +599,23 @@ static sector_t rd_get_blocks(struct se_device *dev)
	return blocks_long;
}

static int rd_init_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	if (!dev->dev_attrib.pi_prot_type)
		return 0;

	return rd_build_prot_space(rd_dev, dev->prot_length);
}

static void rd_free_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_prot_space(rd_dev);
}

static struct sbc_ops rd_sbc_ops = {
	.execute_rw	= rd_execute_rw,
};
@@ -481,6 +641,8 @@ static struct se_subsystem_api rd_mcp_template = {
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= rd_get_blocks,
	.init_prot		= rd_init_prot,
	.free_prot		= rd_free_prot,
};

int __init rd_module_init(void)

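The protection lookups in rd_execute_rw() above turn an LBA into a (page, offset) pair inside sg_prot_array with 64-bit math; a worked instance of the same computation, assuming 8-byte tuples and 4 KiB pages:

	u64 tmp = cmd->t_task_lba * se_dev->prot_length; /* byte offset into PI space */
	u32 prot_offset = do_div(tmp, PAGE_SIZE);	 /* remainder: offset in page */
	u32 prot_page = tmp;				 /* quotient: page index      */
	/* e.g. LBA 1000 -> byte 8000 -> page 1, offset 3904 */
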
@@ -33,8 +33,12 @@ struct rd_dev {
	u32		rd_page_count;
	/* Number of SG tables in sg_table_array */
	u32		sg_table_count;
	/* Number of SG tables in sg_prot_array */
	u32		sg_prot_count;
	/* Array of rd_dev_sg_table_t containing scatterlists */
	struct rd_dev_sg_table *sg_table_array;
	/* Array of rd_dev_sg_table containing protection scatterlists */
	struct rd_dev_sg_table *sg_prot_array;
	/* Ramdisk HBA device is connected to */
	struct rd_host *rd_host;
} ____cacheline_aligned;

@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/crc-t10dif.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
@@ -33,7 +34,7 @@
#include "target_core_internal.h"
#include "target_core_ua.h"

#include "target_core_alua.h"

static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
@@ -105,6 +106,11 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->dev_attrib.block_size & 0xff;
	/*
	 * Set P_TYPE and PROT_EN bits for DIF support
	 */
	if (dev->dev_attrib.pi_prot_type)
		buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;

	if (dev->transport->get_lbppbe)
		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
@@ -563,6 +569,44 @@ sbc_compare_and_write(struct se_cmd *cmd)
	return TCM_NO_SENSE;
}

static bool
sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
	       u32 sectors)
{
	if (!cmd->t_prot_sg || !cmd->t_prot_nents)
		return true;

	switch (dev->dev_attrib.pi_prot_type) {
	case TARGET_DIF_TYPE3_PROT:
		if (!(cdb[1] & 0xe0))
			return true;

		cmd->reftag_seed = 0xffffffff;
		break;
	case TARGET_DIF_TYPE2_PROT:
		if (cdb[1] & 0xe0)
			return false;

		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE1_PROT:
		if (!(cdb[1] & 0xe0))
			return true;

		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE0_PROT:
	default:
		return true;
	}

	cmd->prot_type = dev->dev_attrib.pi_prot_type;
	cmd->prot_length = dev->prot_length * sectors;
	cmd->prot_handover = PROT_SEPERATED;

	return true;
}

sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
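sbc_check_prot() keys off the RDPROTECT/WRPROTECT field, the top three bits of byte 1 in READ/WRITE (10/12/16) CDBs; a short decode sketch:

	/* RDPROTECT/WRPROTECT live in bits 7:5 of cdb[1]; non-zero means the
	 * initiator is carrying PI with the data, so cmd->prot_* gets set. */
	u8 protect = (cdb[1] & 0xe0) >> 5;
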
@@ -583,6 +627,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
@@ -590,6 +638,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
@@ -597,6 +649,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
@@ -612,6 +668,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -621,6 +681,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -630,6 +694,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -731,6 +799,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		case SAI_REPORT_REFERRALS:
			cmd->execute_cmd = target_emulate_report_referrals;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
			       cmd->t_task_cdb[1] & 0x1f);
@@ -959,3 +1030,182 @@ err:
	return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);

static sense_reason_t
sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
		  const void *p, sector_t sector, unsigned int ei_lba)
{
	int block_size = dev->dev_attrib.block_size;
	__be16 csum;

	csum = cpu_to_be16(crc_t10dif(p, block_size));

	if (sdt->guard_tag != csum) {
		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
		       " csum 0x%04x\n", (unsigned long long)sector,
		       be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
		       " sector MSB: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
		       " ei_lba: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), ei_lba);
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	return 0;
}

static void
sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
		  struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *psg;
	void *paddr, *addr;
	unsigned int i, len, left;

	left = sectors * dev->prot_length;

	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {

		len = min(psg->length, left);
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		addr = kmap_atomic(sg_page(sg)) + sg_off;

		if (read)
			memcpy(paddr, addr, len);
		else
			memcpy(addr, paddr, len);

		left -= len;
		kunmap_atomic(paddr);
		kunmap_atomic(addr);
	}
}

sense_reason_t
sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		     unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = 0;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
	sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);

	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_write);

sense_reason_t
sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		    unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = sg_off;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(sg)) + sg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= sg->length) {
				kunmap_atomic(paddr);
				sg = sg_next(sg);
				paddr = kmap_atomic(sg_page(sg)) + sg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			if (sdt->app_tag == cpu_to_be16(0xffff)) {
				sector++;
				offset += sizeof(struct se_dif_v1_tuple);
				continue;
			}

			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
	sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);

	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_read);

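The guard check in sbc_dif_v1_verify() is a straight CRC16-T10DIF recompute-and-compare over the block's data; a minimal standalone sketch of the same check:

	#include <linux/crc-t10dif.h>

	/* Sketch: recompute the DIF guard tag over one block and compare. */
	static bool guard_tag_ok(const unsigned char *block, int block_size,
				 __be16 guard_tag)
	{
		return guard_tag == cpu_to_be16(crc_t10dif(block, block_size));
	}
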
@@ -100,6 +100,11 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
	 */
	if (dev->dev_attrib.emulate_3pc)
		buf[5] |= 0x8;
	/*
	 * Set Protection (PROTECT) bit when DIF has been enabled.
	 */
	if (dev->dev_attrib.pi_prot_type)
		buf[5] |= 0x1;

	buf[7] = 0x2; /* CmdQue=1 */

@@ -267,7 +272,7 @@ check_t10_vend_desc:
	port = lun->lun_sep;
	if (port) {
		struct t10_alua_lu_gp *lu_gp;
		u32 padding, scsi_name_len;
		u32 padding, scsi_name_len, scsi_target_len;
		u16 lu_gp_id = 0;
		u16 tg_pt_gp_id = 0;
		u16 tpgt;
@@ -365,16 +370,6 @@ check_lu_gp:
	 * section 7.5.1 Table 362
	 */
check_scsi_name:
	scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg));
	/* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
	scsi_name_len += 10;
	/* Check for 4-byte padding */
	padding = ((-scsi_name_len) & 3);
	if (padding != 0)
		scsi_name_len += padding;
	/* Header size + Designation descriptor */
	scsi_name_len += 4;

	buf[off] =
		(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
	buf[off++] |= 0x3; /* CODE SET == UTF-8 */
@@ -402,13 +397,57 @@ check_scsi_name:
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		padding = ((-scsi_name_len) & 3);
		if (padding)
			scsi_name_len += padding;
		if (scsi_name_len > 256)
			scsi_name_len = 256;

		buf[off-1] = scsi_name_len;
		off += scsi_name_len;
		/* Header size + Designation descriptor */
		len += (scsi_name_len + 4);

		/*
		 * Target device designator
		 */
		buf[off] =
			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target device: 10b */
		buf[off] |= 0x20;
		/* DESIGNATOR TYPE == SCSI name string */
		buf[off++] |= 0x8;
		off += 2; /* Skip over Reserved and length */
		/*
		 * SCSI name string identifier containing, $FABRIC_MOD
		 * dependent information. For LIO-Target and iSCSI
		 * Target Port, this means "<iSCSI name>" in
		 * UTF-8 encoding.
		 */
		scsi_target_len = sprintf(&buf[off], "%s",
					  tpg->se_tpg_tfo->tpg_get_wwn(tpg));
		scsi_target_len += 1 /* Include NULL terminator */;
		/*
		 * The null-terminated, null-padded (see 4.4.2) SCSI
		 * NAME STRING field contains a UTF-8 format string.
		 * The number of bytes in the SCSI NAME STRING field
		 * (i.e., the value in the DESIGNATOR LENGTH field)
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		padding = ((-scsi_target_len) & 3);
		if (padding)
			scsi_target_len += padding;
		if (scsi_target_len > 256)
			scsi_target_len = 256;

		buf[off-1] = scsi_target_len;
		off += scsi_target_len;

		/* Header size + Designation descriptor */
		len += (scsi_target_len + 4);
	}
	buf[2] = ((len >> 8) & 0xff);
	buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
@@ -436,12 +475,26 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
	struct se_device *dev = cmd->se_dev;

	buf[3] = 0x3c;
	/*
	 * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
	 * only for TYPE3 protection.
	 */
	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
		buf[4] = 0x5;
	else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT)
		buf[4] = 0x4;

	/* Set HEADSUP, ORDSUP, SIMPSUP */
	buf[5] = 0x07;

	/* If WriteCache emulation is enabled, set V_SUP */
	if (spc_check_dev_wce(dev))
		buf[6] = 0x01;
	/* If an LBA map is present set R_SUP */
	spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
	if (!list_empty(&dev->t10_alua.lba_map_list))
		buf[8] = 0x10;
	spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
	return 0;
}

@@ -600,6 +653,20 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
	return 0;
}

/* Referrals VPD page */
static sense_reason_t
spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = 0x0c;
	put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
	put_unaligned_be32(dev->t10_alua.lba_map_segment_multiplier, &buf[12]);

	return 0;
}

static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);

@@ -614,6 +681,7 @@ static struct {
	{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
	{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
	{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
	{ .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
};

/* supported vital product data pages */
@@ -643,11 +711,15 @@ spc_emulate_inquiry(struct se_cmd *cmd)
	struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
	unsigned char *rbuf;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned char buf[SE_INQUIRY_BUF];
	unsigned char *buf;
	sense_reason_t ret;
	int p;

	memset(buf, 0, SE_INQUIRY_BUF);
	buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate response buffer for INQUIRY\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (dev == tpg->tpg_virt_lun0.lun_se_dev)
		buf[0] = 0x3f; /* Not connected */
@@ -680,9 +752,10 @@ spc_emulate_inquiry(struct se_cmd *cmd)
out:
	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}
	kfree(buf);

	if (!ret)
		target_complete_cmd(cmd, GOOD);
@@ -785,6 +858,19 @@ static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
	 * status (see SAM-4).
	 */
	p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
	/*
	 * From spc4r30, section 7.5.7 Control mode page
	 *
	 * Application Tag Owner (ATO) bit set to one.
	 *
	 * If the ATO bit is set to one the device server shall not modify the
	 * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection
	 * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
	 * TAG field.
	 */
	if (dev->dev_attrib.pi_prot_type)
		p[5] |= 0x80;

	p[8] = 0xff;
	p[9] = 0xff;
	p[11] = 30;

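On the Extended INQUIRY (VPD 0x86) change above: byte 4 carries the protection-checking flags, and the chosen values encode that Type 1 devices check guard and reference tags while Type 3 has no meaningful reference tag to check. Bit positions below are as assumed from the SPC-4 layout:

	/* VPD 0x86 byte 4: GRD_CHK is bit 2, APP_CHK bit 1, REF_CHK bit 0. */
	u8 byte4_type1 = (1 << 2) | (1 << 0);	/* 0x5: GRD_CHK | REF_CHK */
	u8 byte4_type3 = (1 << 2);		/* 0x4: GRD_CHK only      */
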
@@ -656,7 +656,7 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
	spin_lock_init(&lun->lun_sep_lock);
	init_completion(&lun->lun_ref_comp);

	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
	ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

@@ -781,7 +781,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_pre_addlun(
struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
@@ -811,11 +811,11 @@ struct se_lun *core_tpg_pre_addlun(
	return lun;
}

int core_tpg_post_addlun(
int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	void *lun_ptr)
	struct se_device *dev)
{
	int ret;

@@ -823,7 +823,7 @@ int core_tpg_post_addlun(
	if (ret < 0)
		return ret;

	ret = core_dev_export(lun_ptr, tpg, lun);
	ret = core_dev_export(dev, tpg, lun);
	if (ret < 0) {
		percpu_ref_cancel_init(&lun->lun_ref);
		return ret;

@ -62,6 +62,8 @@ struct kmem_cache *t10_alua_lu_gp_cache;
|
|||
struct kmem_cache *t10_alua_lu_gp_mem_cache;
|
||||
struct kmem_cache *t10_alua_tg_pt_gp_cache;
|
||||
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
|
||||
struct kmem_cache *t10_alua_lba_map_cache;
|
||||
struct kmem_cache *t10_alua_lba_map_mem_cache;
|
||||
|
||||
static void transport_complete_task_attr(struct se_cmd *cmd);
|
||||
static void transport_handle_queue_full(struct se_cmd *cmd,
|
||||
|
@ -128,14 +130,36 @@ int init_se_kmem_caches(void)
|
|||
"mem_t failed\n");
|
||||
goto out_free_tg_pt_gp_cache;
|
||||
}
|
||||
t10_alua_lba_map_cache = kmem_cache_create(
|
||||
"t10_alua_lba_map_cache",
|
||||
sizeof(struct t10_alua_lba_map),
|
||||
__alignof__(struct t10_alua_lba_map), 0, NULL);
|
||||
if (!t10_alua_lba_map_cache) {
|
||||
pr_err("kmem_cache_create() for t10_alua_lba_map_"
|
||||
"cache failed\n");
|
||||
goto out_free_tg_pt_gp_mem_cache;
|
||||
}
|
||||
t10_alua_lba_map_mem_cache = kmem_cache_create(
|
||||
"t10_alua_lba_map_mem_cache",
|
||||
sizeof(struct t10_alua_lba_map_member),
|
||||
__alignof__(struct t10_alua_lba_map_member), 0, NULL);
|
||||
if (!t10_alua_lba_map_mem_cache) {
|
||||
pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
|
||||
"cache failed\n");
|
||||
goto out_free_lba_map_cache;
|
||||
}
|
||||
|
||||
target_completion_wq = alloc_workqueue("target_completion",
|
||||
WQ_MEM_RECLAIM, 0);
|
||||
if (!target_completion_wq)
|
||||
goto out_free_tg_pt_gp_mem_cache;
|
||||
goto out_free_lba_map_mem_cache;
|
||||
|
||||
return 0;
|
||||
|
||||
out_free_lba_map_mem_cache:
|
||||
kmem_cache_destroy(t10_alua_lba_map_mem_cache);
|
||||
out_free_lba_map_cache:
|
||||
kmem_cache_destroy(t10_alua_lba_map_cache);
|
||||
out_free_tg_pt_gp_mem_cache:
|
||||
kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
|
||||
out_free_tg_pt_gp_cache:
|
||||
|
@ -164,6 +188,8 @@ void release_se_kmem_caches(void)
|
|||
kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
|
||||
kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
|
||||
kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
|
||||
kmem_cache_destroy(t10_alua_lba_map_cache);
|
||||
kmem_cache_destroy(t10_alua_lba_map_mem_cache);
|
||||
}
|
||||
|
||||
/* This code ensures unique mib indexes are handed out. */
|
||||
|
@ -568,10 +594,11 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
|
|||
{
|
||||
struct se_lun *lun = cmd->se_lun;
|
||||
|
||||
if (!lun || !cmd->lun_ref_active)
|
||||
if (!lun)
|
||||
return;
|
||||
|
||||
percpu_ref_put(&lun->lun_ref);
|
||||
if (cmpxchg(&cmd->lun_ref_active, true, false))
|
||||
percpu_ref_put(&lun->lun_ref);
|
||||
}
|
||||
|
||||
void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
|
||||
|
@ -1284,6 +1311,8 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
|
|||
* @sgl_count: scatterlist count for unidirectional mapping
|
||||
* @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
|
||||
* @sgl_bidi_count: scatterlist count for bidirectional READ mapping
|
||||
* @sgl_prot: struct scatterlist memory protection information
|
||||
* @sgl_prot_count: scatterlist count for protection information
|
||||
*
|
||||
* Returns non zero to signal active I/O shutdown failure. All other
|
||||
* setup exceptions will be returned as a SCSI CHECK_CONDITION response,
|
||||
|
@ -1296,7 +1325,8 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
|
|||
unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
|
||||
u32 data_length, int task_attr, int data_dir, int flags,
|
||||
struct scatterlist *sgl, u32 sgl_count,
|
||||
struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
|
||||
struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
|
||||
struct scatterlist *sgl_prot, u32 sgl_prot_count)
|
||||
{
|
||||
struct se_portal_group *se_tpg;
|
||||
sense_reason_t rc;
|
||||
|
@ -1338,6 +1368,14 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
|
|||
target_put_sess_cmd(se_sess, se_cmd);
|
||||
return 0;
|
||||
}
|
||||
/*
|
||||
* Save pointers for SGLs containing protection information,
|
||||
* if present.
|
||||
*/
|
||||
if (sgl_prot_count) {
|
||||
se_cmd->t_prot_sg = sgl_prot;
|
||||
se_cmd->t_prot_nents = sgl_prot_count;
|
||||
}
|
||||
|
||||
rc = target_setup_cmd_from_cdb(se_cmd, cdb);
|
||||
if (rc != 0) {
|
||||
|
@ -1380,6 +1418,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
|
|||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if we need to delay processing because of ALUA
|
||||
* Active/NonOptimized primary access state..
|
||||
|
@ -1419,7 +1458,7 @@ int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
|
|||
{
|
||||
return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
|
||||
unpacked_lun, data_length, task_attr, data_dir,
|
||||
flags, NULL, 0, NULL, 0);
|
||||
flags, NULL, 0, NULL, 0, NULL, 0);
|
||||
}
|
||||
EXPORT_SYMBOL(target_submit_cmd);
|
||||
|
||||
|
@ -2455,6 +2494,19 @@ static int transport_get_sense_codes(
|
|||
return 0;
|
||||
}
|
||||
|
||||
static
|
||||
void transport_err_sector_info(unsigned char *buffer, sector_t bad_sector)
|
||||
{
|
||||
/* Place failed LBA in sense data information descriptor 0. */
|
||||
buffer[SPC_ADD_SENSE_LEN_OFFSET] = 0xc;
|
||||
buffer[SPC_DESC_TYPE_OFFSET] = 0; /* Information */
|
||||
buffer[SPC_ADDITIONAL_DESC_LEN_OFFSET] = 0xa;
|
||||
buffer[SPC_VALIDITY_OFFSET] = 0x80;
|
||||
|
||||
/* Descriptor Information: failing sector */
|
||||
put_unaligned_be64(bad_sector, &buffer[12]);
|
||||
}
|
||||
|
||||
int
|
||||
transport_send_check_condition_and_sense(struct se_cmd *cmd,
|
||||
sense_reason_t reason, int from_transport)
|
||||
|
@@ -2648,6 +2700,39 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
 		buffer[SPC_ASC_KEY_OFFSET] = 0x1d;
 		buffer[SPC_ASCQ_KEY_OFFSET] = 0x00;
 		break;
+	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
+		/* CURRENT ERROR */
+		buffer[0] = 0x70;
+		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* LOGICAL BLOCK GUARD CHECK FAILED */
+		buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+		buffer[SPC_ASCQ_KEY_OFFSET] = 0x01;
+		transport_err_sector_info(buffer, cmd->bad_sector);
+		break;
+	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
+		/* CURRENT ERROR */
+		buffer[0] = 0x70;
+		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
+		buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+		buffer[SPC_ASCQ_KEY_OFFSET] = 0x02;
+		transport_err_sector_info(buffer, cmd->bad_sector);
+		break;
+	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
+		/* CURRENT ERROR */
+		buffer[0] = 0x70;
+		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
+		buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+		buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
+		transport_err_sector_info(buffer, cmd->bad_sector);
+		break;
 	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
 	default:
 		/* CURRENT ERROR */
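All three new TCM_LOGICAL_BLOCK_*_CHECK_FAILED cases funnel through transport_err_sector_info() with cmd->bad_sector, so a backend's verify path only has to record where the failure happened before returning. A hedged sketch of that backend side (loop body and the guard_tag_matches() helper are illustrative; the sbc_dif_verify_write/read routines in this series follow this shape):

	for (i = 0; i < sectors; i++, sector++) {
		if (!guard_tag_matches(sector)) {	/* hypothetical check */
			cmd->bad_sector = sector;	/* reported via sense data */
			return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
		}
	}
	return 0;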
@@ -98,7 +98,6 @@ int core_scsi3_ua_allocate(
 		pr_err("Unable to allocate struct se_ua\n");
 		return -ENOMEM;
 	}
-	INIT_LIST_HEAD(&ua->ua_dev_list);
 	INIT_LIST_HEAD(&ua->ua_nacl_list);
 
 	ua->ua_nacl = nacl;
@@ -39,10 +39,6 @@
 #include "target_core_xcopy.h"
 
 static struct workqueue_struct *xcopy_wq = NULL;
-/*
- * From target_core_spc.c
- */
-extern void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *);
 /*
  * From target_core_device.c
  */
@@ -438,7 +438,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
 	struct se_session *se_sess = sess->se_sess;
 	int tag;
 
-	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
 	if (tag < 0)
 		goto busy;
 
@@ -267,7 +267,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
 	return found;
 }
 
-struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
+static struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
 {
 	struct ft_node_acl *acl;
 
@@ -552,7 +552,7 @@ static struct target_core_fabric_ops ft_fabric_ops = {
 	.fabric_drop_nodeacl =		&ft_del_acl,
 };
 
-int ft_register_configfs(void)
+static int ft_register_configfs(void)
 {
 	struct target_fabric_configfs *fabric;
 	int ret;
@@ -599,7 +599,7 @@ int ft_register_configfs(void)
 	return 0;
 }
 
-void ft_deregister_configfs(void)
+static void ft_deregister_configfs(void)
 {
 	if (!ft_configfs)
 		return;
@@ -728,7 +728,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq,
 	}
 	se_sess = tv_nexus->tvn_se_sess;
 
-	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
 	if (tag < 0) {
 		pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
 		return ERR_PTR(-ENOMEM);
@@ -889,7 +889,7 @@ static void tcm_vhost_submission_work(struct work_struct *work)
 			cmd->tvc_lun, cmd->tvc_exp_data_len,
 			cmd->tvc_task_attr, cmd->tvc_data_direction,
 			TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
-			sg_bidi_ptr, sg_no_bidi);
+			sg_bidi_ptr, sg_no_bidi, NULL, 0);
 	if (rc < 0) {
 		transport_send_check_condition_and_sense(se_cmd,
 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
@@ -4,6 +4,7 @@
 #include <linux/types.h>
 #include <linux/bitops.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 #include <linux/spinlock_types.h>
 #include <linux/wait.h>
 #include <linux/cpumask.h>
@@ -61,7 +62,7 @@ struct percpu_ida {
 /* Max size of percpu freelist, */
 #define IDA_DEFAULT_PCPU_SIZE	((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2)
 
-int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp);
+int percpu_ida_alloc(struct percpu_ida *pool, int state);
 void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
 
 void percpu_ida_destroy(struct percpu_ida *pool);
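The conversion rule applied to callers throughout this series: GFP_ATOMIC becomes TASK_RUNNING (never sleep), GFP_KERNEL/__GFP_WAIT becomes TASK_UNINTERRUPTIBLE (sleep until a tag frees up), and callers that must honour signals pass TASK_INTERRUPTIBLE and handle -ERESTARTSYS. An illustrative fragment, not taken from any single caller:

	int tag;

	/* Atomic context: fail fast rather than sleep. */
	tag = percpu_ida_alloc(&pool, TASK_RUNNING);
	if (tag < 0)
		return -ENOSPC;		/* pool exhausted right now */

	/* Alternatively, process context that must honour signals: */
	tag = percpu_ida_alloc(&pool, TASK_INTERRUPTIBLE);
	if (tag == -ERESTARTSYS)
		return tag;		/* woken by a signal while waiting */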
@@ -155,6 +155,7 @@ enum scsi_timeouts {
 /* values for service action in */
 #define	SAI_READ_CAPACITY_16  0x10
 #define SAI_GET_LBA_STATUS    0x12
+#define SAI_REPORT_REFERRALS  0x13
 /* values for VARIABLE_LENGTH_CMD service action codes
  * see spc4r17 Section D.3.5, table D.7 and D.8 */
 #define VLC_SA_RECEIVE_CREDENTIAL 0x1800
@@ -94,7 +94,7 @@ extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
 /*
  * From iscsi_target_util.c
  */
-extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
+extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int);
 extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *,
 			unsigned char *, __be32);
 extern void iscsit_release_cmd(struct iscsi_cmd *);
@@ -41,6 +41,9 @@ struct se_subsystem_api {
 	unsigned int (*get_io_opt)(struct se_device *);
 	unsigned char *(*get_sense_buffer)(struct se_cmd *);
 	bool (*get_write_cache)(struct se_device *);
+	int (*init_prot)(struct se_device *);
+	int (*format_prot)(struct se_device *);
+	void (*free_prot)(struct se_device *);
 };
 
 struct sbc_ops {
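Backends opt in to T10-PI by filling these three new hooks. A hedged sketch of what such a registration might look like (the rd_* callback names are illustrative, patterned on the RAMDISK/FILEIO backends this series converts):

	static struct se_subsystem_api rd_mcp_template = {
		.name		= "rd_mcp",
		/* ... existing ops ... */
		.init_prot	= rd_init_prot,		/* allocate protection SGLs */
		.format_prot	= rd_format_prot,	/* seed PI tuples on format */
		.free_prot	= rd_free_prot,		/* release protection SGLs */
	};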
@@ -70,6 +73,10 @@ sense_reason_t sbc_execute_unmap(struct se_cmd *cmd,
 	sense_reason_t (*do_unmap_fn)(struct se_cmd *cmd, void *priv,
 				      sector_t lba, sector_t nolb),
 	void *priv);
+sense_reason_t	sbc_dif_verify_write(struct se_cmd *, sector_t, unsigned int,
+				     unsigned int, struct scatterlist *, int);
+sense_reason_t	sbc_dif_verify_read(struct se_cmd *, sector_t, unsigned int,
+				    unsigned int, struct scatterlist *, int);
 
 void	transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
 int	transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
@@ -37,6 +37,9 @@
 /* Used by transport_send_check_condition_and_sense() */
 #define SPC_SENSE_KEY_OFFSET			2
 #define SPC_ADD_SENSE_LEN_OFFSET		7
+#define SPC_DESC_TYPE_OFFSET			8
+#define SPC_ADDITIONAL_DESC_LEN_OFFSET		9
+#define SPC_VALIDITY_OFFSET			10
 #define SPC_ASC_KEY_OFFSET			12
 #define SPC_ASCQ_KEY_OFFSET			13
 #define TRANSPORT_IQN_LEN			224
@@ -112,7 +115,7 @@
 /* Queue Algorithm Modifier default for restricted reordering in control mode page */
 #define DA_EMULATE_REST_REORD			0
 
-#define SE_INQUIRY_BUF				512
+#define SE_INQUIRY_BUF				1024
 #define SE_MODE_PAGE_BUF			512
 #define SE_SENSE_BUF				96
@@ -205,6 +208,9 @@ enum tcm_sense_reason_table {
 	TCM_OUT_OF_RESOURCES			= R(0x12),
 	TCM_PARAMETER_LIST_LENGTH_ERROR		= R(0x13),
 	TCM_MISCOMPARE_VERIFY			= R(0x14),
+	TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED	= R(0x15),
+	TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED	= R(0x16),
+	TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED	= R(0x17),
 #undef R
 };
@@ -247,10 +253,28 @@ typedef enum {
 
 struct se_cmd;
 
+struct t10_alua_lba_map_member {
+	struct list_head lba_map_mem_list;
+	int lba_map_mem_alua_state;
+	int lba_map_mem_alua_pg_id;
+};
+
+struct t10_alua_lba_map {
+	u64 lba_map_first_lba;
+	u64 lba_map_last_lba;
+	struct list_head lba_map_list;
+	struct list_head lba_map_mem_list;
+};
+
 struct t10_alua {
 	/* ALUA Target Port Group ID */
 	u16	alua_tg_pt_gps_counter;
 	u32	alua_tg_pt_gps_count;
+	/* Referrals support */
+	spinlock_t lba_map_lock;
+	u32	lba_map_segment_size;
+	u32	lba_map_segment_multiplier;
+	struct list_head lba_map_list;
 	spinlock_t tg_pt_gps_lock;
 	struct se_device *t10_dev;
 	/* Used for default ALUA Target Port Group */
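Referral lookup then becomes a walk of lba_map_list for the segment covering a given LBA; the members of that segment carry the per-target-port-group ALUA states. A hedged sketch of the lookup (locking and error handling elided; illustrative fragment only):

	struct t10_alua_lba_map *map;

	list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
		if (lba >= map->lba_map_first_lba && lba <= map->lba_map_last_lba)
			return map;	/* lba_map_mem_list holds per-PG states */
	}
	return NULL;	/* LBA not covered by any referral segment */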
@@ -284,6 +308,8 @@ struct t10_alua_tg_pt_gp {
 	u16	tg_pt_gp_id;
 	int	tg_pt_gp_valid_id;
 	int	tg_pt_gp_alua_supported_states;
+	int	tg_pt_gp_alua_pending_state;
+	int	tg_pt_gp_alua_previous_state;
 	int	tg_pt_gp_alua_access_status;
 	int	tg_pt_gp_alua_access_type;
 	int	tg_pt_gp_nonop_delay_msecs;
@@ -291,9 +317,6 @@ struct t10_alua_tg_pt_gp {
 	int	tg_pt_gp_implicit_trans_secs;
 	int	tg_pt_gp_pref;
 	int	tg_pt_gp_write_metadata;
-	/* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */
-#define ALUA_MD_BUF_LEN				1024
-	u32	tg_pt_gp_md_buf_len;
 	u32	tg_pt_gp_members;
 	atomic_t tg_pt_gp_alua_access_state;
 	atomic_t tg_pt_gp_ref_cnt;
@@ -303,6 +326,10 @@ struct t10_alua_tg_pt_gp {
 	struct config_group tg_pt_gp_group;
 	struct list_head tg_pt_gp_list;
 	struct list_head tg_pt_gp_mem_list;
+	struct se_port *tg_pt_gp_alua_port;
+	struct se_node_acl *tg_pt_gp_alua_nacl;
+	struct delayed_work tg_pt_gp_transition_work;
+	struct completion *tg_pt_gp_transition_complete;
 };
 
 struct t10_alua_tg_pt_gp_member {
@@ -414,6 +441,34 @@ struct se_tmr_req {
 	struct list_head tmr_list;
 };
 
+enum target_prot_op {
+	TARGET_PROT_NORMAL = 0,
+	TARGET_PROT_DIN_INSERT,
+	TARGET_PROT_DOUT_INSERT,
+	TARGET_PROT_DIN_STRIP,
+	TARGET_PROT_DOUT_STRIP,
+	TARGET_PROT_DIN_PASS,
+	TARGET_PROT_DOUT_PASS,
+};
+
+enum target_prot_ho {
+	PROT_SEPERATED,
+	PROT_INTERLEAVED,
+};
+
+enum target_prot_type {
+	TARGET_DIF_TYPE0_PROT,
+	TARGET_DIF_TYPE1_PROT,
+	TARGET_DIF_TYPE2_PROT,
+	TARGET_DIF_TYPE3_PROT,
+};
+
+struct se_dif_v1_tuple {
+	__be16 guard_tag;
+	__be16 app_tag;
+	__be32 ref_tag;
+};
+
 struct se_cmd {
 	/* SAM response code being sent to initiator */
 	u8			scsi_status;
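With one 8-byte se_dif_v1_tuple per logical block, sizing the protection buffer for a command is a straight multiplication. A hedged sketch (the helper name is illustrative, not from the series):

	static u32 se_cmd_prot_length(u32 data_length, u32 block_size)
	{
		u32 nr_blocks = data_length / block_size;

		/* guard_tag + app_tag + ref_tag = 8 bytes per block */
		return nr_blocks * sizeof(struct se_dif_v1_tuple);
	}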
@@ -497,14 +552,24 @@ struct se_cmd {
 	void			*priv;
 
 	/* Used for lun->lun_ref counting */
-	bool			lun_ref_active;
+	int			lun_ref_active;
+
+	/* DIF related members */
+	enum target_prot_op	prot_op;
+	enum target_prot_type	prot_type;
+	u32			prot_length;
+	u32			reftag_seed;
+	struct scatterlist	*t_prot_sg;
+	unsigned int		t_prot_nents;
+	enum target_prot_ho	prot_handover;
+	sense_reason_t		pi_err;
+	sector_t		bad_sector;
 };
 
 struct se_ua {
 	u8			ua_asc;
 	u8			ua_ascq;
 	struct se_node_acl	*ua_nacl;
 	struct list_head	ua_dev_list;
 	struct list_head	ua_nacl_list;
 };
@@ -605,6 +670,9 @@ struct se_dev_attrib {
 	int		emulate_tpws;
 	int		emulate_caw;
 	int		emulate_3pc;
+	int		pi_prot_format;
+	enum target_prot_type pi_prot_type;
+	enum target_prot_type hw_pi_prot_type;
 	int		enforce_pr_isids;
 	int		is_nonrot;
 	int		emulate_rest_reord;
@@ -736,6 +804,8 @@ struct se_device {
 	/* Linked list for struct se_hba struct se_device list */
 	struct list_head	dev_list;
 	struct se_lun		xcopy_lun;
+	/* Protection Information */
+	int			prot_length;
 };
 
 struct se_hba {
@@ -105,7 +105,8 @@ sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u32);
 sense_reason_t	target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
 int	target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *,
 		unsigned char *, unsigned char *, u32, u32, int, int, int,
-		struct scatterlist *, u32, struct scatterlist *, u32);
+		struct scatterlist *, u32, struct scatterlist *, u32,
+		struct scatterlist *, u32);
 int	target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
 		unsigned char *, u32, u32, int, int, int);
 int	target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
@@ -132,22 +132,22 @@ static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
 /**
  * percpu_ida_alloc - allocate a tag
  * @pool: pool to allocate from
- * @gfp: gfp flags
+ * @state: task state for prepare_to_wait
  *
  * Returns a tag - an integer in the range [0..nr_tags) (passed to
  * tag_pool_init()), or otherwise -ENOSPC on allocation failure.
  *
  * Safe to be called from interrupt context (assuming it isn't passed
- * __GFP_WAIT, of course).
+ * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course).
  *
  * @gfp indicates whether or not to wait until a free id is available (it's not
  * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep
  * however long it takes until another thread frees an id (same semantics as a
  * mempool).
  *
- * Will not fail if passed __GFP_WAIT.
+ * Will not fail if passed TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE.
  */
-int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
+int percpu_ida_alloc(struct percpu_ida *pool, int state)
 {
 	DEFINE_WAIT(wait);
 	struct percpu_ida_cpu *tags;
@@ -174,7 +174,8 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
 		 *
 		 * global lock held and irqs disabled, don't need percpu lock
 		 */
-		prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
+		if (state != TASK_RUNNING)
+			prepare_to_wait(&pool->wait, &wait, state);
 
 		if (!tags->nr_free)
 			alloc_global_tags(pool, tags);
@@ -191,16 +192,22 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
 		spin_unlock(&pool->lock);
 		local_irq_restore(flags);
 
-		if (tag >= 0 || !(gfp & __GFP_WAIT))
+		if (tag >= 0 || state == TASK_RUNNING)
 			break;
 
+		if (signal_pending_state(state, current)) {
+			tag = -ERESTARTSYS;
+			break;
+		}
+
 		schedule();
 
 		local_irq_save(flags);
 		tags = this_cpu_ptr(pool->tag_cpu);
 	}
+	if (state != TASK_RUNNING)
+		finish_wait(&pool->wait, &wait);
 
-	finish_wait(&pool->wait, &wait);
 	return tag;
 }
 EXPORT_SYMBOL_GPL(percpu_ida_alloc);
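The signal_pending_state() check above is what un-hangs the iscsi-target session reset case called out in the pull request: a thread blocked in this wait loop with TASK_INTERRUPTIBLE is now woken by the signal sent during connection reset and unwinds with -ERESTARTSYS instead of sleeping forever. A hedged caller sketch in the shape of iscsit_allocate_cmd() (simplified; the error path is illustrative):

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
	if (tag < 0)
		return NULL;	/* -ENOSPC (TASK_RUNNING) or -ERESTARTSYS (signalled) */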