Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target updates from Nicholas Bellinger:
 "The most notable item is the IBM virtual SCSI target driver, which was
  originally ported to target-core back in 2010 by Tomo-san, and has been
  brought forward to v4.x code by Bryant Ly, Michael Cyr and co over the
  last months.

  Also included are two ORDERED task related bug-fixes Bryant + Michael
  found along the way using ibmvscsis with AIX guests, plus a few
  miscellaneous target-core + iscsi-target bug-fixes with associated
  stable tags."

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
  target: fix spelling mistake: "limitiation" -> "limitation"
  target: Fix residual overflow handling in target_complete_cmd_with_length
  tcm_fc: set and unset FCP_SPPF_TARG_FCN
  iscsi-target: Fix panic when adding second TCP connection to iSCSI session
  ibmvscsis: Initial commit of IBM VSCSI Tgt Driver
  target: Fix ordered task CHECK_CONDITION early exception handling
  target: Fix ordered task target_setup_cmd_from_cdb exception hang
  target: Fix max_unmap_lba_count calc overflow
  target: Fix race between iscsi-target connection shutdown + ABORT_TASK
  target: Fix missing complete during ABORT_TASK + CMD_T_FABRIC_STOP
commit fdf1f7ff1b
MAINTAINERS
@@ -5831,7 +5831,15 @@ M:	Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
L:	linux-scsi@vger.kernel.org
S:	Supported
F:	drivers/scsi/ibmvscsi/ibmvscsi*
F:	drivers/scsi/ibmvscsi/viosrp.h
F:	include/scsi/viosrp.h

IBM Power Virtual SCSI Device Target Driver
M:	Bryant G. Ly <bryantly@linux.vnet.ibm.com>
M:	Michael Cyr <mikecyr@linux.vnet.ibm.com>
L:	linux-scsi@vger.kernel.org
L:	target-devel@vger.kernel.org
S:	Supported
F:	drivers/scsi/ibmvscsi_tgt/

IBM Power Virtual FC Device Drivers
M:	Tyrel Datwyler <tyreld@linux.vnet.ibm.com>

drivers/scsi/Kconfig
@@ -838,6 +838,23 @@ config SCSI_IBMVSCSI
	  To compile this driver as a module, choose M here: the
	  module will be called ibmvscsi.

config SCSI_IBMVSCSIS
	tristate "IBM Virtual SCSI Server support"
	depends on PPC_PSERIES && TARGET_CORE && SCSI && PCI
	help
	  This is the IBM POWER Virtual SCSI Target Server.
	  The driver uses the SRP protocol for communication between the
	  guest and/or the host partitions that run on the same server.
	  More information on the VSCSI protocol can be found at www.power.org

	  The userspace configuration needed to initialize the driver can
	  be found here:

	  https://github.com/powervm/ibmvscsis/wiki/Configuration

	  To compile this driver as a module, choose M here: the
	  module will be called ibmvscsis.

config SCSI_IBMVFC
	tristate "IBM Virtual FC support"
	depends on PPC_PSERIES && SCSI

drivers/scsi/Makefile
@@ -128,6 +128,7 @@ obj-$(CONFIG_SCSI_SNI_53C710)	+= 53c700.o sni_53c710.o
obj-$(CONFIG_SCSI_NSP32)	+= nsp32.o
obj-$(CONFIG_SCSI_IPR)		+= ipr.o
obj-$(CONFIG_SCSI_IBMVSCSI)	+= ibmvscsi/
obj-$(CONFIG_SCSI_IBMVSCSIS)	+= ibmvscsi_tgt/
obj-$(CONFIG_SCSI_IBMVFC)	+= ibmvscsi/
obj-$(CONFIG_SCSI_HPTIOP)	+= hptiop.o
obj-$(CONFIG_SCSI_STEX)		+= stex.o

drivers/scsi/ibmvscsi/ibmvfc.h
@@ -26,7 +26,7 @@

#include <linux/list.h>
#include <linux/types.h>
#include "viosrp.h"
#include <scsi/viosrp.h>

#define IBMVFC_NAME	"ibmvfc"
#define IBMVFC_DRIVER_VERSION	"1.0.11"

drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -33,7 +33,7 @@
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include "viosrp.h"
#include <scsi/viosrp.h>

struct scsi_cmnd;
struct Scsi_Host;

drivers/scsi/ibmvscsi_tgt/Makefile
@@ -0,0 +1,3 @@
obj-$(CONFIG_SCSI_IBMVSCSIS)	+= ibmvscsis.o

ibmvscsis-y := libsrp.o ibmvscsi_tgt.o

The diff of one new file in this series is not shown here because of its large size.
|
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
@@ -0,0 +1,346 @@
|
|||
/*******************************************************************************
|
||||
* IBM Virtual SCSI Target Driver
|
||||
* Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
|
||||
* Santiago Leon (santil@us.ibm.com) IBM Corp.
|
||||
* Linda Xie (lxie@us.ibm.com) IBM Corp.
|
||||
*
|
||||
* Copyright (C) 2005-2011 FUJITA Tomonori <tomof@acm.org>
|
||||
* Copyright (C) 2010 Nicholas A. Bellinger <nab@kernel.org>
|
||||
* Copyright (C) 2016 Bryant G. Ly <bryantly@linux.vnet.ibm.com> IBM Corp.
|
||||
*
|
||||
* Authors: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
|
||||
* Authors: Michael Cyr <mikecyr@linux.vnet.ibm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
#ifndef __H_IBMVSCSI_TGT
|
||||
#define __H_IBMVSCSI_TGT
|
||||
|
||||
#include "libsrp.h"
|
||||
|
||||
#define SYS_ID_NAME_LEN 64
|
||||
#define PARTITION_NAMELEN 96
|
||||
#define IBMVSCSIS_NAMELEN 32
|
||||
|
||||
#define MSG_HI 0
|
||||
#define MSG_LOW 1
|
||||
|
||||
#define MAX_CMD_Q_PAGES 4
|
||||
#define CRQ_PER_PAGE (PAGE_SIZE / sizeof(struct viosrp_crq))
|
||||
/* in terms of number of elements */
|
||||
#define DEFAULT_CMD_Q_SIZE CRQ_PER_PAGE
|
||||
#define MAX_CMD_Q_SIZE (DEFAULT_CMD_Q_SIZE * MAX_CMD_Q_PAGES)
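A quick worked sizing example may help here; it assumes a 4 KiB PAGE_SIZE and the 16-byte struct viosrp_crq from viosrp.h, which are typical values rather than guarantees:

/*
 * Illustrative arithmetic only (assumes PAGE_SIZE = 4096 and
 * sizeof(struct viosrp_crq) = 16):
 *
 *   CRQ_PER_PAGE       = 4096 / 16     = 256 queue elements per page
 *   DEFAULT_CMD_Q_SIZE = CRQ_PER_PAGE  = 256 elements
 *   MAX_CMD_Q_SIZE     = 256 * 4 pages = 1024 elements
 */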
|
||||
|
||||
#define SRP_VIOLATION 0x102 /* general error code */
|
||||
|
||||
/*
|
||||
* SRP buffer formats defined as of 16.a supported by this driver.
|
||||
*/
|
||||
#define SUPPORTED_FORMATS ((SRP_DATA_DESC_DIRECT << 1) | \
|
||||
(SRP_DATA_DESC_INDIRECT << 1))
|
||||
|
||||
#define SCSI_LUN_ADDR_METHOD_FLAT 1
|
||||
|
||||
struct dma_window {
|
||||
u32 liobn; /* Unique per vdevice */
|
||||
u64 tce_base; /* Physical location of the TCE table */
|
||||
u64 tce_size; /* Size of the TCE table in bytes */
|
||||
};
|
||||
|
||||
struct target_dds {
|
||||
u64 unit_id; /* 64 bit will force alignment */
|
||||
#define NUM_DMA_WINDOWS 2
|
||||
#define LOCAL 0
|
||||
#define REMOTE 1
|
||||
struct dma_window window[NUM_DMA_WINDOWS];
|
||||
|
||||
/* root node property "ibm,partition-no" */
|
||||
uint partition_num;
|
||||
char partition_name[PARTITION_NAMELEN];
|
||||
};
|
||||
|
||||
#define MAX_NUM_PORTS 1
|
||||
#define MAX_H_COPY_RDMA (128 * 1024)
|
||||
|
||||
#define MAX_EYE 64
|
||||
|
||||
/* Return codes */
|
||||
#define ADAPT_SUCCESS 0L
|
||||
/* choose error codes that do not conflict with PHYP */
|
||||
#define ERROR -40L
|
||||
|
||||
struct format_code {
|
||||
u8 reserved;
|
||||
u8 buffers;
|
||||
};
|
||||
|
||||
struct client_info {
|
||||
#define SRP_VERSION "16.a"
|
||||
char srp_version[8];
|
||||
/* root node property ibm,partition-name */
|
||||
char partition_name[PARTITION_NAMELEN];
|
||||
/* root node property ibm,partition-no */
|
||||
u32 partition_number;
|
||||
/* initially 1 */
|
||||
u32 mad_version;
|
||||
u32 os_type;
|
||||
};
|
||||
|
||||
/*
|
||||
* Changing this constant changes the number of seconds to wait before
|
||||
* considering the client will never service its queue again.
|
||||
*/
|
||||
#define SECONDS_TO_CONSIDER_FAILED 30
|
||||
/*
|
||||
* These constants set the polling period used to determine if the client
|
||||
* has freed at least one element in the response queue.
|
||||
*/
|
||||
#define WAIT_SECONDS 1
|
||||
#define WAIT_NANO_SECONDS 5000
|
||||
#define MAX_TIMER_POPS ((1000000 / WAIT_NANO_SECONDS) * \
|
||||
SECONDS_TO_CONSIDER_FAILED)
|
||||
/*
|
||||
* general purpose timer control block
|
||||
* which can be used for multiple functions
|
||||
*/
|
||||
struct timer_cb {
|
||||
struct hrtimer timer;
|
||||
/*
|
||||
* how long has it been since the client
|
||||
* serviced the queue. The variable is incremented
|
||||
* in the service_wait_q routine and cleared
|
||||
* in send messages
|
||||
*/
|
||||
int timer_pops;
|
||||
/* the timer is started */
|
||||
bool started;
|
||||
};
|
||||
|
||||
struct cmd_queue {
|
||||
/* kva */
|
||||
struct viosrp_crq *base_addr;
|
||||
dma_addr_t crq_token;
|
||||
/* used to maintain index */
|
||||
uint mask;
|
||||
/* current element */
|
||||
uint index;
|
||||
int size;
|
||||
};
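The mask/index pair above is the usual power-of-two ring bookkeeping; a hypothetical walker, not part of this patch (the real consumer lives in ibmvscsi_tgt.c), might look like:

/* Hypothetical sketch only: advance through the CRQ ring. */
static inline struct viosrp_crq *cmd_q_next(struct cmd_queue *q)
{
	struct viosrp_crq *crq = &q->base_addr[q->index];

	/* for a power-of-two queue, mask = size - 1, so AND wraps the index */
	q->index = (q->index + 1) & q->mask;
	return crq;
}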
|
||||
|
||||
#define SCSOLNT_RESP_SHIFT 1
|
||||
#define UCSOLNT_RESP_SHIFT 2
|
||||
|
||||
#define SCSOLNT BIT(SCSOLNT_RESP_SHIFT)
|
||||
#define UCSOLNT BIT(UCSOLNT_RESP_SHIFT)
|
||||
|
||||
enum cmd_type {
|
||||
SCSI_CDB = 0x01,
|
||||
TASK_MANAGEMENT = 0x02,
|
||||
/* MAD or addressed to port 0 */
|
||||
ADAPTER_MAD = 0x04,
|
||||
UNSET_TYPE = 0x08,
|
||||
};
|
||||
|
||||
struct iu_rsp {
|
||||
u8 format;
|
||||
u8 sol_not;
|
||||
u16 len;
|
||||
/* tag is just to help client identify cmd, so don't translate be/le */
|
||||
u64 tag;
|
||||
};
|
||||
|
||||
struct ibmvscsis_cmd {
|
||||
struct list_head list;
|
||||
/* Used for TCM Core operations */
|
||||
struct se_cmd se_cmd;
|
||||
struct iu_entry *iue;
|
||||
struct iu_rsp rsp;
|
||||
struct work_struct work;
|
||||
struct scsi_info *adapter;
|
||||
/* Sense buffer that will be mapped into outgoing status */
|
||||
unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
|
||||
u64 init_time;
|
||||
#define CMD_FAST_FAIL BIT(0)
|
||||
u32 flags;
|
||||
char type;
|
||||
};
|
||||
|
||||
struct ibmvscsis_nexus {
|
||||
struct se_session *se_sess;
|
||||
};
|
||||
|
||||
struct ibmvscsis_tport {
|
||||
/* SCSI protocol the tport is providing */
|
||||
u8 tport_proto_id;
|
||||
/* ASCII formatted WWPN for SRP Target port */
|
||||
char tport_name[IBMVSCSIS_NAMELEN];
|
||||
/* Returned by ibmvscsis_make_tport() */
|
||||
struct se_wwn tport_wwn;
|
||||
/* Returned by ibmvscsis_make_tpg() */
|
||||
struct se_portal_group se_tpg;
|
||||
/* ibmvscsis port target portal group tag for TCM */
|
||||
u16 tport_tpgt;
|
||||
/* Pointer to TCM session for I_T Nexus */
|
||||
struct ibmvscsis_nexus *ibmv_nexus;
|
||||
bool enabled;
|
||||
bool releasing;
|
||||
};
|
||||
|
||||
struct scsi_info {
|
||||
struct list_head list;
|
||||
char eye[MAX_EYE];
|
||||
|
||||
/* commands waiting for space on response queue */
|
||||
struct list_head waiting_rsp;
|
||||
#define NO_QUEUE 0x00
|
||||
#define WAIT_ENABLED 0X01
|
||||
/* driver has received an initialize command */
|
||||
#define PART_UP_WAIT_ENAB 0x02
|
||||
#define WAIT_CONNECTION 0x04
|
||||
/* have established a connection */
|
||||
#define CONNECTED 0x08
|
||||
/* at least one port is processing SRP IU */
|
||||
#define SRP_PROCESSING 0x10
|
||||
/* remove request received */
|
||||
#define UNCONFIGURING 0x20
|
||||
/* disconnect by letting adapter go idle, no error */
|
||||
#define WAIT_IDLE 0x40
|
||||
/* disconnecting to clear an error */
|
||||
#define ERR_DISCONNECT 0x80
|
||||
/* disconnect to clear error state, then come back up */
|
||||
#define ERR_DISCONNECT_RECONNECT 0x100
|
||||
/* disconnected after clearing an error */
|
||||
#define ERR_DISCONNECTED 0x200
|
||||
/* A series of errors caused unexpected errors */
|
||||
#define UNDEFINED 0x400
|
||||
u16 state;
|
||||
int fast_fail;
|
||||
struct target_dds dds;
|
||||
char *cmd_pool;
|
||||
/* list of free commands */
|
||||
struct list_head free_cmd;
|
||||
/* command elements ready for scheduler */
|
||||
struct list_head schedule_q;
|
||||
/* commands sent to TCM */
|
||||
struct list_head active_q;
|
||||
caddr_t *map_buf;
|
||||
/* ioba of map buffer */
|
||||
dma_addr_t map_ioba;
|
||||
/* allowable number of outstanding SRP requests */
|
||||
int request_limit;
|
||||
/* extra credit */
|
||||
int credit;
|
||||
/* outstanding transactions against credit limit */
|
||||
int debit;
|
||||
|
||||
/* allow only one outstanding mad request */
|
||||
#define PROCESSING_MAD 0x00002
|
||||
/* Waiting to go idle */
|
||||
#define WAIT_FOR_IDLE 0x00004
|
||||
/* H_REG_CRQ called */
|
||||
#define CRQ_CLOSED 0x00010
|
||||
/* detected that client has failed */
|
||||
#define CLIENT_FAILED 0x00040
|
||||
/* detected that transport event occurred */
|
||||
#define TRANS_EVENT 0x00080
|
||||
/* don't attempt to send anything to the client */
|
||||
#define RESPONSE_Q_DOWN 0x00100
|
||||
/* request made to schedule disconnect handler */
|
||||
#define SCHEDULE_DISCONNECT 0x00400
|
||||
/* disconnect handler is scheduled */
|
||||
#define DISCONNECT_SCHEDULED 0x00800
|
||||
u32 flags;
|
||||
/* adapter lock */
|
||||
spinlock_t intr_lock;
|
||||
/* information needed to manage command queue */
|
||||
struct cmd_queue cmd_q;
|
||||
/* used in hcall to copy response back into srp buffer */
|
||||
u64 empty_iu_id;
|
||||
/* used in crq, to tag what iu the response is for */
|
||||
u64 empty_iu_tag;
|
||||
uint new_state;
|
||||
/* control block for the response queue timer */
|
||||
struct timer_cb rsp_q_timer;
|
||||
/* keep last client to enable proper accounting */
|
||||
struct client_info client_data;
|
||||
/* what can this client do */
|
||||
u32 client_cap;
|
||||
/*
|
||||
* The following two fields capture state and flag changes that
|
||||
* can occur when the lock is given up. In the original design,
|
||||
* the lock was held during calls into phyp;
|
||||
* however, phyp did not meet PAPR architecture. This is
|
||||
* a work around.
|
||||
*/
|
||||
u16 phyp_acr_state;
|
||||
u32 phyp_acr_flags;
|
||||
|
||||
struct workqueue_struct *work_q;
|
||||
struct completion wait_idle;
|
||||
struct device dev;
|
||||
struct vio_dev *dma_dev;
|
||||
struct srp_target target;
|
||||
struct ibmvscsis_tport tport;
|
||||
struct tasklet_struct work_task;
|
||||
struct work_struct proc_work;
|
||||
};
|
||||
|
||||
/*
|
||||
* Provide a constant that allows software to detect the adapter is
|
||||
* disconnecting from the client from one of several states.
|
||||
*/
|
||||
#define IS_DISCONNECTING (UNCONFIGURING | ERR_DISCONNECT_RECONNECT | \
|
||||
ERR_DISCONNECT)
|
||||
|
||||
/*
|
||||
* Provide a constant that can be used with interrupt handling that
|
||||
* essentially lets the interrupt handler know that all requests should
|
||||
* be thrown out.
|
||||
*/
|
||||
#define DONT_PROCESS_STATE (IS_DISCONNECTING | UNDEFINED | \
|
||||
ERR_DISCONNECTED | WAIT_IDLE)
|
||||
|
||||
/*
|
||||
* If any of these flag bits are set then do not allow the interrupt
|
||||
* handler to schedule the off level handler.
|
||||
*/
|
||||
#define BLOCK (DISCONNECT_SCHEDULED)
|
||||
|
||||
/* State and transition events that stop the interrupt handler */
|
||||
#define TARGET_STOP(VSCSI) (long)(((VSCSI)->state & DONT_PROCESS_STATE) | \
|
||||
((VSCSI)->flags & BLOCK))
|
||||
|
||||
/* flag bit that are not reset during disconnect */
|
||||
#define PRESERVE_FLAG_FIELDS 0
|
||||
|
||||
#define vio_iu(IUE) ((union viosrp_iu *)((IUE)->sbuf->buf))
|
||||
|
||||
#define READ_CMD(cdb) (((cdb)[0] & 0x1F) == 8)
|
||||
#define WRITE_CMD(cdb) (((cdb)[0] & 0x1F) == 0xA)
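The 0x1F masking works because the SCSI READ and WRITE opcode families differ only in their top three group-code bits; a worked check with the standard opcode values:

/*
 * Worked example (standard SCSI opcode values):
 *   READ_6  = 0x08, READ_10  = 0x28, READ_12  = 0xA8, READ_16  = 0x88
 *   WRITE_6 = 0x0A, WRITE_10 = 0x2A, WRITE_12 = 0xAA, WRITE_16 = 0x8A
 *
 * "& 0x1F" strips the group code in the top three bits, leaving 0x08 for
 * every READ variant and 0x0A for every WRITE variant, which is exactly
 * what READ_CMD()/WRITE_CMD() test for.
 */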
|
||||
|
||||
#ifndef H_GET_PARTNER_INFO
|
||||
#define H_GET_PARTNER_INFO 0x0000000000000008LL
|
||||
#endif
|
||||
|
||||
#define h_copy_rdma(l, sa, sb, da, db) \
|
||||
plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
|
||||
#define h_vioctl(u, o, a, u1, u2, u3, u4) \
|
||||
plpar_hcall_norets(H_VIOCTL, u, o, a, u1, u2)
|
||||
#define h_reg_crq(ua, tok, sz) \
|
||||
plpar_hcall_norets(H_REG_CRQ, ua, tok, sz)
|
||||
#define h_free_crq(ua) \
|
||||
plpar_hcall_norets(H_FREE_CRQ, ua)
|
||||
#define h_send_crq(ua, d1, d2) \
|
||||
plpar_hcall_norets(H_SEND_CRQ, ua, d1, d2)
|
||||
|
||||
#endif
|
|
drivers/scsi/ibmvscsi_tgt/libsrp.c
@@ -0,0 +1,427 @@
|
|||
/*******************************************************************************
|
||||
* SCSI RDMA Protocol lib functions
|
||||
*
|
||||
* Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
|
||||
* Copyright (C) 2016 Bryant G. Ly <bryantly@linux.vnet.ibm.com> IBM Corp.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
***********************************************************************/
|
||||
|
||||
#define pr_fmt(fmt) "libsrp: " fmt
|
||||
|
||||
#include <linux/printk.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/kfifo.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/module.h>
|
||||
#include <scsi/srp.h>
|
||||
#include <target/target_core_base.h>
|
||||
#include "libsrp.h"
|
||||
#include "ibmvscsi_tgt.h"
|
||||
|
||||
static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
|
||||
struct srp_buf **ring)
|
||||
{
|
||||
struct iu_entry *iue;
|
||||
int i;
|
||||
|
||||
q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL);
|
||||
if (!q->pool)
|
||||
return -ENOMEM;
|
||||
q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL);
|
||||
if (!q->items)
|
||||
goto free_pool;
|
||||
|
||||
spin_lock_init(&q->lock);
|
||||
kfifo_init(&q->queue, (void *)q->pool, max * sizeof(void *));
|
||||
|
||||
for (i = 0, iue = q->items; i < max; i++) {
|
||||
kfifo_in(&q->queue, (void *)&iue, sizeof(void *));
|
||||
iue->sbuf = ring[i];
|
||||
iue++;
|
||||
}
|
||||
return 0;
|
||||
|
||||
free_pool:
|
||||
kfree(q->pool);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static void srp_iu_pool_free(struct srp_queue *q)
|
||||
{
|
||||
kfree(q->items);
|
||||
kfree(q->pool);
|
||||
}
|
||||
|
||||
static struct srp_buf **srp_ring_alloc(struct device *dev,
|
||||
size_t max, size_t size)
|
||||
{
|
||||
struct srp_buf **ring;
|
||||
int i;
|
||||
|
||||
ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL);
|
||||
if (!ring)
|
||||
return NULL;
|
||||
|
||||
for (i = 0; i < max; i++) {
|
||||
ring[i] = kzalloc(sizeof(*ring[i]), GFP_KERNEL);
|
||||
if (!ring[i])
|
||||
goto out;
|
||||
ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma,
|
||||
GFP_KERNEL);
|
||||
if (!ring[i]->buf)
|
||||
goto out;
|
||||
}
|
||||
return ring;
|
||||
|
||||
out:
|
||||
for (i = 0; i < max && ring[i]; i++) {
|
||||
if (ring[i]->buf) {
|
||||
dma_free_coherent(dev, size, ring[i]->buf,
|
||||
ring[i]->dma);
|
||||
}
|
||||
kfree(ring[i]);
|
||||
}
|
||||
kfree(ring);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void srp_ring_free(struct device *dev, struct srp_buf **ring,
|
||||
size_t max, size_t size)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < max; i++) {
|
||||
dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
|
||||
kfree(ring[i]);
|
||||
}
|
||||
kfree(ring);
|
||||
}
|
||||
|
||||
int srp_target_alloc(struct srp_target *target, struct device *dev,
|
||||
size_t nr, size_t iu_size)
|
||||
{
|
||||
int err;
|
||||
|
||||
spin_lock_init(&target->lock);
|
||||
|
||||
target->dev = dev;
|
||||
|
||||
target->srp_iu_size = iu_size;
|
||||
target->rx_ring_size = nr;
|
||||
target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size);
|
||||
if (!target->rx_ring)
|
||||
return -ENOMEM;
|
||||
err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring);
|
||||
if (err)
|
||||
goto free_ring;
|
||||
|
||||
dev_set_drvdata(target->dev, target);
|
||||
return 0;
|
||||
|
||||
free_ring:
|
||||
srp_ring_free(target->dev, target->rx_ring, nr, iu_size);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
void srp_target_free(struct srp_target *target)
|
||||
{
|
||||
dev_set_drvdata(target->dev, NULL);
|
||||
srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size,
|
||||
target->srp_iu_size);
|
||||
srp_iu_pool_free(&target->iu_queue);
|
||||
}
|
||||
|
||||
struct iu_entry *srp_iu_get(struct srp_target *target)
|
||||
{
|
||||
struct iu_entry *iue = NULL;
|
||||
|
||||
if (kfifo_out_locked(&target->iu_queue.queue, (void *)&iue,
|
||||
sizeof(void *),
|
||||
&target->iu_queue.lock) != sizeof(void *)) {
|
||||
WARN_ONCE(1, "unexpected fifo state");
|
||||
return NULL;
|
||||
}
|
||||
if (!iue)
|
||||
return iue;
|
||||
iue->target = target;
|
||||
iue->flags = 0;
|
||||
return iue;
|
||||
}
|
||||
|
||||
void srp_iu_put(struct iu_entry *iue)
|
||||
{
|
||||
kfifo_in_locked(&iue->target->iu_queue.queue, (void *)&iue,
|
||||
sizeof(void *), &iue->target->iu_queue.lock);
|
||||
}
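For readers new to this helper library, here is a minimal usage sketch of the IU pool API defined above; it is illustrative only, the IU count and size are made up, and error handling is trimmed:

/* Minimal, hypothetical round-trip through the libsrp IU pool. */
static int example_iu_pool_roundtrip(struct device *dev)
{
	struct srp_target target;
	struct iu_entry *iue;
	int err;

	/* 32 receive buffers of 512 bytes each; both sizes are illustrative */
	err = srp_target_alloc(&target, dev, 32, 512);
	if (err)
		return err;

	iue = srp_iu_get(&target);	/* pop a free IU from the kfifo pool */
	if (iue) {
		/* ... fill iue->sbuf->buf with an SRP IU and hand it off ... */
		srp_iu_put(iue);	/* return the IU to the pool */
	}

	srp_target_free(&target);
	return 0;
}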
|
||||
|
||||
static int srp_direct_data(struct ibmvscsis_cmd *cmd, struct srp_direct_buf *md,
|
||||
enum dma_data_direction dir, srp_rdma_t rdma_io,
|
||||
int dma_map, int ext_desc)
|
||||
{
|
||||
struct iu_entry *iue = NULL;
|
||||
struct scatterlist *sg = NULL;
|
||||
int err, nsg = 0, len;
|
||||
|
||||
if (dma_map) {
|
||||
iue = cmd->iue;
|
||||
sg = cmd->se_cmd.t_data_sg;
|
||||
nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
|
||||
DMA_BIDIRECTIONAL);
|
||||
if (!nsg) {
|
||||
pr_err("fail to map %p %d\n", iue,
|
||||
cmd->se_cmd.t_data_nents);
|
||||
return 0;
|
||||
}
|
||||
len = min(cmd->se_cmd.data_length, be32_to_cpu(md->len));
|
||||
} else {
|
||||
len = be32_to_cpu(md->len);
|
||||
}
|
||||
|
||||
err = rdma_io(cmd, sg, nsg, md, 1, dir, len);
|
||||
|
||||
if (dma_map)
|
||||
dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int srp_indirect_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
|
||||
struct srp_indirect_buf *id,
|
||||
enum dma_data_direction dir, srp_rdma_t rdma_io,
|
||||
int dma_map, int ext_desc)
|
||||
{
|
||||
struct iu_entry *iue = NULL;
|
||||
struct srp_direct_buf *md = NULL;
|
||||
struct scatterlist dummy, *sg = NULL;
|
||||
dma_addr_t token = 0;
|
||||
int err = 0;
|
||||
int nmd, nsg = 0, len;
|
||||
|
||||
if (dma_map || ext_desc) {
|
||||
iue = cmd->iue;
|
||||
sg = cmd->se_cmd.t_data_sg;
|
||||
}
|
||||
|
||||
nmd = be32_to_cpu(id->table_desc.len) / sizeof(struct srp_direct_buf);
|
||||
|
||||
if ((dir == DMA_FROM_DEVICE && nmd == srp_cmd->data_in_desc_cnt) ||
|
||||
(dir == DMA_TO_DEVICE && nmd == srp_cmd->data_out_desc_cnt)) {
|
||||
md = &id->desc_list[0];
|
||||
goto rdma;
|
||||
}
|
||||
|
||||
if (ext_desc && dma_map) {
|
||||
md = dma_alloc_coherent(iue->target->dev,
|
||||
be32_to_cpu(id->table_desc.len),
|
||||
&token, GFP_KERNEL);
|
||||
if (!md) {
|
||||
pr_err("Can't get dma memory %u\n",
|
||||
be32_to_cpu(id->table_desc.len));
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
sg_init_one(&dummy, md, be32_to_cpu(id->table_desc.len));
|
||||
sg_dma_address(&dummy) = token;
|
||||
sg_dma_len(&dummy) = be32_to_cpu(id->table_desc.len);
|
||||
err = rdma_io(cmd, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
|
||||
be32_to_cpu(id->table_desc.len));
|
||||
if (err) {
|
||||
pr_err("Error copying indirect table %d\n", err);
|
||||
goto free_mem;
|
||||
}
|
||||
} else {
|
||||
pr_err("This command uses external indirect buffer\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
rdma:
|
||||
if (dma_map) {
|
||||
nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
|
||||
DMA_BIDIRECTIONAL);
|
||||
if (!nsg) {
|
||||
pr_err("fail to map %p %d\n", iue,
|
||||
cmd->se_cmd.t_data_nents);
|
||||
err = -EIO;
|
||||
goto free_mem;
|
||||
}
|
||||
len = min(cmd->se_cmd.data_length, be32_to_cpu(id->len));
|
||||
} else {
|
||||
len = be32_to_cpu(id->len);
|
||||
}
|
||||
|
||||
err = rdma_io(cmd, sg, nsg, md, nmd, dir, len);
|
||||
|
||||
if (dma_map)
|
||||
dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
|
||||
|
||||
free_mem:
|
||||
if (token && dma_map) {
|
||||
dma_free_coherent(iue->target->dev,
|
||||
be32_to_cpu(id->table_desc.len), md, token);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static int data_out_desc_size(struct srp_cmd *cmd)
|
||||
{
|
||||
int size = 0;
|
||||
u8 fmt = cmd->buf_fmt >> 4;
|
||||
|
||||
switch (fmt) {
|
||||
case SRP_NO_DATA_DESC:
|
||||
break;
|
||||
case SRP_DATA_DESC_DIRECT:
|
||||
size = sizeof(struct srp_direct_buf);
|
||||
break;
|
||||
case SRP_DATA_DESC_INDIRECT:
|
||||
size = sizeof(struct srp_indirect_buf) +
|
||||
sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt;
|
||||
break;
|
||||
default:
|
||||
pr_err("client error. Invalid data_out_format %x\n", fmt);
|
||||
break;
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
/*
|
||||
* TODO: this can be called multiple times for a single command if it
|
||||
* has very long data.
|
||||
*/
|
||||
int srp_transfer_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
|
||||
srp_rdma_t rdma_io, int dma_map, int ext_desc)
|
||||
{
|
||||
struct srp_direct_buf *md;
|
||||
struct srp_indirect_buf *id;
|
||||
enum dma_data_direction dir;
|
||||
int offset, err = 0;
|
||||
u8 format;
|
||||
|
||||
if (!cmd->se_cmd.t_data_nents)
|
||||
return 0;
|
||||
|
||||
offset = srp_cmd->add_cdb_len & ~3;
|
||||
|
||||
dir = srp_cmd_direction(srp_cmd);
|
||||
if (dir == DMA_FROM_DEVICE)
|
||||
offset += data_out_desc_size(srp_cmd);
|
||||
|
||||
if (dir == DMA_TO_DEVICE)
|
||||
format = srp_cmd->buf_fmt >> 4;
|
||||
else
|
||||
format = srp_cmd->buf_fmt & ((1U << 4) - 1);
|
||||
|
||||
switch (format) {
|
||||
case SRP_NO_DATA_DESC:
|
||||
break;
|
||||
case SRP_DATA_DESC_DIRECT:
|
||||
md = (struct srp_direct_buf *)(srp_cmd->add_data + offset);
|
||||
err = srp_direct_data(cmd, md, dir, rdma_io, dma_map, ext_desc);
|
||||
break;
|
||||
case SRP_DATA_DESC_INDIRECT:
|
||||
id = (struct srp_indirect_buf *)(srp_cmd->add_data + offset);
|
||||
err = srp_indirect_data(cmd, srp_cmd, id, dir, rdma_io, dma_map,
|
||||
ext_desc);
|
||||
break;
|
||||
default:
|
||||
pr_err("Unknown format %d %x\n", dir, format);
|
||||
err = -EINVAL;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
u64 srp_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
|
||||
{
|
||||
struct srp_direct_buf *md;
|
||||
struct srp_indirect_buf *id;
|
||||
u64 len = 0;
|
||||
uint offset = cmd->add_cdb_len & ~3;
|
||||
u8 fmt;
|
||||
|
||||
if (dir == DMA_TO_DEVICE) {
|
||||
fmt = cmd->buf_fmt >> 4;
|
||||
} else {
|
||||
fmt = cmd->buf_fmt & ((1U << 4) - 1);
|
||||
offset += data_out_desc_size(cmd);
|
||||
}
|
||||
|
||||
switch (fmt) {
|
||||
case SRP_NO_DATA_DESC:
|
||||
break;
|
||||
case SRP_DATA_DESC_DIRECT:
|
||||
md = (struct srp_direct_buf *)(cmd->add_data + offset);
|
||||
len = be32_to_cpu(md->len);
|
||||
break;
|
||||
case SRP_DATA_DESC_INDIRECT:
|
||||
id = (struct srp_indirect_buf *)(cmd->add_data + offset);
|
||||
len = be32_to_cpu(id->len);
|
||||
break;
|
||||
default:
|
||||
pr_err("invalid data format %x\n", fmt);
|
||||
break;
|
||||
}
|
||||
return len;
|
||||
}
|
||||
|
||||
int srp_get_desc_table(struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
|
||||
u64 *data_len)
|
||||
{
|
||||
struct srp_indirect_buf *idb;
|
||||
struct srp_direct_buf *db;
|
||||
uint add_cdb_offset;
|
||||
int rc;
|
||||
|
||||
/*
|
||||
* The pointer computations below will only be compiled correctly
|
||||
* if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
|
||||
* whether srp_cmd::add_data has been declared as a byte pointer.
|
||||
*/
|
||||
BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
|
||||
&& !__same_type(srp_cmd->add_data[0], (u8)0));
|
||||
|
||||
BUG_ON(!dir);
|
||||
BUG_ON(!data_len);
|
||||
|
||||
rc = 0;
|
||||
*data_len = 0;
|
||||
|
||||
*dir = DMA_NONE;
|
||||
|
||||
if (srp_cmd->buf_fmt & 0xf)
|
||||
*dir = DMA_FROM_DEVICE;
|
||||
else if (srp_cmd->buf_fmt >> 4)
|
||||
*dir = DMA_TO_DEVICE;
|
||||
|
||||
add_cdb_offset = srp_cmd->add_cdb_len & ~3;
|
||||
if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
|
||||
((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
|
||||
db = (struct srp_direct_buf *)(srp_cmd->add_data
|
||||
+ add_cdb_offset);
|
||||
*data_len = be32_to_cpu(db->len);
|
||||
} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
|
||||
((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
|
||||
idb = (struct srp_indirect_buf *)(srp_cmd->add_data
|
||||
+ add_cdb_offset);
|
||||
|
||||
*data_len = be32_to_cpu(idb->len);
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions");
|
||||
MODULE_AUTHOR("FUJITA Tomonori");
|
||||
MODULE_LICENSE("GPL");
|
|
drivers/scsi/ibmvscsi_tgt/libsrp.h
@@ -0,0 +1,123 @@
|
|||
#ifndef __LIBSRP_H__
|
||||
#define __LIBSRP_H__
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/kfifo.h>
|
||||
#include <scsi/srp.h>
|
||||
|
||||
enum srp_valid {
|
||||
INVALIDATE_CMD_RESP_EL = 0,
|
||||
VALID_CMD_RESP_EL = 0x80,
|
||||
VALID_INIT_MSG = 0xC0,
|
||||
VALID_TRANS_EVENT = 0xFF
|
||||
};
|
||||
|
||||
enum srp_format {
|
||||
SRP_FORMAT = 1,
|
||||
MAD_FORMAT = 2,
|
||||
OS400_FORMAT = 3,
|
||||
AIX_FORMAT = 4,
|
||||
LINUX_FORMAT = 5,
|
||||
MESSAGE_IN_CRQ = 6
|
||||
};
|
||||
|
||||
enum srp_init_msg {
|
||||
INIT_MSG = 1,
|
||||
INIT_COMPLETE_MSG = 2
|
||||
};
|
||||
|
||||
enum srp_trans_event {
|
||||
UNUSED_FORMAT = 0,
|
||||
PARTNER_FAILED = 1,
|
||||
PARTNER_DEREGISTER = 2,
|
||||
MIGRATED = 6
|
||||
};
|
||||
|
||||
enum srp_status {
|
||||
HEADER_DESCRIPTOR = 0xF1,
|
||||
PING = 0xF5,
|
||||
PING_RESPONSE = 0xF6
|
||||
};
|
||||
|
||||
enum srp_mad_version {
|
||||
MAD_VERSION_1 = 1
|
||||
};
|
||||
|
||||
enum srp_os_type {
|
||||
OS400 = 1,
|
||||
LINUX = 2,
|
||||
AIX = 3,
|
||||
OFW = 4
|
||||
};
|
||||
|
||||
enum srp_task_attributes {
|
||||
SRP_SIMPLE_TASK = 0,
|
||||
SRP_HEAD_TASK = 1,
|
||||
SRP_ORDERED_TASK = 2,
|
||||
SRP_ACA_TASK = 4
|
||||
};
|
||||
|
||||
enum {
|
||||
SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE = 0,
|
||||
SRP_REQUEST_FIELDS_INVALID = 2,
|
||||
SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED = 4,
|
||||
SRP_TASK_MANAGEMENT_FUNCTION_FAILED = 5
|
||||
};
|
||||
|
||||
struct srp_buf {
|
||||
dma_addr_t dma;
|
||||
void *buf;
|
||||
};
|
||||
|
||||
struct srp_queue {
|
||||
void *pool;
|
||||
void *items;
|
||||
struct kfifo queue;
|
||||
spinlock_t lock;
|
||||
};
|
||||
|
||||
struct srp_target {
|
||||
struct device *dev;
|
||||
|
||||
spinlock_t lock;
|
||||
struct list_head cmd_queue;
|
||||
|
||||
size_t srp_iu_size;
|
||||
struct srp_queue iu_queue;
|
||||
size_t rx_ring_size;
|
||||
struct srp_buf **rx_ring;
|
||||
|
||||
void *ldata;
|
||||
};
|
||||
|
||||
struct iu_entry {
|
||||
struct srp_target *target;
|
||||
|
||||
struct list_head ilist;
|
||||
dma_addr_t remote_token;
|
||||
unsigned long flags;
|
||||
|
||||
struct srp_buf *sbuf;
|
||||
u16 iu_len;
|
||||
};
|
||||
|
||||
struct ibmvscsis_cmd;
|
||||
|
||||
typedef int (srp_rdma_t)(struct ibmvscsis_cmd *, struct scatterlist *, int,
|
||||
struct srp_direct_buf *, int,
|
||||
enum dma_data_direction, unsigned int);
|
||||
int srp_target_alloc(struct srp_target *, struct device *, size_t, size_t);
|
||||
void srp_target_free(struct srp_target *);
|
||||
struct iu_entry *srp_iu_get(struct srp_target *);
|
||||
void srp_iu_put(struct iu_entry *);
|
||||
int srp_transfer_data(struct ibmvscsis_cmd *, struct srp_cmd *,
|
||||
srp_rdma_t, int, int);
|
||||
u64 srp_data_length(struct srp_cmd *cmd, enum dma_data_direction dir);
|
||||
int srp_get_desc_table(struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
|
||||
u64 *data_len);
|
||||
static inline int srp_cmd_direction(struct srp_cmd *cmd)
|
||||
{
|
||||
return (cmd->buf_fmt >> 4) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
|
||||
}
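As used throughout libsrp.c, the two halves of srp_cmd::buf_fmt describe the two possible data buffers; a small illustrative helper (not part of this patch) makes the convention explicit:

/*
 * Illustrative only: per the SRP buffer-format convention relied on above,
 * the upper nibble of buf_fmt describes the data-out buffer (initiator to
 * target, DMA_TO_DEVICE here) and the lower nibble the data-in buffer.
 */
static inline void srp_cmd_buf_formats(struct srp_cmd *cmd, u8 *out_fmt, u8 *in_fmt)
{
	*out_fmt = cmd->buf_fmt >> 4;	/* data-out descriptor format */
	*in_fmt = cmd->buf_fmt & 0xf;	/* data-in descriptor format */
}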
|
||||
|
||||
#endif
|
|
@ -492,7 +492,8 @@ void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
|
|||
bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
|
||||
|
||||
spin_lock_bh(&conn->cmd_lock);
|
||||
if (!list_empty(&cmd->i_conn_node))
|
||||
if (!list_empty(&cmd->i_conn_node) &&
|
||||
!(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP))
|
||||
list_del_init(&cmd->i_conn_node);
|
||||
spin_unlock_bh(&conn->cmd_lock);
|
||||
|
||||
|
@ -4034,6 +4035,7 @@ int iscsi_target_rx_thread(void *arg)
|
|||
|
||||
static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
|
||||
{
|
||||
LIST_HEAD(tmp_list);
|
||||
struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
|
||||
struct iscsi_session *sess = conn->sess;
|
||||
/*
|
||||
|
@ -4042,18 +4044,26 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
|
|||
* has been reset -> returned sleeping pre-handler state.
|
||||
*/
|
||||
spin_lock_bh(&conn->cmd_lock);
|
||||
list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
|
||||
list_splice_init(&conn->conn_cmd_list, &tmp_list);
|
||||
|
||||
list_del_init(&cmd->i_conn_node);
|
||||
spin_unlock_bh(&conn->cmd_lock);
|
||||
list_for_each_entry(cmd, &tmp_list, i_conn_node) {
|
||||
struct se_cmd *se_cmd = &cmd->se_cmd;
|
||||
|
||||
iscsit_increment_maxcmdsn(cmd, sess);
|
||||
|
||||
iscsit_free_cmd(cmd, true);
|
||||
|
||||
spin_lock_bh(&conn->cmd_lock);
|
||||
if (se_cmd->se_tfo != NULL) {
|
||||
spin_lock(&se_cmd->t_state_lock);
|
||||
se_cmd->transport_state |= CMD_T_FABRIC_STOP;
|
||||
spin_unlock(&se_cmd->t_state_lock);
|
||||
}
|
||||
}
|
||||
spin_unlock_bh(&conn->cmd_lock);
|
||||
|
||||
list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
|
||||
list_del_init(&cmd->i_conn_node);
|
||||
|
||||
iscsit_increment_maxcmdsn(cmd, sess);
|
||||
iscsit_free_cmd(cmd, true);
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
static void iscsit_stop_timers_for_cmds(
|
||||
|
|
|
@ -1371,8 +1371,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
|
|||
}
|
||||
login->zero_tsih = zero_tsih;
|
||||
|
||||
conn->sess->se_sess->sup_prot_ops =
|
||||
conn->conn_transport->iscsit_get_sup_prot_ops(conn);
|
||||
if (conn->sess)
|
||||
conn->sess->se_sess->sup_prot_ops =
|
||||
conn->conn_transport->iscsit_get_sup_prot_ops(conn);
|
||||
|
||||
tpg = conn->tpg;
|
||||
if (!tpg) {
|
||||
|
|
|
drivers/target/target_core_device.c
@@ -821,13 +821,15 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 * in ATA and we need to set TPE=1
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct request_queue *q, int block_size)
				       struct request_queue *q)
{
	int block_size = queue_logical_block_size(q);

	if (!blk_queue_discard(q))
		return false;

	attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
								block_size;
	attrib->max_unmap_lba_count =
		q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code..
	 */
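A worked example of the overflow the new calculation avoids; it assumes q->limits.max_discard_sectors is the 32-bit unsigned int it is declared as in struct queue_limits, and the sample value is illustrative:

/*
 * Illustrative arithmetic only -- not part of the patch.
 *
 * Old: (max_discard_sectors << 9) / block_size
 *   With max_discard_sectors = 0x00FFFFFF (about 8 GiB worth of 512-byte
 *   sectors), the shift needs 0x1FFFFFE00, which no longer fits in the
 *   32-bit type, so the value wraps before the divide and the resulting
 *   max_unmap_lba_count is garbage.
 *
 * New: max_discard_sectors >> (ilog2(block_size) - 9)
 *   block_size = 512  -> shift by 0 -> 0x00FFFFFF LBAs
 *   block_size = 4096 -> shift by 3 -> 0x001FFFFF LBAs
 *   No intermediate result ever grows past the original 32-bit range.
 */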
@ -161,8 +161,7 @@ static int fd_configure_device(struct se_device *dev)
|
|||
dev_size, div_u64(dev_size, fd_dev->fd_block_size),
|
||||
fd_dev->fd_block_size);
|
||||
|
||||
if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
|
||||
fd_dev->fd_block_size))
|
||||
if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
|
||||
pr_debug("IFILE: BLOCK Discard support available,"
|
||||
" disabled by default\n");
|
||||
/*
|
||||
|
@ -523,7 +522,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
|
|||
*/
|
||||
if (cmd->data_length > FD_MAX_BYTES) {
|
||||
pr_err("FILEIO: Not able to process I/O of %u bytes due to"
|
||||
"FD_MAX_BYTES: %u iovec count limitiation\n",
|
||||
"FD_MAX_BYTES: %u iovec count limitation\n",
|
||||
cmd->data_length, FD_MAX_BYTES);
|
||||
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
||||
}
|
||||
|
|
|
@ -121,8 +121,7 @@ static int iblock_configure_device(struct se_device *dev)
|
|||
dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
|
||||
dev->dev_attrib.hw_queue_depth = q->nr_requests;
|
||||
|
||||
if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
|
||||
dev->dev_attrib.hw_block_size))
|
||||
if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
|
||||
pr_debug("IBLOCK: BLOCK Discard support available,"
|
||||
" disabled by default\n");
|
||||
|
||||
|
|
|
@ -146,6 +146,7 @@ sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
|
|||
void target_qf_do_work(struct work_struct *work);
|
||||
bool target_check_wce(struct se_device *dev);
|
||||
bool target_check_fua(struct se_device *dev);
|
||||
void __target_execute_cmd(struct se_cmd *, bool);
|
||||
|
||||
/* target_core_stat.c */
|
||||
void target_stat_setup_dev_default_groups(struct se_device *);
|
||||
|
|
|
@ -602,7 +602,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
|
|||
cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
|
||||
spin_unlock_irq(&cmd->t_state_lock);
|
||||
|
||||
__target_execute_cmd(cmd);
|
||||
__target_execute_cmd(cmd, false);
|
||||
|
||||
kfree(buf);
|
||||
return ret;
|
||||
|
|
|
@ -754,7 +754,15 @@ EXPORT_SYMBOL(target_complete_cmd);
|
|||
|
||||
void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
|
||||
{
|
||||
if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
|
||||
if (scsi_status != SAM_STAT_GOOD) {
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Calculate new residual count based upon length of SCSI data
|
||||
* transferred.
|
||||
*/
|
||||
if (length < cmd->data_length) {
|
||||
if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
|
||||
cmd->residual_count += cmd->data_length - length;
|
||||
} else {
|
||||
|
@ -763,6 +771,12 @@ void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int len
|
|||
}
|
||||
|
||||
cmd->data_length = length;
|
||||
} else if (length > cmd->data_length) {
|
||||
cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
|
||||
cmd->residual_count = length - cmd->data_length;
|
||||
} else {
|
||||
cmd->se_cmd_flags &= ~(SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT);
|
||||
cmd->residual_count = 0;
|
||||
}
|
||||
|
||||
target_complete_cmd(cmd, scsi_status);
|
||||
|
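A small worked example of the residual bookkeeping added above; the numbers are illustrative, not from the patch:

/*
 * Illustrative only.  Suppose the initiator allocated 256 bytes for an
 * INQUIRY response (cmd->data_length == 256) but the backend produced
 * only 58 bytes (length == 58):
 *
 *   length < data_length  -> SCF_UNDERFLOW_BIT is set,
 *                            residual_count = 256 - 58 = 198,
 *                            and data_length is trimmed to 58.
 *
 * Had the backend produced 300 bytes instead (length == 300):
 *
 *   length > data_length  -> SCF_OVERFLOW_BIT is set,
 *                            residual_count = 300 - 256 = 44.
 *
 * When the two lengths match, both flags are cleared and residual_count
 * is reset to 0; the overflow and exact-match branches are the cases the
 * old single check skipped entirely.
 */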
@ -1303,23 +1317,6 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
|
|||
|
||||
trace_target_sequencer_start(cmd);
|
||||
|
||||
/*
|
||||
* Check for an existing UNIT ATTENTION condition
|
||||
*/
|
||||
ret = target_scsi3_ua_check(cmd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = target_alua_state_check(cmd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = target_check_reservation(cmd);
|
||||
if (ret) {
|
||||
cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = dev->transport->parse_cdb(cmd);
|
||||
if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
|
||||
pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
|
||||
|
@ -1761,20 +1758,45 @@ queue_full:
|
|||
}
|
||||
EXPORT_SYMBOL(transport_generic_request_failure);
|
||||
|
||||
void __target_execute_cmd(struct se_cmd *cmd)
|
||||
void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
|
||||
{
|
||||
sense_reason_t ret;
|
||||
|
||||
if (cmd->execute_cmd) {
|
||||
ret = cmd->execute_cmd(cmd);
|
||||
if (ret) {
|
||||
spin_lock_irq(&cmd->t_state_lock);
|
||||
cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
|
||||
spin_unlock_irq(&cmd->t_state_lock);
|
||||
if (!cmd->execute_cmd) {
|
||||
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
||||
goto err;
|
||||
}
|
||||
if (do_checks) {
|
||||
/*
|
||||
* Check for an existing UNIT ATTENTION condition after
|
||||
* target_handle_task_attr() has done SAM task attr
|
||||
* checking, and possibly have already defered execution
|
||||
* out to target_restart_delayed_cmds() context.
|
||||
*/
|
||||
ret = target_scsi3_ua_check(cmd);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
transport_generic_request_failure(cmd, ret);
|
||||
ret = target_alua_state_check(cmd);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
ret = target_check_reservation(cmd);
|
||||
if (ret) {
|
||||
cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
|
||||
ret = cmd->execute_cmd(cmd);
|
||||
if (!ret)
|
||||
return;
|
||||
err:
|
||||
spin_lock_irq(&cmd->t_state_lock);
|
||||
cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
|
||||
spin_unlock_irq(&cmd->t_state_lock);
|
||||
|
||||
transport_generic_request_failure(cmd, ret);
|
||||
}
|
||||
|
||||
static int target_write_prot_action(struct se_cmd *cmd)
|
||||
|
@ -1819,6 +1841,8 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
|
|||
if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
|
||||
return false;
|
||||
|
||||
cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
|
||||
|
||||
/*
|
||||
* Check for the existence of HEAD_OF_QUEUE, and if true return 1
|
||||
* to allow the passed struct se_cmd list of tasks to the front of the list.
|
||||
|
@ -1899,7 +1923,7 @@ void target_execute_cmd(struct se_cmd *cmd)
|
|||
return;
|
||||
}
|
||||
|
||||
__target_execute_cmd(cmd);
|
||||
__target_execute_cmd(cmd, true);
|
||||
}
|
||||
EXPORT_SYMBOL(target_execute_cmd);
|
||||
|
||||
|
@ -1923,7 +1947,7 @@ static void target_restart_delayed_cmds(struct se_device *dev)
|
|||
list_del(&cmd->se_delayed_node);
|
||||
spin_unlock(&dev->delayed_cmd_lock);
|
||||
|
||||
__target_execute_cmd(cmd);
|
||||
__target_execute_cmd(cmd, true);
|
||||
|
||||
if (cmd->sam_task_attr == TCM_ORDERED_TAG)
|
||||
break;
|
||||
|
@ -1941,6 +1965,9 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
|
|||
if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
|
||||
return;
|
||||
|
||||
if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
|
||||
goto restart;
|
||||
|
||||
if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
|
||||
atomic_dec_mb(&dev->simple_cmds);
|
||||
dev->dev_cur_ordered_id++;
|
||||
|
@ -1957,7 +1984,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
|
|||
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
|
||||
dev->dev_cur_ordered_id);
|
||||
}
|
||||
|
||||
restart:
|
||||
target_restart_delayed_cmds(dev);
|
||||
}
|
||||
|
||||
|
@ -2557,15 +2584,10 @@ static void target_release_cmd_kref(struct kref *kref)
|
|||
bool fabric_stop;
|
||||
|
||||
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
|
||||
if (list_empty(&se_cmd->se_cmd_list)) {
|
||||
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
|
||||
target_free_cmd_mem(se_cmd);
|
||||
se_cmd->se_tfo->release_cmd(se_cmd);
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock(&se_cmd->t_state_lock);
|
||||
fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
|
||||
fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) &&
|
||||
(se_cmd->transport_state & CMD_T_ABORTED);
|
||||
spin_unlock(&se_cmd->t_state_lock);
|
||||
|
||||
if (se_cmd->cmd_wait_set || fabric_stop) {
|
||||
|
|
|
@ -91,6 +91,7 @@ static void ft_tport_delete(struct ft_tport *tport)
|
|||
|
||||
ft_sess_delete_all(tport);
|
||||
lport = tport->lport;
|
||||
lport->service_params &= ~FCP_SPPF_TARG_FCN;
|
||||
BUG_ON(tport != lport->prov[FC_TYPE_FCP]);
|
||||
RCU_INIT_POINTER(lport->prov[FC_TYPE_FCP], NULL);
|
||||
|
||||
|
@ -110,6 +111,7 @@ void ft_lport_add(struct fc_lport *lport, void *arg)
|
|||
{
|
||||
mutex_lock(&ft_lport_lock);
|
||||
ft_tport_get(lport);
|
||||
lport->service_params |= FCP_SPPF_TARG_FCN;
|
||||
mutex_unlock(&ft_lport_lock);
|
||||
}
|
||||
|
||||
|
|
|
@ -15,11 +15,6 @@
|
|||
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
|
||||
/* GNU General Public License for more details. */
|
||||
/* */
|
||||
/* You should have received a copy of the GNU General Public License */
|
||||
/* along with this program; if not, write to the Free Software */
|
||||
/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
/* */
|
||||
/* */
|
||||
/* This file contains structures and definitions for IBM RPA (RS/6000 */
|
||||
/* platform architecture) implementation of the SRP (SCSI RDMA Protocol) */
|
||||
/* standard. SRP is used on IBM iSeries and pSeries platforms to send SCSI */
|
||||
|
@ -93,7 +88,7 @@ struct viosrp_crq {
|
|||
};
|
||||
|
||||
/* MADs are Management requests above and beyond the IUs defined in the SRP
|
||||
* standard.
|
||||
* standard.
|
||||
*/
|
||||
enum viosrp_mad_types {
|
||||
VIOSRP_EMPTY_IU_TYPE = 0x01,
|
||||
|
@ -131,7 +126,7 @@ enum viosrp_capability_flag {
|
|||
CAP_LIST_DATA = 0x08,
|
||||
};
|
||||
|
||||
/*
|
||||
/*
|
||||
* Common MAD header
|
||||
*/
|
||||
struct mad_common {
|
||||
|
@ -146,7 +141,7 @@ struct mad_common {
|
|||
* client to the server. There is no way for the server to send
|
||||
* an asynchronous message back to the client. The Empty IU is used
|
||||
* to hang out a meaningless request to the server so that it can respond
|
||||
* asynchrouously with something like a SCSI AER
|
||||
* asynchrouously with something like a SCSI AER
|
||||
*/
|
||||
struct viosrp_empty_iu {
|
||||
struct mad_common common;
|
||||
|
@ -189,7 +184,7 @@ struct mad_migration_cap {
|
|||
__be32 ecl;
|
||||
};
|
||||
|
||||
struct capabilities{
|
||||
struct capabilities {
|
||||
__be32 flags;
|
||||
char name[SRP_MAX_LOC_LEN];
|
||||
char loc[SRP_MAX_LOC_LEN];
|
|
@ -95,6 +95,6 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
|
|||
bool target_sense_desc_format(struct se_device *dev);
|
||||
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
|
||||
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
|
||||
struct request_queue *q, int block_size);
|
||||
struct request_queue *q);
|
||||
|
||||
#endif /* TARGET_CORE_BACKEND_H */
|
||||
|
|
|
@ -142,6 +142,7 @@ enum se_cmd_flags_table {
|
|||
SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
|
||||
SCF_ACK_KREF = 0x00400000,
|
||||
SCF_USE_CPUID = 0x00800000,
|
||||
SCF_TASK_ATTR_SET = 0x01000000,
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@ -163,7 +163,6 @@ int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
|
|||
void core_tmr_release_req(struct se_tmr_req *);
|
||||
int transport_generic_handle_tmr(struct se_cmd *);
|
||||
void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
|
||||
void __target_execute_cmd(struct se_cmd *);
|
||||
int transport_lookup_tmr_lun(struct se_cmd *, u64);
|
||||
void core_allocate_nexus_loss_ua(struct se_node_acl *acl);
|
||||
|
||||
|
|