/*
 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
 * Copyright (C) 2014 Red Hat, Inc.
 * Copyright (C) 2015 Arrikto, Inc.
 * Copyright (C) 2017 Chinamobile, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
#include <linux/radix-tree.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
#include <linux/configfs.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>

#include <linux/target_core_user.h>

/*
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This is to allow backends that
 * are too complex for in-kernel support to be possible.
 *
 * It uses the UIO framework to do a lot of the device-creation and
 * introspection work for us.
 *
 * See the .h file for how the ring is laid out. Note that while the
 * command ring is defined, the particulars of the data area are
 * not. Offset values in the command entry point to other locations
 * internal to the mmap()ed area. There is separate space outside the
 * command ring for data buffers. This leaves maximum flexibility for
 * moving buffer allocations, or even page flipping or other
 * allocation techniques, without altering the command ring layout.
 *
 * SECURITY:
 * The user process must be assumed to be malicious. There's no way to
 * prevent it breaking the command ring protocol if it wants, but in
 * order to prevent other issues we must only ever read *data* from
 * the shared memory area, not offsets or sizes. This applies to
 * command ring entries as well as the mailbox. Extra code needed for
 * this may have a 'UAM' comment.
 */

#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)

/* For cmd area, the size is fixed 8MB */
#define CMDR_SIZE (8 * 1024 * 1024)

/*
 * For data area, the block size is PAGE_SIZE and
 * the total size is 256K * PAGE_SIZE.
 */
#define DATA_BLOCK_SIZE PAGE_SIZE
#define DATA_BLOCK_BITS (256 * 1024)
#define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE)

/* The total size of the ring is 8M + 256K * PAGE_SIZE */
#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)

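/*
 * With 4K pages the data area spans 256K * 4K = 1 GiB of address space
 * per device; backing pages are only allocated as blocks are used.
 */
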
/*
 * Default number of global data blocks (512K * PAGE_SIZE)
 * above which the unmap work will be scheduled.
 */
#define TCMU_GLOBAL_MAX_BLOCKS (512 * 1024)

static u8 tcmu_kern_cmd_reply_supported;

static struct device *tcmu_root_device;

struct tcmu_hba {
	u32 host_id;
};

#define TCMU_CONFIG_LEN 256

struct tcmu_nl_cmd {
	/* wake up thread waiting for reply */
	struct completion complete;
	int cmd;
	int status;
};

struct tcmu_dev {
	struct list_head node;
	struct kref kref;

	struct se_device se_dev;

	char *name;
	struct se_hba *hba;

#define TCMU_DEV_BIT_OPEN 0
#define TCMU_DEV_BIT_BROKEN 1
	unsigned long flags;

	struct uio_info uio_info;

	struct inode *inode;

	struct tcmu_mailbox *mb_addr;
	size_t dev_size;
	u32 cmdr_size;
	u32 cmdr_last_cleaned;
	/* Offset of data area from start of mb */
	/* Must add data_off and mb_addr to get the address */
	size_t data_off;
	size_t data_size;

	struct mutex cmdr_lock;
	struct list_head cmdr_queue;

	uint32_t dbi_max;
	uint32_t dbi_thresh;
	DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS);
	struct radix_tree_root data_blocks;

	struct idr commands;

	struct timer_list timeout;
	unsigned int cmd_time_out;

	struct list_head timedout_entry;

	spinlock_t nl_cmd_lock;
	struct tcmu_nl_cmd curr_nl_cmd;
	/* wake up threads waiting on curr_nl_cmd */
	wait_queue_head_t nl_cmd_wq;

	char dev_config[TCMU_CONFIG_LEN];

	int nl_reply_supported;
};

#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)

#define CMDR_OFF sizeof(struct tcmu_mailbox)

struct tcmu_cmd {
	struct se_cmd *se_cmd;
	struct tcmu_dev *tcmu_dev;
	struct list_head cmdr_queue_entry;

	uint16_t cmd_id;

	/* Can't use se_cmd when cleaning up expired cmds, because if
	   cmd has been completed then accessing se_cmd is off limits */
	uint32_t dbi_cnt;
	uint32_t dbi_cur;
	uint32_t *dbi;

	unsigned long deadline;

#define TCMU_CMD_BIT_EXPIRED 0
	unsigned long flags;
};

/*
 * To avoid deadlock the mutex lock order should always be:
 *
 * mutex_lock(&root_udev_mutex);
 * ...
 * mutex_lock(&tcmu_dev->cmdr_lock);
 * mutex_unlock(&tcmu_dev->cmdr_lock);
 * ...
 * mutex_unlock(&root_udev_mutex);
 */
static DEFINE_MUTEX(root_udev_mutex);
static LIST_HEAD(root_udev);

static DEFINE_SPINLOCK(timed_out_udevs_lock);
static LIST_HEAD(timed_out_udevs);

static atomic_t global_db_count = ATOMIC_INIT(0);
static struct delayed_work tcmu_unmap_work;

static struct kmem_cache *tcmu_cmd_cache;

/* multicast group */
enum tcmu_multicast_groups {
	TCMU_MCGRP_CONFIG,
};

static const struct genl_multicast_group tcmu_mcgrps[] = {
	[TCMU_MCGRP_CONFIG] = { .name = "config", },
};

static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
	[TCMU_ATTR_DEVICE] = { .type = NLA_STRING },
	[TCMU_ATTR_MINOR] = { .type = NLA_U32 },
	[TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 },
	[TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 },
	[TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
};

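/*
 * Common completion path for netlink replies from the userspace daemon:
 * validate the reply attributes, look up the device, check that the reply
 * matches the outstanding command, record its status and wake the waiter.
 */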
static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
{
	struct se_device *dev;
	struct tcmu_dev *udev;
	struct tcmu_nl_cmd *nl_cmd;
	int dev_id, rc, ret = 0;
	bool is_removed = (completed_cmd == TCMU_CMD_REMOVED_DEVICE);

	if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
	    !info->attrs[TCMU_ATTR_DEVICE_ID]) {
		printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
		return -EINVAL;
	}

	dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
	rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);

	dev = target_find_device(dev_id, !is_removed);
	if (!dev) {
		printk(KERN_ERR "tcmu nl cmd %u/%u completion could not find device with dev id %u.\n",
		       completed_cmd, rc, dev_id);
		return -ENODEV;
	}
	udev = TCMU_DEV(dev);

	spin_lock(&udev->nl_cmd_lock);
	nl_cmd = &udev->curr_nl_cmd;

	pr_debug("genl cmd done got id %d curr %d done %d rc %d\n", dev_id,
		 nl_cmd->cmd, completed_cmd, rc);

	if (nl_cmd->cmd != completed_cmd) {
		printk(KERN_ERR "Mismatched commands (Expecting reply for %d. Current %d).\n",
		       completed_cmd, nl_cmd->cmd);
		ret = -EINVAL;
	} else {
		nl_cmd->status = rc;
	}

	spin_unlock(&udev->nl_cmd_lock);
	if (!is_removed)
		target_undepend_item(&dev->dev_group.cg_item);
	if (!ret)
		complete(&nl_cmd->complete);
	return ret;
}

static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
}

static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
}

static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
				       struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
}

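/* Record whether the userspace daemon will acknowledge kernel netlink events. */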
static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
		tcmu_kern_cmd_reply_supported =
			nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
		printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
		       tcmu_kern_cmd_reply_supported);
	}

	return 0;
}

static const struct genl_ops tcmu_genl_ops[] = {
	{
		.cmd = TCMU_CMD_SET_FEATURES,
		.flags = GENL_ADMIN_PERM,
		.policy = tcmu_attr_policy,
		.doit = tcmu_genl_set_features,
	},
	{
		.cmd = TCMU_CMD_ADDED_DEVICE_DONE,
		.flags = GENL_ADMIN_PERM,
		.policy = tcmu_attr_policy,
		.doit = tcmu_genl_add_dev_done,
	},
	{
		.cmd = TCMU_CMD_REMOVED_DEVICE_DONE,
		.flags = GENL_ADMIN_PERM,
		.policy = tcmu_attr_policy,
		.doit = tcmu_genl_rm_dev_done,
	},
	{
		.cmd = TCMU_CMD_RECONFIG_DEVICE_DONE,
		.flags = GENL_ADMIN_PERM,
		.policy = tcmu_attr_policy,
		.doit = tcmu_genl_reconfig_dev_done,
	},
};

/* Our generic netlink family */
static struct genl_family tcmu_genl_family __ro_after_init = {
	.module = THIS_MODULE,
	.hdrsize = 0,
	.name = "TCM-USER",
	.version = 2,
	.maxattr = TCMU_ATTR_MAX,
	.mcgrps = tcmu_mcgrps,
	.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
	.netnsok = true,
	.ops = tcmu_genl_ops,
	.n_ops = ARRAY_SIZE(tcmu_genl_ops),
};

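/*
 * Per-command bookkeeping of data area blocks: each tcmu_cmd carries an
 * array of data block indices (dbi) into the device's data_blocks radix
 * tree, with dbi_cur tracking the next slot to fill or consume.
 */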
#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
#define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
#define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
#define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])

static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	uint32_t i;

	for (i = 0; i < len; i++)
		clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
}

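/*
 * Reserve one free data block below dbi_thresh for @tcmu_cmd, allocating
 * and inserting a backing page into the radix tree on first use. If the
 * global block count exceeds TCMU_GLOBAL_MAX_BLOCKS, the unmap work is
 * scheduled.
 */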
static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
					struct tcmu_cmd *tcmu_cmd)
{
	struct page *page;
	int ret, dbi;

	dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
	if (dbi == udev->dbi_thresh)
		return false;

	page = radix_tree_lookup(&udev->data_blocks, dbi);
	if (!page) {
		if (atomic_add_return(1, &global_db_count) >
				      TCMU_GLOBAL_MAX_BLOCKS)
			schedule_delayed_work(&tcmu_unmap_work, 0);

		/* try to get new page from the mm */
		page = alloc_page(GFP_KERNEL);
		if (!page)
			goto err_alloc;

		ret = radix_tree_insert(&udev->data_blocks, dbi, page);
		if (ret)
			goto err_insert;
	}

	if (dbi > udev->dbi_max)
		udev->dbi_max = dbi;

	set_bit(dbi, udev->data_bitmap);
	tcmu_cmd_set_dbi(tcmu_cmd, dbi);

	return true;
err_insert:
	__free_page(page);
err_alloc:
	atomic_dec(&global_db_count);
	return false;
}

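/* Reserve data blocks for the command's remaining indices dbi_cur..dbi_cnt-1. */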
static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
				  struct tcmu_cmd *tcmu_cmd)
{
	int i;

	for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) {
		if (!tcmu_get_empty_block(udev, tcmu_cmd))
			return false;
	}
	return true;
}

static inline struct page *
tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
{
	return radix_tree_lookup(&udev->data_blocks, dbi);
}

static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
{
	kfree(tcmu_cmd->dbi);
	kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
}

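/*
 * Data area space a command needs: its data length rounded up to whole
 * blocks, plus the first BIDI scatterlist entry's length (also rounded
 * up) for bidirectional commands.
 */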
static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);

	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
		data_length += round_up(se_cmd->t_bidi_data_sg->length,
				DATA_BLOCK_SIZE);
	}

	return data_length;
}

static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
{
	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);

	return data_length / DATA_BLOCK_SIZE;
}

static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;

	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
	if (!tcmu_cmd)
		return NULL;

	INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry);
	tcmu_cmd->se_cmd = se_cmd;
	tcmu_cmd->tcmu_dev = udev;

	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
	tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
	tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
				GFP_KERNEL);
	if (!tcmu_cmd->dbi) {
		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
		return NULL;
	}

	return tcmu_cmd;
}

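/* Flush the data cache over a virtual address range that may span pages. */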
static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
{
	unsigned long offset = offset_in_page(vaddr);
	void *start = vaddr - offset;

	size = round_up(size+offset, PAGE_SIZE);

	while (size) {
		flush_dcache_page(virt_to_page(start));
		start += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}

/*
 * Some ring helper functions. We don't assume size is a power of 2 so
 * we can't use circ_buf.h.
 */
static inline size_t spc_used(size_t head, size_t tail, size_t size)
{
	int diff = head - tail;

	if (diff >= 0)
		return diff;
	else
		return size + diff;
}

static inline size_t spc_free(size_t head, size_t tail, size_t size)
{
	/* Keep 1 byte unused or we can't tell full from empty */
	return (size - spc_used(head, tail, size) - 1);
}

static inline size_t head_to_end(size_t head, size_t size)
{
	return size - head;
}

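/* Advance to the next iovec (if any are already in use) and zero it. */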
static inline void new_iov(struct iovec **iov, int *iov_cnt)
{
	struct iovec *iovec;

	if (*iov_cnt != 0)
		(*iov)++;
	(*iov_cnt)++;

	iovec = *iov;
	memset(iovec, 0, sizeof(struct iovec));
}

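/*
 * Publish a new ring head/tail index; the release store orders the
 * preceding entry writes before the index update observed by userspace.
 */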
#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)

/* offset is relative to mb_addr */
static inline size_t get_block_offset_user(struct tcmu_dev *dev,
					   int dbi, int remaining)
{
	return dev->data_off + dbi * DATA_BLOCK_SIZE +
	       DATA_BLOCK_SIZE - remaining;
}

static inline size_t iov_tail(struct iovec *iov)
{
	return (size_t)iov->iov_base + iov->iov_len;
}

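/*
 * Walk the scatterlist and map the command's reserved data blocks one at
 * a time, copying data into them when @copy_data is set and building the
 * iovecs that describe the data area to userspace.
 */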
2017-11-28 21:40:34 +03:00
|
|
|
static void scatter_data_area(struct tcmu_dev *udev,
|
tcmu: Add dynamic growing data area feature support
Currently for the TCMU, the ring buffer size is fixed to 64K cmd
area + 1M data area, and this will be bottlenecks for high iops.
The struct tcmu_cmd_entry {} size is fixed about 112 bytes with
iovec[N] & N <= 4, and the size of struct iovec is about 16 bytes.
If N == 0, the ratio will be sizeof(cmd entry) : sizeof(datas) ==
112Bytes : (N * 4096)Bytes = 28 : 0, no data area is need.
If 0 < N <=4, the ratio will be sizeof(cmd entry) : sizeof(datas)
== 112Bytes : (N * 4096)Bytes = 28 : (N * 1024), so the max will
be 28 : 1024.
If N > 4, the sizeof(cmd entry) will be [(N - 4) *16 + 112] bytes,
and its corresponding data size will be [N * 4096], so the ratio
of sizeof(cmd entry) : sizeof(datas) == [(N - 4) * 16 + 112)Bytes
: (N * 4096)Bytes == 4/1024 - 12/(N * 1024), so the max is about
4 : 1024.
When N is bigger, the ratio will be smaller.
As the initial patch, we will set the cmd area size to 2M, and
the cmd area size to 32M. The TCMU will dynamically grows the data
area from 0 to max 32M size as needed.
The cmd area memory will be allocated through vmalloc(), and the
data area's blocks will be allocated individually later when needed.
The allocated data area block memory will be managed via radix tree.
For now the bitmap still be the most efficient way to search and
manage the block index, this could be update later.
Signed-off-by: Xiubo Li <lixiubo@cmss.chinamobile.com>
Signed-off-by: Jianfei Hu <hujianfei@cmss.chinamobile.com>
Acked-by: Mike Christie <mchristi@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
2017-05-02 06:38:05 +03:00
|
|
|
struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg,
|
|
|
|
unsigned int data_nents, struct iovec **iov,
|
|
|
|
int *iov_cnt, bool copy_data)
|
2015-04-23 21:47:00 +03:00
|
|
|
{
|
tcmu: Add dynamic growing data area feature support
Currently for the TCMU, the ring buffer size is fixed to 64K cmd
area + 1M data area, and this will be bottlenecks for high iops.
The struct tcmu_cmd_entry {} size is fixed about 112 bytes with
iovec[N] & N <= 4, and the size of struct iovec is about 16 bytes.
If N == 0, the ratio will be sizeof(cmd entry) : sizeof(datas) ==
112Bytes : (N * 4096)Bytes = 28 : 0, no data area is need.
If 0 < N <=4, the ratio will be sizeof(cmd entry) : sizeof(datas)
== 112Bytes : (N * 4096)Bytes = 28 : (N * 1024), so the max will
be 28 : 1024.
If N > 4, the sizeof(cmd entry) will be [(N - 4) *16 + 112] bytes,
and its corresponding data size will be [N * 4096], so the ratio
of sizeof(cmd entry) : sizeof(datas) == [(N - 4) * 16 + 112)Bytes
: (N * 4096)Bytes == 4/1024 - 12/(N * 1024), so the max is about
4 : 1024.
When N is bigger, the ratio will be smaller.
As the initial patch, we will set the cmd area size to 2M, and
the cmd area size to 32M. The TCMU will dynamically grows the data
area from 0 to max 32M size as needed.
The cmd area memory will be allocated through vmalloc(), and the
data area's blocks will be allocated individually later when needed.
The allocated data area block memory will be managed via radix tree.
For now the bitmap still be the most efficient way to search and
manage the block index, this could be update later.
Signed-off-by: Xiubo Li <lixiubo@cmss.chinamobile.com>
Signed-off-by: Jianfei Hu <hujianfei@cmss.chinamobile.com>
Acked-by: Mike Christie <mchristi@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
2017-05-02 06:38:05 +03:00
|
|
|
int i, dbi;
|
2016-02-27 01:59:57 +03:00
|
|
|
int block_remaining = 0;
|
tcmu: Add dynamic growing data area feature support
Currently for the TCMU, the ring buffer size is fixed to 64K cmd
area + 1M data area, and this will be bottlenecks for high iops.
The struct tcmu_cmd_entry {} size is fixed about 112 bytes with
iovec[N] & N <= 4, and the size of struct iovec is about 16 bytes.
If N == 0, the ratio will be sizeof(cmd entry) : sizeof(datas) ==
112Bytes : (N * 4096)Bytes = 28 : 0, no data area is need.
If 0 < N <=4, the ratio will be sizeof(cmd entry) : sizeof(datas)
== 112Bytes : (N * 4096)Bytes = 28 : (N * 1024), so the max will
be 28 : 1024.
If N > 4, the sizeof(cmd entry) will be [(N - 4) *16 + 112] bytes,
and its corresponding data size will be [N * 4096], so the ratio
of sizeof(cmd entry) : sizeof(datas) == [(N - 4) * 16 + 112)Bytes
: (N * 4096)Bytes == 4/1024 - 12/(N * 1024), so the max is about
4 : 1024.
When N is bigger, the ratio will be smaller.
As the initial patch, we will set the cmd area size to 2M, and
the cmd area size to 32M. The TCMU will dynamically grows the data
area from 0 to max 32M size as needed.
The cmd area memory will be allocated through vmalloc(), and the
data area's blocks will be allocated individually later when needed.
The allocated data area block memory will be managed via radix tree.
For now the bitmap still be the most efficient way to search and
manage the block index, this could be update later.
Signed-off-by: Xiubo Li <lixiubo@cmss.chinamobile.com>
Signed-off-by: Jianfei Hu <hujianfei@cmss.chinamobile.com>
Acked-by: Mike Christie <mchristi@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
2017-05-02 06:38:05 +03:00
	void *from, *to = NULL;
	size_t copy_bytes, to_offset, offset;
	struct scatterlist *sg;
	struct page *page;

	for_each_sg(data_sg, sg, data_nents, i) {
		int sg_remaining = sg->length;
		from = kmap_atomic(sg_page(sg)) + sg->offset;
		while (sg_remaining > 0) {
			if (block_remaining == 0) {
				if (to)
					kunmap_atomic(to);

				block_remaining = DATA_BLOCK_SIZE;
				dbi = tcmu_cmd_get_dbi(tcmu_cmd);
				page = tcmu_get_block_page(udev, dbi);
				to = kmap_atomic(page);
			}

			/*
			 * Convert to the virtual offset of the ring data area.
			 */
			to_offset = get_block_offset_user(udev, dbi,
					block_remaining);

			/*
			 * The following code will gather and map the blocks
			 * to the same iovec when the blocks are all next to
			 * each other.
			 */
			copy_bytes = min_t(size_t, sg_remaining,
					block_remaining);
			if (*iov_cnt != 0 &&
			    to_offset == iov_tail(*iov)) {
				/*
				 * Will append to the current iovec, because
				 * the current block page is next to the
				 * previous one.
				 */
				(*iov)->iov_len += copy_bytes;
			} else {
				/*
				 * Will allocate a new iovec because this is
				 * the first pass or the current block page
				 * is not next to the previous one.
				 */
				new_iov(iov, iov_cnt);
				(*iov)->iov_base = (void __user *)to_offset;
				(*iov)->iov_len = copy_bytes;
			}

			if (copy_data) {
				offset = DATA_BLOCK_SIZE - block_remaining;
				memcpy(to + offset,
				       from + sg->length - sg_remaining,
				       copy_bytes);
				tcmu_flush_dcache_range(to, copy_bytes);
			}

			sg_remaining -= copy_bytes;
			block_remaining -= copy_bytes;
		}
		kunmap_atomic(from - sg->offset);
	}

	if (to)
		kunmap_atomic(to);
}
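
/*
 * Copy completed data from the ring's data blocks back into the se_cmd
 * scatter-gather list: the reverse of scatter_data_area().
 */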
static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
			     bool bidi)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	int i, dbi;
	int block_remaining = 0;
	void *from = NULL, *to;
	size_t copy_bytes, offset;
	struct scatterlist *sg, *data_sg;
	struct page *page;
	unsigned int data_nents;
	uint32_t count = 0;

	if (!bidi) {
		data_sg = se_cmd->t_data_sg;
		data_nents = se_cmd->t_data_nents;
	} else {
		/*
		 * For the bidi case, the first count blocks hold the
		 * Data-Out buffer, so they must be skipped before
		 * gathering the Data-In buffer.
		 */
		count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);

		data_sg = se_cmd->t_bidi_data_sg;
		data_nents = se_cmd->t_bidi_data_nents;
	}

	tcmu_cmd_set_dbi_cur(cmd, count);

	for_each_sg(data_sg, sg, data_nents, i) {
		int sg_remaining = sg->length;
		to = kmap_atomic(sg_page(sg)) + sg->offset;
		while (sg_remaining > 0) {
			if (block_remaining == 0) {
				if (from)
					kunmap_atomic(from);

				block_remaining = DATA_BLOCK_SIZE;
				dbi = tcmu_cmd_get_dbi(cmd);
				page = tcmu_get_block_page(udev, dbi);
				from = kmap_atomic(page);
			}
			copy_bytes = min_t(size_t, sg_remaining,
					block_remaining);
			offset = DATA_BLOCK_SIZE - block_remaining;
			tcmu_flush_dcache_range(from, copy_bytes);
			memcpy(to + sg->length - sg_remaining, from + offset,
					copy_bytes);

			sg_remaining -= copy_bytes;
			block_remaining -= copy_bytes;
		}
		kunmap_atomic(to - sg->offset);
	}
	if (from)
		kunmap_atomic(from);
}
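
/*
 * Number of data blocks still free below the current threshold: e.g.
 * with thresh == 8 and three bits set in the bitmap, five blocks are
 * free.
 */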
static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
{
	return thresh - bitmap_weight(bitmap, thresh);
}

/*
 * We can't queue a command until we have space available on the cmd ring
 * *and* space available on the data area.
 *
 * Called with ring lock held.
 */
static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
		size_t cmd_size, size_t data_needed)
{
	struct tcmu_mailbox *mb = udev->mb_addr;
	uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1)
				/ DATA_BLOCK_SIZE;
	size_t space, cmd_needed;
	u32 cmd_head;

	tcmu_flush_dcache_range(mb, sizeof(*mb));

	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

	/*
	 * If cmd end-of-ring space is too small then we need space for a NOP plus
	 * original cmd - cmds are internally contiguous.
	 */
	if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
		cmd_needed = cmd_size;
	else
		cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);

	space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
	if (space < cmd_needed) {
		pr_debug("no cmd space: %u %u %u\n", cmd_head,
		       udev->cmdr_last_cleaned, udev->cmdr_size);
		return false;
	}

	/* try to check and get the data blocks as needed */
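	/*
	 * blocks_needed above rounds data_needed up to whole
	 * DATA_BLOCK_SIZE blocks, so a transfer slightly over one block
	 * still claims two. If the blocks free below dbi_thresh cannot
	 * cover the request, the threshold is grown, capped at
	 * DATA_BLOCK_BITS.
	 */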
	space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
	if ((space * DATA_BLOCK_SIZE) < data_needed) {
		unsigned long blocks_left = DATA_BLOCK_BITS - udev->dbi_thresh +
					space;

		if (blocks_left < blocks_needed) {
			pr_debug("no data space: only %lu available, but ask for %zu\n",
					blocks_left * DATA_BLOCK_SIZE,
					data_needed);
			return false;
		}

		udev->dbi_thresh += blocks_needed;
		if (udev->dbi_thresh > DATA_BLOCK_BITS)
			udev->dbi_thresh = DATA_BLOCK_BITS;
	}

	return tcmu_get_empty_blocks(udev, cmd);
}

static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
{
	return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
			sizeof(struct tcmu_cmd_entry));
}
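
/*
 * The max() in tcmu_cmd_get_base_cmd_size() guarantees at least one
 * full struct tcmu_cmd_entry even for small iovec counts; each
 * additional iovec extends the entry by sizeof(struct iovec).
 */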

static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
					   size_t base_command_size)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t command_size;

	command_size = base_command_size +
		round_up(scsi_command_size(se_cmd->t_task_cdb),
				TCMU_OP_ALIGN_SIZE);

	WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));

	return command_size;
}
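
/*
 * Allocate a cmd id for the command and arm the per-device timeout
 * timer; a command that already has an id (it was waiting on the cmdr
 * queue) keeps its existing deadline.
 */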
static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	unsigned long tmo = udev->cmd_time_out;
	int cmd_id;

	/*
	 * If it was on the cmdr queue waiting we do not reset the timer
	 * for requeues and when it is finally sent to userspace.
	 */
	if (tcmu_cmd->cmd_id)
		return 0;

	cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
	if (cmd_id < 0) {
		pr_err("tcmu: Could not allocate cmd id.\n");
		return cmd_id;
	}
	tcmu_cmd->cmd_id = cmd_id;

	if (!tmo)
		tmo = TCMU_TIME_OUT;

	pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id,
		 udev->name, tmo / MSEC_PER_SEC);

	tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
	mod_timer(&udev->timeout, tcmu_cmd->deadline);
	return 0;
}
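
/*
 * Park a command on the internal cmdr queue until ring space frees up;
 * the timer is set up first so a queued command can still time out.
 */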
static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	int ret;

	ret = tcmu_setup_cmd_timer(tcmu_cmd);
	if (ret)
		return ret;

	list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue);
	pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
		 tcmu_cmd->cmd_id, udev->name);
	return 0;
}

/**
 * queue_cmd_ring - queue cmd to ring or internally
 * @tcmu_cmd: cmd to queue
 * @scsi_err: TCM error code, set when -1 is returned.
 *
 * Returns:
 * -1 we cannot queue internally or to the ring.
 *  0 success
 *  1 internally queued to wait for ring memory to free.
 */
static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t base_command_size, command_size;
	struct tcmu_mailbox *mb;
	struct tcmu_cmd_entry *entry;
	struct iovec *iov;
	int iov_cnt, ret;
	uint32_t cmd_head;
	uint64_t cdb_off;
	bool copy_to_data_area;
	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);

	*scsi_err = TCM_NO_SENSE;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		*scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -1;
	}

	/*
	 * Must be a certain minimum size for response sense info, but
	 * also may be larger if the iov array is large.
	 *
	 * We prepare as many iovs as possible for potential uses here,
	 * because it's expensive to tell how many regions are freed in
	 * the bitmap & global data pool, as the size calculated here
	 * will only be used to do the checks.
	 *
	 * The size will be recalculated later as actually needed to save
	 * cmd area memory.
	 */
	base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);

	if (!list_empty(&udev->cmdr_queue))
		goto queue;

	mb = udev->mb_addr;
	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
	if ((command_size > (udev->cmdr_size / 2)) ||
	    data_length > udev->data_size) {
		pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
			"cmd ring/data area\n", command_size, data_length,
			udev->cmdr_size, udev->data_size);
		*scsi_err = TCM_INVALID_CDB_FIELD;
		return -1;
	}

	if (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
		/*
		 * Don't leave commands partially setup because the unmap
		 * thread might need the blocks to make forward progress.
		 */
		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
		tcmu_cmd_reset_dbi_cur(tcmu_cmd);
		goto queue;
	}

	/* Insert a PAD if end-of-ring space is too small */
	if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);

		entry = (void *) mb + CMDR_OFF + cmd_head;
		tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
		tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
		entry->hdr.cmd_id = 0; /* not used for PAD */
		entry->hdr.kflags = 0;
		entry->hdr.uflags = 0;
		tcmu_flush_dcache_range(entry, sizeof(*entry));

		UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
		tcmu_flush_dcache_range(mb, sizeof(*mb));

		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
		WARN_ON(cmd_head != 0);
	}

	entry = (void *) mb + CMDR_OFF + cmd_head;
	memset(entry, 0, command_size);
	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);

	/* Handle allocating space from the data area */
	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
	iov = &entry->req.iov[0];
	iov_cnt = 0;
	copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
		|| se_cmd->se_cmd_flags & SCF_BIDI);
	scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg,
			  se_cmd->t_data_nents, &iov, &iov_cnt,
			  copy_to_data_area);
	entry->req.iov_cnt = iov_cnt;

	/* Handle BIDI commands */
	iov_cnt = 0;
	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		iov++;
		scatter_data_area(udev, tcmu_cmd, se_cmd->t_bidi_data_sg,
				  se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
				  false);
	}
	entry->req.iov_bidi_cnt = iov_cnt;

	ret = tcmu_setup_cmd_timer(tcmu_cmd);
	if (ret) {
		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
		mutex_unlock(&udev->cmdr_lock);

		*scsi_err = TCM_OUT_OF_RESOURCES;
		return -1;
	}
	entry->hdr.cmd_id = tcmu_cmd->cmd_id;

	/*
	 * Recalculate the command's base size and total size according
	 * to the actual number of iovecs used.
	 */
	base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt +
						       entry->req.iov_bidi_cnt);
	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);

	tcmu_hdr_set_len(&entry->hdr.len_op, command_size);

	/* All offsets relative to mb_addr, not start of entry! */
	cdb_off = CMDR_OFF + cmd_head + base_command_size;
	memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
	entry->req.cdb_off = cdb_off;
	tcmu_flush_dcache_range(entry, sizeof(*entry));

	UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	/* TODO: only if FLUSH and FUA? */
	uio_event_notify(&udev->uio_info);

	return 0;

queue:
	if (add_to_cmdr_queue(tcmu_cmd)) {
		*scsi_err = TCM_OUT_OF_RESOURCES;
		return -1;
	}

	return 1;
}
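
/*
 * Fabric entry point: allocate a tcmu_cmd for the se_cmd and queue it
 * under cmdr_lock; if queue_cmd_ring() fails outright the command is
 * freed and the sense reason it set is returned.
 */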
static sense_reason_t
tcmu_queue_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;
	sense_reason_t scsi_ret;
	int ret;

	tcmu_cmd = tcmu_alloc_cmd(se_cmd);
	if (!tcmu_cmd)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	mutex_lock(&udev->cmdr_lock);
	ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
	mutex_unlock(&udev->cmdr_lock);
	if (ret < 0)
		tcmu_free_cmd(tcmu_cmd);
	return scsi_ret;
}
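
/*
 * Complete a single ring entry: copy back sense data or the Data-In
 * buffer as the response requires, then release the command's data
 * blocks and free it.
 */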
static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	struct tcmu_dev *udev = cmd->tcmu_dev;

	/*
	 * The cmd was already completed by the timeout handler; just
	 * reclaim its data area space and free the cmd.
	 */
	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
		goto out;

	tcmu_cmd_reset_dbi_cur(cmd);

	if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
			cmd->se_cmd);
		entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
	} else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
		transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
	} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
		/* Get Data-In buffer before clean up */
		gather_data_area(udev, cmd, true);
	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
		gather_data_area(udev, cmd, false);
	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
		/* TODO: */
	} else if (se_cmd->data_direction != DMA_NONE) {
		pr_warn("TCMU: data direction was %d!\n",
			se_cmd->data_direction);
	}

	target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);

out:
	cmd->se_cmd = NULL;
	tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
	tcmu_free_cmd(cmd);
}
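
/*
 * Drain completed entries from the ring, skipping PAD entries; returns
 * the number of commands handled and, once the ring is idle, kicks off
 * block reclaim if the global block count is over the limit.
 */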
static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
{
	struct tcmu_mailbox *mb;
	int handled = 0;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		pr_err("ring broken, not handling completions\n");
		return 0;
	}

	mb = udev->mb_addr;
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {

		struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
		struct tcmu_cmd *cmd;

		tcmu_flush_dcache_range(entry, sizeof(*entry));

		if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
			UPDATE_HEAD(udev->cmdr_last_cleaned,
				    tcmu_hdr_get_len(entry->hdr.len_op),
				    udev->cmdr_size);
			continue;
		}
		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);

		cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
		if (!cmd) {
			pr_err("cmd_id not found, ring is broken\n");
			set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
			break;
		}

		tcmu_handle_completion(cmd, entry);

		UPDATE_HEAD(udev->cmdr_last_cleaned,
			    tcmu_hdr_get_len(entry->hdr.len_op),
			    udev->cmdr_size);

		handled++;
	}

	if (mb->cmd_tail == mb->cmd_head && list_empty(&udev->cmdr_queue)) {
		del_timer(&udev->timeout);
		/*
		 * No more pending or waiting commands, so try to reclaim
		 * blocks if needed.
		 */
		if (atomic_read(&global_db_count) > TCMU_GLOBAL_MAX_BLOCKS)
			schedule_delayed_work(&tcmu_unmap_work, 0);
	}

	return handled;
}
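
/*
 * Per-command timeout check (the signature matches an idr_for_each
 * callback): a command past its deadline is marked expired if it is
 * already in flight on the ring, or removed and freed if it was still
 * waiting on the cmdr queue; either way the se_cmd is completed with
 * an error status.
 */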
static int tcmu_check_expired_cmd(int id, void *p, void *data)
{
	struct tcmu_cmd *cmd = p;
	struct tcmu_dev *udev = cmd->tcmu_dev;
	u8 scsi_status;
	struct se_cmd *se_cmd;
	bool is_running;

	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
		return 0;

	if (!time_after(jiffies, cmd->deadline))
		return 0;

	is_running = list_empty(&cmd->cmdr_queue_entry);
	pr_debug("Timing out cmd %u on dev %s that is %s.\n",
		 id, udev->name, is_running ? "inflight" : "queued");

	se_cmd = cmd->se_cmd;
	cmd->se_cmd = NULL;

	if (is_running) {
		set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
		/*
		 * target_complete_cmd will translate this to LUN COMM FAILURE
		 */
		scsi_status = SAM_STAT_CHECK_CONDITION;
	} else {
		list_del_init(&cmd->cmdr_queue_entry);

		idr_remove(&udev->commands, id);
		tcmu_free_cmd(cmd);
		scsi_status = SAM_STAT_TASK_SET_FULL;
	}
	target_complete_cmd(se_cmd, scsi_status);
	return 0;
}
;
|
_E->_timer.function =
-(_cast_func)_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-&_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
)
// Sometimes timer functions are called directly. Replace matched args.
@change_timer_function_calls
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression _E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_data;
@@
_callback(
(
-(_cast_data)_E
+&_E->_timer
|
-(_cast_data)&_E
+&_E._timer
|
-_E
+&_E->_timer
)
)
// If a timer has been configured without a data argument, it can be
// converted without regard to the callback argument, since it is unused.
@match_timer_function_unused_data@
expression _E;
identifier _timer;
identifier _callback;
@@
(
-setup_timer(&_E->_timer, _callback, 0);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0L);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0UL);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0L);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0UL);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0L);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0UL);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0L);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0UL);
+timer_setup(_timer, _callback, 0);
)
@change_callback_unused_data
depends on match_timer_function_unused_data@
identifier match_timer_function_unused_data._callback;
type _origtype;
identifier _origarg;
@@
void _callback(
-_origtype _origarg
+struct timer_list *unused
)
{
... when != _origarg
}
Signed-off-by: Kees Cook <keescook@chromium.org>
2017-10-17 00:43:17 +03:00
|
|
|
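/*
 * Device-level command timeout handler. All it does in timer context is
 * put the device on the global timed_out_udevs list and kick the unmap
 * worker; the expired commands themselves are reaped later from process
 * context.
 */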
static void tcmu_device_timedout(struct timer_list *t)
{
        struct tcmu_dev *udev = from_timer(udev, t, timeout);

        pr_debug("%s cmd timeout has expired\n", udev->name);

        spin_lock(&timed_out_udevs_lock);
        if (list_empty(&udev->timedout_entry))
                list_add_tail(&udev->timedout_entry, &timed_out_udevs);
        spin_unlock(&timed_out_udevs_lock);

        schedule_delayed_work(&tcmu_unmap_work, 0);
}

static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
{
        struct tcmu_hba *tcmu_hba;

        tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
        if (!tcmu_hba)
                return -ENOMEM;

        tcmu_hba->host_id = host_id;
        hba->hba_ptr = tcmu_hba;

        return 0;
}

static void tcmu_detach_hba(struct se_hba *hba)
{
        kfree(hba->hba_ptr);
        hba->hba_ptr = NULL;
}

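/*
 * Allocate a tcmu_dev and set up its in-kernel state (kref, locks,
 * lists, command idr, timeout timer, data-block radix tree). The shared
 * command/data ring itself is not allocated here but later, in
 * tcmu_configure_device().
 */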
static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
{
        struct tcmu_dev *udev;

        udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
        if (!udev)
                return NULL;
        kref_init(&udev->kref);

        udev->name = kstrdup(name, GFP_KERNEL);
        if (!udev->name) {
                kfree(udev);
                return NULL;
        }

        udev->hba = hba;
        udev->cmd_time_out = TCMU_TIME_OUT;

        mutex_init(&udev->cmdr_lock);

        INIT_LIST_HEAD(&udev->timedout_entry);
        INIT_LIST_HEAD(&udev->cmdr_queue);
        idr_init(&udev->commands);

        timer_setup(&udev->timeout, tcmu_device_timedout, 0);

        init_waitqueue_head(&udev->nl_cmd_wq);
        spin_lock_init(&udev->nl_cmd_lock);

        INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);

        return &udev->se_dev;
}

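/*
 * Retry commands that were queued while the ring was full. Returns true
 * once the queue is drained; if a command hits the ring-full case again
 * (ret > 0) the remaining commands are put back and false is returned.
 * Runs under cmdr_lock (see tcmu_irqcontrol()).
 */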
static bool run_cmdr_queue(struct tcmu_dev *udev)
{
        struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
        LIST_HEAD(cmds);
        bool drained = true;
        sense_reason_t scsi_ret;
        int ret;

        if (list_empty(&udev->cmdr_queue))
                return true;

        pr_debug("running %s's cmdr queue\n", udev->name);

        list_splice_init(&udev->cmdr_queue, &cmds);

        list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) {
                list_del_init(&tcmu_cmd->cmdr_queue_entry);

                pr_debug("removing cmd %u on dev %s from queue\n",
                         tcmu_cmd->cmd_id, udev->name);

                ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
                if (ret < 0) {
                        pr_debug("cmd %u on dev %s failed with %u\n",
                                 tcmu_cmd->cmd_id, udev->name, scsi_ret);

                        idr_remove(&udev->commands, tcmu_cmd->cmd_id);
                        /*
                         * Ignore scsi_ret for now. target_complete_cmd
                         * drops it.
                         */
                        target_complete_cmd(tcmu_cmd->se_cmd,
                                            SAM_STAT_CHECK_CONDITION);
                        tcmu_free_cmd(tcmu_cmd);
                } else if (ret > 0) {
                        pr_debug("ran out of space during cmdr queue run\n");
                        /*
                         * cmd was requeued, so just put all cmds back in
                         * the queue
                         */
                        list_splice_tail(&cmds, &udev->cmdr_queue);
                        drained = false;
                        goto done;
                }
        }
done:
        return drained;
}

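/*
 * uio irqcontrol hook, invoked when userspace writes to the uio fd to
 * signal that it has consumed ring entries: pick up any completions and
 * retry the deferred command queue, both under cmdr_lock.
 */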
static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
{
        struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

        mutex_lock(&udev->cmdr_lock);
        tcmu_handle_completions(udev);
        run_cmdr_queue(udev);
        mutex_unlock(&udev->cmdr_lock);

        return 0;
}

/*
 * mmap code from uio.c. Copied here because we want to hook mmap()
 * and this stuff must come along.
 */
static int tcmu_find_mem_index(struct vm_area_struct *vma)
{
        struct tcmu_dev *udev = vma->vm_private_data;
        struct uio_info *info = &udev->uio_info;

        if (vma->vm_pgoff < MAX_UIO_MAPS) {
                if (info->mem[vma->vm_pgoff].size == 0)
                        return -1;
                return (int)vma->vm_pgoff;
        }
        return -1;
}

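/*
 * Look up the data-area page backing block @dbi. If userspace faults on
 * a block that was never assigned to a command, allocate and insert a
 * zeroed page on the fly; returns NULL only on allocation or radix-tree
 * insertion failure.
 */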
static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
{
        struct page *page;
        int ret;

        mutex_lock(&udev->cmdr_lock);
        page = tcmu_get_block_page(udev, dbi);
        if (likely(page)) {
                mutex_unlock(&udev->cmdr_lock);
                return page;
        }

        /*
         * Normally we should not get here: it happens only when
         * userspace has touched blocks outside of the tcmu_cmd's
         * data iov[], in which case we hand back a zeroed page.
         */
        pr_warn("Block(%u) outside of cmd's iov[] has been touched!\n", dbi);
        pr_warn("This is most likely a userspace bug!\n");

        if (dbi >= udev->dbi_thresh) {
                /* Extend udev->dbi_thresh to dbi + 1 */
                udev->dbi_thresh = dbi + 1;
                udev->dbi_max = dbi;
        }

        page = radix_tree_lookup(&udev->data_blocks, dbi);
        if (!page) {
                page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!page) {
                        mutex_unlock(&udev->cmdr_lock);
                        return NULL;
                }

                ret = radix_tree_insert(&udev->data_blocks, dbi, page);
                if (ret) {
                        mutex_unlock(&udev->cmdr_lock);
                        __free_page(page);
                        return NULL;
                }

                /*
                 * Since this case is rare in the page fault path, allow
                 * global_db_count to exceed TCMU_GLOBAL_MAX_BLOCKS here
                 * rather than failing the fault.
                 */
                atomic_inc(&global_db_count);
        }
        mutex_unlock(&udev->cmdr_lock);

        return page;
}

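/*
 * Fault handler for the single UIO mapping: offsets below data_off fall
 * into the vmalloc()ed mailbox/command ring and are resolved with
 * vmalloc_to_page(); anything above is a data-area block served from
 * the radix tree via tcmu_try_get_block_page().
 */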
static int tcmu_vma_fault(struct vm_fault *vmf)
{
        struct tcmu_dev *udev = vmf->vma->vm_private_data;
        struct uio_info *info = &udev->uio_info;
        struct page *page;
        unsigned long offset;
        void *addr;

        int mi = tcmu_find_mem_index(vmf->vma);
        if (mi < 0)
                return VM_FAULT_SIGBUS;

        /*
         * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
         * to use mem[N].
         */
        offset = (vmf->pgoff - mi) << PAGE_SHIFT;

        if (offset < udev->data_off) {
                /* For the vmalloc()ed cmd area pages */
                addr = (void *)(unsigned long)info->mem[mi].addr + offset;
                page = vmalloc_to_page(addr);
        } else {
                uint32_t dbi;

                /* For the dynamically growing data area pages */
                dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE;
                page = tcmu_try_get_block_page(udev, dbi);
                if (!page)
                        return VM_FAULT_NOPAGE;
        }

        get_page(page);
        vmf->page = page;
        return 0;
}

static const struct vm_operations_struct tcmu_vm_ops = {
        .fault = tcmu_vma_fault,
};

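/*
 * Hooked mmap(): install tcmu_vm_ops so faults go through
 * tcmu_vma_fault(), and reject mappings that are not exactly
 * TCMU_RING_SIZE long.
 */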
static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
{
        struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = &tcmu_vm_ops;

        vma->vm_private_data = udev;

        /* Ensure the mmap is exactly the right size */
        if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT))
                return -EINVAL;

        return 0;
}

static int tcmu_open(struct uio_info *info, struct inode *inode)
{
        struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

        /* O_EXCL not supported for char devs, so fake it? */
        if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
                return -EBUSY;

        udev->inode = inode;
        kref_get(&udev->kref);

        pr_debug("open\n");

        return 0;
}

static void tcmu_dev_call_rcu(struct rcu_head *p)
{
        struct se_device *dev = container_of(p, struct se_device, rcu_head);
        struct tcmu_dev *udev = TCMU_DEV(dev);

        kfree(udev->uio_info.name);
        kfree(udev->name);
        kfree(udev);
}

static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
{
        if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
                kmem_cache_free(tcmu_cmd_cache, cmd);
                return 0;
        }
        return -EINVAL;
}

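/*
 * Free the data-area pages for block indexes [start, end) and drop the
 * global block count for each page released.
 */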
static void tcmu_blocks_release(struct radix_tree_root *blocks,
                                int start, int end)
{
        int i;
        struct page *page;

        for (i = start; i < end; i++) {
                page = radix_tree_delete(blocks, i);
                if (page) {
                        __free_page(page);
                        atomic_dec(&global_db_count);
                }
        }
}

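/*
 * Final teardown, run when the last reference is dropped: free the ring,
 * unlink the device from the timed-out list, reap expired commands still
 * sitting in the idr, release all data blocks and free the device via
 * RCU.
 */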
static void tcmu_dev_kref_release(struct kref *kref)
{
        struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
        struct se_device *dev = &udev->se_dev;
        struct tcmu_cmd *cmd;
        bool all_expired = true;
        int i;

        vfree(udev->mb_addr);
        udev->mb_addr = NULL;

        spin_lock_bh(&timed_out_udevs_lock);
        if (!list_empty(&udev->timedout_entry))
                list_del(&udev->timedout_entry);
        spin_unlock_bh(&timed_out_udevs_lock);

        /* Upper layer should drain all requests before calling this */
        mutex_lock(&udev->cmdr_lock);
        idr_for_each_entry(&udev->commands, cmd, i) {
                if (tcmu_check_and_free_pending_cmd(cmd) != 0)
                        all_expired = false;
        }
        idr_destroy(&udev->commands);
        WARN_ON(!all_expired);

        tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
        mutex_unlock(&udev->cmdr_lock);

        call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}

static int tcmu_release(struct uio_info *info, struct inode *inode)
{
        struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

        clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);

        pr_debug("close\n");
        /* release ref from open */
        kref_put(&udev->kref, tcmu_dev_kref_release);
        return 0;
}

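/*
 * Netlink command/reply handshake: tcmu_init_genl_cmd_reply() claims the
 * single per-device curr_nl_cmd slot, sleeping while a previous command
 * is still outstanding, and tcmu_wait_genl_cmd_reply() blocks until
 * userspace completes the command and then hands back its status. Both
 * are no-ops when reply support is disabled or not advertised.
 */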
static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
{
        struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;

        if (!tcmu_kern_cmd_reply_supported)
                return;

        if (udev->nl_reply_supported <= 0)
                return;

relock:
        spin_lock(&udev->nl_cmd_lock);

        if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
                spin_unlock(&udev->nl_cmd_lock);
                pr_debug("sleeping for open nl cmd\n");
                wait_event(udev->nl_cmd_wq, (nl_cmd->cmd == TCMU_CMD_UNSPEC));
                goto relock;
        }

        memset(nl_cmd, 0, sizeof(*nl_cmd));
        nl_cmd->cmd = cmd;
        init_completion(&nl_cmd->complete);

        spin_unlock(&udev->nl_cmd_lock);
}

static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
{
        struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
        int ret;
        DEFINE_WAIT(__wait);

        if (!tcmu_kern_cmd_reply_supported)
                return 0;

        if (udev->nl_reply_supported <= 0)
                return 0;

        pr_debug("sleeping for nl reply\n");
        wait_for_completion(&nl_cmd->complete);

        spin_lock(&udev->nl_cmd_lock);
        nl_cmd->cmd = TCMU_CMD_UNSPEC;
        ret = nl_cmd->status;
        nl_cmd->status = 0;
        spin_unlock(&udev->nl_cmd_lock);

        wake_up_all(&udev->nl_cmd_wq);

        return ret;
}

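/*
 * Build and multicast a device event (e.g. TCMU_CMD_RECONFIG_DEVICE) to
 * the TCMU_MCGRP_CONFIG group, then wait for the userspace acknowledgment
 * when reply support is enabled.
 */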
static int tcmu_netlink_event(struct tcmu_dev *udev, enum tcmu_genl_cmd cmd,
                              int reconfig_attr, const void *reconfig_data)
{
        struct sk_buff *skb;
        void *msg_header;
        int ret = -ENOMEM;

        skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return ret;

        msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
        if (!msg_header)
                goto free_skb;

        ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
        if (ret < 0)
                goto free_skb;

        ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
        if (ret < 0)
                goto free_skb;

        ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
        if (ret < 0)
                goto free_skb;

        if (cmd == TCMU_CMD_RECONFIG_DEVICE) {
                switch (reconfig_attr) {
                case TCMU_ATTR_DEV_CFG:
                        ret = nla_put_string(skb, reconfig_attr, reconfig_data);
                        break;
                case TCMU_ATTR_DEV_SIZE:
                        ret = nla_put_u64_64bit(skb, reconfig_attr,
                                                *((u64 *)reconfig_data),
                                                TCMU_ATTR_PAD);
                        break;
                case TCMU_ATTR_WRITECACHE:
                        ret = nla_put_u8(skb, reconfig_attr,
                                         *((u8 *)reconfig_data));
                        break;
                default:
                        BUG();
                }

                if (ret < 0)
                        goto free_skb;
        }

        genlmsg_end(skb, msg_header);

        tcmu_init_genl_cmd_reply(udev, cmd);

        ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
                                      TCMU_MCGRP_CONFIG, GFP_KERNEL);
        /* We don't care if no one is listening */
        if (ret == -ESRCH)
                ret = 0;
        if (!ret)
                ret = tcmu_wait_genl_cmd_reply(udev);

        return ret;
free_skb:
        nlmsg_free(skb);
        return ret;
}

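/*
 * (Re)build the uio device name, "tcm-user/<host_id>/<name>" plus an
 * optional "/<dev_config>" suffix, freeing any previously set name.
 */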
static int tcmu_update_uio_info(struct tcmu_dev *udev)
{
        struct tcmu_hba *hba = udev->hba->hba_ptr;
        struct uio_info *info;
        size_t size, used;
        char *str;

        info = &udev->uio_info;
        size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
                        udev->dev_config);
        size += 1; /* for \0 */
        str = kmalloc(size, GFP_KERNEL);
        if (!str)
                return -ENOMEM;

        used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);
        if (udev->dev_config[0])
                snprintf(str + used, size - used, "/%s", udev->dev_config);

        /* If the old string exists, free it */
        kfree(info->name);
        info->name = str;

        return 0;
}

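/*
 * Lay out the shared ring that userspace will mmap() in one piece:
 *
 *   [0, CMDR_OFF)                       mailbox (struct tcmu_mailbox)
 *   [CMDR_OFF, CMDR_SIZE)               command ring
 *   [CMDR_SIZE, CMDR_SIZE + DATA_SIZE)  data area, in DATA_BLOCK_SIZE blocks
 *
 * Only the mailbox/command area is vzalloc()ed up front; data-area pages
 * are allocated on demand (see tcmu_try_get_block_page()).
 */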
static int tcmu_configure_device(struct se_device *dev)
{
        struct tcmu_dev *udev = TCMU_DEV(dev);
        struct uio_info *info;
        struct tcmu_mailbox *mb;
        int ret = 0;

        ret = tcmu_update_uio_info(udev);
        if (ret)
                return ret;

        info = &udev->uio_info;

        udev->mb_addr = vzalloc(CMDR_SIZE);
        if (!udev->mb_addr) {
                ret = -ENOMEM;
                goto err_vzalloc;
        }

        /* mailbox fits in first part of CMDR space */
        udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
        udev->data_off = CMDR_SIZE;
        udev->data_size = DATA_SIZE;
        udev->dbi_thresh = 0; /* Default in Idle state */

/* Initialise the mailbox of the ring buffer */
|
2014-10-02 03:07:05 +04:00
|
|
|
mb = udev->mb_addr;
|
2015-04-15 03:30:04 +03:00
|
|
|
mb->version = TCMU_MAILBOX_VERSION;
|
2016-03-01 03:02:15 +03:00
|
|
|
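/* Advertise out-of-order completion (OOOC) support to userspace. */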
mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC;
|
2014-10-02 03:07:05 +04:00
|
|
|
mb->cmdr_off = CMDR_OFF;
|
|
|
|
mb->cmdr_size = udev->cmdr_size;
|
|
|
|
|
|
|
|
WARN_ON(!PAGE_ALIGNED(udev->data_off));
|
|
|
|
WARN_ON(udev->data_size % PAGE_SIZE);
|
2016-02-27 01:59:57 +03:00
|
|
|
WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
|
2014-10-02 03:07:05 +04:00
|
|
|
|
2015-09-04 02:39:56 +03:00
|
|
|
info->version = __stringify(TCMU_MAILBOX_VERSION);
|
2014-10-02 03:07:05 +04:00
|
|
|
|
|
|
|
info->mem[0].name = "tcm-user command & data buffer";
|
2016-02-01 19:29:45 +03:00
|
|
|
info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
|
2014-10-02 03:07:05 +04:00
|
|
|
info->mem[0].size = TCMU_RING_SIZE;
|
2017-05-02 06:38:05 +03:00
|
|
|
info->mem[0].memtype = UIO_MEM_NONE;
|
2014-10-02 03:07:05 +04:00
|
|
|
|
|
|
|
info->irqcontrol = tcmu_irqcontrol;
|
|
|
|
info->irq = UIO_IRQ_CUSTOM;
|
|
|
|
|
|
|
|
info->mmap = tcmu_mmap;
|
|
|
|
info->open = tcmu_open;
|
|
|
|
info->release = tcmu_release;
|
|
|
|
|
|
|
|
ret = uio_register_device(tcmu_root_device, info);
|
|
|
|
if (ret)
|
|
|
|
goto err_register;
|
|
|
|
|
2015-12-28 22:57:39 +03:00
|
|
|
/* User can set hw_block_size before enabling the device */
|
|
|
|
if (dev->dev_attrib.hw_block_size == 0)
|
|
|
|
dev->dev_attrib.hw_block_size = 512;
|
2014-10-02 03:07:05 +04:00
|
|
|
/* Other attributes can be configured in userspace */
|
2017-03-02 08:14:39 +03:00
|
|
|
if (!dev->dev_attrib.hw_max_sectors)
|
|
|
|
dev->dev_attrib.hw_max_sectors = 128;
|
2017-06-06 17:28:48 +03:00
|
|
|
if (!dev->dev_attrib.emulate_write_cache)
|
|
|
|
dev->dev_attrib.emulate_write_cache = 0;
|
2014-10-02 03:07:05 +04:00
|
|
|
dev->dev_attrib.hw_queue_depth = 128;
|
|
|
|
|
2017-09-13 08:01:22 +03:00
|
|
|
/* If the user didn't explicitly disable netlink reply support, use
|
|
|
|
* the module-scope setting.
|
|
|
|
*/
|
|
|
|
if (udev->nl_reply_supported >= 0)
|
|
|
|
udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;
|
|
|
|
|
2017-05-17 12:34:37 +03:00
|
|
|
/*
|
|
|
|
* Get a ref in case userspace does a close on the uio device before
|
|
|
|
* LIO has initiated tcmu_free_device.
|
|
|
|
*/
|
|
|
|
kref_get(&udev->kref);
|
|
|
|
|
2017-06-23 09:18:15 +03:00
|
|
|
ret = tcmu_netlink_event(udev, TCMU_CMD_ADDED_DEVICE, 0, NULL);
|
2014-10-02 03:07:05 +04:00
|
|
|
if (ret)
|
|
|
|
goto err_netlink;
|
|
|
|
|
2017-05-02 06:38:06 +03:00
|
|
|
mutex_lock(&root_udev_mutex);
|
|
|
|
list_add(&udev->node, &root_udev);
|
|
|
|
mutex_unlock(&root_udev_mutex);
|
|
|
|
|
2014-10-02 03:07:05 +04:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_netlink:
|
2017-05-17 12:34:37 +03:00
|
|
|
kref_put(&udev->kref, tcmu_dev_kref_release);
|
2014-10-02 03:07:05 +04:00
|
|
|
uio_unregister_device(&udev->uio_info);
|
|
|
|
err_register:
|
|
|
|
vfree(udev->mb_addr);
|
2017-09-14 04:30:05 +03:00
|
|
|
udev->mb_addr = NULL;
|
2014-10-02 03:07:05 +04:00
|
|
|
err_vzalloc:
|
|
|
|
kfree(info->name);
|
2017-05-17 12:34:37 +03:00
|
|
|
info->name = NULL;
|
2014-10-02 03:07:05 +04:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-03-09 11:42:08 +03:00
|
|
|
static bool tcmu_dev_configured(struct tcmu_dev *udev)
|
|
|
|
{
|
|
|
|
return udev->uio_info.uio_dev != NULL;
|
|
|
|
}
|
|
|
|
|
2014-10-02 03:07:05 +04:00
|
|
|
static void tcmu_free_device(struct se_device *dev)
|
2017-06-23 09:18:12 +03:00
|
|
|
{
|
|
|
|
struct tcmu_dev *udev = TCMU_DEV(dev);
|
|
|
|
|
|
|
|
/* release ref from init */
|
|
|
|
kref_put(&udev->kref, tcmu_dev_kref_release);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void tcmu_destroy_device(struct se_device *dev)
|
2014-10-02 03:07:05 +04:00
|
|
|
{
|
|
|
|
struct tcmu_dev *udev = TCMU_DEV(dev);
|
|
|
|
|
|
|
|
del_timer_sync(&udev->timeout);
|
|
|
|
|
2017-05-02 06:38:06 +03:00
|
|
|
mutex_lock(&root_udev_mutex);
|
|
|
|
list_del(&udev->node);
|
|
|
|
mutex_unlock(&root_udev_mutex);
|
|
|
|
|
2017-06-23 09:18:19 +03:00
|
|
|
tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);
|
2014-10-02 03:07:05 +04:00
|
|
|
|
2017-06-23 09:18:19 +03:00
|
|
|
uio_unregister_device(&udev->uio_info);
|
2017-06-23 09:18:20 +03:00
|
|
|
|
|
|
|
/* release ref from configure */
|
|
|
|
kref_put(&udev->kref, tcmu_dev_kref_release);
|
2014-10-02 03:07:05 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
enum {
|
2017-03-02 08:14:39 +03:00
|
|
|
Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
|
2017-09-13 08:01:22 +03:00
|
|
|
Opt_nl_reply_supported, Opt_err,
|
2014-10-02 03:07:05 +04:00
|
|
|
};
|
|
|
|
|
|
|
|
static match_table_t tokens = {
|
|
|
|
{Opt_dev_config, "dev_config=%s"},
|
|
|
|
{Opt_dev_size, "dev_size=%u"},
|
2015-05-20 00:44:39 +03:00
|
|
|
{Opt_hw_block_size, "hw_block_size=%u"},
|
2017-03-02 08:14:39 +03:00
|
|
|
{Opt_hw_max_sectors, "hw_max_sectors=%u"},
|
2017-09-13 08:01:22 +03:00
|
|
|
{Opt_nl_reply_supported, "nl_reply_supported=%d"},
|
2014-10-02 03:07:05 +04:00
|
|
|
{Opt_err, NULL}
|
|
|
|
};
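/*
 * Example control string as consumed by tcmu_set_configfs_dev_params()
 * below (values illustrative only):
 *
 *   dev_config=file/foo.img,dev_size=1073741824,hw_block_size=4096
 *
 * Entries are split on ',' or '\n', matched against the token table
 * above, and unknown tokens simply fall through to the default case.
 */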
|
|
|
|
|
2017-03-02 08:14:39 +03:00
|
|
|
static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
|
|
|
|
{
|
|
|
|
unsigned long tmp_ul;
|
|
|
|
char *arg_p;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
arg_p = match_strdup(arg);
|
|
|
|
if (!arg_p)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
ret = kstrtoul(arg_p, 0, &tmp_ul);
|
|
|
|
kfree(arg_p);
|
|
|
|
if (ret < 0) {
|
|
|
|
pr_err("kstrtoul() failed for dev attrib\n");
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
if (!tmp_ul) {
|
|
|
|
pr_err("dev attrib must be nonzero\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
*dev_attrib = tmp_ul;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-10-02 03:07:05 +04:00
|
|
|
static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
|
|
|
|
const char *page, ssize_t count)
|
|
|
|
{
|
|
|
|
struct tcmu_dev *udev = TCMU_DEV(dev);
|
|
|
|
char *orig, *ptr, *opts, *arg_p;
|
|
|
|
substring_t args[MAX_OPT_ARGS];
|
|
|
|
int ret = 0, token;
|
|
|
|
|
|
|
|
opts = kstrdup(page, GFP_KERNEL);
|
|
|
|
if (!opts)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
orig = opts;
|
|
|
|
|
|
|
|
while ((ptr = strsep(&opts, ",\n")) != NULL) {
|
|
|
|
if (!*ptr)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
token = match_token(ptr, tokens, args);
|
|
|
|
switch (token) {
|
|
|
|
case Opt_dev_config:
|
|
|
|
if (match_strlcpy(udev->dev_config, &args[0],
|
|
|
|
TCMU_CONFIG_LEN) == 0) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
|
|
|
|
break;
|
|
|
|
case Opt_dev_size:
|
|
|
|
arg_p = match_strdup(&args[0]);
|
|
|
|
if (!arg_p) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size);
|
|
|
|
kfree(arg_p);
|
|
|
|
if (ret < 0)
|
|
|
|
pr_err("kstrtoul() failed for dev_size=\n");
|
|
|
|
break;
|
2015-05-20 00:44:39 +03:00
|
|
|
case Opt_hw_block_size:
|
2017-03-02 08:14:39 +03:00
|
|
|
ret = tcmu_set_dev_attrib(&args[0],
|
|
|
|
&(dev->dev_attrib.hw_block_size));
|
|
|
|
break;
|
|
|
|
case Opt_hw_max_sectors:
|
|
|
|
ret = tcmu_set_dev_attrib(&args[0],
|
|
|
|
&(dev->dev_attrib.hw_max_sectors));
|
2015-05-20 00:44:39 +03:00
|
|
|
break;
|
2017-09-13 08:01:22 +03:00
|
|
|
case Opt_nl_reply_supported:
|
|
|
|
arg_p = match_strdup(&args[0]);
|
|
|
|
if (!arg_p) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
break;
|
|
|
|
}
|
2017-11-08 11:43:44 +03:00
|
|
|
ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported);
|
2017-09-13 08:01:22 +03:00
|
|
|
kfree(arg_p);
|
|
|
|
if (ret < 0)
|
2017-11-08 11:43:44 +03:00
|
|
|
pr_err("kstrtoint() failed for nl_reply_supported=\n");
|
2017-09-13 08:01:22 +03:00
|
|
|
break;
|
2014-10-02 03:07:05 +04:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
2017-03-02 08:14:40 +03:00
|
|
|
|
|
|
|
if (ret)
|
|
|
|
break;
|
2014-10-02 03:07:05 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
kfree(orig);
|
|
|
|
return (!ret) ? count : ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
|
|
|
|
{
|
|
|
|
struct tcmu_dev *udev = TCMU_DEV(dev);
|
|
|
|
ssize_t bl = 0;
|
|
|
|
|
|
|
|
bl = sprintf(b + bl, "Config: %s ",
|
|
|
|
udev->dev_config[0] ? udev->dev_config : "NULL");
|
2017-03-19 01:04:13 +03:00
|
|
|
bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size);
|
2014-10-02 03:07:05 +04:00
|
|
|
|
|
|
|
return bl;
|
|
|
|
}
|
|
|
|
|
|
|
|
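/*
 * get_blocks() feeds READ CAPACITY, which reports the last addressable
 * LBA rather than the block count; hence one block is subtracted from
 * dev_size before dividing.
 */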
static sector_t tcmu_get_blocks(struct se_device *dev)
|
|
|
|
{
|
|
|
|
struct tcmu_dev *udev = TCMU_DEV(dev);
|
|
|
|
|
|
|
|
return div_u64(udev->dev_size - dev->dev_attrib.block_size,
|
|
|
|
dev->dev_attrib.block_size);
|
|
|
|
}
|
|
|
|
|
|
|
|
static sense_reason_t
|
2015-05-20 00:44:39 +03:00
|
|
|
tcmu_parse_cdb(struct se_cmd *cmd)
|
2014-10-02 03:07:05 +04:00
|
|
|
{
|
2016-10-06 18:07:07 +03:00
|
|
|
return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
|
2014-10-02 03:07:05 +04:00
|
|
|
}
|
|
|
|
|
2017-03-19 01:04:13 +03:00
|
|
|
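/*
 * cmd_time_out is exposed through configfs in seconds but stored
 * internally in milliseconds, hence the MSEC_PER_SEC conversions in
 * the show/store handlers below.
 */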
static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
|
|
|
|
{
|
|
|
|
struct se_dev_attrib *da = container_of(to_config_group(item),
|
|
|
|
struct se_dev_attrib, da_group);
|
2017-09-15 08:44:55 +03:00
|
|
|
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
|
2017-03-19 01:04:13 +03:00
|
|
|
|
|
|
|
return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
|
|
|
|
size_t count)
|
|
|
|
{
|
|
|
|
struct se_dev_attrib *da = container_of(to_config_group(item),
|
|
|
|
struct se_dev_attrib, da_group);
|
|
|
|
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
|
|
|
|
u32 val;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (da->da_dev->export_count) {
|
|
|
|
pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = kstrtou32(page, 0, &val);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
udev->cmd_time_out = val * MSEC_PER_SEC;
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
CONFIGFS_ATTR(tcmu_, cmd_time_out);
|
|
|
|
|
2017-06-12 09:34:28 +03:00
|
|
|
static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
|
2017-06-06 17:28:51 +03:00
|
|
|
{
|
|
|
|
struct se_dev_attrib *da = container_of(to_config_group(item),
|
|
|
|
struct se_dev_attrib, da_group);
|
|
|
|
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
|
|
|
|
|
|
|
|
return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
|
|
|
|
}
|
|
|
|
|
2017-06-12 09:34:28 +03:00
|
|
|
static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
|
|
|
|
size_t count)
|
2017-06-06 17:28:51 +03:00
|
|
|
{
|
|
|
|
struct se_dev_attrib *da = container_of(to_config_group(item),
|
|
|
|
struct se_dev_attrib, da_group);
|
|
|
|
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
|
2017-06-12 09:34:28 +03:00
|
|
|
int ret, len;
|
2017-06-06 17:28:51 +03:00
|
|
|
|
2017-06-12 09:34:28 +03:00
|
|
|
len = strlen(page);
|
|
|
|
if (!len || len > TCMU_CONFIG_LEN - 1)
|
2017-06-06 17:28:51 +03:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* Check if device has been configured before */
|
|
|
|
if (tcmu_dev_configured(udev)) {
|
2017-06-23 09:18:15 +03:00
|
|
|
ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
|
2017-06-12 09:34:28 +03:00
|
|
|
TCMU_ATTR_DEV_CFG, page);
|
2017-06-06 17:28:51 +03:00
|
|
|
if (ret) {
|
|
|
|
pr_err("Unable to reconfigure device\n");
|
|
|
|
return ret;
|
|
|
|
}
|
2017-07-07 22:20:00 +03:00
|
|
|
strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
|
|
|
|
|
|
|
|
ret = tcmu_update_uio_info(udev);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
return count;
|
2017-06-06 17:28:51 +03:00
|
|
|
}
|
2017-06-12 09:34:28 +03:00
|
|
|
strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
|
2017-06-06 17:28:51 +03:00
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
2017-06-12 09:34:28 +03:00
|
|
|
CONFIGFS_ATTR(tcmu_, dev_config);
|
2017-06-06 17:28:51 +03:00
|
|
|
|
2017-06-06 17:28:50 +03:00
|
|
|
static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
|
|
|
|
{
|
|
|
|
struct se_dev_attrib *da = container_of(to_config_group(item),
|
|
|
|
struct se_dev_attrib, da_group);
|
|
|
|
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
|
|
|
|
|
|
|
|
return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
|
|
|
|
size_t count)
|
|
|
|
{
|
|
|
|
struct se_dev_attrib *da = container_of(to_config_group(item),
|
|
|
|
struct se_dev_attrib, da_group);
|
|
|
|
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
|
2017-06-12 09:34:28 +03:00
|
|
|
u64 val;
|
2017-06-06 17:28:50 +03:00
|
|
|
int ret;
|
|
|
|
|
2017-06-12 09:34:28 +03:00
|
|
|
ret = kstrtou64(page, 0, &val);
|
2017-06-06 17:28:50 +03:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
/* Check if device has been configured before */
|
|
|
|
if (tcmu_dev_configured(udev)) {
|
2017-06-23 09:18:15 +03:00
|
|
|
ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
|
2017-06-12 09:34:28 +03:00
|
|
|
TCMU_ATTR_DEV_SIZE, &val);
|
2017-06-06 17:28:50 +03:00
|
|
|
if (ret) {
|
|
|
|
pr_err("Unable to reconfigure device\n");
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
2017-06-12 09:34:28 +03:00
|
|
|
udev->dev_size = val;
|
2017-06-06 17:28:50 +03:00
|
|
|
return count;
|
|
|
|
}
|
|
|
|
CONFIGFS_ATTR(tcmu_, dev_size);
|
|
|
|
|
2017-09-13 08:01:22 +03:00
|
|
|
static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
|
|
|
|
char *page)
|
|
|
|
{
|
|
|
|
struct se_dev_attrib *da = container_of(to_config_group(item),
|
|
|
|
struct se_dev_attrib, da_group);
|
|
|
|
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
|
|
|
|
|
|
|
|
return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
|
|
|
|
const char *page, size_t count)
|
|
|
|
{
|
|
|
|
struct se_dev_attrib *da = container_of(to_config_group(item),
|
|
|
|
struct se_dev_attrib, da_group);
|
|
|
|
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
|
|
|
|
s8 val;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = kstrtos8(page, 0, &val);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
udev->nl_reply_supported = val;
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
CONFIGFS_ATTR(tcmu_, nl_reply_supported);
|
|
|
|
|
2017-06-06 17:28:48 +03:00
|
|
|
static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
|
|
|
|
char *page)
|
|
|
|
{
|
|
|
|
struct se_dev_attrib *da = container_of(to_config_group(item),
|
|
|
|
struct se_dev_attrib, da_group);
|
|
|
|
|
|
|
|
return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
|
|
|
|
const char *page, size_t count)
|
|
|
|
{
|
|
|
|
struct se_dev_attrib *da = container_of(to_config_group(item),
|
|
|
|
struct se_dev_attrib, da_group);
|
2017-06-06 17:28:49 +03:00
|
|
|
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
|
2017-06-12 09:34:28 +03:00
|
|
|
u8 val;
|
2017-06-06 17:28:48 +03:00
|
|
|
int ret;
|
|
|
|
|
2017-06-12 09:34:28 +03:00
|
|
|
ret = kstrtou8(page, 0, &val);
|
2017-06-06 17:28:48 +03:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
2017-06-06 17:28:49 +03:00
|
|
|
/* Check if device has been configured before */
|
|
|
|
if (tcmu_dev_configured(udev)) {
|
2017-06-23 09:18:15 +03:00
|
|
|
ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
|
2017-06-12 09:34:28 +03:00
|
|
|
TCMU_ATTR_WRITECACHE, &val);
|
2017-06-06 17:28:49 +03:00
|
|
|
if (ret) {
|
|
|
|
pr_err("Unable to reconfigure device\n");
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
2017-06-12 09:34:28 +03:00
|
|
|
|
|
|
|
da->emulate_write_cache = val;
|
2017-06-06 17:28:48 +03:00
|
|
|
return count;
|
|
|
|
}
|
|
|
|
CONFIGFS_ATTR(tcmu_, emulate_write_cache);
|
|
|
|
|
2017-06-13 16:29:09 +03:00
|
|
|
static struct configfs_attribute *tcmu_attrib_attrs[] = {
|
2017-06-06 17:28:50 +03:00
|
|
|
&tcmu_attr_cmd_time_out,
|
2017-06-12 09:34:28 +03:00
|
|
|
&tcmu_attr_dev_config,
|
2017-06-06 17:28:50 +03:00
|
|
|
&tcmu_attr_dev_size,
|
|
|
|
&tcmu_attr_emulate_write_cache,
|
2017-09-13 08:01:22 +03:00
|
|
|
&tcmu_attr_nl_reply_supported,
|
2017-06-06 17:28:50 +03:00
|
|
|
NULL,
|
|
|
|
};
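/*
 * These attributes appear under the device's attrib/ directory in
 * configfs, e.g. (path illustrative):
 *
 *   /sys/kernel/config/target/core/user_0/foo/attrib/emulate_write_cache
 *
 * Stores against a live device are propagated to the userspace handler
 * via TCMU_CMD_RECONFIG_DEVICE netlink events, as in the *_store()
 * handlers above.
 */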
|
|
|
|
|
2017-03-19 01:04:13 +03:00
|
|
|
static struct configfs_attribute **tcmu_attrs;
|
|
|
|
|
|
|
|
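/*
 * TRANSPORT_FLAG_PASSTHROUGH makes the core hand CDBs to
 * tcmu_parse_cdb()/tcmu_queue_cmd() rather than emulating them, so
 * SCSI command semantics are left to the userspace handler.
 */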
static struct target_backend_ops tcmu_ops = {
|
2014-10-02 03:07:05 +04:00
|
|
|
.name = "user",
|
|
|
|
.owner = THIS_MODULE,
|
2015-05-20 00:44:41 +03:00
|
|
|
.transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
|
2014-10-02 03:07:05 +04:00
|
|
|
.attach_hba = tcmu_attach_hba,
|
|
|
|
.detach_hba = tcmu_detach_hba,
|
|
|
|
.alloc_device = tcmu_alloc_device,
|
|
|
|
.configure_device = tcmu_configure_device,
|
2017-06-23 09:18:12 +03:00
|
|
|
.destroy_device = tcmu_destroy_device,
|
2014-10-02 03:07:05 +04:00
|
|
|
.free_device = tcmu_free_device,
|
|
|
|
.parse_cdb = tcmu_parse_cdb,
|
|
|
|
.set_configfs_dev_params = tcmu_set_configfs_dev_params,
|
|
|
|
.show_configfs_dev_params = tcmu_show_configfs_dev_params,
|
|
|
|
.get_device_type = sbc_get_device_type,
|
|
|
|
.get_blocks = tcmu_get_blocks,
|
2017-03-19 01:04:13 +03:00
|
|
|
.tb_dev_attrib_attrs = NULL,
|
2014-10-02 03:07:05 +04:00
|
|
|
};
|
|
|
|
|
2017-11-28 21:40:29 +03:00
|
|
|
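/*
 * Reclaim sketch with illustrative numbers: if dbi_max is 7 and only
 * bits 0 and 2 of data_bitmap are set, find_last_bit() returns 2, so
 * start/dbi_thresh become 3 and dbi_max becomes 2; blocks [3, 8) are
 * then unmapped from userspace and their pages released to the global
 * pool.
 */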
static void find_free_blocks(void)
|
2017-05-02 06:38:06 +03:00
|
|
|
{
|
|
|
|
struct tcmu_dev *udev;
|
|
|
|
loff_t off;
|
2017-11-28 21:40:39 +03:00
|
|
|
u32 start, end, block, total_freed = 0;
|
|
|
|
|
|
|
|
if (atomic_read(&global_db_count) <= TCMU_GLOBAL_MAX_BLOCKS)
|
|
|
|
return;
|
2017-05-02 06:38:06 +03:00
|
|
|
|
2017-11-28 21:40:29 +03:00
|
|
|
mutex_lock(&root_udev_mutex);
|
|
|
|
list_for_each_entry(udev, &root_udev, node) {
|
|
|
|
mutex_lock(&udev->cmdr_lock);
|
2017-05-02 06:38:06 +03:00
|
|
|
|
2017-11-28 21:40:29 +03:00
|
|
|
/* Try to complete the finished commands first */
|
|
|
|
tcmu_handle_completions(udev);
|
2017-05-03 07:57:05 +03:00
|
|
|
|
2017-11-28 21:40:39 +03:00
|
|
|
/* Skip udevs that are idle */
|
|
|
|
if (!udev->dbi_thresh) {
|
2017-11-28 21:40:29 +03:00
|
|
|
mutex_unlock(&udev->cmdr_lock);
|
|
|
|
continue;
|
|
|
|
}
|
2017-05-02 06:38:06 +03:00
|
|
|
|
2017-11-28 21:40:29 +03:00
|
|
|
end = udev->dbi_max + 1;
|
|
|
|
block = find_last_bit(udev->data_bitmap, end);
|
|
|
|
if (block == udev->dbi_max) {
|
|
|
|
/*
|
2017-11-28 21:40:39 +03:00
|
|
|
* The last bit is dbi_max, so it is not possible to
|
|
|
|
* reclaim any blocks.
|
2017-11-28 21:40:29 +03:00
|
|
|
*/
|
|
|
|
mutex_unlock(&udev->cmdr_lock);
|
|
|
|
continue;
|
|
|
|
} else if (block == end) {
|
|
|
|
/* The current udev will go to the idle state */
|
|
|
|
udev->dbi_thresh = start = 0;
|
|
|
|
udev->dbi_max = 0;
|
|
|
|
} else {
|
|
|
|
udev->dbi_thresh = start = block + 1;
|
|
|
|
udev->dbi_max = block;
|
|
|
|
}
|
2017-05-02 06:38:06 +03:00
|
|
|
|
2017-11-28 21:40:29 +03:00
|
|
|
/* Truncate the data area from offset off onward */
|
|
|
|
off = udev->data_off + start * DATA_BLOCK_SIZE;
|
|
|
|
unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
|
2017-05-02 06:38:06 +03:00
|
|
|
|
2017-11-28 21:40:29 +03:00
|
|
|
/* Release the block pages */
|
|
|
|
tcmu_blocks_release(&udev->data_blocks, start, end);
|
|
|
|
mutex_unlock(&udev->cmdr_lock);
|
2017-05-02 06:38:06 +03:00
|
|
|
|
2017-11-28 21:40:39 +03:00
|
|
|
total_freed += end - start;
|
|
|
|
pr_debug("Freed %u blocks (total %u) from %s.\n", end - start,
|
|
|
|
total_freed, udev->name);
|
2017-11-28 21:40:29 +03:00
|
|
|
}
|
|
|
|
mutex_unlock(&root_udev_mutex);
|
2017-11-28 21:40:39 +03:00
|
|
|
|
|
|
|
if (atomic_read(&global_db_count) > TCMU_GLOBAL_MAX_BLOCKS)
|
|
|
|
schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
|
2017-11-28 21:40:29 +03:00
|
|
|
}
|
|
|
|
|
2017-11-28 21:40:31 +03:00
|
|
|
static void check_timedout_devices(void)
|
|
|
|
{
|
|
|
|
struct tcmu_dev *udev, *tmp_dev;
|
|
|
|
LIST_HEAD(devs);
|
|
|
|
|
|
|
|
spin_lock_bh(&timed_out_udevs_lock);
|
|
|
|
list_splice_init(&timed_out_udevs, &devs);
|
|
|
|
|
|
|
|
list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
|
|
|
|
list_del_init(&udev->timedout_entry);
|
|
|
|
spin_unlock_bh(&timed_out_udevs_lock);
|
|
|
|
|
2017-11-28 21:40:32 +03:00
|
|
|
mutex_lock(&udev->cmdr_lock);
|
2017-11-28 21:40:31 +03:00
|
|
|
idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
|
2017-11-28 21:40:32 +03:00
|
|
|
mutex_unlock(&udev->cmdr_lock);
|
2017-11-28 21:40:31 +03:00
|
|
|
|
|
|
|
spin_lock_bh(&timed_out_udevs_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_unlock_bh(&timed_out_udevs_lock);
|
|
|
|
}
|
|
|
|
|
2017-11-28 21:40:30 +03:00
|
|
|
static void tcmu_unmap_work_fn(struct work_struct *work)
|
2017-11-28 21:40:29 +03:00
|
|
|
{
|
2017-11-28 21:40:31 +03:00
|
|
|
check_timedout_devices();
|
2017-11-28 21:40:30 +03:00
|
|
|
find_free_blocks();
|
2017-05-02 06:38:06 +03:00
|
|
|
}
|
|
|
|
|
2014-10-02 03:07:05 +04:00
|
|
|
static int __init tcmu_module_init(void)
|
|
|
|
{
|
2017-06-06 17:28:50 +03:00
|
|
|
int ret, i, k, len = 0;
|
2014-10-02 03:07:05 +04:00
|
|
|
|
|
|
|
BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
|
|
|
|
|
2017-11-28 21:40:39 +03:00
|
|
|
INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn);
|
2017-11-28 21:40:30 +03:00
|
|
|
|
2014-10-02 03:07:05 +04:00
|
|
|
tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
|
|
|
|
sizeof(struct tcmu_cmd),
|
|
|
|
__alignof__(struct tcmu_cmd),
|
|
|
|
0, NULL);
|
|
|
|
if (!tcmu_cmd_cache)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
tcmu_root_device = root_device_register("tcm_user");
|
|
|
|
if (IS_ERR(tcmu_root_device)) {
|
|
|
|
ret = PTR_ERR(tcmu_root_device);
|
|
|
|
goto out_free_cache;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = genl_register_family(&tcmu_genl_family);
|
|
|
|
if (ret < 0) {
|
|
|
|
goto out_unreg_device;
|
|
|
|
}
|
|
|
|
|
2017-03-19 01:04:13 +03:00
|
|
|
for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
|
|
|
|
len += sizeof(struct configfs_attribute *);
|
|
|
|
}
|
2017-06-06 17:28:50 +03:00
|
|
|
for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) {
|
|
|
|
len += sizeof(struct configfs_attribute *);
|
|
|
|
}
|
|
|
|
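/* One extra slot for the terminating NULL entry (kzalloc zeroes it). */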
len += sizeof(struct configfs_attribute *);
|
2017-03-19 01:04:13 +03:00
|
|
|
|
|
|
|
tcmu_attrs = kzalloc(len, GFP_KERNEL);
|
|
|
|
if (!tcmu_attrs) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto out_unreg_genl;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
|
|
|
|
tcmu_attrs[i] = passthrough_attrib_attrs[i];
|
|
|
|
}
|
2017-06-06 17:28:50 +03:00
|
|
|
for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) {
|
|
|
|
tcmu_attrs[i] = tcmu_attrib_attrs[k];
|
|
|
|
i++;
|
|
|
|
}
|
2017-03-19 01:04:13 +03:00
|
|
|
tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
|
|
|
|
|
2015-05-10 19:14:56 +03:00
|
|
|
ret = transport_backend_register(&tcmu_ops);
|
2014-10-02 03:07:05 +04:00
|
|
|
if (ret)
|
2017-03-19 01:04:13 +03:00
|
|
|
goto out_attrs;
|
2014-10-02 03:07:05 +04:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
2017-03-19 01:04:13 +03:00
|
|
|
out_attrs:
|
|
|
|
kfree(tcmu_attrs);
|
2014-10-02 03:07:05 +04:00
|
|
|
out_unreg_genl:
|
|
|
|
genl_unregister_family(&tcmu_genl_family);
|
|
|
|
out_unreg_device:
|
|
|
|
root_device_unregister(tcmu_root_device);
|
|
|
|
out_free_cache:
|
|
|
|
kmem_cache_destroy(tcmu_cmd_cache);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void __exit tcmu_module_exit(void)
|
|
|
|
{
|
2017-11-28 21:40:39 +03:00
|
|
|
cancel_delayed_work_sync(&tcmu_unmap_work);
|
2015-05-10 19:14:56 +03:00
|
|
|
target_backend_unregister(&tcmu_ops);
|
2017-03-19 01:04:13 +03:00
|
|
|
kfree(tcmu_attrs);
|
2014-10-02 03:07:05 +04:00
|
|
|
genl_unregister_family(&tcmu_genl_family);
|
|
|
|
root_device_unregister(tcmu_root_device);
|
|
|
|
kmem_cache_destroy(tcmu_cmd_cache);
|
|
|
|
}
|
|
|
|
|
|
|
|
MODULE_DESCRIPTION("TCM USER subsystem plugin");
|
|
|
|
MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
|
|
|
|
MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
|
|
|
|
MODULE_LICENSE("GPL");
|
|
|
|
|
|
|
|
module_init(tcmu_module_init);
|
|
|
|
module_exit(tcmu_module_exit);
|