s390/scm_block: handle multiple requests in one HW request
Handle up to 8 block layer requests per HW request. These requests can be processed in parallel on the device, leading to better throughput (and fewer interrupts). The overhead for the additional requests is small, since we don't blindly allocate new aidaws but try to use what's left of the previous one.

Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Parent: de88d0d28f
Commit: bbc610a965
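For orientation before the diff: one scm_request now carries an array of block layer requests, and each of them gets its own msb slot in the shared AOB, which is submitted to the hardware once for the whole batch. The following is a simplified, hedged sketch of that layout and of what scm_request_set()/scm_request_prepare() do with it; it is an illustration only, not the driver code (the names scm_request_sketch and sketch_add_request are made up here).

/*
 * Illustration only: one HW request (aob) batches up to SCM_RQ_PER_IO
 * block layer requests; request[i] is described by aob->msb[i].
 */
#define SCM_RQ_PER_IO 8

struct scm_request_sketch {
        struct request *request[SCM_RQ_PER_IO]; /* batched block requests */
        struct aob *aob;                        /* one HW request block for all of them */
        struct aidaw *next_aidaw;               /* next free data-address slot */
};

static void sketch_add_request(struct scm_request_sketch *scmrq,
                               struct request *req)
{
        int pos = scmrq->aob->request.msb_count;

        scmrq->request[pos] = req;      /* what scm_request_set() does */
        /* scm_request_prepare() then fills aob->msb[pos], records the data
         * pages in aidaws, and increments msb_count; eadm_start_aob() is
         * called once for the whole batch. */
}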
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -117,13 +117,19 @@ out:
 
 static void scm_request_done(struct scm_request *scmrq)
 {
-        struct msb *msb = &scmrq->aob->msb[0];
-        u64 aidaw = msb->data_addr;
         unsigned long flags;
+        struct msb *msb;
+        u64 aidaw;
+        int i;
 
-        if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
-            IS_ALIGNED(aidaw, PAGE_SIZE))
-                mempool_free(virt_to_page(aidaw), aidaw_pool);
+        for (i = 0; i < SCM_RQ_PER_IO && scmrq->request[i]; i++) {
+                msb = &scmrq->aob->msb[i];
+                aidaw = msb->data_addr;
+
+                if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
+                    IS_ALIGNED(aidaw, PAGE_SIZE))
+                        mempool_free(virt_to_page(aidaw), aidaw_pool);
+        }
 
         spin_lock_irqsave(&list_lock, flags);
         list_add(&scmrq->list, &inactive_requests);
@@ -167,51 +173,57 @@ static int scm_request_prepare(struct scm_request *scmrq)
 {
         struct scm_blk_dev *bdev = scmrq->bdev;
         struct scm_device *scmdev = bdev->gendisk->private_data;
-        struct msb *msb = &scmrq->aob->msb[0];
+        int pos = scmrq->aob->request.msb_count;
+        struct msb *msb = &scmrq->aob->msb[pos];
+        struct request *req = scmrq->request[pos];
         struct req_iterator iter;
         struct aidaw *aidaw;
         struct bio_vec bv;
 
-        aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(scmrq->request));
+        aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
         if (!aidaw)
                 return -ENOMEM;
 
         msb->bs = MSB_BS_4K;
-        scmrq->aob->request.msb_count = 1;
-        msb->scm_addr = scmdev->address +
-                ((u64) blk_rq_pos(scmrq->request) << 9);
-        msb->oc = (rq_data_dir(scmrq->request) == READ) ?
-                MSB_OC_READ : MSB_OC_WRITE;
+        scmrq->aob->request.msb_count++;
+        msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
+        msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
         msb->flags |= MSB_FLAG_IDA;
         msb->data_addr = (u64) aidaw;
 
-        rq_for_each_segment(bv, scmrq->request, iter) {
+        rq_for_each_segment(bv, req, iter) {
                 WARN_ON(bv.bv_offset);
                 msb->blk_count += bv.bv_len >> 12;
                 aidaw->data_addr = (u64) page_address(bv.bv_page);
                 aidaw++;
         }
 
+        scmrq->next_aidaw = aidaw;
         return 0;
 }
 
+static inline void scm_request_set(struct scm_request *scmrq,
+                                   struct request *req)
+{
+        scmrq->request[scmrq->aob->request.msb_count] = req;
+}
+
 static inline void scm_request_init(struct scm_blk_dev *bdev,
-                                    struct scm_request *scmrq,
-                                    struct request *req)
+                                    struct scm_request *scmrq)
 {
         struct aob_rq_header *aobrq = to_aobrq(scmrq);
         struct aob *aob = scmrq->aob;
 
+        memset(scmrq->request, 0, sizeof(scmrq->request));
         memset(aob, 0, sizeof(*aob));
         aobrq->scmdev = bdev->scmdev;
         aob->request.cmd_code = ARQB_CMD_MOVE;
         aob->request.data = (u64) aobrq;
-        scmrq->request = req;
         scmrq->bdev = bdev;
         scmrq->retries = 4;
         scmrq->error = 0;
         /* We don't use all msbs - place aidaws at the end of the aob page. */
-        scmrq->next_aidaw = (void *) &aob->msb[1];
+        scmrq->next_aidaw = (void *) &aob->msb[SCM_RQ_PER_IO];
         scm_request_cluster_init(scmrq);
 }
 
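One detail worth spelling out from the hunk above: scm_request_prepare() now stores its end position in scmrq->next_aidaw, so the next request added to the same scm_request can keep filling the aidaw page that is already there (initially the unused tail of the AOB page, as the comment in scm_request_init() notes). scm_aidaw_fetch() itself is not part of this diff; the sketch below shows one plausible way such a reuse check could look and is an assumption, not the driver's implementation.

/*
 * Sketch (assumption): reuse the remainder of the current aidaw page if it
 * still has room for this request, otherwise fall back to the mempool.
 * One aidaw entry describes one 4K page of request data.
 */
static struct aidaw *sketch_aidaw_fetch(struct scm_request *scmrq,
                                        unsigned int bytes)
{
        unsigned int needed = DIV_ROUND_UP(bytes, PAGE_SIZE) * sizeof(struct aidaw);
        unsigned int left = PAGE_SIZE - offset_in_page(scmrq->next_aidaw);
        struct page *page;

        if (needed <= left)
                return scmrq->next_aidaw;       /* what's left of the previous page */

        page = mempool_alloc(aidaw_pool, GFP_ATOMIC);
        return page ? page_address(page) : NULL;
}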
@@ -227,9 +239,12 @@ static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
 void scm_request_requeue(struct scm_request *scmrq)
 {
         struct scm_blk_dev *bdev = scmrq->bdev;
+        int i;
 
         scm_release_cluster(scmrq);
-        blk_requeue_request(bdev->rq, scmrq->request);
+        for (i = 0; i < SCM_RQ_PER_IO && scmrq->request[i]; i++)
+                blk_requeue_request(bdev->rq, scmrq->request[i]);
+
         atomic_dec(&bdev->queued_reqs);
         scm_request_done(scmrq);
         scm_ensure_queue_restart(bdev);
@@ -238,20 +253,41 @@ void scm_request_requeue(struct scm_request *scmrq)
 void scm_request_finish(struct scm_request *scmrq)
 {
         struct scm_blk_dev *bdev = scmrq->bdev;
+        int i;
 
         scm_release_cluster(scmrq);
-        blk_end_request_all(scmrq->request, scmrq->error);
+        for (i = 0; i < SCM_RQ_PER_IO && scmrq->request[i]; i++)
+                blk_end_request_all(scmrq->request[i], scmrq->error);
+
         atomic_dec(&bdev->queued_reqs);
         scm_request_done(scmrq);
 }
 
+static int scm_request_start(struct scm_request *scmrq)
+{
+        struct scm_blk_dev *bdev = scmrq->bdev;
+        int ret;
+
+        atomic_inc(&bdev->queued_reqs);
+        if (!scmrq->aob->request.msb_count) {
+                scm_request_requeue(scmrq);
+                return -EINVAL;
+        }
+
+        ret = eadm_start_aob(scmrq->aob);
+        if (ret) {
+                SCM_LOG(5, "no subchannel");
+                scm_request_requeue(scmrq);
+        }
+        return ret;
+}
+
 static void scm_blk_request(struct request_queue *rq)
 {
         struct scm_device *scmdev = rq->queuedata;
         struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
-        struct scm_request *scmrq;
+        struct scm_request *scmrq = NULL;
         struct request *req;
-        int ret;
 
         while ((req = blk_peek_request(rq))) {
                 if (req->cmd_type != REQ_TYPE_FS) {
@@ -261,47 +297,64 @@ static void scm_blk_request(struct request_queue *rq)
                         continue;
                 }
 
-                if (!scm_permit_request(bdev, req)) {
-                        scm_ensure_queue_restart(bdev);
-                        return;
-                }
-                scmrq = scm_request_fetch();
+                if (!scm_permit_request(bdev, req))
+                        goto out;
+
                 if (!scmrq) {
-                        SCM_LOG(5, "no request");
-                        scm_ensure_queue_restart(bdev);
-                        return;
+                        scmrq = scm_request_fetch();
+                        if (!scmrq) {
+                                SCM_LOG(5, "no request");
+                                goto out;
+                        }
+                        scm_request_init(bdev, scmrq);
                 }
-                scm_request_init(bdev, scmrq, req);
+                scm_request_set(scmrq, req);
+
                 if (!scm_reserve_cluster(scmrq)) {
                         SCM_LOG(5, "cluster busy");
+                        scm_request_set(scmrq, NULL);
+                        if (scmrq->aob->request.msb_count)
+                                goto out;
+
                         scm_request_done(scmrq);
                         return;
                 }
+
                 if (scm_need_cluster_request(scmrq)) {
-                        atomic_inc(&bdev->queued_reqs);
-                        blk_start_request(req);
-                        scm_initiate_cluster_request(scmrq);
-                        return;
+                        if (scmrq->aob->request.msb_count) {
+                                /* Start cluster requests separately. */
+                                scm_request_set(scmrq, NULL);
+                                if (scm_request_start(scmrq))
+                                        return;
+                        } else {
+                                atomic_inc(&bdev->queued_reqs);
+                                blk_start_request(req);
+                                scm_initiate_cluster_request(scmrq);
+                        }
+                        scmrq = NULL;
+                        continue;
                 }
+
                 if (scm_request_prepare(scmrq)) {
-                        SCM_LOG(5, "no aidaw");
-                        scm_release_cluster(scmrq);
-                        scm_request_done(scmrq);
-                        scm_ensure_queue_restart(bdev);
-                        return;
+                        SCM_LOG(5, "aidaw alloc failed");
+                        scm_request_set(scmrq, NULL);
+                        goto out;
                 }
-                atomic_inc(&bdev->queued_reqs);
                 blk_start_request(req);
 
-                ret = eadm_start_aob(scmrq->aob);
-                if (ret) {
-                        SCM_LOG(5, "no subchannel");
-                        scm_request_requeue(scmrq);
-                        return;
-                }
+                if (scmrq->aob->request.msb_count < SCM_RQ_PER_IO)
+                        continue;
+
+                if (scm_request_start(scmrq))
+                        return;
+
+                scmrq = NULL;
         }
+out:
+        if (scmrq)
+                scm_request_start(scmrq);
+        else
+                scm_ensure_queue_restart(bdev);
 }
 
 static void __scmrq_log_error(struct scm_request *scmrq)
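Taken together, scm_blk_request() above changes from "one request, one submit" to "accumulate, then submit": block requests are attached to the current scmrq until the queue runs dry or all SCM_RQ_PER_IO msb slots are used, and a partially filled scmrq is flushed at the out: label. Below is a condensed restatement of that control flow with the cluster and error-handling branches omitted for brevity; it is a sketch of the loop shown in the hunk, not the code as committed.

/* Sketch: the batching loop of scm_blk_request(), cluster/error paths omitted. */
while ((req = blk_peek_request(rq))) {
        if (!scmrq) {                           /* open a new batch */
                scmrq = scm_request_fetch();
                if (!scmrq)
                        goto out;
                scm_request_init(bdev, scmrq);
        }
        scm_request_set(scmrq, req);            /* slot req into the batch */
        if (scm_request_prepare(scmrq)) {       /* fill the next msb + aidaws */
                scm_request_set(scmrq, NULL);
                goto out;
        }
        blk_start_request(req);

        if (scmrq->aob->request.msb_count < SCM_RQ_PER_IO)
                continue;                       /* room left: keep collecting */
        if (scm_request_start(scmrq))           /* batch full: submit the aob */
                return;
        scmrq = NULL;
}
out:
        if (scmrq)
                scm_request_start(scmrq);       /* submit the partial batch */
        else
                scm_ensure_queue_restart(bdev);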
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -11,6 +11,7 @@
 #include <asm/eadm.h>
 
 #define SCM_NR_PARTS 8
+#define SCM_RQ_PER_IO 8
 #define SCM_QUEUE_DELAY 5
 
 struct scm_blk_dev {
@@ -31,7 +32,7 @@ struct scm_blk_dev {
 struct scm_request {
         struct scm_blk_dev *bdev;
         struct aidaw *next_aidaw;
-        struct request *request;
+        struct request *request[SCM_RQ_PER_IO];
         struct aob *aob;
         struct list_head list;
         u8 retries;
--- a/drivers/s390/block/scm_blk_cluster.c
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -57,39 +57,52 @@ void scm_request_cluster_init(struct scm_request *scmrq)
         scmrq->cluster.state = CLUSTER_NONE;
 }
 
-static bool clusters_intersect(struct scm_request *A, struct scm_request *B)
+static bool clusters_intersect(struct request *A, struct request *B)
 {
         unsigned long firstA, lastA, firstB, lastB;
 
-        firstA = ((u64) blk_rq_pos(A->request) << 9) / CLUSTER_SIZE;
-        lastA = (((u64) blk_rq_pos(A->request) << 9) +
-                   blk_rq_bytes(A->request) - 1) / CLUSTER_SIZE;
+        firstA = ((u64) blk_rq_pos(A) << 9) / CLUSTER_SIZE;
+        lastA = (((u64) blk_rq_pos(A) << 9) +
+                   blk_rq_bytes(A) - 1) / CLUSTER_SIZE;
 
-        firstB = ((u64) blk_rq_pos(B->request) << 9) / CLUSTER_SIZE;
-        lastB = (((u64) blk_rq_pos(B->request) << 9) +
-                   blk_rq_bytes(B->request) - 1) / CLUSTER_SIZE;
+        firstB = ((u64) blk_rq_pos(B) << 9) / CLUSTER_SIZE;
+        lastB = (((u64) blk_rq_pos(B) << 9) +
+                   blk_rq_bytes(B) - 1) / CLUSTER_SIZE;
 
         return (firstB <= lastA && firstA <= lastB);
 }
 
 bool scm_reserve_cluster(struct scm_request *scmrq)
 {
+        struct request *req = scmrq->request[scmrq->aob->request.msb_count];
         struct scm_blk_dev *bdev = scmrq->bdev;
         struct scm_request *iter;
+        int pos, add = 1;
 
         if (write_cluster_size == 0)
                 return true;
 
         spin_lock(&bdev->lock);
         list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
-                if (clusters_intersect(scmrq, iter) &&
-                    (rq_data_dir(scmrq->request) == WRITE ||
-                     rq_data_dir(iter->request) == WRITE)) {
-                        spin_unlock(&bdev->lock);
-                        return false;
+                if (iter == scmrq) {
+                        /*
+                         * We don't have to use clusters_intersect here, since
+                         * cluster requests are always started separately.
+                         */
+                        add = 0;
+                        continue;
+                }
+                for (pos = 0; pos <= iter->aob->request.msb_count; pos++) {
+                        if (clusters_intersect(req, iter->request[pos]) &&
+                            (rq_data_dir(req) == WRITE ||
+                             rq_data_dir(iter->request[pos]) == WRITE)) {
+                                spin_unlock(&bdev->lock);
+                                return false;
+                        }
                 }
         }
-        list_add(&scmrq->cluster.list, &bdev->cluster_list);
+        if (add)
+                list_add(&scmrq->cluster.list, &bdev->cluster_list);
         spin_unlock(&bdev->lock);
 
         return true;
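clusters_intersect() now works on two struct request directly, and scm_reserve_cluster() checks the request being added against every request already slotted into the other in-flight scm_requests. A small worked example of the index arithmetic, assuming (for illustration only, since CLUSTER_SIZE depends on the write_cluster_size module parameter) a CLUSTER_SIZE of 64 KiB:

/* Worked example of the clusters_intersect() arithmetic, CLUSTER_SIZE = 64 KiB assumed. */
firstA = ((u64) 120 << 9) / SZ_64K;                     /* sector 120 -> byte 61440  -> cluster 0 */
lastA  = (((u64) 120 << 9) + 8192 - 1) / SZ_64K;        /* 8 KiB request ends at byte 69631 -> cluster 1 */
firstB = ((u64) 256 << 9) / SZ_64K;                     /* sector 256 -> byte 131072 -> cluster 2 */
/* firstB (2) > lastA (1), so the two requests touch disjoint clusters and a
 * write among them does not block the reservation. */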
@@ -118,7 +131,7 @@ static int scm_prepare_cluster_request(struct scm_request *scmrq)
 {
         struct scm_blk_dev *bdev = scmrq->bdev;
         struct scm_device *scmdev = bdev->gendisk->private_data;
-        struct request *req = scmrq->request;
+        struct request *req = scmrq->request[0];
         struct msb *msb = &scmrq->aob->msb[0];
         struct req_iterator iter;
         struct aidaw *aidaw;
@@ -183,10 +196,12 @@ static int scm_prepare_cluster_request(struct scm_request *scmrq)
 
 bool scm_need_cluster_request(struct scm_request *scmrq)
 {
-        if (rq_data_dir(scmrq->request) == READ)
+        int pos = scmrq->aob->request.msb_count;
+
+        if (rq_data_dir(scmrq->request[pos]) == READ)
                 return false;
 
-        return blk_rq_bytes(scmrq->request) < CLUSTER_SIZE;
+        return blk_rq_bytes(scmrq->request[pos]) < CLUSTER_SIZE;
 }
 
 /* Called with queue lock held. */
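The updated scm_need_cluster_request() applies the existing rule to the request currently being added (index msb_count): only writes smaller than one cluster need the read-modify-write cluster path, which scm_blk_request() then starts separately. A minimal sketch of that decision, again assuming a 64 KiB CLUSTER_SIZE for illustration (the helper name is made up):

/* Sketch of the decision, with CLUSTER_SIZE assumed to be 64 KiB. */
static bool sketch_need_cluster(unsigned int bytes, int data_dir)
{
        if (data_dir == READ)
                return false;           /* reads never need the cluster path */
        return bytes < SZ_64K;          /* sub-cluster writes do */
}
/* sketch_need_cluster(4096, WRITE)   -> true  (read-modify-write needed)   */
/* sketch_need_cluster(131072, WRITE) -> false (request spans full clusters) */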