lightnvm: manage lun partitions internally in mm

LUNs are exclusively owned by targets implementing a block device FTL.
At the moment, performing this reservation requires a 2-way callback between
gennvm and the target. The reason behind this is that LUNs were not assumed to
always be exclusively owned by targets. However, this design decision
goes against I/O determinism QoS (two targets would mix I/O on the same
parallel unit in the device).

This patch makes LUN reservation part of target creation on the media
manager. This ensures that LUNs are always exclusively owned by the
target instantiated on top of them. LUN striping and/or sharing should
be implemented in the target itself or in the layers on top.

Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
This commit is contained in:
Javier González 2016-11-28 22:39:05 +01:00 committed by Jens Axboe
Parent de93434fcf
Commit 8176117b82
3 changed files: 51 additions and 24 deletions

View file

@ -35,6 +35,45 @@ static const struct block_device_operations gen_fops = {
.owner = THIS_MODULE,
};
/**
 * gen_reserve_luns - exclusively reserve a contiguous LUN range for a target
 * @dev:       device whose lun_map tracks per-LUN ownership bits
 * @t:         target that will own the reserved LUNs (linked on t->lun_list)
 * @lun_begin: first LUN id to reserve (inclusive)
 * @lun_end:   last LUN id to reserve (inclusive)
 *
 * Atomically claims each LUN bit in dev->lun_map and links the LUN onto the
 * target's lun_list. On conflict, all LUNs reserved so far are rolled back.
 *
 * Return: 0 on success, -EBUSY if any LUN in the range is already taken.
 */
static int gen_reserve_luns(struct nvm_dev *dev, struct nvm_target *t,
			    int lun_begin, int lun_end)
{
	struct gen_dev *gn = dev->mp;
	struct nvm_lun *lun;
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("nvm: lun %d already allocated\n", i);
			goto err;
		}

		lun = &gn->luns[i];
		list_add_tail(&lun->list, &t->lun_list);
	}

	return 0;

err:
	/*
	 * Roll back every LUN reserved in this call. Use >= so that
	 * lun_begin itself is also released; with '>' the first LUN's
	 * map bit and list linkage would be leaked.
	 */
	while (--i >= lun_begin) {
		lun = &gn->luns[i];
		clear_bit(i, dev->lun_map);
		list_del(&lun->list);
	}

	return -EBUSY;
}
/**
 * gen_release_luns - return all of a target's LUNs to the free pool
 * @dev: device whose lun_map holds the per-LUN ownership bits
 * @t:   target whose lun_list is drained
 *
 * Clears each LUN's ownership bit and unlinks it from the target. A LUN
 * found on the list without its bit set indicates an accounting bug, so
 * warn about it.
 */
static void gen_release_luns(struct nvm_dev *dev, struct nvm_target *t)
{
	struct nvm_lun *cur, *next;

	/* Safe iterator: entries are unlinked while walking the list. */
	list_for_each_entry_safe(cur, next, &t->lun_list, list) {
		WARN_ON(!test_and_clear_bit(cur->id, dev->lun_map));
		list_del(&cur->list);
	}
}
static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
struct gen_dev *gn = dev->mp;
@ -64,9 +103,14 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
if (!t)
return -ENOMEM;
INIT_LIST_HEAD(&t->lun_list);
if (gen_reserve_luns(dev, t, s->lun_begin, s->lun_end))
goto err_t;
tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
if (!tqueue)
goto err_t;
goto err_reserve;
blk_queue_make_request(tqueue, tt->make_rq);
tdisk = alloc_disk(0);
@ -105,6 +149,8 @@ err_init:
put_disk(tdisk);
err_queue:
blk_cleanup_queue(tqueue);
err_reserve:
gen_release_luns(dev, t);
err_t:
kfree(t);
return -ENOMEM;
@ -122,6 +168,7 @@ static void __gen_remove_target(struct nvm_target *t)
if (tt->exit)
tt->exit(tdisk->private_data);
gen_release_luns(t->dev, t);
put_disk(tdisk);
list_del(&t->list);
@ -253,6 +300,7 @@ static int gen_luns_init(struct nvm_dev *dev, struct gen_dev *gn)
INIT_LIST_HEAD(&lun->free_list);
INIT_LIST_HEAD(&lun->used_list);
INIT_LIST_HEAD(&lun->bb_list);
INIT_LIST_HEAD(&lun->list);
spin_lock_init(&lun->lock);
@ -569,16 +617,6 @@ static int gen_erase_blk(struct nvm_dev *dev, struct nvm_block *blk, int flags)
return nvm_erase_ppa(dev, &addr, 1, flags);
}
/*
 * Atomically claim a single LUN in the device ownership bitmap.
 * Returns the previous bit value: non-zero means the LUN was
 * already reserved by someone else.
 */
static int gen_reserve_lun(struct nvm_dev *dev, int lunid)
{
	int was_reserved;

	was_reserved = test_and_set_bit(lunid, dev->lun_map);

	return was_reserved;
}
/*
 * Drop the ownership bit for a single LUN. Releasing a LUN that was
 * never reserved points at an accounting bug, hence the WARN_ON.
 */
static void gen_release_lun(struct nvm_dev *dev, int lunid)
{
	int was_reserved;

	was_reserved = test_and_clear_bit(lunid, dev->lun_map);
	WARN_ON(!was_reserved);
}
static struct nvm_lun *gen_get_lun(struct nvm_dev *dev, int lunid)
{
struct gen_dev *gn = dev->mp;
@ -625,8 +663,6 @@ static struct nvmm_type gen = {
.mark_blk = gen_mark_blk,
.get_lun = gen_get_lun,
.reserve_lun = gen_reserve_lun,
.release_lun = gen_release_lun,
.lun_info_print = gen_lun_info_print,
.get_area = gen_get_area,

View file

@ -1126,7 +1126,6 @@ static void rrpc_core_free(struct rrpc *rrpc)
static void rrpc_luns_free(struct rrpc *rrpc)
{
struct nvm_dev *dev = rrpc->dev;
struct nvm_lun *lun;
struct rrpc_lun *rlun;
int i;
@ -1139,7 +1138,6 @@ static void rrpc_luns_free(struct rrpc *rrpc)
lun = rlun->parent;
if (!lun)
break;
dev->mt->release_lun(dev, lun->id);
vfree(rlun->blocks);
}
@ -1169,11 +1167,6 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
int lunid = lun_begin + i;
struct nvm_lun *lun;
if (dev->mt->reserve_lun(dev, lunid)) {
pr_err("rrpc: lun %u is already allocated\n", lunid);
goto err;
}
lun = dev->mt->get_lun(dev, lunid);
if (!lun)
goto err;

View file

@ -210,6 +210,7 @@ struct nvm_id {
struct nvm_target {
struct list_head list;
struct list_head lun_list;
struct nvm_dev *dev;
struct nvm_tgt_type *type;
struct gendisk *disk;
@ -273,6 +274,7 @@ struct nvm_lun {
int lun_id;
int chnl_id;
struct list_head list;
spinlock_t lock;
/* lun block lists */
@ -521,8 +523,6 @@ typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, int);
typedef void (nvmm_mark_blk_fn)(struct nvm_dev *, struct ppa_addr, int);
typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
typedef int (nvmm_reserve_lun)(struct nvm_dev *, int);
typedef void (nvmm_release_lun)(struct nvm_dev *, int);
typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
@ -550,8 +550,6 @@ struct nvmm_type {
/* Configuration management */
nvmm_get_lun_fn *get_lun;
nvmm_reserve_lun *reserve_lun;
nvmm_release_lun *release_lun;
/* Statistics */
nvmm_lun_info_print_fn *lun_info_print;