lightnvm: pblk: rename read request pool

Read requests allocate some extra memory to store their per-I/O
context. Instead of requiring yet another memory pool for other types
of requests, generalize this context allocation (and change the naming
accordingly).

Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <matias@cnexlabs.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Javier González 2017-06-26 16:27:13 -06:00, committed by Jens Axboe
Parent d624f371d5
Commit 084ec9ba07
5 changed files with 38 additions and 37 deletions
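In short, the context that pblk used to attach only to read requests becomes a generic per-I/O context that any non-write request (read, erase, recovery) can carry, so a single mempool covers them all. A condensed sketch of the before/after shapes, taken from the diff below rather than written as standalone, compilable code (struct bio and struct nvm_rq come from the kernel block and lightnvm headers):

/* Before: the context was read-specific and hard-coded the bio pointer. */
struct pblk_r_ctx {
	struct bio *orig_bio;	/* original user bio when the read is cloned */
};

/*
 * After: a generic context. The read path stashes its original bio in the
 * opaque private field; other request types are free to ignore it.
 */
struct pblk_g_ctx {
	void *private;
};

/*
 * The per-request allocation size still covers the nvm_rq plus the context
 * that lives behind it; only the name changes.
 */
#define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx))

With that in place, the erase and recovery paths that previously borrowed pblk->r_rq_pool and pblk_r_rq_size switch to pblk->g_rq_pool and pblk_g_rq_size, and the read completion path recovers the original bio from r_ctx->private instead of r_ctx->orig_bio, as the hunks below show.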


@@ -62,7 +62,7 @@ static void pblk_end_io_erase(struct nvm_rq *rqd)
 	struct pblk *pblk = rqd->private;
 	__pblk_end_io_erase(pblk, rqd);
-	mempool_free(rqd, pblk->r_rq_pool);
+	mempool_free(rqd, pblk->g_rq_pool);
 }
 static void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
@@ -171,8 +171,8 @@ struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw)
 		pool = pblk->w_rq_pool;
 		rq_size = pblk_w_rq_size;
 	} else {
-		pool = pblk->r_rq_pool;
-		rq_size = pblk_r_rq_size;
+		pool = pblk->g_rq_pool;
+		rq_size = pblk_g_rq_size;
 	}
 	rqd = mempool_alloc(pool, GFP_KERNEL);
@@ -188,7 +188,7 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw)
 	if (rw == WRITE)
 		pool = pblk->w_rq_pool;
 	else
-		pool = pblk->r_rq_pool;
+		pool = pblk->g_rq_pool;
 	mempool_free(rqd, pool);
 }
@@ -1343,8 +1343,8 @@ int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
 	struct nvm_rq *rqd;
 	int err;
-	rqd = mempool_alloc(pblk->r_rq_pool, GFP_KERNEL);
-	memset(rqd, 0, pblk_r_rq_size);
+	rqd = mempool_alloc(pblk->g_rq_pool, GFP_KERNEL);
+	memset(rqd, 0, pblk_g_rq_size);
 	pblk_setup_e_rq(pblk, rqd, ppa);


@@ -20,8 +20,8 @@
 #include "pblk.h"
-static struct kmem_cache *pblk_blk_ws_cache, *pblk_rec_cache, *pblk_r_rq_cache,
-				*pblk_w_rq_cache, *pblk_line_meta_cache;
+static struct kmem_cache *pblk_blk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
+				*pblk_w_rq_cache, *pblk_line_meta_cache;
 static DECLARE_RWSEM(pblk_lock);
 struct bio_set *pblk_bio_set;
@@ -200,9 +200,9 @@ static int pblk_init_global_caches(struct pblk *pblk)
 		return -ENOMEM;
 	}
-	pblk_r_rq_cache = kmem_cache_create("pblk_r_rq", pblk_r_rq_size,
+	pblk_g_rq_cache = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
				0, 0, NULL);
-	if (!pblk_r_rq_cache) {
+	if (!pblk_g_rq_cache) {
 		kmem_cache_destroy(pblk_blk_ws_cache);
 		kmem_cache_destroy(pblk_rec_cache);
 		up_write(&pblk_lock);
@@ -214,7 +214,7 @@ static int pblk_init_global_caches(struct pblk *pblk)
 	if (!pblk_w_rq_cache) {
 		kmem_cache_destroy(pblk_blk_ws_cache);
 		kmem_cache_destroy(pblk_rec_cache);
-		kmem_cache_destroy(pblk_r_rq_cache);
+		kmem_cache_destroy(pblk_g_rq_cache);
 		up_write(&pblk_lock);
 		return -ENOMEM;
 	}
@@ -226,7 +226,7 @@ static int pblk_init_global_caches(struct pblk *pblk)
 	if (!pblk_line_meta_cache) {
 		kmem_cache_destroy(pblk_blk_ws_cache);
 		kmem_cache_destroy(pblk_rec_cache);
-		kmem_cache_destroy(pblk_r_rq_cache);
+		kmem_cache_destroy(pblk_g_rq_cache);
 		kmem_cache_destroy(pblk_w_rq_cache);
 		up_write(&pblk_lock);
 		return -ENOMEM;
@@ -279,13 +279,13 @@ static int pblk_core_init(struct pblk *pblk)
 	if (!pblk->rec_pool)
 		goto free_blk_ws_pool;
-	pblk->r_rq_pool = mempool_create_slab_pool(64, pblk_r_rq_cache);
-	if (!pblk->r_rq_pool)
+	pblk->g_rq_pool = mempool_create_slab_pool(64, pblk_g_rq_cache);
+	if (!pblk->g_rq_pool)
 		goto free_rec_pool;
 	pblk->w_rq_pool = mempool_create_slab_pool(64, pblk_w_rq_cache);
 	if (!pblk->w_rq_pool)
-		goto free_r_rq_pool;
+		goto free_g_rq_pool;
 	pblk->line_meta_pool =
			mempool_create_slab_pool(16, pblk_line_meta_cache);
@@ -312,8 +312,8 @@ free_line_meta_pool:
 	mempool_destroy(pblk->line_meta_pool);
 free_w_rq_pool:
 	mempool_destroy(pblk->w_rq_pool);
-free_r_rq_pool:
-	mempool_destroy(pblk->r_rq_pool);
+free_g_rq_pool:
+	mempool_destroy(pblk->g_rq_pool);
 free_rec_pool:
 	mempool_destroy(pblk->rec_pool);
 free_blk_ws_pool:
@@ -331,13 +331,13 @@ static void pblk_core_free(struct pblk *pblk)
 	mempool_destroy(pblk->page_pool);
 	mempool_destroy(pblk->line_ws_pool);
 	mempool_destroy(pblk->rec_pool);
-	mempool_destroy(pblk->r_rq_pool);
+	mempool_destroy(pblk->g_rq_pool);
 	mempool_destroy(pblk->w_rq_pool);
 	mempool_destroy(pblk->line_meta_pool);
 	kmem_cache_destroy(pblk_blk_ws_cache);
 	kmem_cache_destroy(pblk_rec_cache);
-	kmem_cache_destroy(pblk_r_rq_cache);
+	kmem_cache_destroy(pblk_g_rq_cache);
 	kmem_cache_destroy(pblk_w_rq_cache);
 	kmem_cache_destroy(pblk_line_meta_cache);
 }


@@ -110,7 +110,7 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
 {
 	struct pblk *pblk = rqd->private;
 	struct nvm_tgt_dev *dev = pblk->dev;
-	struct pblk_r_ctx *r_ctx = nvm_rq_to_pdu(rqd);
+	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
 	struct bio *bio = rqd->bio;
 	if (rqd->error)
@@ -124,13 +124,14 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
 		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
 	bio_put(bio);
-	if (r_ctx->orig_bio) {
+	if (r_ctx->private) {
+		struct bio *orig_bio = r_ctx->private;
 #ifdef CONFIG_NVM_DEBUG
-		WARN_ONCE(r_ctx->orig_bio->bi_status,
-					"pblk: corrupted read bio\n");
+		WARN_ONCE(orig_bio->bi_status, "pblk: corrupted read bio\n");
 #endif
-		bio_endio(r_ctx->orig_bio);
-		bio_put(r_ctx->orig_bio);
+		bio_endio(orig_bio);
+		bio_put(orig_bio);
 	}
 #ifdef CONFIG_NVM_DEBUG
@@ -345,7 +346,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
 	/* All sectors are to be read from the device */
 	if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
 		struct bio *int_bio = NULL;
-		struct pblk_r_ctx *r_ctx = nvm_rq_to_pdu(rqd);
+		struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
 		/* Clone read bio to deal with read errors internally */
 		int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
@@ -355,7 +356,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
 		}
 		rqd->bio = int_bio;
-		r_ctx->orig_bio = bio;
+		r_ctx->private = bio;
 		ret = pblk_submit_read_io(pblk, rqd);
 		if (ret) {


@@ -240,7 +240,7 @@ static int pblk_recov_read_oob(struct pblk *pblk, struct pblk_line *line,
 	r_ptr_int = r_ptr;
 next_read_rq:
-	memset(rqd, 0, pblk_r_rq_size);
+	memset(rqd, 0, pblk_g_rq_size);
 	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
 	if (!rq_ppas)
@@ -361,7 +361,7 @@ next_pad_rq:
 	bio->bi_iter.bi_sector = 0; /* internal bio */
 	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-	memset(rqd, 0, pblk_r_rq_size);
+	memset(rqd, 0, pblk_g_rq_size);
 	rqd->bio = bio;
 	rqd->opcode = NVM_OP_PWRITE;
@@ -456,7 +456,7 @@ static int pblk_recov_scan_all_oob(struct pblk *pblk, struct pblk_line *line,
 	rec_round = 0;
 next_rq:
-	memset(rqd, 0, pblk_r_rq_size);
+	memset(rqd, 0, pblk_g_rq_size);
 	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
 	if (!rq_ppas)
@@ -591,7 +591,7 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
 	*done = 1;
 next_rq:
-	memset(rqd, 0, pblk_r_rq_size);
+	memset(rqd, 0, pblk_g_rq_size);
 	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
 	if (!rq_ppas)


@@ -91,7 +91,7 @@ struct pblk_sec_meta {
 #define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * PBLK_MAX_REQ_ADDRS)
-/* write completion context */
+/* write buffer completion context */
 struct pblk_c_ctx {
 	struct list_head list;		/* Head for out-of-order completion */
@@ -101,9 +101,9 @@ struct pblk_c_ctx {
 	unsigned int nr_padded;
 };
-/* Read context */
-struct pblk_r_ctx {
-	struct bio *orig_bio;
+/* generic context */
+struct pblk_g_ctx {
+	void *private;
 };
 /* Recovery context */
@@ -543,7 +543,7 @@ struct pblk {
 	mempool_t *page_pool;
 	mempool_t *line_ws_pool;
 	mempool_t *rec_pool;
-	mempool_t *r_rq_pool;
+	mempool_t *g_rq_pool;
 	mempool_t *w_rq_pool;
 	mempool_t *line_meta_pool;
@@ -560,7 +560,7 @@ struct pblk_line_ws {
 	struct work_struct ws;
 };
-#define pblk_r_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_r_ctx))
+#define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx))
 #define pblk_w_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx))
 /*