libnvdimm, pmem: use REQ_FUA, REQ_FLUSH for nvdimm_flush()
Given that nvdimm_flush() has higher overhead than wmb_pmem() (pointer chasing through nd_region), and that we otherwise assume the platform has ADR capability when flush hints are not present, move nvdimm_flush() to REQ_FLUSH context. Note that we still arrange for nvdimm_flush() to be called even in the ADR case: we need at least one wmb() fence to push buffered writes in the CPU out to the ADR-protected domain.

Cc: Toshi Kani <toshi.kani@hpe.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Parent: 0c27af60d1
Commit: 7e267a8c79
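For context on the overhead the changelog mentions, here is a minimal sketch of what a flush of this style does, assuming the flush-hint scheme described above; the structure and field names are illustrative, not the exact kernel definitions:

#include <linux/io.h>	/* writeq(), wmb() */

/* Hypothetical stand-in for struct nd_region's flush-hint table. */
struct example_region {
	int ndimms;
	void __iomem **flush_wpq;	/* per-DIMM flush hint, may be NULL */
};

static void example_region_flush(struct example_region *region)
{
	int i;

	/*
	 * Always fence at least once: push buffered CPU writes out
	 * toward the ADR-protected domain, even when the platform
	 * publishes no flush hints.
	 */
	wmb();

	/*
	 * When flush hint addresses exist, poke one per DIMM to drain
	 * its write pending queue; this pointer chasing is the extra
	 * cost relative to a bare wmb_pmem().
	 */
	for (i = 0; i < region->ndimms; i++)
		if (region->flush_wpq && region->flush_wpq[i])
			writeq(1, region->flush_wpq[i]);

	wmb();
}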
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -113,6 +113,11 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 	return rc;
 }
 
+/* account for REQ_FLUSH rename, replace with REQ_PREFLUSH after v4.8-rc1 */
+#ifndef REQ_FLUSH
+#define REQ_FLUSH REQ_PREFLUSH
+#endif
+
 static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 {
 	int rc = 0;
@@ -121,6 +126,10 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 	struct bio_vec bvec;
 	struct bvec_iter iter;
 	struct pmem_device *pmem = q->queuedata;
+	struct nd_region *nd_region = to_region(pmem);
+
+	if (bio->bi_rw & REQ_FLUSH)
+		nvdimm_flush(nd_region);
 
 	do_acct = nd_iostat_start(bio, &start);
 	bio_for_each_segment(bvec, bio, iter) {
@@ -135,8 +144,8 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 	if (do_acct)
 		nd_iostat_end(bio, start);
 
-	if (bio_data_dir(bio))
-		nvdimm_flush(to_region(pmem));
+	if (bio->bi_rw & REQ_FUA)
+		nvdimm_flush(nd_region);
 
 	bio_endio(bio);
 	return BLK_QC_T_NONE;
@@ -149,8 +158,6 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
 	int rc;
 
 	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
-	if (rw & WRITE)
-		nvdimm_flush(to_region(pmem));
 
 	/*
 	 * The ->rw_page interface is subtle and tricky. The core
@@ -279,6 +286,7 @@ static int pmem_attach_disk(struct device *dev,
 		return PTR_ERR(addr);
 	pmem->virt_addr = (void __pmem *) addr;
 
+	blk_queue_write_cache(q, true, true);
 	blk_queue_make_request(q, pmem_make_request);
 	blk_queue_physical_block_size(q, PAGE_SIZE);
 	blk_queue_max_hw_sectors(q, UINT_MAX);
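The blk_queue_write_cache() addition is what makes the new checks reachable: declaring a volatile write cache causes the block layer to issue flush bios, and the second argument advertises that the driver honors REQ_FUA itself rather than having it emulated with a post-flush. A hedged sketch of the overall wiring, using the pre-v4.8 bi_rw flags this patch targets and hypothetical toy_* helpers:

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Hypothetical device context and I/O helpers, for illustration only. */
struct toy_dev;
static void toy_flush(struct toy_dev *dev);
static void toy_do_io(struct toy_dev *dev, struct bio *bio);

static blk_qc_t toy_make_request(struct request_queue *q, struct bio *bio)
{
	struct toy_dev *dev = q->queuedata;

	/* Preflush: make earlier writes durable before this bio's data. */
	if (bio->bi_rw & REQ_FLUSH)
		toy_flush(dev);

	toy_do_io(dev, bio);

	/* FUA: this bio's own data must be durable at completion. */
	if (bio->bi_rw & REQ_FUA)
		toy_flush(dev);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static void toy_setup_queue(struct request_queue *q)
{
	/* wc=true: volatile write cache, so the block layer sends flushes;
	 * fua=true: the driver handles REQ_FUA natively. */
	blk_queue_write_cache(q, true, true);
	blk_queue_make_request(q, toy_make_request);
}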