block: remove BLK_BOUNCE_ISA support

Remove the BLK_BOUNCE_ISA support now that all users are gone.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Link: https://lore.kernel.org/r/20210331073001.46776-7-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Parent: aaff5ebaa2
Commit: ce288e0535
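The change is mechanical at every call site: q->bounce_gfp was GFP_NOIO for ordinary queues and GFP_NOIO | GFP_DMA for ISA-limited ones, so with the ISA case gone every allocation that OR'd in the per-queue mask now passes plain GFP_NOIO. The pattern, taken from the bio-integrity hunk below:

	/* before: fold in the per-queue bounce mask */
	buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);

	/* after: GFP_NOIO alone is correct in the I/O path, since it
	 * keeps reclaim from recursing back into I/O submission */
	buf = kmalloc(len, GFP_NOIO);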
block/bio-integrity.c:

@@ -204,7 +204,6 @@ bool bio_integrity_prep(struct bio *bio)
 {
 	struct bio_integrity_payload *bip;
 	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
-	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
 	void *buf;
 	unsigned long start, end;
 	unsigned int len, nr_pages;
@@ -238,7 +237,7 @@ bool bio_integrity_prep(struct bio *bio)
 
 	/* Allocate kernel buffer for protection data */
 	len = intervals * bi->tuple_size;
-	buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
+	buf = kmalloc(len, GFP_NOIO);
 	status = BLK_STS_RESOURCE;
 	if (unlikely(buf == NULL)) {
 		printk(KERN_ERR "could not allocate integrity buffer\n");
block/blk-map.c:

@@ -181,7 +181,7 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
 
 			i++;
 		} else {
-			page = alloc_page(rq->q->bounce_gfp | gfp_mask);
+			page = alloc_page(GFP_NOIO | gfp_mask);
 			if (!page) {
 				ret = -ENOMEM;
 				goto cleanup;
@@ -486,7 +486,7 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
 		if (bytes > len)
 			bytes = len;
 
-		page = alloc_page(q->bounce_gfp | gfp_mask);
+		page = alloc_page(GFP_NOIO | gfp_mask);
 		if (!page)
 			goto cleanup;
 
block/blk-settings.c:

@@ -103,28 +103,17 @@ EXPORT_SYMBOL(blk_set_stacking_limits);
 void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
 {
 	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
-	int dma = 0;
 
-	q->bounce_gfp = GFP_NOIO;
 #if BITS_PER_LONG == 64
 	/*
 	 * Assume anything <= 4GB can be handled by IOMMU. Actually
 	 * some IOMMUs can handle everything, but I don't know of a
 	 * way to test this here.
 	 */
-	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
-		dma = 1;
 	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
 #else
-	if (b_pfn < blk_max_low_pfn)
-		dma = 1;
 	q->limits.bounce_pfn = b_pfn;
 #endif
-	if (dma) {
-		init_emergency_isa_pool();
-		q->bounce_gfp = GFP_NOIO | GFP_DMA;
-		q->limits.bounce_pfn = b_pfn;
-	}
 }
 EXPORT_SYMBOL(blk_queue_bounce_limit);
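Applying the removals above, the whole function reduces to setting the bounce pfn. For reference, a reconstruction of the post-patch code (derived directly from the hunk, no additional change):

void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
{
	unsigned long b_pfn = max_addr >> PAGE_SHIFT;

#if BITS_PER_LONG == 64
	/* Assume anything <= 4GB can be handled by IOMMU. */
	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
	q->limits.bounce_pfn = b_pfn;
#endif
}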
block/blk.h:

@@ -312,13 +312,8 @@ static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
 #endif
 
 #ifdef CONFIG_BOUNCE
-extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
 #else
-static inline int init_emergency_isa_pool(void)
-{
-	return 0;
-}
 static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 {
 }
block/bounce.c (124 changed lines):
@@ -29,7 +29,7 @@
 #define ISA_POOL_SIZE	16
 
 static struct bio_set bounce_bio_set, bounce_bio_split;
-static mempool_t page_pool, isa_page_pool;
+static mempool_t page_pool;
 
 static void init_bounce_bioset(void)
 {
@@ -89,41 +89,6 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
 
 #endif /* CONFIG_HIGHMEM */
 
-/*
- * allocate pages in the DMA region for the ISA pool
- */
-static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
-{
-	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
-}
-
-static DEFINE_MUTEX(isa_mutex);
-
-/*
- * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
- * as the max address, so check if the pool has already been created.
- */
-int init_emergency_isa_pool(void)
-{
-	int ret;
-
-	mutex_lock(&isa_mutex);
-
-	if (mempool_initialized(&isa_page_pool)) {
-		mutex_unlock(&isa_mutex);
-		return 0;
-	}
-
-	ret = mempool_init(&isa_page_pool, ISA_POOL_SIZE, mempool_alloc_pages_isa,
-			   mempool_free_pages, (void *) 0);
-	BUG_ON(ret);
-
-	pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE);
-	init_bounce_bioset();
-	mutex_unlock(&isa_mutex);
-	return 0;
-}
-
 /*
  * Simple bounce buffer support for highmem pages. Depending on the
  * queue gfp mask set, *to may or may not be a highmem page. kmap it
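The deleted pool was the standard mempool-of-pages pattern, pinned below the 16 MB ISA boundary by OR-ing GFP_DMA into every allocation. The surviving page_pool is initialized the same way elsewhere in bounce.c, minus GFP_DMA; a sketch mirroring the removed code (not the exact surviving initializer, and POOL_SIZE is the highmem pool's size constant):

	/* sketch: highmem bounce pool, no address restriction */
	ret = mempool_init(&page_pool, POOL_SIZE, mempool_alloc_pages,
			   mempool_free_pages, (void *) 0);
	BUG_ON(ret);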
@@ -159,7 +124,7 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
 	}
 }
 
-static void bounce_end_io(struct bio *bio, mempool_t *pool)
+static void bounce_end_io(struct bio *bio)
 {
 	struct bio *bio_orig = bio->bi_private;
 	struct bio_vec *bvec, orig_vec;
@@ -173,7 +138,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool)
 		orig_vec = bio_iter_iovec(bio_orig, orig_iter);
 		if (bvec->bv_page != orig_vec.bv_page) {
 			dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
-			mempool_free(bvec->bv_page, pool);
+			mempool_free(bvec->bv_page, &page_pool);
 		}
 		bio_advance_iter(bio_orig, &orig_iter, orig_vec.bv_len);
 	}
@@ -185,33 +150,17 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool)
 
 static void bounce_end_io_write(struct bio *bio)
 {
-	bounce_end_io(bio, &page_pool);
+	bounce_end_io(bio);
 }
 
-static void bounce_end_io_write_isa(struct bio *bio)
-{
-
-	bounce_end_io(bio, &isa_page_pool);
-}
-
-static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
+static void bounce_end_io_read(struct bio *bio)
 {
 	struct bio *bio_orig = bio->bi_private;
 
 	if (!bio->bi_status)
 		copy_to_high_bio_irq(bio_orig, bio);
 
-	bounce_end_io(bio, pool);
-}
-
-static void bounce_end_io_read(struct bio *bio)
-{
-	__bounce_end_io_read(bio, &page_pool);
-}
-
-static void bounce_end_io_read_isa(struct bio *bio)
-{
-	__bounce_end_io_read(bio, &isa_page_pool);
+	bounce_end_io(bio);
 }
 
 static struct bio *bounce_clone_bio(struct bio *bio_src)
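Why reads keep an extra step: a read completes into the bounce page, so the data must be copied back up into the original (possibly highmem) bvec pages before the original bio completes, while a write only needs its bounce pages freed. The post-patch handlers, reconstructed from the hunk above:

static void bounce_end_io_write(struct bio *bio)
{
	bounce_end_io(bio);
}

static void bounce_end_io_read(struct bio *bio)
{
	struct bio *bio_orig = bio->bi_private;

	if (!bio->bi_status)
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio);
}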
@@ -287,8 +236,8 @@ err_put:
 	return NULL;
 }
 
-static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
-		mempool_t *pool)
+void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 {
 	struct bio *bio;
 	int rw = bio_data_dir(*bio_orig);
@@ -298,6 +247,20 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	bool bounce = false;
 	int sectors = 0;
 
+	/*
+	 * Data-less bio, nothing to bounce
+	 */
+	if (!bio_has_data(*bio_orig))
+		return;
+
+	/*
+	 * Just check if the bounce pfn is equal to or bigger than the highest
+	 * pfn in the system -- in that case, don't waste time iterating over
+	 * bio segments
+	 */
+	if (q->limits.bounce_pfn >= blk_max_pfn)
+		return;
+
 	bio_for_each_segment(from, *bio_orig, iter) {
 		if (i++ < BIO_MAX_VECS)
 			sectors += from.bv_len >> 9;
@@ -327,7 +290,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 		if (page_to_pfn(page) <= q->limits.bounce_pfn)
 			continue;
 
-		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
+		to->bv_page = mempool_alloc(&page_pool, GFP_NOIO);
 		inc_zone_page_state(to->bv_page, NR_BOUNCE);
 
 		if (rw == WRITE) {
@@ -346,46 +309,11 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 
 	bio->bi_flags |= (1 << BIO_BOUNCED);
 
-	if (pool == &page_pool) {
+	if (rw == READ)
+		bio->bi_end_io = bounce_end_io_read;
+	else
 		bio->bi_end_io = bounce_end_io_write;
-		if (rw == READ)
-			bio->bi_end_io = bounce_end_io_read;
-	} else {
-		bio->bi_end_io = bounce_end_io_write_isa;
-		if (rw == READ)
-			bio->bi_end_io = bounce_end_io_read_isa;
-	}
 
 	bio->bi_private = *bio_orig;
 	*bio_orig = bio;
 }
-
-void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
-{
-	mempool_t *pool;
-
-	/*
-	 * Data-less bio, nothing to bounce
-	 */
-	if (!bio_has_data(*bio_orig))
-		return;
-
-	/*
-	 * for non-isa bounce case, just check if the bounce pfn is equal
-	 * to or bigger than the highest pfn in the system -- in that case,
-	 * don't waste time iterating over bio segments
-	 */
-	if (!(q->bounce_gfp & GFP_DMA)) {
-		if (q->limits.bounce_pfn >= blk_max_pfn)
-			return;
-		pool = &page_pool;
-	} else {
-		BUG_ON(!mempool_initialized(&isa_page_pool));
-		pool = &isa_page_pool;
-	}
-
-	/*
-	 * slow path
-	 */
-	__blk_queue_bounce(q, bio_orig, pool);
-}
block/scsi_ioctl.c:

@@ -431,7 +431,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 
 	bytes = max(in_len, out_len);
 	if (bytes) {
-		buffer = kzalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN);
+		buffer = kzalloc(bytes, GFP_NOIO | GFP_USER | __GFP_NOWARN);
 		if (!buffer)
 			return -ENOMEM;
 
drivers/ata/libata-scsi.c:

@@ -1043,8 +1043,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
 		blk_queue_max_segments(q, queue_max_segments(q) - 1);
 
 		sdev->dma_drain_len = ATAPI_MAX_DRAIN;
-		sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len,
-					      q->bounce_gfp | GFP_KERNEL);
+		sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len, GFP_NOIO);
 		if (!sdev->dma_drain_buf) {
 			ata_dev_err(dev, "drain buffer allocation failed\n");
 			return -ENOMEM;
include/linux/blkdev.h:

@@ -436,11 +436,6 @@ struct request_queue {
 	 */
 	int			id;
 
-	/*
-	 * queue needs bounce pages for pages above this limit
-	 */
-	gfp_t			bounce_gfp;
-
 	spinlock_t		queue_lock;
 
 	/*
@@ -847,7 +842,6 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
  *
  * BLK_BOUNCE_HIGH	: bounce all highmem pages
  * BLK_BOUNCE_ANY	: don't bounce anything
- * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
  */
 
 #if BITS_PER_LONG == 32
@@ -856,7 +850,6 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
 #define BLK_BOUNCE_HIGH		-1ULL
 #endif
 #define BLK_BOUNCE_ANY		(-1ULL)
-#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))
 
 /*
  * default timeout for SG_IO if none specified
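For drivers, the only API-visible change is that the BLK_BOUNCE_ISA limit no longer exists; the remaining limits are unchanged. A hypothetical driver init sequence:

	/* still valid: bounce all highmem pages */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);

	/* still valid: never bounce */
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

	/* no longer compiles after this patch:
	 * blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
	 */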
mm/Kconfig:

@@ -283,12 +283,11 @@ config PHYS_ADDR_T_64BIT
 config BOUNCE
 	bool "Enable bounce buffers"
 	default y
-	depends on BLOCK && MMU && (ZONE_DMA || HIGHMEM)
+	depends on BLOCK && MMU && HIGHMEM
 	help
-	  Enable bounce buffers for devices that cannot access
-	  the full range of memory available to the CPU. Enabled
-	  by default when ZONE_DMA or HIGHMEM is selected, but you
-	  may say n to override this.
+	  Enable bounce buffers for devices that cannot access the full range of
+	  memory available to the CPU. Enabled by default when HIGHMEM is
+	  selected, but you may say n to override this.
 
 config VIRT_TO_BUS
 	bool