pktdvd: stop using bdi congestion framework.

The bdi congestion framework isn't widely used and should be
deprecated.

pktdvd makes use of it to track congestion, but this can be done
entirely internally to pktdvd, so it doesn't need to use the framework.

So introduce a "congested" flag.  When waiting for bio_queue_size to
drop, set this flag and use a var_waitqueue() to wait for it.  When
bio_queue_size does drop and this flag is set, clear the flag and call
wake_up_var().

We don't use a wait_var_event macro for the waiting as we need to set
the flag and drop the spinlock before calling schedule(), and while that
is possible with __wait_var_event(), the result is not easy to read.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: NeilBrown <neilb@suse.de>
Link: https://lore.kernel.org/r/163910843527.9928.857338663717630212@noble.neil.brown.name
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
NeilBrown 2021-12-09 21:31:56 -07:00 committed by Jens Axboe
Parent 2385ebf38f
Commit db67097aa6
2 changed files with 22 additions and 11 deletions

View file

@ -1107,7 +1107,6 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
sector_t zone = 0; /* Suppress gcc warning */ sector_t zone = 0; /* Suppress gcc warning */
struct pkt_rb_node *node, *first_node; struct pkt_rb_node *node, *first_node;
struct rb_node *n; struct rb_node *n;
int wakeup;
atomic_set(&pd->scan_queue, 0); atomic_set(&pd->scan_queue, 0);
@ -1179,12 +1178,14 @@ try_next_bio:
spin_unlock(&pkt->lock); spin_unlock(&pkt->lock);
} }
/* check write congestion marks, and if bio_queue_size is /* check write congestion marks, and if bio_queue_size is
below, wake up any waiters */ * below, wake up any waiters
wakeup = (pd->write_congestion_on > 0 */
&& pd->bio_queue_size <= pd->write_congestion_off); if (pd->congested &&
pd->bio_queue_size <= pd->write_congestion_off) {
pd->congested = false;
wake_up_var(&pd->congested);
}
spin_unlock(&pd->lock); spin_unlock(&pd->lock);
if (wakeup)
clear_bdi_congested(pd->disk->bdi, BLK_RW_ASYNC);
pkt->sleep_time = max(PACKET_WAIT_TIME, 1); pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
pkt_set_state(pkt, PACKET_WAITING_STATE); pkt_set_state(pkt, PACKET_WAITING_STATE);
@ -2364,12 +2365,20 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
spin_lock(&pd->lock); spin_lock(&pd->lock);
if (pd->write_congestion_on > 0 if (pd->write_congestion_on > 0
&& pd->bio_queue_size >= pd->write_congestion_on) { && pd->bio_queue_size >= pd->write_congestion_on) {
set_bdi_congested(bio->bi_bdev->bd_disk->bdi, BLK_RW_ASYNC); struct wait_bit_queue_entry wqe;
do {
init_wait_var_entry(&wqe, &pd->congested, 0);
for (;;) {
prepare_to_wait_event(__var_waitqueue(&pd->congested),
&wqe.wq_entry,
TASK_UNINTERRUPTIBLE);
if (pd->bio_queue_size <= pd->write_congestion_off)
break;
pd->congested = true;
spin_unlock(&pd->lock); spin_unlock(&pd->lock);
congestion_wait(BLK_RW_ASYNC, HZ); schedule();
spin_lock(&pd->lock); spin_lock(&pd->lock);
} while(pd->bio_queue_size > pd->write_congestion_off); }
} }
spin_unlock(&pd->lock); spin_unlock(&pd->lock);

View file

@ -183,6 +183,8 @@ struct pktcdvd_device
spinlock_t lock; /* Serialize access to bio_queue */ spinlock_t lock; /* Serialize access to bio_queue */
struct rb_root bio_queue; /* Work queue of bios we need to handle */ struct rb_root bio_queue; /* Work queue of bios we need to handle */
int bio_queue_size; /* Number of nodes in bio_queue */ int bio_queue_size; /* Number of nodes in bio_queue */
bool congested; /* Someone is waiting for bio_queue_size
* to drop. */
sector_t current_sector; /* Keep track of where the elevator is */ sector_t current_sector; /* Keep track of where the elevator is */
atomic_t scan_queue; /* Set to non-zero when pkt_handle_queue */ atomic_t scan_queue; /* Set to non-zero when pkt_handle_queue */
/* needs to be run. */ /* needs to be run. */