Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: Don't count_vm_events for discard bio in submit_bio.
  cfq: fix recursive call in cfq_blkiocg_update_completion_stats()
  cfq-iosched: Fixed boot warning with BLK_CGROUP=y and CFQ_GROUP_IOSCHED=n
  cfq: Don't allow queue merges for queues that have no process references
  block: fix DISCARD_BARRIER requests
  cciss: set SCSI max cmd len to 16, as default is wrong
  cpqarray: fix two more wrong section type
  cpqarray: fix wrong __init type on pci probe function
  drbd: Fixed a race between disk-attach and unexpected state changes
  writeback: fix pin_sb_for_writeback
  writeback: add missing requeue_io in writeback_inodes_wb
  writeback: simplify and split bdi_start_writeback
  writeback: simplify wakeup_flusher_threads
  writeback: fix writeback_inodes_wb from writeback_inodes_sb
  writeback: enforce s_umount locking in writeback_inodes_sb
  writeback: queue work on stack in writeback_inodes_sb
  writeback: fix writeback completion notifications
This commit is contained in:
Commit 984bc9601f
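A minimal sketch of the pattern behind the "cfq-iosched: Fixed boot warning with BLK_CGROUP=y and CFQ_GROUP_IOSCHED=n" fix listed above: the new block/cfq.h (further down in this diff) wraps every blkiocg_* statistics call so that it forwards to the real helper when group scheduling is built in, and collapses to an empty inline stub otherwise. The sketch below is illustrative user-space C, not kernel code; grp_stats, grp_update_timeslice_used and ENABLE_GROUP_IOSCHED are invented names for the example.

    /* Hypothetical stand-ins -- not the kernel API. */
    #include <stdio.h>

    struct grp_stats { unsigned long time_used; };

    #ifdef ENABLE_GROUP_IOSCHED
    /* Group accounting configured in: forward to the real update. */
    static inline void grp_update_timeslice_used(struct grp_stats *g, unsigned long t)
    {
            g->time_used += t;
    }
    #else
    /* Group accounting configured out: the wrapper compiles to nothing. */
    static inline void grp_update_timeslice_used(struct grp_stats *g, unsigned long t)
    {
            (void)g;
            (void)t;
    }
    #endif

    int main(void)
    {
            struct grp_stats g = { 0 };

            grp_update_timeslice_used(&g, 8);  /* no-op unless ENABLE_GROUP_IOSCHED is defined */
            printf("time used: %lu\n", g.time_used);
            return 0;
    }

Callers can then invoke the wrapper unconditionally, which is what removes the need for #ifdef blocks at every call site.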
@@ -1149,13 +1149,10 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	else
 		req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
+	if (bio_rw_flagged(bio, BIO_RW_DISCARD))
 		req->cmd_flags |= REQ_DISCARD;
-		if (bio_rw_flagged(bio, BIO_RW_BARRIER))
-			req->cmd_flags |= REQ_SOFTBARRIER;
-	} else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)))
+	if (bio_rw_flagged(bio, BIO_RW_BARRIER))
 		req->cmd_flags |= REQ_HARDBARRIER;
-
 	if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
 		req->cmd_flags |= REQ_RW_SYNC;
 	if (bio_rw_flagged(bio, BIO_RW_META))
@@ -1586,7 +1583,7 @@ void submit_bio(int rw, struct bio *bio)
 	 * If it's a regular read/write or a barrier with data attached,
 	 * go through the normal accounting stuff before submission.
 	 */
-	if (bio_has_data(bio)) {
+	if (bio_has_data(bio) && !(rw & (1 << BIO_RW_DISCARD))) {
 		if (rw & WRITE) {
 			count_vm_events(PGPGOUT, count);
 		} else {

@@ -14,7 +14,7 @@
 #include <linux/rbtree.h>
 #include <linux/ioprio.h>
 #include <linux/blktrace_api.h>
-#include "blk-cgroup.h"
+#include "cfq.h"
 
 /*
  * tunables
@@ -879,7 +879,7 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	if (!RB_EMPTY_NODE(&cfqg->rb_node))
 		cfq_rb_erase(&cfqg->rb_node, st);
 	cfqg->saved_workload_slice = 0;
-	blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
+	cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
 }
 
 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
@@ -939,8 +939,8 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 
 	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
 					st->min_vdisktime);
-	blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
-	blkiocg_set_start_empty_time(&cfqg->blkg);
+	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
+	cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
 }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
@@ -995,7 +995,7 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
 
 	/* Add group onto cgroup list */
 	sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-	blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
+	cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
 					MKDEV(major, minor));
 	cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
 
@@ -1079,7 +1079,7 @@ static void cfq_release_cfq_groups(struct cfq_data *cfqd)
 		 * it from cgroup list, then it will take care of destroying
 		 * cfqg also.
 		 */
-		if (!blkiocg_del_blkio_group(&cfqg->blkg))
+		if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
 			cfq_destroy_cfqg(cfqd, cfqg);
 	}
 }
@@ -1421,10 +1421,10 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
 {
 	elv_rb_del(&cfqq->sort_list, rq);
 	cfqq->queued[rq_is_sync(rq)]--;
-	blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
-						rq_is_sync(rq));
+	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
+					rq_data_dir(rq), rq_is_sync(rq));
 	cfq_add_rq_rb(rq);
-	blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
+	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
 			&cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
 			rq_is_sync(rq));
 }
@@ -1482,8 +1482,8 @@ static void cfq_remove_request(struct request *rq)
 	cfq_del_rq_rb(rq);
 
 	cfqq->cfqd->rq_queued--;
-	blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
-						rq_is_sync(rq));
+	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
+					rq_data_dir(rq), rq_is_sync(rq));
 	if (rq_is_meta(rq)) {
 		WARN_ON(!cfqq->meta_pending);
 		cfqq->meta_pending--;
@@ -1518,8 +1518,8 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
 static void cfq_bio_merged(struct request_queue *q, struct request *req,
 			   struct bio *bio)
 {
-	blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg, bio_data_dir(bio),
-					cfq_bio_sync(bio));
+	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
+					bio_data_dir(bio), cfq_bio_sync(bio));
 }
 
 static void
@@ -1539,8 +1539,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
 	if (cfqq->next_rq == next)
 		cfqq->next_rq = rq;
 	cfq_remove_request(next);
-	blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(next),
-					rq_is_sync(next));
+	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
+					rq_data_dir(next), rq_is_sync(next));
 }
 
 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
@@ -1571,7 +1571,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	del_timer(&cfqd->idle_slice_timer);
-	blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
+	cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
 }
 
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -1580,7 +1580,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 	if (cfqq) {
 		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
 				cfqd->serving_prio, cfqd->serving_type);
-		blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
+		cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
 		cfqq->slice_start = 0;
 		cfqq->dispatch_start = jiffies;
 		cfqq->allocated_slice = 0;
@@ -1911,7 +1911,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 		sl = cfqd->cfq_slice_idle;
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
-	blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
+	cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
 	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
 }
 
@@ -1931,7 +1931,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 	elv_dispatch_sort(q, rq);
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
-	blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
+	cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
 					rq_data_dir(rq), rq_is_sync(rq));
 }
 
@@ -1986,6 +1986,15 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
 	int process_refs, new_process_refs;
 	struct cfq_queue *__cfqq;
 
+	/*
+	 * If there are no process references on the new_cfqq, then it is
+	 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
+	 * chain may have dropped their last reference (not just their
+	 * last process reference).
+	 */
+	if (!cfqq_process_refs(new_cfqq))
+		return;
+
 	/* Avoid a circular list and skip interim queue merges */
 	while ((__cfqq = new_cfqq->new_cfqq)) {
 		if (__cfqq == cfqq)
@@ -1994,17 +2003,17 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
 	}
 
 	process_refs = cfqq_process_refs(cfqq);
+	new_process_refs = cfqq_process_refs(new_cfqq);
 	/*
 	 * If the process for the cfqq has gone away, there is no
 	 * sense in merging the queues.
 	 */
-	if (process_refs == 0)
+	if (process_refs == 0 || new_process_refs == 0)
 		return;
 
 	/*
 	 * Merge in the direction of the lesser amount of work.
 	 */
-	new_process_refs = cfqq_process_refs(new_cfqq);
 	if (new_process_refs >= process_refs) {
 		cfqq->new_cfqq = new_cfqq;
 		atomic_add(process_refs, &new_cfqq->ref);
@@ -3248,7 +3257,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			cfq_clear_cfqq_wait_request(cfqq);
 			__blk_run_queue(cfqd->queue);
 		} else {
-			blkiocg_update_idle_time_stats(
+			cfq_blkiocg_update_idle_time_stats(
 					&cfqq->cfqg->blkg);
 			cfq_mark_cfqq_must_dispatch(cfqq);
 		}
@@ -3276,7 +3285,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 	cfq_add_rq_rb(rq);
-	blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
+	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
 			&cfqd->serving_group->blkg, rq_data_dir(rq),
 			rq_is_sync(rq));
 	cfq_rq_enqueued(cfqd, cfqq, rq);
@@ -3364,9 +3373,9 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	WARN_ON(!cfqq->dispatched);
 	cfqd->rq_in_driver--;
 	cfqq->dispatched--;
-	blkiocg_update_completion_stats(&cfqq->cfqg->blkg, rq_start_time_ns(rq),
-			rq_io_start_time_ns(rq), rq_data_dir(rq),
-			rq_is_sync(rq));
+	cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
+			rq_start_time_ns(rq), rq_io_start_time_ns(rq),
+			rq_data_dir(rq), rq_is_sync(rq));
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
 
@@ -3730,7 +3739,7 @@ static void cfq_exit_queue(struct elevator_queue *e)
 
 	cfq_put_async_queues(cfqd);
 	cfq_release_cfq_groups(cfqd);
-	blkiocg_del_blkio_group(&cfqd->root_group.blkg);
+	cfq_blkiocg_del_blkio_group(&cfqd->root_group.blkg);
 
 	spin_unlock_irq(q->queue_lock);
 
@@ -3798,8 +3807,8 @@ static void *cfq_init_queue(struct request_queue *q)
 	 */
 	atomic_set(&cfqg->ref, 1);
 	rcu_read_lock();
-	blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd,
-					0);
+	cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
+					(void *)cfqd, 0);
 	rcu_read_unlock();
 #endif
 	/*

@@ -0,0 +1,115 @@
+#ifndef _CFQ_H
+#define _CFQ_H
+#include "blk-cgroup.h"
+
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
+	struct blkio_group *curr_blkg, bool direction, bool sync)
+{
+	blkiocg_update_io_add_stats(blkg, curr_blkg, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+			unsigned long dequeue)
+{
+	blkiocg_update_dequeue_stats(blkg, dequeue);
+}
+
+static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
+			unsigned long time)
+{
+	blkiocg_update_timeslice_used(blkg, time);
+}
+
+static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg)
+{
+	blkiocg_set_start_empty_time(blkg);
+}
+
+static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+				bool direction, bool sync)
+{
+	blkiocg_update_io_remove_stats(blkg, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
+				bool direction, bool sync)
+{
+	blkiocg_update_io_merged_stats(blkg, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
+{
+	blkiocg_update_idle_time_stats(blkg);
+}
+
+static inline void
+cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
+{
+	blkiocg_update_avg_queue_size_stats(blkg);
+}
+
+static inline void
+cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
+{
+	blkiocg_update_set_idle_time_stats(blkg);
+}
+
+static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
+				uint64_t bytes, bool direction, bool sync)
+{
+	blkiocg_update_dispatch_stats(blkg, bytes, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
+{
+	blkiocg_update_completion_stats(blkg, start_time, io_start_time,
+					direction, sync);
+}
+
+static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
+			struct blkio_group *blkg, void *key, dev_t dev) {
+	blkiocg_add_blkio_group(blkcg, blkg, key, dev);
+}
+
+static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
+{
+	return blkiocg_del_blkio_group(blkg);
+}
+
+#else /* CFQ_GROUP_IOSCHED */
+static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
+	struct blkio_group *curr_blkg, bool direction, bool sync) {}
+
+static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+			unsigned long dequeue) {}
+
+static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
+			unsigned long time) {}
+static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
+static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+				bool direction, bool sync) {}
+static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
+				bool direction, bool sync) {}
+static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
+{
+}
+static inline void
+cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) {}
+
+static inline void
+cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) {}
+
+static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
+				uint64_t bytes, bool direction, bool sync) {}
+static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) {}
+
+static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
+			struct blkio_group *blkg, void *key, dev_t dev) {}
+static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
+{
+	return 0;
+}
+
+#endif /* CFQ_GROUP_IOSCHED */
+#endif

@@ -861,6 +861,7 @@ cciss_scsi_detect(int ctlr)
 	sh->n_io_port = 0;	// I don't think we use these two...
 	sh->this_id = SELF_SCSI_ID;
 	sh->sg_tablesize = hba[ctlr]->maxsgentries;
+	sh->max_cmd_len = MAX_COMMAND_SIZE;
 
 	((struct cciss_scsi_adapter_data_t *)
 		hba[ctlr]->scsi_ctlr)->scsi_host = sh;

@@ -386,7 +386,7 @@ static void __devexit cpqarray_remove_one_eisa (int i)
 }
 
 /* pdev is NULL for eisa */
-static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
+static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
 {
 	struct request_queue *q;
 	int j;
@@ -503,7 +503,7 @@ Enomem4:
 	return -1;
 }
 
-static int __init cpqarray_init_one( struct pci_dev *pdev,
+static int __devinit cpqarray_init_one( struct pci_dev *pdev,
 	const struct pci_device_id *ent)
 {
 	int i;
@@ -740,7 +740,7 @@ __setup("smart2=", cpqarray_setup);
 /*
  * Find an EISA controller's signature.  Set up an hba if we find it.
  */
-static int __init cpqarray_eisa_detect(void)
+static int __devinit cpqarray_eisa_detect(void)
 {
 	int i=0, j;
 	__u32 board_id;

@@ -1236,8 +1236,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 	/* Last part of the attaching process ... */
 	if (ns.conn >= C_CONNECTED &&
 	    os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
-		kfree(mdev->p_uuid); /* We expect to receive up-to-date UUIDs soon. */
-		mdev->p_uuid = NULL; /* ...to not use the old ones in the mean time */
 		drbd_send_sizes(mdev, 0, 0);  /* to start sync... */
 		drbd_send_uuids(mdev);
 		drbd_send_state(mdev);

@@ -1114,6 +1114,12 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 		mdev->new_state_tmp.i = ns.i;
 		ns.i = os.i;
 		ns.disk = D_NEGOTIATING;
+
+		/* We expect to receive up-to-date UUIDs soon.
+		   To avoid a race in receive_state, free p_uuid while
+		   holding req_lock. I.e. atomic with the state change */
+		kfree(mdev->p_uuid);
+		mdev->p_uuid = NULL;
 	}
 
 	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);

@@ -63,24 +63,16 @@ struct bdi_work {
 };
 
 enum {
-	WS_USED_B = 0,
-	WS_ONSTACK_B,
+	WS_INPROGRESS = 0,
+	WS_ONSTACK,
 };
 
-#define WS_USED (1 << WS_USED_B)
-#define WS_ONSTACK (1 << WS_ONSTACK_B)
-
-static inline bool bdi_work_on_stack(struct bdi_work *work)
-{
-	return test_bit(WS_ONSTACK_B, &work->state);
-}
-
 static inline void bdi_work_init(struct bdi_work *work,
 				 struct wb_writeback_args *args)
 {
 	INIT_RCU_HEAD(&work->rcu_head);
 	work->args = *args;
-	work->state = WS_USED;
+	__set_bit(WS_INPROGRESS, &work->state);
 }
 
 /**
@@ -95,43 +87,16 @@ int writeback_in_progress(struct backing_dev_info *bdi)
 	return !list_empty(&bdi->work_list);
 }
 
-static void bdi_work_clear(struct bdi_work *work)
-{
-	clear_bit(WS_USED_B, &work->state);
-	smp_mb__after_clear_bit();
-	/*
-	 * work can have disappeared at this point. bit waitq functions
-	 * should be able to tolerate this, provided bdi_sched_wait does
-	 * not dereference it's pointer argument.
-	 */
-	wake_up_bit(&work->state, WS_USED_B);
-}
-
 static void bdi_work_free(struct rcu_head *head)
 {
 	struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);
 
-	if (!bdi_work_on_stack(work))
+	clear_bit(WS_INPROGRESS, &work->state);
+	smp_mb__after_clear_bit();
+	wake_up_bit(&work->state, WS_INPROGRESS);
+
+	if (!test_bit(WS_ONSTACK, &work->state))
 		kfree(work);
-	else
-		bdi_work_clear(work);
-}
-
-static void wb_work_complete(struct bdi_work *work)
-{
-	const enum writeback_sync_modes sync_mode = work->args.sync_mode;
-	int onstack = bdi_work_on_stack(work);
-
-	/*
-	 * For allocated work, we can clear the done/seen bit right here.
-	 * For on-stack work, we need to postpone both the clear and free
-	 * to after the RCU grace period, since the stack could be invalidated
-	 * as soon as bdi_work_clear() has done the wakeup.
-	 */
-	if (!onstack)
-		bdi_work_clear(work);
-	if (sync_mode == WB_SYNC_NONE || onstack)
-		call_rcu(&work->rcu_head, bdi_work_free);
 }
 
 static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
@@ -147,7 +112,7 @@ static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
 		list_del_rcu(&work->list);
 		spin_unlock(&bdi->wb_lock);
 
-		wb_work_complete(work);
+		call_rcu(&work->rcu_head, bdi_work_free);
 	}
 }
 
@@ -185,9 +150,9 @@ static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
  * Used for on-stack allocated work items. The caller needs to wait until
  * the wb threads have acked the work before it's safe to continue.
  */
-static void bdi_wait_on_work_clear(struct bdi_work *work)
+static void bdi_wait_on_work_done(struct bdi_work *work)
 {
-	wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait,
+	wait_on_bit(&work->state, WS_INPROGRESS, bdi_sched_wait,
 		    TASK_UNINTERRUPTIBLE);
 }
 
@@ -213,37 +178,28 @@ static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
 }
 
 /**
- * bdi_sync_writeback - start and wait for writeback
- * @bdi: the backing device to write from
+ * bdi_queue_work_onstack - start and wait for writeback
 * @sb: write inodes from this super_block
 *
 * Description:
- *   This does WB_SYNC_ALL data integrity writeback and waits for the
- *   IO to complete. Callers must hold the sb s_umount semaphore for
+ *   This function initiates writeback and waits for the operation to
+ *   complete. Callers must hold the sb s_umount semaphore for
 *   reading, to avoid having the super disappear before we are done.
 */
-static void bdi_sync_writeback(struct backing_dev_info *bdi,
-			       struct super_block *sb)
+static void bdi_queue_work_onstack(struct wb_writeback_args *args)
 {
-	struct wb_writeback_args args = {
-		.sb		= sb,
-		.sync_mode	= WB_SYNC_ALL,
-		.nr_pages	= LONG_MAX,
-		.range_cyclic	= 0,
-	};
 	struct bdi_work work;
 
-	bdi_work_init(&work, &args);
-	work.state |= WS_ONSTACK;
+	bdi_work_init(&work, args);
+	__set_bit(WS_ONSTACK, &work.state);
 
-	bdi_queue_work(bdi, &work);
-	bdi_wait_on_work_clear(&work);
+	bdi_queue_work(args->sb->s_bdi, &work);
+	bdi_wait_on_work_done(&work);
 }
 
 /**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
- * @sb: write inodes from this super_block
 * @nr_pages: the number of pages to write
 *
 * Description:
@@ -252,25 +208,34 @@ static void bdi_sync_writeback(struct backing_dev_info *bdi,
 *   completion. Caller need not hold sb s_umount semaphore.
 *
 */
-void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
-			 long nr_pages)
+void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
 {
 	struct wb_writeback_args args = {
-		.sb		= sb,
 		.sync_mode	= WB_SYNC_NONE,
 		.nr_pages	= nr_pages,
 		.range_cyclic	= 1,
 	};
 
-	/*
-	 * We treat @nr_pages=0 as the special case to do background writeback,
-	 * ie. to sync pages until the background dirty threshold is reached.
-	 */
-	if (!nr_pages) {
-		args.nr_pages = LONG_MAX;
-		args.for_background = 1;
-	}
-
 	bdi_alloc_queue_work(bdi, &args);
 }
 
+/**
+ * bdi_start_background_writeback - start background writeback
+ * @bdi: the backing device to write from
+ *
+ * Description:
+ *   This does WB_SYNC_NONE background writeback. The IO is only
+ *   started when this function returns, we make no guarentees on
+ *   completion. Caller need not hold sb s_umount semaphore.
+ */
+void bdi_start_background_writeback(struct backing_dev_info *bdi)
+{
+	struct wb_writeback_args args = {
+		.sync_mode	= WB_SYNC_NONE,
+		.nr_pages	= LONG_MAX,
+		.for_background	= 1,
+		.range_cyclic	= 1,
+	};
+	bdi_alloc_queue_work(bdi, &args);
+}
+
@@ -561,48 +526,30 @@
 	return ret;
 }
 
-static void unpin_sb_for_writeback(struct super_block *sb)
-{
-	up_read(&sb->s_umount);
-	put_super(sb);
-}
-
-enum sb_pin_state {
-	SB_PINNED,
-	SB_NOT_PINNED,
-	SB_PIN_FAILED
-};
-
 /*
- * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
+ * For background writeback the caller does not have the sb pinned
 * before calling writeback. So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 */
-static enum sb_pin_state pin_sb_for_writeback(struct writeback_control *wbc,
-					      struct super_block *sb)
+static bool pin_sb_for_writeback(struct super_block *sb)
 {
-	/*
-	 * Caller must already hold the ref for this
-	 */
-	if (wbc->sync_mode == WB_SYNC_ALL) {
-		WARN_ON(!rwsem_is_locked(&sb->s_umount));
-		return SB_NOT_PINNED;
-	}
 	spin_lock(&sb_lock);
+	if (list_empty(&sb->s_instances)) {
+		spin_unlock(&sb_lock);
+		return false;
+	}
+
 	sb->s_count++;
+	spin_unlock(&sb_lock);
+
 	if (down_read_trylock(&sb->s_umount)) {
-		if (sb->s_root) {
-			spin_unlock(&sb_lock);
-			return SB_PINNED;
-		}
-		/*
-		 * umounted, drop rwsem again and fall through to failure
-		 */
+		if (sb->s_root)
+			return true;
 		up_read(&sb->s_umount);
 	}
-	sb->s_count--;
-	spin_unlock(&sb_lock);
-	return SB_PIN_FAILED;
+
+	put_super(sb);
+	return false;
 }
 
 /*
@@ -681,24 +628,31 @@ static void writeback_inodes_wb(struct bdi_writeback *wb,
 		struct inode *inode = list_entry(wb->b_io.prev,
 						 struct inode, i_list);
 		struct super_block *sb = inode->i_sb;
-		enum sb_pin_state state;
 
-		if (wbc->sb && sb != wbc->sb) {
-			/* super block given and doesn't
-			   match, skip this inode */
-			redirty_tail(inode);
-			continue;
+		if (wbc->sb) {
+			/*
+			 * We are requested to write out inodes for a specific
+			 * superblock. This means we already have s_umount
+			 * taken by the caller which also waits for us to
+			 * complete the writeout.
+			 */
+			if (sb != wbc->sb) {
+				redirty_tail(inode);
+				continue;
+			}
+
+			WARN_ON(!rwsem_is_locked(&sb->s_umount));
+
+			ret = writeback_sb_inodes(sb, wb, wbc);
+		} else {
+			if (!pin_sb_for_writeback(sb)) {
+				requeue_io(inode);
+				continue;
+			}
+			ret = writeback_sb_inodes(sb, wb, wbc);
+			drop_super(sb);
 		}
-		state = pin_sb_for_writeback(wbc, sb);
-
-		if (state == SB_PIN_FAILED) {
-			requeue_io(inode);
-			continue;
-		}
-		ret = writeback_sb_inodes(sb, wb, wbc);
 
-		if (state == SB_PINNED)
-			unpin_sb_for_writeback(sb);
 		if (ret)
 			break;
 	}
@@ -911,7 +865,7 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
 		 * If this isn't a data integrity operation, just notify
 		 * that we have seen this work and we are now starting it.
 		 */
-		if (args.sync_mode == WB_SYNC_NONE)
+		if (!test_bit(WS_ONSTACK, &work->state))
 			wb_clear_pending(wb, work);
 
 		wrote += wb_writeback(wb, &args);
@@ -920,7 +874,7 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
 		 * This is a data integrity writeback, so only do the
 		 * notification when we have completed the work.
 		 */
-		if (args.sync_mode == WB_SYNC_ALL)
+		if (test_bit(WS_ONSTACK, &work->state))
 			wb_clear_pending(wb, work);
 	}
 
@@ -977,41 +931,31 @@ int bdi_writeback_task(struct bdi_writeback *wb)
 	return 0;
 }
 
-/*
- * Schedule writeback for all backing devices. This does WB_SYNC_NONE
- * writeback, for integrity writeback see bdi_sync_writeback().
- */
-static void bdi_writeback_all(struct super_block *sb, long nr_pages)
-{
-	struct wb_writeback_args args = {
-		.sb		= sb,
-		.nr_pages	= nr_pages,
-		.sync_mode	= WB_SYNC_NONE,
-	};
-	struct backing_dev_info *bdi;
-
-	rcu_read_lock();
-
-	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
-		if (!bdi_has_dirty_io(bdi))
-			continue;
-
-		bdi_alloc_queue_work(bdi, &args);
-	}
-
-	rcu_read_unlock();
-}
-
 /*
  * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
  * the whole world.
  */
 void wakeup_flusher_threads(long nr_pages)
 {
-	if (nr_pages == 0)
-		nr_pages = global_page_state(NR_FILE_DIRTY) +
+	struct backing_dev_info *bdi;
+	struct wb_writeback_args args = {
+		.sync_mode	= WB_SYNC_NONE,
+	};
+
+	if (nr_pages) {
+		args.nr_pages = nr_pages;
+	} else {
+		args.nr_pages = global_page_state(NR_FILE_DIRTY) +
 				global_page_state(NR_UNSTABLE_NFS);
-	bdi_writeback_all(NULL, nr_pages);
+	}
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
+		if (!bdi_has_dirty_io(bdi))
+			continue;
+		bdi_alloc_queue_work(bdi, &args);
+	}
+	rcu_read_unlock();
 }
 
 static noinline void block_dump___mark_inode_dirty(struct inode *inode)
@@ -1218,12 +1162,17 @@ void writeback_inodes_sb(struct super_block *sb)
 {
 	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
 	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
-	long nr_to_write;
+	struct wb_writeback_args args = {
+		.sb		= sb,
+		.sync_mode	= WB_SYNC_NONE,
+	};
 
-	nr_to_write = nr_dirty + nr_unstable +
+	WARN_ON(!rwsem_is_locked(&sb->s_umount));
+
+	args.nr_pages = nr_dirty + nr_unstable +
 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
 
-	bdi_start_writeback(sb->s_bdi, sb, nr_to_write);
+	bdi_queue_work_onstack(&args);
 }
 EXPORT_SYMBOL(writeback_inodes_sb);
 
@@ -1237,7 +1186,9 @@ EXPORT_SYMBOL(writeback_inodes_sb);
 int writeback_inodes_sb_if_idle(struct super_block *sb)
 {
 	if (!writeback_in_progress(sb->s_bdi)) {
+		down_read(&sb->s_umount);
 		writeback_inodes_sb(sb);
+		up_read(&sb->s_umount);
 		return 1;
 	} else
 		return 0;
@@ -1253,7 +1204,16 @@ EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
 */
 void sync_inodes_sb(struct super_block *sb)
 {
-	bdi_sync_writeback(sb->s_bdi, sb);
+	struct wb_writeback_args args = {
+		.sb		= sb,
+		.sync_mode	= WB_SYNC_ALL,
+		.nr_pages	= LONG_MAX,
+		.range_cyclic	= 0,
+	};
+
+	WARN_ON(!rwsem_is_locked(&sb->s_umount));
+
+	bdi_queue_work_onstack(&args);
 	wait_sb_inodes(sb);
 }
 EXPORT_SYMBOL(sync_inodes_sb);

@@ -62,7 +62,9 @@
 */
 static void shrink_liability(struct ubifs_info *c, int nr_to_write)
 {
+	down_read(&c->vfs_sb->s_umount);
 	writeback_inodes_sb(c->vfs_sb);
+	up_read(&c->vfs_sb->s_umount);
 }
 
 /**

@@ -105,8 +105,8 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
 void bdi_unregister(struct backing_dev_info *bdi);
 int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
-void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
-				long nr_pages);
+void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages);
+void bdi_start_background_writeback(struct backing_dev_info *bdi);
 int bdi_writeback_task(struct bdi_writeback *wb);
 int bdi_has_dirty_io(struct backing_dev_info *bdi);
 void bdi_arm_supers_timer(void);

@@ -53,7 +53,7 @@
 
 
 extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.8rc2"
+#define REL_VERSION "8.3.8"
 #define API_VERSION 88
 #define PRO_VERSION_MIN 86
 #define PRO_VERSION_MAX 94

@@ -597,7 +597,7 @@ static void balance_dirty_pages(struct address_space *mapping,
 	    (!laptop_mode && ((global_page_state(NR_FILE_DIRTY)
 			       + global_page_state(NR_UNSTABLE_NFS))
 					  > background_thresh)))
-		bdi_start_writeback(bdi, NULL, 0);
+		bdi_start_background_writeback(bdi);
 }
 
 void set_page_dirty_balance(struct page *page, int page_mkwrite)
@@ -705,9 +705,8 @@ void laptop_mode_timer_fn(unsigned long data)
 	 * We want to write everything out, not just down to the dirty
 	 * threshold
 	 */
-
 	if (bdi_has_dirty_io(&q->backing_dev_info))
-		bdi_start_writeback(&q->backing_dev_info, NULL, nr_pages);
+		bdi_start_writeback(&q->backing_dev_info, nr_pages);
 }
 
 /*