drbd: move the drbd_work_queue from drbd_socket to drbd_connection
cherry-picked and adapted from drbd 9 devel branch

In 8.4, we do not yet distinguish between "resource work" and
"connection work": we still have only one connection per resource,
and a single worker that handles both.

We only ever used "data.work", so there is no need to keep
"meta.work" around.

Move tconn->data.work to tconn->sender_work.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Parent: 8c0785a5c9
Commit: d5b27b01f1
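The change in a nutshell: struct drbd_socket loses its embedded work queue, and struct drbd_tconn gains a single sender_work queue instead. A condensed before/after view, abbreviated to the fields this diff actually touches:

/* before: one work queue per socket; only data.work was ever used */
struct drbd_socket {
	struct drbd_work_queue work;
	struct mutex mutex;
	struct socket *socket;
};

/* after: one work queue on the connection itself */
struct drbd_tconn {
	/* ... threads, cpu_mask, ... */
	struct drbd_work_queue sender_work;
};

The full diff follows.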
drivers/block/drbd/drbd_actlog.c
@@ -485,7 +485,7 @@ static int al_write_transaction(struct drbd_conf *mdev)
 	init_completion(&al_work.event);
 	al_work.w.cb = w_al_write_transaction;
 	al_work.w.mdev = mdev;
-	drbd_queue_work_front(&mdev->tconn->data.work, &al_work.w);
+	drbd_queue_work_front(&mdev->tconn->sender_work, &al_work.w);
 	wait_for_completion(&al_work.event);
 
 	return al_work.err;
@@ -645,7 +645,7 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
 		udw->enr = ext->lce.lc_number;
 		udw->w.cb = w_update_odbm;
 		udw->w.mdev = mdev;
-		drbd_queue_work_front(&mdev->tconn->data.work, &udw->w);
+		drbd_queue_work_front(&mdev->tconn->sender_work, &udw->w);
 	} else {
 		dev_warn(DEV, "Could not kmalloc an udw\n");
 	}
drivers/block/drbd/drbd_int.h
@@ -740,7 +740,6 @@ struct drbd_work_queue {
 };
 
 struct drbd_socket {
-	struct drbd_work_queue work;
 	struct mutex mutex;
 	struct socket *socket;
 	/* this way we get our
@@ -871,6 +870,7 @@ struct drbd_tconn { /* is a resource from the config file */
 	struct drbd_thread worker;
 	struct drbd_thread asender;
 	cpumask_var_t cpu_mask;
+	struct drbd_work_queue sender_work;
 };
 
 struct drbd_conf {
@@ -2228,7 +2228,7 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)
 	wake_up(&mdev->misc_wait);
 	if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
 		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
-			drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w);
+			drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
 	}
 }
 
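For orientation: a drbd_work_queue is a list (q) plus a waitqueue (q_wait), both visible in the hunks of this patch; the spinlock name q_lock below is an assumption. A minimal sketch of the two enqueue helpers this patch rewires, not the verbatim 8.4 bodies:

static inline void
drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;

	spin_lock_irqsave(&q->q_lock, flags);
	list_add_tail(&w->list, &q->q);	/* normal FIFO ordering */
	spin_unlock_irqrestore(&q->q_lock, flags);
	wake_up(&q->q_wait);		/* kick the worker */
}

static inline void
drbd_queue_work_front(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;

	spin_lock_irqsave(&q->q_lock, flags);
	list_add(&w->list, &q->q);	/* dispatch before anything already queued */
	spin_unlock_irqrestore(&q->q_lock, flags);
	wake_up(&q->q_wait);
}

The _front variant is why the worker may only dequeue single items while it is in use; see the comment in the drbd_worker() hunk below.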
drivers/block/drbd/drbd_main.c
@@ -379,7 +379,7 @@ void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
 				set_bit(CREATE_BARRIER, &tconn->flags);
 			}
 
-			drbd_queue_work(&tconn->data.work, &b->w);
+			drbd_queue_work(&tconn->sender_work, &b->w);
 		}
 		pn = &b->next;
 	} else {
@@ -2173,8 +2173,7 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
 	D_ASSERT(list_empty(&mdev->read_ee));
 	D_ASSERT(list_empty(&mdev->net_ee));
 	D_ASSERT(list_empty(&mdev->resync_reads));
-	D_ASSERT(list_empty(&mdev->tconn->data.work.q));
-	D_ASSERT(list_empty(&mdev->tconn->meta.work.q));
+	D_ASSERT(list_empty(&mdev->tconn->sender_work.q));
 	D_ASSERT(list_empty(&mdev->resync_work.list));
 	D_ASSERT(list_empty(&mdev->unplug_work.list));
 	D_ASSERT(list_empty(&mdev->go_diskless.list));
@@ -2349,7 +2348,6 @@ void drbd_minor_destroy(struct kref *kref)
 
 	/* paranoia asserts */
 	D_ASSERT(mdev->open_cnt == 0);
-	D_ASSERT(list_empty(&mdev->tconn->data.work.q));
 	/* end paranoia asserts */
 
 	/* cleanup stuff that may have been allocated during
@@ -2700,10 +2698,8 @@ struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts)
 	init_waitqueue_head(&tconn->ping_wait);
 	idr_init(&tconn->volumes);
 
-	drbd_init_workqueue(&tconn->data.work);
+	drbd_init_workqueue(&tconn->sender_work);
 	mutex_init(&tconn->data.mutex);
-
-	drbd_init_workqueue(&tconn->meta.work);
 	mutex_init(&tconn->meta.mutex);
 
 	drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
@@ -3356,7 +3352,7 @@ void drbd_go_diskless(struct drbd_conf *mdev)
 {
 	D_ASSERT(mdev->state.disk == D_FAILED);
 	if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
-		drbd_queue_work(&mdev->tconn->data.work, &mdev->go_diskless);
+		drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless);
 }
 
 /**
@@ -3394,7 +3390,7 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
 	set_bit(BITMAP_IO, &mdev->flags);
 	if (atomic_read(&mdev->ap_bio_cnt) == 0) {
 		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
-			drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w);
+			drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
 	}
 	spin_unlock_irq(&mdev->tconn->req_lock);
 }
@@ -3452,7 +3448,7 @@ static void md_sync_timer_fn(unsigned long data)
 {
 	struct drbd_conf *mdev = (struct drbd_conf *) data;
 
-	drbd_queue_work_front(&mdev->tconn->data.work, &mdev->md_sync_work);
+	drbd_queue_work_front(&mdev->tconn->sender_work, &mdev->md_sync_work);
 }
 
 static int w_md_sync(struct drbd_work *w, int unused)
drivers/block/drbd/drbd_receiver.c
@@ -4413,7 +4413,7 @@ void conn_flush_workqueue(struct drbd_tconn *tconn)
 	barr.w.cb = w_prev_work_done;
 	barr.w.tconn = tconn;
 	init_completion(&barr.done);
-	drbd_queue_work(&tconn->data.work, &barr.w);
+	drbd_queue_work(&tconn->sender_work, &barr.w);
 	wait_for_completion(&barr.done);
 }
 
@@ -5147,7 +5147,7 @@ static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
 	if (w) {
 		w->cb = w_ov_finished;
 		w->mdev = mdev;
-		drbd_queue_work_front(&mdev->tconn->data.work, w);
+		drbd_queue_work(&mdev->tconn->sender_work, w);
 	} else {
 		dev_err(DEV, "kmalloc(w) failed.");
 		ov_out_of_sync_print(mdev);
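conn_flush_workqueue() above shows the flush idiom: queue a barrier work item whose only job is to complete a completion, then wait for it; once it runs, everything queued before it has run too. A hedged sketch of the callback side, assuming the drbd_wq_barrier container implied by barr.w and barr.done:

static int w_prev_work_done(struct drbd_work *w, int cancel)
{
	struct drbd_wq_barrier *b =
		container_of(w, struct drbd_wq_barrier, w);

	/* all work queued before this barrier has been dispatched */
	complete(&b->done);
	return 0;
}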
drivers/block/drbd/drbd_req.c
@@ -170,7 +170,7 @@ static void queue_barrier(struct drbd_conf *mdev)
 	 * dec_ap_pending will be done in got_BarrierAck
 	 * or (on connection loss) in tl_clear. */
 	inc_ap_pending(mdev);
-	drbd_queue_work(&tconn->data.work, &b->w);
+	drbd_queue_work(&tconn->sender_work, &b->w);
 	set_bit(CREATE_BARRIER, &tconn->flags);
 }
 
@@ -483,7 +483,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0);
 		req->rq_state |= RQ_NET_QUEUED;
 		req->w.cb = w_send_read_req;
-		drbd_queue_work(&mdev->tconn->data.work, &req->w);
+		drbd_queue_work(&mdev->tconn->sender_work, &req->w);
 		break;
 
 	case QUEUE_FOR_NET_WRITE:
@@ -527,7 +527,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		D_ASSERT(req->rq_state & RQ_NET_PENDING);
 		req->rq_state |= RQ_NET_QUEUED;
 		req->w.cb = w_send_dblock;
-		drbd_queue_work(&mdev->tconn->data.work, &req->w);
+		drbd_queue_work(&mdev->tconn->sender_work, &req->w);
 
 		/* close the epoch, in case it outgrew the limit */
 		rcu_read_lock();
@@ -542,7 +542,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 	case QUEUE_FOR_SEND_OOS:
 		req->rq_state |= RQ_NET_QUEUED;
 		req->w.cb = w_send_out_of_sync;
-		drbd_queue_work(&mdev->tconn->data.work, &req->w);
+		drbd_queue_work(&mdev->tconn->sender_work, &req->w);
 		break;
 
 	case READ_RETRY_REMOTE_CANCELED:
@@ -682,7 +682,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
 		get_ldev(mdev);
 		req->w.cb = w_restart_disk_io;
-		drbd_queue_work(&mdev->tconn->data.work, &req->w);
+		drbd_queue_work(&mdev->tconn->sender_work, &req->w);
 		break;
 
 	case RESEND:
@@ -692,7 +692,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 	   During connection handshake, we ensure that the peer was not rebooted. */
 		if (!(req->rq_state & RQ_NET_OK)) {
 			if (req->w.cb) {
-				drbd_queue_work(&mdev->tconn->data.work, &req->w);
+				drbd_queue_work(&mdev->tconn->sender_work, &req->w);
 				rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
 			}
 			break;
drivers/block/drbd/drbd_state.c
@@ -1090,7 +1090,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
 		ascw->w.cb = w_after_state_ch;
 		ascw->w.mdev = mdev;
 		ascw->done = done;
-		drbd_queue_work(&mdev->tconn->data.work, &ascw->w);
+		drbd_queue_work(&mdev->tconn->sender_work, &ascw->w);
 	} else {
 		dev_err(DEV, "Could not kmalloc an ascw\n");
 	}
@@ -1764,7 +1764,7 @@ _conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_
 		acscw->w.cb = w_after_conn_state_ch;
 		kref_get(&tconn->kref);
 		acscw->w.tconn = tconn;
-		drbd_queue_work(&tconn->data.work, &acscw->w);
+		drbd_queue_work(&tconn->sender_work, &acscw->w);
 	} else {
 		conn_err(tconn, "Could not kmalloc an acscw\n");
 	}
drivers/block/drbd/drbd_worker.c
@@ -109,7 +109,7 @@ void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(lo
 		__drbd_chk_io_error(mdev, false);
 	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 
-	drbd_queue_work(&mdev->tconn->data.work, &peer_req->w);
+	drbd_queue_work(&mdev->tconn->sender_work, &peer_req->w);
 	put_ldev(mdev);
 }
 
@@ -401,7 +401,7 @@ void resync_timer_fn(unsigned long data)
 	struct drbd_conf *mdev = (struct drbd_conf *) data;
 
 	if (list_empty(&mdev->resync_work.list))
-		drbd_queue_work(&mdev->tconn->data.work, &mdev->resync_work);
+		drbd_queue_work(&mdev->tconn->sender_work, &mdev->resync_work);
 }
 
 static void fifo_set(struct fifo_buffer *fb, int value)
@@ -783,7 +783,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
 		if (w) {
 			w->cb = w_resync_finished;
 			w->mdev = mdev;
-			drbd_queue_work(&mdev->tconn->data.work, w);
+			drbd_queue_work(&mdev->tconn->sender_work, w);
 			return 1;
 		}
 		dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
@@ -1484,7 +1484,7 @@ void start_resync_timer_fn(unsigned long data)
 {
 	struct drbd_conf *mdev = (struct drbd_conf *) data;
 
-	drbd_queue_work(&mdev->tconn->data.work, &mdev->start_resync_work);
+	drbd_queue_work(&mdev->tconn->sender_work, &mdev->start_resync_work);
 }
 
 int w_start_resync(struct drbd_work *w, int cancel)
@@ -1706,7 +1706,7 @@ int drbd_worker(struct drbd_thread *thi)
 		/* as long as we use drbd_queue_work_front(),
 		 * we may only dequeue single work items here, not batches. */
 		if (list_empty(&work_list))
-			dequeue_work_item(&tconn->data.work, &work_list);
+			dequeue_work_item(&tconn->sender_work, &work_list);
 
 		/* Still nothing to do? Poke TCP, just in case,
 		 * then wait for new work (or signal). */
@@ -1721,8 +1721,8 @@ int drbd_worker(struct drbd_thread *thi)
 			drbd_tcp_uncork(tconn->data.socket);
 		mutex_unlock(&tconn->data.mutex);
 
-		wait_event_interruptible(tconn->data.work.q_wait,
-				dequeue_work_item(&tconn->data.work, &work_list));
+		wait_event_interruptible(tconn->sender_work.q_wait,
+				dequeue_work_item(&tconn->sender_work, &work_list));
 
 		mutex_lock(&tconn->data.mutex);
 		if (tconn->data.socket && cork)
@@ -1758,7 +1758,7 @@ int drbd_worker(struct drbd_thread *thi)
 			list_del_init(&w->list);
 			w->cb(w, 1);
 		}
-		dequeue_work_batch(&tconn->data.work, &work_list);
+		dequeue_work_batch(&tconn->sender_work, &work_list);
 	} while (!list_empty(&work_list));
 
 	rcu_read_lock();
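The worker now sleeps on sender_work.q_wait and, per the comment above, pulls one item at a time while drbd_queue_work_front() is in use. dequeue_work_item() itself is not part of this diff; the wait_event_interruptible() condition implies roughly this shape (nonzero return iff an item was moved; q_lock again an assumption):

static int dequeue_work_item(struct drbd_work_queue *queue,
			     struct list_head *work_list)
{
	int found = 0;

	spin_lock_irq(&queue->q_lock);
	if (!list_empty(&queue->q)) {
		/* move exactly one item, keeping queue-front ordering */
		list_move(queue->q.next, work_list);
		found = 1;
	}
	spin_unlock_irq(&queue->q_lock);
	return found;
}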