cifs: smbd: Don't destroy transport on RDMA disconnect

Now that the upper layer handles transport shutdown and reconnect, remove
the code that handled transport shutdown on RDMA disconnect.

Signed-off-by: Long Li <longli@microsoft.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
Long Li 2019-04-05 21:36:31 +00:00 committed by Steve French
Parent 050b8c3740
Commit e8b3bfe9bc
3 changed files with 10 additions and 127 deletions
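
For illustration only, a minimal sketch of the split of responsibilities this
commit moves to, simplified from the hunks below (the real functions handle
more events and state): the RDMA CM upcall now merely records the disconnect
and wakes any waiters, while the upper layer's reconnect path calls
smbd_destroy() to tear the transport down.

/* Sketch: the RDMA CM upcall no longer schedules a destroy work item. */
static int smbd_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct smbd_connection *info = id->context;

	switch (event->event) {
	case RDMA_CM_EVENT_DISCONNECTED:
		info->transport_status = SMBD_DISCONNECTED;
		wake_up_interruptible(&info->disconn_wait);
		wake_up_interruptible(&info->wait_reassembly_queue);
		wake_up_interruptible_all(&info->wait_send_queue);
		break;
	default:
		break;
	}
	return 0;
}

/* Sketch: the upper layer drives teardown explicitly on reconnect. */
void smbd_destroy(struct TCP_Server_Info *server)
{
	struct smbd_connection *info = server->smbd_conn;

	if (info->transport_status != SMBD_DISCONNECTED) {
		rdma_disconnect(info->id);
		wait_event_interruptible(info->disconn_wait,
			info->transport_status == SMBD_DISCONNECTED);
	}
	/* ... then free the QP/CQ/PD, receive buffers and mempools ... */
}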

View File

@@ -312,12 +312,10 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
 				atomic_read(&server->smbd_conn->send_credits),
 				atomic_read(&server->smbd_conn->receive_credits),
 				server->smbd_conn->receive_credit_target);
-			seq_printf(m, "\nPending send_pending: %x send_payload_pending:"
-				" %x smbd_send_pending: %x smbd_recv_pending: %x",
+			seq_printf(m, "\nPending send_pending: %x "
+				"send_payload_pending: %x",
 				atomic_read(&server->smbd_conn->send_pending),
-				atomic_read(&server->smbd_conn->send_payload_pending),
-				server->smbd_conn->smbd_send_pending,
-				server->smbd_conn->smbd_recv_pending);
+				atomic_read(&server->smbd_conn->send_payload_pending));
 			seq_printf(m, "\nReceive buffers count_receive_queue: %x "
 				"count_empty_packet_queue: %x",
 				server->smbd_conn->count_receive_queue,

View File

@@ -164,95 +164,6 @@ do { \
 #define log_rdma_mr(level, fmt, args...) \
 	log_rdma(level, LOG_RDMA_MR, fmt, ##args)
 
-/*
- * Destroy the transport and related RDMA and memory resources
- * Need to go through all the pending counters and make sure on one is using
- * the transport while it is destroyed
- */
-static void smbd_destroy_rdma_work(struct work_struct *work)
-{
-	struct smbd_response *response;
-	struct smbd_connection *info =
-		container_of(work, struct smbd_connection, destroy_work);
-	unsigned long flags;
-
-	log_rdma_event(INFO, "destroying qp\n");
-	ib_drain_qp(info->id->qp);
-	rdma_destroy_qp(info->id);
-
-	/* Unblock all I/O waiting on the send queue */
-	wake_up_interruptible_all(&info->wait_send_queue);
-
-	log_rdma_event(INFO, "cancelling idle timer\n");
-	cancel_delayed_work_sync(&info->idle_timer_work);
-	log_rdma_event(INFO, "cancelling send immediate work\n");
-	cancel_delayed_work_sync(&info->send_immediate_work);
-
-	log_rdma_event(INFO, "wait for all send to finish\n");
-	wait_event(info->wait_smbd_send_pending,
-		info->smbd_send_pending == 0);
-
-	log_rdma_event(INFO, "wait for all recv to finish\n");
-	wake_up_interruptible(&info->wait_reassembly_queue);
-	wait_event(info->wait_smbd_recv_pending,
-		info->smbd_recv_pending == 0);
-
-	log_rdma_event(INFO, "wait for all send posted to IB to finish\n");
-	wait_event(info->wait_send_pending,
-		atomic_read(&info->send_pending) == 0);
-	wait_event(info->wait_send_payload_pending,
-		atomic_read(&info->send_payload_pending) == 0);
-
-	log_rdma_event(INFO, "freeing mr list\n");
-	wake_up_interruptible_all(&info->wait_mr);
-	wait_event(info->wait_for_mr_cleanup,
-		atomic_read(&info->mr_used_count) == 0);
-	destroy_mr_list(info);
-
-	/* It's not posssible for upper layer to get to reassembly */
-	log_rdma_event(INFO, "drain the reassembly queue\n");
-	do {
-		spin_lock_irqsave(&info->reassembly_queue_lock, flags);
-		response = _get_first_reassembly(info);
-		if (response) {
-			list_del(&response->list);
-			spin_unlock_irqrestore(
-				&info->reassembly_queue_lock, flags);
-			put_receive_buffer(info, response);
-		} else
-			spin_unlock_irqrestore(&info->reassembly_queue_lock, flags);
-	} while (response);
-	info->reassembly_data_length = 0;
-
-	log_rdma_event(INFO, "free receive buffers\n");
-	wait_event(info->wait_receive_queues,
-		info->count_receive_queue + info->count_empty_packet_queue
-			== info->receive_credit_max);
-	destroy_receive_buffers(info);
-
-	ib_free_cq(info->send_cq);
-	ib_free_cq(info->recv_cq);
-	ib_dealloc_pd(info->pd);
-	rdma_destroy_id(info->id);
-
-	/* free mempools */
-	mempool_destroy(info->request_mempool);
-	kmem_cache_destroy(info->request_cache);
-
-	mempool_destroy(info->response_mempool);
-	kmem_cache_destroy(info->response_cache);
-
-	info->transport_status = SMBD_DESTROYED;
-	wake_up_all(&info->wait_destroy);
-}
-
-static int smbd_process_disconnected(struct smbd_connection *info)
-{
-	schedule_work(&info->destroy_work);
-	return 0;
-}
-
 static void smbd_disconnect_rdma_work(struct work_struct *work)
 {
 	struct smbd_connection *info =
@@ -319,8 +230,7 @@ static int smbd_conn_upcall(
 		}
 
 		info->transport_status = SMBD_DISCONNECTED;
-		smbd_process_disconnected(info);
-		wake_up(&info->disconn_wait);
+		wake_up_interruptible(&info->disconn_wait);
 		wake_up_interruptible(&info->wait_reassembly_queue);
 		wake_up_interruptible_all(&info->wait_send_queue);
 		break;
@@ -1501,7 +1411,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
 	if (info->transport_status != SMBD_DISCONNECTED) {
 		rdma_disconnect(server->smbd_conn->id);
 		log_rdma_event(INFO, "wait for transport being disconnected\n");
-		wait_event(
+		wait_event_interruptible(
 			info->disconn_wait,
 			info->transport_status == SMBD_DISCONNECTED);
 	}
@@ -1849,12 +1759,6 @@ static struct smbd_connection *_smbd_get_connection(
 	queue_delayed_work(info->workqueue, &info->idle_timer_work,
 		info->keep_alive_interval*HZ);
 
-	init_waitqueue_head(&info->wait_smbd_send_pending);
-	info->smbd_send_pending = 0;
-
-	init_waitqueue_head(&info->wait_smbd_recv_pending);
-	info->smbd_recv_pending = 0;
-
 	init_waitqueue_head(&info->wait_send_pending);
 	atomic_set(&info->send_pending, 0);
@@ -1862,7 +1766,6 @@ static struct smbd_connection *_smbd_get_connection(
 	atomic_set(&info->send_payload_pending, 0);
 
 	INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work);
-	INIT_WORK(&info->destroy_work, smbd_destroy_rdma_work);
 	INIT_WORK(&info->recv_done_work, smbd_recv_done_work);
 	INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits);
 	info->new_credits_offered = 0;
@@ -1956,11 +1859,6 @@ static int smbd_recv_buf(struct smbd_connection *info, char *buf,
 	int rc;
 
 again:
-	if (info->transport_status != SMBD_CONNECTED) {
-		log_read(ERR, "disconnected\n");
-		return -ENODEV;
-	}
-
 	/*
 	 * No need to hold the reassembly queue lock all the time as we are
 	 * the only one reading from the front of the queue. The transport
@@ -2076,6 +1974,11 @@ read_rfc1002_done:
 		if (rc)
 			return -ENODEV;
 
+	if (info->transport_status != SMBD_CONNECTED) {
+		log_read(ERR, "disconnected\n");
+		return 0;
+	}
+
 	goto again;
 }
@@ -2126,8 +2029,6 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
 	unsigned int to_read, page_offset;
 	int rc;
 
-	info->smbd_recv_pending++;
-
 	if (iov_iter_rw(&msg->msg_iter) == WRITE) {
 		/* It's a bug in upper layer to get there */
 		cifs_dbg(VFS, "CIFS: invalid msg iter dir %u\n",
@@ -2158,9 +2059,6 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
 	}
 
 out:
-	info->smbd_recv_pending--;
-	wake_up(&info->wait_smbd_recv_pending);
-
 	/* SMBDirect will read it all or nothing */
 	if (rc > 0)
 		msg->msg_iter.count = 0;
@@ -2186,7 +2084,6 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 	struct kvec *iov;
 	int rc;
 
-	info->smbd_send_pending++;
 	if (info->transport_status != SMBD_CONNECTED) {
 		rc = -ENODEV;
 		goto done;
@@ -2342,9 +2239,6 @@ done:
 	wait_event(info->wait_send_payload_pending,
 		atomic_read(&info->send_payload_pending) == 0);
 
-	info->smbd_send_pending--;
-	wake_up(&info->wait_smbd_send_pending);
-
 	return rc;
 }
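
For context, a hedged sketch of the receive-path behaviour after the
smbd_recv_buf() changes above (the function name smbd_recv_buf_sketch and
the elided copy step are illustrative, not the exact driver code): data
already sitting in the reassembly queue is still handed back to the caller
after a disconnect, and only once nothing more can arrive does the read path
report end-of-stream (0) instead of failing up front with -ENODEV.

/* Sketch: simplified read loop reflecting the new ordering of checks. */
static int smbd_recv_buf_sketch(struct smbd_connection *info,
				char *buf, unsigned int size)
{
	int rc;

again:
	/* Hand back anything already reassembled, even after a disconnect. */
	if (info->reassembly_data_length >= size) {
		/* ... copy one packet's payload into buf ... */
		return size;
	}

	/* Nothing buffered: wait for more data or for the disconnect. */
	rc = wait_event_interruptible(info->wait_reassembly_queue,
		info->reassembly_data_length >= size ||
		info->transport_status != SMBD_CONNECTED);
	if (rc)
		return -ENODEV;

	if (info->transport_status != SMBD_CONNECTED) {
		log_read(ERR, "disconnected\n");
		return 0;
	}

	goto again;
}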

View File

@@ -70,13 +70,11 @@ struct smbd_connection {
 	int ri_rc;
 	struct completion ri_done;
 	wait_queue_head_t conn_wait;
-	wait_queue_head_t wait_destroy;
 	wait_queue_head_t disconn_wait;
 
 	struct completion negotiate_completion;
 	bool negotiate_done;
 
-	struct work_struct destroy_work;
 	struct work_struct disconnect_work;
 	struct work_struct recv_done_work;
 	struct work_struct post_send_credits_work;
@@ -124,13 +122,6 @@ struct smbd_connection {
 	wait_queue_head_t wait_for_mr_cleanup;
 
 	/* Activity accoutning */
-	/* Pending reqeusts issued from upper layer */
-	int smbd_send_pending;
-	wait_queue_head_t wait_smbd_send_pending;
-
-	int smbd_recv_pending;
-	wait_queue_head_t wait_smbd_recv_pending;
-
 	atomic_t send_pending;
 	wait_queue_head_t wait_send_pending;
 	atomic_t send_payload_pending;