rds: Fix RDMA message reference counting

The RDS send_xmit code was trying to get fancy with message
counting and was dropping the final reference on the RDMA messages
too early.  This resulted in memory corruption and oopsen.

The fix here is to always add a ref as each part of the message passes
through rds_send_xmit, and always drop a ref as each part of the message
goes through completion handling.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
This commit is contained in:
Chris Mason 2010-05-11 15:14:16 -07:00, committed by Andy Grover
Parent 7e3f2952ee
Commit c9e65383a2
2 changed files: 14 additions and 8 deletions

View file

@@ -268,11 +268,12 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
 		if (send->s_queued + HZ/2 < jiffies)
 			rds_ib_stats_inc(s_ib_tx_stalled);
 
-		if (&send->s_op == &rm->m_final_op) {
-			/* If anyone waited for this message to get flushed out, wake
-			 * them up now */
-			rds_message_unmapped(rm);
+		if (send->s_op) {
+			if (send->s_op == rm->m_final_op) {
+				/* If anyone waited for this message to get flushed out, wake
+				 * them up now */
+				rds_message_unmapped(rm);
+			}
 			rds_message_put(rm);
 			send->s_op = NULL;
 		}

View file

@@ -68,7 +68,6 @@ void rds_send_reset(struct rds_connection *conn)
 		 * transport. This isn't entirely true (it's flushed out
 		 * independently) but as the connection is down, there's
 		 * no ongoing RDMA to/from that memory */
-		printk(KERN_CRIT "send reset unmapping %p\n", rm);
 		rds_message_unmapped(rm);
 	}
 	spin_unlock_irqrestore(&conn->c_send_lock, flags);
@@ -234,10 +233,13 @@ restart:
 		/* The transport either sends the whole rdma or none of it */
 		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
+			rds_message_addref(rm);
 			rm->m_final_op = &rm->rdma;
 			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
-			if (ret)
+			if (ret) {
+				rds_message_put(rm);
 				break;
+			}
 			conn->c_xmit_rdma_sent = 1;
 
 			/* The transport owns the mapped memory for now.
@@ -246,10 +248,13 @@ restart:
 		}
 
 		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
+			rds_message_addref(rm);
 			rm->m_final_op = &rm->atomic;
 			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
-			if (ret)
+			if (ret) {
+				rds_message_put(rm);
 				break;
+			}
 			conn->c_xmit_atomic_sent = 1;
 
 			/* The transport owns the mapped memory for now.