RDMA is now an intrinsic part of RDS, so it's easier to just have
a single header.

Signed-off-by: Andy Grover <andy.grover@oracle.com>
Andy Grover 2010-01-12 12:57:27 -08:00
Parent fc445084f1
Commit 21f79afa5f
12 changed files with 77 additions and 95 deletions

@@ -39,7 +39,6 @@
 #include <net/sock.h>
 
 #include "rds.h"
-#include "rdma.h"
 
 /* this is just used for stats gathering :/ */
 static DEFINE_SPINLOCK(rds_sock_lock);

@@ -37,7 +37,6 @@
 
 #include "rds.h"
 #include "loop.h"
-#include "rdma.h"
 
 #define RDS_CONNECTION_HASH_BITS 12
 #define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)

@@ -34,7 +34,6 @@
 #include <linux/slab.h>
 
 #include "rds.h"
-#include "rdma.h"
 #include "ib.h"

@@ -36,7 +36,6 @@
 #include <linux/dmapool.h>
 
 #include "rds.h"
-#include "rdma.h"
 #include "ib.h"
 
 static void rds_ib_send_rdma_complete(struct rds_message *rm,

@@ -34,7 +34,6 @@
 #include <linux/slab.h>
 
 #include "rds.h"
-#include "rdma.h"
 #include "iw.h"

@@ -36,7 +36,6 @@
 #include <linux/dmapool.h>
 
 #include "rds.h"
-#include "rdma.h"
 #include "iw.h"
 
 static void rds_iw_send_rdma_complete(struct rds_message *rm,

@@ -34,7 +34,6 @@
 #include <linux/slab.h>
 
 #include "rds.h"
-#include "rdma.h"
 
 static DECLARE_WAIT_QUEUE_HEAD(rds_message_flush_waitq);

@@ -35,7 +35,7 @@
 #include <linux/rbtree.h>
 #include <linux/dma-mapping.h> /* for DMA_*_DEVICE */
 
-#include "rdma.h"
+#include "rds.h"
 
 /*
  * XXX

@@ -1,85 +0,0 @@
-#ifndef _RDS_RDMA_H
-#define _RDS_RDMA_H
-
-#include <linux/rbtree.h>
-#include <linux/spinlock.h>
-#include <linux/scatterlist.h>
-
-#include "rds.h"
-
-struct rds_mr {
-	struct rb_node r_rb_node;
-	atomic_t r_refcount;
-	u32 r_key;
-
-	/* A copy of the creation flags */
-	unsigned int r_use_once:1;
-	unsigned int r_invalidate:1;
-	unsigned int r_write:1;
-
-	/* This is for RDS_MR_DEAD.
-	 * It would be nice & consistent to make this part of the above
-	 * bit field here, but we need to use test_and_set_bit.
-	 */
-	unsigned long r_state;
-	struct rds_sock *r_sock; /* back pointer to the socket that owns us */
-	struct rds_transport *r_trans;
-	void *r_trans_private;
-};
-
-/* Flags for mr->r_state */
-#define RDS_MR_DEAD 0
-
-struct rds_rdma_op {
-	u32 r_key;
-	u64 r_remote_addr;
-	unsigned int r_write:1;
-	unsigned int r_fence:1;
-	unsigned int r_notify:1;
-	unsigned int r_recverr:1;
-	unsigned int r_mapped:1;
-	struct rds_notifier *r_notifier;
-	unsigned int r_bytes;
-	unsigned int r_nents;
-	unsigned int r_count;
-	struct scatterlist r_sg[0];
-};
-
-static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
-{
-	return r_key | (((u64) offset) << 32);
-}
-
-static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
-{
-	return cookie;
-}
-
-static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
-{
-	return cookie >> 32;
-}
-
-int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
-int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
-int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
-void rds_rdma_drop_keys(struct rds_sock *rs);
-int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
-		       struct cmsghdr *cmsg);
-int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
-		       struct cmsghdr *cmsg);
-int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
-		       struct cmsghdr *cmsg);
-int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
-		      struct cmsghdr *cmsg);
-void rds_rdma_free_op(struct rds_rdma_op *ro);
-void rds_rdma_send_complete(struct rds_message *rm, int);
-
-extern void __rds_put_mr_final(struct rds_mr *mr);
-static inline void rds_mr_put(struct rds_mr *mr)
-{
-	if (atomic_dec_and_test(&mr->r_refcount))
-		__rds_put_mr_final(mr);
-}
-
-#endif
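The cookie helpers in the header above move verbatim into rds.h below. They pack the 32-bit R_Key into the low half of a 64-bit cookie and the byte offset into the high half, so both values travel as a single word. A minimal standalone sketch of the same packing, using plain stdint types and demo_* names instead of the kernel's rds_rdma_cookie_t (the demo_* identifiers are illustrative only, not part of RDS):

#include <assert.h>
#include <stdint.h>

typedef uint64_t demo_rdma_cookie_t;	/* stand-in for rds_rdma_cookie_t */

static inline demo_rdma_cookie_t demo_make_cookie(uint32_t r_key, uint32_t offset)
{
	/* R_Key occupies bits 0..31, the offset bits 32..63. */
	return (demo_rdma_cookie_t)r_key | ((demo_rdma_cookie_t)offset << 32);
}

static inline uint32_t demo_cookie_key(demo_rdma_cookie_t cookie)
{
	return (uint32_t)cookie;		/* low 32 bits */
}

static inline uint32_t demo_cookie_offset(demo_rdma_cookie_t cookie)
{
	return (uint32_t)(cookie >> 32);	/* high 32 bits */
}

int main(void)
{
	demo_rdma_cookie_t c = demo_make_cookie(0xdeadbeef, 4096);

	assert(demo_cookie_key(c) == 0xdeadbeef);
	assert(demo_cookie_offset(c) == 4096);
	return 0;
}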

@@ -206,6 +206,60 @@ struct rds_incoming {
 	rds_rdma_cookie_t i_rdma_cookie;
 };
+
+struct rds_mr {
+	struct rb_node r_rb_node;
+	atomic_t r_refcount;
+	u32 r_key;
+
+	/* A copy of the creation flags */
+	unsigned int r_use_once:1;
+	unsigned int r_invalidate:1;
+	unsigned int r_write:1;
+
+	/* This is for RDS_MR_DEAD.
+	 * It would be nice & consistent to make this part of the above
+	 * bit field here, but we need to use test_and_set_bit.
+	 */
+	unsigned long r_state;
+	struct rds_sock *r_sock; /* back pointer to the socket that owns us */
+	struct rds_transport *r_trans;
+	void *r_trans_private;
+};
+
+/* Flags for mr->r_state */
+#define RDS_MR_DEAD 0
+
+struct rds_rdma_op {
+	u32 r_key;
+	u64 r_remote_addr;
+	unsigned int r_write:1;
+	unsigned int r_fence:1;
+	unsigned int r_notify:1;
+	unsigned int r_recverr:1;
+	unsigned int r_mapped:1;
+	unsigned int r_active:1;
+	struct rds_notifier *r_notifier;
+	unsigned int r_bytes;
+	unsigned int r_nents;
+	unsigned int r_count;
+	struct scatterlist *r_sg;
+};
+
+static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
+{
+	return r_key | (((u64) offset) << 32);
+}
+
+static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
+{
+	return cookie;
+}
+
+static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
+{
+	return cookie >> 32;
+}
 
 /*
  * m_sock_item and m_conn_item are on lists that are serialized under
  * conn->c_lock. m_sock_item has additional meaning in that once it is empty
@@ -654,6 +708,28 @@ struct rds_message *rds_send_get_message(struct rds_connection *,
 
 /* rdma.c */
 void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
+int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
+int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
+int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
+void rds_rdma_drop_keys(struct rds_sock *rs);
+int rds_rdma_extra_size(struct rds_rdma_args *args);
+int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
+		       struct cmsghdr *cmsg);
+int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
+		       struct cmsghdr *cmsg);
+int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
+		       struct cmsghdr *cmsg);
+int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
+		      struct cmsghdr *cmsg);
+void rds_rdma_free_op(struct rds_rdma_op *ro);
+void rds_rdma_send_complete(struct rds_message *rm, int);
+
+extern void __rds_put_mr_final(struct rds_mr *mr);
+static inline void rds_mr_put(struct rds_mr *mr)
+{
+	if (atomic_dec_and_test(&mr->r_refcount))
+		__rds_put_mr_final(mr);
+}
 
 /* stats.c */
 DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
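rds_mr_put(), now declared in rds.h for every RDS source file, is a standard atomic dec-and-test release: whoever drops the last reference calls __rds_put_mr_final() for the final teardown. A rough userspace sketch of the same pattern using C11 atomics; the demo_* names and the printf are purely illustrative, not RDS code:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_mr {
	atomic_int refcount;	/* stand-in for rds_mr.r_refcount */
};

static void demo_mr_final_free(struct demo_mr *mr)
{
	/* Analogue of __rds_put_mr_final(): runs exactly once, on the last put. */
	printf("last reference dropped, freeing MR\n");
	free(mr);
}

static void demo_mr_put(struct demo_mr *mr)
{
	/* atomic_fetch_sub returns the old value; 1 means we were the last holder. */
	if (atomic_fetch_sub(&mr->refcount, 1) == 1)
		demo_mr_final_free(mr);
}

int main(void)
{
	struct demo_mr *mr = malloc(sizeof(*mr));

	if (!mr)
		return 1;
	atomic_init(&mr->refcount, 2);	/* e.g. one ref for the owner, one in flight */
	demo_mr_put(mr);	/* one reference remains */
	demo_mr_put(mr);	/* final put frees the object */
	return 0;
}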

@@ -36,7 +36,6 @@
 #include <linux/in.h>
 
 #include "rds.h"
-#include "rdma.h"
 
 void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
 		  __be32 saddr)

@@ -37,7 +37,6 @@
 #include <linux/list.h>
 
 #include "rds.h"
-#include "rdma.h"
 
 /* When transmitting messages in rds_send_xmit, we need to emerge from
  * time to time and briefly release the CPU. Otherwise the softlock watchdog