[PATCH] IB: Update MAD client API

Automatically allocate an MR when registering a MAD agent.
MAD clients are modified to use this updated API.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Hal Rosenstock <halr@voltaire.com>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Hal Rosenstock 2005-07-27 11:45:22 -07:00, committed by Linus Torvalds
Parent: c183a4c335
Commit: b82cab6b33
5 changed files: 24 additions and 40 deletions
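As context for the hunks that follow: with the updated API, ib_register_mad_agent() allocates a DMA MR and stores it in the new mr field of struct ib_mad_agent, ib_unregister_mad_agent() deregisters it, and clients take the lkey for their send gather entries from mad_agent->mr instead of keeping their own MR. Below is a minimal sketch of a client written against this API; it is illustrative only and not code from this patch (the names my_agent, my_open_port, my_fill_sge and the empty handlers are invented, and the header path varies by kernel version).

/* Illustrative sketch only, not part of the patch. */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <rdma/ib_mad.h>	/* header path varies by kernel version */

static struct ib_mad_agent *my_agent;

static void my_send_handler(struct ib_mad_agent *agent,
			    struct ib_mad_send_wc *send_wc)
{
	/* a real client would unmap its buffer and complete the request here */
}

static void my_recv_handler(struct ib_mad_agent *agent,
			    struct ib_mad_recv_wc *recv_wc)
{
	ib_free_recv_mad(recv_wc);	/* a real client would parse it first */
}

static int my_open_port(struct ib_device *device, u8 port_num)
{
	/* Register a simple agent on the GSI QP (no ib_mad_reg_req). */
	my_agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
					 NULL, 0, my_send_handler,
					 my_recv_handler, NULL);
	if (IS_ERR(my_agent))
		return PTR_ERR(my_agent);

	/*
	 * With this patch the client no longer calls
	 * ib_get_dma_mr(my_agent->qp->pd, IB_ACCESS_LOCAL_WRITE) itself:
	 * the MR already exists as my_agent->mr.
	 */
	return 0;
}

static void my_fill_sge(struct ib_sge *sge, struct ib_mad *mad)
{
	sge->addr   = dma_map_single(my_agent->device->dma_device,
				     mad, sizeof *mad, DMA_TO_DEVICE);
	sge->length = sizeof *mad;
	sge->lkey   = my_agent->mr->lkey;	/* MR owned by the agent */
}

Teardown is symmetric: the client only calls ib_unregister_mad_agent(my_agent), and the MR is deregistered inside unregister_mad_agent(), as the mad.c hunk below shows.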

View file

@@ -134,7 +134,7 @@ static int agent_mad_send(struct ib_mad_agent *mad_agent,
 				  sizeof(mad_priv->mad),
 				  DMA_TO_DEVICE);
 	gather_list.length = sizeof(mad_priv->mad);
-	gather_list.lkey = (*port_priv->mr).lkey;
+	gather_list.lkey = mad_agent->mr->lkey;
 
 	send_wr.next = NULL;
 	send_wr.opcode = IB_WR_SEND;
@@ -322,22 +322,12 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 		goto error3;
 	}
 
-	port_priv->mr = ib_get_dma_mr(port_priv->smp_agent->qp->pd,
-				      IB_ACCESS_LOCAL_WRITE);
-	if (IS_ERR(port_priv->mr)) {
-		printk(KERN_ERR SPFX "Couldn't get DMA MR\n");
-		ret = PTR_ERR(port_priv->mr);
-		goto error4;
-	}
-
 	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
 	list_add_tail(&port_priv->port_list, &ib_agent_port_list);
 	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
 
 	return 0;
 
-error4:
-	ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
 error3:
 	ib_unregister_mad_agent(port_priv->smp_agent);
 error2:
@@ -361,8 +351,6 @@ int ib_agent_port_close(struct ib_device *device, int port_num)
 	list_del(&port_priv->port_list);
 	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
 
-	ib_dereg_mr(port_priv->mr);
-
 	ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
 	ib_unregister_mad_agent(port_priv->smp_agent);
 	kfree(port_priv);

View file

@@ -33,7 +33,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: agent_priv.h 1389 2004-12-27 22:56:47Z roland $
+ * $Id: agent_priv.h 1640 2005-01-24 22:39:02Z halr $
  */
 
 #ifndef __IB_AGENT_PRIV_H__
@@ -57,7 +57,6 @@ struct ib_agent_port_private {
 	int port_num;
 	struct ib_mad_agent *smp_agent;       /* SM class */
 	struct ib_mad_agent *perf_mgmt_agent; /* PerfMgmt class */
-	struct ib_mr *mr;
 };
 
 #endif	/* __IB_AGENT_PRIV_H__ */

View file

@@ -261,19 +261,26 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 		ret = ERR_PTR(-ENOMEM);
 		goto error1;
 	}
+	memset(mad_agent_priv, 0, sizeof *mad_agent_priv);
+
+	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
+						 IB_ACCESS_LOCAL_WRITE);
+	if (IS_ERR(mad_agent_priv->agent.mr)) {
+		ret = ERR_PTR(-ENOMEM);
+		goto error2;
+	}
 
 	if (mad_reg_req) {
 		reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
 		if (!reg_req) {
 			ret = ERR_PTR(-ENOMEM);
-			goto error2;
+			goto error3;
 		}
 		/* Make a copy of the MAD registration request */
 		memcpy(reg_req, mad_reg_req, sizeof *reg_req);
 	}
 
 	/* Now, fill in the various structures */
-	memset(mad_agent_priv, 0, sizeof *mad_agent_priv);
 	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
 	mad_agent_priv->reg_req = reg_req;
 	mad_agent_priv->rmpp_version = rmpp_version;
@@ -301,7 +308,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 		if (method) {
 			if (method_in_use(&method,
 					  mad_reg_req))
-				goto error3;
+				goto error4;
 		}
 	}
 	ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
@@ -317,14 +324,14 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 				if (is_vendor_method_in_use(
 							vendor_class,
 							mad_reg_req))
-					goto error3;
+					goto error4;
 			}
 		}
 		ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
 	}
 	if (ret2) {
 		ret = ERR_PTR(ret2);
-		goto error3;
+		goto error4;
 	}
 }
 
@@ -346,11 +353,13 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	return &mad_agent_priv->agent;
 
-error3:
+error4:
 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 	kfree(reg_req);
-error2:
+error3:
 	kfree(mad_agent_priv);
+error2:
+	ib_dereg_mr(mad_agent_priv->agent.mr);
 error1:
 	return ret;
 }
 
@@ -487,18 +496,15 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 	 * MADs, preventing us from queuing additional work
 	 */
 	cancel_mads(mad_agent_priv);
 
 	port_priv = mad_agent_priv->qp_info->port_priv;
-
 	cancel_delayed_work(&mad_agent_priv->timed_work);
-	flush_workqueue(port_priv->wq);
 
 	spin_lock_irqsave(&port_priv->reg_lock, flags);
 	remove_mad_reg_req(mad_agent_priv);
 	list_del(&mad_agent_priv->agent_list);
 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
+	flush_workqueue(port_priv->wq);
 
-	/* XXX: Cleanup pending RMPP receives for this agent */
-
 	atomic_dec(&mad_agent_priv->refcount);
 	wait_event(mad_agent_priv->wait,
@@ -506,6 +512,7 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 
 	if (mad_agent_priv->reg_req)
 		kfree(mad_agent_priv->reg_req);
+	ib_dereg_mr(mad_agent_priv->agent.mr);
 	kfree(mad_agent_priv);
 }
 
@@ -750,7 +757,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 	queue_work(mad_agent_priv->qp_info->port_priv->wq,
-		  &mad_agent_priv->local_work);
+		   &mad_agent_priv->local_work);
 	ret = 1;
 out:
 	return ret;

View file

@@ -77,7 +77,6 @@ struct ib_sa_sm_ah {
 
 struct ib_sa_port {
 	struct ib_mad_agent *agent;
-	struct ib_mr        *mr;
 	struct ib_sa_sm_ah  *sm_ah;
 	struct work_struct   update_task;
 	spinlock_t           ah_lock;
@@ -492,7 +491,7 @@ retry:
 				  sizeof (struct ib_sa_mad),
 				  DMA_TO_DEVICE);
 	gather_list.length = sizeof (struct ib_sa_mad);
-	gather_list.lkey   = port->mr->lkey;
+	gather_list.lkey   = port->agent->mr->lkey;
 	pci_unmap_addr_set(query, mapping, gather_list.addr);
 
 	ret = ib_post_send_mad(port->agent, &wr, &bad_wr);
@@ -780,7 +779,6 @@ static void ib_sa_add_one(struct ib_device *device)
 	sa_dev->end_port   = e;
 
 	for (i = 0; i <= e - s; ++i) {
-		sa_dev->port[i].mr       = NULL;
 		sa_dev->port[i].sm_ah    = NULL;
 		sa_dev->port[i].port_num = i + s;
 		spin_lock_init(&sa_dev->port[i].ah_lock);
@@ -792,13 +790,6 @@ static void ib_sa_add_one(struct ib_device *device)
 		if (IS_ERR(sa_dev->port[i].agent))
 			goto err;
 
-		sa_dev->port[i].mr = ib_get_dma_mr(sa_dev->port[i].agent->qp->pd,
-						   IB_ACCESS_LOCAL_WRITE);
-		if (IS_ERR(sa_dev->port[i].mr)) {
-			ib_unregister_mad_agent(sa_dev->port[i].agent);
-			goto err;
-		}
-
 		INIT_WORK(&sa_dev->port[i].update_task,
 			  update_sm_ah, &sa_dev->port[i]);
 	}
@@ -822,10 +813,8 @@ static void ib_sa_add_one(struct ib_device *device)
 	return;
 
 err:
-	while (--i >= 0) {
-		ib_dereg_mr(sa_dev->port[i].mr);
+	while (--i >= 0)
 		ib_unregister_mad_agent(sa_dev->port[i].agent);
-	}
 
 	kfree(sa_dev);
 

View file

@@ -180,6 +180,7 @@ typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
 struct ib_mad_agent {
 	struct ib_device	*device;
 	struct ib_qp		*qp;
+	struct ib_mr		*mr;
 	ib_mad_recv_handler	recv_handler;
 	ib_mad_send_handler	send_handler;
 	ib_mad_snoop_handler	snoop_handler;