Fixups for -rc4 kernel
- Fix for dma_ops change in this kernel, resolving the s390, powerpc,
  and IOMMU operation
- A few other oops fixes
- The rest are all minor fixes

-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJY1eNNAAoJELgmozMOVy/dS/MQAMK8z2j35Udiop19LQIoQM0B
5BAmy7Khrz8F+rB5SaFnQfWGT4mg9qOwEeI6eWoKmrByrrIjErZOKLZGgVecxkFx
agzTPsApD6O3U/h8CYmoED3Hgg+DibQWcdYpVHeBkcYd6ljDYPrh9F28oDVmYVFI
z3mkBnTNS3wzGdTFAdhW18H4shUeUOQlweWuCNA4LdPjlstITl0WBo6TIe2WfilD
FEHIH/mkoZwmKAU1GCmLWo0mw9s9ng2YtKN4wKuDWm+BOSSYRr3z/ClGEAjk0aat
8JouAYZopcnATm5vvjMGeDPnbpgByydriQ7WinxzqFF5A7dg3CrHaN3DhhjowdXt
ufJrZAcc1VZFhUOABAwmueUlGpLxF/oJD8FcjdvpRgHt/SY5njlQw/yhrqL/7Eew
zvfFFw1GxLtyPXxB8olWHpaw3S9l7N5MoezlZlrZJvpc0416YGOUsljymyS4p7w1
Mpfe0kmbn/Whp0Vt7uBJ5WJ1NguGOi0F9hJFQ99Jmf7mfdplCIoRjSnQzWGfkmOd
NVLHaYnPjmX3lO8RYfKabKTQ+X2D/uBvH0PDn/kc9J9y1jMYwHIq8GHNb1q+QPLq
Wvlfsvo33sMbo2G2JJu31QOigMlBLy+P88m+j9uHs/nPHLYWzsvPMUhikwtO9a7j
txOeyoDW+oAPTaIq459u
=6axB
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull rdma fixes from Doug Ledford:
 "This has been a slow -rc cycle for the RDMA subsystem. We really
  haven't had a lot of rc fixes come in. This pull request is the first
  of this entire rc cycle, and it has all of the suitable fixes so far
  while still being only about 20 patches. The fix for the minor
  breakage caused by the dma mapping patchset is in here, as well as a
  couple of other potential oops fixes, but the rest is more minor.

  Summary:

   - fix for dma_ops change in this kernel, resolving the s390, powerpc,
     and IOMMU operation

   - a few other oops fixes

   - the rest are all minor fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
  IB/qib: fix false-postive maybe-uninitialized warning
  RDMA/iser: Fix possible mr leak on device removal event
  IB/device: Convert ib-comp-wq to be CPU-bound
  IB/cq: Don't process more than the given budget
  IB/rxe: increment msn only when completing a request
  uapi: fix rdma/mlx5-abi.h userspace compilation errors
  IB/core: Restore I/O MMU, s390 and powerpc support
  IB/rxe: Update documentation link
  RDMA/ocrdma: fix a type issue in ocrdma_put_pd_num()
  IB/rxe: double free on error
  RDMA/vmw_pvrdma: Activate device on ethernet link up
  RDMA/vmw_pvrdma: Dont hardcode QP header page
  RDMA/vmw_pvrdma: Cleanup unused variables
  infiniband: Fix alignment of mmap cookies to support VIPT caching
  IB/core: Protect against self-requeue of a cq work item
  i40iw: Receive netdev events post INET_NOTIFIER state
Commit 4a01fa5e75
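For orientation before the diff: the dma_ops item above corresponds to the ib_register_device() and ib_dma_*() hunks further down. The sketch below condenses that restored selection logic into a standalone helper; the function name ib_pick_dma_device() is hypothetical (in the kernel this code sits inline in ib_register_device()), but the fields and fallback behaviour mirror the hunks in this merge.

#include <rdma/ib_verbs.h>

/*
 * Hypothetical helper summarizing the dma_ops fix: pick the struct device
 * that every ib_dma_*() wrapper will hand to the DMA API.
 */
static void ib_pick_dma_device(struct ib_device *device)
{
        struct device *parent = device->dev.parent;

        if (device->dev.dma_ops) {
                /* Driver supplied its own dma_ops: map through the ib_device's
                 * own struct device, inheriting unset masks from the parent. */
                device->dma_device = &device->dev;
                if (!device->dev.dma_mask)
                        device->dev.dma_mask = parent->dma_mask;
                if (!device->dev.coherent_dma_mask)
                        device->dev.coherent_dma_mask = parent->coherent_dma_mask;
        } else {
                /* No custom dma_ops: use the parent (e.g. PCI) device directly,
                 * which keeps IOMMU, s390 and powerpc DMA paths working. */
                device->dma_device = parent;
        }
}

The ib_dma_*() wrappers in include/rdma/ib_verbs.h then dispatch through dev->dma_device (see the ib_verbs.h hunks near the end of this diff) rather than &dev->dev, which is what restores the s390, powerpc and IOMMU configurations.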
@@ -29,7 +29,13 @@ static int __ib_process_cq(struct ib_cq *cq, int budget)
 {
         int i, n, completed = 0;

-        while ((n = ib_poll_cq(cq, IB_POLL_BATCH, cq->wc)) > 0) {
+        /*
+         * budget might be (-1) if the caller does not
+         * want to bound this call, thus we need unsigned
+         * minimum here.
+         */
+        while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH,
+                        budget - completed), cq->wc)) > 0) {
                 for (i = 0; i < n; i++) {
                         struct ib_wc *wc = &cq->wc[i];


@@ -196,7 +202,7 @@ void ib_free_cq(struct ib_cq *cq)
                 irq_poll_disable(&cq->iop);
                 break;
         case IB_POLL_WORKQUEUE:
-                flush_work(&cq->work);
+                cancel_work_sync(&cq->work);
                 break;
         default:
                 WARN_ON_ONCE(1);
@@ -336,12 +336,26 @@ int ib_register_device(struct ib_device *device,
         struct device *parent = device->dev.parent;

         WARN_ON_ONCE(!parent);
-        if (!device->dev.dma_ops)
-                device->dev.dma_ops = parent->dma_ops;
-        if (!device->dev.dma_mask)
-                device->dev.dma_mask = parent->dma_mask;
-        if (!device->dev.coherent_dma_mask)
-                device->dev.coherent_dma_mask = parent->coherent_dma_mask;
+        WARN_ON_ONCE(device->dma_device);
+        if (device->dev.dma_ops) {
+                /*
+                 * The caller provided custom DMA operations. Copy the
+                 * DMA-related fields that are used by e.g. dma_alloc_coherent()
+                 * into device->dev.
+                 */
+                device->dma_device = &device->dev;
+                if (!device->dev.dma_mask)
+                        device->dev.dma_mask = parent->dma_mask;
+                if (!device->dev.coherent_dma_mask)
+                        device->dev.coherent_dma_mask =
+                                parent->coherent_dma_mask;
+        } else {
+                /*
+                 * The caller did not provide custom DMA operations. Use the
+                 * DMA mapping operations of the parent device.
+                 */
+                device->dma_device = parent;
+        }

         mutex_lock(&device_mutex);


@@ -1015,8 +1029,7 @@ static int __init ib_core_init(void)
                 return -ENOMEM;

         ib_comp_wq = alloc_workqueue("ib-comp-wq",
-                        WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
-                        WQ_UNBOUND_MAX_ACTIVE);
+                        WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
         if (!ib_comp_wq) {
                 ret = -ENOMEM;
                 goto err;
@@ -160,6 +160,9 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
                 return NOTIFY_DONE;

         iwdev = &hdl->device;
+        if (iwdev->init_state < INET_NOTIFIER)
+                return NOTIFY_DONE;
+
         netdev = iwdev->ldev->netdev;
         upper_dev = netdev_master_upper_dev_get(netdev);
         if (netdev != event_netdev)

@@ -214,6 +217,9 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
                 return NOTIFY_DONE;

         iwdev = &hdl->device;
+        if (iwdev->init_state < INET_NOTIFIER)
+                return NOTIFY_DONE;
+
         netdev = iwdev->ldev->netdev;
         if (netdev != event_netdev)
                 return NOTIFY_DONE;

@@ -260,6 +266,8 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *
                 if (!iwhdl)
                         return NOTIFY_DONE;
                 iwdev = &iwhdl->device;
+                if (iwdev->init_state < INET_NOTIFIER)
+                        return NOTIFY_DONE;
                 p = (__be32 *)neigh->primary_key;
                 i40iw_copy_ip_ntohl(local_ipaddr, p);
                 if (neigh->nud_state & NUD_VALID) {
@@ -372,7 +372,7 @@ static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
         return 0;
 }

-static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
+static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
                                 bool dpp_pool)
 {
         int status;
@@ -7068,7 +7068,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
         unsigned long flags;

         while (wait) {
-                unsigned long shadow;
+                unsigned long shadow = 0;
                 int cstart, previ = -1;

                 /*
@@ -69,6 +69,9 @@
  */
 #define PCI_DEVICE_ID_VMWARE_PVRDMA     0x0820

+#define PVRDMA_NUM_RING_PAGES           4
+#define PVRDMA_QP_NUM_HEADER_PAGES      1
+
 struct pvrdma_dev;

 struct pvrdma_page_dir {
@@ -132,7 +132,7 @@ enum pvrdma_pci_resource {

 enum pvrdma_device_ctl {
         PVRDMA_DEVICE_CTL_ACTIVATE,     /* Activate device. */
-        PVRDMA_DEVICE_CTL_QUIESCE,      /* Quiesce device. */
+        PVRDMA_DEVICE_CTL_UNQUIESCE,    /* Unquiesce device. */
         PVRDMA_DEVICE_CTL_RESET,        /* Reset device. */
 };

@@ -56,7 +56,7 @@
 #include "pvrdma.h"

 #define DRV_NAME        "vmw_pvrdma"
-#define DRV_VERSION     "1.0.0.0-k"
+#define DRV_VERSION     "1.0.1.0-k"

 static DEFINE_MUTEX(pvrdma_device_list_lock);
 static LIST_HEAD(pvrdma_device_list);

@@ -660,7 +660,16 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
                 pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
                 break;
         case NETDEV_UP:
-                pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+                pvrdma_write_reg(dev, PVRDMA_REG_CTL,
+                                 PVRDMA_DEVICE_CTL_UNQUIESCE);
+
+                mb();
+
+                if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
+                        dev_err(&dev->pdev->dev,
+                                "failed to activate device during link up\n");
+                else
+                        pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
                 break;
         default:
                 dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",

@@ -858,7 +867,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
         dev->dsr->resp_slot_dma = (u64)slot_dma;

         /* Async event ring */
-        dev->dsr->async_ring_pages.num_pages = 4;
+        dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
         ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
                                    dev->dsr->async_ring_pages.num_pages, true);
         if (ret)

@@ -867,7 +876,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
         dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;

         /* CQ notification ring */
-        dev->dsr->cq_ring_pages.num_pages = 4;
+        dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
         ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
                                    dev->dsr->cq_ring_pages.num_pages, true);
         if (ret)
@@ -170,8 +170,9 @@ static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap,
                                              sizeof(struct pvrdma_sge) *
                                              qp->sq.max_sg);
         /* Note: one extra page for the header. */
-        qp->npages_send = 1 + (qp->sq.wqe_cnt * qp->sq.wqe_size +
-                               PAGE_SIZE - 1) / PAGE_SIZE;
+        qp->npages_send = PVRDMA_QP_NUM_HEADER_PAGES +
+                          (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) /
+                                                                PAGE_SIZE;

         return 0;
 }

@@ -288,7 +289,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
                 qp->npages = qp->npages_send + qp->npages_recv;

                 /* Skip header page. */
-                qp->sq.offset = PAGE_SIZE;
+                qp->sq.offset = PVRDMA_QP_NUM_HEADER_PAGES * PAGE_SIZE;

                 /* Recv queue pages are after send pages. */
                 qp->rq.offset = qp->npages_send * PAGE_SIZE;

@@ -341,7 +342,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
                 cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type);
                 cmd->access_flags = IB_ACCESS_LOCAL_WRITE;
                 cmd->total_chunks = qp->npages;
-                cmd->send_chunks = qp->npages_send - 1;
+                cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES;
                 cmd->pdir_dma = qp->pdir.dir_dma;

                 dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n",

@@ -554,13 +555,13 @@ out:
         return ret;
 }

-static inline void *get_sq_wqe(struct pvrdma_qp *qp, int n)
+static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n)
 {
         return pvrdma_page_dir_get_ptr(&qp->pdir,
                                        qp->sq.offset + n * qp->sq.wqe_size);
 }

-static inline void *get_rq_wqe(struct pvrdma_qp *qp, int n)
+static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n)
 {
         return pvrdma_page_dir_get_ptr(&qp->pdir,
                                        qp->rq.offset + n * qp->rq.wqe_size);

@@ -598,9 +599,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
         unsigned long flags;
         struct pvrdma_sq_wqe_hdr *wqe_hdr;
         struct pvrdma_sge *sge;
-        int i, index;
-        int nreq;
-        int ret;
+        int i, ret;

         /*
          * In states lower than RTS, we can fail immediately. In other states,

@@ -613,9 +612,8 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,

         spin_lock_irqsave(&qp->sq.lock, flags);

-        index = pvrdma_idx(&qp->sq.ring->prod_tail, qp->sq.wqe_cnt);
-        for (nreq = 0; wr; nreq++, wr = wr->next) {
-                unsigned int tail;
+        while (wr) {
+                unsigned int tail = 0;

                 if (unlikely(!pvrdma_idx_ring_has_space(
                                 qp->sq.ring, qp->sq.wqe_cnt, &tail))) {

@@ -680,7 +678,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         }
                 }

-                wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, index);
+                wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, tail);
                 memset(wqe_hdr, 0, sizeof(*wqe_hdr));
                 wqe_hdr->wr_id = wr->wr_id;
                 wqe_hdr->num_sge = wr->num_sge;

@@ -771,12 +769,11 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                 /* Make sure wqe is written before index update */
                 smp_wmb();

-                index++;
-                if (unlikely(index >= qp->sq.wqe_cnt))
-                        index = 0;
                 /* Update shared sq ring */
                 pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail,
                                     qp->sq.wqe_cnt);
+
+                wr = wr->next;
         }

         ret = 0;

@@ -806,7 +803,6 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
         struct pvrdma_qp *qp = to_vqp(ibqp);
         struct pvrdma_rq_wqe_hdr *wqe_hdr;
         struct pvrdma_sge *sge;
-        int index, nreq;
         int ret = 0;
         int i;


@@ -821,9 +817,8 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,

         spin_lock_irqsave(&qp->rq.lock, flags);

-        index = pvrdma_idx(&qp->rq.ring->prod_tail, qp->rq.wqe_cnt);
-        for (nreq = 0; wr; nreq++, wr = wr->next) {
-                unsigned int tail;
+        while (wr) {
+                unsigned int tail = 0;

                 if (unlikely(wr->num_sge > qp->rq.max_sg ||
                              wr->num_sge < 0)) {

@@ -843,7 +838,7 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                         goto out;
                 }

-                wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, index);
+                wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, tail);
                 wqe_hdr->wr_id = wr->wr_id;
                 wqe_hdr->num_sge = wr->num_sge;
                 wqe_hdr->total_len = 0;

@@ -859,12 +854,11 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                 /* Make sure wqe is written before index update */
                 smp_wmb();

-                index++;
-                if (unlikely(index >= qp->rq.wqe_cnt))
-                        index = 0;
                 /* Update shared rq ring */
                 pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
                                     qp->rq.wqe_cnt);
+
+                wr = wr->next;
         }

         spin_unlock_irqrestore(&qp->rq.lock, flags);
@@ -170,9 +170,9 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,

         spin_lock_irq(&rdi->mmap_offset_lock);
         if (rdi->mmap_offset == 0)
-                rdi->mmap_offset = PAGE_SIZE;
+                rdi->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
         ip->offset = rdi->mmap_offset;
-        rdi->mmap_offset += size;
+        rdi->mmap_offset += ALIGN(size, SHMLBA);
         spin_unlock_irq(&rdi->mmap_offset_lock);

         INIT_LIST_HEAD(&ip->pending_mmaps);
@@ -22,4 +22,4 @@ config RDMA_RXE
         To configure and work with soft-RoCE driver please use the
         following wiki page under "configure Soft-RoCE (RXE)" section:

-        https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
+        https://github.com/linux-rdma/rdma-core/blob/master/Documentation/rxe.md
@@ -156,10 +156,10 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe,
         spin_lock_bh(&rxe->mmap_offset_lock);

         if (rxe->mmap_offset == 0)
-                rxe->mmap_offset = PAGE_SIZE;
+                rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);

         ip->info.offset = rxe->mmap_offset;
-        rxe->mmap_offset += size;
+        rxe->mmap_offset += ALIGN(size, SHMLBA);

         spin_unlock_bh(&rxe->mmap_offset_lock);

@@ -729,11 +729,11 @@ next_wqe:
         ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
         if (ret) {
                 qp->need_req_skb = 1;
-                kfree_skb(skb);

                 rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

                 if (ret == -EAGAIN) {
+                        kfree_skb(skb);
                         rxe_run_task(&qp->req.task, 1);
                         goto exit;
                 }
@@ -813,18 +813,17 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
                 WARN_ON_ONCE(1);
         }

-        /* We successfully processed this new request. */
-        qp->resp.msn++;
-
         /* next expected psn, read handles this separately */
         qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;

         qp->resp.opcode = pkt->opcode;
         qp->resp.status = IB_WC_SUCCESS;

-        if (pkt->mask & RXE_COMP_MASK)
+        if (pkt->mask & RXE_COMP_MASK) {
+                /* We successfully processed this new request. */
+                qp->resp.msn++;
                 return RESPST_COMPLETE;
-        else if (qp_type(qp) == IB_QPT_RC)
+        } else if (qp_type(qp) == IB_QPT_RC)
                 return RESPST_ACKNOWLEDGE;
         else
                 return RESPST_CLEANUP;
@@ -430,6 +430,7 @@ struct iser_fr_desc {
         struct list_head                  list;
         struct iser_reg_resources         rsc;
         struct iser_pi_context           *pi_ctx;
+        struct list_head                  all_list;
 };

 /**

@@ -443,6 +444,7 @@ struct iser_fr_pool {
         struct list_head        list;
         spinlock_t              lock;
         int                     size;
+        struct list_head        all_list;
 };

 /**
@@ -362,6 +362,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
         int i, ret;

         INIT_LIST_HEAD(&fr_pool->list);
+        INIT_LIST_HEAD(&fr_pool->all_list);
         spin_lock_init(&fr_pool->lock);
         fr_pool->size = 0;
         for (i = 0; i < cmds_max; i++) {

@@ -373,6 +374,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
                 }

                 list_add_tail(&desc->list, &fr_pool->list);
+                list_add_tail(&desc->all_list, &fr_pool->all_list);
                 fr_pool->size++;
         }


@@ -392,13 +394,13 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
         struct iser_fr_desc *desc, *tmp;
         int i = 0;

-        if (list_empty(&fr_pool->list))
+        if (list_empty(&fr_pool->all_list))
                 return;

         iser_info("freeing conn %p fr pool\n", ib_conn);

-        list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) {
-                list_del(&desc->list);
+        list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
+                list_del(&desc->all_list);
                 iser_free_reg_res(&desc->rsc);
                 if (desc->pi_ctx)
                         iser_free_pi_ctx(desc->pi_ctx);
@@ -1863,6 +1863,9 @@ struct ib_port_immutable {
 };

 struct ib_device {
+        /* Do not access @dma_device directly from ULP nor from HW drivers. */
+        struct device                *dma_device;
+
         char                          name[IB_DEVICE_NAME_MAX];

         struct list_head              event_handler_list;

@@ -3007,7 +3010,7 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
  */
 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
 {
-        return dma_mapping_error(&dev->dev, dma_addr);
+        return dma_mapping_error(dev->dma_device, dma_addr);
 }

 /**

@@ -3021,7 +3024,7 @@ static inline u64 ib_dma_map_single(struct ib_device *dev,
                                     void *cpu_addr, size_t size,
                                     enum dma_data_direction direction)
 {
-        return dma_map_single(&dev->dev, cpu_addr, size, direction);
+        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
 }

 /**

@@ -3035,7 +3038,7 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
                                        u64 addr, size_t size,
                                        enum dma_data_direction direction)
 {
-        dma_unmap_single(&dev->dev, addr, size, direction);
+        dma_unmap_single(dev->dma_device, addr, size, direction);
 }

 /**

@@ -3052,7 +3055,7 @@ static inline u64 ib_dma_map_page(struct ib_device *dev,
                                   size_t size,
                                   enum dma_data_direction direction)
 {
-        return dma_map_page(&dev->dev, page, offset, size, direction);
+        return dma_map_page(dev->dma_device, page, offset, size, direction);
 }

 /**

@@ -3066,7 +3069,7 @@ static inline void ib_dma_unmap_page(struct ib_device *dev,
                                      u64 addr, size_t size,
                                      enum dma_data_direction direction)
 {
-        dma_unmap_page(&dev->dev, addr, size, direction);
+        dma_unmap_page(dev->dma_device, addr, size, direction);
 }

 /**

@@ -3080,7 +3083,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev,
                                 struct scatterlist *sg, int nents,
                                 enum dma_data_direction direction)
 {
-        return dma_map_sg(&dev->dev, sg, nents, direction);
+        return dma_map_sg(dev->dma_device, sg, nents, direction);
 }

 /**

@@ -3094,7 +3097,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
                                    struct scatterlist *sg, int nents,
                                    enum dma_data_direction direction)
 {
-        dma_unmap_sg(&dev->dev, sg, nents, direction);
+        dma_unmap_sg(dev->dma_device, sg, nents, direction);
 }

 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,

@@ -3102,7 +3105,8 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
                                       enum dma_data_direction direction,
                                       unsigned long dma_attrs)
 {
-        return dma_map_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
+        return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
+                                dma_attrs);
 }

 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,

@@ -3110,7 +3114,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
                                       enum dma_data_direction direction,
                                       unsigned long dma_attrs)
 {
-        dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
+        dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
 }
 /**
  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry

@@ -3152,7 +3156,7 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
                                               size_t size,
                                               enum dma_data_direction dir)
 {
-        dma_sync_single_for_cpu(&dev->dev, addr, size, dir);
+        dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
 }

 /**

@@ -3167,7 +3171,7 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
                                                  size_t size,
                                                  enum dma_data_direction dir)
 {
-        dma_sync_single_for_device(&dev->dev, addr, size, dir);
+        dma_sync_single_for_device(dev->dma_device, addr, size, dir);
 }

 /**

@@ -3182,7 +3186,7 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
                                           dma_addr_t *dma_handle,
                                           gfp_t flag)
 {
-        return dma_alloc_coherent(&dev->dev, size, dma_handle, flag);
+        return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
 }

 /**

@@ -3196,7 +3200,7 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
                                         size_t size, void *cpu_addr,
                                         dma_addr_t dma_handle)
 {
-        dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle);
+        dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
 }

 /**
@@ -34,6 +34,7 @@
 #define MLX5_ABI_USER_H

 #include <linux/types.h>
+#include <linux/if_ether.h>     /* For ETH_ALEN. */

 enum {
         MLX5_QP_FLAG_SIGNATURE          = 1 << 0,

@@ -66,7 +67,7 @@ struct mlx5_ib_alloc_ucontext_req {
 };

 enum mlx5_lib_caps {
-        MLX5_LIB_CAP_4K_UAR     = (u64)1 << 0,
+        MLX5_LIB_CAP_4K_UAR     = (__u64)1 << 0,
 };

 struct mlx5_ib_alloc_ucontext_req_v2 {