IB/qib: Fix refcount leak in lkey/rkey validation
The mr optimization introduced a reference count leak on an exception
test.  The lock/refcount manipulation is moved down and the problematic
exception test now calls bail to ensure that the lock is released.

Additional fixes as suggested by Ralph Campbell <ralph.campbell@qlogic.org>:

- reduce lock scope of dma regions
- use explicit values on returns vs. automatic ret value

Signed-off-by: Mike Marciniszyn <mike.marciniszyn@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Parent: f2d255a078
Commit: 4db62d4786
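Before the diff, a minimal, self-contained sketch of the bug and the fix, in plain userspace C with pthread locking. All names here (struct table, struct region, validate_buggy, validate_fixed) are illustrative, not the driver's; only the control flow matters. In the pre-patch shape the reference is taken and the lock dropped before the bounds/permission test, so a failing test returns 0 without dropping the reference. The fix validates first, pins the region only on success, and routes every failure through bail so the lock is always released exactly once.

#include <pthread.h>
#include <stddef.h>

struct region {
	int    refcount;   /* protected by the table lock in this sketch */
	char  *base;
	size_t length;
};

struct table {
	pthread_mutex_t lock;
	struct region  *slot;   /* stand-in for the rkt->table[] lookup */
};

/* Pre-patch shape: the reference is taken and the lock dropped before
 * the bounds test, so the failing test's "return ret" leaks the ref. */
static int validate_buggy(struct table *t, char *addr, size_t len,
			  struct region **out)
{
	int ret = 0;
	struct region *mr;

	pthread_mutex_lock(&t->lock);
	mr = t->slot;
	if (mr == NULL)
		goto bail;
	mr->refcount++;                        /* reference taken here... */
	pthread_mutex_unlock(&t->lock);
	if (addr < mr->base || addr + len > mr->base + mr->length)
		return ret;                    /* ...and leaked here      */
	*out = mr;
	ret = 1;
	return ret;
bail:
	pthread_mutex_unlock(&t->lock);
	return ret;
}

/* Post-patch shape: validate first, then pin and unlock; every failure
 * goes through bail, so the lock is released and no reference leaks. */
static int validate_fixed(struct table *t, char *addr, size_t len,
			  struct region **out)
{
	struct region *mr;

	pthread_mutex_lock(&t->lock);
	mr = t->slot;
	if (mr == NULL)
		goto bail;
	if (addr < mr->base || addr + len > mr->base + mr->length)
		goto bail;
	mr->refcount++;                        /* pinned only on success  */
	pthread_mutex_unlock(&t->lock);
	*out = mr;
	return 1;
bail:
	pthread_mutex_unlock(&t->lock);
	return 0;
}

This reshuffle is also why the diff below replaces "return ret" with "goto bail" and, per the review feedback, drops the automatic ret variable in favor of explicit return 1 / return 0.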
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -136,7 +136,6 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
 	struct qib_mregion *mr;
 	unsigned n, m;
 	size_t off;
-	int ret = 0;
 	unsigned long flags;
 
 	/*
@@ -152,27 +151,28 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
 		if (!dev->dma_mr)
 			goto bail;
 		atomic_inc(&dev->dma_mr->refcount);
+		spin_unlock_irqrestore(&rkt->lock, flags);
+
 		isge->mr = dev->dma_mr;
 		isge->vaddr = (void *) sge->addr;
 		isge->length = sge->length;
 		isge->sge_length = sge->length;
 		isge->m = 0;
 		isge->n = 0;
-		spin_unlock_irqrestore(&rkt->lock, flags);
 		goto ok;
 	}
 	mr = rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))];
 	if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
 		     mr->pd != &pd->ibpd))
 		goto bail;
-	atomic_inc(&mr->refcount);
-	spin_unlock_irqrestore(&rkt->lock, flags);
 
 	off = sge->addr - mr->user_base;
 	if (unlikely(sge->addr < mr->user_base ||
 		     off + sge->length > mr->length ||
 		     (mr->access_flags & acc) != acc))
-		return ret;
+		goto bail;
+	atomic_inc(&mr->refcount);
+	spin_unlock_irqrestore(&rkt->lock, flags);
 
 	off += mr->offset;
 	if (mr->page_shift) {
@@ -206,11 +206,10 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
 	isge->m = m;
 	isge->n = n;
 ok:
-	ret = 1;
-	return ret;
+	return 1;
 bail:
 	spin_unlock_irqrestore(&rkt->lock, flags);
-	return ret;
+	return 0;
 }
 
 /**
@@ -231,7 +230,6 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 	struct qib_mregion *mr;
 	unsigned n, m;
 	size_t off;
-	int ret = 0;
 	unsigned long flags;
 
 	/*
@@ -248,26 +246,27 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 		if (!dev->dma_mr)
 			goto bail;
 		atomic_inc(&dev->dma_mr->refcount);
+		spin_unlock_irqrestore(&rkt->lock, flags);
+
 		sge->mr = dev->dma_mr;
 		sge->vaddr = (void *) vaddr;
 		sge->length = len;
 		sge->sge_length = len;
 		sge->m = 0;
 		sge->n = 0;
-		spin_unlock_irqrestore(&rkt->lock, flags);
 		goto ok;
 	}
 
 	mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
 	if (unlikely(mr == NULL || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
 		goto bail;
-	atomic_inc(&mr->refcount);
-	spin_unlock_irqrestore(&rkt->lock, flags);
 
 	off = vaddr - mr->iova;
 	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
 		     (mr->access_flags & acc) == 0))
-		return ret;
+		goto bail;
+	atomic_inc(&mr->refcount);
+	spin_unlock_irqrestore(&rkt->lock, flags);
 
 	off += mr->offset;
 	if (mr->page_shift) {
@@ -301,11 +300,10 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 	sge->m = m;
 	sge->n = n;
 ok:
-	ret = 1;
-	return ret;
+	return 1;
 bail:
 	spin_unlock_irqrestore(&rkt->lock, flags);
-	return ret;
+	return 0;
 }
 
 /*
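The other review suggestion, reducing lock scope for DMA regions, shows up above as the unlock moving up in the dev->dma_mr fast path. A sketch of why that reordering is safe, again with hypothetical userspace types rather than the driver's API: once the region is pinned by its refcount, the remaining writes touch only the caller-owned SGE, which the table lock does not protect.

#include <pthread.h>
#include <stddef.h>

struct region { int refcount; };
struct sge    { struct region *mr; void *vaddr; size_t length; };

struct table {
	pthread_mutex_t lock;
	struct region   dma_region;       /* stand-in for dev->dma_mr */
};

void dma_fast_path(struct table *t, struct sge *sge, void *addr, size_t len)
{
	pthread_mutex_lock(&t->lock);
	t->dma_region.refcount++;         /* pin while the lock is held */
	pthread_mutex_unlock(&t->lock);   /* drop the lock early...     */

	sge->mr = &t->dma_region;         /* ...these writes touch only */
	sge->vaddr = addr;                /* the caller's sge and need  */
	sge->length = len;                /* no table protection        */
}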