RDMA/siw: Fix SGL mapping issues
All user level and most in-kernel applications submit WQEs
where the SG list entries are all of a single type.
iSER in particular, however, will send us WQEs with mixed SG
types: sge[0] = kernel buffer, sge[1] = PBL region.
Check and set is_kva on each SG entry individually instead of
assuming the first SGE type carries through to the last.
This fixes iSER over siw.
Fixes: b9be6f18cf ("rdma/siw: transmit path")
Reported-by: Krishnamraju Eraparaju <krishna2@chelsio.com>
Tested-by: Krishnamraju Eraparaju <krishna2@chelsio.com>
Signed-off-by: Bernard Metzler <bmt@zurich.ibm.com>
Link: https://lore.kernel.org/r/20190822150741.21871-1-bmt@zurich.ibm.com
Signed-off-by: Doug Ledford <dledford@redhat.com>
Parent: d37b1e5340
Commit: fab4f97e1f
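The fix in siw_tx_hdt() below turns a sticky flag into a per-entry one: the old code could only ever set is_kva and never cleared it, so the type of an earlier SGE leaked into later ones. A minimal user-space sketch of the corrected per-SGE evaluation (editorial illustration, not part of the commit; struct mem is a reduced stand-in for struct siw_mem):

#include <stdio.h>

struct mem {			/* stand-in for struct siw_mem */
	void *mem_obj;		/* NULL means plain kernel virtual address */
};

int main(void)
{
	/* iSER-style mixed WQE: sge[0] = kernel buffer, sge[1] = PBL region */
	struct mem kbuf = { .mem_obj = NULL };
	struct mem pbl = { .mem_obj = &pbl };	/* any non-NULL object: PBL */
	struct mem *wqe_mem[2] = { &kbuf, &pbl };
	int sge_idx, is_kva;

	for (sge_idx = 0; sge_idx < 2; sge_idx++) {
		/* old code did 'if (!mem_obj) is_kva = 1;' and never cleared it */
		is_kva = wqe_mem[sge_idx]->mem_obj == NULL ? 1 : 0;

		printf("sge[%d]: is_kva = %d\n", sge_idx, is_kva);
	}
	return 0;
}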
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -398,15 +398,13 @@ static int siw_0copy_tx(struct socket *s, struct page **page,
 
 #define MAX_TRAILER (MPA_CRC_SIZE + 4)
 
-static void siw_unmap_pages(struct page **pages, int hdr_len, int num_maps)
+static void siw_unmap_pages(struct page **pp, unsigned long kmap_mask)
 {
-	if (hdr_len) {
-		++pages;
-		--num_maps;
-	}
-	while (num_maps-- > 0) {
-		kunmap(*pages);
-		pages++;
+	while (kmap_mask) {
+		if (kmap_mask & BIT(0))
+			kunmap(*pp);
+		pp++;
+		kmap_mask >>= 1;
 	}
 }
 
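The rewritten siw_unmap_pages() drops the hdr_len/num_maps arithmetic entirely: the caller passes a bitmask in which bit n records whether page_array[n] was kmap()'ed, and the loop consumes the mask one bit per slot. A user-space model of that loop (editorial sketch; kunmap() is stubbed out and the page values are illustrative only):

#include <stdio.h>

#define BIT(n) (1UL << (n))

static void kunmap(void *page)		/* stub for the kernel's kunmap() */
{
	printf("kunmap(%p)\n", page);
}

static void siw_unmap_pages(void **pp, unsigned long kmap_mask)
{
	while (kmap_mask) {
		if (kmap_mask & BIT(0))
			kunmap(*pp);
		pp++;
		kmap_mask >>= 1;
	}
}

int main(void)
{
	void *page_array[4] = { (void *)0x1000, (void *)0x2000,
				(void *)0x3000, (void *)0x4000 };

	/* only slots 0 and 2 were mapped, so only they are unmapped */
	siw_unmap_pages(page_array, BIT(0) | BIT(2));
	return 0;
}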
@@ -437,6 +435,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 	unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0,
 		     sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx,
 		     pbl_idx = c_tx->pbl_idx;
+	unsigned long kmap_mask = 0L;
 
 	if (c_tx->state == SIW_SEND_HDR) {
 		if (c_tx->use_sendpage) {
@@ -463,8 +462,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 
 	if (!(tx_flags(wqe) & SIW_WQE_INLINE)) {
 		mem = wqe->mem[sge_idx];
-		if (!mem->mem_obj)
-			is_kva = 1;
+		is_kva = mem->mem_obj == NULL ? 1 : 0;
 	} else {
 		is_kva = 1;
 	}
@@ -500,12 +498,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 				p = siw_get_upage(mem->umem,
 						  sge->laddr + sge_off);
 				if (unlikely(!p)) {
-					if (hdr_len)
-						seg--;
-					if (!c_tx->use_sendpage && seg) {
-						siw_unmap_pages(page_array,
-								hdr_len, seg);
-					}
+					siw_unmap_pages(page_array, kmap_mask);
 					wqe->processed -= c_tx->bytes_unsent;
 					rv = -EFAULT;
 					goto done_crc;
@@ -515,6 +508,10 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 				if (!c_tx->use_sendpage) {
 					iov[seg].iov_base = kmap(p) + fp_off;
 					iov[seg].iov_len = plen;
+
+					/* Remember for later kunmap() */
+					kmap_mask |= BIT(seg);
+
 					if (do_crc)
 						crypto_shash_update(
 							c_tx->mpa_crc_hd,
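Design note on the hunk above: seg indexes page_array and is capped by MAX_ARRAY, so, assuming MAX_ARRAY never exceeds the width of unsigned long, a single kmap_mask word can record every kmap()'ed iov slot. All three unwind sites (the -EFAULT path above, the -EMSGSIZE path below, and the kernel_sendmsg() completion path) then collapse to the same siw_unmap_pages(page_array, kmap_mask) call, instead of each re-deriving the mapped range from hdr_len and seg.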
@@ -543,10 +540,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 
 			if (++seg > (int)MAX_ARRAY) {
 				siw_dbg_qp(tx_qp(c_tx), "to many fragments\n");
-				if (!is_kva && !c_tx->use_sendpage) {
-					siw_unmap_pages(page_array, hdr_len,
-							seg - 1);
-				}
+				siw_unmap_pages(page_array, kmap_mask);
 				wqe->processed -= c_tx->bytes_unsent;
 				rv = -EMSGSIZE;
 				goto done_crc;
@@ -597,8 +591,7 @@ sge_done:
 	} else {
 		rv = kernel_sendmsg(s, &msg, iov, seg + 1,
 				    hdr_len + data_len + trl_len);
-		if (!is_kva)
-			siw_unmap_pages(page_array, hdr_len, seg);
+		siw_unmap_pages(page_array, kmap_mask);
 	}
 	if (rv < (int)hdr_len) {
 		/* Not even complete hdr pushed or negative rv */