- Update Steve Wise info
 - Fix for soft-RoCE CRC calculations (this breaks backward compatibility,
   but only with earlier soft-RoCE drivers, which have carried this
   on-the-wire bug since the driver was introduced; the fix makes soft-RoCE
   fully compatible with real RoCE hardware)
 - cma init fixup
 - counters oops fix
 - fix for mlx4 init/teardown sequence
 - fix for mlx5 steering rules
 - introduce a cleanup API, which isn't a fix, but we want to use it in
   the next fix
 - fix for mlx5 memory management that uses API in previous patch
 
 Signed-off-by: Doug Ledford <dledford@redhat.com>
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEErmsb2hIrI7QmWxJ0uCajMw5XL90FAl32qrIACgkQuCajMw5X
 L900Lw/+Noq2pY30TosVFd/f/8EyPH58QjsBe9UdOucYWdijD05WtjA56il8ef8b
 wsnJp48Qdo4PvhX1zvYPtV3iBlXcIAUDc0F3ZM9d1s5ppV3pvsAlSzZf4OC+yU+a
 qstoXuXyz6S7Oadja3Y94xZirIw9PWJ6MAEvlBa0ERufr42E/wdU1614I9XA88aQ
 RkbKsaCMMD68cKAUm/hjAxZef6iSya4/4xRI1lcCgJji2Qw6vDTDC6RRm2XHCKAi
 nr1D7fCIqEZikvAA+iCiw4kvTEwjwRc/igF5i9lftCfn3x118N/Kc9izswjg55l4
 Eukf9xHXXbZCfGed2a1+b6D7A0cRgrOrZkZ7FZkMOxu3eMRZUzNMd+xm8NQYi6u7
 UeXo4XtC5vfhlapqdGxHeVJnzDf3colRN0P9RkliSBmLYlXzPnyJ82leEK6P0xOh
 y2VluGkHCH/SV3rmP5TUZJGsnjPOlq+NMFOinFgjcjK8O4QXTE+4IU+66gI040dn
 wbFXeuQ1kashopr7W/cdJENvWFyl774X06XxIzIdoIyfi9TDTso2kVQJ7IiK193l
 WZe9gCfdkx+V8q8Z8INlDO4lzDmBpJszk9r7IpVPsdjuZjnUGq4H+DakP4y5cNyU
 Tj90y2NlduTVKzYMMrT4DSkiBKLHODwc9WT+5SKwp4NNzeVCTak=
 =UNwD
 -----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Doug Ledford:
 "A small collection of -rc fixes. Mostly. One API addition, but that's
  because we wanted to use it in a fix. There's also a bug fix that
  renders the 5.5 kernel's soft-RoCE driver incompatible with all prior
  soft-RoCE versions, but it's required to implement the protocol
  according to the RoCE spec and for the soft-RoCE driver to work with
  actual RoCE hardware.

  Summary:

   - Update Steve Wise info

   - Fix for soft-RoCE CRC calculations (this breaks backward
     compatibility, but only with earlier soft-RoCE drivers, which have
     carried this on-the-wire bug since the driver was introduced; the
     fix makes soft-RoCE fully compatible with real RoCE hardware)

   - cma init fixup

   - counters oops fix

   - fix for mlx4 init/teardown sequence

   - fix for mlx5 steering rules

   - introduce a cleanup API, which isn't a fix, but we want to use it
     in the next fix

   - fix for mlx5 memory management that uses API in previous patch"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  IB/mlx5: Fix device memory flows
  IB/core: Introduce rdma_user_mmap_entry_insert_range() API
  IB/mlx5: Fix steering rule of drop and count
  IB/mlx4: Follow mirror sequence of device add during device removal
  RDMA/counter: Prevent auto-binding a QP which are not tracked with res
  rxe: correctly calculate iCRC for unaligned payloads
  Update mailmap info for Steve Wise
  RDMA/cma: add missed unregister_pernet_subsys in init failure
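
The rxe entry above ("correctly calculate iCRC for unaligned payloads") is the
on-the-wire change: the RoCE iCRC must cover the 0-3 pad bytes that align the
payload to 4 bytes, not just the payload itself. The toy program below is not
the kernel code; it uses zlib's generic crc32() as a stand-in for the real
iCRC (which uses a different seed and masks parts of the headers), and the pad
arithmetic mirrors the BTH pad-count rule. A checksum over the payload alone
and one over payload plus zero-filled pad differ, so an old rxe peer and a
spec-compliant peer can never agree on the iCRC of an unaligned payload.

/*
 * Toy illustration of why the pad bytes must be fed into the iCRC.
 * Build with: cc icrc_pad_demo.c -lz
 */
#include <stdio.h>
#include <zlib.h>

int main(void)
{
        unsigned char buf[8] = { 'a', 'b', 'c', 0, 0, 0, 0, 0 };
        size_t payload = 3;                      /* unaligned payload length */
        size_t pad = (4 - (payload & 3)) & 3;    /* BTH pad count: align to 4 */

        unsigned long without_pad = crc32(crc32(0L, Z_NULL, 0), buf, payload);
        unsigned long with_pad = crc32(crc32(0L, Z_NULL, 0), buf, payload + pad);

        printf("crc over payload only : 0x%08lx\n", without_pad);
        printf("crc over payload + pad: 0x%08lx\n", with_pad);
        return 0;
}
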
Linus Torvalds, 2019-12-15 14:58:13 -08:00
Parents: 1522d9da40 dc2316eba7
Commit: 9603e22104
13 changed files, 183 additions and 76 deletions

diff --git a/.mailmap b/.mailmap
@@ -276,3 +276,5 @@ Gustavo Padovan <gustavo@las.ic.unicamp.br>
 Gustavo Padovan <padovan@profusion.mobi>
 Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
 Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
+Steve Wise <larrystevenwise@gmail.com> <swise@chelsio.com>
+Steve Wise <larrystevenwise@gmail.com> <swise@opengridcomputing.com>

diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
@@ -4763,6 +4763,7 @@ err_ib:
 err:
         unregister_netdevice_notifier(&cma_nb);
         ib_sa_unregister_client(&sa_client);
+        unregister_pernet_subsys(&cma_pernet_operations);
 err_wq:
         destroy_workqueue(cma_wq);
         return ret;

diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
@@ -286,6 +286,9 @@ int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port)
         struct rdma_counter *counter;
         int ret;
 
+        if (!qp->res.valid)
+                return 0;
+
         if (!rdma_is_port_valid(dev, port))
                 return -EINVAL;

diff --git a/drivers/infiniband/core/ib_core_uverbs.c b/drivers/infiniband/core/ib_core_uverbs.c
@@ -238,28 +238,32 @@ void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry)
 EXPORT_SYMBOL(rdma_user_mmap_entry_remove);
 
 /**
- * rdma_user_mmap_entry_insert() - Insert an entry to the mmap_xa
+ * rdma_user_mmap_entry_insert_range() - Insert an entry to the mmap_xa
+ *                                       in a given range.
  *
  * @ucontext: associated user context.
  * @entry: the entry to insert into the mmap_xa
  * @length: length of the address that will be mmapped
+ * @min_pgoff: minimum pgoff to be returned
+ * @max_pgoff: maximum pgoff to be returned
  *
  * This function should be called by drivers that use the rdma_user_mmap
 * interface for implementing their mmap syscall A database of mmap offsets is
 * handled in the core and helper functions are provided to insert entries
 * into the database and extract entries when the user calls mmap with the
- * given offset. The function allocates a unique page offset that should be
- * provided to user, the user will use the offset to retrieve information such
- * as address to be mapped and how.
+ * given offset. The function allocates a unique page offset in a given range
+ * that should be provided to user, the user will use the offset to retrieve
+ * information such as address to be mapped and how.
  *
  * Return: 0 on success and -ENOMEM on failure
  */
-int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
-                                struct rdma_user_mmap_entry *entry,
-                                size_t length)
+int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
+                                      struct rdma_user_mmap_entry *entry,
+                                      size_t length, u32 min_pgoff,
+                                      u32 max_pgoff)
 {
         struct ib_uverbs_file *ufile = ucontext->ufile;
-        XA_STATE(xas, &ucontext->mmap_xa, 0);
+        XA_STATE(xas, &ucontext->mmap_xa, min_pgoff);
         u32 xa_first, xa_last, npages;
         int err;
         u32 i;
@@ -285,7 +289,7 @@ int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
         entry->npages = npages;
         while (true) {
                 /* First find an empty index */
-                xas_find_marked(&xas, U32_MAX, XA_FREE_MARK);
+                xas_find_marked(&xas, max_pgoff, XA_FREE_MARK);
                 if (xas.xa_node == XAS_RESTART)
                         goto err_unlock;
 
@@ -332,4 +336,30 @@ err_unlock:
         mutex_unlock(&ufile->umap_lock);
         return -ENOMEM;
 }
+EXPORT_SYMBOL(rdma_user_mmap_entry_insert_range);
+
+/**
+ * rdma_user_mmap_entry_insert() - Insert an entry to the mmap_xa.
+ *
+ * @ucontext: associated user context.
+ * @entry: the entry to insert into the mmap_xa
+ * @length: length of the address that will be mmapped
+ *
+ * This function should be called by drivers that use the rdma_user_mmap
+ * interface for handling user mmapped addresses. The database is handled in
+ * the core and helper functions are provided to insert entries into the
+ * database and extract entries when the user calls mmap with the given offset.
+ * The function allocates a unique page offset that should be provided to user,
+ * the user will use the offset to retrieve information such as address to
+ * be mapped and how.
+ *
+ * Return: 0 on success and -ENOMEM on failure
+ */
+int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
+                                struct rdma_user_mmap_entry *entry,
+                                size_t length)
+{
+        return rdma_user_mmap_entry_insert_range(ucontext, entry, length, 0,
+                                                 U32_MAX);
+}
 EXPORT_SYMBOL(rdma_user_mmap_entry_insert);
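
For readers who have not looked at the mmap_xa database before, here is a
minimal stand-alone model of what the new entry point adds (assuming a plain
array in place of the kernel's xarray, and scaled-down numbers):
rdma_user_mmap_entry_insert() searches the whole 32-bit page-offset space for
a free slot, while rdma_user_mmap_entry_insert_range() confines that search to
[min_pgoff, max_pgoff], which lets a driver reserve a private sub-range of
offsets per mmap command. The mlx5 hunks further down use exactly this to
confine MEMIC entries to one 16-bit sub-range.

/* Toy model of the range-limited page-offset allocation (not kernel code). */
#include <stdbool.h>
#include <stdio.h>

#define PGOFF_SPACE 1024                        /* scaled down from U32_MAX */

static bool used[PGOFF_SPACE];

/* First run of npages free offsets inside [min_pgoff, max_pgoff], or -1. */
static long insert_range(unsigned int npages,
                         unsigned int min_pgoff, unsigned int max_pgoff)
{
        for (unsigned int start = min_pgoff; start + npages - 1 <= max_pgoff; start++) {
                bool is_free = true;

                for (unsigned int i = 0; i < npages; i++)
                        is_free &= !used[start + i];
                if (!is_free)
                        continue;
                for (unsigned int i = 0; i < npages; i++)
                        used[start + i] = true;
                return start;                   /* becomes entry->start_pgoff */
        }
        return -1;                              /* the kernel returns -ENOMEM */
}

int main(void)
{
        /* A hypothetical driver reserves offsets 256..511 for one mmap command. */
        printf("entry A at pgoff %ld\n", insert_range(4, 256, 511));
        printf("entry B at pgoff %ld\n", insert_range(4, 256, 511));
        /* The unrestricted helper is the same search over the full space. */
        printf("entry C at pgoff %ld\n", insert_range(1, 0, PGOFF_SPACE - 1));
        return 0;
}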

diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
@@ -3018,16 +3018,17 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
         ibdev->ib_active = false;
         flush_workqueue(wq);
 
-        mlx4_ib_close_sriov(ibdev);
-        mlx4_ib_mad_cleanup(ibdev);
-        ib_unregister_device(&ibdev->ib_dev);
-        mlx4_ib_diag_cleanup(ibdev);
         if (ibdev->iboe.nb.notifier_call) {
                 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
                         pr_warn("failure unregistering notifier\n");
                 ibdev->iboe.nb.notifier_call = NULL;
         }
 
+        mlx4_ib_close_sriov(ibdev);
+        mlx4_ib_mad_cleanup(ibdev);
+        ib_unregister_device(&ibdev->ib_dev);
+        mlx4_ib_diag_cleanup(ibdev);
+
         mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
                               ibdev->steer_qpn_count);
         kfree(ibdev->ib_uc_qpns_bitmap);

diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
@@ -157,7 +157,7 @@ int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
         return -ENOMEM;
 }
 
-int mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
+void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
 {
         struct mlx5_core_dev *dev = dm->dev;
         u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
@@ -175,15 +175,13 @@ int mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
         MLX5_SET(dealloc_memic_in, in, memic_size, length);
 
         err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+        if (err)
+                return;
 
-        if (!err) {
-                spin_lock(&dm->lock);
-                bitmap_clear(dm->memic_alloc_pages,
-                             start_page_idx, num_pages);
-                spin_unlock(&dm->lock);
-        }
-
-        return err;
+        spin_lock(&dm->lock);
+        bitmap_clear(dm->memic_alloc_pages,
+                     start_page_idx, num_pages);
+        spin_unlock(&dm->lock);
 }
 
 int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out)

diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
@@ -46,7 +46,7 @@ int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev,
                                 void *in, int in_size);
 int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
                          u64 length, u32 alignment);
-int mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length);
+void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length);
 void mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid);
 void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid);
 void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid);

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
@@ -2074,6 +2074,24 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
                                       virt_to_page(dev->mdev->clock_info));
 }
 
+static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
+{
+        struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
+        struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
+        struct mlx5_ib_dm *mdm;
+
+        switch (mentry->mmap_flag) {
+        case MLX5_IB_MMAP_TYPE_MEMIC:
+                mdm = container_of(mentry, struct mlx5_ib_dm, mentry);
+                mlx5_cmd_dealloc_memic(&dev->dm, mdm->dev_addr,
+                                       mdm->size);
+                kfree(mdm);
+                break;
+        default:
+                WARN_ON(true);
+        }
+}
+
 static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
                     struct vm_area_struct *vma,
                     struct mlx5_ib_ucontext *context)
@@ -2186,26 +2204,55 @@ free_bfreg:
         return err;
 }
 
-static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+static int add_dm_mmap_entry(struct ib_ucontext *context,
+                             struct mlx5_ib_dm *mdm,
+                             u64 address)
 {
-        struct mlx5_ib_ucontext *mctx = to_mucontext(context);
-        struct mlx5_ib_dev *dev = to_mdev(context->device);
-        u16 page_idx = get_extended_index(vma->vm_pgoff);
-        size_t map_size = vma->vm_end - vma->vm_start;
-        u32 npages = map_size >> PAGE_SHIFT;
-        phys_addr_t pfn;
+        mdm->mentry.mmap_flag = MLX5_IB_MMAP_TYPE_MEMIC;
+        mdm->mentry.address = address;
+        return rdma_user_mmap_entry_insert_range(
+                        context, &mdm->mentry.rdma_entry,
+                        mdm->size,
+                        MLX5_IB_MMAP_DEVICE_MEM << 16,
+                        (MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1);
+}
+
+static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma)
+{
+        unsigned long idx;
+        u8 command;
+
+        command = get_command(vma->vm_pgoff);
+        idx = get_extended_index(vma->vm_pgoff);
+
+        return (command << 16 | idx);
+}
 
-        if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) !=
-            page_idx + npages)
+static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
+                               struct vm_area_struct *vma,
+                               struct ib_ucontext *ucontext)
+{
+        struct mlx5_user_mmap_entry *mentry;
+        struct rdma_user_mmap_entry *entry;
+        unsigned long pgoff;
+        pgprot_t prot;
+        phys_addr_t pfn;
+        int ret;
+
+        pgoff = mlx5_vma_to_pgoff(vma);
+        entry = rdma_user_mmap_entry_get_pgoff(ucontext, pgoff);
+        if (!entry)
                 return -EINVAL;
 
-        pfn = ((dev->mdev->bar_addr +
-                MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
-               PAGE_SHIFT) +
-              page_idx;
-        return rdma_user_mmap_io(context, vma, pfn, map_size,
-                                 pgprot_writecombine(vma->vm_page_prot),
-                                 NULL);
+        mentry = to_mmmap(entry);
+        pfn = (mentry->address >> PAGE_SHIFT);
+        prot = pgprot_writecombine(vma->vm_page_prot);
+        ret = rdma_user_mmap_io(ucontext, vma, pfn,
+                                entry->npages * PAGE_SIZE,
+                                prot,
+                                entry);
+        rdma_user_mmap_entry_put(&mentry->rdma_entry);
+
+        return ret;
 }
 
 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
@@ -2248,11 +2295,8 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
         case MLX5_IB_MMAP_CLOCK_INFO:
                 return mlx5_ib_mmap_clock_info_page(dev, vma, context);
 
-        case MLX5_IB_MMAP_DEVICE_MEM:
-                return dm_mmap(ibcontext, vma);
-
         default:
-                return -EINVAL;
+                return mlx5_ib_mmap_offset(dev, vma, ibcontext);
         }
 
         return 0;
@@ -2288,8 +2332,9 @@ static int handle_alloc_dm_memic(struct ib_ucontext *ctx,
 {
         struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
         u64 start_offset;
-        u32 page_idx;
+        u16 page_idx;
         int err;
+        u64 address;
 
         dm->size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
@@ -2298,28 +2343,30 @@ static int handle_alloc_dm_memic(struct ib_ucontext *ctx,
         if (err)
                 return err;
 
-        page_idx = (dm->dev_addr - pci_resource_start(dm_db->dev->pdev, 0) -
-                    MLX5_CAP64_DEV_MEM(dm_db->dev, memic_bar_start_addr)) >>
-                    PAGE_SHIFT;
-
-        err = uverbs_copy_to(attrs,
-                             MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
-                             &page_idx, sizeof(page_idx));
+        address = dm->dev_addr & PAGE_MASK;
+        err = add_dm_mmap_entry(ctx, dm, address);
         if (err)
                 goto err_dealloc;
 
+        page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
+        err = uverbs_copy_to(attrs,
+                             MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+                             &page_idx,
+                             sizeof(page_idx));
+        if (err)
+                goto err_copy;
+
         start_offset = dm->dev_addr & ~PAGE_MASK;
         err = uverbs_copy_to(attrs,
                              MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
                              &start_offset, sizeof(start_offset));
         if (err)
-                goto err_dealloc;
-
-        bitmap_set(to_mucontext(ctx)->dm_pages, page_idx,
-                   DIV_ROUND_UP(dm->size, PAGE_SIZE));
+                goto err_copy;
 
         return 0;
 
+err_copy:
+        rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
 err_dealloc:
         mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
@@ -2423,23 +2470,13 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
         struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
                 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
         struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev;
-        struct mlx5_dm *dm_db = &to_mdev(ibdm->device)->dm;
         struct mlx5_ib_dm *dm = to_mdm(ibdm);
-        u32 page_idx;
         int ret;
 
         switch (dm->type) {
         case MLX5_IB_UAPI_DM_TYPE_MEMIC:
-                ret = mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
-                if (ret)
-                        return ret;
-
-                page_idx = (dm->dev_addr - pci_resource_start(dev->pdev, 0) -
-                            MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr)) >>
-                            PAGE_SHIFT;
-                bitmap_clear(ctx->dm_pages, page_idx,
-                             DIV_ROUND_UP(dm->size, PAGE_SIZE));
-                break;
+                rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
+                return 0;
         case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
                 ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING,
                                              dm->size, ctx->devx_uid, dm->dev_addr,
@@ -3544,10 +3581,6 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
         }
 
         INIT_LIST_HEAD(&handler->list);
-        if (dst) {
-                memcpy(&dest_arr[0], dst, sizeof(*dst));
-                dest_num++;
-        }
 
         for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
                 err = parse_flow_attr(dev->mdev, spec,
@@ -3560,6 +3593,11 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
                 ib_flow += ((union ib_flow_spec *)ib_flow)->size;
         }
 
+        if (dst && !(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP)) {
+                memcpy(&dest_arr[0], dst, sizeof(*dst));
+                dest_num++;
+        }
+
         if (!flow_is_multicast_only(flow_attr))
                 set_underlay_qp(dev, spec, underlay_qpn);
 
@@ -3600,10 +3638,8 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
         }
 
         if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
-                if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) {
+                if (!dest_num)
                         rule_dst = NULL;
-                        dest_num = 0;
-                }
         } else {
                 if (is_egress)
                         flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
@@ -6236,6 +6272,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
         .map_mr_sg = mlx5_ib_map_mr_sg,
         .map_mr_sg_pi = mlx5_ib_map_mr_sg_pi,
         .mmap = mlx5_ib_mmap,
+        .mmap_free = mlx5_ib_mmap_free,
         .modify_cq = mlx5_ib_modify_cq,
         .modify_device = mlx5_ib_modify_device,
         .modify_port = mlx5_ib_modify_port,
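
The mlx5 hunks above tie the pieces together: add_dm_mmap_entry() inserts
MEMIC entries only into the sub-range whose upper bits carry
MLX5_IB_MMAP_DEVICE_MEM, mlx5_vma_to_pgoff() rebuilds a command << 16 | index
key on the mmap path, and only the low 16 bits of start_pgoff are copied back
to userspace as the page index. A small stand-alone check of that bit layout
is below; the command value is illustrative, not the real
MLX5_IB_MMAP_DEVICE_MEM constant.

#include <assert.h>
#include <stdio.h>

#define MMAP_DEVICE_MEM 7u      /* illustrative stand-in for MLX5_IB_MMAP_DEVICE_MEM */

int main(void)
{
        unsigned long min_pgoff = MMAP_DEVICE_MEM << 16;
        unsigned long max_pgoff = (MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1;

        /* Pretend the core handed out this offset from inside the range. */
        unsigned long start_pgoff = min_pgoff + 42;

        unsigned long command = start_pgoff >> 16;      /* recovered on the mmap path */
        unsigned long page_idx = start_pgoff & 0xFFFF;  /* value copied to userspace */

        assert(start_pgoff >= min_pgoff && start_pgoff <= max_pgoff);
        assert(command == MMAP_DEVICE_MEM);
        printf("pgoff=0x%lx -> command=%lu page_idx=%lu\n",
               start_pgoff, command, page_idx);
        return 0;
}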

diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -118,6 +118,10 @@ enum {
         MLX5_MEMIC_BASE_SIZE = 1 << MLX5_MEMIC_BASE_ALIGN,
 };
 
+enum mlx5_ib_mmap_type {
+        MLX5_IB_MMAP_TYPE_MEMIC = 1,
+};
+
 #define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) \
         (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
 #define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
@@ -135,7 +139,6 @@ struct mlx5_ib_ucontext {
         u32                     tdn;
 
         u64                     lib_caps;
-        DECLARE_BITMAP(dm_pages, MLX5_MAX_MEMIC_PAGES);
         u16                     devx_uid;
         /* For RoCE LAG TX affinity */
         atomic_t                tx_port_affinity;
@@ -556,6 +559,12 @@ enum mlx5_ib_mtt_access_flags {
         MLX5_IB_MTT_WRITE = (1 << 1),
 };
 
+struct mlx5_user_mmap_entry {
+        struct rdma_user_mmap_entry rdma_entry;
+        u8 mmap_flag;
+        u64 address;
+};
+
 struct mlx5_ib_dm {
         struct ib_dm            ibdm;
         phys_addr_t             dev_addr;
@@ -567,6 +576,7 @@ struct mlx5_ib_dm {
                 } icm_dm;
                 /* other dm types specific params should be added here */
         };
+        struct mlx5_user_mmap_entry mentry;
 };
 
 #define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
@@ -1101,6 +1111,13 @@ to_mflow_act(struct ib_flow_action *ibact)
         return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
 }
 
+static inline struct mlx5_user_mmap_entry *
+to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
+{
+        return container_of(rdma_entry,
+                struct mlx5_user_mmap_entry, rdma_entry);
+}
+
 int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
                         struct ib_udata *udata, unsigned long virt,
                         struct mlx5_db *db);

diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -389,7 +389,7 @@ void rxe_rcv(struct sk_buff *skb)
 
         calc_icrc = rxe_icrc_hdr(pkt, skb);
         calc_icrc = rxe_crc32(rxe, calc_icrc, (u8 *)payload_addr(pkt),
-                              payload_size(pkt));
+                              payload_size(pkt) + bth_pad(pkt));
         calc_icrc = (__force u32)cpu_to_be32(~calc_icrc);
         if (unlikely(calc_icrc != pack_icrc)) {
                 if (skb->protocol == htons(ETH_P_IPV6))

diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -500,6 +500,12 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                         if (err)
                                 return err;
                 }
+                if (bth_pad(pkt)) {
+                        u8 *pad = payload_addr(pkt) + paylen;
+
+                        memset(pad, 0, bth_pad(pkt));
+                        crc = rxe_crc32(rxe, crc, pad, bth_pad(pkt));
+                }
         }
 
         p = payload_addr(pkt) + paylen + bth_pad(pkt);

diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -732,6 +732,13 @@ static enum resp_states read_reply(struct rxe_qp *qp,
         if (err)
                 pr_err("Failed copying memory\n");
 
+        if (bth_pad(&ack_pkt)) {
+                struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+                u8 *pad = payload_addr(&ack_pkt) + payload;
+
+                memset(pad, 0, bth_pad(&ack_pkt));
+                icrc = rxe_crc32(rxe, icrc, pad, bth_pad(&ack_pkt));
+        }
         p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
         *p = ~icrc;

diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
@@ -2832,6 +2832,11 @@ int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
 int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
                                 struct rdma_user_mmap_entry *entry,
                                 size_t length);
+int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
+                                      struct rdma_user_mmap_entry *entry,
+                                      size_t length, u32 min_pgoff,
+                                      u32 max_pgoff);
+
 struct rdma_user_mmap_entry *
 rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
                                unsigned long pgoff);