RDMA 5.10 second rc pull request
A few more merge window regressions that didn't make rc1: - New validation in the DMA layer triggers wrong use of the DMA layer in rxe, siw and rdmavt - Accidental change of a hypervisor facing ABI when widening the port speed u8 to u16 in vmw_pvrdma - Memory leak on error unwind in SRP target -----BEGIN PGP SIGNATURE----- iQIzBAABCgAdFiEEfB7FMLh+8QxL+6i3OG33FX4gmxoFAl+kPikACgkQOG33FX4g mxqBkQ//cUlx1JfZp2MDlvbrpk10+GTPrZt3PJkL7GcMDjIvplk4xMXvC2rp9PH0 z3cuVblQI3skdQnokjrykpLLakBoe0y6pzqIrBZ4bq36Ggry5i88YD3yMBbCkHhl ZPKxcYGd2Qey32PNVe4KmYnZ1MPPQZzPYAUaMxvroZWbWPjfOsXCJC7wxZkQs7Qn CcqCFVJ7IU2YTK7ygLlRWnmjhNn0wdkDX6t4YhSB+EnTJosPYxGtorKa9/IpZJ5C NBhAJ7MiQGK5XtHdFpANuB+GYnm3Aob/UJl9YR3wvtzqHbWwCxoiSUlkqkjxtoak +6b6eS4XmubePqtd0AnuIpNkfi09CGe6VKuUwDsSt6eTMNHtJNsLR8LqkfblKb/9 V9U19/4l2D8iedUR1Y3WR51diidJgHs7eSD9ycASTJ5HJqgBxz77K4eORu5zqMyr QtcnMBB7nYQ5tNYgz3s78xLorFjCbRAvtyvVPG3HXQcSEuauYJjrMXo8BbxNmI/Z JIzJhDsrm6S6FRu9BzMISNBHJl4ay5+Uv9A9SmFytmeXDGvDHVIuiwW1GbUfbR8n KecuAC+/8459LkeVf8h5nonOi30NbLOX4fpCJBi1PljBsbYl4ET7mhy9mS0mYe0s 9lj/VbhHP8xuT2JiX15vopyUMCPVvlXaiJKp7ccK6lq42muv6Kc= =y+a3 -----END PGP SIGNATURE----- Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma Pull rdma fixes from Jason Gunthorpe: "A few more merge window regressions that didn't make rc1: - New validation in the DMA layer triggers wrong use of the DMA layer in rxe, siw and rdmavt - Accidental change of a hypervisor facing ABI when widening the port speed u8 to u16 in vmw_pvrdma - Memory leak on error unwind in SRP target" * tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: RDMA/srpt: Fix typo in srpt_unregister_mad_agent docstring RDMA/vmw_pvrdma: Fix the active_speed and phys_state value IB/srpt: Fix memory leak in srpt_add_one RDMA: Fix software RDMA drivers for dma mapping error
This commit is contained in:
Commit
6f3f374ac0
|
@@ -176,7 +176,7 @@ struct pvrdma_port_attr {
|
||||||
u8 subnet_timeout;
|
u8 subnet_timeout;
|
||||||
u8 init_type_reply;
|
u8 init_type_reply;
|
||||||
u8 active_width;
|
u8 active_width;
|
||||||
u16 active_speed;
|
u8 active_speed;
|
||||||
u8 phys_state;
|
u8 phys_state;
|
||||||
u8 reserved[2];
|
u8 reserved[2];
|
||||||
};
|
};
|
||||||
|
|
|
@@ -524,6 +524,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
|
||||||
int rvt_register_device(struct rvt_dev_info *rdi)
|
int rvt_register_device(struct rvt_dev_info *rdi)
|
||||||
{
|
{
|
||||||
int ret = 0, i;
|
int ret = 0, i;
|
||||||
|
u64 dma_mask;
|
||||||
|
|
||||||
if (!rdi)
|
if (!rdi)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
@@ -580,8 +581,10 @@ int rvt_register_device(struct rvt_dev_info *rdi)
|
||||||
|
|
||||||
/* DMA Operations */
|
/* DMA Operations */
|
||||||
rdi->ibdev.dev.dma_parms = rdi->ibdev.dev.parent->dma_parms;
|
rdi->ibdev.dev.dma_parms = rdi->ibdev.dev.parent->dma_parms;
|
||||||
dma_set_coherent_mask(&rdi->ibdev.dev,
|
dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
|
||||||
rdi->ibdev.dev.parent->coherent_dma_mask);
|
ret = dma_coerce_mask_and_coherent(&rdi->ibdev.dev, dma_mask);
|
||||||
|
if (ret)
|
||||||
|
goto bail_wss;
|
||||||
|
|
||||||
/* Protection Domain */
|
/* Protection Domain */
|
||||||
spin_lock_init(&rdi->n_pds_lock);
|
spin_lock_init(&rdi->n_pds_lock);
|
||||||
|
|
|
@@ -1118,6 +1118,7 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
|
||||||
int err;
|
int err;
|
||||||
struct ib_device *dev = &rxe->ib_dev;
|
struct ib_device *dev = &rxe->ib_dev;
|
||||||
struct crypto_shash *tfm;
|
struct crypto_shash *tfm;
|
||||||
|
u64 dma_mask;
|
||||||
|
|
||||||
strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
|
strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
|
||||||
|
|
||||||
|
@@ -1130,7 +1131,10 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
|
||||||
rxe->ndev->dev_addr);
|
rxe->ndev->dev_addr);
|
||||||
dev->dev.dma_parms = &rxe->dma_parms;
|
dev->dev.dma_parms = &rxe->dma_parms;
|
||||||
dma_set_max_seg_size(&dev->dev, UINT_MAX);
|
dma_set_max_seg_size(&dev->dev, UINT_MAX);
|
||||||
dma_set_coherent_mask(&dev->dev, dma_get_required_mask(&dev->dev));
|
dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
|
||||||
|
err = dma_coerce_mask_and_coherent(&dev->dev, dma_mask);
|
||||||
|
if (err)
|
||||||
|
return err;
|
||||||
|
|
||||||
dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
|
dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
|
||||||
| BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
|
| BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
|
||||||
|
|
|
@@ -306,6 +306,7 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
|
||||||
struct siw_device *sdev = NULL;
|
struct siw_device *sdev = NULL;
|
||||||
struct ib_device *base_dev;
|
struct ib_device *base_dev;
|
||||||
struct device *parent = netdev->dev.parent;
|
struct device *parent = netdev->dev.parent;
|
||||||
|
u64 dma_mask;
|
||||||
int rv;
|
int rv;
|
||||||
|
|
||||||
if (!parent) {
|
if (!parent) {
|
||||||
|
@@ -384,8 +385,10 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
|
||||||
base_dev->dev.parent = parent;
|
base_dev->dev.parent = parent;
|
||||||
base_dev->dev.dma_parms = &sdev->dma_parms;
|
base_dev->dev.dma_parms = &sdev->dma_parms;
|
||||||
dma_set_max_seg_size(&base_dev->dev, UINT_MAX);
|
dma_set_max_seg_size(&base_dev->dev, UINT_MAX);
|
||||||
dma_set_coherent_mask(&base_dev->dev,
|
dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
|
||||||
dma_get_required_mask(&base_dev->dev));
|
if (dma_coerce_mask_and_coherent(&base_dev->dev, dma_mask))
|
||||||
|
goto error;
|
||||||
|
|
||||||
base_dev->num_comp_vectors = num_possible_cpus();
|
base_dev->num_comp_vectors = num_possible_cpus();
|
||||||
|
|
||||||
xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
|
xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
|
||||||
|
|
|
@@ -622,10 +622,11 @@ static int srpt_refresh_port(struct srpt_port *sport)
|
||||||
/**
|
/**
|
||||||
* srpt_unregister_mad_agent - unregister MAD callback functions
|
* srpt_unregister_mad_agent - unregister MAD callback functions
|
||||||
* @sdev: SRPT HCA pointer.
|
* @sdev: SRPT HCA pointer.
|
||||||
|
* @port_cnt: number of ports with registered MAD
|
||||||
*
|
*
|
||||||
* Note: It is safe to call this function more than once for the same device.
|
* Note: It is safe to call this function more than once for the same device.
|
||||||
*/
|
*/
|
||||||
static void srpt_unregister_mad_agent(struct srpt_device *sdev)
|
static void srpt_unregister_mad_agent(struct srpt_device *sdev, int port_cnt)
|
||||||
{
|
{
|
||||||
struct ib_port_modify port_modify = {
|
struct ib_port_modify port_modify = {
|
||||||
.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
|
.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
|
||||||
|
@@ -633,7 +634,7 @@ static void srpt_unregister_mad_agent(struct srpt_device *sdev)
|
||||||
struct srpt_port *sport;
|
struct srpt_port *sport;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
|
for (i = 1; i <= port_cnt; i++) {
|
||||||
sport = &sdev->port[i - 1];
|
sport = &sdev->port[i - 1];
|
||||||
WARN_ON(sport->port != i);
|
WARN_ON(sport->port != i);
|
||||||
if (sport->mad_agent) {
|
if (sport->mad_agent) {
|
||||||
|
@@ -3185,7 +3186,8 @@ static int srpt_add_one(struct ib_device *device)
|
||||||
if (ret) {
|
if (ret) {
|
||||||
pr_err("MAD registration failed for %s-%d.\n",
|
pr_err("MAD registration failed for %s-%d.\n",
|
||||||
dev_name(&sdev->device->dev), i);
|
dev_name(&sdev->device->dev), i);
|
||||||
goto err_event;
|
i--;
|
||||||
|
goto err_port;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -3197,7 +3199,8 @@ static int srpt_add_one(struct ib_device *device)
|
||||||
pr_debug("added %s.\n", dev_name(&device->dev));
|
pr_debug("added %s.\n", dev_name(&device->dev));
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
err_event:
|
err_port:
|
||||||
|
srpt_unregister_mad_agent(sdev, i);
|
||||||
ib_unregister_event_handler(&sdev->event_handler);
|
ib_unregister_event_handler(&sdev->event_handler);
|
||||||
err_cm:
|
err_cm:
|
||||||
if (sdev->cm_id)
|
if (sdev->cm_id)
|
||||||
|
@@ -3221,7 +3224,7 @@ static void srpt_remove_one(struct ib_device *device, void *client_data)
|
||||||
struct srpt_device *sdev = client_data;
|
struct srpt_device *sdev = client_data;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
srpt_unregister_mad_agent(sdev);
|
srpt_unregister_mad_agent(sdev, sdev->device->phys_port_cnt);
|
||||||
|
|
||||||
ib_unregister_event_handler(&sdev->event_handler);
|
ib_unregister_event_handler(&sdev->event_handler);
|
||||||
|
|
||||||
|
|
Loading…
Reference in new issue