rpmsg updates for v4.17
This transitions the rpmsg_trysend() code paths of SMD and GLINK to use
non-sleeping locks, reverts the overly optimistic handling of discovered
SMD channels, and fixes an issue in SMD where incoming messages race with
the probing of a client driver.

Merge tag 'rpmsg-v4.17' of git://github.com/andersson/remoteproc

Pull rpmsg updates from Bjorn Andersson:

 - transition the rpmsg_trysend() code paths of SMD and GLINK to use
   non-sleeping locks

 - revert the overly optimistic handling of discovered SMD channels

 - fix an issue in SMD where incoming messages race with the probing of
   a client driver

* tag 'rpmsg-v4.17' of git://github.com/andersson/remoteproc:
  rpmsg: smd: Use announce_create to process any receive work
  rpmsg: Only invoke announce_create for rpdev with endpoints
  rpmsg: smd: Fix container_of macros
  Revert "rpmsg: smd: Create device for all channels"
  rpmsg: glink: Use spinlock in tx path
  rpmsg: smd: Use spinlock in tx path
  rpmsg: smd: use put_device() if device_register fail
  rpmsg: glink: use put_device() if device_register fail
This commit is contained in:
commit 9ab89c407d
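The headline change of the series is that the SMD and GLINK transmit paths no longer take a mutex, so rpmsg_trysend() becomes safe to call where sleeping is not allowed. A minimal caller sketch, with hypothetical names that are not part of this series:

#include <linux/rpmsg.h>

/*
 * With the tx paths converted to spinlocks, the non-blocking variant can
 * be used from atomic context, e.g. from an IRQ handler or while holding
 * a spinlock. A full FIFO is reported back as an error (typically
 * -EAGAIN) instead of sleeping until space becomes available.
 */
static int example_send_nonblocking(struct rpmsg_device *rpdev,
				    void *data, int len)
{
	/* On -EAGAIN the caller can defer the message, e.g. to a workqueue. */
	return rpmsg_trysend(rpdev->ept, data, len);
}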
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -113,7 +113,7 @@ struct qcom_glink {
 	spinlock_t rx_lock;
 	struct list_head rx_queue;
 
-	struct mutex tx_lock;
+	spinlock_t tx_lock;
 
 	spinlock_t idr_lock;
 	struct idr lcids;
@@ -288,15 +288,14 @@ static int qcom_glink_tx(struct qcom_glink *glink,
 			 const void *data, size_t dlen, bool wait)
 {
 	unsigned int tlen = hlen + dlen;
-	int ret;
+	unsigned long flags;
+	int ret = 0;
 
 	/* Reject packets that are too big */
 	if (tlen >= glink->tx_pipe->length)
 		return -EINVAL;
 
-	ret = mutex_lock_interruptible(&glink->tx_lock);
-	if (ret)
-		return ret;
+	spin_lock_irqsave(&glink->tx_lock, flags);
 
 	while (qcom_glink_tx_avail(glink) < tlen) {
 		if (!wait) {
@@ -304,7 +303,12 @@ static int qcom_glink_tx(struct qcom_glink *glink,
 			goto out;
 		}
 
+		/* Wait without holding the tx_lock */
+		spin_unlock_irqrestore(&glink->tx_lock, flags);
+
 		usleep_range(10000, 15000);
+
+		spin_lock_irqsave(&glink->tx_lock, flags);
 	}
 
 	qcom_glink_tx_write(glink, hdr, hlen, data, dlen);
@@ -313,7 +317,7 @@ static int qcom_glink_tx(struct qcom_glink *glink,
 	mbox_client_txdone(glink->mbox_chan, 0);
 
 out:
-	mutex_unlock(&glink->tx_lock);
+	spin_unlock_irqrestore(&glink->tx_lock, flags);
 
 	return ret;
 }
@@ -1567,7 +1571,7 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
 	glink->features = features;
 	glink->intentless = intentless;
 
-	mutex_init(&glink->tx_lock);
+	spin_lock_init(&glink->tx_lock);
 	spin_lock_init(&glink->rx_lock);
 	INIT_LIST_HEAD(&glink->rx_queue);
 	INIT_WORK(&glink->rx_work, qcom_glink_work);
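For the blocking rpmsg_send() path the mutex-to-spinlock conversion means the driver may no longer sleep while holding tx_lock, which is why the hunks above release the lock around usleep_range(). A standalone sketch of that pattern, with a hypothetical helper that is not driver code:

#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/types.h>

/*
 * Waiting for a condition that is only checked under a spinlock: drop the
 * lock before sleeping, re-take it before re-checking the condition.
 */
static void example_wait_for_space(spinlock_t *lock, bool (*space_ok)(void))
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	while (!space_ok()) {
		spin_unlock_irqrestore(lock, flags);
		usleep_range(10000, 15000);	/* sleeping is legal here */
		spin_lock_irqsave(lock, flags);
	}
	/* ... do the work that needs the lock ... */
	spin_unlock_irqrestore(lock, flags);
}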
--- a/drivers/rpmsg/qcom_glink_smem.c
+++ b/drivers/rpmsg/qcom_glink_smem.c
@@ -217,6 +217,7 @@ struct qcom_glink *qcom_glink_smem_register(struct device *parent,
 	ret = device_register(dev);
 	if (ret) {
 		pr_err("failed to register glink edge\n");
+		put_device(dev);
 		return ERR_PTR(ret);
 	}
 
@@ -299,7 +300,7 @@ struct qcom_glink *qcom_glink_smem_register(struct device *parent,
 	return glink;
 
 err_put_dev:
-	put_device(dev);
+	device_unregister(dev);
 
 	return ERR_PTR(ret);
 }
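The two put_device() fixes in this pull follow the documented struct device rule: once device_register() has been called, the device must be released with put_device() rather than kfree(), even when registration fails, while a successfully registered device is later torn down with device_unregister(). A generic sketch of that rule, with hypothetical names:

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/err.h>

static void example_release(struct device *dev)
{
	kfree(dev);	/* invoked from the final put_device() */
}

static struct device *example_add_child(struct device *parent)
{
	struct device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->parent = parent;
	dev->release = example_release;
	dev_set_name(dev, "example");

	ret = device_register(dev);
	if (ret) {
		/* Never kfree() here: drop the reference instead */
		put_device(dev);
		return ERR_PTR(ret);
	}

	return dev;	/* later torn down with device_unregister(dev) */
}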
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -167,9 +167,9 @@ struct qcom_smd_endpoint {
 	struct qcom_smd_channel *qsch;
 };
 
-#define to_smd_device(_rpdev)	container_of(_rpdev, struct qcom_smd_device, rpdev)
+#define to_smd_device(r)	container_of(r, struct qcom_smd_device, rpdev)
 #define to_smd_edge(d)		container_of(d, struct qcom_smd_edge, dev)
-#define to_smd_endpoint(ept)	container_of(ept, struct qcom_smd_endpoint, ept)
+#define to_smd_endpoint(e)	container_of(e, struct qcom_smd_endpoint, ept)
 
 /**
  * struct qcom_smd_channel - smd channel struct
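The macro rename above matters because container_of() receives the member name as a bare token: when the macro parameter is also called ept, expansion replaces the member name with the caller's argument, so the macro only compiled when the argument happened to be a variable literally named ept. A minimal illustration with hypothetical types:

#include <linux/kernel.h>
#include <linux/rpmsg.h>

struct example_endpoint {
	int dummy;
	struct rpmsg_endpoint ept;
};

/*
 * Broken: both occurrences of 'ept' are replaced by the argument, so
 * to_example_endpoint(foo) expands to
 * container_of(foo, struct example_endpoint, foo) and fails to build.
 */
#define to_example_endpoint(ept) \
	container_of(ept, struct example_endpoint, ept)

/* Fixed: the parameter no longer shadows the member name. */
#define to_example_endpoint_fixed(e) \
	container_of(e, struct example_endpoint, ept)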
@@ -205,7 +205,7 @@ struct qcom_smd_channel {
 	struct smd_channel_info_pair *info;
 	struct smd_channel_info_word_pair *info_word;
 
-	struct mutex tx_lock;
+	spinlock_t tx_lock;
 	wait_queue_head_t fblockread_event;
 
 	void *tx_fifo;
@@ -729,6 +729,7 @@ static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data,
 {
 	__le32 hdr[5] = { cpu_to_le32(len), };
 	int tlen = sizeof(hdr) + len;
+	unsigned long flags;
 	int ret;
 
 	/* Word aligned channels only accept word size aligned data */
@@ -739,9 +740,11 @@ static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data,
 	if (tlen >= channel->fifo_size)
 		return -EINVAL;
 
-	ret = mutex_lock_interruptible(&channel->tx_lock);
-	if (ret)
-		return ret;
+	/* Highlight the fact that if we enter the loop below we might sleep */
+	if (wait)
+		might_sleep();
+
+	spin_lock_irqsave(&channel->tx_lock, flags);
 
 	while (qcom_smd_get_tx_avail(channel) < tlen &&
 	       channel->state == SMD_CHANNEL_OPENED) {
@@ -753,7 +756,7 @@ static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data,
 		SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 0);
 
 		/* Wait without holding the tx_lock */
-		mutex_unlock(&channel->tx_lock);
+		spin_unlock_irqrestore(&channel->tx_lock, flags);
 
 		ret = wait_event_interruptible(channel->fblockread_event,
 			qcom_smd_get_tx_avail(channel) >= tlen ||
@@ -761,9 +764,7 @@ static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data,
 		if (ret)
 			return ret;
 
-		ret = mutex_lock_interruptible(&channel->tx_lock);
-		if (ret)
-			return ret;
+		spin_lock_irqsave(&channel->tx_lock, flags);
 
 		SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
 	}
@@ -787,7 +788,7 @@ static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data,
 	qcom_smd_signal_channel(channel);
 
 out_unlock:
-	mutex_unlock(&channel->tx_lock);
+	spin_unlock_irqrestore(&channel->tx_lock, flags);
 
 	return ret;
 }
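Note how the blocking branch of __qcom_smd_send() keeps its ability to sleep: wait_event_interruptible() runs only after the spinlock has been released, and might_sleep() is added so the blocking variant is still flagged under CONFIG_DEBUG_ATOMIC_SLEEP even when the FIFO has room and no wait happens. A condensed sketch of that split, not the literal driver code:

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/*
 * Sketch: blocking sender built on a spinlock-protected FIFO; 'avail' and
 * the wait queue stand in for the driver's channel state.
 */
static int example_blocking_send(spinlock_t *lock, wait_queue_head_t *wq,
				 int *avail, int tlen, bool wait)
{
	unsigned long flags;
	int ret = 0;

	if (wait)
		might_sleep();	/* catch callers in atomic context early */

	spin_lock_irqsave(lock, flags);
	while (*avail < tlen) {
		if (!wait) {
			ret = -EAGAIN;
			goto out;
		}

		/* Sleep only after dropping the lock */
		spin_unlock_irqrestore(lock, flags);
		ret = wait_event_interruptible(*wq, *avail >= tlen);
		if (ret)
			return ret;
		spin_lock_irqsave(lock, flags);
	}

	/* ... write tlen bytes into the FIFO under the lock ... */
out:
	spin_unlock_irqrestore(lock, flags);
	return ret;
}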
@@ -996,8 +997,26 @@ static struct device_node *qcom_smd_match_channel(struct device_node *edge_node,
 	return NULL;
 }
 
+static int qcom_smd_announce_create(struct rpmsg_device *rpdev)
+{
+	struct qcom_smd_endpoint *qept = to_smd_endpoint(rpdev->ept);
+	struct qcom_smd_channel *channel = qept->qsch;
+	unsigned long flags;
+	bool kick_state;
+
+	spin_lock_irqsave(&channel->recv_lock, flags);
+	kick_state = qcom_smd_channel_intr(channel);
+	spin_unlock_irqrestore(&channel->recv_lock, flags);
+
+	if (kick_state)
+		schedule_work(&channel->edge->state_work);
+
+	return 0;
+}
+
 static const struct rpmsg_device_ops qcom_smd_device_ops = {
 	.create_ept = qcom_smd_create_ept,
+	.announce_create = qcom_smd_announce_create,
 };
 
 static const struct rpmsg_endpoint_ops qcom_smd_endpoint_ops = {
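The new announce_create hook is what closes the race described in the merge message: data can already sit in the rx FIFO when the channel opens, but it can only be delivered once a client driver has been probed and its endpoint exists, so the hook re-runs the receive work at that point. Client drivers need no change; a hypothetical SMD/rpmsg client still just registers a callback:

#include <linux/module.h>
#include <linux/rpmsg.h>

/*
 * Hypothetical client, for illustration only: messages that arrived on
 * the channel before example_probe() ran are now replayed to example_cb()
 * via the announce_create hook added above.
 */
static int example_cb(struct rpmsg_device *rpdev, void *data, int len,
		      void *priv, u32 src)
{
	dev_dbg(&rpdev->dev, "received %d bytes from 0x%x\n", len, src);
	return 0;
}

static int example_probe(struct rpmsg_device *rpdev)
{
	return 0;
}

static struct rpmsg_driver example_rpmsg_driver = {
	.drv.name	= "example_rpmsg_client",
	.probe		= example_probe,
	.callback	= example_cb,
};
module_rpmsg_driver(example_rpmsg_driver);

MODULE_DESCRIPTION("Illustrative rpmsg client");
MODULE_LICENSE("GPL v2");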
@@ -1090,7 +1109,7 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *edge,
 	if (!channel->name)
 		return ERR_PTR(-ENOMEM);
 
-	mutex_init(&channel->tx_lock);
+	spin_lock_init(&channel->tx_lock);
 	spin_lock_init(&channel->recv_lock);
 	init_waitqueue_head(&channel->fblockread_event);
 	init_waitqueue_head(&channel->state_change_event);
@@ -1234,6 +1253,11 @@ static void qcom_channel_state_worker(struct work_struct *work)
 		if (channel->state != SMD_CHANNEL_CLOSED)
 			continue;
 
+		remote_state = GET_RX_CHANNEL_INFO(channel, state);
+		if (remote_state != SMD_CHANNEL_OPENING &&
+		    remote_state != SMD_CHANNEL_OPENED)
+			continue;
+
 		if (channel->registered)
 			continue;
 
@@ -1408,6 +1432,7 @@ struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
 	ret = device_register(&edge->dev);
 	if (ret) {
 		pr_err("failed to register smd edge\n");
+		put_device(&edge->dev);
 		return ERR_PTR(ret);
 	}
 
@@ -1428,7 +1453,7 @@ struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
 	return edge;
 
 unregister_dev:
-	put_device(&edge->dev);
+	device_unregister(&edge->dev);
 	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL(qcom_smd_register_edge);
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -442,7 +442,7 @@ static int rpmsg_dev_probe(struct device *dev)
 		goto out;
 	}
 
-	if (rpdev->ops->announce_create)
+	if (ept && rpdev->ops->announce_create)
 		err = rpdev->ops->announce_create(rpdev);
 out:
 	return err;
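The rpmsg core change guards against drivers without a callback: rpmsg_dev_probe() creates a default endpoint only when the driver supplies one, so ept can legitimately be NULL and announce_create must then be skipped. A hedged sketch of such a driver, which creates its own endpoint instead (names are illustrative):

#include <linux/module.h>
#include <linux/string.h>
#include <linux/rpmsg.h>

static int example_ept_cb(struct rpmsg_device *rpdev, void *data, int len,
			  void *priv, u32 src)
{
	return 0;
}

/*
 * No .callback in the rpmsg_driver below, so the core creates no default
 * endpoint for this device; the driver opens its own endpoint in probe.
 * A full driver would store ept and call rpmsg_destroy_ept() from .remove.
 */
static int example_probe(struct rpmsg_device *rpdev)
{
	struct rpmsg_channel_info chinfo = {
		.src = RPMSG_ADDR_ANY,
		.dst = RPMSG_ADDR_ANY,
	};
	struct rpmsg_endpoint *ept;

	strscpy(chinfo.name, rpdev->id.name, sizeof(chinfo.name));

	ept = rpmsg_create_ept(rpdev, example_ept_cb, NULL, chinfo);
	if (!ept)
		return -ENOMEM;

	return 0;
}

static struct rpmsg_driver example_noncb_driver = {
	.drv.name	= "example_noncb_client",
	.probe		= example_probe,
};
module_rpmsg_driver(example_noncb_driver);

MODULE_LICENSE("GPL v2");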