Merge branch 'net-ipa-wake-up-system-on-RX-available'
Alex Elder says: ==================== net: ipa: wake up system on RX available This series arranges for the IPA driver to wake up a suspended system if the IPA hardware has a packet to deliver to the AP. Version 2 replaced the first patch from version 1 with three patches, in response to David Miller's feedback. And based on Bjorn Andersson's feedback on version 2, this version reworks the tracking of IPA clock references. As a result, we no longer need a flag to determine whether a "don't suspend" clock reference is held (though a bit in a bitmask is still used for a different purpose). In summary: - A refcount_t is used to track IPA clock references where an atomic_t was previously used. (This may go away soon as well, with upcoming work to implement runtime PM.) - We no longer track whether a special reference has been taken to avoid suspending IPA. - A bit in a bitmask is used to ensure we only trigger a system resume once per system suspend. And from the original series: - Suspending endpoints only occurs when suspending the driver, not when dropping the last clock reference. Resuming endpoints is also disconnected from starting the clock. - The IPA SUSPEND interrupt is now a wakeup interrupt. If it fires, it schedules a system resume operation. - The GSI interrupt is no longer a wakeup interrupt. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Коммит
5e43df14d6
|
@ -1987,31 +1987,26 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
|
|||
}
|
||||
gsi->irq = irq;
|
||||
|
||||
ret = enable_irq_wake(gsi->irq);
|
||||
if (ret)
|
||||
dev_warn(dev, "error %d enabling gsi wake irq\n", ret);
|
||||
gsi->irq_wake_enabled = !ret;
|
||||
|
||||
/* Get GSI memory range and map it */
|
||||
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
|
||||
if (!res) {
|
||||
dev_err(dev, "DT error getting \"gsi\" memory property\n");
|
||||
ret = -ENODEV;
|
||||
goto err_disable_irq_wake;
|
||||
goto err_free_irq;
|
||||
}
|
||||
|
||||
size = resource_size(res);
|
||||
if (res->start > U32_MAX || size > U32_MAX - res->start) {
|
||||
dev_err(dev, "DT memory resource \"gsi\" out of range\n");
|
||||
ret = -EINVAL;
|
||||
goto err_disable_irq_wake;
|
||||
goto err_free_irq;
|
||||
}
|
||||
|
||||
gsi->virt = ioremap(res->start, size);
|
||||
if (!gsi->virt) {
|
||||
dev_err(dev, "unable to remap \"gsi\" memory\n");
|
||||
ret = -ENOMEM;
|
||||
goto err_disable_irq_wake;
|
||||
goto err_free_irq;
|
||||
}
|
||||
|
||||
ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc);
|
||||
|
@ -2025,9 +2020,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
|
|||
|
||||
err_iounmap:
|
||||
iounmap(gsi->virt);
|
||||
err_disable_irq_wake:
|
||||
if (gsi->irq_wake_enabled)
|
||||
(void)disable_irq_wake(gsi->irq);
|
||||
err_free_irq:
|
||||
free_irq(gsi->irq, gsi);
|
||||
|
||||
return ret;
|
||||
|
@ -2038,8 +2031,6 @@ void gsi_exit(struct gsi *gsi)
|
|||
{
|
||||
mutex_destroy(&gsi->mutex);
|
||||
gsi_channel_exit(gsi);
|
||||
if (gsi->irq_wake_enabled)
|
||||
(void)disable_irq_wake(gsi->irq);
|
||||
free_irq(gsi->irq, gsi);
|
||||
iounmap(gsi->virt);
|
||||
}
|
||||
|
|
|
@ -150,7 +150,6 @@ struct gsi {
|
|||
struct net_device dummy_dev; /* needed for NAPI */
|
||||
void __iomem *virt;
|
||||
u32 irq;
|
||||
bool irq_wake_enabled;
|
||||
u32 channel_count;
|
||||
u32 evt_ring_count;
|
||||
struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
|
||||
|
|
|
@ -27,15 +27,25 @@ struct ipa_clock;
|
|||
struct ipa_smp2p;
|
||||
struct ipa_interrupt;
|
||||
|
||||
/**
|
||||
* enum ipa_flag - IPA state flags
|
||||
* @IPA_FLAG_RESUMED: Whether resume from suspend has been signaled
|
||||
* @IPA_FLAG_COUNT: Number of defined IPA flags
|
||||
*/
|
||||
enum ipa_flag {
|
||||
IPA_FLAG_RESUMED,
|
||||
IPA_FLAG_COUNT, /* Last; not a flag */
|
||||
};
|
||||
|
||||
/**
|
||||
* struct ipa - IPA information
|
||||
* @gsi: Embedded GSI structure
|
||||
* @flags: Boolean state flags
|
||||
* @version: IPA hardware version
|
||||
* @pdev: Platform device
|
||||
* @modem_rproc: Remoteproc handle for modem subsystem
|
||||
* @smp2p: SMP2P information
|
||||
* @clock: IPA clocking information
|
||||
* @suspend_ref: Whether clock reference preventing suspend taken
|
||||
* @table_addr: DMA address of filter/route table content
|
||||
* @table_virt: Virtual address of filter/route table content
|
||||
* @interrupt: IPA Interrupt information
|
||||
|
@ -70,6 +80,7 @@ struct ipa_interrupt;
|
|||
*/
|
||||
struct ipa {
|
||||
struct gsi gsi;
|
||||
DECLARE_BITMAP(flags, IPA_FLAG_COUNT);
|
||||
enum ipa_version version;
|
||||
struct platform_device *pdev;
|
||||
struct rproc *modem_rproc;
|
||||
|
@ -77,7 +88,6 @@ struct ipa {
|
|||
void *notifier;
|
||||
struct ipa_smp2p *smp2p;
|
||||
struct ipa_clock *clock;
|
||||
atomic_t suspend_ref;
|
||||
|
||||
dma_addr_t table_addr;
|
||||
__le64 *table_virt;
|
||||
|
@ -104,8 +114,6 @@ struct ipa {
|
|||
void *zero_virt;
|
||||
size_t zero_size;
|
||||
|
||||
struct wakeup_source *wakeup_source;
|
||||
|
||||
/* Bit masks indicating endpoint state */
|
||||
u32 available; /* supported by hardware */
|
||||
u32 filter_map;
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
* Copyright (C) 2018-2020 Linaro Ltd.
|
||||
*/
|
||||
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/refcount.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/device.h>
|
||||
|
@ -51,7 +51,7 @@
|
|||
* @config_path: Configuration space interconnect
|
||||
*/
|
||||
struct ipa_clock {
|
||||
atomic_t count;
|
||||
refcount_t count;
|
||||
struct mutex mutex; /* protects clock enable/disable */
|
||||
struct clk *core;
|
||||
struct icc_path *memory_path;
|
||||
|
@ -195,14 +195,13 @@ static void ipa_clock_disable(struct ipa *ipa)
|
|||
*/
|
||||
bool ipa_clock_get_additional(struct ipa *ipa)
|
||||
{
|
||||
return !!atomic_inc_not_zero(&ipa->clock->count);
|
||||
return refcount_inc_not_zero(&ipa->clock->count);
|
||||
}
|
||||
|
||||
/* Get an IPA clock reference. If the reference count is non-zero, it is
|
||||
* incremented and return is immediate. Otherwise it is checked again
|
||||
* under protection of the mutex, and if appropriate the clock (and
|
||||
* interconnects) are enabled suspended endpoints (if any) are resumed
|
||||
* before returning.
|
||||
* under protection of the mutex, and if appropriate the IPA clock
|
||||
* is enabled.
|
||||
*
|
||||
* Incrementing the reference count is intentionally deferred until
|
||||
* after the clock is running and endpoints are resumed.
|
||||
|
@ -229,28 +228,23 @@ void ipa_clock_get(struct ipa *ipa)
|
|||
goto out_mutex_unlock;
|
||||
}
|
||||
|
||||
ipa_endpoint_resume(ipa);
|
||||
|
||||
atomic_inc(&clock->count);
|
||||
refcount_set(&clock->count, 1);
|
||||
|
||||
out_mutex_unlock:
|
||||
mutex_unlock(&clock->mutex);
|
||||
}
|
||||
|
||||
/* Attempt to remove an IPA clock reference. If this represents the last
|
||||
* reference, suspend endpoints and disable the clock (and interconnects)
|
||||
* under protection of a mutex.
|
||||
/* Attempt to remove an IPA clock reference. If this represents the
|
||||
* last reference, disable the IPA clock under protection of the mutex.
|
||||
*/
|
||||
void ipa_clock_put(struct ipa *ipa)
|
||||
{
|
||||
struct ipa_clock *clock = ipa->clock;
|
||||
|
||||
/* If this is not the last reference there's nothing more to do */
|
||||
if (!atomic_dec_and_mutex_lock(&clock->count, &clock->mutex))
|
||||
if (!refcount_dec_and_mutex_lock(&clock->count, &clock->mutex))
|
||||
return;
|
||||
|
||||
ipa_endpoint_suspend(ipa);
|
||||
|
||||
ipa_clock_disable(ipa);
|
||||
|
||||
mutex_unlock(&clock->mutex);
|
||||
|
@ -294,7 +288,7 @@ struct ipa_clock *ipa_clock_init(struct device *dev)
|
|||
goto err_kfree;
|
||||
|
||||
mutex_init(&clock->mutex);
|
||||
atomic_set(&clock->count, 0);
|
||||
refcount_set(&clock->count, 0);
|
||||
|
||||
return clock;
|
||||
|
||||
|
@ -311,7 +305,7 @@ void ipa_clock_exit(struct ipa_clock *clock)
|
|||
{
|
||||
struct clk *clk = clock->core;
|
||||
|
||||
WARN_ON(atomic_read(&clock->count) != 0);
|
||||
WARN_ON(refcount_read(&clock->count) != 0);
|
||||
mutex_destroy(&clock->mutex);
|
||||
ipa_interconnect_exit(clock);
|
||||
kfree(clock);
|
||||
|
|
|
@ -237,8 +237,16 @@ struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa)
|
|||
goto err_kfree;
|
||||
}
|
||||
|
||||
ret = enable_irq_wake(irq);
|
||||
if (ret) {
|
||||
dev_err(dev, "error %d enabling wakeup for \"ipa\" IRQ\n", ret);
|
||||
goto err_free_irq;
|
||||
}
|
||||
|
||||
return interrupt;
|
||||
|
||||
err_free_irq:
|
||||
free_irq(interrupt->irq, interrupt);
|
||||
err_kfree:
|
||||
kfree(interrupt);
|
||||
|
||||
|
@ -248,6 +256,12 @@ err_kfree:
|
|||
/* Tear down the IPA interrupt framework */
|
||||
void ipa_interrupt_teardown(struct ipa_interrupt *interrupt)
|
||||
{
|
||||
struct device *dev = &interrupt->ipa->pdev->dev;
|
||||
int ret;
|
||||
|
||||
ret = disable_irq_wake(interrupt->irq);
|
||||
if (ret)
|
||||
dev_err(dev, "error %d disabling \"ipa\" IRQ wakeup\n", ret);
|
||||
free_irq(interrupt->irq, interrupt);
|
||||
kfree(interrupt);
|
||||
}
|
||||
|
|
|
@ -75,17 +75,19 @@
|
|||
* @ipa: IPA pointer
|
||||
* @irq_id: IPA interrupt type (unused)
|
||||
*
|
||||
* When in suspended state, the IPA can trigger a resume by sending a SUSPEND
|
||||
* IPA interrupt.
|
||||
* If an RX endpoint is in suspend state, and the IPA has a packet
|
||||
* destined for that endpoint, the IPA generates a SUSPEND interrupt
|
||||
* to inform the AP that it should resume the endpoint. If we get
|
||||
* one of these interrupts we just resume everything.
|
||||
*/
|
||||
static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
|
||||
{
|
||||
/* Take a a single clock reference to prevent suspend. All
|
||||
* endpoints will be resumed as a result. This reference will
|
||||
* be dropped when we get a power management suspend request.
|
||||
/* Just report the event, and let system resume handle the rest.
|
||||
* More than one endpoint could signal this; if so, ignore
|
||||
* all but the first.
|
||||
*/
|
||||
if (!atomic_xchg(&ipa->suspend_ref, 1))
|
||||
ipa_clock_get(ipa);
|
||||
if (!test_and_set_bit(IPA_FLAG_RESUMED, ipa->flags))
|
||||
pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);
|
||||
|
||||
/* Acknowledge/clear the suspend interrupt on all endpoints */
|
||||
ipa_interrupt_suspend_clear_all(ipa->interrupt);
|
||||
|
@ -106,6 +108,7 @@ int ipa_setup(struct ipa *ipa)
|
|||
{
|
||||
struct ipa_endpoint *exception_endpoint;
|
||||
struct ipa_endpoint *command_endpoint;
|
||||
struct device *dev = &ipa->pdev->dev;
|
||||
int ret;
|
||||
|
||||
/* Setup for IPA v3.5.1 has some slight differences */
|
||||
|
@ -123,6 +126,10 @@ int ipa_setup(struct ipa *ipa)
|
|||
|
||||
ipa_uc_setup(ipa);
|
||||
|
||||
ret = device_init_wakeup(dev, true);
|
||||
if (ret)
|
||||
goto err_uc_teardown;
|
||||
|
||||
ipa_endpoint_setup(ipa);
|
||||
|
||||
/* We need to use the AP command TX endpoint to perform other
|
||||
|
@ -158,7 +165,7 @@ int ipa_setup(struct ipa *ipa)
|
|||
|
||||
ipa->setup_complete = true;
|
||||
|
||||
dev_info(&ipa->pdev->dev, "IPA driver setup completed successfully\n");
|
||||
dev_info(dev, "IPA driver setup completed successfully\n");
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -173,6 +180,8 @@ err_command_disable:
|
|||
ipa_endpoint_disable_one(command_endpoint);
|
||||
err_endpoint_teardown:
|
||||
ipa_endpoint_teardown(ipa);
|
||||
(void)device_init_wakeup(dev, false);
|
||||
err_uc_teardown:
|
||||
ipa_uc_teardown(ipa);
|
||||
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
|
||||
ipa_interrupt_teardown(ipa->interrupt);
|
||||
|
@ -200,6 +209,7 @@ static void ipa_teardown(struct ipa *ipa)
|
|||
command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
|
||||
ipa_endpoint_disable_one(command_endpoint);
|
||||
ipa_endpoint_teardown(ipa);
|
||||
(void)device_init_wakeup(&ipa->pdev->dev, false);
|
||||
ipa_uc_teardown(ipa);
|
||||
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
|
||||
ipa_interrupt_teardown(ipa->interrupt);
|
||||
|
@ -508,7 +518,6 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
|
|||
* is held after initialization completes, and won't get dropped
|
||||
* unless/until a system suspend request arrives.
|
||||
*/
|
||||
atomic_set(&ipa->suspend_ref, 1);
|
||||
ipa_clock_get(ipa);
|
||||
|
||||
ipa_hardware_config(ipa);
|
||||
|
@ -544,7 +553,6 @@ err_endpoint_deconfig:
|
|||
err_hardware_deconfig:
|
||||
ipa_hardware_deconfig(ipa);
|
||||
ipa_clock_put(ipa);
|
||||
atomic_set(&ipa->suspend_ref, 0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -562,7 +570,6 @@ static void ipa_deconfig(struct ipa *ipa)
|
|||
ipa_endpoint_deconfig(ipa);
|
||||
ipa_hardware_deconfig(ipa);
|
||||
ipa_clock_put(ipa);
|
||||
atomic_set(&ipa->suspend_ref, 0);
|
||||
}
|
||||
|
||||
static int ipa_firmware_load(struct device *dev)
|
||||
|
@ -709,7 +716,6 @@ static void ipa_validate_build(void)
|
|||
*/
|
||||
static int ipa_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct wakeup_source *wakeup_source;
|
||||
struct device *dev = &pdev->dev;
|
||||
const struct ipa_data *data;
|
||||
struct ipa_clock *clock;
|
||||
|
@ -758,27 +764,17 @@ static int ipa_probe(struct platform_device *pdev)
|
|||
goto err_clock_exit;
|
||||
}
|
||||
|
||||
/* Create a wakeup source. */
|
||||
wakeup_source = wakeup_source_register(dev, "ipa");
|
||||
if (!wakeup_source) {
|
||||
/* The most likely reason for failure is memory exhaustion */
|
||||
ret = -ENOMEM;
|
||||
goto err_clock_exit;
|
||||
}
|
||||
|
||||
/* Allocate and initialize the IPA structure */
|
||||
ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
|
||||
if (!ipa) {
|
||||
ret = -ENOMEM;
|
||||
goto err_wakeup_source_unregister;
|
||||
goto err_clock_exit;
|
||||
}
|
||||
|
||||
ipa->pdev = pdev;
|
||||
dev_set_drvdata(dev, ipa);
|
||||
ipa->modem_rproc = rproc;
|
||||
ipa->clock = clock;
|
||||
atomic_set(&ipa->suspend_ref, 0);
|
||||
ipa->wakeup_source = wakeup_source;
|
||||
ipa->version = data->version;
|
||||
|
||||
ret = ipa_reg_init(ipa);
|
||||
|
@ -857,8 +853,6 @@ err_reg_exit:
|
|||
ipa_reg_exit(ipa);
|
||||
err_kfree_ipa:
|
||||
kfree(ipa);
|
||||
err_wakeup_source_unregister:
|
||||
wakeup_source_unregister(wakeup_source);
|
||||
err_clock_exit:
|
||||
ipa_clock_exit(clock);
|
||||
err_rproc_put:
|
||||
|
@ -872,11 +866,8 @@ static int ipa_remove(struct platform_device *pdev)
|
|||
struct ipa *ipa = dev_get_drvdata(&pdev->dev);
|
||||
struct rproc *rproc = ipa->modem_rproc;
|
||||
struct ipa_clock *clock = ipa->clock;
|
||||
struct wakeup_source *wakeup_source;
|
||||
int ret;
|
||||
|
||||
wakeup_source = ipa->wakeup_source;
|
||||
|
||||
if (ipa->setup_complete) {
|
||||
ret = ipa_modem_stop(ipa);
|
||||
if (ret)
|
||||
|
@ -893,7 +884,6 @@ static int ipa_remove(struct platform_device *pdev)
|
|||
ipa_mem_exit(ipa);
|
||||
ipa_reg_exit(ipa);
|
||||
kfree(ipa);
|
||||
wakeup_source_unregister(wakeup_source);
|
||||
ipa_clock_exit(clock);
|
||||
rproc_put(rproc);
|
||||
|
||||
|
@ -907,13 +897,22 @@ static int ipa_remove(struct platform_device *pdev)
|
|||
* Return: Always returns zero
|
||||
*
|
||||
* Called by the PM framework when a system suspend operation is invoked.
|
||||
* Suspends endpoints and releases the clock reference held to keep
|
||||
* the IPA clock running until this point.
|
||||
*/
|
||||
static int ipa_suspend(struct device *dev)
|
||||
{
|
||||
struct ipa *ipa = dev_get_drvdata(dev);
|
||||
|
||||
/* When a suspended RX endpoint has a packet ready to receive, we
|
||||
* get an IPA SUSPEND interrupt. We trigger a system resume in
|
||||
* that case, but only on the first such interrupt since suspend.
|
||||
*/
|
||||
__clear_bit(IPA_FLAG_RESUMED, ipa->flags);
|
||||
|
||||
ipa_endpoint_suspend(ipa);
|
||||
|
||||
ipa_clock_put(ipa);
|
||||
atomic_set(&ipa->suspend_ref, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -925,6 +924,8 @@ static int ipa_suspend(struct device *dev)
|
|||
* Return: Always returns 0
|
||||
*
|
||||
* Called by the PM framework when a system resume operation is invoked.
|
||||
* Takes an IPA clock reference to keep the clock running until suspend,
|
||||
* and resumes endpoints.
|
||||
*/
|
||||
static int ipa_resume(struct device *dev)
|
||||
{
|
||||
|
@ -933,9 +934,10 @@ static int ipa_resume(struct device *dev)
|
|||
/* This clock reference will keep the IPA out of suspend
|
||||
* until we get a power management suspend request.
|
||||
*/
|
||||
atomic_set(&ipa->suspend_ref, 1);
|
||||
ipa_clock_get(ipa);
|
||||
|
||||
ipa_endpoint_resume(ipa);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
Загрузка…
Ссылка в новой задаче