/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/coresight.h>
#include <linux/dma-mapping.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"

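/*
 * Program the ETR and start collecting trace data in the DMA buffer
 * described by drvdata->paddr/vaddr.  All callers in this file hold
 * drvdata->spinlock when calling this function.
 */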
static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
	u32 axictl;

	/* Zero out the memory to help with debug */
	memset(drvdata->vaddr, 0, drvdata->size);

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

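	/*
	 * Set up the AXI write interface: 16-beat write bursts, a flat
	 * (non scatter-gather) buffer and non-secure accesses.
	 */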
	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
	axictl |= TMC_AXICTL_WR_BURST_16;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	axictl = (axictl &
		  ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
		  TMC_AXICTL_PROT_CTL_B1;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);

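	/*
	 * Program the base address of the trace buffer (assumed to fit in
	 * 32 bits, hence DBAHI is zero), the formatter/flush behaviour and
	 * the trigger counter before enabling the TMC.
	 */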
	writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
	writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
{
	u32 rwp, val;

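	/*
	 * TMC_RWP holds the address the ETR would write to next, while the
	 * status register tells us whether the buffer has wrapped around.
	 */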
	rwp = readl_relaxed(drvdata->base + TMC_RWP);
	val = readl_relaxed(drvdata->base + TMC_STS);

	/*
	 * Adjust the buffer to point to the beginning of the trace data
	 * and update the available trace data.
	 */
	if (val & TMC_STS_FULL) {
		drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
		drvdata->len = drvdata->size;
	} else {
		drvdata->buf = drvdata->vaddr;
		drvdata->len = rwp - drvdata->paddr;
	}
}

static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	unsigned long flags;
	void __iomem *vaddr = NULL;
	dma_addr_t paddr;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer, release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->vaddr) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/*
		 * Contiguous memory can't be allocated while a spinlock is
		 * held. As such allocate memory here and free it if a buffer
		 * has already been allocated (from a previous session).
		 */
		vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
					   &paddr, GFP_KERNEL);
		if (!vaddr)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink. Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

	/*
	 * If drvdata::buf == NULL, use the memory allocated above.
	 * Otherwise a buffer still exists from a previous session, so
	 * simply use that.
	 */
	if (drvdata->buf == NULL) {
		used = true;
		drvdata->vaddr = vaddr;
		drvdata->paddr = paddr;
		drvdata->buf = drvdata->vaddr;
	}

	drvdata->mode = CS_MODE_SYSFS;
	tmc_etr_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used && vaddr)
		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

	if (!ret)
		dev_info(drvdata->dev, "TMC-ETR enabled\n");

	return ret;
}

static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * In Perf mode there can be only one writer per sink. There
	 * is also no need to continue if the ETR is already operated
	 * from sysFS.
	 */
	if (drvdata->mode != CS_MODE_DISABLED) {
		ret = -EINVAL;
		goto out;
	}

	drvdata->mode = CS_MODE_PERF;
	tmc_etr_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
{
	switch (mode) {
	case CS_MODE_SYSFS:
		return tmc_enable_etr_sink_sysfs(csdev);
	case CS_MODE_PERF:
		return tmc_enable_etr_sink_perf(csdev);
	}

	/* We shouldn't be here */
	return -EINVAL;
}

static void tmc_disable_etr_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	/* Disable the TMC only if it is currently enabled */
	if (drvdata->mode != CS_MODE_DISABLED) {
		tmc_etr_disable_hw(drvdata);
		drvdata->mode = CS_MODE_DISABLED;
	}

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETR disabled\n");
}

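/* Sink operations the ETR exposes to the CoreSight core */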
static const struct coresight_ops_sink tmc_etr_sink_ops = {
	.enable = tmc_enable_etr_sink,
	.disable = tmc_disable_etr_sink,
};

const struct coresight_ops tmc_etr_cs_ops = {
	.sink_ops = &tmc_etr_sink_ops,
};

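/*
 * tmc_read_prepare_etr() and tmc_read_unprepare_etr() bracket reads of the
 * trace buffer: the former pauses collection (in sysFS mode) and marks the
 * device as being read, the latter resumes tracing or frees the buffer once
 * the read is done.
 */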
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
{
	int ret = 0;
	unsigned long flags;

	/* Config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
	unsigned long flags;
	dma_addr_t paddr;
	void __iomem *vaddr = NULL;

	/* Config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Re-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
		 * so we don't have to explicitly clear it. Also, since the
		 * tracer is still enabled drvdata::buf can't be NULL.
		 */
		tmc_etr_enable_hw(drvdata);
	} else {
		/*
		 * The ETR is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		vaddr = drvdata->vaddr;
		paddr = drvdata->paddr;
		drvdata->buf = drvdata->vaddr = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free allocated memory outside of the spinlock */
	if (vaddr)
		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

	return 0;
}