Merge branches 'clk-tegra', 'clk-imx', 'clk-sifive', 'clk-mediatek' and 'clk-summary' into clk-next
- Support for SiFive FU740 PRCI
- Add hardware enable information to clk_summary debugfs

* clk-tegra:
  clk: tegra: Fix duplicated SE clock entry
  clk: tegra: bpmp: Clamp clock rates on requests
  clk: tegra: Do not return 0 on failure

* clk-imx: (24 commits)
  clk: imx: scu: remove the calling of device_is_bound
  clk: imx: scu: Make pd_np with static keyword
  clk: imx8mq: drop of_match_ptr from of_device_id table
  clk: imx8mp: drop of_match_ptr from of_device_id table
  clk: imx8mn: drop of_match_ptr from of_device_id table
  clk: imx8mm: drop of_match_ptr from of_device_id table
  clk: imx: gate2: Remove unused variable ret
  clk: imx: gate2: Add locking in is_enabled op
  clk: imx: gate2: Add cgr_mask for more flexible number of control bits
  clk: imx: gate2: Check if clock is enabled against cgr_val
  clk: imx: gate2: Keep the register writing in one place
  clk: imx: gate2: Remove the IMX_CLK_GATE2_SINGLE_BIT special case
  clk: imx: scu: fix build break when compiled as modules
  clk: imx: remove redundant assignment to pointer np
  clk: imx: remove unneeded semicolon
  clk: imx: lpcg: add suspend/resume support
  clk: imx: clk-imx8qxp-lpcg: add runtime pm support
  clk: imx: lpcg: allow lpcg clk to take device pointer
  clk: imx: imx8qxp-lpcg: add parsing clocks from device tree
  clk: imx: scu: add suspend/resume support
  ...

* clk-sifive:
  clk: sifive: Add clock enable and disable ops
  clk: sifive: Fix the wrong bit field shift
  clk: sifive: Add a driver for the SiFive FU740 PRCI IP block
  clk: sifive: Use common name for prci configuration
  clk: sifive: Extract prci core to common base
  dt-bindings: fu740: prci: add YAML documentation for the FU740 PRCI

* clk-mediatek:
  clk: mediatek: Make mtk_clk_register_mux() a static function

* clk-summary:
  clk: Add hardware-enable column to clk summary
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 SiFive, Inc.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/sifive/fu740-prci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SiFive FU740 Power Reset Clock Interrupt Controller (PRCI)
+
+maintainers:
+  - Zong Li <zong.li@sifive.com>
+  - Paul Walmsley <paul.walmsley@sifive.com>
+
+description:
+  On the FU740 family of SoCs, most system-wide clock and reset integration
+  is via the PRCI IP block.
+  The clock consumer should specify the desired clock via the clock ID
+  macros defined in include/dt-bindings/clock/sifive-fu740-prci.h.
+  These macros begin with PRCI_CLK_.
+
+  The hfclk and rtcclk nodes are required, and represent physical
+  crystals or resonators located on the PCB. These nodes should be present
+  underneath /, rather than /soc.
+
+properties:
+  compatible:
+    const: sifive,fu740-c000-prci
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: high frequency clock.
+      - description: RTL clock.
+
+  clock-names:
+    items:
+      - const: hfclk
+      - const: rtcclk
+
+  "#clock-cells":
+    const: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - "#clock-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+    prci: clock-controller@10000000 {
+      compatible = "sifive,fu740-c000-prci";
+      reg = <0x10000000 0x1000>;
+      clocks = <&hfclk>, <&rtcclk>;
+      #clock-cells = <1>;
+    };
@@ -5,7 +5,7 @@ config SOC_SIFIVE
        select SERIAL_SIFIVE if TTY
        select SERIAL_SIFIVE_CONSOLE if TTY
        select CLK_SIFIVE
-       select CLK_SIFIVE_FU540_PRCI
+       select CLK_SIFIVE_PRCI
        select SIFIVE_PLIC
        help
          This enables support for SiFive SoC platform hardware.
@@ -2931,7 +2931,14 @@ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
        else
                seq_puts(s, "-----");

-       seq_printf(s, " %6d\n", clk_core_get_scaled_duty_cycle(c, 100000));
+       seq_printf(s, " %6d", clk_core_get_scaled_duty_cycle(c, 100000));
+
+       if (c->ops->is_enabled)
+               seq_printf(s, " %9c\n", clk_core_is_enabled(c) ? 'Y' : 'N');
+       else if (!c->ops->enable)
+               seq_printf(s, " %9c\n", 'Y');
+       else
+               seq_printf(s, " %9c\n", '?');
}

static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,

@@ -2950,9 +2957,9 @@ static int clk_summary_show(struct seq_file *s, void *data)
        struct clk_core *c;
        struct hlist_head **lists = (struct hlist_head **)s->private;

-       seq_puts(s, " enable prepare protect duty\n");
-       seq_puts(s, " clock count count count rate accuracy phase cycle\n");
-       seq_puts(s, "---------------------------------------------------------------------------------------------\n");
+       seq_puts(s, " enable prepare protect duty hardware\n");
+       seq_puts(s, " clock count count count rate accuracy phase cycle enable\n");
+       seq_puts(s, "-------------------------------------------------------------------------------------------------------\n");

        clk_prepare_lock();
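The three-way check added above maps a provider's clk_ops to the new "hardware enable" column: a clock that implements .is_enabled reports its real register state, a clock with no .enable callback at all is treated as always on, and a clock that can gate but cannot be queried is reported as unknown. A minimal hedged sketch of a hypothetical provider (names are illustrative, not from this series) showing how that mapping falls out:

/*
 * Illustration only -- not part of this patch set. Because .is_enabled is
 * populated, the new column prints 'Y' or 'N' for this clock. Dropping the
 * .enable callback entirely would make it print 'Y' unconditionally, and
 * keeping .enable while omitting .is_enabled would make it print '?'.
 */
#include <linux/clk-provider.h>

static bool demo_gate_state;    /* stands in for a real register read */

static int demo_gate_enable(struct clk_hw *hw)
{
        demo_gate_state = true;
        return 0;
}

static void demo_gate_disable(struct clk_hw *hw)
{
        demo_gate_state = false;
}

static int demo_gate_is_enabled(struct clk_hw *hw)
{
        return demo_gate_state;
}

static const struct clk_ops demo_gate_ops = {
        .enable         = demo_gate_enable,
        .disable        = demo_gate_disable,
        .is_enabled     = demo_gate_is_enabled, /* -> column shows 'Y' or 'N' */
};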
@@ -30,6 +30,7 @@ struct clk_gate2 {
        void __iomem *reg;
        u8 bit_idx;
        u8 cgr_val;
+       u8 cgr_mask;
        u8 flags;
        spinlock_t *lock;
        unsigned int *share_count;
@@ -37,37 +38,38 @@ struct clk_gate2 {

#define to_clk_gate2(_hw) container_of(_hw, struct clk_gate2, hw)

-static int clk_gate2_enable(struct clk_hw *hw)
+static void clk_gate2_do_shared_clks(struct clk_hw *hw, bool enable)
{
        struct clk_gate2 *gate = to_clk_gate2(hw);
        u32 reg;

+       reg = readl(gate->reg);
+       reg &= ~(gate->cgr_mask << gate->bit_idx);
+       if (enable)
+               reg |= (gate->cgr_val & gate->cgr_mask) << gate->bit_idx;
+       writel(reg, gate->reg);
+}
+
+static int clk_gate2_enable(struct clk_hw *hw)
+{
+       struct clk_gate2 *gate = to_clk_gate2(hw);
        unsigned long flags;
-       int ret = 0;

        spin_lock_irqsave(gate->lock, flags);

        if (gate->share_count && (*gate->share_count)++ > 0)
                goto out;

-       if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT) {
-               ret = clk_gate_ops.enable(hw);
-       } else {
-               reg = readl(gate->reg);
-               reg &= ~(3 << gate->bit_idx);
-               reg |= gate->cgr_val << gate->bit_idx;
-               writel(reg, gate->reg);
-       }

+       clk_gate2_do_shared_clks(hw, true);
out:
        spin_unlock_irqrestore(gate->lock, flags);

-       return ret;
+       return 0;
}

static void clk_gate2_disable(struct clk_hw *hw)
{
        struct clk_gate2 *gate = to_clk_gate2(hw);
        u32 reg;
        unsigned long flags;

        spin_lock_irqsave(gate->lock, flags);

@@ -79,23 +81,17 @@ static void clk_gate2_disable(struct clk_hw *hw)
                        goto out;
        }

-       if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT) {
-               clk_gate_ops.disable(hw);
-       } else {
-               reg = readl(gate->reg);
-               reg &= ~(3 << gate->bit_idx);
-               writel(reg, gate->reg);
-       }

+       clk_gate2_do_shared_clks(hw, false);
out:
        spin_unlock_irqrestore(gate->lock, flags);
}

-static int clk_gate2_reg_is_enabled(void __iomem *reg, u8 bit_idx)
+static int clk_gate2_reg_is_enabled(void __iomem *reg, u8 bit_idx,
+                                    u8 cgr_val, u8 cgr_mask)
{
        u32 val = readl(reg);

-       if (((val >> bit_idx) & 1) == 1)
+       if (((val >> bit_idx) & cgr_mask) == cgr_val)
                return 1;

        return 0;
@@ -104,29 +100,28 @@ static int clk_gate2_reg_is_enabled(void __iomem *reg, u8 bit_idx)
static int clk_gate2_is_enabled(struct clk_hw *hw)
{
        struct clk_gate2 *gate = to_clk_gate2(hw);
+       unsigned long flags;
+       int ret = 0;

-       if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT)
-               return clk_gate_ops.is_enabled(hw);
+       spin_lock_irqsave(gate->lock, flags);

-       return clk_gate2_reg_is_enabled(gate->reg, gate->bit_idx);
+       ret = clk_gate2_reg_is_enabled(gate->reg, gate->bit_idx,
+                                      gate->cgr_val, gate->cgr_mask);
+
+       spin_unlock_irqrestore(gate->lock, flags);
+
+       return ret;
}

static void clk_gate2_disable_unused(struct clk_hw *hw)
{
        struct clk_gate2 *gate = to_clk_gate2(hw);
        unsigned long flags;
-       u32 reg;

-       if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT)
-               return;
-
        spin_lock_irqsave(gate->lock, flags);

-       if (!gate->share_count || *gate->share_count == 0) {
-               reg = readl(gate->reg);
-               reg &= ~(3 << gate->bit_idx);
-               writel(reg, gate->reg);
-       }
+       if (!gate->share_count || *gate->share_count == 0)
+               clk_gate2_do_shared_clks(hw, false);

        spin_unlock_irqrestore(gate->lock, flags);
}
@@ -140,7 +135,7 @@ static const struct clk_ops clk_gate2_ops = {

struct clk_hw *clk_hw_register_gate2(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
-               void __iomem *reg, u8 bit_idx, u8 cgr_val,
+               void __iomem *reg, u8 bit_idx, u8 cgr_val, u8 cgr_mask,
                u8 clk_gate2_flags, spinlock_t *lock,
                unsigned int *share_count)
{

@@ -157,6 +152,7 @@ struct clk_hw *clk_hw_register_gate2(struct device *dev, const char *name,
        gate->reg = reg;
        gate->bit_idx = bit_idx;
        gate->cgr_val = cgr_val;
+       gate->cgr_mask = cgr_mask;
        gate->flags = clk_gate2_flags;
        gate->lock = lock;
        gate->share_count = share_count;
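To make the cgr_val/cgr_mask semantics concrete, here is a small self-contained sketch (plain userspace C, not kernel code; register value and bit position chosen purely for illustration) of the read-modify-write that clk_gate2_do_shared_clks() performs for a classic two-bit CCM gate at bit 26:

/* Standalone illustration of the gate2 register math; not part of the patch. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t reg = 0;
        const uint8_t bit_idx = 26, cgr_val = 0x3, cgr_mask = 0x3;

        /* enable: clear the field, then write (cgr_val & cgr_mask) into it */
        reg &= ~((uint32_t)cgr_mask << bit_idx);
        reg |= (uint32_t)(cgr_val & cgr_mask) << bit_idx;
        printf("enabled:  0x%08x\n", reg);      /* 0x0c000000 -> bits 27:26 set */

        /* disable: clear the field and write nothing back */
        reg &= ~((uint32_t)cgr_mask << bit_idx);
        printf("disabled: 0x%08x\n", reg);      /* 0x00000000 */
        return 0;
}

With the mask made explicit, the former IMX_CLK_GATE2_SINGLE_BIT special case reduces to cgr_val = cgr_mask = 0x1, which is exactly what the clk.h wrappers later in this series now pass.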
@@ -653,7 +653,7 @@ static struct platform_driver imx8mm_clk_driver = {
         * reloading the driver will crash or break devices.
         */
        .suppress_bind_attrs = true,
-       .of_match_table = of_match_ptr(imx8mm_clk_of_match),
+       .of_match_table = imx8mm_clk_of_match,
        },
};
module_platform_driver(imx8mm_clk_driver);

@@ -604,7 +604,7 @@ static struct platform_driver imx8mn_clk_driver = {
         * reloading the driver will crash or break devices.
         */
        .suppress_bind_attrs = true,
-       .of_match_table = of_match_ptr(imx8mn_clk_of_match),
+       .of_match_table = imx8mn_clk_of_match,
        },
};
module_platform_driver(imx8mn_clk_driver);

@@ -425,7 +425,7 @@ static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1];
static int imx8mp_clocks_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
-       struct device_node *np = dev->of_node;
+       struct device_node *np;
        void __iomem *anatop_base, *ccm_base;
        int i;

@@ -763,7 +763,7 @@ static struct platform_driver imx8mp_clk_driver = {
         * reloading the driver will crash or break devices.
         */
        .suppress_bind_attrs = true,
-       .of_match_table = of_match_ptr(imx8mp_clk_of_match),
+       .of_match_table = imx8mp_clk_of_match,
        },
};
module_platform_driver(imx8mp_clk_driver);

@@ -639,7 +639,7 @@ static struct platform_driver imx8mq_clk_driver = {
         * reloading the driver will crash or break devices.
         */
        .suppress_bind_attrs = true,
-       .of_match_table = of_match_ptr(imx8mq_clk_of_match),
+       .of_match_table = imx8mq_clk_of_match,
        },
};
module_platform_driver(imx8mq_clk_driver);
@@ -9,8 +9,10 @@
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "clk-scu.h"
@@ -157,6 +159,135 @@ static const struct imx8qxp_ss_lpcg imx8qxp_ss_lsio = {
        .num_max = IMX_LSIO_LPCG_CLK_END,
};

+#define IMX_LPCG_MAX_CLKS       8
+
+static struct clk_hw *imx_lpcg_of_clk_src_get(struct of_phandle_args *clkspec,
+                                              void *data)
+{
+       struct clk_hw_onecell_data *hw_data = data;
+       unsigned int idx = clkspec->args[0] / 4;
+
+       if (idx >= hw_data->num) {
+               pr_err("%s: invalid index %u\n", __func__, idx);
+               return ERR_PTR(-EINVAL);
+       }
+
+       return hw_data->hws[idx];
+}
+
+static int imx_lpcg_parse_clks_from_dt(struct platform_device *pdev,
+                                       struct device_node *np)
+{
+       const char *output_names[IMX_LPCG_MAX_CLKS];
+       const char *parent_names[IMX_LPCG_MAX_CLKS];
+       unsigned int bit_offset[IMX_LPCG_MAX_CLKS];
+       struct clk_hw_onecell_data *clk_data;
+       struct clk_hw **clk_hws;
+       struct resource *res;
+       void __iomem *base;
+       int count;
+       int idx;
+       int ret;
+       int i;
+
+       if (!of_device_is_compatible(np, "fsl,imx8qxp-lpcg"))
+               return -EINVAL;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       count = of_property_count_u32_elems(np, "clock-indices");
+       if (count < 0) {
+               dev_err(&pdev->dev, "failed to count clocks\n");
+               return -EINVAL;
+       }
+
+       /*
+        * A trick here is that we set the num of clks to the MAX instead
+        * of the count from clock-indices because one LPCG supports up to
+        * 8 clock outputs which each of them is fixed to 4 bits. Then we can
+        * easily get the clock by clk-indices (bit-offset) / 4.
+        * And the cost is very limited few pointers.
+        */
+
+       clk_data = devm_kzalloc(&pdev->dev, struct_size(clk_data, hws,
+                               IMX_LPCG_MAX_CLKS), GFP_KERNEL);
+       if (!clk_data)
+               return -ENOMEM;
+
+       clk_data->num = IMX_LPCG_MAX_CLKS;
+       clk_hws = clk_data->hws;
+
+       ret = of_property_read_u32_array(np, "clock-indices", bit_offset,
+                                        count);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "failed to read clock-indices\n");
+               return -EINVAL;
+       }
+
+       ret = of_clk_parent_fill(np, parent_names, count);
+       if (ret != count) {
+               dev_err(&pdev->dev, "failed to get clock parent names\n");
+               return count;
+       }
+
+       ret = of_property_read_string_array(np, "clock-output-names",
+                                           output_names, count);
+       if (ret != count) {
+               dev_err(&pdev->dev, "failed to read clock-output-names\n");
+               return -EINVAL;
+       }
+
+       pm_runtime_get_noresume(&pdev->dev);
+       pm_runtime_set_active(&pdev->dev);
+       pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
+       pm_runtime_use_autosuspend(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+
+       for (i = 0; i < count; i++) {
+               idx = bit_offset[i] / 4;
+               if (idx > IMX_LPCG_MAX_CLKS) {
+                       dev_warn(&pdev->dev, "invalid bit offset of clock %d\n",
+                                i);
+                       ret = -EINVAL;
+                       goto unreg;
+               }
+
+               clk_hws[idx] = imx_clk_lpcg_scu_dev(&pdev->dev, output_names[i],
+                                                   parent_names[i], 0, base,
+                                                   bit_offset[i], false);
+               if (IS_ERR(clk_hws[idx])) {
+                       dev_warn(&pdev->dev, "failed to register clock %d\n",
+                                idx);
+                       ret = PTR_ERR(clk_hws[idx]);
+                       goto unreg;
+               }
+       }
+
+       ret = devm_of_clk_add_hw_provider(&pdev->dev, imx_lpcg_of_clk_src_get,
+                                         clk_data);
+       if (ret)
+               goto unreg;
+
+       pm_runtime_mark_last_busy(&pdev->dev);
+       pm_runtime_put_autosuspend(&pdev->dev);
+
+       return 0;
+
+unreg:
+       while (--i >= 0) {
+               idx = bit_offset[i] / 4;
+               if (clk_hws[idx])
+                       imx_clk_lpcg_scu_unregister(clk_hws[idx]);
+       }
+
+       pm_runtime_disable(&pdev->dev);
+
+       return ret;
+}
+
static int imx8qxp_lpcg_clk_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
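The comment inside imx_lpcg_parse_clks_from_dt() above is the key to the new one-cell LPCG binding: the clock specifier is the bit offset inside the LPCG word, and both the registration loop and imx_lpcg_of_clk_src_get() simply divide by four to find the array slot. A tiny hedged illustration of that mapping (the offsets here are examples, not taken from a real LPCG node):

/* Illustration only: how a clock-indices value maps to a hw_clks[] slot. */
#include <stdio.h>

int main(void)
{
        /* each LPCG output owns a 4-bit field, so at most 8 outputs per block */
        const unsigned int bit_offset[] = { 0, 4, 16, 20 };
        unsigned int i;

        for (i = 0; i < sizeof(bit_offset) / sizeof(bit_offset[0]); i++)
                printf("clock-indices <%u> -> hw slot %u\n",
                       bit_offset[i], bit_offset[i] / 4);
        /* prints slots 0, 1, 4 and 5 */
        return 0;
}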
@@ -167,8 +298,14 @@ static int imx8qxp_lpcg_clk_probe(struct platform_device *pdev)
        struct resource *res;
        struct clk_hw **clks;
        void __iomem *base;
+       int ret;
        int i;

+       /* try new binding to parse clocks from device tree first */
+       ret = imx_lpcg_parse_clks_from_dt(pdev, np);
+       if (!ret)
+               return 0;
+
        ss_lpcg = of_device_get_match_data(dev);
        if (!ss_lpcg)
                return -ENODEV;

@@ -219,6 +356,7 @@ static const struct of_device_id imx8qxp_lpcg_match[] = {
        { .compatible = "fsl,imx8qxp-lpcg-adma", &imx8qxp_ss_adma, },
        { .compatible = "fsl,imx8qxp-lpcg-conn", &imx8qxp_ss_conn, },
        { .compatible = "fsl,imx8qxp-lpcg-lsio", &imx8qxp_ss_lsio, },
+       { .compatible = "fsl,imx8qxp-lpcg", NULL },
        { /* sentinel */ }
};

@@ -226,6 +364,7 @@ static struct platform_driver imx8qxp_lpcg_clk_driver = {
        .driver = {
                .name = "imx8qxp-lpcg-clk",
                .of_match_table = imx8qxp_lpcg_match,
+               .pm = &imx_clk_lpcg_scu_pm_ops,
                .suppress_bind_attrs = true,
        },
        .probe = imx8qxp_lpcg_clk_probe,
@@ -22,9 +22,10 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
        struct device_node *ccm_node = pdev->dev.of_node;
        struct clk_hw_onecell_data *clk_data;
        struct clk_hw **clks;
+       u32 clk_cells;
        int ret, i;

-       ret = imx_clk_scu_init();
+       ret = imx_clk_scu_init(ccm_node);
        if (ret)
                return ret;

@@ -33,6 +34,9 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
        if (!clk_data)
                return -ENOMEM;

+       if (of_property_read_u32(ccm_node, "#clock-cells", &clk_cells))
+               return -EINVAL;
+
        clk_data->num = IMX_SCU_CLK_END;
        clks = clk_data->hws;

@ -55,78 +59,78 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
|
|||
clks[IMX_LSIO_BUS_CLK] = clk_hw_register_fixed_rate(NULL, "lsio_bus_clk_root", NULL, 0, 100000000);
|
||||
|
||||
/* ARM core */
|
||||
clks[IMX_A35_CLK] = imx_clk_scu("a35_clk", IMX_SC_R_A35, IMX_SC_PM_CLK_CPU);
|
||||
clks[IMX_A35_CLK] = imx_clk_scu("a35_clk", IMX_SC_R_A35, IMX_SC_PM_CLK_CPU, clk_cells);
|
||||
|
||||
/* LSIO SS */
|
||||
clks[IMX_LSIO_PWM0_CLK] = imx_clk_scu("pwm0_clk", IMX_SC_R_PWM_0, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_LSIO_PWM1_CLK] = imx_clk_scu("pwm1_clk", IMX_SC_R_PWM_1, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_LSIO_PWM2_CLK] = imx_clk_scu("pwm2_clk", IMX_SC_R_PWM_2, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_LSIO_PWM3_CLK] = imx_clk_scu("pwm3_clk", IMX_SC_R_PWM_3, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_LSIO_PWM4_CLK] = imx_clk_scu("pwm4_clk", IMX_SC_R_PWM_4, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_LSIO_PWM5_CLK] = imx_clk_scu("pwm5_clk", IMX_SC_R_PWM_5, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_LSIO_PWM6_CLK] = imx_clk_scu("pwm6_clk", IMX_SC_R_PWM_6, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_LSIO_PWM7_CLK] = imx_clk_scu("pwm7_clk", IMX_SC_R_PWM_7, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_LSIO_GPT0_CLK] = imx_clk_scu("gpt0_clk", IMX_SC_R_GPT_0, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_LSIO_GPT1_CLK] = imx_clk_scu("gpt1_clk", IMX_SC_R_GPT_1, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_LSIO_GPT2_CLK] = imx_clk_scu("gpt2_clk", IMX_SC_R_GPT_2, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_LSIO_GPT3_CLK] = imx_clk_scu("gpt3_clk", IMX_SC_R_GPT_3, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_LSIO_GPT4_CLK] = imx_clk_scu("gpt4_clk", IMX_SC_R_GPT_4, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_LSIO_FSPI0_CLK] = imx_clk_scu("fspi0_clk", IMX_SC_R_FSPI_0, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_LSIO_FSPI1_CLK] = imx_clk_scu("fspi1_clk", IMX_SC_R_FSPI_1, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_LSIO_PWM0_CLK] = imx_clk_scu("pwm0_clk", IMX_SC_R_PWM_0, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_LSIO_PWM1_CLK] = imx_clk_scu("pwm1_clk", IMX_SC_R_PWM_1, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_LSIO_PWM2_CLK] = imx_clk_scu("pwm2_clk", IMX_SC_R_PWM_2, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_LSIO_PWM3_CLK] = imx_clk_scu("pwm3_clk", IMX_SC_R_PWM_3, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_LSIO_PWM4_CLK] = imx_clk_scu("pwm4_clk", IMX_SC_R_PWM_4, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_LSIO_PWM5_CLK] = imx_clk_scu("pwm5_clk", IMX_SC_R_PWM_5, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_LSIO_PWM6_CLK] = imx_clk_scu("pwm6_clk", IMX_SC_R_PWM_6, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_LSIO_PWM7_CLK] = imx_clk_scu("pwm7_clk", IMX_SC_R_PWM_7, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_LSIO_GPT0_CLK] = imx_clk_scu("gpt0_clk", IMX_SC_R_GPT_0, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_LSIO_GPT1_CLK] = imx_clk_scu("gpt1_clk", IMX_SC_R_GPT_1, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_LSIO_GPT2_CLK] = imx_clk_scu("gpt2_clk", IMX_SC_R_GPT_2, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_LSIO_GPT3_CLK] = imx_clk_scu("gpt3_clk", IMX_SC_R_GPT_3, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_LSIO_GPT4_CLK] = imx_clk_scu("gpt4_clk", IMX_SC_R_GPT_4, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_LSIO_FSPI0_CLK] = imx_clk_scu("fspi0_clk", IMX_SC_R_FSPI_0, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_LSIO_FSPI1_CLK] = imx_clk_scu("fspi1_clk", IMX_SC_R_FSPI_1, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
|
||||
/* ADMA SS */
|
||||
clks[IMX_ADMA_UART0_CLK] = imx_clk_scu("uart0_clk", IMX_SC_R_UART_0, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_ADMA_UART1_CLK] = imx_clk_scu("uart1_clk", IMX_SC_R_UART_1, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_ADMA_UART2_CLK] = imx_clk_scu("uart2_clk", IMX_SC_R_UART_2, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_ADMA_UART3_CLK] = imx_clk_scu("uart3_clk", IMX_SC_R_UART_3, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_ADMA_SPI0_CLK] = imx_clk_scu("spi0_clk", IMX_SC_R_SPI_0, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_ADMA_SPI1_CLK] = imx_clk_scu("spi1_clk", IMX_SC_R_SPI_1, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_ADMA_SPI2_CLK] = imx_clk_scu("spi2_clk", IMX_SC_R_SPI_2, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_ADMA_SPI3_CLK] = imx_clk_scu("spi3_clk", IMX_SC_R_SPI_3, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_ADMA_CAN0_CLK] = imx_clk_scu("can0_clk", IMX_SC_R_CAN_0, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_ADMA_I2C0_CLK] = imx_clk_scu("i2c0_clk", IMX_SC_R_I2C_0, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_ADMA_I2C1_CLK] = imx_clk_scu("i2c1_clk", IMX_SC_R_I2C_1, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_ADMA_I2C2_CLK] = imx_clk_scu("i2c2_clk", IMX_SC_R_I2C_2, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_ADMA_I2C3_CLK] = imx_clk_scu("i2c3_clk", IMX_SC_R_I2C_3, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_ADMA_FTM0_CLK] = imx_clk_scu("ftm0_clk", IMX_SC_R_FTM_0, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_ADMA_FTM1_CLK] = imx_clk_scu("ftm1_clk", IMX_SC_R_FTM_1, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_ADMA_ADC0_CLK] = imx_clk_scu("adc0_clk", IMX_SC_R_ADC_0, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_ADMA_PWM_CLK] = imx_clk_scu("pwm_clk", IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_ADMA_LCD_CLK] = imx_clk_scu("lcd_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_ADMA_UART0_CLK] = imx_clk_scu("uart0_clk", IMX_SC_R_UART_0, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_ADMA_UART1_CLK] = imx_clk_scu("uart1_clk", IMX_SC_R_UART_1, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_ADMA_UART2_CLK] = imx_clk_scu("uart2_clk", IMX_SC_R_UART_2, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_ADMA_UART3_CLK] = imx_clk_scu("uart3_clk", IMX_SC_R_UART_3, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_ADMA_SPI0_CLK] = imx_clk_scu("spi0_clk", IMX_SC_R_SPI_0, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_ADMA_SPI1_CLK] = imx_clk_scu("spi1_clk", IMX_SC_R_SPI_1, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_ADMA_SPI2_CLK] = imx_clk_scu("spi2_clk", IMX_SC_R_SPI_2, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_ADMA_SPI3_CLK] = imx_clk_scu("spi3_clk", IMX_SC_R_SPI_3, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_ADMA_CAN0_CLK] = imx_clk_scu("can0_clk", IMX_SC_R_CAN_0, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_ADMA_I2C0_CLK] = imx_clk_scu("i2c0_clk", IMX_SC_R_I2C_0, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_ADMA_I2C1_CLK] = imx_clk_scu("i2c1_clk", IMX_SC_R_I2C_1, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_ADMA_I2C2_CLK] = imx_clk_scu("i2c2_clk", IMX_SC_R_I2C_2, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_ADMA_I2C3_CLK] = imx_clk_scu("i2c3_clk", IMX_SC_R_I2C_3, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_ADMA_FTM0_CLK] = imx_clk_scu("ftm0_clk", IMX_SC_R_FTM_0, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_ADMA_FTM1_CLK] = imx_clk_scu("ftm1_clk", IMX_SC_R_FTM_1, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_ADMA_ADC0_CLK] = imx_clk_scu("adc0_clk", IMX_SC_R_ADC_0, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_ADMA_PWM_CLK] = imx_clk_scu("pwm_clk", IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_ADMA_LCD_CLK] = imx_clk_scu("lcd_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
|
||||
/* Connectivity */
|
||||
clks[IMX_CONN_SDHC0_CLK] = imx_clk_scu("sdhc0_clk", IMX_SC_R_SDHC_0, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_CONN_SDHC1_CLK] = imx_clk_scu("sdhc1_clk", IMX_SC_R_SDHC_1, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_CONN_SDHC2_CLK] = imx_clk_scu("sdhc2_clk", IMX_SC_R_SDHC_2, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_CONN_ENET0_ROOT_CLK] = imx_clk_scu("enet0_clk", IMX_SC_R_ENET_0, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_CONN_ENET0_BYPASS_CLK] = imx_clk_scu("enet0_bypass_clk", IMX_SC_R_ENET_0, IMX_SC_PM_CLK_BYPASS);
|
||||
clks[IMX_CONN_ENET0_RGMII_CLK] = imx_clk_scu("enet0_rgmii_clk", IMX_SC_R_ENET_0, IMX_SC_PM_CLK_MISC0);
|
||||
clks[IMX_CONN_ENET1_ROOT_CLK] = imx_clk_scu("enet1_clk", IMX_SC_R_ENET_1, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_CONN_ENET1_BYPASS_CLK] = imx_clk_scu("enet1_bypass_clk", IMX_SC_R_ENET_1, IMX_SC_PM_CLK_BYPASS);
|
||||
clks[IMX_CONN_ENET1_RGMII_CLK] = imx_clk_scu("enet1_rgmii_clk", IMX_SC_R_ENET_1, IMX_SC_PM_CLK_MISC0);
|
||||
clks[IMX_CONN_GPMI_BCH_IO_CLK] = imx_clk_scu("gpmi_io_clk", IMX_SC_R_NAND, IMX_SC_PM_CLK_MST_BUS);
|
||||
clks[IMX_CONN_GPMI_BCH_CLK] = imx_clk_scu("gpmi_bch_clk", IMX_SC_R_NAND, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_CONN_USB2_ACLK] = imx_clk_scu("usb3_aclk_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_CONN_USB2_BUS_CLK] = imx_clk_scu("usb3_bus_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MST_BUS);
|
||||
clks[IMX_CONN_USB2_LPM_CLK] = imx_clk_scu("usb3_lpm_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MISC);
|
||||
clks[IMX_CONN_SDHC0_CLK] = imx_clk_scu("sdhc0_clk", IMX_SC_R_SDHC_0, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_CONN_SDHC1_CLK] = imx_clk_scu("sdhc1_clk", IMX_SC_R_SDHC_1, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_CONN_SDHC2_CLK] = imx_clk_scu("sdhc2_clk", IMX_SC_R_SDHC_2, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_CONN_ENET0_ROOT_CLK] = imx_clk_scu("enet0_clk", IMX_SC_R_ENET_0, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_CONN_ENET0_BYPASS_CLK] = imx_clk_scu("enet0_bypass_clk", IMX_SC_R_ENET_0, IMX_SC_PM_CLK_BYPASS, clk_cells);
|
||||
clks[IMX_CONN_ENET0_RGMII_CLK] = imx_clk_scu("enet0_rgmii_clk", IMX_SC_R_ENET_0, IMX_SC_PM_CLK_MISC0, clk_cells);
|
||||
clks[IMX_CONN_ENET1_ROOT_CLK] = imx_clk_scu("enet1_clk", IMX_SC_R_ENET_1, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_CONN_ENET1_BYPASS_CLK] = imx_clk_scu("enet1_bypass_clk", IMX_SC_R_ENET_1, IMX_SC_PM_CLK_BYPASS, clk_cells);
|
||||
clks[IMX_CONN_ENET1_RGMII_CLK] = imx_clk_scu("enet1_rgmii_clk", IMX_SC_R_ENET_1, IMX_SC_PM_CLK_MISC0, clk_cells);
|
||||
clks[IMX_CONN_GPMI_BCH_IO_CLK] = imx_clk_scu("gpmi_io_clk", IMX_SC_R_NAND, IMX_SC_PM_CLK_MST_BUS, clk_cells);
|
||||
clks[IMX_CONN_GPMI_BCH_CLK] = imx_clk_scu("gpmi_bch_clk", IMX_SC_R_NAND, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_CONN_USB2_ACLK] = imx_clk_scu("usb3_aclk_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_CONN_USB2_BUS_CLK] = imx_clk_scu("usb3_bus_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MST_BUS, clk_cells);
|
||||
clks[IMX_CONN_USB2_LPM_CLK] = imx_clk_scu("usb3_lpm_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MISC, clk_cells);
|
||||
|
||||
/* Display controller SS */
|
||||
clks[IMX_DC0_DISP0_CLK] = imx_clk_scu("dc0_disp0_clk", IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0);
|
||||
clks[IMX_DC0_DISP1_CLK] = imx_clk_scu("dc0_disp1_clk", IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1);
|
||||
clks[IMX_DC0_DISP0_CLK] = imx_clk_scu("dc0_disp0_clk", IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0, clk_cells);
|
||||
clks[IMX_DC0_DISP1_CLK] = imx_clk_scu("dc0_disp1_clk", IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1, clk_cells);
|
||||
|
||||
/* MIPI-LVDS SS */
|
||||
clks[IMX_MIPI0_I2C0_CLK] = imx_clk_scu("mipi0_i2c0_clk", IMX_SC_R_MIPI_0_I2C_0, IMX_SC_PM_CLK_MISC2);
|
||||
clks[IMX_MIPI0_I2C1_CLK] = imx_clk_scu("mipi0_i2c1_clk", IMX_SC_R_MIPI_0_I2C_1, IMX_SC_PM_CLK_MISC2);
|
||||
clks[IMX_MIPI0_I2C0_CLK] = imx_clk_scu("mipi0_i2c0_clk", IMX_SC_R_MIPI_0_I2C_0, IMX_SC_PM_CLK_MISC2, clk_cells);
|
||||
clks[IMX_MIPI0_I2C1_CLK] = imx_clk_scu("mipi0_i2c1_clk", IMX_SC_R_MIPI_0_I2C_1, IMX_SC_PM_CLK_MISC2, clk_cells);
|
||||
|
||||
/* MIPI CSI SS */
|
||||
clks[IMX_CSI0_CORE_CLK] = imx_clk_scu("mipi_csi0_core_clk", IMX_SC_R_CSI_0, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_CSI0_ESC_CLK] = imx_clk_scu("mipi_csi0_esc_clk", IMX_SC_R_CSI_0, IMX_SC_PM_CLK_MISC);
|
||||
clks[IMX_CSI0_I2C0_CLK] = imx_clk_scu("mipi_csi0_i2c0_clk", IMX_SC_R_CSI_0_I2C_0, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_CSI0_PWM0_CLK] = imx_clk_scu("mipi_csi0_pwm0_clk", IMX_SC_R_CSI_0_PWM_0, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_CSI0_CORE_CLK] = imx_clk_scu("mipi_csi0_core_clk", IMX_SC_R_CSI_0, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_CSI0_ESC_CLK] = imx_clk_scu("mipi_csi0_esc_clk", IMX_SC_R_CSI_0, IMX_SC_PM_CLK_MISC, clk_cells);
|
||||
clks[IMX_CSI0_I2C0_CLK] = imx_clk_scu("mipi_csi0_i2c0_clk", IMX_SC_R_CSI_0_I2C_0, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_CSI0_PWM0_CLK] = imx_clk_scu("mipi_csi0_pwm0_clk", IMX_SC_R_CSI_0_PWM_0, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
|
||||
/* GPU SS */
|
||||
clks[IMX_GPU0_CORE_CLK] = imx_clk_scu("gpu_core0_clk", IMX_SC_R_GPU_0_PID0, IMX_SC_PM_CLK_PER);
|
||||
clks[IMX_GPU0_SHADER_CLK] = imx_clk_scu("gpu_shader0_clk", IMX_SC_R_GPU_0_PID0, IMX_SC_PM_CLK_MISC);
|
||||
clks[IMX_GPU0_CORE_CLK] = imx_clk_scu("gpu_core0_clk", IMX_SC_R_GPU_0_PID0, IMX_SC_PM_CLK_PER, clk_cells);
|
||||
clks[IMX_GPU0_SHADER_CLK] = imx_clk_scu("gpu_shader0_clk", IMX_SC_R_GPU_0_PID0, IMX_SC_PM_CLK_MISC, clk_cells);
|
||||
|
||||
for (i = 0; i < clk_data->num; i++) {
|
||||
if (IS_ERR(clks[i]))
|
||||
|
@@ -134,7 +138,19 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
                                i, PTR_ERR(clks[i]));
        }

-       return of_clk_add_hw_provider(ccm_node, of_clk_hw_onecell_get, clk_data);
+       if (clk_cells == 2) {
+               ret = of_clk_add_hw_provider(ccm_node, imx_scu_of_clk_src_get, imx_scu_clks);
+               if (ret)
+                       imx_clk_scu_unregister();
+       } else {
+               /*
+                * legacy binding code path doesn't unregister here because
+                * it will be removed later.
+                */
+               ret = of_clk_add_hw_provider(ccm_node, of_clk_hw_onecell_get, clk_data);
+       }
+
+       return ret;
}

static const struct of_device_id imx8qxp_match[] = {
|
||||
|
|
|
@@ -34,6 +34,9 @@ struct clk_lpcg_scu {
        void __iomem *reg;
        u8 bit_idx;
        bool hw_gate;
+
+       /* for state save&restore */
+       u32 state;
};

#define to_clk_lpcg_scu(_hw) container_of(_hw, struct clk_lpcg_scu, hw)

@@ -81,9 +84,9 @@ static const struct clk_ops clk_lpcg_scu_ops = {
        .disable = clk_lpcg_scu_disable,
};

-struct clk_hw *imx_clk_lpcg_scu(const char *name, const char *parent_name,
-                               unsigned long flags, void __iomem *reg,
-                               u8 bit_idx, bool hw_gate)
+struct clk_hw *__imx_clk_lpcg_scu(struct device *dev, const char *name,
+                                 const char *parent_name, unsigned long flags,
+                                 void __iomem *reg, u8 bit_idx, bool hw_gate)
{
        struct clk_lpcg_scu *clk;
        struct clk_init_data init;

@@ -107,11 +110,53 @@ struct clk_hw *imx_clk_lpcg_scu(const char *name, const char *parent_name,
        clk->hw.init = &init;

        hw = &clk->hw;
-       ret = clk_hw_register(NULL, hw);
+       ret = clk_hw_register(dev, hw);
        if (ret) {
                kfree(clk);
                hw = ERR_PTR(ret);
        }

+       if (dev)
+               dev_set_drvdata(dev, clk);
+
        return hw;
}

+void imx_clk_lpcg_scu_unregister(struct clk_hw *hw)
+{
+       struct clk_lpcg_scu *clk = to_clk_lpcg_scu(hw);
+
+       clk_hw_unregister(&clk->hw);
+       kfree(clk);
+}
+
+static int __maybe_unused imx_clk_lpcg_scu_suspend(struct device *dev)
+{
+       struct clk_lpcg_scu *clk = dev_get_drvdata(dev);
+
+       clk->state = readl_relaxed(clk->reg);
+       dev_dbg(dev, "save lpcg state 0x%x\n", clk->state);
+
+       return 0;
+}
+
+static int __maybe_unused imx_clk_lpcg_scu_resume(struct device *dev)
+{
+       struct clk_lpcg_scu *clk = dev_get_drvdata(dev);
+
+       /*
+        * FIXME: Sometimes writes don't work unless the CPU issues
+        * them twice
+        */
+
+       writel(clk->state, clk->reg);
+       writel(clk->state, clk->reg);
+       dev_dbg(dev, "restore lpcg state 0x%x\n", clk->state);
+
+       return 0;
+}
+
+const struct dev_pm_ops imx_clk_lpcg_scu_pm_ops = {
+       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_clk_lpcg_scu_suspend,
+                                     imx_clk_lpcg_scu_resume)
+};
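For the suspend/resume handlers above to find the gate's register, the clock has to be registered against a struct device so that dev_set_drvdata()/dev_get_drvdata() line up. A hedged sketch of a caller using the new device-aware helper (the clock name, parent and offsets are invented for illustration):

/*
 * Illustration only, assuming a hypothetical LPCG word at base + 0x10 whose
 * bit 16 gates "demo_lpcg_clk". Registering with a device pointer is what
 * lets imx_clk_lpcg_scu_pm_ops save and restore the register across suspend.
 */
#include "clk-scu.h"

static struct clk_hw *demo_lpcg_register(struct device *dev, void __iomem *base)
{
        return imx_clk_lpcg_scu_dev(dev, "demo_lpcg_clk", "demo_parent",
                                    0, base + 0x10, 16, false);
}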
@@ -416,7 +416,7 @@ struct clk_hw *imx_dev_clk_hw_pll14xx(struct device *dev, const char *name,
                        __func__, name);
                kfree(pll);
                return ERR_PTR(-EINVAL);
-       };
+       }

        pll->base = base;
        pll->hw.init = &init;
@@ -8,6 +8,10 @@
#include <linux/arm-smccc.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "clk-scu.h"

@@ -16,6 +20,21 @@
#define IMX_SIP_SET_CPUFREQ            0x00

static struct imx_sc_ipc *ccm_ipc_handle;
+static struct device_node *pd_np;
+static struct platform_driver imx_clk_scu_driver;
+
+struct imx_scu_clk_node {
+       const char *name;
+       u32 rsrc;
+       u8 clk_type;
+       const char * const *parents;
+       int num_parents;
+
+       struct clk_hw *hw;
+       struct list_head node;
+};
+
+struct list_head imx_scu_clks[IMX_SC_R_LAST];

/*
 * struct clk_scu - Description of one SCU clock

@@ -27,6 +46,10 @@ struct clk_scu {
        struct clk_hw hw;
        u16 rsrc_id;
        u8 clk_type;
+
+       /* for state save&restore */
+       bool is_enabled;
+       u32 rate;
};

/*

@@ -128,9 +151,28 @@ static inline struct clk_scu *to_clk_scu(struct clk_hw *hw)
        return container_of(hw, struct clk_scu, hw);
}

-int imx_clk_scu_init(void)
+int imx_clk_scu_init(struct device_node *np)
{
-       return imx_scu_get_handle(&ccm_ipc_handle);
+       u32 clk_cells;
+       int ret, i;
+
+       ret = imx_scu_get_handle(&ccm_ipc_handle);
+       if (ret)
+               return ret;
+
+       of_property_read_u32(np, "#clock-cells", &clk_cells);
+
+       if (clk_cells == 2) {
+               for (i = 0; i < IMX_SC_R_LAST; i++)
+                       INIT_LIST_HEAD(&imx_scu_clks[i]);
+
+               /* pd_np will be used to attach power domains later */
+               pd_np = of_find_compatible_node(NULL, NULL, "fsl,scu-pd");
+               if (!pd_np)
+                       return -EINVAL;
+       }
+
+       return platform_driver_register(&imx_clk_scu_driver);
}

/*
@ -344,8 +386,9 @@ static const struct clk_ops clk_scu_cpu_ops = {
|
|||
.unprepare = clk_scu_unprepare,
|
||||
};
|
||||
|
||||
struct clk_hw *__imx_clk_scu(const char *name, const char * const *parents,
|
||||
int num_parents, u32 rsrc_id, u8 clk_type)
|
||||
struct clk_hw *__imx_clk_scu(struct device *dev, const char *name,
|
||||
const char * const *parents, int num_parents,
|
||||
u32 rsrc_id, u8 clk_type)
|
||||
{
|
||||
struct clk_init_data init;
|
||||
struct clk_scu *clk;
|
||||
|
@ -379,11 +422,185 @@ struct clk_hw *__imx_clk_scu(const char *name, const char * const *parents,
|
|||
clk->hw.init = &init;
|
||||
|
||||
hw = &clk->hw;
|
||||
ret = clk_hw_register(NULL, hw);
|
||||
ret = clk_hw_register(dev, hw);
|
||||
if (ret) {
|
||||
kfree(clk);
|
||||
hw = ERR_PTR(ret);
|
||||
}
|
||||
|
||||
if (dev)
|
||||
dev_set_drvdata(dev, clk);
|
||||
|
||||
return hw;
|
||||
}
|
||||
|
||||
struct clk_hw *imx_scu_of_clk_src_get(struct of_phandle_args *clkspec,
|
||||
void *data)
|
||||
{
|
||||
unsigned int rsrc = clkspec->args[0];
|
||||
unsigned int idx = clkspec->args[1];
|
||||
struct list_head *scu_clks = data;
|
||||
struct imx_scu_clk_node *clk;
|
||||
|
||||
list_for_each_entry(clk, &scu_clks[rsrc], node) {
|
||||
if (clk->clk_type == idx)
|
||||
return clk->hw;
|
||||
}
|
||||
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static int imx_clk_scu_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct imx_scu_clk_node *clk = dev_get_platdata(dev);
|
||||
struct clk_hw *hw;
|
||||
int ret;
|
||||
|
||||
pm_runtime_set_suspended(dev);
|
||||
pm_runtime_set_autosuspend_delay(dev, 50);
|
||||
pm_runtime_use_autosuspend(&pdev->dev);
|
||||
pm_runtime_enable(dev);
|
||||
|
||||
ret = pm_runtime_get_sync(dev);
|
||||
if (ret) {
|
||||
pm_runtime_disable(dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
hw = __imx_clk_scu(dev, clk->name, clk->parents, clk->num_parents,
|
||||
clk->rsrc, clk->clk_type);
|
||||
if (IS_ERR(hw)) {
|
||||
pm_runtime_disable(dev);
|
||||
return PTR_ERR(hw);
|
||||
}
|
||||
|
||||
clk->hw = hw;
|
||||
list_add_tail(&clk->node, &imx_scu_clks[clk->rsrc]);
|
||||
|
||||
pm_runtime_mark_last_busy(&pdev->dev);
|
||||
pm_runtime_put_autosuspend(&pdev->dev);
|
||||
|
||||
dev_dbg(dev, "register SCU clock rsrc:%d type:%d\n", clk->rsrc,
|
||||
clk->clk_type);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __maybe_unused imx_clk_scu_suspend(struct device *dev)
|
||||
{
|
||||
struct clk_scu *clk = dev_get_drvdata(dev);
|
||||
|
||||
clk->rate = clk_hw_get_rate(&clk->hw);
|
||||
clk->is_enabled = clk_hw_is_enabled(&clk->hw);
|
||||
|
||||
if (clk->rate)
|
||||
dev_dbg(dev, "save rate %d\n", clk->rate);
|
||||
|
||||
if (clk->is_enabled)
|
||||
dev_dbg(dev, "save enabled state\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __maybe_unused imx_clk_scu_resume(struct device *dev)
|
||||
{
|
||||
struct clk_scu *clk = dev_get_drvdata(dev);
|
||||
int ret = 0;
|
||||
|
||||
if (clk->rate) {
|
||||
ret = clk_scu_set_rate(&clk->hw, clk->rate, 0);
|
||||
dev_dbg(dev, "restore rate %d %s\n", clk->rate,
|
||||
!ret ? "success" : "failed");
|
||||
}
|
||||
|
||||
if (clk->is_enabled) {
|
||||
ret = clk_scu_prepare(&clk->hw);
|
||||
dev_dbg(dev, "restore enabled state %s\n",
|
||||
!ret ? "success" : "failed");
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct dev_pm_ops imx_clk_scu_pm_ops = {
|
||||
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_clk_scu_suspend,
|
||||
imx_clk_scu_resume)
|
||||
};
|
||||
|
||||
static struct platform_driver imx_clk_scu_driver = {
|
||||
.driver = {
|
||||
.name = "imx-scu-clk",
|
||||
.suppress_bind_attrs = true,
|
||||
.pm = &imx_clk_scu_pm_ops,
|
||||
},
|
||||
.probe = imx_clk_scu_probe,
|
||||
};
|
||||
|
||||
static int imx_clk_scu_attach_pd(struct device *dev, u32 rsrc_id)
|
||||
{
|
||||
struct of_phandle_args genpdspec = {
|
||||
.np = pd_np,
|
||||
.args_count = 1,
|
||||
.args[0] = rsrc_id,
|
||||
};
|
||||
|
||||
if (rsrc_id == IMX_SC_R_A35 || rsrc_id == IMX_SC_R_A53 ||
|
||||
rsrc_id == IMX_SC_R_A72)
|
||||
return 0;
|
||||
|
||||
return of_genpd_add_device(&genpdspec, dev);
|
||||
}
|
||||
|
||||
struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
|
||||
const char * const *parents,
|
||||
int num_parents, u32 rsrc_id, u8 clk_type)
|
||||
{
|
||||
struct imx_scu_clk_node clk = {
|
||||
.name = name,
|
||||
.rsrc = rsrc_id,
|
||||
.clk_type = clk_type,
|
||||
.parents = parents,
|
||||
.num_parents = num_parents,
|
||||
};
|
||||
struct platform_device *pdev;
|
||||
int ret;
|
||||
|
||||
pdev = platform_device_alloc(name, PLATFORM_DEVID_NONE);
|
||||
if (!pdev) {
|
||||
pr_err("%s: failed to allocate scu clk dev rsrc %d type %d\n",
|
||||
name, rsrc_id, clk_type);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
ret = platform_device_add_data(pdev, &clk, sizeof(clk));
|
||||
if (ret) {
|
||||
platform_device_put(pdev);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
pdev->driver_override = "imx-scu-clk";
|
||||
|
||||
ret = imx_clk_scu_attach_pd(&pdev->dev, rsrc_id);
|
||||
if (ret)
|
||||
pr_warn("%s: failed to attached the power domain %d\n",
|
||||
name, ret);
|
||||
|
||||
platform_device_add(pdev);
|
||||
|
||||
/* For API backwards compatiblilty, simply return NULL for success */
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void imx_clk_scu_unregister(void)
|
||||
{
|
||||
struct imx_scu_clk_node *clk;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < IMX_SC_R_LAST; i++) {
|
||||
list_for_each_entry(clk, &imx_scu_clks[i], node) {
|
||||
clk_hw_unregister(clk->hw);
|
||||
kfree(clk);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -8,25 +8,61 @@
#define __IMX_CLK_SCU_H

#include <linux/firmware/imx/sci.h>
+#include <linux/of.h>

-int imx_clk_scu_init(void);
+extern struct list_head imx_scu_clks[];
+extern const struct dev_pm_ops imx_clk_lpcg_scu_pm_ops;

-struct clk_hw *__imx_clk_scu(const char *name, const char * const *parents,
-                            int num_parents, u32 rsrc_id, u8 clk_type);
+int imx_clk_scu_init(struct device_node *np);
+struct clk_hw *imx_scu_of_clk_src_get(struct of_phandle_args *clkspec,
+                                      void *data);
+struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
+                                     const char * const *parents,
+                                     int num_parents, u32 rsrc_id, u8 clk_type);
+
+struct clk_hw *__imx_clk_scu(struct device *dev, const char *name,
+                             const char * const *parents, int num_parents,
+                             u32 rsrc_id, u8 clk_type);
+
+void imx_clk_scu_unregister(void);
+
+struct clk_hw *__imx_clk_lpcg_scu(struct device *dev, const char *name,
+                                  const char *parent_name, unsigned long flags,
+                                  void __iomem *reg, u8 bit_idx, bool hw_gate);
+void imx_clk_lpcg_scu_unregister(struct clk_hw *hw);

static inline struct clk_hw *imx_clk_scu(const char *name, u32 rsrc_id,
-                                        u8 clk_type)
+                                        u8 clk_type, u8 clk_cells)
{
-       return __imx_clk_scu(name, NULL, 0, rsrc_id, clk_type);
+       if (clk_cells == 2)
+               return imx_clk_scu_alloc_dev(name, NULL, 0, rsrc_id, clk_type);
+       else
+               return __imx_clk_scu(NULL, name, NULL, 0, rsrc_id, clk_type);
}

static inline struct clk_hw *imx_clk_scu2(const char *name, const char * const *parents,
-                                         int num_parents, u32 rsrc_id, u8 clk_type)
+                                         int num_parents, u32 rsrc_id, u8 clk_type,
+                                         u8 clk_cells)
{
-       return __imx_clk_scu(name, parents, num_parents, rsrc_id, clk_type);
+       if (clk_cells == 2)
+               return imx_clk_scu_alloc_dev(name, parents, num_parents, rsrc_id, clk_type);
+       else
+               return __imx_clk_scu(NULL, name, parents, num_parents, rsrc_id, clk_type);
}

-struct clk_hw *imx_clk_lpcg_scu(const char *name, const char *parent_name,
-                               unsigned long flags, void __iomem *reg,
-                               u8 bit_idx, bool hw_gate);
+static inline struct clk_hw *imx_clk_lpcg_scu_dev(struct device *dev, const char *name,
+                                                 const char *parent_name, unsigned long flags,
+                                                 void __iomem *reg, u8 bit_idx, bool hw_gate)
+{
+       return __imx_clk_lpcg_scu(dev, name, parent_name, flags, reg,
+                                 bit_idx, hw_gate);
+}
+
+static inline struct clk_hw *imx_clk_lpcg_scu(const char *name, const char *parent_name,
+                                              unsigned long flags, void __iomem *reg,
+                                              u8 bit_idx, bool hw_gate)
+{
+       return __imx_clk_lpcg_scu(NULL, name, parent_name, flags, reg,
+                                 bit_idx, hw_gate);
+}
#endif
|
@@ -6,8 +6,6 @@
#include <linux/spinlock.h>
#include <linux/clk-provider.h>

-#define IMX_CLK_GATE2_SINGLE_BIT       1
-
extern spinlock_t imx_ccm_lock;

void imx_check_clocks(struct clk *clks[], unsigned int count);

@@ -68,9 +66,9 @@ extern struct imx_pll14xx_clk imx_1443x_dram_pll;
        to_clk(imx_clk_hw_cpu(name, parent_name, div, mux, pll, step))

#define clk_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \
-                          cgr_val, clk_gate_flags, lock, share_count) \
+                          cgr_val, cgr_mask, clk_gate_flags, lock, share_count) \
        to_clk(clk_hw_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \
-                                    cgr_val, clk_gate_flags, lock, share_count))
+                                    cgr_val, cgr_mask, clk_gate_flags, lock, share_count))

#define imx_clk_pllv3(type, name, parent_name, base, div_mask) \
        to_clk(imx_clk_hw_pllv3(type, name, parent_name, base, div_mask))

@@ -198,7 +196,7 @@ struct clk_hw *imx_clk_hw_pllv4(const char *name, const char *parent_name,

struct clk_hw *clk_hw_register_gate2(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
-               void __iomem *reg, u8 bit_idx, u8 cgr_val,
+               void __iomem *reg, u8 bit_idx, u8 cgr_val, u8 cgr_mask,
                u8 clk_gate_flags, spinlock_t *lock,
                unsigned int *share_count);

@@ -351,14 +349,14 @@ static inline struct clk_hw *imx_clk_hw_gate2(const char *name, const char *pare
                void __iomem *reg, u8 shift)
{
        return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
-                       shift, 0x3, 0, &imx_ccm_lock, NULL);
+                       shift, 0x3, 0x3, 0, &imx_ccm_lock, NULL);
}

static inline struct clk_hw *imx_clk_hw_gate2_flags(const char *name, const char *parent,
                void __iomem *reg, u8 shift, unsigned long flags)
{
        return clk_hw_register_gate2(NULL, name, parent, flags | CLK_SET_RATE_PARENT, reg,
-                       shift, 0x3, 0, &imx_ccm_lock, NULL);
+                       shift, 0x3, 0x3, 0, &imx_ccm_lock, NULL);
}

static inline struct clk_hw *imx_clk_hw_gate2_shared(const char *name,

@@ -366,7 +364,7 @@ static inline struct clk_hw *imx_clk_hw_gate2_shared(const char *name,
                unsigned int *share_count)
{
        return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
-                       shift, 0x3, 0, &imx_ccm_lock, share_count);
+                       shift, 0x3, 0x3, 0, &imx_ccm_lock, share_count);
}

static inline struct clk_hw *imx_clk_hw_gate2_shared2(const char *name,

@@ -374,7 +372,7 @@ static inline struct clk_hw *imx_clk_hw_gate2_shared2(const char *name,
                unsigned int *share_count)
{
        return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT |
-                                    CLK_OPS_PARENT_ENABLE, reg, shift, 0x3, 0,
+                                    CLK_OPS_PARENT_ENABLE, reg, shift, 0x3, 0x3, 0,
                                     &imx_ccm_lock, share_count);
}

@@ -384,16 +382,15 @@ static inline struct clk_hw *imx_dev_clk_hw_gate_shared(struct device *dev,
                unsigned int *share_count)
{
        return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT |
-                                    CLK_OPS_PARENT_ENABLE, reg, shift, 0x3,
-                                    IMX_CLK_GATE2_SINGLE_BIT,
-                                    &imx_ccm_lock, share_count);
+                                    CLK_OPS_PARENT_ENABLE, reg, shift, 0x1,
+                                    0x1, 0, &imx_ccm_lock, share_count);
}

static inline struct clk *imx_clk_gate2_cgr(const char *name,
                const char *parent, void __iomem *reg, u8 shift, u8 cgr_val)
{
        return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
-                       shift, cgr_val, 0, &imx_ccm_lock, NULL);
+                       shift, cgr_val, 0x3, 0, &imx_ccm_lock, NULL);
}

static inline struct clk_hw *imx_clk_hw_gate3(const char *name, const char *parent,

@@ -421,7 +418,7 @@ static inline struct clk_hw *imx_clk_hw_gate4(const char *name, const char *pare
{
        return clk_hw_register_gate2(NULL, name, parent,
                        CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
-                       reg, shift, 0x3, 0, &imx_ccm_lock, NULL);
+                       reg, shift, 0x3, 0x3, 0, &imx_ccm_lock, NULL);
}

static inline struct clk_hw *imx_clk_hw_gate4_flags(const char *name,

@@ -430,7 +427,7 @@ static inline struct clk_hw *imx_clk_hw_gate4_flags(const char *name,
{
        return clk_hw_register_gate2(NULL, name, parent,
                        flags | CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
-                       reg, shift, 0x3, 0, &imx_ccm_lock, NULL);
+                       reg, shift, 0x3, 0x3, 0, &imx_ccm_lock, NULL);
}

#define imx_clk_gate4_flags(name, parent, reg, shift, flags) \
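As the wrappers above show, every caller of clk_hw_register_gate2() now passes an explicit cgr_mask right after cgr_val. A hedged sketch of what the two common call-site patterns look like with the new signature (the clock names and register offsets below are made up for illustration, not taken from any i.MX clock tree):

/* Illustrative only: a two-bit CCM gate field and a single-bit gate field. */
#include <linux/clk-provider.h>
#include "clk.h"       /* for clk_hw_register_gate2() and imx_ccm_lock */

static struct clk_hw *hw_2bit, *hw_1bit;

static void demo_register_gates(void __iomem *ccm_base)
{
        /* classic CCM gate: two control bits, fully on when the field is 0x3 */
        hw_2bit = clk_hw_register_gate2(NULL, "demo_per", "demo_parent",
                                        CLK_SET_RATE_PARENT, ccm_base + 0x70,
                                        26, 0x3, 0x3, 0, &imx_ccm_lock, NULL);

        /* single-bit gate: what IMX_CLK_GATE2_SINGLE_BIT used to express */
        hw_1bit = clk_hw_register_gate2(NULL, "demo_ipg", "demo_parent",
                                        CLK_SET_RATE_PARENT, ccm_base + 0x74,
                                        0, 0x1, 0x1, 0, &imx_ccm_lock, NULL);
}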
@@ -155,7 +155,7 @@ const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
        .set_parent = mtk_clk_mux_set_parent_setclr_lock,
};

-struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
+static struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
                                 struct regmap *regmap,
                                 spinlock_t *lock)
{

@@ -77,10 +77,6 @@ extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
                 _width, _gate, _upd_ofs, _upd, \
                 CLK_SET_RATE_PARENT)

-struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
-                                struct regmap *regmap,
-                                spinlock_t *lock);
-
int mtk_clk_register_muxes(const struct mtk_mux *muxes,
                           int num, struct device_node *node,
                           spinlock_t *lock,
@@ -8,12 +8,12 @@ menuconfig CLK_SIFIVE

if CLK_SIFIVE

-config CLK_SIFIVE_FU540_PRCI
-       bool "PRCI driver for SiFive FU540 SoCs"
+config CLK_SIFIVE_PRCI
+       bool "PRCI driver for SiFive SoCs"
        select CLK_ANALOGBITS_WRPLL_CLN28HPC
        help
-         Supports the Power Reset Clock interface (PRCI) IP block found in
-         FU540 SoCs. If this kernel is meant to run on a SiFive FU540 SoC,
-         enable this driver.
+         Supports the Power Reset Clock interface (PRCI) IP block found in
+         FU540/FU740 SoCs. If this kernel is meant to run on a SiFive FU540/
+         FU740 SoCs, enable this driver.

endif
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CLK_SIFIVE_FU540_PRCI) += fu540-prci.o
+obj-$(CONFIG_CLK_SIFIVE_PRCI) += sifive-prci.o fu540-prci.o fu740-prci.o
@@ -1,17 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2019 SiFive, Inc.
- * Wesley Terpstra
- * Paul Walmsley
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2018-2019 Wesley Terpstra
+ * Copyright (C) 2018-2019 Paul Walmsley
+ * Copyright (C) 2020 Zong Li
 *
 * The FU540 PRCI implements clock and reset control for the SiFive
 * FU540-C000 chip. This driver assumes that it has sole control
@ -24,464 +16,53 @@
|
|||
* - SiFive FU540-C000 manual v1p0, Chapter 7 "Clocking and Reset"
|
||||
*/
|
||||
|
||||
#include <dt-bindings/clock/sifive-fu540-prci.h>
|
||||
#include <linux/clkdev.h>
|
||||
#include <linux/clk-provider.h>
|
||||
#include <linux/clk/analogbits-wrpll-cln28hpc.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_clk.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
/*
|
||||
* EXPECTED_CLK_PARENT_COUNT: how many parent clocks this driver expects:
|
||||
* hfclk and rtcclk
|
||||
*/
|
||||
#define EXPECTED_CLK_PARENT_COUNT 2
|
||||
#include <dt-bindings/clock/sifive-fu540-prci.h>
|
||||
|
||||
/*
|
||||
* Register offsets and bitmasks
|
||||
*/
|
||||
#include "fu540-prci.h"
|
||||
#include "sifive-prci.h"
|
||||
|
||||
/* COREPLLCFG0 */
|
||||
#define PRCI_COREPLLCFG0_OFFSET 0x4
|
||||
# define PRCI_COREPLLCFG0_DIVR_SHIFT 0
|
||||
# define PRCI_COREPLLCFG0_DIVR_MASK (0x3f << PRCI_COREPLLCFG0_DIVR_SHIFT)
|
||||
# define PRCI_COREPLLCFG0_DIVF_SHIFT 6
|
||||
# define PRCI_COREPLLCFG0_DIVF_MASK (0x1ff << PRCI_COREPLLCFG0_DIVF_SHIFT)
|
||||
# define PRCI_COREPLLCFG0_DIVQ_SHIFT 15
|
||||
# define PRCI_COREPLLCFG0_DIVQ_MASK (0x7 << PRCI_COREPLLCFG0_DIVQ_SHIFT)
|
||||
# define PRCI_COREPLLCFG0_RANGE_SHIFT 18
|
||||
# define PRCI_COREPLLCFG0_RANGE_MASK (0x7 << PRCI_COREPLLCFG0_RANGE_SHIFT)
|
||||
# define PRCI_COREPLLCFG0_BYPASS_SHIFT 24
|
||||
# define PRCI_COREPLLCFG0_BYPASS_MASK (0x1 << PRCI_COREPLLCFG0_BYPASS_SHIFT)
|
||||
# define PRCI_COREPLLCFG0_FSE_SHIFT 25
|
||||
# define PRCI_COREPLLCFG0_FSE_MASK (0x1 << PRCI_COREPLLCFG0_FSE_SHIFT)
|
||||
# define PRCI_COREPLLCFG0_LOCK_SHIFT 31
|
||||
# define PRCI_COREPLLCFG0_LOCK_MASK (0x1 << PRCI_COREPLLCFG0_LOCK_SHIFT)
|
||||
|
||||
/* DDRPLLCFG0 */
|
||||
#define PRCI_DDRPLLCFG0_OFFSET 0xc
|
||||
# define PRCI_DDRPLLCFG0_DIVR_SHIFT 0
|
||||
# define PRCI_DDRPLLCFG0_DIVR_MASK (0x3f << PRCI_DDRPLLCFG0_DIVR_SHIFT)
|
||||
# define PRCI_DDRPLLCFG0_DIVF_SHIFT 6
|
||||
# define PRCI_DDRPLLCFG0_DIVF_MASK (0x1ff << PRCI_DDRPLLCFG0_DIVF_SHIFT)
|
||||
# define PRCI_DDRPLLCFG0_DIVQ_SHIFT 15
|
||||
# define PRCI_DDRPLLCFG0_DIVQ_MASK (0x7 << PRCI_DDRPLLCFG0_DIVQ_SHIFT)
|
||||
# define PRCI_DDRPLLCFG0_RANGE_SHIFT 18
|
||||
# define PRCI_DDRPLLCFG0_RANGE_MASK (0x7 << PRCI_DDRPLLCFG0_RANGE_SHIFT)
|
||||
# define PRCI_DDRPLLCFG0_BYPASS_SHIFT 24
|
||||
# define PRCI_DDRPLLCFG0_BYPASS_MASK (0x1 << PRCI_DDRPLLCFG0_BYPASS_SHIFT)
|
||||
# define PRCI_DDRPLLCFG0_FSE_SHIFT 25
|
||||
# define PRCI_DDRPLLCFG0_FSE_MASK (0x1 << PRCI_DDRPLLCFG0_FSE_SHIFT)
|
||||
# define PRCI_DDRPLLCFG0_LOCK_SHIFT 31
|
||||
# define PRCI_DDRPLLCFG0_LOCK_MASK (0x1 << PRCI_DDRPLLCFG0_LOCK_SHIFT)
|
||||
|
||||
/* DDRPLLCFG1 */
|
||||
#define PRCI_DDRPLLCFG1_OFFSET 0x10
|
||||
# define PRCI_DDRPLLCFG1_CKE_SHIFT 24
|
||||
# define PRCI_DDRPLLCFG1_CKE_MASK (0x1 << PRCI_DDRPLLCFG1_CKE_SHIFT)
|
||||
|
||||
/* GEMGXLPLLCFG0 */
|
||||
#define PRCI_GEMGXLPLLCFG0_OFFSET 0x1c
|
||||
# define PRCI_GEMGXLPLLCFG0_DIVR_SHIFT 0
|
||||
# define PRCI_GEMGXLPLLCFG0_DIVR_MASK (0x3f << PRCI_GEMGXLPLLCFG0_DIVR_SHIFT)
|
||||
# define PRCI_GEMGXLPLLCFG0_DIVF_SHIFT 6
|
||||
# define PRCI_GEMGXLPLLCFG0_DIVF_MASK (0x1ff << PRCI_GEMGXLPLLCFG0_DIVF_SHIFT)
|
||||
# define PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT 15
|
||||
# define PRCI_GEMGXLPLLCFG0_DIVQ_MASK (0x7 << PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT)
|
||||
# define PRCI_GEMGXLPLLCFG0_RANGE_SHIFT 18
|
||||
# define PRCI_GEMGXLPLLCFG0_RANGE_MASK (0x7 << PRCI_GEMGXLPLLCFG0_RANGE_SHIFT)
|
||||
# define PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT 24
|
||||
# define PRCI_GEMGXLPLLCFG0_BYPASS_MASK (0x1 << PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT)
|
||||
# define PRCI_GEMGXLPLLCFG0_FSE_SHIFT 25
|
||||
# define PRCI_GEMGXLPLLCFG0_FSE_MASK (0x1 << PRCI_GEMGXLPLLCFG0_FSE_SHIFT)
|
||||
# define PRCI_GEMGXLPLLCFG0_LOCK_SHIFT 31
|
||||
# define PRCI_GEMGXLPLLCFG0_LOCK_MASK (0x1 << PRCI_GEMGXLPLLCFG0_LOCK_SHIFT)
|
||||
|
||||
/* GEMGXLPLLCFG1 */
|
||||
#define PRCI_GEMGXLPLLCFG1_OFFSET 0x20
|
||||
# define PRCI_GEMGXLPLLCFG1_CKE_SHIFT 24
|
||||
# define PRCI_GEMGXLPLLCFG1_CKE_MASK (0x1 << PRCI_GEMGXLPLLCFG1_CKE_SHIFT)
|
||||
|
||||
/* CORECLKSEL */
|
||||
#define PRCI_CORECLKSEL_OFFSET 0x24
|
||||
# define PRCI_CORECLKSEL_CORECLKSEL_SHIFT 0
|
||||
# define PRCI_CORECLKSEL_CORECLKSEL_MASK (0x1 << PRCI_CORECLKSEL_CORECLKSEL_SHIFT)
|
||||
|
||||
/* DEVICESRESETREG */
|
||||
#define PRCI_DEVICESRESETREG_OFFSET 0x28
|
||||
# define PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_SHIFT 0
|
||||
# define PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_SHIFT)
|
||||
# define PRCI_DEVICESRESETREG_DDR_AXI_RST_N_SHIFT 1
|
||||
# define PRCI_DEVICESRESETREG_DDR_AXI_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_DDR_AXI_RST_N_SHIFT)
|
||||
# define PRCI_DEVICESRESETREG_DDR_AHB_RST_N_SHIFT 2
|
||||
# define PRCI_DEVICESRESETREG_DDR_AHB_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_DDR_AHB_RST_N_SHIFT)
|
||||
# define PRCI_DEVICESRESETREG_DDR_PHY_RST_N_SHIFT 3
|
||||
# define PRCI_DEVICESRESETREG_DDR_PHY_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_DDR_PHY_RST_N_SHIFT)
|
||||
# define PRCI_DEVICESRESETREG_GEMGXL_RST_N_SHIFT 5
|
||||
# define PRCI_DEVICESRESETREG_GEMGXL_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_GEMGXL_RST_N_SHIFT)
|
||||
|
||||
/* CLKMUXSTATUSREG */
|
||||
#define PRCI_CLKMUXSTATUSREG_OFFSET 0x2c
|
||||
# define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT 1
|
||||
# define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK (0x1 << PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT)
|
||||
|
||||
/*
|
||||
* Private structures
|
||||
*/
|
||||
|
||||
/**
|
||||
* struct __prci_data - per-device-instance data
|
||||
* @va: base virtual address of the PRCI IP block
|
||||
* @hw_clks: encapsulates struct clk_hw records
|
||||
*
|
||||
* PRCI per-device instance data
|
||||
*/
|
||||
struct __prci_data {
|
||||
void __iomem *va;
|
||||
struct clk_hw_onecell_data hw_clks;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct __prci_wrpll_data - WRPLL configuration and integration data
|
||||
* @c: WRPLL current configuration record
|
||||
* @enable_bypass: fn ptr to code to bypass the WRPLL (if applicable; else NULL)
|
||||
* @disable_bypass: fn ptr to code to not bypass the WRPLL (or NULL)
|
||||
* @cfg0_offs: WRPLL CFG0 register offset (in bytes) from the PRCI base address
|
||||
*
|
||||
* @enable_bypass and @disable_bypass are used for WRPLL instances
|
||||
* that contain a separate external glitchless clock mux downstream
|
||||
* from the PLL. The WRPLL internal bypass mux is not glitchless.
|
||||
*/
|
||||
struct __prci_wrpll_data {
|
||||
struct wrpll_cfg c;
|
||||
void (*enable_bypass)(struct __prci_data *pd);
|
||||
void (*disable_bypass)(struct __prci_data *pd);
|
||||
u8 cfg0_offs;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct __prci_clock - describes a clock device managed by PRCI
|
||||
* @name: user-readable clock name string - should match the manual
|
||||
* @parent_name: parent name for this clock
|
||||
* @ops: struct clk_ops for the Linux clock framework to use for control
|
||||
* @hw: Linux-private clock data
|
||||
* @pwd: WRPLL-specific data, associated with this clock (if not NULL)
|
||||
* @pd: PRCI-specific data associated with this clock (if not NULL)
|
||||
*
|
||||
* PRCI clock data. Used by the PRCI driver to register PRCI-provided
|
||||
* clocks to the Linux clock infrastructure.
|
||||
*/
|
||||
struct __prci_clock {
|
||||
const char *name;
|
||||
const char *parent_name;
|
||||
const struct clk_ops *ops;
|
||||
struct clk_hw hw;
|
||||
struct __prci_wrpll_data *pwd;
|
||||
struct __prci_data *pd;
|
||||
};
|
||||
|
||||
#define clk_hw_to_prci_clock(pwd) container_of(pwd, struct __prci_clock, hw)
|
||||
|
||||
/*
|
||||
* Private functions
|
||||
*/
|
||||
|
||||
/**
|
||||
* __prci_readl() - read from a PRCI register
|
||||
* @pd: PRCI context
|
||||
* @offs: register offset to read from (in bytes, from PRCI base address)
|
||||
*
|
||||
* Read the register located at offset @offs from the base virtual
|
||||
* address of the PRCI register target described by @pd, and return
|
||||
* the value to the caller.
|
||||
*
|
||||
* Context: Any context.
|
||||
*
|
||||
* Return: the contents of the register described by @pd and @offs.
|
||||
*/
|
||||
static u32 __prci_readl(struct __prci_data *pd, u32 offs)
|
||||
{
|
||||
return readl_relaxed(pd->va + offs);
|
||||
}
|
||||
|
||||
static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd)
|
||||
{
|
||||
writel_relaxed(v, pd->va + offs);
|
||||
}
|
||||
|
||||
/* WRPLL-related private functions */
|
||||
|
||||
/**
|
||||
* __prci_wrpll_unpack() - unpack WRPLL configuration registers into parameters
|
||||
* @c: ptr to a struct wrpll_cfg record to write config into
|
||||
* @r: value read from the PRCI PLL configuration register
|
||||
*
|
||||
* Given a value @r read from an FU540 PRCI PLL configuration register,
|
||||
* split it into fields and populate it into the WRPLL configuration record
|
||||
* pointed to by @c.
|
||||
*
|
||||
* The COREPLLCFG0 macros are used below, but the other *PLLCFG0 macros
|
||||
* have the same register layout.
|
||||
*
|
||||
* Context: Any context.
|
||||
*/
|
||||
static void __prci_wrpll_unpack(struct wrpll_cfg *c, u32 r)
|
||||
{
|
||||
u32 v;
|
||||
|
||||
v = r & PRCI_COREPLLCFG0_DIVR_MASK;
|
||||
v >>= PRCI_COREPLLCFG0_DIVR_SHIFT;
|
||||
c->divr = v;
|
||||
|
||||
v = r & PRCI_COREPLLCFG0_DIVF_MASK;
|
||||
v >>= PRCI_COREPLLCFG0_DIVF_SHIFT;
|
||||
c->divf = v;
|
||||
|
||||
v = r & PRCI_COREPLLCFG0_DIVQ_MASK;
|
||||
v >>= PRCI_COREPLLCFG0_DIVQ_SHIFT;
|
||||
c->divq = v;
|
||||
|
||||
v = r & PRCI_COREPLLCFG0_RANGE_MASK;
|
||||
v >>= PRCI_COREPLLCFG0_RANGE_SHIFT;
|
||||
c->range = v;
|
||||
|
||||
c->flags &= (WRPLL_FLAGS_INT_FEEDBACK_MASK |
|
||||
WRPLL_FLAGS_EXT_FEEDBACK_MASK);
|
||||
|
||||
/* external feedback mode not supported */
|
||||
c->flags |= WRPLL_FLAGS_INT_FEEDBACK_MASK;
|
||||
}
|
||||
|
||||
/**
|
||||
* __prci_wrpll_pack() - pack PLL configuration parameters into a register value
|
||||
* @c: pointer to a struct wrpll_cfg record containing the PLL's cfg
|
||||
*
|
||||
* Using a set of WRPLL configuration values pointed to by @c,
|
||||
* assemble a PRCI PLL configuration register value, and return it to
|
||||
* the caller.
|
||||
*
|
||||
* Context: Any context. Caller must ensure that the contents of the
|
||||
* record pointed to by @c do not change during the execution
|
||||
* of this function.
|
||||
*
|
||||
* Returns: a value suitable for writing into a PRCI PLL configuration
|
||||
* register
|
||||
*/
|
||||
static u32 __prci_wrpll_pack(const struct wrpll_cfg *c)
|
||||
{
|
||||
u32 r = 0;
|
||||
|
||||
r |= c->divr << PRCI_COREPLLCFG0_DIVR_SHIFT;
|
||||
r |= c->divf << PRCI_COREPLLCFG0_DIVF_SHIFT;
|
||||
r |= c->divq << PRCI_COREPLLCFG0_DIVQ_SHIFT;
|
||||
r |= c->range << PRCI_COREPLLCFG0_RANGE_SHIFT;
|
||||
|
||||
/* external feedback mode not supported */
|
||||
r |= PRCI_COREPLLCFG0_FSE_MASK;
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* __prci_wrpll_read_cfg() - read the WRPLL configuration from the PRCI
|
||||
* @pd: PRCI context
|
||||
* @pwd: PRCI WRPLL metadata
|
||||
*
|
||||
* Read the current configuration of the PLL identified by @pwd from
|
||||
* the PRCI identified by @pd, and store it into the local configuration
|
||||
* cache in @pwd.
|
||||
*
|
||||
* Context: Any context. Caller must prevent the records pointed to by
|
||||
* @pd and @pwd from changing during execution.
|
||||
*/
|
||||
static void __prci_wrpll_read_cfg(struct __prci_data *pd,
|
||||
struct __prci_wrpll_data *pwd)
|
||||
{
|
||||
__prci_wrpll_unpack(&pwd->c, __prci_readl(pd, pwd->cfg0_offs));
|
||||
}
|
||||
|
||||
/**
|
||||
* __prci_wrpll_write_cfg() - write WRPLL configuration into the PRCI
|
||||
* @pd: PRCI context
|
||||
* @pwd: PRCI WRPLL metadata
|
||||
* @c: WRPLL configuration record to write
|
||||
*
|
||||
* Write the WRPLL configuration described by @c into the WRPLL
|
||||
* configuration register identified by @pwd in the PRCI instance
|
||||
* described by @c. Make a cached copy of the WRPLL's current
|
||||
* configuration so it can be used by other code.
|
||||
*
|
||||
* Context: Any context. Caller must prevent the records pointed to by
|
||||
* @pd and @pwd from changing during execution.
|
||||
*/
|
||||
static void __prci_wrpll_write_cfg(struct __prci_data *pd,
|
||||
struct __prci_wrpll_data *pwd,
|
||||
struct wrpll_cfg *c)
|
||||
{
|
||||
__prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd);
|
||||
|
||||
memcpy(&pwd->c, c, sizeof(*c));
|
||||
}
|
||||
|
||||
/* Core clock mux control */
|
||||
|
||||
/**
|
||||
* __prci_coreclksel_use_hfclk() - switch the CORECLK mux to output HFCLK
|
||||
* @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
|
||||
*
|
||||
* Switch the CORECLK mux to the HFCLK input source; return once complete.
|
||||
*
|
||||
* Context: Any context. Caller must prevent concurrent changes to the
|
||||
* PRCI_CORECLKSEL_OFFSET register.
|
||||
*/
|
||||
static void __prci_coreclksel_use_hfclk(struct __prci_data *pd)
|
||||
{
|
||||
u32 r;
|
||||
|
||||
r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
|
||||
r |= PRCI_CORECLKSEL_CORECLKSEL_MASK;
|
||||
__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
|
||||
|
||||
r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
|
||||
}
|
||||
|
||||
/**
|
||||
* __prci_coreclksel_use_corepll() - switch the CORECLK mux to output COREPLL
|
||||
* @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
|
||||
*
|
||||
* Switch the CORECLK mux to the PLL output clock; return once complete.
|
||||
*
|
||||
* Context: Any context. Caller must prevent concurrent changes to the
|
||||
* PRCI_CORECLKSEL_OFFSET register.
|
||||
*/
|
||||
static void __prci_coreclksel_use_corepll(struct __prci_data *pd)
|
||||
{
|
||||
u32 r;
|
||||
|
||||
r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
|
||||
r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
|
||||
__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
|
||||
|
||||
r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
|
||||
}
|
||||
|
||||
/*
|
||||
* Linux clock framework integration
|
||||
*
|
||||
* See the Linux clock framework documentation for more information on
|
||||
* these functions.
|
||||
*/
|
||||
|
||||
static unsigned long sifive_fu540_prci_wrpll_recalc_rate(struct clk_hw *hw,
|
||||
unsigned long parent_rate)
|
||||
{
|
||||
struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
|
||||
struct __prci_wrpll_data *pwd = pc->pwd;
|
||||
|
||||
return wrpll_calc_output_rate(&pwd->c, parent_rate);
|
||||
}
|
||||
|
||||
static long sifive_fu540_prci_wrpll_round_rate(struct clk_hw *hw,
|
||||
unsigned long rate,
|
||||
unsigned long *parent_rate)
|
||||
{
|
||||
struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
|
||||
struct __prci_wrpll_data *pwd = pc->pwd;
|
||||
struct wrpll_cfg c;
|
||||
|
||||
memcpy(&c, &pwd->c, sizeof(c));
|
||||
|
||||
wrpll_configure_for_rate(&c, rate, *parent_rate);
|
||||
|
||||
return wrpll_calc_output_rate(&c, *parent_rate);
|
||||
}
|
||||
|
||||
static int sifive_fu540_prci_wrpll_set_rate(struct clk_hw *hw,
|
||||
unsigned long rate,
|
||||
unsigned long parent_rate)
|
||||
{
|
||||
struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
|
||||
struct __prci_wrpll_data *pwd = pc->pwd;
|
||||
struct __prci_data *pd = pc->pd;
|
||||
int r;
|
||||
|
||||
r = wrpll_configure_for_rate(&pwd->c, rate, parent_rate);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
if (pwd->enable_bypass)
|
||||
pwd->enable_bypass(pd);
|
||||
|
||||
__prci_wrpll_write_cfg(pd, pwd, &pwd->c);
|
||||
|
||||
udelay(wrpll_calc_max_lock_us(&pwd->c));
|
||||
|
||||
if (pwd->disable_bypass)
|
||||
pwd->disable_bypass(pd);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct clk_ops sifive_fu540_prci_wrpll_clk_ops = {
|
||||
.set_rate = sifive_fu540_prci_wrpll_set_rate,
|
||||
.round_rate = sifive_fu540_prci_wrpll_round_rate,
|
||||
.recalc_rate = sifive_fu540_prci_wrpll_recalc_rate,
|
||||
};
|
||||
|
||||
static const struct clk_ops sifive_fu540_prci_wrpll_ro_clk_ops = {
|
||||
.recalc_rate = sifive_fu540_prci_wrpll_recalc_rate,
|
||||
};
|
||||
|
||||
/* TLCLKSEL clock integration */
|
||||
|
||||
static unsigned long sifive_fu540_prci_tlclksel_recalc_rate(struct clk_hw *hw,
|
||||
unsigned long parent_rate)
|
||||
{
|
||||
struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
|
||||
struct __prci_data *pd = pc->pd;
|
||||
u32 v;
|
||||
u8 div;
|
||||
|
||||
v = __prci_readl(pd, PRCI_CLKMUXSTATUSREG_OFFSET);
|
||||
v &= PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK;
|
||||
div = v ? 1 : 2;
|
||||
|
||||
return div_u64(parent_rate, div);
|
||||
}
|
||||
|
||||
static const struct clk_ops sifive_fu540_prci_tlclksel_clk_ops = {
|
||||
.recalc_rate = sifive_fu540_prci_tlclksel_recalc_rate,
|
||||
};
|
||||
|
||||
/* PRCI integration data for each WRPLL instance */

static struct __prci_wrpll_data __prci_corepll_data = {
	.cfg0_offs = PRCI_COREPLLCFG0_OFFSET,
	.cfg1_offs = PRCI_COREPLLCFG1_OFFSET,
	.enable_bypass = sifive_prci_coreclksel_use_hfclk,
	.disable_bypass = sifive_prci_coreclksel_use_corepll,
};
|
||||
|
||||
static struct __prci_wrpll_data __prci_ddrpll_data = {
	.cfg0_offs = PRCI_DDRPLLCFG0_OFFSET,
	.cfg1_offs = PRCI_DDRPLLCFG1_OFFSET,
};

static struct __prci_wrpll_data __prci_gemgxlpll_data = {
	.cfg0_offs = PRCI_GEMGXLPLLCFG0_OFFSET,
	.cfg1_offs = PRCI_GEMGXLPLLCFG1_OFFSET,
};
|
||||
|
||||
/* Linux clock framework integration */

static const struct clk_ops sifive_fu540_prci_wrpll_clk_ops = {
	.set_rate = sifive_prci_wrpll_set_rate,
	.round_rate = sifive_prci_wrpll_round_rate,
	.recalc_rate = sifive_prci_wrpll_recalc_rate,
	.enable = sifive_prci_clock_enable,
	.disable = sifive_prci_clock_disable,
	.is_enabled = sifive_clk_is_enabled,
};

static const struct clk_ops sifive_fu540_prci_wrpll_ro_clk_ops = {
	.recalc_rate = sifive_prci_wrpll_recalc_rate,
};

static const struct clk_ops sifive_fu540_prci_tlclksel_clk_ops = {
	.recalc_rate = sifive_prci_tlclksel_recalc_rate,
};

/* List of clock controls provided by the PRCI */
struct __prci_clock __prci_init_clocks_fu540[] = {
	[PRCI_CLK_COREPLL] = {
		.name = "corepll",
		.parent_name = "hfclk",
@@ -506,125 +87,3 @@ static struct __prci_clock __prci_init_clocks[] = {
		.ops = &sifive_fu540_prci_tlclksel_clk_ops,
	},
};
|
||||
|
||||
/**
|
||||
* __prci_register_clocks() - register clock controls in the PRCI with Linux
|
||||
* @dev: Linux struct device *
|
||||
*
|
||||
* Register the list of clock controls described in __prci_init_plls[] with
|
||||
* the Linux clock framework.
|
||||
*
|
||||
* Return: 0 upon success or a negative error code upon failure.
|
||||
*/
|
||||
static int __prci_register_clocks(struct device *dev, struct __prci_data *pd)
|
||||
{
|
||||
struct clk_init_data init = { };
|
||||
struct __prci_clock *pic;
|
||||
int parent_count, i, r;
|
||||
|
||||
parent_count = of_clk_get_parent_count(dev->of_node);
|
||||
if (parent_count != EXPECTED_CLK_PARENT_COUNT) {
|
||||
dev_err(dev, "expected only two parent clocks, found %d\n",
|
||||
parent_count);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Register PLLs */
|
||||
for (i = 0; i < ARRAY_SIZE(__prci_init_clocks); ++i) {
|
||||
pic = &__prci_init_clocks[i];
|
||||
|
||||
init.name = pic->name;
|
||||
init.parent_names = &pic->parent_name;
|
||||
init.num_parents = 1;
|
||||
init.ops = pic->ops;
|
||||
pic->hw.init = &init;
|
||||
|
||||
pic->pd = pd;
|
||||
|
||||
if (pic->pwd)
|
||||
__prci_wrpll_read_cfg(pd, pic->pwd);
|
||||
|
||||
r = devm_clk_hw_register(dev, &pic->hw);
|
||||
if (r) {
|
||||
dev_warn(dev, "Failed to register clock %s: %d\n",
|
||||
init.name, r);
|
||||
return r;
|
||||
}
|
||||
|
||||
r = clk_hw_register_clkdev(&pic->hw, pic->name, dev_name(dev));
|
||||
if (r) {
|
||||
dev_warn(dev, "Failed to register clkdev for %s: %d\n",
|
||||
init.name, r);
|
||||
return r;
|
||||
}
|
||||
|
||||
pd->hw_clks.hws[i] = &pic->hw;
|
||||
}
|
||||
|
||||
pd->hw_clks.num = i;
|
||||
|
||||
r = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
|
||||
&pd->hw_clks);
|
||||
if (r) {
|
||||
dev_err(dev, "could not add hw_provider: %d\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Linux device model integration
|
||||
*
|
||||
* See the Linux device model documentation for more information about
|
||||
* these functions.
|
||||
*/
|
||||
static int sifive_fu540_prci_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct resource *res;
|
||||
struct __prci_data *pd;
|
||||
int r;
|
||||
|
||||
pd = devm_kzalloc(dev,
|
||||
struct_size(pd, hw_clks.hws,
|
||||
ARRAY_SIZE(__prci_init_clocks)),
|
||||
GFP_KERNEL);
|
||||
if (!pd)
|
||||
return -ENOMEM;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
pd->va = devm_ioremap_resource(dev, res);
|
||||
if (IS_ERR(pd->va))
|
||||
return PTR_ERR(pd->va);
|
||||
|
||||
r = __prci_register_clocks(dev, pd);
|
||||
if (r) {
|
||||
dev_err(dev, "could not register clocks: %d\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
dev_dbg(dev, "SiFive FU540 PRCI probed\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id sifive_fu540_prci_of_match[] = {
|
||||
{ .compatible = "sifive,fu540-c000-prci", },
|
||||
{}
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, sifive_fu540_prci_of_match);
|
||||
|
||||
static struct platform_driver sifive_fu540_prci_driver = {
|
||||
.driver = {
|
||||
.name = "sifive-fu540-prci",
|
||||
.of_match_table = sifive_fu540_prci_of_match,
|
||||
},
|
||||
.probe = sifive_fu540_prci_probe,
|
||||
};
|
||||
|
||||
static int __init sifive_fu540_prci_init(void)
|
||||
{
|
||||
return platform_driver_register(&sifive_fu540_prci_driver);
|
||||
}
|
||||
core_initcall(sifive_fu540_prci_init);
|
||||
|
|
|
@ -0,0 +1,21 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 SiFive, Inc.
 * Zong Li
 */

#ifndef __SIFIVE_CLK_FU540_PRCI_H
#define __SIFIVE_CLK_FU540_PRCI_H

#include "sifive-prci.h"

#define NUM_CLOCK_FU540	4

extern struct __prci_clock __prci_init_clocks_fu540[NUM_CLOCK_FU540];

static const struct prci_clk_desc prci_clk_fu540 = {
	.clks = __prci_init_clocks_fu540,
	.num_clks = ARRAY_SIZE(__prci_init_clocks_fu540),
};

#endif /* __SIFIVE_CLK_FU540_PRCI_H */
|
|
@ -0,0 +1,123 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 SiFive, Inc.
 * Copyright (C) 2020 Zong Li
 */

#include <linux/module.h>

#include <dt-bindings/clock/sifive-fu740-prci.h>

#include "fu540-prci.h"
#include "sifive-prci.h"

/* PRCI integration data for each WRPLL instance */

static struct __prci_wrpll_data __prci_corepll_data = {
	.cfg0_offs = PRCI_COREPLLCFG0_OFFSET,
	.cfg1_offs = PRCI_COREPLLCFG1_OFFSET,
	.enable_bypass = sifive_prci_coreclksel_use_hfclk,
	.disable_bypass = sifive_prci_coreclksel_use_final_corepll,
};

static struct __prci_wrpll_data __prci_ddrpll_data = {
	.cfg0_offs = PRCI_DDRPLLCFG0_OFFSET,
	.cfg1_offs = PRCI_DDRPLLCFG1_OFFSET,
};

static struct __prci_wrpll_data __prci_gemgxlpll_data = {
	.cfg0_offs = PRCI_GEMGXLPLLCFG0_OFFSET,
	.cfg1_offs = PRCI_GEMGXLPLLCFG1_OFFSET,
};

static struct __prci_wrpll_data __prci_dvfscorepll_data = {
	.cfg0_offs = PRCI_DVFSCOREPLLCFG0_OFFSET,
	.cfg1_offs = PRCI_DVFSCOREPLLCFG1_OFFSET,
	.enable_bypass = sifive_prci_corepllsel_use_corepll,
	.disable_bypass = sifive_prci_corepllsel_use_dvfscorepll,
};

static struct __prci_wrpll_data __prci_hfpclkpll_data = {
	.cfg0_offs = PRCI_HFPCLKPLLCFG0_OFFSET,
	.cfg1_offs = PRCI_HFPCLKPLLCFG1_OFFSET,
	.enable_bypass = sifive_prci_hfpclkpllsel_use_hfclk,
	.disable_bypass = sifive_prci_hfpclkpllsel_use_hfpclkpll,
};

static struct __prci_wrpll_data __prci_cltxpll_data = {
	.cfg0_offs = PRCI_CLTXPLLCFG0_OFFSET,
	.cfg1_offs = PRCI_CLTXPLLCFG1_OFFSET,
};

/* Linux clock framework integration */

static const struct clk_ops sifive_fu740_prci_wrpll_clk_ops = {
	.set_rate = sifive_prci_wrpll_set_rate,
	.round_rate = sifive_prci_wrpll_round_rate,
	.recalc_rate = sifive_prci_wrpll_recalc_rate,
	.enable = sifive_prci_clock_enable,
	.disable = sifive_prci_clock_disable,
	.is_enabled = sifive_clk_is_enabled,
};

static const struct clk_ops sifive_fu740_prci_wrpll_ro_clk_ops = {
	.recalc_rate = sifive_prci_wrpll_recalc_rate,
};

static const struct clk_ops sifive_fu740_prci_tlclksel_clk_ops = {
	.recalc_rate = sifive_prci_tlclksel_recalc_rate,
};

static const struct clk_ops sifive_fu740_prci_hfpclkplldiv_clk_ops = {
	.recalc_rate = sifive_prci_hfpclkplldiv_recalc_rate,
};

/* List of clock controls provided by the PRCI */
struct __prci_clock __prci_init_clocks_fu740[] = {
	[PRCI_CLK_COREPLL] = {
		.name = "corepll",
		.parent_name = "hfclk",
		.ops = &sifive_fu740_prci_wrpll_clk_ops,
		.pwd = &__prci_corepll_data,
	},
	[PRCI_CLK_DDRPLL] = {
		.name = "ddrpll",
		.parent_name = "hfclk",
		.ops = &sifive_fu740_prci_wrpll_ro_clk_ops,
		.pwd = &__prci_ddrpll_data,
	},
	[PRCI_CLK_GEMGXLPLL] = {
		.name = "gemgxlpll",
		.parent_name = "hfclk",
		.ops = &sifive_fu740_prci_wrpll_clk_ops,
		.pwd = &__prci_gemgxlpll_data,
	},
	[PRCI_CLK_DVFSCOREPLL] = {
		.name = "dvfscorepll",
		.parent_name = "hfclk",
		.ops = &sifive_fu740_prci_wrpll_clk_ops,
		.pwd = &__prci_dvfscorepll_data,
	},
	[PRCI_CLK_HFPCLKPLL] = {
		.name = "hfpclkpll",
		.parent_name = "hfclk",
		.ops = &sifive_fu740_prci_wrpll_clk_ops,
		.pwd = &__prci_hfpclkpll_data,
	},
	[PRCI_CLK_CLTXPLL] = {
		.name = "cltxpll",
		.parent_name = "hfclk",
		.ops = &sifive_fu740_prci_wrpll_clk_ops,
		.pwd = &__prci_cltxpll_data,
	},
	[PRCI_CLK_TLCLK] = {
		.name = "tlclk",
		.parent_name = "corepll",
		.ops = &sifive_fu740_prci_tlclksel_clk_ops,
	},
	[PRCI_CLK_PCLK] = {
		.name = "pclk",
		.parent_name = "hfpclkpll",
		.ops = &sifive_fu740_prci_hfpclkplldiv_clk_ops,
	},
};
|
|
@ -0,0 +1,21 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 SiFive, Inc.
 * Zong Li
 */

#ifndef __SIFIVE_CLK_FU740_PRCI_H
#define __SIFIVE_CLK_FU740_PRCI_H

#include "sifive-prci.h"

#define NUM_CLOCK_FU740	8

extern struct __prci_clock __prci_init_clocks_fu740[NUM_CLOCK_FU740];

static const struct prci_clk_desc prci_clk_fu740 = {
	.clks = __prci_init_clocks_fu740,
	.num_clks = ARRAY_SIZE(__prci_init_clocks_fu740),
};

#endif /* __SIFIVE_CLK_FU740_PRCI_H */
|
|
@ -0,0 +1,574 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (C) 2020 SiFive, Inc.
|
||||
* Copyright (C) 2020 Zong Li
|
||||
*/
|
||||
|
||||
#include <linux/clkdev.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/of_device.h>
|
||||
#include "sifive-prci.h"
|
||||
#include "fu540-prci.h"
|
||||
#include "fu740-prci.h"
|
||||
|
||||
/*
|
||||
* Private functions
|
||||
*/
|
||||
|
||||
/**
|
||||
* __prci_readl() - read from a PRCI register
|
||||
* @pd: PRCI context
|
||||
* @offs: register offset to read from (in bytes, from PRCI base address)
|
||||
*
|
||||
* Read the register located at offset @offs from the base virtual
|
||||
* address of the PRCI register target described by @pd, and return
|
||||
* the value to the caller.
|
||||
*
|
||||
* Context: Any context.
|
||||
*
|
||||
* Return: the contents of the register described by @pd and @offs.
|
||||
*/
|
||||
static u32 __prci_readl(struct __prci_data *pd, u32 offs)
|
||||
{
|
||||
return readl_relaxed(pd->va + offs);
|
||||
}
|
||||
|
||||
static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd)
|
||||
{
|
||||
writel_relaxed(v, pd->va + offs);
|
||||
}
|
||||
|
||||
/* WRPLL-related private functions */
|
||||
|
||||
/**
|
||||
* __prci_wrpll_unpack() - unpack WRPLL configuration registers into parameters
|
||||
* @c: ptr to a struct wrpll_cfg record to write config into
|
||||
* @r: value read from the PRCI PLL configuration register
|
||||
*
|
||||
* Given a value @r read from an FU740 PRCI PLL configuration register,
|
||||
* split it into fields and populate it into the WRPLL configuration record
|
||||
* pointed to by @c.
|
||||
*
|
||||
* The COREPLLCFG0 macros are used below, but the other *PLLCFG0 macros
|
||||
* have the same register layout.
|
||||
*
|
||||
* Context: Any context.
|
||||
*/
|
||||
static void __prci_wrpll_unpack(struct wrpll_cfg *c, u32 r)
|
||||
{
|
||||
u32 v;
|
||||
|
||||
v = r & PRCI_COREPLLCFG0_DIVR_MASK;
|
||||
v >>= PRCI_COREPLLCFG0_DIVR_SHIFT;
|
||||
c->divr = v;
|
||||
|
||||
v = r & PRCI_COREPLLCFG0_DIVF_MASK;
|
||||
v >>= PRCI_COREPLLCFG0_DIVF_SHIFT;
|
||||
c->divf = v;
|
||||
|
||||
v = r & PRCI_COREPLLCFG0_DIVQ_MASK;
|
||||
v >>= PRCI_COREPLLCFG0_DIVQ_SHIFT;
|
||||
c->divq = v;
|
||||
|
||||
v = r & PRCI_COREPLLCFG0_RANGE_MASK;
|
||||
v >>= PRCI_COREPLLCFG0_RANGE_SHIFT;
|
||||
c->range = v;
|
||||
|
||||
c->flags &=
|
||||
(WRPLL_FLAGS_INT_FEEDBACK_MASK | WRPLL_FLAGS_EXT_FEEDBACK_MASK);
|
||||
|
||||
/* external feedback mode not supported */
|
||||
c->flags |= WRPLL_FLAGS_INT_FEEDBACK_MASK;
|
||||
}
|
||||
|
||||
/**
|
||||
* __prci_wrpll_pack() - pack PLL configuration parameters into a register value
|
||||
* @c: pointer to a struct wrpll_cfg record containing the PLL's cfg
|
||||
*
|
||||
* Using a set of WRPLL configuration values pointed to by @c,
|
||||
* assemble a PRCI PLL configuration register value, and return it to
|
||||
* the caller.
|
||||
*
|
||||
* Context: Any context. Caller must ensure that the contents of the
|
||||
* record pointed to by @c do not change during the execution
|
||||
* of this function.
|
||||
*
|
||||
* Returns: a value suitable for writing into a PRCI PLL configuration
|
||||
* register
|
||||
*/
|
||||
static u32 __prci_wrpll_pack(const struct wrpll_cfg *c)
|
||||
{
|
||||
u32 r = 0;
|
||||
|
||||
r |= c->divr << PRCI_COREPLLCFG0_DIVR_SHIFT;
|
||||
r |= c->divf << PRCI_COREPLLCFG0_DIVF_SHIFT;
|
||||
r |= c->divq << PRCI_COREPLLCFG0_DIVQ_SHIFT;
|
||||
r |= c->range << PRCI_COREPLLCFG0_RANGE_SHIFT;
|
||||
|
||||
/* external feedback mode not supported */
|
||||
r |= PRCI_COREPLLCFG0_FSE_MASK;
|
||||
|
||||
return r;
|
||||
}
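
/*
 * Worked example (illustrative only; the divider values below are made up
 * and are not taken from any datasheet): packing a configuration with
 * divr = 0, divf = 76, divq = 2, range = 4 and internal feedback (FSE set)
 * yields
 *
 *   r = (0 << 0) | (76 << 6) | (2 << 15) | (4 << 18) | (1 << 25)
 *     = 0x02111300
 *
 * and __prci_wrpll_unpack() recovers the same divr/divf/divq/range fields
 * from that register value.
 */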
|
||||
|
||||
/**
|
||||
* __prci_wrpll_read_cfg0() - read the WRPLL configuration from the PRCI
|
||||
* @pd: PRCI context
|
||||
* @pwd: PRCI WRPLL metadata
|
||||
*
|
||||
* Read the current configuration of the PLL identified by @pwd from
|
||||
* the PRCI identified by @pd, and store it into the local configuration
|
||||
* cache in @pwd.
|
||||
*
|
||||
* Context: Any context. Caller must prevent the records pointed to by
|
||||
* @pd and @pwd from changing during execution.
|
||||
*/
|
||||
static void __prci_wrpll_read_cfg0(struct __prci_data *pd,
|
||||
struct __prci_wrpll_data *pwd)
|
||||
{
|
||||
__prci_wrpll_unpack(&pwd->c, __prci_readl(pd, pwd->cfg0_offs));
|
||||
}
|
||||
|
||||
/**
|
||||
* __prci_wrpll_write_cfg0() - write WRPLL configuration into the PRCI
|
||||
* @pd: PRCI context
|
||||
* @pwd: PRCI WRPLL metadata
|
||||
* @c: WRPLL configuration record to write
|
||||
*
|
||||
* Write the WRPLL configuration described by @c into the WRPLL
|
||||
 * configuration register identified by @pwd in the PRCI instance
 * described by @pd. Make a cached copy of the WRPLL's current
|
||||
* configuration so it can be used by other code.
|
||||
*
|
||||
* Context: Any context. Caller must prevent the records pointed to by
|
||||
* @pd and @pwd from changing during execution.
|
||||
*/
|
||||
static void __prci_wrpll_write_cfg0(struct __prci_data *pd,
|
||||
struct __prci_wrpll_data *pwd,
|
||||
struct wrpll_cfg *c)
|
||||
{
|
||||
__prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd);
|
||||
|
||||
memcpy(&pwd->c, c, sizeof(*c));
|
||||
}
|
||||
|
||||
/**
|
||||
* __prci_wrpll_write_cfg1() - write Clock enable/disable configuration
|
||||
* into the PRCI
|
||||
* @pd: PRCI context
|
||||
* @pwd: PRCI WRPLL metadata
|
||||
* @enable: Clock enable or disable value
|
||||
*/
|
||||
static void __prci_wrpll_write_cfg1(struct __prci_data *pd,
|
||||
struct __prci_wrpll_data *pwd,
|
||||
u32 enable)
|
||||
{
|
||||
__prci_writel(enable, pwd->cfg1_offs, pd);
|
||||
}
|
||||
|
||||
/*
|
||||
* Linux clock framework integration
|
||||
*
|
||||
* See the Linux clock framework documentation for more information on
|
||||
* these functions.
|
||||
*/
|
||||
|
||||
unsigned long sifive_prci_wrpll_recalc_rate(struct clk_hw *hw,
|
||||
unsigned long parent_rate)
|
||||
{
|
||||
struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
|
||||
struct __prci_wrpll_data *pwd = pc->pwd;
|
||||
|
||||
return wrpll_calc_output_rate(&pwd->c, parent_rate);
|
||||
}
|
||||
|
||||
long sifive_prci_wrpll_round_rate(struct clk_hw *hw,
|
||||
unsigned long rate,
|
||||
unsigned long *parent_rate)
|
||||
{
|
||||
struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
|
||||
struct __prci_wrpll_data *pwd = pc->pwd;
|
||||
struct wrpll_cfg c;
|
||||
|
||||
memcpy(&c, &pwd->c, sizeof(c));
|
||||
|
||||
wrpll_configure_for_rate(&c, rate, *parent_rate);
|
||||
|
||||
return wrpll_calc_output_rate(&c, *parent_rate);
|
||||
}
|
||||
|
||||
int sifive_prci_wrpll_set_rate(struct clk_hw *hw,
|
||||
unsigned long rate, unsigned long parent_rate)
|
||||
{
|
||||
struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
|
||||
struct __prci_wrpll_data *pwd = pc->pwd;
|
||||
struct __prci_data *pd = pc->pd;
|
||||
int r;
|
||||
|
||||
r = wrpll_configure_for_rate(&pwd->c, rate, parent_rate);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
if (pwd->enable_bypass)
|
||||
pwd->enable_bypass(pd);
|
||||
|
||||
__prci_wrpll_write_cfg0(pd, pwd, &pwd->c);
|
||||
|
||||
udelay(wrpll_calc_max_lock_us(&pwd->c));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int sifive_clk_is_enabled(struct clk_hw *hw)
|
||||
{
|
||||
struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
|
||||
struct __prci_wrpll_data *pwd = pc->pwd;
|
||||
struct __prci_data *pd = pc->pd;
|
||||
u32 r;
|
||||
|
||||
r = __prci_readl(pd, pwd->cfg1_offs);
|
||||
|
||||
if (r & PRCI_COREPLLCFG1_CKE_MASK)
|
||||
return 1;
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
int sifive_prci_clock_enable(struct clk_hw *hw)
|
||||
{
|
||||
struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
|
||||
struct __prci_wrpll_data *pwd = pc->pwd;
|
||||
struct __prci_data *pd = pc->pd;
|
||||
|
||||
if (sifive_clk_is_enabled(hw))
|
||||
return 0;
|
||||
|
||||
__prci_wrpll_write_cfg1(pd, pwd, PRCI_COREPLLCFG1_CKE_MASK);
|
||||
|
||||
if (pwd->disable_bypass)
|
||||
pwd->disable_bypass(pd);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void sifive_prci_clock_disable(struct clk_hw *hw)
|
||||
{
|
||||
struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
|
||||
struct __prci_wrpll_data *pwd = pc->pwd;
|
||||
struct __prci_data *pd = pc->pd;
|
||||
u32 r;
|
||||
|
||||
if (pwd->enable_bypass)
|
||||
pwd->enable_bypass(pd);
|
||||
|
||||
r = __prci_readl(pd, pwd->cfg1_offs);
|
||||
r &= ~PRCI_COREPLLCFG1_CKE_MASK;
|
||||
|
||||
__prci_wrpll_write_cfg1(pd, pwd, r);
|
||||
}
|
||||
|
||||
/* TLCLKSEL clock integration */
|
||||
|
||||
unsigned long sifive_prci_tlclksel_recalc_rate(struct clk_hw *hw,
|
||||
unsigned long parent_rate)
|
||||
{
|
||||
struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
|
||||
struct __prci_data *pd = pc->pd;
|
||||
u32 v;
|
||||
u8 div;
|
||||
|
||||
v = __prci_readl(pd, PRCI_CLKMUXSTATUSREG_OFFSET);
|
||||
v &= PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK;
|
||||
div = v ? 1 : 2;
|
||||
|
||||
return div_u64(parent_rate, div);
|
||||
}
|
||||
|
||||
/* HFPCLK clock integration */
|
||||
|
||||
unsigned long sifive_prci_hfpclkplldiv_recalc_rate(struct clk_hw *hw,
|
||||
unsigned long parent_rate)
|
||||
{
|
||||
struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
|
||||
struct __prci_data *pd = pc->pd;
|
||||
u32 div = __prci_readl(pd, PRCI_HFPCLKPLLDIV_OFFSET);
|
||||
|
||||
return div_u64(parent_rate, div + 2);
|
||||
}
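
/*
 * Illustrative example (values assumed, not from hardware documentation):
 * with the HFPCLKPLLDIV register reading 0 and a 2.4 GHz HFPCLKPLL output,
 * the resulting pclk rate is 2400000000 / (0 + 2) = 1.2 GHz.
 */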
|
||||
|
||||
/*
|
||||
* Core clock mux control
|
||||
*/
|
||||
|
||||
/**
|
||||
* sifive_prci_coreclksel_use_hfclk() - switch the CORECLK mux to output HFCLK
|
||||
* @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
|
||||
*
|
||||
* Switch the CORECLK mux to the HFCLK input source; return once complete.
|
||||
*
|
||||
* Context: Any context. Caller must prevent concurrent changes to the
|
||||
* PRCI_CORECLKSEL_OFFSET register.
|
||||
*/
|
||||
void sifive_prci_coreclksel_use_hfclk(struct __prci_data *pd)
|
||||
{
|
||||
u32 r;
|
||||
|
||||
r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
|
||||
r |= PRCI_CORECLKSEL_CORECLKSEL_MASK;
|
||||
__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
|
||||
|
||||
r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
|
||||
}
|
||||
|
||||
/**
|
||||
* sifive_prci_coreclksel_use_corepll() - switch the CORECLK mux to output
|
||||
* COREPLL
|
||||
* @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
|
||||
*
|
||||
* Switch the CORECLK mux to the COREPLL output clock; return once complete.
|
||||
*
|
||||
* Context: Any context. Caller must prevent concurrent changes to the
|
||||
* PRCI_CORECLKSEL_OFFSET register.
|
||||
*/
|
||||
void sifive_prci_coreclksel_use_corepll(struct __prci_data *pd)
|
||||
{
|
||||
u32 r;
|
||||
|
||||
r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
|
||||
r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
|
||||
__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
|
||||
|
||||
r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
|
||||
}
|
||||
|
||||
/**
|
||||
* sifive_prci_coreclksel_use_final_corepll() - switch the CORECLK mux to output
|
||||
* FINAL_COREPLL
|
||||
* @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
|
||||
*
|
||||
* Switch the CORECLK mux to the final COREPLL output clock; return once
|
||||
* complete.
|
||||
*
|
||||
* Context: Any context. Caller must prevent concurrent changes to the
|
||||
* PRCI_CORECLKSEL_OFFSET register.
|
||||
*/
|
||||
void sifive_prci_coreclksel_use_final_corepll(struct __prci_data *pd)
|
||||
{
|
||||
u32 r;
|
||||
|
||||
r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
|
||||
r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
|
||||
__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
|
||||
|
||||
r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
|
||||
}
|
||||
|
||||
/**
|
||||
* sifive_prci_corepllsel_use_dvfscorepll() - switch the COREPLL mux to
|
||||
* output DVFS_COREPLL
|
||||
* @pd: struct __prci_data * for the PRCI containing the COREPLL mux reg
|
||||
*
|
||||
* Switch the COREPLL mux to the DVFSCOREPLL output clock; return once complete.
|
||||
*
|
||||
* Context: Any context. Caller must prevent concurrent changes to the
|
||||
* PRCI_COREPLLSEL_OFFSET register.
|
||||
*/
|
||||
void sifive_prci_corepllsel_use_dvfscorepll(struct __prci_data *pd)
|
||||
{
|
||||
u32 r;
|
||||
|
||||
r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
|
||||
r |= PRCI_COREPLLSEL_COREPLLSEL_MASK;
|
||||
__prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);
|
||||
|
||||
r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET); /* barrier */
|
||||
}
|
||||
|
||||
/**
|
||||
* sifive_prci_corepllsel_use_corepll() - switch the COREPLL mux to
|
||||
* output COREPLL
|
||||
* @pd: struct __prci_data * for the PRCI containing the COREPLL mux reg
|
||||
*
|
||||
* Switch the COREPLL mux to the COREPLL output clock; return once complete.
|
||||
*
|
||||
* Context: Any context. Caller must prevent concurrent changes to the
|
||||
* PRCI_COREPLLSEL_OFFSET register.
|
||||
*/
|
||||
void sifive_prci_corepllsel_use_corepll(struct __prci_data *pd)
|
||||
{
|
||||
u32 r;
|
||||
|
||||
r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
|
||||
r &= ~PRCI_COREPLLSEL_COREPLLSEL_MASK;
|
||||
__prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);
|
||||
|
||||
r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET); /* barrier */
|
||||
}
|
||||
|
||||
/**
|
||||
* sifive_prci_hfpclkpllsel_use_hfclk() - switch the HFPCLKPLL mux to
|
||||
* output HFCLK
|
||||
* @pd: struct __prci_data * for the PRCI containing the HFPCLKPLL mux reg
|
||||
*
|
||||
* Switch the HFPCLKPLL mux to the HFCLK input source; return once complete.
|
||||
*
|
||||
* Context: Any context. Caller must prevent concurrent changes to the
|
||||
* PRCI_HFPCLKPLLSEL_OFFSET register.
|
||||
*/
|
||||
void sifive_prci_hfpclkpllsel_use_hfclk(struct __prci_data *pd)
|
||||
{
|
||||
u32 r;
|
||||
|
||||
r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
|
||||
r |= PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK;
|
||||
__prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);
|
||||
|
||||
r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET); /* barrier */
|
||||
}
|
||||
|
||||
/**
|
||||
* sifive_prci_hfpclkpllsel_use_hfpclkpll() - switch the HFPCLKPLL mux to
|
||||
* output HFPCLKPLL
|
||||
* @pd: struct __prci_data * for the PRCI containing the HFPCLKPLL mux reg
|
||||
*
|
||||
* Switch the HFPCLKPLL mux to the HFPCLKPLL output clock; return once complete.
|
||||
*
|
||||
* Context: Any context. Caller must prevent concurrent changes to the
|
||||
* PRCI_HFPCLKPLLSEL_OFFSET register.
|
||||
*/
|
||||
void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd)
|
||||
{
|
||||
u32 r;
|
||||
|
||||
r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
|
||||
r &= ~PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK;
|
||||
__prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);
|
||||
|
||||
r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET); /* barrier */
|
||||
}
|
||||
|
||||
/**
 * __prci_register_clocks() - register clock controls in the PRCI
 * @dev: Linux struct device
 * @pd: pointer to the PRCI per-device instance data
 * @desc: pointer to the per-SoC clock description
 *
 * Register the list of clock controls described in @desc with the Linux
 * clock framework.
 *
 * Return: 0 upon success or a negative error code upon failure.
 */
|
||||
static int __prci_register_clocks(struct device *dev, struct __prci_data *pd,
|
||||
const struct prci_clk_desc *desc)
|
||||
{
|
||||
struct clk_init_data init = { };
|
||||
struct __prci_clock *pic;
|
||||
int parent_count, i, r;
|
||||
|
||||
parent_count = of_clk_get_parent_count(dev->of_node);
|
||||
if (parent_count != EXPECTED_CLK_PARENT_COUNT) {
|
||||
dev_err(dev, "expected only two parent clocks, found %d\n",
|
||||
parent_count);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Register PLLs */
|
||||
for (i = 0; i < desc->num_clks; ++i) {
|
||||
pic = &(desc->clks[i]);
|
||||
|
||||
init.name = pic->name;
|
||||
init.parent_names = &pic->parent_name;
|
||||
init.num_parents = 1;
|
||||
init.ops = pic->ops;
|
||||
pic->hw.init = &init;
|
||||
|
||||
pic->pd = pd;
|
||||
|
||||
if (pic->pwd)
|
||||
__prci_wrpll_read_cfg0(pd, pic->pwd);
|
||||
|
||||
r = devm_clk_hw_register(dev, &pic->hw);
|
||||
if (r) {
|
||||
dev_warn(dev, "Failed to register clock %s: %d\n",
|
||||
init.name, r);
|
||||
return r;
|
||||
}
|
||||
|
||||
r = clk_hw_register_clkdev(&pic->hw, pic->name, dev_name(dev));
|
||||
if (r) {
|
||||
dev_warn(dev, "Failed to register clkdev for %s: %d\n",
|
||||
init.name, r);
|
||||
return r;
|
||||
}
|
||||
|
||||
pd->hw_clks.hws[i] = &pic->hw;
|
||||
}
|
||||
|
||||
pd->hw_clks.num = i;
|
||||
|
||||
r = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
|
||||
&pd->hw_clks);
|
||||
if (r) {
|
||||
dev_err(dev, "could not add hw_provider: %d\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
 * sifive_prci_probe() - initialize PRCI data and check the parent count
 * @pdev: platform device pointer for the PRCI
 *
 * Return: 0 upon success or a negative error code upon failure.
 */
|
||||
static int sifive_prci_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct resource *res;
|
||||
struct __prci_data *pd;
|
||||
const struct prci_clk_desc *desc;
|
||||
int r;
|
||||
|
||||
desc = of_device_get_match_data(&pdev->dev);
|
||||
|
||||
pd = devm_kzalloc(dev, struct_size(pd, hw_clks.hws, desc->num_clks), GFP_KERNEL);
|
||||
if (!pd)
|
||||
return -ENOMEM;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
pd->va = devm_ioremap_resource(dev, res);
|
||||
if (IS_ERR(pd->va))
|
||||
return PTR_ERR(pd->va);
|
||||
|
||||
r = __prci_register_clocks(dev, pd, desc);
|
||||
if (r) {
|
||||
dev_err(dev, "could not register clocks: %d\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
dev_dbg(dev, "SiFive PRCI probed\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id sifive_prci_of_match[] = {
|
||||
{.compatible = "sifive,fu540-c000-prci", .data = &prci_clk_fu540},
|
||||
{.compatible = "sifive,fu740-c000-prci", .data = &prci_clk_fu740},
|
||||
{}
|
||||
};
|
||||
|
||||
static struct platform_driver sifive_prci_driver = {
|
||||
.driver = {
|
||||
.name = "sifive-clk-prci",
|
||||
.of_match_table = sifive_prci_of_match,
|
||||
},
|
||||
.probe = sifive_prci_probe,
|
||||
};
|
||||
|
||||
static int __init sifive_prci_init(void)
|
||||
{
|
||||
return platform_driver_register(&sifive_prci_driver);
|
||||
}
|
||||
core_initcall(sifive_prci_init);
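
/*
 * Illustrative consumer-side sketch (not part of this driver): a peripheral
 * driver whose device-tree node references one of the PRCI output clocks can
 * use the generic clk API, which ends up invoking the clk_ops registered
 * above (e.g. sifive_prci_clock_enable() and sifive_prci_wrpll_recalc_rate()).
 * The "example" device and function names below are hypothetical.
 */
#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_consumer_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	/* Look up the first clock referenced by this device's DT node */
	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Prepare and enable it; for a PRCI PLL this sets the CKE bit */
	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "clock running at %lu Hz\n", clk_get_rate(clk));

	return 0;
}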
|
|
@ -0,0 +1,299 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* Copyright (C) 2018-2019 SiFive, Inc.
|
||||
* Wesley Terpstra
|
||||
* Paul Walmsley
|
||||
* Zong Li
|
||||
*/
|
||||
|
||||
#ifndef __SIFIVE_CLK_SIFIVE_PRCI_H
|
||||
#define __SIFIVE_CLK_SIFIVE_PRCI_H
|
||||
|
||||
#include <linux/clk/analogbits-wrpll-cln28hpc.h>
|
||||
#include <linux/clk-provider.h>
|
||||
#include <linux/platform_device.h>
|
||||
|
||||
/*
|
||||
* EXPECTED_CLK_PARENT_COUNT: how many parent clocks this driver expects:
|
||||
* hfclk and rtcclk
|
||||
*/
|
||||
#define EXPECTED_CLK_PARENT_COUNT 2
|
||||
|
||||
/*
|
||||
* Register offsets and bitmasks
|
||||
*/
|
||||
|
||||
/* COREPLLCFG0 */
|
||||
#define PRCI_COREPLLCFG0_OFFSET 0x4
|
||||
#define PRCI_COREPLLCFG0_DIVR_SHIFT 0
|
||||
#define PRCI_COREPLLCFG0_DIVR_MASK (0x3f << PRCI_COREPLLCFG0_DIVR_SHIFT)
|
||||
#define PRCI_COREPLLCFG0_DIVF_SHIFT 6
|
||||
#define PRCI_COREPLLCFG0_DIVF_MASK (0x1ff << PRCI_COREPLLCFG0_DIVF_SHIFT)
|
||||
#define PRCI_COREPLLCFG0_DIVQ_SHIFT 15
|
||||
#define PRCI_COREPLLCFG0_DIVQ_MASK (0x7 << PRCI_COREPLLCFG0_DIVQ_SHIFT)
|
||||
#define PRCI_COREPLLCFG0_RANGE_SHIFT 18
|
||||
#define PRCI_COREPLLCFG0_RANGE_MASK (0x7 << PRCI_COREPLLCFG0_RANGE_SHIFT)
|
||||
#define PRCI_COREPLLCFG0_BYPASS_SHIFT 24
|
||||
#define PRCI_COREPLLCFG0_BYPASS_MASK (0x1 << PRCI_COREPLLCFG0_BYPASS_SHIFT)
|
||||
#define PRCI_COREPLLCFG0_FSE_SHIFT 25
|
||||
#define PRCI_COREPLLCFG0_FSE_MASK (0x1 << PRCI_COREPLLCFG0_FSE_SHIFT)
|
||||
#define PRCI_COREPLLCFG0_LOCK_SHIFT 31
|
||||
#define PRCI_COREPLLCFG0_LOCK_MASK (0x1 << PRCI_COREPLLCFG0_LOCK_SHIFT)
|
||||
|
||||
/* COREPLLCFG1 */
|
||||
#define PRCI_COREPLLCFG1_OFFSET 0x8
|
||||
#define PRCI_COREPLLCFG1_CKE_SHIFT 31
|
||||
#define PRCI_COREPLLCFG1_CKE_MASK (0x1 << PRCI_COREPLLCFG1_CKE_SHIFT)
|
||||
|
||||
/* DDRPLLCFG0 */
|
||||
#define PRCI_DDRPLLCFG0_OFFSET 0xc
|
||||
#define PRCI_DDRPLLCFG0_DIVR_SHIFT 0
|
||||
#define PRCI_DDRPLLCFG0_DIVR_MASK (0x3f << PRCI_DDRPLLCFG0_DIVR_SHIFT)
|
||||
#define PRCI_DDRPLLCFG0_DIVF_SHIFT 6
|
||||
#define PRCI_DDRPLLCFG0_DIVF_MASK (0x1ff << PRCI_DDRPLLCFG0_DIVF_SHIFT)
|
||||
#define PRCI_DDRPLLCFG0_DIVQ_SHIFT 15
|
||||
#define PRCI_DDRPLLCFG0_DIVQ_MASK (0x7 << PRCI_DDRPLLCFG0_DIVQ_SHIFT)
|
||||
#define PRCI_DDRPLLCFG0_RANGE_SHIFT 18
|
||||
#define PRCI_DDRPLLCFG0_RANGE_MASK (0x7 << PRCI_DDRPLLCFG0_RANGE_SHIFT)
|
||||
#define PRCI_DDRPLLCFG0_BYPASS_SHIFT 24
|
||||
#define PRCI_DDRPLLCFG0_BYPASS_MASK (0x1 << PRCI_DDRPLLCFG0_BYPASS_SHIFT)
|
||||
#define PRCI_DDRPLLCFG0_FSE_SHIFT 25
|
||||
#define PRCI_DDRPLLCFG0_FSE_MASK (0x1 << PRCI_DDRPLLCFG0_FSE_SHIFT)
|
||||
#define PRCI_DDRPLLCFG0_LOCK_SHIFT 31
|
||||
#define PRCI_DDRPLLCFG0_LOCK_MASK (0x1 << PRCI_DDRPLLCFG0_LOCK_SHIFT)
|
||||
|
||||
/* DDRPLLCFG1 */
|
||||
#define PRCI_DDRPLLCFG1_OFFSET 0x10
|
||||
#define PRCI_DDRPLLCFG1_CKE_SHIFT 31
|
||||
#define PRCI_DDRPLLCFG1_CKE_MASK (0x1 << PRCI_DDRPLLCFG1_CKE_SHIFT)
|
||||
|
||||
/* GEMGXLPLLCFG0 */
|
||||
#define PRCI_GEMGXLPLLCFG0_OFFSET 0x1c
|
||||
#define PRCI_GEMGXLPLLCFG0_DIVR_SHIFT 0
|
||||
#define PRCI_GEMGXLPLLCFG0_DIVR_MASK (0x3f << PRCI_GEMGXLPLLCFG0_DIVR_SHIFT)
|
||||
#define PRCI_GEMGXLPLLCFG0_DIVF_SHIFT 6
|
||||
#define PRCI_GEMGXLPLLCFG0_DIVF_MASK (0x1ff << PRCI_GEMGXLPLLCFG0_DIVF_SHIFT)
|
||||
#define PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT 15
|
||||
#define PRCI_GEMGXLPLLCFG0_DIVQ_MASK (0x7 << PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT)
|
||||
#define PRCI_GEMGXLPLLCFG0_RANGE_SHIFT 18
|
||||
#define PRCI_GEMGXLPLLCFG0_RANGE_MASK (0x7 << PRCI_GEMGXLPLLCFG0_RANGE_SHIFT)
|
||||
#define PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT 24
|
||||
#define PRCI_GEMGXLPLLCFG0_BYPASS_MASK (0x1 << PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT)
|
||||
#define PRCI_GEMGXLPLLCFG0_FSE_SHIFT 25
|
||||
#define PRCI_GEMGXLPLLCFG0_FSE_MASK (0x1 << PRCI_GEMGXLPLLCFG0_FSE_SHIFT)
|
||||
#define PRCI_GEMGXLPLLCFG0_LOCK_SHIFT 31
|
||||
#define PRCI_GEMGXLPLLCFG0_LOCK_MASK (0x1 << PRCI_GEMGXLPLLCFG0_LOCK_SHIFT)
|
||||
|
||||
/* GEMGXLPLLCFG1 */
|
||||
#define PRCI_GEMGXLPLLCFG1_OFFSET 0x20
|
||||
#define PRCI_GEMGXLPLLCFG1_CKE_SHIFT 31
|
||||
#define PRCI_GEMGXLPLLCFG1_CKE_MASK (0x1 << PRCI_GEMGXLPLLCFG1_CKE_SHIFT)
|
||||
|
||||
/* CORECLKSEL */
|
||||
#define PRCI_CORECLKSEL_OFFSET 0x24
|
||||
#define PRCI_CORECLKSEL_CORECLKSEL_SHIFT 0
|
||||
#define PRCI_CORECLKSEL_CORECLKSEL_MASK \
|
||||
(0x1 << PRCI_CORECLKSEL_CORECLKSEL_SHIFT)
|
||||
|
||||
/* DEVICESRESETREG */
|
||||
#define PRCI_DEVICESRESETREG_OFFSET 0x28
|
||||
#define PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_SHIFT 0
|
||||
#define PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_MASK \
|
||||
(0x1 << PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_SHIFT)
|
||||
#define PRCI_DEVICESRESETREG_DDR_AXI_RST_N_SHIFT 1
|
||||
#define PRCI_DEVICESRESETREG_DDR_AXI_RST_N_MASK \
|
||||
(0x1 << PRCI_DEVICESRESETREG_DDR_AXI_RST_N_SHIFT)
|
||||
#define PRCI_DEVICESRESETREG_DDR_AHB_RST_N_SHIFT 2
|
||||
#define PRCI_DEVICESRESETREG_DDR_AHB_RST_N_MASK \
|
||||
(0x1 << PRCI_DEVICESRESETREG_DDR_AHB_RST_N_SHIFT)
|
||||
#define PRCI_DEVICESRESETREG_DDR_PHY_RST_N_SHIFT 3
|
||||
#define PRCI_DEVICESRESETREG_DDR_PHY_RST_N_MASK \
|
||||
(0x1 << PRCI_DEVICESRESETREG_DDR_PHY_RST_N_SHIFT)
|
||||
#define PRCI_DEVICESRESETREG_GEMGXL_RST_N_SHIFT 5
|
||||
#define PRCI_DEVICESRESETREG_GEMGXL_RST_N_MASK \
|
||||
(0x1 << PRCI_DEVICESRESETREG_GEMGXL_RST_N_SHIFT)
|
||||
#define PRCI_DEVICESRESETREG_CHIPLINK_RST_N_SHIFT 6
|
||||
#define PRCI_DEVICESRESETREG_CHIPLINK_RST_N_MASK \
|
||||
(0x1 << PRCI_DEVICESRESETREG_CHIPLINK_RST_N_SHIFT)
|
||||
|
||||
/* CLKMUXSTATUSREG */
|
||||
#define PRCI_CLKMUXSTATUSREG_OFFSET 0x2c
|
||||
#define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT 1
|
||||
#define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK \
|
||||
(0x1 << PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT)
|
||||
|
||||
/* CLTXPLLCFG0 */
|
||||
#define PRCI_CLTXPLLCFG0_OFFSET 0x30
|
||||
#define PRCI_CLTXPLLCFG0_DIVR_SHIFT 0
|
||||
#define PRCI_CLTXPLLCFG0_DIVR_MASK (0x3f << PRCI_CLTXPLLCFG0_DIVR_SHIFT)
|
||||
#define PRCI_CLTXPLLCFG0_DIVF_SHIFT 6
|
||||
#define PRCI_CLTXPLLCFG0_DIVF_MASK (0x1ff << PRCI_CLTXPLLCFG0_DIVF_SHIFT)
|
||||
#define PRCI_CLTXPLLCFG0_DIVQ_SHIFT 15
|
||||
#define PRCI_CLTXPLLCFG0_DIVQ_MASK (0x7 << PRCI_CLTXPLLCFG0_DIVQ_SHIFT)
|
||||
#define PRCI_CLTXPLLCFG0_RANGE_SHIFT 18
|
||||
#define PRCI_CLTXPLLCFG0_RANGE_MASK (0x7 << PRCI_CLTXPLLCFG0_RANGE_SHIFT)
|
||||
#define PRCI_CLTXPLLCFG0_BYPASS_SHIFT 24
|
||||
#define PRCI_CLTXPLLCFG0_BYPASS_MASK (0x1 << PRCI_CLTXPLLCFG0_BYPASS_SHIFT)
|
||||
#define PRCI_CLTXPLLCFG0_FSE_SHIFT 25
|
||||
#define PRCI_CLTXPLLCFG0_FSE_MASK (0x1 << PRCI_CLTXPLLCFG0_FSE_SHIFT)
|
||||
#define PRCI_CLTXPLLCFG0_LOCK_SHIFT 31
|
||||
#define PRCI_CLTXPLLCFG0_LOCK_MASK (0x1 << PRCI_CLTXPLLCFG0_LOCK_SHIFT)
|
||||
|
||||
/* CLTXPLLCFG1 */
|
||||
#define PRCI_CLTXPLLCFG1_OFFSET 0x34
|
||||
#define PRCI_CLTXPLLCFG1_CKE_SHIFT 31
|
||||
#define PRCI_CLTXPLLCFG1_CKE_MASK (0x1 << PRCI_CLTXPLLCFG1_CKE_SHIFT)
|
||||
|
||||
/* DVFSCOREPLLCFG0 */
|
||||
#define PRCI_DVFSCOREPLLCFG0_OFFSET 0x38
|
||||
|
||||
/* DVFSCOREPLLCFG1 */
|
||||
#define PRCI_DVFSCOREPLLCFG1_OFFSET 0x3c
|
||||
#define PRCI_DVFSCOREPLLCFG1_CKE_SHIFT 31
|
||||
#define PRCI_DVFSCOREPLLCFG1_CKE_MASK (0x1 << PRCI_DVFSCOREPLLCFG1_CKE_SHIFT)
|
||||
|
||||
/* COREPLLSEL */
|
||||
#define PRCI_COREPLLSEL_OFFSET 0x40
|
||||
#define PRCI_COREPLLSEL_COREPLLSEL_SHIFT 0
|
||||
#define PRCI_COREPLLSEL_COREPLLSEL_MASK \
|
||||
(0x1 << PRCI_COREPLLSEL_COREPLLSEL_SHIFT)
|
||||
|
||||
/* HFPCLKPLLCFG0 */
|
||||
#define PRCI_HFPCLKPLLCFG0_OFFSET 0x50
|
||||
#define PRCI_HFPCLKPLL_CFG0_DIVR_SHIFT 0
|
||||
#define PRCI_HFPCLKPLL_CFG0_DIVR_MASK \
|
||||
(0x3f << PRCI_HFPCLKPLLCFG0_DIVR_SHIFT)
|
||||
#define PRCI_HFPCLKPLL_CFG0_DIVF_SHIFT 6
|
||||
#define PRCI_HFPCLKPLL_CFG0_DIVF_MASK \
|
||||
(0x1ff << PRCI_HFPCLKPLLCFG0_DIVF_SHIFT)
|
||||
#define PRCI_HFPCLKPLL_CFG0_DIVQ_SHIFT 15
|
||||
#define PRCI_HFPCLKPLL_CFG0_DIVQ_MASK \
|
||||
(0x7 << PRCI_HFPCLKPLLCFG0_DIVQ_SHIFT)
|
||||
#define PRCI_HFPCLKPLL_CFG0_RANGE_SHIFT 18
|
||||
#define PRCI_HFPCLKPLL_CFG0_RANGE_MASK \
|
||||
(0x7 << PRCI_HFPCLKPLLCFG0_RANGE_SHIFT)
|
||||
#define PRCI_HFPCLKPLL_CFG0_BYPASS_SHIFT 24
|
||||
#define PRCI_HFPCLKPLL_CFG0_BYPASS_MASK \
|
||||
(0x1 << PRCI_HFPCLKPLLCFG0_BYPASS_SHIFT)
|
||||
#define PRCI_HFPCLKPLL_CFG0_FSE_SHIFT 25
|
||||
#define PRCI_HFPCLKPLL_CFG0_FSE_MASK \
|
||||
(0x1 << PRCI_HFPCLKPLLCFG0_FSE_SHIFT)
|
||||
#define PRCI_HFPCLKPLL_CFG0_LOCK_SHIFT 31
|
||||
#define PRCI_HFPCLKPLL_CFG0_LOCK_MASK \
|
||||
(0x1 << PRCI_HFPCLKPLLCFG0_LOCK_SHIFT)
|
||||
|
||||
/* HFPCLKPLLCFG1 */
|
||||
#define PRCI_HFPCLKPLLCFG1_OFFSET 0x54
|
||||
#define PRCI_HFPCLKPLLCFG1_CKE_SHIFT 31
|
||||
#define PRCI_HFPCLKPLLCFG1_CKE_MASK \
|
||||
(0x1 << PRCI_HFPCLKPLLCFG1_CKE_SHIFT)
|
||||
|
||||
/* HFPCLKPLLSEL */
|
||||
#define PRCI_HFPCLKPLLSEL_OFFSET 0x58
|
||||
#define PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_SHIFT 0
|
||||
#define PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK \
|
||||
(0x1 << PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_SHIFT)
|
||||
|
||||
/* HFPCLKPLLDIV */
|
||||
#define PRCI_HFPCLKPLLDIV_OFFSET 0x5c
|
||||
|
||||
/* PRCIPLL */
|
||||
#define PRCI_PRCIPLL_OFFSET 0xe0
|
||||
|
||||
/* PROCMONCFG */
|
||||
#define PRCI_PROCMONCFG_OFFSET 0xf0
|
||||
|
||||
/*
|
||||
* Private structures
|
||||
*/
|
||||
|
||||
/**
|
||||
* struct __prci_data - per-device-instance data
|
||||
* @va: base virtual address of the PRCI IP block
|
||||
* @hw_clks: encapsulates struct clk_hw records
|
||||
*
|
||||
* PRCI per-device instance data
|
||||
*/
|
||||
struct __prci_data {
|
||||
void __iomem *va;
|
||||
struct clk_hw_onecell_data hw_clks;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct __prci_wrpll_data - WRPLL configuration and integration data
|
||||
* @c: WRPLL current configuration record
|
||||
* @enable_bypass: fn ptr to code to bypass the WRPLL (if applicable; else NULL)
|
||||
* @disable_bypass: fn ptr to code to not bypass the WRPLL (or NULL)
|
||||
* @cfg0_offs: WRPLL CFG0 register offset (in bytes) from the PRCI base address
|
||||
* @cfg1_offs: WRPLL CFG1 register offset (in bytes) from the PRCI base address
|
||||
*
|
||||
* @enable_bypass and @disable_bypass are used for WRPLL instances
|
||||
* that contain a separate external glitchless clock mux downstream
|
||||
* from the PLL. The WRPLL internal bypass mux is not glitchless.
|
||||
*/
|
||||
struct __prci_wrpll_data {
|
||||
struct wrpll_cfg c;
|
||||
void (*enable_bypass)(struct __prci_data *pd);
|
||||
void (*disable_bypass)(struct __prci_data *pd);
|
||||
u8 cfg0_offs;
|
||||
u8 cfg1_offs;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct __prci_clock - describes a clock device managed by PRCI
|
||||
* @name: user-readable clock name string - should match the manual
|
||||
* @parent_name: parent name for this clock
|
||||
* @ops: struct clk_ops for the Linux clock framework to use for control
|
||||
* @hw: Linux-private clock data
|
||||
* @pwd: WRPLL-specific data, associated with this clock (if not NULL)
|
||||
* @pd: PRCI-specific data associated with this clock (if not NULL)
|
||||
*
|
||||
* PRCI clock data. Used by the PRCI driver to register PRCI-provided
|
||||
* clocks to the Linux clock infrastructure.
|
||||
*/
|
||||
struct __prci_clock {
|
||||
const char *name;
|
||||
const char *parent_name;
|
||||
const struct clk_ops *ops;
|
||||
struct clk_hw hw;
|
||||
struct __prci_wrpll_data *pwd;
|
||||
struct __prci_data *pd;
|
||||
};
|
||||
|
||||
#define clk_hw_to_prci_clock(pwd) container_of(pwd, struct __prci_clock, hw)
|
||||
|
||||
/*
|
||||
* struct prci_clk_desc - describes the information of clocks of each SoCs
|
||||
* @clks: point to a array of __prci_clock
|
||||
* @num_clks: the number of element of clks
|
||||
*/
|
||||
struct prci_clk_desc {
|
||||
struct __prci_clock *clks;
|
||||
size_t num_clks;
|
||||
};
|
||||
|
||||
/* Core clock mux control */
|
||||
void sifive_prci_coreclksel_use_hfclk(struct __prci_data *pd);
|
||||
void sifive_prci_coreclksel_use_corepll(struct __prci_data *pd);
|
||||
void sifive_prci_coreclksel_use_final_corepll(struct __prci_data *pd);
|
||||
void sifive_prci_corepllsel_use_dvfscorepll(struct __prci_data *pd);
|
||||
void sifive_prci_corepllsel_use_corepll(struct __prci_data *pd);
|
||||
void sifive_prci_hfpclkpllsel_use_hfclk(struct __prci_data *pd);
|
||||
void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd);
|
||||
|
||||
/* Linux clock framework integration */
|
||||
long sifive_prci_wrpll_round_rate(struct clk_hw *hw, unsigned long rate,
|
||||
unsigned long *parent_rate);
|
||||
int sifive_prci_wrpll_set_rate(struct clk_hw *hw, unsigned long rate,
|
||||
unsigned long parent_rate);
|
||||
int sifive_clk_is_enabled(struct clk_hw *hw);
|
||||
int sifive_prci_clock_enable(struct clk_hw *hw);
|
||||
void sifive_prci_clock_disable(struct clk_hw *hw);
|
||||
unsigned long sifive_prci_wrpll_recalc_rate(struct clk_hw *hw,
|
||||
unsigned long parent_rate);
|
||||
unsigned long sifive_prci_tlclksel_recalc_rate(struct clk_hw *hw,
|
||||
unsigned long parent_rate);
|
||||
unsigned long sifive_prci_hfpclkplldiv_recalc_rate(struct clk_hw *hw,
|
||||
unsigned long parent_rate);
|
||||
|
||||
#endif /* __SIFIVE_CLK_SIFIVE_PRCI_H */
|
|
@ -1,6 +1,6 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (C) 2016 NVIDIA Corporation
|
||||
* Copyright (C) 2016-2020 NVIDIA Corporation
|
||||
*/
|
||||
|
||||
#include <linux/clk-provider.h>
|
||||
|
@ -174,7 +174,7 @@ static long tegra_bpmp_clk_round_rate(struct clk_hw *hw, unsigned long rate,
|
|||
int err;
|
||||
|
||||
memset(&request, 0, sizeof(request));
|
||||
request.rate = rate;
|
||||
request.rate = min_t(u64, rate, S64_MAX);
|
||||
|
||||
memset(&msg, 0, sizeof(msg));
|
||||
msg.cmd = CMD_CLK_ROUND_RATE;
|
||||
|
@ -256,7 +256,7 @@ static int tegra_bpmp_clk_set_rate(struct clk_hw *hw, unsigned long rate,
|
|||
struct tegra_bpmp_clk_message msg;
|
||||
|
||||
memset(&request, 0, sizeof(request));
|
||||
request.rate = rate;
|
||||
request.rate = min_t(u64, rate, S64_MAX);
|
||||
|
||||
memset(&msg, 0, sizeof(msg));
|
||||
msg.cmd = CMD_CLK_SET_RATE;
|
||||
|
|
|
@ -1856,13 +1856,13 @@ static int dfll_fetch_pwm_params(struct tegra_dfll *td)
|
|||
&td->reg_init_uV);
|
||||
if (!ret) {
|
||||
dev_err(td->dev, "couldn't get initialized voltage\n");
|
||||
return ret;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = read_dt_param(td, "nvidia,pwm-period-nanoseconds", &pwm_period);
|
||||
if (!ret) {
|
||||
dev_err(td->dev, "couldn't get PWM period\n");
|
||||
return ret;
|
||||
return -EINVAL;
|
||||
}
|
||||
td->pwm_rate = (NSEC_PER_SEC / pwm_period) * (MAX_DFLL_VOLTAGES - 1);
|
||||
|
||||
|
|
|
@ -227,6 +227,7 @@ enum clk_id {
|
|||
tegra_clk_sdmmc4,
|
||||
tegra_clk_sdmmc4_8,
|
||||
tegra_clk_se,
|
||||
tegra_clk_se_10,
|
||||
tegra_clk_soc_therm,
|
||||
tegra_clk_soc_therm_8,
|
||||
tegra_clk_sor0,
|
||||
|
|
|
@ -630,7 +630,7 @@ static struct tegra_periph_init_data periph_clks[] = {
|
|||
INT8("host1x", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_HOST1X, 28, 0, tegra_clk_host1x_8),
|
||||
INT8("host1x", mux_pllc4_out1_pllc_pllc4_out2_pllp_clkm_plla_pllc4_out0, CLK_SOURCE_HOST1X, 28, 0, tegra_clk_host1x_9),
|
||||
INT8("se", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se),
|
||||
INT8("se", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se),
|
||||
INT8("se", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se_10),
|
||||
INT8("2d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_2D, 21, 0, tegra_clk_gr2d_8),
|
||||
INT8("3d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_3D, 24, 0, tegra_clk_gr3d_8),
|
||||
INT8("vic03", mux_pllm_pllc_pllp_plla_pllc2_c3_clkm, CLK_SOURCE_VIC03, 178, 0, tegra_clk_vic03),
|
||||
|
|
|
@ -0,0 +1,23 @@
|
|||
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
 * Copyright (C) 2019 SiFive, Inc.
 * Wesley Terpstra
 * Paul Walmsley
 * Zong Li
 */

#ifndef __DT_BINDINGS_CLOCK_SIFIVE_FU740_PRCI_H
#define __DT_BINDINGS_CLOCK_SIFIVE_FU740_PRCI_H

/* Clock indexes for use by Device Tree data and the PRCI driver */

#define PRCI_CLK_COREPLL	0
#define PRCI_CLK_DDRPLL		1
#define PRCI_CLK_GEMGXLPLL	2
#define PRCI_CLK_DVFSCOREPLL	3
#define PRCI_CLK_HFPCLKPLL	4
#define PRCI_CLK_CLTXPLL	5
#define PRCI_CLK_TLCLK		6
#define PRCI_CLK_PCLK		7

#endif /* __DT_BINDINGS_CLOCK_SIFIVE_FU740_PRCI_H */
|