Merge tag 'drm-msm-next-2023-06-18' of https://gitlab.freedesktop.org/drm/msm into drm-next
Updates for v6.5. This includes a backmerge of drm-next tree to be able to use new DRM DSC helpers.

Core:
+ Add Marijn Suijten as drm/msm reviewer
+ Adreno A660 bindings
+ SM8350 MDSS bindings fix
+ Fix adreno_is_a690() warnings
+ More generic (DRM) and MSM-specific DSC helpers

DP:
+ Removed obsolete USB-PD remains
+ Documented DP compatible string for sm8550 platform

DPU:
+ Enable missing features (DSPP, DSC, split display) on sc8180x, sc8280xp, sm8450
+ Enabled writeback on sc7280
+ Implemented tearcheck support to support vsync on SM8150 and newer platforms
+ Native HDMI output support
+ Dropped unused features: regdma, GC, IGC
+ Fixed the DSC flush operations
+ Simplified QoS handling, removing obsolete and unused features and merging SSPP and WB code paths
+ Reworked dpu_encoder initialisation path
+ Enabled DSPP support on sdm845
+ Disabled color-management if DSPP blocks are not available
+ Added support for DSC 1.2 blocks found on sm8350 and later
+ Added .fb_dirty to fix CMD panels

DSI:
+ Drop powerup quirks in favour of using pre_enable_prev_first for downstream bridges
+ Fixed 14nm DSI PHY programming
+ Added support for DSI and 28nm DSI PHY on MSM8226 platform
+ Make use of DRM and MSM DSC helpers

MDP5:
+ Added support for display controller on MSM8226 platform

GPU:
+ A690 support
+ Don't set IO_PGTABLE_QUIRK_ARM_OUTER_WBWA on devices with coherent SMMU (like A690)
+ Move cmdstream dumping out of fence signaling path
+ Cleanups
+ Support for a6xx devices without GMU (aka "GMU wrapper")
+ a610 support
+ a619_holi support (a619 variant without GMU)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGsUB=tRB4nR6ZCJMuLhro5zN3BQWUSywVYbaipqqDZ_cQ@mail.gmail.com
@@ -29,6 +29,7 @@ properties:
      - items:
          - enum:
              - qcom,sm8450-dp
+             - qcom,sm8550-dp
          - const: qcom,sm8350-dp

  reg:

@ -15,6 +15,7 @@ properties:
|
|||
- items:
|
||||
- enum:
|
||||
- qcom,apq8064-dsi-ctrl
|
||||
- qcom,msm8226-dsi-ctrl
|
||||
- qcom,msm8916-dsi-ctrl
|
||||
- qcom,msm8953-dsi-ctrl
|
||||
- qcom,msm8974-dsi-ctrl
|
||||
|
@ -26,6 +27,8 @@ properties:
|
|||
- qcom,sdm660-dsi-ctrl
|
||||
- qcom,sdm845-dsi-ctrl
|
||||
- qcom,sm6115-dsi-ctrl
|
||||
- qcom,sm6350-dsi-ctrl
|
||||
- qcom,sm6375-dsi-ctrl
|
||||
- qcom,sm8150-dsi-ctrl
|
||||
- qcom,sm8250-dsi-ctrl
|
||||
- qcom,sm8350-dsi-ctrl
|
||||
|
@ -256,6 +259,7 @@ allOf:
|
|||
compatible:
|
||||
contains:
|
||||
enum:
|
||||
- qcom,msm8226-dsi-ctrl
|
||||
- qcom,msm8974-dsi-ctrl
|
||||
then:
|
||||
properties:
|
||||
|
@ -297,6 +301,7 @@ allOf:
|
|||
contains:
|
||||
enum:
|
||||
- qcom,msm8998-dsi-ctrl
|
||||
- qcom,sm6350-dsi-ctrl
|
||||
then:
|
||||
properties:
|
||||
clocks:
|
||||
|
@ -364,6 +369,7 @@ allOf:
|
|||
enum:
|
||||
- qcom,sdm845-dsi-ctrl
|
||||
- qcom,sm6115-dsi-ctrl
|
||||
- qcom,sm6375-dsi-ctrl
|
||||
then:
|
||||
properties:
|
||||
clocks:
|
||||
|
|
|
@ -15,10 +15,11 @@ allOf:
|
|||
properties:
|
||||
compatible:
|
||||
enum:
|
||||
- qcom,dsi-phy-28nm-8226
|
||||
- qcom,dsi-phy-28nm-8960
|
||||
- qcom,dsi-phy-28nm-hpm
|
||||
- qcom,dsi-phy-28nm-hpm-fam-b
|
||||
- qcom,dsi-phy-28nm-lp
|
||||
- qcom,dsi-phy-28nm-8960
|
||||
|
||||
reg:
|
||||
items:
|
||||
|
|
|
@ -19,16 +19,18 @@ description: |
|
|||
|
||||
properties:
|
||||
compatible:
|
||||
items:
|
||||
- pattern: '^qcom,adreno-gmu-6[0-9][0-9]\.[0-9]$'
|
||||
- const: qcom,adreno-gmu
|
||||
oneOf:
|
||||
- items:
|
||||
- pattern: '^qcom,adreno-gmu-6[0-9][0-9]\.[0-9]$'
|
||||
- const: qcom,adreno-gmu
|
||||
- const: qcom,adreno-gmu-wrapper
|
||||
|
||||
reg:
|
||||
minItems: 3
|
||||
minItems: 1
|
||||
maxItems: 4
|
||||
|
||||
reg-names:
|
||||
minItems: 3
|
||||
minItems: 1
|
||||
maxItems: 4
|
||||
|
||||
clocks:
|
||||
|
@ -44,7 +46,6 @@ properties:
|
|||
- description: GMU HFI interrupt
|
||||
- description: GMU interrupt
|
||||
|
||||
|
||||
interrupt-names:
|
||||
items:
|
||||
- const: hfi
|
||||
|
@ -72,14 +73,8 @@ required:
|
|||
- compatible
|
||||
- reg
|
||||
- reg-names
|
||||
- clocks
|
||||
- clock-names
|
||||
- interrupts
|
||||
- interrupt-names
|
||||
- power-domains
|
||||
- power-domain-names
|
||||
- iommus
|
||||
- operating-points-v2
|
||||
|
||||
additionalProperties: false
|
||||
|
||||
|
@ -122,6 +117,7 @@ allOf:
|
|||
contains:
|
||||
enum:
|
||||
- qcom,adreno-gmu-635.0
|
||||
- qcom,adreno-gmu-660.1
|
||||
then:
|
||||
properties:
|
||||
reg:
|
||||
|
@ -217,6 +213,28 @@ allOf:
|
|||
- const: axi
|
||||
- const: memnoc
|
||||
|
||||
- if:
|
||||
properties:
|
||||
compatible:
|
||||
contains:
|
||||
const: qcom,adreno-gmu-wrapper
|
||||
then:
|
||||
properties:
|
||||
reg:
|
||||
items:
|
||||
- description: GMU wrapper register space
|
||||
reg-names:
|
||||
items:
|
||||
- const: gmu
|
||||
else:
|
||||
required:
|
||||
- clocks
|
||||
- clock-names
|
||||
- interrupts
|
||||
- interrupt-names
|
||||
- iommus
|
||||
- operating-points-v2
|
||||
|
||||
examples:
|
||||
- |
|
||||
#include <dt-bindings/clock/qcom,gpucc-sdm845.h>
|
||||
|
@ -249,3 +267,12 @@ examples:
|
|||
iommus = <&adreno_smmu 5>;
|
||||
operating-points-v2 = <&gmu_opp_table>;
|
||||
};
|
||||
|
||||
gmu_wrapper: gmu@596a000 {
|
||||
compatible = "qcom,adreno-gmu-wrapper";
|
||||
reg = <0x0596a000 0x30000>;
|
||||
reg-names = "gmu";
|
||||
power-domains = <&gpucc GPU_CX_GDSC>,
|
||||
<&gpucc GPU_GX_GDSC>;
|
||||
power-domain-names = "cx", "gx";
|
||||
};
|
||||
|
|
|
@ -36,10 +36,7 @@ properties:
|
|||
|
||||
reg-names:
|
||||
minItems: 1
|
||||
items:
|
||||
- const: kgsl_3d0_reg_memory
|
||||
- const: cx_mem
|
||||
- const: cx_dbgc
|
||||
maxItems: 3
|
||||
|
||||
interrupts:
|
||||
maxItems: 1
|
||||
|
@ -157,16 +154,62 @@ allOf:
|
|||
required:
|
||||
- clocks
|
||||
- clock-names
|
||||
|
||||
- if:
|
||||
properties:
|
||||
compatible:
|
||||
contains:
|
||||
pattern: '^qcom,adreno-6[0-9][0-9]\.[0-9]$'
|
||||
|
||||
then: # Since Adreno 6xx series clocks should be defined in GMU
|
||||
enum:
|
||||
- qcom,adreno-610.0
|
||||
- qcom,adreno-619.1
|
||||
then:
|
||||
properties:
|
||||
clocks: false
|
||||
clock-names: false
|
||||
clocks:
|
||||
minItems: 6
|
||||
maxItems: 6
|
||||
|
||||
clock-names:
|
||||
items:
|
||||
- const: core
|
||||
description: GPU Core clock
|
||||
- const: iface
|
||||
description: GPU Interface clock
|
||||
- const: mem_iface
|
||||
description: GPU Memory Interface clock
|
||||
- const: alt_mem_iface
|
||||
description: GPU Alternative Memory Interface clock
|
||||
- const: gmu
|
||||
description: CX GMU clock
|
||||
- const: xo
|
||||
description: GPUCC clocksource clock
|
||||
|
||||
reg-names:
|
||||
minItems: 1
|
||||
items:
|
||||
- const: kgsl_3d0_reg_memory
|
||||
- const: cx_dbgc
|
||||
|
||||
required:
|
||||
- clocks
|
||||
- clock-names
|
||||
else:
|
||||
if:
|
||||
properties:
|
||||
compatible:
|
||||
contains:
|
||||
pattern: '^qcom,adreno-6[0-9][0-9]\.[0-9]$'
|
||||
|
||||
then: # Starting with A6xx, the clocks are usually defined in the GMU node
|
||||
properties:
|
||||
clocks: false
|
||||
clock-names: false
|
||||
|
||||
reg-names:
|
||||
minItems: 1
|
||||
items:
|
||||
- const: kgsl_3d0_reg_memory
|
||||
- const: cx_mem
|
||||
- const: cx_dbgc
|
||||
|
||||
examples:
|
||||
- |
|
||||
|
|
|
@@ -22,6 +22,7 @@ properties:
      - items:
          - enum:
              - qcom,apq8064-mdp5
+             - qcom,msm8226-mdp5
              - qcom,msm8916-mdp5
              - qcom,msm8917-mdp5
              - qcom,msm8953-mdp5

@@ -125,6 +125,7 @@ patternProperties:
          - qcom,dsi-phy-14nm-660
          - qcom,dsi-phy-14nm-8953
          - qcom,dsi-phy-20nm
+         - qcom,dsi-phy-28nm-8226
          - qcom,dsi-phy-28nm-hpm
          - qcom,dsi-phy-28nm-lp
          - qcom,hdmi-phy-8084

@ -13,7 +13,10 @@ $ref: /schemas/display/msm/dpu-common.yaml#
|
|||
|
||||
properties:
|
||||
compatible:
|
||||
const: qcom,sc7180-dpu
|
||||
enum:
|
||||
- qcom,sc7180-dpu
|
||||
- qcom,sm6350-dpu
|
||||
- qcom,sm6375-dpu
|
||||
|
||||
reg:
|
||||
items:
|
||||
|
@ -26,6 +29,7 @@ properties:
|
|||
- const: vbif
|
||||
|
||||
clocks:
|
||||
minItems: 6
|
||||
items:
|
||||
- description: Display hf axi clock
|
||||
- description: Display ahb clock
|
||||
|
@ -33,8 +37,10 @@ properties:
|
|||
- description: Display lut clock
|
||||
- description: Display core clock
|
||||
- description: Display vsync clock
|
||||
- description: Display core throttle clock
|
||||
|
||||
clock-names:
|
||||
minItems: 6
|
||||
items:
|
||||
- const: bus
|
||||
- const: iface
|
||||
|
@ -42,6 +48,7 @@ properties:
|
|||
- const: lut
|
||||
- const: core
|
||||
- const: vsync
|
||||
- const: throttle
|
||||
|
||||
required:
|
||||
- compatible
|
||||
|
@ -52,6 +59,20 @@ required:
|
|||
|
||||
unevaluatedProperties: false
|
||||
|
||||
allOf:
|
||||
- if:
|
||||
properties:
|
||||
compatible:
|
||||
const: qcom,sm6375-dpu
|
||||
|
||||
then:
|
||||
properties:
|
||||
clocks:
|
||||
minItems: 7
|
||||
|
||||
clock-names:
|
||||
minItems: 7
|
||||
|
||||
examples:
|
||||
- |
|
||||
#include <dt-bindings/clock/qcom,dispcc-sc7180.h>
|
||||
|
|
|
@ -0,0 +1,213 @@
|
|||
# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: http://devicetree.org/schemas/display/msm/qcom,sm6350-mdss.yaml#
|
||||
$schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
|
||||
title: Qualcomm SM6350 Display MDSS
|
||||
|
||||
maintainers:
|
||||
- Krishna Manikandan <quic_mkrishn@quicinc.com>
|
||||
|
||||
description:
|
||||
SM6350 MSM Mobile Display Subsystem (MDSS), which encapsulates sub-blocks
|
||||
like DPU display controller, DSI and DP interfaces etc.
|
||||
|
||||
$ref: /schemas/display/msm/mdss-common.yaml#
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
const: qcom,sm6350-mdss
|
||||
|
||||
clocks:
|
||||
items:
|
||||
- description: Display AHB clock from gcc
|
||||
- description: Display AXI clock from gcc
|
||||
- description: Display core clock
|
||||
|
||||
clock-names:
|
||||
items:
|
||||
- const: iface
|
||||
- const: bus
|
||||
- const: core
|
||||
|
||||
iommus:
|
||||
maxItems: 1
|
||||
|
||||
interconnects:
|
||||
maxItems: 2
|
||||
|
||||
interconnect-names:
|
||||
maxItems: 2
|
||||
|
||||
patternProperties:
|
||||
"^display-controller@[0-9a-f]+$":
|
||||
type: object
|
||||
properties:
|
||||
compatible:
|
||||
const: qcom,sm6350-dpu
|
||||
|
||||
"^dsi@[0-9a-f]+$":
|
||||
type: object
|
||||
properties:
|
||||
compatible:
|
||||
items:
|
||||
- const: qcom,sm6350-dsi-ctrl
|
||||
- const: qcom,mdss-dsi-ctrl
|
||||
|
||||
"^phy@[0-9a-f]+$":
|
||||
type: object
|
||||
properties:
|
||||
compatible:
|
||||
const: qcom,dsi-phy-10nm
|
||||
|
||||
unevaluatedProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
#include <dt-bindings/clock/qcom,dispcc-sm6350.h>
|
||||
#include <dt-bindings/clock/qcom,gcc-sm6350.h>
|
||||
#include <dt-bindings/clock/qcom,rpmh.h>
|
||||
#include <dt-bindings/interrupt-controller/arm-gic.h>
|
||||
#include <dt-bindings/power/qcom-rpmpd.h>
|
||||
|
||||
display-subsystem@ae00000 {
|
||||
compatible = "qcom,sm6350-mdss";
|
||||
reg = <0x0ae00000 0x1000>;
|
||||
reg-names = "mdss";
|
||||
|
||||
power-domains = <&dispcc MDSS_GDSC>;
|
||||
|
||||
clocks = <&gcc GCC_DISP_AHB_CLK>,
|
||||
<&gcc GCC_DISP_AXI_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_MDP_CLK>;
|
||||
clock-names = "iface", "bus", "core";
|
||||
|
||||
interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <1>;
|
||||
|
||||
iommus = <&apps_smmu 0x800 0x2>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
ranges;
|
||||
|
||||
display-controller@ae01000 {
|
||||
compatible = "qcom,sm6350-dpu";
|
||||
reg = <0x0ae01000 0x8f000>,
|
||||
<0x0aeb0000 0x2008>;
|
||||
reg-names = "mdp", "vbif";
|
||||
|
||||
clocks = <&gcc GCC_DISP_AXI_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_AHB_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_ROT_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_MDP_LUT_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_MDP_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_VSYNC_CLK>;
|
||||
clock-names = "bus", "iface", "rot", "lut", "core",
|
||||
"vsync";
|
||||
|
||||
assigned-clocks = <&dispcc DISP_CC_MDSS_MDP_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_VSYNC_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_ROT_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_AHB_CLK>;
|
||||
assigned-clock-rates = <300000000>,
|
||||
<19200000>,
|
||||
<19200000>,
|
||||
<19200000>;
|
||||
|
||||
interrupt-parent = <&mdss>;
|
||||
interrupts = <0>;
|
||||
operating-points-v2 = <&mdp_opp_table>;
|
||||
power-domains = <&rpmhpd SM6350_CX>;
|
||||
|
||||
ports {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
port@0 {
|
||||
reg = <0>;
|
||||
dpu_intf1_out: endpoint {
|
||||
remote-endpoint = <&dsi0_in>;
|
||||
};
|
||||
};
|
||||
|
||||
port@1 {
|
||||
reg = <1>;
|
||||
dpu_intf2_out: endpoint {
|
||||
remote-endpoint = <&dsi1_in>;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
dsi@ae94000 {
|
||||
compatible = "qcom,sm6350-dsi-ctrl", "qcom,mdss-dsi-ctrl";
|
||||
reg = <0x0ae94000 0x400>;
|
||||
reg-names = "dsi_ctrl";
|
||||
|
||||
interrupt-parent = <&mdss>;
|
||||
interrupts = <4>;
|
||||
|
||||
clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_PCLK0_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_ESC0_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_AHB_CLK>,
|
||||
<&gcc GCC_DISP_AXI_CLK>;
|
||||
clock-names = "byte",
|
||||
"byte_intf",
|
||||
"pixel",
|
||||
"core",
|
||||
"iface",
|
||||
"bus";
|
||||
|
||||
assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
|
||||
<&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
|
||||
assigned-clock-parents = <&dsi0_phy 0>, <&dsi0_phy 1>;
|
||||
|
||||
operating-points-v2 = <&dsi_opp_table>;
|
||||
power-domains = <&rpmhpd SM6350_MX>;
|
||||
|
||||
phys = <&dsi0_phy>;
|
||||
phy-names = "dsi";
|
||||
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
ports {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
port@0 {
|
||||
reg = <0>;
|
||||
dsi0_in: endpoint {
|
||||
remote-endpoint = <&dpu_intf1_out>;
|
||||
};
|
||||
};
|
||||
|
||||
port@1 {
|
||||
reg = <1>;
|
||||
dsi0_out: endpoint {
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
dsi0_phy: phy@ae94400 {
|
||||
compatible = "qcom,dsi-phy-10nm";
|
||||
reg = <0x0ae94400 0x200>,
|
||||
<0x0ae94600 0x280>,
|
||||
<0x0ae94a00 0x1e0>;
|
||||
reg-names = "dsi_phy",
|
||||
"dsi_phy_lane",
|
||||
"dsi_pll";
|
||||
|
||||
#clock-cells = <1>;
|
||||
#phy-cells = <0>;
|
||||
|
||||
clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>, <&rpmhcc RPMH_CXO_CLK>;
|
||||
clock-names = "iface", "ref";
|
||||
};
|
||||
};
|
||||
...
|
|
@ -0,0 +1,215 @@
|
|||
# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: http://devicetree.org/schemas/display/msm/qcom,sm6375-mdss.yaml#
|
||||
$schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
|
||||
title: Qualcomm SM6375 Display MDSS
|
||||
|
||||
maintainers:
|
||||
- Konrad Dybcio <konrad.dybcio@linaro.org>
|
||||
|
||||
description:
|
||||
SM6375 MSM Mobile Display Subsystem (MDSS), which encapsulates sub-blocks
|
||||
like DPU display controller, DSI and DP interfaces etc.
|
||||
|
||||
$ref: /schemas/display/msm/mdss-common.yaml#
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
const: qcom,sm6375-mdss
|
||||
|
||||
clocks:
|
||||
items:
|
||||
- description: Display AHB clock from gcc
|
||||
- description: Display AHB clock
|
||||
- description: Display core clock
|
||||
|
||||
clock-names:
|
||||
items:
|
||||
- const: iface
|
||||
- const: ahb
|
||||
- const: core
|
||||
|
||||
iommus:
|
||||
maxItems: 1
|
||||
|
||||
interconnects:
|
||||
maxItems: 2
|
||||
|
||||
interconnect-names:
|
||||
maxItems: 2
|
||||
|
||||
patternProperties:
|
||||
"^display-controller@[0-9a-f]+$":
|
||||
type: object
|
||||
properties:
|
||||
compatible:
|
||||
const: qcom,sm6375-dpu
|
||||
|
||||
"^dsi@[0-9a-f]+$":
|
||||
type: object
|
||||
properties:
|
||||
compatible:
|
||||
items:
|
||||
- const: qcom,sm6375-dsi-ctrl
|
||||
- const: qcom,mdss-dsi-ctrl
|
||||
|
||||
"^phy@[0-9a-f]+$":
|
||||
type: object
|
||||
properties:
|
||||
compatible:
|
||||
const: qcom,sm6375-dsi-phy-7nm
|
||||
|
||||
unevaluatedProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
#include <dt-bindings/clock/qcom,rpmcc.h>
|
||||
#include <dt-bindings/clock/qcom,sm6375-gcc.h>
|
||||
#include <dt-bindings/clock/qcom,sm6375-dispcc.h>
|
||||
#include <dt-bindings/interrupt-controller/arm-gic.h>
|
||||
#include <dt-bindings/power/qcom-rpmpd.h>
|
||||
|
||||
display-subsystem@5e00000 {
|
||||
compatible = "qcom,sm6375-mdss";
|
||||
reg = <0x05e00000 0x1000>;
|
||||
reg-names = "mdss";
|
||||
|
||||
power-domains = <&dispcc MDSS_GDSC>;
|
||||
|
||||
clocks = <&gcc GCC_DISP_AHB_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_AHB_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_MDP_CLK>;
|
||||
clock-names = "iface", "ahb", "core";
|
||||
|
||||
interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <1>;
|
||||
|
||||
iommus = <&apps_smmu 0x820 0x2>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
ranges;
|
||||
|
||||
display-controller@5e01000 {
|
||||
compatible = "qcom,sm6375-dpu";
|
||||
reg = <0x05e01000 0x8e030>,
|
||||
<0x05eb0000 0x2008>;
|
||||
reg-names = "mdp", "vbif";
|
||||
|
||||
clocks = <&gcc GCC_DISP_HF_AXI_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_AHB_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_ROT_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_MDP_LUT_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_MDP_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_VSYNC_CLK>,
|
||||
<&gcc GCC_DISP_THROTTLE_CORE_CLK>;
|
||||
clock-names = "bus",
|
||||
"iface",
|
||||
"rot",
|
||||
"lut",
|
||||
"core",
|
||||
"vsync",
|
||||
"throttle";
|
||||
|
||||
assigned-clocks = <&dispcc DISP_CC_MDSS_VSYNC_CLK>;
|
||||
assigned-clock-rates = <19200000>;
|
||||
|
||||
operating-points-v2 = <&mdp_opp_table>;
|
||||
power-domains = <&rpmpd SM6375_VDDCX>;
|
||||
|
||||
interrupt-parent = <&mdss>;
|
||||
interrupts = <0>;
|
||||
|
||||
ports {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
port@0 {
|
||||
reg = <0>;
|
||||
dpu_intf1_out: endpoint {
|
||||
remote-endpoint = <&dsi0_in>;
|
||||
};
|
||||
};
|
||||
|
||||
port@1 {
|
||||
reg = <1>;
|
||||
dpu_intf2_out: endpoint {
|
||||
remote-endpoint = <&dsi1_in>;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
dsi@5e94000 {
|
||||
compatible = "qcom,sm6375-dsi-ctrl", "qcom,mdss-dsi-ctrl";
|
||||
reg = <0x05e94000 0x400>;
|
||||
reg-names = "dsi_ctrl";
|
||||
|
||||
interrupt-parent = <&mdss>;
|
||||
interrupts = <4>;
|
||||
|
||||
clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_PCLK0_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_ESC0_CLK>,
|
||||
<&dispcc DISP_CC_MDSS_AHB_CLK>,
|
||||
<&gcc GCC_DISP_HF_AXI_CLK>;
|
||||
clock-names = "byte",
|
||||
"byte_intf",
|
||||
"pixel",
|
||||
"core",
|
||||
"iface",
|
||||
"bus";
|
||||
|
||||
assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
|
||||
<&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
|
||||
assigned-clock-parents = <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>;
|
||||
|
||||
operating-points-v2 = <&dsi_opp_table>;
|
||||
power-domains = <&rpmpd SM6375_VDDMX>;
|
||||
|
||||
phys = <&mdss_dsi0_phy>;
|
||||
phy-names = "dsi";
|
||||
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
ports {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
port@0 {
|
||||
reg = <0>;
|
||||
dsi0_in: endpoint {
|
||||
remote-endpoint = <&dpu_intf1_out>;
|
||||
};
|
||||
};
|
||||
|
||||
port@1 {
|
||||
reg = <1>;
|
||||
dsi0_out: endpoint {
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
mdss_dsi0_phy: phy@5e94400 {
|
||||
compatible = "qcom,sm6375-dsi-phy-7nm";
|
||||
reg = <0x05e94400 0x200>,
|
||||
<0x05e94600 0x280>,
|
||||
<0x05e94900 0x264>;
|
||||
reg-names = "dsi_phy",
|
||||
"dsi_phy_lane",
|
||||
"dsi_pll";
|
||||
|
||||
#clock-cells = <1>;
|
||||
#phy-cells = <0>;
|
||||
|
||||
clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
|
||||
<&rpmcc RPM_SMD_XO_CLK_SRC>;
|
||||
clock-names = "iface", "ref";
|
||||
};
|
||||
};
|
||||
...
|
|
@@ -64,7 +64,7 @@ patternProperties:
    type: object
    properties:
      compatible:
-       const: qcom,dsi-phy-5nm-8350
+       const: qcom,sm8350-dsi-phy-5nm

    unevaluatedProperties: false

@@ -6583,6 +6583,7 @@ M:	Rob Clark <robdclark@gmail.com>
M:	Abhinav Kumar <quic_abhinavk@quicinc.com>
M:	Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
R:	Sean Paul <sean@poorly.run>
+R:	Marijn Suijten <marijn.suijten@somainline.org>
L:	linux-arm-msm@vger.kernel.org
L:	dri-devel@lists.freedesktop.org
L:	freedreno@lists.freedesktop.org

@ -270,6 +270,28 @@ void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload,
|
|||
}
|
||||
EXPORT_SYMBOL(drm_dsc_pps_payload_pack);
|
||||
|
||||
/**
|
||||
* drm_dsc_set_const_params() - Set DSC parameters considered typically
|
||||
* constant across operation modes
|
||||
*
|
||||
* @vdsc_cfg:
|
||||
* DSC Configuration data partially filled by driver
|
||||
*/
|
||||
void drm_dsc_set_const_params(struct drm_dsc_config *vdsc_cfg)
|
||||
{
|
||||
if (!vdsc_cfg->rc_model_size)
|
||||
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
|
||||
vdsc_cfg->rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
|
||||
vdsc_cfg->rc_tgt_offset_high = DSC_RC_TGT_OFFSET_HI_CONST;
|
||||
vdsc_cfg->rc_tgt_offset_low = DSC_RC_TGT_OFFSET_LO_CONST;
|
||||
|
||||
if (vdsc_cfg->bits_per_component <= 10)
|
||||
vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
|
||||
else
|
||||
vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dsc_set_const_params);
|
||||
|
||||
/* From DSC_v1.11 spec, rc_parameter_Set syntax element typically constant */
|
||||
static const u16 drm_dsc_rc_buf_thresh[] = {
|
||||
896, 1792, 2688, 3584, 4480, 5376, 6272, 6720, 7168, 7616,
|
||||
|
@ -1413,3 +1435,40 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
|
|||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dsc_compute_rc_parameters);
|
||||
|
||||
/**
|
||||
* drm_dsc_get_bpp_int() - Get integer bits per pixel value for the given DRM DSC config
|
||||
* @vdsc_cfg: Pointer to DRM DSC config struct
|
||||
*
|
||||
* Return: Integer BPP value
|
||||
*/
|
||||
u32 drm_dsc_get_bpp_int(const struct drm_dsc_config *vdsc_cfg)
|
||||
{
|
||||
WARN_ON_ONCE(vdsc_cfg->bits_per_pixel & 0xf);
|
||||
return vdsc_cfg->bits_per_pixel >> 4;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dsc_get_bpp_int);
|
||||
|
||||
/**
|
||||
* drm_dsc_initial_scale_value() - Calculate the initial scale value for the given DSC config
|
||||
* @dsc: Pointer to DRM DSC config struct
|
||||
*
|
||||
* Return: Calculated initial scale value
|
||||
*/
|
||||
u8 drm_dsc_initial_scale_value(const struct drm_dsc_config *dsc)
|
||||
{
|
||||
return 8 * dsc->rc_model_size / (dsc->rc_model_size - dsc->initial_offset);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dsc_initial_scale_value);
|
||||
|
||||
/**
|
||||
* drm_dsc_flatness_det_thresh() - Calculate the flatness_det_thresh for the given DSC config
|
||||
* @dsc: Pointer to DRM DSC config struct
|
||||
*
|
||||
* Return: Calculated flatness det thresh value
|
||||
*/
|
||||
u32 drm_dsc_flatness_det_thresh(const struct drm_dsc_config *dsc)
|
||||
{
|
||||
return 2 << (dsc->bits_per_component - 8);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dsc_flatness_det_thresh);
|
||||
|
|
|
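As a rough illustration of how a display driver might chain the new DRM DSC helpers shown above (a sketch only, not code from this series: msm_dsc_example() and its call order are hypothetical, only the drm_dsc_* helpers come from the patch):

#include <drm/display/drm_dsc.h>
#include <drm/display/drm_dsc_helper.h>

/* Hypothetical wrapper: derive the values a DSC hardware block typically
 * needs from a partially-filled struct drm_dsc_config.
 */
static int msm_dsc_example(struct drm_dsc_config *dsc,
			   struct drm_dsc_picture_parameter_set *pps)
{
	u32 bpp, flatness_thresh;
	u8 initial_scale;
	int ret;

	/* Fill in the DSC parameters that are typically constant */
	drm_dsc_set_const_params(dsc);

	/* Derive the rate-control parameters from the config */
	ret = drm_dsc_compute_rc_parameters(dsc);
	if (ret)
		return ret;

	/* Values the encoder/DSC block programming code would consume */
	bpp = drm_dsc_get_bpp_int(dsc);
	initial_scale = drm_dsc_initial_scale_value(dsc);
	flatness_thresh = drm_dsc_flatness_det_thresh(dsc);

	/* Pack the PPS SDP payload sent to the sink/panel */
	drm_dsc_pps_payload_pack(pps, dsc);

	return 0;
}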
@ -65,6 +65,7 @@ msm-$(CONFIG_DRM_MSM_DPU) += \
|
|||
disp/dpu1/dpu_hw_catalog.o \
|
||||
disp/dpu1/dpu_hw_ctl.o \
|
||||
disp/dpu1/dpu_hw_dsc.o \
|
||||
disp/dpu1/dpu_hw_dsc_1_2.o \
|
||||
disp/dpu1/dpu_hw_interrupts.o \
|
||||
disp/dpu1/dpu_hw_intf.o \
|
||||
disp/dpu1/dpu_hw_lm.o \
|
||||
|
@ -122,7 +123,6 @@ msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
|
|||
dp/dp_ctrl.o \
|
||||
dp/dp_display.o \
|
||||
dp/dp_drm.o \
|
||||
dp/dp_hpd.o \
|
||||
dp/dp_link.o \
|
||||
dp/dp_panel.o \
|
||||
dp/dp_parser.o \
|
||||
|
|
|
@ -69,7 +69,7 @@ static void roq_print(struct msm_gpu *gpu, struct drm_printer *p)
|
|||
|
||||
static int show(struct seq_file *m, void *arg)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_info_node *node = m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
struct msm_drm_private *priv = dev->dev_private;
|
||||
struct drm_printer p = drm_seq_file_printer(m);
|
||||
|
|
|
@ -1743,6 +1743,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
|
|||
{
|
||||
struct msm_drm_private *priv = dev->dev_private;
|
||||
struct platform_device *pdev = priv->gpu_pdev;
|
||||
struct adreno_platform_config *config = pdev->dev.platform_data;
|
||||
struct a5xx_gpu *a5xx_gpu = NULL;
|
||||
struct adreno_gpu *adreno_gpu;
|
||||
struct msm_gpu *gpu;
|
||||
|
@ -1769,7 +1770,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
|
|||
|
||||
nr_rings = 4;
|
||||
|
||||
if (adreno_is_a510(adreno_gpu))
|
||||
if (adreno_cmp_rev(ADRENO_REV(5, 1, 0, ANY_ID), config->rev))
|
||||
nr_rings = 1;
|
||||
|
||||
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, nr_rings);
|
||||
|
|
|
@ -354,7 +354,7 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
|
|||
}
|
||||
|
||||
/* Enable CPU control of SPTP power power collapse */
|
||||
static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
|
||||
int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
|
||||
{
|
||||
int ret;
|
||||
u32 val;
|
||||
|
@ -376,7 +376,7 @@ static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
|
|||
}
|
||||
|
||||
/* Disable CPU control of SPTP power power collapse */
|
||||
static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
|
||||
void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
|
||||
{
|
||||
u32 val;
|
||||
int ret;
|
||||
|
@ -479,12 +479,6 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
|
|||
|
||||
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
|
||||
|
||||
/* Set up CX GMU counter 0 to count busy ticks */
|
||||
gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
|
||||
gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);
|
||||
|
||||
/* Enable the power counter */
|
||||
gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -868,43 +862,6 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
|
|||
(val & 1), 100, 1000);
|
||||
}
|
||||
|
||||
#define GBIF_CLIENT_HALT_MASK BIT(0)
|
||||
#define GBIF_ARB_HALT_MASK BIT(1)
|
||||
|
||||
static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu,
|
||||
bool gx_off)
|
||||
{
|
||||
struct msm_gpu *gpu = &adreno_gpu->base;
|
||||
|
||||
if (!a6xx_has_gbif(adreno_gpu)) {
|
||||
gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
|
||||
spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
|
||||
0xf) == 0xf);
|
||||
gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
if (gx_off) {
|
||||
/* Halt the gx side of GBIF */
|
||||
gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
|
||||
spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
|
||||
}
|
||||
|
||||
/* Halt new client requests on GBIF */
|
||||
gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
|
||||
spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
|
||||
(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
|
||||
|
||||
/* Halt all AXI requests on GBIF */
|
||||
gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
|
||||
spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
|
||||
(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
|
||||
|
||||
/* The GBIF halt needs to be explicitly cleared */
|
||||
gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
|
||||
}
|
||||
|
||||
/* Force the GMU off in case it isn't responsive */
|
||||
static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
|
||||
{
|
||||
|
@ -912,6 +869,12 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
|
|||
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
|
||||
struct msm_gpu *gpu = &adreno_gpu->base;
|
||||
|
||||
/*
|
||||
* Turn off keep alive that might have been enabled by the hang
|
||||
* interrupt
|
||||
*/
|
||||
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
|
||||
|
||||
/* Flush all the queues */
|
||||
a6xx_hfi_stop(gmu);
|
||||
|
||||
|
@ -930,8 +893,7 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
|
|||
a6xx_bus_clear_pending_transactions(adreno_gpu, true);
|
||||
|
||||
/* Reset GPU core blocks */
|
||||
gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, 1);
|
||||
udelay(100);
|
||||
a6xx_gpu_sw_reset(gpu, true);
|
||||
}
|
||||
|
||||
static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
|
||||
|
@ -1469,6 +1431,7 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
|
|||
|
||||
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
|
||||
{
|
||||
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
|
||||
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
|
||||
struct platform_device *pdev = to_platform_device(gmu->dev);
|
||||
|
||||
|
@ -1494,10 +1457,12 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
|
|||
gmu->mmio = NULL;
|
||||
gmu->rscc = NULL;
|
||||
|
||||
a6xx_gmu_memory_free(gmu);
|
||||
if (!adreno_has_gmu_wrapper(adreno_gpu)) {
|
||||
a6xx_gmu_memory_free(gmu);
|
||||
|
||||
free_irq(gmu->gmu_irq, gmu);
|
||||
free_irq(gmu->hfi_irq, gmu);
|
||||
free_irq(gmu->gmu_irq, gmu);
|
||||
free_irq(gmu->hfi_irq, gmu);
|
||||
}
|
||||
|
||||
/* Drop reference taken in of_find_device_by_node */
|
||||
put_device(gmu->dev);
|
||||
|
@ -1516,6 +1481,69 @@ static int cxpd_notifier_cb(struct notifier_block *nb,
|
|||
return 0;
|
||||
}
|
||||
|
||||
int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
|
||||
{
|
||||
struct platform_device *pdev = of_find_device_by_node(node);
|
||||
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
|
||||
int ret;
|
||||
|
||||
if (!pdev)
|
||||
return -ENODEV;
|
||||
|
||||
gmu->dev = &pdev->dev;
|
||||
|
||||
of_dma_configure(gmu->dev, node, true);
|
||||
|
||||
pm_runtime_enable(gmu->dev);
|
||||
|
||||
/* Mark legacy for manual SPTPRAC control */
|
||||
gmu->legacy = true;
|
||||
|
||||
/* Map the GMU registers */
|
||||
gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
|
||||
if (IS_ERR(gmu->mmio)) {
|
||||
ret = PTR_ERR(gmu->mmio);
|
||||
goto err_mmio;
|
||||
}
|
||||
|
||||
gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx");
|
||||
if (IS_ERR(gmu->cxpd)) {
|
||||
ret = PTR_ERR(gmu->cxpd);
|
||||
goto err_mmio;
|
||||
}
|
||||
|
||||
if (!device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME)) {
|
||||
ret = -ENODEV;
|
||||
goto detach_cxpd;
|
||||
}
|
||||
|
||||
init_completion(&gmu->pd_gate);
|
||||
complete_all(&gmu->pd_gate);
|
||||
gmu->pd_nb.notifier_call = cxpd_notifier_cb;
|
||||
|
||||
/* Get a link to the GX power domain to reset the GPU */
|
||||
gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
|
||||
if (IS_ERR(gmu->gxpd)) {
|
||||
ret = PTR_ERR(gmu->gxpd);
|
||||
goto err_mmio;
|
||||
}
|
||||
|
||||
gmu->initialized = true;
|
||||
|
||||
return 0;
|
||||
|
||||
detach_cxpd:
|
||||
dev_pm_domain_detach(gmu->cxpd, false);
|
||||
|
||||
err_mmio:
|
||||
iounmap(gmu->mmio);
|
||||
|
||||
/* Drop reference taken in of_find_device_by_node */
|
||||
put_device(gmu->dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
|
||||
{
|
||||
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
|
||||
|
|
|
@ -51,8 +51,8 @@ struct a6xx_gmu {
|
|||
|
||||
struct msm_gem_address_space *aspace;
|
||||
|
||||
void * __iomem mmio;
|
||||
void * __iomem rscc;
|
||||
void __iomem *mmio;
|
||||
void __iomem *rscc;
|
||||
|
||||
int hfi_irq;
|
||||
int gmu_irq;
|
||||
|
@ -193,5 +193,7 @@ int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index);
|
|||
|
||||
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu);
|
||||
bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu);
|
||||
void a6xx_sptprac_disable(struct a6xx_gmu *gmu);
|
||||
int a6xx_sptprac_enable(struct a6xx_gmu *gmu);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -21,7 +21,7 @@ static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
|
|||
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
|
||||
|
||||
/* Check that the GMU is idle */
|
||||
if (!a6xx_gmu_isidle(&a6xx_gpu->gmu))
|
||||
if (!adreno_has_gmu_wrapper(adreno_gpu) && !a6xx_gmu_isidle(&a6xx_gpu->gmu))
|
||||
return false;
|
||||
|
||||
/* Check tha the CX master is idle */
|
||||
|
@ -252,6 +252,56 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
|
|||
a6xx_flush(gpu, ring);
|
||||
}
|
||||
|
||||
const struct adreno_reglist a612_hwcg[] = {
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000081},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01202222},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05522022},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
|
||||
{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
|
||||
{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
|
||||
{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
|
||||
{REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
|
||||
{REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
|
||||
{REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
|
||||
{},
|
||||
};
|
||||
|
||||
/* For a615 family (a615, a616, a618 and a619) */
|
||||
const struct adreno_reglist a615_hwcg[] = {
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
|
||||
|
@ -588,6 +638,63 @@ const struct adreno_reglist a660_hwcg[] = {
|
|||
{},
|
||||
};
|
||||
|
||||
const struct adreno_reglist a690_hwcg[] = {
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
|
||||
{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
|
||||
{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
|
||||
{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL, 0x8AA8AA82},
|
||||
{REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
|
||||
{REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
|
||||
{REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
|
||||
{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
|
||||
{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
|
||||
{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
|
||||
{REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL, 0x20200},
|
||||
{REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL, 0x10111},
|
||||
{REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL, 0x5555},
|
||||
{}
|
||||
};
|
||||
|
||||
static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
|
||||
{
|
||||
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
|
||||
|
@ -602,6 +709,8 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
|
|||
|
||||
if (adreno_is_a630(adreno_gpu))
|
||||
clock_cntl_on = 0x8aa8aa02;
|
||||
else if (adreno_is_a610(adreno_gpu))
|
||||
clock_cntl_on = 0xaaa8aa82;
|
||||
else
|
||||
clock_cntl_on = 0x8aa8aa82;
|
||||
|
||||
|
@ -612,13 +721,15 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
|
|||
return;
|
||||
|
||||
/* Disable SP clock before programming HWCG registers */
|
||||
gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
|
||||
if (!adreno_is_a610(adreno_gpu))
|
||||
gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
|
||||
|
||||
for (i = 0; (reg = &adreno_gpu->info->hwcg[i], reg->offset); i++)
|
||||
gpu_write(gpu, reg->offset, state ? reg->value : 0);
|
||||
|
||||
/* Enable SP clock */
|
||||
gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
|
||||
if (!adreno_is_a610(adreno_gpu))
|
||||
gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
|
||||
|
||||
gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
|
||||
}
|
||||
|
@ -747,6 +858,45 @@ static const u32 a660_protect[] = {
|
|||
A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
|
||||
};
|
||||
|
||||
/* These are for a690 */
|
||||
static const u32 a690_protect[] = {
|
||||
A6XX_PROTECT_RDONLY(0x00000, 0x004ff),
|
||||
A6XX_PROTECT_RDONLY(0x00501, 0x00001),
|
||||
A6XX_PROTECT_RDONLY(0x0050b, 0x002f4),
|
||||
A6XX_PROTECT_NORDWR(0x0050e, 0x00000),
|
||||
A6XX_PROTECT_NORDWR(0x00510, 0x00000),
|
||||
A6XX_PROTECT_NORDWR(0x00534, 0x00000),
|
||||
A6XX_PROTECT_NORDWR(0x00800, 0x00082),
|
||||
A6XX_PROTECT_NORDWR(0x008a0, 0x00008),
|
||||
A6XX_PROTECT_NORDWR(0x008ab, 0x00024),
|
||||
A6XX_PROTECT_RDONLY(0x008de, 0x000ae),
|
||||
A6XX_PROTECT_NORDWR(0x00900, 0x0004d),
|
||||
A6XX_PROTECT_NORDWR(0x0098d, 0x00272),
|
||||
A6XX_PROTECT_NORDWR(0x00e00, 0x00001),
|
||||
A6XX_PROTECT_NORDWR(0x00e03, 0x0000c),
|
||||
A6XX_PROTECT_NORDWR(0x03c00, 0x000c3),
|
||||
A6XX_PROTECT_RDONLY(0x03cc4, 0x01fff),
|
||||
A6XX_PROTECT_NORDWR(0x08630, 0x001cf),
|
||||
A6XX_PROTECT_NORDWR(0x08e00, 0x00000),
|
||||
A6XX_PROTECT_NORDWR(0x08e08, 0x00007),
|
||||
A6XX_PROTECT_NORDWR(0x08e50, 0x0001f),
|
||||
A6XX_PROTECT_NORDWR(0x08e80, 0x0027f),
|
||||
A6XX_PROTECT_NORDWR(0x09624, 0x001db),
|
||||
A6XX_PROTECT_NORDWR(0x09e60, 0x00011),
|
||||
A6XX_PROTECT_NORDWR(0x09e78, 0x00187),
|
||||
A6XX_PROTECT_NORDWR(0x0a630, 0x001cf),
|
||||
A6XX_PROTECT_NORDWR(0x0ae02, 0x00000),
|
||||
A6XX_PROTECT_NORDWR(0x0ae50, 0x0012f),
|
||||
A6XX_PROTECT_NORDWR(0x0b604, 0x00000),
|
||||
A6XX_PROTECT_NORDWR(0x0b608, 0x00006),
|
||||
A6XX_PROTECT_NORDWR(0x0be02, 0x00001),
|
||||
A6XX_PROTECT_NORDWR(0x0be20, 0x0015f),
|
||||
A6XX_PROTECT_NORDWR(0x0d000, 0x005ff),
|
||||
A6XX_PROTECT_NORDWR(0x0f000, 0x00bff),
|
||||
A6XX_PROTECT_RDONLY(0x0fc00, 0x01fff),
|
||||
A6XX_PROTECT_NORDWR(0x11c00, 0x00000), /*note: infiite range */
|
||||
};
|
||||
|
||||
static void a6xx_set_cp_protect(struct msm_gpu *gpu)
|
||||
{
|
||||
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
|
||||
|
@ -758,6 +908,11 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
|
|||
count = ARRAY_SIZE(a650_protect);
|
||||
count_max = 48;
|
||||
BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48);
|
||||
} else if (adreno_is_a690(adreno_gpu)) {
|
||||
regs = a690_protect;
|
||||
count = ARRAY_SIZE(a690_protect);
|
||||
count_max = 48;
|
||||
BUILD_BUG_ON(ARRAY_SIZE(a690_protect) > 48);
|
||||
} else if (adreno_is_a660_family(adreno_gpu)) {
|
||||
regs = a660_protect;
|
||||
count = ARRAY_SIZE(a660_protect);
|
||||
|
@ -786,39 +941,77 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
|
|||
static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
|
||||
{
|
||||
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
|
||||
u32 lower_bit = 2;
|
||||
u32 amsbc = 0;
|
||||
/* Unknown, introduced with A650 family, related to UBWC mode/ver 4 */
|
||||
u32 rgb565_predicator = 0;
|
||||
/* Unknown, introduced with A650 family */
|
||||
u32 uavflagprd_inv = 0;
|
||||
/* Whether the minimum access length is 64 bits */
|
||||
u32 min_acc_len = 0;
|
||||
/* Entirely magic, per-GPU-gen value */
|
||||
u32 ubwc_mode = 0;
|
||||
/*
|
||||
* The Highest Bank Bit value represents the bit of the highest DDR bank.
|
||||
* We then subtract 13 from it (13 is the minimum value allowed by hw) and
|
||||
* write the lowest two bits of the remaining value as hbb_lo and the
|
||||
* one above it as hbb_hi to the hardware. This should ideally use DRAM
|
||||
* type detection.
|
||||
*/
|
||||
u32 hbb_hi = 0;
|
||||
u32 hbb_lo = 2;
|
||||
/* Unknown, introduced with A640/680 */
|
||||
u32 amsbc = 0;
|
||||
|
||||
if (adreno_is_a610(adreno_gpu)) {
|
||||
/* HBB = 14 */
|
||||
hbb_lo = 1;
|
||||
min_acc_len = 1;
|
||||
ubwc_mode = 1;
|
||||
}
|
||||
|
||||
/* a618 is using the hw default values */
|
||||
if (adreno_is_a618(adreno_gpu))
|
||||
return;
|
||||
|
||||
if (adreno_is_a619_holi(adreno_gpu))
|
||||
hbb_lo = 0;
|
||||
|
||||
if (adreno_is_a640_family(adreno_gpu))
|
||||
amsbc = 1;
|
||||
|
||||
if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) {
|
||||
/* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
|
||||
lower_bit = 3;
|
||||
hbb_lo = 3;
|
||||
amsbc = 1;
|
||||
rgb565_predicator = 1;
|
||||
uavflagprd_inv = 2;
|
||||
}
|
||||
|
||||
if (adreno_is_a690(adreno_gpu)) {
|
||||
hbb_lo = 2;
|
||||
amsbc = 1;
|
||||
rgb565_predicator = 1;
|
||||
uavflagprd_inv = 2;
|
||||
}
|
||||
|
||||
if (adreno_is_7c3(adreno_gpu)) {
|
||||
lower_bit = 1;
|
||||
hbb_lo = 1;
|
||||
amsbc = 1;
|
||||
rgb565_predicator = 1;
|
||||
uavflagprd_inv = 2;
|
||||
}
|
||||
|
||||
gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
|
||||
rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
|
||||
gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
|
||||
gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
|
||||
uavflagprd_inv << 4 | lower_bit << 1);
|
||||
gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, lower_bit << 21);
|
||||
rgb565_predicator << 11 | hbb_hi << 10 | amsbc << 4 |
|
||||
min_acc_len << 3 | hbb_lo << 1 | ubwc_mode);
|
||||
|
||||
gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, hbb_hi << 4 |
|
||||
min_acc_len << 3 | hbb_lo << 1 | ubwc_mode);
|
||||
|
||||
gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, hbb_hi << 10 |
|
||||
uavflagprd_inv << 4 | min_acc_len << 3 |
|
||||
hbb_lo << 1 | ubwc_mode);
|
||||
|
||||
gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, min_acc_len << 23 | hbb_lo << 21);
|
||||
}
|
||||
|
||||
static int a6xx_cp_init(struct msm_gpu *gpu)
|
||||
|
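The highest-bank-bit handling described in the a6xx_set_ubwc_config() comment above can be illustrated with a small worked example (a sketch only; the helper below and its names are not part of the patch):

/*
 * Illustrative only: split a Highest Bank Bit (HBB) value into the
 * hbb_lo/hbb_hi fields as described in the comment above.
 */
static void hbb_to_fields(unsigned int hbb,
			  unsigned int *hbb_lo, unsigned int *hbb_hi)
{
	unsigned int rem = hbb - 13;	/* 13 is the minimum the hw allows */

	*hbb_lo = rem & 0x3;		/* lowest two bits */
	*hbb_hi = (rem >> 2) & 0x1;	/* the bit above them */
}

/* e.g. the default hbb_lo = 2 above corresponds to HBB = 15,
 * and hbb_lo = 3 (as set for a650/a660) corresponds to HBB = 16.
 */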
@ -997,17 +1190,32 @@ static int hw_init(struct msm_gpu *gpu)
|
|||
{
|
||||
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
|
||||
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
|
||||
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
|
||||
int ret;
|
||||
|
||||
/* Make sure the GMU keeps the GPU on while we set it up */
|
||||
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
|
||||
if (!adreno_has_gmu_wrapper(adreno_gpu)) {
|
||||
/* Make sure the GMU keeps the GPU on while we set it up */
|
||||
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
|
||||
}
|
||||
|
||||
/* Clear GBIF halt in case GX domain was not collapsed */
|
||||
if (a6xx_has_gbif(adreno_gpu))
|
||||
if (adreno_is_a619_holi(adreno_gpu)) {
|
||||
gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
|
||||
gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, 0);
|
||||
/* Let's make extra sure that the GPU can access the memory.. */
|
||||
mb();
|
||||
} else if (a6xx_has_gbif(adreno_gpu)) {
|
||||
gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
|
||||
gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0);
|
||||
/* Let's make extra sure that the GPU can access the memory.. */
|
||||
mb();
|
||||
}
|
||||
|
||||
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
|
||||
|
||||
if (adreno_is_a619_holi(adreno_gpu))
|
||||
a6xx_sptprac_enable(gmu);
|
||||
|
||||
/*
|
||||
* Disable the trusted memory range - we don't actually supported secure
|
||||
* memory rendering at this point in time and we don't want to block off
|
||||
|
@ -1034,13 +1242,13 @@ static int hw_init(struct msm_gpu *gpu)
|
|||
a6xx_set_hwcg(gpu, true);
|
||||
|
||||
/* VBIF/GBIF start*/
|
||||
if (adreno_is_a640_family(adreno_gpu) ||
|
||||
if (adreno_is_a610(adreno_gpu) ||
|
||||
adreno_is_a640_family(adreno_gpu) ||
|
||||
adreno_is_a650_family(adreno_gpu)) {
|
||||
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
|
||||
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
|
||||
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
|
||||
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
|
||||
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
|
||||
gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3);
|
||||
} else {
|
||||
gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
|
||||
|
@ -1068,28 +1276,40 @@ static int hw_init(struct msm_gpu *gpu)
|
|||
gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
|
||||
gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
|
||||
|
||||
if (adreno_is_a640_family(adreno_gpu) ||
|
||||
adreno_is_a650_family(adreno_gpu))
|
||||
if (adreno_is_a640_family(adreno_gpu) || adreno_is_a650_family(adreno_gpu)) {
|
||||
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
|
||||
else
|
||||
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
|
||||
} else if (adreno_is_a610(adreno_gpu)) {
|
||||
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x00800060);
|
||||
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x40201b16);
|
||||
} else {
|
||||
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
|
||||
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
|
||||
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
|
||||
}
|
||||
|
||||
if (adreno_is_a660_family(adreno_gpu))
|
||||
gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020);
|
||||
|
||||
/* Setting the mem pool size */
|
||||
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
|
||||
if (adreno_is_a610(adreno_gpu)) {
|
||||
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 48);
|
||||
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 47);
|
||||
} else
|
||||
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
|
||||
|
||||
/* Setting the primFifo thresholds default values,
|
||||
* and vccCacheSkipDis=1 bit (0x200) for A640 and newer
|
||||
*/
|
||||
if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
|
||||
if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu) || adreno_is_a690(adreno_gpu))
|
||||
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
|
||||
else if (adreno_is_a640_family(adreno_gpu) || adreno_is_7c3(adreno_gpu))
|
||||
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200);
|
||||
else if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
|
||||
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
|
||||
else if (adreno_is_a619(adreno_gpu))
|
||||
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00018000);
|
||||
else if (adreno_is_a610(adreno_gpu))
|
||||
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00080000);
|
||||
else
|
||||
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000);
|
||||
|
||||
|
@ -1105,8 +1325,12 @@ static int hw_init(struct msm_gpu *gpu)
|
|||
a6xx_set_ubwc_config(gpu);
|
||||
|
||||
/* Enable fault detection */
|
||||
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
|
||||
(1 << 30) | 0x1fffff);
|
||||
if (adreno_is_a619(adreno_gpu))
|
||||
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3fffff);
|
||||
else if (adreno_is_a610(adreno_gpu))
|
||||
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3ffff);
|
||||
else
|
||||
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x1fffff);
|
||||
|
||||
gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
|
||||
|
||||
|
@ -1123,6 +1347,13 @@ static int hw_init(struct msm_gpu *gpu)
|
|||
0x3f0243f0);
|
||||
}
|
||||
|
||||
/* Set up the CX GMU counter 0 to count busy ticks */
|
||||
gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
|
||||
|
||||
/* Enable the power counter */
|
||||
gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, BIT(5));
|
||||
gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
|
||||
|
||||
/* Protect registers from the CP */
|
||||
a6xx_set_cp_protect(gpu);
|
||||
|
||||
|
@ -1212,6 +1443,8 @@ static int hw_init(struct msm_gpu *gpu)
|
|||
}
|
||||
|
||||
out:
|
||||
if (adreno_has_gmu_wrapper(adreno_gpu))
|
||||
return ret;
|
||||
/*
|
||||
* Tell the GMU that we are done touching the GPU and it can start power
|
||||
* management
|
||||
|
@ -1246,9 +1479,6 @@ static void a6xx_dump(struct msm_gpu *gpu)
|
|||
adreno_dump(gpu);
|
||||
}
|
||||
|
||||
#define VBIF_RESET_ACK_TIMEOUT 100
|
||||
#define VBIF_RESET_ACK_MASK 0x00f0
|
||||
|
||||
static void a6xx_recover(struct msm_gpu *gpu)
|
||||
{
|
||||
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
|
||||
|
@ -1274,12 +1504,6 @@ static void a6xx_recover(struct msm_gpu *gpu)
|
|||
/* Halt SQE first */
|
||||
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
|
||||
|
||||
/*
|
||||
* Turn off keep alive that might have been enabled by the hang
|
||||
* interrupt
|
||||
*/
|
||||
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
|
||||
|
||||
pm_runtime_dont_use_autosuspend(&gpu->pdev->dev);
|
||||
|
||||
/* active_submit won't change until we make a submission */
|
||||
|
@ -1292,6 +1516,15 @@ static void a6xx_recover(struct msm_gpu *gpu)
|
|||
*/
|
||||
gpu->active_submits = 0;
|
||||
|
||||
if (adreno_has_gmu_wrapper(adreno_gpu)) {
|
||||
/* Drain the outstanding traffic on memory buses */
|
||||
a6xx_bus_clear_pending_transactions(adreno_gpu, true);
|
||||
|
||||
/* Reset the GPU to a clean state */
|
||||
a6xx_gpu_sw_reset(gpu, true);
|
||||
a6xx_gpu_sw_reset(gpu, false);
|
||||
}
|
||||
|
||||
reinit_completion(&gmu->pd_gate);
|
||||
dev_pm_genpd_add_notifier(gmu->cxpd, &gmu->pd_nb);
|
||||
dev_pm_genpd_synced_poweroff(gmu->cxpd);
|
||||
|
@ -1442,7 +1675,8 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
|
|||
* Force the GPU to stay on until after we finish
|
||||
* collecting information
|
||||
*/
|
||||
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);
|
||||
if (!adreno_has_gmu_wrapper(adreno_gpu))
|
||||
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);
|
||||
|
||||
DRM_DEV_ERROR(&gpu->pdev->dev,
|
||||
"gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
|
||||
|
@ -1573,6 +1807,10 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
|
|||
|
||||
static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
|
||||
{
|
||||
/* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */
|
||||
if (adreno_has_gmu_wrapper(&a6xx_gpu->base))
|
||||
return;
|
||||
|
||||
llcc_slice_putd(a6xx_gpu->llc_slice);
|
||||
llcc_slice_putd(a6xx_gpu->htw_llc_slice);
|
||||
}
|
||||
|
@ -1582,6 +1820,10 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
|
|||
{
|
||||
struct device_node *phandle;
|
||||
|
||||
/* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */
|
||||
if (adreno_has_gmu_wrapper(&a6xx_gpu->base))
|
||||
return;
|
||||
|
||||
/*
|
||||
* There is a different programming path for targets with an mmu500
|
||||
* attached, so detect if that is the case
|
||||
|
@ -1603,7 +1845,66 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
|
|||
a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
static int a6xx_pm_resume(struct msm_gpu *gpu)
|
||||
#define GBIF_CLIENT_HALT_MASK BIT(0)
|
||||
#define GBIF_ARB_HALT_MASK BIT(1)
|
||||
#define VBIF_XIN_HALT_CTRL0_MASK GENMASK(3, 0)
|
||||
#define VBIF_RESET_ACK_MASK 0xF0
|
||||
#define GPR0_GBIF_HALT_REQUEST 0x1E0
|
||||
|
||||
void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off)
|
||||
{
|
||||
struct msm_gpu *gpu = &adreno_gpu->base;
|
||||
|
||||
if (adreno_is_a619_holi(adreno_gpu)) {
|
||||
gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, GPR0_GBIF_HALT_REQUEST);
|
||||
spin_until((gpu_read(gpu, REG_A6XX_RBBM_VBIF_GX_RESET_STATUS) &
|
||||
(VBIF_RESET_ACK_MASK)) == VBIF_RESET_ACK_MASK);
|
||||
} else if (!a6xx_has_gbif(adreno_gpu)) {
|
||||
gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, VBIF_XIN_HALT_CTRL0_MASK);
|
||||
spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
|
||||
(VBIF_XIN_HALT_CTRL0_MASK)) == VBIF_XIN_HALT_CTRL0_MASK);
|
||||
gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
if (gx_off) {
|
||||
/* Halt the gx side of GBIF */
|
||||
gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
|
||||
spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
|
||||
}
|
||||
|
||||
/* Halt new client requests on GBIF */
|
||||
gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
|
||||
spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
|
||||
(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
|
||||
|
||||
/* Halt all AXI requests on GBIF */
|
||||
gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
|
||||
spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
|
||||
(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
|
||||
|
||||
/* The GBIF halt needs to be explicitly cleared */
|
||||
gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
|
||||
}
|
||||
|
||||
void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert)
|
||||
{
|
||||
/* 11nm chips (e.g. ones with A610) have hw issues with the reset line! */
|
||||
if (adreno_is_a610(to_adreno_gpu(gpu)))
|
||||
return;
|
||||
|
||||
gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, assert);
|
||||
/* Perform a bogus read and add a brief delay to ensure ordering. */
|
||||
gpu_read(gpu, REG_A6XX_RBBM_SW_RESET_CMD);
|
||||
udelay(1);
|
||||
|
||||
/* The reset line needs to be asserted for at least 100 us */
|
||||
if (assert)
|
||||
udelay(100);
|
||||
}
|
||||
|
||||
static int a6xx_gmu_pm_resume(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

@@ -1623,10 +1924,61 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)

	a6xx_llc_activate(a6xx_gpu);

	return 0;
	return ret;
}

static int a6xx_pm_suspend(struct msm_gpu *gpu)
static int a6xx_pm_resume(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	unsigned long freq = gpu->fast_rate;
	struct dev_pm_opp *opp;
	int ret;

	gpu->needs_hw_init = true;

	trace_msm_gpu_resume(0);

	mutex_lock(&a6xx_gpu->gmu.lock);

	opp = dev_pm_opp_find_freq_ceil(&gpu->pdev->dev, &freq);
	if (IS_ERR(opp)) {
		ret = PTR_ERR(opp);
		goto err_set_opp;
	}
	dev_pm_opp_put(opp);

	/* Set the core clock and bus bw, having VDD scaling in mind */
	dev_pm_opp_set_opp(&gpu->pdev->dev, opp);

	pm_runtime_resume_and_get(gmu->dev);
	pm_runtime_resume_and_get(gmu->gxpd);

	ret = clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
	if (ret)
		goto err_bulk_clk;

	if (adreno_is_a619_holi(adreno_gpu))
		a6xx_sptprac_enable(gmu);

	/* If anything goes south, tear the GPU down piece by piece.. */
	if (ret) {
err_bulk_clk:
		pm_runtime_put(gmu->gxpd);
		pm_runtime_put(gmu->dev);
		dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
	}
err_set_opp:
	mutex_unlock(&a6xx_gpu->gmu.lock);

	if (!ret)
		msm_devfreq_resume(gpu);

	return ret;
}

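The GMU-wrapper resume path above leans on the OPP core rather than the GMU for frequency selection: dev_pm_opp_find_freq_ceil() rounds the requested rate up to the nearest table entry and returns it with a reference that must later be dropped with dev_pm_opp_put(), while dev_pm_opp_set_opp() applies the clock, supply and bandwidth configuration associated with that entry. A minimal sketch of that lookup pattern, with a hypothetical helper name:

/* Illustrative only: round 'hz' up to a defined OPP and program it. */
static int example_set_rate_ceil(struct device *dev, unsigned long hz)
{
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_freq_ceil(dev, &hz);	/* 'hz' is updated in place */
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	dev_pm_opp_set_opp(dev, opp);	/* clocks, regulators, bus bandwidth */
	dev_pm_opp_put(opp);		/* drop the reference taken by the lookup */

	return 0;
}
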
static int a6xx_gmu_pm_suspend(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

@@ -1653,7 +2005,43 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
	return 0;
}

static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
static int a6xx_pm_suspend(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int i;

	trace_msm_gpu_suspend(0);

	msm_devfreq_suspend(gpu);

	mutex_lock(&a6xx_gpu->gmu.lock);

	/* Drain the outstanding traffic on memory buses */
	a6xx_bus_clear_pending_transactions(adreno_gpu, true);

	if (adreno_is_a619_holi(adreno_gpu))
		a6xx_sptprac_disable(gmu);

	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);

	pm_runtime_put_sync(gmu->gxpd);
	dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
	pm_runtime_put_sync(gmu->dev);

	mutex_unlock(&a6xx_gpu->gmu.lock);

	if (a6xx_gpu->shadow_bo)
		for (i = 0; i < gpu->nr_rings; i++)
			a6xx_gpu->shadow[i] = 0;

	gpu->suspend_count++;

	return 0;
}

static int a6xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

@@ -1672,6 +2060,12 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
	return 0;
}

static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
	*value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER);
	return 0;
}

static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
|
||||
{
|
||||
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
|
||||
|
@ -1744,7 +2138,8 @@ a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
|
|||
* This allows GPU to set the bus attributes required to use system
|
||||
* cache on behalf of the iommu page table walker.
|
||||
*/
|
||||
if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
|
||||
if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice) &&
|
||||
!device_iommu_capable(&pdev->dev, IOMMU_CAP_CACHE_COHERENCY))
|
||||
quirks |= IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
|
||||
|
||||
return adreno_iommu_create_address_space(gpu, pdev, quirks);
|
||||
|
@ -1809,6 +2204,30 @@ static bool a6xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
|
|||
return progress;
|
||||
}

static u32 a610_get_speed_bin(u32 fuse)
{
	/*
	 * There are (at least) three SoCs implementing A610: SM6125 (trinket),
	 * SM6115 (bengal) and SM6225 (khaje). Trinket does not have speedbinning,
	 * as only a single SKU exists and we don't support khaje upstream yet.
	 * Hence, this matching table is only valid for bengal and can be easily
	 * expanded if need be.
	 */

	if (fuse == 0)
		return 0;
	else if (fuse == 206)
		return 1;
	else if (fuse == 200)
		return 2;
	else if (fuse == 157)
		return 3;
	else if (fuse == 127)
		return 4;

	return UINT_MAX;
}

static u32 a618_get_speed_bin(u32 fuse)
{
	if (fuse == 0)

@@ -1821,6 +2240,34 @@ static u32 a618_get_speed_bin(u32 fuse)
	return UINT_MAX;
}

static u32 a619_holi_get_speed_bin(u32 fuse)
{
	/*
	 * There are (at least) two SoCs implementing A619_holi: SM4350 (holi)
	 * and SM6375 (blair). Limit the fuse matching to the corresponding
	 * SoC to prevent bogus frequency setting (as improbable as it may be,
	 * given unexpected fuse values are.. unexpected! But still possible.)
	 */

	if (fuse == 0)
		return 0;

	if (of_machine_is_compatible("qcom,sm4350")) {
		if (fuse == 138)
			return 1;
		else if (fuse == 92)
			return 2;
	} else if (of_machine_is_compatible("qcom,sm6375")) {
		if (fuse == 190)
			return 1;
		else if (fuse == 177)
			return 2;
	} else
		pr_warn("Unknown SoC implementing A619_holi!\n");

	return UINT_MAX;
}

static u32 a619_get_speed_bin(u32 fuse)
{
	if (fuse == 0)

@@ -1874,23 +2321,29 @@ static u32 adreno_7c3_get_speed_bin(u32 fuse)
	return UINT_MAX;
}

static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse)
static u32 fuse_to_supp_hw(struct device *dev, struct adreno_gpu *adreno_gpu, u32 fuse)
{
	u32 val = UINT_MAX;

	if (adreno_cmp_rev(ADRENO_REV(6, 1, 8, ANY_ID), rev))
	if (adreno_is_a610(adreno_gpu))
		val = a610_get_speed_bin(fuse);

	if (adreno_is_a618(adreno_gpu))
		val = a618_get_speed_bin(fuse);

	if (adreno_cmp_rev(ADRENO_REV(6, 1, 9, ANY_ID), rev))
	else if (adreno_is_a619_holi(adreno_gpu))
		val = a619_holi_get_speed_bin(fuse);

	else if (adreno_is_a619(adreno_gpu))
		val = a619_get_speed_bin(fuse);

	if (adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), rev))
	else if (adreno_is_7c3(adreno_gpu))
		val = adreno_7c3_get_speed_bin(fuse);

	if (adreno_cmp_rev(ADRENO_REV(6, 4, 0, ANY_ID), rev))
	else if (adreno_is_a640(adreno_gpu))
		val = a640_get_speed_bin(fuse);

	if (adreno_cmp_rev(ADRENO_REV(6, 5, 0, ANY_ID), rev))
	else if (adreno_is_a650(adreno_gpu))
		val = a650_get_speed_bin(fuse);

	if (val == UINT_MAX) {

@@ -1903,7 +2356,7 @@ static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse)
	return (1 << val);
}

static int a6xx_set_supported_hw(struct device *dev, struct adreno_rev rev)
static int a6xx_set_supported_hw(struct device *dev, struct adreno_gpu *adreno_gpu)
{
	u32 supp_hw;
	u32 speedbin;

@@ -1922,7 +2375,7 @@ static int a6xx_set_supported_hw(struct device *dev, struct adreno_rev rev)
	return ret;
}

	supp_hw = fuse_to_supp_hw(dev, rev, speedbin);
	supp_hw = fuse_to_supp_hw(dev, adreno_gpu, speedbin);

	ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1);
	if (ret)

@@ -1932,6 +2385,37 @@ static int a6xx_set_supported_hw(struct device *dev, struct adreno_rev rev)
}

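Taken together, the speed-bin plumbing reduces to: read the fuse from nvmem, translate it to a bin index with the per-GPU table, and advertise BIT(bin) to the OPP core so only table entries whose opp-supported-hw value contains that bit stay enabled. A worked illustration using the a610 table above (the DT side is assumed, not shown in this patch):

	u32 bin = a610_get_speed_bin(206);	/* 1 on SM6115 (bengal), per the table above */
	u32 supp_hw = 1 << bin;			/* 0x2, matched against opp-supported-hw in DT */

With that mask, an OPP carrying opp-supported-hw = <0x2> (or any value with bit 1 set) remains usable on such a part, while entries reserved for other bins are dropped by the OPP core.
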
static const struct adreno_gpu_funcs funcs = {
|
||||
.base = {
|
||||
.get_param = adreno_get_param,
|
||||
.set_param = adreno_set_param,
|
||||
.hw_init = a6xx_hw_init,
|
||||
.ucode_load = a6xx_ucode_load,
|
||||
.pm_suspend = a6xx_gmu_pm_suspend,
|
||||
.pm_resume = a6xx_gmu_pm_resume,
|
||||
.recover = a6xx_recover,
|
||||
.submit = a6xx_submit,
|
||||
.active_ring = a6xx_active_ring,
|
||||
.irq = a6xx_irq,
|
||||
.destroy = a6xx_destroy,
|
||||
#if defined(CONFIG_DRM_MSM_GPU_STATE)
|
||||
.show = a6xx_show,
|
||||
#endif
|
||||
.gpu_busy = a6xx_gpu_busy,
|
||||
.gpu_get_freq = a6xx_gmu_get_freq,
|
||||
.gpu_set_freq = a6xx_gpu_set_freq,
|
||||
#if defined(CONFIG_DRM_MSM_GPU_STATE)
|
||||
.gpu_state_get = a6xx_gpu_state_get,
|
||||
.gpu_state_put = a6xx_gpu_state_put,
|
||||
#endif
|
||||
.create_address_space = a6xx_create_address_space,
|
||||
.create_private_address_space = a6xx_create_private_address_space,
|
||||
.get_rptr = a6xx_get_rptr,
|
||||
.progress = a6xx_progress,
|
||||
},
|
||||
.get_timestamp = a6xx_gmu_get_timestamp,
|
||||
};
|
||||
|
||||
static const struct adreno_gpu_funcs funcs_gmuwrapper = {
|
||||
.base = {
|
||||
.get_param = adreno_get_param,
|
||||
.set_param = adreno_set_param,
|
||||
|
@ -1948,8 +2432,6 @@ static const struct adreno_gpu_funcs funcs = {
|
|||
.show = a6xx_show,
|
||||
#endif
|
||||
.gpu_busy = a6xx_gpu_busy,
|
||||
.gpu_get_freq = a6xx_gmu_get_freq,
|
||||
.gpu_set_freq = a6xx_gpu_set_freq,
|
||||
#if defined(CONFIG_DRM_MSM_GPU_STATE)
|
||||
.gpu_state_get = a6xx_gpu_state_get,
|
||||
.gpu_state_put = a6xx_gpu_state_put,
|
||||
|
@ -1985,26 +2467,45 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
|
|||
|
||||
adreno_gpu->registers = NULL;
|
||||
|
||||
/* Check if there is a GMU phandle and set it up */
|
||||
node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
|
||||
/* FIXME: How do we gracefully handle this? */
|
||||
BUG_ON(!node);
|
||||
|
||||
adreno_gpu->gmu_is_wrapper = of_device_is_compatible(node, "qcom,adreno-gmu-wrapper");
|
||||
|
||||
/*
|
||||
* We need to know the platform type before calling into adreno_gpu_init
|
||||
* so that the hw_apriv flag can be correctly set. Snoop into the info
|
||||
* and grab the revision number
|
||||
*/
|
||||
info = adreno_info(config->rev);
|
||||
if (!info)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
if (info && (info->revn == 650 || info->revn == 660 ||
|
||||
adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), info->rev)))
|
||||
/* Assign these early so that we can use the is_aXYZ helpers */
|
||||
/* Numeric revision IDs (e.g. 630) */
|
||||
adreno_gpu->revn = info->revn;
|
||||
/* New-style ADRENO_REV()-only */
|
||||
adreno_gpu->rev = info->rev;
|
||||
/* Quirk data */
|
||||
adreno_gpu->info = info;
|
||||
|
||||
if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu))
|
||||
adreno_gpu->base.hw_apriv = true;
|
||||
|
||||
a6xx_llc_slices_init(pdev, a6xx_gpu);
|
||||
|
||||
ret = a6xx_set_supported_hw(&pdev->dev, config->rev);
|
||||
ret = a6xx_set_supported_hw(&pdev->dev, adreno_gpu);
|
||||
if (ret) {
|
||||
a6xx_destroy(&(a6xx_gpu->base.base));
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
|
||||
if (adreno_has_gmu_wrapper(adreno_gpu))
|
||||
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_gmuwrapper, 1);
|
||||
else
|
||||
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
|
||||
if (ret) {
|
||||
a6xx_destroy(&(a6xx_gpu->base.base));
|
||||
return ERR_PTR(ret);
|
||||
|
@ -2017,13 +2518,10 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
|
|||
if (adreno_is_a618(adreno_gpu) || adreno_is_7c3(adreno_gpu))
|
||||
priv->gpu_clamp_to_idle = true;
|
||||
|
||||
/* Check if there is a GMU phandle and set it up */
|
||||
node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
|
||||
|
||||
/* FIXME: How do we gracefully handle this? */
|
||||
BUG_ON(!node);
|
||||
|
||||
ret = a6xx_gmu_init(a6xx_gpu, node);
|
||||
if (adreno_has_gmu_wrapper(adreno_gpu))
|
||||
ret = a6xx_gmu_wrapper_init(a6xx_gpu, node);
|
||||
else
|
||||
ret = a6xx_gmu_init(a6xx_gpu, node);
|
||||
of_node_put(node);
|
||||
if (ret) {
|
||||
a6xx_destroy(&(a6xx_gpu->base.base));
|
||||
|
|
|
@ -76,6 +76,7 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
|
|||
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
|
||||
|
||||
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
|
||||
int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
|
||||
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
|
||||
|
||||
void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
|
||||
|
@ -88,4 +89,7 @@ void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
|
|||
struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu);
|
||||
int a6xx_gpu_state_put(struct msm_gpu_state *state);
|
||||
|
||||
void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off);
|
||||
void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert);
|
||||
|
||||
#endif /* __A6XX_GPU_H__ */
|
||||
|
|
|
@ -1041,16 +1041,18 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
|
|||
/* Get the generic state from the adreno core */
|
||||
adreno_gpu_state_get(gpu, &a6xx_state->base);
|
||||
|
||||
a6xx_get_gmu_registers(gpu, a6xx_state);
|
||||
if (!adreno_has_gmu_wrapper(adreno_gpu)) {
|
||||
a6xx_get_gmu_registers(gpu, a6xx_state);
|
||||
|
||||
a6xx_state->gmu_log = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.log);
|
||||
a6xx_state->gmu_hfi = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.hfi);
|
||||
a6xx_state->gmu_debug = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.debug);
|
||||
a6xx_state->gmu_log = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.log);
|
||||
a6xx_state->gmu_hfi = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.hfi);
|
||||
a6xx_state->gmu_debug = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.debug);
|
||||
|
||||
a6xx_snapshot_gmu_hfi_history(gpu, a6xx_state);
|
||||
a6xx_snapshot_gmu_hfi_history(gpu, a6xx_state);
|
||||
}
|
||||
|
||||
/* If GX isn't on the rest of the data isn't going to be accessible */
|
||||
if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
|
||||
if (!adreno_has_gmu_wrapper(adreno_gpu) && !a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
|
||||
return &a6xx_state->base;
|
||||
|
||||
/* Get the banks of indexed registers */
|
||||
|
|
|
@ -414,6 +414,37 @@ static void a650_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
|
|||
msg->cnoc_cmds_data[1][0] = 0x60000001;
|
||||
}
|
||||
|
||||
static void a690_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
|
||||
{
|
||||
/*
|
||||
* Send a single "off" entry just to get things running
|
||||
* TODO: bus scaling
|
||||
*/
|
||||
msg->bw_level_num = 1;
|
||||
|
||||
msg->ddr_cmds_num = 3;
|
||||
msg->ddr_wait_bitmask = 0x01;
|
||||
|
||||
msg->ddr_cmds_addrs[0] = 0x50004;
|
||||
msg->ddr_cmds_addrs[1] = 0x50000;
|
||||
msg->ddr_cmds_addrs[2] = 0x500ac;
|
||||
|
||||
msg->ddr_cmds_data[0][0] = 0x40000000;
|
||||
msg->ddr_cmds_data[0][1] = 0x40000000;
|
||||
msg->ddr_cmds_data[0][2] = 0x40000000;
|
||||
|
||||
/*
|
||||
* These are the CX (CNOC) votes - these are used by the GMU but the
|
||||
* votes are known and fixed for the target
|
||||
*/
|
||||
msg->cnoc_cmds_num = 1;
|
||||
msg->cnoc_wait_bitmask = 0x01;
|
||||
|
||||
msg->cnoc_cmds_addrs[0] = 0x5003c;
|
||||
msg->cnoc_cmds_data[0][0] = 0x40000000;
|
||||
msg->cnoc_cmds_data[1][0] = 0x60000001;
|
||||
}
|
||||
|
||||
static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
|
||||
{
|
||||
/*
|
||||
|
@ -531,6 +562,8 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
|
|||
adreno_7c3_build_bw_table(&msg);
|
||||
else if (adreno_is_a660(adreno_gpu))
|
||||
a660_build_bw_table(&msg);
|
||||
else if (adreno_is_a690(adreno_gpu))
|
||||
a690_build_bw_table(&msg);
|
||||
else
|
||||
a6xx_build_bw_table(&msg);
|
||||
|
||||
|
|
|
@ -253,6 +253,18 @@ static const struct adreno_info gpulist[] = {
|
|||
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
|
||||
.init = a5xx_gpu_init,
|
||||
.zapfw = "a540_zap.mdt",
|
||||
}, {
|
||||
.rev = ADRENO_REV(6, 1, 0, ANY_ID),
|
||||
.revn = 610,
|
||||
.name = "A610",
|
||||
.fw = {
|
||||
[ADRENO_FW_SQE] = "a630_sqe.fw",
|
||||
},
|
||||
.gmem = (SZ_128K + SZ_4K),
|
||||
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
|
||||
.init = a6xx_gpu_init,
|
||||
.zapfw = "a610_zap.mdt",
|
||||
.hwcg = a612_hwcg,
|
||||
}, {
|
||||
.rev = ADRENO_REV(6, 1, 8, ANY_ID),
|
||||
.revn = 618,
|
||||
|
@ -355,6 +367,20 @@ static const struct adreno_info gpulist[] = {
|
|||
.init = a6xx_gpu_init,
|
||||
.zapfw = "a640_zap.mdt",
|
||||
.hwcg = a640_hwcg,
|
||||
}, {
|
||||
.rev = ADRENO_REV(6, 9, 0, ANY_ID),
|
||||
.revn = 690,
|
||||
.name = "A690",
|
||||
.fw = {
|
||||
[ADRENO_FW_SQE] = "a660_sqe.fw",
|
||||
[ADRENO_FW_GMU] = "a690_gmu.bin",
|
||||
},
|
||||
.gmem = SZ_4M,
|
||||
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
|
||||
.init = a6xx_gpu_init,
|
||||
.zapfw = "a690_zap.mdt",
|
||||
.hwcg = a690_hwcg,
|
||||
.address_space_size = SZ_16G,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -551,7 +577,6 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
|
|||
config.rev.minor, config.rev.patchid);
|
||||
|
||||
priv->is_a2xx = config.rev.core == 2;
|
||||
priv->has_cached_coherent = config.rev.core >= 6;
|
||||
|
||||
gpu = info->init(drm);
|
||||
if (IS_ERR(gpu)) {
|
||||
|
@ -563,6 +588,10 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (config.rev.core >= 6)
|
||||
if (!adreno_has_gmu_wrapper(to_adreno_gpu(gpu)))
|
||||
priv->has_cached_coherent = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -528,6 +528,10 @@ int adreno_load_fw(struct adreno_gpu *adreno_gpu)
|
|||
if (!adreno_gpu->info->fw[i])
|
||||
continue;
|
||||
|
||||
/* Skip loading GMU firmware with GMU Wrapper */
|
||||
if (adreno_has_gmu_wrapper(adreno_gpu) && i == ADRENO_FW_GMU)
|
||||
continue;
|
||||
|
||||
/* Skip if the firmware has already been loaded */
|
||||
if (adreno_gpu->fw[i])
|
||||
continue;
|
||||
|
@ -1074,8 +1078,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
|
|||
u32 speedbin;
|
||||
int ret;
|
||||
|
||||
/* Only handle the core clock when GMU is not in use */
|
||||
if (config->rev.core < 6) {
|
||||
/* Only handle the core clock when GMU is not in use (or is absent). */
|
||||
if (adreno_has_gmu_wrapper(adreno_gpu) || config->rev.core < 6) {
|
||||
/*
|
||||
* This can only be done before devm_pm_opp_of_add_table(), or
|
||||
* dev_pm_opp_set_config() will WARN_ON()
|
||||
|
|
|
@ -55,7 +55,8 @@ struct adreno_reglist {
|
|||
u32 value;
|
||||
};
|
||||
|
||||
extern const struct adreno_reglist a615_hwcg[], a630_hwcg[], a640_hwcg[], a650_hwcg[], a660_hwcg[];
|
||||
extern const struct adreno_reglist a612_hwcg[], a615_hwcg[], a630_hwcg[], a640_hwcg[], a650_hwcg[];
|
||||
extern const struct adreno_reglist a660_hwcg[], a690_hwcg[];
|
||||
|
||||
struct adreno_info {
|
||||
struct adreno_rev rev;
|
||||
|
@ -115,6 +116,7 @@ struct adreno_gpu {
|
|||
* code (a3xx_gpu.c) and stored in this common location.
|
||||
*/
|
||||
const unsigned int *reg_offsets;
|
||||
bool gmu_is_wrapper;
|
||||
};
|
||||
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
|
||||
|
||||
|
@ -145,148 +147,194 @@ struct adreno_platform_config {
|
|||
|
||||
bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2);
|
||||
|
||||
static inline bool adreno_is_a2xx(struct adreno_gpu *gpu)
|
||||
static inline bool adreno_is_revn(const struct adreno_gpu *gpu, uint32_t revn)
|
||||
{
|
||||
WARN_ON_ONCE(!gpu->revn);
|
||||
|
||||
return gpu->revn == revn;
|
||||
}
|
||||
|
||||
static inline bool adreno_has_gmu_wrapper(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->gmu_is_wrapper;
|
||||
}
|
||||
|
||||
static inline bool adreno_is_a2xx(const struct adreno_gpu *gpu)
|
||||
{
|
||||
WARN_ON_ONCE(!gpu->revn);
|
||||
|
||||
return (gpu->revn < 300);
|
||||
}
|
||||
|
||||
static inline bool adreno_is_a20x(struct adreno_gpu *gpu)
|
||||
static inline bool adreno_is_a20x(const struct adreno_gpu *gpu)
|
||||
{
|
||||
WARN_ON_ONCE(!gpu->revn);
|
||||
|
||||
return (gpu->revn < 210);
|
||||
}
|
||||
|
||||
static inline bool adreno_is_a225(struct adreno_gpu *gpu)
|
||||
static inline bool adreno_is_a225(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 225;
|
||||
return adreno_is_revn(gpu, 225);
|
||||
}
|
||||
|
||||
static inline bool adreno_is_a305(struct adreno_gpu *gpu)
|
||||
static inline bool adreno_is_a305(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 305;
|
||||
return adreno_is_revn(gpu, 305);
|
||||
}
|
||||
|
||||
static inline bool adreno_is_a306(struct adreno_gpu *gpu)
|
||||
static inline bool adreno_is_a306(const struct adreno_gpu *gpu)
|
||||
{
|
||||
/* yes, 307, because a305c is 306 */
|
||||
return gpu->revn == 307;
|
||||
return adreno_is_revn(gpu, 307);
|
||||
}
|
||||
|
||||
static inline bool adreno_is_a320(struct adreno_gpu *gpu)
|
||||
static inline bool adreno_is_a320(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 320;
|
||||
return adreno_is_revn(gpu, 320);
|
||||
}
|
||||
|
||||
static inline bool adreno_is_a330(struct adreno_gpu *gpu)
|
||||
static inline bool adreno_is_a330(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 330;
|
||||
return adreno_is_revn(gpu, 330);
|
||||
}
|
||||
|
||||
static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
|
||||
static inline bool adreno_is_a330v2(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
|
||||
}
|
||||
|
||||
static inline int adreno_is_a405(struct adreno_gpu *gpu)
|
||||
static inline int adreno_is_a405(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 405;
|
||||
return adreno_is_revn(gpu, 405);
|
||||
}
|
||||
|
||||
static inline int adreno_is_a420(struct adreno_gpu *gpu)
|
||||
static inline int adreno_is_a420(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 420;
|
||||
return adreno_is_revn(gpu, 420);
|
||||
}
|
||||
|
||||
static inline int adreno_is_a430(struct adreno_gpu *gpu)
|
||||
static inline int adreno_is_a430(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 430;
|
||||
return adreno_is_revn(gpu, 430);
|
||||
}
|
||||
|
||||
static inline int adreno_is_a506(struct adreno_gpu *gpu)
|
||||
static inline int adreno_is_a506(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 506;
|
||||
return adreno_is_revn(gpu, 506);
|
||||
}
|
||||
|
||||
static inline int adreno_is_a508(struct adreno_gpu *gpu)
|
||||
static inline int adreno_is_a508(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 508;
|
||||
return adreno_is_revn(gpu, 508);
|
||||
}
|
||||
|
||||
static inline int adreno_is_a509(struct adreno_gpu *gpu)
|
||||
static inline int adreno_is_a509(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 509;
|
||||
return adreno_is_revn(gpu, 509);
|
||||
}
|
||||
|
||||
static inline int adreno_is_a510(struct adreno_gpu *gpu)
|
||||
static inline int adreno_is_a510(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 510;
|
||||
return adreno_is_revn(gpu, 510);
|
||||
}
|
||||
|
||||
static inline int adreno_is_a512(struct adreno_gpu *gpu)
|
||||
static inline int adreno_is_a512(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 512;
|
||||
return adreno_is_revn(gpu, 512);
|
||||
}
|
||||
|
||||
static inline int adreno_is_a530(struct adreno_gpu *gpu)
|
||||
static inline int adreno_is_a530(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 530;
|
||||
return adreno_is_revn(gpu, 530);
|
||||
}
|
||||
|
||||
static inline int adreno_is_a540(struct adreno_gpu *gpu)
|
||||
static inline int adreno_is_a540(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 540;
|
||||
return adreno_is_revn(gpu, 540);
|
||||
}
|
||||
|
||||
static inline int adreno_is_a618(struct adreno_gpu *gpu)
|
||||
static inline int adreno_is_a610(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 618;
|
||||
return adreno_is_revn(gpu, 610);
|
||||
}
|
||||
|
||||
static inline int adreno_is_a619(struct adreno_gpu *gpu)
|
||||
static inline int adreno_is_a618(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 619;
|
||||
return adreno_is_revn(gpu, 618);
|
||||
}
|
||||
|
||||
static inline int adreno_is_a630(struct adreno_gpu *gpu)
|
||||
static inline int adreno_is_a619(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 630;
|
||||
return adreno_is_revn(gpu, 619);
|
||||
}
|
||||
|
||||
static inline int adreno_is_a640_family(struct adreno_gpu *gpu)
|
||||
static inline int adreno_is_a619_holi(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return (gpu->revn == 640) || (gpu->revn == 680);
|
||||
return adreno_is_a619(gpu) && adreno_has_gmu_wrapper(gpu);
|
||||
}
|
||||
|
||||
static inline int adreno_is_a650(struct adreno_gpu *gpu)
|
||||
static inline int adreno_is_a630(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 650;
|
||||
return adreno_is_revn(gpu, 630);
|
||||
}
|
||||
|
||||
static inline int adreno_is_7c3(struct adreno_gpu *gpu)
|
||||
static inline int adreno_is_a640(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return adreno_is_revn(gpu, 640);
|
||||
}
|
||||
|
||||
static inline int adreno_is_a650(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return adreno_is_revn(gpu, 650);
|
||||
}
|
||||
|
||||
static inline int adreno_is_7c3(const struct adreno_gpu *gpu)
|
||||
{
|
||||
/* The order of args is important here to handle ANY_ID correctly */
|
||||
return adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), gpu->rev);
|
||||
}
|
||||
|
||||
static inline int adreno_is_a660(struct adreno_gpu *gpu)
|
||||
static inline int adreno_is_a660(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 660;
|
||||
return adreno_is_revn(gpu, 660);
|
||||
}
|
||||
|
||||
static inline int adreno_is_a680(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return adreno_is_revn(gpu, 680);
|
||||
}
|
||||
|
||||
static inline int adreno_is_a690(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return adreno_is_revn(gpu, 690);
|
||||
};
|
||||
|
||||
/* check for a615, a616, a618, a619 or any derivatives */
|
||||
static inline int adreno_is_a615_family(struct adreno_gpu *gpu)
|
||||
static inline int adreno_is_a615_family(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 615 || gpu->revn == 616 || gpu->revn == 618 || gpu->revn == 619;
|
||||
return adreno_is_revn(gpu, 615) ||
|
||||
adreno_is_revn(gpu, 616) ||
|
||||
adreno_is_revn(gpu, 618) ||
|
||||
adreno_is_revn(gpu, 619);
|
||||
}
|
||||
|
||||
static inline int adreno_is_a660_family(struct adreno_gpu *gpu)
|
||||
static inline int adreno_is_a660_family(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return adreno_is_a660(gpu) || adreno_is_7c3(gpu);
|
||||
return adreno_is_a660(gpu) || adreno_is_a690(gpu) || adreno_is_7c3(gpu);
|
||||
}
|
||||
|
||||
/* check for a650, a660, or any derivatives */
|
||||
static inline int adreno_is_a650_family(struct adreno_gpu *gpu)
|
||||
static inline int adreno_is_a650_family(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 650 || gpu->revn == 620 || adreno_is_a660_family(gpu);
|
||||
return adreno_is_revn(gpu, 650) ||
|
||||
adreno_is_revn(gpu, 620) ||
|
||||
adreno_is_a660_family(gpu);
|
||||
}
|
||||
|
||||
static inline int adreno_is_a640_family(const struct adreno_gpu *gpu)
|
||||
{
|
||||
return adreno_is_a640(gpu) || adreno_is_a680(gpu);
|
||||
}
|
||||
|
||||
u64 adreno_private_address_space_size(struct msm_gpu *gpu);
|
||||
|
|
|
@ -30,7 +30,7 @@ static const struct dpu_mdp_cfg msm8998_mdp[] = {
|
|||
{
|
||||
.name = "top_0", .id = MDP_TOP,
|
||||
.base = 0x0, .len = 0x458,
|
||||
.features = 0,
|
||||
.features = BIT(DPU_MDP_VSYNC_SEL),
|
||||
.clk_ctrls[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
|
||||
.clk_ctrls[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
|
||||
.clk_ctrls[DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
|
||||
|
@ -39,8 +39,8 @@ static const struct dpu_mdp_cfg msm8998_mdp[] = {
|
|||
.clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
|
||||
.clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
|
||||
.clk_ctrls[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 12 },
|
||||
.clk_ctrls[DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 15 },
|
||||
.clk_ctrls[DPU_CLK_CTRL_CURSOR1] = { .reg_off = 0x3b0, .bit_off = 15 },
|
||||
.clk_ctrls[DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 },
|
||||
.clk_ctrls[DPU_CLK_CTRL_CURSOR1] = { .reg_off = 0x3b0, .bit_off = 16 },
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -104,40 +104,53 @@ static const struct dpu_lm_cfg msm8998_lm[] = {
|
|||
LM_BLK("lm_2", LM_2, 0x46000, MIXER_MSM8998_MASK,
|
||||
&msm8998_lm_sblk, PINGPONG_2, LM_5, 0),
|
||||
LM_BLK("lm_3", LM_3, 0x47000, MIXER_MSM8998_MASK,
|
||||
&msm8998_lm_sblk, PINGPONG_MAX, 0, 0),
|
||||
&msm8998_lm_sblk, PINGPONG_NONE, 0, 0),
|
||||
LM_BLK("lm_4", LM_4, 0x48000, MIXER_MSM8998_MASK,
|
||||
&msm8998_lm_sblk, PINGPONG_MAX, 0, 0),
|
||||
&msm8998_lm_sblk, PINGPONG_NONE, 0, 0),
|
||||
LM_BLK("lm_5", LM_5, 0x49000, MIXER_MSM8998_MASK,
|
||||
&msm8998_lm_sblk, PINGPONG_3, LM_2, 0),
|
||||
};
|
||||
|
||||
static const struct dpu_pingpong_cfg msm8998_pp[] = {
|
||||
PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk_te,
|
||||
PP_BLK("pingpong_0", PINGPONG_0, 0x70000, PINGPONG_SDM845_TE2_MASK, 0, sdm845_pp_sblk_te,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
|
||||
PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk_te,
|
||||
PP_BLK("pingpong_1", PINGPONG_1, 0x70800, PINGPONG_SDM845_TE2_MASK, 0, sdm845_pp_sblk_te,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
|
||||
PP_BLK("pingpong_2", PINGPONG_2, 0x71000, 0, sdm845_pp_sblk,
|
||||
PP_BLK("pingpong_2", PINGPONG_2, 0x71000, PINGPONG_SDM845_MASK, 0, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)),
|
||||
PP_BLK("pingpong_3", PINGPONG_3, 0x71800, 0, sdm845_pp_sblk,
|
||||
PP_BLK("pingpong_3", PINGPONG_3, 0x71800, PINGPONG_SDM845_MASK, 0, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)),
|
||||
};
|
||||
|
||||
static const struct dpu_dsc_cfg msm8998_dsc[] = {
|
||||
DSC_BLK("dsc_0", DSC_0, 0x80000, 0),
|
||||
DSC_BLK("dsc_1", DSC_1, 0x80400, 0),
|
||||
};
|
||||
|
||||
static const struct dpu_dspp_cfg msm8998_dspp[] = {
|
||||
DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_MSM8998_MASK,
|
||||
DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
|
||||
&msm8998_dspp_sblk),
|
||||
DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_MSM8998_MASK,
|
||||
DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
|
||||
&msm8998_dspp_sblk),
|
||||
};
|
||||
|
||||
static const struct dpu_intf_cfg msm8998_intf[] = {
|
||||
INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
|
||||
INTF_BLK("intf_1", INTF_1, 0x6a800, 0x280, INTF_DSI, 0, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
|
||||
INTF_BLK("intf_2", INTF_2, 0x6b000, 0x280, INTF_DSI, 1, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
|
||||
INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_HDMI, 0, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
|
||||
INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 21, INTF_SDM845_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
|
||||
INTF_BLK("intf_1", INTF_1, 0x6a800, 0x280, INTF_DSI, 0, 21, INTF_SDM845_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27)),
|
||||
INTF_BLK("intf_2", INTF_2, 0x6b000, 0x280, INTF_DSI, 1, 21, INTF_SDM845_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29)),
|
||||
INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_HDMI, 0, 21, INTF_SDM845_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31)),
|
||||
};
|
||||
|
||||
static const struct dpu_perf_cfg msm8998_perf_data = {
|
||||
|
@ -191,11 +204,12 @@ const struct dpu_mdss_cfg dpu_msm8998_cfg = {
|
|||
.dspp = msm8998_dspp,
|
||||
.pingpong_count = ARRAY_SIZE(msm8998_pp),
|
||||
.pingpong = msm8998_pp,
|
||||
.dsc_count = ARRAY_SIZE(msm8998_dsc),
|
||||
.dsc = msm8998_dsc,
|
||||
.intf_count = ARRAY_SIZE(msm8998_intf),
|
||||
.intf = msm8998_intf,
|
||||
.vbif_count = ARRAY_SIZE(msm8998_vbif),
|
||||
.vbif = msm8998_vbif,
|
||||
.reg_dma_count = 0,
|
||||
.perf = &msm8998_perf_data,
|
||||
.mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
|
||||
BIT(MDP_SSPP_TOP0_INTR2) | \
|
||||
|
|
|
@ -30,7 +30,7 @@ static const struct dpu_mdp_cfg sdm845_mdp[] = {
|
|||
{
|
||||
.name = "top_0", .id = MDP_TOP,
|
||||
.base = 0x0, .len = 0x45c,
|
||||
.features = BIT(DPU_MDP_AUDIO_SELECT),
|
||||
.features = BIT(DPU_MDP_AUDIO_SELECT) | BIT(DPU_MDP_VSYNC_SEL),
|
||||
.clk_ctrls[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
|
||||
.clk_ctrls[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
|
||||
.clk_ctrls[DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
|
||||
|
@ -96,30 +96,41 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = {
|
|||
|
||||
static const struct dpu_lm_cfg sdm845_lm[] = {
|
||||
LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
|
||||
&sdm845_lm_sblk, PINGPONG_0, LM_1, 0),
|
||||
&sdm845_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
|
||||
LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
|
||||
&sdm845_lm_sblk, PINGPONG_1, LM_0, 0),
|
||||
&sdm845_lm_sblk, PINGPONG_1, LM_0, DSPP_1),
|
||||
LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
|
||||
&sdm845_lm_sblk, PINGPONG_2, LM_5, 0),
|
||||
&sdm845_lm_sblk, PINGPONG_2, LM_5, DSPP_2),
|
||||
LM_BLK("lm_3", LM_3, 0x0, MIXER_SDM845_MASK,
|
||||
&sdm845_lm_sblk, PINGPONG_MAX, 0, 0),
|
||||
&sdm845_lm_sblk, PINGPONG_NONE, 0, DSPP_3),
|
||||
LM_BLK("lm_4", LM_4, 0x0, MIXER_SDM845_MASK,
|
||||
&sdm845_lm_sblk, PINGPONG_MAX, 0, 0),
|
||||
&sdm845_lm_sblk, PINGPONG_NONE, 0, 0),
|
||||
LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK,
|
||||
&sdm845_lm_sblk, PINGPONG_3, LM_2, 0),
|
||||
};
|
||||
|
||||
static const struct dpu_dspp_cfg sdm845_dspp[] = {
|
||||
DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
|
||||
&sm8150_dspp_sblk),
|
||||
DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
|
||||
&sm8150_dspp_sblk),
|
||||
DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
|
||||
&sm8150_dspp_sblk),
|
||||
DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
|
||||
&sm8150_dspp_sblk),
|
||||
};
|
||||
|
||||
static const struct dpu_pingpong_cfg sdm845_pp[] = {
|
||||
PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk_te,
|
||||
PP_BLK("pingpong_0", PINGPONG_0, 0x70000, PINGPONG_SDM845_TE2_MASK, 0, sdm845_pp_sblk_te,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
|
||||
PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk_te,
|
||||
PP_BLK("pingpong_1", PINGPONG_1, 0x70800, PINGPONG_SDM845_TE2_MASK, 0, sdm845_pp_sblk_te,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
|
||||
PP_BLK("pingpong_2", PINGPONG_2, 0x71000, 0, sdm845_pp_sblk,
|
||||
PP_BLK("pingpong_2", PINGPONG_2, 0x71000, PINGPONG_SDM845_MASK, 0, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)),
|
||||
PP_BLK("pingpong_3", PINGPONG_3, 0x71800, 0, sdm845_pp_sblk,
|
||||
PP_BLK("pingpong_3", PINGPONG_3, 0x71800, PINGPONG_SDM845_MASK, 0, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)),
|
||||
};
|
||||
|
@ -132,10 +143,18 @@ static const struct dpu_dsc_cfg sdm845_dsc[] = {
|
|||
};
|
||||
|
||||
static const struct dpu_intf_cfg sdm845_intf[] = {
|
||||
INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 24, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
|
||||
INTF_BLK("intf_1", INTF_1, 0x6a800, 0x280, INTF_DSI, 0, 24, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
|
||||
INTF_BLK("intf_2", INTF_2, 0x6b000, 0x280, INTF_DSI, 1, 24, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
|
||||
INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_DP, 1, 24, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
|
||||
INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 24, INTF_SDM845_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
|
||||
INTF_BLK("intf_1", INTF_1, 0x6a800, 0x280, INTF_DSI, 0, 24, INTF_SDM845_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27)),
|
||||
INTF_BLK("intf_2", INTF_2, 0x6b000, 0x280, INTF_DSI, 1, 24, INTF_SDM845_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29)),
|
||||
INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_DP, 1, 24, INTF_SDM845_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31)),
|
||||
};
|
||||
|
||||
static const struct dpu_perf_cfg sdm845_perf_data = {
|
||||
|
@ -185,6 +204,8 @@ const struct dpu_mdss_cfg dpu_sdm845_cfg = {
|
|||
.sspp = sdm845_sspp,
|
||||
.mixer_count = ARRAY_SIZE(sdm845_lm),
|
||||
.mixer = sdm845_lm,
|
||||
.dspp_count = ARRAY_SIZE(sdm845_dspp),
|
||||
.dspp = sdm845_dspp,
|
||||
.pingpong_count = ARRAY_SIZE(sdm845_pp),
|
||||
.pingpong = sdm845_pp,
|
||||
.dsc_count = ARRAY_SIZE(sdm845_dsc),
|
||||
|
@ -193,8 +214,6 @@ const struct dpu_mdss_cfg dpu_sdm845_cfg = {
|
|||
.intf = sdm845_intf,
|
||||
.vbif_count = ARRAY_SIZE(sdm845_vbif),
|
||||
.vbif = sdm845_vbif,
|
||||
.reg_dma_count = 1,
|
||||
.dma_cfg = &sdm845_regdma,
|
||||
.perf = &sdm845_perf_data,
|
||||
.mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
|
||||
BIT(MDP_SSPP_TOP0_INTR2) | \
|
||||
|
|
|
@ -128,22 +128,22 @@ static const struct dpu_dspp_cfg sm8150_dspp[] = {
|
|||
};
|
||||
|
||||
static const struct dpu_pingpong_cfg sm8150_pp[] = {
|
||||
PP_BLK("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk,
|
||||
PP_BLK("pingpong_0", PINGPONG_0, 0x70000, PINGPONG_SM8150_MASK, MERGE_3D_0, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
|
||||
PP_BLK("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk,
|
||||
-1),
|
||||
PP_BLK("pingpong_1", PINGPONG_1, 0x70800, PINGPONG_SM8150_MASK, MERGE_3D_0, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
|
||||
PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk,
|
||||
-1),
|
||||
PP_BLK("pingpong_2", PINGPONG_2, 0x71000, PINGPONG_SM8150_MASK, MERGE_3D_1, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)),
|
||||
PP_BLK("pingpong_3", PINGPONG_3, 0x71800, MERGE_3D_1, sdm845_pp_sblk,
|
||||
-1),
|
||||
PP_BLK("pingpong_3", PINGPONG_3, 0x71800, PINGPONG_SM8150_MASK, MERGE_3D_1, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)),
|
||||
PP_BLK("pingpong_4", PINGPONG_4, 0x72000, MERGE_3D_2, sdm845_pp_sblk,
|
||||
-1),
|
||||
PP_BLK("pingpong_4", PINGPONG_4, 0x72000, PINGPONG_SM8150_MASK, MERGE_3D_2, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
|
||||
-1),
|
||||
PP_BLK("pingpong_5", PINGPONG_5, 0x72800, MERGE_3D_2, sdm845_pp_sblk,
|
||||
PP_BLK("pingpong_5", PINGPONG_5, 0x72800, PINGPONG_SM8150_MASK, MERGE_3D_2, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
|
||||
-1),
|
||||
};
|
||||
|
@ -162,10 +162,20 @@ static const struct dpu_dsc_cfg sm8150_dsc[] = {
|
|||
};
|
||||
|
||||
static const struct dpu_intf_cfg sm8150_intf[] = {
|
||||
INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
|
||||
INTF_BLK("intf_1", INTF_1, 0x6a800, 0x2bc, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
|
||||
INTF_BLK("intf_2", INTF_2, 0x6b000, 0x2bc, INTF_DSI, 1, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
|
||||
INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_DP, 1, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
|
||||
INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 24, INTF_SC7180_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
|
||||
INTF_BLK_DSI_TE("intf_1", INTF_1, 0x6a800, 0x2bc, INTF_DSI, 0, 24, INTF_SC7180_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
|
||||
DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2)),
|
||||
INTF_BLK_DSI_TE("intf_2", INTF_2, 0x6b000, 0x2bc, INTF_DSI, 1, 24, INTF_SC7180_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
|
||||
DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2)),
|
||||
INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_DP, 1, 24, INTF_SC7180_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31)),
|
||||
};
|
||||
|
||||
static const struct dpu_perf_cfg sm8150_perf_data = {
|
||||
|
@ -220,15 +230,15 @@ const struct dpu_mdss_cfg dpu_sm8150_cfg = {
|
|||
.intf = sm8150_intf,
|
||||
.vbif_count = ARRAY_SIZE(sdm845_vbif),
|
||||
.vbif = sdm845_vbif,
|
||||
.reg_dma_count = 1,
|
||||
.dma_cfg = &sm8150_regdma,
|
||||
.perf = &sm8150_perf_data,
|
||||
.mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
|
||||
BIT(MDP_SSPP_TOP0_INTR2) | \
|
||||
BIT(MDP_SSPP_TOP0_HIST_INTR) | \
|
||||
BIT(MDP_INTF0_INTR) | \
|
||||
BIT(MDP_INTF1_INTR) | \
|
||||
BIT(MDP_INTF1_TEAR_INTR) | \
|
||||
BIT(MDP_INTF2_INTR) | \
|
||||
BIT(MDP_INTF2_TEAR_INTR) | \
|
||||
BIT(MDP_INTF3_INTR) | \
|
||||
BIT(MDP_AD4_0_INTR) | \
|
||||
BIT(MDP_AD4_1_INTR),
|
||||
|
|
|
@ -102,9 +102,9 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
|
|||
|
||||
static const struct dpu_lm_cfg sc8180x_lm[] = {
|
||||
LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
|
||||
&sdm845_lm_sblk, PINGPONG_0, LM_1, 0),
|
||||
&sdm845_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
|
||||
LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
|
||||
&sdm845_lm_sblk, PINGPONG_1, LM_0, 0),
|
||||
&sdm845_lm_sblk, PINGPONG_1, LM_0, DSPP_1),
|
||||
LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
|
||||
&sdm845_lm_sblk, PINGPONG_2, LM_3, 0),
|
||||
LM_BLK("lm_3", LM_3, 0x47000, MIXER_SDM845_MASK,
|
||||
|
@ -115,23 +115,34 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
|
|||
&sdm845_lm_sblk, PINGPONG_5, LM_4, 0),
|
||||
};
|
||||
|
||||
static const struct dpu_dspp_cfg sc8180x_dspp[] = {
|
||||
DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
|
||||
&sm8150_dspp_sblk),
|
||||
DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
|
||||
&sm8150_dspp_sblk),
|
||||
DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
|
||||
&sm8150_dspp_sblk),
|
||||
DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
|
||||
&sm8150_dspp_sblk),
|
||||
};
|
||||
|
||||
static const struct dpu_pingpong_cfg sc8180x_pp[] = {
|
||||
PP_BLK("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk,
|
||||
PP_BLK("pingpong_0", PINGPONG_0, 0x70000, PINGPONG_SM8150_MASK, MERGE_3D_0, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
|
||||
PP_BLK("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk,
|
||||
-1),
|
||||
PP_BLK("pingpong_1", PINGPONG_1, 0x70800, PINGPONG_SM8150_MASK, MERGE_3D_0, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
|
||||
PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk,
|
||||
-1),
|
||||
PP_BLK("pingpong_2", PINGPONG_2, 0x71000, PINGPONG_SM8150_MASK, MERGE_3D_1, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)),
|
||||
PP_BLK("pingpong_3", PINGPONG_3, 0x71800, MERGE_3D_1, sdm845_pp_sblk,
|
||||
-1),
|
||||
PP_BLK("pingpong_3", PINGPONG_3, 0x71800, PINGPONG_SM8150_MASK, MERGE_3D_1, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)),
|
||||
PP_BLK("pingpong_4", PINGPONG_4, 0x72000, MERGE_3D_2, sdm845_pp_sblk,
|
||||
-1),
|
||||
PP_BLK("pingpong_4", PINGPONG_4, 0x72000, PINGPONG_SM8150_MASK, MERGE_3D_2, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
|
||||
-1),
|
||||
PP_BLK("pingpong_5", PINGPONG_5, 0x72800, MERGE_3D_2, sdm845_pp_sblk,
|
||||
PP_BLK("pingpong_5", PINGPONG_5, 0x72800, PINGPONG_SM8150_MASK, MERGE_3D_2, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
|
||||
-1),
|
||||
};
|
||||
|
@ -142,14 +153,37 @@ static const struct dpu_merge_3d_cfg sc8180x_merge_3d[] = {
|
|||
MERGE_3D_BLK("merge_3d_2", MERGE_3D_2, 0x83200),
|
||||
};
|
||||
|
||||
static const struct dpu_dsc_cfg sc8180x_dsc[] = {
|
||||
DSC_BLK("dsc_0", DSC_0, 0x80000, BIT(DPU_DSC_OUTPUT_CTRL)),
|
||||
DSC_BLK("dsc_1", DSC_1, 0x80400, BIT(DPU_DSC_OUTPUT_CTRL)),
|
||||
DSC_BLK("dsc_2", DSC_2, 0x80800, BIT(DPU_DSC_OUTPUT_CTRL)),
|
||||
DSC_BLK("dsc_3", DSC_3, 0x80c00, BIT(DPU_DSC_OUTPUT_CTRL)),
|
||||
DSC_BLK("dsc_4", DSC_4, 0x81000, BIT(DPU_DSC_OUTPUT_CTRL)),
|
||||
DSC_BLK("dsc_5", DSC_5, 0x81400, BIT(DPU_DSC_OUTPUT_CTRL)),
|
||||
};
|
||||
|
||||
static const struct dpu_intf_cfg sc8180x_intf[] = {
|
||||
INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
|
||||
INTF_BLK("intf_1", INTF_1, 0x6a800, 0x2bc, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
|
||||
INTF_BLK("intf_2", INTF_2, 0x6b000, 0x2bc, INTF_DSI, 1, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
|
||||
INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7180_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
|
||||
INTF_BLK_DSI_TE("intf_1", INTF_1, 0x6a800, 0x2bc, INTF_DSI, 0, 24, INTF_SC7180_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
|
||||
DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2)),
|
||||
INTF_BLK_DSI_TE("intf_2", INTF_2, 0x6b000, 0x2bc, INTF_DSI, 1, 24, INTF_SC7180_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
|
||||
DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2)),
|
||||
/* INTF_3 is for MST, wired to INTF_DP 0 and 1, use dummy index until this is supported */
|
||||
INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_DP, 999, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
|
||||
INTF_BLK("intf_4", INTF_4, 0x6c000, 0x280, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 20, 21),
|
||||
INTF_BLK("intf_5", INTF_5, 0x6c800, 0x280, INTF_DP, MSM_DP_CONTROLLER_2, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 22, 23),
|
||||
INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_DP, 999, 24, INTF_SC7180_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31)),
|
||||
INTF_BLK("intf_4", INTF_4, 0x6c000, 0x280, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7180_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21)),
|
||||
INTF_BLK("intf_5", INTF_5, 0x6c800, 0x280, INTF_DP, MSM_DP_CONTROLLER_2, 24, INTF_SC7180_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23)),
|
||||
};
|
||||
|
||||
static const struct dpu_perf_cfg sc8180x_perf_data = {
|
||||
|
@ -190,6 +224,10 @@ const struct dpu_mdss_cfg dpu_sc8180x_cfg = {
|
|||
.sspp = sc8180x_sspp,
|
||||
.mixer_count = ARRAY_SIZE(sc8180x_lm),
|
||||
.mixer = sc8180x_lm,
|
||||
.dspp_count = ARRAY_SIZE(sc8180x_dspp),
|
||||
.dspp = sc8180x_dspp,
|
||||
.dsc_count = ARRAY_SIZE(sc8180x_dsc),
|
||||
.dsc = sc8180x_dsc,
|
||||
.pingpong_count = ARRAY_SIZE(sc8180x_pp),
|
||||
.pingpong = sc8180x_pp,
|
||||
.merge_3d_count = ARRAY_SIZE(sc8180x_merge_3d),
|
||||
|
@ -198,15 +236,15 @@ const struct dpu_mdss_cfg dpu_sc8180x_cfg = {
|
|||
.intf = sc8180x_intf,
|
||||
.vbif_count = ARRAY_SIZE(sdm845_vbif),
|
||||
.vbif = sdm845_vbif,
|
||||
.reg_dma_count = 1,
|
||||
.dma_cfg = &sm8150_regdma,
|
||||
.perf = &sc8180x_perf_data,
|
||||
.mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
|
||||
BIT(MDP_SSPP_TOP0_INTR2) | \
|
||||
BIT(MDP_SSPP_TOP0_HIST_INTR) | \
|
||||
BIT(MDP_INTF0_INTR) | \
|
||||
BIT(MDP_INTF1_INTR) | \
|
||||
BIT(MDP_INTF1_TEAR_INTR) | \
|
||||
BIT(MDP_INTF2_INTR) | \
|
||||
BIT(MDP_INTF2_TEAR_INTR) | \
|
||||
BIT(MDP_INTF3_INTR) | \
|
||||
BIT(MDP_INTF4_INTR) | \
|
||||
BIT(MDP_INTF5_INTR) | \
|
||||
|
|
|
@ -129,22 +129,22 @@ static const struct dpu_dspp_cfg sm8250_dspp[] = {
|
|||
};
|
||||
|
||||
static const struct dpu_pingpong_cfg sm8250_pp[] = {
|
||||
PP_BLK("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk,
|
||||
PP_BLK("pingpong_0", PINGPONG_0, 0x70000, PINGPONG_SM8150_MASK, MERGE_3D_0, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
|
||||
PP_BLK("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk,
|
||||
-1),
|
||||
PP_BLK("pingpong_1", PINGPONG_1, 0x70800, PINGPONG_SM8150_MASK, MERGE_3D_0, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
|
||||
PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk,
|
||||
-1),
|
||||
PP_BLK("pingpong_2", PINGPONG_2, 0x71000, PINGPONG_SM8150_MASK, MERGE_3D_1, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)),
|
||||
PP_BLK("pingpong_3", PINGPONG_3, 0x71800, MERGE_3D_1, sdm845_pp_sblk,
|
||||
-1),
|
||||
PP_BLK("pingpong_3", PINGPONG_3, 0x71800, PINGPONG_SM8150_MASK, MERGE_3D_1, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)),
|
||||
PP_BLK("pingpong_4", PINGPONG_4, 0x72000, MERGE_3D_2, sdm845_pp_sblk,
|
||||
-1),
|
||||
PP_BLK("pingpong_4", PINGPONG_4, 0x72000, PINGPONG_SM8150_MASK, MERGE_3D_2, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
|
||||
-1),
|
||||
PP_BLK("pingpong_5", PINGPONG_5, 0x72800, MERGE_3D_2, sdm845_pp_sblk,
|
||||
PP_BLK("pingpong_5", PINGPONG_5, 0x72800, PINGPONG_SM8150_MASK, MERGE_3D_2, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
|
||||
-1),
|
||||
};
|
||||
|
@ -163,10 +163,20 @@ static const struct dpu_dsc_cfg sm8250_dsc[] = {
|
|||
};
|
||||
|
||||
static const struct dpu_intf_cfg sm8250_intf[] = {
|
||||
INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
|
||||
INTF_BLK("intf_1", INTF_1, 0x6a800, 0x2c0, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
|
||||
INTF_BLK("intf_2", INTF_2, 0x6b000, 0x2c0, INTF_DSI, 1, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
|
||||
INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_DP, 1, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
|
||||
INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 24, INTF_SC7180_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
|
||||
INTF_BLK_DSI_TE("intf_1", INTF_1, 0x6a800, 0x2c0, INTF_DSI, 0, 24, INTF_SC7180_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
|
||||
DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2)),
|
||||
INTF_BLK_DSI_TE("intf_2", INTF_2, 0x6b000, 0x2c0, INTF_DSI, 1, 24, INTF_SC7180_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
|
||||
DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2)),
|
||||
INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_DP, 1, 24, INTF_SC7180_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31)),
|
||||
};
|
||||
|
||||
static const struct dpu_wb_cfg sm8250_wb[] = {
|
||||
|
@ -228,15 +238,15 @@ const struct dpu_mdss_cfg dpu_sm8250_cfg = {
|
|||
.vbif = sdm845_vbif,
|
||||
.wb_count = ARRAY_SIZE(sm8250_wb),
|
||||
.wb = sm8250_wb,
|
||||
.reg_dma_count = 1,
|
||||
.dma_cfg = &sm8250_regdma,
|
||||
.perf = &sm8250_perf_data,
|
||||
.mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
|
||||
BIT(MDP_SSPP_TOP0_INTR2) | \
|
||||
BIT(MDP_SSPP_TOP0_HIST_INTR) | \
|
||||
BIT(MDP_INTF0_INTR) | \
|
||||
BIT(MDP_INTF1_INTR) | \
|
||||
BIT(MDP_INTF1_TEAR_INTR) | \
|
||||
BIT(MDP_INTF2_INTR) | \
|
||||
BIT(MDP_INTF2_TEAR_INTR) | \
|
||||
BIT(MDP_INTF3_INTR) | \
|
||||
BIT(MDP_INTF4_INTR),
|
||||
};
|
||||
|
|
|
@ -76,17 +76,26 @@ static const struct dpu_lm_cfg sc7180_lm[] = {
|
|||
|
||||
static const struct dpu_dspp_cfg sc7180_dspp[] = {
|
||||
DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
|
||||
&sc7180_dspp_sblk),
|
||||
&sm8150_dspp_sblk),
|
||||
};
|
||||
|
||||
static const struct dpu_pingpong_cfg sc7180_pp[] = {
|
||||
PP_BLK("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk, -1, -1),
|
||||
PP_BLK("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk, -1, -1),
|
||||
PP_BLK("pingpong_0", PINGPONG_0, 0x70000, PINGPONG_SM8150_MASK, 0, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
|
||||
-1),
|
||||
PP_BLK("pingpong_1", PINGPONG_1, 0x70800, PINGPONG_SM8150_MASK, 0, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
|
||||
-1),
|
||||
};
|
||||
|
||||
static const struct dpu_intf_cfg sc7180_intf[] = {
|
||||
INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
|
||||
INTF_BLK("intf_1", INTF_1, 0x6a800, 0x2c0, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
|
||||
INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7180_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
|
||||
INTF_BLK_DSI_TE("intf_1", INTF_1, 0x6a800, 0x2c0, INTF_DSI, 0, 24, INTF_SC7180_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
|
||||
DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2)),
|
||||
};
|
||||
|
||||
static const struct dpu_wb_cfg sc7180_wb[] = {
|
||||
|
@ -143,14 +152,13 @@ const struct dpu_mdss_cfg dpu_sc7180_cfg = {
|
|||
.wb = sc7180_wb,
|
||||
.vbif_count = ARRAY_SIZE(sdm845_vbif),
|
||||
.vbif = sdm845_vbif,
|
||||
.reg_dma_count = 1,
|
||||
.dma_cfg = &sdm845_regdma,
|
||||
.perf = &sc7180_perf_data,
|
||||
.mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
|
||||
BIT(MDP_SSPP_TOP0_INTR2) | \
|
||||
BIT(MDP_SSPP_TOP0_HIST_INTR) | \
|
||||
BIT(MDP_INTF0_INTR) | \
|
||||
BIT(MDP_INTF1_INTR),
|
||||
BIT(MDP_INTF1_INTR) | \
|
||||
BIT(MDP_INTF1_TEAR_INTR),
|
||||
};
|
||||
|
||||
#endif
|
||||
|
|
|
@ -60,14 +60,16 @@ static const struct dpu_dspp_cfg sm6115_dspp[] = {
|
|||
};
|
||||
|
||||
static const struct dpu_pingpong_cfg sm6115_pp[] = {
|
||||
PP_BLK("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk,
|
||||
PP_BLK("pingpong_0", PINGPONG_0, 0x70000, PINGPONG_SM8150_MASK, 0, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
|
||||
-1),
|
||||
};
|
||||
|
||||
static const struct dpu_intf_cfg sm6115_intf[] = {
|
||||
INTF_BLK("intf_0", INTF_0, 0x00000, 0x280, INTF_NONE, 0, 0, 0, 0, 0, 0),
|
||||
INTF_BLK("intf_1", INTF_1, 0x6a800, 0x2c0, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
|
||||
INTF_BLK_DSI_TE("intf_1", INTF_1, 0x6a800, 0x2c0, INTF_DSI, 0, 24, INTF_SC7180_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
|
||||
DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2)),
|
||||
};
|
||||
|
||||
static const struct dpu_perf_cfg sm6115_perf_data = {
|
||||
|
@ -122,7 +124,8 @@ const struct dpu_mdss_cfg dpu_sm6115_cfg = {
|
|||
.mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
|
||||
BIT(MDP_SSPP_TOP0_INTR2) | \
|
||||
BIT(MDP_SSPP_TOP0_HIST_INTR) | \
|
||||
BIT(MDP_INTF1_INTR),
|
||||
BIT(MDP_INTF1_INTR) | \
|
||||
BIT(MDP_INTF1_TEAR_INTR),
|
||||
};
|
||||
|
||||
#endif
|
||||
|
|
|
@ -0,0 +1,173 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
* Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2023, Linaro Limited
|
||||
*/
|
||||
|
||||
#ifndef _DPU_6_4_SM6350_H
|
||||
#define _DPU_6_4_SM6350_H
|
||||
|
||||
static const struct dpu_caps sm6350_dpu_caps = {
|
||||
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
|
||||
.max_mixer_blendstages = 0x7,
|
||||
.qseed_type = DPU_SSPP_SCALER_QSEED4,
|
||||
.has_src_split = true,
|
||||
.has_dim_layer = true,
|
||||
.has_idle_pc = true,
|
||||
.max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
|
||||
.pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
|
||||
};
|
||||
|
||||
static const struct dpu_ubwc_cfg sm6350_ubwc_cfg = {
|
||||
.ubwc_version = DPU_HW_UBWC_VER_20,
|
||||
.ubwc_swizzle = 6,
|
||||
.highest_bank_bit = 1,
|
||||
};
|
||||
|
||||
static const struct dpu_mdp_cfg sm6350_mdp[] = {
|
||||
{
|
||||
.name = "top_0", .id = MDP_TOP,
|
||||
.base = 0x0, .len = 0x494,
|
||||
.features = 0,
|
||||
.clk_ctrls[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
|
||||
.clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
|
||||
.clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
|
||||
.clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
|
||||
.clk_ctrls[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
|
||||
},
|
||||
};
|
||||
|
||||
static const struct dpu_ctl_cfg sm6350_ctl[] = {
|
||||
{
|
||||
.name = "ctl_0", .id = CTL_0,
|
||||
.base = 0x1000, .len = 0x1dc,
|
||||
.features = BIT(DPU_CTL_ACTIVE_CFG),
|
||||
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
|
||||
},
|
||||
{
|
||||
.name = "ctl_1", .id = CTL_1,
|
||||
.base = 0x1200, .len = 0x1dc,
|
||||
.features = BIT(DPU_CTL_ACTIVE_CFG),
|
||||
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
|
||||
},
|
||||
{
|
||||
.name = "ctl_2", .id = CTL_2,
|
||||
.base = 0x1400, .len = 0x1dc,
|
||||
.features = BIT(DPU_CTL_ACTIVE_CFG),
|
||||
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
|
||||
},
|
||||
{
|
||||
.name = "ctl_3", .id = CTL_3,
|
||||
.base = 0x1600, .len = 0x1dc,
|
||||
.features = BIT(DPU_CTL_ACTIVE_CFG),
|
||||
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
|
||||
},
|
||||
};
|
||||
|
||||
static const struct dpu_sspp_cfg sm6350_sspp[] = {
|
||||
SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, 0x1f8, VIG_SC7180_MASK,
|
||||
sc7180_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
|
||||
SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, 0x1f8, DMA_SDM845_MASK,
|
||||
sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
|
||||
SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, 0x1f8, DMA_CURSOR_SDM845_MASK,
|
||||
sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
|
||||
SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, 0x1f8, DMA_CURSOR_SDM845_MASK,
|
||||
sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
|
||||
};
|
||||
|
||||
static const struct dpu_lm_cfg sm6350_lm[] = {
|
||||
LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
|
||||
&sc7180_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
|
||||
LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
|
||||
&sc7180_lm_sblk, PINGPONG_1, LM_0, 0),
|
||||
};
|
||||
|
||||
static const struct dpu_dspp_cfg sm6350_dspp[] = {
|
||||
DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
|
||||
&sm8150_dspp_sblk),
|
||||
};
|
||||
|
||||
static struct dpu_pingpong_cfg sm6350_pp[] = {
|
||||
PP_BLK("pingpong_0", PINGPONG_0, 0x70000, PINGPONG_SM8150_MASK, 0, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
|
||||
-1),
|
||||
PP_BLK("pingpong_1", PINGPONG_1, 0x70800, PINGPONG_SM8150_MASK, 0, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
|
||||
-1),
|
||||
};
|
||||
|
||||
static const struct dpu_dsc_cfg sm6350_dsc[] = {
|
||||
DSC_BLK("dsc_0", DSC_0, 0x80000, BIT(DPU_DSC_OUTPUT_CTRL)),
|
||||
};
|
||||
|
||||
static const struct dpu_intf_cfg sm6350_intf[] = {
|
||||
INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 35, INTF_SC7180_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
|
||||
INTF_BLK_DSI_TE("intf_1", INTF_1, 0x6a800, 0x2c0, INTF_DSI, 0, 35, INTF_SC7180_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
|
||||
DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2)),
|
||||
};
|
||||
|
||||
static const struct dpu_perf_cfg sm6350_perf_data = {
|
||||
.max_bw_low = 4200000,
|
||||
.max_bw_high = 5100000,
|
||||
.min_core_ib = 2500000,
|
||||
.min_llcc_ib = 0,
|
||||
.min_dram_ib = 1600000,
|
||||
.min_prefill_lines = 35,
|
||||
/* TODO: confirm danger_lut_tbl */
|
||||
.danger_lut_tbl = {0xffff, 0xffff, 0x0},
|
||||
.safe_lut_tbl = {0xff00, 0xff00, 0xffff},
|
||||
.qos_lut_tbl = {
|
||||
{.nentry = ARRAY_SIZE(sm6350_qos_linear_macrotile),
|
||||
.entries = sm6350_qos_linear_macrotile
|
||||
},
|
||||
{.nentry = ARRAY_SIZE(sm6350_qos_linear_macrotile),
|
||||
.entries = sm6350_qos_linear_macrotile
|
||||
},
|
||||
{.nentry = ARRAY_SIZE(sc7180_qos_nrt),
|
||||
.entries = sc7180_qos_nrt
|
||||
},
|
||||
},
|
||||
.cdp_cfg = {
|
||||
{.rd_enable = 1, .wr_enable = 1},
|
||||
{.rd_enable = 1, .wr_enable = 0}
|
||||
},
|
||||
.clk_inefficiency_factor = 105,
|
||||
.bw_inefficiency_factor = 120,
|
||||
};
|
||||
|
||||
const struct dpu_mdss_cfg dpu_sm6350_cfg = {
|
||||
.caps = &sm6350_dpu_caps,
|
||||
.ubwc = &sm6350_ubwc_cfg,
|
||||
.mdp_count = ARRAY_SIZE(sm6350_mdp),
|
||||
.mdp = sm6350_mdp,
|
||||
.ctl_count = ARRAY_SIZE(sm6350_ctl),
|
||||
.ctl = sm6350_ctl,
|
||||
.sspp_count = ARRAY_SIZE(sm6350_sspp),
|
||||
.sspp = sm6350_sspp,
|
||||
.mixer_count = ARRAY_SIZE(sm6350_lm),
|
||||
.mixer = sm6350_lm,
|
||||
.dspp_count = ARRAY_SIZE(sm6350_dspp),
|
||||
.dspp = sm6350_dspp,
|
||||
.dsc_count = ARRAY_SIZE(sm6350_dsc),
|
||||
.dsc = sm6350_dsc,
|
||||
.pingpong_count = ARRAY_SIZE(sm6350_pp),
|
||||
.pingpong = sm6350_pp,
|
||||
.intf_count = ARRAY_SIZE(sm6350_intf),
|
||||
.intf = sm6350_intf,
|
||||
.vbif_count = ARRAY_SIZE(sdm845_vbif),
|
||||
.vbif = sdm845_vbif,
|
||||
.perf = &sm6350_perf_data,
|
||||
.mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
|
||||
BIT(MDP_SSPP_TOP0_INTR2) | \
|
||||
BIT(MDP_SSPP_TOP0_HIST_INTR) | \
|
||||
BIT(MDP_INTF0_INTR) | \
|
||||
BIT(MDP_INTF1_INTR) | \
|
||||
BIT(MDP_INTF1_TEAR_INTR),
|
||||
};
|
||||
|
||||
#endif
|
|
@@ -57,14 +57,16 @@ static const struct dpu_dspp_cfg qcm2290_dspp[] = {
};

static const struct dpu_pingpong_cfg qcm2290_pp[] = {
PP_BLK("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk,
PP_BLK("pingpong_0", PINGPONG_0, 0x70000, PINGPONG_SM8150_MASK, 0, sdm845_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
-1),
};

static const struct dpu_intf_cfg qcm2290_intf[] = {
INTF_BLK("intf_0", INTF_0, 0x00000, 0x280, INTF_NONE, 0, 0, 0, 0, 0, 0),
INTF_BLK("intf_1", INTF_1, 0x6a800, 0x2c0, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
INTF_BLK_DSI_TE("intf_1", INTF_1, 0x6a800, 0x2c0, INTF_DSI, 0, 24, INTF_SC7180_MASK,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2)),
};

static const struct dpu_perf_cfg qcm2290_perf_data = {
@@ -112,7 +114,8 @@ const struct dpu_mdss_cfg dpu_qcm2290_cfg = {
.mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
BIT(MDP_SSPP_TOP0_INTR2) | \
BIT(MDP_SSPP_TOP0_HIST_INTR) | \
BIT(MDP_INTF1_INTR),
BIT(MDP_INTF1_INTR) | \
BIT(MDP_INTF1_TEAR_INTR),
};

#endif

@@ -0,0 +1,138 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
* Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2023, Linaro Limited
|
||||
*/
|
||||
|
||||
#ifndef _DPU_6_9_SM6375_H
|
||||
#define _DPU_6_9_SM6375_H
|
||||
|
||||
static const struct dpu_caps sm6375_dpu_caps = {
|
||||
.max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
|
||||
.max_mixer_blendstages = 0x4,
|
||||
.qseed_type = DPU_SSPP_SCALER_QSEED4,
|
||||
.has_dim_layer = true,
|
||||
.has_idle_pc = true,
|
||||
.max_linewidth = 2160,
|
||||
.pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
|
||||
};
|
||||
|
||||
static const struct dpu_ubwc_cfg sm6375_ubwc_cfg = {
|
||||
.ubwc_version = DPU_HW_UBWC_VER_20,
|
||||
.ubwc_swizzle = 6,
|
||||
.highest_bank_bit = 1,
|
||||
};
|
||||
|
||||
static const struct dpu_mdp_cfg sm6375_mdp[] = {
|
||||
{
|
||||
.name = "top_0", .id = MDP_TOP,
|
||||
.base = 0x0, .len = 0x494,
|
||||
.features = 0,
|
||||
.clk_ctrls[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
|
||||
.clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
|
||||
},
|
||||
};
|
||||
|
||||
static const struct dpu_ctl_cfg sm6375_ctl[] = {
|
||||
{
|
||||
.name = "ctl_0", .id = CTL_0,
|
||||
.base = 0x1000, .len = 0x1dc,
|
||||
.features = BIT(DPU_CTL_ACTIVE_CFG),
|
||||
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
|
||||
},
|
||||
};
|
||||
|
||||
static const struct dpu_sspp_cfg sm6375_sspp[] = {
|
||||
SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, 0x1f8, VIG_SC7180_MASK,
|
||||
sm6115_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
|
||||
SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, 0x1f8, DMA_SDM845_MASK,
|
||||
sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
|
||||
};
|
||||
|
||||
static const struct dpu_lm_cfg sm6375_lm[] = {
|
||||
LM_BLK("lm_0", LM_0, 0x44000, MIXER_QCM2290_MASK,
|
||||
&qcm2290_lm_sblk, PINGPONG_0, 0, DSPP_0),
|
||||
};
|
||||
|
||||
static const struct dpu_dspp_cfg sm6375_dspp[] = {
|
||||
DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
|
||||
&sm8150_dspp_sblk),
|
||||
};
|
||||
|
||||
static const struct dpu_pingpong_cfg sm6375_pp[] = {
|
||||
PP_BLK("pingpong_0", PINGPONG_0, 0x70000, PINGPONG_SM8150_MASK, 0, sdm845_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
|
||||
-1),
|
||||
};
|
||||
|
||||
static const struct dpu_dsc_cfg sm6375_dsc[] = {
|
||||
DSC_BLK("dsc_0", DSC_0, 0x80000, BIT(DPU_DSC_OUTPUT_CTRL)),
|
||||
};
|
||||
|
||||
static const struct dpu_intf_cfg sm6375_intf[] = {
|
||||
INTF_BLK_DSI_TE("intf_1", INTF_1, 0x6a800, 0x2c0, INTF_DSI, 0, 24, INTF_SC7180_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
|
||||
DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2)),
|
||||
};
|
||||
|
||||
static const struct dpu_perf_cfg sm6375_perf_data = {
|
||||
.max_bw_low = 5200000,
|
||||
.max_bw_high = 6200000,
|
||||
.min_core_ib = 2500000,
|
||||
.min_llcc_ib = 0, /* No LLCC on this SoC */
|
||||
.min_dram_ib = 1600000,
|
||||
.min_prefill_lines = 24,
|
||||
/* TODO: confirm danger_lut_tbl */
|
||||
.danger_lut_tbl = {0xffff, 0xffff, 0x0},
|
||||
.safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
|
||||
.qos_lut_tbl = {
|
||||
{.nentry = ARRAY_SIZE(sm6350_qos_linear_macrotile),
|
||||
.entries = sm6350_qos_linear_macrotile
|
||||
},
|
||||
{.nentry = ARRAY_SIZE(sm6350_qos_linear_macrotile),
|
||||
.entries = sm6350_qos_linear_macrotile
|
||||
},
|
||||
{.nentry = ARRAY_SIZE(sc7180_qos_nrt),
|
||||
.entries = sc7180_qos_nrt
|
||||
},
|
||||
},
|
||||
.cdp_cfg = {
|
||||
{.rd_enable = 1, .wr_enable = 1},
|
||||
{.rd_enable = 1, .wr_enable = 0}
|
||||
},
|
||||
.clk_inefficiency_factor = 105,
|
||||
.bw_inefficiency_factor = 120,
|
||||
};
|
||||
|
||||
const struct dpu_mdss_cfg dpu_sm6375_cfg = {
|
||||
.caps = &sm6375_dpu_caps,
|
||||
.ubwc = &sm6375_ubwc_cfg,
|
||||
.mdp_count = ARRAY_SIZE(sm6375_mdp),
|
||||
.mdp = sm6375_mdp,
|
||||
.ctl_count = ARRAY_SIZE(sm6375_ctl),
|
||||
.ctl = sm6375_ctl,
|
||||
.sspp_count = ARRAY_SIZE(sm6375_sspp),
|
||||
.sspp = sm6375_sspp,
|
||||
.mixer_count = ARRAY_SIZE(sm6375_lm),
|
||||
.mixer = sm6375_lm,
|
||||
.dspp_count = ARRAY_SIZE(sm6375_dspp),
|
||||
.dspp = sm6375_dspp,
|
||||
.dsc_count = ARRAY_SIZE(sm6375_dsc),
|
||||
.dsc = sm6375_dsc,
|
||||
.pingpong_count = ARRAY_SIZE(sm6375_pp),
|
||||
.pingpong = sm6375_pp,
|
||||
.intf_count = ARRAY_SIZE(sm6375_intf),
|
||||
.intf = sm6375_intf,
|
||||
.vbif_count = ARRAY_SIZE(sdm845_vbif),
|
||||
.vbif = sdm845_vbif,
|
||||
.perf = &sm6375_perf_data,
|
||||
.mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
|
||||
BIT(MDP_SSPP_TOP0_INTR2) | \
|
||||
BIT(MDP_SSPP_TOP0_HIST_INTR) | \
|
||||
BIT(MDP_INTF1_INTR) | \
|
||||
BIT(MDP_INTF1_TEAR_INTR),
|
||||
};
|
||||
|
||||
#endif
|
|
@@ -129,16 +129,16 @@ static const struct dpu_dspp_cfg sm8350_dspp[] = {
static const struct dpu_pingpong_cfg sm8350_pp[] = {
PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
-1),
PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
-1),
PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)),
-1),
PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)),
-1),
PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
-1),
@@ -153,11 +153,33 @@ static const struct dpu_merge_3d_cfg sm8350_merge_3d[] = {
MERGE_3D_BLK("merge_3d_2", MERGE_3D_2, 0x50000),
};

/*
* NOTE: Each display compression engine (DCE) contains dual hard
* slice DSC encoders so both share same base address but with
* its own different sub block address.
*/
static const struct dpu_dsc_cfg sm8350_dsc[] = {
DSC_BLK_1_2("dce_0_0", DSC_0, 0x80000, 0x29c, 0, dsc_sblk_0),
DSC_BLK_1_2("dce_0_1", DSC_1, 0x80000, 0x29c, 0, dsc_sblk_1),
DSC_BLK_1_2("dce_1_0", DSC_2, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_0),
DSC_BLK_1_2("dce_1_1", DSC_3, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_1),
};

static const struct dpu_intf_cfg sm8350_intf[] = {
INTF_BLK("intf_0", INTF_0, 0x34000, 0x280, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
INTF_BLK("intf_1", INTF_1, 0x35000, 0x2c4, INTF_DSI, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
INTF_BLK("intf_2", INTF_2, 0x36000, 0x2c4, INTF_DSI, 1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
INTF_BLK("intf_3", INTF_3, 0x37000, 0x280, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
INTF_BLK("intf_0", INTF_0, 0x34000, 0x280, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
INTF_BLK_DSI_TE("intf_1", INTF_1, 0x35000, 0x2c4, INTF_DSI, 0, 24, INTF_SC7280_MASK,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
DPU_IRQ_IDX(MDP_INTF1_7xxx_TEAR_INTR, 2)),
INTF_BLK_DSI_TE("intf_2", INTF_2, 0x36000, 0x2c4, INTF_DSI, 1, 24, INTF_SC7280_MASK,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
DPU_IRQ_IDX(MDP_INTF2_7xxx_TEAR_INTR, 2)),
INTF_BLK("intf_3", INTF_3, 0x37000, 0x280, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31)),
};

static const struct dpu_perf_cfg sm8350_perf_data = {
@@ -205,21 +227,23 @@ const struct dpu_mdss_cfg dpu_sm8350_cfg = {
.dspp = sm8350_dspp,
.pingpong_count = ARRAY_SIZE(sm8350_pp),
.pingpong = sm8350_pp,
.dsc_count = ARRAY_SIZE(sm8350_dsc),
.dsc = sm8350_dsc,
.merge_3d_count = ARRAY_SIZE(sm8350_merge_3d),
.merge_3d = sm8350_merge_3d,
.intf_count = ARRAY_SIZE(sm8350_intf),
.intf = sm8350_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
.vbif = sdm845_vbif,
.reg_dma_count = 1,
.dma_cfg = &sm8350_regdma,
.perf = &sm8350_perf_data,
.mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
BIT(MDP_SSPP_TOP0_INTR2) | \
BIT(MDP_SSPP_TOP0_HIST_INTR) | \
BIT(MDP_INTF0_7xxx_INTR) | \
BIT(MDP_INTF1_7xxx_INTR) | \
BIT(MDP_INTF1_7xxx_TEAR_INTR) | \
BIT(MDP_INTF2_7xxx_INTR) | \
BIT(MDP_INTF2_7xxx_TEAR_INTR) | \
BIT(MDP_INTF3_7xxx_INTR),
};

|
|||
.clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
|
||||
.clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
|
||||
.clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
|
||||
.clk_ctrls[DPU_CLK_CTRL_WB2] = { .reg_off = 0x3b8, .bit_off = 24 },
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -83,20 +84,45 @@ static const struct dpu_lm_cfg sc7280_lm[] = {
|
|||
|
||||
static const struct dpu_dspp_cfg sc7280_dspp[] = {
|
||||
DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
|
||||
&sc7180_dspp_sblk),
|
||||
&sm8150_dspp_sblk),
|
||||
};
|
||||
|
||||
static const struct dpu_pingpong_cfg sc7280_pp[] = {
|
||||
PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, 0, sc7280_pp_sblk, -1, -1),
|
||||
PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1),
|
||||
PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1),
|
||||
PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1),
|
||||
PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, 0, sc7280_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
|
||||
-1),
|
||||
PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
|
||||
-1),
|
||||
PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
|
||||
-1),
|
||||
PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
|
||||
-1),
|
||||
};
|
||||
|
||||
/* NOTE: sc7280 only has one DSC hard slice encoder */
|
||||
static const struct dpu_dsc_cfg sc7280_dsc[] = {
|
||||
DSC_BLK_1_2("dce_0_0", DSC_0, 0x80000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_0),
|
||||
};
|
||||
|
||||
static const struct dpu_wb_cfg sc7280_wb[] = {
|
||||
WB_BLK("wb_2", WB_2, 0x65000, WB_SM8250_MASK, DPU_CLK_CTRL_WB2, 6,
|
||||
VBIF_RT, MDP_SSPP_TOP0_INTR, 4096, 4),
|
||||
};
|
||||
|
||||
static const struct dpu_intf_cfg sc7280_intf[] = {
|
||||
INTF_BLK("intf_0", INTF_0, 0x34000, 0x280, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
|
||||
INTF_BLK("intf_1", INTF_1, 0x35000, 0x2c4, INTF_DSI, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
|
||||
INTF_BLK("intf_5", INTF_5, 0x39000, 0x280, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 22, 23),
|
||||
INTF_BLK("intf_0", INTF_0, 0x34000, 0x280, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
|
||||
INTF_BLK_DSI_TE("intf_1", INTF_1, 0x35000, 0x2c4, INTF_DSI, 0, 24, INTF_SC7280_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
|
||||
DPU_IRQ_IDX(MDP_INTF1_7xxx_TEAR_INTR, 2)),
|
||||
INTF_BLK("intf_5", INTF_5, 0x39000, 0x280, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23)),
|
||||
};
|
||||
|
||||
static const struct dpu_perf_cfg sc7280_perf_data = {
|
||||
|
@ -142,6 +168,10 @@ const struct dpu_mdss_cfg dpu_sc7280_cfg = {
|
|||
.mixer = sc7280_lm,
|
||||
.pingpong_count = ARRAY_SIZE(sc7280_pp),
|
||||
.pingpong = sc7280_pp,
|
||||
.dsc_count = ARRAY_SIZE(sc7280_dsc),
|
||||
.dsc = sc7280_dsc,
|
||||
.wb_count = ARRAY_SIZE(sc7280_wb),
|
||||
.wb = sc7280_wb,
|
||||
.intf_count = ARRAY_SIZE(sc7280_intf),
|
||||
.intf = sc7280_intf,
|
||||
.vbif_count = ARRAY_SIZE(sdm845_vbif),
|
||||
|
@ -152,6 +182,7 @@ const struct dpu_mdss_cfg dpu_sc7280_cfg = {
|
|||
BIT(MDP_SSPP_TOP0_HIST_INTR) | \
|
||||
BIT(MDP_INTF0_7xxx_INTR) | \
|
||||
BIT(MDP_INTF1_7xxx_INTR) | \
|
||||
BIT(MDP_INTF1_7xxx_TEAR_INTR) | \
|
||||
BIT(MDP_INTF5_7xxx_INTR),
|
||||
};
|
||||
|
||||
|
|
|
@@ -42,17 +42,18 @@ static const struct dpu_mdp_cfg sc8280xp_mdp[] = {
|
|||
},
|
||||
};
|
||||
|
||||
/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
|
||||
static const struct dpu_ctl_cfg sc8280xp_ctl[] = {
|
||||
{
|
||||
.name = "ctl_0", .id = CTL_0,
|
||||
.base = 0x15000, .len = 0x204,
|
||||
.features = CTL_SC7280_MASK,
|
||||
.features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
|
||||
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
|
||||
},
|
||||
{
|
||||
.name = "ctl_1", .id = CTL_1,
|
||||
.base = 0x16000, .len = 0x204,
|
||||
.features = CTL_SC7280_MASK,
|
||||
.features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
|
||||
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
|
||||
},
|
||||
{
|
||||
|
@@ -141,17 +142,51 @@ static const struct dpu_merge_3d_cfg sc8280xp_merge_3d[] = {
|
|||
MERGE_3D_BLK("merge_3d_2", MERGE_3D_2, 0x50000),
|
||||
};
|
||||
|
||||
/*
|
||||
* NOTE: Each display compression engine (DCE) contains dual hard
|
||||
* slice DSC encoders so both share same base address but with
|
||||
* its own different sub block address.
|
||||
*/
|
||||
static const struct dpu_dsc_cfg sc8280xp_dsc[] = {
|
||||
DSC_BLK_1_2("dce_0_0", DSC_0, 0x80000, 0x29c, 0, dsc_sblk_0),
|
||||
DSC_BLK_1_2("dce_0_1", DSC_1, 0x80000, 0x29c, 0, dsc_sblk_1),
|
||||
DSC_BLK_1_2("dce_1_0", DSC_2, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_0),
|
||||
DSC_BLK_1_2("dce_1_1", DSC_3, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_1),
|
||||
DSC_BLK_1_2("dce_2_0", DSC_4, 0x82000, 0x29c, 0, dsc_sblk_0),
|
||||
DSC_BLK_1_2("dce_2_1", DSC_5, 0x82000, 0x29c, 0, dsc_sblk_1),
|
||||
};
|
||||
|
||||
/* TODO: INTF 3, 8 and 7 are used for MST, marked as INTF_NONE for now */
|
||||
static const struct dpu_intf_cfg sc8280xp_intf[] = {
|
||||
INTF_BLK("intf_0", INTF_0, 0x34000, 0x280, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
|
||||
INTF_BLK("intf_1", INTF_1, 0x35000, 0x300, INTF_DSI, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
|
||||
INTF_BLK("intf_2", INTF_2, 0x36000, 0x300, INTF_DSI, 1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
|
||||
INTF_BLK("intf_3", INTF_3, 0x37000, 0x280, INTF_NONE, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
|
||||
INTF_BLK("intf_4", INTF_4, 0x38000, 0x280, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 20, 21),
|
||||
INTF_BLK("intf_5", INTF_5, 0x39000, 0x280, INTF_DP, MSM_DP_CONTROLLER_3, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 22, 23),
|
||||
INTF_BLK("intf_6", INTF_6, 0x3a000, 0x280, INTF_DP, MSM_DP_CONTROLLER_2, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 16, 17),
|
||||
INTF_BLK("intf_7", INTF_7, 0x3b000, 0x280, INTF_NONE, MSM_DP_CONTROLLER_2, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 18, 19),
|
||||
INTF_BLK("intf_8", INTF_8, 0x3c000, 0x280, INTF_NONE, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 12, 13),
|
||||
INTF_BLK("intf_0", INTF_0, 0x34000, 0x280, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
|
||||
INTF_BLK_DSI_TE("intf_1", INTF_1, 0x35000, 0x300, INTF_DSI, 0, 24, INTF_SC7280_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
|
||||
DPU_IRQ_IDX(MDP_INTF1_7xxx_TEAR_INTR, 2)),
|
||||
INTF_BLK_DSI_TE("intf_2", INTF_2, 0x36000, 0x300, INTF_DSI, 1, 24, INTF_SC7280_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
|
||||
DPU_IRQ_IDX(MDP_INTF2_7xxx_TEAR_INTR, 2)),
|
||||
INTF_BLK("intf_3", INTF_3, 0x37000, 0x280, INTF_NONE, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31)),
|
||||
INTF_BLK("intf_4", INTF_4, 0x38000, 0x280, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21)),
|
||||
INTF_BLK("intf_5", INTF_5, 0x39000, 0x280, INTF_DP, MSM_DP_CONTROLLER_3, 24, INTF_SC7280_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23)),
|
||||
INTF_BLK("intf_6", INTF_6, 0x3a000, 0x280, INTF_DP, MSM_DP_CONTROLLER_2, 24, INTF_SC7280_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17)),
|
||||
INTF_BLK("intf_7", INTF_7, 0x3b000, 0x280, INTF_NONE, MSM_DP_CONTROLLER_2, 24, INTF_SC7280_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 18),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 19)),
|
||||
INTF_BLK("intf_8", INTF_8, 0x3c000, 0x280, INTF_NONE, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
|
||||
};
|
||||
|
||||
static const struct dpu_perf_cfg sc8280xp_perf_data = {
|
||||
|
@@ -196,21 +231,23 @@ const struct dpu_mdss_cfg dpu_sc8280xp_cfg = {
|
|||
.dspp = sc8280xp_dspp,
|
||||
.pingpong_count = ARRAY_SIZE(sc8280xp_pp),
|
||||
.pingpong = sc8280xp_pp,
|
||||
.dsc_count = ARRAY_SIZE(sc8280xp_dsc),
|
||||
.dsc = sc8280xp_dsc,
|
||||
.merge_3d_count = ARRAY_SIZE(sc8280xp_merge_3d),
|
||||
.merge_3d = sc8280xp_merge_3d,
|
||||
.intf_count = ARRAY_SIZE(sc8280xp_intf),
|
||||
.intf = sc8280xp_intf,
|
||||
.vbif_count = ARRAY_SIZE(sdm845_vbif),
|
||||
.vbif = sdm845_vbif,
|
||||
.reg_dma_count = 1,
|
||||
.dma_cfg = &sc8280xp_regdma,
|
||||
.perf = &sc8280xp_perf_data,
|
||||
.mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
|
||||
BIT(MDP_SSPP_TOP0_INTR2) | \
|
||||
BIT(MDP_SSPP_TOP0_HIST_INTR) | \
|
||||
BIT(MDP_INTF0_7xxx_INTR) | \
|
||||
BIT(MDP_INTF1_7xxx_INTR) | \
|
||||
BIT(MDP_INTF1_7xxx_TEAR_INTR) | \
|
||||
BIT(MDP_INTF2_7xxx_INTR) | \
|
||||
BIT(MDP_INTF2_7xxx_TEAR_INTR) | \
|
||||
BIT(MDP_INTF3_7xxx_INTR) | \
|
||||
BIT(MDP_INTF4_7xxx_INTR) | \
|
||||
BIT(MDP_INTF5_7xxx_INTR) | \
|
||||
|
|
|
@@ -47,7 +47,7 @@ static const struct dpu_ctl_cfg sm8450_ctl[] = {
|
|||
{
|
||||
.name = "ctl_0", .id = CTL_0,
|
||||
.base = 0x15000, .len = 0x204,
|
||||
.features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY) | BIT(DPU_CTL_FETCH_ACTIVE),
|
||||
.features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
|
||||
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
|
||||
},
|
||||
{
|
||||
|
@@ -107,9 +107,9 @@ static const struct dpu_lm_cfg sm8450_lm[] = {
|
|||
LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
|
||||
&sdm845_lm_sblk, PINGPONG_1, LM_0, DSPP_1),
|
||||
LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
|
||||
&sdm845_lm_sblk, PINGPONG_2, LM_3, 0),
|
||||
&sdm845_lm_sblk, PINGPONG_2, LM_3, DSPP_2),
|
||||
LM_BLK("lm_3", LM_3, 0x47000, MIXER_SDM845_MASK,
|
||||
&sdm845_lm_sblk, PINGPONG_3, LM_2, 0),
|
||||
&sdm845_lm_sblk, PINGPONG_3, LM_2, DSPP_3),
|
||||
LM_BLK("lm_4", LM_4, 0x48000, MIXER_SDM845_MASK,
|
||||
&sdm845_lm_sblk, PINGPONG_4, LM_5, 0),
|
||||
LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK,
|
||||
|
@@ -126,20 +126,20 @@ static const struct dpu_dspp_cfg sm8450_dspp[] = {
|
|||
DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
|
||||
&sm8150_dspp_sblk),
|
||||
};
|
||||
/* FIXME: interrupts */
|
||||
|
||||
static const struct dpu_pingpong_cfg sm8450_pp[] = {
|
||||
PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
|
||||
-1),
|
||||
PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
|
||||
-1),
|
||||
PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)),
|
||||
-1),
|
||||
PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)),
|
||||
-1),
|
||||
PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
|
||||
-1),
|
||||
|
@@ -161,11 +161,33 @@ static const struct dpu_merge_3d_cfg sm8450_merge_3d[] = {
|
|||
MERGE_3D_BLK("merge_3d_3", MERGE_3D_3, 0x65f00),
|
||||
};
|
||||
|
||||
/*
|
||||
* NOTE: Each display compression engine (DCE) contains dual hard
|
||||
* slice DSC encoders so both share same base address but with
|
||||
* its own different sub block address.
|
||||
*/
|
||||
static const struct dpu_dsc_cfg sm8450_dsc[] = {
|
||||
DSC_BLK_1_2("dce_0_0", DSC_0, 0x80000, 0x29c, 0, dsc_sblk_0),
|
||||
DSC_BLK_1_2("dce_0_1", DSC_1, 0x80000, 0x29c, 0, dsc_sblk_1),
|
||||
DSC_BLK_1_2("dce_1_0", DSC_2, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_0),
|
||||
DSC_BLK_1_2("dce_1_1", DSC_3, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_1),
|
||||
};
|
||||
|
||||
static const struct dpu_intf_cfg sm8450_intf[] = {
|
||||
INTF_BLK("intf_0", INTF_0, 0x34000, 0x280, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
|
||||
INTF_BLK("intf_1", INTF_1, 0x35000, 0x300, INTF_DSI, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
|
||||
INTF_BLK("intf_2", INTF_2, 0x36000, 0x300, INTF_DSI, 1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
|
||||
INTF_BLK("intf_3", INTF_3, 0x37000, 0x280, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
|
||||
INTF_BLK("intf_0", INTF_0, 0x34000, 0x280, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
|
||||
INTF_BLK_DSI_TE("intf_1", INTF_1, 0x35000, 0x300, INTF_DSI, 0, 24, INTF_SC7280_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
|
||||
DPU_IRQ_IDX(MDP_INTF1_7xxx_TEAR_INTR, 2)),
|
||||
INTF_BLK_DSI_TE("intf_2", INTF_2, 0x36000, 0x300, INTF_DSI, 1, 24, INTF_SC7280_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
|
||||
DPU_IRQ_IDX(MDP_INTF2_7xxx_TEAR_INTR, 2)),
|
||||
INTF_BLK("intf_3", INTF_3, 0x37000, 0x280, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK,
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
|
||||
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31)),
|
||||
};
|
||||
|
||||
static const struct dpu_perf_cfg sm8450_perf_data = {
|
||||
|
@@ -213,21 +235,23 @@ const struct dpu_mdss_cfg dpu_sm8450_cfg = {
|
|||
.dspp = sm8450_dspp,
|
||||
.pingpong_count = ARRAY_SIZE(sm8450_pp),
|
||||
.pingpong = sm8450_pp,
|
||||
.dsc_count = ARRAY_SIZE(sm8450_dsc),
|
||||
.dsc = sm8450_dsc,
|
||||
.merge_3d_count = ARRAY_SIZE(sm8450_merge_3d),
|
||||
.merge_3d = sm8450_merge_3d,
|
||||
.intf_count = ARRAY_SIZE(sm8450_intf),
|
||||
.intf = sm8450_intf,
|
||||
.vbif_count = ARRAY_SIZE(sdm845_vbif),
|
||||
.vbif = sdm845_vbif,
|
||||
.reg_dma_count = 1,
|
||||
.dma_cfg = &sm8450_regdma,
|
||||
.perf = &sm8450_perf_data,
|
||||
.mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
|
||||
BIT(MDP_SSPP_TOP0_INTR2) | \
|
||||
BIT(MDP_SSPP_TOP0_HIST_INTR) | \
|
||||
BIT(MDP_INTF0_7xxx_INTR) | \
|
||||
BIT(MDP_INTF1_7xxx_INTR) | \
|
||||
BIT(MDP_INTF1_7xxx_TEAR_INTR) | \
|
||||
BIT(MDP_INTF2_7xxx_INTR) | \
|
||||
BIT(MDP_INTF2_7xxx_TEAR_INTR) | \
|
||||
BIT(MDP_INTF3_7xxx_INTR),
|
||||
};
|
||||
|
||||
|
|
|
@@ -165,12 +165,33 @@ static const struct dpu_merge_3d_cfg sm8550_merge_3d[] = {
MERGE_3D_BLK("merge_3d_3", MERGE_3D_3, 0x66700),
};

/*
* NOTE: Each display compression engine (DCE) contains dual hard
* slice DSC encoders so both share same base address but with
* its own different sub block address.
*/
static const struct dpu_dsc_cfg sm8550_dsc[] = {
DSC_BLK_1_2("dce_0_0", DSC_0, 0x80000, 0x29c, 0, dsc_sblk_0),
DSC_BLK_1_2("dce_0_1", DSC_1, 0x80000, 0x29c, 0, dsc_sblk_1),
DSC_BLK_1_2("dce_1_0", DSC_2, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_0),
DSC_BLK_1_2("dce_1_1", DSC_3, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_1),
};

static const struct dpu_intf_cfg sm8550_intf[] = {
INTF_BLK("intf_0", INTF_0, 0x34000, 0x280, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
/* TODO TE sub-blocks for intf1 & intf2 */
INTF_BLK("intf_1", INTF_1, 0x35000, 0x300, INTF_DSI, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
INTF_BLK("intf_2", INTF_2, 0x36000, 0x300, INTF_DSI, 1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
INTF_BLK("intf_3", INTF_3, 0x37000, 0x280, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
INTF_BLK("intf_0", INTF_0, 0x34000, 0x280, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
INTF_BLK_DSI_TE("intf_1", INTF_1, 0x35000, 0x300, INTF_DSI, 0, 24, INTF_SC7280_MASK,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
DPU_IRQ_IDX(MDP_INTF1_7xxx_TEAR_INTR, 2)),
INTF_BLK_DSI_TE("intf_2", INTF_2, 0x36000, 0x300, INTF_DSI, 1, 24, INTF_SC7280_MASK,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
DPU_IRQ_IDX(MDP_INTF2_7xxx_TEAR_INTR, 2)),
INTF_BLK("intf_3", INTF_3, 0x37000, 0x280, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31)),
};

static const struct dpu_perf_cfg sm8550_perf_data = {
@@ -218,21 +239,23 @@ const struct dpu_mdss_cfg dpu_sm8550_cfg = {
.dspp = sm8550_dspp,
.pingpong_count = ARRAY_SIZE(sm8550_pp),
.pingpong = sm8550_pp,
.dsc_count = ARRAY_SIZE(sm8550_dsc),
.dsc = sm8550_dsc,
.merge_3d_count = ARRAY_SIZE(sm8550_merge_3d),
.merge_3d = sm8550_merge_3d,
.intf_count = ARRAY_SIZE(sm8550_intf),
.intf = sm8550_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
.vbif = sdm845_vbif,
.reg_dma_count = 1,
.dma_cfg = &sm8450_regdma,
.perf = &sm8550_perf_data,
.mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
BIT(MDP_SSPP_TOP0_INTR2) | \
BIT(MDP_SSPP_TOP0_HIST_INTR) | \
BIT(MDP_INTF0_7xxx_INTR) | \
BIT(MDP_INTF1_7xxx_INTR) | \
BIT(MDP_INTF1_7xxx_TEAR_INTR) | \
BIT(MDP_INTF2_7xxx_INTR) | \
BIT(MDP_INTF2_7xxx_TEAR_INTR) | \
BIT(MDP_INTF3_7xxx_INTR),
};

@@ -1392,7 +1392,7 @@ DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);

static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
struct drm_crtc *crtc = (struct drm_crtc *) s->private;
struct drm_crtc *crtc = s->private;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
@@ -1463,6 +1463,8 @@ static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
struct drm_plane *cursor)
{
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct drm_crtc *crtc = NULL;
struct dpu_crtc *dpu_crtc = NULL;
int i, ret;
@@ -1494,7 +1496,8 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,

drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

drm_crtc_enable_color_mgmt(crtc, 0, true, 0);
if (dpu_kms->catalog->dspp_count)
drm_crtc_enable_color_mgmt(crtc, 0, true, 0);

/* save user friendly CRTC name for later */
snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

@ -339,7 +339,8 @@ void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
|
|||
DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n",
|
||||
DRMID(phys_enc->parent),
|
||||
dpu_encoder_helper_get_intf_type(phys_enc->intf_mode),
|
||||
phys_enc->intf_idx - INTF_0, phys_enc->wb_idx - WB_0,
|
||||
phys_enc->hw_intf ? phys_enc->hw_intf->idx - INTF_0 : -1,
|
||||
phys_enc->hw_wb ? phys_enc->hw_wb->idx - WB_0 : -1,
|
||||
phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
|
||||
|
||||
dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc,
|
||||
|
@ -495,7 +496,7 @@ void dpu_encoder_helper_split_config(
|
|||
hw_mdptop = phys_enc->hw_mdptop;
|
||||
disp_info = &dpu_enc->disp_info;
|
||||
|
||||
if (disp_info->intf_type != DRM_MODE_ENCODER_DSI)
|
||||
if (disp_info->intf_type != INTF_DSI)
|
||||
return;
|
||||
|
||||
/**
|
||||
|
@ -666,6 +667,7 @@ static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
|
|||
struct dpu_kms *dpu_kms;
|
||||
struct dpu_hw_mdp *hw_mdptop;
|
||||
struct drm_encoder *drm_enc;
|
||||
struct dpu_encoder_phys *phys_enc;
|
||||
int i;
|
||||
|
||||
if (!dpu_enc || !disp_info) {
|
||||
|
@ -696,12 +698,22 @@ static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
|
|||
vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
|
||||
|
||||
vsync_cfg.pp_count = dpu_enc->num_phys_encs;
|
||||
vsync_cfg.frame_rate = drm_mode_vrefresh(&dpu_enc->base.crtc->state->adjusted_mode);
|
||||
|
||||
if (disp_info->is_te_using_watchdog_timer)
|
||||
vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
|
||||
else
|
||||
vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;
|
||||
|
||||
hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
|
||||
|
||||
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
|
||||
phys_enc = dpu_enc->phys_encs[i];
|
||||
|
||||
if (phys_enc->has_intf_te && phys_enc->hw_intf->ops.vsync_sel)
|
||||
phys_enc->hw_intf->ops.vsync_sel(phys_enc->hw_intf,
|
||||
vsync_cfg.vsync_source);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1127,7 +1139,7 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
|
|||
}
|
||||
|
||||
|
||||
if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_TMDS &&
|
||||
if (dpu_enc->disp_info.intf_type == INTF_DP &&
|
||||
dpu_enc->cur_master->hw_mdptop &&
|
||||
dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
|
||||
dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
|
||||
|
@ -1135,7 +1147,7 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
|
|||
|
||||
_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
|
||||
|
||||
if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
|
||||
if (dpu_enc->disp_info.intf_type == INTF_DSI &&
|
||||
!WARN_ON(dpu_enc->num_phys_encs == 0)) {
|
||||
unsigned bpc = dpu_enc->connector->display_info.bpc;
|
||||
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
|
||||
|
@ -1258,38 +1270,23 @@ static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc,
|
|||
mutex_unlock(&dpu_enc->enc_lock);
|
||||
}
|
||||
|
||||
static enum dpu_intf dpu_encoder_get_intf(const struct dpu_mdss_cfg *catalog,
|
||||
static struct dpu_hw_intf *dpu_encoder_get_intf(const struct dpu_mdss_cfg *catalog,
|
||||
struct dpu_rm *dpu_rm,
|
||||
enum dpu_intf_type type, u32 controller_id)
|
||||
{
|
||||
int i = 0;
|
||||
|
||||
if (type == INTF_WB)
|
||||
return INTF_MAX;
|
||||
return NULL;
|
||||
|
||||
for (i = 0; i < catalog->intf_count; i++) {
|
||||
if (catalog->intf[i].type == type
|
||||
&& catalog->intf[i].controller_id == controller_id) {
|
||||
return catalog->intf[i].id;
|
||||
return dpu_rm_get_intf(dpu_rm, catalog->intf[i].id);
|
||||
}
|
||||
}
|
||||
|
||||
return INTF_MAX;
|
||||
}
|
||||
|
||||
static enum dpu_wb dpu_encoder_get_wb(const struct dpu_mdss_cfg *catalog,
|
||||
enum dpu_intf_type type, u32 controller_id)
|
||||
{
|
||||
int i = 0;
|
||||
|
||||
if (type != INTF_WB)
|
||||
return WB_MAX;
|
||||
|
||||
for (i = 0; i < catalog->wb_count; i++) {
|
||||
if (catalog->wb[i].id == controller_id)
|
||||
return catalog->wb[i].id;
|
||||
}
|
||||
|
||||
return WB_MAX;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
|
||||
|
@ -1408,7 +1405,8 @@ void dpu_encoder_frame_done_callback(
|
|||
*/
|
||||
trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event,
|
||||
dpu_encoder_helper_get_intf_type(ready_phys->intf_mode),
|
||||
ready_phys->intf_idx, ready_phys->wb_idx);
|
||||
ready_phys->hw_intf ? ready_phys->hw_intf->idx : -1,
|
||||
ready_phys->hw_wb ? ready_phys->hw_wb->idx : -1);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1488,7 +1486,8 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
|
|||
|
||||
trace_dpu_enc_trigger_flush(DRMID(drm_enc),
|
||||
dpu_encoder_helper_get_intf_type(phys->intf_mode),
|
||||
phys->intf_idx, phys->wb_idx,
|
||||
phys->hw_intf ? phys->hw_intf->idx : -1,
|
||||
phys->hw_wb ? phys->hw_wb->idx : -1,
|
||||
pending_kickoff_cnt, ctl->idx,
|
||||
extra_flush_bits, ret);
|
||||
}
|
||||
|
@ -1823,7 +1822,8 @@ dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc,
|
|||
return DIV_ROUND_UP(total_pixels, dsc->slice_width);
|
||||
}
|
||||
|
||||
static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
|
||||
static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_ctl *ctl,
|
||||
struct dpu_hw_dsc *hw_dsc,
|
||||
struct dpu_hw_pingpong *hw_pp,
|
||||
struct drm_dsc_config *dsc,
|
||||
u32 common_mode,
|
||||
|
@ -1839,10 +1839,13 @@ static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
|
|||
hw_pp->ops.setup_dsc(hw_pp);
|
||||
|
||||
if (hw_dsc->ops.dsc_bind_pingpong_blk)
|
||||
hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, true, hw_pp->idx);
|
||||
hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, hw_pp->idx);
|
||||
|
||||
if (hw_pp->ops.enable_dsc)
|
||||
hw_pp->ops.enable_dsc(hw_pp);
|
||||
|
||||
if (ctl->ops.update_pending_flush_dsc)
|
||||
ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx);
|
||||
}
|
||||
|
||||
static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
|
||||
|
@ -1850,6 +1853,7 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
|
|||
{
|
||||
/* coding only for 2LM, 2enc, 1 dsc config */
|
||||
struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
|
||||
struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
|
||||
struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
|
||||
struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
|
||||
int this_frame_slices;
|
||||
|
@ -1887,7 +1891,8 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
|
|||
initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
|
||||
|
||||
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
|
||||
dpu_encoder_dsc_pipe_cfg(hw_dsc[i], hw_pp[i], dsc, dsc_common_mode, initial_lines);
|
||||
dpu_encoder_dsc_pipe_cfg(ctl, hw_dsc[i], hw_pp[i],
|
||||
dsc, dsc_common_mode, initial_lines);
|
||||
}
|
||||
|
||||
void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
|
||||
|
@ -1977,7 +1982,7 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
|
|||
phys->ops.handle_post_kickoff(phys);
|
||||
}
|
||||
|
||||
if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
|
||||
if (dpu_enc->disp_info.intf_type == INTF_DSI &&
|
||||
!dpu_encoder_vsync_time(drm_enc, &wakeup_time)) {
|
||||
trace_dpu_enc_early_kickoff(DRMID(drm_enc),
|
||||
ktime_to_ms(wakeup_time));
|
||||
|
@ -2019,6 +2024,41 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
|
|||
}
|
||||
}
|
||||
|
||||
static void dpu_encoder_dsc_pipe_clr(struct dpu_hw_ctl *ctl,
|
||||
struct dpu_hw_dsc *hw_dsc,
|
||||
struct dpu_hw_pingpong *hw_pp)
|
||||
{
|
||||
if (hw_dsc->ops.dsc_disable)
|
||||
hw_dsc->ops.dsc_disable(hw_dsc);
|
||||
|
||||
if (hw_pp->ops.disable_dsc)
|
||||
hw_pp->ops.disable_dsc(hw_pp);
|
||||
|
||||
if (hw_dsc->ops.dsc_bind_pingpong_blk)
|
||||
hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, PINGPONG_NONE);
|
||||
|
||||
if (ctl->ops.update_pending_flush_dsc)
|
||||
ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx);
|
||||
}
|
||||
|
||||
static void dpu_encoder_unprep_dsc(struct dpu_encoder_virt *dpu_enc)
|
||||
{
|
||||
/* coding only for 2LM, 2enc, 1 dsc config */
|
||||
struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
|
||||
struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
|
||||
struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
|
||||
struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
|
||||
int i;
|
||||
|
||||
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
|
||||
hw_pp[i] = dpu_enc->hw_pp[i];
|
||||
hw_dsc[i] = dpu_enc->hw_dsc[i];
|
||||
|
||||
if (hw_pp[i] && hw_dsc[i])
|
||||
dpu_encoder_dsc_pipe_clr(ctl, hw_dsc[i], hw_pp[i]);
|
||||
}
|
||||
}
|
||||
|
||||
void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
|
||||
{
|
||||
struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
|
||||
|
@ -2040,8 +2080,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
|
|||
if (phys_enc->hw_wb) {
|
||||
/* disable the PP block */
|
||||
if (phys_enc->hw_wb->ops.bind_pingpong_blk)
|
||||
phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, false,
|
||||
phys_enc->hw_pp->idx);
|
||||
phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, PINGPONG_NONE);
|
||||
|
||||
/* mark WB flush as pending */
|
||||
if (phys_enc->hw_ctl->ops.update_pending_flush_wb)
|
||||
|
@ -2050,8 +2089,8 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
|
|||
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
|
||||
if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk)
|
||||
phys_enc->hw_intf->ops.bind_pingpong_blk(
|
||||
dpu_enc->phys_encs[i]->hw_intf, false,
|
||||
dpu_enc->phys_encs[i]->hw_pp->idx);
|
||||
dpu_enc->phys_encs[i]->hw_intf,
|
||||
PINGPONG_NONE);
|
||||
|
||||
/* mark INTF flush as pending */
|
||||
if (phys_enc->hw_ctl->ops.update_pending_flush_intf)
|
||||
|
@ -2069,8 +2108,12 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
|
|||
phys_enc->hw_pp->merge_3d->idx);
|
||||
}
|
||||
|
||||
if (dpu_enc->dsc)
|
||||
dpu_encoder_unprep_dsc(dpu_enc);
|
||||
|
||||
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
|
||||
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
|
||||
intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
|
||||
|
||||
if (phys_enc->hw_intf)
|
||||
intf_cfg.intf = phys_enc->hw_intf->idx;
|
||||
|
@ -2099,7 +2142,8 @@ static int _dpu_encoder_status_show(struct seq_file *s, void *data)
|
|||
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
|
||||
|
||||
seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d ",
|
||||
phys->intf_idx - INTF_0, phys->wb_idx - WB_0,
|
||||
phys->hw_intf ? phys->hw_intf->idx - INTF_0 : -1,
|
||||
phys->hw_wb ? phys->hw_wb->idx - WB_0 : -1,
|
||||
atomic_read(&phys->vsync_cnt),
|
||||
atomic_read(&phys->underrun_cnt));
|
||||
|
||||
|
@ -2115,16 +2159,15 @@ DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);
|
|||
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
|
||||
{
|
||||
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
|
||||
int i;
|
||||
|
||||
char name[DPU_NAME_SIZE];
|
||||
char name[12];
|
||||
|
||||
if (!drm_enc->dev) {
|
||||
DPU_ERROR("invalid encoder or kms\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);
|
||||
snprintf(name, sizeof(name), "encoder%u", drm_enc->base.id);
|
||||
|
||||
/* create overall sub-directory for the encoder */
|
||||
dpu_enc->debugfs_root = debugfs_create_dir(name,
|
||||
|
@ -2134,12 +2177,6 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
|
|||
debugfs_create_file("status", 0600,
|
||||
dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops);
|
||||
|
||||
for (i = 0; i < dpu_enc->num_phys_encs; i++)
|
||||
if (dpu_enc->phys_encs[i]->ops.late_register)
|
||||
dpu_enc->phys_encs[i]->ops.late_register(
|
||||
dpu_enc->phys_encs[i],
|
||||
dpu_enc->debugfs_root);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
|
@ -2182,7 +2219,7 @@ static int dpu_encoder_virt_add_phys_encs(
|
|||
}
|
||||
|
||||
|
||||
if (disp_info->intf_type == DRM_MODE_ENCODER_VIRTUAL) {
|
||||
if (disp_info->intf_type == INTF_WB) {
|
||||
enc = dpu_encoder_phys_wb_init(params);
|
||||
|
||||
if (IS_ERR(enc)) {
|
||||
|
@ -2231,7 +2268,6 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
|
|||
{
|
||||
int ret = 0;
|
||||
int i = 0;
|
||||
enum dpu_intf_type intf_type = INTF_NONE;
|
||||
struct dpu_enc_phys_init_params phys_params;
|
||||
|
||||
if (!dpu_enc) {
|
||||
|
@ -2246,23 +2282,11 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
|
|||
phys_params.parent = &dpu_enc->base;
|
||||
phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
|
||||
|
||||
switch (disp_info->intf_type) {
|
||||
case DRM_MODE_ENCODER_DSI:
|
||||
intf_type = INTF_DSI;
|
||||
break;
|
||||
case DRM_MODE_ENCODER_TMDS:
|
||||
intf_type = INTF_DP;
|
||||
break;
|
||||
case DRM_MODE_ENCODER_VIRTUAL:
|
||||
intf_type = INTF_WB;
|
||||
break;
|
||||
}
|
||||
|
||||
WARN_ON(disp_info->num_of_h_tiles < 1);
|
||||
|
||||
DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
|
||||
|
||||
if (disp_info->intf_type != DRM_MODE_ENCODER_VIRTUAL)
|
||||
if (disp_info->intf_type != INTF_WB)
|
||||
dpu_enc->idle_pc_supported =
|
||||
dpu_kms->catalog->caps->has_idle_pc;
|
||||
|
||||
|
@ -2289,58 +2313,31 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
|
|||
DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
|
||||
i, controller_id, phys_params.split_role);
|
||||
|
||||
phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog,
|
||||
intf_type,
|
||||
controller_id);
|
||||
phys_params.hw_intf = dpu_encoder_get_intf(dpu_kms->catalog, &dpu_kms->rm,
|
||||
disp_info->intf_type,
|
||||
controller_id);
|
||||
|
||||
phys_params.wb_idx = dpu_encoder_get_wb(dpu_kms->catalog,
|
||||
intf_type, controller_id);
|
||||
/*
|
||||
* The phys_params might represent either an INTF or a WB unit, but not
|
||||
* both of them at the same time.
|
||||
*/
|
||||
if ((phys_params.intf_idx == INTF_MAX) &&
|
||||
(phys_params.wb_idx == WB_MAX)) {
|
||||
DPU_ERROR_ENC(dpu_enc, "could not get intf or wb: type %d, id %d\n",
|
||||
intf_type, controller_id);
|
||||
ret = -EINVAL;
|
||||
}
|
||||
if (disp_info->intf_type == INTF_WB && controller_id < WB_MAX)
|
||||
phys_params.hw_wb = dpu_rm_get_wb(&dpu_kms->rm, controller_id);
|
||||
|
||||
if ((phys_params.intf_idx != INTF_MAX) &&
|
||||
(phys_params.wb_idx != WB_MAX)) {
|
||||
DPU_ERROR_ENC(dpu_enc, "both intf and wb present: type %d, id %d\n",
|
||||
intf_type, controller_id);
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
||||
if (!ret) {
|
||||
ret = dpu_encoder_virt_add_phys_encs(disp_info,
|
||||
dpu_enc, &phys_params);
|
||||
if (ret)
|
||||
DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
|
||||
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
|
||||
atomic_set(&phys->vsync_cnt, 0);
|
||||
atomic_set(&phys->underrun_cnt, 0);
|
||||
|
||||
if (phys->intf_idx >= INTF_0 && phys->intf_idx < INTF_MAX)
|
||||
phys->hw_intf = dpu_rm_get_intf(&dpu_kms->rm, phys->intf_idx);
|
||||
|
||||
if (phys->wb_idx >= WB_0 && phys->wb_idx < WB_MAX)
|
||||
phys->hw_wb = dpu_rm_get_wb(&dpu_kms->rm, phys->wb_idx);
|
||||
|
||||
if (!phys->hw_intf && !phys->hw_wb) {
|
||||
if (!phys_params.hw_intf && !phys_params.hw_wb) {
|
||||
DPU_ERROR_ENC(dpu_enc, "no intf or wb block assigned at idx: %d\n", i);
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
if (phys->hw_intf && phys->hw_wb) {
|
||||
if (phys_params.hw_intf && phys_params.hw_wb) {
|
||||
DPU_ERROR_ENC(dpu_enc,
|
||||
"invalid phys both intf and wb block at idx: %d\n", i);
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
ret = dpu_encoder_virt_add_phys_encs(disp_info,
|
||||
dpu_enc, &phys_params);
|
||||
if (ret) {
|
||||
DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2390,7 +2387,8 @@ static const struct drm_encoder_funcs dpu_encoder_funcs = {
|
|||
.early_unregister = dpu_encoder_early_unregister,
|
||||
};
|
||||
|
||||
int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
|
||||
struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
|
||||
int drm_enc_mode,
|
||||
struct msm_display_info *disp_info)
|
||||
{
|
||||
struct msm_drm_private *priv = dev->dev_private;
|
||||
|
@ -2399,7 +2397,23 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
|
|||
struct dpu_encoder_virt *dpu_enc = NULL;
|
||||
int ret = 0;
|
||||
|
||||
dpu_enc = to_dpu_encoder_virt(enc);
|
||||
dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
|
||||
if (!dpu_enc)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
ret = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
|
||||
drm_enc_mode, NULL);
|
||||
if (ret) {
|
||||
devm_kfree(dev->dev, dpu_enc);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
|
||||
|
||||
spin_lock_init(&dpu_enc->enc_spinlock);
|
||||
dpu_enc->enabled = false;
|
||||
mutex_init(&dpu_enc->enc_lock);
|
||||
mutex_init(&dpu_enc->rc_lock);
|
||||
|
||||
ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
|
||||
if (ret)
|
||||
|
@ -2409,11 +2423,11 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
|
|||
timer_setup(&dpu_enc->frame_done_timer,
|
||||
dpu_encoder_frame_done_timeout, 0);
|
||||
|
||||
if (disp_info->intf_type == DRM_MODE_ENCODER_DSI)
|
||||
if (disp_info->intf_type == INTF_DSI)
|
||||
timer_setup(&dpu_enc->vsync_event_timer,
|
||||
dpu_encoder_vsync_event_handler,
|
||||
0);
|
||||
else if (disp_info->intf_type == DRM_MODE_ENCODER_TMDS)
|
||||
else if (disp_info->intf_type == INTF_DP)
|
||||
dpu_enc->wide_bus_en = msm_dp_wide_bus_available(
|
||||
priv->dp[disp_info->h_tile_instance[0]]);
|
||||
|
||||
|
@ -2428,44 +2442,14 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
|
|||
|
||||
DPU_DEBUG_ENC(dpu_enc, "created\n");
|
||||
|
||||
return ret;
|
||||
return &dpu_enc->base;
|
||||
|
||||
fail:
|
||||
DPU_ERROR("failed to create encoder\n");
|
||||
if (drm_enc)
|
||||
dpu_encoder_destroy(drm_enc);
|
||||
|
||||
return ret;
|
||||
|
||||
|
||||
}
|
||||
|
||||
struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
|
||||
int drm_enc_mode)
|
||||
{
|
||||
struct dpu_encoder_virt *dpu_enc = NULL;
|
||||
int rc = 0;
|
||||
|
||||
dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
|
||||
if (!dpu_enc)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
|
||||
rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
|
||||
drm_enc_mode, NULL);
|
||||
if (rc) {
|
||||
devm_kfree(dev->dev, dpu_enc);
|
||||
return ERR_PTR(rc);
|
||||
}
|
||||
|
||||
drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
|
||||
|
||||
spin_lock_init(&dpu_enc->enc_spinlock);
|
||||
dpu_enc->enabled = false;
|
||||
mutex_init(&dpu_enc->enc_lock);
|
||||
mutex_init(&dpu_enc->rc_lock);
|
||||
|
||||
return &dpu_enc->base;
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
|
||||
|
@ -2539,3 +2523,30 @@ unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc)
|
|||
|
||||
return dpu_enc->dsc_mask;
|
||||
}
|
||||
|
||||
void dpu_encoder_phys_init(struct dpu_encoder_phys *phys_enc,
|
||||
struct dpu_enc_phys_init_params *p)
|
||||
{
|
||||
int i;
|
||||
|
||||
phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
|
||||
phys_enc->hw_intf = p->hw_intf;
|
||||
phys_enc->hw_wb = p->hw_wb;
|
||||
phys_enc->parent = p->parent;
|
||||
phys_enc->dpu_kms = p->dpu_kms;
|
||||
phys_enc->split_role = p->split_role;
|
||||
phys_enc->enc_spinlock = p->enc_spinlock;
|
||||
phys_enc->enable_state = DPU_ENC_DISABLED;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
|
||||
phys_enc->irq[i] = -EINVAL;
|
||||
|
||||
atomic_set(&phys_enc->vblank_refcount, 0);
|
||||
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
|
||||
atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
|
||||
|
||||
atomic_set(&phys_enc->vsync_cnt, 0);
|
||||
atomic_set(&phys_enc->underrun_cnt, 0);
|
||||
|
||||
init_waitqueue_head(&phys_enc->pending_kickoff_wq);
|
||||
}
|
||||
|
|
|
@ -21,7 +21,7 @@
|
|||
|
||||
/**
|
||||
* struct msm_display_info - defines display properties
|
||||
* @intf_type: DRM_MODE_ENCODER_ type
|
||||
* @intf_type: INTF_ type
|
||||
* @num_of_h_tiles: Number of horizontal tiles in case of split interface
|
||||
* @h_tile_instance: Controller instance used per tile. Number of elements is
|
||||
* based on num_of_h_tiles
|
||||
|
@ -31,7 +31,7 @@
|
|||
* @dsc: DSC configuration data for DSC-enabled displays
|
||||
*/
|
||||
struct msm_display_info {
|
||||
int intf_type;
|
||||
enum dpu_intf_type intf_type;
|
||||
uint32_t num_of_h_tiles;
|
||||
uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
|
||||
bool is_cmd_mode;
|
||||
|
@ -130,20 +130,12 @@ void dpu_encoder_virt_runtime_resume(struct drm_encoder *encoder);
|
|||
/**
|
||||
* dpu_encoder_init - initialize virtual encoder object
|
||||
* @dev: Pointer to drm device structure
|
||||
* @drm_enc_mode: corresponding DRM_MODE_ENCODER_* constant
|
||||
* @disp_info: Pointer to display information structure
|
||||
* Returns: Pointer to newly created drm encoder
|
||||
*/
|
||||
struct drm_encoder *dpu_encoder_init(
|
||||
struct drm_device *dev,
|
||||
int drm_enc_mode);
|
||||
|
||||
/**
|
||||
* dpu_encoder_setup - setup dpu_encoder for the display probed
|
||||
* @dev: Pointer to drm device structure
|
||||
* @enc: Pointer to the drm_encoder
|
||||
* @disp_info: Pointer to the display info
|
||||
*/
|
||||
int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
|
||||
struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
|
||||
int drm_enc_mode,
|
||||
struct msm_display_info *disp_info);
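With the separate setup step gone, callers hand the display description to dpu_encoder_init() directly. A minimal sketch of the new call pattern follows; the surrounding caller and the field values are illustrative, only the prototype above is from this change.

/* illustrative caller; only the dpu_encoder_init() signature is from this change */
struct msm_display_info info = {
	.intf_type = INTF_DSI,
	.num_of_h_tiles = 1,
	.h_tile_instance = { 0 },
	.is_cmd_mode = true,
};
struct drm_encoder *encoder;

encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI, &info);
if (IS_ERR(encoder))
	return PTR_ERR(encoder);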
|
||||
|
||||
/**
|
||||
|
|
|
@ -63,7 +63,6 @@ struct dpu_encoder_phys;
|
|||
/**
|
||||
* struct dpu_encoder_phys_ops - Interface the physical encoders provide to
|
||||
* the containing virtual encoder.
|
||||
* @late_register: DRM Call. Add Userspace interfaces, debugfs.
|
||||
* @prepare_commit: MSM Atomic Call, start of atomic commit sequence
|
||||
* @is_master: Whether this phys_enc is the current master
|
||||
* encoder. Can be switched at enable time. Based
|
||||
|
@ -93,8 +92,6 @@ struct dpu_encoder_phys;
|
|||
*/
|
||||
|
||||
struct dpu_encoder_phys_ops {
|
||||
int (*late_register)(struct dpu_encoder_phys *encoder,
|
||||
struct dentry *debugfs_root);
|
||||
void (*prepare_commit)(struct dpu_encoder_phys *encoder);
|
||||
bool (*is_master)(struct dpu_encoder_phys *encoder);
|
||||
void (*atomic_mode_set)(struct dpu_encoder_phys *encoder,
|
||||
|
@ -129,10 +126,10 @@ struct dpu_encoder_phys_ops {
|
|||
/**
|
||||
* enum dpu_intr_idx - dpu encoder interrupt index
|
||||
* @INTR_IDX_VSYNC: Vsync interrupt for video mode panel
|
||||
* @INTR_IDX_PINGPONG: Pingpong done unterrupt for cmd mode panel
|
||||
* @INTR_IDX_UNDERRUN: Underrun unterrupt for video and cmd mode panel
|
||||
* @INTR_IDX_RDPTR: Readpointer done unterrupt for cmd mode panel
|
||||
* @INTR_IDX_WB_DONE: Writeback fone interrupt for virtual connector
|
||||
* @INTR_IDX_PINGPONG: Pingpong done interrupt for cmd mode panel
|
||||
* @INTR_IDX_UNDERRUN: Underrun interrupt for video and cmd mode panel
|
||||
* @INTR_IDX_RDPTR: Readpointer done interrupt for cmd mode panel
|
||||
* @INTR_IDX_WB_DONE: Writeback done interrupt for virtual connector
|
||||
*/
|
||||
enum dpu_intr_idx {
|
||||
INTR_IDX_VSYNC,
|
||||
|
@ -161,8 +158,6 @@ enum dpu_intr_idx {
|
|||
* @enabled: Whether the encoder has enabled and running a mode
|
||||
* @split_role: Role to play in a split-panel configuration
|
||||
* @intf_mode: Interface mode
|
||||
* @intf_idx: Interface index on dpu hardware
|
||||
* @wb_idx: Writeback index on dpu hardware
|
||||
* @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
|
||||
* @enable_state: Enable state tracking
|
||||
* @vblank_refcount: Reference count of vblank request
|
||||
|
@ -176,6 +171,7 @@ enum dpu_intr_idx {
|
|||
* pending.
|
||||
* @pending_kickoff_wq: Wait queue for blocking until kickoff completes
|
||||
* @irq: IRQ indices
|
||||
* @has_intf_te: Interface TE configuration support
|
||||
*/
|
||||
struct dpu_encoder_phys {
|
||||
struct drm_encoder *parent;
|
||||
|
@ -189,8 +185,6 @@ struct dpu_encoder_phys {
|
|||
struct drm_display_mode cached_mode;
|
||||
enum dpu_enc_split_role split_role;
|
||||
enum dpu_intf_mode intf_mode;
|
||||
enum dpu_intf intf_idx;
|
||||
enum dpu_wb wb_idx;
|
||||
spinlock_t *enc_spinlock;
|
||||
enum dpu_enc_enable_state enable_state;
|
||||
atomic_t vblank_refcount;
|
||||
|
@ -200,6 +194,7 @@ struct dpu_encoder_phys {
|
|||
atomic_t pending_kickoff_cnt;
|
||||
wait_queue_head_t pending_kickoff_wq;
|
||||
int irq[INTR_IDX_MAX];
|
||||
bool has_intf_te;
|
||||
};
|
||||
|
||||
static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
|
||||
|
@ -256,16 +251,16 @@ struct dpu_encoder_phys_cmd {
|
|||
* @parent: Pointer to the containing virtual encoder
|
||||
* @parent_ops: Callbacks exposed by the parent to the phys_enc
|
||||
* @split_role: Role to play in a split-panel configuration
|
||||
* @intf_idx: Interface index this phys_enc will control
|
||||
* @wb_idx: Writeback index this phys_enc will control
|
||||
* @hw_intf: Hardware interface to the intf registers
|
||||
* @hw_wb: Hardware interface to the wb registers
|
||||
* @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
|
||||
*/
|
||||
struct dpu_enc_phys_init_params {
|
||||
struct dpu_kms *dpu_kms;
|
||||
struct drm_encoder *parent;
|
||||
enum dpu_enc_split_role split_role;
|
||||
enum dpu_intf intf_idx;
|
||||
enum dpu_wb wb_idx;
|
||||
struct dpu_hw_intf *hw_intf;
|
||||
struct dpu_hw_wb *hw_wb;
|
||||
spinlock_t *enc_spinlock;
|
||||
};
|
||||
|
||||
|
@ -405,4 +400,7 @@ void dpu_encoder_frame_done_callback(
|
|||
struct drm_encoder *drm_enc,
|
||||
struct dpu_encoder_phys *ready_phys, u32 event);
|
||||
|
||||
void dpu_encoder_phys_init(struct dpu_encoder_phys *phys,
|
||||
struct dpu_enc_phys_init_params *p);
|
||||
|
||||
#endif /* __dpu_encoder_phys_H__ */
|
||||
|
|
|
@ -16,12 +16,12 @@
|
|||
#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
|
||||
(e) && (e)->base.parent ? \
|
||||
(e)->base.parent->base.id : -1, \
|
||||
(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
|
||||
(e) ? (e)->base.hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
|
||||
|
||||
#define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
|
||||
(e) && (e)->base.parent ? \
|
||||
(e)->base.parent->base.id : -1, \
|
||||
(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
|
||||
(e) ? (e)->base.hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
|
||||
|
||||
#define to_dpu_encoder_phys_cmd(x) \
|
||||
container_of(x, struct dpu_encoder_phys_cmd, base)
|
||||
|
@ -36,10 +36,6 @@
|
|||
#define DEFAULT_TEARCHECK_SYNC_THRESH_START 4
|
||||
#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE 4
|
||||
|
||||
#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
|
||||
|
||||
#define DPU_ENC_MAX_POLL_TIMEOUT_US 2000
|
||||
|
||||
static void dpu_encoder_phys_cmd_enable_te(struct dpu_encoder_phys *phys_enc);
|
||||
|
||||
static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
|
||||
|
@ -59,7 +55,7 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg(
|
|||
if (!ctl->ops.setup_intf_cfg)
|
||||
return;
|
||||
|
||||
intf_cfg.intf = phys_enc->intf_idx;
|
||||
intf_cfg.intf = phys_enc->hw_intf->idx;
|
||||
intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
|
||||
intf_cfg.stream_sel = cmd_enc->stream_sel;
|
||||
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
|
||||
|
@ -70,8 +66,10 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg(
|
|||
if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) && phys_enc->hw_intf->ops.bind_pingpong_blk)
|
||||
phys_enc->hw_intf->ops.bind_pingpong_blk(
|
||||
phys_enc->hw_intf,
|
||||
true,
|
||||
phys_enc->hw_pp->idx);
|
||||
|
||||
if (intf_cfg.dsc != 0 && phys_enc->hw_intf->ops.enable_compression)
|
||||
phys_enc->hw_intf->ops.enable_compression(phys_enc->hw_intf);
|
||||
}
|
||||
|
||||
static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
|
||||
|
@ -101,13 +99,18 @@ static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
|
|||
DPU_ATRACE_END("pp_done_irq");
|
||||
}
|
||||
|
||||
static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
|
||||
static void dpu_encoder_phys_cmd_te_rd_ptr_irq(void *arg, int irq_idx)
|
||||
{
|
||||
struct dpu_encoder_phys *phys_enc = arg;
|
||||
struct dpu_encoder_phys_cmd *cmd_enc;
|
||||
|
||||
if (!phys_enc->hw_pp)
|
||||
return;
|
||||
if (phys_enc->has_intf_te) {
|
||||
if (!phys_enc->hw_intf)
|
||||
return;
|
||||
} else {
|
||||
if (!phys_enc->hw_pp)
|
||||
return;
|
||||
}
|
||||
|
||||
DPU_ATRACE_BEGIN("rd_ptr_irq");
|
||||
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
|
||||
|
@ -148,7 +151,10 @@ static void dpu_encoder_phys_cmd_atomic_mode_set(
|
|||
|
||||
phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done;
|
||||
|
||||
phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr;
|
||||
if (phys_enc->has_intf_te)
|
||||
phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_intf->cap->intr_tear_rd_ptr;
|
||||
else
|
||||
phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr;
|
||||
|
||||
phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
|
||||
}
|
||||
|
@ -259,7 +265,7 @@ static int dpu_encoder_phys_cmd_control_vblank_irq(
|
|||
if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
|
||||
ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
|
||||
phys_enc->irq[INTR_IDX_RDPTR],
|
||||
dpu_encoder_phys_cmd_pp_rd_ptr_irq,
|
||||
dpu_encoder_phys_cmd_te_rd_ptr_irq,
|
||||
phys_enc);
|
||||
else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
|
||||
ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
|
||||
|
@ -320,23 +326,29 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
|
|||
struct dpu_hw_tear_check tc_cfg = { 0 };
|
||||
struct drm_display_mode *mode;
|
||||
bool tc_enable = true;
|
||||
u32 vsync_hz;
|
||||
unsigned long vsync_hz;
|
||||
struct dpu_kms *dpu_kms;
|
||||
|
||||
if (!phys_enc->hw_pp) {
|
||||
DPU_ERROR("invalid encoder\n");
|
||||
return;
|
||||
if (phys_enc->has_intf_te) {
|
||||
if (!phys_enc->hw_intf ||
|
||||
!phys_enc->hw_intf->ops.enable_tearcheck) {
|
||||
DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
|
||||
return;
|
||||
}
|
||||
|
||||
DPU_DEBUG_CMDENC(cmd_enc, "");
|
||||
} else {
|
||||
if (!phys_enc->hw_pp ||
|
||||
!phys_enc->hw_pp->ops.enable_tearcheck) {
|
||||
DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
|
||||
return;
|
||||
}
|
||||
|
||||
DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
|
||||
}
|
||||
|
||||
mode = &phys_enc->cached_mode;
|
||||
|
||||
DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
|
||||
|
||||
if (!phys_enc->hw_pp->ops.setup_tearcheck ||
|
||||
!phys_enc->hw_pp->ops.enable_tearcheck) {
|
||||
DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
|
||||
return;
|
||||
}
|
||||
|
||||
dpu_kms = phys_enc->dpu_kms;
|
||||
|
||||
/*
|
||||
|
@ -349,9 +361,8 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
|
|||
* frequency divided by the no. of rows (lines) in the LCDpanel.
|
||||
*/
|
||||
vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync");
|
||||
if (vsync_hz <= 0) {
|
||||
DPU_DEBUG_CMDENC(cmd_enc, "invalid - vsync_hz %u\n",
|
||||
vsync_hz);
|
||||
if (!vsync_hz) {
|
||||
DPU_DEBUG_CMDENC(cmd_enc, "invalid - no vsync clock\n");
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -371,24 +382,24 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
|
|||
tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
|
||||
|
||||
DPU_DEBUG_CMDENC(cmd_enc,
|
||||
"tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
|
||||
phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
|
||||
mode->vtotal, drm_mode_vrefresh(mode));
|
||||
"tc vsync_clk_speed_hz %lu vtotal %u vrefresh %u\n",
|
||||
vsync_hz, mode->vtotal, drm_mode_vrefresh(mode));
|
||||
DPU_DEBUG_CMDENC(cmd_enc,
|
||||
"tc %d enable %u start_pos %u rd_ptr_irq %u\n",
|
||||
phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
|
||||
tc_cfg.rd_ptr_irq);
|
||||
"tc enable %u start_pos %u rd_ptr_irq %u\n",
|
||||
tc_enable, tc_cfg.start_pos, tc_cfg.rd_ptr_irq);
|
||||
DPU_DEBUG_CMDENC(cmd_enc,
|
||||
"tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
|
||||
phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.hw_vsync_mode,
|
||||
tc_cfg.vsync_count, tc_cfg.vsync_init_val);
|
||||
"tc hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
|
||||
tc_cfg.hw_vsync_mode, tc_cfg.vsync_count,
|
||||
tc_cfg.vsync_init_val);
|
||||
DPU_DEBUG_CMDENC(cmd_enc,
|
||||
"tc %d cfgheight %u thresh_start %u thresh_cont %u\n",
|
||||
phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.sync_cfg_height,
|
||||
tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);
|
||||
"tc cfgheight %u thresh_start %u thresh_cont %u\n",
|
||||
tc_cfg.sync_cfg_height, tc_cfg.sync_threshold_start,
|
||||
tc_cfg.sync_threshold_continue);
 
-	phys_enc->hw_pp->ops.setup_tearcheck(phys_enc->hw_pp, &tc_cfg);
-	phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable);
+	if (phys_enc->has_intf_te)
+		phys_enc->hw_intf->ops.enable_tearcheck(phys_enc->hw_intf, &tc_cfg);
+	else
+		phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, &tc_cfg);
 }
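Condensed, the tear-check setup above boils down to: derive the vsync counter from the "vsync" clock and the mode timings, then program the configuration through either the INTF or the PINGPONG block depending on has_intf_te. The vsync_count line in the sketch below is an assumption based on the comment earlier in this function (vsync clock divided by vtotal times refresh rate); the other lines mirror this hunk.

/* condensed sketch; vsync_count derivation is assumed from the comment above */
vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync");
if (!vsync_hz)
	return;

tc_cfg.vsync_count = vsync_hz / (mode->vtotal * drm_mode_vrefresh(mode));
tc_cfg.rd_ptr_irq = mode->vdisplay + 1;

if (phys_enc->has_intf_te)
	phys_enc->hw_intf->ops.enable_tearcheck(phys_enc->hw_intf, &tc_cfg);
else
	phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, &tc_cfg);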
|
||||
|
||||
static void _dpu_encoder_phys_cmd_pingpong_config(
|
||||
|
@ -430,7 +441,7 @@ static void dpu_encoder_phys_cmd_enable_helper(
|
|||
return;
|
||||
}
|
||||
|
||||
dpu_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);
|
||||
dpu_encoder_helper_split_config(phys_enc, phys_enc->hw_intf->idx);
|
||||
|
||||
_dpu_encoder_phys_cmd_pingpong_config(phys_enc);
|
||||
|
||||
|
@ -438,7 +449,7 @@ static void dpu_encoder_phys_cmd_enable_helper(
|
|||
return;
|
||||
|
||||
ctl = phys_enc->hw_ctl;
|
||||
ctl->ops.update_pending_flush_intf(ctl, phys_enc->intf_idx);
|
||||
ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
|
||||
}
|
||||
|
||||
static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
|
||||
|
@ -465,11 +476,19 @@ static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
|
|||
static void _dpu_encoder_phys_cmd_connect_te(
|
||||
struct dpu_encoder_phys *phys_enc, bool enable)
|
||||
{
|
||||
if (!phys_enc->hw_pp || !phys_enc->hw_pp->ops.connect_external_te)
|
||||
return;
|
||||
if (phys_enc->has_intf_te) {
|
||||
if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.connect_external_te)
|
||||
return;
|
||||
|
||||
trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
|
||||
phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
|
||||
trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
|
||||
phys_enc->hw_intf->ops.connect_external_te(phys_enc->hw_intf, enable);
|
||||
} else {
|
||||
if (!phys_enc->hw_pp || !phys_enc->hw_pp->ops.connect_external_te)
|
||||
return;
|
||||
|
||||
trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
|
||||
phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
|
||||
}
|
||||
}
|
||||
|
||||
static void dpu_encoder_phys_cmd_prepare_idle_pc(
|
||||
|
@ -482,17 +501,21 @@ static int dpu_encoder_phys_cmd_get_line_count(
|
|||
struct dpu_encoder_phys *phys_enc)
|
||||
{
|
||||
struct dpu_hw_pingpong *hw_pp;
|
||||
|
||||
if (!phys_enc->hw_pp)
|
||||
return -EINVAL;
|
||||
struct dpu_hw_intf *hw_intf;
|
||||
|
||||
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
|
||||
return -EINVAL;
|
||||
|
||||
hw_pp = phys_enc->hw_pp;
|
||||
if (!hw_pp->ops.get_line_count)
|
||||
return -EINVAL;
|
||||
if (phys_enc->has_intf_te) {
|
||||
hw_intf = phys_enc->hw_intf;
|
||||
if (!hw_intf || !hw_intf->ops.get_line_count)
|
||||
return -EINVAL;
|
||||
return hw_intf->ops.get_line_count(hw_intf);
|
||||
}
|
||||
|
||||
hw_pp = phys_enc->hw_pp;
|
||||
if (!hw_pp || !hw_pp->ops.get_line_count)
|
||||
return -EINVAL;
|
||||
return hw_pp->ops.get_line_count(hw_pp);
|
||||
}
|
||||
|
||||
|
@ -502,30 +525,39 @@ static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
|
|||
to_dpu_encoder_phys_cmd(phys_enc);
|
||||
struct dpu_hw_ctl *ctl;
|
||||
|
||||
if (!phys_enc->hw_pp) {
|
||||
DPU_ERROR("invalid encoder\n");
|
||||
return;
|
||||
}
|
||||
DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent),
|
||||
phys_enc->hw_pp->idx - PINGPONG_0,
|
||||
phys_enc->enable_state);
|
||||
|
||||
if (phys_enc->enable_state == DPU_ENC_DISABLED) {
|
||||
DPU_ERROR_CMDENC(cmd_enc, "already disabled\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (phys_enc->hw_pp->ops.enable_tearcheck)
|
||||
phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, false);
|
||||
if (phys_enc->has_intf_te) {
|
||||
DRM_DEBUG_KMS("id:%u intf:%d state:%d\n", DRMID(phys_enc->parent),
|
||||
phys_enc->hw_intf->idx - INTF_0,
|
||||
phys_enc->enable_state);
|
||||
|
||||
if (phys_enc->hw_intf->ops.disable_tearcheck)
|
||||
phys_enc->hw_intf->ops.disable_tearcheck(phys_enc->hw_intf);
|
||||
} else {
|
||||
if (!phys_enc->hw_pp) {
|
||||
DPU_ERROR("invalid encoder\n");
|
||||
return;
|
||||
}
|
||||
|
||||
DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent),
|
||||
phys_enc->hw_pp->idx - PINGPONG_0,
|
||||
phys_enc->enable_state);
|
||||
|
||||
if (phys_enc->hw_pp->ops.disable_tearcheck)
|
||||
phys_enc->hw_pp->ops.disable_tearcheck(phys_enc->hw_pp);
|
||||
}
|
||||
|
||||
if (phys_enc->hw_intf->ops.bind_pingpong_blk) {
|
||||
phys_enc->hw_intf->ops.bind_pingpong_blk(
|
||||
phys_enc->hw_intf,
|
||||
false,
|
||||
phys_enc->hw_pp->idx);
|
||||
PINGPONG_NONE);
|
||||
|
||||
ctl = phys_enc->hw_ctl;
|
||||
ctl->ops.update_pending_flush_intf(ctl, phys_enc->intf_idx);
|
||||
ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
|
||||
}
|
||||
|
||||
phys_enc->enable_state = DPU_ENC_DISABLED;
|
||||
|
@ -574,66 +606,31 @@ static void dpu_encoder_phys_cmd_prepare_for_kickoff(
|
|||
atomic_read(&phys_enc->pending_kickoff_cnt));
|
||||
}
|
||||
|
||||
static bool dpu_encoder_phys_cmd_is_ongoing_pptx(
|
||||
struct dpu_encoder_phys *phys_enc)
|
||||
{
|
||||
struct dpu_hw_pp_vsync_info info;
|
||||
|
||||
if (!phys_enc)
|
||||
return false;
|
||||
|
||||
phys_enc->hw_pp->ops.get_vsync_info(phys_enc->hw_pp, &info);
|
||||
if (info.wr_ptr_line_count > 0 &&
|
||||
info.wr_ptr_line_count < phys_enc->cached_mode.vdisplay)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void dpu_encoder_phys_cmd_enable_te(struct dpu_encoder_phys *phys_enc)
|
||||
{
|
||||
struct dpu_encoder_phys_cmd *cmd_enc =
|
||||
to_dpu_encoder_phys_cmd(phys_enc);
|
||||
int trial = 0;
|
||||
|
||||
if (!phys_enc)
|
||||
return;
|
||||
if (!phys_enc->hw_pp)
|
||||
return;
|
||||
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
|
||||
return;
|
||||
|
||||
/* If autorefresh is already disabled, we have nothing to do */
|
||||
if (!phys_enc->hw_pp->ops.get_autorefresh(phys_enc->hw_pp, NULL))
|
||||
return;
|
||||
if (phys_enc->has_intf_te) {
|
||||
if (!phys_enc->hw_intf->ops.disable_autorefresh)
|
||||
return;
|
||||
|
||||
/*
|
||||
* If autorefresh is enabled, disable it and make sure it is safe to
|
||||
* proceed with current frame commit/push. Sequence fallowed is,
|
||||
* 1. Disable TE
|
||||
* 2. Disable autorefresh config
|
||||
* 4. Poll for frame transfer ongoing to be false
|
||||
* 5. Enable TE back
|
||||
*/
|
||||
_dpu_encoder_phys_cmd_connect_te(phys_enc, false);
|
||||
phys_enc->hw_pp->ops.setup_autorefresh(phys_enc->hw_pp, 0, false);
|
||||
phys_enc->hw_intf->ops.disable_autorefresh(
|
||||
phys_enc->hw_intf,
|
||||
DRMID(phys_enc->parent),
|
||||
phys_enc->cached_mode.vdisplay);
|
||||
} else {
|
||||
if (!phys_enc->hw_pp ||
|
||||
!phys_enc->hw_pp->ops.disable_autorefresh)
|
||||
return;
|
||||
|
||||
do {
|
||||
udelay(DPU_ENC_MAX_POLL_TIMEOUT_US);
|
||||
if ((trial * DPU_ENC_MAX_POLL_TIMEOUT_US)
|
||||
> (KICKOFF_TIMEOUT_MS * USEC_PER_MSEC)) {
|
||||
DPU_ERROR_CMDENC(cmd_enc,
|
||||
"disable autorefresh failed\n");
|
||||
break;
|
||||
}
|
||||
|
||||
trial++;
|
||||
} while (dpu_encoder_phys_cmd_is_ongoing_pptx(phys_enc));
|
||||
|
||||
_dpu_encoder_phys_cmd_connect_te(phys_enc, true);
|
||||
|
||||
DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc),
|
||||
"disabled autorefresh\n");
|
||||
phys_enc->hw_pp->ops.disable_autorefresh(
|
||||
phys_enc->hw_pp,
|
||||
DRMID(phys_enc->parent),
|
||||
phys_enc->cached_mode.vdisplay);
|
||||
}
|
||||
}
|
||||
|
||||
static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
|
||||
|
@ -670,7 +667,7 @@ static int dpu_encoder_phys_cmd_wait_for_tx_complete(
|
|||
if (rc) {
|
||||
DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
|
||||
DRMID(phys_enc->parent), rc,
|
||||
phys_enc->intf_idx - INTF_0);
|
||||
phys_enc->hw_intf->idx - INTF_0);
|
||||
}
|
||||
|
||||
return rc;
|
||||
|
@ -710,7 +707,7 @@ static int dpu_encoder_phys_cmd_wait_for_vblank(
|
|||
|
||||
rc = dpu_encoder_helper_wait_for_irq(phys_enc,
|
||||
phys_enc->irq[INTR_IDX_RDPTR],
|
||||
dpu_encoder_phys_cmd_pp_rd_ptr_irq,
|
||||
dpu_encoder_phys_cmd_te_rd_ptr_irq,
|
||||
&wait_info);
|
||||
|
||||
return rc;
|
||||
|
@ -759,36 +756,26 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
|
|||
{
|
||||
struct dpu_encoder_phys *phys_enc = NULL;
|
||||
struct dpu_encoder_phys_cmd *cmd_enc = NULL;
|
||||
int i, ret = 0;
|
||||
|
||||
DPU_DEBUG("intf %d\n", p->intf_idx - INTF_0);
|
||||
DPU_DEBUG("intf\n");
|
||||
|
||||
cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
|
||||
if (!cmd_enc) {
|
||||
ret = -ENOMEM;
|
||||
DPU_ERROR("failed to allocate\n");
|
||||
return ERR_PTR(ret);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
phys_enc = &cmd_enc->base;
|
||||
phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
|
||||
phys_enc->intf_idx = p->intf_idx;
|
||||
|
||||
dpu_encoder_phys_init(phys_enc, p);
|
||||
|
||||
dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
|
||||
phys_enc->parent = p->parent;
|
||||
phys_enc->dpu_kms = p->dpu_kms;
|
||||
phys_enc->split_role = p->split_role;
|
||||
phys_enc->intf_mode = INTF_MODE_CMD;
|
||||
phys_enc->enc_spinlock = p->enc_spinlock;
|
||||
cmd_enc->stream_sel = 0;
|
||||
phys_enc->enable_state = DPU_ENC_DISABLED;
|
||||
for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
|
||||
phys_enc->irq[i] = -EINVAL;
|
||||
|
||||
atomic_set(&phys_enc->vblank_refcount, 0);
|
||||
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
|
||||
atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
|
||||
phys_enc->has_intf_te = test_bit(DPU_INTF_TE,
|
||||
&phys_enc->hw_intf->cap->features);
|
||||
|
||||
atomic_set(&cmd_enc->pending_vblank_cnt, 0);
|
||||
init_waitqueue_head(&phys_enc->pending_kickoff_wq);
|
||||
init_waitqueue_head(&cmd_enc->pending_vblank_wq);
|
||||
|
||||
DPU_DEBUG_CMDENC(cmd_enc, "created\n");
@ -287,7 +287,6 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
|
|||
if (phys_enc->hw_intf->ops.bind_pingpong_blk)
|
||||
phys_enc->hw_intf->ops.bind_pingpong_blk(
|
||||
phys_enc->hw_intf,
|
||||
true,
|
||||
phys_enc->hw_pp->idx);
|
||||
|
||||
if (phys_enc->hw_pp->merge_3d)
|
||||
|
@ -699,7 +698,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
|
|||
struct dpu_enc_phys_init_params *p)
|
||||
{
|
||||
struct dpu_encoder_phys *phys_enc = NULL;
|
||||
int i;
|
||||
|
||||
if (!p) {
|
||||
DPU_ERROR("failed to create encoder due to invalid parameter\n");
|
||||
|
@ -712,26 +710,14 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
|
|||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
|
||||
phys_enc->intf_idx = p->intf_idx;
|
||||
|
||||
DPU_DEBUG_VIDENC(phys_enc, "\n");
|
||||
|
||||
dpu_encoder_phys_init(phys_enc, p);
|
||||
|
||||
dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
|
||||
phys_enc->parent = p->parent;
|
||||
phys_enc->dpu_kms = p->dpu_kms;
|
||||
phys_enc->split_role = p->split_role;
|
||||
phys_enc->intf_mode = INTF_MODE_VIDEO;
|
||||
phys_enc->enc_spinlock = p->enc_spinlock;
|
||||
for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
|
||||
phys_enc->irq[i] = -EINVAL;
|
||||
|
||||
atomic_set(&phys_enc->vblank_refcount, 0);
|
||||
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
|
||||
init_waitqueue_head(&phys_enc->pending_kickoff_wq);
|
||||
phys_enc->enable_state = DPU_ENC_DISABLED;
|
||||
|
||||
DPU_DEBUG_VIDENC(phys_enc, "created intf idx:%d\n", p->intf_idx);
|
||||
DPU_DEBUG_VIDENC(phys_enc, "created intf idx:%d\n", p->hw_intf->idx);
|
||||
|
||||
return phys_enc;
|
||||
}
|
||||
|
|
|
@ -102,7 +102,7 @@ static void dpu_encoder_phys_wb_set_qos_remap(
|
|||
static void dpu_encoder_phys_wb_set_qos(struct dpu_encoder_phys *phys_enc)
|
||||
{
|
||||
struct dpu_hw_wb *hw_wb;
|
||||
struct dpu_hw_wb_qos_cfg qos_cfg;
|
||||
struct dpu_hw_qos_cfg qos_cfg;
|
||||
const struct dpu_mdss_cfg *catalog;
|
||||
const struct dpu_qos_lut_tbl *qos_lut_tb;
|
||||
|
||||
|
@ -115,7 +115,7 @@ static void dpu_encoder_phys_wb_set_qos(struct dpu_encoder_phys *phys_enc)
|
|||
|
||||
hw_wb = phys_enc->hw_wb;
|
||||
|
||||
memset(&qos_cfg, 0, sizeof(struct dpu_hw_wb_qos_cfg));
|
||||
memset(&qos_cfg, 0, sizeof(struct dpu_hw_qos_cfg));
|
||||
qos_cfg.danger_safe_en = true;
|
||||
qos_cfg.danger_lut =
|
||||
catalog->perf->danger_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
|
||||
|
@ -140,7 +140,6 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
|
|||
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
|
||||
struct dpu_hw_wb *hw_wb;
|
||||
struct dpu_hw_wb_cfg *wb_cfg;
|
||||
struct dpu_hw_cdp_cfg cdp_cfg;
|
||||
|
||||
if (!phys_enc || !phys_enc->dpu_kms || !phys_enc->dpu_kms->catalog) {
|
||||
DPU_ERROR("invalid encoder\n");
|
||||
|
@ -163,18 +162,10 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
|
|||
hw_wb->ops.setup_outformat(hw_wb, wb_cfg);
|
||||
|
||||
if (hw_wb->ops.setup_cdp) {
|
||||
memset(&cdp_cfg, 0, sizeof(struct dpu_hw_cdp_cfg));
|
||||
const struct dpu_perf_cfg *perf = phys_enc->dpu_kms->catalog->perf;
|
||||
|
||||
cdp_cfg.enable = phys_enc->dpu_kms->catalog->perf->cdp_cfg
|
||||
[DPU_PERF_CDP_USAGE_NRT].wr_enable;
|
||||
cdp_cfg.ubwc_meta_enable =
|
||||
DPU_FORMAT_IS_UBWC(wb_cfg->dest.format);
|
||||
cdp_cfg.tile_amortize_enable =
|
||||
DPU_FORMAT_IS_UBWC(wb_cfg->dest.format) ||
|
||||
DPU_FORMAT_IS_TILE(wb_cfg->dest.format);
|
||||
cdp_cfg.preload_ahead = DPU_WB_CDP_PRELOAD_AHEAD_64;
|
||||
|
||||
hw_wb->ops.setup_cdp(hw_wb, &cdp_cfg);
|
||||
hw_wb->ops.setup_cdp(hw_wb, wb_cfg->dest.format,
|
||||
perf->cdp_cfg[DPU_PERF_CDP_USAGE_NRT].wr_enable);
|
||||
}
|
||||
|
||||
if (hw_wb->ops.setup_outaddress)
|
||||
|
@ -219,7 +210,7 @@ static void dpu_encoder_phys_wb_setup_cdp(struct dpu_encoder_phys *phys_enc)
|
|||
|
||||
/* setup which pp blk will connect to this wb */
|
||||
if (hw_pp && phys_enc->hw_wb->ops.bind_pingpong_blk)
|
||||
phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, true,
|
||||
phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb,
|
||||
phys_enc->hw_pp->idx);
|
||||
|
||||
phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
|
||||
|
@ -249,7 +240,7 @@ static int dpu_encoder_phys_wb_atomic_check(
|
|||
const struct drm_display_mode *mode = &crtc_state->mode;
|
||||
|
||||
DPU_DEBUG("[atomic_check:%d, \"%s\",%d,%d]\n",
|
||||
phys_enc->wb_idx, mode->name, mode->hdisplay, mode->vdisplay);
|
||||
phys_enc->hw_wb->idx, mode->name, mode->hdisplay, mode->vdisplay);
|
||||
|
||||
if (!conn_state || !conn_state->connector) {
|
||||
DPU_ERROR("invalid connector state\n");
|
||||
|
@ -570,7 +561,7 @@ static void dpu_encoder_phys_wb_destroy(struct dpu_encoder_phys *phys_enc)
|
|||
if (!phys_enc)
|
||||
return;
|
||||
|
||||
DPU_DEBUG("[wb:%d]\n", phys_enc->wb_idx - WB_0);
|
||||
DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
|
||||
|
||||
kfree(phys_enc);
|
||||
}
|
||||
|
@ -693,53 +684,32 @@ struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
|
|||
{
|
||||
struct dpu_encoder_phys *phys_enc = NULL;
|
||||
struct dpu_encoder_phys_wb *wb_enc = NULL;
|
||||
int ret = 0;
|
||||
int i;
|
||||
|
||||
DPU_DEBUG("\n");
|
||||
|
||||
if (!p || !p->parent) {
|
||||
DPU_ERROR("invalid params\n");
|
||||
ret = -EINVAL;
|
||||
goto fail_alloc;
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
wb_enc = kzalloc(sizeof(*wb_enc), GFP_KERNEL);
|
||||
if (!wb_enc) {
|
||||
DPU_ERROR("failed to allocate wb phys_enc enc\n");
|
||||
ret = -ENOMEM;
|
||||
goto fail_alloc;
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
phys_enc = &wb_enc->base;
|
||||
phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
|
||||
phys_enc->wb_idx = p->wb_idx;
|
||||
|
||||
dpu_encoder_phys_init(phys_enc, p);
|
||||
|
||||
dpu_encoder_phys_wb_init_ops(&phys_enc->ops);
|
||||
phys_enc->parent = p->parent;
|
||||
phys_enc->dpu_kms = p->dpu_kms;
|
||||
phys_enc->split_role = p->split_role;
|
||||
phys_enc->intf_mode = INTF_MODE_WB_LINE;
|
||||
phys_enc->wb_idx = p->wb_idx;
|
||||
phys_enc->enc_spinlock = p->enc_spinlock;
|
||||
|
||||
atomic_set(&wb_enc->wbirq_refcount, 0);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
|
||||
phys_enc->irq[i] = -EINVAL;
|
||||
|
||||
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
|
||||
atomic_set(&phys_enc->vblank_refcount, 0);
|
||||
wb_enc->wb_done_timeout_cnt = 0;
|
||||
|
||||
init_waitqueue_head(&phys_enc->pending_kickoff_wq);
|
||||
phys_enc->enable_state = DPU_ENC_DISABLED;
|
||||
|
||||
DPU_DEBUG("Created dpu_encoder_phys for wb %d\n",
|
||||
phys_enc->wb_idx);
|
||||
DPU_DEBUG("Created dpu_encoder_phys for wb %d\n", phys_enc->hw_wb->idx);
|
||||
|
||||
return phys_enc;
|
||||
|
||||
fail_alloc:
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
|
||||
|
@ -13,7 +13,7 @@
|
|||
#include "dpu_kms.h"
|
||||
|
||||
#define VIG_BASE_MASK \
|
||||
(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
|
||||
(BIT(DPU_SSPP_QOS) |\
|
||||
BIT(DPU_SSPP_CDP) |\
|
||||
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
|
||||
|
||||
|
@ -39,7 +39,7 @@
|
|||
#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL))
|
||||
|
||||
#define DMA_MSM8998_MASK \
|
||||
(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
|
||||
(BIT(DPU_SSPP_QOS) |\
|
||||
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
|
||||
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
|
||||
|
||||
|
@ -50,7 +50,7 @@
|
|||
(VIG_SC7280_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
|
||||
|
||||
#define DMA_SDM845_MASK \
|
||||
(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
|
||||
(BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
|
||||
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
|
||||
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
|
||||
|
||||
|
@ -75,11 +75,15 @@
|
|||
#define MIXER_QCM2290_MASK \
|
||||
(BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
|
||||
|
||||
#define PINGPONG_SDM845_MASK BIT(DPU_PINGPONG_DITHER)
|
||||
#define PINGPONG_SDM845_MASK \
|
||||
(BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_TE) | BIT(DPU_PINGPONG_DSC))
|
||||
|
||||
#define PINGPONG_SDM845_SPLIT_MASK \
|
||||
#define PINGPONG_SDM845_TE2_MASK \
|
||||
(PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
|
||||
|
||||
#define PINGPONG_SM8150_MASK \
|
||||
(BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC))
|
||||
|
||||
#define CTL_SC7280_MASK \
|
||||
(BIT(DPU_CTL_ACTIVE_CFG) | \
|
||||
BIT(DPU_CTL_FETCH_ACTIVE) | \
|
||||
|
@ -91,16 +95,17 @@
|
|||
|
||||
#define MERGE_3D_SM8150_MASK (0)
|
||||
|
||||
#define DSPP_MSM8998_MASK BIT(DPU_DSPP_PCC) | BIT(DPU_DSPP_GC)
|
||||
|
||||
#define DSPP_SC7180_MASK BIT(DPU_DSPP_PCC)
|
||||
|
||||
#define INTF_SDM845_MASK (0)
|
||||
|
||||
#define INTF_SC7180_MASK \
|
||||
(BIT(DPU_INTF_INPUT_CTRL) | BIT(DPU_INTF_TE) | BIT(DPU_INTF_STATUS_SUPPORTED))
|
||||
(BIT(DPU_INTF_INPUT_CTRL) | \
|
||||
BIT(DPU_INTF_TE) | \
|
||||
BIT(DPU_INTF_STATUS_SUPPORTED) | \
|
||||
BIT(DPU_DATA_HCTL_EN))
|
||||
|
||||
#define INTF_SC7280_MASK INTF_SC7180_MASK | BIT(DPU_DATA_HCTL_EN)
|
||||
#define INTF_SC7280_MASK (INTF_SC7180_MASK | BIT(DPU_INTF_DATA_COMPRESS))
|
||||
|
||||
#define WB_SM8250_MASK (BIT(DPU_WB_LINE_MODE) | \
|
||||
BIT(DPU_WB_UBWC) | \
|
||||
|
@ -252,8 +257,6 @@ static const uint32_t wb2_formats[] = {
|
|||
.maxdwnscale = MAX_DOWNSCALE_RATIO, \
|
||||
.maxupscale = MAX_UPSCALE_RATIO, \
|
||||
.smart_dma_priority = sdma_pri, \
|
||||
.src_blk = {.name = STRCAT("sspp_src_", num), \
|
||||
.id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
|
||||
.scaler_blk = {.name = STRCAT("sspp_scaler", num), \
|
||||
.id = qseed_ver, \
|
||||
.base = 0xa00, .len = 0xa0,}, \
|
||||
|
@ -272,8 +275,6 @@ static const uint32_t wb2_formats[] = {
|
|||
.maxdwnscale = MAX_DOWNSCALE_RATIO, \
|
||||
.maxupscale = MAX_UPSCALE_RATIO, \
|
||||
.smart_dma_priority = sdma_pri, \
|
||||
.src_blk = {.name = STRCAT("sspp_src_", num), \
|
||||
.id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
|
||||
.scaler_blk = {.name = STRCAT("sspp_scaler", num), \
|
||||
.id = qseed_ver, \
|
||||
.base = 0xa00, .len = 0xa0,}, \
|
||||
|
@ -292,8 +293,6 @@ static const uint32_t wb2_formats[] = {
|
|||
.maxdwnscale = SSPP_UNITY_SCALE, \
|
||||
.maxupscale = SSPP_UNITY_SCALE, \
|
||||
.smart_dma_priority = sdma_pri, \
|
||||
.src_blk = {.name = STRCAT("sspp_src_", num), \
|
||||
.id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
|
||||
.format_list = plane_formats, \
|
||||
.num_formats = ARRAY_SIZE(plane_formats), \
|
||||
.virt_format_list = plane_formats, \
|
||||
|
@ -375,8 +374,6 @@ static const struct dpu_sspp_sub_blks sm8550_dma_sblk_5 = _DMA_SBLK("13", 6);
|
|||
.maxdwnscale = SSPP_UNITY_SCALE, \
|
||||
.maxupscale = SSPP_UNITY_SCALE, \
|
||||
.smart_dma_priority = sdma_pri, \
|
||||
.src_blk = {.name = STRCAT("sspp_src_", num), \
|
||||
.id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
|
||||
.format_list = plane_formats_yuv, \
|
||||
.num_formats = ARRAY_SIZE(plane_formats_yuv), \
|
||||
.virt_format_list = plane_formats, \
|
||||
|
@ -449,13 +446,6 @@ static const struct dpu_lm_sub_blks qcm2290_lm_sblk = {
|
|||
static const struct dpu_dspp_sub_blks msm8998_dspp_sblk = {
|
||||
.pcc = {.id = DPU_DSPP_PCC, .base = 0x1700,
|
||||
.len = 0x90, .version = 0x10007},
|
||||
.gc = { .id = DPU_DSPP_GC, .base = 0x17c0,
|
||||
.len = 0x90, .version = 0x10007},
|
||||
};
|
||||
|
||||
static const struct dpu_dspp_sub_blks sc7180_dspp_sblk = {
|
||||
.pcc = {.id = DPU_DSPP_PCC, .base = 0x1700,
|
||||
.len = 0x90, .version = 0x10000},
|
||||
};
|
||||
|
||||
static const struct dpu_dspp_sub_blks sm8150_dspp_sblk = {
|
||||
|
@ -501,21 +491,11 @@ static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = {
|
|||
.intr_done = _done, \
|
||||
.intr_rdptr = _rdptr, \
|
||||
}
|
||||
#define PP_BLK_TE(_name, _id, _base, _merge_3d, _sblk, _done, _rdptr) \
|
||||
#define PP_BLK(_name, _id, _base, _features, _merge_3d, _sblk, _done, _rdptr) \
|
||||
{\
|
||||
.name = _name, .id = _id, \
|
||||
.base = _base, .len = 0xd4, \
|
||||
.features = PINGPONG_SDM845_SPLIT_MASK, \
|
||||
.merge_3d = _merge_3d, \
|
||||
.sblk = &_sblk, \
|
||||
.intr_done = _done, \
|
||||
.intr_rdptr = _rdptr, \
|
||||
}
|
||||
#define PP_BLK(_name, _id, _base, _merge_3d, _sblk, _done, _rdptr) \
|
||||
{\
|
||||
.name = _name, .id = _id, \
|
||||
.base = _base, .len = 0xd4, \
|
||||
.features = PINGPONG_SDM845_MASK, \
|
||||
.features = _features, \
|
||||
.merge_3d = _merge_3d, \
|
||||
.sblk = &_sblk, \
|
||||
.intr_done = _done, \
|
||||
|
@ -528,7 +508,7 @@ static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = {
|
|||
#define MERGE_3D_BLK(_name, _id, _base) \
|
||||
{\
|
||||
.name = _name, .id = _id, \
|
||||
.base = _base, .len = 0x100, \
|
||||
.base = _base, .len = 0x8, \
|
||||
.features = MERGE_3D_SM8150_MASK, \
|
||||
.sblk = NULL \
|
||||
}
|
||||
|
@ -536,6 +516,16 @@ static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = {
|
|||
/*************************************************************
|
||||
* DSC sub blocks config
|
||||
*************************************************************/
|
||||
static const struct dpu_dsc_sub_blks dsc_sblk_0 = {
|
||||
.enc = {.base = 0x100, .len = 0x100},
|
||||
.ctl = {.base = 0xF00, .len = 0x10},
|
||||
};
|
||||
|
||||
static const struct dpu_dsc_sub_blks dsc_sblk_1 = {
|
||||
.enc = {.base = 0x200, .len = 0x100},
|
||||
.ctl = {.base = 0xF80, .len = 0x10},
|
||||
};
|
||||
|
||||
#define DSC_BLK(_name, _id, _base, _features) \
|
||||
{\
|
||||
.name = _name, .id = _id, \
|
||||
|
@@ -543,10 +533,18 @@ static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = {
 	.features = _features, \
 	}
 
+#define DSC_BLK_1_2(_name, _id, _base, _len, _features, _sblk) \
+	{\
+	.name = _name, .id = _id, \
+	.base = _base, .len = _len, \
+	.features = BIT(DPU_DSC_HW_REV_1_2) | _features, \
+	.sblk = &_sblk, \
+	}
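For illustration only, a DSC 1.2 catalog entry built with the new macro might look like the following; the instance names, base/length values and feature bits are placeholders, not taken from any SoC header in this change.

/* placeholder values, for illustration only */
static const struct dpu_dsc_cfg example_dsc[] = {
	DSC_BLK_1_2("dce_0_0", DSC_0, 0x80000, 0x29c, 0, dsc_sblk_0),
	DSC_BLK_1_2("dce_0_1", DSC_1, 0x80000, 0x29c, 0, dsc_sblk_1),
};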
|
||||
|
||||
/*************************************************************
|
||||
* INTF sub blocks config
|
||||
*************************************************************/
|
||||
#define INTF_BLK(_name, _id, _base, _len, _type, _ctrl_id, _progfetch, _features, _reg, _underrun_bit, _vsync_bit) \
|
||||
#define INTF_BLK(_name, _id, _base, _len, _type, _ctrl_id, _progfetch, _features, _underrun, _vsync) \
|
||||
{\
|
||||
.name = _name, .id = _id, \
|
||||
.base = _base, .len = _len, \
|
||||
|
@ -554,8 +552,23 @@ static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = {
|
|||
.type = _type, \
|
||||
.controller_id = _ctrl_id, \
|
||||
.prog_fetch_lines_worst_case = _progfetch, \
|
||||
.intr_underrun = DPU_IRQ_IDX(_reg, _underrun_bit), \
|
||||
.intr_vsync = DPU_IRQ_IDX(_reg, _vsync_bit), \
|
||||
.intr_underrun = _underrun, \
|
||||
.intr_vsync = _vsync, \
|
||||
.intr_tear_rd_ptr = -1, \
|
||||
}
|
||||
|
||||
/* DSI Interface sub-block with TEAR registers (since DPU 5.0.0) */
|
||||
#define INTF_BLK_DSI_TE(_name, _id, _base, _len, _type, _ctrl_id, _progfetch, _features, _underrun, _vsync, _tear_rd_ptr) \
|
||||
{\
|
||||
.name = _name, .id = _id, \
|
||||
.base = _base, .len = _len, \
|
||||
.features = _features, \
|
||||
.type = _type, \
|
||||
.controller_id = _ctrl_id, \
|
||||
.prog_fetch_lines_worst_case = _progfetch, \
|
||||
.intr_underrun = _underrun, \
|
||||
.intr_vsync = _vsync, \
|
||||
.intr_tear_rd_ptr = _tear_rd_ptr, \
|
||||
}
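As a hedged illustration of the new DSI-with-TE form, an entry might be written as below; every numeric value and interrupt-register name here is a placeholder, not taken from a catalog header in this change.

/* placeholder values, for illustration only */
INTF_BLK_DSI_TE("intf_1", INTF_1, 0x6b000, 0x300, INTF_DSI, 0, 24,
		INTF_SC7180_MASK,
		DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
		DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
		DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2)),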
|
||||
|
||||
/*************************************************************
|
||||
|
@ -650,46 +663,6 @@ static const struct dpu_vbif_cfg sdm845_vbif[] = {
|
|||
},
|
||||
};
|
||||
|
||||
static const struct dpu_reg_dma_cfg sc8280xp_regdma = {
|
||||
.base = 0x0,
|
||||
.version = 0x00020000,
|
||||
.trigger_sel_off = 0x119c,
|
||||
.xin_id = 7,
|
||||
.clk_ctrl = DPU_CLK_CTRL_REG_DMA,
|
||||
};
|
||||
|
||||
static const struct dpu_reg_dma_cfg sdm845_regdma = {
|
||||
.base = 0x0, .version = 0x1, .trigger_sel_off = 0x119c
|
||||
};
|
||||
|
||||
static const struct dpu_reg_dma_cfg sm8150_regdma = {
|
||||
.base = 0x0, .version = 0x00010001, .trigger_sel_off = 0x119c
|
||||
};
|
||||
|
||||
static const struct dpu_reg_dma_cfg sm8250_regdma = {
|
||||
.base = 0x0,
|
||||
.version = 0x00010002,
|
||||
.trigger_sel_off = 0x119c,
|
||||
.xin_id = 7,
|
||||
.clk_ctrl = DPU_CLK_CTRL_REG_DMA,
|
||||
};
|
||||
|
||||
static const struct dpu_reg_dma_cfg sm8350_regdma = {
|
||||
.base = 0x400,
|
||||
.version = 0x00020000,
|
||||
.trigger_sel_off = 0x119c,
|
||||
.xin_id = 7,
|
||||
.clk_ctrl = DPU_CLK_CTRL_REG_DMA,
|
||||
};
|
||||
|
||||
static const struct dpu_reg_dma_cfg sm8450_regdma = {
|
||||
.base = 0x0,
|
||||
.version = 0x00020000,
|
||||
.trigger_sel_off = 0x119c,
|
||||
.xin_id = 7,
|
||||
.clk_ctrl = DPU_CLK_CTRL_REG_DMA,
|
||||
};
|
||||
|
||||
/*************************************************************
|
||||
* PERF data config
|
||||
*************************************************************/
|
||||
|
@ -734,6 +707,10 @@ static const struct dpu_qos_lut_entry sc7180_qos_linear[] = {
|
|||
{.fl = 0, .lut = 0x0011222222335777},
|
||||
};
|
||||
|
||||
static const struct dpu_qos_lut_entry sm6350_qos_linear_macrotile[] = {
|
||||
{.fl = 0, .lut = 0x0011223445566777 },
|
||||
};
|
||||
|
||||
static const struct dpu_qos_lut_entry sm8150_qos_linear[] = {
|
||||
{.fl = 0, .lut = 0x0011222222223357 },
|
||||
};
|
||||
|
@ -789,7 +766,9 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
|
|||
#include "catalog/dpu_6_0_sm8250.h"
|
||||
#include "catalog/dpu_6_2_sc7180.h"
|
||||
#include "catalog/dpu_6_3_sm6115.h"
|
||||
#include "catalog/dpu_6_4_sm6350.h"
|
||||
#include "catalog/dpu_6_5_qcm2290.h"
|
||||
#include "catalog/dpu_6_9_sm6375.h"
|
||||
|
||||
#include "catalog/dpu_7_0_sm8350.h"
|
||||
#include "catalog/dpu_7_2_sc7280.h"
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
* Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
|
@ -48,6 +48,8 @@ enum {
|
|||
* @DPU_MDP_UBWC_1_5, Universal Bandwidth compression version 1.5
|
||||
* @DPU_MDP_PERIPH_0_REMOVED Indicates that access to periph top0 block results
|
||||
* in a failure
|
||||
* @DPU_MDP_VSYNC_SEL Enables vsync source selection via MDP_VSYNC_SEL register
|
||||
* (moved into INTF block since DPU 5.0.0)
|
||||
* @DPU_MDP_MAX Maximum value
|
||||
|
||||
*/
|
||||
|
@ -59,12 +61,12 @@ enum {
|
|||
DPU_MDP_UBWC_1_5,
|
||||
DPU_MDP_AUDIO_SELECT,
|
||||
DPU_MDP_PERIPH_0_REMOVED,
|
||||
DPU_MDP_VSYNC_SEL,
|
||||
DPU_MDP_MAX
|
||||
};
|
||||
|
||||
/**
|
||||
* SSPP sub-blocks/features
|
||||
* @DPU_SSPP_SRC Src and fetch part of the pipes,
|
||||
* @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
|
||||
* @DPU_SSPP_SCALER_QSEED3, QSEED3 alogorithm support
|
||||
* @DPU_SSPP_SCALER_QSEED3LITE, QSEED3 Lite alogorithm support
|
||||
|
@ -85,8 +87,7 @@ enum {
|
|||
* @DPU_SSPP_MAX maximum value
|
||||
*/
|
||||
enum {
|
||||
DPU_SSPP_SRC = 0x1,
|
||||
DPU_SSPP_SCALER_QSEED2,
|
||||
DPU_SSPP_SCALER_QSEED2 = 0x1,
|
||||
DPU_SSPP_SCALER_QSEED3,
|
||||
DPU_SSPP_SCALER_QSEED3LITE,
|
||||
DPU_SSPP_SCALER_QSEED4,
|
||||
|
@ -127,13 +128,9 @@ enum {
|
|||
/**
|
||||
* DSPP sub-blocks
|
||||
* @DPU_DSPP_PCC Panel color correction block
|
||||
* @DPU_DSPP_GC Gamma correction block
|
||||
* @DPU_DSPP_IGC Inverse gamma correction block
|
||||
*/
|
||||
enum {
|
||||
DPU_DSPP_PCC = 0x1,
|
||||
DPU_DSPP_GC,
|
||||
DPU_DSPP_IGC,
|
||||
DPU_DSPP_MAX
|
||||
};
|
||||
|
||||
|
@ -143,7 +140,8 @@ enum {
|
|||
* @DPU_PINGPONG_TE2 Additional tear check block for split pipes
|
||||
* @DPU_PINGPONG_SPLIT PP block supports split fifo
|
||||
* @DPU_PINGPONG_SLAVE PP block is a suitable slave for split fifo
|
||||
* @DPU_PINGPONG_DITHER, Dither blocks
|
||||
* @DPU_PINGPONG_DITHER Dither blocks
|
||||
* @DPU_PINGPONG_DSC PP block supports DSC
|
||||
* @DPU_PINGPONG_MAX
|
||||
*/
|
||||
enum {
|
||||
|
@ -152,6 +150,7 @@ enum {
|
|||
DPU_PINGPONG_SPLIT,
|
||||
DPU_PINGPONG_SLAVE,
|
||||
DPU_PINGPONG_DITHER,
|
||||
DPU_PINGPONG_DSC,
|
||||
DPU_PINGPONG_MAX
|
||||
};
|
||||
|
||||
|
@ -182,6 +181,7 @@ enum {
|
|||
* @DPU_DATA_HCTL_EN Allows data to be transferred at different rate
|
||||
* than video timing
|
||||
* @DPU_INTF_STATUS_SUPPORTED INTF block has INTF_STATUS register
|
||||
* @DPU_INTF_DATA_COMPRESS INTF block has DATA_COMPRESS register
|
||||
* @DPU_INTF_MAX
|
||||
*/
|
||||
enum {
|
||||
|
@ -189,6 +189,7 @@ enum {
|
|||
DPU_INTF_TE,
|
||||
DPU_DATA_HCTL_EN,
|
||||
DPU_INTF_STATUS_SUPPORTED,
|
||||
DPU_INTF_DATA_COMPRESS,
|
||||
DPU_INTF_MAX
|
||||
};
|
||||
|
||||
|
@ -241,12 +242,18 @@ enum {
|
|||
};
|
||||
|
||||
/**
|
||||
* DSC features
|
||||
* DSC sub-blocks/features
|
||||
* @DPU_DSC_OUTPUT_CTRL Configure which PINGPONG block gets
|
||||
* the pixel output from this DSC.
|
||||
* @DPU_DSC_HW_REV_1_2 DSC block supports DSC 1.1 and 1.2
|
||||
* @DPU_DSC_NATIVE_42x_EN Supports NATIVE_422_EN and NATIVE_420_EN encoding
|
||||
* @DPU_DSC_MAX
|
||||
*/
|
||||
enum {
|
||||
DPU_DSC_OUTPUT_CTRL = 0x1,
|
||||
DPU_DSC_HW_REV_1_2,
|
||||
DPU_DSC_NATIVE_42x_EN,
|
||||
DPU_DSC_MAX
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -278,14 +285,6 @@ enum {
|
|||
u32 base; \
|
||||
u32 len
|
||||
|
||||
/**
|
||||
* struct dpu_src_blk: SSPP part of the source pipes
|
||||
* @info: HW register and features supported by this sub-blk
|
||||
*/
|
||||
struct dpu_src_blk {
|
||||
DPU_HW_SUBBLK_INFO;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct dpu_scaler_blk: Scaler information
|
||||
* @info: HW register and features supported by this sub-blk
|
||||
|
@ -310,6 +309,14 @@ struct dpu_pp_blk {
|
|||
u32 version;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct dpu_dsc_blk - DSC Encoder sub-blk information
|
||||
* @info: HW register and features supported by this sub-blk
|
||||
*/
|
||||
struct dpu_dsc_blk {
|
||||
DPU_HW_SUBBLK_INFO;
|
||||
};
|
||||
|
||||
/**
|
||||
* enum dpu_qos_lut_usage - define QoS LUT use cases
|
||||
*/
|
||||
|
@ -385,20 +392,13 @@ struct dpu_caps {
|
|||
/**
|
||||
* struct dpu_sspp_sub_blks : SSPP sub-blocks
|
||||
* common: Pointer to common configurations shared by sub blocks
|
||||
* @creq_vblank: creq priority during vertical blanking
|
||||
* @danger_vblank: danger priority during vertical blanking
|
||||
* @maxdwnscale: max downscale ratio supported(without DECIMATION)
|
||||
* @maxupscale: maxupscale ratio supported
|
||||
* @smart_dma_priority: hw priority of rect1 of multirect pipe
|
||||
* @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
|
||||
* @qseed_ver: qseed version
|
||||
* @src_blk:
|
||||
* @scaler_blk:
|
||||
* @csc_blk:
|
||||
* @hsic:
|
||||
* @memcolor:
|
||||
* @pcc_blk:
|
||||
* @igc_blk:
|
||||
* @format_list: Pointer to list of supported formats
|
||||
* @num_formats: Number of supported formats
|
||||
* @virt_format_list: Pointer to list of supported formats for virtual planes
|
||||
|
@ -406,20 +406,13 @@ struct dpu_caps {
|
|||
* @dpu_rotation_cfg: inline rotation configuration
|
||||
*/
|
||||
struct dpu_sspp_sub_blks {
|
||||
u32 creq_vblank;
|
||||
u32 danger_vblank;
|
||||
u32 maxdwnscale;
|
||||
u32 maxupscale;
|
||||
u32 smart_dma_priority;
|
||||
u32 max_per_pipe_bw;
|
||||
u32 qseed_ver;
|
||||
struct dpu_src_blk src_blk;
|
||||
struct dpu_scaler_blk scaler_blk;
|
||||
struct dpu_pp_blk csc_blk;
|
||||
struct dpu_pp_blk hsic_blk;
|
||||
struct dpu_pp_blk memcolor_blk;
|
||||
struct dpu_pp_blk pcc_blk;
|
||||
struct dpu_pp_blk igc_blk;
|
||||
|
||||
const u32 *format_list;
|
||||
u32 num_formats;
|
||||
|
@ -433,22 +426,18 @@ struct dpu_sspp_sub_blks {
|
|||
* @maxwidth: Max pixel width supported by this mixer
|
||||
* @maxblendstages: Max number of blend-stages supported
|
||||
* @blendstage_base: Blend-stage register base offset
|
||||
* @gc: gamma correction block
|
||||
*/
|
||||
struct dpu_lm_sub_blks {
|
||||
u32 maxwidth;
|
||||
u32 maxblendstages;
|
||||
u32 blendstage_base[MAX_BLOCKS];
|
||||
struct dpu_pp_blk gc;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct dpu_dspp_sub_blks: Information of DSPP block
|
||||
* @gc : gamma correction block
|
||||
* @pcc: pixel color correction block
|
||||
*/
|
||||
struct dpu_dspp_sub_blks {
|
||||
struct dpu_pp_blk gc;
|
||||
struct dpu_pp_blk pcc;
|
||||
};
|
||||
|
||||
|
@ -458,6 +447,16 @@ struct dpu_pingpong_sub_blks {
|
|||
struct dpu_pp_blk dither;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct dpu_dsc_sub_blks - DSC sub-blks
|
||||
* @enc: DSC encoder sub-block
|
||||
* @ctl: DSC controller sub-block
|
||||
*/
|
||||
struct dpu_dsc_sub_blks {
|
||||
struct dpu_dsc_blk enc;
|
||||
struct dpu_dsc_blk ctl;
|
||||
};
|
||||
|
||||
/**
|
||||
* dpu_clk_ctrl_type - Defines top level clock control signals
|
||||
*/
|
||||
|
@ -554,7 +553,7 @@ struct dpu_sspp_cfg {
|
|||
* @base register offset of this block
|
||||
* @features bit mask identifying sub-blocks/features
|
||||
* @sblk: LM Sub-blocks information
|
||||
* @pingpong: ID of connected PingPong, PINGPONG_MAX if unsupported
|
||||
* @pingpong: ID of connected PingPong, PINGPONG_NONE if unsupported
|
||||
* @lm_pair_mask: Bitmask of LMs that can be controlled by same CTL
|
||||
*/
|
||||
struct dpu_lm_cfg {
|
||||
|
@ -612,10 +611,13 @@ struct dpu_merge_3d_cfg {
|
|||
* struct dpu_dsc_cfg - information of DSC blocks
|
||||
* @id enum identifying this block
|
||||
* @base register offset of this block
|
||||
* @len: length of hardware block
|
||||
* @features bit mask identifying sub-blocks/features
|
||||
* @sblk: sub-blocks information
|
||||
*/
|
||||
struct dpu_dsc_cfg {
|
||||
DPU_HW_BLK_INFO;
|
||||
const struct dpu_dsc_sub_blks *sblk;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -628,6 +630,7 @@ struct dpu_dsc_cfg {
|
|||
* @prog_fetch_lines_worst_case Worst case latency num lines needed to prefetch
|
||||
* @intr_underrun: index for INTF underrun interrupt
|
||||
* @intr_vsync: index for INTF VSYNC interrupt
|
||||
* @intr_tear_rd_ptr: Index for INTF TEAR_RD_PTR interrupt
|
||||
*/
|
||||
struct dpu_intf_cfg {
|
||||
DPU_HW_BLK_INFO;
|
||||
|
@ -636,6 +639,7 @@ struct dpu_intf_cfg {
|
|||
u32 prog_fetch_lines_worst_case;
|
||||
s32 intr_underrun;
|
||||
s32 intr_vsync;
|
||||
s32 intr_tear_rd_ptr;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -720,21 +724,6 @@ struct dpu_vbif_cfg {
|
|||
u32 memtype_count;
|
||||
u32 memtype[MAX_XIN_COUNT];
|
||||
};
|
||||
/**
|
||||
* struct dpu_reg_dma_cfg - information of lut dma blocks
|
||||
* @id enum identifying this block
|
||||
* @base register offset of this block
|
||||
* @features bit mask identifying sub-blocks/features
|
||||
* @version version of lutdma hw block
|
||||
* @trigger_sel_off offset to trigger select registers of lutdma
|
||||
*/
|
||||
struct dpu_reg_dma_cfg {
|
||||
DPU_HW_BLK_INFO;
|
||||
u32 version;
|
||||
u32 trigger_sel_off;
|
||||
u32 xin_id;
|
||||
enum dpu_clk_ctrl_type clk_ctrl;
|
||||
};
|
||||
|
||||
/**
|
||||
* Define CDP use cases
|
||||
|
@ -850,9 +839,6 @@ struct dpu_mdss_cfg {
|
|||
u32 wb_count;
|
||||
const struct dpu_wb_cfg *wb;
|
||||
|
||||
u32 reg_dma_count;
|
||||
const struct dpu_reg_dma_cfg *dma_cfg;
|
||||
|
||||
u32 ad_count;
|
||||
|
||||
u32 dspp_count;
|
||||
|
@ -875,7 +861,9 @@ extern const struct dpu_mdss_cfg dpu_sc8180x_cfg;
|
|||
extern const struct dpu_mdss_cfg dpu_sm8250_cfg;
|
||||
extern const struct dpu_mdss_cfg dpu_sc7180_cfg;
|
||||
extern const struct dpu_mdss_cfg dpu_sm6115_cfg;
|
||||
extern const struct dpu_mdss_cfg dpu_sm6350_cfg;
|
||||
extern const struct dpu_mdss_cfg dpu_qcm2290_cfg;
|
||||
extern const struct dpu_mdss_cfg dpu_sm6375_cfg;
|
||||
extern const struct dpu_mdss_cfg dpu_sm8350_cfg;
|
||||
extern const struct dpu_mdss_cfg dpu_sc7280_cfg;
|
||||
extern const struct dpu_mdss_cfg dpu_sc8280xp_cfg;
|
||||
|
|
|
@ -53,23 +53,6 @@ static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
|
|||
CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
|
||||
1, 2, 3, CTL_INVALID_BIT, CTL_INVALID_BIT};
|
||||
|
||||
static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
|
||||
const struct dpu_mdss_cfg *m,
|
||||
void __iomem *addr,
|
||||
struct dpu_hw_blk_reg_map *b)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < m->ctl_count; i++) {
|
||||
if (ctl == m->ctl[i].id) {
|
||||
b->blk_addr = addr + m->ctl[i].base;
|
||||
b->log_mask = DPU_DBG_MASK_CTL;
|
||||
return &m->ctl[i];
|
||||
}
|
||||
}
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
|
||||
enum dpu_lm lm)
|
||||
{
|
||||
|
@ -117,6 +100,10 @@ static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
|
|||
trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
|
||||
dpu_hw_ctl_get_flush_register(ctx));
|
||||
ctx->pending_flush_mask = 0x0;
|
||||
ctx->pending_intf_flush_mask = 0;
|
||||
ctx->pending_wb_flush_mask = 0;
|
||||
ctx->pending_merge_3d_flush_mask = 0;
|
||||
ctx->pending_dsc_flush_mask = 0;
|
||||
|
||||
memset(ctx->pending_dspp_flush_mask, 0,
|
||||
sizeof(ctx->pending_dspp_flush_mask));
|
||||
|
@@ -156,6 +143,11 @@ static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
 				CTL_DSPP_n_FLUSH(dspp - DSPP_0),
 				ctx->pending_dspp_flush_mask[dspp - DSPP_0]);
 	}
 
+	if (ctx->pending_flush_mask & BIT(DSC_IDX))
+		DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH,
+			      ctx->pending_dsc_flush_mask);
+
 	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
 }
|
||||
|
||||
|
@@ -302,6 +294,13 @@ static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
 	ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
 }
 
+static void dpu_hw_ctl_update_pending_flush_dsc_v1(struct dpu_hw_ctl *ctx,
+	enum dpu_dsc dsc_num)
+{
+	ctx->pending_dsc_flush_mask |= BIT(dsc_num - DSC_0);
+	ctx->pending_flush_mask |= BIT(DSC_IDX);
+}
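The new callback only accumulates state; nothing reaches the hardware until trigger_flush writes CTL_DSC_FLUSH and then CTL_FLUSH. A small sketch of the expected calling sequence from an encoder follows; the caller context is illustrative, the ops and masks are from this change.

/* illustrative caller */
if (ctl->ops.update_pending_flush_dsc) {
	ctl->ops.update_pending_flush_dsc(ctl, DSC_0); /* sets pending_dsc_flush_mask and DSC_IDX */
	ctl->ops.trigger_flush(ctl);                   /* writes CTL_DSC_FLUSH, then CTL_FLUSH */
}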
|
||||
|
||||
static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
|
||||
enum dpu_dspp dspp, u32 dspp_sub_blk)
|
||||
{
|
||||
|
@ -330,15 +329,9 @@ static void dpu_hw_ctl_update_pending_flush_dspp_sub_blocks(
|
|||
return;
|
||||
|
||||
switch (dspp_sub_blk) {
|
||||
case DPU_DSPP_IGC:
|
||||
ctx->pending_dspp_flush_mask[dspp - DSPP_0] |= BIT(2);
|
||||
break;
|
||||
case DPU_DSPP_PCC:
|
||||
ctx->pending_dspp_flush_mask[dspp - DSPP_0] |= BIT(4);
|
||||
break;
|
||||
case DPU_DSPP_GC:
|
||||
ctx->pending_dspp_flush_mask[dspp - DSPP_0] |= BIT(5);
|
||||
break;
|
||||
default:
|
||||
return;
|
||||
}
|
||||
|
@ -519,9 +512,6 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
|
|||
if ((test_bit(DPU_CTL_VM_CFG, &ctx->caps->features)))
|
||||
mode_sel = CTL_DEFAULT_GROUP_ID << 28;
|
||||
|
||||
if (cfg->dsc)
|
||||
DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH, cfg->dsc);
|
||||
|
||||
if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
|
||||
mode_sel |= BIT(17);
|
||||
|
||||
|
@ -541,10 +531,9 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
|
|||
if (cfg->merge_3d)
|
||||
DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
|
||||
BIT(cfg->merge_3d - MERGE_3D_0));
|
||||
if (cfg->dsc) {
|
||||
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, DSC_IDX);
|
||||
|
||||
if (cfg->dsc)
|
||||
DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
|
||||
}
|
||||
}
|
||||
|
||||
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
|
||||
|
@ -587,6 +576,7 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
|
|||
u32 intf_active = 0;
|
||||
u32 wb_active = 0;
|
||||
u32 merge3d_active = 0;
|
||||
u32 dsc_active;
|
||||
|
||||
/*
|
||||
* This API resets each portion of the CTL path namely,
|
||||
|
@ -616,6 +606,12 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
|
|||
wb_active &= ~BIT(cfg->wb - WB_0);
|
||||
DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
|
||||
}
|
||||
|
||||
if (cfg->dsc) {
|
||||
dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
|
||||
dsc_active &= ~cfg->dsc;
|
||||
DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
|
||||
}
|
||||
}
|
||||
|
||||
static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
|
||||
|
@ -647,6 +643,8 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
|
|||
ops->update_pending_flush_merge_3d =
|
||||
dpu_hw_ctl_update_pending_flush_merge_3d_v1;
|
||||
ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
|
||||
ops->update_pending_flush_dsc =
|
||||
dpu_hw_ctl_update_pending_flush_dsc_v1;
|
||||
} else {
|
||||
ops->trigger_flush = dpu_hw_ctl_trigger_flush;
|
||||
ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
|
||||
|
@ -676,29 +674,25 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
|
|||
ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
|
||||
};
|
||||
|
||||
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
|
||||
struct dpu_hw_ctl *dpu_hw_ctl_init(const struct dpu_ctl_cfg *cfg,
|
||||
void __iomem *addr,
|
||||
const struct dpu_mdss_cfg *m)
|
||||
u32 mixer_count,
|
||||
const struct dpu_lm_cfg *mixer)
|
||||
{
|
||||
struct dpu_hw_ctl *c;
|
||||
const struct dpu_ctl_cfg *cfg;
|
||||
|
||||
c = kzalloc(sizeof(*c), GFP_KERNEL);
|
||||
if (!c)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
cfg = _ctl_offset(idx, m, addr, &c->hw);
|
||||
if (IS_ERR_OR_NULL(cfg)) {
|
||||
kfree(c);
|
||||
pr_err("failed to create dpu_hw_ctl %d\n", idx);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
c->hw.blk_addr = addr + cfg->base;
|
||||
c->hw.log_mask = DPU_DBG_MASK_CTL;
|
||||
|
||||
c->caps = cfg;
|
||||
_setup_ctl_ops(&c->ops, c->caps->features);
|
||||
c->idx = idx;
|
||||
c->mixer_count = m->mixer_count;
|
||||
c->mixer_hw_caps = m->mixer;
|
||||
c->idx = cfg->id;
|
||||
c->mixer_count = mixer_count;
|
||||
c->mixer_hw_caps = mixer;
|
||||
|
||||
return c;
|
||||
}
|
||||
|
|
|
@ -157,6 +157,15 @@ struct dpu_hw_ctl_ops {
|
|||
void (*update_pending_flush_dspp)(struct dpu_hw_ctl *ctx,
|
||||
enum dpu_dspp blk, u32 dspp_sub_blk);
|
||||
|
||||
/**
|
||||
* OR in the given flushbits to the cached pending_(dsc_)flush_mask
|
||||
* No effect on hardware
|
||||
* @ctx: ctl path ctx pointer
|
||||
* @blk: interface block index
|
||||
*/
|
||||
void (*update_pending_flush_dsc)(struct dpu_hw_ctl *ctx,
|
||||
enum dpu_dsc blk);
|
||||
|
||||
/**
|
||||
* Write the value of the pending_flush_mask to hardware
|
||||
* @ctx : ctl path ctx pointer
|
||||
|
@ -229,6 +238,7 @@ struct dpu_hw_ctl_ops {
|
|||
* @pending_flush_mask: storage for pending ctl_flush managed via ops
|
||||
* @pending_intf_flush_mask: pending INTF flush
|
||||
* @pending_wb_flush_mask: pending WB flush
|
||||
* @pending_dsc_flush_mask: pending DSC flush
|
||||
* @ops: operation list
|
||||
*/
|
||||
struct dpu_hw_ctl {
|
||||
|
@ -245,6 +255,7 @@ struct dpu_hw_ctl {
|
|||
u32 pending_wb_flush_mask;
|
||||
u32 pending_merge_3d_flush_mask;
|
||||
u32 pending_dspp_flush_mask[DSPP_MAX - DSPP_0];
|
||||
u32 pending_dsc_flush_mask;
|
||||
|
||||
/* ops */
|
||||
struct dpu_hw_ctl_ops ops;
|
||||
|
@ -261,15 +272,17 @@ static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
|
|||
}
|
||||
|
||||
/**
|
||||
* dpu_hw_ctl_init(): Initializes the ctl_path hw driver object.
|
||||
* should be called before accessing every ctl path registers.
|
||||
* @idx: ctl_path index for which driver object is required
|
||||
* dpu_hw_ctl_init() - Initializes the ctl_path hw driver object.
|
||||
* Should be called before accessing any ctl_path register.
|
||||
* @cfg: ctl_path catalog entry for which driver object is required
|
||||
* @addr: mapped register io address of MDP
|
||||
* @m : pointer to mdss catalog data
|
||||
* @mixer_count: Number of mixers in @mixer
|
||||
* @mixer: Pointer to an array of Layer Mixers defined in the catalog
|
||||
*/
|
||||
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
|
||||
struct dpu_hw_ctl *dpu_hw_ctl_init(const struct dpu_ctl_cfg *cfg,
|
||||
void __iomem *addr,
|
||||
const struct dpu_mdss_cfg *m);
|
||||
u32 mixer_count,
|
||||
const struct dpu_lm_cfg *mixer);
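/*
 * Sketch of the corresponding caller-side change (assumed, not shown in this
 * hunk): with the catalog entry passed directly, the lookup by index moves to
 * the caller, roughly:
 *
 *	hw = dpu_hw_ctl_init(&cat->ctl[i], mmio, cat->mixer_count, cat->mixer);
 *
 * instead of dpu_hw_ctl_init(cat->ctl[i].id, mmio, cat).
 */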
|
||||
|
||||
/**
|
||||
* dpu_hw_ctl_destroy(): Destroys ctl driver context
@ -3,6 +3,8 @@
|
|||
* Copyright (c) 2020-2022, Linaro Limited
|
||||
*/
|
||||
|
||||
#include <drm/display/drm_dsc_helper.h>
|
||||
|
||||
#include "dpu_kms.h"
|
||||
#include "dpu_hw_catalog.h"
|
||||
#include "dpu_hwio.h"
|
||||
|
@@ -54,9 +56,10 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
if (is_cmd_mode)
initial_lines += 1;

slice_last_group_size = 3 - (dsc->slice_width % 3);
slice_last_group_size = (dsc->slice_width + 2) % 3;

data = (initial_lines << 20);
data |= ((slice_last_group_size - 1) << 18);
data |= (slice_last_group_size << 18);
/* bpp is 6.4 format, 4 LSBs bits are for fractional part */
data |= (dsc->bits_per_pixel << 8);
data |= (dsc->block_pred_enable << 7);
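/*
 * For illustration (not part of the patch): the old and new expressions only
 * agree when slice_width is a multiple of 3. Programmed field value by
 * slice_width % 3:
 *
 *   slice_width % 3   old: (3 - w % 3) - 1   new: (w + 2) % 3
 *        0                    2                    2
 *        1                    1                    0
 *        2                    0                    1
 *
 * so the register contents change only for slices whose width is not a
 * multiple of 3.
 */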
|
||||
|
@ -102,7 +105,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
|
|||
data |= dsc->final_offset;
|
||||
DPU_REG_WRITE(c, DSC_DSC_OFFSET, data);
|
||||
|
||||
det_thresh_flatness = 7 + 2 * (dsc->bits_per_component - 8);
|
||||
det_thresh_flatness = drm_dsc_flatness_det_thresh(dsc);
|
||||
data = det_thresh_flatness << 10;
|
||||
data |= dsc->flatness_max_qp << 5;
|
||||
data |= dsc->flatness_min_qp;
|
||||
|
@ -154,7 +157,6 @@ static void dpu_hw_dsc_config_thresh(struct dpu_hw_dsc *hw_dsc,
|
|||
|
||||
static void dpu_hw_dsc_bind_pingpong_blk(
|
||||
struct dpu_hw_dsc *hw_dsc,
|
||||
bool enable,
|
||||
const enum dpu_pingpong pp)
|
||||
{
|
||||
struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
|
||||
|
@ -163,36 +165,19 @@ static void dpu_hw_dsc_bind_pingpong_blk(
|
|||
|
||||
dsc_ctl_offset = DSC_CTL(hw_dsc->idx);
|
||||
|
||||
if (enable)
|
||||
if (pp)
|
||||
mux_cfg = (pp - PINGPONG_0) & 0x7;
|
||||
|
||||
DRM_DEBUG_KMS("%s dsc:%d %s pp:%d\n",
|
||||
enable ? "Binding" : "Unbinding",
|
||||
hw_dsc->idx - DSC_0,
|
||||
enable ? "to" : "from",
|
||||
pp - PINGPONG_0);
|
||||
if (pp)
|
||||
DRM_DEBUG_KMS("Binding dsc:%d to pp:%d\n",
|
||||
hw_dsc->idx - DSC_0, pp - PINGPONG_0);
|
||||
else
|
||||
DRM_DEBUG_KMS("Unbinding dsc:%d from any pp\n",
|
||||
hw_dsc->idx - DSC_0);
|
||||
|
||||
DPU_REG_WRITE(c, dsc_ctl_offset, mux_cfg);
|
||||
}
|
||||
|
||||
static const struct dpu_dsc_cfg *_dsc_offset(enum dpu_dsc dsc,
|
||||
const struct dpu_mdss_cfg *m,
|
||||
void __iomem *addr,
|
||||
struct dpu_hw_blk_reg_map *b)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < m->dsc_count; i++) {
|
||||
if (dsc == m->dsc[i].id) {
|
||||
b->blk_addr = addr + m->dsc[i].base;
|
||||
b->log_mask = DPU_DBG_MASK_DSC;
|
||||
return &m->dsc[i];
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void _setup_dsc_ops(struct dpu_hw_dsc_ops *ops,
|
||||
unsigned long cap)
|
||||
{
|
||||
|
@ -203,23 +188,19 @@ static void _setup_dsc_ops(struct dpu_hw_dsc_ops *ops,
|
|||
ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk;
|
||||
};
|
||||
|
||||
struct dpu_hw_dsc *dpu_hw_dsc_init(enum dpu_dsc idx, void __iomem *addr,
|
||||
const struct dpu_mdss_cfg *m)
|
||||
struct dpu_hw_dsc *dpu_hw_dsc_init(const struct dpu_dsc_cfg *cfg,
|
||||
void __iomem *addr)
|
||||
{
|
||||
struct dpu_hw_dsc *c;
|
||||
const struct dpu_dsc_cfg *cfg;
|
||||
|
||||
c = kzalloc(sizeof(*c), GFP_KERNEL);
|
||||
if (!c)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
cfg = _dsc_offset(idx, m, addr, &c->hw);
|
||||
if (IS_ERR_OR_NULL(cfg)) {
|
||||
kfree(c);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
c->hw.blk_addr = addr + cfg->base;
|
||||
c->hw.log_mask = DPU_DBG_MASK_DSC;
|
||||
|
||||
c->idx = idx;
|
||||
c->idx = cfg->id;
|
||||
c->caps = cfg;
|
||||
_setup_dsc_ops(&c->ops, c->caps->features);
@ -1,5 +1,8 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/* Copyright (c) 2020-2022, Linaro Limited */
|
||||
/*
|
||||
* Copyright (c) 2020-2022, Linaro Limited
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved
|
||||
*/
|
||||
|
||||
#ifndef _DPU_HW_DSC_H
|
||||
#define _DPU_HW_DSC_H
|
||||
|
@ -44,7 +47,6 @@ struct dpu_hw_dsc_ops {
|
|||
struct drm_dsc_config *dsc);
|
||||
|
||||
void (*dsc_bind_pingpong_blk)(struct dpu_hw_dsc *hw_dsc,
|
||||
bool enable,
|
||||
enum dpu_pingpong pp);
|
||||
};
|
||||
|
||||
|
@ -61,14 +63,22 @@ struct dpu_hw_dsc {
|
|||
};
|
||||
|
||||
/**
|
||||
* dpu_hw_dsc_init - initializes the dsc block for the passed dsc idx.
|
||||
* @idx: DSC index for which driver object is required
|
||||
* dpu_hw_dsc_init() - Initializes the DSC hw driver object.
|
||||
* @cfg: DSC catalog entry for which driver object is required
|
||||
* @addr: Mapped register io address of MDP
|
||||
* Return: Error code or allocated dpu_hw_dsc context
|
||||
*/
|
||||
struct dpu_hw_dsc *dpu_hw_dsc_init(const struct dpu_dsc_cfg *cfg,
|
||||
void __iomem *addr);
|
||||
|
||||
/**
|
||||
* dpu_hw_dsc_init_1_2() - initializes the v1.2 DSC hw driver object
|
||||
* @cfg: DSC catalog entry for which driver object is required
|
||||
* @addr: Mapped register io address of MDP
|
||||
* @m: Pointer to mdss catalog data
|
||||
* Returns: Error code or allocated dpu_hw_dsc context
|
||||
*/
|
||||
struct dpu_hw_dsc *dpu_hw_dsc_init(enum dpu_dsc idx, void __iomem *addr,
|
||||
const struct dpu_mdss_cfg *m);
|
||||
struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(const struct dpu_dsc_cfg *cfg,
|
||||
void __iomem *addr);
|
||||
|
||||
/**
|
||||
* dpu_hw_dsc_destroy - destroys dsc driver context
@ -0,0 +1,387 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved
|
||||
*/
|
||||
|
||||
#include <drm/display/drm_dsc_helper.h>
|
||||
|
||||
#include "dpu_kms.h"
|
||||
#include "dpu_hw_catalog.h"
|
||||
#include "dpu_hwio.h"
|
||||
#include "dpu_hw_mdss.h"
|
||||
#include "dpu_hw_dsc.h"
|
||||
|
||||
#define DSC_CMN_MAIN_CNF 0x00
|
||||
|
||||
/* DPU_DSC_ENC register offsets */
|
||||
#define ENC_DF_CTRL 0x00
|
||||
#define ENC_GENERAL_STATUS 0x04
|
||||
#define ENC_HSLICE_STATUS 0x08
|
||||
#define ENC_OUT_STATUS 0x0C
|
||||
#define ENC_INT_STAT 0x10
|
||||
#define ENC_INT_CLR 0x14
|
||||
#define ENC_INT_MASK 0x18
|
||||
#define DSC_MAIN_CONF 0x30
|
||||
#define DSC_PICTURE_SIZE 0x34
|
||||
#define DSC_SLICE_SIZE 0x38
|
||||
#define DSC_MISC_SIZE 0x3C
|
||||
#define DSC_HRD_DELAYS 0x40
|
||||
#define DSC_RC_SCALE 0x44
|
||||
#define DSC_RC_SCALE_INC_DEC 0x48
|
||||
#define DSC_RC_OFFSETS_1 0x4C
|
||||
#define DSC_RC_OFFSETS_2 0x50
|
||||
#define DSC_RC_OFFSETS_3 0x54
|
||||
#define DSC_RC_OFFSETS_4 0x58
|
||||
#define DSC_FLATNESS_QP 0x5C
|
||||
#define DSC_RC_MODEL_SIZE 0x60
|
||||
#define DSC_RC_CONFIG 0x64
|
||||
#define DSC_RC_BUF_THRESH_0 0x68
|
||||
#define DSC_RC_BUF_THRESH_1 0x6C
|
||||
#define DSC_RC_BUF_THRESH_2 0x70
|
||||
#define DSC_RC_BUF_THRESH_3 0x74
|
||||
#define DSC_RC_MIN_QP_0 0x78
|
||||
#define DSC_RC_MIN_QP_1 0x7C
|
||||
#define DSC_RC_MIN_QP_2 0x80
|
||||
#define DSC_RC_MAX_QP_0 0x84
|
||||
#define DSC_RC_MAX_QP_1 0x88
|
||||
#define DSC_RC_MAX_QP_2 0x8C
|
||||
#define DSC_RC_RANGE_BPG_OFFSETS_0 0x90
|
||||
#define DSC_RC_RANGE_BPG_OFFSETS_1 0x94
|
||||
#define DSC_RC_RANGE_BPG_OFFSETS_2 0x98
|
||||
|
||||
/* DPU_DSC_CTL register offsets */
|
||||
#define DSC_CTL 0x00
|
||||
#define DSC_CFG 0x04
|
||||
#define DSC_DATA_IN_SWAP 0x08
|
||||
#define DSC_CLK_CTRL 0x0C
|
||||
|
||||
static int _dsc_calc_output_buf_max_addr(struct dpu_hw_dsc *hw_dsc, int num_softslice)
|
||||
{
|
||||
int max_addr = 2400 / num_softslice;
|
||||
|
||||
if (hw_dsc->caps->features & BIT(DPU_DSC_NATIVE_42x_EN))
|
||||
max_addr /= 2;
|
||||
|
||||
return max_addr - 1;
|
||||
};
|
||||
|
||||
static void dpu_hw_dsc_disable_1_2(struct dpu_hw_dsc *hw_dsc)
|
||||
{
|
||||
struct dpu_hw_blk_reg_map *hw;
|
||||
const struct dpu_dsc_sub_blks *sblk;
|
||||
|
||||
if (!hw_dsc)
|
||||
return;
|
||||
|
||||
hw = &hw_dsc->hw;
|
||||
sblk = hw_dsc->caps->sblk;
|
||||
DPU_REG_WRITE(hw, sblk->ctl.base + DSC_CFG, 0);
|
||||
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + ENC_DF_CTRL, 0);
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_MAIN_CONF, 0);
|
||||
}
|
||||
|
||||
static void dpu_hw_dsc_config_1_2(struct dpu_hw_dsc *hw_dsc,
|
||||
struct drm_dsc_config *dsc,
|
||||
u32 mode,
|
||||
u32 initial_lines)
|
||||
{
|
||||
struct dpu_hw_blk_reg_map *hw;
|
||||
const struct dpu_dsc_sub_blks *sblk;
|
||||
u32 data = 0;
|
||||
u32 det_thresh_flatness;
|
||||
u32 num_active_slice_per_enc;
|
||||
u32 bpp;
|
||||
|
||||
if (!hw_dsc || !dsc)
|
||||
return;
|
||||
|
||||
hw = &hw_dsc->hw;
|
||||
|
||||
sblk = hw_dsc->caps->sblk;
|
||||
|
||||
if (mode & DSC_MODE_SPLIT_PANEL)
|
||||
data |= BIT(0);
|
||||
|
||||
if (mode & DSC_MODE_MULTIPLEX)
|
||||
data |= BIT(1);
|
||||
|
||||
num_active_slice_per_enc = dsc->slice_count;
|
||||
if (mode & DSC_MODE_MULTIPLEX)
|
||||
num_active_slice_per_enc = dsc->slice_count / 2;
|
||||
|
||||
data |= (num_active_slice_per_enc & 0x3) << 7;
|
||||
|
||||
DPU_REG_WRITE(hw, DSC_CMN_MAIN_CNF, data);
|
||||
|
||||
data = (initial_lines & 0xff);
|
||||
|
||||
if (mode & DSC_MODE_VIDEO)
|
||||
data |= BIT(9);
|
||||
|
||||
data |= (_dsc_calc_output_buf_max_addr(hw_dsc, num_active_slice_per_enc) << 18);
|
||||
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + ENC_DF_CTRL, data);
|
||||
|
||||
data = (dsc->dsc_version_minor & 0xf) << 28;
|
||||
if (dsc->dsc_version_minor == 0x2) {
|
||||
if (dsc->native_422)
|
||||
data |= BIT(22);
|
||||
if (dsc->native_420)
|
||||
data |= BIT(21);
|
||||
}
|
||||
|
||||
bpp = dsc->bits_per_pixel;
|
||||
/* as per hw requirement bpp should be programmed
|
||||
* twice the actual value in case of 420 or 422 encoding
|
||||
*/
|
||||
if (dsc->native_422 || dsc->native_420)
|
||||
bpp = 2 * bpp;
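/*
 * Worked example (illustrative): bits_per_pixel is a 6.4 fixed-point value
 * (see the "bpp is 6.4 format" comment in the v1.1 path above), so a 10 bpp
 * target is stored as 10 << 4 = 160; for native 4:2:0/4:2:2 the value written
 * below is doubled to 320.
 */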
|
||||
|
||||
data |= bpp << 10;
|
||||
|
||||
if (dsc->block_pred_enable)
|
||||
data |= BIT(20);
|
||||
|
||||
if (dsc->convert_rgb)
|
||||
data |= BIT(4);
|
||||
|
||||
data |= (dsc->line_buf_depth & 0xf) << 6;
|
||||
data |= dsc->bits_per_component & 0xf;
|
||||
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_MAIN_CONF, data);
|
||||
|
||||
data = (dsc->pic_width & 0xffff) |
|
||||
((dsc->pic_height & 0xffff) << 16);
|
||||
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_PICTURE_SIZE, data);
|
||||
|
||||
data = (dsc->slice_width & 0xffff) |
|
||||
((dsc->slice_height & 0xffff) << 16);
|
||||
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_SLICE_SIZE, data);
|
||||
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_MISC_SIZE,
|
||||
(dsc->slice_chunk_size) & 0xffff);
|
||||
|
||||
data = (dsc->initial_xmit_delay & 0xffff) |
|
||||
((dsc->initial_dec_delay & 0xffff) << 16);
|
||||
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_HRD_DELAYS, data);
|
||||
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_SCALE,
|
||||
dsc->initial_scale_value & 0x3f);
|
||||
|
||||
data = (dsc->scale_increment_interval & 0xffff) |
|
||||
((dsc->scale_decrement_interval & 0x7ff) << 16);
|
||||
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_SCALE_INC_DEC, data);
|
||||
|
||||
data = (dsc->first_line_bpg_offset & 0x1f) |
|
||||
((dsc->second_line_bpg_offset & 0x1f) << 5);
|
||||
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_OFFSETS_1, data);
|
||||
|
||||
data = (dsc->nfl_bpg_offset & 0xffff) |
|
||||
((dsc->slice_bpg_offset & 0xffff) << 16);
|
||||
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_OFFSETS_2, data);
|
||||
|
||||
data = (dsc->initial_offset & 0xffff) |
|
||||
((dsc->final_offset & 0xffff) << 16);
|
||||
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_OFFSETS_3, data);
|
||||
|
||||
data = (dsc->nsl_bpg_offset & 0xffff) |
|
||||
((dsc->second_line_offset_adj & 0xffff) << 16);
|
||||
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_OFFSETS_4, data);
|
||||
|
||||
det_thresh_flatness = drm_dsc_flatness_det_thresh(dsc);
|
||||
data = (dsc->flatness_min_qp & 0x1f) |
|
||||
((dsc->flatness_max_qp & 0x1f) << 5) |
|
||||
((det_thresh_flatness & 0xff) << 10);
|
||||
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_FLATNESS_QP, data);
|
||||
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MODEL_SIZE,
|
||||
(dsc->rc_model_size) & 0xffff);
|
||||
|
||||
data = dsc->rc_edge_factor & 0xf;
|
||||
data |= (dsc->rc_quant_incr_limit0 & 0x1f) << 8;
|
||||
data |= (dsc->rc_quant_incr_limit1 & 0x1f) << 13;
|
||||
data |= (dsc->rc_tgt_offset_high & 0xf) << 20;
|
||||
data |= (dsc->rc_tgt_offset_low & 0xf) << 24;
|
||||
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_CONFIG, data);
|
||||
|
||||
/* program the dsc wrapper */
|
||||
data = BIT(0); /* encoder enable */
|
||||
if (dsc->native_422)
|
||||
data |= BIT(8);
|
||||
else if (dsc->native_420)
|
||||
data |= BIT(9);
|
||||
if (!dsc->convert_rgb)
|
||||
data |= BIT(10);
|
||||
if (dsc->bits_per_component == 8)
|
||||
data |= BIT(11);
|
||||
if (mode & DSC_MODE_SPLIT_PANEL)
|
||||
data |= BIT(12);
|
||||
if (mode & DSC_MODE_MULTIPLEX)
|
||||
data |= BIT(13);
|
||||
if (!(mode & DSC_MODE_VIDEO))
|
||||
data |= BIT(17);
|
||||
|
||||
DPU_REG_WRITE(hw, sblk->ctl.base + DSC_CFG, data);
|
||||
}
|
||||
|
||||
static void dpu_hw_dsc_config_thresh_1_2(struct dpu_hw_dsc *hw_dsc,
|
||||
struct drm_dsc_config *dsc)
|
||||
{
|
||||
struct dpu_hw_blk_reg_map *hw;
|
||||
const struct dpu_dsc_sub_blks *sblk;
|
||||
struct drm_dsc_rc_range_parameters *rc;
|
||||
|
||||
if (!hw_dsc || !dsc)
|
||||
return;
|
||||
|
||||
hw = &hw_dsc->hw;
|
||||
|
||||
sblk = hw_dsc->caps->sblk;
|
||||
|
||||
rc = dsc->rc_range_params;
|
||||
|
||||
/*
|
||||
* With BUF_THRESH -- 14 in total
|
||||
* each register contains 4 thresh values with the last register
|
||||
* containing only 2 thresh values
|
||||
*/
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_BUF_THRESH_0,
|
||||
(dsc->rc_buf_thresh[0] << 0) |
|
||||
(dsc->rc_buf_thresh[1] << 8) |
|
||||
(dsc->rc_buf_thresh[2] << 16) |
|
||||
(dsc->rc_buf_thresh[3] << 24));
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_BUF_THRESH_1,
|
||||
(dsc->rc_buf_thresh[4] << 0) |
|
||||
(dsc->rc_buf_thresh[5] << 8) |
|
||||
(dsc->rc_buf_thresh[6] << 16) |
|
||||
(dsc->rc_buf_thresh[7] << 24));
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_BUF_THRESH_2,
|
||||
(dsc->rc_buf_thresh[8] << 0) |
|
||||
(dsc->rc_buf_thresh[9] << 8) |
|
||||
(dsc->rc_buf_thresh[10] << 16) |
|
||||
(dsc->rc_buf_thresh[11] << 24));
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_BUF_THRESH_3,
|
||||
(dsc->rc_buf_thresh[12] << 0) |
|
||||
(dsc->rc_buf_thresh[13] << 8));
|
||||
|
||||
/*
|
||||
* with min/max_QP -- 5 bits
|
||||
* each register contains 5 min_qp or max_qp for total of 15
|
||||
*
|
||||
* With BPG_OFFSET -- 6 bits
|
||||
* each register contains 5 BPG_offset for total of 15
|
||||
*/
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MIN_QP_0,
|
||||
(rc[0].range_min_qp << 0) |
|
||||
(rc[1].range_min_qp << 5) |
|
||||
(rc[2].range_min_qp << 10) |
|
||||
(rc[3].range_min_qp << 15) |
|
||||
(rc[4].range_min_qp << 20));
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MAX_QP_0,
|
||||
(rc[0].range_max_qp << 0) |
|
||||
(rc[1].range_max_qp << 5) |
|
||||
(rc[2].range_max_qp << 10) |
|
||||
(rc[3].range_max_qp << 15) |
|
||||
(rc[4].range_max_qp << 20));
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_RANGE_BPG_OFFSETS_0,
|
||||
(rc[0].range_bpg_offset << 0) |
|
||||
(rc[1].range_bpg_offset << 6) |
|
||||
(rc[2].range_bpg_offset << 12) |
|
||||
(rc[3].range_bpg_offset << 18) |
|
||||
(rc[4].range_bpg_offset << 24));
|
||||
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MIN_QP_1,
|
||||
(rc[5].range_min_qp << 0) |
|
||||
(rc[6].range_min_qp << 5) |
|
||||
(rc[7].range_min_qp << 10) |
|
||||
(rc[8].range_min_qp << 15) |
|
||||
(rc[9].range_min_qp << 20));
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MAX_QP_1,
|
||||
(rc[5].range_max_qp << 0) |
|
||||
(rc[6].range_max_qp << 5) |
|
||||
(rc[7].range_max_qp << 10) |
|
||||
(rc[8].range_max_qp << 15) |
|
||||
(rc[9].range_max_qp << 20));
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_RANGE_BPG_OFFSETS_1,
|
||||
(rc[5].range_bpg_offset << 0) |
|
||||
(rc[6].range_bpg_offset << 6) |
|
||||
(rc[7].range_bpg_offset << 12) |
|
||||
(rc[8].range_bpg_offset << 18) |
|
||||
(rc[9].range_bpg_offset << 24));
|
||||
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MIN_QP_2,
|
||||
(rc[10].range_min_qp << 0) |
|
||||
(rc[11].range_min_qp << 5) |
|
||||
(rc[12].range_min_qp << 10) |
|
||||
(rc[13].range_min_qp << 15) |
|
||||
(rc[14].range_min_qp << 20));
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MAX_QP_2,
|
||||
(rc[10].range_max_qp << 0) |
|
||||
(rc[11].range_max_qp << 5) |
|
||||
(rc[12].range_max_qp << 10) |
|
||||
(rc[13].range_max_qp << 15) |
|
||||
(rc[14].range_max_qp << 20));
|
||||
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_RANGE_BPG_OFFSETS_2,
|
||||
(rc[10].range_bpg_offset << 0) |
|
||||
(rc[11].range_bpg_offset << 6) |
|
||||
(rc[12].range_bpg_offset << 12) |
|
||||
(rc[13].range_bpg_offset << 18) |
|
||||
(rc[14].range_bpg_offset << 24));
|
||||
}
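/*
 * Equivalent packing written as a loop, for illustration only (the driver
 * keeps the unrolled writes above). This assumes only what the offsets above
 * show: DSC_RC_MIN_QP_0/1/2, DSC_RC_MAX_QP_0/1/2 and
 * DSC_RC_RANGE_BPG_OFFSETS_0/1/2 are each 4 bytes apart and hold five
 * 5-, 5- and 6-bit fields per register for the 15 rc_range_params entries:
 *
 *	for (i = 0; i < 3; i++) {
 *		u32 min_qp = 0, max_qp = 0, bpg = 0;
 *
 *		for (j = 0; j < 5; j++) {
 *			min_qp |= rc[i * 5 + j].range_min_qp << (5 * j);
 *			max_qp |= rc[i * 5 + j].range_max_qp << (5 * j);
 *			bpg |= rc[i * 5 + j].range_bpg_offset << (6 * j);
 *		}
 *
 *		DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MIN_QP_0 + i * 4, min_qp);
 *		DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MAX_QP_0 + i * 4, max_qp);
 *		DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_RANGE_BPG_OFFSETS_0 + i * 4, bpg);
 *	}
 */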
|
||||
|
||||
static void dpu_hw_dsc_bind_pingpong_blk_1_2(struct dpu_hw_dsc *hw_dsc,
|
||||
const enum dpu_pingpong pp)
|
||||
{
|
||||
struct dpu_hw_blk_reg_map *hw;
|
||||
const struct dpu_dsc_sub_blks *sblk;
|
||||
int mux_cfg = 0xf; /* Disabled */
|
||||
|
||||
hw = &hw_dsc->hw;
|
||||
|
||||
sblk = hw_dsc->caps->sblk;
|
||||
|
||||
if (pp)
|
||||
mux_cfg = (pp - PINGPONG_0) & 0x7;
|
||||
|
||||
DPU_REG_WRITE(hw, sblk->ctl.base + DSC_CTL, mux_cfg);
|
||||
}
|
||||
|
||||
static void _setup_dcs_ops_1_2(struct dpu_hw_dsc_ops *ops,
|
||||
const unsigned long features)
|
||||
{
|
||||
ops->dsc_disable = dpu_hw_dsc_disable_1_2;
|
||||
ops->dsc_config = dpu_hw_dsc_config_1_2;
|
||||
ops->dsc_config_thresh = dpu_hw_dsc_config_thresh_1_2;
|
||||
ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk_1_2;
|
||||
}
|
||||
|
||||
struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(const struct dpu_dsc_cfg *cfg,
|
||||
void __iomem *addr)
|
||||
{
|
||||
struct dpu_hw_dsc *c;
|
||||
|
||||
c = kzalloc(sizeof(*c), GFP_KERNEL);
|
||||
if (!c)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
c->hw.blk_addr = addr + cfg->base;
|
||||
c->hw.log_mask = DPU_DBG_MASK_DSC;
|
||||
|
||||
c->idx = cfg->id;
|
||||
c->caps = cfg;
|
||||
_setup_dcs_ops_1_2(&c->ops, c->caps->features);
|
||||
|
||||
return c;
|
||||
}
|
|
@ -68,49 +68,23 @@ static void _setup_dspp_ops(struct dpu_hw_dspp *c,
|
|||
c->ops.setup_pcc = dpu_setup_dspp_pcc;
|
||||
}
|
||||
|
||||
static const struct dpu_dspp_cfg *_dspp_offset(enum dpu_dspp dspp,
|
||||
const struct dpu_mdss_cfg *m,
|
||||
void __iomem *addr,
|
||||
struct dpu_hw_blk_reg_map *b)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!m || !addr || !b)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
for (i = 0; i < m->dspp_count; i++) {
|
||||
if (dspp == m->dspp[i].id) {
|
||||
b->blk_addr = addr + m->dspp[i].base;
|
||||
b->log_mask = DPU_DBG_MASK_DSPP;
|
||||
return &m->dspp[i];
|
||||
}
|
||||
}
|
||||
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
struct dpu_hw_dspp *dpu_hw_dspp_init(enum dpu_dspp idx,
|
||||
void __iomem *addr,
|
||||
const struct dpu_mdss_cfg *m)
|
||||
struct dpu_hw_dspp *dpu_hw_dspp_init(const struct dpu_dspp_cfg *cfg,
|
||||
void __iomem *addr)
|
||||
{
|
||||
struct dpu_hw_dspp *c;
|
||||
const struct dpu_dspp_cfg *cfg;
|
||||
|
||||
if (!addr || !m)
|
||||
if (!addr)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
c = kzalloc(sizeof(*c), GFP_KERNEL);
|
||||
if (!c)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
cfg = _dspp_offset(idx, m, addr, &c->hw);
|
||||
if (IS_ERR_OR_NULL(cfg)) {
|
||||
kfree(c);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
c->hw.blk_addr = addr + cfg->base;
|
||||
c->hw.log_mask = DPU_DBG_MASK_DSPP;
|
||||
|
||||
/* Assign ops */
|
||||
c->idx = idx;
|
||||
c->idx = cfg->id;
|
||||
c->cap = cfg;
|
||||
_setup_dspp_ops(c, c->cap->features);
@ -79,14 +79,14 @@ static inline struct dpu_hw_dspp *to_dpu_hw_dspp(struct dpu_hw_blk *hw)
|
|||
}
|
||||
|
||||
/**
|
||||
* dpu_hw_dspp_init - initializes the dspp hw driver object.
|
||||
* should be called once before accessing every dspp.
|
||||
* @idx: DSPP index for which driver object is required
|
||||
* dpu_hw_dspp_init() - Initializes the DSPP hw driver object.
|
||||
* should be called once before accessing every DSPP.
|
||||
* @cfg: DSPP catalog entry for which driver object is required
|
||||
* @addr: Mapped register io address of MDP
|
||||
* @Return: pointer to structure or ERR_PTR
|
||||
* Return: pointer to structure or ERR_PTR
|
||||
*/
|
||||
struct dpu_hw_dspp *dpu_hw_dspp_init(enum dpu_dspp idx,
|
||||
void __iomem *addr, const struct dpu_mdss_cfg *m);
|
||||
struct dpu_hw_dspp *dpu_hw_dspp_init(const struct dpu_dspp_cfg *cfg,
|
||||
void __iomem *addr);
|
||||
|
||||
/**
|
||||
* dpu_hw_dspp_destroy(): Destroys DSPP driver context
@ -17,30 +17,26 @@
|
|||
* Register offsets in MDSS register file for the interrupt registers
|
||||
* w.r.t. the MDP base
|
||||
*/
|
||||
#define MDP_SSPP_TOP0_OFF 0x0
|
||||
#define MDP_INTF_0_OFF 0x6A000
|
||||
#define MDP_INTF_1_OFF 0x6A800
|
||||
#define MDP_INTF_2_OFF 0x6B000
|
||||
#define MDP_INTF_3_OFF 0x6B800
|
||||
#define MDP_INTF_4_OFF 0x6C000
|
||||
#define MDP_INTF_5_OFF 0x6C800
|
||||
#define INTF_INTR_EN 0x1c0
|
||||
#define INTF_INTR_STATUS 0x1c4
|
||||
#define INTF_INTR_CLEAR 0x1c8
|
||||
#define MDP_AD4_0_OFF 0x7C000
|
||||
#define MDP_AD4_1_OFF 0x7D000
|
||||
#define MDP_AD4_INTR_EN_OFF 0x41c
|
||||
#define MDP_AD4_INTR_CLEAR_OFF 0x424
|
||||
#define MDP_AD4_INTR_STATUS_OFF 0x420
|
||||
#define MDP_INTF_0_OFF_REV_7xxx 0x34000
|
||||
#define MDP_INTF_1_OFF_REV_7xxx 0x35000
|
||||
#define MDP_INTF_2_OFF_REV_7xxx 0x36000
|
||||
#define MDP_INTF_3_OFF_REV_7xxx 0x37000
|
||||
#define MDP_INTF_4_OFF_REV_7xxx 0x38000
|
||||
#define MDP_INTF_5_OFF_REV_7xxx 0x39000
|
||||
#define MDP_INTF_6_OFF_REV_7xxx 0x3a000
|
||||
#define MDP_INTF_7_OFF_REV_7xxx 0x3b000
|
||||
#define MDP_INTF_8_OFF_REV_7xxx 0x3c000
|
||||
#define MDP_INTF_OFF(intf) (0x6A000 + 0x800 * (intf))
|
||||
#define MDP_INTF_INTR_EN(intf) (MDP_INTF_OFF(intf) + 0x1c0)
|
||||
#define MDP_INTF_INTR_STATUS(intf) (MDP_INTF_OFF(intf) + 0x1c4)
|
||||
#define MDP_INTF_INTR_CLEAR(intf) (MDP_INTF_OFF(intf) + 0x1c8)
|
||||
#define MDP_INTF_TEAR_OFF(intf) (0x6D700 + 0x100 * (intf))
|
||||
#define MDP_INTF_INTR_TEAR_EN(intf) (MDP_INTF_TEAR_OFF(intf) + 0x000)
|
||||
#define MDP_INTF_INTR_TEAR_STATUS(intf) (MDP_INTF_TEAR_OFF(intf) + 0x004)
|
||||
#define MDP_INTF_INTR_TEAR_CLEAR(intf) (MDP_INTF_TEAR_OFF(intf) + 0x008)
|
||||
#define MDP_AD4_OFF(ad4) (0x7C000 + 0x1000 * (ad4))
|
||||
#define MDP_AD4_INTR_EN_OFF(ad4) (MDP_AD4_OFF(ad4) + 0x41c)
|
||||
#define MDP_AD4_INTR_CLEAR_OFF(ad4) (MDP_AD4_OFF(ad4) + 0x424)
|
||||
#define MDP_AD4_INTR_STATUS_OFF(ad4) (MDP_AD4_OFF(ad4) + 0x420)
|
||||
#define MDP_INTF_REV_7xxx_OFF(intf) (0x34000 + 0x1000 * (intf))
|
||||
#define MDP_INTF_REV_7xxx_INTR_EN(intf) (MDP_INTF_REV_7xxx_OFF(intf) + 0x1c0)
|
||||
#define MDP_INTF_REV_7xxx_INTR_STATUS(intf) (MDP_INTF_REV_7xxx_OFF(intf) + 0x1c4)
|
||||
#define MDP_INTF_REV_7xxx_INTR_CLEAR(intf) (MDP_INTF_REV_7xxx_OFF(intf) + 0x1c8)
|
||||
#define MDP_INTF_REV_7xxx_TEAR_OFF(intf) (0x34800 + 0x1000 * (intf))
|
||||
#define MDP_INTF_REV_7xxx_INTR_TEAR_EN(intf) (MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x000)
|
||||
#define MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(intf) (MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x004)
|
||||
#define MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(intf) (MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x008)
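/*
 * For illustration, the parameterised macros reproduce the old per-instance
 * constants, e.g. for INTF_2 on pre-7xxx hardware:
 *   MDP_INTF_INTR_STATUS(2) = 0x6A000 + 0x800 * 2 + 0x1c4 = 0x6B1C4
 * which equals the previous MDP_INTF_2_OFF (0x6B000) + INTF_INTR_STATUS (0x1c4).
 */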
|
||||
|
||||
/**
|
||||
* struct dpu_intr_reg - array of DPU register sets
|
||||
|
@ -61,104 +57,124 @@ struct dpu_intr_reg {
|
|||
*/
|
||||
static const struct dpu_intr_reg dpu_intr_set[] = {
|
||||
[MDP_SSPP_TOP0_INTR] = {
|
||||
MDP_SSPP_TOP0_OFF+INTR_CLEAR,
|
||||
MDP_SSPP_TOP0_OFF+INTR_EN,
|
||||
MDP_SSPP_TOP0_OFF+INTR_STATUS
|
||||
INTR_CLEAR,
|
||||
INTR_EN,
|
||||
INTR_STATUS
|
||||
},
|
||||
[MDP_SSPP_TOP0_INTR2] = {
|
||||
MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
|
||||
MDP_SSPP_TOP0_OFF+INTR2_EN,
|
||||
MDP_SSPP_TOP0_OFF+INTR2_STATUS
|
||||
INTR2_CLEAR,
|
||||
INTR2_EN,
|
||||
INTR2_STATUS
|
||||
},
|
||||
[MDP_SSPP_TOP0_HIST_INTR] = {
|
||||
MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
|
||||
MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
|
||||
MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
|
||||
HIST_INTR_CLEAR,
|
||||
HIST_INTR_EN,
|
||||
HIST_INTR_STATUS
|
||||
},
|
||||
[MDP_INTF0_INTR] = {
|
||||
MDP_INTF_0_OFF+INTF_INTR_CLEAR,
|
||||
MDP_INTF_0_OFF+INTF_INTR_EN,
|
||||
MDP_INTF_0_OFF+INTF_INTR_STATUS
|
||||
MDP_INTF_INTR_CLEAR(0),
|
||||
MDP_INTF_INTR_EN(0),
|
||||
MDP_INTF_INTR_STATUS(0)
|
||||
},
|
||||
[MDP_INTF1_INTR] = {
|
||||
MDP_INTF_1_OFF+INTF_INTR_CLEAR,
|
||||
MDP_INTF_1_OFF+INTF_INTR_EN,
|
||||
MDP_INTF_1_OFF+INTF_INTR_STATUS
|
||||
MDP_INTF_INTR_CLEAR(1),
|
||||
MDP_INTF_INTR_EN(1),
|
||||
MDP_INTF_INTR_STATUS(1)
|
||||
},
|
||||
[MDP_INTF2_INTR] = {
|
||||
MDP_INTF_2_OFF+INTF_INTR_CLEAR,
|
||||
MDP_INTF_2_OFF+INTF_INTR_EN,
|
||||
MDP_INTF_2_OFF+INTF_INTR_STATUS
|
||||
MDP_INTF_INTR_CLEAR(2),
|
||||
MDP_INTF_INTR_EN(2),
|
||||
MDP_INTF_INTR_STATUS(2)
|
||||
},
|
||||
[MDP_INTF3_INTR] = {
|
||||
MDP_INTF_3_OFF+INTF_INTR_CLEAR,
|
||||
MDP_INTF_3_OFF+INTF_INTR_EN,
|
||||
MDP_INTF_3_OFF+INTF_INTR_STATUS
|
||||
MDP_INTF_INTR_CLEAR(3),
|
||||
MDP_INTF_INTR_EN(3),
|
||||
MDP_INTF_INTR_STATUS(3)
|
||||
},
|
||||
[MDP_INTF4_INTR] = {
|
||||
MDP_INTF_4_OFF+INTF_INTR_CLEAR,
|
||||
MDP_INTF_4_OFF+INTF_INTR_EN,
|
||||
MDP_INTF_4_OFF+INTF_INTR_STATUS
|
||||
MDP_INTF_INTR_CLEAR(4),
|
||||
MDP_INTF_INTR_EN(4),
|
||||
MDP_INTF_INTR_STATUS(4)
|
||||
},
|
||||
[MDP_INTF5_INTR] = {
|
||||
MDP_INTF_5_OFF+INTF_INTR_CLEAR,
|
||||
MDP_INTF_5_OFF+INTF_INTR_EN,
|
||||
MDP_INTF_5_OFF+INTF_INTR_STATUS
|
||||
MDP_INTF_INTR_CLEAR(5),
|
||||
MDP_INTF_INTR_EN(5),
|
||||
MDP_INTF_INTR_STATUS(5)
|
||||
},
|
||||
[MDP_INTF1_TEAR_INTR] = {
|
||||
MDP_INTF_INTR_TEAR_CLEAR(1),
|
||||
MDP_INTF_INTR_TEAR_EN(1),
|
||||
MDP_INTF_INTR_TEAR_STATUS(1)
|
||||
},
|
||||
[MDP_INTF2_TEAR_INTR] = {
|
||||
MDP_INTF_INTR_TEAR_CLEAR(2),
|
||||
MDP_INTF_INTR_TEAR_EN(2),
|
||||
MDP_INTF_INTR_TEAR_STATUS(2)
|
||||
},
|
||||
[MDP_AD4_0_INTR] = {
|
||||
MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
|
||||
MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
|
||||
MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
|
||||
MDP_AD4_INTR_CLEAR_OFF(0),
|
||||
MDP_AD4_INTR_EN_OFF(0),
|
||||
MDP_AD4_INTR_STATUS_OFF(0),
|
||||
},
|
||||
[MDP_AD4_1_INTR] = {
|
||||
MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
|
||||
MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
|
||||
MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
|
||||
MDP_AD4_INTR_CLEAR_OFF(1),
|
||||
MDP_AD4_INTR_EN_OFF(1),
|
||||
MDP_AD4_INTR_STATUS_OFF(1),
|
||||
},
|
||||
[MDP_INTF0_7xxx_INTR] = {
|
||||
MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_CLEAR,
|
||||
MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_EN,
|
||||
MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_STATUS
|
||||
MDP_INTF_REV_7xxx_INTR_CLEAR(0),
|
||||
MDP_INTF_REV_7xxx_INTR_EN(0),
|
||||
MDP_INTF_REV_7xxx_INTR_STATUS(0)
|
||||
},
|
||||
[MDP_INTF1_7xxx_INTR] = {
|
||||
MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_CLEAR,
|
||||
MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_EN,
|
||||
MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_STATUS
|
||||
MDP_INTF_REV_7xxx_INTR_CLEAR(1),
|
||||
MDP_INTF_REV_7xxx_INTR_EN(1),
|
||||
MDP_INTF_REV_7xxx_INTR_STATUS(1)
|
||||
},
|
||||
[MDP_INTF1_7xxx_TEAR_INTR] = {
|
||||
MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(1),
|
||||
MDP_INTF_REV_7xxx_INTR_TEAR_EN(1),
|
||||
MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(1)
|
||||
},
|
||||
[MDP_INTF2_7xxx_INTR] = {
|
||||
MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_CLEAR,
|
||||
MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_EN,
|
||||
MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_STATUS
|
||||
MDP_INTF_REV_7xxx_INTR_CLEAR(2),
|
||||
MDP_INTF_REV_7xxx_INTR_EN(2),
|
||||
MDP_INTF_REV_7xxx_INTR_STATUS(2)
|
||||
},
|
||||
[MDP_INTF2_7xxx_TEAR_INTR] = {
|
||||
MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(2),
|
||||
MDP_INTF_REV_7xxx_INTR_TEAR_EN(2),
|
||||
MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(2)
|
||||
},
|
||||
[MDP_INTF3_7xxx_INTR] = {
|
||||
MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_CLEAR,
|
||||
MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_EN,
|
||||
MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_STATUS
|
||||
MDP_INTF_REV_7xxx_INTR_CLEAR(3),
|
||||
MDP_INTF_REV_7xxx_INTR_EN(3),
|
||||
MDP_INTF_REV_7xxx_INTR_STATUS(3)
|
||||
},
|
||||
[MDP_INTF4_7xxx_INTR] = {
|
||||
MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_CLEAR,
|
||||
MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_EN,
|
||||
MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_STATUS
|
||||
MDP_INTF_REV_7xxx_INTR_CLEAR(4),
|
||||
MDP_INTF_REV_7xxx_INTR_EN(4),
|
||||
MDP_INTF_REV_7xxx_INTR_STATUS(4)
|
||||
},
|
||||
[MDP_INTF5_7xxx_INTR] = {
|
||||
MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_CLEAR,
|
||||
MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN,
|
||||
MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS
|
||||
MDP_INTF_REV_7xxx_INTR_CLEAR(5),
|
||||
MDP_INTF_REV_7xxx_INTR_EN(5),
|
||||
MDP_INTF_REV_7xxx_INTR_STATUS(5)
|
||||
},
|
||||
[MDP_INTF6_7xxx_INTR] = {
|
||||
MDP_INTF_6_OFF_REV_7xxx+INTF_INTR_CLEAR,
|
||||
MDP_INTF_6_OFF_REV_7xxx+INTF_INTR_EN,
|
||||
MDP_INTF_6_OFF_REV_7xxx+INTF_INTR_STATUS
|
||||
MDP_INTF_REV_7xxx_INTR_CLEAR(6),
|
||||
MDP_INTF_REV_7xxx_INTR_EN(6),
|
||||
MDP_INTF_REV_7xxx_INTR_STATUS(6)
|
||||
},
|
||||
[MDP_INTF7_7xxx_INTR] = {
|
||||
MDP_INTF_7_OFF_REV_7xxx+INTF_INTR_CLEAR,
|
||||
MDP_INTF_7_OFF_REV_7xxx+INTF_INTR_EN,
|
||||
MDP_INTF_7_OFF_REV_7xxx+INTF_INTR_STATUS
|
||||
MDP_INTF_REV_7xxx_INTR_CLEAR(7),
|
||||
MDP_INTF_REV_7xxx_INTR_EN(7),
|
||||
MDP_INTF_REV_7xxx_INTR_STATUS(7)
|
||||
},
|
||||
[MDP_INTF8_7xxx_INTR] = {
|
||||
MDP_INTF_8_OFF_REV_7xxx+INTF_INTR_CLEAR,
|
||||
MDP_INTF_8_OFF_REV_7xxx+INTF_INTR_EN,
|
||||
MDP_INTF_8_OFF_REV_7xxx+INTF_INTR_STATUS
|
||||
MDP_INTF_REV_7xxx_INTR_CLEAR(8),
|
||||
MDP_INTF_REV_7xxx_INTR_EN(8),
|
||||
MDP_INTF_REV_7xxx_INTR_STATUS(8)
|
||||
},
|
||||
};
@ -23,11 +23,15 @@ enum dpu_hw_intr_reg {
|
|||
MDP_INTF3_INTR,
|
||||
MDP_INTF4_INTR,
|
||||
MDP_INTF5_INTR,
|
||||
MDP_INTF1_TEAR_INTR,
|
||||
MDP_INTF2_TEAR_INTR,
|
||||
MDP_AD4_0_INTR,
|
||||
MDP_AD4_1_INTR,
|
||||
MDP_INTF0_7xxx_INTR,
|
||||
MDP_INTF1_7xxx_INTR,
|
||||
MDP_INTF1_7xxx_TEAR_INTR,
|
||||
MDP_INTF2_7xxx_INTR,
|
||||
MDP_INTF2_7xxx_TEAR_INTR,
|
||||
MDP_INTF3_7xxx_INTR,
|
||||
MDP_INTF4_7xxx_INTR,
|
||||
MDP_INTF5_7xxx_INTR,
|
||||
|
@ -67,7 +71,7 @@ struct dpu_hw_intr {
|
|||
/**
|
||||
* dpu_hw_intr_init(): Initializes the interrupts hw object
|
||||
* @addr: mapped register io address of MDP
|
||||
* @m : pointer to mdss catalog data
|
||||
* @m: pointer to MDSS catalog data
|
||||
*/
|
||||
struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
|
||||
const struct dpu_mdss_cfg *m);
@ -8,6 +8,9 @@
|
|||
#include "dpu_hw_catalog.h"
|
||||
#include "dpu_hw_intf.h"
|
||||
#include "dpu_kms.h"
|
||||
#include "dpu_trace.h"
|
||||
|
||||
#include <linux/iopoll.h>
|
||||
|
||||
#define INTF_TIMING_ENGINE_EN 0x000
|
||||
#define INTF_CONFIG 0x004
|
||||
|
@ -36,56 +39,60 @@
|
|||
#define INTF_CONFIG2 0x060
|
||||
#define INTF_DISPLAY_DATA_HCTL 0x064
|
||||
#define INTF_ACTIVE_DATA_HCTL 0x068
|
||||
|
||||
#define INTF_DSI_CMD_MODE_TRIGGER_EN 0x084
|
||||
#define INTF_PANEL_FORMAT 0x090
|
||||
|
||||
#define INTF_FRAME_LINE_COUNT_EN 0x0A8
|
||||
#define INTF_FRAME_COUNT 0x0AC
|
||||
#define INTF_LINE_COUNT 0x0B0
|
||||
#define INTF_LINE_COUNT 0x0B0
|
||||
|
||||
#define INTF_DEFLICKER_CONFIG 0x0F0
|
||||
#define INTF_DEFLICKER_STRNG_COEFF 0x0F4
|
||||
#define INTF_DEFLICKER_WEAK_COEFF 0x0F8
|
||||
#define INTF_DEFLICKER_CONFIG 0x0F0
|
||||
#define INTF_DEFLICKER_STRNG_COEFF 0x0F4
|
||||
#define INTF_DEFLICKER_WEAK_COEFF 0x0F8
|
||||
|
||||
#define INTF_DSI_CMD_MODE_TRIGGER_EN 0x084
|
||||
#define INTF_PANEL_FORMAT 0x090
|
||||
#define INTF_TPG_ENABLE 0x100
|
||||
#define INTF_TPG_MAIN_CONTROL 0x104
|
||||
#define INTF_TPG_VIDEO_CONFIG 0x108
|
||||
#define INTF_TPG_COMPONENT_LIMITS 0x10C
|
||||
#define INTF_TPG_RECTANGLE 0x110
|
||||
#define INTF_TPG_INITIAL_VALUE 0x114
|
||||
#define INTF_TPG_BLK_WHITE_PATTERN_FRAMES 0x118
|
||||
#define INTF_TPG_RGB_MAPPING 0x11C
|
||||
#define INTF_PROG_FETCH_START 0x170
|
||||
#define INTF_PROG_ROT_START 0x174
|
||||
#define INTF_MUX 0x25C
|
||||
#define INTF_STATUS 0x26C
|
||||
#define INTF_TPG_ENABLE 0x100
|
||||
#define INTF_TPG_MAIN_CONTROL 0x104
|
||||
#define INTF_TPG_VIDEO_CONFIG 0x108
|
||||
#define INTF_TPG_COMPONENT_LIMITS 0x10C
|
||||
#define INTF_TPG_RECTANGLE 0x110
|
||||
#define INTF_TPG_INITIAL_VALUE 0x114
|
||||
#define INTF_TPG_BLK_WHITE_PATTERN_FRAMES 0x118
|
||||
#define INTF_TPG_RGB_MAPPING 0x11C
|
||||
#define INTF_PROG_FETCH_START 0x170
|
||||
#define INTF_PROG_ROT_START 0x174
|
||||
|
||||
#define INTF_MISR_CTRL 0x180
|
||||
#define INTF_MISR_SIGNATURE 0x184
|
||||
|
||||
#define INTF_MUX 0x25C
|
||||
#define INTF_STATUS 0x26C
|
||||
#define INTF_AVR_CONTROL 0x270
|
||||
#define INTF_AVR_MODE 0x274
|
||||
#define INTF_AVR_TRIGGER 0x278
|
||||
#define INTF_AVR_VTOTAL 0x27C
|
||||
#define INTF_TEAR_MDP_VSYNC_SEL 0x280
|
||||
#define INTF_TEAR_TEAR_CHECK_EN 0x284
|
||||
#define INTF_TEAR_SYNC_CONFIG_VSYNC 0x288
|
||||
#define INTF_TEAR_SYNC_CONFIG_HEIGHT 0x28C
|
||||
#define INTF_TEAR_SYNC_WRCOUNT 0x290
|
||||
#define INTF_TEAR_VSYNC_INIT_VAL 0x294
|
||||
#define INTF_TEAR_INT_COUNT_VAL 0x298
|
||||
#define INTF_TEAR_SYNC_THRESH 0x29C
|
||||
#define INTF_TEAR_START_POS 0x2A0
|
||||
#define INTF_TEAR_RD_PTR_IRQ 0x2A4
|
||||
#define INTF_TEAR_WR_PTR_IRQ 0x2A8
|
||||
#define INTF_TEAR_OUT_LINE_COUNT 0x2AC
|
||||
#define INTF_TEAR_LINE_COUNT 0x2B0
|
||||
#define INTF_TEAR_AUTOREFRESH_CONFIG 0x2B4
|
||||
|
||||
#define INTF_CFG_ACTIVE_H_EN BIT(29)
|
||||
#define INTF_CFG_ACTIVE_V_EN BIT(30)
|
||||
|
||||
#define INTF_CFG2_DATABUS_WIDEN BIT(0)
|
||||
#define INTF_CFG2_DATA_HCTL_EN BIT(4)
|
||||
#define INTF_CFG2_DCE_DATA_COMPRESS BIT(12)
|
||||
|
||||
#define INTF_MISR_CTRL 0x180
|
||||
#define INTF_MISR_SIGNATURE 0x184
|
||||
|
||||
static const struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
|
||||
const struct dpu_mdss_cfg *m,
|
||||
void __iomem *addr,
|
||||
struct dpu_hw_blk_reg_map *b)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < m->intf_count; i++) {
|
||||
if ((intf == m->intf[i].id) &&
|
||||
(m->intf[i].type != INTF_NONE)) {
|
||||
b->blk_addr = addr + m->intf[i].base;
|
||||
b->log_mask = DPU_DBG_MASK_INTF;
|
||||
return &m->intf[i];
|
||||
}
|
||||
}
|
||||
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
|
||||
const struct intf_timing_params *p,
|
||||
|
@ -99,7 +106,7 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
|
|||
u32 active_h_start, active_h_end;
|
||||
u32 active_v_start, active_v_end;
|
||||
u32 active_hctl, display_hctl, hsync_ctl;
|
||||
u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity;
|
||||
u32 polarity_ctl, den_polarity;
|
||||
u32 panel_format;
|
||||
u32 intf_cfg, intf_cfg2 = 0;
|
||||
u32 display_data_hctl = 0, active_data_hctl = 0;
|
||||
|
@ -186,19 +193,9 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
|
|||
}
|
||||
|
||||
den_polarity = 0;
|
||||
if (ctx->cap->type == INTF_HDMI) {
|
||||
hsync_polarity = p->yres >= 720 ? 0 : 1;
|
||||
vsync_polarity = p->yres >= 720 ? 0 : 1;
|
||||
} else if (ctx->cap->type == INTF_DP) {
|
||||
hsync_polarity = p->hsync_polarity;
|
||||
vsync_polarity = p->vsync_polarity;
|
||||
} else {
|
||||
hsync_polarity = 0;
|
||||
vsync_polarity = 0;
|
||||
}
|
||||
polarity_ctl = (den_polarity << 2) | /* DEN Polarity */
|
||||
(vsync_polarity << 1) | /* VSYNC Polarity */
|
||||
(hsync_polarity << 0); /* HSYNC Polarity */
|
||||
(p->vsync_polarity << 1) | /* VSYNC Polarity */
|
||||
(p->hsync_polarity << 0); /* HSYNC Polarity */
|
||||
|
||||
if (!DPU_FORMAT_IS_YUV(fmt))
|
||||
panel_format = (fmt->bits[C0_G_Y] |
|
||||
|
@ -271,7 +268,6 @@ static void dpu_hw_intf_setup_prg_fetch(
|
|||
|
||||
static void dpu_hw_intf_bind_pingpong_blk(
|
||||
struct dpu_hw_intf *intf,
|
||||
bool enable,
|
||||
const enum dpu_pingpong pp)
|
||||
{
|
||||
struct dpu_hw_blk_reg_map *c = &intf->hw;
|
||||
|
@ -280,7 +276,7 @@ static void dpu_hw_intf_bind_pingpong_blk(
|
|||
mux_cfg = DPU_REG_READ(c, INTF_MUX);
|
||||
mux_cfg &= ~0xf;
|
||||
|
||||
if (enable)
|
||||
if (pp)
|
||||
mux_cfg |= (pp - PINGPONG_0) & 0x7;
|
||||
else
|
||||
mux_cfg |= 0xf;
|
||||
|
@ -332,6 +328,200 @@ static int dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf, u32 *misr_value)
|
|||
return dpu_hw_collect_misr(&intf->hw, INTF_MISR_CTRL, INTF_MISR_SIGNATURE, misr_value);
|
||||
}
|
||||
|
||||
static int dpu_hw_intf_enable_te(struct dpu_hw_intf *intf,
|
||||
struct dpu_hw_tear_check *te)
|
||||
{
|
||||
struct dpu_hw_blk_reg_map *c;
|
||||
int cfg;
|
||||
|
||||
if (!intf)
|
||||
return -EINVAL;
|
||||
|
||||
c = &intf->hw;
|
||||
|
||||
cfg = BIT(19); /* VSYNC_COUNTER_EN */
|
||||
if (te->hw_vsync_mode)
|
||||
cfg |= BIT(20);
|
||||
|
||||
cfg |= te->vsync_count;
|
||||
|
||||
DPU_REG_WRITE(c, INTF_TEAR_SYNC_CONFIG_VSYNC, cfg);
|
||||
DPU_REG_WRITE(c, INTF_TEAR_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
|
||||
DPU_REG_WRITE(c, INTF_TEAR_VSYNC_INIT_VAL, te->vsync_init_val);
|
||||
DPU_REG_WRITE(c, INTF_TEAR_RD_PTR_IRQ, te->rd_ptr_irq);
|
||||
DPU_REG_WRITE(c, INTF_TEAR_START_POS, te->start_pos);
|
||||
DPU_REG_WRITE(c, INTF_TEAR_SYNC_THRESH,
|
||||
((te->sync_threshold_continue << 16) |
|
||||
te->sync_threshold_start));
|
||||
DPU_REG_WRITE(c, INTF_TEAR_SYNC_WRCOUNT,
|
||||
(te->start_pos + te->sync_threshold_start + 1));
|
||||
|
||||
DPU_REG_WRITE(c, INTF_TEAR_TEAR_CHECK_EN, 1);
|
||||
|
||||
return 0;
|
||||
}
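/*
 * Register layout implied by the writes above: INTF_TEAR_SYNC_CONFIG_VSYNC
 * carries the vsync counter divider in its low bits (exact field width not
 * shown here), BIT(19) enables the counter and BIT(20) selects the
 * hardware/external vsync source when te->hw_vsync_mode is set.
 */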
|
||||
|
||||
static void dpu_hw_intf_setup_autorefresh_config(struct dpu_hw_intf *intf,
|
||||
u32 frame_count, bool enable)
|
||||
{
|
||||
struct dpu_hw_blk_reg_map *c;
|
||||
u32 refresh_cfg;
|
||||
|
||||
c = &intf->hw;
|
||||
refresh_cfg = DPU_REG_READ(c, INTF_TEAR_AUTOREFRESH_CONFIG);
|
||||
if (enable)
|
||||
refresh_cfg = BIT(31) | frame_count;
|
||||
else
|
||||
refresh_cfg &= ~BIT(31);
|
||||
|
||||
DPU_REG_WRITE(c, INTF_TEAR_AUTOREFRESH_CONFIG, refresh_cfg);
|
||||
}
|
||||
|
||||
/*
|
||||
* dpu_hw_intf_get_autorefresh_config - Get autorefresh config from HW
|
||||
* @intf: DPU intf structure
|
||||
* @frame_count: Used to return the current frame count from hw
|
||||
*
|
||||
* Returns: True if autorefresh enabled, false if disabled.
|
||||
*/
|
||||
static bool dpu_hw_intf_get_autorefresh_config(struct dpu_hw_intf *intf,
|
||||
u32 *frame_count)
|
||||
{
|
||||
u32 val = DPU_REG_READ(&intf->hw, INTF_TEAR_AUTOREFRESH_CONFIG);
|
||||
|
||||
if (frame_count != NULL)
|
||||
*frame_count = val & 0xffff;
|
||||
return !!((val & BIT(31)) >> 31);
|
||||
}
|
||||
|
||||
static int dpu_hw_intf_disable_te(struct dpu_hw_intf *intf)
|
||||
{
|
||||
struct dpu_hw_blk_reg_map *c;
|
||||
|
||||
if (!intf)
|
||||
return -EINVAL;
|
||||
|
||||
c = &intf->hw;
|
||||
DPU_REG_WRITE(c, INTF_TEAR_TEAR_CHECK_EN, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dpu_hw_intf_connect_external_te(struct dpu_hw_intf *intf,
|
||||
bool enable_external_te)
|
||||
{
|
||||
struct dpu_hw_blk_reg_map *c = &intf->hw;
|
||||
u32 cfg;
|
||||
int orig;
|
||||
|
||||
if (!intf)
|
||||
return -EINVAL;
|
||||
|
||||
c = &intf->hw;
|
||||
cfg = DPU_REG_READ(c, INTF_TEAR_SYNC_CONFIG_VSYNC);
|
||||
orig = (bool)(cfg & BIT(20));
|
||||
if (enable_external_te)
|
||||
cfg |= BIT(20);
|
||||
else
|
||||
cfg &= ~BIT(20);
|
||||
DPU_REG_WRITE(c, INTF_TEAR_SYNC_CONFIG_VSYNC, cfg);
|
||||
trace_dpu_intf_connect_ext_te(intf->idx - INTF_0, cfg);
|
||||
|
||||
return orig;
|
||||
}
|
||||
|
||||
static int dpu_hw_intf_get_vsync_info(struct dpu_hw_intf *intf,
|
||||
struct dpu_hw_pp_vsync_info *info)
|
||||
{
|
||||
struct dpu_hw_blk_reg_map *c = &intf->hw;
|
||||
u32 val;
|
||||
|
||||
if (!intf || !info)
|
||||
return -EINVAL;
|
||||
|
||||
c = &intf->hw;
|
||||
|
||||
val = DPU_REG_READ(c, INTF_TEAR_VSYNC_INIT_VAL);
|
||||
info->rd_ptr_init_val = val & 0xffff;
|
||||
|
||||
val = DPU_REG_READ(c, INTF_TEAR_INT_COUNT_VAL);
|
||||
info->rd_ptr_frame_count = (val & 0xffff0000) >> 16;
|
||||
info->rd_ptr_line_count = val & 0xffff;
|
||||
|
||||
val = DPU_REG_READ(c, INTF_TEAR_LINE_COUNT);
|
||||
info->wr_ptr_line_count = val & 0xffff;
|
||||
|
||||
val = DPU_REG_READ(c, INTF_FRAME_COUNT);
|
||||
info->intf_frame_count = val;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void dpu_hw_intf_vsync_sel(struct dpu_hw_intf *intf,
|
||||
u32 vsync_source)
|
||||
{
|
||||
struct dpu_hw_blk_reg_map *c;
|
||||
|
||||
if (!intf)
|
||||
return;
|
||||
|
||||
c = &intf->hw;
|
||||
|
||||
DPU_REG_WRITE(c, INTF_TEAR_MDP_VSYNC_SEL, (vsync_source & 0xf));
|
||||
}
|
||||
|
||||
static void dpu_hw_intf_disable_autorefresh(struct dpu_hw_intf *intf,
|
||||
uint32_t encoder_id, u16 vdisplay)
|
||||
{
|
||||
struct dpu_hw_pp_vsync_info info;
|
||||
int trial = 0;
|
||||
|
||||
/* If autorefresh is already disabled, we have nothing to do */
|
||||
if (!dpu_hw_intf_get_autorefresh_config(intf, NULL))
|
||||
return;
|
||||
|
||||
/*
|
||||
* If autorefresh is enabled, disable it and make sure it is safe to
|
||||
* proceed with current frame commit/push. Sequence followed is,
|
||||
* 1. Disable TE
|
||||
* 2. Disable autorefresh config
|
||||
* 3. Poll for frame transfer ongoing to be false
* 4. Enable TE back
|
||||
*/
|
||||
|
||||
dpu_hw_intf_connect_external_te(intf, false);
|
||||
dpu_hw_intf_setup_autorefresh_config(intf, 0, false);
|
||||
|
||||
do {
|
||||
udelay(DPU_ENC_MAX_POLL_TIMEOUT_US);
|
||||
if ((trial * DPU_ENC_MAX_POLL_TIMEOUT_US)
|
||||
> (KICKOFF_TIMEOUT_MS * USEC_PER_MSEC)) {
|
||||
DPU_ERROR("enc%d intf%d disable autorefresh failed\n",
|
||||
encoder_id, intf->idx - INTF_0);
|
||||
break;
|
||||
}
|
||||
|
||||
trial++;
|
||||
|
||||
dpu_hw_intf_get_vsync_info(intf, &info);
|
||||
} while (info.wr_ptr_line_count > 0 &&
|
||||
info.wr_ptr_line_count < vdisplay);
|
||||
|
||||
dpu_hw_intf_connect_external_te(intf, true);
|
||||
|
||||
DPU_DEBUG("enc%d intf%d disabled autorefresh\n",
|
||||
encoder_id, intf->idx - INTF_0);
|
||||
|
||||
}
|
||||
|
||||
static void dpu_hw_intf_enable_compression(struct dpu_hw_intf *ctx)
|
||||
{
|
||||
u32 intf_cfg2 = DPU_REG_READ(&ctx->hw, INTF_CONFIG2);
|
||||
|
||||
intf_cfg2 |= INTF_CFG2_DCE_DATA_COMPRESS;
|
||||
|
||||
DPU_REG_WRITE(&ctx->hw, INTF_CONFIG2, intf_cfg2);
|
||||
}
|
||||
|
||||
static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
|
||||
unsigned long cap)
|
||||
{
|
||||
|
@ -344,32 +534,41 @@ static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
|
|||
ops->bind_pingpong_blk = dpu_hw_intf_bind_pingpong_blk;
|
||||
ops->setup_misr = dpu_hw_intf_setup_misr;
|
||||
ops->collect_misr = dpu_hw_intf_collect_misr;
|
||||
|
||||
if (cap & BIT(DPU_INTF_TE)) {
|
||||
ops->enable_tearcheck = dpu_hw_intf_enable_te;
|
||||
ops->disable_tearcheck = dpu_hw_intf_disable_te;
|
||||
ops->connect_external_te = dpu_hw_intf_connect_external_te;
|
||||
ops->vsync_sel = dpu_hw_intf_vsync_sel;
|
||||
ops->disable_autorefresh = dpu_hw_intf_disable_autorefresh;
|
||||
}
|
||||
|
||||
if (cap & BIT(DPU_INTF_DATA_COMPRESS))
|
||||
ops->enable_compression = dpu_hw_intf_enable_compression;
|
||||
}
|
||||
|
||||
struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
|
||||
void __iomem *addr,
|
||||
const struct dpu_mdss_cfg *m)
|
||||
struct dpu_hw_intf *dpu_hw_intf_init(const struct dpu_intf_cfg *cfg,
|
||||
void __iomem *addr)
|
||||
{
|
||||
struct dpu_hw_intf *c;
|
||||
const struct dpu_intf_cfg *cfg;
|
||||
|
||||
if (cfg->type == INTF_NONE) {
|
||||
DPU_DEBUG("Skip intf %d with type NONE\n", cfg->id - INTF_0);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
c = kzalloc(sizeof(*c), GFP_KERNEL);
|
||||
if (!c)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
cfg = _intf_offset(idx, m, addr, &c->hw);
|
||||
if (IS_ERR_OR_NULL(cfg)) {
|
||||
kfree(c);
|
||||
pr_err("failed to create dpu_hw_intf %d\n", idx);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
c->hw.blk_addr = addr + cfg->base;
|
||||
c->hw.log_mask = DPU_DBG_MASK_INTF;
|
||||
|
||||
/*
|
||||
* Assign ops
|
||||
*/
|
||||
c->idx = idx;
|
||||
c->idx = cfg->id;
|
||||
c->cap = cfg;
|
||||
c->mdss = m;
|
||||
_setup_intf_ops(&c->ops, c->cap->features);
|
||||
|
||||
return c;
@ -60,6 +60,17 @@ struct intf_status {
|
|||
* feed pixels to this interface
|
||||
* @setup_misr: enable/disable MISR
|
||||
* @collect_misr: read MISR signature
|
||||
* @enable_tearcheck: Enables vsync generation and sets up init value of read
|
||||
* pointer and programs the tear check configuration
|
||||
* @disable_tearcheck: Disables tearcheck block
|
||||
* @connect_external_te: Read, modify, write to either set or clear listening to external TE
|
||||
* Return: 1 if TE was originally connected, 0 if not, or -ERROR
|
||||
* @get_vsync_info: Provides the programmed and current line_count
|
||||
* @setup_autorefresh: Configure and enable the autorefresh config
|
||||
* @get_autorefresh: Retrieve autorefresh config from hardware
|
||||
* Return: 0 on success, -ETIMEDOUT on timeout
|
||||
* @vsync_sel: Select vsync signal for tear-effect configuration
|
||||
* @enable_compression: Enable data compression
|
||||
*/
|
||||
struct dpu_hw_intf_ops {
|
||||
void (*setup_timing_gen)(struct dpu_hw_intf *intf,
|
||||
|
@ -78,10 +89,26 @@ struct dpu_hw_intf_ops {
|
|||
u32 (*get_line_count)(struct dpu_hw_intf *intf);
|
||||
|
||||
void (*bind_pingpong_blk)(struct dpu_hw_intf *intf,
|
||||
bool enable,
|
||||
const enum dpu_pingpong pp);
|
||||
void (*setup_misr)(struct dpu_hw_intf *intf, bool enable, u32 frame_count);
|
||||
int (*collect_misr)(struct dpu_hw_intf *intf, u32 *misr_value);
|
||||
|
||||
// Tearcheck on INTF since DPU 5.0.0
|
||||
|
||||
int (*enable_tearcheck)(struct dpu_hw_intf *intf, struct dpu_hw_tear_check *cfg);
|
||||
|
||||
int (*disable_tearcheck)(struct dpu_hw_intf *intf);
|
||||
|
||||
int (*connect_external_te)(struct dpu_hw_intf *intf, bool enable_external_te);
|
||||
|
||||
void (*vsync_sel)(struct dpu_hw_intf *intf, u32 vsync_source);
|
||||
|
||||
/**
|
||||
* Disable autorefresh if enabled
|
||||
*/
|
||||
void (*disable_autorefresh)(struct dpu_hw_intf *intf, uint32_t encoder_id, u16 vdisplay);
|
||||
|
||||
void (*enable_compression)(struct dpu_hw_intf *intf);
|
||||
};
|
||||
|
||||
struct dpu_hw_intf {
|
||||
|
@ -90,22 +117,19 @@ struct dpu_hw_intf {
|
|||
/* intf */
|
||||
enum dpu_intf idx;
|
||||
const struct dpu_intf_cfg *cap;
|
||||
const struct dpu_mdss_cfg *mdss;
|
||||
|
||||
/* ops */
|
||||
struct dpu_hw_intf_ops ops;
|
||||
};
|
||||
|
||||
/**
|
||||
* dpu_hw_intf_init(): Initializes the intf driver for the passed
|
||||
* interface idx.
|
||||
* @idx: interface index for which driver object is required
|
||||
* dpu_hw_intf_init() - Initializes the INTF driver for the passed
|
||||
* interface catalog entry.
|
||||
* @cfg: interface catalog entry for which driver object is required
|
||||
* @addr: mapped register io address of MDP
|
||||
* @m : pointer to mdss catalog data
|
||||
*/
|
||||
struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
|
||||
void __iomem *addr,
|
||||
const struct dpu_mdss_cfg *m);
|
||||
struct dpu_hw_intf *dpu_hw_intf_init(const struct dpu_intf_cfg *cfg,
|
||||
void __iomem *addr);
|
||||
|
||||
/**
|
||||
* dpu_hw_intf_destroy(): Destroys INTF driver context
@ -30,24 +30,6 @@
|
|||
#define LM_MISR_SIGNATURE 0x314
|
||||
|
||||
|
||||
static const struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
|
||||
const struct dpu_mdss_cfg *m,
|
||||
void __iomem *addr,
|
||||
struct dpu_hw_blk_reg_map *b)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < m->mixer_count; i++) {
|
||||
if (mixer == m->mixer[i].id) {
|
||||
b->blk_addr = addr + m->mixer[i].base;
|
||||
b->log_mask = DPU_DBG_MASK_LM;
|
||||
return &m->mixer[i];
|
||||
}
|
||||
}
|
||||
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
/**
|
||||
* _stage_offset(): returns the relative offset of the blend registers
|
||||
* for the stage to be setup
|
||||
|
@ -160,8 +142,7 @@ static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
|
|||
DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
|
||||
}
|
||||
|
||||
static void _setup_mixer_ops(const struct dpu_mdss_cfg *m,
|
||||
struct dpu_hw_lm_ops *ops,
|
||||
static void _setup_mixer_ops(struct dpu_hw_lm_ops *ops,
|
||||
unsigned long features)
|
||||
{
|
||||
ops->setup_mixer_out = dpu_hw_lm_setup_out;
|
||||
|
@ -175,27 +156,27 @@ static void _setup_mixer_ops(const struct dpu_mdss_cfg *m,
|
|||
ops->collect_misr = dpu_hw_lm_collect_misr;
|
||||
}
|
||||
|
||||
struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
|
||||
void __iomem *addr,
|
||||
const struct dpu_mdss_cfg *m)
|
||||
struct dpu_hw_mixer *dpu_hw_lm_init(const struct dpu_lm_cfg *cfg,
|
||||
void __iomem *addr)
|
||||
{
|
||||
struct dpu_hw_mixer *c;
|
||||
const struct dpu_lm_cfg *cfg;
|
||||
|
||||
if (cfg->pingpong == PINGPONG_NONE) {
|
||||
DPU_DEBUG("skip mixer %d without pingpong\n", cfg->id);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
c = kzalloc(sizeof(*c), GFP_KERNEL);
|
||||
if (!c)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
cfg = _lm_offset(idx, m, addr, &c->hw);
|
||||
if (IS_ERR_OR_NULL(cfg)) {
|
||||
kfree(c);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
c->hw.blk_addr = addr + cfg->base;
|
||||
c->hw.log_mask = DPU_DBG_MASK_LM;
|
||||
|
||||
/* Assign ops */
|
||||
c->idx = idx;
|
||||
c->idx = cfg->id;
|
||||
c->cap = cfg;
|
||||
_setup_mixer_ops(m, &c->ops, c->cap->features);
|
||||
_setup_mixer_ops(&c->ops, c->cap->features);
|
||||
|
||||
return c;
|
||||
}
|
||||
|
|
|
@ -93,15 +93,13 @@ static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
|
|||
}
|
||||
|
||||
/**
|
||||
* dpu_hw_lm_init(): Initializes the mixer hw driver object.
|
||||
* dpu_hw_lm_init() - Initializes the mixer hw driver object.
|
||||
* should be called once before accessing every mixer.
|
||||
* @idx: mixer index for which driver object is required
|
||||
* @cfg: mixer catalog entry for which driver object is required
|
||||
* @addr: mapped register io address of MDP
|
||||
* @m : pointer to mdss catalog data
|
||||
*/
|
||||
struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
|
||||
void __iomem *addr,
|
||||
const struct dpu_mdss_cfg *m);
|
||||
struct dpu_hw_mixer *dpu_hw_lm_init(const struct dpu_lm_cfg *cfg,
|
||||
void __iomem *addr);
|
||||
|
||||
/**
|
||||
* dpu_hw_lm_destroy(): Destroys layer mixer driver context
@ -191,7 +191,8 @@ enum dpu_dsc {
|
|||
};
|
||||
|
||||
enum dpu_pingpong {
|
||||
PINGPONG_0 = 1,
|
||||
PINGPONG_NONE,
|
||||
PINGPONG_0,
|
||||
PINGPONG_1,
|
||||
PINGPONG_2,
|
||||
PINGPONG_3,
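/*
 * Note: adding PINGPONG_NONE as the first (zero-valued) enumerator is what
 * lets the bind helpers above replace their bool "enable" parameter with a
 * plain truth test on the pingpong index, e.g.:
 *
 *	if (pp)		// pp != PINGPONG_NONE: bind to (pp - PINGPONG_0)
 *		mux_cfg = (pp - PINGPONG_0) & 0x7;
 *
 * while PINGPONG_NONE leaves the mux at its "disabled" value (0xf).
 */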
|
||||
|
@ -463,4 +464,52 @@ struct dpu_mdss_color {
|
|||
#define DPU_DBG_MASK_DSPP (1 << 10)
|
||||
#define DPU_DBG_MASK_DSC (1 << 11)
|
||||
|
||||
/**
|
||||
* struct dpu_hw_tear_check - Struct contains parameters to configure
|
||||
* tear-effect module. This structure is used to configure tear-check
|
||||
* logic present either in ping-pong or in interface module.
|
||||
* @vsync_count: Ratio of MDP VSYNC clk freq(Hz) to refresh rate divided
|
||||
* by no of lines
|
||||
* @sync_cfg_height: Total vertical lines (display height - 1)
|
||||
* @vsync_init_val: Init value to which the read pointer gets loaded at
|
||||
* vsync edge
|
||||
* @sync_threshold_start: Read pointer threshold start ROI for write operation
|
||||
* @sync_threshold_continue: The minimum number of lines the write pointer
|
||||
* needs to be above the read pointer
|
||||
* @start_pos: The position from which the start_threshold value is added
|
||||
* @rd_ptr_irq: The read pointer line at which interrupt has to be generated
|
||||
* @hw_vsync_mode: Sync with external frame sync input
|
||||
*/
|
||||
struct dpu_hw_tear_check {
|
||||
/*
|
||||
* This is ratio of MDP VSYNC clk freq(Hz) to
|
||||
* refresh rate divided by no of lines
|
||||
*/
|
||||
u32 vsync_count;
|
||||
u32 sync_cfg_height;
|
||||
u32 vsync_init_val;
|
||||
u32 sync_threshold_start;
|
||||
u32 sync_threshold_continue;
|
||||
u32 start_pos;
|
||||
u32 rd_ptr_irq;
|
||||
u8 hw_vsync_mode;
|
||||
};
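/*
 * Worked example (illustrative values, not from this patch): with a 19.2 MHz
 * vsync counter clock, a 60 Hz refresh rate and 2400 total lines,
 * vsync_count = 19200000 / (60 * 2400), roughly 133.
 */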
|
||||
|
||||
/**
|
||||
* struct dpu_hw_pp_vsync_info - Struct contains parameters to configure
|
||||
* read and write pointers for command mode panels
|
||||
* @rd_ptr_init_val: Value of rd pointer at vsync edge
|
||||
* @rd_ptr_frame_count: Num frames sent since enabling interface
|
||||
* @rd_ptr_line_count: Current line on panel (rd ptr)
|
||||
* @wr_ptr_line_count: Current line within pp fifo (wr ptr)
|
||||
* @intf_frame_count: Frames read from intf
|
||||
*/
|
||||
struct dpu_hw_pp_vsync_info {
|
||||
u32 rd_ptr_init_val;
|
||||
u32 rd_ptr_frame_count;
|
||||
u32 rd_ptr_line_count;
|
||||
u32 wr_ptr_line_count;
|
||||
u32 intf_frame_count;
|
||||
};
|
||||
|
||||
#endif /* _DPU_HW_MDSS_H */
@ -14,24 +14,6 @@
|
|||
#define MERGE_3D_MUX 0x000
|
||||
#define MERGE_3D_MODE 0x004
|
||||
|
||||
static const struct dpu_merge_3d_cfg *_merge_3d_offset(enum dpu_merge_3d idx,
|
||||
const struct dpu_mdss_cfg *m,
|
||||
void __iomem *addr,
|
||||
struct dpu_hw_blk_reg_map *b)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < m->merge_3d_count; i++) {
|
||||
if (idx == m->merge_3d[i].id) {
|
||||
b->blk_addr = addr + m->merge_3d[i].base;
|
||||
b->log_mask = DPU_DBG_MASK_PINGPONG;
|
||||
return &m->merge_3d[i];
|
||||
}
|
||||
}
|
||||
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
static void dpu_hw_merge_3d_setup_3d_mode(struct dpu_hw_merge_3d *merge_3d,
|
||||
enum dpu_3d_blend_mode mode_3d)
|
||||
{
|
||||
|
@ -55,24 +37,19 @@ static void _setup_merge_3d_ops(struct dpu_hw_merge_3d *c,
|
|||
c->ops.setup_3d_mode = dpu_hw_merge_3d_setup_3d_mode;
|
||||
};
|
||||
|
||||
struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(enum dpu_merge_3d idx,
|
||||
void __iomem *addr,
|
||||
const struct dpu_mdss_cfg *m)
|
||||
struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(const struct dpu_merge_3d_cfg *cfg,
|
||||
void __iomem *addr)
|
||||
{
|
||||
struct dpu_hw_merge_3d *c;
|
||||
const struct dpu_merge_3d_cfg *cfg;
|
||||
|
||||
c = kzalloc(sizeof(*c), GFP_KERNEL);
|
||||
if (!c)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
cfg = _merge_3d_offset(idx, m, addr, &c->hw);
|
||||
if (IS_ERR_OR_NULL(cfg)) {
|
||||
kfree(c);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
c->hw.blk_addr = addr + cfg->base;
|
||||
c->hw.log_mask = DPU_DBG_MASK_PINGPONG;
|
||||
|
||||
c->idx = idx;
|
||||
c->idx = cfg->id;
|
||||
c->caps = cfg;
|
||||
_setup_merge_3d_ops(c, c->caps->features);
|
@@ -46,16 +46,14 @@ static inline struct dpu_hw_merge_3d *to_dpu_hw_merge_3d(struct dpu_hw_blk *hw)
}

/**
 * dpu_hw_merge_3d_init - initializes the merge_3d driver for the passed
 * merge_3d idx.
 * @idx: Pingpong index for which driver object is required
 * dpu_hw_merge_3d_init() - Initializes the merge_3d driver for the passed
 * merge3d catalog entry.
 * @cfg: Pingpong catalog entry for which driver object is required
 * @addr: Mapped register io address of MDP
 * @m: Pointer to mdss catalog data
 * Returns: Error code or allocated dpu_hw_merge_3d context
 * Return: Error code or allocated dpu_hw_merge_3d context
 */
struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(enum dpu_merge_3d idx,
		void __iomem *addr,
		const struct dpu_mdss_cfg *m);
struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(const struct dpu_merge_3d_cfg *cfg,
		void __iomem *addr);

/**
 * dpu_hw_merge_3d_destroy - destroys merge_3d driver context

@ -42,24 +42,6 @@ static u32 dither_depth_map[DITHER_DEPTH_MAP_INDEX] = {
|
|||
0, 0, 0, 0, 0, 0, 0, 1, 2
|
||||
};
|
||||
|
||||
static const struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
|
||||
const struct dpu_mdss_cfg *m,
|
||||
void __iomem *addr,
|
||||
struct dpu_hw_blk_reg_map *b)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < m->pingpong_count; i++) {
|
||||
if (pp == m->pingpong[i].id) {
|
||||
b->blk_addr = addr + m->pingpong[i].base;
|
||||
b->log_mask = DPU_DBG_MASK_PINGPONG;
|
||||
return &m->pingpong[i];
|
||||
}
|
||||
}
|
||||
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
static void dpu_hw_pp_setup_dither(struct dpu_hw_pingpong *pp,
|
||||
struct dpu_hw_dither_cfg *cfg)
|
||||
{
|
||||
|
@ -91,7 +73,7 @@ static void dpu_hw_pp_setup_dither(struct dpu_hw_pingpong *pp,
|
|||
DPU_REG_WRITE(c, base + PP_DITHER_EN, 1);
|
||||
}
|
||||
|
||||
static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp,
|
||||
static int dpu_hw_pp_enable_te(struct dpu_hw_pingpong *pp,
|
||||
struct dpu_hw_tear_check *te)
|
||||
{
|
||||
struct dpu_hw_blk_reg_map *c;
|
||||
|
@ -118,6 +100,8 @@ static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp,
|
|||
DPU_REG_WRITE(c, PP_SYNC_WRCOUNT,
|
||||
(te->start_pos + te->sync_threshold_start + 1));
|
||||
|
||||
DPU_REG_WRITE(c, PP_TEAR_CHECK_EN, 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -144,24 +128,7 @@ static bool dpu_hw_pp_get_autorefresh_config(struct dpu_hw_pingpong *pp,
|
|||
return !!((val & BIT(31)) >> 31);
|
||||
}
|
||||
|
||||
static int dpu_hw_pp_poll_timeout_wr_ptr(struct dpu_hw_pingpong *pp,
|
||||
u32 timeout_us)
|
||||
{
|
||||
struct dpu_hw_blk_reg_map *c;
|
||||
u32 val;
|
||||
int rc;
|
||||
|
||||
if (!pp)
|
||||
return -EINVAL;
|
||||
|
||||
c = &pp->hw;
|
||||
rc = readl_poll_timeout(c->blk_addr + PP_LINE_COUNT,
|
||||
val, (val & 0xffff) >= 1, 10, timeout_us);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int dpu_hw_pp_enable_te(struct dpu_hw_pingpong *pp, bool enable)
|
||||
static int dpu_hw_pp_disable_te(struct dpu_hw_pingpong *pp)
|
||||
{
|
||||
struct dpu_hw_blk_reg_map *c;
|
||||
|
||||
|
@ -169,7 +136,7 @@ static int dpu_hw_pp_enable_te(struct dpu_hw_pingpong *pp, bool enable)
|
|||
return -EINVAL;
|
||||
c = &pp->hw;
|
||||
|
||||
DPU_REG_WRITE(c, PP_TEAR_CHECK_EN, enable);
|
||||
DPU_REG_WRITE(c, PP_TEAR_CHECK_EN, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -245,6 +212,49 @@ static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
|
|||
return line;
|
||||
}
|
||||
|
||||
static void dpu_hw_pp_disable_autorefresh(struct dpu_hw_pingpong *pp,
|
||||
uint32_t encoder_id, u16 vdisplay)
|
||||
{
|
||||
struct dpu_hw_pp_vsync_info info;
|
||||
int trial = 0;
|
||||
|
||||
/* If autorefresh is already disabled, we have nothing to do */
|
||||
if (!dpu_hw_pp_get_autorefresh_config(pp, NULL))
|
||||
return;
|
||||
|
||||
/*
|
||||
* If autorefresh is enabled, disable it and make sure it is safe to
|
||||
* proceed with current frame commit/push. Sequence followed is,
|
||||
* 1. Disable TE
|
||||
* 2. Disable autorefresh config
|
||||
* 4. Poll for frame transfer ongoing to be false
|
||||
* 5. Enable TE back
|
||||
*/
|
||||
|
||||
dpu_hw_pp_connect_external_te(pp, false);
|
||||
dpu_hw_pp_setup_autorefresh_config(pp, 0, false);
|
||||
|
||||
do {
|
||||
udelay(DPU_ENC_MAX_POLL_TIMEOUT_US);
|
||||
if ((trial * DPU_ENC_MAX_POLL_TIMEOUT_US)
|
||||
> (KICKOFF_TIMEOUT_MS * USEC_PER_MSEC)) {
|
||||
DPU_ERROR("enc%d pp%d disable autorefresh failed\n",
|
||||
encoder_id, pp->idx - PINGPONG_0);
|
||||
break;
|
||||
}
|
||||
|
||||
trial++;
|
||||
|
||||
dpu_hw_pp_get_vsync_info(pp, &info);
|
||||
} while (info.wr_ptr_line_count > 0 &&
|
||||
info.wr_ptr_line_count < vdisplay);
|
||||
|
||||
dpu_hw_pp_connect_external_te(pp, true);
|
||||
|
||||
DPU_DEBUG("enc%d pp%d disabled autorefresh\n",
|
||||
encoder_id, pp->idx - PINGPONG_0);
|
||||
}
|
||||
|
||||
static int dpu_hw_pp_dsc_enable(struct dpu_hw_pingpong *pp)
|
||||
{
|
||||
struct dpu_hw_blk_reg_map *c = &pp->hw;
|
||||
|
@ -274,40 +284,37 @@ static int dpu_hw_pp_setup_dsc(struct dpu_hw_pingpong *pp)
|
|||
static void _setup_pingpong_ops(struct dpu_hw_pingpong *c,
|
||||
unsigned long features)
|
||||
{
|
||||
c->ops.setup_tearcheck = dpu_hw_pp_setup_te_config;
|
||||
c->ops.enable_tearcheck = dpu_hw_pp_enable_te;
|
||||
c->ops.connect_external_te = dpu_hw_pp_connect_external_te;
|
||||
c->ops.get_vsync_info = dpu_hw_pp_get_vsync_info;
|
||||
c->ops.setup_autorefresh = dpu_hw_pp_setup_autorefresh_config;
|
||||
c->ops.get_autorefresh = dpu_hw_pp_get_autorefresh_config;
|
||||
c->ops.poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
|
||||
c->ops.get_line_count = dpu_hw_pp_get_line_count;
|
||||
c->ops.setup_dsc = dpu_hw_pp_setup_dsc;
|
||||
c->ops.enable_dsc = dpu_hw_pp_dsc_enable;
|
||||
c->ops.disable_dsc = dpu_hw_pp_dsc_disable;
|
||||
if (test_bit(DPU_PINGPONG_TE, &features)) {
|
||||
c->ops.enable_tearcheck = dpu_hw_pp_enable_te;
|
||||
c->ops.disable_tearcheck = dpu_hw_pp_disable_te;
|
||||
c->ops.connect_external_te = dpu_hw_pp_connect_external_te;
|
||||
c->ops.get_line_count = dpu_hw_pp_get_line_count;
|
||||
c->ops.disable_autorefresh = dpu_hw_pp_disable_autorefresh;
|
||||
}
|
||||
|
||||
if (test_bit(DPU_PINGPONG_DSC, &features)) {
|
||||
c->ops.setup_dsc = dpu_hw_pp_setup_dsc;
|
||||
c->ops.enable_dsc = dpu_hw_pp_dsc_enable;
|
||||
c->ops.disable_dsc = dpu_hw_pp_dsc_disable;
|
||||
}
|
||||
|
||||
if (test_bit(DPU_PINGPONG_DITHER, &features))
|
||||
c->ops.setup_dither = dpu_hw_pp_setup_dither;
|
||||
};
|
||||
|
||||
struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
|
||||
void __iomem *addr,
|
||||
const struct dpu_mdss_cfg *m)
|
||||
struct dpu_hw_pingpong *dpu_hw_pingpong_init(const struct dpu_pingpong_cfg *cfg,
|
||||
void __iomem *addr)
|
||||
{
|
||||
struct dpu_hw_pingpong *c;
|
||||
const struct dpu_pingpong_cfg *cfg;
|
||||
|
||||
c = kzalloc(sizeof(*c), GFP_KERNEL);
|
||||
if (!c)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
cfg = _pingpong_offset(idx, m, addr, &c->hw);
|
||||
if (IS_ERR_OR_NULL(cfg)) {
|
||||
kfree(c);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
c->hw.blk_addr = addr + cfg->base;
|
||||
c->hw.log_mask = DPU_DBG_MASK_PINGPONG;
|
||||
|
||||
c->idx = idx;
|
||||
c->idx = cfg->id;
|
||||
c->caps = cfg;
|
||||
_setup_pingpong_ops(c, c->caps->features);
|
||||
|
||||
|
|
|
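Since the tearcheck, autorefresh and DSC ops are now only populated when the matching DPU_PINGPONG_* feature bit is set, callers are expected to test the pointer before use. A hedged sketch of the encoder side (phys_enc and its members are the usual dpu_encoder_phys names, shown here only for illustration):

	if (phys_enc->hw_pp->ops.disable_autorefresh)
		phys_enc->hw_pp->ops.disable_autorefresh(phys_enc->hw_pp,
							 DRMID(phys_enc->parent),
							 phys_enc->cached_mode.vdisplay);

	if (phys_enc->hw_pp->ops.disable_tearcheck)
		phys_enc->hw_pp->ops.disable_tearcheck(phys_enc->hw_pp);
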
@ -13,28 +13,6 @@
|
|||
|
||||
struct dpu_hw_pingpong;
|
||||
|
||||
struct dpu_hw_tear_check {
|
||||
/*
|
||||
* This is ratio of MDP VSYNC clk freq(Hz) to
|
||||
* refresh rate divided by no of lines
|
||||
*/
|
||||
u32 vsync_count;
|
||||
u32 sync_cfg_height;
|
||||
u32 vsync_init_val;
|
||||
u32 sync_threshold_start;
|
||||
u32 sync_threshold_continue;
|
||||
u32 start_pos;
|
||||
u32 rd_ptr_irq;
|
||||
u8 hw_vsync_mode;
|
||||
};
|
||||
|
||||
struct dpu_hw_pp_vsync_info {
|
||||
u32 rd_ptr_init_val; /* value of rd pointer at vsync edge */
|
||||
u32 rd_ptr_frame_count; /* num frames sent since enabling interface */
|
||||
u32 rd_ptr_line_count; /* current line on panel (rd ptr) */
|
||||
u32 wr_ptr_line_count; /* current line within pp fifo (wr ptr) */
|
||||
};
|
||||
|
||||
/**
|
||||
* struct dpu_hw_dither_cfg - dither feature structure
|
||||
* @flags: for customizing operations
|
||||
|
@ -59,11 +37,8 @@ struct dpu_hw_dither_cfg {
|
|||
*
|
||||
* struct dpu_hw_pingpong_ops : Interface to the pingpong Hw driver functions
|
||||
* Assumption is these functions will be called after clocks are enabled
|
||||
* @setup_tearcheck : program tear check values
|
||||
* @enable_tearcheck : enables tear check
|
||||
* @get_vsync_info : retries timing info of the panel
|
||||
* @setup_autorefresh : configure and enable the autorefresh config
|
||||
* @get_autorefresh : retrieve autorefresh config from hardware
|
||||
* @enable_tearcheck: program and enable tear check block
|
||||
* @disable_tearcheck: disable able tear check block
|
||||
* @setup_dither : function to program the dither hw block
|
||||
* @get_line_count: obtain current vertical line counter
|
||||
*/
|
||||
|
@ -72,14 +47,13 @@ struct dpu_hw_pingpong_ops {
|
|||
* enables vysnc generation and sets up init value of
|
||||
* read pointer and programs the tear check cofiguration
|
||||
*/
|
||||
int (*setup_tearcheck)(struct dpu_hw_pingpong *pp,
|
||||
int (*enable_tearcheck)(struct dpu_hw_pingpong *pp,
|
||||
struct dpu_hw_tear_check *cfg);
|
||||
|
||||
/**
|
||||
* enables tear check block
|
||||
* disables tear check block
|
||||
*/
|
||||
int (*enable_tearcheck)(struct dpu_hw_pingpong *pp,
|
||||
bool enable);
|
||||
int (*disable_tearcheck)(struct dpu_hw_pingpong *pp);
|
||||
|
||||
/**
|
||||
* read, modify, write to either set or clear listening to external TE
|
||||
|
@ -88,36 +62,16 @@ struct dpu_hw_pingpong_ops {
|
|||
int (*connect_external_te)(struct dpu_hw_pingpong *pp,
|
||||
bool enable_external_te);
|
||||
|
||||
/**
|
||||
* provides the programmed and current
|
||||
* line_count
|
||||
*/
|
||||
int (*get_vsync_info)(struct dpu_hw_pingpong *pp,
|
||||
struct dpu_hw_pp_vsync_info *info);
|
||||
|
||||
/**
|
||||
* configure and enable the autorefresh config
|
||||
*/
|
||||
void (*setup_autorefresh)(struct dpu_hw_pingpong *pp,
|
||||
u32 frame_count, bool enable);
|
||||
|
||||
/**
|
||||
* retrieve autorefresh config from hardware
|
||||
*/
|
||||
bool (*get_autorefresh)(struct dpu_hw_pingpong *pp,
|
||||
u32 *frame_count);
|
||||
|
||||
/**
|
||||
* poll until write pointer transmission starts
|
||||
* @Return: 0 on success, -ETIMEDOUT on timeout
|
||||
*/
|
||||
int (*poll_timeout_wr_ptr)(struct dpu_hw_pingpong *pp, u32 timeout_us);
|
||||
|
||||
/**
|
||||
* Obtain current vertical line counter
|
||||
*/
|
||||
u32 (*get_line_count)(struct dpu_hw_pingpong *pp);
|
||||
|
||||
/**
|
||||
* Disable autorefresh if enabled
|
||||
*/
|
||||
void (*disable_autorefresh)(struct dpu_hw_pingpong *pp, uint32_t encoder_id, u16 vdisplay);
|
||||
|
||||
/**
|
||||
* Setup dither matix for pingpong block
|
||||
*/
|
||||
|
@ -165,16 +119,14 @@ static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
|
|||
}
|
||||
|
||||
/**
|
||||
* dpu_hw_pingpong_init - initializes the pingpong driver for the passed
|
||||
* pingpong idx.
|
||||
* @idx: Pingpong index for which driver object is required
|
||||
* dpu_hw_pingpong_init() - initializes the pingpong driver for the passed
|
||||
* pingpong catalog entry.
|
||||
* @cfg: Pingpong catalog entry for which driver object is required
|
||||
* @addr: Mapped register io address of MDP
|
||||
* @m: Pointer to mdss catalog data
|
||||
* Returns: Error code or allocated dpu_hw_pingpong context
|
||||
* Return: Error code or allocated dpu_hw_pingpong context
|
||||
*/
|
||||
struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
|
||||
void __iomem *addr,
|
||||
const struct dpu_mdss_cfg *m);
|
||||
struct dpu_hw_pingpong *dpu_hw_pingpong_init(const struct dpu_pingpong_cfg *cfg,
|
||||
void __iomem *addr);
|
||||
|
||||
/**
|
||||
* dpu_hw_pingpong_destroy - destroys pingpong driver context
|
||||
|
|
|
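For reference, a hypothetical dpu_pingpong_cfg catalog entry that would light up the feature-gated ops above; the name, register offset and length are illustrative only, real entries live in the dpu_hw_catalog tables:

	static const struct dpu_pingpong_cfg example_pp = {
		.name = "pingpong_0", .id = PINGPONG_0,
		.base = 0x70000, .len = 0xd4,	/* offsets made up for the example */
		.features = BIT(DPU_PINGPONG_TE) | BIT(DPU_PINGPONG_DITHER),
		.merge_3d = MERGE_3D_0,
	};

	hw_pp = dpu_hw_pingpong_init(&example_pp, mmio);
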
@ -12,7 +12,7 @@
|
|||
|
||||
#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
|
||||
|
||||
/* DPU_SSPP_SRC */
|
||||
/* SSPP registers */
|
||||
#define SSPP_SRC_SIZE 0x00
|
||||
#define SSPP_SRC_XY 0x08
|
||||
#define SSPP_OUT_SIZE 0x0c
|
||||
|
@ -26,45 +26,18 @@
|
|||
#define SSPP_SRC_FORMAT 0x30
|
||||
#define SSPP_SRC_UNPACK_PATTERN 0x34
|
||||
#define SSPP_SRC_OP_MODE 0x38
|
||||
|
||||
/* SSPP_MULTIRECT*/
|
||||
#define SSPP_SRC_SIZE_REC1 0x16C
|
||||
#define SSPP_SRC_XY_REC1 0x168
|
||||
#define SSPP_OUT_SIZE_REC1 0x160
|
||||
#define SSPP_OUT_XY_REC1 0x164
|
||||
#define SSPP_SRC_FORMAT_REC1 0x174
|
||||
#define SSPP_SRC_UNPACK_PATTERN_REC1 0x178
|
||||
#define SSPP_SRC_OP_MODE_REC1 0x17C
|
||||
#define SSPP_MULTIRECT_OPMODE 0x170
|
||||
#define SSPP_SRC_CONSTANT_COLOR_REC1 0x180
|
||||
#define SSPP_EXCL_REC_SIZE_REC1 0x184
|
||||
#define SSPP_EXCL_REC_XY_REC1 0x188
|
||||
|
||||
#define MDSS_MDP_OP_DEINTERLACE BIT(22)
|
||||
#define MDSS_MDP_OP_DEINTERLACE_ODD BIT(23)
|
||||
#define MDSS_MDP_OP_IGC_ROM_1 BIT(18)
|
||||
#define MDSS_MDP_OP_IGC_ROM_0 BIT(17)
|
||||
#define MDSS_MDP_OP_IGC_EN BIT(16)
|
||||
#define MDSS_MDP_OP_FLIP_UD BIT(14)
|
||||
#define MDSS_MDP_OP_FLIP_LR BIT(13)
|
||||
#define MDSS_MDP_OP_BWC_EN BIT(0)
|
||||
#define MDSS_MDP_OP_PE_OVERRIDE BIT(31)
|
||||
#define MDSS_MDP_OP_BWC_LOSSLESS (0 << 1)
|
||||
#define MDSS_MDP_OP_BWC_Q_HIGH (1 << 1)
|
||||
#define MDSS_MDP_OP_BWC_Q_MED (2 << 1)
|
||||
|
||||
#define SSPP_SRC_CONSTANT_COLOR 0x3c
|
||||
#define SSPP_EXCL_REC_CTL 0x40
|
||||
#define SSPP_UBWC_STATIC_CTRL 0x44
|
||||
#define SSPP_FETCH_CONFIG 0x048
|
||||
#define SSPP_FETCH_CONFIG 0x48
|
||||
#define SSPP_DANGER_LUT 0x60
|
||||
#define SSPP_SAFE_LUT 0x64
|
||||
#define SSPP_CREQ_LUT 0x68
|
||||
#define SSPP_QOS_CTRL 0x6C
|
||||
#define SSPP_DECIMATION_CONFIG 0xB4
|
||||
#define SSPP_SRC_ADDR_SW_STATUS 0x70
|
||||
#define SSPP_CREQ_LUT_0 0x74
|
||||
#define SSPP_CREQ_LUT_1 0x78
|
||||
#define SSPP_DECIMATION_CONFIG 0xB4
|
||||
#define SSPP_SW_PIX_EXT_C0_LR 0x100
|
||||
#define SSPP_SW_PIX_EXT_C0_TB 0x104
|
||||
#define SSPP_SW_PIX_EXT_C0_REQ_PIXELS 0x108
|
||||
|
@ -81,11 +54,33 @@
|
|||
#define SSPP_TRAFFIC_SHAPER_PREFILL 0x150
|
||||
#define SSPP_TRAFFIC_SHAPER_REC1_PREFILL 0x154
|
||||
#define SSPP_TRAFFIC_SHAPER_REC1 0x158
|
||||
#define SSPP_OUT_SIZE_REC1 0x160
|
||||
#define SSPP_OUT_XY_REC1 0x164
|
||||
#define SSPP_SRC_XY_REC1 0x168
|
||||
#define SSPP_SRC_SIZE_REC1 0x16C
|
||||
#define SSPP_MULTIRECT_OPMODE 0x170
|
||||
#define SSPP_SRC_FORMAT_REC1 0x174
|
||||
#define SSPP_SRC_UNPACK_PATTERN_REC1 0x178
|
||||
#define SSPP_SRC_OP_MODE_REC1 0x17C
|
||||
#define SSPP_SRC_CONSTANT_COLOR_REC1 0x180
|
||||
#define SSPP_EXCL_REC_SIZE_REC1 0x184
|
||||
#define SSPP_EXCL_REC_XY_REC1 0x188
|
||||
#define SSPP_EXCL_REC_SIZE 0x1B4
|
||||
#define SSPP_EXCL_REC_XY 0x1B8
|
||||
#define SSPP_VIG_OP_MODE 0x0
|
||||
#define SSPP_VIG_CSC_10_OP_MODE 0x0
|
||||
#define SSPP_TRAFFIC_SHAPER_BPC_MAX 0xFF
|
||||
|
||||
/* SSPP_SRC_OP_MODE & OP_MODE_REC1 */
|
||||
#define MDSS_MDP_OP_DEINTERLACE BIT(22)
|
||||
#define MDSS_MDP_OP_DEINTERLACE_ODD BIT(23)
|
||||
#define MDSS_MDP_OP_IGC_ROM_1 BIT(18)
|
||||
#define MDSS_MDP_OP_IGC_ROM_0 BIT(17)
|
||||
#define MDSS_MDP_OP_IGC_EN BIT(16)
|
||||
#define MDSS_MDP_OP_FLIP_UD BIT(14)
|
||||
#define MDSS_MDP_OP_FLIP_LR BIT(13)
|
||||
#define MDSS_MDP_OP_BWC_EN BIT(0)
|
||||
#define MDSS_MDP_OP_PE_OVERRIDE BIT(31)
|
||||
#define MDSS_MDP_OP_BWC_LOSSLESS (0 << 1)
|
||||
#define MDSS_MDP_OP_BWC_Q_HIGH (1 << 1)
|
||||
#define MDSS_MDP_OP_BWC_Q_MED (2 << 1)
|
||||
|
||||
/* SSPP_QOS_CTRL */
|
||||
#define SSPP_QOS_CTRL_VBLANK_EN BIT(16)
|
||||
|
@ -96,6 +91,7 @@
|
|||
#define SSPP_QOS_CTRL_CREQ_VBLANK_OFF 20
|
||||
|
||||
/* DPU_SSPP_SCALER_QSEED2 */
|
||||
#define SSPP_VIG_OP_MODE 0x0
|
||||
#define SCALE_CONFIG 0x04
|
||||
#define COMP0_3_PHASE_STEP_X 0x10
|
||||
#define COMP0_3_PHASE_STEP_Y 0x14
|
||||
|
@ -107,6 +103,9 @@
|
|||
#define COMP1_2_INIT_PHASE_Y 0x2C
|
||||
#define VIG_0_QSEED2_SHARP 0x30
|
||||
|
||||
/* SSPP_TRAFFIC_SHAPER and _REC1 */
|
||||
#define SSPP_TRAFFIC_SHAPER_BPC_MAX 0xFF
|
||||
|
||||
/*
|
||||
* Definitions for ViG op modes
|
||||
*/
|
||||
|
@ -128,6 +127,7 @@
|
|||
/*
|
||||
* Definitions for CSC 10 op modes
|
||||
*/
|
||||
#define SSPP_VIG_CSC_10_OP_MODE 0x0
|
||||
#define VIG_CSC_10_SRC_DATAFMT BIT(1)
|
||||
#define VIG_CSC_10_EN BIT(0)
|
||||
#define CSC_10BIT_OFFSET 4
|
||||
|
@ -136,45 +136,12 @@
|
|||
#define TS_CLK 19200000
|
||||
|
||||
|
||||
static int _sspp_subblk_offset(struct dpu_hw_sspp *ctx,
|
||||
int s_id,
|
||||
u32 *idx)
|
||||
{
|
||||
int rc = 0;
|
||||
const struct dpu_sspp_sub_blks *sblk;
|
||||
|
||||
if (!ctx || !ctx->cap || !ctx->cap->sblk)
|
||||
return -EINVAL;
|
||||
|
||||
sblk = ctx->cap->sblk;
|
||||
|
||||
switch (s_id) {
|
||||
case DPU_SSPP_SRC:
|
||||
*idx = sblk->src_blk.base;
|
||||
break;
|
||||
case DPU_SSPP_SCALER_QSEED2:
|
||||
case DPU_SSPP_SCALER_QSEED3:
|
||||
case DPU_SSPP_SCALER_RGB:
|
||||
*idx = sblk->scaler_blk.base;
|
||||
break;
|
||||
case DPU_SSPP_CSC:
|
||||
case DPU_SSPP_CSC_10BIT:
|
||||
*idx = sblk->csc_blk.base;
|
||||
break;
|
||||
default:
|
||||
rc = -EINVAL;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void dpu_hw_sspp_setup_multirect(struct dpu_sw_pipe *pipe)
|
||||
{
|
||||
struct dpu_hw_sspp *ctx = pipe->sspp;
|
||||
u32 mode_mask;
|
||||
u32 idx;
|
||||
|
||||
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
|
||||
if (!ctx)
|
||||
return;
|
||||
|
||||
if (pipe->multirect_index == DPU_SSPP_RECT_SOLO) {
|
||||
|
@ -185,7 +152,7 @@ static void dpu_hw_sspp_setup_multirect(struct dpu_sw_pipe *pipe)
|
|||
*/
|
||||
mode_mask = 0;
|
||||
} else {
|
||||
mode_mask = DPU_REG_READ(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx);
|
||||
mode_mask = DPU_REG_READ(&ctx->hw, SSPP_MULTIRECT_OPMODE);
|
||||
mode_mask |= pipe->multirect_index;
|
||||
if (pipe->multirect_mode == DPU_SSPP_MULTIRECT_TIME_MX)
|
||||
mode_mask |= BIT(2);
|
||||
|
@ -193,46 +160,42 @@ static void dpu_hw_sspp_setup_multirect(struct dpu_sw_pipe *pipe)
|
|||
mode_mask &= ~BIT(2);
|
||||
}
|
||||
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx, mode_mask);
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_MULTIRECT_OPMODE, mode_mask);
|
||||
}
|
||||
|
||||
static void _sspp_setup_opmode(struct dpu_hw_sspp *ctx,
|
||||
u32 mask, u8 en)
|
||||
{
|
||||
u32 idx;
|
||||
const struct dpu_sspp_sub_blks *sblk = ctx->cap->sblk;
|
||||
u32 opmode;
|
||||
|
||||
if (!test_bit(DPU_SSPP_SCALER_QSEED2, &ctx->cap->features) ||
|
||||
_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED2, &idx) ||
|
||||
!test_bit(DPU_SSPP_CSC, &ctx->cap->features))
|
||||
return;
|
||||
|
||||
opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_OP_MODE + idx);
|
||||
opmode = DPU_REG_READ(&ctx->hw, sblk->scaler_blk.base + SSPP_VIG_OP_MODE);
|
||||
|
||||
if (en)
|
||||
opmode |= mask;
|
||||
else
|
||||
opmode &= ~mask;
|
||||
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_VIG_OP_MODE + idx, opmode);
|
||||
DPU_REG_WRITE(&ctx->hw, sblk->scaler_blk.base + SSPP_VIG_OP_MODE, opmode);
|
||||
}
|
||||
|
||||
static void _sspp_setup_csc10_opmode(struct dpu_hw_sspp *ctx,
|
||||
u32 mask, u8 en)
|
||||
{
|
||||
u32 idx;
|
||||
const struct dpu_sspp_sub_blks *sblk = ctx->cap->sblk;
|
||||
u32 opmode;
|
||||
|
||||
if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC_10BIT, &idx))
|
||||
return;
|
||||
|
||||
opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx);
|
||||
opmode = DPU_REG_READ(&ctx->hw, sblk->csc_blk.base + SSPP_VIG_CSC_10_OP_MODE);
|
||||
if (en)
|
||||
opmode |= mask;
|
||||
else
|
||||
opmode &= ~mask;
|
||||
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx, opmode);
|
||||
DPU_REG_WRITE(&ctx->hw, sblk->csc_blk.base + SSPP_VIG_CSC_10_OP_MODE, opmode);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -247,9 +210,8 @@ static void dpu_hw_sspp_setup_format(struct dpu_sw_pipe *pipe,
|
|||
u32 opmode = 0;
|
||||
u32 fast_clear = 0;
|
||||
u32 op_mode_off, unpack_pat_off, format_off;
|
||||
u32 idx;
|
||||
|
||||
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !fmt)
|
||||
if (!ctx || !fmt)
|
||||
return;
|
||||
|
||||
if (pipe->multirect_index == DPU_SSPP_RECT_SOLO ||
|
||||
|
@ -264,7 +226,7 @@ static void dpu_hw_sspp_setup_format(struct dpu_sw_pipe *pipe,
|
|||
}
|
||||
|
||||
c = &ctx->hw;
|
||||
opmode = DPU_REG_READ(c, op_mode_off + idx);
|
||||
opmode = DPU_REG_READ(c, op_mode_off);
|
||||
opmode &= ~(MDSS_MDP_OP_FLIP_LR | MDSS_MDP_OP_FLIP_UD |
|
||||
MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE);
|
||||
|
||||
|
@ -352,12 +314,12 @@ static void dpu_hw_sspp_setup_format(struct dpu_sw_pipe *pipe,
|
|||
VIG_CSC_10_EN | VIG_CSC_10_SRC_DATAFMT,
|
||||
DPU_FORMAT_IS_YUV(fmt));
|
||||
|
||||
DPU_REG_WRITE(c, format_off + idx, src_format);
|
||||
DPU_REG_WRITE(c, unpack_pat_off + idx, unpack);
|
||||
DPU_REG_WRITE(c, op_mode_off + idx, opmode);
|
||||
DPU_REG_WRITE(c, format_off, src_format);
|
||||
DPU_REG_WRITE(c, unpack_pat_off, unpack);
|
||||
DPU_REG_WRITE(c, op_mode_off, opmode);
|
||||
|
||||
/* clear previous UBWC error */
|
||||
DPU_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS + idx, BIT(31));
|
||||
DPU_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS, BIT(31));
|
||||
}
|
||||
|
||||
static void dpu_hw_sspp_setup_pe_config(struct dpu_hw_sspp *ctx,
|
||||
|
@ -368,9 +330,8 @@ static void dpu_hw_sspp_setup_pe_config(struct dpu_hw_sspp *ctx,
|
|||
u32 lr_pe[4], tb_pe[4], tot_req_pixels[4];
|
||||
const u32 bytemask = 0xff;
|
||||
const u32 shortmask = 0xffff;
|
||||
u32 idx;
|
||||
|
||||
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !pe_ext)
|
||||
if (!ctx || !pe_ext)
|
||||
return;
|
||||
|
||||
c = &ctx->hw;
|
||||
|
@ -400,21 +361,21 @@ static void dpu_hw_sspp_setup_pe_config(struct dpu_hw_sspp *ctx,
|
|||
}
|
||||
|
||||
/* color 0 */
|
||||
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_LR + idx, lr_pe[0]);
|
||||
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_TB + idx, tb_pe[0]);
|
||||
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_REQ_PIXELS + idx,
|
||||
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_LR, lr_pe[0]);
|
||||
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_TB, tb_pe[0]);
|
||||
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_REQ_PIXELS,
|
||||
tot_req_pixels[0]);
|
||||
|
||||
/* color 1 and color 2 */
|
||||
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_LR + idx, lr_pe[1]);
|
||||
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_TB + idx, tb_pe[1]);
|
||||
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS + idx,
|
||||
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_LR, lr_pe[1]);
|
||||
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_TB, tb_pe[1]);
|
||||
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS,
|
||||
tot_req_pixels[1]);
|
||||
|
||||
/* color 3 */
|
||||
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_LR + idx, lr_pe[3]);
|
||||
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_TB + idx, lr_pe[3]);
|
||||
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_REQ_PIXELS + idx,
|
||||
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_LR, lr_pe[3]);
|
||||
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_TB, lr_pe[3]);
|
||||
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_REQ_PIXELS,
|
||||
tot_req_pixels[3]);
|
||||
}
|
||||
|
||||
|
@ -422,25 +383,22 @@ static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_sspp *ctx,
|
|||
struct dpu_hw_scaler3_cfg *scaler3_cfg,
|
||||
const struct dpu_format *format)
|
||||
{
|
||||
u32 idx;
|
||||
|
||||
if (_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx)
|
||||
|| !scaler3_cfg)
|
||||
if (!ctx || !scaler3_cfg)
|
||||
return;
|
||||
|
||||
dpu_hw_setup_scaler3(&ctx->hw, scaler3_cfg, idx,
|
||||
dpu_hw_setup_scaler3(&ctx->hw, scaler3_cfg,
|
||||
ctx->cap->sblk->scaler_blk.base,
|
||||
ctx->cap->sblk->scaler_blk.version,
|
||||
format);
|
||||
}
|
||||
|
||||
static u32 _dpu_hw_sspp_get_scaler3_ver(struct dpu_hw_sspp *ctx)
|
||||
{
|
||||
u32 idx;
|
||||
|
||||
if (!ctx || _sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx))
|
||||
if (!ctx)
|
||||
return 0;
|
||||
|
||||
return dpu_hw_get_scaler3_ver(&ctx->hw, idx);
|
||||
return dpu_hw_get_scaler3_ver(&ctx->hw,
|
||||
ctx->cap->sblk->scaler_blk.base);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -453,9 +411,8 @@ static void dpu_hw_sspp_setup_rects(struct dpu_sw_pipe *pipe,
|
|||
struct dpu_hw_blk_reg_map *c;
|
||||
u32 src_size, src_xy, dst_size, dst_xy;
|
||||
u32 src_size_off, src_xy_off, out_size_off, out_xy_off;
|
||||
u32 idx;
|
||||
|
||||
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !cfg)
|
||||
if (!ctx || !cfg)
|
||||
return;
|
||||
|
||||
c = &ctx->hw;
|
||||
|
@ -483,10 +440,10 @@ static void dpu_hw_sspp_setup_rects(struct dpu_sw_pipe *pipe,
|
|||
drm_rect_width(&cfg->dst_rect);
|
||||
|
||||
/* rectangle register programming */
|
||||
DPU_REG_WRITE(c, src_size_off + idx, src_size);
|
||||
DPU_REG_WRITE(c, src_xy_off + idx, src_xy);
|
||||
DPU_REG_WRITE(c, out_size_off + idx, dst_size);
|
||||
DPU_REG_WRITE(c, out_xy_off + idx, dst_xy);
|
||||
DPU_REG_WRITE(c, src_size_off, src_size);
|
||||
DPU_REG_WRITE(c, src_xy_off, src_xy);
|
||||
DPU_REG_WRITE(c, out_size_off, dst_size);
|
||||
DPU_REG_WRITE(c, out_xy_off, dst_xy);
|
||||
}
|
||||
|
||||
static void dpu_hw_sspp_setup_sourceaddress(struct dpu_sw_pipe *pipe,
|
||||
|
@ -495,24 +452,23 @@ static void dpu_hw_sspp_setup_sourceaddress(struct dpu_sw_pipe *pipe,
|
|||
struct dpu_hw_sspp *ctx = pipe->sspp;
|
||||
u32 ystride0, ystride1;
|
||||
int i;
|
||||
u32 idx;
|
||||
|
||||
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
|
||||
if (!ctx)
|
||||
return;
|
||||
|
||||
if (pipe->multirect_index == DPU_SSPP_RECT_SOLO) {
|
||||
for (i = 0; i < ARRAY_SIZE(layout->plane_addr); i++)
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx + i * 0x4,
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + i * 0x4,
|
||||
layout->plane_addr[i]);
|
||||
} else if (pipe->multirect_index == DPU_SSPP_RECT_0) {
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx,
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR,
|
||||
layout->plane_addr[0]);
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_SRC2_ADDR + idx,
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_SRC2_ADDR,
|
||||
layout->plane_addr[2]);
|
||||
} else {
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_SRC1_ADDR + idx,
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_SRC1_ADDR,
|
||||
layout->plane_addr[0]);
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_SRC3_ADDR + idx,
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_SRC3_ADDR,
|
||||
layout->plane_addr[2]);
|
||||
}
|
||||
|
||||
|
@ -522,8 +478,8 @@ static void dpu_hw_sspp_setup_sourceaddress(struct dpu_sw_pipe *pipe,
|
|||
ystride1 = (layout->plane_pitch[2]) |
|
||||
(layout->plane_pitch[3] << 16);
|
||||
} else {
|
||||
ystride0 = DPU_REG_READ(&ctx->hw, SSPP_SRC_YSTRIDE0 + idx);
|
||||
ystride1 = DPU_REG_READ(&ctx->hw, SSPP_SRC_YSTRIDE1 + idx);
|
||||
ystride0 = DPU_REG_READ(&ctx->hw, SSPP_SRC_YSTRIDE0);
|
||||
ystride1 = DPU_REG_READ(&ctx->hw, SSPP_SRC_YSTRIDE1);
|
||||
|
||||
if (pipe->multirect_index == DPU_SSPP_RECT_0) {
|
||||
ystride0 = (ystride0 & 0xFFFF0000) |
|
||||
|
@ -540,34 +496,35 @@ static void dpu_hw_sspp_setup_sourceaddress(struct dpu_sw_pipe *pipe,
|
|||
}
|
||||
}
|
||||
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_SRC_YSTRIDE0 + idx, ystride0);
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_SRC_YSTRIDE1 + idx, ystride1);
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_SRC_YSTRIDE0, ystride0);
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_SRC_YSTRIDE1, ystride1);
|
||||
}
|
||||
|
||||
static void dpu_hw_sspp_setup_csc(struct dpu_hw_sspp *ctx,
|
||||
const struct dpu_csc_cfg *data)
|
||||
{
|
||||
u32 idx;
|
||||
u32 offset;
|
||||
bool csc10 = false;
|
||||
|
||||
if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC, &idx) || !data)
|
||||
if (!ctx || !data)
|
||||
return;
|
||||
|
||||
offset = ctx->cap->sblk->csc_blk.base;
|
||||
|
||||
if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features)) {
|
||||
idx += CSC_10BIT_OFFSET;
|
||||
offset += CSC_10BIT_OFFSET;
|
||||
csc10 = true;
|
||||
}
|
||||
|
||||
dpu_hw_csc_setup(&ctx->hw, idx, data, csc10);
|
||||
dpu_hw_csc_setup(&ctx->hw, offset, data, csc10);
|
||||
}
|
||||
|
||||
static void dpu_hw_sspp_setup_solidfill(struct dpu_sw_pipe *pipe, u32 color)
|
||||
{
|
||||
struct dpu_hw_sspp *ctx = pipe->sspp;
|
||||
struct dpu_hw_fmt_layout cfg;
|
||||
u32 idx;
|
||||
|
||||
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
|
||||
if (!ctx)
|
||||
return;
|
||||
|
||||
/* cleanup source addresses */
|
||||
|
@ -576,79 +533,41 @@ static void dpu_hw_sspp_setup_solidfill(struct dpu_sw_pipe *pipe, u32 color)
|
|||
|
||||
if (pipe->multirect_index == DPU_SSPP_RECT_SOLO ||
|
||||
pipe->multirect_index == DPU_SSPP_RECT_0)
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR + idx, color);
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR, color);
|
||||
else
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR_REC1 + idx,
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR_REC1,
|
||||
color);
|
||||
}
|
||||
|
||||
static void dpu_hw_sspp_setup_danger_safe_lut(struct dpu_hw_sspp *ctx,
|
||||
u32 danger_lut,
|
||||
u32 safe_lut)
|
||||
static void dpu_hw_sspp_setup_qos_lut(struct dpu_hw_sspp *ctx,
|
||||
struct dpu_hw_qos_cfg *cfg)
|
||||
{
|
||||
u32 idx;
|
||||
|
||||
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
|
||||
return;
|
||||
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, danger_lut);
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, safe_lut);
|
||||
}
|
||||
|
||||
static void dpu_hw_sspp_setup_creq_lut(struct dpu_hw_sspp *ctx,
|
||||
u64 creq_lut)
|
||||
{
|
||||
u32 idx;
|
||||
|
||||
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
|
||||
return;
|
||||
|
||||
if (ctx->cap && test_bit(DPU_SSPP_QOS_8LVL, &ctx->cap->features)) {
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, creq_lut);
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_1 + idx,
|
||||
creq_lut >> 32);
|
||||
} else {
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, creq_lut);
|
||||
}
|
||||
}
|
||||
|
||||
static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_sspp *ctx,
|
||||
struct dpu_hw_pipe_qos_cfg *cfg)
|
||||
{
|
||||
u32 idx;
|
||||
u32 qos_ctrl = 0;
|
||||
|
||||
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
|
||||
return;
|
||||
|
||||
if (cfg->vblank_en) {
|
||||
qos_ctrl |= ((cfg->creq_vblank &
|
||||
SSPP_QOS_CTRL_CREQ_VBLANK_MASK) <<
|
||||
SSPP_QOS_CTRL_CREQ_VBLANK_OFF);
|
||||
qos_ctrl |= ((cfg->danger_vblank &
|
||||
SSPP_QOS_CTRL_DANGER_VBLANK_MASK) <<
|
||||
SSPP_QOS_CTRL_DANGER_VBLANK_OFF);
|
||||
qos_ctrl |= SSPP_QOS_CTRL_VBLANK_EN;
|
||||
}
|
||||
|
||||
if (cfg->danger_safe_en)
|
||||
qos_ctrl |= SSPP_QOS_CTRL_DANGER_SAFE_EN;
|
||||
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_QOS_CTRL + idx, qos_ctrl);
|
||||
}
|
||||
|
||||
static void dpu_hw_sspp_setup_cdp(struct dpu_sw_pipe *pipe,
|
||||
struct dpu_hw_cdp_cfg *cfg)
|
||||
{
|
||||
struct dpu_hw_sspp *ctx = pipe->sspp;
|
||||
u32 idx;
|
||||
u32 cdp_cntl = 0;
|
||||
u32 cdp_cntl_offset = 0;
|
||||
|
||||
if (!ctx || !cfg)
|
||||
return;
|
||||
|
||||
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
|
||||
_dpu_hw_setup_qos_lut(&ctx->hw, SSPP_DANGER_LUT,
|
||||
test_bit(DPU_SSPP_QOS_8LVL, &ctx->cap->features),
|
||||
cfg);
|
||||
}
|
||||
|
||||
static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_sspp *ctx,
|
||||
bool danger_safe_en)
|
||||
{
|
||||
if (!ctx)
|
||||
return;
|
||||
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_QOS_CTRL,
|
||||
danger_safe_en ? SSPP_QOS_CTRL_DANGER_SAFE_EN : 0);
|
||||
}
|
||||
|
||||
static void dpu_hw_sspp_setup_cdp(struct dpu_sw_pipe *pipe,
|
||||
const struct dpu_format *fmt,
|
||||
bool enable)
|
||||
{
|
||||
struct dpu_hw_sspp *ctx = pipe->sspp;
|
||||
u32 cdp_cntl_offset = 0;
|
||||
|
||||
if (!ctx)
|
||||
return;
|
||||
|
||||
if (pipe->multirect_index == DPU_SSPP_RECT_SOLO ||
|
||||
|
@ -657,33 +576,20 @@ static void dpu_hw_sspp_setup_cdp(struct dpu_sw_pipe *pipe,
|
|||
else
|
||||
cdp_cntl_offset = SSPP_CDP_CNTL_REC1;
|
||||
|
||||
if (cfg->enable)
|
||||
cdp_cntl |= BIT(0);
|
||||
if (cfg->ubwc_meta_enable)
|
||||
cdp_cntl |= BIT(1);
|
||||
if (cfg->tile_amortize_enable)
|
||||
cdp_cntl |= BIT(2);
|
||||
if (cfg->preload_ahead == DPU_SSPP_CDP_PRELOAD_AHEAD_64)
|
||||
cdp_cntl |= BIT(3);
|
||||
|
||||
DPU_REG_WRITE(&ctx->hw, cdp_cntl_offset, cdp_cntl);
|
||||
dpu_setup_cdp(&ctx->hw, cdp_cntl_offset, fmt, enable);
|
||||
}
|
||||
|
||||
static void _setup_layer_ops(struct dpu_hw_sspp *c,
|
||||
unsigned long features)
|
||||
{
|
||||
if (test_bit(DPU_SSPP_SRC, &features)) {
|
||||
c->ops.setup_format = dpu_hw_sspp_setup_format;
|
||||
c->ops.setup_rects = dpu_hw_sspp_setup_rects;
|
||||
c->ops.setup_sourceaddress = dpu_hw_sspp_setup_sourceaddress;
|
||||
c->ops.setup_solidfill = dpu_hw_sspp_setup_solidfill;
|
||||
c->ops.setup_pe = dpu_hw_sspp_setup_pe_config;
|
||||
}
|
||||
c->ops.setup_format = dpu_hw_sspp_setup_format;
|
||||
c->ops.setup_rects = dpu_hw_sspp_setup_rects;
|
||||
c->ops.setup_sourceaddress = dpu_hw_sspp_setup_sourceaddress;
|
||||
c->ops.setup_solidfill = dpu_hw_sspp_setup_solidfill;
|
||||
c->ops.setup_pe = dpu_hw_sspp_setup_pe_config;
|
||||
|
||||
if (test_bit(DPU_SSPP_QOS, &features)) {
|
||||
c->ops.setup_danger_safe_lut =
|
||||
dpu_hw_sspp_setup_danger_safe_lut;
|
||||
c->ops.setup_creq_lut = dpu_hw_sspp_setup_creq_lut;
|
||||
c->ops.setup_qos_lut = dpu_hw_sspp_setup_qos_lut;
|
||||
c->ops.setup_qos_ctrl = dpu_hw_sspp_setup_qos_ctrl;
|
||||
}
|
||||
|
||||
|
@ -728,8 +634,8 @@ int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms,
|
|||
/* add register dump support */
|
||||
dpu_debugfs_create_regset32("src_blk", 0400,
|
||||
debugfs_root,
|
||||
sblk->src_blk.base + cfg->base,
|
||||
sblk->src_blk.len,
|
||||
cfg->base,
|
||||
cfg->len,
|
||||
kms);
|
||||
|
||||
if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
|
||||
|
@ -758,63 +664,29 @@ int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms,
|
|||
0400,
|
||||
debugfs_root,
|
||||
(u32 *) &cfg->clk_ctrl);
|
||||
debugfs_create_x32("creq_vblank",
|
||||
0600,
|
||||
debugfs_root,
|
||||
(u32 *) &sblk->creq_vblank);
|
||||
debugfs_create_x32("danger_vblank",
|
||||
0600,
|
||||
debugfs_root,
|
||||
(u32 *) &sblk->danger_vblank);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
static const struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
|
||||
void __iomem *addr,
|
||||
const struct dpu_mdss_cfg *catalog,
|
||||
struct dpu_hw_blk_reg_map *b)
|
||||
{
|
||||
int i;
|
||||
|
||||
if ((sspp < SSPP_MAX) && catalog && addr && b) {
|
||||
for (i = 0; i < catalog->sspp_count; i++) {
|
||||
if (sspp == catalog->sspp[i].id) {
|
||||
b->blk_addr = addr + catalog->sspp[i].base;
|
||||
b->log_mask = DPU_DBG_MASK_SSPP;
|
||||
return &catalog->sspp[i];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
struct dpu_hw_sspp *dpu_hw_sspp_init(enum dpu_sspp idx,
|
||||
void __iomem *addr, const struct dpu_mdss_cfg *catalog)
|
||||
struct dpu_hw_sspp *dpu_hw_sspp_init(const struct dpu_sspp_cfg *cfg,
|
||||
void __iomem *addr, const struct dpu_ubwc_cfg *ubwc)
|
||||
{
|
||||
struct dpu_hw_sspp *hw_pipe;
|
||||
const struct dpu_sspp_cfg *cfg;
|
||||
|
||||
if (!addr || !catalog)
|
||||
if (!addr || !ubwc)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL);
|
||||
if (!hw_pipe)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
cfg = _sspp_offset(idx, addr, catalog, &hw_pipe->hw);
|
||||
if (IS_ERR_OR_NULL(cfg)) {
|
||||
kfree(hw_pipe);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
hw_pipe->hw.blk_addr = addr + cfg->base;
|
||||
hw_pipe->hw.log_mask = DPU_DBG_MASK_SSPP;
|
||||
|
||||
/* Assign ops */
|
||||
hw_pipe->catalog = catalog;
|
||||
hw_pipe->ubwc = catalog->ubwc;
|
||||
hw_pipe->idx = idx;
|
||||
hw_pipe->ubwc = ubwc;
|
||||
hw_pipe->idx = cfg->id;
|
||||
hw_pipe->cap = cfg;
|
||||
_setup_layer_ops(hw_pipe, hw_pipe->cap->features);
|
||||
|
||||
|
|
|
@ -163,28 +163,6 @@ struct dpu_sw_pipe_cfg {
|
|||
struct drm_rect dst_rect;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct dpu_hw_pipe_qos_cfg : Source pipe QoS configuration
|
||||
* @creq_vblank: creq value generated to vbif during vertical blanking
|
||||
* @danger_vblank: danger value generated during vertical blanking
|
||||
* @vblank_en: enable creq_vblank and danger_vblank during vblank
|
||||
* @danger_safe_en: enable danger safe generation
|
||||
*/
|
||||
struct dpu_hw_pipe_qos_cfg {
|
||||
u32 creq_vblank;
|
||||
u32 danger_vblank;
|
||||
bool vblank_en;
|
||||
bool danger_safe_en;
|
||||
};
|
||||
|
||||
/**
|
||||
* enum CDP preload ahead address size
|
||||
*/
|
||||
enum {
|
||||
DPU_SSPP_CDP_PRELOAD_AHEAD_32,
|
||||
DPU_SSPP_CDP_PRELOAD_AHEAD_64
|
||||
};
|
||||
|
||||
/**
|
||||
* struct dpu_hw_pipe_ts_cfg - traffic shaper configuration
|
||||
* @size: size to prefill in bytes, or zero to disable
|
||||
|
@ -276,34 +254,22 @@ struct dpu_hw_sspp_ops {
|
|||
void (*setup_sharpening)(struct dpu_hw_sspp *ctx,
|
||||
struct dpu_hw_sharp_cfg *cfg);
|
||||
|
||||
/**
|
||||
* setup_danger_safe_lut - setup danger safe LUTs
|
||||
* @ctx: Pointer to pipe context
|
||||
* @danger_lut: LUT for generate danger level based on fill level
|
||||
* @safe_lut: LUT for generate safe level based on fill level
|
||||
*
|
||||
*/
|
||||
void (*setup_danger_safe_lut)(struct dpu_hw_sspp *ctx,
|
||||
u32 danger_lut,
|
||||
u32 safe_lut);
|
||||
|
||||
/**
|
||||
* setup_creq_lut - setup CREQ LUT
|
||||
* setup_qos_lut - setup QoS LUTs
|
||||
* @ctx: Pointer to pipe context
|
||||
* @creq_lut: LUT for generate creq level based on fill level
|
||||
*
|
||||
* @cfg: LUT configuration
|
||||
*/
|
||||
void (*setup_creq_lut)(struct dpu_hw_sspp *ctx,
|
||||
u64 creq_lut);
|
||||
void (*setup_qos_lut)(struct dpu_hw_sspp *ctx,
|
||||
struct dpu_hw_qos_cfg *cfg);
|
||||
|
||||
/**
|
||||
* setup_qos_ctrl - setup QoS control
|
||||
* @ctx: Pointer to pipe context
|
||||
* @cfg: Pointer to pipe QoS configuration
|
||||
*
|
||||
* @danger_safe_en: flags controlling enabling of danger/safe QoS/LUT
|
||||
*/
|
||||
void (*setup_qos_ctrl)(struct dpu_hw_sspp *ctx,
|
||||
struct dpu_hw_pipe_qos_cfg *cfg);
|
||||
bool danger_safe_en);
|
||||
|
||||
/**
|
||||
* setup_histogram - setup histograms
|
||||
|
@ -331,18 +297,19 @@ struct dpu_hw_sspp_ops {
|
|||
/**
|
||||
* setup_cdp - setup client driven prefetch
|
||||
* @pipe: Pointer to software pipe context
|
||||
* @cfg: Pointer to cdp configuration
|
||||
* @fmt: format used by the sw pipe
|
||||
* @enable: whether the CDP should be enabled for this pipe
|
||||
*/
|
||||
void (*setup_cdp)(struct dpu_sw_pipe *pipe,
|
||||
struct dpu_hw_cdp_cfg *cfg);
|
||||
const struct dpu_format *fmt,
|
||||
bool enable);
|
||||
};
|
||||
|
||||
/**
|
||||
* struct dpu_hw_sspp - pipe description
|
||||
* @base: hardware block base structure
|
||||
* @hw: block hardware details
|
||||
* @catalog: back pointer to catalog
|
||||
* @ubwc: ubwc configuration data
|
||||
* @ubwc: UBWC configuration data
|
||||
* @idx: pipe index
|
||||
* @cap: pointer to layer_cfg
|
||||
* @ops: pointer to operations possible for this pipe
|
||||
|
@ -350,7 +317,6 @@ struct dpu_hw_sspp_ops {
|
|||
struct dpu_hw_sspp {
|
||||
struct dpu_hw_blk base;
|
||||
struct dpu_hw_blk_reg_map hw;
|
||||
const struct dpu_mdss_cfg *catalog;
|
||||
const struct dpu_ubwc_cfg *ubwc;
|
||||
|
||||
/* Pipe */
|
||||
|
@ -363,14 +329,14 @@ struct dpu_hw_sspp {
|
|||
|
||||
struct dpu_kms;
|
||||
/**
|
||||
* dpu_hw_sspp_init - initializes the sspp hw driver object.
|
||||
* dpu_hw_sspp_init() - Initializes the sspp hw driver object.
|
||||
* Should be called once before accessing every pipe.
|
||||
* @idx: Pipe index for which driver object is required
|
||||
* @cfg: Pipe catalog entry for which driver object is required
|
||||
* @addr: Mapped register io address of MDP
|
||||
* @catalog : Pointer to mdss catalog data
|
||||
* @ubwc: UBWC configuration data
|
||||
*/
|
||||
struct dpu_hw_sspp *dpu_hw_sspp_init(enum dpu_sspp idx,
|
||||
void __iomem *addr, const struct dpu_mdss_cfg *catalog);
|
||||
struct dpu_hw_sspp *dpu_hw_sspp_init(const struct dpu_sspp_cfg *cfg,
|
||||
void __iomem *addr, const struct dpu_ubwc_cfg *ubwc);
|
||||
|
||||
/**
|
||||
* dpu_hw_sspp_destroy(): Destroys SSPP driver context
|
||||
|
|
|
@ -130,24 +130,12 @@ static void dpu_hw_setup_vsync_source(struct dpu_hw_mdp *mdp,
|
|||
struct dpu_vsync_source_cfg *cfg)
|
||||
{
|
||||
struct dpu_hw_blk_reg_map *c;
|
||||
u32 reg, wd_load_value, wd_ctl, wd_ctl2, i;
|
||||
static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};
|
||||
u32 reg, wd_load_value, wd_ctl, wd_ctl2;
|
||||
|
||||
if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
|
||||
if (!mdp || !cfg)
|
||||
return;
|
||||
|
||||
c = &mdp->hw;
|
||||
reg = DPU_REG_READ(c, MDP_VSYNC_SEL);
|
||||
for (i = 0; i < cfg->pp_count; i++) {
|
||||
int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
|
||||
|
||||
if (pp_idx >= ARRAY_SIZE(pp_offset))
|
||||
continue;
|
||||
|
||||
reg &= ~(0xf << pp_offset[pp_idx]);
|
||||
reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
|
||||
}
|
||||
DPU_REG_WRITE(c, MDP_VSYNC_SEL, reg);
|
||||
|
||||
if (cfg->vsync_source >= DPU_VSYNC_SOURCE_WD_TIMER_4 &&
|
||||
cfg->vsync_source <= DPU_VSYNC_SOURCE_WD_TIMER_0) {
|
||||
|
@ -194,6 +182,33 @@ static void dpu_hw_setup_vsync_source(struct dpu_hw_mdp *mdp,
|
|||
}
|
||||
}
|
||||
|
||||
static void dpu_hw_setup_vsync_source_and_vsync_sel(struct dpu_hw_mdp *mdp,
|
||||
struct dpu_vsync_source_cfg *cfg)
|
||||
{
|
||||
struct dpu_hw_blk_reg_map *c;
|
||||
u32 reg, i;
|
||||
static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};
|
||||
|
||||
if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
|
||||
return;
|
||||
|
||||
c = &mdp->hw;
|
||||
|
||||
reg = DPU_REG_READ(c, MDP_VSYNC_SEL);
|
||||
for (i = 0; i < cfg->pp_count; i++) {
|
||||
int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
|
||||
|
||||
if (pp_idx >= ARRAY_SIZE(pp_offset))
|
||||
continue;
|
||||
|
||||
reg &= ~(0xf << pp_offset[pp_idx]);
|
||||
reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
|
||||
}
|
||||
DPU_REG_WRITE(c, MDP_VSYNC_SEL, reg);
|
||||
|
||||
dpu_hw_setup_vsync_source(mdp, cfg);
|
||||
}
|
||||
|
||||
static void dpu_hw_get_safe_status(struct dpu_hw_mdp *mdp,
|
||||
struct dpu_danger_safe_status *status)
|
||||
{
|
||||
|
@ -241,7 +256,12 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
|
|||
ops->setup_split_pipe = dpu_hw_setup_split_pipe;
|
||||
ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
|
||||
ops->get_danger_status = dpu_hw_get_danger_status;
|
||||
ops->setup_vsync_source = dpu_hw_setup_vsync_source;
|
||||
|
||||
if (cap & BIT(DPU_MDP_VSYNC_SEL))
|
||||
ops->setup_vsync_source = dpu_hw_setup_vsync_source_and_vsync_sel;
|
||||
else
|
||||
ops->setup_vsync_source = dpu_hw_setup_vsync_source;
|
||||
|
||||
ops->get_safe_status = dpu_hw_get_safe_status;
|
||||
|
||||
if (cap & BIT(DPU_MDP_AUDIO_SELECT))
|
||||
|
|
|
@@ -73,6 +73,19 @@ static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE;
#define QSEED3LITE_SEP_LUT_SIZE \
	(QSEED3LITE_LUT_SIZE * QSEED3LITE_SEPARABLE_LUTS * sizeof(u32))

/* QOS_LUT */
#define QOS_DANGER_LUT 0x00
#define QOS_SAFE_LUT 0x04
#define QOS_CREQ_LUT 0x08
#define QOS_QOS_CTRL 0x0C
#define QOS_CREQ_LUT_0 0x14
#define QOS_CREQ_LUT_1 0x18

/* QOS_QOS_CTRL */
#define QOS_QOS_CTRL_DANGER_SAFE_EN BIT(0)
#define QOS_QOS_CTRL_DANGER_VBLANK_MASK GENMASK(5, 4)
#define QOS_QOS_CTRL_VBLANK_EN BIT(16)
#define QOS_QOS_CTRL_CREQ_VBLANK_MASK GENMASK(21, 20)

void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
		u32 reg_off,
@@ -450,6 +463,24 @@ u64 _dpu_hw_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
	return 0;
}

void _dpu_hw_setup_qos_lut(struct dpu_hw_blk_reg_map *c, u32 offset,
			   bool qos_8lvl,
			   const struct dpu_hw_qos_cfg *cfg)
{
	DPU_REG_WRITE(c, offset + QOS_DANGER_LUT, cfg->danger_lut);
	DPU_REG_WRITE(c, offset + QOS_SAFE_LUT, cfg->safe_lut);

	if (qos_8lvl) {
		DPU_REG_WRITE(c, offset + QOS_CREQ_LUT_0, cfg->creq_lut);
		DPU_REG_WRITE(c, offset + QOS_CREQ_LUT_1, cfg->creq_lut >> 32);
	} else {
		DPU_REG_WRITE(c, offset + QOS_CREQ_LUT, cfg->creq_lut);
	}

	DPU_REG_WRITE(c, offset + QOS_QOS_CTRL,
		      cfg->danger_safe_en ? QOS_QOS_CTRL_DANGER_SAFE_EN : 0);
}

void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c,
		u32 misr_ctrl_offset,
		bool enable, u32 frame_count)
@@ -494,3 +525,24 @@ int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c,

	return 0;
}

#define CDP_ENABLE BIT(0)
#define CDP_UBWC_META_ENABLE BIT(1)
#define CDP_TILE_AMORTIZE_ENABLE BIT(2)
#define CDP_PRELOAD_AHEAD_64 BIT(3)

void dpu_setup_cdp(struct dpu_hw_blk_reg_map *c, u32 offset,
		   const struct dpu_format *fmt, bool enable)
{
	u32 cdp_cntl = CDP_PRELOAD_AHEAD_64;

	if (enable)
		cdp_cntl |= CDP_ENABLE;
	if (DPU_FORMAT_IS_UBWC(fmt))
		cdp_cntl |= CDP_UBWC_META_ENABLE;
	if (DPU_FORMAT_IS_UBWC(fmt) ||
	    DPU_FORMAT_IS_TILE(fmt))
		cdp_cntl |= CDP_TILE_AMORTIZE_ENABLE;

	DPU_REG_WRITE(c, offset, cdp_cntl);
}

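With the SSPP and WB QoS paths merged, both block drivers take the same dpu_hw_qos_cfg and funnel it into _dpu_hw_setup_qos_lut(). A sketch of the shared usage, assuming perf points at the catalog perf data and fmt_idx/total_fl were computed by the caller (placeholder names, not from this diff):

	struct dpu_hw_qos_cfg cfg = { 0 };

	cfg.danger_lut = perf->danger_lut_tbl[fmt_idx];
	cfg.safe_lut = perf->safe_lut_tbl[fmt_idx];
	cfg.creq_lut = _dpu_hw_get_qos_lut(&perf->qos_lut_tbl[fmt_idx], total_fl);
	cfg.danger_safe_en = true;

	sspp->ops.setup_qos_lut(sspp, &cfg);	/* plane/SSPP path */
	hw_wb->ops.setup_qos_lut(hw_wb, &cfg);	/* writeback path, same struct */
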
@@ -306,19 +306,20 @@ struct dpu_drm_scaler_v2 {
};

/**
 * struct dpu_hw_cdp_cfg : CDP configuration
 * @enable: true to enable CDP
 * @ubwc_meta_enable: true to enable ubwc metadata preload
 * @tile_amortize_enable: true to enable amortization control for tile format
 * @preload_ahead: number of request to preload ahead
 *	DPU_*_CDP_PRELOAD_AHEAD_32,
 *	DPU_*_CDP_PRELOAD_AHEAD_64
 * struct dpu_hw_qos_cfg: pipe QoS configuration
 * @danger_lut: LUT for generate danger level based on fill level
 * @safe_lut: LUT for generate safe level based on fill level
 * @creq_lut: LUT for generate creq level based on fill level
 * @creq_vblank: creq value generated to vbif during vertical blanking
 * @danger_vblank: danger value generated during vertical blanking
 * @vblank_en: enable creq_vblank and danger_vblank during vblank
 * @danger_safe_en: enable danger safe generation
 */
struct dpu_hw_cdp_cfg {
	bool enable;
	bool ubwc_meta_enable;
	bool tile_amortize_enable;
	u32 preload_ahead;
struct dpu_hw_qos_cfg {
	u32 danger_lut;
	u32 safe_lut;
	u64 creq_lut;
	bool danger_safe_en;
};

u32 *dpu_hw_util_get_log_mask_ptr(void);
@@ -346,9 +347,16 @@ void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
		u32 csc_reg_off,
		const struct dpu_csc_cfg *data, bool csc10);

void dpu_setup_cdp(struct dpu_hw_blk_reg_map *c, u32 offset,
		   const struct dpu_format *fmt, bool enable);

u64 _dpu_hw_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
		u32 total_fl);

void _dpu_hw_setup_qos_lut(struct dpu_hw_blk_reg_map *c, u32 offset,
			   bool qos_8lvl,
			   const struct dpu_hw_qos_cfg *cfg);

void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c,
		u32 misr_ctrl_offset,
		bool enable,

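dpu_setup_cdp() now derives the UBWC/tile bits from the format instead of a per-caller dpu_hw_cdp_cfg, so the block ops only pass a format and an enable flag. A hedged usage sketch; the perf-table lookup that drives the enable decision is an assumption about the caller, and fmt stands for the dpu_format chosen for the job:

	if (pipe->sspp->ops.setup_cdp)
		pipe->sspp->ops.setup_cdp(pipe, fmt,
					  perf->cdp_cfg[DPU_PERF_CDP_USAGE_RT].rd_enable);

	if (hw_wb->ops.setup_cdp)
		hw_wb->ops.setup_cdp(hw_wb, fmt,
				     perf->cdp_cfg[DPU_PERF_CDP_USAGE_NRT].wr_enable);
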
@ -211,45 +211,22 @@ static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops,
|
|||
ops->set_write_gather_en = dpu_hw_set_write_gather_en;
|
||||
}
|
||||
|
||||
static const struct dpu_vbif_cfg *_top_offset(enum dpu_vbif vbif,
|
||||
const struct dpu_mdss_cfg *m,
|
||||
void __iomem *addr,
|
||||
struct dpu_hw_blk_reg_map *b)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < m->vbif_count; i++) {
|
||||
if (vbif == m->vbif[i].id) {
|
||||
b->blk_addr = addr + m->vbif[i].base;
|
||||
b->log_mask = DPU_DBG_MASK_VBIF;
|
||||
return &m->vbif[i];
|
||||
}
|
||||
}
|
||||
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
|
||||
void __iomem *addr,
|
||||
const struct dpu_mdss_cfg *m)
|
||||
struct dpu_hw_vbif *dpu_hw_vbif_init(const struct dpu_vbif_cfg *cfg,
|
||||
void __iomem *addr)
|
||||
{
|
||||
struct dpu_hw_vbif *c;
|
||||
const struct dpu_vbif_cfg *cfg;
|
||||
|
||||
c = kzalloc(sizeof(*c), GFP_KERNEL);
|
||||
if (!c)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
cfg = _top_offset(idx, m, addr, &c->hw);
|
||||
if (IS_ERR_OR_NULL(cfg)) {
|
||||
kfree(c);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
c->hw.blk_addr = addr + cfg->base;
|
||||
c->hw.log_mask = DPU_DBG_MASK_VBIF;
|
||||
|
||||
/*
|
||||
* Assign ops
|
||||
*/
|
||||
c->idx = idx;
|
||||
c->idx = cfg->id;
|
||||
c->cap = cfg;
|
||||
_setup_vbif_ops(&c->ops, c->cap->features);
|
||||
|
||||
|
|
|
@@ -106,14 +106,13 @@ struct dpu_hw_vbif {
};

/**
 * dpu_hw_vbif_init - initializes the vbif driver for the passed interface idx
 * @idx: Interface index for which driver object is required
 * dpu_hw_vbif_init() - Initializes the VBIF driver for the passed
 * VBIF catalog entry.
 * @cfg: VBIF catalog entry for which driver object is required
 * @addr: Mapped register io address of MDSS
 * @m: Pointer to mdss catalog data
 */
struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
		void __iomem *addr,
		const struct dpu_mdss_cfg *m);
struct dpu_hw_vbif *dpu_hw_vbif_init(const struct dpu_vbif_cfg *cfg,
		void __iomem *addr);

void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif);

@ -49,25 +49,6 @@
|
|||
#define WB_OUT_IMAGE_SIZE 0x2C0
|
||||
#define WB_OUT_XY 0x2C4
|
||||
|
||||
/* WB_QOS_CTRL */
|
||||
#define WB_QOS_CTRL_DANGER_SAFE_EN BIT(0)
|
||||
|
||||
static const struct dpu_wb_cfg *_wb_offset(enum dpu_wb wb,
|
||||
const struct dpu_mdss_cfg *m, void __iomem *addr,
|
||||
struct dpu_hw_blk_reg_map *b)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < m->wb_count; i++) {
|
||||
if (wb == m->wb[i].id) {
|
||||
b->blk_addr = addr + m->wb[i].base;
|
||||
b->log_mask = DPU_DBG_MASK_WB;
|
||||
return &m->wb[i];
|
||||
}
|
||||
}
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
static void dpu_hw_wb_setup_outaddress(struct dpu_hw_wb *ctx,
|
||||
struct dpu_hw_wb_cfg *data)
|
||||
{
|
||||
|
@ -151,58 +132,29 @@ static void dpu_hw_wb_roi(struct dpu_hw_wb *ctx, struct dpu_hw_wb_cfg *wb)
|
|||
}
|
||||
|
||||
static void dpu_hw_wb_setup_qos_lut(struct dpu_hw_wb *ctx,
|
||||
struct dpu_hw_wb_qos_cfg *cfg)
|
||||
struct dpu_hw_qos_cfg *cfg)
|
||||
{
|
||||
struct dpu_hw_blk_reg_map *c = &ctx->hw;
|
||||
u32 qos_ctrl = 0;
|
||||
|
||||
if (!ctx || !cfg)
|
||||
return;
|
||||
|
||||
DPU_REG_WRITE(c, WB_DANGER_LUT, cfg->danger_lut);
|
||||
DPU_REG_WRITE(c, WB_SAFE_LUT, cfg->safe_lut);
|
||||
|
||||
/*
|
||||
* for chipsets not using DPU_WB_QOS_8LVL but still using DPU
|
||||
* driver such as msm8998, the reset value of WB_CREQ_LUT is
|
||||
* sufficient for writeback to work. SW doesn't need to explicitly
|
||||
* program a value.
|
||||
*/
|
||||
if (ctx->caps && test_bit(DPU_WB_QOS_8LVL, &ctx->caps->features)) {
|
||||
DPU_REG_WRITE(c, WB_CREQ_LUT_0, cfg->creq_lut);
|
||||
DPU_REG_WRITE(c, WB_CREQ_LUT_1, cfg->creq_lut >> 32);
|
||||
}
|
||||
|
||||
if (cfg->danger_safe_en)
|
||||
qos_ctrl |= WB_QOS_CTRL_DANGER_SAFE_EN;
|
||||
|
||||
DPU_REG_WRITE(c, WB_QOS_CTRL, qos_ctrl);
|
||||
_dpu_hw_setup_qos_lut(&ctx->hw, WB_DANGER_LUT,
|
||||
test_bit(DPU_WB_QOS_8LVL, &ctx->caps->features),
|
||||
cfg);
|
||||
}
|
||||
|
||||
static void dpu_hw_wb_setup_cdp(struct dpu_hw_wb *ctx,
|
||||
struct dpu_hw_cdp_cfg *cfg)
|
||||
const struct dpu_format *fmt,
|
||||
bool enable)
|
||||
{
|
||||
struct dpu_hw_blk_reg_map *c;
|
||||
u32 cdp_cntl = 0;
|
||||
|
||||
if (!ctx || !cfg)
|
||||
if (!ctx)
|
||||
return;
|
||||
|
||||
c = &ctx->hw;
|
||||
|
||||
if (cfg->enable)
|
||||
cdp_cntl |= BIT(0);
|
||||
if (cfg->ubwc_meta_enable)
|
||||
cdp_cntl |= BIT(1);
|
||||
if (cfg->preload_ahead == DPU_WB_CDP_PRELOAD_AHEAD_64)
|
||||
cdp_cntl |= BIT(3);
|
||||
|
||||
DPU_REG_WRITE(c, WB_CDP_CNTL, cdp_cntl);
|
||||
dpu_setup_cdp(&ctx->hw, WB_CDP_CNTL, fmt, enable);
|
||||
}
|
||||
|
||||
static void dpu_hw_wb_bind_pingpong_blk(
|
||||
struct dpu_hw_wb *ctx,
|
||||
bool enable, const enum dpu_pingpong pp)
|
||||
const enum dpu_pingpong pp)
|
||||
{
|
||||
struct dpu_hw_blk_reg_map *c;
|
||||
int mux_cfg;
|
||||
|
@ -215,7 +167,7 @@ static void dpu_hw_wb_bind_pingpong_blk(
|
|||
mux_cfg = DPU_REG_READ(c, WB_MUX);
|
||||
mux_cfg &= ~0xf;
|
||||
|
||||
if (enable)
|
||||
if (pp)
|
||||
mux_cfg |= (pp - PINGPONG_0) & 0x7;
|
||||
else
|
||||
mux_cfg |= 0xf;
|
||||
|
@ -242,29 +194,23 @@ static void _setup_wb_ops(struct dpu_hw_wb_ops *ops,
|
|||
ops->bind_pingpong_blk = dpu_hw_wb_bind_pingpong_blk;
|
||||
}
|
||||
|
||||
struct dpu_hw_wb *dpu_hw_wb_init(enum dpu_wb idx,
|
||||
void __iomem *addr, const struct dpu_mdss_cfg *m)
|
||||
struct dpu_hw_wb *dpu_hw_wb_init(const struct dpu_wb_cfg *cfg,
|
||||
void __iomem *addr)
|
||||
{
|
||||
struct dpu_hw_wb *c;
|
||||
const struct dpu_wb_cfg *cfg;
|
||||
|
||||
if (!addr || !m)
|
||||
if (!addr)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
c = kzalloc(sizeof(*c), GFP_KERNEL);
|
||||
if (!c)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
cfg = _wb_offset(idx, m, addr, &c->hw);
|
||||
if (IS_ERR(cfg)) {
|
||||
WARN(1, "Unable to find wb idx=%d\n", idx);
|
||||
kfree(c);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
c->hw.blk_addr = addr + cfg->base;
|
||||
c->hw.log_mask = DPU_DBG_MASK_WB;
|
||||
|
||||
/* Assign ops */
|
||||
c->mdp = &m->mdp[0];
|
||||
c->idx = idx;
|
||||
c->idx = cfg->id;
|
||||
c->caps = cfg;
|
||||
_setup_wb_ops(&c->ops, c->caps->features);
|
||||
|
||||
|
|
|
@ -21,28 +21,6 @@ struct dpu_hw_wb_cfg {
|
|||
struct drm_rect crop;
|
||||
};
|
||||
|
||||
/**
|
||||
* enum CDP preload ahead address size
|
||||
*/
|
||||
enum {
|
||||
DPU_WB_CDP_PRELOAD_AHEAD_32,
|
||||
DPU_WB_CDP_PRELOAD_AHEAD_64
|
||||
};
|
||||
|
||||
/**
|
||||
* struct dpu_hw_wb_qos_cfg : Writeback pipe QoS configuration
|
||||
* @danger_lut: LUT for generate danger level based on fill level
|
||||
* @safe_lut: LUT for generate safe level based on fill level
|
||||
* @creq_lut: LUT for generate creq level based on fill level
|
||||
* @danger_safe_en: enable danger safe generation
|
||||
*/
|
||||
struct dpu_hw_wb_qos_cfg {
|
||||
u32 danger_lut;
|
||||
u32 safe_lut;
|
||||
u64 creq_lut;
|
||||
bool danger_safe_en;
|
||||
};
|
||||
|
||||
/**
|
||||
*
|
||||
* struct dpu_hw_wb_ops : Interface to the wb hw driver functions
|
||||
|
@ -64,27 +42,25 @@ struct dpu_hw_wb_ops {
|
|||
struct dpu_hw_wb_cfg *wb);
|
||||
|
||||
void (*setup_qos_lut)(struct dpu_hw_wb *ctx,
|
||||
struct dpu_hw_wb_qos_cfg *cfg);
|
||||
struct dpu_hw_qos_cfg *cfg);
|
||||
|
||||
void (*setup_cdp)(struct dpu_hw_wb *ctx,
|
||||
struct dpu_hw_cdp_cfg *cfg);
|
||||
const struct dpu_format *fmt,
|
||||
bool enable);
|
||||
|
||||
void (*bind_pingpong_blk)(struct dpu_hw_wb *ctx,
|
||||
bool enable, const enum dpu_pingpong pp);
|
||||
const enum dpu_pingpong pp);
|
||||
};
|
||||
|
||||
/**
|
||||
* struct dpu_hw_wb : WB driver object
|
||||
* @hw: block hardware details
|
||||
* @mdp: pointer to associated mdp portion of the catalog
|
||||
* @idx: hardware index number within type
|
||||
* @wb_hw_caps: hardware capabilities
|
||||
* @ops: function pointers
|
||||
* @hw_mdp: MDP top level hardware block
|
||||
*/
|
||||
struct dpu_hw_wb {
|
||||
struct dpu_hw_blk_reg_map hw;
|
||||
const struct dpu_mdp_cfg *mdp;
|
||||
|
||||
/* wb path */
|
||||
int idx;
|
||||
|
@ -92,19 +68,16 @@ struct dpu_hw_wb {
|
|||
|
||||
/* ops */
|
||||
struct dpu_hw_wb_ops ops;
|
||||
|
||||
struct dpu_hw_mdp *hw_mdp;
|
||||
};
|
||||
|
||||
/**
|
||||
* dpu_hw_wb_init(): Initializes and return writeback hw driver object.
|
||||
* @idx: wb_path index for which driver object is required
|
||||
* dpu_hw_wb_init() - Initializes the writeback hw driver object.
|
||||
* @cfg: wb_path catalog entry for which driver object is required
|
||||
* @addr: mapped register io address of MDP
|
||||
* @m : pointer to mdss catalog data
|
||||
* Return: Error code or allocated dpu_hw_wb context
|
||||
*/
|
||||
struct dpu_hw_wb *dpu_hw_wb_init(enum dpu_wb idx,
|
||||
void __iomem *addr,
|
||||
const struct dpu_mdss_cfg *m);
|
||||
struct dpu_hw_wb *dpu_hw_wb_init(const struct dpu_wb_cfg *cfg,
|
||||
void __iomem *addr);
|
||||
|
||||
/**
|
||||
* dpu_hw_wb_destroy(): Destroy writeback hw driver object.
|
||||
|
|
|
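bind_pingpong_blk drops the separate enable flag: the PINGPONG id itself selects the writeback mux and a "none" value detaches it. Sketch, assuming PINGPONG_NONE (value 0) is the detach marker used with this change:

	/* attach the WB block to the pingpong driving it */
	hw_wb->ops.bind_pingpong_blk(hw_wb, hw_pp->idx);

	/* detach on teardown */
	hw_wb->ops.bind_pingpong_blk(hw_wb, PINGPONG_NONE);
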
@ -57,8 +57,8 @@ static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
|
|||
static int _dpu_danger_signal_status(struct seq_file *s,
|
||||
bool danger_status)
|
||||
{
|
||||
struct dpu_kms *kms = (struct dpu_kms *)s->private;
|
||||
struct dpu_danger_safe_status status;
|
||||
struct dpu_kms *kms = s->private;
|
||||
int i;
|
||||
|
||||
if (!kms->hw_mdp) {
|
||||
|
@ -535,15 +535,23 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
|
|||
!msm_dsi_is_master_dsi(priv->dsi[i]))
|
||||
continue;
|
||||
|
||||
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
|
||||
memset(&info, 0, sizeof(info));
|
||||
info.intf_type = INTF_DSI;
|
||||
|
||||
info.h_tile_instance[info.num_of_h_tiles++] = i;
|
||||
if (msm_dsi_is_bonded_dsi(priv->dsi[i]))
|
||||
info.h_tile_instance[info.num_of_h_tiles++] = other;
|
||||
|
||||
info.is_cmd_mode = msm_dsi_is_cmd_mode(priv->dsi[i]);
|
||||
|
||||
info.dsc = msm_dsi_get_dsc_config(priv->dsi[i]);
|
||||
|
||||
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI, &info);
|
||||
if (IS_ERR(encoder)) {
|
||||
DPU_ERROR("encoder init failed for dsi display\n");
|
||||
return PTR_ERR(encoder);
|
||||
}
|
||||
|
||||
memset(&info, 0, sizeof(info));
|
||||
info.intf_type = encoder->encoder_type;
|
||||
|
||||
rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
|
||||
if (rc) {
|
||||
DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
|
||||
|
@ -551,11 +559,6 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
|
|||
break;
|
||||
}
|
||||
|
||||
info.h_tile_instance[info.num_of_h_tiles++] = i;
|
||||
info.is_cmd_mode = msm_dsi_is_cmd_mode(priv->dsi[i]);
|
||||
|
||||
info.dsc = msm_dsi_get_dsc_config(priv->dsi[i]);
|
||||
|
||||
if (msm_dsi_is_bonded_dsi(priv->dsi[i]) && priv->dsi[other]) {
|
||||
rc = msm_dsi_modeset_init(priv->dsi[other], dev, encoder);
|
||||
if (rc) {
|
||||
|
@ -563,14 +566,7 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
|
|||
other, rc);
|
||||
break;
|
||||
}
|
||||
|
||||
info.h_tile_instance[info.num_of_h_tiles++] = other;
|
||||
}
|
||||
|
||||
rc = dpu_encoder_setup(dev, encoder, &info);
|
||||
if (rc)
|
||||
DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
|
||||
encoder->base.id, rc);
|
||||
}
|
||||
|
||||
return rc;
|
||||
|
@ -589,29 +585,55 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
|
|||
if (!priv->dp[i])
|
||||
continue;
|
||||
|
||||
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS);
|
||||
memset(&info, 0, sizeof(info));
|
||||
info.num_of_h_tiles = 1;
|
||||
info.h_tile_instance[0] = i;
|
||||
info.intf_type = INTF_DP;
|
||||
|
||||
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS, &info);
|
||||
if (IS_ERR(encoder)) {
|
||||
DPU_ERROR("encoder init failed for dsi display\n");
|
||||
return PTR_ERR(encoder);
|
||||
}
|
||||
|
||||
memset(&info, 0, sizeof(info));
|
||||
rc = msm_dp_modeset_init(priv->dp[i], dev, encoder);
|
||||
if (rc) {
|
||||
DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
|
||||
drm_encoder_cleanup(encoder);
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
|
||||
info.num_of_h_tiles = 1;
|
||||
info.h_tile_instance[0] = i;
|
||||
info.intf_type = encoder->encoder_type;
|
||||
rc = dpu_encoder_setup(dev, encoder, &info);
|
||||
if (rc) {
|
||||
DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
|
||||
encoder->base.id, rc);
|
||||
return rc;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int _dpu_kms_initialize_hdmi(struct drm_device *dev,
|
||||
struct msm_drm_private *priv,
|
||||
struct dpu_kms *dpu_kms)
|
||||
{
|
||||
struct drm_encoder *encoder = NULL;
|
||||
struct msm_display_info info;
|
||||
int rc;
|
||||
|
||||
if (!priv->hdmi)
|
||||
return 0;
|
||||
|
||||
memset(&info, 0, sizeof(info));
|
||||
info.num_of_h_tiles = 1;
|
||||
info.h_tile_instance[0] = 0;
|
||||
info.intf_type = INTF_HDMI;
|
||||
|
||||
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS, &info);
|
||||
if (IS_ERR(encoder)) {
|
||||
DPU_ERROR("encoder init failed for HDMI display\n");
|
||||
return PTR_ERR(encoder);
|
||||
}
|
||||
|
||||
rc = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
|
||||
if (rc) {
|
||||
DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
|
||||
drm_encoder_cleanup(encoder);
|
||||
return rc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -625,14 +647,19 @@ static int _dpu_kms_initialize_writeback(struct drm_device *dev,
|
|||
struct msm_display_info info;
|
||||
int rc;
|
||||
|
||||
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_VIRTUAL);
|
||||
memset(&info, 0, sizeof(info));
|
||||
|
||||
info.num_of_h_tiles = 1;
|
||||
/* use only WB idx 2 instance for DPU */
|
||||
info.h_tile_instance[0] = WB_2;
|
||||
info.intf_type = INTF_WB;
|
||||
|
||||
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_VIRTUAL, &info);
|
||||
if (IS_ERR(encoder)) {
|
||||
DPU_ERROR("encoder init failed for dsi display\n");
|
||||
return PTR_ERR(encoder);
|
||||
}
|
||||
|
||||
memset(&info, 0, sizeof(info));
|
||||
|
||||
rc = dpu_writeback_init(dev, encoder, wb_formats,
|
||||
n_formats);
|
||||
if (rc) {
|
||||
|
@ -641,18 +668,6 @@ static int _dpu_kms_initialize_writeback(struct drm_device *dev,
|
|||
return rc;
|
||||
}
|
||||
|
||||
info.num_of_h_tiles = 1;
|
||||
/* use only WB idx 2 instance for DPU */
|
||||
info.h_tile_instance[0] = WB_2;
|
||||
info.intf_type = encoder->encoder_type;
|
||||
|
||||
rc = dpu_encoder_setup(dev, encoder, &info);
|
||||
if (rc) {
|
||||
DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
|
||||
encoder->base.id, rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -683,6 +698,12 @@ static int _dpu_kms_setup_displays(struct drm_device *dev,
|
|||
return rc;
|
||||
}
|
||||
|
||||
rc = _dpu_kms_initialize_hdmi(dev, priv, dpu_kms);
|
||||
if (rc) {
|
||||
DPU_ERROR("initialize HDMI failed, rc = %d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* Since WB isn't a driver check the catalog before initializing */
|
||||
if (dpu_kms->catalog->wb_count) {
|
||||
for (i = 0; i < dpu_kms->catalog->wb_count; i++) {
|
||||
|
@ -979,13 +1000,13 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
|
|||
return 0;
|
||||
}
|
||||
|
||||
u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
|
||||
unsigned long dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
|
||||
{
|
||||
struct clk *clk;
|
||||
|
||||
clk = msm_clk_bulk_get_clock(dpu_kms->clocks, dpu_kms->num_clocks, clock_name);
|
||||
if (!clk)
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
|
||||
return clk_get_rate(clk);
|
||||
}
|
||||
|
@ -1005,6 +1026,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
|
|||
dpu_kms = to_dpu_kms(kms);
|
||||
dev = dpu_kms->dev;
|
||||
|
||||
dev->mode_config.cursor_width = 512;
|
||||
dev->mode_config.cursor_height = 512;
|
||||
|
||||
rc = dpu_kms_global_obj_init(dpu_kms);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
@ -1033,12 +1057,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
|
|||
DPU_DEBUG("VBIF NRT is not defined");
|
||||
}
|
||||
|
||||
dpu_kms->reg_dma = msm_ioremap_quiet(dpu_kms->pdev, "regdma");
|
||||
if (IS_ERR(dpu_kms->reg_dma)) {
|
||||
dpu_kms->reg_dma = NULL;
|
||||
DPU_DEBUG("REG_DMA is not defined");
|
||||
}
|
||||
|
||||
dpu_kms_parse_data_bus_icc_path(dpu_kms);
|
||||
|
||||
rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev);
|
||||
|
@ -1084,16 +1102,17 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
|
|||
}
|
||||
|
||||
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
|
||||
u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
|
||||
struct dpu_hw_vbif *hw;
|
||||
const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
|
||||
|
||||
dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx,
|
||||
dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
|
||||
if (IS_ERR(dpu_kms->hw_vbif[vbif_idx])) {
|
||||
rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
|
||||
DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
|
||||
dpu_kms->hw_vbif[vbif_idx] = NULL;
|
||||
hw = dpu_hw_vbif_init(vbif, dpu_kms->vbif[vbif->id]);
|
||||
if (IS_ERR(hw)) {
|
||||
rc = PTR_ERR(hw);
|
||||
DPU_ERROR("failed to init vbif %d: %d\n", vbif->id, rc);
|
||||
goto power_error;
|
||||
}
|
||||
|
||||
dpu_kms->hw_vbif[vbif->id] = hw;
|
||||
}
|
||||
|
||||
rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
|
||||
|
@ -1286,6 +1305,8 @@ static const struct of_device_id dpu_dt_match[] = {
|
|||
{ .compatible = "qcom,sc8180x-dpu", .data = &dpu_sc8180x_cfg, },
|
||||
{ .compatible = "qcom,sc8280xp-dpu", .data = &dpu_sc8280xp_cfg, },
|
||||
{ .compatible = "qcom,sm6115-dpu", .data = &dpu_sm6115_cfg, },
|
||||
{ .compatible = "qcom,sm6350-dpu", .data = &dpu_sm6350_cfg, },
|
||||
{ .compatible = "qcom,sm6375-dpu", .data = &dpu_sm6375_cfg, },
|
||||
{ .compatible = "qcom,sm8150-dpu", .data = &dpu_sm8150_cfg, },
|
||||
{ .compatible = "qcom,sm8250-dpu", .data = &dpu_sm8250_cfg, },
|
||||
{ .compatible = "qcom,sm8350-dpu", .data = &dpu_sm8350_cfg, },
|
||||
|
|
|
@ -63,15 +63,13 @@
|
|||
#define ktime_compare_safe(A, B) \
|
||||
ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
|
||||
|
||||
#define DPU_NAME_SIZE 12
|
||||
|
||||
struct dpu_kms {
|
||||
struct msm_kms base;
|
||||
struct drm_device *dev;
|
||||
const struct dpu_mdss_cfg *catalog;
|
||||
|
||||
/* io/register spaces: */
|
||||
void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma;
|
||||
void __iomem *mmio, *vbif[VBIF_MAX];
|
||||
|
||||
struct regulator *vdd;
|
||||
struct regulator *mmagic;
|
||||
|
@ -118,6 +116,10 @@ struct vsync_info {
|
|||
u32 line_count;
|
||||
};
|
||||
|
||||
#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
|
||||
|
||||
#define DPU_ENC_MAX_POLL_TIMEOUT_US 2000
|
||||
|
||||
#define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
|
||||
|
||||
#define to_dpu_global_state(x) container_of(x, struct dpu_global_state, base)
|
||||
|
@ -201,6 +203,6 @@ void dpu_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
|
|||
*
|
||||
* Return: current clock rate
|
||||
*/
|
||||
u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name);
|
||||
unsigned long dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name);
|
||||
|
||||
#endif /* __dpu_kms_H__ */
|
||||
|
|
|
@ -42,8 +42,6 @@
|
|||
#define SHARP_SMOOTH_THR_DEFAULT 8
|
||||
#define SHARP_NOISE_THR_DEFAULT 2
|
||||
|
||||
#define DPU_NAME_SIZE 12
|
||||
|
||||
#define DPU_PLANE_COLOR_FILL_FLAG BIT(31)
|
||||
#define DPU_ZPOS_MAX 255
|
||||
|
||||
|
@ -70,20 +68,6 @@ static const uint32_t qcom_compressed_supported_formats[] = {
|
|||
DRM_FORMAT_P010,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum dpu_plane_qos - Different qos configurations for each pipe
|
||||
*
|
||||
* @DPU_PLANE_QOS_VBLANK_CTRL: Setup VBLANK qos for the pipe.
|
||||
* @DPU_PLANE_QOS_VBLANK_AMORTIZE: Enables Amortization within pipe.
|
||||
* this configuration is mutually exclusive from VBLANK_CTRL.
|
||||
* @DPU_PLANE_QOS_PANIC_CTRL: Setup panic for the pipe.
|
||||
*/
|
||||
enum dpu_plane_qos {
|
||||
DPU_PLANE_QOS_VBLANK_CTRL = BIT(0),
|
||||
DPU_PLANE_QOS_VBLANK_AMORTIZE = BIT(1),
|
||||
DPU_PLANE_QOS_PANIC_CTRL = BIT(2),
|
||||
};
|
||||
|
||||
/*
|
||||
* struct dpu_plane - local dpu plane structure
|
||||
* @aspace: address space pointer
|
||||
|
@ -204,12 +188,14 @@ static u64 _dpu_plane_calc_clk(const struct drm_display_mode *mode,
|
|||
* _dpu_plane_calc_fill_level - calculate fill level of the given source format
|
||||
* @plane: Pointer to drm plane
|
||||
* @pipe: Pointer to software pipe
|
||||
* @lut_usage: LUT usecase
|
||||
* @fmt: Pointer to source buffer format
|
||||
* @src_width: width of source buffer
|
||||
* Return: fill level corresponding to the source buffer/format or 0 if error
|
||||
*/
|
||||
static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
|
||||
struct dpu_sw_pipe *pipe,
|
||||
enum dpu_qos_lut_usage lut_usage,
|
||||
const struct dpu_format *fmt, u32 src_width)
|
||||
{
|
||||
struct dpu_plane *pdpu;
|
||||
|
@ -221,6 +207,9 @@ static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
|
|||
return 0;
|
||||
}
|
||||
|
||||
if (lut_usage == DPU_QOS_LUT_USAGE_NRT)
|
||||
return 0;
|
||||
|
||||
pdpu = to_dpu_plane(plane);
|
||||
fixed_buff_size = pdpu->catalog->caps->pixel_ram_size;
|
||||
|
||||
|
@ -266,83 +255,58 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
|
|||
const struct dpu_format *fmt, struct dpu_sw_pipe_cfg *pipe_cfg)
|
||||
{
|
||||
struct dpu_plane *pdpu = to_dpu_plane(plane);
|
||||
u64 qos_lut;
|
||||
u32 total_fl = 0, lut_usage;
|
||||
struct dpu_hw_qos_cfg cfg;
|
||||
u32 total_fl, lut_usage;
|
||||
|
||||
if (!pdpu->is_rt_pipe) {
|
||||
lut_usage = DPU_QOS_LUT_USAGE_NRT;
|
||||
} else {
|
||||
total_fl = _dpu_plane_calc_fill_level(plane, pipe, fmt,
|
||||
drm_rect_width(&pipe_cfg->src_rect));
|
||||
|
||||
if (fmt && DPU_FORMAT_IS_LINEAR(fmt))
|
||||
lut_usage = DPU_QOS_LUT_USAGE_LINEAR;
|
||||
else
|
||||
lut_usage = DPU_QOS_LUT_USAGE_MACROTILE;
|
||||
}
|
||||
|
||||
qos_lut = _dpu_hw_get_qos_lut(
|
||||
&pdpu->catalog->perf->qos_lut_tbl[lut_usage], total_fl);
|
||||
total_fl = _dpu_plane_calc_fill_level(plane, pipe, lut_usage, fmt,
|
||||
drm_rect_width(&pipe_cfg->src_rect));
|
||||
|
||||
cfg.creq_lut = _dpu_hw_get_qos_lut(&pdpu->catalog->perf->qos_lut_tbl[lut_usage], total_fl);
|
||||
cfg.danger_lut = pdpu->catalog->perf->danger_lut_tbl[lut_usage];
|
||||
cfg.safe_lut = pdpu->catalog->perf->safe_lut_tbl[lut_usage];
|
||||
|
||||
if (pipe->sspp->idx != SSPP_CURSOR0 &&
|
||||
pipe->sspp->idx != SSPP_CURSOR1 &&
|
||||
pdpu->is_rt_pipe)
|
||||
cfg.danger_safe_en = true;
|
||||
|
||||
DPU_DEBUG_PLANE(pdpu, "pnum:%d ds:%d is_rt:%d\n",
|
||||
pdpu->pipe - SSPP_VIG0,
|
||||
cfg.danger_safe_en,
|
||||
pdpu->is_rt_pipe);
|
||||
|
||||
trace_dpu_perf_set_qos_luts(pipe->sspp->idx - SSPP_VIG0,
|
||||
(fmt) ? fmt->base.pixel_format : 0,
|
||||
pdpu->is_rt_pipe, total_fl, qos_lut, lut_usage);
|
||||
pdpu->is_rt_pipe, total_fl, cfg.creq_lut, lut_usage);
|
||||
|
||||
DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%llx\n",
|
||||
pdpu->pipe - SSPP_VIG0,
|
||||
fmt ? (char *)&fmt->base.pixel_format : NULL,
|
||||
pdpu->is_rt_pipe, total_fl, qos_lut);
|
||||
|
||||
pipe->sspp->ops.setup_creq_lut(pipe->sspp, qos_lut);
|
||||
}
|
||||
|
||||
/**
|
||||
* _dpu_plane_set_danger_lut - set danger/safe LUT of the given plane
|
||||
* @plane: Pointer to drm plane
|
||||
* @pipe: Pointer to software pipe
|
||||
* @fmt: Pointer to source buffer format
|
||||
*/
|
||||
static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
|
||||
struct dpu_sw_pipe *pipe,
|
||||
const struct dpu_format *fmt)
|
||||
{
|
||||
struct dpu_plane *pdpu = to_dpu_plane(plane);
|
||||
u32 danger_lut, safe_lut;
|
||||
|
||||
if (!pdpu->is_rt_pipe) {
|
||||
danger_lut = pdpu->catalog->perf->danger_lut_tbl
|
||||
[DPU_QOS_LUT_USAGE_NRT];
|
||||
safe_lut = pdpu->catalog->perf->safe_lut_tbl
|
||||
[DPU_QOS_LUT_USAGE_NRT];
|
||||
} else {
|
||||
if (fmt && DPU_FORMAT_IS_LINEAR(fmt)) {
|
||||
danger_lut = pdpu->catalog->perf->danger_lut_tbl
|
||||
[DPU_QOS_LUT_USAGE_LINEAR];
|
||||
safe_lut = pdpu->catalog->perf->safe_lut_tbl
|
||||
[DPU_QOS_LUT_USAGE_LINEAR];
|
||||
} else {
|
||||
danger_lut = pdpu->catalog->perf->danger_lut_tbl
|
||||
[DPU_QOS_LUT_USAGE_MACROTILE];
|
||||
safe_lut = pdpu->catalog->perf->safe_lut_tbl
|
||||
[DPU_QOS_LUT_USAGE_MACROTILE];
|
||||
}
|
||||
}
|
||||
pdpu->is_rt_pipe, total_fl, cfg.creq_lut);
|
||||
|
||||
trace_dpu_perf_set_danger_luts(pdpu->pipe - SSPP_VIG0,
|
||||
(fmt) ? fmt->base.pixel_format : 0,
|
||||
(fmt) ? fmt->fetch_mode : 0,
|
||||
danger_lut,
|
||||
safe_lut);
|
||||
cfg.danger_lut,
|
||||
cfg.safe_lut);
|
||||
|
||||
DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n",
|
||||
pdpu->pipe - SSPP_VIG0,
|
||||
fmt ? (char *)&fmt->base.pixel_format : NULL,
|
||||
fmt ? fmt->fetch_mode : -1,
|
||||
danger_lut,
|
||||
safe_lut);
|
||||
cfg.danger_lut,
|
||||
cfg.safe_lut);
|
||||
|
||||
pipe->sspp->ops.setup_danger_safe_lut(pipe->sspp,
|
||||
danger_lut, safe_lut);
|
||||
pipe->sspp->ops.setup_qos_lut(pipe->sspp, &cfg);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -350,48 +314,23 @@ static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
|
|||
* @plane: Pointer to drm plane
|
||||
* @pipe: Pointer to software pipe
|
||||
* @enable: true to enable QoS control
|
||||
* @flags: QoS control mode (enum dpu_plane_qos)
|
||||
*/
|
||||
static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
|
||||
struct dpu_sw_pipe *pipe,
|
||||
bool enable, u32 flags)
|
||||
bool enable)
|
||||
{
|
||||
struct dpu_plane *pdpu = to_dpu_plane(plane);
|
||||
struct dpu_hw_pipe_qos_cfg pipe_qos_cfg;
|
||||
|
||||
memset(&pipe_qos_cfg, 0, sizeof(pipe_qos_cfg));
|
||||
if (!pdpu->is_rt_pipe)
|
||||
enable = false;
|
||||
|
||||
if (flags & DPU_PLANE_QOS_VBLANK_CTRL) {
|
||||
pipe_qos_cfg.creq_vblank = pipe->sspp->cap->sblk->creq_vblank;
|
||||
pipe_qos_cfg.danger_vblank =
|
||||
pipe->sspp->cap->sblk->danger_vblank;
|
||||
pipe_qos_cfg.vblank_en = enable;
|
||||
}
|
||||
|
||||
if (flags & DPU_PLANE_QOS_VBLANK_AMORTIZE) {
|
||||
/* this feature overrules previous VBLANK_CTRL */
|
||||
pipe_qos_cfg.vblank_en = false;
|
||||
pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */
|
||||
}
|
||||
|
||||
if (flags & DPU_PLANE_QOS_PANIC_CTRL)
|
||||
pipe_qos_cfg.danger_safe_en = enable;
|
||||
|
||||
if (!pdpu->is_rt_pipe) {
|
||||
pipe_qos_cfg.vblank_en = false;
|
||||
pipe_qos_cfg.danger_safe_en = false;
|
||||
}
|
||||
|
||||
DPU_DEBUG_PLANE(pdpu, "pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n",
|
||||
DPU_DEBUG_PLANE(pdpu, "pnum:%d ds:%d is_rt:%d\n",
|
||||
pdpu->pipe - SSPP_VIG0,
|
||||
pipe_qos_cfg.danger_safe_en,
|
||||
pipe_qos_cfg.vblank_en,
|
||||
pipe_qos_cfg.creq_vblank,
|
||||
pipe_qos_cfg.danger_vblank,
|
||||
enable,
|
||||
pdpu->is_rt_pipe);
|
||||
|
||||
pipe->sspp->ops.setup_qos_ctrl(pipe->sspp,
|
||||
&pipe_qos_cfg);
|
||||
enable);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1079,10 +1018,10 @@ static void dpu_plane_sspp_update_pipe(struct drm_plane *plane,
|
|||
pipe->sspp->ops.setup_sourceaddress(pipe, layout);
|
||||
}
|
||||
|
||||
_dpu_plane_set_qos_ctrl(plane, pipe, false, DPU_PLANE_QOS_PANIC_CTRL);
|
||||
|
||||
/* override for color fill */
|
||||
if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) {
|
||||
_dpu_plane_set_qos_ctrl(plane, pipe, false);
|
||||
|
||||
/* skip remaining processing on color fill */
|
||||
return;
|
||||
}
|
||||
|
@ -1116,30 +1055,18 @@ static void dpu_plane_sspp_update_pipe(struct drm_plane *plane,
|
|||
pipe->sspp->ops.setup_format(pipe, fmt, src_flags);
|
||||
|
||||
if (pipe->sspp->ops.setup_cdp) {
|
||||
struct dpu_hw_cdp_cfg cdp_cfg;
|
||||
const struct dpu_perf_cfg *perf = pdpu->catalog->perf;
|
||||
|
||||
memset(&cdp_cfg, 0, sizeof(struct dpu_hw_cdp_cfg));
|
||||
|
||||
cdp_cfg.enable = pdpu->catalog->perf->cdp_cfg
|
||||
[DPU_PERF_CDP_USAGE_RT].rd_enable;
|
||||
cdp_cfg.ubwc_meta_enable =
|
||||
DPU_FORMAT_IS_UBWC(fmt);
|
||||
cdp_cfg.tile_amortize_enable =
|
||||
DPU_FORMAT_IS_UBWC(fmt) ||
|
||||
DPU_FORMAT_IS_TILE(fmt);
|
||||
cdp_cfg.preload_ahead = DPU_SSPP_CDP_PRELOAD_AHEAD_64;
|
||||
|
||||
pipe->sspp->ops.setup_cdp(pipe, &cdp_cfg);
|
||||
pipe->sspp->ops.setup_cdp(pipe, fmt,
|
||||
perf->cdp_cfg[DPU_PERF_CDP_USAGE_RT].rd_enable);
|
||||
}
|
||||
}
|
||||
|
||||
_dpu_plane_set_qos_lut(plane, pipe, fmt, pipe_cfg);
|
||||
_dpu_plane_set_danger_lut(plane, pipe, fmt);
|
||||
|
||||
if (plane->type != DRM_PLANE_TYPE_CURSOR) {
|
||||
_dpu_plane_set_qos_ctrl(plane, pipe, true, DPU_PLANE_QOS_PANIC_CTRL);
|
||||
if (pipe->sspp->idx != SSPP_CURSOR0 &&
|
||||
pipe->sspp->idx != SSPP_CURSOR1)
|
||||
_dpu_plane_set_ot_limit(plane, pipe, pipe_cfg, frame_rate);
|
||||
}
|
||||
|
||||
if (pstate->needs_qos_remap)
|
||||
_dpu_plane_set_qos_remap(plane, pipe);
|
||||
|
@ -1254,10 +1181,10 @@ static void dpu_plane_destroy(struct drm_plane *plane)
|
|||
|
||||
if (pdpu) {
|
||||
pstate = to_dpu_plane_state(plane->state);
|
||||
_dpu_plane_set_qos_ctrl(plane, &pstate->pipe, false, DPU_PLANE_QOS_PANIC_CTRL);
|
||||
_dpu_plane_set_qos_ctrl(plane, &pstate->pipe, false);
|
||||
|
||||
if (pstate->r_pipe.sspp)
|
||||
_dpu_plane_set_qos_ctrl(plane, &pstate->r_pipe, false, DPU_PLANE_QOS_PANIC_CTRL);
|
||||
_dpu_plane_set_qos_ctrl(plane, &pstate->r_pipe, false);
|
||||
|
||||
mutex_destroy(&pdpu->lock);
|
||||
|
||||
|
@ -1414,9 +1341,9 @@ void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
|
|||
return;
|
||||
|
||||
pm_runtime_get_sync(&dpu_kms->pdev->dev);
|
||||
_dpu_plane_set_qos_ctrl(plane, &pstate->pipe, enable, DPU_PLANE_QOS_PANIC_CTRL);
|
||||
_dpu_plane_set_qos_ctrl(plane, &pstate->pipe, enable);
|
||||
if (pstate->r_pipe.sspp)
|
||||
_dpu_plane_set_qos_ctrl(plane, &pstate->r_pipe, enable, DPU_PLANE_QOS_PANIC_CTRL);
|
||||
_dpu_plane_set_qos_ctrl(plane, &pstate->r_pipe, enable);
|
||||
pm_runtime_put_sync(&dpu_kms->pdev->dev);
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "[drm:%s] " fmt, __func__
|
||||
|
@ -117,16 +118,7 @@ int dpu_rm_init(struct dpu_rm *rm,
|
|||
struct dpu_hw_mixer *hw;
|
||||
const struct dpu_lm_cfg *lm = &cat->mixer[i];
|
||||
|
||||
if (lm->pingpong == PINGPONG_MAX) {
|
||||
DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (lm->id < LM_0 || lm->id >= LM_MAX) {
|
||||
DPU_ERROR("skip mixer %d with invalid id\n", lm->id);
|
||||
continue;
|
||||
}
|
||||
hw = dpu_hw_lm_init(lm->id, mmio, cat);
|
||||
hw = dpu_hw_lm_init(lm, mmio);
|
||||
if (IS_ERR(hw)) {
|
||||
rc = PTR_ERR(hw);
|
||||
DPU_ERROR("failed lm object creation: err %d\n", rc);
|
||||
|
@ -139,11 +131,7 @@ int dpu_rm_init(struct dpu_rm *rm,
|
|||
struct dpu_hw_merge_3d *hw;
|
||||
const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i];
|
||||
|
||||
if (merge_3d->id < MERGE_3D_0 || merge_3d->id >= MERGE_3D_MAX) {
|
||||
DPU_ERROR("skip merge_3d %d with invalid id\n", merge_3d->id);
|
||||
continue;
|
||||
}
|
||||
hw = dpu_hw_merge_3d_init(merge_3d->id, mmio, cat);
|
||||
hw = dpu_hw_merge_3d_init(merge_3d, mmio);
|
||||
if (IS_ERR(hw)) {
|
||||
rc = PTR_ERR(hw);
|
||||
DPU_ERROR("failed merge_3d object creation: err %d\n",
|
||||
|
@ -157,11 +145,7 @@ int dpu_rm_init(struct dpu_rm *rm,
|
|||
struct dpu_hw_pingpong *hw;
|
||||
const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];
|
||||
|
||||
if (pp->id < PINGPONG_0 || pp->id >= PINGPONG_MAX) {
|
||||
DPU_ERROR("skip pingpong %d with invalid id\n", pp->id);
|
||||
continue;
|
||||
}
|
||||
hw = dpu_hw_pingpong_init(pp->id, mmio, cat);
|
||||
hw = dpu_hw_pingpong_init(pp, mmio);
|
||||
if (IS_ERR(hw)) {
|
||||
rc = PTR_ERR(hw);
|
||||
DPU_ERROR("failed pingpong object creation: err %d\n",
|
||||
|
@ -177,15 +161,7 @@ int dpu_rm_init(struct dpu_rm *rm,
|
|||
struct dpu_hw_intf *hw;
|
||||
const struct dpu_intf_cfg *intf = &cat->intf[i];
|
||||
|
||||
if (intf->type == INTF_NONE) {
|
||||
DPU_DEBUG("skip intf %d with type none\n", i);
|
||||
continue;
|
||||
}
|
||||
if (intf->id < INTF_0 || intf->id >= INTF_MAX) {
|
||||
DPU_ERROR("skip intf %d with invalid id\n", intf->id);
|
||||
continue;
|
||||
}
|
||||
hw = dpu_hw_intf_init(intf->id, mmio, cat);
|
||||
hw = dpu_hw_intf_init(intf, mmio);
|
||||
if (IS_ERR(hw)) {
|
||||
rc = PTR_ERR(hw);
|
||||
DPU_ERROR("failed intf object creation: err %d\n", rc);
|
||||
|
@ -198,12 +174,7 @@ int dpu_rm_init(struct dpu_rm *rm,
|
|||
struct dpu_hw_wb *hw;
|
||||
const struct dpu_wb_cfg *wb = &cat->wb[i];
|
||||
|
||||
if (wb->id < WB_0 || wb->id >= WB_MAX) {
|
||||
DPU_ERROR("skip intf %d with invalid id\n", wb->id);
|
||||
continue;
|
||||
}
|
||||
|
||||
hw = dpu_hw_wb_init(wb->id, mmio, cat);
|
||||
hw = dpu_hw_wb_init(wb, mmio);
|
||||
if (IS_ERR(hw)) {
|
||||
rc = PTR_ERR(hw);
|
||||
DPU_ERROR("failed wb object creation: err %d\n", rc);
|
||||
|
@ -216,11 +187,7 @@ int dpu_rm_init(struct dpu_rm *rm,
|
|||
struct dpu_hw_ctl *hw;
|
||||
const struct dpu_ctl_cfg *ctl = &cat->ctl[i];
|
||||
|
||||
if (ctl->id < CTL_0 || ctl->id >= CTL_MAX) {
|
||||
DPU_ERROR("skip ctl %d with invalid id\n", ctl->id);
|
||||
continue;
|
||||
}
|
||||
hw = dpu_hw_ctl_init(ctl->id, mmio, cat);
|
||||
hw = dpu_hw_ctl_init(ctl, mmio, cat->mixer_count, cat->mixer);
|
||||
if (IS_ERR(hw)) {
|
||||
rc = PTR_ERR(hw);
|
||||
DPU_ERROR("failed ctl object creation: err %d\n", rc);
|
||||
|
@ -233,11 +200,7 @@ int dpu_rm_init(struct dpu_rm *rm,
|
|||
struct dpu_hw_dspp *hw;
|
||||
const struct dpu_dspp_cfg *dspp = &cat->dspp[i];
|
||||
|
||||
if (dspp->id < DSPP_0 || dspp->id >= DSPP_MAX) {
|
||||
DPU_ERROR("skip dspp %d with invalid id\n", dspp->id);
|
||||
continue;
|
||||
}
|
||||
hw = dpu_hw_dspp_init(dspp->id, mmio, cat);
|
||||
hw = dpu_hw_dspp_init(dspp, mmio);
|
||||
if (IS_ERR(hw)) {
|
||||
rc = PTR_ERR(hw);
|
||||
DPU_ERROR("failed dspp object creation: err %d\n", rc);
|
||||
|
@ -250,8 +213,12 @@ int dpu_rm_init(struct dpu_rm *rm,
|
|||
struct dpu_hw_dsc *hw;
|
||||
const struct dpu_dsc_cfg *dsc = &cat->dsc[i];
|
||||
|
||||
hw = dpu_hw_dsc_init(dsc->id, mmio, cat);
|
||||
if (IS_ERR_OR_NULL(hw)) {
|
||||
if (test_bit(DPU_DSC_HW_REV_1_2, &dsc->features))
|
||||
hw = dpu_hw_dsc_init_1_2(dsc, mmio);
|
||||
else
|
||||
hw = dpu_hw_dsc_init(dsc, mmio);
|
||||
|
||||
if (IS_ERR(hw)) {
|
||||
rc = PTR_ERR(hw);
|
||||
DPU_ERROR("failed dsc object creation: err %d\n", rc);
|
||||
goto fail;
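For context on how the rev-1.2 branch above gets selected: the decision comes from the per-SoC catalog, where a DSC block entry sets DPU_DSC_HW_REV_1_2 in its feature mask. The snippet below is only an illustrative sketch of such an entry (field values are made up; the real entries, including the sub-block configuration, live in dpu_hw_catalog.c):

static const struct dpu_dsc_cfg example_dsc[] = {
	{
		.name = "dce_0_0", .id = DSC_0,
		.base = 0x80000, .len = 0x29c,
		.features = BIT(DPU_DSC_HW_REV_1_2),
		/* illustrative values only; the .sblk sub-block offsets are omitted here */
	},
};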
|
||||
|
@ -263,12 +230,7 @@ int dpu_rm_init(struct dpu_rm *rm,
|
|||
struct dpu_hw_sspp *hw;
|
||||
const struct dpu_sspp_cfg *sspp = &cat->sspp[i];
|
||||
|
||||
if (sspp->id < SSPP_NONE || sspp->id >= SSPP_MAX) {
|
||||
DPU_ERROR("skip intf %d with invalid id\n", sspp->id);
|
||||
continue;
|
||||
}
|
||||
|
||||
hw = dpu_hw_sspp_init(sspp->id, mmio, cat);
|
||||
hw = dpu_hw_sspp_init(sspp, mmio, cat->ubwc);
|
||||
if (IS_ERR(hw)) {
|
||||
rc = PTR_ERR(hw);
|
||||
DPU_ERROR("failed sspp object creation: err %d\n", rc);
|
||||
|
|
|
@ -871,6 +871,20 @@ TRACE_EVENT(dpu_pp_connect_ext_te,
|
|||
TP_printk("pp:%d cfg:%u", __entry->pp, __entry->cfg)
|
||||
);
|
||||
|
||||
TRACE_EVENT(dpu_intf_connect_ext_te,
|
||||
TP_PROTO(enum dpu_intf intf, u32 cfg),
|
||||
TP_ARGS(intf, cfg),
|
||||
TP_STRUCT__entry(
|
||||
__field( enum dpu_intf, intf )
|
||||
__field( u32, cfg )
|
||||
),
|
||||
TP_fast_assign(
|
||||
__entry->intf = intf;
|
||||
__entry->cfg = cfg;
|
||||
),
|
||||
TP_printk("intf:%d cfg:%u", __entry->intf, __entry->cfg)
|
||||
);
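Usage note (standard TRACE_EVENT plumbing rather than anything specific to this patch): the new event is emitted from the driver as trace_dpu_intf_connect_ext_te(intf_idx, cfg) at the point where the INTF tear-check is wired to the external TE pin, and becomes visible once the dpu trace group is enabled through tracefs; the call-site spelling here is illustrative.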
|
||||
|
||||
TRACE_EVENT(dpu_core_irq_register_callback,
|
||||
TP_PROTO(int irq_idx, void *callback),
|
||||
TP_ARGS(irq_idx, callback),
|
||||
|
|
|
@ -103,6 +103,87 @@ static const struct mdp5_cfg_hw msm8x74v1_config = {
|
|||
.max_clk = 200000000,
|
||||
};
|
||||
|
||||
static const struct mdp5_cfg_hw msm8x26_config = {
|
||||
.name = "msm8x26",
|
||||
.mdp = {
|
||||
.count = 1,
|
||||
.caps = MDP_CAP_SMP |
|
||||
0,
|
||||
},
|
||||
.smp = {
|
||||
.mmb_count = 7,
|
||||
.mmb_size = 4096,
|
||||
.clients = {
|
||||
[SSPP_VIG0] = 1,
|
||||
[SSPP_DMA0] = 4,
|
||||
[SSPP_RGB0] = 7,
|
||||
},
|
||||
},
|
||||
.ctl = {
|
||||
.count = 2,
|
||||
.base = { 0x00500, 0x00600 },
|
||||
.flush_hw_mask = 0x0003ffff,
|
||||
},
|
||||
.pipe_vig = {
|
||||
.count = 1,
|
||||
.base = { 0x01100 },
|
||||
.caps = MDP_PIPE_CAP_HFLIP |
|
||||
MDP_PIPE_CAP_VFLIP |
|
||||
MDP_PIPE_CAP_SCALE |
|
||||
MDP_PIPE_CAP_CSC |
|
||||
0,
|
||||
},
|
||||
.pipe_rgb = {
|
||||
.count = 1,
|
||||
.base = { 0x01d00 },
|
||||
.caps = MDP_PIPE_CAP_HFLIP |
|
||||
MDP_PIPE_CAP_VFLIP |
|
||||
MDP_PIPE_CAP_SCALE |
|
||||
0,
|
||||
},
|
||||
.pipe_dma = {
|
||||
.count = 1,
|
||||
.base = { 0x02900 },
|
||||
.caps = MDP_PIPE_CAP_HFLIP |
|
||||
MDP_PIPE_CAP_VFLIP |
|
||||
0,
|
||||
},
|
||||
.lm = {
|
||||
.count = 2,
|
||||
.base = { 0x03100, 0x03d00 },
|
||||
.instances = {
|
||||
{ .id = 0, .pp = 0, .dspp = 0,
|
||||
.caps = MDP_LM_CAP_DISPLAY, },
|
||||
{ .id = 1, .pp = -1, .dspp = -1,
|
||||
.caps = MDP_LM_CAP_WB },
|
||||
},
|
||||
.nb_stages = 2,
|
||||
.max_width = 2048,
|
||||
.max_height = 0xFFFF,
|
||||
},
|
||||
.dspp = {
|
||||
.count = 1,
|
||||
.base = { 0x04500 },
|
||||
},
|
||||
.pp = {
|
||||
.count = 1,
|
||||
.base = { 0x21a00 },
|
||||
},
|
||||
.intf = {
|
||||
.base = { 0x00000, 0x21200 },
|
||||
.connect = {
|
||||
[0] = INTF_DISABLED,
|
||||
[1] = INTF_DSI,
|
||||
},
|
||||
},
|
||||
.perf = {
|
||||
.ab_inefficiency = 100,
|
||||
.ib_inefficiency = 200,
|
||||
.clk_inefficiency = 125
|
||||
},
|
||||
.max_clk = 200000000,
|
||||
};
|
||||
|
||||
static const struct mdp5_cfg_hw msm8x74v2_config = {
|
||||
.name = "msm8x74",
|
||||
.mdp = {
|
||||
|
@ -1236,6 +1317,7 @@ static const struct mdp5_cfg_hw sdm660_config = {
|
|||
|
||||
static const struct mdp5_cfg_handler cfg_handlers_v1[] = {
|
||||
{ .revision = 0, .config = { .hw = &msm8x74v1_config } },
|
||||
{ .revision = 1, .config = { .hw = &msm8x26_config } },
|
||||
{ .revision = 2, .config = { .hw = &msm8x74v2_config } },
|
||||
{ .revision = 3, .config = { .hw = &apq8084_config } },
|
||||
{ .revision = 6, .config = { .hw = &msm8x16_config } },
|
||||
|
|
|
@ -229,7 +229,7 @@ static void mdp5_kms_destroy(struct msm_kms *kms)
|
|||
#ifdef CONFIG_DEBUG_FS
|
||||
static int smp_show(struct seq_file *m, void *arg)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_info_node *node = m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
struct msm_drm_private *priv = dev->dev_private;
|
||||
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
|
||||
|
|
|
@ -14,7 +14,6 @@
|
|||
#include "dp_catalog.h"
|
||||
|
||||
struct dp_ctrl {
|
||||
bool orientation;
|
||||
atomic_t aborted;
|
||||
bool wide_bus_en;
|
||||
};
|
||||
|
|
|
@ -21,7 +21,6 @@
|
|||
struct dp_debug_private {
|
||||
struct dentry *root;
|
||||
|
||||
struct dp_usbpd *usbpd;
|
||||
struct dp_link *link;
|
||||
struct dp_panel *panel;
|
||||
struct drm_connector *connector;
|
||||
|
@ -232,14 +231,14 @@ static void dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor)
|
|||
}
|
||||
|
||||
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
|
||||
struct dp_usbpd *usbpd, struct dp_link *link,
|
||||
struct dp_link *link,
|
||||
struct drm_connector *connector, struct drm_minor *minor)
|
||||
{
|
||||
struct dp_debug_private *debug;
|
||||
struct dp_debug *dp_debug;
|
||||
int rc;
|
||||
|
||||
if (!dev || !panel || !usbpd || !link) {
|
||||
if (!dev || !panel || !link) {
|
||||
DRM_ERROR("invalid input\n");
|
||||
rc = -EINVAL;
|
||||
goto error;
|
||||
|
@ -252,7 +251,6 @@ struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
|
|||
}
|
||||
|
||||
debug->dp_debug.debug_en = false;
|
||||
debug->usbpd = usbpd;
|
||||
debug->link = link;
|
||||
debug->panel = panel;
|
||||
debug->dev = dev;
|
||||
|
|
|
@ -32,7 +32,6 @@ struct dp_debug {
|
|||
*
|
||||
* @dev: device instance of the caller
|
||||
* @panel: instance of panel module
|
||||
* @usbpd: instance of usbpd module
|
||||
* @link: instance of link module
|
||||
* @connector: double pointer to display connector
|
||||
* @minor: pointer to drm minor number after device registration
|
||||
|
@ -42,7 +41,7 @@ struct dp_debug {
|
|||
* for debugfs input to be communicated with existing modules
|
||||
*/
|
||||
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
|
||||
struct dp_usbpd *usbpd, struct dp_link *link,
|
||||
struct dp_link *link,
|
||||
struct drm_connector *connector,
|
||||
struct drm_minor *minor);
|
||||
|
||||
|
@ -59,7 +58,7 @@ void dp_debug_put(struct dp_debug *dp_debug);
|
|||
|
||||
static inline
|
||||
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
|
||||
struct dp_usbpd *usbpd, struct dp_link *link,
|
||||
struct dp_link *link,
|
||||
struct drm_connector *connector, struct drm_minor *minor)
|
||||
{
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
|
|
@ -14,7 +14,6 @@
|
|||
|
||||
#include "msm_drv.h"
|
||||
#include "msm_kms.h"
|
||||
#include "dp_hpd.h"
|
||||
#include "dp_parser.h"
|
||||
#include "dp_power.h"
|
||||
#include "dp_catalog.h"
|
||||
|
@ -92,7 +91,6 @@ struct dp_display_private {
|
|||
struct platform_device *pdev;
|
||||
struct dentry *root;
|
||||
|
||||
struct dp_usbpd *usbpd;
|
||||
struct dp_parser *parser;
|
||||
struct dp_power *power;
|
||||
struct dp_catalog *catalog;
|
||||
|
@ -102,7 +100,6 @@ struct dp_display_private {
|
|||
struct dp_ctrl *ctrl;
|
||||
struct dp_debug *debug;
|
||||
|
||||
struct dp_usbpd_cb usbpd_cb;
|
||||
struct dp_display_mode dp_mode;
|
||||
struct msm_dp dp_display;
|
||||
|
||||
|
@ -329,6 +326,8 @@ static void dp_display_unbind(struct device *dev, struct device *master,
|
|||
|
||||
kthread_stop(dp->ev_tsk);
|
||||
|
||||
of_dp_aux_depopulate_bus(dp->aux);
|
||||
|
||||
dp_power_client_deinit(dp->power);
|
||||
dp_unregister_audio_driver(dev, dp->audio);
|
||||
dp_aux_unregister(dp->aux);
|
||||
|
@ -467,7 +466,7 @@ static void dp_display_host_init(struct dp_display_private *dp)
|
|||
dp->dp_display.connector_type, dp->core_initialized,
|
||||
dp->phy_initialized);
|
||||
|
||||
dp_power_init(dp->power, false);
|
||||
dp_power_init(dp->power);
|
||||
dp_ctrl_reset_irq_ctrl(dp->ctrl, true);
|
||||
dp_aux_init(dp->aux);
|
||||
dp->core_initialized = true;
|
||||
|
@ -494,11 +493,6 @@ static int dp_display_usbpd_configure_cb(struct device *dev)
|
|||
return dp_display_process_hpd_high(dp);
|
||||
}
|
||||
|
||||
static int dp_display_usbpd_disconnect_cb(struct device *dev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dp_display_notify_disconnect(struct device *dev)
|
||||
{
|
||||
struct dp_display_private *dp = dev_get_dp_display_private(dev);
|
||||
|
@ -583,13 +577,9 @@ static int dp_display_usbpd_attention_cb(struct device *dev)
|
|||
|
||||
static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
|
||||
{
|
||||
struct dp_usbpd *hpd = dp->usbpd;
|
||||
u32 state;
|
||||
int ret;
|
||||
|
||||
if (!hpd)
|
||||
return 0;
|
||||
|
||||
mutex_lock(&dp->event_mutex);
|
||||
|
||||
state = dp->hpd_state;
|
||||
|
@ -644,12 +634,8 @@ static void dp_display_handle_plugged_change(struct msm_dp *dp_display,
|
|||
|
||||
static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
|
||||
{
|
||||
struct dp_usbpd *hpd = dp->usbpd;
|
||||
u32 state;
|
||||
|
||||
if (!hpd)
|
||||
return 0;
|
||||
|
||||
mutex_lock(&dp->event_mutex);
|
||||
|
||||
state = dp->hpd_state;
|
||||
|
@ -748,24 +734,10 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
|
|||
{
|
||||
int rc = 0;
|
||||
struct device *dev = &dp->pdev->dev;
|
||||
struct dp_usbpd_cb *cb = &dp->usbpd_cb;
|
||||
struct dp_panel_in panel_in = {
|
||||
.dev = dev,
|
||||
};
|
||||
|
||||
/* Callback APIs used for cable status change event */
|
||||
cb->configure = dp_display_usbpd_configure_cb;
|
||||
cb->disconnect = dp_display_usbpd_disconnect_cb;
|
||||
cb->attention = dp_display_usbpd_attention_cb;
|
||||
|
||||
dp->usbpd = dp_hpd_get(dev, cb);
|
||||
if (IS_ERR(dp->usbpd)) {
|
||||
rc = PTR_ERR(dp->usbpd);
|
||||
DRM_ERROR("failed to initialize hpd, rc = %d\n", rc);
|
||||
dp->usbpd = NULL;
|
||||
goto error;
|
||||
}
|
||||
|
||||
dp->parser = dp_parser_get(dp->pdev);
|
||||
if (IS_ERR(dp->parser)) {
|
||||
rc = PTR_ERR(dp->parser);
|
||||
|
@ -1328,9 +1300,9 @@ static int dp_display_remove(struct platform_device *pdev)
|
|||
{
|
||||
struct dp_display_private *dp = dev_get_dp_display_private(&pdev->dev);
|
||||
|
||||
component_del(&pdev->dev, &dp_display_comp_ops);
|
||||
dp_display_deinit_sub_modules(dp);
|
||||
|
||||
component_del(&pdev->dev, &dp_display_comp_ops);
|
||||
platform_set_drvdata(pdev, NULL);
|
||||
|
||||
return 0;
|
||||
|
@ -1499,7 +1471,7 @@ void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor)
|
|||
dp = container_of(dp_display, struct dp_display_private, dp_display);
|
||||
dev = &dp->pdev->dev;
|
||||
|
||||
dp->debug = dp_debug_get(dev, dp->panel, dp->usbpd,
|
||||
dp->debug = dp_debug_get(dev, dp->panel,
|
||||
dp->link, dp->dp_display.connector,
|
||||
minor);
|
||||
if (IS_ERR(dp->debug)) {
|
||||
|
@ -1509,11 +1481,6 @@ void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor)
|
|||
}
|
||||
}
|
||||
|
||||
static void of_dp_aux_depopulate_bus_void(void *data)
|
||||
{
|
||||
of_dp_aux_depopulate_bus(data);
|
||||
}
|
||||
|
||||
static int dp_display_get_next_bridge(struct msm_dp *dp)
|
||||
{
|
||||
int rc;
|
||||
|
@ -1541,12 +1508,6 @@ static int dp_display_get_next_bridge(struct msm_dp *dp)
|
|||
of_node_put(aux_bus);
|
||||
if (rc)
|
||||
goto error;
|
||||
|
||||
rc = devm_add_action_or_reset(dp->drm_dev->dev,
|
||||
of_dp_aux_depopulate_bus_void,
|
||||
dp_priv->aux);
|
||||
if (rc)
|
||||
goto error;
|
||||
} else if (dp->is_edp) {
|
||||
DRM_ERROR("eDP aux_bus not found\n");
|
||||
return -ENODEV;
|
||||
|
@ -1570,6 +1531,7 @@ static int dp_display_get_next_bridge(struct msm_dp *dp)
|
|||
|
||||
error:
|
||||
if (dp->is_edp) {
|
||||
of_dp_aux_depopulate_bus(dp_priv->aux);
|
||||
dp_display_host_phy_exit(dp_priv);
|
||||
dp_display_host_deinit(dp_priv);
|
||||
}
|
||||
|
|
|
@ -1,67 +0,0 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
|
||||
|
||||
#include <linux/slab.h>
|
||||
#include <linux/device.h>
|
||||
|
||||
#include "dp_hpd.h"
|
||||
|
||||
/* DP specific VDM commands */
|
||||
#define DP_USBPD_VDM_STATUS 0x10
|
||||
#define DP_USBPD_VDM_CONFIGURE 0x11
|
||||
|
||||
/* USBPD-TypeC specific Macros */
|
||||
#define VDM_VERSION 0x0
|
||||
#define USB_C_DP_SID 0xFF01
|
||||
|
||||
struct dp_hpd_private {
|
||||
struct device *dev;
|
||||
struct dp_usbpd_cb *dp_cb;
|
||||
struct dp_usbpd dp_usbpd;
|
||||
};
|
||||
|
||||
int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
|
||||
{
|
||||
int rc = 0;
|
||||
struct dp_hpd_private *hpd_priv;
|
||||
|
||||
hpd_priv = container_of(dp_usbpd, struct dp_hpd_private,
|
||||
dp_usbpd);
|
||||
|
||||
if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure
|
||||
|| !hpd_priv->dp_cb->disconnect) {
|
||||
pr_err("hpd dp_cb not initialized\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (hpd)
|
||||
hpd_priv->dp_cb->configure(hpd_priv->dev);
|
||||
else
|
||||
hpd_priv->dp_cb->disconnect(hpd_priv->dev);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
struct dp_usbpd *dp_hpd_get(struct device *dev, struct dp_usbpd_cb *cb)
|
||||
{
|
||||
struct dp_hpd_private *dp_hpd;
|
||||
|
||||
if (!cb) {
|
||||
pr_err("invalid cb data\n");
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
dp_hpd = devm_kzalloc(dev, sizeof(*dp_hpd), GFP_KERNEL);
|
||||
if (!dp_hpd)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
dp_hpd->dev = dev;
|
||||
dp_hpd->dp_cb = cb;
|
||||
|
||||
dp_hpd->dp_usbpd.connect = dp_hpd_connect;
|
||||
|
||||
return &dp_hpd->dp_usbpd;
|
||||
}
|
|
@ -1,78 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#ifndef _DP_HPD_H_
|
||||
#define _DP_HPD_H_
|
||||
|
||||
//#include <linux/usb/usbpd.h>
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/device.h>
|
||||
|
||||
enum plug_orientation {
|
||||
ORIENTATION_NONE,
|
||||
ORIENTATION_CC1,
|
||||
ORIENTATION_CC2,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct dp_usbpd - DisplayPort status
|
||||
*
|
||||
* @orientation: plug orientation configuration
|
||||
* @low_pow_st: low power state
|
||||
* @adaptor_dp_en: adaptor functionality enabled
|
||||
* @multi_func: multi-function preferred
|
||||
* @usb_config_req: request to switch to usb
|
||||
* @exit_dp_mode: request exit from displayport mode
|
||||
* @hpd_irq: Change in the status since last message
|
||||
* @alt_mode_cfg_done: bool to specify alt mode status
|
||||
* @debug_en: bool to specify debug mode
|
||||
* @connect: simulate disconnect or connect for debug mode
|
||||
*/
|
||||
struct dp_usbpd {
|
||||
enum plug_orientation orientation;
|
||||
bool low_pow_st;
|
||||
bool adaptor_dp_en;
|
||||
bool multi_func;
|
||||
bool usb_config_req;
|
||||
bool exit_dp_mode;
|
||||
bool hpd_irq;
|
||||
bool alt_mode_cfg_done;
|
||||
bool debug_en;
|
||||
|
||||
int (*connect)(struct dp_usbpd *dp_usbpd, bool hpd);
|
||||
};
|
||||
|
||||
/**
|
||||
* struct dp_usbpd_cb - callback functions provided by the client
|
||||
*
|
||||
* @configure: called by usbpd module when PD communication has
|
||||
* been completed and the usb peripheral has been configured on
|
||||
* dp mode.
|
||||
* @disconnect: notify the cable disconnect issued by usb.
|
||||
* @attention: notify any attention message issued by usb.
|
||||
*/
|
||||
struct dp_usbpd_cb {
|
||||
int (*configure)(struct device *dev);
|
||||
int (*disconnect)(struct device *dev);
|
||||
int (*attention)(struct device *dev);
|
||||
};
|
||||
|
||||
/**
|
||||
* dp_hpd_get() - setup hpd module
|
||||
*
|
||||
* @dev: device instance of the caller
|
||||
* @cb: struct containing callback function pointers.
|
||||
*
|
||||
* This function allows the client to initialize the usbpd
|
||||
* module. The module will communicate with HPD module.
|
||||
*/
|
||||
struct dp_usbpd *dp_hpd_get(struct device *dev, struct dp_usbpd_cb *cb);
|
||||
|
||||
int dp_hpd_register(struct dp_usbpd *dp_usbpd);
|
||||
void dp_hpd_unregister(struct dp_usbpd *dp_usbpd);
|
||||
int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd);
|
||||
|
||||
#endif /* _DP_HPD_H_ */
|
|
@ -10,7 +10,6 @@
|
|||
|
||||
#include "dp_aux.h"
|
||||
#include "dp_link.h"
|
||||
#include "dp_hpd.h"
|
||||
|
||||
struct edid;
|
||||
|
||||
|
|
|
@ -14,7 +14,6 @@
|
|||
|
||||
struct dp_power_private {
|
||||
struct dp_parser *parser;
|
||||
struct platform_device *pdev;
|
||||
struct device *dev;
|
||||
struct drm_device *drm_dev;
|
||||
struct clk *link_clk_src;
|
||||
|
@ -28,32 +27,23 @@ static int dp_power_clk_init(struct dp_power_private *power)
|
|||
{
|
||||
int rc = 0;
|
||||
struct dss_module_power *core, *ctrl, *stream;
|
||||
struct device *dev = &power->pdev->dev;
|
||||
struct device *dev = power->dev;
|
||||
|
||||
core = &power->parser->mp[DP_CORE_PM];
|
||||
ctrl = &power->parser->mp[DP_CTRL_PM];
|
||||
stream = &power->parser->mp[DP_STREAM_PM];
|
||||
|
||||
rc = devm_clk_bulk_get(dev, core->num_clk, core->clocks);
|
||||
if (rc) {
|
||||
DRM_ERROR("failed to get %s clk. err=%d\n",
|
||||
dp_parser_pm_name(DP_CORE_PM), rc);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = devm_clk_bulk_get(dev, ctrl->num_clk, ctrl->clocks);
|
||||
if (rc) {
|
||||
DRM_ERROR("failed to get %s clk. err=%d\n",
|
||||
dp_parser_pm_name(DP_CTRL_PM), rc);
|
||||
if (rc)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
rc = devm_clk_bulk_get(dev, stream->num_clk, stream->clocks);
|
||||
if (rc) {
|
||||
DRM_ERROR("failed to get %s clk. err=%d\n",
|
||||
dp_parser_pm_name(DP_CTRL_PM), rc);
|
||||
if (rc)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -121,11 +111,9 @@ int dp_power_clk_enable(struct dp_power *dp_power,
|
|||
mp = &power->parser->mp[DP_CORE_PM];
|
||||
|
||||
rc = clk_bulk_prepare_enable(mp->num_clk, mp->clocks);
|
||||
if (rc) {
|
||||
DRM_ERROR("fail to enable clks: %s. err=%d\n",
|
||||
dp_parser_pm_name(DP_CORE_PM), rc);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
|
||||
dp_power->core_clks_on = true;
|
||||
}
|
||||
}
|
||||
|
@ -133,10 +121,8 @@ int dp_power_clk_enable(struct dp_power *dp_power,
|
|||
mp = &power->parser->mp[pm_type];
|
||||
if (enable) {
|
||||
rc = clk_bulk_prepare_enable(mp->num_clk, mp->clocks);
|
||||
if (rc) {
|
||||
DRM_ERROR("failed to enable clks, err: %d\n", rc);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
} else {
|
||||
clk_bulk_disable_unprepare(mp->num_clk, mp->clocks);
|
||||
}
|
||||
|
@ -162,63 +148,37 @@ int dp_power_clk_enable(struct dp_power *dp_power,
|
|||
|
||||
int dp_power_client_init(struct dp_power *dp_power)
|
||||
{
|
||||
int rc = 0;
|
||||
struct dp_power_private *power;
|
||||
|
||||
if (!dp_power) {
|
||||
DRM_ERROR("invalid power data\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
power = container_of(dp_power, struct dp_power_private, dp_power);
|
||||
|
||||
pm_runtime_enable(&power->pdev->dev);
|
||||
pm_runtime_enable(power->dev);
|
||||
|
||||
rc = dp_power_clk_init(power);
|
||||
if (rc)
|
||||
DRM_ERROR("failed to init clocks %d\n", rc);
|
||||
|
||||
return rc;
|
||||
return dp_power_clk_init(power);
|
||||
}
|
||||
|
||||
void dp_power_client_deinit(struct dp_power *dp_power)
|
||||
{
|
||||
struct dp_power_private *power;
|
||||
|
||||
if (!dp_power) {
|
||||
DRM_ERROR("invalid power data\n");
|
||||
return;
|
||||
}
|
||||
|
||||
power = container_of(dp_power, struct dp_power_private, dp_power);
|
||||
|
||||
pm_runtime_disable(&power->pdev->dev);
|
||||
pm_runtime_disable(power->dev);
|
||||
}
|
||||
|
||||
int dp_power_init(struct dp_power *dp_power, bool flip)
|
||||
int dp_power_init(struct dp_power *dp_power)
|
||||
{
|
||||
int rc = 0;
|
||||
struct dp_power_private *power = NULL;
|
||||
|
||||
if (!dp_power) {
|
||||
DRM_ERROR("invalid power data\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
power = container_of(dp_power, struct dp_power_private, dp_power);
|
||||
|
||||
pm_runtime_get_sync(&power->pdev->dev);
|
||||
pm_runtime_get_sync(power->dev);
|
||||
|
||||
rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true);
|
||||
if (rc) {
|
||||
DRM_ERROR("failed to enable DP core clocks, %d\n", rc);
|
||||
goto exit;
|
||||
}
|
||||
if (rc)
|
||||
pm_runtime_put_sync(power->dev);
|
||||
|
||||
return 0;
|
||||
|
||||
exit:
|
||||
pm_runtime_put_sync(&power->pdev->dev);
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@ -229,7 +189,7 @@ int dp_power_deinit(struct dp_power *dp_power)
|
|||
power = container_of(dp_power, struct dp_power_private, dp_power);
|
||||
|
||||
dp_power_clk_enable(dp_power, DP_CORE_PM, false);
|
||||
pm_runtime_put_sync(&power->pdev->dev);
|
||||
pm_runtime_put_sync(power->dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -238,17 +198,11 @@ struct dp_power *dp_power_get(struct device *dev, struct dp_parser *parser)
|
|||
struct dp_power_private *power;
|
||||
struct dp_power *dp_power;
|
||||
|
||||
if (!parser) {
|
||||
DRM_ERROR("invalid input\n");
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
power = devm_kzalloc(&parser->pdev->dev, sizeof(*power), GFP_KERNEL);
|
||||
power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL);
|
||||
if (!power)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
power->parser = parser;
|
||||
power->pdev = parser->pdev;
|
||||
power->dev = dev;
|
||||
|
||||
dp_power = &power->dp_power;
|
||||
|
|
|
@ -26,13 +26,12 @@ struct dp_power {
|
|||
* dp_power_init() - enable power supplies for display controller
|
||||
*
|
||||
* @power: instance of power module
|
||||
* @flip: bool for flipping gpio direction
|
||||
* return: 0 if success or error if failure.
|
||||
*
|
||||
* This API will turn on the regulators and configures gpio's
|
||||
* aux/hpd.
|
||||
*/
|
||||
int dp_power_init(struct dp_power *power, bool flip);
|
||||
int dp_power_init(struct dp_power *power);
|
||||
|
||||
/**
|
||||
* dp_power_deinit() - turn off regulators and gpios.
|
||||
|
|
|
@ -245,6 +245,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
|
|||
&apq8064_dsi_cfg, &msm_dsi_v2_host_ops},
|
||||
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
|
||||
&msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
|
||||
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0_2,
|
||||
&msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
|
||||
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1,
|
||||
&msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
|
||||
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1_1,
|
||||
|
|
|
@ -11,6 +11,7 @@
|
|||
#define MSM_DSI_VER_MAJOR_V2 0x02
|
||||
#define MSM_DSI_VER_MAJOR_6G 0x03
|
||||
#define MSM_DSI_6G_VER_MINOR_V1_0 0x10000000
|
||||
#define MSM_DSI_6G_VER_MINOR_V1_0_2 0x10000002
|
||||
#define MSM_DSI_6G_VER_MINOR_V1_1 0x10010000
|
||||
#define MSM_DSI_6G_VER_MINOR_V1_1_1 0x10010001
|
||||
#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000
|
||||
|
|
|
@ -28,6 +28,7 @@
|
|||
#include "dsi.xml.h"
|
||||
#include "sfpb.xml.h"
|
||||
#include "dsi_cfg.h"
|
||||
#include "msm_dsc_helper.h"
|
||||
#include "msm_kms.h"
|
||||
#include "msm_gem.h"
|
||||
#include "phy/dsi_phy.h"
|
||||
|
@ -117,8 +118,6 @@ struct msm_dsi_host {
|
|||
struct clk *byte_clk;
|
||||
struct clk *esc_clk;
|
||||
struct clk *pixel_clk;
|
||||
struct clk *byte_clk_src;
|
||||
struct clk *pixel_clk_src;
|
||||
struct clk *byte_intf_clk;
|
||||
|
||||
unsigned long byte_clk_rate;
|
||||
|
@ -128,8 +127,6 @@ struct msm_dsi_host {
|
|||
|
||||
/* DSI v2 specific clocks */
|
||||
struct clk *src_clk;
|
||||
struct clk *esc_clk_src;
|
||||
struct clk *dsi_clk_src;
|
||||
|
||||
unsigned long src_clk_rate;
|
||||
|
||||
|
@ -266,21 +263,6 @@ int dsi_clk_init_v2(struct msm_dsi_host *msm_host)
|
|||
return ret;
|
||||
}
|
||||
|
||||
msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
|
||||
if (!msm_host->esc_clk_src) {
|
||||
ret = -ENODEV;
|
||||
pr_err("%s: can't get esc clock parent. ret=%d\n",
|
||||
__func__, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
|
||||
if (!msm_host->dsi_clk_src) {
|
||||
ret = -ENODEV;
|
||||
pr_err("%s: can't get src clock parent. ret=%d\n",
|
||||
__func__, ret);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -345,20 +327,6 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
|
|||
goto exit;
|
||||
}
|
||||
|
||||
msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
|
||||
if (IS_ERR(msm_host->byte_clk_src)) {
|
||||
ret = PTR_ERR(msm_host->byte_clk_src);
|
||||
pr_err("%s: can't find byte_clk clock. ret=%d\n", __func__, ret);
|
||||
goto exit;
|
||||
}
|
||||
|
||||
msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
|
||||
if (IS_ERR(msm_host->pixel_clk_src)) {
|
||||
ret = PTR_ERR(msm_host->pixel_clk_src);
|
||||
pr_err("%s: can't find pixel_clk clock. ret=%d\n", __func__, ret);
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (cfg_hnd->ops->clk_init_ver)
|
||||
ret = cfg_hnd->ops->clk_init_ver(msm_host);
|
||||
exit:
|
||||
|
@ -560,12 +528,27 @@ void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
|
|||
clk_disable_unprepare(msm_host->byte_clk);
|
||||
}
|
||||
|
||||
static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode, bool is_bonded_dsi)
static unsigned long dsi_adjust_pclk_for_compression(const struct drm_display_mode *mode,
		const struct drm_dsc_config *dsc)
{
	int new_hdisplay = DIV_ROUND_UP(mode->hdisplay * drm_dsc_get_bpp_int(dsc),
			dsc->bits_per_component * 3);

	int new_htotal = mode->htotal - mode->hdisplay + new_hdisplay;

	return new_htotal * mode->vtotal * drm_mode_vrefresh(mode);
}

static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode,
		const struct drm_dsc_config *dsc, bool is_bonded_dsi)
{
	unsigned long pclk_rate;

	pclk_rate = mode->clock * 1000;

	if (dsc)
		pclk_rate = dsi_adjust_pclk_for_compression(mode, dsc);

	/*
	 * For bonded DSI mode, the current DRM mode has the complete width of the
	 * panel. Since, the complete panel is driven by two DSI controllers,
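Reviewer note, with purely illustrative numbers (not taken from any real panel timing): for hdisplay = 1080, a DSC target of 8 bpp (drm_dsc_get_bpp_int() == 8) and bits_per_component = 8, new_hdisplay = DIV_ROUND_UP(1080 * 8, 8 * 3) = 360, so the active width shrinks to a third while the blanking carried in (htotal - hdisplay) is left untouched; the returned value is new_htotal * vtotal * drm_mode_vrefresh(mode), i.e. a pixel rate in Hz, matching the uncompressed mode->clock * 1000 path.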
|
||||
|
@ -584,8 +567,8 @@ unsigned long dsi_byte_clk_get_rate(struct mipi_dsi_host *host, bool is_bonded_d
|
|||
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
|
||||
u8 lanes = msm_host->lanes;
|
||||
u32 bpp = dsi_get_bpp(msm_host->format);
|
||||
unsigned long pclk_rate = dsi_get_pclk_rate(mode, is_bonded_dsi);
|
||||
u64 pclk_bpp = (u64)pclk_rate * bpp;
|
||||
unsigned long pclk_rate = dsi_get_pclk_rate(mode, msm_host->dsc, is_bonded_dsi);
|
||||
unsigned long pclk_bpp;
|
||||
|
||||
if (lanes == 0) {
|
||||
pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
|
||||
|
@ -594,16 +577,16 @@ unsigned long dsi_byte_clk_get_rate(struct mipi_dsi_host *host, bool is_bonded_d

	/* CPHY "byte_clk" is in units of 16 bits */
	if (msm_host->cphy_mode)
		do_div(pclk_bpp, (16 * lanes));
		pclk_bpp = mult_frac(pclk_rate, bpp, 16 * lanes);
	else
		do_div(pclk_bpp, (8 * lanes));
		pclk_bpp = mult_frac(pclk_rate, bpp, 8 * lanes);

	return pclk_bpp;
}

static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
	msm_host->pixel_clk_rate = dsi_get_pclk_rate(msm_host->mode, is_bonded_dsi);
	msm_host->pixel_clk_rate = dsi_get_pclk_rate(msm_host->mode, msm_host->dsc, is_bonded_dsi);
	msm_host->byte_clk_rate = dsi_byte_clk_get_rate(&msm_host->base, is_bonded_dsi,
			msm_host->mode);
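As a sanity check on the mult_frac() form above (illustrative numbers only): a 148.5 MHz pixel clock with RGB888 (bpp = 24) on 4 D-PHY lanes gives byte_clk = 148500000 * 24 / (8 * 4) = 111.375 MHz; in C-PHY mode the divisor is 16 * lanes because the "byte" clock there counts 16-bit units. mult_frac() also sidesteps the intermediate u64 product that the old do_div() sequence needed.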
|
||||
|
||||
|
@ -627,15 +610,12 @@ int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
|
|||
int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
|
||||
{
|
||||
u32 bpp = dsi_get_bpp(msm_host->format);
|
||||
u64 pclk_bpp;
|
||||
unsigned int esc_mhz, esc_div;
|
||||
unsigned long byte_mhz;
|
||||
|
||||
dsi_calc_pclk(msm_host, is_bonded_dsi);
|
||||
|
||||
pclk_bpp = (u64)dsi_get_pclk_rate(msm_host->mode, is_bonded_dsi) * bpp;
|
||||
do_div(pclk_bpp, 8);
|
||||
msm_host->src_clk_rate = pclk_bpp;
|
||||
msm_host->src_clk_rate = mult_frac(msm_host->pixel_clk_rate, bpp, 8);
|
||||
|
||||
/*
|
||||
* esc clock is byte clock followed by a 4 bit divider,
|
||||
|
@ -725,7 +705,12 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
|
|||
}
|
||||
}
|
||||
|
||||
static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
|
||||
static void dsi_ctrl_disable(struct msm_dsi_host *msm_host)
|
||||
{
|
||||
dsi_write(msm_host, REG_DSI_CTRL, 0);
|
||||
}
|
||||
|
||||
static void dsi_ctrl_enable(struct msm_dsi_host *msm_host,
|
||||
struct msm_dsi_phy_shared_timings *phy_shared_timings, struct msm_dsi_phy *phy)
|
||||
{
|
||||
u32 flags = msm_host->mode_flags;
|
||||
|
@ -733,11 +718,6 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
|
|||
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
|
||||
u32 data = 0, lane_ctrl = 0;
|
||||
|
||||
if (!enable) {
|
||||
dsi_write(msm_host, REG_DSI_CTRL, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (flags & MIPI_DSI_MODE_VIDEO) {
|
||||
if (flags & MIPI_DSI_MODE_VIDEO_HSE)
|
||||
data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
|
||||
|
@ -822,7 +802,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
|
|||
if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) {
|
||||
lane_ctrl = dsi_read(msm_host, REG_DSI_LANE_CTRL);
|
||||
|
||||
if (msm_dsi_phy_set_continuous_clock(phy, enable))
|
||||
if (msm_dsi_phy_set_continuous_clock(phy, true))
|
||||
lane_ctrl &= ~DSI_LANE_CTRL_HS_REQ_SEL_PHY;
|
||||
|
||||
dsi_write(msm_host, REG_DSI_LANE_CTRL,
|
||||
|
@ -848,20 +828,19 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
	/* first calculate dsc parameters and then program
	 * compress mode registers
	 */
	slice_per_intf = DIV_ROUND_UP(hdisplay, dsc->slice_width);

	/*
	 * If slice_count is greater than slice_per_intf
	 * then default to 1. This can happen during partial
	 * update.
	 */
	if (dsc->slice_count > slice_per_intf)
		dsc->slice_count = 1;
	slice_per_intf = msm_dsc_get_slices_per_intf(dsc, hdisplay);

	total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf;

	eol_byte_num = total_bytes_per_intf % 3;
	pkt_per_line = slice_per_intf / dsc->slice_count;

	/*
	 * Typically, pkt_per_line = slice_per_intf * slice_per_pkt.
	 *
	 * Since the current driver only supports slice_per_pkt = 1,
	 * pkt_per_line will be equal to slice per intf for now.
	 */
	pkt_per_line = slice_per_intf;

	if (is_cmd_mode) /* packet data type */
		reg = DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE(MIPI_DSI_DCS_LONG_WRITE);
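To make the register math above concrete (values are illustrative, not from a real panel): with hdisplay = 1080 and slice_width = 540, msm_dsc_get_slices_per_intf() yields slice_per_intf = 2; with slice_chunk_size = 540 bytes, total_bytes_per_intf = 1080 and eol_byte_num = 1080 % 3 = 0, and since the driver still assumes slice_per_pkt = 1, pkt_per_line = slice_per_intf = 2.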
|
||||
|
@ -951,7 +930,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
		 * pulse width same
		 */
		h_total -= hdisplay;
		hdisplay /= 3;
		hdisplay = DIV_ROUND_UP(msm_dsc_get_bytes_per_line(msm_host->dsc), 3);
		h_total += hdisplay;
		ha_end = ha_start + hdisplay;
	}
|
||||
|
@ -985,7 +964,14 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
		if (!msm_host->dsc)
			wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
		else
			wc = msm_host->dsc->slice_chunk_size * msm_host->dsc->slice_count + 1;
			/*
			 * When DSC is enabled, WC = slice_chunk_size * slice_per_pkt + 1.
			 * Currently, the driver only supports default value of slice_per_pkt = 1
			 *
			 * TODO: Expand mipi_dsi_device struct to hold slice_per_pkt info
			 * and adjust DSC math to account for slice_per_pkt.
			 */
			wc = msm_host->dsc->slice_chunk_size + 1;

		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM0_CTRL,
			DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT(wc) |
|
||||
|
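With slice_per_pkt fixed at 1, the MDP stream word count stops scaling with slice_count. For a concrete feel of the change, a throwaway sketch; the 540-byte chunk and 2 slices per line are invented for the example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Example DSC config (made-up values): 540-byte chunks, 2 slices per line. */
    uint32_t slice_chunk_size = 540, slice_count = 2;

    uint32_t wc_old = slice_chunk_size * slice_count + 1;  /* previous formula */
    uint32_t wc_new = slice_chunk_size + 1;                /* slice_per_pkt == 1 */

    printf("old WC = %u, new WC = %u\n", wc_old, wc_new);
    return 0;
}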
@@ -1731,28 +1717,9 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
    return -EINVAL;
}

static u32 dsi_dsc_rc_buf_thresh[DSC_NUM_BUF_RANGES - 1] = {
    0x0e, 0x1c, 0x2a, 0x38, 0x46, 0x54, 0x62,
    0x69, 0x70, 0x77, 0x79, 0x7b, 0x7d, 0x7e
};

/* only 8bpc, 8bpp added */
static char min_qp[DSC_NUM_BUF_RANGES] = {
    0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 13
};

static char max_qp[DSC_NUM_BUF_RANGES] = {
    4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 11, 12, 13, 13, 15
};

static char bpg_offset[DSC_NUM_BUF_RANGES] = {
    2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
};

static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc_config *dsc)
{
    int i;
    u16 bpp = dsc->bits_per_pixel >> 4;
    int ret;

    if (dsc->bits_per_pixel & 0xf) {
        DRM_DEV_ERROR(&msm_host->pdev->dev, "DSI does not support fractional bits_per_pixel\n");
@@ -1764,49 +1731,23 @@ static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc
        return -EOPNOTSUPP;
    }

    dsc->rc_model_size = 8192;
    dsc->first_line_bpg_offset = 12;
    dsc->rc_edge_factor = 6;
    dsc->rc_tgt_offset_high = 3;
    dsc->rc_tgt_offset_low = 3;
    dsc->simple_422 = 0;
    dsc->convert_rgb = 1;
    dsc->vbr_enable = 0;

    /* handle only bpp = bpc = 8 */
    for (i = 0; i < DSC_NUM_BUF_RANGES - 1 ; i++)
        dsc->rc_buf_thresh[i] = dsi_dsc_rc_buf_thresh[i];
    drm_dsc_set_const_params(dsc);
    drm_dsc_set_rc_buf_thresh(dsc);

    for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
        dsc->rc_range_params[i].range_min_qp = min_qp[i];
        dsc->rc_range_params[i].range_max_qp = max_qp[i];
        /*
         * Range BPG Offset contains two's-complement signed values that fill
         * 8 bits, yet the registers and DCS PPS field are only 6 bits wide.
         */
        dsc->rc_range_params[i].range_bpg_offset = bpg_offset[i] & DSC_RANGE_BPG_OFFSET_MASK;
    /* handle only bpp = bpc = 8, pre-SCR panels */
    ret = drm_dsc_setup_rc_params(dsc, DRM_DSC_1_1_PRE_SCR);
    if (ret) {
        DRM_DEV_ERROR(&msm_host->pdev->dev, "could not find DSC RC parameters\n");
        return ret;
    }

    dsc->initial_offset = 6144;   /* Not bpp 12 */
    if (bpp != 8)
        dsc->initial_offset = 2048;   /* bpp = 12 */

    if (dsc->bits_per_component <= 10)
        dsc->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
    else
        dsc->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC;

    dsc->initial_xmit_delay = 512;
    dsc->initial_scale_value = 32;
    dsc->first_line_bpg_offset = 12;
    dsc->initial_scale_value = drm_dsc_initial_scale_value(dsc);
    dsc->line_buf_depth = dsc->bits_per_component + 1;

    /* bpc 8 */
    dsc->flatness_min_qp = 3;
    dsc->flatness_max_qp = 12;
    dsc->rc_quant_incr_limit0 = 11;
    dsc->rc_quant_incr_limit1 = 11;

    return drm_dsc_compute_rc_parameters(dsc);
}
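The per-driver QP/BPG tables above give way to the generic DRM DSC helpers. A hedged sketch of the resulting call sequence for an 8 bpc / 8 bpp, DSC 1.1 pre-SCR configuration; the slice geometry and the example_populate_dsc() wrapper are illustrative and not taken from the patch:

#include <drm/display/drm_dsc.h>
#include <drm/display/drm_dsc_helper.h>

/*
 * Hedged sketch: populate a drm_dsc_config with the shared DRM helpers in the
 * same order this patch uses them. The geometry below is an example only.
 */
static int example_populate_dsc(struct drm_dsc_config *dsc)
{
    int ret;

    dsc->dsc_version_major = 1;
    dsc->dsc_version_minor = 1;
    dsc->bits_per_component = 8;
    dsc->bits_per_pixel = 8 << 4;     /* 6.4 fixed point, integer bpp */
    dsc->slice_width = 540;           /* example geometry */
    dsc->slice_height = 16;

    drm_dsc_set_const_params(dsc);    /* fixed fields of the DSC standard */
    drm_dsc_set_rc_buf_thresh(dsc);   /* standard rc_buf_thresh table */

    ret = drm_dsc_setup_rc_params(dsc, DRM_DSC_1_1_PRE_SCR);
    if (ret)
        return ret;

    return drm_dsc_compute_rc_parameters(dsc);
}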
@@ -2417,7 +2358,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,

    dsi_timing_setup(msm_host, is_bonded_dsi);
    dsi_sw_reset(msm_host);
    dsi_ctrl_config(msm_host, true, phy_shared_timings, phy);
    dsi_ctrl_enable(msm_host, phy_shared_timings, phy);

    if (msm_host->disp_en_gpio)
        gpiod_set_value(msm_host->disp_en_gpio, 1);

@@ -2449,7 +2390,7 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
        goto unlock_ret;
    }

    dsi_ctrl_config(msm_host, false, NULL, NULL);
    dsi_ctrl_disable(msm_host);

    if (msm_host->disp_en_gpio)
        gpiod_set_value(msm_host->disp_en_gpio, 0);
@@ -34,32 +34,6 @@ static struct msm_dsi_manager msm_dsim_glb;
#define IS_SYNC_NEEDED()    (msm_dsim_glb.is_sync_needed)
#define IS_MASTER_DSI_LINK(id)    (msm_dsim_glb.master_dsi_link_id == id)

#ifdef CONFIG_OF
static bool dsi_mgr_power_on_early(struct drm_bridge *bridge)
{
    struct drm_bridge *next_bridge = drm_bridge_get_next_bridge(bridge);

    /*
     * If the next bridge in the chain is the Parade ps8640 bridge chip
     * then don't power on early since it seems to violate the expectations
     * of the firmware that the bridge chip is running.
     *
     * NOTE: this is expected to be a temporary special case. It's expected
     * that we'll eventually have a framework that allows the next level
     * bridge to indicate whether it needs us to power on before it or
     * after it. When that framework is in place then we'll use it and
     * remove this special case.
     */
    return !(next_bridge && next_bridge->of_node &&
         of_device_is_compatible(next_bridge->of_node, "parade,ps8640"));
}
#else
static inline bool dsi_mgr_power_on_early(struct drm_bridge *bridge)
{
    return true;
}
#endif

static inline struct msm_dsi *dsi_mgr_get_dsi(int id)
{
    return msm_dsim_glb.dsi[id];
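The NOTE in the removed helper anticipated a generic way for the next bridge to choose the power-up order; drm_bridge now provides a pre_enable_prev_first flag for exactly that ordering. A hedged sketch of how a downstream bridge driver would request host-first power-up (example_bridge_setup() is hypothetical, not part of this patch):

#include <drm/drm_bridge.h>

/*
 * Hypothetical downstream bridge driver: ask DRM to run the previous
 * bridge's (the DSI host's) pre_enable before this bridge's pre_enable.
 */
static void example_bridge_setup(struct drm_bridge *bridge)
{
    bridge->pre_enable_prev_first = true;
    drm_bridge_add(bridge);
}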
@@ -254,7 +228,7 @@ static void msm_dsi_manager_set_split_display(u8 id)
    }
}

static void dsi_mgr_bridge_power_on(struct drm_bridge *bridge)
static int dsi_mgr_bridge_power_on(struct drm_bridge *bridge)
{
    int id = dsi_mgr_bridge_get_id(bridge);
    struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);

@@ -265,12 +239,6 @@ static void dsi_mgr_bridge_power_on(struct drm_bridge *bridge)
    int ret;

    DBG("id=%d", id);
    if (!msm_dsi_device_connected(msm_dsi))
        return;

    /* Do nothing with the host if it is slave-DSI in case of bonded DSI */
    if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
        return;

    ret = dsi_mgr_phy_enable(id, phy_shared_timings);
    if (ret)
@@ -300,14 +268,31 @@ static void dsi_mgr_bridge_power_on(struct drm_bridge *bridge)
    if (is_bonded_dsi && msm_dsi1)
        msm_dsi_host_enable_irq(msm_dsi1->host);

    return;
    return 0;

host1_on_fail:
    msm_dsi_host_power_off(host);
host_on_fail:
    dsi_mgr_phy_disable(id);
phy_en_fail:
    return;
    return ret;
}

static void dsi_mgr_bridge_power_off(struct drm_bridge *bridge)
{
    int id = dsi_mgr_bridge_get_id(bridge);
    struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
    struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
    struct mipi_dsi_host *host = msm_dsi->host;
    bool is_bonded_dsi = IS_BONDED_DSI();

    msm_dsi_host_disable_irq(host);
    if (is_bonded_dsi && msm_dsi1) {
        msm_dsi_host_disable_irq(msm_dsi1->host);
        msm_dsi_host_power_off(msm_dsi1->host);
    }
    msm_dsi_host_power_off(host);
    dsi_mgr_phy_disable(id);
}

static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
@@ -327,8 +312,11 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
    if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
        return;

    if (!dsi_mgr_power_on_early(bridge))
        dsi_mgr_bridge_power_on(bridge);
    ret = dsi_mgr_bridge_power_on(bridge);
    if (ret) {
        dev_err(&msm_dsi->pdev->dev, "Power on failed: %d\n", ret);
        return;
    }

    ret = msm_dsi_host_enable(host);
    if (ret) {

@@ -349,8 +337,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
host1_en_fail:
    msm_dsi_host_disable(host);
host_en_fail:

    return;
    dsi_mgr_bridge_power_off(bridge);
}

void msm_dsi_manager_tpg_enable(void)

@@ -438,9 +425,6 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
    msm_dsi_host_set_display_mode(host, adjusted_mode);
    if (is_bonded_dsi && other_dsi)
        msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode);

    if (dsi_mgr_power_on_early(bridge))
        dsi_mgr_bridge_power_on(bridge);
}

static enum drm_mode_status dsi_mgr_bridge_mode_valid(struct drm_bridge *bridge,
@@ -541,6 +541,8 @@ static const struct of_device_id dsi_phy_dt_match[] = {
      .data = &dsi_phy_28nm_hpm_famb_cfgs },
    { .compatible = "qcom,dsi-phy-28nm-lp",
      .data = &dsi_phy_28nm_lp_cfgs },
    { .compatible = "qcom,dsi-phy-28nm-8226",
      .data = &dsi_phy_28nm_8226_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
    { .compatible = "qcom,dsi-phy-20nm",
@@ -46,8 +46,9 @@ struct msm_dsi_phy_cfg {
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8226_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_2290_cfgs;
@@ -539,6 +539,9 @@ static int dsi_pll_14nm_vco_prepare(struct clk_hw *hw)
    if (unlikely(pll_14nm->phy->pll_on))
        return 0;

    if (dsi_pll_14nm_vco_recalc_rate(hw, VCO_REF_CLK_RATE) == 0)
        dsi_pll_14nm_vco_set_rate(hw, pll_14nm->phy->cfg->min_pll_rate, VCO_REF_CLK_RATE);

    dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VREF_CFG1, 0x10);
    dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 1);
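The added check guards against .prepare running before any rate has been programmed (recalc returning 0), falling back to the configuration's minimum PLL rate instead of starting an unprogrammed VCO. A tiny standalone sketch of that guard; the struct, function names and the 880 MHz figure are invented for illustration:

#include <stdint.h>
#include <stdio.h>

struct example_pll {
    uint64_t programmed_rate;   /* 0 until a rate has been set */
    uint64_t min_rate;
};

/* Mirror of the fix: if nothing was programmed yet, fall back to the minimum rate. */
static void example_prepare(struct example_pll *pll)
{
    if (pll->programmed_rate == 0)
        pll->programmed_rate = pll->min_rate;

    printf("starting VCO at %llu Hz\n", (unsigned long long)pll->programmed_rate);
}

int main(void)
{
    struct example_pll pll = { .programmed_rate = 0, .min_rate = 880000000ULL };

    example_prepare(&pll);  /* prints the fallback minimum rate */
    return 0;
}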
@@ -37,6 +37,7 @@

/* v2.0.0 28nm LP implementation */
#define DSI_PHY_28NM_QUIRK_PHY_LP    BIT(0)
#define DSI_PHY_28NM_QUIRK_PHY_8226    BIT(1)

#define LPFR_LUT_SIZE    10
struct lpfr_cfg {
@@ -377,6 +378,74 @@ static int dsi_pll_28nm_vco_prepare_hpm(struct clk_hw *hw)
    return ret;
}

static int dsi_pll_28nm_vco_prepare_8226(struct clk_hw *hw)
{
    struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
    struct device *dev = &pll_28nm->phy->pdev->dev;
    void __iomem *base = pll_28nm->phy->pll_base;
    u32 max_reads = 5, timeout_us = 100;
    bool locked;
    u32 val;
    int i;

    DBG("id=%d", pll_28nm->phy->id);

    pll_28nm_software_reset(pll_28nm);

    /*
     * PLL power up sequence.
     * Add necessary delays recommended by hardware.
     */
    dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34);

    val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
    dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

    val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
    dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

    val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
    val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
    dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);

    for (i = 0; i < 7; i++) {
        /* DSI Uniphy lock detect setting */
        dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
        dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
                     0x0c, 100);
        dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);

        /* poll for PLL ready status */
        locked = pll_28nm_poll_for_ready(pll_28nm,
                         max_reads, timeout_us);
        if (locked)
            break;

        pll_28nm_software_reset(pll_28nm);

        /*
         * PLL power up sequence.
         * Add necessary delays recommended by hardware.
         */
        dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00, 50);

        val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
        val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
        dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 100);

        val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
        val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
        dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
    }

    if (unlikely(!locked))
        DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
    else
        DBG("DSI PLL Lock success");

    return locked ? 0 : -EINVAL;
}

static int dsi_pll_28nm_vco_prepare_lp(struct clk_hw *hw)
{
    struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
@@ -471,6 +540,15 @@ static const struct clk_ops clk_ops_dsi_pll_28nm_vco_lp = {
    .is_enabled = dsi_pll_28nm_clk_is_enabled,
};

static const struct clk_ops clk_ops_dsi_pll_28nm_vco_8226 = {
    .round_rate = dsi_pll_28nm_clk_round_rate,
    .set_rate = dsi_pll_28nm_clk_set_rate,
    .recalc_rate = dsi_pll_28nm_clk_recalc_rate,
    .prepare = dsi_pll_28nm_vco_prepare_8226,
    .unprepare = dsi_pll_28nm_vco_unprepare,
    .is_enabled = dsi_pll_28nm_clk_is_enabled,
};

/*
 * PLL Callbacks
 */
@@ -536,6 +614,8 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov

    if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
        vco_init.ops = &clk_ops_dsi_pll_28nm_vco_lp;
    else if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_8226)
        vco_init.ops = &clk_ops_dsi_pll_28nm_vco_8226;
    else
        vco_init.ops = &clk_ops_dsi_pll_28nm_vco_hpm;
@@ -820,3 +900,20 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
    .quirks = DSI_PHY_28NM_QUIRK_PHY_LP,
};

const struct msm_dsi_phy_cfg dsi_phy_28nm_8226_cfgs = {
    .has_phy_regulator = true,
    .regulator_data = dsi_phy_28nm_regulators,
    .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
    .ops = {
        .enable = dsi_28nm_phy_enable,
        .disable = dsi_28nm_phy_disable,
        .pll_init = dsi_pll_28nm_init,
        .save_pll_state = dsi_28nm_pll_save_state,
        .restore_pll_state = dsi_28nm_pll_restore_state,
    },
    .min_pll_rate = VCO_MIN_RATE,
    .max_pll_rate = VCO_MAX_RATE,
    .io_start = { 0xfd922b00 },
    .num_dsi_phy = 1,
    .quirks = DSI_PHY_28NM_QUIRK_PHY_8226,
};
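The new cfg is picked up through the of_device_id table added earlier among these hunks; a hedged sketch of the usual probe-side lookup (example_phy_probe() is hypothetical, the driver's actual probe function is not shown in this diff):

#include <linux/of_device.h>
#include <linux/platform_device.h>

#include "dsi_phy.h"    /* driver-local header declaring struct msm_dsi_phy_cfg */

/* Hypothetical probe-side lookup: the compatible string selects the cfg above. */
static int example_phy_probe(struct platform_device *pdev)
{
    const struct msm_dsi_phy_cfg *cfg = of_device_get_match_data(&pdev->dev);

    if (!cfg)
        return -ENODEV;

    /* "qcom,dsi-phy-28nm-8226" nodes now resolve to dsi_phy_28nm_8226_cfgs. */
    return 0;
}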
Some files were not shown because too many files have changed in this diff.