dmaengine updates for v5.12-rc1
New drivers/devices:
 - Intel LGM SoC DMA driver
 - Actions Semi S500 DMA controller
 - Renesas r8a779a0 DMA controller
 - Ingenic JZ4760(B) DMA controller
 - Intel KeemBay AxiDMA controller

Removed:
 - Coh901318 DMA driver
 - ZTE ZX DMA driver
 - SiRFSoC DMA driver

Updates:
 - mmp_pdma and mmp_tdma gained module support
 - imx-sdma was modernized and dropped platform data support
 - the dw-axi driver gained slave and cyclic DMA support

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEE+vs47OPLdNbVcHzyfBQHDyUjg0cFAmA0gIEACgkQfBQHDyUj
g0fLyw//bfIBqmyvN01QNmYV0qrud0nZGRAGHSEwZ1Nrw2CvK37+XphYzVYy/PGk
Cg6ca+QXGJdIfqmQV/rnIEwrNx/GeNUAulVT5hxdHQw/HDPoZexU5S+Lyetr4g7l
FaE2C5se4RBp07eGhcOWkneHE/fhC9fX23VdNGNM6Nzb1F0j4MTmzcJAlsdCq2Q+
1UlJ2O4w/t/mdqgec4J+JGTsfb+BXxs0nWnuwVSy1SEkac3Gj0kqHlIHsQqLCiST
/D2rs1I0Chscu+ChrPNaVXDEobQipxIEdkzO6623t8C5KqfSf5i8rLvZvRP5YKf1
U5ZAi3p0c/t5VgXvA6WD79pN6ZLPsEMFDxyKQAazGPgrEP4gmI4dteETiJyr6Ag6
j6WqiDJwkmdVyuTiFDJsN3pTOqvT+TeHlLbnygAiuyMeNaF9skc7kxtq0XtXQigT
vLcwtGavFnmF7TZGjEVv4JTMdMFPfczE8y+fhM7ET/uF36gTrPHaoD3KIwgimwIt
Cmfpe+Ij8R3tBwV80454hp4+Gb+cR83OUgwy+EcBCw9P0/Pf4t0NyTgim+wN02Kt
X7tkkgxGkvziIkfbXQa4zdVqAbT6+WcRUjEDZY3/Lp7EFyVyM8APEVSAie9b/TWN
UgQo3TDuB6SU7XQ3Ahj6Swra0+UoVztHtwOmgIqiVz5on6780lg=
=0AE9
-----END PGP SIGNATURE-----

Merge tag 'dmaengine-5.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine updates from Vinod Koul:
 "We have a couple of drivers removed, a new driver, a bunch of new
  device support, and a few driver updates this round.

  New drivers/devices:
   - Intel LGM SoC DMA driver
   - Actions Semi S500 DMA controller
   - Renesas r8a779a0 DMA controller
   - Ingenic JZ4760(B) DMA controller
   - Intel KeemBay AxiDMA controller

  Removed:
   - Coh901318 DMA driver
   - ZTE ZX DMA driver
   - SiRFSoC DMA driver

  Updates:
   - mmp_pdma and mmp_tdma gained module support
   - imx-sdma was modernized and dropped platform data support
   - the dw-axi driver gained slave and cyclic DMA support"

* tag 'dmaengine-5.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (58 commits)
  dmaengine: dw-axi-dmac: remove redundant null check on desc
  dmaengine: xilinx_dma: Alloc tx descriptors GFP_NOWAIT
  dmaengine: dw-axi-dmac: Virtually split the linked-list
  dmaengine: dw-axi-dmac: Set constraint to the Max segment size
  dmaengine: dw-axi-dmac: Add Intel KeemBay AxiDMA BYTE and HALFWORD registers
  dmaengine: dw-axi-dmac: Add Intel KeemBay AxiDMA handshake
  dmaengine: dw-axi-dmac: Add Intel KeemBay AxiDMA support
  dmaengine: drivers: Kconfig: add HAS_IOMEM dependency to DW_AXI_DMAC
  dmaengine: dw-axi-dmac: Add Intel KeemBay DMA register fields
  dt-binding: dma: dw-axi-dmac: Add support for Intel KeemBay AxiDMA
  dmaengine: dw-axi-dmac: Support burst residue granularity
  dmaengine: dw-axi-dmac: Support of_dma_controller_register()
  dmaegine: dw-axi-dmac: Support device_prep_dma_cyclic()
  dmaengine: dw-axi-dmac: Support device_prep_slave_sg
  dmaengine: dw-axi-dmac: Add device_config operation
  dmaengine: dw-axi-dmac: Add device_synchronize() callback
  dmaengine: dw-axi-dmac: move dma_pool_create() to alloc_chan_resources()
  dmaengine: dw-axi-dmac: simplify descriptor management
  dt-bindings: dma: Add YAML schemas for dw-axi-dmac
  dmaengine: ti: k3-psil: optimize struct psil_endpoint_config for size
  ...
This commit is contained in commit 143983e585.
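The slave and cyclic support called out above is consumed through the generic in-kernel dmaengine client API. A minimal sketch of that flow follows (the device name, FIFO address, and buffer sizes are hypothetical, not taken from this merge; the dmaengine calls themselves are the standard API):

#include <linux/dmaengine.h>
#include <linux/sizes.h>

/* Hypothetical peripheral RX setup using the generic dmaengine API. */
static int start_cyclic_rx(struct device *dev, dma_addr_t buf,
			   dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr = fifo_addr,		/* peripheral FIFO address */
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = 4,
	};
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "rx");	/* "rx" from dma-names */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	dmaengine_slave_config(chan, &cfg);

	/* 64 KiB ring split into 4 KiB periods, one callback per period */
	txd = dmaengine_prep_dma_cyclic(chan, buf, SZ_64K, SZ_4K,
					DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}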
@@ -1674,6 +1674,12 @@
			In such case C2/C3 won't be used again.
			idle=nomwait: Disable mwait for CPU C-states

	idxd.sva=	[HW]
			Format: <bool>
			Allow force disabling of Shared Virtual Memory (SVA)
			support for the idxd driver. By default it is set to
			true (1).

	ieee754=	[MIPS] Select IEEE Std 754 conformance mode
			Format: { strict | legacy | 2008 | relaxed }
			Default: strict
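As a concrete (hypothetical) use of the new parameter documented above: booting with idxd.sva=0 on the kernel command line force-disables SVA support in the idxd driver, while omitting the parameter keeps the default behavior of idxd.sva=1.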
@@ -17,6 +17,8 @@ properties:
    enum:
      - ingenic,jz4740-dma
      - ingenic,jz4725b-dma
      - ingenic,jz4760-dma
      - ingenic,jz4760b-dma
      - ingenic,jz4770-dma
      - ingenic,jz4780-dma
      - ingenic,x1000-dma
@@ -0,0 +1,116 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/intel,ldma.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Lightning Mountain centralized DMA controllers.

maintainers:
  - chuanhua.lei@intel.com
  - mallikarjunax.reddy@intel.com

allOf:
  - $ref: "dma-controller.yaml#"

properties:
  compatible:
    enum:
      - intel,lgm-cdma
      - intel,lgm-dma2tx
      - intel,lgm-dma1rx
      - intel,lgm-dma1tx
      - intel,lgm-dma0tx
      - intel,lgm-dma3
      - intel,lgm-toe-dma30
      - intel,lgm-toe-dma31

  reg:
    maxItems: 1

  "#dma-cells":
    const: 3
    description:
      The first cell is the peripheral's DMA request line.
      The second cell is the peripheral's (port) number corresponding to the channel.
      The third cell is the burst length of the channel.

  dma-channels:
    minimum: 1
    maximum: 16

  dma-channel-mask:
    maxItems: 1

  clocks:
    maxItems: 1

  resets:
    maxItems: 1

  reset-names:
    items:
      - const: ctrl

  interrupts:
    maxItems: 1

  intel,dma-poll-cnt:
    $ref: /schemas/types.yaml#/definitions/uint32
    description:
      DMA descriptor polling counter, used to control the polling mechanism
      for descriptor fetching for all channels.

  intel,dma-byte-en:
    type: boolean
    description:
      DMA byte enable is only valid for DMA write (RX).
      Byte enable (1) means the DMA write will be based on the number of
      dwords instead of the whole burst.

  intel,dma-drb:
    type: boolean
    description:
      DMA descriptor read back, to keep data and descriptors in sync.

  intel,dma-dburst-wr:
    type: boolean
    description:
      Enable RX dynamic burst write. When enabled, the DMA does RX dynamic
      burst; when disabled, the DMA RX still supports a programmable fixed
      burst size of 2, 4, 8 or 16.
      It only applies to RX DMA and memcopy DMA.

required:
  - compatible
  - reg

additionalProperties: false

examples:
  - |
    dma0: dma-controller@e0e00000 {
      compatible = "intel,lgm-cdma";
      reg = <0xe0e00000 0x1000>;
      #dma-cells = <3>;
      dma-channels = <16>;
      dma-channel-mask = <0xFFFF>;
      interrupt-parent = <&ioapic1>;
      interrupts = <82 1>;
      resets = <&rcu0 0x30 0>;
      reset-names = "ctrl";
      clocks = <&cgu0 80>;
      intel,dma-poll-cnt = <4>;
      intel,dma-byte-en;
      intel,dma-drb;
    };
  - |
    dma3: dma-controller@ec800000 {
      compatible = "intel,lgm-dma3";
      reg = <0xec800000 0x1000>;
      clocks = <&cgu0 71>;
      resets = <&rcu0 0x10 9>;
      #dma-cells = <3>;
      intel,dma-poll-cnt = <16>;
      intel,dma-byte-en;
      intel,dma-dburst-wr;
    };
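Putting the three #dma-cells together, a client of one of these controllers would pass request line, port number and burst length in its dmas specifier, e.g. dmas = <&dma0 12 1 8>; (the 12/1/8 values are purely illustrative and not taken from this binding).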
@@ -8,8 +8,8 @@ title: Actions Semi Owl SoCs DMA controller

description: |
  The OWL DMA is a general-purpose direct memory access controller capable of
  supporting 10 and 12 independent DMA channels for S700 and S900 SoCs
  respectively.
  supporting 10 independent DMA channels for the Actions Semi S700 SoC and 12
  independent DMA channels for the S500 and S900 SoC variants.

maintainers:
  - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
@@ -20,8 +20,9 @@ allOf:
properties:
  compatible:
    enum:
      - actions,s900-dma
      - actions,s500-dma
      - actions,s700-dma
      - actions,s900-dma

  reg:
    maxItems: 1
@@ -14,34 +14,37 @@ allOf:

properties:
  compatible:
    items:
      - enum:
          - renesas,dmac-r8a7742  # RZ/G1H
          - renesas,dmac-r8a7743  # RZ/G1M
          - renesas,dmac-r8a7744  # RZ/G1N
          - renesas,dmac-r8a7745  # RZ/G1E
          - renesas,dmac-r8a77470 # RZ/G1C
          - renesas,dmac-r8a774a1 # RZ/G2M
          - renesas,dmac-r8a774b1 # RZ/G2N
          - renesas,dmac-r8a774c0 # RZ/G2E
          - renesas,dmac-r8a774e1 # RZ/G2H
          - renesas,dmac-r8a7790  # R-Car H2
          - renesas,dmac-r8a7791  # R-Car M2-W
          - renesas,dmac-r8a7792  # R-Car V2H
          - renesas,dmac-r8a7793  # R-Car M2-N
          - renesas,dmac-r8a7794  # R-Car E2
          - renesas,dmac-r8a7795  # R-Car H3
          - renesas,dmac-r8a7796  # R-Car M3-W
          - renesas,dmac-r8a77961 # R-Car M3-W+
          - renesas,dmac-r8a77965 # R-Car M3-N
          - renesas,dmac-r8a77970 # R-Car V3M
          - renesas,dmac-r8a77980 # R-Car V3H
          - renesas,dmac-r8a77990 # R-Car E3
          - renesas,dmac-r8a77995 # R-Car D3
      - const: renesas,rcar-dmac
    oneOf:
      - items:
          - enum:
              - renesas,dmac-r8a7742  # RZ/G1H
              - renesas,dmac-r8a7743  # RZ/G1M
              - renesas,dmac-r8a7744  # RZ/G1N
              - renesas,dmac-r8a7745  # RZ/G1E
              - renesas,dmac-r8a77470 # RZ/G1C
              - renesas,dmac-r8a774a1 # RZ/G2M
              - renesas,dmac-r8a774b1 # RZ/G2N
              - renesas,dmac-r8a774c0 # RZ/G2E
              - renesas,dmac-r8a774e1 # RZ/G2H
              - renesas,dmac-r8a7790  # R-Car H2
              - renesas,dmac-r8a7791  # R-Car M2-W
              - renesas,dmac-r8a7792  # R-Car V2H
              - renesas,dmac-r8a7793  # R-Car M2-N
              - renesas,dmac-r8a7794  # R-Car E2
              - renesas,dmac-r8a7795  # R-Car H3
              - renesas,dmac-r8a7796  # R-Car M3-W
              - renesas,dmac-r8a77961 # R-Car M3-W+
              - renesas,dmac-r8a77965 # R-Car M3-N
              - renesas,dmac-r8a77970 # R-Car V3M
              - renesas,dmac-r8a77980 # R-Car V3H
              - renesas,dmac-r8a77990 # R-Car E3
              - renesas,dmac-r8a77995 # R-Car D3
          - const: renesas,rcar-dmac
      - items:
          - const: renesas,dmac-r8a779a0 # R-Car V3U

  reg:
    maxItems: 1

  reg: true

  interrupts:
    minItems: 9
@@ -110,6 +113,23 @@ required:
  - power-domains
  - resets

if:
  properties:
    compatible:
      contains:
        enum:
          - renesas,dmac-r8a779a0
then:
  properties:
    reg:
      items:
        - description: Base register block
        - description: Channel register block
else:
  properties:
    reg:
      maxItems: 1

additionalProperties: false

examples:
@@ -1,44 +0,0 @@
* CSR SiRFSoC DMA controller

See dma.txt first

Required properties:
- compatible: Should be "sirf,prima2-dmac", "sirf,atlas7-dmac" or
  "sirf,atlas7-dmac-v2"
- reg: Should contain DMA registers location and length.
- interrupts: Should contain one interrupt shared by all channels
- #dma-cells: must be <1>. Used to represent the number of integer
  cells in the dmas property of a client device.
- clocks: clock required

Example:

Controller:
dmac0: dma-controller@b00b0000 {
	compatible = "sirf,prima2-dmac";
	reg = <0xb00b0000 0x10000>;
	interrupts = <12>;
	clocks = <&clks 24>;
	#dma-cells = <1>;
};

Client:
Fill the specific dma request line in dmas. In the example below, the spi0 read
channel request line is 9 of the 2nd dma controller, while the write channel uses
4 of the 2nd dma controller; the spi1 read channel request line is 12 of the 1st
dma controller, while the write channel uses 13 of the 1st dma controller:

spi0: spi@b00d0000 {
	compatible = "sirf,prima2-spi";
	dmas = <&dmac1 9>,
	       <&dmac1 4>;
	dma-names = "rx", "tx";
};

spi1: spi@b0170000 {
	compatible = "sirf,prima2-spi";
	dmas = <&dmac0 12>,
	       <&dmac0 13>;
	dma-names = "rx", "tx";
};
@@ -1,39 +0,0 @@
Synopsys DesignWare AXI DMA Controller

Required properties:
- compatible: "snps,axi-dma-1.01a"
- reg: Address range of the DMAC registers. This should include
  all of the per-channel registers.
- interrupt: Should contain the DMAC interrupt number.
- dma-channels: Number of channels supported by hardware.
- snps,dma-masters: Number of AXI masters supported by the hardware.
- snps,data-width: Maximum AXI data width supported by hardware.
  (0 - 8bits, 1 - 16bits, 2 - 32bits, ..., 6 - 512bits)
- snps,priority: Priority of channel. Array size is equal to the number of
  dma-channels. The priority value must be programmed within the
  [0:dma-channels-1] range. (0 - minimum priority)
- snps,block-size: Maximum block size supported by the controller channel.
  Array size is equal to the number of dma-channels.

Optional properties:
- snps,axi-max-burst-len: Restrict the master AXI burst length to the value
  specified in this property. If this property is missing, the maximum AXI
  burst length supported by the DMAC is used. [1:256]

Example:

dmac: dma-controller@80000 {
	compatible = "snps,axi-dma-1.01a";
	reg = <0x80000 0x400>;
	clocks = <&core_clk>, <&cfgr_clk>;
	clock-names = "core-clk", "cfgr-clk";
	interrupt-parent = <&intc>;
	interrupts = <27>;

	dma-channels = <4>;
	snps,dma-masters = <2>;
	snps,data-width = <3>;
	snps,block-size = <4096 4096 4096 4096>;
	snps,priority = <0 1 2 3>;
	snps,axi-max-burst-len = <16>;
};
@@ -0,0 +1,126 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/snps,dw-axi-dmac.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Synopsys DesignWare AXI DMA Controller

maintainers:
  - Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
  - Jee Heng Sia <jee.heng.sia@intel.com>

description:
  Synopsys DesignWare AXI DMA Controller DT Binding

allOf:
  - $ref: "dma-controller.yaml#"

properties:
  compatible:
    enum:
      - snps,axi-dma-1.01a
      - intel,kmb-axi-dma

  reg:
    minItems: 1
    items:
      - description: Address range of the DMAC registers
      - description: Address range of the DMAC APB registers

  reg-names:
    items:
      - const: axidma_ctrl_regs
      - const: axidma_apb_regs

  interrupts:
    maxItems: 1

  clocks:
    items:
      - description: Bus Clock
      - description: Module Clock

  clock-names:
    items:
      - const: core-clk
      - const: cfgr-clk

  '#dma-cells':
    const: 1

  dma-channels:
    minimum: 1
    maximum: 8

  snps,dma-masters:
    description: |
      Number of AXI masters supported by the hardware.
    $ref: /schemas/types.yaml#/definitions/uint32
    enum: [1, 2]

  snps,data-width:
    description: |
      AXI data width supported by hardware.
      (0 - 8bits, 1 - 16bits, 2 - 32bits, ..., 6 - 512bits)
    $ref: /schemas/types.yaml#/definitions/uint32
    enum: [0, 1, 2, 3, 4, 5, 6]

  snps,priority:
    description: |
      Channel priority specifier associated with the DMA channels.
    $ref: /schemas/types.yaml#/definitions/uint32-array
    minItems: 1
    maxItems: 8

  snps,block-size:
    description: |
      Channel block size specifier associated with the DMA channels.
    $ref: /schemas/types.yaml#/definitions/uint32-array
    minItems: 1
    maxItems: 8

  snps,axi-max-burst-len:
    description: |
      Restrict the master AXI burst length to the value specified in this
      property. If this property is missing, the maximum AXI burst length
      supported by the DMAC is used.
    $ref: /schemas/types.yaml#/definitions/uint32
    minimum: 1
    maximum: 256

required:
  - compatible
  - reg
  - clocks
  - clock-names
  - interrupts
  - '#dma-cells'
  - dma-channels
  - snps,dma-masters
  - snps,data-width
  - snps,priority
  - snps,block-size

additionalProperties: false

examples:
  - |
    #include <dt-bindings/interrupt-controller/arm-gic.h>
    #include <dt-bindings/interrupt-controller/irq.h>
    /* example with snps,dw-axi-dmac */
    dmac: dma-controller@80000 {
      compatible = "snps,axi-dma-1.01a";
      reg = <0x80000 0x400>;
      clocks = <&core_clk>, <&cfgr_clk>;
      clock-names = "core-clk", "cfgr-clk";
      interrupt-parent = <&intc>;
      interrupts = <27>;
      #dma-cells = <1>;
      dma-channels = <4>;
      snps,dma-masters = <2>;
      snps,data-width = <3>;
      snps,block-size = <4096 4096 4096 4096>;
      snps,priority = <0 1 2 3>;
      snps,axi-max-burst-len = <16>;
    };
@@ -1,32 +0,0 @@
ST-Ericsson COH 901 318 DMA Controller

This is a DMA controller which has begun as a fork of the
ARM PL08x PrimeCell VHDL code.

Required properties:
- compatible: should be "stericsson,coh901318"
- reg: register locations and length
- interrupts: the single DMA IRQ
- #dma-cells: must be set to <1>, as the channels on the
  COH 901 318 are simple and identified by a single number
- dma-channels: the number of DMA channels handled

Example:

dmac: dma-controller@c00020000 {
	compatible = "stericsson,coh901318";
	reg = <0xc0020000 0x1000>;
	interrupt-parent = <&vica>;
	interrupts = <2>;
	#dma-cells = <1>;
	dma-channels = <40>;
};

Consumers example:

uart0: serial@c0013000 {
	compatible = "...";
	(...)
	dmas = <&dmac 17 &dmac 18>;
	dma-names = "tx", "rx";
};
@@ -1,38 +0,0 @@
* ZTE ZX296702 DMA controller

Required properties:
- compatible: Should be "zte,zx296702-dma"
- reg: Should contain DMA registers location and length.
- interrupts: Should contain one interrupt shared by all channels
- #dma-cells: see dma.txt, should be 1, para number
- dma-channels: physical channels supported
- dma-requests: virtual channels supported, each virtual channel
  has a specific request line
- clocks: clock required

Example:

Controller:
dma: dma-controller@09c00000 {
	compatible = "zte,zx296702-dma";
	reg = <0x09c00000 0x1000>;
	clocks = <&topclk ZX296702_DMA_ACLK>;
	interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
	#dma-cells = <1>;
	dma-channels = <24>;
	dma-requests = <24>;
};

Client:
Use the specific request line passed in dmas.
For example, the spdif0 tx channel request line is 4:
spdif0: spdif0@b004000 {
	#sound-dai-cells = <0>;
	compatible = "zte,zx296702-spdif";
	reg = <0x0b004000 0x1000>;
	clocks = <&lsp0clk ZX296702_SPDIF0_DIV>;
	clock-names = "tx";
	interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
	dmas = <&dma 4>;
	dma-names = "tx";
}
@@ -2828,9 +2828,7 @@ S:	Odd fixes
W:	http://sourceforge.net/projects/xscaleiop
F:	Documentation/crypto/async-tx-api.rst
F:	crypto/async_tx/
F:	drivers/dma/
F:	include/linux/async_tx.h
F:	include/linux/dmaengine.h

AT24 EEPROM DRIVER
M:	Bartosz Golaszewski <bgolaszewski@baylibre.com>

@@ -5271,6 +5269,7 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine.git
F:	Documentation/devicetree/bindings/dma/
F:	Documentation/driver-api/dmaengine/
F:	drivers/dma/
F:	include/linux/dma/
F:	include/linux/dmaengine.h
F:	include/linux/of_dma.h

@@ -11614,7 +11613,6 @@ F:	drivers/dma/at_hdmac.c
F:	drivers/dma/at_hdmac_regs.h
F:	drivers/dma/at_xdmac.c
F:	include/dt-bindings/dma/at91.h
F:	include/linux/platform_data/dma-atmel.h

MICROCHIP AT91 SERIAL DRIVER
M:	Richard Genoud <richard.genoud@gmail.com>
@@ -124,13 +124,6 @@ config BCM_SBA_RAID
	  has the capability to offload memcpy, xor and pq computation
	  for raid5/6.

config COH901318
	bool "ST-Ericsson COH901318 DMA support"
	select DMA_ENGINE
	depends on ARCH_U300 || COMPILE_TEST
	help
	  Enable support for ST-Ericsson COH 901 318 DMA.

config DMA_BCM2835
	tristate "BCM2835 DMA engine support"
	depends on ARCH_BCM2835

@@ -179,6 +172,7 @@ config DMA_SUN6I
config DW_AXI_DMAC
	tristate "Synopsys DesignWare AXI DMA support"
	depends on OF || COMPILE_TEST
	depends on HAS_IOMEM
	select DMA_ENGINE
	select DMA_VIRTUAL_CHANNELS
	help

@@ -378,14 +372,14 @@ config MILBEAUT_XDMAC
	  XDMAC device.

config MMP_PDMA
	bool "MMP PDMA support"
	tristate "MMP PDMA support"
	depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST
	select DMA_ENGINE
	help
	  Support the MMP PDMA engine for PXA and MMP platform.

config MMP_TDMA
	bool "MMP Two-Channel DMA support"
	tristate "MMP Two-Channel DMA support"
	depends on ARCH_MMP || COMPILE_TEST
	select DMA_ENGINE
	select GENERIC_ALLOCATOR

@@ -519,13 +513,6 @@ config PLX_DMA
	  These are exposed via extra functions on the switch's
	  upstream port. Each function exposes one DMA channel.

config SIRF_DMA
	tristate "CSR SiRFprimaII/SiRFmarco DMA support"
	depends on ARCH_SIRF
	select DMA_ENGINE
	help
	  Enable support for the CSR SiRFprimaII DMA engine.

config STE_DMA40
	bool "ST-Ericsson DMA40 support"
	depends on ARCH_U8500

@@ -710,15 +697,6 @@ config XILINX_ZYNQMP_DPDMA
	  driver provides the dmaengine required by the DisplayPort subsystem
	  display driver.

config ZX_DMA
	tristate "ZTE ZX DMA support"
	depends on ARCH_ZX || COMPILE_TEST
	select DMA_ENGINE
	select DMA_VIRTUAL_CHANNELS
	help
	  Support the DMA engine for ZTE ZX family platform devices.

# driver files
source "drivers/dma/bestcomm/Kconfig"

@@ -740,6 +718,8 @@ source "drivers/dma/ti/Kconfig"

source "drivers/dma/fsl-dpaa2-qdma/Kconfig"

source "drivers/dma/lgm/Kconfig"

# clients
comment "DMA Clients"
	depends on DMA_ENGINE
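The bool-to-tristate switches for MMP_PDMA and MMP_TDMA above are what the merge summary means by "gained module support": a tristate symbol can also be set to m, so these drivers can now be built as loadable kernel modules instead of only built-in. The removed COH901318, SIRF_DMA and ZX_DMA entries correspond to the three drivers deleted in this cycle.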
@@ -20,7 +20,6 @@ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
obj-$(CONFIG_BCM_SBA_RAID) += bcm-sba-raid.o
obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
obj-$(CONFIG_DMA_JZ4780) += dma-jz4780.o
obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o

@@ -65,7 +64,6 @@ obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
obj-$(CONFIG_RENESAS_DMA) += sh/
obj-$(CONFIG_SF_PDMA) += sf-pdma/
obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_STM32_DMA) += stm32-dma.o
obj-$(CONFIG_STM32_DMAMUX) += stm32-dmamux.o

@@ -79,9 +77,9 @@ obj-$(CONFIG_TIMB_DMA) += timb_dma.o
obj-$(CONFIG_UNIPHIER_MDMAC) += uniphier-mdmac.o
obj-$(CONFIG_UNIPHIER_XDMAC) += uniphier-xdmac.o
obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
obj-$(CONFIG_ZX_DMA) += zx_dma.o
obj-$(CONFIG_ST_FDMA) += st_fdma.o
obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma/
obj-$(CONFIG_INTEL_LDMA) += lgm/

obj-y += mediatek/
obj-y += qcom/
@@ -54,6 +54,25 @@ module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

/**
 * struct at_dma_platform_data - Controller configuration parameters
 * @nr_channels: Number of channels supported by hardware (max 8)
 * @cap_mask: dma_capability flags supported by the platform
 */
struct at_dma_platform_data {
	unsigned int	nr_channels;
	dma_cap_mask_t	cap_mask;
};

/**
 * struct at_dma_slave - Controller-specific information about a slave
 * @dma_dev: required DMA master device
 * @cfg: Platform-specific initializer for the CFG register
 */
struct at_dma_slave {
	struct device	*dma_dev;
	u32		cfg;
};

/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
@@ -7,8 +7,6 @@
#ifndef AT_HDMAC_REGS_H
#define AT_HDMAC_REGS_H

#include <linux/platform_data/dma-atmel.h>

#define AT_DMA_MAX_NR_CHANNELS	8
@@ -148,7 +146,31 @@
#define ATC_AUTO		(0x1 << 31)	/* Auto multiple buffer tx enable */

/* Bitfields in CFG */
/* are in at_hdmac.h */
#define ATC_PER_MSB(h)		((0x30U & (h)) >> 4)	/* Extract most significant bits of a handshaking identifier */

#define ATC_SRC_PER(h)		(0xFU & (h))	/* Channel src rq associated with periph handshaking ifc h */
#define ATC_DST_PER(h)		((0xFU & (h)) << 4)	/* Channel dst rq associated with periph handshaking ifc h */
#define ATC_SRC_REP		(0x1 << 8)	/* Source Replay Mod */
#define ATC_SRC_H2SEL		(0x1 << 9)	/* Source Handshaking Mod */
#define ATC_SRC_H2SEL_SW	(0x0 << 9)
#define ATC_SRC_H2SEL_HW	(0x1 << 9)
#define ATC_SRC_PER_MSB(h)	(ATC_PER_MSB(h) << 10)	/* Channel src rq (most significant bits) */
#define ATC_DST_REP		(0x1 << 12)	/* Destination Replay Mod */
#define ATC_DST_H2SEL		(0x1 << 13)	/* Destination Handshaking Mod */
#define ATC_DST_H2SEL_SW	(0x0 << 13)
#define ATC_DST_H2SEL_HW	(0x1 << 13)
#define ATC_DST_PER_MSB(h)	(ATC_PER_MSB(h) << 14)	/* Channel dst rq (most significant bits) */
#define ATC_SOD			(0x1 << 16)	/* Stop On Done */
#define ATC_LOCK_IF		(0x1 << 20)	/* Interface Lock */
#define ATC_LOCK_B		(0x1 << 21)	/* AHB Bus Lock */
#define ATC_LOCK_IF_L		(0x1 << 22)	/* Master Interface Arbiter Lock */
#define ATC_LOCK_IF_L_CHUNK	(0x0 << 22)
#define ATC_LOCK_IF_L_BUFFER	(0x1 << 22)
#define ATC_AHB_PROT_MASK	(0x7 << 24)	/* AHB Protection */
#define ATC_FIFOCFG_MASK	(0x3 << 28)	/* FIFO Request Configuration */
#define ATC_FIFOCFG_LARGESTBURST	(0x0 << 28)
#define ATC_FIFOCFG_HALFFIFO		(0x1 << 28)
#define ATC_FIFOCFG_ENOUGHSPACE		(0x2 << 28)

/* Bitfields in SPIP */
#define ATC_SPIP_HOLE(x)	(0xFFFFU & (x))
(The diff for one file is not shown here because of its large size.)
@@ -1,141 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2013 ST-Ericsson
 * DMA driver for COH 901 318
 * Author: Per Friden <per.friden@stericsson.com>
 */

#ifndef COH901318_H
#define COH901318_H

#define MAX_DMA_PACKET_SIZE_SHIFT 11
#define MAX_DMA_PACKET_SIZE (1 << MAX_DMA_PACKET_SIZE_SHIFT)

struct device;

struct coh901318_pool {
	spinlock_t lock;
	struct dma_pool *dmapool;
	struct device *dev;

#ifdef CONFIG_DEBUG_FS
	int debugfs_pool_counter;
#endif
};

/**
 * struct coh901318_lli - linked list item for DMAC
 * @control: control settings for DMAC
 * @src_addr: transfer source address
 * @dst_addr: transfer destination address
 * @link_addr: physical address to next lli
 * @virt_link_addr: virtual address of next lli (only used by pool_free)
 * @phy_this: physical address of current lli (only used by pool_free)
 */
struct coh901318_lli {
	u32 control;
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	dma_addr_t link_addr;

	void *virt_link_addr;
	dma_addr_t phy_this;
};

/**
 * coh901318_pool_create() - Creates a dma pool for lli:s
 * @pool: pool handle
 * @dev: dma device
 * @lli_nbr: number of lli:s in the pool
 * @align: address alignment of lli:s
 * returns 0 on success, otherwise non-zero
 */
int coh901318_pool_create(struct coh901318_pool *pool,
			  struct device *dev,
			  size_t lli_nbr, size_t align);

/**
 * coh901318_pool_destroy() - Destroys the dma pool
 * @pool: pool handle
 * returns 0 on success, otherwise non-zero
 */
int coh901318_pool_destroy(struct coh901318_pool *pool);

/**
 * coh901318_lli_alloc() - Allocates a linked list
 *
 * @pool: pool handle
 * @len: length of the list
 * return: non-NULL on success, otherwise NULL
 */
struct coh901318_lli *
coh901318_lli_alloc(struct coh901318_pool *pool,
		    unsigned int len);

/**
 * coh901318_lli_free() - Returns the linked list items to the pool
 * @pool: pool handle
 * @lli: reference to lli pointer to be freed
 */
void coh901318_lli_free(struct coh901318_pool *pool,
			struct coh901318_lli **lli);

/**
 * coh901318_lli_fill_memcpy() - Prepares the lli:s for dma memcpy
 * @pool: pool handle
 * @lli: allocated lli
 * @src: src address
 * @size: transfer size
 * @dst: destination address
 * @ctrl_chained: ctrl for chained lli
 * @ctrl_last: ctrl for the last lli
 * returns number of CPU interrupts for the lli, negative on error.
 */
int
coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
			  struct coh901318_lli *lli,
			  dma_addr_t src, unsigned int size,
			  dma_addr_t dst, u32 ctrl_chained, u32 ctrl_last);

/**
 * coh901318_lli_fill_single() - Prepares the lli:s for dma single transfer
 * @pool: pool handle
 * @lli: allocated lli
 * @buf: transfer buffer
 * @size: transfer size
 * @dev_addr: address of peripheral
 * @ctrl_chained: ctrl for chained lli
 * @ctrl_last: ctrl for the last lli
 * @dir: direction of transfer (to or from device)
 * returns number of CPU interrupts for the lli, negative on error.
 */
int
coh901318_lli_fill_single(struct coh901318_pool *pool,
			  struct coh901318_lli *lli,
			  dma_addr_t buf, unsigned int size,
			  dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last,
			  enum dma_transfer_direction dir);

/**
 * coh901318_lli_fill_sg() - Prepares the lli:s for dma scatter list transfer
 * @pool: pool handle
 * @lli: allocated lli
 * @sg: scatter gather list
 * @nents: number of entries in sg
 * @dev_addr: address of peripheral
 * @ctrl_chained: ctrl for chained lli
 * @ctrl: ctrl of middle lli
 * @ctrl_last: ctrl for the last lli
 * @dir: direction of transfer (to or from device)
 * @ctrl_irq_mask: ctrl mask for CPU interrupt
 * returns number of CPU interrupts for the lli, negative on error.
 */
int
coh901318_lli_fill_sg(struct coh901318_pool *pool,
		      struct coh901318_lli *lli,
		      struct scatterlist *sg, unsigned int nents,
		      dma_addr_t dev_addr, u32 ctrl_chained,
		      u32 ctrl, u32 ctrl_last,
		      enum dma_transfer_direction dir, u32 ctrl_irq_mask);

#endif /* COH901318_H */
@@ -1,313 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * driver/dma/coh901318_lli.c
 *
 * Copyright (C) 2007-2009 ST-Ericsson
 * Support functions for handling lli for dma
 * Author: Per Friden <per.friden@stericsson.com>
 */

#include <linux/spinlock.h>
#include <linux/memory.h>
#include <linux/gfp.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>

#include "coh901318.h"

#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
#define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)
#define DEBUGFS_POOL_COUNTER_ADD(pool, add) (pool->debugfs_pool_counter += add)
#else
#define DEBUGFS_POOL_COUNTER_RESET(pool)
#define DEBUGFS_POOL_COUNTER_ADD(pool, add)
#endif

static struct coh901318_lli *
coh901318_lli_next(struct coh901318_lli *data)
{
	if (data == NULL || data->link_addr == 0)
		return NULL;

	return (struct coh901318_lli *) data->virt_link_addr;
}

int coh901318_pool_create(struct coh901318_pool *pool,
			  struct device *dev,
			  size_t size, size_t align)
{
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0);

	DEBUGFS_POOL_COUNTER_RESET(pool);
	return 0;
}

int coh901318_pool_destroy(struct coh901318_pool *pool)
{

	dma_pool_destroy(pool->dmapool);
	return 0;
}

struct coh901318_lli *
coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
{
	int i;
	struct coh901318_lli *head;
	struct coh901318_lli *lli;
	struct coh901318_lli *lli_prev;
	dma_addr_t phy;

	if (len == 0)
		return NULL;

	spin_lock(&pool->lock);

	head = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);

	if (head == NULL)
		goto err;

	DEBUGFS_POOL_COUNTER_ADD(pool, 1);

	lli = head;
	lli->phy_this = phy;
	lli->link_addr = 0x00000000;
	lli->virt_link_addr = NULL;

	for (i = 1; i < len; i++) {
		lli_prev = lli;

		lli = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);

		if (lli == NULL)
			goto err_clean_up;

		DEBUGFS_POOL_COUNTER_ADD(pool, 1);
		lli->phy_this = phy;
		lli->link_addr = 0x00000000;
		lli->virt_link_addr = NULL;

		lli_prev->link_addr = phy;
		lli_prev->virt_link_addr = lli;
	}

	spin_unlock(&pool->lock);

	return head;

 err:
	spin_unlock(&pool->lock);
	return NULL;

 err_clean_up:
	lli_prev->link_addr = 0x00000000U;
	spin_unlock(&pool->lock);
	coh901318_lli_free(pool, &head);
	return NULL;
}

void coh901318_lli_free(struct coh901318_pool *pool,
			struct coh901318_lli **lli)
{
	struct coh901318_lli *l;
	struct coh901318_lli *next;

	if (lli == NULL)
		return;

	l = *lli;

	if (l == NULL)
		return;

	spin_lock(&pool->lock);

	while (l->link_addr) {
		next = l->virt_link_addr;
		dma_pool_free(pool->dmapool, l, l->phy_this);
		DEBUGFS_POOL_COUNTER_ADD(pool, -1);
		l = next;
	}
	dma_pool_free(pool->dmapool, l, l->phy_this);
	DEBUGFS_POOL_COUNTER_ADD(pool, -1);

	spin_unlock(&pool->lock);
	*lli = NULL;
}

int
coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
			  struct coh901318_lli *lli,
			  dma_addr_t source, unsigned int size,
			  dma_addr_t destination, u32 ctrl_chained,
			  u32 ctrl_eom)
{
	int s = size;
	dma_addr_t src = source;
	dma_addr_t dst = destination;

	lli->src_addr = src;
	lli->dst_addr = dst;

	while (lli->link_addr) {
		lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
		lli->src_addr = src;
		lli->dst_addr = dst;

		s -= MAX_DMA_PACKET_SIZE;
		lli = coh901318_lli_next(lli);

		src += MAX_DMA_PACKET_SIZE;
		dst += MAX_DMA_PACKET_SIZE;
	}

	lli->control = ctrl_eom | s;
	lli->src_addr = src;
	lli->dst_addr = dst;

	return 0;
}

int
coh901318_lli_fill_single(struct coh901318_pool *pool,
			  struct coh901318_lli *lli,
			  dma_addr_t buf, unsigned int size,
			  dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
			  enum dma_transfer_direction dir)
{
	int s = size;
	dma_addr_t src;
	dma_addr_t dst;


	if (dir == DMA_MEM_TO_DEV) {
		src = buf;
		dst = dev_addr;

	} else if (dir == DMA_DEV_TO_MEM) {

		src = dev_addr;
		dst = buf;
	} else {
		return -EINVAL;
	}

	while (lli->link_addr) {
		size_t block_size = MAX_DMA_PACKET_SIZE;
		lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;

		/* If we are on the next-to-final block and there will
		 * be less than half a DMA packet left for the last
		 * block, then we want to make this block a little
		 * smaller to balance the sizes. This is meant to
		 * avoid too small transfers if the buffer size is
		 * (MAX_DMA_PACKET_SIZE*N + 1) */
		if (s < (MAX_DMA_PACKET_SIZE + MAX_DMA_PACKET_SIZE/2))
			block_size = MAX_DMA_PACKET_SIZE/2;

		s -= block_size;
		lli->src_addr = src;
		lli->dst_addr = dst;

		lli = coh901318_lli_next(lli);

		if (dir == DMA_MEM_TO_DEV)
			src += block_size;
		else if (dir == DMA_DEV_TO_MEM)
			dst += block_size;
	}

	lli->control = ctrl_eom | s;
	lli->src_addr = src;
	lli->dst_addr = dst;

	return 0;
}

int
coh901318_lli_fill_sg(struct coh901318_pool *pool,
		      struct coh901318_lli *lli,
		      struct scatterlist *sgl, unsigned int nents,
		      dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
		      u32 ctrl_last,
		      enum dma_transfer_direction dir, u32 ctrl_irq_mask)
{
	int i;
	struct scatterlist *sg;
	u32 ctrl_sg;
	dma_addr_t src = 0;
	dma_addr_t dst = 0;
	u32 bytes_to_transfer;
	u32 elem_size;

	if (lli == NULL)
		goto err;

	spin_lock(&pool->lock);

	if (dir == DMA_MEM_TO_DEV)
		dst = dev_addr;
	else if (dir == DMA_DEV_TO_MEM)
		src = dev_addr;
	else
		goto err;

	for_each_sg(sgl, sg, nents, i) {
		if (sg_is_chain(sg)) {
			/* sg continues to the next sg-element, don't
			 * send ctrl_finish until the last
			 * sg-element in the chain
			 */
			ctrl_sg = ctrl_chained;
		} else if (i == nents - 1)
			ctrl_sg = ctrl_last;
		else
			ctrl_sg = ctrl ? ctrl : ctrl_last;


		if (dir == DMA_MEM_TO_DEV)
			/* increment source address */
			src = sg_dma_address(sg);
		else
			/* increment destination address */
			dst = sg_dma_address(sg);

		bytes_to_transfer = sg_dma_len(sg);

		while (bytes_to_transfer) {
			u32 val;

			if (bytes_to_transfer > MAX_DMA_PACKET_SIZE) {
				elem_size = MAX_DMA_PACKET_SIZE;
				val = ctrl_chained;
			} else {
				elem_size = bytes_to_transfer;
				val = ctrl_sg;
			}

			lli->control = val | elem_size;
			lli->src_addr = src;
			lli->dst_addr = dst;

			if (dir == DMA_DEV_TO_MEM)
				dst += elem_size;
			else
				src += elem_size;

			BUG_ON(lli->link_addr & 3);

			bytes_to_transfer -= elem_size;
			lli = coh901318_lli_next(lli);
		}

	}
	spin_unlock(&pool->lock);

	return 0;
 err:
	spin_unlock(&pool->lock);
	return -EINVAL;
}
@@ -1004,6 +1004,18 @@ static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
	JZ_SOC_DATA_BREAK_LINKS,
};

static const struct jz4780_dma_soc_data jz4760_dma_soc_data = {
	.nb_channels = 5,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
};

static const struct jz4780_dma_soc_data jz4760b_dma_soc_data = {
	.nb_channels = 5,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM,
};

static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
	.nb_channels = 6,
	.transfer_ord_max = 6,

@@ -1031,6 +1043,8 @@ static const struct jz4780_dma_soc_data x1830_dma_soc_data = {
static const struct of_device_id jz4780_dma_dt_match[] = {
	{ .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
	{ .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
	{ .compatible = "ingenic,jz4760-dma", .data = &jz4760_dma_soc_data },
	{ .compatible = "ingenic,jz4760b-dma", .data = &jz4760b_dma_soc_data },
	{ .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
	{ .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
	{ .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data },
@@ -12,15 +12,20 @@
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "dw-axi-dmac.h"
@@ -195,43 +200,56 @@ static inline const char *axi_chan_name(struct axi_dma_chan *chan)
	return dma_chan_name(&chan->vc.chan);
}

static struct axi_dma_desc *axi_desc_get(struct axi_dma_chan *chan)
static struct axi_dma_desc *axi_desc_alloc(u32 num)
{
	struct dw_axi_dma *dw = chan->chip->dw;
	struct axi_dma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->hw_desc = kcalloc(num, sizeof(*desc->hw_desc), GFP_NOWAIT);
	if (!desc->hw_desc) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
					dma_addr_t *addr)
{
	struct axi_dma_lli *lli;
	dma_addr_t phys;

	desc = dma_pool_zalloc(dw->desc_pool, GFP_NOWAIT, &phys);
	if (unlikely(!desc)) {
	lli = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
	if (unlikely(!lli)) {
		dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
			axi_chan_name(chan));
		return NULL;
	}

	atomic_inc(&chan->descs_allocated);
	INIT_LIST_HEAD(&desc->xfer_list);
	desc->vd.tx.phys = phys;
	desc->chan = chan;
	*addr = phys;

	return desc;
	return lli;
}

static void axi_desc_put(struct axi_dma_desc *desc)
{
	struct axi_dma_chan *chan = desc->chan;
	struct dw_axi_dma *dw = chan->chip->dw;
	struct axi_dma_desc *child, *_next;
	unsigned int descs_put = 0;
	int count = atomic_read(&chan->descs_allocated);
	struct axi_dma_hw_desc *hw_desc;
	int descs_put;

	list_for_each_entry_safe(child, _next, &desc->xfer_list, xfer_list) {
		list_del(&child->xfer_list);
		dma_pool_free(dw->desc_pool, child, child->vd.tx.phys);
		descs_put++;
	for (descs_put = 0; descs_put < count; descs_put++) {
		hw_desc = &desc->hw_desc[descs_put];
		dma_pool_free(chan->desc_pool, hw_desc->lli, hw_desc->llp);
	}

	dma_pool_free(dw->desc_pool, desc, desc->vd.tx.phys);
	descs_put++;

	kfree(desc->hw_desc);
	kfree(desc);
	atomic_sub(descs_put, &chan->descs_allocated);
	dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
		 axi_chan_name(chan), descs_put,
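The net effect of the descriptor rework above: instead of chaining one dma_pool allocation per block through xfer_list, a transfer is now a single kzalloc'd axi_dma_desc owning a kcalloc'd array of axi_dma_hw_desc entries, each of whose axi_dma_lli hardware descriptors comes from a per-channel pool; teardown walks the array by the descs_allocated count rather than a list.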
@@ -248,19 +266,41 @@ dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	enum dma_status ret;
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	u32 completed_length;
	unsigned long flags;
	u32 completed_blocks;
	size_t bytes = 0;
	u32 length;
	u32 len;

	ret = dma_cookie_status(dchan, cookie, txstate);
	status = dma_cookie_status(dchan, cookie, txstate);
	if (status == DMA_COMPLETE || !txstate)
		return status;

	if (chan->is_paused && ret == DMA_IN_PROGRESS)
		ret = DMA_PAUSED;
	spin_lock_irqsave(&chan->vc.lock, flags);

	return ret;
	vdesc = vchan_find_desc(&chan->vc, cookie);
	if (vdesc) {
		length = vd_to_axi_desc(vdesc)->length;
		completed_blocks = vd_to_axi_desc(vdesc)->completed_blocks;
		len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
		completed_length = completed_blocks * len;
		bytes = length - completed_length;
	} else {
		bytes = vd_to_axi_desc(vdesc)->length;
	}

	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dma_set_residue(txstate, bytes);

	return status;
}

static void write_desc_llp(struct axi_dma_desc *desc, dma_addr_t adr)
static void write_desc_llp(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli.llp = cpu_to_le64(adr);
	desc->lli->llp = cpu_to_le64(adr);
}

static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
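The tx_status rework above is what lets a dmaengine client read back a byte-level residue (the "burst residue granularity" commit in the tag). A minimal consumer-side sketch, where chan and cookie are assumed to come from an earlier dma_request_chan()/dmaengine_submit() sequence; the dmaengine calls themselves are the standard in-kernel API:

#include <linux/dmaengine.h>

/* Query how many bytes of a submitted transfer are still outstanding. */
static size_t query_residue(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_COMPLETE)
		return 0;		/* nothing left to transfer */

	return state.residue;		/* bytes not yet transferred */
}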
@@ -268,6 +308,29 @@ static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
	axi_chan_iowrite64(chan, CH_LLP, adr);
}

static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set)
{
	u32 offset = DMAC_APB_BYTE_WR_CH_EN;
	u32 reg_width, val;

	if (!chan->chip->apb_regs) {
		dev_dbg(chan->chip->dev, "apb_regs not initialized\n");
		return;
	}

	reg_width = __ffs(chan->config.dst_addr_width);
	if (reg_width == DWAXIDMAC_TRANS_WIDTH_16)
		offset = DMAC_APB_HALFWORD_WR_CH_EN;

	val = ioread32(chan->chip->apb_regs + offset);

	if (set)
		val |= BIT(chan->id);
	else
		val &= ~BIT(chan->id);

	iowrite32(val, chan->chip->apb_regs + offset);
}

/* Called in chan locked context */
static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
				      struct axi_dma_desc *first)

@@ -293,9 +356,26 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
	       priority << CH_CFG_H_PRIORITY_POS |
	       DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_DST_POS |
	       DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_SRC_POS);
	switch (chan->direction) {
	case DMA_MEM_TO_DEV:
		dw_axi_dma_set_byte_halfword(chan, true);
		reg |= (chan->config.device_fc ?
			DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
			DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC)
			<< CH_CFG_H_TT_FC_POS;
		break;
	case DMA_DEV_TO_MEM:
		reg |= (chan->config.device_fc ?
			DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
			DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC)
			<< CH_CFG_H_TT_FC_POS;
		break;
	default:
		break;
	}
	axi_chan_iowrite32(chan, CH_CFG_H, reg);

	write_chan_llp(chan, first->vd.tx.phys | lms);
	write_chan_llp(chan, first->hw_desc[0].llp | lms);

	irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
	axi_chan_irq_sig_set(chan, irq_mask);

@@ -333,6 +413,13 @@ static void dma_chan_issue_pending(struct dma_chan *dchan)
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void dw_axi_dma_synchronize(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	vchan_synchronize(&chan->vc);
}

static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
@@ -344,6 +431,15 @@ static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
		return -EBUSY;
	}

	/* LLI address must be aligned to a 64-byte boundary */
	chan->desc_pool = dma_pool_create(dev_name(chan2dev(chan)),
					  chan->chip->dev,
					  sizeof(struct axi_dma_lli),
					  64, 0);
	if (!chan->desc_pool) {
		dev_err(chan2dev(chan), "No memory for descriptors\n");
		return -ENOMEM;
	}
	dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));

	pm_runtime_get(chan->chip->dev);

@@ -365,6 +461,8 @@ static void dma_chan_free_chan_resources(struct dma_chan *dchan)

	vchan_free_chan_resources(&chan->vc);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	dev_vdbg(dchan2dev(dchan),
		 "%s: free resources, descriptor still allocated: %u\n",
		 axi_chan_name(chan), atomic_read(&chan->descs_allocated));
@@ -372,73 +470,398 @@ static void dma_chan_free_chan_resources(struct dma_chan *dchan)
	pm_runtime_put(chan->chip->dev);
}

static void dw_axi_dma_set_hw_channel(struct axi_dma_chip *chip,
				      u32 handshake_num, bool set)
{
	unsigned long start = 0;
	unsigned long reg_value;
	unsigned long reg_mask;
	unsigned long reg_set;
	unsigned long mask;
	unsigned long val;

	if (!chip->apb_regs) {
		dev_dbg(chip->dev, "apb_regs not initialized\n");
		return;
	}

	/*
	 * An unused DMA channel has a default value of 0x3F.
	 * Lock the DMA channel by assigning a handshake number to the channel.
	 * Unlock the DMA channel by assigning 0x3F to the channel.
	 */
	if (set) {
		reg_set = UNUSED_CHANNEL;
		val = handshake_num;
	} else {
		reg_set = handshake_num;
		val = UNUSED_CHANNEL;
	}

	reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);

	for_each_set_clump8(start, reg_mask, &reg_value, 64) {
		if (reg_mask == reg_set) {
			mask = GENMASK_ULL(start + 7, start);
			reg_value &= ~mask;
			reg_value |= rol64(val, start);
			lo_hi_writeq(reg_value,
				     chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
			break;
		}
	}
}

/*
 * If DW_axi_dmac sees CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched LLI
 * as 1, it understands that the current block is the final block in the
 * transfer and completes the DMA transfer operation at the end of current
 * block transfer.
 */
static void set_desc_last(struct axi_dma_desc *desc)
static void set_desc_last(struct axi_dma_hw_desc *desc)
{
	u32 val;

	val = le32_to_cpu(desc->lli.ctl_hi);
	val = le32_to_cpu(desc->lli->ctl_hi);
	val |= CH_CTL_H_LLI_LAST;
	desc->lli.ctl_hi = cpu_to_le32(val);
	desc->lli->ctl_hi = cpu_to_le32(val);
}

static void write_desc_sar(struct axi_dma_desc *desc, dma_addr_t adr)
static void write_desc_sar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli.sar = cpu_to_le64(adr);
	desc->lli->sar = cpu_to_le64(adr);
}

static void write_desc_dar(struct axi_dma_desc *desc, dma_addr_t adr)
static void write_desc_dar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli.dar = cpu_to_le64(adr);
	desc->lli->dar = cpu_to_le64(adr);
}

static void set_desc_src_master(struct axi_dma_desc *desc)
static void set_desc_src_master(struct axi_dma_hw_desc *desc)
{
	u32 val;

	/* Select AXI0 for source master */
	val = le32_to_cpu(desc->lli.ctl_lo);
	val = le32_to_cpu(desc->lli->ctl_lo);
	val &= ~CH_CTL_L_SRC_MAST;
	desc->lli.ctl_lo = cpu_to_le32(val);
	desc->lli->ctl_lo = cpu_to_le32(val);
}

static void set_desc_dest_master(struct axi_dma_desc *desc)
static void set_desc_dest_master(struct axi_dma_hw_desc *hw_desc,
				 struct axi_dma_desc *desc)
{
	u32 val;

	/* Select AXI1 for source master if available */
	val = le32_to_cpu(desc->lli.ctl_lo);
	val = le32_to_cpu(hw_desc->lli->ctl_lo);
	if (desc->chan->chip->dw->hdata->nr_masters > 1)
		val |= CH_CTL_L_DST_MAST;
	else
		val &= ~CH_CTL_L_DST_MAST;

	desc->lli.ctl_lo = cpu_to_le32(val);
	hw_desc->lli->ctl_lo = cpu_to_le32(val);
}

static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan,
				  struct axi_dma_hw_desc *hw_desc,
				  dma_addr_t mem_addr, size_t len)
{
	unsigned int data_width = BIT(chan->chip->dw->hdata->m_data_width);
	unsigned int reg_width;
	unsigned int mem_width;
	dma_addr_t device_addr;
	size_t axi_block_ts;
	size_t block_ts;
	u32 ctllo, ctlhi;
	u32 burst_len;

	axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];

	mem_width = __ffs(data_width | mem_addr | len);
	if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
		mem_width = DWAXIDMAC_TRANS_WIDTH_32;

	if (!IS_ALIGNED(mem_addr, 4)) {
		dev_err(chan->chip->dev, "invalid buffer alignment\n");
		return -EINVAL;
	}

	switch (chan->direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __ffs(chan->config.dst_addr_width);
		device_addr = chan->config.dst_addr;
		ctllo = reg_width << CH_CTL_L_DST_WIDTH_POS |
			mem_width << CH_CTL_L_SRC_WIDTH_POS |
			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_DST_INC_POS |
			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS;
		block_ts = len >> mem_width;
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(chan->config.src_addr_width);
		device_addr = chan->config.src_addr;
		ctllo = reg_width << CH_CTL_L_SRC_WIDTH_POS |
			mem_width << CH_CTL_L_DST_WIDTH_POS |
			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_SRC_INC_POS;
		block_ts = len >> reg_width;
		break;
	default:
		return -EINVAL;
	}

	if (block_ts > axi_block_ts)
		return -EINVAL;

	hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
	if (unlikely(!hw_desc->lli))
		return -ENOMEM;

	ctlhi = CH_CTL_H_LLI_VALID;

	if (chan->chip->dw->hdata->restrict_axi_burst_len) {
		burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
		ctlhi |= CH_CTL_H_ARLEN_EN | CH_CTL_H_AWLEN_EN |
			 burst_len << CH_CTL_H_ARLEN_POS |
			 burst_len << CH_CTL_H_AWLEN_POS;
	}

	hw_desc->lli->ctl_hi = cpu_to_le32(ctlhi);

	if (chan->direction == DMA_MEM_TO_DEV) {
		write_desc_sar(hw_desc, mem_addr);
		write_desc_dar(hw_desc, device_addr);
	} else {
		write_desc_sar(hw_desc, device_addr);
		write_desc_dar(hw_desc, mem_addr);
	}

	hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);

	ctllo |= DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
		 DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS;
	hw_desc->lli->ctl_lo = cpu_to_le32(ctllo);

	set_desc_src_master(hw_desc);

	hw_desc->len = len;
	return 0;
}

static size_t calculate_block_len(struct axi_dma_chan *chan,
				  dma_addr_t dma_addr, size_t buf_len,
				  enum dma_transfer_direction direction)
{
	u32 data_width, reg_width, mem_width;
	size_t axi_block_ts, block_len;

	axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];

	switch (direction) {
	case DMA_MEM_TO_DEV:
		data_width = BIT(chan->chip->dw->hdata->m_data_width);
		mem_width = __ffs(data_width | dma_addr | buf_len);
		if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
			mem_width = DWAXIDMAC_TRANS_WIDTH_32;

		block_len = axi_block_ts << mem_width;
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(chan->config.src_addr_width);
		block_len = axi_block_ts << reg_width;
		break;
	default:
		block_len = 0;
	}

	return block_len;
}

static struct dma_async_tx_descriptor *
dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
			    size_t buf_len, size_t period_len,
			    enum dma_transfer_direction direction,
			    unsigned long flags)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	dma_addr_t src_addr = dma_addr;
	u32 num_periods, num_segments;
	size_t axi_block_len;
	u32 total_segments;
	u32 segment_len;
	unsigned int i;
	int status;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	num_periods = buf_len / period_len;

	axi_block_len = calculate_block_len(chan, dma_addr, buf_len, direction);
	if (axi_block_len == 0)
		return NULL;

	num_segments = DIV_ROUND_UP(period_len, axi_block_len);
	segment_len = DIV_ROUND_UP(period_len, num_segments);

	total_segments = num_periods * num_segments;

	desc = axi_desc_alloc(total_segments);
	if (unlikely(!desc))
		goto err_desc_get;

	chan->direction = direction;
	desc->chan = chan;
	chan->cyclic = true;
	desc->length = 0;
	desc->period_len = period_len;

	for (i = 0; i < total_segments; i++) {
		hw_desc = &desc->hw_desc[i];

		status = dw_axi_dma_set_hw_desc(chan, hw_desc, src_addr,
						segment_len);
		if (status < 0)
			goto err_desc_get;

		desc->length += hw_desc->len;
		/* Set end-of-link to the linked descriptor, so that cyclic
		 * callback function can be triggered during interrupt.
		 */
		set_desc_last(hw_desc);

		src_addr += segment_len;
	}

	llp = desc->hw_desc[0].llp;

	/* Managed transfer list */
	do {
		hw_desc = &desc->hw_desc[--total_segments];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (total_segments);

	dw_axi_dma_set_hw_channel(chan->chip, chan->hw_handshake_num, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);

	return NULL;
}

static struct dma_async_tx_descriptor *
dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			      unsigned int sg_len,
			      enum dma_transfer_direction direction,
			      unsigned long flags, void *context)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	u32 num_segments, segment_len;
	unsigned int loop = 0;
	struct scatterlist *sg;
	size_t axi_block_len;
	u32 len, num_sgs = 0;
	unsigned int i;
	dma_addr_t mem;
	int status;
u64 llp = 0;
|
||||
u8 lms = 0; /* Select AXI0 master for LLI fetching */
|
||||
|
||||
if (unlikely(!is_slave_direction(direction) || !sg_len))
|
||||
return NULL;
|
||||
|
||||
mem = sg_dma_address(sgl);
|
||||
len = sg_dma_len(sgl);
|
||||
|
||||
axi_block_len = calculate_block_len(chan, mem, len, direction);
|
||||
if (axi_block_len == 0)
|
||||
return NULL;
|
||||
|
||||
for_each_sg(sgl, sg, sg_len, i)
|
||||
num_sgs += DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
|
||||
|
||||
desc = axi_desc_alloc(num_sgs);
|
||||
if (unlikely(!desc))
|
||||
goto err_desc_get;
|
||||
|
||||
desc->chan = chan;
|
||||
desc->length = 0;
|
||||
chan->direction = direction;
|
||||
|
||||
for_each_sg(sgl, sg, sg_len, i) {
|
||||
mem = sg_dma_address(sg);
|
||||
len = sg_dma_len(sg);
|
||||
num_segments = DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
|
||||
segment_len = DIV_ROUND_UP(sg_dma_len(sg), num_segments);
|
||||
|
||||
do {
|
||||
hw_desc = &desc->hw_desc[loop++];
|
||||
status = dw_axi_dma_set_hw_desc(chan, hw_desc, mem, segment_len);
|
||||
if (status < 0)
|
||||
goto err_desc_get;
|
||||
|
||||
desc->length += hw_desc->len;
|
||||
len -= segment_len;
|
||||
mem += segment_len;
|
||||
} while (len >= segment_len);
|
||||
}
|
||||
|
||||
/* Set end-of-link to the last link descriptor of list */
|
||||
set_desc_last(&desc->hw_desc[num_sgs - 1]);
|
||||
|
||||
/* Managed transfer list */
|
||||
do {
|
||||
hw_desc = &desc->hw_desc[--num_sgs];
|
||||
write_desc_llp(hw_desc, llp | lms);
|
||||
llp = hw_desc->llp;
|
||||
} while (num_sgs);
|
||||
|
||||
dw_axi_dma_set_hw_channel(chan->chip, chan->hw_handshake_num, true);
|
||||
|
||||
return vchan_tx_prep(&chan->vc, &desc->vd, flags);
|
||||
|
||||
err_desc_get:
|
||||
if (desc)
|
||||
axi_desc_put(desc);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct dma_async_tx_descriptor *
|
||||
dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
|
||||
dma_addr_t src_adr, size_t len, unsigned long flags)
|
||||
{
|
||||
struct axi_dma_desc *first = NULL, *desc = NULL, *prev = NULL;
|
||||
struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
|
||||
size_t block_ts, max_block_ts, xfer_len;
|
||||
u32 xfer_width, reg;
|
||||
struct axi_dma_hw_desc *hw_desc = NULL;
|
||||
struct axi_dma_desc *desc = NULL;
|
||||
u32 xfer_width, reg, num;
|
||||
u64 llp = 0;
|
||||
u8 lms = 0; /* Select AXI0 master for LLI fetching */
|
||||
|
||||
dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
|
||||
axi_chan_name(chan), &src_adr, &dst_adr, len, flags);
|
||||
|
||||
max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
|
||||
xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, len);
|
||||
num = DIV_ROUND_UP(len, max_block_ts << xfer_width);
|
||||
desc = axi_desc_alloc(num);
|
||||
if (unlikely(!desc))
|
||||
goto err_desc_get;
|
||||
|
||||
desc->chan = chan;
|
||||
num = 0;
|
||||
desc->length = 0;
|
||||
while (len) {
|
||||
xfer_len = len;
|
||||
|
||||
hw_desc = &desc->hw_desc[num];
|
||||
/*
|
||||
* Take care for the alignment.
|
||||
* Actually source and destination widths can be different, but
|
||||
|
@ -457,13 +880,13 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
|
|||
xfer_len = max_block_ts << xfer_width;
|
||||
}
|
||||
|
||||
desc = axi_desc_get(chan);
|
||||
if (unlikely(!desc))
|
||||
hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
|
||||
if (unlikely(!hw_desc->lli))
|
||||
goto err_desc_get;
|
||||
|
||||
write_desc_sar(desc, src_adr);
|
||||
write_desc_dar(desc, dst_adr);
|
||||
desc->lli.block_ts_lo = cpu_to_le32(block_ts - 1);
|
||||
write_desc_sar(hw_desc, src_adr);
|
||||
write_desc_dar(hw_desc, dst_adr);
|
||||
hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);
|
||||
|
||||
reg = CH_CTL_H_LLI_VALID;
|
||||
if (chan->chip->dw->hdata->restrict_axi_burst_len) {
|
||||
|
@ -474,7 +897,7 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
|
|||
CH_CTL_H_AWLEN_EN |
|
||||
burst_len << CH_CTL_H_AWLEN_POS);
|
||||
}
|
||||
desc->lli.ctl_hi = cpu_to_le32(reg);
|
||||
hw_desc->lli->ctl_hi = cpu_to_le32(reg);
|
||||
|
||||
reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
|
||||
DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
|
||||
|
@ -482,62 +905,68 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
|
|||
xfer_width << CH_CTL_L_SRC_WIDTH_POS |
|
||||
DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
|
||||
DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
|
||||
desc->lli.ctl_lo = cpu_to_le32(reg);
|
||||
hw_desc->lli->ctl_lo = cpu_to_le32(reg);
|
||||
|
||||
set_desc_src_master(desc);
|
||||
set_desc_dest_master(desc);
|
||||
|
||||
/* Manage transfer list (xfer_list) */
|
||||
if (!first) {
|
||||
first = desc;
|
||||
} else {
|
||||
list_add_tail(&desc->xfer_list, &first->xfer_list);
|
||||
write_desc_llp(prev, desc->vd.tx.phys | lms);
|
||||
}
|
||||
prev = desc;
|
||||
set_desc_src_master(hw_desc);
|
||||
set_desc_dest_master(hw_desc, desc);
|
||||
|
||||
hw_desc->len = xfer_len;
|
||||
desc->length += hw_desc->len;
|
||||
/* update the length and addresses for the next loop cycle */
|
||||
len -= xfer_len;
|
||||
dst_adr += xfer_len;
|
||||
src_adr += xfer_len;
|
||||
num++;
|
||||
}
|
||||
|
||||
/* Total len of src/dest sg == 0, so no descriptor were allocated */
|
||||
if (unlikely(!first))
|
||||
return NULL;
|
||||
|
||||
/* Set end-of-link to the last link descriptor of list */
|
||||
set_desc_last(desc);
|
||||
set_desc_last(&desc->hw_desc[num - 1]);
|
||||
/* Managed transfer list */
|
||||
do {
|
||||
hw_desc = &desc->hw_desc[--num];
|
||||
write_desc_llp(hw_desc, llp | lms);
|
||||
llp = hw_desc->llp;
|
||||
} while (num);
|
||||
|
||||
return vchan_tx_prep(&chan->vc, &first->vd, flags);
|
||||
return vchan_tx_prep(&chan->vc, &desc->vd, flags);
|
||||
|
||||
err_desc_get:
|
||||
if (first)
|
||||
axi_desc_put(first);
|
||||
if (desc)
|
||||
axi_desc_put(desc);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
|
||||
struct dma_slave_config *config)
|
||||
{
|
||||
struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
|
||||
|
||||
memcpy(&chan->config, config, sizeof(*config));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void axi_chan_dump_lli(struct axi_dma_chan *chan,
|
||||
struct axi_dma_desc *desc)
|
||||
struct axi_dma_hw_desc *desc)
|
||||
{
|
||||
dev_err(dchan2dev(&chan->vc.chan),
|
||||
"SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
|
||||
le64_to_cpu(desc->lli.sar),
|
||||
le64_to_cpu(desc->lli.dar),
|
||||
le64_to_cpu(desc->lli.llp),
|
||||
le32_to_cpu(desc->lli.block_ts_lo),
|
||||
le32_to_cpu(desc->lli.ctl_hi),
|
||||
le32_to_cpu(desc->lli.ctl_lo));
|
||||
le64_to_cpu(desc->lli->sar),
|
||||
le64_to_cpu(desc->lli->dar),
|
||||
le64_to_cpu(desc->lli->llp),
|
||||
le32_to_cpu(desc->lli->block_ts_lo),
|
||||
le32_to_cpu(desc->lli->ctl_hi),
|
||||
le32_to_cpu(desc->lli->ctl_lo));
|
||||
}
|
||||
|
||||
static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
|
||||
struct axi_dma_desc *desc_head)
|
||||
{
|
||||
struct axi_dma_desc *desc;
|
||||
int count = atomic_read(&chan->descs_allocated);
|
||||
int i;
|
||||
|
||||
axi_chan_dump_lli(chan, desc_head);
|
||||
list_for_each_entry(desc, &desc_head->xfer_list, xfer_list)
|
||||
axi_chan_dump_lli(chan, desc);
|
||||
for (i = 0; i < count; i++)
|
||||
axi_chan_dump_lli(chan, &desc_head->hw_desc[i]);
|
||||
}
|
||||
|
||||
static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
|
||||
|
@ -570,8 +999,13 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
|
|||
|
||||
static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
|
||||
{
|
||||
int count = atomic_read(&chan->descs_allocated);
|
||||
struct axi_dma_hw_desc *hw_desc;
|
||||
struct axi_dma_desc *desc;
|
||||
struct virt_dma_desc *vd;
|
||||
unsigned long flags;
|
||||
u64 llp;
|
||||
int i;
|
||||
|
||||
spin_lock_irqsave(&chan->vc.lock, flags);
|
||||
if (unlikely(axi_chan_is_hw_enable(chan))) {
|
||||
|
@ -582,12 +1016,34 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
|
|||
|
||||
/* The completed descriptor currently is in the head of vc list */
|
||||
vd = vchan_next_desc(&chan->vc);
|
||||
/* Remove the completed descriptor from issued list before completing */
|
||||
list_del(&vd->node);
|
||||
vchan_cookie_complete(vd);
|
||||
|
||||
/* Submit queued descriptors after processing the completed ones */
|
||||
axi_chan_start_first_queued(chan);
|
||||
if (chan->cyclic) {
|
||||
desc = vd_to_axi_desc(vd);
|
||||
if (desc) {
|
||||
llp = lo_hi_readq(chan->chan_regs + CH_LLP);
|
||||
for (i = 0; i < count; i++) {
|
||||
hw_desc = &desc->hw_desc[i];
|
||||
if (hw_desc->llp == llp) {
|
||||
axi_chan_irq_clear(chan, hw_desc->lli->status_lo);
|
||||
hw_desc->lli->ctl_hi |= CH_CTL_H_LLI_VALID;
|
||||
desc->completed_blocks = i;
|
||||
|
||||
if (((hw_desc->len * (i + 1)) % desc->period_len) == 0)
|
||||
vchan_cyclic_callback(vd);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
axi_chan_enable(chan);
|
||||
}
|
||||
} else {
|
||||
/* Remove the completed descriptor from issued list before completing */
|
||||
list_del(&vd->node);
|
||||
vchan_cookie_complete(vd);
|
||||
|
||||
/* Submit queued descriptors after processing the completed ones */
|
||||
axi_chan_start_first_queued(chan);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&chan->vc.lock, flags);
|
||||
}
|
||||
|
@ -627,15 +1083,31 @@ static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
|
|||
static int dma_chan_terminate_all(struct dma_chan *dchan)
|
||||
{
|
||||
struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
|
||||
u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
|
||||
unsigned long flags;
|
||||
u32 val;
|
||||
int ret;
|
||||
LIST_HEAD(head);
|
||||
|
||||
spin_lock_irqsave(&chan->vc.lock, flags);
|
||||
|
||||
axi_chan_disable(chan);
|
||||
|
||||
ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
|
||||
!(val & chan_active), 1000, 10000);
|
||||
if (ret == -ETIMEDOUT)
|
||||
dev_warn(dchan2dev(dchan),
|
||||
"%s failed to stop\n", axi_chan_name(chan));
|
||||
|
||||
if (chan->direction != DMA_MEM_TO_MEM)
|
||||
dw_axi_dma_set_hw_channel(chan->chip,
|
||||
chan->hw_handshake_num, false);
|
||||
if (chan->direction == DMA_MEM_TO_DEV)
|
||||
dw_axi_dma_set_byte_halfword(chan, false);
|
||||
|
||||
spin_lock_irqsave(&chan->vc.lock, flags);
|
||||
|
||||
vchan_get_all_descriptors(&chan->vc, &head);
|
||||
|
||||
chan->cyclic = false;
|
||||
spin_unlock_irqrestore(&chan->vc.lock, flags);
|
||||
|
||||
vchan_dma_desc_free_list(&chan->vc, &head);
|
||||
|
@ -746,6 +1218,22 @@ static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
|
|||
return axi_dma_resume(chip);
|
||||
}
|
||||
|
||||
static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec,
|
||||
struct of_dma *ofdma)
|
||||
{
|
||||
struct dw_axi_dma *dw = ofdma->of_dma_data;
|
||||
struct axi_dma_chan *chan;
|
||||
struct dma_chan *dchan;
|
||||
|
||||
dchan = dma_get_any_slave_channel(&dw->dma);
|
||||
if (!dchan)
|
||||
return NULL;
|
||||
|
||||
chan = dchan_to_axi_dma_chan(dchan);
|
||||
chan->hw_handshake_num = dma_spec->args[0];
|
||||
return dchan;
|
||||
}
|
||||
|
||||
static int parse_device_properties(struct axi_dma_chip *chip)
|
||||
{
|
||||
struct device *dev = chip->dev;
|
||||
|
@ -816,6 +1304,7 @@ static int parse_device_properties(struct axi_dma_chip *chip)
|
|||
|
||||
static int dw_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device_node *node = pdev->dev.of_node;
|
||||
struct axi_dma_chip *chip;
|
||||
struct resource *mem;
|
||||
struct dw_axi_dma *dw;
|
||||
|
@ -848,6 +1337,12 @@ static int dw_probe(struct platform_device *pdev)
|
|||
if (IS_ERR(chip->regs))
|
||||
return PTR_ERR(chip->regs);
|
||||
|
||||
if (of_device_is_compatible(node, "intel,kmb-axi-dma")) {
|
||||
chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
|
||||
if (IS_ERR(chip->apb_regs))
|
||||
return PTR_ERR(chip->apb_regs);
|
||||
}
|
||||
|
||||
chip->core_clk = devm_clk_get(chip->dev, "core-clk");
|
||||
if (IS_ERR(chip->core_clk))
|
||||
return PTR_ERR(chip->core_clk);
|
||||
|
@ -870,13 +1365,6 @@ static int dw_probe(struct platform_device *pdev)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Lli address must be aligned to a 64-byte boundary */
|
||||
dw->desc_pool = dmam_pool_create(KBUILD_MODNAME, chip->dev,
|
||||
sizeof(struct axi_dma_desc), 64, 0);
|
||||
if (!dw->desc_pool) {
|
||||
dev_err(chip->dev, "No memory for descriptors dma pool\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&dw->dma.channels);
|
||||
for (i = 0; i < hdata->nr_channels; i++) {
|
||||
|
@ -893,13 +1381,16 @@ static int dw_probe(struct platform_device *pdev)
|
|||
|
||||
/* Set capabilities */
|
||||
dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
|
||||
dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
|
||||
dma_cap_set(DMA_CYCLIC, dw->dma.cap_mask);
|
||||
|
||||
/* DMA capabilities */
|
||||
dw->dma.chancnt = hdata->nr_channels;
|
||||
dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
|
||||
dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
|
||||
dw->dma.directions = BIT(DMA_MEM_TO_MEM);
|
||||
dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
|
||||
dw->dma.directions |= BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
|
||||
dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
|
||||
|
||||
dw->dma.dev = chip->dev;
|
||||
dw->dma.device_tx_status = dma_chan_tx_status;
|
||||
|
@ -912,7 +1403,18 @@ static int dw_probe(struct platform_device *pdev)
|
|||
dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;
|
||||
|
||||
dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
|
||||
dw->dma.device_synchronize = dw_axi_dma_synchronize;
|
||||
dw->dma.device_config = dw_axi_dma_chan_slave_config;
|
||||
dw->dma.device_prep_slave_sg = dw_axi_dma_chan_prep_slave_sg;
|
||||
dw->dma.device_prep_dma_cyclic = dw_axi_dma_chan_prep_cyclic;
|
||||
|
||||
/*
|
||||
* Synopsis DesignWare AxiDMA datasheet mentioned Maximum
|
||||
* supported blocks is 1024. Device register width is 4 bytes.
|
||||
* Therefore, set constraint to 1024 * 4.
|
||||
*/
|
||||
dw->dma.dev->dma_parms = &dw->dma_parms;
|
||||
dma_set_max_seg_size(&pdev->dev, MAX_BLOCK_SIZE);
|
||||
platform_set_drvdata(pdev, chip);
|
||||
|
||||
pm_runtime_enable(chip->dev);
|
||||
|
@ -935,6 +1437,13 @@ static int dw_probe(struct platform_device *pdev)
|
|||
if (ret)
|
||||
goto err_pm_disable;
|
||||
|
||||
/* Register with OF helpers for DMA lookups */
|
||||
ret = of_dma_controller_register(pdev->dev.of_node,
|
||||
dw_axi_dma_of_xlate, dw);
|
||||
if (ret < 0)
|
||||
dev_warn(&pdev->dev,
|
||||
"Failed to register OF DMA controller, fallback to MEM_TO_MEM mode\n");
|
||||
|
||||
dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
|
||||
dw->hdata->nr_channels);
|
||||
|
||||
|
@ -968,6 +1477,8 @@ static int dw_remove(struct platform_device *pdev)
|
|||
|
||||
devm_free_irq(chip->dev, chip->irq, chip);
|
||||
|
||||
of_dma_controller_free(chip->dev->of_node);
|
||||
|
||||
list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
|
||||
vc.chan.device_node) {
|
||||
list_del(&chan->vc.chan.device_node);
|
||||
|
@ -983,6 +1494,7 @@ static const struct dev_pm_ops dw_axi_dma_pm_ops = {
|
|||
|
||||
static const struct of_device_id dw_dma_of_id_table[] = {
|
||||
{ .compatible = "snps,axi-dma-1.01a" },
|
||||
{ .compatible = "intel,kmb-axi-dma" },
|
||||
{}
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
|
||||
|
|
|
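The prep callbacks above first split a transfer into hardware-sized segments (num_segments = DIV_ROUND_UP(period_len, axi_block_len), with segment_len re-divided so all segments stay roughly equal), then chain the LLIs back to front. A minimal editorial sketch of that chaining, with illustrative names rather than code from this series: seeding llp with the first segment's address is what closes the chain into a ring for the cyclic case, while prep_slave_sg seeds llp = 0 so the chain terminates.

static void sketch_link_segments(struct axi_dma_hw_desc *hw, u32 n, u64 first_llp)
{
	u64 llp = first_llp;	/* cyclic: the last segment points back at the first */

	while (n--) {
		hw[n].lli->llp = cpu_to_le64(llp);	/* this LLI -> next LLI */
		llp = hw[n].llp;	/* the previous segment will point here */
	}
}
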
@@ -37,10 +37,16 @@ struct axi_dma_chan {
	struct axi_dma_chip *chip;
	void __iomem *chan_regs;
	u8 id;
	u8 hw_handshake_num;
	atomic_t descs_allocated;

	struct dma_pool *desc_pool;
	struct virt_dma_chan vc;

	struct axi_dma_desc *desc;
	struct dma_slave_config config;
	enum dma_transfer_direction direction;
	bool cyclic;
	/* these other elements are all protected by vc.lock */
	bool is_paused;
};

@@ -48,7 +54,7 @@ struct axi_dma_chan {
struct dw_axi_dma {
	struct dma_device dma;
	struct dw_axi_dma_hcfg *hdata;
	struct dma_pool *desc_pool;
	struct device_dma_parameters dma_parms;

	/* channels */
	struct axi_dma_chan *chan;

@@ -58,6 +64,7 @@ struct axi_dma_chip {
	struct device *dev;
	int irq;
	void __iomem *regs;
	void __iomem *apb_regs;
	struct clk *core_clk;
	struct clk *cfgr_clk;
	struct dw_axi_dma *dw;

@@ -80,12 +87,20 @@ struct __packed axi_dma_lli {
	__le32 reserved_hi;
};

struct axi_dma_hw_desc {
	struct axi_dma_lli *lli;
	dma_addr_t llp;
	u32 len;
};

struct axi_dma_desc {
	struct axi_dma_lli lli;
	struct axi_dma_hw_desc *hw_desc;

	struct virt_dma_desc vd;
	struct axi_dma_chan *chan;
	struct list_head xfer_list;
	u32 completed_blocks;
	u32 length;
	u32 period_len;
};

static inline struct device *dchan2dev(struct dma_chan *dchan)

@@ -157,6 +172,19 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan)
#define CH_INTSIGNAL_ENA	0x090 /* R/W Chan Interrupt Signal Enable */
#define CH_INTCLEAR		0x098 /* W Chan Interrupt Clear */

/* These Apb registers are used by Intel KeemBay SoC */
#define DMAC_APB_CFG		0x000 /* DMAC Apb Configuration Register */
#define DMAC_APB_STAT		0x004 /* DMAC Apb Status Register */
#define DMAC_APB_DEBUG_STAT_0	0x008 /* DMAC Apb Debug Status Register 0 */
#define DMAC_APB_DEBUG_STAT_1	0x00C /* DMAC Apb Debug Status Register 1 */
#define DMAC_APB_HW_HS_SEL_0	0x010 /* DMAC Apb HW HS register 0 */
#define DMAC_APB_HW_HS_SEL_1	0x014 /* DMAC Apb HW HS register 1 */
#define DMAC_APB_LPI		0x018 /* DMAC Apb Low Power Interface Reg */
#define DMAC_APB_BYTE_WR_CH_EN	0x01C /* DMAC Apb Byte Write Enable */
#define DMAC_APB_HALFWORD_WR_CH_EN	0x020 /* DMAC Halfword write enables */

#define UNUSED_CHANNEL		0x3F /* Set unused DMA channel to 0x3F */
#define MAX_BLOCK_SIZE		0x1000 /* 1024 blocks * 4 bytes data width */

/* DMAC_CFG */
#define DMAC_EN_POS		0

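A worked example of the MAX_BLOCK_SIZE constraint defined above (an editorial sketch assuming the 4-byte data width named in its comment, not code from this series):

/* 1024 blocks x 4 bytes each = 4096 bytes = 0x1000 per linked-list item */
#define SKETCH_MAX_SEG	(1024 * 4)

static void sketch_report_seg_limit(struct device *dev)
{
	/* clients reading dma_get_max_seg_size() then split larger segments */
	dma_set_max_seg_size(dev, SKETCH_MAX_SEG);
}
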
@@ -1214,6 +1214,7 @@ static int fsldma_of_probe(struct platform_device *op)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	unsigned int i;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);

@@ -1292,6 +1293,10 @@ static int fsldma_of_probe(struct platform_device *op)
	return 0;

out_free_fdev:
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}
	irq_dispose_mapping(fdev->irq);
	iounmap(fdev->regs);
out_free:

@@ -1314,6 +1319,7 @@ static int fsldma_of_remove(struct platform_device *op)
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}
	irq_dispose_mapping(fdev->irq);

	iounmap(fdev->regs);
	kfree(fdev);

@@ -26,22 +26,12 @@
static irqreturn_t hsu_pci_irq(int irq, void *dev)
{
	struct hsu_dma_chip *chip = dev;
	struct pci_dev *pdev = to_pci_dev(chip->dev);
	u32 dmaisr;
	u32 status;
	unsigned short i;
	int ret = 0;
	int err;

	/*
	 * On Intel Tangier B0 and Anniedale the interrupt line, disregarding
	 * to have different numbers, is shared between HSU DMA and UART IPs.
	 * Thus on such SoCs we are expecting that IRQ handler is called in
	 * UART driver only.
	 */
	if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA)
		return IRQ_HANDLED;

	dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
	for (i = 0; i < chip->hsu->nr_channels; i++) {
		if (dmaisr & 0x1) {

@@ -105,6 +95,17 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	if (ret)
		goto err_register_irq;

	/*
	 * On Intel Tangier B0 and Anniedale the interrupt line, disregarding
	 * to have different numbers, is shared between HSU DMA and UART IPs.
	 * Thus on such SoCs we are expecting that IRQ handler is called in
	 * UART driver only. Instead of handling the spurious interrupt
	 * from HSU DMA here and waste CPU time and delay HSU UART interrupt
	 * handling, disable the interrupt entirely.
	 */
	if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA)
		disable_irq_nosync(chip->irq);

	pci_set_drvdata(pdev, chip);

	return 0;

@@ -165,6 +165,7 @@ int idxd_register_dma_device(struct idxd_device *idxd)
	INIT_LIST_HEAD(&dma->channels);
	dma->dev = &idxd->pdev->dev;

	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
	dma->device_release = idxd_dma_release;

@@ -26,12 +26,16 @@ MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");

static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");

#define DRV_NAME "idxd"

bool support_enqcmd;

static struct idr idxd_idrs[IDXD_TYPE_MAX];
static struct mutex idxd_idr_lock;
static DEFINE_MUTEX(idxd_idr_lock);

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */

@@ -341,12 +345,14 @@ static int idxd_probe(struct idxd_device *idxd)

	dev_dbg(dev, "IDXD reset complete\n");

	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM)) {
	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
		rc = idxd_enable_system_pasid(idxd);
		if (rc < 0)
			dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
		else
			set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
	} else if (!sva) {
		dev_warn(dev, "User forced SVA off via module param.\n");
	}

	idxd_read_caps(idxd);

@@ -547,7 +553,6 @@ static int __init idxd_init_module(void)
	else
		support_enqcmd = true;

	mutex_init(&idxd_idr_lock);
	for (i = 0; i < IDXD_TYPE_MAX; i++)
		idr_init(&idxd_idrs[i]);

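The sva parameter above gates PASID setup both at compile time and at run time. A generic sketch of that pattern, with illustrative names (CONFIG_EXAMPLE_FEATURE is hypothetical, this is not idxd code):

static bool feature = true;
module_param(feature, bool, 0644);
MODULE_PARM_DESC(feature, "Toggle the feature on/off");

static void sketch_probe(struct device *dev)
{
	/* enabled only if compiled in and not forced off on the command line */
	if (IS_ENABLED(CONFIG_EXAMPLE_FEATURE) && feature)
		dev_info(dev, "feature enabled\n");
	else if (!feature)
		dev_warn(dev, "feature forced off via module param\n");
}
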
@@ -1952,8 +1952,6 @@ static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,

static int sdma_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(sdma_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct device_node *spba_bus;
	const char *fw_name;

@@ -1961,17 +1959,9 @@ static int sdma_probe(struct platform_device *pdev)
	int irq;
	struct resource *iores;
	struct resource spba_res;
	struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int i;
	struct sdma_engine *sdma;
	s32 *saddr_arr;
	const struct sdma_driver_data *drvdata = NULL;

	drvdata = of_id->data;
	if (!drvdata) {
		dev_err(&pdev->dev, "unable to find driver data\n");
		return -EINVAL;
	}

	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)

@@ -1984,7 +1974,7 @@ static int sdma_probe(struct platform_device *pdev)
	spin_lock_init(&sdma->channel_0_lock);

	sdma->dev = &pdev->dev;
	sdma->drvdata = drvdata;
	sdma->drvdata = of_device_get_match_data(sdma->dev);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)

@@ -2063,8 +2053,6 @@ static int sdma_probe(struct platform_device *pdev)

	if (sdma->drvdata->script_addrs)
		sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
	if (pdata && pdata->script_addrs)
		sdma_add_scripts(sdma, pdata->script_addrs);

	sdma->dma_device.dev = &pdev->dev;

@@ -2110,30 +2098,18 @@ static int sdma_probe(struct platform_device *pdev)
	}

	/*
	 * Kick off firmware loading as the very last step:
	 * attempt to load firmware only if we're not on the error path, because
	 * the firmware callback requires a fully functional and allocated sdma
	 * instance.
	 * Because that device tree does not encode ROM script address,
	 * the RAM script in firmware is mandatory for device tree
	 * probe, otherwise it fails.
	 */
	if (pdata) {
		ret = sdma_get_firmware(sdma, pdata->fw_name);
		if (ret)
			dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
	ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
				      &fw_name);
	if (ret) {
		dev_warn(&pdev->dev, "failed to get firmware name\n");
	} else {
		/*
		 * Because that device tree does not encode ROM script address,
		 * the RAM script in firmware is mandatory for device tree
		 * probe, otherwise it fails.
		 */
		ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
					      &fw_name);
		if (ret) {
			dev_warn(&pdev->dev, "failed to get firmware name\n");
		} else {
			ret = sdma_get_firmware(sdma, fw_name);
			if (ret)
				dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
		}
		ret = sdma_get_firmware(sdma, fw_name);
		if (ret)
			dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
	}

	return 0;

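The probe rework above replaces the open-coded of_match_device() dance with a single lookup: the OF core walks the driver's match table for the probing device and hands back the .data pointer directly. A sketch of the replacement idiom (illustrative function name, not from this series):

static int sketch_probe(struct platform_device *pdev)
{
	const struct sdma_driver_data *drvdata;

	/* matches pdev against sdma_dt_ids and returns the entry's .data */
	drvdata = of_device_get_match_data(&pdev->dev);
	if (!drvdata)
		return -EINVAL;

	return 0;
}
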
@@ -0,0 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
config INTEL_LDMA
	bool "Lightning Mountain centralized DMA controllers"
	depends on X86 || COMPILE_TEST
	select DMA_ENGINE
	select DMA_VIRTUAL_CHANNELS
	help
	  Enable support for Intel Lightning Mountain SOC DMA controllers.
	  These controllers provide DMA capabilities for a variety of on-chip
	  devices such as HSNAND and GSWIP (Gigabit Switch IP).

@@ -0,0 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INTEL_LDMA) += lgm-dma.o

[Diff not shown: file too large.]

@@ -18,7 +18,6 @@
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
#include <linux/dma/mmp-pdma.h>

#include "dmaengine.h"

@@ -1148,19 +1147,6 @@ static struct platform_driver mmp_pdma_driver = {
	.remove = mmp_pdma_remove,
};

bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);

	if (chan->device->dev->driver != &mmp_pdma_driver.driver)
		return false;

	c->drcmr = *(unsigned int *)param;

	return true;
}
EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);

module_platform_driver(mmp_pdma_driver);

MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");

@@ -1080,8 +1080,9 @@ static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec,
}

static const struct of_device_id owl_dma_match[] = {
	{ .compatible = "actions,s900-dma", .data = (void *)S900_DMA,},
	{ .compatible = "actions,s500-dma", .data = (void *)S900_DMA,},
	{ .compatible = "actions,s700-dma", .data = (void *)S700_DMA,},
	{ .compatible = "actions,s900-dma", .data = (void *)S900_DMA,},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, owl_dma_match);

@@ -1245,6 +1246,7 @@ static int owl_dma_remove(struct platform_device *pdev)
	owl_dma_free(od);

	clk_disable_unprepare(od->clk);
	dma_pool_destroy(od->lli_pool);

	return 0;
}

@@ -1270,13 +1270,13 @@ static int bam_dma_probe(struct platform_device *pdev)
		dev_err(bdev->dev, "num-ees unspecified in dt\n");
	}

	bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
	if (IS_ERR(bdev->bamclk)) {
		if (!bdev->controlled_remotely)
			return PTR_ERR(bdev->bamclk);
	if (bdev->controlled_remotely)
		bdev->bamclk = devm_clk_get_optional(bdev->dev, "bam_clk");
	else
		bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");

		bdev->bamclk = NULL;
	}
	if (IS_ERR(bdev->bamclk))
		return PTR_ERR(bdev->bamclk);

	ret = clk_prepare_enable(bdev->bamclk);
	if (ret) {

@@ -1350,7 +1350,7 @@ static int bam_dma_probe(struct platform_device *pdev)
	if (ret)
		goto err_unregister_dma;

	if (bdev->controlled_remotely) {
	if (!bdev->bamclk) {
		pm_runtime_disable(&pdev->dev);
		return 0;
	}

@@ -1438,10 +1438,10 @@ static int __maybe_unused bam_dma_suspend(struct device *dev)
{
	struct bam_device *bdev = dev_get_drvdata(dev);

	if (!bdev->controlled_remotely)
	if (bdev->bamclk) {
		pm_runtime_force_suspend(dev);

	clk_unprepare(bdev->bamclk);
		clk_unprepare(bdev->bamclk);
	}

	return 0;
}

@@ -1451,12 +1451,13 @@ static int __maybe_unused bam_dma_resume(struct device *dev)
	struct bam_device *bdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare(bdev->bamclk);
	if (ret)
		return ret;
	if (bdev->bamclk) {
		ret = clk_prepare(bdev->bamclk);
		if (ret)
			return ret;

	if (!bdev->controlled_remotely)
		pm_runtime_force_resume(dev);
	}

	return 0;
}

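The bam_dma change above leans on two properties of the clk API: devm_clk_get_optional() returns NULL rather than an error when the clock is absent, and every clk_*() call treats a NULL clock as a no-op. A condensed editorial sketch, with an illustrative function name:

static int sketch_get_bamclk(struct device *dev, bool controlled_remotely,
			     struct clk **out)
{
	struct clk *clk;

	if (controlled_remotely)
		clk = devm_clk_get_optional(dev, "bam_clk");	/* NULL if absent */
	else
		clk = devm_clk_get(dev, "bam_clk");		/* must exist */

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* real errors, e.g. -EPROBE_DEFER */

	*out = clk;	/* may be NULL: clk_prepare_enable() etc. become no-ops */
	return 0;
}
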
@@ -584,7 +584,7 @@ static inline void gpi_write_reg_field(struct gpii *gpii, void __iomem *addr,
	gpi_write_reg(gpii, addr, val);
}

static inline void
static __always_inline void
gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val)
{
	void __iomem *addr = gpii->regs + offset;

@@ -1700,7 +1700,7 @@ static int gpi_create_i2c_tre(struct gchan *chan, struct gpi_desc *desc,

		tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE);
		tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT);
	};
	}

	for (i = 0; i < tre_idx; i++)
		dev_dbg(dev, "TRE:%d %x:%x:%x:%x\n", i, desc->tre[i].dword[0],

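On the gpi_update_reg() change above: plain inline is only a hint that the compiler may ignore (for example at low optimization levels), while __always_inline forces expansion, which matters for tiny register helpers on hot paths. An editorial sketch of the distinction:

static __always_inline u32 sketch_field_get(u32 reg, u32 mask, u32 shift)
{
	return (reg & mask) >> shift;	/* guaranteed to be inlined */
}
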
@@ -189,7 +189,8 @@ struct rcar_dmac_chan {
 * struct rcar_dmac - R-Car Gen2 DMA Controller
 * @engine: base DMA engine object
 * @dev: the hardware device
 * @iomem: remapped I/O memory base
 * @dmac_base: remapped base register block
 * @chan_base: remapped channel register block (optional)
 * @n_channels: number of available channels
 * @channels: array of DMAC channels
 * @channels_mask: bitfield of which DMA channels are managed by this driver

@@ -198,7 +199,8 @@ struct rcar_dmac_chan {
struct rcar_dmac {
	struct dma_device engine;
	struct device *dev;
	void __iomem *iomem;
	void __iomem *dmac_base;
	void __iomem *chan_base;

	unsigned int n_channels;
	struct rcar_dmac_chan *channels;

@@ -209,6 +211,10 @@ struct rcar_dmac {

#define to_rcar_dmac(d)		container_of(d, struct rcar_dmac, engine)

#define for_each_rcar_dmac_chan(i, dmac, chan)						\
	for (i = 0, chan = &(dmac)->channels[0]; i < (dmac)->n_channels; i++, chan++)	\
		if (!((dmac)->channels_mask & BIT(i))) continue; else

/*
 * struct rcar_dmac_of_data - This driver's OF data
 * @chan_offset_base: DMAC channels base offset

@@ -230,7 +236,7 @@ struct rcar_dmac_of_data {
#define RCAR_DMAOR_PRI_ROUND_ROBIN	(3 << 8)
#define RCAR_DMAOR_AE			(1 << 2)
#define RCAR_DMAOR_DME			(1 << 0)
#define RCAR_DMACHCLR			0x0080
#define RCAR_DMACHCLR			0x0080	/* Not on R-Car V3U */
#define RCAR_DMADPSEC			0x00a0

#define RCAR_DMASAR			0x0000

@@ -293,6 +299,9 @@ struct rcar_dmac_of_data {
#define RCAR_DMAFIXDAR			0x0014
#define RCAR_DMAFIXDPBASE		0x0060

/* For R-Car V3U */
#define RCAR_V3U_DMACHCLR		0x0100

/* Hardcode the MEMCPY transfer size to 4 bytes. */
#define RCAR_DMAC_MEMCPY_XFER_SIZE	4

@@ -303,17 +312,17 @@ struct rcar_dmac_of_data {
static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
{
	if (reg == RCAR_DMAOR)
		writew(data, dmac->iomem + reg);
		writew(data, dmac->dmac_base + reg);
	else
		writel(data, dmac->iomem + reg);
		writel(data, dmac->dmac_base + reg);
}

static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
{
	if (reg == RCAR_DMAOR)
		return readw(dmac->iomem + reg);
		return readw(dmac->dmac_base + reg);
	else
		return readl(dmac->iomem + reg);
		return readl(dmac->dmac_base + reg);
}

static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)

@@ -332,6 +341,28 @@ static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
	writel(data, chan->iomem + reg);
}

static void rcar_dmac_chan_clear(struct rcar_dmac *dmac,
				 struct rcar_dmac_chan *chan)
{
	if (dmac->chan_base)
		rcar_dmac_chan_write(chan, RCAR_V3U_DMACHCLR, 1);
	else
		rcar_dmac_write(dmac, RCAR_DMACHCLR, BIT(chan->index));
}

static void rcar_dmac_chan_clear_all(struct rcar_dmac *dmac)
{
	struct rcar_dmac_chan *chan;
	unsigned int i;

	if (dmac->chan_base) {
		for_each_rcar_dmac_chan(i, dmac, chan)
			rcar_dmac_chan_write(chan, RCAR_V3U_DMACHCLR, 1);
	} else {
		rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
	}
}

/* -----------------------------------------------------------------------------
 * Initialization and configuration
 */

@@ -447,7 +478,7 @@ static int rcar_dmac_init(struct rcar_dmac *dmac)
	u16 dmaor;

	/* Clear all channels and enable the DMAC globally. */
	rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
	rcar_dmac_chan_clear_all(dmac);
	rcar_dmac_write(dmac, RCAR_DMAOR,
			RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);

@@ -817,15 +848,11 @@ static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)

static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac)
{
	struct rcar_dmac_chan *chan;
	unsigned int i;

	/* Stop all channels. */
	for (i = 0; i < dmac->n_channels; ++i) {
		struct rcar_dmac_chan *chan = &dmac->channels[i];

		if (!(dmac->channels_mask & BIT(i)))
			continue;

	for_each_rcar_dmac_chan(i, dmac, chan) {
		/* Stop and reinitialize the channel. */
		spin_lock_irq(&chan->lock);
		rcar_dmac_chan_halt(chan);

@@ -1566,7 +1593,7 @@ static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
		 * because channel is already stopped in error case.
		 * We need to clear register and check DE bit as recovery.
		 */
		rcar_dmac_write(dmac, RCAR_DMACHCLR, 1 << chan->index);
		rcar_dmac_chan_clear(dmac, chan);
		rcar_dmac_chcr_de_barrier(chan);
		reinit = true;
		goto spin_lock_end;

@@ -1732,9 +1759,7 @@ static const struct dev_pm_ops rcar_dmac_pm = {
 */

static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
				struct rcar_dmac_chan *rchan,
				const struct rcar_dmac_of_data *data,
				unsigned int index)
				struct rcar_dmac_chan *rchan)
{
	struct platform_device *pdev = to_platform_device(dmac->dev);
	struct dma_chan *chan = &rchan->chan;

@@ -1742,9 +1767,6 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
	char *irqname;
	int ret;

	rchan->index = index;
	rchan->iomem = dmac->iomem + data->chan_offset_base +
		       data->chan_offset_stride * index;
	rchan->mid_rid = -EINVAL;

	spin_lock_init(&rchan->lock);

@@ -1756,13 +1778,13 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
	INIT_LIST_HEAD(&rchan->desc.wait);

	/* Request the channel interrupt. */
	sprintf(pdev_irqname, "ch%u", index);
	sprintf(pdev_irqname, "ch%u", rchan->index);
	rchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
	if (rchan->irq < 0)
		return -ENODEV;

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
				 dev_name(dmac->dev), index);
				 dev_name(dmac->dev), rchan->index);
	if (!irqname)
		return -ENOMEM;

@@ -1828,9 +1850,11 @@ static int rcar_dmac_probe(struct platform_device *pdev)
		DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
		DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
		DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
	struct dma_device *engine;
	struct rcar_dmac *dmac;
	const struct rcar_dmac_of_data *data;
	struct rcar_dmac_chan *chan;
	struct dma_device *engine;
	void __iomem *chan_base;
	struct rcar_dmac *dmac;
	unsigned int i;
	int ret;

@@ -1868,9 +1892,24 @@ static int rcar_dmac_probe(struct platform_device *pdev)
		return -ENOMEM;

	/* Request resources. */
	dmac->iomem = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dmac->iomem))
		return PTR_ERR(dmac->iomem);
	dmac->dmac_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dmac->dmac_base))
		return PTR_ERR(dmac->dmac_base);

	if (!data->chan_offset_base) {
		dmac->chan_base = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(dmac->chan_base))
			return PTR_ERR(dmac->chan_base);

		chan_base = dmac->chan_base;
	} else {
		chan_base = dmac->dmac_base + data->chan_offset_base;
	}

	for_each_rcar_dmac_chan(i, dmac, chan) {
		chan->index = i;
		chan->iomem = chan_base + i * data->chan_offset_stride;
	}

	/* Enable runtime PM and initialize the device. */
	pm_runtime_enable(&pdev->dev);

@@ -1916,11 +1955,8 @@ static int rcar_dmac_probe(struct platform_device *pdev)

	INIT_LIST_HEAD(&engine->channels);

	for (i = 0; i < dmac->n_channels; ++i) {
		if (!(dmac->channels_mask & BIT(i)))
			continue;

		ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], data, i);
	for_each_rcar_dmac_chan(i, dmac, chan) {
		ret = rcar_dmac_chan_probe(dmac, chan);
		if (ret < 0)
			goto error;
	}

@@ -1968,14 +2004,22 @@ static void rcar_dmac_shutdown(struct platform_device *pdev)
}

static const struct rcar_dmac_of_data rcar_dmac_data = {
	.chan_offset_base = 0x8000,
	.chan_offset_stride = 0x80,
	.chan_offset_base	= 0x8000,
	.chan_offset_stride	= 0x80,
};

static const struct rcar_dmac_of_data rcar_v3u_dmac_data = {
	.chan_offset_base	= 0x0,
	.chan_offset_stride	= 0x1000,
};

static const struct of_device_id rcar_dmac_of_ids[] = {
	{
		.compatible = "renesas,rcar-dmac",
		.data = &rcar_dmac_data,
	}, {
		.compatible = "renesas,dmac-r8a779a0",
		.data = &rcar_v3u_dmac_data,
	},
	{ /* Sentinel */ }
};

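The for_each_rcar_dmac_chan() iterator introduced above folds the channels_mask test into the loop header with an "if (skip) continue; else" tail: the caller's loop body binds to the dangling else branch, so masked-out channels are skipped without extra braces at every call site. The trick in its generic form (editorial sketch, illustrative name):

#define sketch_for_each_enabled(i, count, mask)		\
	for ((i) = 0; (i) < (count); (i)++)		\
		if (!((mask) & BIT(i))) continue; else

/* usage: sketch_for_each_enabled(i, n_channels, channels_mask) { ... } */
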
[Diff not shown: file too large.]

@@ -78,7 +78,7 @@ static int dma40_memcpy_channels[] = {
	DB8500_DMA_MEMCPY_EV_5,
};

/* Default configuration for physcial memcpy */
/* Default configuration for physical memcpy */
static const struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
	.mode = STEDMA40_MODE_PHYSICAL,
	.dir = DMA_MEM_TO_MEM,

@@ -121,6 +121,11 @@ struct udma_oes_offsets {
#define UDMA_FLAG_PDMA_ACC32		BIT(0)
#define UDMA_FLAG_PDMA_BURST		BIT(1)
#define UDMA_FLAG_TDTYPE		BIT(2)
#define UDMA_FLAG_BURST_SIZE		BIT(3)
#define UDMA_FLAGS_J7_CLASS		(UDMA_FLAG_PDMA_ACC32 | \
					 UDMA_FLAG_PDMA_BURST | \
					 UDMA_FLAG_TDTYPE | \
					 UDMA_FLAG_BURST_SIZE)

struct udma_match_data {
	enum k3_dma_type type;

@@ -128,6 +133,7 @@ struct udma_match_data {
	bool enable_memcpy_support;
	u32 flags;
	u32 statictr_z_mask;
	u8 burst_size[3];
};

struct udma_soc_data {

@@ -436,6 +442,18 @@ static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel)
	}
}

static u8 udma_get_chan_tpl_index(struct udma_tpl *tpl_map, int chan_id)
{
	int i;

	for (i = 0; i < tpl_map->levels; i++) {
		if (chan_id >= tpl_map->start_idx[i])
			return i;
	}

	return 0;
}

static void udma_reset_uchan(struct udma_chan *uc)
{
	memset(&uc->config, 0, sizeof(uc->config));

@@ -1811,13 +1829,21 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	struct udma_rchan *rchan = uc->rchan;
	int ret = 0;
	u8 burst_size = 0;
	int ret;
	u8 tpl;

	/* Non synchronized - mem to mem type of transfer */
	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };

	if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
		tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id);

		burst_size = ud->match_data->burst_size[tpl];
	}

	req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.index = tchan->id;

@@ -1825,6 +1851,10 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
	req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	req_tx.txcq_qnum = tc_ring;
	req_tx.tx_atype = ud->atype;
	if (burst_size) {
		req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
		req_tx.tx_burst_size = burst_size;
	}

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret) {

@@ -1839,6 +1869,10 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
	req_rx.rxcq_qnum = tc_ring;
	req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
	req_rx.rx_atype = ud->atype;
	if (burst_size) {
		req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
		req_rx.rx_burst_size = burst_size;
	}

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret)

@@ -1854,12 +1888,24 @@ static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	struct udma_bchan *bchan = uc->bchan;
	int ret = 0;
	u8 burst_size = 0;
	int ret;
	u8 tpl;

	if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
		tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id);

		burst_size = ud->match_data->burst_size[tpl];
	}

	req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
	req_tx.index = bchan->id;
	if (burst_size) {
		req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
		req_tx.tx_burst_size = burst_size;
	}

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)

@@ -1877,7 +1923,7 @@ static int udma_tisci_tx_channel_config(struct udma_chan *uc)
	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	u32 mode, fetch_size;
	int ret = 0;
	int ret;

	if (uc->config.pkt_mode) {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;

@@ -1918,7 +1964,7 @@ static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	int ret = 0;
	int ret;

	req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;

@@ -1951,7 +1997,7 @@ static int udma_tisci_rx_channel_config(struct udma_chan *uc)
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	u32 mode, fetch_size;
	int ret = 0;
	int ret;

	if (uc->config.pkt_mode) {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;

@@ -2028,7 +2074,7 @@ static int bcdma_tisci_rx_channel_config(struct udma_chan *uc)
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_rchan *rchan = uc->rchan;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
	int ret = 0;
	int ret;

	req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;

@@ -2048,7 +2094,7 @@ static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	int ret = 0;
	int ret;

	req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;

@@ -4168,6 +4214,11 @@ static struct udma_match_data am654_main_data = {
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.statictr_z_mask = GENMASK(11, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
		0, /* No UH Channels */
	},
};

static struct udma_match_data am654_mcu_data = {

@@ -4175,38 +4226,63 @@ static struct udma_match_data am654_mcu_data = {
	.psil_base = 0x6000,
	.enable_memcpy_support = false,
	.statictr_z_mask = GENMASK(11, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
		0, /* No UH Channels */
	},
};

static struct udma_match_data j721e_main_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.flags = UDMA_FLAGS_J7_CLASS,
	.statictr_z_mask = GENMASK(23, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* H Channels */
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* UH Channels */
	},
};

static struct udma_match_data j721e_mcu_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x6000,
	.enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.flags = UDMA_FLAGS_J7_CLASS,
	.statictr_z_mask = GENMASK(23, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES, /* H Channels */
		0, /* No UH Channels */
	},
};

static struct udma_match_data am64_bcdma_data = {
	.type = DMA_TYPE_BCDMA,
	.psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
	.enable_memcpy_support = true, /* Supported via bchan */
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.flags = UDMA_FLAGS_J7_CLASS,
	.statictr_z_mask = GENMASK(23, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		0, /* No H Channels */
		0, /* No UH Channels */
	},
};

static struct udma_match_data am64_pktdma_data = {
	.type = DMA_TYPE_PKTDMA,
	.psil_base = 0x1000,
	.enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.flags = UDMA_FLAGS_J7_CLASS,
	.statictr_z_mask = GENMASK(23, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		0, /* No H Channels */
		0, /* No UH Channels */
	},
};

static const struct of_device_id udma_of_match[] = {

@@ -4306,6 +4382,7 @@ static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
		ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
		ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
		ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
		ud->rflow_cnt = ud->rchan_cnt;
		break;
	case DMA_TYPE_PKTDMA:
		cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);

@@ -5046,6 +5123,34 @@ static void udma_dbg_summary_show(struct seq_file *s,
}
#endif /* CONFIG_DEBUG_FS */

static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud)
{
	const struct udma_match_data *match_data = ud->match_data;
	u8 tpl;

	if (!match_data->enable_memcpy_support)
		return DMAENGINE_ALIGN_8_BYTES;

	/* Get the highest TPL level the device supports for memcpy */
	if (ud->bchan_cnt)
		tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0);
	else if (ud->tchan_cnt)
		tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0);
	else
		return DMAENGINE_ALIGN_8_BYTES;

	switch (match_data->burst_size[tpl]) {
	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES:
		return DMAENGINE_ALIGN_256_BYTES;
	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES:
		return DMAENGINE_ALIGN_128_BYTES;
	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES:
		fallthrough;
	default:
		return DMAENGINE_ALIGN_64_BYTES;
	}
}

#define TI_UDMAC_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \

@@ -5202,7 +5307,6 @@ static int udma_probe(struct platform_device *pdev)
	ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
	ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
				       DESC_METADATA_ENGINE;
	if (ud->match_data->enable_memcpy_support &&

@@ -5284,6 +5388,9 @@ static int udma_probe(struct platform_device *pdev)
		INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
	}

	/* Configure the copy_align to the maximum burst size the device supports */
	ud->ddev.copy_align = udma_get_copy_align(ud);

	ret = dma_async_device_register(&ud->ddev);
	if (ret) {
		dev_err(dev, "failed to register slave DMA engine: %d\n", ret);

@@ -800,7 +800,7 @@ xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

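The GFP_KERNEL to GFP_NOWAIT switch above follows the dmaengine rule that device_prep_*() callbacks can be invoked from atomic context (for example from another driver's completion callback), so allocations on that path must not sleep. A sketch of the rule, with an illustrative name:

static void *sketch_prep_alloc(size_t size)
{
	/* never GFP_KERNEL on the prep path: it may sleep */
	return kzalloc(size, GFP_NOWAIT);
}
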
@@ -1,941 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2015 Linaro.
- */
-#include <linux/sched.h>
-#include <linux/device.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/of_device.h>
-#include <linux/of.h>
-#include <linux/clk.h>
-#include <linux/of_dma.h>
-
-#include "virt-dma.h"
-
-#define DRIVER_NAME		"zx-dma"
-#define DMA_ALIGN		4
-#define DMA_MAX_SIZE		(0x10000 - 512)
-#define LLI_BLOCK_SIZE		(4 * PAGE_SIZE)
-
-#define REG_ZX_SRC_ADDR			0x00
-#define REG_ZX_DST_ADDR			0x04
-#define REG_ZX_TX_X_COUNT		0x08
-#define REG_ZX_TX_ZY_COUNT		0x0c
-#define REG_ZX_SRC_ZY_STEP		0x10
-#define REG_ZX_DST_ZY_STEP		0x14
-#define REG_ZX_LLI_ADDR			0x1c
-#define REG_ZX_CTRL			0x20
-#define REG_ZX_TC_IRQ			0x800
-#define REG_ZX_SRC_ERR_IRQ		0x804
-#define REG_ZX_DST_ERR_IRQ		0x808
-#define REG_ZX_CFG_ERR_IRQ		0x80c
-#define REG_ZX_TC_IRQ_RAW		0x810
-#define REG_ZX_SRC_ERR_IRQ_RAW		0x814
-#define REG_ZX_DST_ERR_IRQ_RAW		0x818
-#define REG_ZX_CFG_ERR_IRQ_RAW		0x81c
-#define REG_ZX_STATUS			0x820
-#define REG_ZX_DMA_GRP_PRIO		0x824
-#define REG_ZX_DMA_ARB			0x828
-
-#define ZX_FORCE_CLOSE			BIT(31)
-#define ZX_DST_BURST_WIDTH(x)		(((x) & 0x7) << 13)
-#define ZX_MAX_BURST_LEN		16
-#define ZX_SRC_BURST_LEN(x)		(((x) & 0xf) << 9)
-#define ZX_SRC_BURST_WIDTH(x)		(((x) & 0x7) << 6)
-#define ZX_IRQ_ENABLE_ALL		(3 << 4)
-#define ZX_DST_FIFO_MODE		BIT(3)
-#define ZX_SRC_FIFO_MODE		BIT(2)
-#define ZX_SOFT_REQ			BIT(1)
-#define ZX_CH_ENABLE			BIT(0)
-
-#define ZX_DMA_BUSWIDTHS \
-	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
-	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
-	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
-	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
-	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
-
-enum zx_dma_burst_width {
-	ZX_DMA_WIDTH_8BIT	= 0,
-	ZX_DMA_WIDTH_16BIT	= 1,
-	ZX_DMA_WIDTH_32BIT	= 2,
-	ZX_DMA_WIDTH_64BIT	= 3,
-};
-
-struct zx_desc_hw {
-	u32 saddr;
-	u32 daddr;
-	u32 src_x;
-	u32 src_zy;
-	u32 src_zy_step;
-	u32 dst_zy_step;
-	u32 reserved1;
-	u32 lli;
-	u32 ctr;
-	u32 reserved[7]; /* pack as hardware registers region size */
-} __aligned(32);
-
-struct zx_dma_desc_sw {
-	struct virt_dma_desc	vd;
-	dma_addr_t		desc_hw_lli;
-	size_t			desc_num;
-	size_t			size;
-	struct zx_desc_hw	*desc_hw;
-};
-
-struct zx_dma_phy;
-
-struct zx_dma_chan {
-	struct dma_slave_config slave_cfg;
-	int			id; /* Request phy chan id */
-	u32			ccfg;
-	u32			cyclic;
-	struct virt_dma_chan	vc;
-	struct zx_dma_phy	*phy;
-	struct list_head	node;
-	dma_addr_t		dev_addr;
-	enum dma_status		status;
-};
-
-struct zx_dma_phy {
-	u32			idx;
-	void __iomem		*base;
-	struct zx_dma_chan	*vchan;
-	struct zx_dma_desc_sw	*ds_run;
-	struct zx_dma_desc_sw	*ds_done;
-};
-
-struct zx_dma_dev {
-	struct dma_device	slave;
-	void __iomem		*base;
-	spinlock_t		lock; /* lock for ch and phy */
-	struct list_head	chan_pending;
-	struct zx_dma_phy	*phy;
-	struct zx_dma_chan	*chans;
-	struct clk		*clk;
-	struct dma_pool		*pool;
-	u32			dma_channels;
-	u32			dma_requests;
-	int			irq;
-};
-
-#define to_zx_dma(dmadev) container_of(dmadev, struct zx_dma_dev, slave)
-
-static struct zx_dma_chan *to_zx_chan(struct dma_chan *chan)
-{
-	return container_of(chan, struct zx_dma_chan, vc.chan);
-}
-
-static void zx_dma_terminate_chan(struct zx_dma_phy *phy, struct zx_dma_dev *d)
-{
-	u32 val = 0;
-
-	val = readl_relaxed(phy->base + REG_ZX_CTRL);
-	val &= ~ZX_CH_ENABLE;
-	val |= ZX_FORCE_CLOSE;
-	writel_relaxed(val, phy->base + REG_ZX_CTRL);
-
-	val = 0x1 << phy->idx;
-	writel_relaxed(val, d->base + REG_ZX_TC_IRQ_RAW);
-	writel_relaxed(val, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
-	writel_relaxed(val, d->base + REG_ZX_DST_ERR_IRQ_RAW);
-	writel_relaxed(val, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
-}
-
-static void zx_dma_set_desc(struct zx_dma_phy *phy, struct zx_desc_hw *hw)
-{
-	writel_relaxed(hw->saddr, phy->base + REG_ZX_SRC_ADDR);
-	writel_relaxed(hw->daddr, phy->base + REG_ZX_DST_ADDR);
-	writel_relaxed(hw->src_x, phy->base + REG_ZX_TX_X_COUNT);
-	writel_relaxed(0, phy->base + REG_ZX_TX_ZY_COUNT);
-	writel_relaxed(0, phy->base + REG_ZX_SRC_ZY_STEP);
-	writel_relaxed(0, phy->base + REG_ZX_DST_ZY_STEP);
-	writel_relaxed(hw->lli, phy->base + REG_ZX_LLI_ADDR);
-	writel_relaxed(hw->ctr, phy->base + REG_ZX_CTRL);
-}
-
-static u32 zx_dma_get_curr_lli(struct zx_dma_phy *phy)
-{
-	return readl_relaxed(phy->base + REG_ZX_LLI_ADDR);
-}
-
-static u32 zx_dma_get_chan_stat(struct zx_dma_dev *d)
-{
-	return readl_relaxed(d->base + REG_ZX_STATUS);
-}
-
-static void zx_dma_init_state(struct zx_dma_dev *d)
-{
-	/* set same priority */
-	writel_relaxed(0x0, d->base + REG_ZX_DMA_ARB);
-	/* clear all irq */
-	writel_relaxed(0xffffffff, d->base + REG_ZX_TC_IRQ_RAW);
-	writel_relaxed(0xffffffff, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
-	writel_relaxed(0xffffffff, d->base + REG_ZX_DST_ERR_IRQ_RAW);
-	writel_relaxed(0xffffffff, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
-}
-
-static int zx_dma_start_txd(struct zx_dma_chan *c)
-{
-	struct zx_dma_dev *d = to_zx_dma(c->vc.chan.device);
-	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
-
-	if (!c->phy)
-		return -EAGAIN;
-
-	if (BIT(c->phy->idx) & zx_dma_get_chan_stat(d))
-		return -EAGAIN;
-
-	if (vd) {
-		struct zx_dma_desc_sw *ds =
-			container_of(vd, struct zx_dma_desc_sw, vd);
-		/*
-		 * fetch and remove request from vc->desc_issued
-		 * so vc->desc_issued only contains desc pending
-		 */
-		list_del(&ds->vd.node);
-		c->phy->ds_run = ds;
-		c->phy->ds_done = NULL;
-		/* start dma */
-		zx_dma_set_desc(c->phy, ds->desc_hw);
-		return 0;
-	}
-	c->phy->ds_done = NULL;
-	c->phy->ds_run = NULL;
-	return -EAGAIN;
-}
-
-static void zx_dma_task(struct zx_dma_dev *d)
-{
-	struct zx_dma_phy *p;
-	struct zx_dma_chan *c, *cn;
-	unsigned pch, pch_alloc = 0;
-	unsigned long flags;
-
-	/* check new dma request of running channel in vc->desc_issued */
-	list_for_each_entry_safe(c, cn, &d->slave.channels,
-				 vc.chan.device_node) {
-		spin_lock_irqsave(&c->vc.lock, flags);
-		p = c->phy;
-		if (p && p->ds_done && zx_dma_start_txd(c)) {
-			/* No current txd associated with this channel */
-			dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
-			/* Mark this channel free */
-			c->phy = NULL;
-			p->vchan = NULL;
-		}
-		spin_unlock_irqrestore(&c->vc.lock, flags);
-	}
-
-	/* check new channel request in d->chan_pending */
-	spin_lock_irqsave(&d->lock, flags);
-	while (!list_empty(&d->chan_pending)) {
-		c = list_first_entry(&d->chan_pending,
-				     struct zx_dma_chan, node);
-		p = &d->phy[c->id];
-		if (!p->vchan) {
-			/* remove from d->chan_pending */
-			list_del_init(&c->node);
-			pch_alloc |= 1 << c->id;
-			/* Mark this channel allocated */
-			p->vchan = c;
-			c->phy = p;
-		} else {
-			dev_dbg(d->slave.dev, "pchan %u: busy!\n", c->id);
-		}
-	}
-	spin_unlock_irqrestore(&d->lock, flags);
-
-	for (pch = 0; pch < d->dma_channels; pch++) {
-		if (pch_alloc & (1 << pch)) {
-			p = &d->phy[pch];
-			c = p->vchan;
-			if (c) {
-				spin_lock_irqsave(&c->vc.lock, flags);
-				zx_dma_start_txd(c);
-				spin_unlock_irqrestore(&c->vc.lock, flags);
-			}
-		}
-	}
-}
-
-static irqreturn_t zx_dma_int_handler(int irq, void *dev_id)
-{
-	struct zx_dma_dev *d = (struct zx_dma_dev *)dev_id;
-	struct zx_dma_phy *p;
-	struct zx_dma_chan *c;
-	u32 tc = readl_relaxed(d->base + REG_ZX_TC_IRQ);
-	u32 serr = readl_relaxed(d->base + REG_ZX_SRC_ERR_IRQ);
-	u32 derr = readl_relaxed(d->base + REG_ZX_DST_ERR_IRQ);
-	u32 cfg = readl_relaxed(d->base + REG_ZX_CFG_ERR_IRQ);
-	u32 i, irq_chan = 0, task = 0;
-
-	while (tc) {
-		i = __ffs(tc);
-		tc &= ~BIT(i);
-		p = &d->phy[i];
-		c = p->vchan;
-		if (c) {
-			spin_lock(&c->vc.lock);
-			if (c->cyclic) {
-				vchan_cyclic_callback(&p->ds_run->vd);
-			} else {
-				vchan_cookie_complete(&p->ds_run->vd);
-				p->ds_done = p->ds_run;
-				task = 1;
-			}
-			spin_unlock(&c->vc.lock);
-			irq_chan |= BIT(i);
-		}
-	}
-
-	if (serr || derr || cfg)
-		dev_warn(d->slave.dev, "DMA ERR src 0x%x, dst 0x%x, cfg 0x%x\n",
-			 serr, derr, cfg);
-
-	writel_relaxed(irq_chan, d->base + REG_ZX_TC_IRQ_RAW);
-	writel_relaxed(serr, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
-	writel_relaxed(derr, d->base + REG_ZX_DST_ERR_IRQ_RAW);
-	writel_relaxed(cfg, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
-
-	if (task)
-		zx_dma_task(d);
-	return IRQ_HANDLED;
-}
-
-static void zx_dma_free_chan_resources(struct dma_chan *chan)
-{
-	struct zx_dma_chan *c = to_zx_chan(chan);
-	struct zx_dma_dev *d = to_zx_dma(chan->device);
-	unsigned long flags;
-
-	spin_lock_irqsave(&d->lock, flags);
-	list_del_init(&c->node);
-	spin_unlock_irqrestore(&d->lock, flags);
-
-	vchan_free_chan_resources(&c->vc);
-	c->ccfg = 0;
-}
-
-static enum dma_status zx_dma_tx_status(struct dma_chan *chan,
-					dma_cookie_t cookie,
-					struct dma_tx_state *state)
-{
-	struct zx_dma_chan *c = to_zx_chan(chan);
-	struct zx_dma_phy *p;
-	struct virt_dma_desc *vd;
-	unsigned long flags;
-	enum dma_status ret;
-	size_t bytes = 0;
-
-	ret = dma_cookie_status(&c->vc.chan, cookie, state);
-	if (ret == DMA_COMPLETE || !state)
-		return ret;
-
-	spin_lock_irqsave(&c->vc.lock, flags);
-	p = c->phy;
-	ret = c->status;
-
-	/*
-	 * If the cookie is on our issue queue, then the residue is
-	 * its total size.
-	 */
-	vd = vchan_find_desc(&c->vc, cookie);
-	if (vd) {
-		bytes = container_of(vd, struct zx_dma_desc_sw, vd)->size;
-	} else if ((!p) || (!p->ds_run)) {
-		bytes = 0;
-	} else {
-		struct zx_dma_desc_sw *ds = p->ds_run;
-		u32 clli = 0, index = 0;
-
-		bytes = 0;
-		clli = zx_dma_get_curr_lli(p);
-		index = (clli - ds->desc_hw_lli) /
-			sizeof(struct zx_desc_hw) + 1;
-		for (; index < ds->desc_num; index++) {
-			bytes += ds->desc_hw[index].src_x;
-			/* end of lli */
-			if (!ds->desc_hw[index].lli)
-				break;
-		}
-	}
-	spin_unlock_irqrestore(&c->vc.lock, flags);
-	dma_set_residue(state, bytes);
-	return ret;
-}
-
-static void zx_dma_issue_pending(struct dma_chan *chan)
-{
-	struct zx_dma_chan *c = to_zx_chan(chan);
-	struct zx_dma_dev *d = to_zx_dma(chan->device);
-	unsigned long flags;
-	int issue = 0;
-
-	spin_lock_irqsave(&c->vc.lock, flags);
-	/* add request to vc->desc_issued */
-	if (vchan_issue_pending(&c->vc)) {
-		spin_lock(&d->lock);
-		if (!c->phy && list_empty(&c->node)) {
-			/* if new channel, add chan_pending */
-			list_add_tail(&c->node, &d->chan_pending);
-			issue = 1;
-			dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
-		}
-		spin_unlock(&d->lock);
-	} else {
-		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
-	}
-	spin_unlock_irqrestore(&c->vc.lock, flags);
-
-	if (issue)
-		zx_dma_task(d);
-}
-
-static void zx_dma_fill_desc(struct zx_dma_desc_sw *ds, dma_addr_t dst,
-			     dma_addr_t src, size_t len, u32 num, u32 ccfg)
-{
-	if ((num + 1) < ds->desc_num)
-		ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
-			sizeof(struct zx_desc_hw);
-	ds->desc_hw[num].saddr = src;
-	ds->desc_hw[num].daddr = dst;
-	ds->desc_hw[num].src_x = len;
-	ds->desc_hw[num].ctr = ccfg;
-}
-
-static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num,
-						     struct dma_chan *chan)
-{
-	struct zx_dma_chan *c = to_zx_chan(chan);
-	struct zx_dma_desc_sw *ds;
-	struct zx_dma_dev *d = to_zx_dma(chan->device);
-	int lli_limit = LLI_BLOCK_SIZE / sizeof(struct zx_desc_hw);
-
-	if (num > lli_limit) {
-		dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
-			&c->vc, num, lli_limit);
-		return NULL;
-	}
-
-	ds = kzalloc(sizeof(*ds), GFP_ATOMIC);
-	if (!ds)
-		return NULL;
-
-	ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
-	if (!ds->desc_hw) {
-		dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
-		kfree(ds);
-		return NULL;
-	}
-	ds->desc_num = num;
-	return ds;
-}
-
-static enum zx_dma_burst_width zx_dma_burst_width(enum dma_slave_buswidth width)
-{
-	switch (width) {
-	case DMA_SLAVE_BUSWIDTH_1_BYTE:
-	case DMA_SLAVE_BUSWIDTH_2_BYTES:
-	case DMA_SLAVE_BUSWIDTH_4_BYTES:
-	case DMA_SLAVE_BUSWIDTH_8_BYTES:
-		return ffs(width) - 1;
-	default:
-		return ZX_DMA_WIDTH_32BIT;
-	}
-}
-
-static int zx_pre_config(struct zx_dma_chan *c, enum dma_transfer_direction dir)
-{
-	struct dma_slave_config *cfg = &c->slave_cfg;
-	enum zx_dma_burst_width src_width;
-	enum zx_dma_burst_width dst_width;
-	u32 maxburst = 0;
-
-	switch (dir) {
-	case DMA_MEM_TO_MEM:
-		c->ccfg = ZX_CH_ENABLE | ZX_SOFT_REQ
-			| ZX_SRC_BURST_LEN(ZX_MAX_BURST_LEN - 1)
-			| ZX_SRC_BURST_WIDTH(ZX_DMA_WIDTH_32BIT)
-			| ZX_DST_BURST_WIDTH(ZX_DMA_WIDTH_32BIT);
-		break;
-	case DMA_MEM_TO_DEV:
-		c->dev_addr = cfg->dst_addr;
-		/* dst len is calculated from src width, len and dst width.
-		 * We need make sure dst len not exceed MAX LEN.
-		 * Trailing single transaction that does not fill a full
-		 * burst also require identical src/dst data width.
-		 */
-		dst_width = zx_dma_burst_width(cfg->dst_addr_width);
-		maxburst = cfg->dst_maxburst;
-		maxburst = maxburst < ZX_MAX_BURST_LEN ?
-				maxburst : ZX_MAX_BURST_LEN;
-		c->ccfg = ZX_DST_FIFO_MODE | ZX_CH_ENABLE
-			| ZX_SRC_BURST_LEN(maxburst - 1)
-			| ZX_SRC_BURST_WIDTH(dst_width)
-			| ZX_DST_BURST_WIDTH(dst_width);
-		break;
-	case DMA_DEV_TO_MEM:
-		c->dev_addr = cfg->src_addr;
-		src_width = zx_dma_burst_width(cfg->src_addr_width);
-		maxburst = cfg->src_maxburst;
-		maxburst = maxburst < ZX_MAX_BURST_LEN ?
-				maxburst : ZX_MAX_BURST_LEN;
-		c->ccfg = ZX_SRC_FIFO_MODE | ZX_CH_ENABLE
-			| ZX_SRC_BURST_LEN(maxburst - 1)
-			| ZX_SRC_BURST_WIDTH(src_width)
-			| ZX_DST_BURST_WIDTH(src_width);
-		break;
-	default:
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static struct dma_async_tx_descriptor *zx_dma_prep_memcpy(
-	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
-	size_t len, unsigned long flags)
-{
-	struct zx_dma_chan *c = to_zx_chan(chan);
-	struct zx_dma_desc_sw *ds;
-	size_t copy = 0;
-	int num = 0;
-
-	if (!len)
-		return NULL;
-
-	if (zx_pre_config(c, DMA_MEM_TO_MEM))
-		return NULL;
-
-	num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
-
-	ds = zx_alloc_desc_resource(num, chan);
-	if (!ds)
-		return NULL;
-
-	ds->size = len;
-	num = 0;
-
-	do {
-		copy = min_t(size_t, len, DMA_MAX_SIZE);
-		zx_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
-
-		src += copy;
-		dst += copy;
-		len -= copy;
-	} while (len);
-
-	c->cyclic = 0;
-	ds->desc_hw[num - 1].lli = 0;	/* end of link */
-	ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
-	return vchan_tx_prep(&c->vc, &ds->vd, flags);
-}
-
-static struct dma_async_tx_descriptor *zx_dma_prep_slave_sg(
-	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
-	enum dma_transfer_direction dir, unsigned long flags, void *context)
-{
-	struct zx_dma_chan *c = to_zx_chan(chan);
-	struct zx_dma_desc_sw *ds;
-	size_t len, avail, total = 0;
-	struct scatterlist *sg;
-	dma_addr_t addr, src = 0, dst = 0;
-	int num = sglen, i;
-
-	if (!sgl)
-		return NULL;
-
-	if (zx_pre_config(c, dir))
-		return NULL;
-
-	for_each_sg(sgl, sg, sglen, i) {
-		avail = sg_dma_len(sg);
-		if (avail > DMA_MAX_SIZE)
-			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
-	}
-
-	ds = zx_alloc_desc_resource(num, chan);
-	if (!ds)
-		return NULL;
-
-	c->cyclic = 0;
-	num = 0;
-	for_each_sg(sgl, sg, sglen, i) {
-		addr = sg_dma_address(sg);
-		avail = sg_dma_len(sg);
-		total += avail;
-
-		do {
-			len = min_t(size_t, avail, DMA_MAX_SIZE);
-
-			if (dir == DMA_MEM_TO_DEV) {
-				src = addr;
-				dst = c->dev_addr;
-			} else if (dir == DMA_DEV_TO_MEM) {
-				src = c->dev_addr;
-				dst = addr;
-			}
-
-			zx_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);
-
-			addr += len;
-			avail -= len;
-		} while (avail);
-	}
-
-	ds->desc_hw[num - 1].lli = 0;	/* end of link */
-	ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
-	ds->size = total;
-	return vchan_tx_prep(&c->vc, &ds->vd, flags);
-}
-
-static struct dma_async_tx_descriptor *zx_dma_prep_dma_cyclic(
-		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
-		size_t period_len, enum dma_transfer_direction dir,
-		unsigned long flags)
-{
-	struct zx_dma_chan *c = to_zx_chan(chan);
-	struct zx_dma_desc_sw *ds;
-	dma_addr_t src = 0, dst = 0;
-	int num_periods = buf_len / period_len;
-	int buf = 0, num = 0;
-
-	if (period_len > DMA_MAX_SIZE) {
-		dev_err(chan->device->dev, "maximum period size exceeded\n");
-		return NULL;
-	}
-
-	if (zx_pre_config(c, dir))
-		return NULL;
-
-	ds = zx_alloc_desc_resource(num_periods, chan);
-	if (!ds)
-		return NULL;
-	c->cyclic = 1;
-
-	while (buf < buf_len) {
-		if (dir == DMA_MEM_TO_DEV) {
-			src = dma_addr;
-			dst = c->dev_addr;
-		} else if (dir == DMA_DEV_TO_MEM) {
-			src = c->dev_addr;
-			dst = dma_addr;
-		}
-		zx_dma_fill_desc(ds, dst, src, period_len, num++,
-				 c->ccfg | ZX_IRQ_ENABLE_ALL);
-		dma_addr += period_len;
-		buf += period_len;
-	}
-
-	ds->desc_hw[num - 1].lli = ds->desc_hw_lli;
-	ds->size = buf_len;
-	return vchan_tx_prep(&c->vc, &ds->vd, flags);
-}
-
-static int zx_dma_config(struct dma_chan *chan,
-			 struct dma_slave_config *cfg)
-{
-	struct zx_dma_chan *c = to_zx_chan(chan);
-
-	if (!cfg)
-		return -EINVAL;
-
-	memcpy(&c->slave_cfg, cfg, sizeof(*cfg));
-
-	return 0;
-}
-
-static int zx_dma_terminate_all(struct dma_chan *chan)
-{
-	struct zx_dma_chan *c = to_zx_chan(chan);
-	struct zx_dma_dev *d = to_zx_dma(chan->device);
-	struct zx_dma_phy *p = c->phy;
-	unsigned long flags;
-	LIST_HEAD(head);
-
-	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
-
-	/* Prevent this channel being scheduled */
-	spin_lock(&d->lock);
-	list_del_init(&c->node);
-	spin_unlock(&d->lock);
-
-	/* Clear the tx descriptor lists */
-	spin_lock_irqsave(&c->vc.lock, flags);
-	vchan_get_all_descriptors(&c->vc, &head);
-	if (p) {
-		/* vchan is assigned to a pchan - stop the channel */
-		zx_dma_terminate_chan(p, d);
-		c->phy = NULL;
-		p->vchan = NULL;
-		p->ds_run = NULL;
-		p->ds_done = NULL;
-	}
-	spin_unlock_irqrestore(&c->vc.lock, flags);
-	vchan_dma_desc_free_list(&c->vc, &head);
-
-	return 0;
-}
-
-static int zx_dma_transfer_pause(struct dma_chan *chan)
-{
-	struct zx_dma_chan *c = to_zx_chan(chan);
-	u32 val = 0;
-
-	val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
-	val &= ~ZX_CH_ENABLE;
-	writel_relaxed(val, c->phy->base + REG_ZX_CTRL);
-
-	return 0;
-}
-
-static int zx_dma_transfer_resume(struct dma_chan *chan)
-{
-	struct zx_dma_chan *c = to_zx_chan(chan);
-	u32 val = 0;
-
-	val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
-	val |= ZX_CH_ENABLE;
-	writel_relaxed(val, c->phy->base + REG_ZX_CTRL);
-
-	return 0;
-}
-
-static void zx_dma_free_desc(struct virt_dma_desc *vd)
-{
-	struct zx_dma_desc_sw *ds =
-		container_of(vd, struct zx_dma_desc_sw, vd);
-	struct zx_dma_dev *d = to_zx_dma(vd->tx.chan->device);
-
-	dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
-	kfree(ds);
-}
-
-static const struct of_device_id zx6702_dma_dt_ids[] = {
-	{ .compatible = "zte,zx296702-dma", },
-	{}
-};
-MODULE_DEVICE_TABLE(of, zx6702_dma_dt_ids);
-
-static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
-					       struct of_dma *ofdma)
-{
-	struct zx_dma_dev *d = ofdma->of_dma_data;
-	unsigned int request = dma_spec->args[0];
-	struct dma_chan *chan;
-	struct zx_dma_chan *c;
-
-	if (request >= d->dma_requests)
-		return NULL;
-
-	chan = dma_get_any_slave_channel(&d->slave);
-	if (!chan) {
-		dev_err(d->slave.dev, "get channel fail in %s.\n", __func__);
-		return NULL;
-	}
-	c = to_zx_chan(chan);
-	c->id = request;
-	dev_info(d->slave.dev, "zx_dma: pchan %u: alloc vchan %p\n",
-		 c->id, &c->vc);
-	return chan;
-}
-
-static int zx_dma_probe(struct platform_device *op)
-{
-	struct zx_dma_dev *d;
-	int i, ret = 0;
-
-	d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
-	if (!d)
-		return -ENOMEM;
-
-	d->base = devm_platform_ioremap_resource(op, 0);
-	if (IS_ERR(d->base))
-		return PTR_ERR(d->base);
-
-	of_property_read_u32((&op->dev)->of_node,
-			     "dma-channels", &d->dma_channels);
-	of_property_read_u32((&op->dev)->of_node,
-			     "dma-requests", &d->dma_requests);
-	if (!d->dma_requests || !d->dma_channels)
-		return -EINVAL;
-
-	d->clk = devm_clk_get(&op->dev, NULL);
-	if (IS_ERR(d->clk)) {
-		dev_err(&op->dev, "no dma clk\n");
-		return PTR_ERR(d->clk);
-	}
-
-	d->irq = platform_get_irq(op, 0);
-	ret = devm_request_irq(&op->dev, d->irq, zx_dma_int_handler,
-			       0, DRIVER_NAME, d);
-	if (ret)
-		return ret;
-
-	/* A DMA memory pool for LLIs, align on 32-byte boundary */
-	d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
-				   LLI_BLOCK_SIZE, 32, 0);
-	if (!d->pool)
-		return -ENOMEM;
-
-	/* init phy channel */
-	d->phy = devm_kcalloc(&op->dev,
-		d->dma_channels, sizeof(struct zx_dma_phy), GFP_KERNEL);
-	if (!d->phy)
-		return -ENOMEM;
-
-	for (i = 0; i < d->dma_channels; i++) {
-		struct zx_dma_phy *p = &d->phy[i];
-
-		p->idx = i;
-		p->base = d->base + i * 0x40;
-	}
-
-	INIT_LIST_HEAD(&d->slave.channels);
-	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
-	dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
-	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
-	dma_cap_set(DMA_PRIVATE, d->slave.cap_mask);
-	d->slave.dev = &op->dev;
-	d->slave.device_free_chan_resources = zx_dma_free_chan_resources;
-	d->slave.device_tx_status = zx_dma_tx_status;
-	d->slave.device_prep_dma_memcpy = zx_dma_prep_memcpy;
-	d->slave.device_prep_slave_sg = zx_dma_prep_slave_sg;
-	d->slave.device_prep_dma_cyclic = zx_dma_prep_dma_cyclic;
-	d->slave.device_issue_pending = zx_dma_issue_pending;
-	d->slave.device_config = zx_dma_config;
-	d->slave.device_terminate_all = zx_dma_terminate_all;
-	d->slave.device_pause = zx_dma_transfer_pause;
-	d->slave.device_resume = zx_dma_transfer_resume;
-	d->slave.copy_align = DMA_ALIGN;
-	d->slave.src_addr_widths = ZX_DMA_BUSWIDTHS;
-	d->slave.dst_addr_widths = ZX_DMA_BUSWIDTHS;
-	d->slave.directions = BIT(DMA_MEM_TO_MEM) | BIT(DMA_MEM_TO_DEV)
-			| BIT(DMA_DEV_TO_MEM);
-	d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
-
-	/* init virtual channel */
-	d->chans = devm_kcalloc(&op->dev,
-		d->dma_requests, sizeof(struct zx_dma_chan), GFP_KERNEL);
-	if (!d->chans)
-		return -ENOMEM;
-
-	for (i = 0; i < d->dma_requests; i++) {
-		struct zx_dma_chan *c = &d->chans[i];
-
-		c->status = DMA_IN_PROGRESS;
-		INIT_LIST_HEAD(&c->node);
-		c->vc.desc_free = zx_dma_free_desc;
-		vchan_init(&c->vc, &d->slave);
-	}
-
-	/* Enable clock before accessing registers */
-	ret = clk_prepare_enable(d->clk);
-	if (ret < 0) {
-		dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
-		goto zx_dma_out;
-	}
-
-	zx_dma_init_state(d);
-
-	spin_lock_init(&d->lock);
-	INIT_LIST_HEAD(&d->chan_pending);
-	platform_set_drvdata(op, d);
-
-	ret = dma_async_device_register(&d->slave);
-	if (ret)
-		goto clk_dis;
-
-	ret = of_dma_controller_register((&op->dev)->of_node,
-					 zx_of_dma_simple_xlate, d);
-	if (ret)
-		goto of_dma_register_fail;
-
-	dev_info(&op->dev, "initialized\n");
-	return 0;
-
-of_dma_register_fail:
-	dma_async_device_unregister(&d->slave);
-clk_dis:
-	clk_disable_unprepare(d->clk);
-zx_dma_out:
-	return ret;
-}
-
-static int zx_dma_remove(struct platform_device *op)
-{
-	struct zx_dma_chan *c, *cn;
-	struct zx_dma_dev *d = platform_get_drvdata(op);
-
-	/* explictly free the irq */
-	devm_free_irq(&op->dev, d->irq, d);
-
-	dma_async_device_unregister(&d->slave);
-	of_dma_controller_free((&op->dev)->of_node);
-
-	list_for_each_entry_safe(c, cn, &d->slave.channels,
-				 vc.chan.device_node) {
-		list_del(&c->vc.chan.device_node);
-	}
-	clk_disable_unprepare(d->clk);
-
-	return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int zx_dma_suspend_dev(struct device *dev)
-{
-	struct zx_dma_dev *d = dev_get_drvdata(dev);
-	u32 stat = 0;
-
-	stat = zx_dma_get_chan_stat(d);
-	if (stat) {
-		dev_warn(d->slave.dev,
-			 "chan %d is running fail to suspend\n", stat);
-		return -1;
-	}
-	clk_disable_unprepare(d->clk);
-	return 0;
-}
-
-static int zx_dma_resume_dev(struct device *dev)
-{
-	struct zx_dma_dev *d = dev_get_drvdata(dev);
-	int ret = 0;
-
-	ret = clk_prepare_enable(d->clk);
-	if (ret < 0) {
-		dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
-		return ret;
-	}
-	zx_dma_init_state(d);
-	return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(zx_dma_pmops, zx_dma_suspend_dev, zx_dma_resume_dev);
-
-static struct platform_driver zx_pdma_driver = {
-	.driver = {
-		.name = DRIVER_NAME,
-		.pm = &zx_dma_pmops,
-		.of_match_table = zx6702_dma_dt_ids,
-	},
-	.probe = zx_dma_probe,
-	.remove = zx_dma_remove,
-};
-
-module_platform_driver(zx_pdma_driver);
-
-MODULE_DESCRIPTION("ZTE ZX296702 DMA Driver");
-MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
-MODULE_LICENSE("GPL v2");
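One idiom worth noting from the removed driver: zx_dma_burst_width() relies on the DMA_SLAVE_BUSWIDTH_* values being the bus width in bytes (1, 2, 4, 8, all powers of two), so ffs(width) - 1 yields the hardware's log2 encoding directly. A standalone sketch of the same trick:

	#include <linux/bitops.h>

	/* ffs() is the 1-based index of the lowest set bit, so for a
	 * power-of-two width ffs(w) - 1 == log2(w): 1->0, 2->1, 4->2, 8->3. */
	static unsigned int width_to_burst_code(unsigned int width_bytes)
	{
		return ffs(width_bytes) - 1;
	}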
@@ -42,14 +42,14 @@ enum psil_endpoint_type {
 /**
  * struct psil_endpoint_config - PSI-L Endpoint configuration
  * @ep_type: PSI-L endpoint type
+ * @channel_tpl: Desired throughput level for the channel
  * @pkt_mode: If set, the channel must be in Packet mode, otherwise in
  *	      TR mode
  * @notdpkt: TDCM must be suppressed on the TX channel
  * @needs_epib: Endpoint needs EPIB
- * @psd_size: If set, PSdata is used by the endpoint
- * @channel_tpl: Desired throughput level for the channel
  * @pdma_acc32: ACC32 must be enabled on the PDMA side
  * @pdma_burst: BURST must be enabled on the PDMA side
+ * @psd_size: If set, PSdata is used by the endpoint
  * @mapped_channel_id: PKTDMA thread to channel mapping for mapped channels.
  *		       The thread must be serviced by the specified channel if
  *		       mapped_channel_id is >= 0 in case of PKTDMA
@@ -62,23 +62,22 @@ enum psil_endpoint_type {
  */
 struct psil_endpoint_config {
 	enum psil_endpoint_type ep_type;
+	enum udma_tp_level channel_tpl;
 
 	unsigned pkt_mode:1;
 	unsigned notdpkt:1;
 	unsigned needs_epib:1;
-	u32 psd_size;
-	enum udma_tp_level channel_tpl;
-
 	/* PDMA properties, valid for PSIL_EP_PDMA_* */
 	unsigned pdma_acc32:1;
 	unsigned pdma_burst:1;
 
+	u32 psd_size;
 	/* PKDMA mapped channel */
-	int mapped_channel_id;
+	s16 mapped_channel_id;
 	/* PKTDMA tflow and rflow ranges for mapped channel */
 	u16 flow_start;
 	u16 flow_num;
-	u16 default_flow_id;
+	s16 default_flow_id;
 };
 
 int psil_set_new_ep_config(struct device *dev, const char *name,
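The second hunk is where the size win comes from: the u32 moves next to the other multi-byte members, and the two id fields shrink to s16 (both use negative values such as -1 to mean unset, so they must stay signed). A toy illustration of how member ordering changes padding; the sizes assume a typical LP64 ABI:

	#include <stdio.h>

	struct before { int a; char b; int c; char d; };	/* padding after b and d */
	struct after  { int a; int c; char b; char d; };	/* chars packed together */

	int main(void)
	{
		/* typically prints 16 and 12 */
		printf("%zu %zu\n", sizeof(struct before), sizeof(struct after));
		return 0;
	}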
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _MMP_PDMA_H_
-#define _MMP_PDMA_H_
-
-struct dma_chan;
-
-#ifdef CONFIG_MMP_PDMA
-bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param);
-#else
-static inline bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
-{
-	return false;
-}
-#endif
-
-#endif /* _MMP_PDMA_H_ */
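Stub headers like this one existed so client code could grab a specific controller's channel through the filter-function form of the request API; with the driver now modular and devicetree-based lookup in place, the export is presumably no longer needed. A sketch of the legacy call site (param is whatever the filter expected):

	#include <linux/dmaengine.h>

	static struct dma_chan *request_pdma_chan(void *param)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		/* dma_request_channel() calls the filter on each candidate
		 * channel until it returns true */
		return dma_request_channel(mask, mmp_pdma_filter_fn, param);
	}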
@@ -745,6 +745,8 @@ enum dmaengine_alignment {
 	DMAENGINE_ALIGN_16_BYTES = 4,
 	DMAENGINE_ALIGN_32_BYTES = 5,
 	DMAENGINE_ALIGN_64_BYTES = 6,
+	DMAENGINE_ALIGN_128_BYTES = 7,
+	DMAENGINE_ALIGN_256_BYTES = 8,
 };
 
 /**
@@ -1,61 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Header file for the Atmel AHB DMA Controller driver
- *
- * Copyright (C) 2008 Atmel Corporation
- */
-#ifndef AT_HDMAC_H
-#define AT_HDMAC_H
-
-#include <linux/dmaengine.h>
-
-/**
- * struct at_dma_platform_data - Controller configuration parameters
- * @nr_channels: Number of channels supported by hardware (max 8)
- * @cap_mask: dma_capability flags supported by the platform
- */
-struct at_dma_platform_data {
-	unsigned int	nr_channels;
-	dma_cap_mask_t  cap_mask;
-};
-
-/**
- * struct at_dma_slave - Controller-specific information about a slave
- * @dma_dev: required DMA master device
- * @cfg: Platform-specific initializer for the CFG register
- */
-struct at_dma_slave {
-	struct device		*dma_dev;
-	u32			cfg;
-};
-
-
-/* Platform-configurable bits in CFG */
-#define ATC_PER_MSB(h)	((0x30U & (h)) >> 4)	/* Extract most significant bits of a handshaking identifier */
-
-#define	ATC_SRC_PER(h)		(0xFU & (h))	/* Channel src rq associated with periph handshaking ifc h */
-#define	ATC_DST_PER(h)		((0xFU & (h)) <<  4)	/* Channel dst rq associated with periph handshaking ifc h */
-#define	ATC_SRC_REP		(0x1 <<  8)	/* Source Replay Mod */
-#define	ATC_SRC_H2SEL		(0x1 <<  9)	/* Source Handshaking Mod */
-#define	ATC_SRC_H2SEL_SW	(0x0 <<  9)
-#define	ATC_SRC_H2SEL_HW	(0x1 <<  9)
-#define	ATC_SRC_PER_MSB(h)	(ATC_PER_MSB(h) << 10)	/* Channel src rq (most significant bits) */
-#define	ATC_DST_REP		(0x1 << 12)	/* Destination Replay Mod */
-#define	ATC_DST_H2SEL		(0x1 << 13)	/* Destination Handshaking Mod */
-#define	ATC_DST_H2SEL_SW	(0x0 << 13)
-#define	ATC_DST_H2SEL_HW	(0x1 << 13)
-#define	ATC_DST_PER_MSB(h)	(ATC_PER_MSB(h) << 14)	/* Channel dst rq (most significant bits) */
-#define	ATC_SOD			(0x1 << 16)	/* Stop On Done */
-#define	ATC_LOCK_IF		(0x1 << 20)	/* Interface Lock */
-#define	ATC_LOCK_B		(0x1 << 21)	/* AHB Bus Lock */
-#define	ATC_LOCK_IF_L		(0x1 << 22)	/* Master Interface Arbiter Lock */
-#define	ATC_LOCK_IF_L_CHUNK	(0x0 << 22)
-#define	ATC_LOCK_IF_L_BUFFER	(0x1 << 22)
-#define	ATC_AHB_PROT_MASK	(0x7 << 24)	/* AHB Protection */
-#define	ATC_FIFOCFG_MASK	(0x3 << 28)	/* FIFO Request Configuration */
-#define	ATC_FIFOCFG_LARGESTBURST	(0x0 << 28)
-#define	ATC_FIFOCFG_HALFFIFO		(0x1 << 28)
-#define	ATC_FIFOCFG_ENOUGHSPACE		(0x2 << 28)
-
-
-#endif /* AT_HDMAC_H */
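These hand-rolled shift-and-mask macros predate <linux/bitfield.h>; an equivalent CFG word can be built today with GENMASK() and FIELD_PREP(). The mask names below are illustrative, not the Atmel driver's:

	#include <linux/bitfield.h>
	#include <linux/types.h>

	#define MY_CFG_SRC_PER	GENMASK(3, 0)	/* mirrors ATC_SRC_PER(h) */
	#define MY_CFG_DST_PER	GENMASK(7, 4)	/* mirrors ATC_DST_PER(h) */

	static u32 make_cfg(unsigned int handshake_if)
	{
		return FIELD_PREP(MY_CFG_SRC_PER, handshake_if) |
		       FIELD_PREP(MY_CFG_DST_PER, handshake_if);
	}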
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Platform data for the COH901318 DMA controller
- * Copyright (C) 2007-2013 ST-Ericsson
- */
-
-#ifndef PLAT_COH901318_H
-#define PLAT_COH901318_H
-
-#ifdef CONFIG_COH901318
-
-/* We only support the U300 DMA channels */
-#define U300_DMA_MSL_TX_0		0
-#define U300_DMA_MSL_TX_1		1
-#define U300_DMA_MSL_TX_2		2
-#define U300_DMA_MSL_TX_3		3
-#define U300_DMA_MSL_TX_4		4
-#define U300_DMA_MSL_TX_5		5
-#define U300_DMA_MSL_TX_6		6
-#define U300_DMA_MSL_RX_0		7
-#define U300_DMA_MSL_RX_1		8
-#define U300_DMA_MSL_RX_2		9
-#define U300_DMA_MSL_RX_3		10
-#define U300_DMA_MSL_RX_4		11
-#define U300_DMA_MSL_RX_5		12
-#define U300_DMA_MSL_RX_6		13
-#define U300_DMA_MMCSD_RX_TX		14
-#define U300_DMA_MSPRO_TX		15
-#define U300_DMA_MSPRO_RX		16
-#define U300_DMA_UART0_TX		17
-#define U300_DMA_UART0_RX		18
-#define U300_DMA_APEX_TX		19
-#define U300_DMA_APEX_RX		20
-#define U300_DMA_PCM_I2S0_TX		21
-#define U300_DMA_PCM_I2S0_RX		22
-#define U300_DMA_PCM_I2S1_TX		23
-#define U300_DMA_PCM_I2S1_RX		24
-#define U300_DMA_XGAM_CDI		25
-#define U300_DMA_XGAM_PDI		26
-#define U300_DMA_SPI_TX			27
-#define U300_DMA_SPI_RX			28
-#define U300_DMA_GENERAL_PURPOSE_0	29
-#define U300_DMA_GENERAL_PURPOSE_1	30
-#define U300_DMA_GENERAL_PURPOSE_2	31
-#define U300_DMA_GENERAL_PURPOSE_3	32
-#define U300_DMA_GENERAL_PURPOSE_4	33
-#define U300_DMA_GENERAL_PURPOSE_5	34
-#define U300_DMA_GENERAL_PURPOSE_6	35
-#define U300_DMA_GENERAL_PURPOSE_7	36
-#define U300_DMA_GENERAL_PURPOSE_8	37
-#define U300_DMA_UART1_TX		38
-#define U300_DMA_UART1_RX		39
-
-#define U300_DMA_DEVICE_CHANNELS	32
-#define U300_DMA_CHANNELS		40
-
-/**
- * coh901318_filter_id() - DMA channel filter function
- * @chan: dma channel handle
- * @chan_id: id of dma channel to be filter out
- *
- * In dma_request_channel() it specifies what channel id to be requested
- */
-bool coh901318_filter_id(struct dma_chan *chan, void *chan_id);
-#else
-static inline bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
-{
-	return false;
-}
-#endif
-
-#endif /* PLAT_COH901318_H */
@@ -57,15 +57,4 @@ struct sdma_script_start_addrs {
 	/* End of v4 array */
 };
 
-/**
- * struct sdma_platform_data - platform specific data for SDMA engine
- *
- * @fw_name		The firmware name
- * @script_addrs	SDMA scripts addresses in SDMA ROM
- */
-struct sdma_platform_data {
-	char *fw_name;
-	struct sdma_script_start_addrs *script_addrs;
-};
-
 #endif /* __MACH_MXC_SDMA_H__ */
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _SIRFSOC_DMA_H_
-#define _SIRFSOC_DMA_H_
-
-bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id);
-
-#endif