main drm pull request for v4.15

-----BEGIN PGP SIGNATURE-----
 
 iQIcBAABAgAGBQJaCm8RAAoJEAx081l5xIa+zX0QAJSm31kCG3vdw2CNiRx25L3q
 3hcsEOgAjVJ9FQVGKFWjzb8TK35tSqtNx5kWIj0VGaIfBE5Bdg5SLLgKKUYas8rY
 4LaphqICq2uxu2BNa2tpiar/sHhAnuozwQ4czpVWXzlaISnb9yYzRl7gMuyUVGkx
 +Gih5VUhLmQC0HsRTLJ3vaZQoUsLAl2gAjKcWa1bx57j2S+iKOPfsLaq7VYo+y1I
 Njc+iSGqMhJzRLXVkxL2lQKaslp7R38Bbh5K4Kvyjkm4Aq7zErOF6irpOXKMcrGl
 mwnr89vf1G9thjikrBaXpKnuvdbWYveoN/ORMlTdCfxkFnChHLnm3bd7NJ49RXDN
 Hv/Iq9YYjmZ9GTatxnx7lWtmXnZXC5he1yn1JAuz/yt7/0b/Wx+Mu/wEpBXYNFTd
 1AZdD586i+AmPo3yDkqH9nBu8JC0W0AnS9VZma4LVvZOP2UfJmj5Im1CLHItbGDN
 FnUCkwyD/lJUUk+WgT+w/GOMJgmFHDiFFl4tFtYVVjrUirpCFVguSKG9xuv6tT8P
 8iRsoP7RrcmDN9ojN2SEHwcpsAv3HnKkDv+9+GIbWnrGsSbCPq8Qm+JDSvf4h22I
 K5lwNpJrcpSKI+q10L7w2xliTBwb98sJkWGA/rssomrdBOWteGZAyqFRYAVgQ+mJ
 x/nJurIqQYh2KQN9+uLG
 =xVV2
 -----END PGP SIGNATURE-----

Merge tag 'drm-for-v4.15' of git://people.freedesktop.org/~airlied/linux

Pull drm updates from Dave Airlie:
 "This is the main drm pull request for v4.15.

  Core:
   - Atomic object lifetime fixes
   - Atomic iterator improvements
   - Sparse/smatch fixes
   - Legacy kms ioctls to be interruptible
   - EDID override improvements
   - fb/gem helper cleanups
   - Simple outreachy patches
   - Documentation improvements
   - Fix dma-buf rcu races
   - DRM mode object leasing for improving VR use cases.
   - vgaarb improvements for non-x86 platforms.

  New driver:
   - tve200: Faraday Technology TVE200 block.

     This "TV Encoder" encodes a ITU-T BT.656 stream and can be found in
     the StorLink SL3516 (later Cortina Systems CS3516) as well as the
     Grain Media GM8180.

  New bridges:
   - SiI9234 support

  New panels:
   - S6E63J0X03, OTM8009A, Seiko 43WVF1G, 7" rpi touch panel, Toshiba
     LT089AC19000, Innolux AT043TN24

  i915:
   - Remove Coffeelake from alpha support
   - Cannonlake workarounds
   - Infoframe refactoring for DisplayPort
   - VBT updates
   - DisplayPort vswing/emph/buffer translation refactoring
   - CCS fixes
   - Restore GPU clock boost on missed vblanks
   - Scatter list updates for userptr allocations
   - Gen9+ transition watermarks
   - Display IPC (Isochronous Priority Control)
   - Private PAT management
   - GVT: improved error handling and pci config sanitizing
   - Execlist refactoring
   - Transparent Huge Page support
   - User defined priorities support
   - HuC/GuC firmware refactoring
   - DP MST fixes
   - eDP power sequencing fixes
   - Use RCU instead of stop_machine
   - PSR state tracking support
   - Eviction fixes
   - BDW DP aux channel timeout fixes
   - LSPCON fixes
   - Cannonlake PLL fixes

  amdgpu:
   - Per VM BO support
   - Powerplay cleanups
   - CI powerplay support
   - PASID mgr for kfd
   - SR-IOV fixes
   - initial GPU reset for vega10
   - Prime mmap support
   - TTM updates
   - Clock query interface for Raven
   - Fence to handle ioctl
   - UVD encode ring support on Polaris
   - Transparent huge page DMA support
   - Compute LRU pipe tweaks
   - BO flag to allow buffers to opt out of implicit sync
   - CTX priority setting API
   - VRAM lost infrastructure plumbing

  qxl:
   - fix flicker since atomic rework

  amdkfd:
   - Further improvements from internal AMD tree
   - Usermode events
   - Drop radeon support

  nouveau:
   - Pascal temperature sensor support
   - Improved BAR2 handling
   - MMU rework to support Pascal MMU

  exynos:
   - Improved HDMI/mixer support
   - HDMI audio interface support

  tegra:
   - Prep work for tegra186
   - Cleanup/fixes

  msm:
   - Preemption support for a5xx
   - Display fixes for 8x96 (snapdragon 820)
   - Async cursor plane fixes
   - FW loading rework
   - GPU debugging improvements

  vc4:
   - Prep for DSI panels
   - fix T-format tiling scanout
   - New madvise ioctl

  Rockchip:
   - LVDS support

  omapdrm:
   - omap4 HDMI CEC support

  etnaviv:
   - GPU performance counters groundwork

  sun4i:
   - refactor driver load + TCON backend
   - HDMI improvements
   - A31 support
   - Misc fixes

  udl:
   - Probe/EDID read fixes.

  tilcdc:
   - Misc fixes.

  pl111:
   - Support more variants

  adv7511:
   - Improve EDID handling.
   - HDMI CEC support

  sii8620:
   - Add remote control support"

* tag 'drm-for-v4.15' of git://people.freedesktop.org/~airlied/linux: (1480 commits)
  drm/rockchip: analogix_dp: Use mutex rather than spinlock
  drm/mode_object: fix documentation for object lookups.
  drm/i915: Reorder context-close to avoid calling i915_vma_close() under RCU
  drm/i915: Move init_clock_gating() back to where it was
  drm/i915: Prune the reservation shared fence array
  drm/i915: Idle the GPU before shrinking everything
  drm/i915: Lock llist_del_first() vs llist_del_all()
  drm/i915: Calculate ironlake intermediate watermarks correctly, v2.
  drm/i915: Disable lazy PPGTT page table optimization for vGPU
  drm/i915/execlists: Remove the priority "optimisation"
  drm/i915: Filter out spurious execlists context-switch interrupts
  drm/amdgpu: use irq-safe lock for kiq->ring_lock
  drm/amdgpu: bypass lru touch for KIQ ring submission
  drm/amdgpu: Potential uninitialized variable in amdgpu_vm_update_directories()
  drm/amdgpu: potential uninitialized variable in amdgpu_vce_ring_parse_cs()
  drm/amd/powerplay: initialize a variable before using it
  drm/amd/powerplay: suppress KASAN out of bounds warning in vega10_populate_all_memory_levels
  drm/amd/amdgpu: fix evicted VRAM bo adjudgement condition
  drm/vblank: Tune drm_crtc_accurate_vblank_count() WARN down to a debug
  drm/rockchip: add CONFIG_OF dependency for lvds
  ...
Linus Torvalds 2017-11-15 20:42:10 -08:00
Parents: 5d352e69c6 f150891fd9
Commit: e60e1ee606
1040 changed files with 65899 additions and 56430 deletions


@@ -857,7 +857,7 @@
 	The filter can be disabled or changed to another
 	driver later using sysfs.
-	drm_kms_helper.edid_firmware=[<connector>:]<file>[,[<connector>:]<file>]
+	drm.edid_firmware=[<connector>:]<file>[,[<connector>:]<file>]
 	Broken monitors, graphic adapters, KVMs and EDIDless
 	panels may send no or incorrect EDID data sets.
 	This parameter allows to specify an EDID data sets
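As an illustration of the renamed parameter (the connector name below is a made-up example; edid/1280x1024.bin is one of the EDID blobs that can be built into the kernel):

	drm.edid_firmware=edid/1280x1024.bin
	drm.edid_firmware=HDMI-A-1:edid/1280x1024.bin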


@@ -68,6 +68,8 @@ Optional properties:
 - adi,disable-timing-generator: Only for ADV7533. Disables the internal timing
   generator. The chip will rely on the sync signals in the DSI data lanes,
   rather than generate its own timings for HDMI output.
+- clocks: from common clock binding: reference to the CEC clock.
+- clock-names: from common clock binding: must be "cec".
 
 Required nodes:
@@ -89,6 +91,8 @@ Example
 		reg = <39>;
 		interrupt-parent = <&gpio3>;
 		interrupts = <29 IRQ_TYPE_EDGE_FALLING>;
+		clocks = <&cec_clock>;
+		clock-names = "cec";
 
 		adi,input-depth = <8>;
 		adi,input-colorspace = "rgb";


@ -0,0 +1,49 @@
Silicon Image SiI9234 HDMI/MHL bridge bindings
Required properties:
- compatible : "sil,sii9234".
- reg : I2C address for TPI interface, use 0x39
- avcc33-supply : MHL/USB Switch Supply Voltage (3.3V)
- iovcc18-supply : I/O Supply Voltage (1.8V)
- avcc12-supply : TMDS Analog Supply Voltage (1.2V)
- cvcc12-supply : Digital Core Supply Voltage (1.2V)
- interrupts, interrupt-parent: interrupt specifier of INT pin
- reset-gpios: gpio specifier of RESET pin (active low)
- video interfaces: Device node can contain two video interface port
nodes for HDMI encoder and connector according to [1].
- port@0 - MHL to HDMI
- port@1 - MHL to connector
[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
Example:
sii9234@39 {
compatible = "sil,sii9234";
reg = <0x39>;
avcc33-supply = <&vcc33mhl>;
iovcc18-supply = <&vcc18mhl>;
avcc12-supply = <&vsil12>;
cvcc12-supply = <&vsil12>;
reset-gpios = <&gpf3 4 GPIO_ACTIVE_LOW>;
interrupt-parent = <&gpf3>;
interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
mhl_to_hdmi: endpoint {
remote-endpoint = <&hdmi_to_mhl>;
};
};
port@1 {
reg = <1>;
mhl_to_connector: endpoint {
remote-endpoint = <&connector_to_mhl>;
};
};
};
};


@ -0,0 +1,54 @@
* Faraday TV Encoder TVE200
Required properties:
- compatible: must be one of:
"faraday,tve200"
"cortina,gemini-tvc", "faraday,tve200"
- reg: base address and size of the control registers block
- interrupts: contains an interrupt specifier for the interrupt
line from the TVE200
- clock-names: should contain "PCLK" for the clock line clocking the
silicon and "TVE" for the 27MHz clock to the video driver
- clocks: contains phandle and clock specifier pairs for the entries
in the clock-names property. See
Documentation/devicetree/bindings/clock/clock-bindings.txt
Optional properties:
- resets: contains the reset line phandle for the block
Required sub-nodes:
- port: describes LCD panel signals, following the common binding
for video transmitter interfaces; see
Documentation/devicetree/bindings/media/video-interfaces.txt
This port should have the properties:
reg = <0>;
It should have one endpoint connected to a remote endpoint where
the display is connected.
Example:
display-controller@6a000000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "faraday,tve200";
reg = <0x6a000000 0x1000>;
interrupts = <13 IRQ_TYPE_EDGE_RISING>;
resets = <&syscon GEMINI_RESET_TVC>;
clocks = <&syscon GEMINI_CLK_GATE_TVC>,
<&syscon GEMINI_CLK_TVC>;
clock-names = "PCLK", "TVE";
port@0 {
reg = <0>;
display_out: endpoint {
remote-endpoint = <&panel_in>;
};
};
};


@@ -13,16 +13,16 @@ Required properties:
 - power-domains: Should be <&mmcc MDSS_GDSC>.
 - clocks: Phandles to device clocks.
 - clock-names: the following clocks are required:
-  * "mdp_core_clk"
-  * "iface_clk"
-  * "bus_clk"
-  * "core_mmss_clk"
-  * "byte_clk"
-  * "pixel_clk"
-  * "core_clk"
+  * "mdp_core"
+  * "iface"
+  * "bus"
+  * "core_mmss"
+  * "byte"
+  * "pixel"
+  * "core"
   For DSIv2, we need an additional clock:
-  * "src_clk"
-- assigned-clocks: Parents of "byte_clk" and "pixel_clk" for the given platform.
+  * "src"
+- assigned-clocks: Parents of "byte" and "pixel" for the given platform.
 - assigned-clock-parents: The Byte clock and Pixel clock PLL outputs provided
   by a DSI PHY block. See [1] for details on clock bindings.
 - vdd-supply: phandle to vdd regulator device node
@@ -101,7 +101,7 @@ Required properties:
 - power-domains: Should be <&mmcc MDSS_GDSC>.
 - clocks: Phandles to device clocks. See [1] for details on clock bindings.
 - clock-names: the following clocks are required:
-  * "iface_clk"
+  * "iface"
 - vddio-supply: phandle to vdd-io regulator device node
 
 Optional properties:
@@ -123,13 +123,13 @@ Example:
 		reg = <0xfd922800 0x200>;
 		power-domains = <&mmcc MDSS_GDSC>;
 		clock-names =
-			"bus_clk",
-			"byte_clk",
-			"core_clk",
-			"core_mmss_clk",
-			"iface_clk",
-			"mdp_core_clk",
-			"pixel_clk";
+			"bus",
+			"byte",
+			"core",
+			"core_mmss",
+			"iface",
+			"mdp_core",
+			"pixel";
 		clocks =
 			<&mmcc MDSS_AXI_CLK>,
 			<&mmcc MDSS_BYTE0_CLK>,
@@ -207,7 +207,7 @@ Example:
 		reg = <0xfd922a00 0xd4>,
 		      <0xfd922b00 0x2b0>,
 		      <0xfd922d80 0x7b>;
-		clock-names = "iface_clk";
+		clock-names = "iface";
 		clocks = <&mmcc MDSS_AHB_CLK>;
 		#clock-cells = <1>;
 		vddio-supply = <&pma8084_l12>;


@@ -12,11 +12,11 @@ Required properties:
 - clocks: device clocks
   See Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
 - clock-names: the following clocks are required:
-  * "core_clk"
-  * "iface_clk"
-  * "mdp_core_clk"
-  * "pixel_clk"
-  * "link_clk"
+  * "core"
+  * "iface"
+  * "mdp_core"
+  * "pixel"
+  * "link"
 - #clock-cells: The value should be 1.
 - vdda-supply: phandle to vdda regulator device node
 - lvl-vdd-supply: phandle to regulator device node which is used to supply power
@@ -41,11 +41,11 @@ Example:
 		interrupts = <12 0>;
 		power-domains = <&mmcc MDSS_GDSC>;
 		clock-names =
-			"core_clk",
-			"pixel_clk",
-			"iface_clk",
-			"link_clk",
-			"mdp_core_clk";
+			"core",
+			"pixel",
+			"iface",
+			"link",
+			"mdp_core";
 		clocks =
 			<&mmcc MDSS_EDPAUX_CLK>,
 			<&mmcc MDSS_EDPPIXEL_CLK>,


@@ -64,9 +64,9 @@ Example:
 		interrupts = <GIC_SPI 79 0>;
 		power-domains = <&mmcc MDSS_GDSC>;
 		clock-names =
-			"core_clk",
-			"master_iface_clk",
-			"slave_iface_clk";
+			"core",
+			"master_iface",
+			"slave_iface";
 		clocks =
 			<&mmcc HDMI_APP_CLK>,
 			<&mmcc HDMI_M_AHB_CLK>,
@@ -92,7 +92,7 @@ Example:
 		       <0x4a00500 0x100>;
 		#phy-cells = <0>;
 		power-domains = <&mmcc MDSS_GDSC>;
-		clock-names = "slave_iface_clk";
+		clock-names = "slave_iface";
 		clocks = <&mmcc HDMI_S_AHB_CLK>;
 		core-vdda-supply = <&pm8921_hdmi_mvs>;
 	};


@@ -22,16 +22,16 @@ Required properties:
   Documentation/devicetree/bindings/power/power_domain.txt
 - clocks: device clocks. See ../clocks/clock-bindings.txt for details.
 - clock-names: the following clocks are required.
-  * "iface_clk"
-  * "bus_clk"
-  * "vsync_clk"
+  * "iface"
+  * "bus"
+  * "vsync"
 - #address-cells: number of address cells for the MDSS children. Should be 1.
 - #size-cells: Should be 1.
 - ranges: parent bus address space is the same as the child bus address space.
 
 Optional properties:
 - clock-names: the following clocks are optional:
-  * "lut_clk"
+  * "lut"
 
 MDP5:
 Required properties:
@@ -45,10 +45,10 @@ Required properties:
   through MDP block
 - clocks: device clocks. See ../clocks/clock-bindings.txt for details.
 - clock-names: the following clocks are required.
-  * "bus_clk"
-  * "iface_clk"
-  * "core_clk"
-  * "vsync_clk"
+  * "bus"
+  * "iface"
+  * "core"
+  * "vsync"
 - ports: contains the list of output ports from MDP. These connect to interfaces
   that are external to the MDP hardware, such as HDMI, DSI, EDP etc (LVDS is a
   special case since it is a part of the MDP block itself).
@@ -77,7 +77,7 @@ Required properties:
 
 Optional properties:
 - clock-names: the following clocks are optional:
-  * "lut_clk"
+  * "lut"
 
 Example:
@@ -95,9 +95,9 @@ Example:
 		clocks = <&gcc GCC_MDSS_AHB_CLK>,
 			 <&gcc GCC_MDSS_AXI_CLK>,
 			 <&gcc GCC_MDSS_VSYNC_CLK>;
-		clock-names = "iface_clk",
-			      "bus_clk",
-			      "vsync_clk"
+		clock-names = "iface",
+			      "bus",
+			      "vsync"
 
 		interrupts = <0 72 0>;
@@ -120,10 +120,10 @@ Example:
 			 <&gcc GCC_MDSS_AXI_CLK>,
 			 <&gcc GCC_MDSS_MDP_CLK>,
 			 <&gcc GCC_MDSS_VSYNC_CLK>;
-		clock-names = "iface_clk",
-			      "bus_clk",
-			      "core_clk",
-			      "vsync_clk";
+		clock-names = "iface",
+			      "bus",
+			      "core",
+			      "vsync";
 
 		ports {
 			#address-cells = <1>;


@ -0,0 +1,21 @@
Orise Tech OTM8009A 3.97" 480x800 TFT LCD panel (MIPI-DSI video mode)
The Orise Tech OTM8009A is a 3.97" 480x800 TFT LCD panel connected using
a MIPI-DSI video interface. Its backlight is managed through the DSI link.
Required properties:
- compatible: "orisetech,otm8009a"
- reg: the virtual channel number of a DSI peripheral
Optional properties:
- reset-gpios: a GPIO spec for the reset pin (active low).
Example:
&dsi {
...
panel@0 {
compatible = "orisetech,otm8009a";
reg = <0>;
reset-gpios = <&gpioh 7 GPIO_ACTIVE_LOW>;
};
};


@ -0,0 +1,49 @@
This binding covers the official 7" (800x480) Raspberry Pi touchscreen
panel.
This DSI panel contains:
- TC358762 DSI->DPI bridge
- Atmel microcontroller on I2C for power sequencing the DSI bridge and
controlling backlight
- Touchscreen controller on I2C for touch input
and this binding covers the DSI display parts but not its touch input.
Required properties:
- compatible: Must be "raspberrypi,7inch-touchscreen-panel"
- reg: Must be "45"
- port: See panel-common.txt
Example:
dsi1: dsi@7e700000 {
#address-cells = <1>;
#size-cells = <0>;
<...>
port {
dsi_out_port: endpoint {
remote-endpoint = <&panel_dsi_port>;
};
};
};
i2c_dsi: i2c {
compatible = "i2c-gpio";
#address-cells = <1>;
#size-cells = <0>;
gpios = <&gpio 28 0
&gpio 29 0>;
lcd@45 {
compatible = "raspberrypi,7inch-touchscreen-panel";
reg = <0x45>;
port {
panel_dsi_port: endpoint {
remote-endpoint = <&dsi_out_port>;
};
};
};
};


@ -0,0 +1,24 @@
Samsung S6E63J0X03 1.63" 320x320 AMOLED panel (interface: MIPI-DSI command mode)
Required properties:
- compatible: "samsung,s6e63j0x03"
- reg: the virtual channel number of a DSI peripheral
- vdd3-supply: I/O voltage supply
- vci-supply: voltage supply for analog circuits
- reset-gpios: a GPIO spec for the reset pin (active low)
- te-gpios: a GPIO spec for the tearing effect synchronization signal
gpio pin (active high)
Example:
&dsi {
...
panel@0 {
compatible = "samsung,s6e63j0x03";
reg = <0>;
vdd3-supply = <&ldo16_reg>;
vci-supply = <&ldo20_reg>;
reset-gpios = <&gpe0 1 GPIO_ACTIVE_LOW>;
te-gpios = <&gpx0 6 GPIO_ACTIVE_HIGH>;
};
};


@ -0,0 +1,23 @@
Seiko Instruments Inc. 4.3" WVGA (800 x RGB x 480) TFT with Touch-Panel
Required properties:
- compatible: should be "sii,43wvf1g".
- "dvdd-supply": 3v3 digital regulator.
- "avdd-supply": 5v analog regulator.
Optional properties:
- backlight: phandle for the backlight control.
Example:
panel {
compatible = "sii,43wvf1g";
backlight = <&backlight_display>;
dvdd-supply = <&reg_lcd_3v3>;
avdd-supply = <&reg_lcd_5v>;
port {
panel_in: endpoint {
remote-endpoint = <&display_out>;
};
};
};


@ -0,0 +1,8 @@
Toshiba 8.9" WXGA (1280x768) TFT LCD panel
Required properties:
- compatible: should be "toshiba,lt089ac29000.txt"
- power-supply: as specified in the base binding
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.


@ -0,0 +1,99 @@
Rockchip RK3288 LVDS interface
================================
Required properties:
- compatible: matching the soc type, one of
- "rockchip,rk3288-lvds";
- reg: physical base address of the controller and length
of memory mapped region.
- clocks: must include clock specifiers corresponding to entries in the
clock-names property.
- clock-names: must contain "pclk_lvds"
- avdd1v0-supply: regulator phandle for 1.0V analog power
- avdd1v8-supply: regulator phandle for 1.8V analog power
- avdd3v3-supply: regulator phandle for 3.3V analog power
- rockchip,grf: phandle to the general register files syscon
- rockchip,output: "rgb", "lvds" or "duallvds"; this describes the output interface
Optional properties:
- pinctrl-names: must contain a "lcdc" entry.
- pinctrl-0: pin control group to be used for this controller.
Required nodes:
The lvds has two video ports as described by
Documentation/devicetree/bindings/media/video-interfaces.txt
Their connections are modeled using the OF graph bindings specified in
Documentation/devicetree/bindings/graph.txt.
- video port 0 for the VOP input, the remote endpoint may be vopb or vopl
- video port 1 for either a panel or subsequent encoder
the lvds panel described by
Documentation/devicetree/bindings/display/panel/simple-panel.txt
Panel required properties:
- ports for remote LVDS output
Panel optional properties:
- data-mapping: should be "vesa-24","jeida-24" or "jeida-18".
This is described in:
Documentation/devicetree/bindings/display/panel/panel-lvds.txt
Example:
lvds_panel: lvds-panel {
compatible = "auo,b101ean01";
enable-gpios = <&gpio7 21 GPIO_ACTIVE_HIGH>;
data-mapping = "jeida-24";
ports {
panel_in_lvds: endpoint {
remote-endpoint = <&lvds_out_panel>;
};
};
};
For Rockchip RK3288:
lvds: lvds@ff96c000 {
compatible = "rockchip,rk3288-lvds";
rockchip,grf = <&grf>;
reg = <0xff96c000 0x4000>;
clocks = <&cru PCLK_LVDS_PHY>;
clock-names = "pclk_lvds";
pinctrl-names = "lcdc";
pinctrl-0 = <&lcdc_ctl>;
avdd1v0-supply = <&vdd10_lcd>;
avdd1v8-supply = <&vcc18_lcd>;
avdd3v3-supply = <&vcca_33>;
rockchip,output = "rgb";
ports {
#address-cells = <1>;
#size-cells = <0>;
lvds_in: port@0 {
reg = <0>;
lvds_in_vopb: endpoint@0 {
reg = <0>;
remote-endpoint = <&vopb_out_lvds>;
};
lvds_in_vopl: endpoint@1 {
reg = <1>;
remote-endpoint = <&vopl_out_lvds>;
};
};
lvds_out: port@1 {
reg = <1>;
lvds_out_panel: endpoint {
remote-endpoint = <&panel_in_lvds>;
};
};
};
};


@@ -40,15 +40,19 @@ CEC. It is one end of the pipeline.
 
 Required properties:
   - compatible: value must be one of:
+    * allwinner,sun4i-a10-hdmi
     * allwinner,sun5i-a10s-hdmi
+    * allwinner,sun6i-a31-hdmi
   - reg: base address and size of memory-mapped region
   - interrupts: interrupt associated to this IP
   - clocks: phandles to the clocks feeding the HDMI encoder
     * ahb: the HDMI interface clock
     * mod: the HDMI module clock
+    * ddc: the HDMI ddc clock (A31 only)
     * pll-0: the first video PLL
     * pll-1: the second video PLL
   - clock-names: the clock names mentioned above
+  - resets: phandle to the reset control for the HDMI encoder (A31 only)
   - dmas: phandles to the DMA channels used by the HDMI encoder
     * ddc-tx: The channel for DDC transmission
     * ddc-rx: The channel for DDC reception
@@ -83,9 +87,11 @@ The TCON acts as a timing controller for RGB, LVDS and TV interfaces.
 
 Required properties:
  - compatible: value must be either:
+   * allwinner,sun4i-a10-tcon
    * allwinner,sun5i-a13-tcon
    * allwinner,sun6i-a31-tcon
    * allwinner,sun6i-a31s-tcon
+   * allwinner,sun7i-a20-tcon
    * allwinner,sun8i-a33-tcon
    * allwinner,sun8i-v3s-tcon
  - reg: base address and size of memory-mapped region
@@ -150,8 +156,10 @@ system.
 
 Required properties:
   - compatible: value must be one of:
+    * allwinner,sun4i-a10-display-backend
     * allwinner,sun5i-a13-display-backend
     * allwinner,sun6i-a31-display-backend
+    * allwinner,sun7i-a20-display-backend
     * allwinner,sun8i-a33-display-backend
   - reg: base address and size of the memory-mapped region.
   - interrupts: interrupt associated to this IP
@@ -182,8 +190,10 @@ deinterlacing and color space conversion.
 
 Required properties:
   - compatible: value must be one of:
+    * allwinner,sun4i-a10-display-frontend
     * allwinner,sun5i-a13-display-frontend
     * allwinner,sun6i-a31-display-frontend
+    * allwinner,sun7i-a20-display-frontend
     * allwinner,sun8i-a33-display-frontend
   - reg: base address and size of the memory-mapped region.
   - interrupts: interrupt associated to this IP
@@ -228,10 +238,12 @@ extra node.
 
 Required properties:
   - compatible: value must be one of:
+    * allwinner,sun4i-a10-display-engine
     * allwinner,sun5i-a10s-display-engine
     * allwinner,sun5i-a13-display-engine
    * allwinner,sun6i-a31-display-engine
     * allwinner,sun6i-a31s-display-engine
+    * allwinner,sun7i-a20-display-engine
     * allwinner,sun8i-a33-display-engine
     * allwinner,sun8i-v3s-display-engine


@@ -3,6 +3,10 @@ NVIDIA Tegra host1x
 
 Required properties:
 - compatible: "nvidia,tegra<chip>-host1x"
 - reg: Physical base address and length of the controller's registers.
+  For pre-Tegra186, one entry describing the whole register area.
+  For Tegra186, one entry for each entry in reg-names:
+    "vm" - VM region assigned to Linux
+    "hypervisor" - Hypervisor region (only if Linux acts as hypervisor)
 - interrupts: The interrupt outputs from the controller.
 - #address-cells: The number of cells used to represent physical base addresses
   in the host1x address space. Should be 1.


@@ -254,6 +254,7 @@ opencores	OpenCores.org
 openrisc	OpenRISC.io
 option	Option NV
 ORCL	Oracle Corporation
+orisetech	Orise Technology
 ortustech	Ortus Technology Co., Ltd.
 ovti	OmniVision Technologies
 oxsemi	Oxford Semiconductor, Ltd.


@@ -168,6 +168,61 @@ IOCTL Support on Device Nodes
 .. kernel-doc:: drivers/gpu/drm/drm_ioctl.c
    :doc: driver specific ioctls
 
+Recommended IOCTL Return Values
+-------------------------------
+
+In theory a driver's IOCTL callback is only allowed to return very few error
+codes. In practice it's good to abuse a few more. This section documents common
+practice within the DRM subsystem:
+
+ENOENT:
+        Strictly this should only be used when a file doesn't exist e.g. when
+        calling the open() syscall. We reuse that to signal any kind of object
+        lookup failure, e.g. for unknown GEM buffer object handles, unknown KMS
+        object handles and similar cases.
+
+ENOSPC:
+        Some drivers use this to differentiate "out of kernel memory" from "out
+        of VRAM". Sometimes also applies to other limited gpu resources used for
+        rendering (e.g. when you have a special limited compression buffer).
+        Sometimes resource allocation/reservation issues in command submission
+        IOCTLs are also signalled through EDEADLK.
+
+        Simply running out of kernel/system memory is signalled through ENOMEM.
+
+EPERM/EACCES:
+        Returned for an operation that is valid, but needs more privileges.
+        E.g. root-only or, much more common, DRM master-only operations return
+        this when called by unprivileged clients. There's no clear difference
+        between EACCES and EPERM.
+
+ENODEV:
+        Feature (like PRIME, modesetting, GEM) is not supported by the driver.
+
+ENXIO:
+        Remote failure, either a hardware transaction (like i2c), but also used
+        when the exporting driver of a shared dma-buf or fence doesn't support a
+        feature needed.
+
+EINTR:
+        DRM drivers assume that userspace restarts all IOCTLs. Any DRM IOCTL can
+        return EINTR and in such a case should be restarted with the IOCTL
+        parameters left unchanged.
+
+EIO:
+        The GPU died and couldn't be resurrected through a reset. Modesetting
+        hardware failures are signalled through the "link status" connector
+        property.
+
+EINVAL:
+        Catch-all for anything that is an invalid argument combination which
+        cannot work.
+
+IOCTLs also use other error codes like ETIME, EFAULT, EBUSY, ENOTTY but their
+usage is in line with the common meanings. The above list tries to just document
+DRM specific patterns. Note that ENOTTY has the slightly unintuitive meaning of
+"this IOCTL does not exist", and is used exactly as such in DRM.
+
 .. kernel-doc:: include/drm/drm_ioctl.h
    :internal:
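To make the conventions above concrete, here is a minimal, hypothetical IOCTL handler sketch. The ``foo`` driver prefix and the args struct are invented for illustration; only the way the error codes are chosen follows the list above:

	struct drm_foo_query_args {
		__u32 handle;	/* GEM handle to look up */
		__u32 size;	/* returned object size, must be 0 on input */
	};

	static int foo_query_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv)
	{
		struct drm_foo_query_args *args = data;
		struct drm_gem_object *obj;

		if (args->size != 0)	/* invalid argument combination */
			return -EINVAL;

		obj = drm_gem_object_lookup(file_priv, args->handle);
		if (!obj)		/* unknown object handle */
			return -ENOENT;

		args->size = obj->size;
		drm_gem_object_put_unlocked(obj);
		return 0;
	}

On the userspace side, the EINTR rule is why libdrm's drmIoctl() wraps ioctl() in a retry loop that restarts the call with unchanged arguments when it fails with EINTR or EAGAIN.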


@@ -15,6 +15,7 @@ Linux GPU Driver Developer's Guide
    pl111
    tegra
    tinydrm
+   tve200
    vc4
    vga-switcheroo
    vgaarbiter


@@ -75,17 +75,6 @@ helpers.
 
 Contact: Ville Syrjälä, Daniel Vetter, driver maintainers
 
-Implement deferred fbdev setup in the helper
---------------------------------------------
-
-Many (especially embedded drivers) want to delay fbdev setup until there's a
-real screen plugged in. This is to avoid the dreaded fallback to the low-res
-fbdev default. Many drivers have a hacked-up (and often broken) version of this,
-better to do it once in the shared helpers. Thierry has a patch series, but that
-one needs to be rebased and final polish applied.
-
-Contact: Thierry Reding, Daniel Vetter, driver maintainers
-
 Convert early atomic drivers to async commit helpers
 ----------------------------------------------------
@@ -138,6 +127,8 @@ interfaces to fix these issues:
   the acquire context explicitly on stack and then also pass it down into
   drivers explicitly so that the legacy-on-atomic functions can use them.
 
+  Except for some driver code this is done.
+
 * A bunch of the vtable hooks are now in the wrong place: DRM has a split
   between core vfunc tables (named ``drm_foo_funcs``), which are used to
   implement the userspace ABI. And then there's the optional hooks for the
@@ -151,6 +142,8 @@ interfaces to fix these issues:
   connector at runtime. That's almost all of them, and would allow us to get
   rid of a lot of ``best_encoder`` boilerplate in drivers.
 
+  This was almost done, but new drivers added a few more cases again.
+
 Contact: Daniel Vetter
 
 Get rid of dev->struct_mutex from GEM drivers
@@ -177,15 +170,20 @@ following drivers still use ``struct_mutex``: ``msm``, ``omapdrm`` and
 
 Contact: Daniel Vetter, respective driver maintainers
 
+Convert instances of dev_info/dev_err/dev_warn to their DRM_DEV_* equivalent
+-----------------------------------------------------------------------------
+
+For drivers which could have multiple instances, it is necessary to
+differentiate between which is which in the logs. Since DRM_INFO/WARN/ERROR
+don't do this, drivers used dev_info/warn/err to make this differentiation. We
+now have DRM_DEV_* variants of the drm print macros, so we can start to convert
+those drivers back to using drm-formatted specific log messages.
+
+Contact: Sean Paul, Maintainer of the driver you plan to convert
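A minimal sketch of what one such conversion looks like (the error message and the ``drm->dev`` pointer are illustrative, not taken from any particular driver; the DRM_DEV_* macros live in include/drm/drm_print.h):

	/* Before: plain dev_err(), indistinguishable from any other subsystem. */
	dev_err(drm->dev, "failed to enable pixel clock: %d\n", ret);

	/* After: same per-device prefix, routed through the DRM print helpers. */
	DRM_DEV_ERROR(drm->dev, "failed to enable pixel clock: %d\n", ret);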
 Core refactorings
 =================
 
-Use new IDR deletion interface to clean up drm_gem_handle_delete()
-------------------------------------------------------------------
-
-See the "This is gross" comment -- apparently the IDR system now can return an
-error code instead of oopsing.
-
 Clean up the DRM header mess
 ----------------------------
@@ -306,6 +304,18 @@ There's a bunch of issues with it:
 
 Contact: Daniel Vetter
 
+KMS cleanups
+------------
+
+Some of these date from the very introduction of KMS in 2008 ...
+
+- drm_mode_config.crtc_idr is misnamed, since it contains all KMS objects. Should
+  be renamed to drm_mode_config.object_idr.
+
+- drm_display_mode doesn't need to be derived from drm_mode_object. That's
+  leftovers from older (never merged into upstream) KMS designs where modes
+  were set using their ID, including support to add/remove modes.
+
 Better Testing
 ==============
@@ -353,7 +363,16 @@ those drivers as simple as possible, so lots of room for refactoring:
 - backlight helpers, probably best to put them into a new drm_backlight.c.
   This is because drivers/video is de-facto unmaintained. We could also
   move drivers/video/backlight to drivers/gpu/backlight and take it all
-  over within drm-misc, but that's more work.
+  over within drm-misc, but that's more work. Backlight helpers require a fair
+  bit of reworking and refactoring. A simple example is the enabling of a backlight.
+  Tinydrm has helpers for this. It would be good if other drivers can also use the
+  helper. However, there are various cases we need to consider, i.e. different
+  drivers seem to have different ways of enabling/disabling a backlight.
+  We also need to consider the backlight drivers (like gpio_backlight). The situation
+  is further complicated by the fact that the backlight is tied to fbdev
+  via fb_notifier_callback() which has complicated logic. For further details, refer
+  to the following discussion thread:
+  https://groups.google.com/forum/#!topic/outreachy-kernel/8rBe30lwtdA
 
 - spi helpers, probably best put into spi core/helper code. Thierry said
   the spi maintainer is fast&reactive, so shouldn't be a big issue.


@ -0,0 +1,6 @@
==================================
drm/tve200 Faraday TV Encoder 200
==================================
.. kernel-doc:: drivers/gpu/drm/tve200/tve200_drv.c
:doc: Faraday TV Encoder 200


@@ -754,8 +754,6 @@ F:	drivers/gpu/drm/amd/amdkfd/
 F:	drivers/gpu/drm/amd/include/cik_structs.h
 F:	drivers/gpu/drm/amd/include/kgd_kfd_interface.h
 F:	drivers/gpu/drm/amd/include/vi_structs.h
-F:	drivers/gpu/drm/radeon/radeon_kfd.c
-F:	drivers/gpu/drm/radeon/radeon_kfd.h
 F:	include/uapi/linux/kfd_ioctl.h
 
 AMD SEATTLE DEVICE TREE SUPPORT
@@ -4400,6 +4398,12 @@ T:	git git://anongit.freedesktop.org/drm/drm-misc
 S:	Maintained
 F:	drivers/gpu/drm/bochs/
 
+DRM DRIVER FOR FARADAY TVE200 TV ENCODER
+M:	Linus Walleij <linus.walleij@linaro.org>
+T:	git git://anongit.freedesktop.org/drm/drm-misc
+S:	Maintained
+F:	drivers/gpu/drm/tve200/
+
 DRM DRIVER FOR INTEL I810 VIDEO CARDS
 S:	Orphan / Obsolete
 F:	drivers/gpu/drm/i810/
@@ -4543,7 +4547,7 @@ L:	dri-devel@lists.freedesktop.org
 S:	Supported
 F:	drivers/gpu/drm/sun4i/
 F:	Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mripard/linux.git
+T:	git git://anongit.freedesktop.org/drm/drm-misc
 
 DRM DRIVERS FOR AMLOGIC SOCS
 M:	Neil Armstrong <narmstrong@baylibre.com>
@@ -4727,7 +4731,7 @@ T:	git git://anongit.freedesktop.org/drm/drm-misc
 DRM PANEL DRIVERS
 M:	Thierry Reding <thierry.reding@gmail.com>
 L:	dri-devel@lists.freedesktop.org
-T:	git git://anongit.freedesktop.org/tegra/linux.git
+T:	git git://anongit.freedesktop.org/drm/drm-misc
 S:	Maintained
 F:	drivers/gpu/drm/drm_panel.c
 F:	drivers/gpu/drm/panel/
@@ -5495,6 +5499,7 @@ F:	drivers/net/wan/sdla.c
 FRAMEBUFFER LAYER
 M:	Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
+L:	dri-devel@lists.freedesktop.org
 L:	linux-fbdev@vger.kernel.org
 T:	git git://github.com/bzolnier/linux.git
 Q:	http://patchwork.kernel.org/project/linux-fbdev/list/


@@ -1740,15 +1740,3 @@ static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
-
-static void fixup_vga(struct pci_dev *pdev)
-{
-	u16 cmd;
-
-	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
-	if ((cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) || !vga_default_device())
-		vga_set_default_device(pdev);
-}
-
-DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
-			      PCI_CLASS_DISPLAY_VGA, 8, fixup_vga);


@@ -625,7 +625,7 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
 struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 					enum dma_data_direction direction)
 {
-	struct sg_table *sg_table = ERR_PTR(-EINVAL);
+	struct sg_table *sg_table;
 
 	might_sleep();


@@ -266,8 +266,7 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence);
  * @dst: the destination reservation object
  * @src: the source reservation object
  *
- * Copy all fences from src to dst. Both src->lock as well as dst-lock must be
- * held.
+ * Copy all fences from src to dst. dst-lock must be held.
  */
 int reservation_object_copy_fences(struct reservation_object *dst,
 				   struct reservation_object *src)
@@ -277,33 +276,62 @@ int reservation_object_copy_fences(struct reservation_object *dst,
 	size_t size;
 	unsigned i;
 
-	src_list = reservation_object_get_list(src);
+	rcu_read_lock();
+	src_list = rcu_dereference(src->fence);
 
+retry:
 	if (src_list) {
-		size = offsetof(typeof(*src_list),
-				shared[src_list->shared_count]);
+		unsigned shared_count = src_list->shared_count;
+
+		size = offsetof(typeof(*src_list), shared[shared_count]);
+		rcu_read_unlock();
+
 		dst_list = kmalloc(size, GFP_KERNEL);
 		if (!dst_list)
 			return -ENOMEM;
 
-		dst_list->shared_count = src_list->shared_count;
-		dst_list->shared_max = src_list->shared_count;
-		for (i = 0; i < src_list->shared_count; ++i)
-			dst_list->shared[i] =
-				dma_fence_get(src_list->shared[i]);
+		rcu_read_lock();
+		src_list = rcu_dereference(src->fence);
+		if (!src_list || src_list->shared_count > shared_count) {
+			kfree(dst_list);
+			goto retry;
+		}
+
+		dst_list->shared_count = 0;
+		dst_list->shared_max = shared_count;
+		for (i = 0; i < src_list->shared_count; ++i) {
+			struct dma_fence *fence;
+
+			fence = rcu_dereference(src_list->shared[i]);
+			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+				     &fence->flags))
+				continue;
+
+			if (!dma_fence_get_rcu(fence)) {
+				kfree(dst_list);
+				src_list = rcu_dereference(src->fence);
+				goto retry;
+			}
+
+			if (dma_fence_is_signaled(fence)) {
+				dma_fence_put(fence);
+				continue;
+			}
+
+			dst_list->shared[dst_list->shared_count++] = fence;
+		}
 	} else {
 		dst_list = NULL;
 	}
 
+	new = dma_fence_get_rcu_safe(&src->fence_excl);
+	rcu_read_unlock();
+
 	kfree(dst->staged);
 	dst->staged = NULL;
 
 	src_list = reservation_object_get_list(dst);
 
 	old = reservation_object_get_excl(dst);
-	new = reservation_object_get_excl(src);
-
-	dma_fence_get(new);
 
 	preempt_disable();
 	write_seqcount_begin(&dst->seq);

@@ -321,8 +321,16 @@ static int sw_sync_debugfs_open(struct inode *inode, struct file *file)
 static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
 {
 	struct sync_timeline *obj = file->private_data;
+	struct sync_pt *pt, *next;
 
-	smp_wmb();
+	spin_lock_irq(&obj->lock);
+
+	list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
+		dma_fence_set_error(&pt->base, -ENOENT);
+		dma_fence_signal_locked(&pt->base);
+	}
+
+	spin_unlock_irq(&obj->lock);
 
 	sync_timeline_put(obj);
 	return 0;


@@ -110,7 +110,7 @@ config DRM_FBDEV_OVERALLOC
 
 config DRM_LOAD_EDID_FIRMWARE
 	bool "Allow to specify an EDID data set instead of probing for it"
-	depends on DRM_KMS_HELPER
+	depends on DRM
 	help
 	  Say Y here, if you want to use EDID data to be loaded from the
 	  /lib/firmware directory or one of the provided built-in
@@ -184,6 +184,7 @@ config DRM_AMDGPU
 	select BACKLIGHT_CLASS_DEVICE
 	select BACKLIGHT_LCD_SUPPORT
 	select INTERVAL_TREE
+	select CHASH
 	help
 	  Choose this option if you have a recent AMD Radeon graphics card.
@@ -191,6 +192,8 @@ config DRM_AMDGPU
 
 source "drivers/gpu/drm/amd/amdgpu/Kconfig"
 
+source "drivers/gpu/drm/amd/lib/Kconfig"
+
 source "drivers/gpu/drm/nouveau/Kconfig"
 
 source "drivers/gpu/drm/i915/Kconfig"
@@ -278,6 +281,8 @@ source "drivers/gpu/drm/tinydrm/Kconfig"
 
 source "drivers/gpu/drm/pl111/Kconfig"
 
+source "drivers/gpu/drm/tve200/Kconfig"
+
 # Keep legacy drivers last
 
 menuconfig DRM_LEGACY


@@ -18,7 +18,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
 		drm_encoder.o drm_mode_object.o drm_property.o \
 		drm_plane.o drm_color_mgmt.o drm_print.o \
 		drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
-		drm_syncobj.o
+		drm_syncobj.o drm_lease.o
 
 drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
 drm-$(CONFIG_DRM_VM) += drm_vm.o
@@ -29,6 +29,7 @@ drm-$(CONFIG_DRM_PANEL) += drm_panel.o
 drm-$(CONFIG_OF) += drm_of.o
 drm-$(CONFIG_AGP) += drm_agpsupport.o
 drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
+drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 
 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
 		drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
@@ -37,7 +38,6 @@ drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
 		drm_scdc_helper.o drm_gem_framebuffer_helper.o
 
 drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
-drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
@@ -45,14 +45,13 @@ drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
 obj-$(CONFIG_DRM_DEBUG_MM_SELFTEST) += selftests/
 
-CFLAGS_drm_trace_points.o := -I$(src)
-
 obj-$(CONFIG_DRM)	+= drm.o
 obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
 obj-$(CONFIG_DRM_ARM) += arm/
 obj-$(CONFIG_DRM_TTM)	+= ttm/
 obj-$(CONFIG_DRM_TDFX)	+= tdfx/
 obj-$(CONFIG_DRM_R128)	+= r128/
+obj-y			+= amd/lib/
 obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
 obj-$(CONFIG_DRM_RADEON)+= radeon/
 obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
@@ -101,3 +100,4 @@ obj-$(CONFIG_DRM_ZTE)	+= zte/
 obj-$(CONFIG_DRM_MXSFB)	+= mxsfb/
 obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
 obj-$(CONFIG_DRM_PL111) += pl111/
+obj-$(CONFIG_DRM_TVE200) += tve200/


@@ -26,7 +26,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
 	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
 	amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
-	amdgpu_queue_mgr.o amdgpu_vf_error.o
+	amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
@@ -134,5 +134,3 @@ include $(FULL_AMD_PATH)/powerplay/Makefile
 amdgpu-y += $(AMD_POWERPLAY_FILES)
 
 obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
-
-CFLAGS_amdgpu_trace_points.o := -I$(src)


@ -65,6 +65,7 @@
#include "amdgpu_uvd.h" #include "amdgpu_uvd.h"
#include "amdgpu_vce.h" #include "amdgpu_vce.h"
#include "amdgpu_vcn.h" #include "amdgpu_vcn.h"
#include "amdgpu_mn.h"
#include "gpu_scheduler.h" #include "gpu_scheduler.h"
#include "amdgpu_virt.h" #include "amdgpu_virt.h"
@ -91,7 +92,7 @@ extern int amdgpu_dpm;
extern int amdgpu_fw_load_type; extern int amdgpu_fw_load_type;
extern int amdgpu_aspm; extern int amdgpu_aspm;
extern int amdgpu_runtime_pm; extern int amdgpu_runtime_pm;
extern unsigned amdgpu_ip_block_mask; extern uint amdgpu_ip_block_mask;
extern int amdgpu_bapm; extern int amdgpu_bapm;
extern int amdgpu_deep_color; extern int amdgpu_deep_color;
extern int amdgpu_vm_size; extern int amdgpu_vm_size;
@ -104,14 +105,14 @@ extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission; extern int amdgpu_sched_hw_submission;
extern int amdgpu_no_evict; extern int amdgpu_no_evict;
extern int amdgpu_direct_gma_size; extern int amdgpu_direct_gma_size;
extern unsigned amdgpu_pcie_gen_cap; extern uint amdgpu_pcie_gen_cap;
extern unsigned amdgpu_pcie_lane_cap; extern uint amdgpu_pcie_lane_cap;
extern unsigned amdgpu_cg_mask; extern uint amdgpu_cg_mask;
extern unsigned amdgpu_pg_mask; extern uint amdgpu_pg_mask;
extern unsigned amdgpu_sdma_phase_quantum; extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu; extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display; extern char *amdgpu_virtual_display;
extern unsigned amdgpu_pp_feature_mask; extern uint amdgpu_pp_feature_mask;
extern int amdgpu_vram_page_split; extern int amdgpu_vram_page_split;
extern int amdgpu_ngg; extern int amdgpu_ngg;
extern int amdgpu_prim_buf_per_se; extern int amdgpu_prim_buf_per_se;
@ -120,6 +121,7 @@ extern int amdgpu_cntl_sb_buf_per_se;
extern int amdgpu_param_buf_per_se; extern int amdgpu_param_buf_per_se;
extern int amdgpu_job_hang_limit; extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw; extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
#ifdef CONFIG_DRM_AMDGPU_SI #ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support; extern int amdgpu_si_support;
@ -178,6 +180,7 @@ struct amdgpu_cs_parser;
struct amdgpu_job; struct amdgpu_job;
struct amdgpu_irq_src; struct amdgpu_irq_src;
struct amdgpu_fpriv; struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;
enum amdgpu_cp_irq { enum amdgpu_cp_irq {
AMDGPU_CP_IRQ_GFX_EOP = 0, AMDGPU_CP_IRQ_GFX_EOP = 0,
@ -292,14 +295,25 @@ struct amdgpu_buffer_funcs {
/* provided by hw blocks that can write ptes, e.g., sdma */ /* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs { struct amdgpu_vm_pte_funcs {
/* number of dw to reserve per operation */
unsigned copy_pte_num_dw;
/* copy pte entries from GART */ /* copy pte entries from GART */
void (*copy_pte)(struct amdgpu_ib *ib, void (*copy_pte)(struct amdgpu_ib *ib,
uint64_t pe, uint64_t src, uint64_t pe, uint64_t src,
unsigned count); unsigned count);
/* write pte one entry at a time with addr mapping */ /* write pte one entry at a time with addr mapping */
void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe, void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
uint64_t value, unsigned count, uint64_t value, unsigned count,
uint32_t incr); uint32_t incr);
/* maximum nums of PTEs/PDEs in a single operation */
uint32_t set_max_nums_pte_pde;
/* number of dw to reserve per operation */
unsigned set_pte_pde_num_dw;
/* for linear pte/pde updates without addr mapping */ /* for linear pte/pde updates without addr mapping */
void (*set_pte_pde)(struct amdgpu_ib *ib, void (*set_pte_pde)(struct amdgpu_ib *ib,
uint64_t pe, uint64_t pe,
@ -332,6 +346,7 @@ struct amdgpu_gart_funcs {
struct amdgpu_ih_funcs { struct amdgpu_ih_funcs {
/* ring read/write ptr handling, called from interrupt context */ /* ring read/write ptr handling, called from interrupt context */
u32 (*get_wptr)(struct amdgpu_device *adev); u32 (*get_wptr)(struct amdgpu_device *adev);
bool (*prescreen_iv)(struct amdgpu_device *adev);
void (*decode_iv)(struct amdgpu_device *adev, void (*decode_iv)(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry); struct amdgpu_iv_entry *entry);
void (*set_rptr)(struct amdgpu_device *adev); void (*set_rptr)(struct amdgpu_device *adev);
@ -399,6 +414,7 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *); struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj); void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev); int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);
/* sub-allocation manager, it has to be protected by another lock. /* sub-allocation manager, it has to be protected by another lock.
@ -455,9 +471,10 @@ struct amdgpu_sa_bo {
*/ */
void amdgpu_gem_force_release(struct amdgpu_device *adev); void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
int alignment, u32 initial_domain, int alignment, u32 initial_domain,
u64 flags, bool kernel, u64 flags, bool kernel,
struct drm_gem_object **obj); struct reservation_object *resv,
struct drm_gem_object **obj);
int amdgpu_mode_dumb_create(struct drm_file *file_priv, int amdgpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev, struct drm_device *dev,
@ -715,10 +732,14 @@ struct amdgpu_ctx {
struct amdgpu_device *adev; struct amdgpu_device *adev;
struct amdgpu_queue_mgr queue_mgr; struct amdgpu_queue_mgr queue_mgr;
unsigned reset_counter; unsigned reset_counter;
uint32_t vram_lost_counter;
spinlock_t ring_lock; spinlock_t ring_lock;
struct dma_fence **fences; struct dma_fence **fences;
struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS]; struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
bool preamble_presented; bool preamble_presented;
enum amd_sched_priority init_priority;
enum amd_sched_priority override_priority;
struct mutex lock;
}; };
struct amdgpu_ctx_mgr { struct amdgpu_ctx_mgr {
@ -731,17 +752,22 @@ struct amdgpu_ctx_mgr {
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx); int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
struct dma_fence *fence); struct dma_fence *fence, uint64_t *seq);
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq); struct amdgpu_ring *ring, uint64_t seq);
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
enum amd_sched_priority priority);
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp); struct drm_file *filp);
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr); void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr); void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
/* /*
* file private structure * file private structure
*/ */
@ -753,7 +779,6 @@ struct amdgpu_fpriv {
struct mutex bo_list_lock; struct mutex bo_list_lock;
struct idr bo_list_handles; struct idr bo_list_handles;
struct amdgpu_ctx_mgr ctx_mgr; struct amdgpu_ctx_mgr ctx_mgr;
u32 vram_lost_counter;
}; };
/* /*
@ -854,7 +879,7 @@ struct amdgpu_mec {
struct amdgpu_kiq { struct amdgpu_kiq {
u64 eop_gpu_addr; u64 eop_gpu_addr;
struct amdgpu_bo *eop_obj; struct amdgpu_bo *eop_obj;
-	struct mutex		ring_mutex;
+	spinlock_t		ring_lock;
struct amdgpu_ring ring; struct amdgpu_ring ring;
struct amdgpu_irq_src irq; struct amdgpu_irq_src irq;
}; };
@ -1014,11 +1039,14 @@ struct amdgpu_gfx {
/* reset mask */ /* reset mask */
uint32_t grbm_soft_reset; uint32_t grbm_soft_reset;
uint32_t srbm_soft_reset; uint32_t srbm_soft_reset;
bool in_reset;
/* s3/s4 mask */ /* s3/s4 mask */
bool in_suspend; bool in_suspend;
/* NGG */ /* NGG */
struct amdgpu_ngg ngg; struct amdgpu_ngg ngg;
/* pipe reservation */
struct mutex pipe_reserve_mutex;
DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
}; };
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@ -1056,6 +1084,7 @@ struct amdgpu_cs_parser {
/* buffer objects */ /* buffer objects */
struct ww_acquire_ctx ticket; struct ww_acquire_ctx ticket;
struct amdgpu_bo_list *bo_list; struct amdgpu_bo_list *bo_list;
struct amdgpu_mn *mn;
struct amdgpu_bo_list_entry vm_pd; struct amdgpu_bo_list_entry vm_pd;
struct list_head validated; struct list_head validated;
struct dma_fence *fence; struct dma_fence *fence;
@ -1096,6 +1125,7 @@ struct amdgpu_job {
uint32_t gds_base, gds_size; uint32_t gds_base, gds_size;
uint32_t gws_base, gws_size; uint32_t gws_base, gws_size;
uint32_t oa_base, oa_size; uint32_t oa_base, oa_size;
uint32_t vram_lost_counter;
/* user fence handling */ /* user fence handling */
uint64_t uf_addr; uint64_t uf_addr;
@ -1121,7 +1151,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
/* /*
* Writeback * Writeback
*/ */
-#define AMDGPU_MAX_WB 1024	/* Reserve at most 1024 WB slots for amdgpu-owned rings. */
+#define AMDGPU_MAX_WB 512	/* Reserve at most 512 WB slots for amdgpu-owned rings. */
struct amdgpu_wb { struct amdgpu_wb {
struct amdgpu_bo *wb_obj; struct amdgpu_bo *wb_obj;
@ -1183,6 +1213,9 @@ struct amdgpu_firmware {
/* gpu info firmware data pointer */ /* gpu info firmware data pointer */
const struct firmware *gpu_info_fw; const struct firmware *gpu_info_fw;
void *fw_buf_ptr;
uint64_t fw_buf_mc;
}; };
/* /*
@ -1196,20 +1229,6 @@ void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
*/ */
void amdgpu_test_moves(struct amdgpu_device *adev); void amdgpu_test_moves(struct amdgpu_device *adev);
/*
* MMU Notifier
*/
#if defined(CONFIG_MMU_NOTIFIER)
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_mn_unregister(struct amdgpu_bo *bo);
#else
static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
return -ENODEV;
}
static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
#endif
/* /*
* Debugfs * Debugfs
*/ */
@ -1305,6 +1324,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp); struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp); struct drm_file *filp);
@ -1370,6 +1391,18 @@ struct amdgpu_atcs {
struct amdgpu_atcs_functions functions; struct amdgpu_atcs_functions functions;
}; };
/*
* Firmware VRAM reservation
*/
struct amdgpu_fw_vram_usage {
u64 start_offset;
u64 size;
struct amdgpu_bo *reserved_bo;
void *va;
};
int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev);
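The amdgpu_fw_vram_usage bookkeeping added above records a VRAM window (byte offset plus size) that the firmware asks the driver to leave alone, together with the BO and CPU mapping used to back it. The body of amdgpu_fw_reserve_vram_init() is not part of this hunk; the sketch below only illustrates the idea using BO helpers that appear elsewhere in this series, and is not the actual implementation.

/* Illustration only: pin a kernel BO over the firmware-reserved window so
 * nothing else is ever placed there.  Error unwinding is omitted. */
static int example_fw_reserve_vram(struct amdgpu_device *adev)
{
	struct amdgpu_fw_vram_usage *u = &adev->fw_vram_usage;
	u64 gpu_addr;
	int r;

	u->va = NULL;
	u->reserved_bo = NULL;
	if (!u->size)
		return 0;

	r = amdgpu_bo_create(adev, u->size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, 0, &u->reserved_bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(u->reserved_bo, false);
	if (r)
		return r;

	/* pin exactly at the offset the firmware requested */
	r = amdgpu_bo_pin_restricted(u->reserved_bo, AMDGPU_GEM_DOMAIN_VRAM,
				     u->start_offset,
				     u->start_offset + u->size, &gpu_addr);
	if (!r)
		r = amdgpu_bo_kmap(u->reserved_bo, &u->va);

	amdgpu_bo_unreserve(u->reserved_bo);
	return r;
}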
/* /*
* CGS * CGS
*/ */
@ -1519,7 +1552,6 @@ struct amdgpu_device {
/* powerplay */ /* powerplay */
struct amd_powerplay powerplay; struct amd_powerplay powerplay;
bool pp_enabled;
bool pp_force_state_enabled; bool pp_force_state_enabled;
/* dpm */ /* dpm */
@ -1575,6 +1607,8 @@ struct amdgpu_device {
struct delayed_work late_init_work; struct delayed_work late_init_work;
struct amdgpu_virt virt; struct amdgpu_virt virt;
/* firmware VRAM reservation */
struct amdgpu_fw_vram_usage fw_vram_usage;
/* link all shadow bo */ /* link all shadow bo */
struct list_head shadow_list; struct list_head shadow_list;
@ -1592,6 +1626,7 @@ struct amdgpu_device {
/* record last mm index being written through WREG32*/ /* record last mm index being written through WREG32*/
unsigned long last_mm_index; unsigned long last_mm_index;
bool in_sriov_reset;
}; };
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@ -1759,6 +1794,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r)) #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o)) #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc)) #define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
@ -1791,18 +1827,6 @@ void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
u64 num_vis_bytes); u64 num_vis_bytes);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain); void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end);
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
int *last_invalidated);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem);
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base); void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc); void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size); void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
@ -1836,8 +1860,6 @@ static inline bool amdgpu_has_atpx(void) { return false; }
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[]; extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl; extern const int amdgpu_max_kms_ioctl;
bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv);
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags); int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev); void amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev); void amdgpu_driver_lastclose_kms(struct drm_device *dev);
@ -1885,10 +1907,9 @@ static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
#endif #endif
-struct amdgpu_bo_va_mapping *
-amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
-		       uint64_t addr, struct amdgpu_bo **bo);
-int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser);
+int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+			   uint64_t addr, struct amdgpu_bo **bo,
+			   struct amdgpu_bo_va_mapping **mapping);
#include "amdgpu_object.h" #include "amdgpu_object.h"
#endif #endif


@ -35,41 +35,50 @@
#include "acp_gfx_if.h" #include "acp_gfx_if.h"
#define ACP_TILE_ON_MASK 0x03 #define ACP_TILE_ON_MASK 0x03
#define ACP_TILE_OFF_MASK 0x02 #define ACP_TILE_OFF_MASK 0x02
#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f #define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20 #define ACP_TILE_OFF_RETAIN_REG_MASK 0x20
#define ACP_TILE_P1_MASK 0x3e #define ACP_TILE_P1_MASK 0x3e
#define ACP_TILE_P2_MASK 0x3d #define ACP_TILE_P2_MASK 0x3d
#define ACP_TILE_DSP0_MASK 0x3b #define ACP_TILE_DSP0_MASK 0x3b
#define ACP_TILE_DSP1_MASK 0x37 #define ACP_TILE_DSP1_MASK 0x37
#define ACP_TILE_DSP2_MASK 0x2f #define ACP_TILE_DSP2_MASK 0x2f
#define ACP_DMA_REGS_END 0x146c0 #define ACP_DMA_REGS_END 0x146c0
#define ACP_I2S_PLAY_REGS_START 0x14840 #define ACP_I2S_PLAY_REGS_START 0x14840
#define ACP_I2S_PLAY_REGS_END 0x148b4 #define ACP_I2S_PLAY_REGS_END 0x148b4
#define ACP_I2S_CAP_REGS_START 0x148b8 #define ACP_I2S_CAP_REGS_START 0x148b8
#define ACP_I2S_CAP_REGS_END 0x1496c #define ACP_I2S_CAP_REGS_END 0x1496c
#define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac #define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8 #define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c #define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68 #define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
#define mmACP_PGFSM_RETAIN_REG 0x51c9 #define mmACP_PGFSM_RETAIN_REG 0x51c9
#define mmACP_PGFSM_CONFIG_REG 0x51ca #define mmACP_PGFSM_CONFIG_REG 0x51ca
#define mmACP_PGFSM_READ_REG_0 0x51cc #define mmACP_PGFSM_READ_REG_0 0x51cc
#define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8 #define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9 #define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa #define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb #define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb
-#define ACP_TIMEOUT_LOOP			0x000000FF
-#define ACP_DEVS				3
-#define ACP_SRC_ID				162
+#define mmACP_CONTROL				0x5131
+#define mmACP_STATUS				0x5133
+#define mmACP_SOFT_RESET			0x5134
+#define ACP_CONTROL__ClkEn_MASK			0x1
+#define ACP_SOFT_RESET__SoftResetAud_MASK	0x100
+#define ACP_SOFT_RESET__SoftResetAudDone_MASK	0x1000000
+#define ACP_CLOCK_EN_TIME_OUT_VALUE		0x000000FF
+#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE	0x000000FF
+#define ACP_TIMEOUT_LOOP			0x000000FF
+#define ACP_DEVS				3
+#define ACP_SRC_ID				162
enum { enum {
ACP_TILE_P1 = 0, ACP_TILE_P1 = 0,
@ -260,6 +269,8 @@ static int acp_hw_init(void *handle)
{ {
int r, i; int r, i;
uint64_t acp_base; uint64_t acp_base;
u32 val = 0;
u32 count = 0;
struct device *dev; struct device *dev;
struct i2s_platform_data *i2s_pdata; struct i2s_platform_data *i2s_pdata;
@ -402,6 +413,46 @@ static int acp_hw_init(void *handle)
} }
} }
/* Assert Soft reset of ACP */
val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
val |= ACP_SOFT_RESET__SoftResetAud_MASK;
cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
while (true) {
val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
(val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
break;
if (--count == 0) {
dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
return -ETIMEDOUT;
}
udelay(100);
}
/* Enable clock to ACP and wait until the clock is enabled */
val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
val = val | ACP_CONTROL__ClkEn_MASK;
cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);
count = ACP_CLOCK_EN_TIME_OUT_VALUE;
while (true) {
val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
if (val & (u32) 0x1)
break;
if (--count == 0) {
dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
return -ETIMEDOUT;
}
udelay(100);
}
/* Deassert the SOFT RESET flags */
val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
return 0; return 0;
} }
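The reset and clock-enable sequences added above, and the mirror-image ones in the acp_hw_fini() hunk below, all follow the same pattern: write a control register, then poll a status bit with udelay() until it flips or a retry budget runs out. A possible factoring of that loop, sketched with the CGS accessors used in this file; the helper name is illustrative and not part of the patch.

/* Sketch: poll an ACP register until (val & mask) == expected, or time out. */
static int acp_poll_reg(struct amdgpu_device *adev, u32 reg, u32 mask,
			u32 expected, u32 tries)
{
	u32 val;

	while (tries--) {
		val = cgs_read_register(adev->acp.cgs_device, reg);
		if ((val & mask) == expected)
			return 0;
		udelay(100);
	}

	dev_err(&adev->pdev->dev, "ACP register 0x%x poll timed out\n", reg);
	return -ETIMEDOUT;
}

With such a helper the soft-reset wait above would reduce to a single call: acp_poll_reg(adev, mmACP_SOFT_RESET, ACP_SOFT_RESET__SoftResetAudDone_MASK, ACP_SOFT_RESET__SoftResetAudDone_MASK, ACP_SOFT_RESET_DONE_TIME_OUT_VALUE).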
@ -414,6 +465,8 @@ static int acp_hw_init(void *handle)
static int acp_hw_fini(void *handle) static int acp_hw_fini(void *handle)
{ {
int i, ret; int i, ret;
u32 val = 0;
u32 count = 0;
struct device *dev; struct device *dev;
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@ -421,6 +474,42 @@ static int acp_hw_fini(void *handle)
if (!adev->acp.acp_cell) if (!adev->acp.acp_cell)
return 0; return 0;
/* Assert Soft reset of ACP */
val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
val |= ACP_SOFT_RESET__SoftResetAud_MASK;
cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
while (true) {
val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
(val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
break;
if (--count == 0) {
dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
return -ETIMEDOUT;
}
udelay(100);
}
/* Disable ACP clock */
val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
val &= ~ACP_CONTROL__ClkEn_MASK;
cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);
count = ACP_CLOCK_EN_TIME_OUT_VALUE;
while (true) {
val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
if (val & (u32) 0x1)
break;
if (--count == 0) {
dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
return -ETIMEDOUT;
}
udelay(100);
}
if (adev->acp.acp_genpd) { if (adev->acp.acp_genpd) {
for (i = 0; i < ACP_DEVS ; i++) { for (i = 0; i < ACP_DEVS ; i++) {
dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);


@ -169,6 +169,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_vmem_size = get_vmem_size, .get_vmem_size = get_vmem_size,
.get_gpu_clock_counter = get_gpu_clock_counter, .get_gpu_clock_counter = get_gpu_clock_counter,
.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz, .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
.alloc_pasid = amdgpu_vm_alloc_pasid,
.free_pasid = amdgpu_vm_free_pasid,
.program_sh_mem_settings = kgd_program_sh_mem_settings, .program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_pipeline = kgd_init_pipeline, .init_pipeline = kgd_init_pipeline,
@ -336,6 +338,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
struct cik_mqd *m; struct cik_mqd *m;
uint32_t *mqd_hqd; uint32_t *mqd_hqd;
uint32_t reg, wptr_val, data; uint32_t reg, wptr_val, data;
bool valid_wptr = false;
m = get_mqd(mqd); m = get_mqd(mqd);
@ -354,7 +357,14 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data); WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
-	if (read_user_wptr(mm, wptr, wptr_val))
+	/* read_user_ptr may take the mm->mmap_sem.
+	 * release srbm_mutex to avoid circular dependency between
+	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
+	 */
+	release_queue(kgd);
+	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
+	acquire_queue(kgd, pipe_id, queue_id);
+	if (valid_wptr)
WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask); WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);


@ -128,6 +128,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_vmem_size = get_vmem_size, .get_vmem_size = get_vmem_size,
.get_gpu_clock_counter = get_gpu_clock_counter, .get_gpu_clock_counter = get_gpu_clock_counter,
.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz, .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
.alloc_pasid = amdgpu_vm_alloc_pasid,
.free_pasid = amdgpu_vm_free_pasid,
.program_sh_mem_settings = kgd_program_sh_mem_settings, .program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_pipeline = kgd_init_pipeline, .init_pipeline = kgd_init_pipeline,
@ -290,6 +292,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
struct vi_mqd *m; struct vi_mqd *m;
uint32_t *mqd_hqd; uint32_t *mqd_hqd;
uint32_t reg, wptr_val, data; uint32_t reg, wptr_val, data;
bool valid_wptr = false;
m = get_mqd(mqd); m = get_mqd(mqd);
@ -337,7 +340,14 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data); WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
-	if (read_user_wptr(mm, wptr, wptr_val))
+	/* read_user_ptr may take the mm->mmap_sem.
+	 * release srbm_mutex to avoid circular dependency between
+	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
+	 */
+	release_queue(kgd);
+	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
+	acquire_queue(kgd, pipe_id, queue_id);
+	if (valid_wptr)
WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask); WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);


@ -1766,34 +1766,32 @@ bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev)
return true; return true;
} }
-/* Atom needs data in little endian format
- * so swap as appropriate when copying data to
- * or from atom. Note that atom operates on
- * dw units.
+/* Atom needs data in little endian format so swap as appropriate when copying
+ * data to or from atom. Note that atom operates on dw units.
+ *
+ * Use to_le=true when sending data to atom and provide at least
+ * ALIGN(num_bytes,4) bytes in the dst buffer.
+ *
+ * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
+ * bytes in the src buffer.
  */
 void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
 {
 #ifdef __BIG_ENDIAN
-	u8 src_tmp[20], dst_tmp[20];	/* used for byteswapping */
-	u32 *dst32, *src32;
+	u32 src_tmp[5], dst_tmp[5];
 	int i;
+	u8 align_num_bytes = ALIGN(num_bytes, 4);
 
-	memcpy(src_tmp, src, num_bytes);
-	src32 = (u32 *)src_tmp;
-	dst32 = (u32 *)dst_tmp;
 	if (to_le) {
-		for (i = 0; i < ((num_bytes + 3) / 4); i++)
-			dst32[i] = cpu_to_le32(src32[i]);
-		memcpy(dst, dst_tmp, num_bytes);
+		memcpy(src_tmp, src, num_bytes);
+		for (i = 0; i < align_num_bytes / 4; i++)
+			dst_tmp[i] = cpu_to_le32(src_tmp[i]);
+		memcpy(dst, dst_tmp, align_num_bytes);
 	} else {
-		u8 dws = num_bytes & ~3;
-		for (i = 0; i < ((num_bytes + 3) / 4); i++)
-			dst32[i] = le32_to_cpu(src32[i]);
-		memcpy(dst, dst_tmp, dws);
-		if (num_bytes % 4) {
-			for (i = 0; i < (num_bytes % 4); i++)
-				dst[dws+i] = dst_tmp[dws+i];
-		}
+		memcpy(src_tmp, src, align_num_bytes);
+		for (i = 0; i < align_num_bytes / 4; i++)
+			dst_tmp[i] = le32_to_cpu(src_tmp[i]);
+		memcpy(dst, dst_tmp, num_bytes);
 	}
 #else
 	memcpy(dst, src, num_bytes);
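The updated comment documents the buffer-size contract: when sending to atom, dst must hold at least ALIGN(num_bytes, 4) bytes because the swap is done a whole dword at a time. A small stand-alone illustration of that dword-at-a-time approach, using generic kernel helpers rather than amdgpu code, and assuming callers pass at most 20 bytes as the driver does.

#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/byteorder.h>

/* Copy num_bytes out to a little-endian consumer from a big-endian host,
 * swapping whole 32-bit words over the ALIGN()ed length. */
static void copy_swap_to_le(u8 *dst, const u8 *src, u8 num_bytes)
{
	u32 tmp[5];			/* enough for num_bytes <= 20 */
	u8 aligned = ALIGN(num_bytes, 4);
	int i;

	memcpy(tmp, src, num_bytes);
	for (i = 0; i < aligned / 4; i++)
		tmp[i] = cpu_to_le32(tmp[i]);
	memcpy(dst, tmp, aligned);	/* dst must have ALIGN(num_bytes, 4) room */
}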
@ -1807,6 +1805,8 @@ int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
uint16_t data_offset; uint16_t data_offset;
int usage_bytes = 0; int usage_bytes = 0;
struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage; struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
u64 start_addr;
u64 size;
if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
@ -1815,7 +1815,21 @@ int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware), le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb)); le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
-		usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
+		start_addr = firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware;
size = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb;
if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
/* Firmware request VRAM reservation for SR-IOV */
adev->fw_vram_usage.start_offset = (start_addr &
(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
adev->fw_vram_usage.size = size << 10;
/* Use the default scratch size */
usage_bytes = 0;
} else {
usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
}
} }
ctx->scratch_size_bytes = 0; ctx->scratch_size_bytes = 0;
if (usage_bytes == 0) if (usage_bytes == 0)


@ -71,19 +71,33 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
struct atom_context *ctx = adev->mode_info.atom_context; struct atom_context *ctx = adev->mode_info.atom_context;
int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
vram_usagebyfirmware); vram_usagebyfirmware);
struct vram_usagebyfirmware_v2_1 * firmware_usage;
uint32_t start_addr, size;
uint16_t data_offset; uint16_t data_offset;
int usage_bytes = 0; int usage_bytes = 0;
if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
-		struct vram_usagebyfirmware_v2_1 *firmware_usage =
-			(struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
+		firmware_usage = (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n", DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n",
le32_to_cpu(firmware_usage->start_address_in_kb), le32_to_cpu(firmware_usage->start_address_in_kb),
le16_to_cpu(firmware_usage->used_by_firmware_in_kb), le16_to_cpu(firmware_usage->used_by_firmware_in_kb),
le16_to_cpu(firmware_usage->used_by_driver_in_kb)); le16_to_cpu(firmware_usage->used_by_driver_in_kb));
-		usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) * 1024;
+		start_addr = le32_to_cpu(firmware_usage->start_address_in_kb);
size = le16_to_cpu(firmware_usage->used_by_firmware_in_kb);
if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
/* Firmware request VRAM reservation for SR-IOV */
adev->fw_vram_usage.start_offset = (start_addr &
(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
adev->fw_vram_usage.size = size << 10;
/* Use the default scratch size */
usage_bytes = 0;
} else {
usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) << 10;
}
} }
ctx->scratch_size_bytes = 0; ctx->scratch_size_bytes = 0;
if (usage_bytes == 0) if (usage_bytes == 0)


@ -42,10 +42,31 @@ struct amdgpu_cgs_device {
struct amdgpu_device *adev = \ struct amdgpu_device *adev = \
((struct amdgpu_cgs_device *)cgs_device)->adev ((struct amdgpu_cgs_device *)cgs_device)->adev
static void *amdgpu_cgs_register_pp_handle(struct cgs_device *cgs_device,
int (*call_back_func)(struct amd_pp_init *, void **))
{
CGS_FUNC_ADEV;
struct amd_pp_init pp_init;
struct amd_powerplay *amd_pp;
if (call_back_func == NULL)
return NULL;
amd_pp = &(adev->powerplay);
pp_init.chip_family = adev->family;
pp_init.chip_id = adev->asic_type;
pp_init.pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
pp_init.feature_mask = amdgpu_pp_feature_mask;
pp_init.device = cgs_device;
if (call_back_func(&pp_init, &(amd_pp->pp_handle)))
return NULL;
return adev->powerplay.pp_handle;
}
static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device, static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
enum cgs_gpu_mem_type type, enum cgs_gpu_mem_type type,
uint64_t size, uint64_t align, uint64_t size, uint64_t align,
uint64_t min_offset, uint64_t max_offset,
cgs_handle_t *handle) cgs_handle_t *handle)
{ {
CGS_FUNC_ADEV; CGS_FUNC_ADEV;
@ -53,13 +74,6 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
int ret = 0; int ret = 0;
uint32_t domain = 0; uint32_t domain = 0;
struct amdgpu_bo *obj; struct amdgpu_bo *obj;
struct ttm_placement placement;
struct ttm_place place;
if (min_offset > max_offset) {
BUG_ON(1);
return -EINVAL;
}
/* fail if the alignment is not a power of 2 */ /* fail if the alignment is not a power of 2 */
if (((align != 1) && (align & (align - 1))) if (((align != 1) && (align & (align - 1)))
@ -73,41 +87,19 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM; domain = AMDGPU_GEM_DOMAIN_VRAM;
if (max_offset > adev->mc.real_vram_size)
return -EINVAL;
place.fpfn = min_offset >> PAGE_SHIFT;
place.lpfn = max_offset >> PAGE_SHIFT;
place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
break; break;
case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB: case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__INVISIBLE_FB: case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS | flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM; domain = AMDGPU_GEM_DOMAIN_VRAM;
if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
place.fpfn =
max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
place.lpfn =
min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
}
break; break;
case CGS_GPU_MEM_TYPE__GART_CACHEABLE: case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
domain = AMDGPU_GEM_DOMAIN_GTT; domain = AMDGPU_GEM_DOMAIN_GTT;
place.fpfn = min_offset >> PAGE_SHIFT;
place.lpfn = max_offset >> PAGE_SHIFT;
place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
break; break;
case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE: case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC; flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
domain = AMDGPU_GEM_DOMAIN_GTT; domain = AMDGPU_GEM_DOMAIN_GTT;
place.fpfn = min_offset >> PAGE_SHIFT;
place.lpfn = max_offset >> PAGE_SHIFT;
place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
TTM_PL_FLAG_UNCACHED;
break; break;
default: default:
return -EINVAL; return -EINVAL;
@ -116,15 +108,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
*handle = 0; *handle = 0;
-	placement.placement = &place;
-	placement.num_placement = 1;
-	placement.busy_placement = &place;
-	placement.num_busy_placement = 1;
-
-	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
-					  true, domain, flags,
-					  NULL, &placement, NULL,
-					  0, &obj);
+	ret = amdgpu_bo_create(adev, size, align, true, domain, flags,
+			       NULL, NULL, 0, &obj);
if (ret) { if (ret) {
DRM_ERROR("(%d) bo create failed\n", ret); DRM_ERROR("(%d) bo create failed\n", ret);
return ret; return ret;
@ -155,19 +140,14 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
uint64_t *mcaddr) uint64_t *mcaddr)
{ {
int r; int r;
u64 min_offset, max_offset;
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
WARN_ON_ONCE(obj->placement.num_placement > 1); WARN_ON_ONCE(obj->placement.num_placement > 1);
min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
max_offset = obj->placements[0].lpfn << PAGE_SHIFT;
r = amdgpu_bo_reserve(obj, true); r = amdgpu_bo_reserve(obj, true);
if (unlikely(r != 0)) if (unlikely(r != 0))
return r; return r;
-	r = amdgpu_bo_pin_restricted(obj, obj->preferred_domains,
-				     min_offset, max_offset, mcaddr);
+	r = amdgpu_bo_pin(obj, obj->preferred_domains, mcaddr);
amdgpu_bo_unreserve(obj); amdgpu_bo_unreserve(obj);
return r; return r;
} }
@ -675,6 +655,85 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (!adev->pm.fw) { if (!adev->pm.fw) {
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_TAHITI:
strcpy(fw_name, "radeon/tahiti_smc.bin");
break;
case CHIP_PITCAIRN:
if ((adev->pdev->revision == 0x81) &&
((adev->pdev->device == 0x6810) ||
(adev->pdev->device == 0x6811))) {
info->is_kicker = true;
strcpy(fw_name, "radeon/pitcairn_k_smc.bin");
} else {
strcpy(fw_name, "radeon/pitcairn_smc.bin");
}
break;
case CHIP_VERDE:
if (((adev->pdev->device == 0x6820) &&
((adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0x83))) ||
((adev->pdev->device == 0x6821) &&
((adev->pdev->revision == 0x83) ||
(adev->pdev->revision == 0x87))) ||
((adev->pdev->revision == 0x87) &&
((adev->pdev->device == 0x6823) ||
(adev->pdev->device == 0x682b)))) {
info->is_kicker = true;
strcpy(fw_name, "radeon/verde_k_smc.bin");
} else {
strcpy(fw_name, "radeon/verde_smc.bin");
}
break;
case CHIP_OLAND:
if (((adev->pdev->revision == 0x81) &&
((adev->pdev->device == 0x6600) ||
(adev->pdev->device == 0x6604) ||
(adev->pdev->device == 0x6605) ||
(adev->pdev->device == 0x6610))) ||
((adev->pdev->revision == 0x83) &&
(adev->pdev->device == 0x6610))) {
info->is_kicker = true;
strcpy(fw_name, "radeon/oland_k_smc.bin");
} else {
strcpy(fw_name, "radeon/oland_smc.bin");
}
break;
case CHIP_HAINAN:
if (((adev->pdev->revision == 0x81) &&
(adev->pdev->device == 0x6660)) ||
((adev->pdev->revision == 0x83) &&
((adev->pdev->device == 0x6660) ||
(adev->pdev->device == 0x6663) ||
(adev->pdev->device == 0x6665) ||
(adev->pdev->device == 0x6667)))) {
info->is_kicker = true;
strcpy(fw_name, "radeon/hainan_k_smc.bin");
} else if ((adev->pdev->revision == 0xc3) &&
(adev->pdev->device == 0x6665)) {
info->is_kicker = true;
strcpy(fw_name, "radeon/banks_k_2_smc.bin");
} else {
strcpy(fw_name, "radeon/hainan_smc.bin");
}
break;
case CHIP_BONAIRE:
if ((adev->pdev->revision == 0x80) ||
(adev->pdev->revision == 0x81) ||
(adev->pdev->device == 0x665f)) {
info->is_kicker = true;
strcpy(fw_name, "radeon/bonaire_k_smc.bin");
} else {
strcpy(fw_name, "radeon/bonaire_smc.bin");
}
break;
case CHIP_HAWAII:
if (adev->pdev->revision == 0x80) {
info->is_kicker = true;
strcpy(fw_name, "radeon/hawaii_k_smc.bin");
} else {
strcpy(fw_name, "radeon/hawaii_smc.bin");
}
break;
case CHIP_TOPAZ: case CHIP_TOPAZ:
if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) || if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) || ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
@ -838,6 +897,9 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID: case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
sys_info->value = adev->pdev->subsystem_vendor; sys_info->value = adev->pdev->subsystem_vendor;
break; break;
case CGS_SYSTEM_INFO_PCIE_BUS_DEVFN:
sys_info->value = adev->pdev->devfn;
break;
default: default:
return -ENODEV; return -ENODEV;
} }
@ -1139,6 +1201,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled, .is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
.enter_safe_mode = amdgpu_cgs_enter_safe_mode, .enter_safe_mode = amdgpu_cgs_enter_safe_mode,
.lock_grbm_idx = amdgpu_cgs_lock_grbm_idx, .lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
.register_pp_handle = amdgpu_cgs_register_pp_handle,
}; };
static const struct cgs_os_ops amdgpu_cgs_os_ops = { static const struct cgs_os_ops amdgpu_cgs_os_ops = {


@ -231,7 +231,7 @@ amdgpu_connector_update_scratch_regs(struct drm_connector *connector,
if (connector->encoder_ids[i] == 0) if (connector->encoder_ids[i] == 0)
break; break;
encoder = drm_encoder_find(connector->dev, encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]); connector->encoder_ids[i]);
if (!encoder) if (!encoder)
continue; continue;
@ -256,7 +256,7 @@ amdgpu_connector_find_encoder(struct drm_connector *connector,
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0) if (connector->encoder_ids[i] == 0)
break; break;
encoder = drm_encoder_find(connector->dev, encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]); connector->encoder_ids[i]);
if (!encoder) if (!encoder)
continue; continue;
@ -346,10 +346,8 @@ static void amdgpu_connector_free_edid(struct drm_connector *connector)
{ {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-	if (amdgpu_connector->edid) {
-		kfree(amdgpu_connector->edid);
-		amdgpu_connector->edid = NULL;
-	}
+	kfree(amdgpu_connector->edid);
+	amdgpu_connector->edid = NULL;
} }
static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector) static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
@ -374,7 +372,7 @@ amdgpu_connector_best_single_encoder(struct drm_connector *connector)
/* pick the encoder ids */ /* pick the encoder ids */
if (enc_id) if (enc_id)
return drm_encoder_find(connector->dev, enc_id); return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL; return NULL;
} }
@ -1079,7 +1077,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
if (connector->encoder_ids[i] == 0) if (connector->encoder_ids[i] == 0)
break; break;
encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder) if (!encoder)
continue; continue;
@ -1136,7 +1134,7 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
if (connector->encoder_ids[i] == 0) if (connector->encoder_ids[i] == 0)
break; break;
encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder) if (!encoder)
continue; continue;
@ -1155,7 +1153,7 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
/* then check use digitial */ /* then check use digitial */
/* pick the first one */ /* pick the first one */
if (enc_id) if (enc_id)
return drm_encoder_find(connector->dev, enc_id); return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL; return NULL;
} }
@ -1296,7 +1294,7 @@ u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
if (connector->encoder_ids[i] == 0) if (connector->encoder_ids[i] == 0)
break; break;
encoder = drm_encoder_find(connector->dev, encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]); connector->encoder_ids[i]);
if (!encoder) if (!encoder)
continue; continue;
@ -1325,7 +1323,7 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector)
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0) if (connector->encoder_ids[i] == 0)
break; break;
encoder = drm_encoder_find(connector->dev, encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]); connector->encoder_ids[i]);
if (!encoder) if (!encoder)
continue; continue;


@ -25,6 +25,7 @@
* Jerome Glisse <glisse@freedesktop.org> * Jerome Glisse <glisse@freedesktop.org>
*/ */
#include <linux/pagemap.h> #include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <drm/drmP.h> #include <drm/drmP.h>
#include <drm/amdgpu_drm.h> #include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h> #include <drm/drm_syncobj.h>
@ -89,12 +90,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
goto free_chunk; goto free_chunk;
} }
mutex_lock(&p->ctx->lock);
/* get chunks */ /* get chunks */
chunk_array_user = u64_to_user_ptr(cs->in.chunks); chunk_array_user = u64_to_user_ptr(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user, if (copy_from_user(chunk_array, chunk_array_user,
sizeof(uint64_t)*cs->in.num_chunks)) { sizeof(uint64_t)*cs->in.num_chunks)) {
ret = -EFAULT; ret = -EFAULT;
-		goto put_ctx;
+		goto free_chunk;
} }
p->nchunks = cs->in.num_chunks; p->nchunks = cs->in.num_chunks;
@ -102,7 +105,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
GFP_KERNEL); GFP_KERNEL);
if (!p->chunks) { if (!p->chunks) {
ret = -ENOMEM; ret = -ENOMEM;
-		goto put_ctx;
+		goto free_chunk;
} }
for (i = 0; i < p->nchunks; i++) { for (i = 0; i < p->nchunks; i++) {
@ -169,6 +172,11 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
if (ret) if (ret)
goto free_all_kdata; goto free_all_kdata;
if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
ret = -ECANCELED;
goto free_all_kdata;
}
if (p->uf_entry.robj) if (p->uf_entry.robj)
p->job->uf_addr = uf_offset; p->job->uf_addr = uf_offset;
kfree(chunk_array); kfree(chunk_array);
@ -182,8 +190,6 @@ free_partial_kdata:
kfree(p->chunks); kfree(p->chunks);
p->chunks = NULL; p->chunks = NULL;
p->nchunks = 0; p->nchunks = 0;
put_ctx:
amdgpu_ctx_put(p->ctx);
free_chunk: free_chunk:
kfree(chunk_array); kfree(chunk_array);
@ -473,11 +479,16 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
return -EPERM; return -EPERM;
/* Check if we have user pages and nobody bound the BO already */ /* Check if we have user pages and nobody bound the BO already */
-		if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
-			size_t size = sizeof(struct page *);
-
-			size *= bo->tbo.ttm->num_pages;
-			memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
+		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
+		    lobj->user_pages) {
+			amdgpu_ttm_placement_from_domain(bo,
+							 AMDGPU_GEM_DOMAIN_CPU);
+			r = ttm_bo_validate(&bo->tbo, &bo->placement, true,
+					    false);
+			if (r)
+				return r;
+
+			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
+						     lobj->user_pages);
binding_userptr = true; binding_userptr = true;
} }
@ -502,7 +513,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv; struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_bo_list_entry *e; struct amdgpu_bo_list_entry *e;
struct list_head duplicates; struct list_head duplicates;
bool need_mmap_lock = false;
unsigned i, tries = 10; unsigned i, tries = 10;
int r; int r;
@ -510,9 +520,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle); p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
if (p->bo_list) { if (p->bo_list) {
need_mmap_lock = p->bo_list->first_userptr !=
p->bo_list->num_entries;
amdgpu_bo_list_get_list(p->bo_list, &p->validated); amdgpu_bo_list_get_list(p->bo_list, &p->validated);
if (p->bo_list->first_userptr != p->bo_list->num_entries)
p->mn = amdgpu_mn_get(p->adev);
} }
INIT_LIST_HEAD(&duplicates); INIT_LIST_HEAD(&duplicates);
@ -521,9 +531,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (p->uf_entry.robj) if (p->uf_entry.robj)
list_add(&p->uf_entry.tv.head, &p->validated); list_add(&p->uf_entry.tv.head, &p->validated);
if (need_mmap_lock)
down_read(&current->mm->mmap_sem);
while (1) { while (1) {
struct list_head need_pages; struct list_head need_pages;
unsigned i; unsigned i;
@ -543,22 +550,24 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
INIT_LIST_HEAD(&need_pages); INIT_LIST_HEAD(&need_pages);
for (i = p->bo_list->first_userptr; for (i = p->bo_list->first_userptr;
i < p->bo_list->num_entries; ++i) { i < p->bo_list->num_entries; ++i) {
struct amdgpu_bo *bo;
e = &p->bo_list->array[i]; e = &p->bo_list->array[i];
bo = e->robj;
if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm, if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
&e->user_invalidated) && e->user_pages) { &e->user_invalidated) && e->user_pages) {
/* We acquired a page array, but somebody /* We acquired a page array, but somebody
* invalidated it. Free it and try again * invalidated it. Free it and try again
*/ */
release_pages(e->user_pages, release_pages(e->user_pages,
e->robj->tbo.ttm->num_pages); bo->tbo.ttm->num_pages);
kvfree(e->user_pages); kvfree(e->user_pages);
e->user_pages = NULL; e->user_pages = NULL;
} }
if (e->robj->tbo.ttm->state != tt_bound && if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
!e->user_pages) { !e->user_pages) {
list_del(&e->tv.head); list_del(&e->tv.head);
list_add(&e->tv.head, &need_pages); list_add(&e->tv.head, &need_pages);
@ -635,9 +644,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved, amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
p->bytes_moved_vis); p->bytes_moved_vis);
fpriv->vm.last_eviction_counter =
atomic64_read(&p->adev->num_evictions);
if (p->bo_list) { if (p->bo_list) {
struct amdgpu_bo *gds = p->bo_list->gds_obj; struct amdgpu_bo *gds = p->bo_list->gds_obj;
struct amdgpu_bo *gws = p->bo_list->gws_obj; struct amdgpu_bo *gws = p->bo_list->gws_obj;
@ -678,9 +684,6 @@ error_validate:
error_free_pages: error_free_pages:
if (need_mmap_lock)
up_read(&current->mm->mmap_sem);
if (p->bo_list) { if (p->bo_list) {
for (i = p->bo_list->first_userptr; for (i = p->bo_list->first_userptr;
i < p->bo_list->num_entries; ++i) { i < p->bo_list->num_entries; ++i) {
@ -705,7 +708,8 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
list_for_each_entry(e, &p->validated, tv.head) { list_for_each_entry(e, &p->validated, tv.head) {
struct reservation_object *resv = e->robj->tbo.resv; struct reservation_object *resv = e->robj->tbo.resv;
r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp); r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
amdgpu_bo_explicit_sync(e->robj));
if (r) if (r)
return r; return r;
@ -726,11 +730,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
{ {
unsigned i; unsigned i;
-	if (!error)
-		ttm_eu_fence_buffer_objects(&parser->ticket,
-					    &parser->validated,
-					    parser->fence);
-	else if (backoff)
+	if (error && backoff)
ttm_eu_backoff_reservation(&parser->ticket, ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated); &parser->validated);
@ -740,8 +740,10 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
dma_fence_put(parser->fence); dma_fence_put(parser->fence);
if (parser->ctx) if (parser->ctx) {
mutex_unlock(&parser->ctx->lock);
amdgpu_ctx_put(parser->ctx); amdgpu_ctx_put(parser->ctx);
}
if (parser->bo_list) if (parser->bo_list)
amdgpu_bo_list_put(parser->bo_list); amdgpu_bo_list_put(parser->bo_list);
@ -766,10 +768,6 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (r) if (r)
return r; return r;
r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_dir_update);
if (r)
return r;
r = amdgpu_vm_clear_freed(adev, vm, NULL); r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r) if (r)
return r; return r;
@ -823,7 +821,13 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
} }
-	r = amdgpu_vm_clear_moved(adev, vm, &p->job->sync);
+	r = amdgpu_vm_handle_moved(adev, vm);
if (r)
return r;
r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update);
if (r)
return r;
if (amdgpu_vm_debug && p->bo_list) { if (amdgpu_vm_debug && p->bo_list) {
/* Invalidate all BOs to test for userspace bugs */ /* Invalidate all BOs to test for userspace bugs */
@ -833,7 +837,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (!bo) if (!bo)
continue; continue;
amdgpu_vm_bo_invalidate(adev, bo); amdgpu_vm_bo_invalidate(adev, bo, false);
} }
} }
@ -846,19 +850,63 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv; struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_ring *ring = p->job->ring; struct amdgpu_ring *ring = p->job->ring;
-	int i, r;
+	int r;

 	/* Only for UVD/VCE VM emulation */
-	if (ring->funcs->parse_cs) {
-		for (i = 0; i < p->job->num_ibs; i++) {
-			r = amdgpu_ring_parse_cs(ring, p, i);
+	if (p->job->ring->funcs->parse_cs) {
+		unsigned i, j;
+
for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
struct drm_amdgpu_cs_chunk_ib *chunk_ib;
struct amdgpu_bo_va_mapping *m;
struct amdgpu_bo *aobj = NULL;
struct amdgpu_cs_chunk *chunk;
struct amdgpu_ib *ib;
uint64_t offset;
uint8_t *kptr;
chunk = &p->chunks[i];
ib = &p->job->ibs[j];
chunk_ib = chunk->kdata;
if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
continue;
r = amdgpu_cs_find_mapping(p, chunk_ib->va_start,
&aobj, &m);
if (r) {
DRM_ERROR("IB va_start is invalid\n");
return r;
}
if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
(m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
DRM_ERROR("IB va_start+ib_bytes is invalid\n");
return -EINVAL;
}
/* the IB should be reserved at this point */
r = amdgpu_bo_kmap(aobj, (void **)&kptr);
if (r) {
return r;
}
offset = m->start * AMDGPU_GPU_PAGE_SIZE;
kptr += chunk_ib->va_start - offset;
memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
amdgpu_bo_kunmap(aobj);
r = amdgpu_ring_parse_cs(ring, p, j);
if (r) if (r)
return r; return r;
j++;
} }
} }
if (p->job->vm) { if (p->job->vm) {
p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.bo); p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
r = amdgpu_bo_vm_update_pte(p); r = amdgpu_bo_vm_update_pte(p);
if (r) if (r)
@ -920,54 +968,18 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
parser->job->ring = ring; parser->job->ring = ring;
if (ring->funcs->parse_cs) { r = amdgpu_ib_get(adev, vm,
struct amdgpu_bo_va_mapping *m; ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
struct amdgpu_bo *aobj = NULL; ib);
uint64_t offset; if (r) {
uint8_t *kptr; DRM_ERROR("Failed to get ib !\n");
return r;
m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
&aobj);
if (!aobj) {
DRM_ERROR("IB va_start is invalid\n");
return -EINVAL;
}
if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
(m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
DRM_ERROR("IB va_start+ib_bytes is invalid\n");
return -EINVAL;
}
/* the IB should be reserved at this point */
r = amdgpu_bo_kmap(aobj, (void **)&kptr);
if (r) {
return r;
}
offset = m->start * AMDGPU_GPU_PAGE_SIZE;
kptr += chunk_ib->va_start - offset;
r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
if (r) {
DRM_ERROR("Failed to get ib !\n");
return r;
}
memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
amdgpu_bo_kunmap(aobj);
} else {
r = amdgpu_ib_get(adev, vm, 0, ib);
if (r) {
DRM_ERROR("Failed to get ib !\n");
return r;
}
} }
ib->gpu_addr = chunk_ib->va_start; ib->gpu_addr = chunk_ib->va_start;
ib->length_dw = chunk_ib->ib_bytes / 4; ib->length_dw = chunk_ib->ib_bytes / 4;
ib->flags = chunk_ib->flags; ib->flags = chunk_ib->flags;
j++; j++;
} }
@ -977,7 +989,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE)) parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
return -EINVAL; return -EINVAL;
-	return 0;
+	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
} }
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
@ -1131,14 +1143,31 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
struct amdgpu_ring *ring = p->job->ring; struct amdgpu_ring *ring = p->job->ring;
struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity; struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
struct amdgpu_job *job; struct amdgpu_job *job;
unsigned i;
uint64_t seq;
int r; int r;
amdgpu_mn_lock(p->mn);
if (p->bo_list) {
for (i = p->bo_list->first_userptr;
i < p->bo_list->num_entries; ++i) {
struct amdgpu_bo *bo = p->bo_list->array[i].robj;
if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
amdgpu_mn_unlock(p->mn);
return -ERESTARTSYS;
}
}
}
job = p->job; job = p->job;
p->job = NULL; p->job = NULL;
r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp); r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
if (r) { if (r) {
amdgpu_job_free(job); amdgpu_job_free(job);
amdgpu_mn_unlock(p->mn);
return r; return r;
} }
@ -1146,21 +1175,36 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->fence_ctx = entity->fence_context; job->fence_ctx = entity->fence_context;
p->fence = dma_fence_get(&job->base.s_fence->finished); p->fence = dma_fence_get(&job->base.s_fence->finished);
r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
if (r) {
dma_fence_put(p->fence);
dma_fence_put(&job->base.s_fence->finished);
amdgpu_job_free(job);
amdgpu_mn_unlock(p->mn);
return r;
}
amdgpu_cs_post_dependencies(p); amdgpu_cs_post_dependencies(p);
-	cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
-	job->uf_sequence = cs->out.handle;
+	cs->out.handle = seq;
+	job->uf_sequence = seq;
amdgpu_job_free_resources(job); amdgpu_job_free_resources(job);
amdgpu_ring_priority_get(job->ring,
amd_sched_get_job_priority(&job->base));
trace_amdgpu_cs_ioctl(job); trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base); amd_sched_entity_push_job(&job->base);
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
amdgpu_mn_unlock(p->mn);
return 0; return 0;
} }
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{ {
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_cs *cs = data; union drm_amdgpu_cs *cs = data;
struct amdgpu_cs_parser parser = {}; struct amdgpu_cs_parser parser = {};
bool reserved_buffers = false; bool reserved_buffers = false;
@ -1168,8 +1212,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (!adev->accel_working) if (!adev->accel_working)
return -EBUSY; return -EBUSY;
if (amdgpu_kms_vram_lost(adev, fpriv))
return -ENODEV;
parser.adev = adev; parser.adev = adev;
parser.filp = filp; parser.filp = filp;
@ -1180,6 +1222,10 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
goto out; goto out;
} }
r = amdgpu_cs_ib_fill(adev, &parser);
if (r)
goto out;
r = amdgpu_cs_parser_bos(&parser, data); r = amdgpu_cs_parser_bos(&parser, data);
if (r) { if (r) {
if (r == -ENOMEM) if (r == -ENOMEM)
@ -1190,9 +1236,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
} }
reserved_buffers = true; reserved_buffers = true;
r = amdgpu_cs_ib_fill(adev, &parser);
if (r)
goto out;
r = amdgpu_cs_dependencies(adev, &parser); r = amdgpu_cs_dependencies(adev, &parser);
if (r) { if (r) {
@ -1228,16 +1271,12 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
{ {
union drm_amdgpu_wait_cs *wait = data; union drm_amdgpu_wait_cs *wait = data;
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout); unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
struct amdgpu_ring *ring = NULL; struct amdgpu_ring *ring = NULL;
struct amdgpu_ctx *ctx; struct amdgpu_ctx *ctx;
struct dma_fence *fence; struct dma_fence *fence;
long r; long r;
if (amdgpu_kms_vram_lost(adev, fpriv))
return -ENODEV;
ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id); ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
if (ctx == NULL) if (ctx == NULL)
return -EINVAL; return -EINVAL;
@ -1255,6 +1294,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
r = PTR_ERR(fence); r = PTR_ERR(fence);
else if (fence) { else if (fence) {
r = dma_fence_wait_timeout(fence, true, timeout); r = dma_fence_wait_timeout(fence, true, timeout);
if (r > 0 && fence->error)
r = fence->error;
dma_fence_put(fence); dma_fence_put(fence);
} else } else
r = 1; r = 1;
@ -1302,6 +1343,62 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
return fence; return fence;
} }
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
union drm_amdgpu_fence_to_handle *info = data;
struct dma_fence *fence;
struct drm_syncobj *syncobj;
struct sync_file *sync_file;
int fd, r;
fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
if (IS_ERR(fence))
return PTR_ERR(fence);
switch (info->in.what) {
case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
r = drm_syncobj_create(&syncobj, 0, fence);
dma_fence_put(fence);
if (r)
return r;
r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
drm_syncobj_put(syncobj);
return r;
case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
r = drm_syncobj_create(&syncobj, 0, fence);
dma_fence_put(fence);
if (r)
return r;
r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
drm_syncobj_put(syncobj);
return r;
case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0) {
dma_fence_put(fence);
return fd;
}
sync_file = sync_file_create(fence);
dma_fence_put(fence);
if (!sync_file) {
put_unused_fd(fd);
return -ENOMEM;
}
fd_install(fd, sync_file->file);
info->out.handle = fd;
return 0;
default:
return -EINVAL;
}
}
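The new ioctl above converts an already-submitted amdgpu fence into a syncobj handle, a syncobj fd, or a sync_file fd. A rough userspace sketch of driving it through libdrm follows; the union layout mirrors the kernel side shown here, but the exact field and constant names should be checked against the amdgpu_drm.h installed with this series.

#include <string.h>
#include <stdint.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

/* Ask the kernel to wrap a submitted fence in a pollable sync_file fd. */
static int fence_to_sync_file(int drm_fd, uint32_t ctx_id, uint32_t ip_type,
			      uint32_t ring, uint64_t seq_no)
{
	union drm_amdgpu_fence_to_handle args;

	memset(&args, 0, sizeof(args));
	args.in.fence.ctx_id = ctx_id;
	args.in.fence.ip_type = ip_type;
	args.in.fence.ring = ring;
	args.in.fence.seq_no = seq_no;
	args.in.what = AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD;

	if (drmCommandWriteRead(drm_fd, DRM_AMDGPU_FENCE_TO_HANDLE,
				&args, sizeof(args)))
		return -1;

	return (int)args.out.handle;	/* sync_file fd */
}

AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ and _GET_SYNCOBJ_FD work the same way, with out.handle then carrying a syncobj handle or fd instead.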
/** /**
* amdgpu_cs_wait_all_fence - wait on all fences to signal * amdgpu_cs_wait_all_fence - wait on all fences to signal
* *
@ -1336,6 +1433,9 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
if (r == 0) if (r == 0)
break; break;
if (fence->error)
return fence->error;
} }
memset(wait, 0, sizeof(*wait)); memset(wait, 0, sizeof(*wait));
@ -1381,6 +1481,7 @@ static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
array[i] = fence; array[i] = fence;
} else { /* NULL, the fence has been already signaled */ } else { /* NULL, the fence has been already signaled */
r = 1; r = 1;
first = i;
goto out; goto out;
} }
} }
@ -1395,7 +1496,7 @@ out:
wait->out.status = (r > 0); wait->out.status = (r > 0);
wait->out.first_signaled = first; wait->out.first_signaled = first;
/* set return value 0 to indicate success */ /* set return value 0 to indicate success */
-	r = 0;
+	r = array[first]->error;
err_free_fence_array: err_free_fence_array:
for (i = 0; i < fence_count; i++) for (i = 0; i < fence_count; i++)
@ -1416,15 +1517,12 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp) struct drm_file *filp)
{ {
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_wait_fences *wait = data; union drm_amdgpu_wait_fences *wait = data;
uint32_t fence_count = wait->in.fence_count; uint32_t fence_count = wait->in.fence_count;
struct drm_amdgpu_fence *fences_user; struct drm_amdgpu_fence *fences_user;
struct drm_amdgpu_fence *fences; struct drm_amdgpu_fence *fences;
int r; int r;
if (amdgpu_kms_vram_lost(adev, fpriv))
return -ENODEV;
/* Get the fences from userspace */ /* Get the fences from userspace */
fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence), fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
GFP_KERNEL); GFP_KERNEL);
@ -1460,78 +1558,36 @@ err_free_fences:
* virtual memory address. Returns allocation structure when found, NULL * virtual memory address. Returns allocation structure when found, NULL
* otherwise. * otherwise.
*/ */
struct amdgpu_bo_va_mapping * int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, uint64_t addr, struct amdgpu_bo **bo,
uint64_t addr, struct amdgpu_bo **bo) struct amdgpu_bo_va_mapping **map)
{ {
struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va_mapping *mapping; struct amdgpu_bo_va_mapping *mapping;
unsigned i; int r;
if (!parser->bo_list)
return NULL;
addr /= AMDGPU_GPU_PAGE_SIZE; addr /= AMDGPU_GPU_PAGE_SIZE;
for (i = 0; i < parser->bo_list->num_entries; i++) { mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
struct amdgpu_bo_list_entry *lobj; if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
return -EINVAL;
lobj = &parser->bo_list->array[i]; *bo = mapping->bo_va->base.bo;
if (!lobj->bo_va) *map = mapping;
continue;
list_for_each_entry(mapping, &lobj->bo_va->valids, list) { /* Double check that the BO is reserved by this CS */
if (mapping->start > addr || if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
addr > mapping->last) return -EINVAL;
continue;
*bo = lobj->bo_va->base.bo; if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
return mapping; (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
} amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false,
list_for_each_entry(mapping, &lobj->bo_va->invalids, list) { false);
if (mapping->start > addr || if (r)
addr > mapping->last)
continue;
*bo = lobj->bo_va->base.bo;
return mapping;
}
}
return NULL;
}
/**
* amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
*
* @parser: command submission parser context
*
* Helper for UVD/VCE VM emulation, make sure BOs are accessible by the system VM.
*/
int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
{
unsigned i;
int r;
if (!parser->bo_list)
return 0;
for (i = 0; i < parser->bo_list->num_entries; i++) {
struct amdgpu_bo *bo = parser->bo_list->array[i].robj;
r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
if (unlikely(r))
return r;
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
continue;
bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r))
return r; return r;
} }
return 0; return amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem);
} }
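The rewritten lookup now resolves the address through the per-VM mapping tree (amdgpu_vm_bo_lookup_mapping) instead of walking the BO list, returns the BO and mapping through out-parameters, and validates the BO as contiguous on the spot. A hedged sketch of how a UVD/VCE IB parser might consume it; the wrapper name and the callers of this helper are illustrative assumptions:

/*
 * Hedged caller sketch (illustrative only): resolve a GPU VA referenced by a
 * UVD/VCE command stream into its backing BO and byte offset.
 */
static int example_resolve_va(struct amdgpu_cs_parser *parser, uint64_t addr,
			      struct amdgpu_bo **bo, uint64_t *offset_in_bo)
{
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	r = amdgpu_cs_find_mapping(parser, addr, bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010llx\n", addr);
		return r;
	}

	/* mapping->start is in GPU pages, addr is in bytes */
	*offset_in_bo = addr - mapping->start * AMDGPU_GPU_PAGE_SIZE;
	return 0;
}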


@ -23,13 +23,41 @@
*/ */
#include <drm/drmP.h> #include <drm/drmP.h>
#include <drm/drm_auth.h>
#include "amdgpu.h" #include "amdgpu.h"
#include "amdgpu_sched.h"
static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx) static int amdgpu_ctx_priority_permit(struct drm_file *filp,
enum amd_sched_priority priority)
{
/* NORMAL and below are accessible by everyone */
if (priority <= AMD_SCHED_PRIORITY_NORMAL)
return 0;
if (capable(CAP_SYS_NICE))
return 0;
if (drm_is_current_master(filp))
return 0;
return -EACCES;
}
static int amdgpu_ctx_init(struct amdgpu_device *adev,
enum amd_sched_priority priority,
struct drm_file *filp,
struct amdgpu_ctx *ctx)
{ {
unsigned i, j; unsigned i, j;
int r; int r;
if (priority < 0 || priority >= AMD_SCHED_PRIORITY_MAX)
return -EINVAL;
r = amdgpu_ctx_priority_permit(filp, priority);
if (r)
return r;
memset(ctx, 0, sizeof(*ctx)); memset(ctx, 0, sizeof(*ctx));
ctx->adev = adev; ctx->adev = adev;
kref_init(&ctx->refcount); kref_init(&ctx->refcount);
@ -39,19 +67,24 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
if (!ctx->fences) if (!ctx->fences)
return -ENOMEM; return -ENOMEM;
mutex_init(&ctx->lock);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
ctx->rings[i].sequence = 1; ctx->rings[i].sequence = 1;
ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i]; ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
} }
ctx->reset_counter = atomic_read(&adev->gpu_reset_counter); ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
ctx->init_priority = priority;
ctx->override_priority = AMD_SCHED_PRIORITY_UNSET;
/* create context entity for each ring */ /* create context entity for each ring */
for (i = 0; i < adev->num_rings; i++) { for (i = 0; i < adev->num_rings; i++) {
struct amdgpu_ring *ring = adev->rings[i]; struct amdgpu_ring *ring = adev->rings[i];
struct amd_sched_rq *rq; struct amd_sched_rq *rq;
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; rq = &ring->sched.sched_rq[priority];
if (ring == &adev->gfx.kiq.ring) if (ring == &adev->gfx.kiq.ring)
continue; continue;
@ -96,10 +129,14 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
&ctx->rings[i].entity); &ctx->rings[i].entity);
amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr); amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
mutex_destroy(&ctx->lock);
} }
static int amdgpu_ctx_alloc(struct amdgpu_device *adev, static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv, struct amdgpu_fpriv *fpriv,
struct drm_file *filp,
enum amd_sched_priority priority,
uint32_t *id) uint32_t *id)
{ {
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
@ -117,8 +154,9 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
kfree(ctx); kfree(ctx);
return r; return r;
} }
*id = (uint32_t)r; *id = (uint32_t)r;
r = amdgpu_ctx_init(adev, ctx); r = amdgpu_ctx_init(adev, priority, filp, ctx);
if (r) { if (r) {
idr_remove(&mgr->ctx_handles, *id); idr_remove(&mgr->ctx_handles, *id);
*id = 0; *id = 0;
@ -193,6 +231,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
{ {
int r; int r;
uint32_t id; uint32_t id;
enum amd_sched_priority priority;
union drm_amdgpu_ctx *args = data; union drm_amdgpu_ctx *args = data;
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = dev->dev_private;
@ -200,10 +239,16 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
r = 0; r = 0;
id = args->in.ctx_id; id = args->in.ctx_id;
priority = amdgpu_to_sched_priority(args->in.priority);
/* For backwards compatibility reasons, we need to accept
* ioctls with garbage in the priority field */
if (priority == AMD_SCHED_PRIORITY_INVALID)
priority = AMD_SCHED_PRIORITY_NORMAL;
switch (args->in.op) { switch (args->in.op) {
case AMDGPU_CTX_OP_ALLOC_CTX: case AMDGPU_CTX_OP_ALLOC_CTX:
r = amdgpu_ctx_alloc(adev, fpriv, &id); r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
args->out.alloc.ctx_id = id; args->out.alloc.ctx_id = id;
break; break;
case AMDGPU_CTX_OP_FREE_CTX: case AMDGPU_CTX_OP_FREE_CTX:
@ -246,8 +291,8 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
return 0; return 0;
} }
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
struct dma_fence *fence) struct dma_fence *fence, uint64_t* handler)
{ {
struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx]; struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
uint64_t seq = cring->sequence; uint64_t seq = cring->sequence;
@ -256,12 +301,8 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
idx = seq & (amdgpu_sched_jobs - 1); idx = seq & (amdgpu_sched_jobs - 1);
other = cring->fences[idx]; other = cring->fences[idx];
if (other) { if (other)
signed long r; BUG_ON(!dma_fence_is_signaled(other));
r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
if (r < 0)
DRM_ERROR("Error (%ld) waiting for fence!\n", r);
}
dma_fence_get(fence); dma_fence_get(fence);
@ -271,8 +312,10 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
spin_unlock(&ctx->ring_lock); spin_unlock(&ctx->ring_lock);
dma_fence_put(other); dma_fence_put(other);
if (handler)
*handler = seq;
return seq; return 0;
} }
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
@ -303,6 +346,51 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
return fence; return fence;
} }
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
enum amd_sched_priority priority)
{
int i;
struct amdgpu_device *adev = ctx->adev;
struct amd_sched_rq *rq;
struct amd_sched_entity *entity;
struct amdgpu_ring *ring;
enum amd_sched_priority ctx_prio;
ctx->override_priority = priority;
ctx_prio = (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
for (i = 0; i < adev->num_rings; i++) {
ring = adev->rings[i];
entity = &ctx->rings[i].entity;
rq = &ring->sched.sched_rq[ctx_prio];
if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
continue;
amd_sched_entity_set_rq(entity, rq);
}
}
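The override moves every ring entity into the run queue of the effective priority, which is the override unless it is AMD_SCHED_PRIORITY_UNSET, in which case the creation-time priority applies. A hedged sketch of how the new DRM_AMDGPU_SCHED path (amdgpu_sched.h is included above) might drive this for a context looked up by handle; amdgpu_ctx_get()/amdgpu_ctx_put() are assumed to be the existing refcounted lookup helpers of the context manager:

/*
 * Hedged sketch, not the actual amdgpu_sched implementation: look up a
 * context by id and apply a priority override to it.
 */
static int example_override_ctx_priority(struct amdgpu_fpriv *fpriv,
					 uint32_t ctx_id,
					 enum amd_sched_priority priority)
{
	struct amdgpu_ctx *ctx = amdgpu_ctx_get(fpriv, ctx_id);

	if (!ctx)
		return -EINVAL;

	amdgpu_ctx_priority_override(ctx, priority);
	amdgpu_ctx_put(ctx);
	return 0;
}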
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
{
struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
struct dma_fence *other = cring->fences[idx];
if (other) {
signed long r;
r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
if (r < 0) {
DRM_ERROR("Error (%ld) waiting for fence!\n", r);
return r;
}
}
return 0;
}
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr) void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{ {
mutex_init(&mgr->lock); mutex_init(&mgr->lock);


@ -56,6 +56,7 @@
#include "amdgpu_vf_error.h" #include "amdgpu_vf_error.h"
#include "amdgpu_amdkfd.h" #include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@ -65,6 +66,7 @@ MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev); static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev); static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);
static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev);
static const char *amdgpu_asic_name[] = { static const char *amdgpu_asic_name[] = {
"TAHITI", "TAHITI",
@ -107,10 +109,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
{ {
uint32_t ret; uint32_t ret;
if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) { if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
BUG_ON(in_interrupt());
return amdgpu_virt_kiq_rreg(adev, reg); return amdgpu_virt_kiq_rreg(adev, reg);
}
if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
@ -135,10 +135,8 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
adev->last_mm_index = v; adev->last_mm_index = v;
} }
if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) { if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
BUG_ON(in_interrupt());
return amdgpu_virt_kiq_wreg(adev, reg, v); return amdgpu_virt_kiq_wreg(adev, reg, v);
}
if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
@ -402,6 +400,15 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev)
*/ */
static int amdgpu_doorbell_init(struct amdgpu_device *adev) static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{ {
/* No doorbell on SI hardware generation */
if (adev->asic_type < CHIP_BONAIRE) {
adev->doorbell.base = 0;
adev->doorbell.size = 0;
adev->doorbell.num_doorbells = 0;
adev->doorbell.ptr = NULL;
return 0;
}
/* doorbell bar mapping */ /* doorbell bar mapping */
adev->doorbell.base = pci_resource_start(adev->pdev, 2); adev->doorbell.base = pci_resource_start(adev->pdev, 2);
adev->doorbell.size = pci_resource_len(adev->pdev, 2); adev->doorbell.size = pci_resource_len(adev->pdev, 2);
@ -539,7 +546,7 @@ int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
if (offset < adev->wb.num_wb) { if (offset < adev->wb.num_wb) {
__set_bit(offset, adev->wb.used); __set_bit(offset, adev->wb.used);
*wb = offset * 8; /* convert to dw offset */ *wb = offset << 3; /* convert to dw offset */
return 0; return 0;
} else { } else {
return -EINVAL; return -EINVAL;
@ -557,7 +564,7 @@ int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb) void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{ {
if (wb < adev->wb.num_wb) if (wb < adev->wb.num_wb)
__clear_bit(wb, adev->wb.used); __clear_bit(wb >> 3, adev->wb.used);
} }
/** /**
@ -646,6 +653,81 @@ void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
mc->gart_size >> 20, mc->gart_start, mc->gart_end); mc->gart_size >> 20, mc->gart_start, mc->gart_end);
} }
/*
* Firmware Reservation functions
*/
/**
* amdgpu_fw_reserve_vram_fini - free fw reserved vram
*
* @adev: amdgpu_device pointer
*
* free fw reserved vram if it has been reserved.
*/
void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
NULL, &adev->fw_vram_usage.va);
}
/**
* amdgpu_fw_reserve_vram_init - create bo vram reservation from fw
*
* @adev: amdgpu_device pointer
*
* create bo vram reservation from fw.
*/
int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
{
int r = 0;
u64 gpu_addr;
u64 vram_size = adev->mc.visible_vram_size;
adev->fw_vram_usage.va = NULL;
adev->fw_vram_usage.reserved_bo = NULL;
if (adev->fw_vram_usage.size > 0 &&
adev->fw_vram_usage.size <= vram_size) {
r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
PAGE_SIZE, true, 0,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
&adev->fw_vram_usage.reserved_bo);
if (r)
goto error_create;
r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
if (r)
goto error_reserve;
r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
AMDGPU_GEM_DOMAIN_VRAM,
adev->fw_vram_usage.start_offset,
(adev->fw_vram_usage.start_offset +
adev->fw_vram_usage.size), &gpu_addr);
if (r)
goto error_pin;
r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
&adev->fw_vram_usage.va);
if (r)
goto error_kmap;
amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
}
return r;
error_kmap:
amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
error_pin:
amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
error_reserve:
amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
error_create:
adev->fw_vram_usage.va = NULL;
adev->fw_vram_usage.reserved_bo = NULL;
return r;
}
/* /*
* GPU helpers function. * GPU helpers function.
*/ */
@ -662,27 +744,6 @@ bool amdgpu_need_post(struct amdgpu_device *adev)
{ {
uint32_t reg; uint32_t reg;
if (adev->has_hw_reset) {
adev->has_hw_reset = false;
return true;
}
/* bios scratch used on CIK+ */
if (adev->asic_type >= CHIP_BONAIRE)
return amdgpu_atombios_scratch_need_asic_init(adev);
/* check MEM_SIZE for older asics */
reg = amdgpu_asic_get_config_memsize(adev);
if ((reg != 0) && (reg != 0xffffffff))
return false;
return true;
}
static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
{
if (amdgpu_sriov_vf(adev)) if (amdgpu_sriov_vf(adev))
return false; return false;
@ -705,7 +766,23 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
return true; return true;
} }
} }
return amdgpu_need_post(adev);
if (adev->has_hw_reset) {
adev->has_hw_reset = false;
return true;
}
/* bios scratch used on CIK+ */
if (adev->asic_type >= CHIP_BONAIRE)
return amdgpu_atombios_scratch_need_asic_init(adev);
/* check MEM_SIZE for older asics */
reg = amdgpu_asic_get_config_memsize(adev);
if ((reg != 0) && (reg != 0xffffffff))
return false;
return true;
} }
/** /**
@ -887,6 +964,20 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
return r; return r;
} }
static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
struct atom_context *ctx = adev->mode_info.atom_context;
return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
}
static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
NULL);
/** /**
* amdgpu_atombios_fini - free the driver info and callbacks for atombios * amdgpu_atombios_fini - free the driver info and callbacks for atombios
* *
@ -906,6 +997,7 @@ static void amdgpu_atombios_fini(struct amdgpu_device *adev)
adev->mode_info.atom_context = NULL; adev->mode_info.atom_context = NULL;
kfree(adev->mode_info.atom_card_info); kfree(adev->mode_info.atom_card_info);
adev->mode_info.atom_card_info = NULL; adev->mode_info.atom_card_info = NULL;
device_remove_file(adev->dev, &dev_attr_vbios_version);
} }
/** /**
@ -922,6 +1014,7 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev)
{ {
struct card_info *atom_card_info = struct card_info *atom_card_info =
kzalloc(sizeof(struct card_info), GFP_KERNEL); kzalloc(sizeof(struct card_info), GFP_KERNEL);
int ret;
if (!atom_card_info) if (!atom_card_info)
return -ENOMEM; return -ENOMEM;
@ -958,6 +1051,13 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev)
amdgpu_atombios_scratch_regs_init(adev); amdgpu_atombios_scratch_regs_init(adev);
amdgpu_atombios_allocate_fb_scratch(adev); amdgpu_atombios_allocate_fb_scratch(adev);
} }
ret = device_create_file(adev->dev, &dev_attr_vbios_version);
if (ret) {
DRM_ERROR("Failed to create device file for VBIOS version\n");
return ret;
}
return 0; return 0;
} }
@ -1757,10 +1857,8 @@ static int amdgpu_fini(struct amdgpu_device *adev)
adev->ip_blocks[i].status.late_initialized = false; adev->ip_blocks[i].status.late_initialized = false;
} }
if (amdgpu_sriov_vf(adev)) { if (amdgpu_sriov_vf(adev))
amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
amdgpu_virt_release_full_gpu(adev, false); amdgpu_virt_release_full_gpu(adev, false);
}
return 0; return 0;
} }
@ -1848,6 +1946,7 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
static enum amd_ip_block_type ip_order[] = { static enum amd_ip_block_type ip_order[] = {
AMD_IP_BLOCK_TYPE_SMC, AMD_IP_BLOCK_TYPE_SMC,
AMD_IP_BLOCK_TYPE_PSP,
AMD_IP_BLOCK_TYPE_DCE, AMD_IP_BLOCK_TYPE_DCE,
AMD_IP_BLOCK_TYPE_GFX, AMD_IP_BLOCK_TYPE_GFX,
AMD_IP_BLOCK_TYPE_SDMA, AMD_IP_BLOCK_TYPE_SDMA,
@ -1933,12 +2032,17 @@ static int amdgpu_resume(struct amdgpu_device *adev)
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev) static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{ {
if (adev->is_atom_fw) { if (amdgpu_sriov_vf(adev)) {
if (amdgpu_atomfirmware_gpu_supports_virtualization(adev)) if (adev->is_atom_fw) {
adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
} else { adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
if (amdgpu_atombios_has_gpu_virtualization_table(adev)) } else {
adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; if (amdgpu_atombios_has_gpu_virtualization_table(adev))
adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
}
if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
} }
} }
@ -1979,6 +2083,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->vm_manager.vm_pte_num_rings = 0; adev->vm_manager.vm_pte_num_rings = 0;
adev->gart.gart_funcs = NULL; adev->gart.gart_funcs = NULL;
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
adev->smc_rreg = &amdgpu_invalid_rreg; adev->smc_rreg = &amdgpu_invalid_rreg;
adev->smc_wreg = &amdgpu_invalid_wreg; adev->smc_wreg = &amdgpu_invalid_wreg;
@ -2007,8 +2112,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->pm.mutex); mutex_init(&adev->pm.mutex);
mutex_init(&adev->gfx.gpu_clock_mutex); mutex_init(&adev->gfx.gpu_clock_mutex);
mutex_init(&adev->srbm_mutex); mutex_init(&adev->srbm_mutex);
mutex_init(&adev->gfx.pipe_reserve_mutex);
mutex_init(&adev->grbm_idx_mutex); mutex_init(&adev->grbm_idx_mutex);
mutex_init(&adev->mn_lock); mutex_init(&adev->mn_lock);
mutex_init(&adev->virt.vf_errors.lock);
hash_init(adev->mn_hash); hash_init(adev->mn_hash);
amdgpu_check_arguments(adev); amdgpu_check_arguments(adev);
@ -2051,9 +2158,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
if (adev->asic_type >= CHIP_BONAIRE) /* doorbell bar mapping */
/* doorbell bar mapping */ amdgpu_doorbell_init(adev);
amdgpu_doorbell_init(adev);
/* io port mapping */ /* io port mapping */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@ -2095,7 +2201,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atombios_init(adev); r = amdgpu_atombios_init(adev);
if (r) { if (r) {
dev_err(adev->dev, "amdgpu_atombios_init failed\n"); dev_err(adev->dev, "amdgpu_atombios_init failed\n");
amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
goto failed; goto failed;
} }
@ -2103,10 +2209,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_device_detect_sriov_bios(adev); amdgpu_device_detect_sriov_bios(adev);
/* Post card if necessary */ /* Post card if necessary */
if (amdgpu_vpost_needed(adev)) { if (amdgpu_need_post(adev)) {
if (!adev->bios) { if (!adev->bios) {
dev_err(adev->dev, "no vBIOS found\n"); dev_err(adev->dev, "no vBIOS found\n");
amdgpu_vf_error_put(AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
r = -EINVAL; r = -EINVAL;
goto failed; goto failed;
} }
@ -2114,7 +2219,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atom_asic_init(adev->mode_info.atom_context); r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
if (r) { if (r) {
dev_err(adev->dev, "gpu post error!\n"); dev_err(adev->dev, "gpu post error!\n");
amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0);
goto failed; goto failed;
} }
} else { } else {
@ -2126,7 +2230,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atomfirmware_get_clock_info(adev); r = amdgpu_atomfirmware_get_clock_info(adev);
if (r) { if (r) {
dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n"); dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
goto failed; goto failed;
} }
} else { } else {
@ -2134,7 +2238,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atombios_get_clock_info(adev); r = amdgpu_atombios_get_clock_info(adev);
if (r) { if (r) {
dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
goto failed; goto failed;
} }
/* init i2c buses */ /* init i2c buses */
@ -2145,7 +2249,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_fence_driver_init(adev); r = amdgpu_fence_driver_init(adev);
if (r) { if (r) {
dev_err(adev->dev, "amdgpu_fence_driver_init failed\n"); dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
amdgpu_vf_error_put(AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0); amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
goto failed; goto failed;
} }
@ -2155,7 +2259,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_init(adev); r = amdgpu_init(adev);
if (r) { if (r) {
dev_err(adev->dev, "amdgpu_init failed\n"); dev_err(adev->dev, "amdgpu_init failed\n");
amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
amdgpu_fini(adev); amdgpu_fini(adev);
goto failed; goto failed;
} }
@ -2175,7 +2279,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_ib_pool_init(adev); r = amdgpu_ib_pool_init(adev);
if (r) { if (r) {
dev_err(adev->dev, "IB initialization failed (%d).\n", r); dev_err(adev->dev, "IB initialization failed (%d).\n", r);
amdgpu_vf_error_put(AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r); amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
goto failed; goto failed;
} }
@ -2183,8 +2287,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r) if (r)
DRM_ERROR("ib ring test failed (%d).\n", r); DRM_ERROR("ib ring test failed (%d).\n", r);
if (amdgpu_sriov_vf(adev))
amdgpu_virt_init_data_exchange(adev);
amdgpu_fbdev_init(adev); amdgpu_fbdev_init(adev);
r = amdgpu_pm_sysfs_init(adev);
if (r)
DRM_ERROR("registering pm debugfs failed (%d).\n", r);
r = amdgpu_gem_debugfs_init(adev); r = amdgpu_gem_debugfs_init(adev);
if (r) if (r)
DRM_ERROR("registering gem debugfs failed (%d).\n", r); DRM_ERROR("registering gem debugfs failed (%d).\n", r);
@ -2201,6 +2312,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r) if (r)
DRM_ERROR("registering firmware debugfs failed (%d).\n", r); DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
r = amdgpu_debugfs_vbios_dump_init(adev);
if (r)
DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r);
if ((amdgpu_testing & 1)) { if ((amdgpu_testing & 1)) {
if (adev->accel_working) if (adev->accel_working)
amdgpu_test_moves(adev); amdgpu_test_moves(adev);
@ -2220,7 +2335,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_late_init(adev); r = amdgpu_late_init(adev);
if (r) { if (r) {
dev_err(adev->dev, "amdgpu_late_init failed\n"); dev_err(adev->dev, "amdgpu_late_init failed\n");
amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r); amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
goto failed; goto failed;
} }
@ -2252,6 +2367,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
/* evict vram memory */ /* evict vram memory */
amdgpu_bo_evict_vram(adev); amdgpu_bo_evict_vram(adev);
amdgpu_ib_pool_fini(adev); amdgpu_ib_pool_fini(adev);
amdgpu_fw_reserve_vram_fini(adev);
amdgpu_fence_driver_fini(adev); amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev); amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev); r = amdgpu_fini(adev);
@ -2276,8 +2392,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->rio_mem = NULL; adev->rio_mem = NULL;
iounmap(adev->rmmio); iounmap(adev->rmmio);
adev->rmmio = NULL; adev->rmmio = NULL;
if (adev->asic_type >= CHIP_BONAIRE) amdgpu_doorbell_fini(adev);
amdgpu_doorbell_fini(adev); amdgpu_pm_sysfs_fini(adev);
amdgpu_debugfs_regs_cleanup(adev); amdgpu_debugfs_regs_cleanup(adev);
} }
@ -2504,6 +2620,9 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
int i; int i;
bool asic_hang = false; bool asic_hang = false;
if (amdgpu_sriov_vf(adev))
return true;
for (i = 0; i < adev->num_ip_blocks; i++) { for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid) if (!adev->ip_blocks[i].status.valid)
continue; continue;
@ -2546,7 +2665,8 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) || if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) || (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) || (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) { (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
if (adev->ip_blocks[i].status.hang) { if (adev->ip_blocks[i].status.hang) {
DRM_INFO("Some block need full reset!\n"); DRM_INFO("Some block need full reset!\n");
return true; return true;
@ -2654,7 +2774,7 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
mutex_lock(&adev->virt.lock_reset); mutex_lock(&adev->virt.lock_reset);
atomic_inc(&adev->gpu_reset_counter); atomic_inc(&adev->gpu_reset_counter);
adev->gfx.in_reset = true; adev->in_sriov_reset = true;
/* block TTM */ /* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
@ -2765,7 +2885,7 @@ give_up_reset:
dev_info(adev->dev, "GPU reset successed!\n"); dev_info(adev->dev, "GPU reset successed!\n");
} }
adev->gfx.in_reset = false; adev->in_sriov_reset = false;
mutex_unlock(&adev->virt.lock_reset); mutex_unlock(&adev->virt.lock_reset);
return r; return r;
} }
@ -2902,7 +3022,6 @@ out:
} }
} else { } else {
dev_err(adev->dev, "asic resume failed (%d).\n", r); dev_err(adev->dev, "asic resume failed (%d).\n", r);
amdgpu_vf_error_put(AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
if (adev->rings[i] && adev->rings[i]->sched.thread) { if (adev->rings[i] && adev->rings[i]->sched.thread) {
kthread_unpark(adev->rings[i]->sched.thread); kthread_unpark(adev->rings[i]->sched.thread);
@ -2916,7 +3035,6 @@ out:
if (r) { if (r) {
/* bad news, how to tell it to userspace ? */ /* bad news, how to tell it to userspace ? */
dev_info(adev->dev, "GPU reset failed\n"); dev_info(adev->dev, "GPU reset failed\n");
amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
} }
else { else {
dev_info(adev->dev, "GPU reset successed!\n"); dev_info(adev->dev, "GPU reset successed!\n");
@ -3463,10 +3581,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
valuesize = sizeof(values); valuesize = sizeof(values);
if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor) if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize); r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
&valuesize);
else else
return -EINVAL; return -EINVAL;
@ -3754,6 +3869,28 @@ int amdgpu_debugfs_init(struct drm_minor *minor)
{ {
return 0; return 0;
} }
static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
seq_write(m, adev->bios, adev->bios_size);
return 0;
}
static const struct drm_info_list amdgpu_vbios_dump_list[] = {
{"amdgpu_vbios",
amdgpu_debugfs_get_vbios_dump,
0, NULL},
};
static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
{
return amdgpu_debugfs_add_files(adev,
amdgpu_vbios_dump_list, 1);
}
#else #else
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev) static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
{ {
@ -3763,5 +3900,9 @@ static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{ {
return 0; return 0;
} }
static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
{
return 0;
}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { } static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif #endif
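The new amdgpu_vbios debugfs node simply dumps the raw VBIOS image (adev->bios, adev->bios_size). A hedged userspace sketch that reads it; the debugfs mount point and DRI minor number are assumptions about a typical setup:

/*
 * Hedged userspace sketch: read the raw VBIOS image exposed by the new
 * "amdgpu_vbios" debugfs file. Assumes debugfs is mounted at
 * /sys/kernel/debug and the card is DRI minor 0.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/dri/0/amdgpu_vbios", "rb");
	unsigned char buf[4096];
	size_t n, total = 0;

	if (!f) {
		perror("amdgpu_vbios");
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		total += n;	/* a real tool would write the bytes out */
	fclose(f);
	printf("VBIOS image: %zu bytes\n", total);
	return 0;
}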


@ -960,8 +960,10 @@ u8 amdgpu_encode_pci_lane_width(u32 lanes)
} }
struct amd_vce_state* struct amd_vce_state*
amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx) amdgpu_get_vce_clock_state(void *handle, u32 idx)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (idx < adev->pm.dpm.num_of_vce_states) if (idx < adev->pm.dpm.num_of_vce_states)
return &adev->pm.dpm.vce_states[idx]; return &adev->pm.dpm.vce_states[idx];


@ -241,179 +241,125 @@ enum amdgpu_pcie_gen {
AMDGPU_PCIE_GEN_INVALID = 0xffff AMDGPU_PCIE_GEN_INVALID = 0xffff
}; };
struct amdgpu_dpm_funcs { #define amdgpu_dpm_pre_set_power_state(adev) \
int (*get_temperature)(struct amdgpu_device *adev); ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle))
int (*pre_set_power_state)(struct amdgpu_device *adev);
int (*set_power_state)(struct amdgpu_device *adev);
void (*post_set_power_state)(struct amdgpu_device *adev);
void (*display_configuration_changed)(struct amdgpu_device *adev);
u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
int (*force_performance_level)(struct amdgpu_device *adev, enum amd_dpm_forced_level level);
bool (*vblank_too_short)(struct amdgpu_device *adev);
void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
int (*get_sclk_od)(struct amdgpu_device *adev);
int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
int (*get_mclk_od)(struct amdgpu_device *adev);
int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
int (*check_state_equal)(struct amdgpu_device *adev,
struct amdgpu_ps *cps,
struct amdgpu_ps *rps,
bool *equal);
int (*read_sensor)(struct amdgpu_device *adev, int idx, void *value,
int *size);
struct amd_vce_state* (*get_vce_clock_state)(struct amdgpu_device *adev, unsigned idx); #define amdgpu_dpm_set_power_state(adev) \
int (*reset_power_profile_state)(struct amdgpu_device *adev, ((adev)->powerplay.pp_funcs->set_power_state((adev)->powerplay.pp_handle))
struct amd_pp_profile *request);
int (*get_power_profile_state)(struct amdgpu_device *adev,
struct amd_pp_profile *query);
int (*set_power_profile_state)(struct amdgpu_device *adev,
struct amd_pp_profile *request);
int (*switch_power_profile)(struct amdgpu_device *adev,
enum amd_pp_profile_type type);
};
#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev)) #define amdgpu_dpm_post_set_power_state(adev) \
#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev)) ((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle))
#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev)) #define amdgpu_dpm_display_configuration_changed(adev) \
#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps)) ((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle))
#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e)) #define amdgpu_dpm_print_power_state(adev, ps) \
((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps)))
#define amdgpu_dpm_vblank_too_short(adev) \
((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle))
#define amdgpu_dpm_enable_bapm(adev, e) \
((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
#define amdgpu_dpm_read_sensor(adev, idx, value, size) \ #define amdgpu_dpm_read_sensor(adev, idx, value, size) \
((adev)->pp_enabled ? \ ((adev)->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, (idx), (value), (size)))
(adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value), (size)) : \
(adev)->pm.funcs->read_sensor((adev), (idx), (value), (size)))
#define amdgpu_dpm_get_temperature(adev) \ #define amdgpu_dpm_get_temperature(adev) \
((adev)->pp_enabled ? \ ((adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle))
(adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
(adev)->pm.funcs->get_temperature((adev)))
#define amdgpu_dpm_set_fan_control_mode(adev, m) \ #define amdgpu_dpm_set_fan_control_mode(adev, m) \
((adev)->pp_enabled ? \ ((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)))
(adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
(adev)->pm.funcs->set_fan_control_mode((adev), (m)))
#define amdgpu_dpm_get_fan_control_mode(adev) \ #define amdgpu_dpm_get_fan_control_mode(adev) \
((adev)->pp_enabled ? \ ((adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle))
(adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
(adev)->pm.funcs->get_fan_control_mode((adev)))
#define amdgpu_dpm_set_fan_speed_percent(adev, s) \ #define amdgpu_dpm_set_fan_speed_percent(adev, s) \
((adev)->pp_enabled ? \ ((adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)))
(adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
(adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
#define amdgpu_dpm_get_fan_speed_percent(adev, s) \ #define amdgpu_dpm_get_fan_speed_percent(adev, s) \
((adev)->pp_enabled ? \ ((adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)))
(adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
(adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
#define amdgpu_dpm_get_fan_speed_rpm(adev, s) \ #define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
((adev)->pp_enabled ? \ ((adev)->powerplay.pp_funcs->get_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
(adev)->powerplay.pp_funcs->get_fan_speed_rpm((adev)->powerplay.pp_handle, (s)) : \
-EINVAL)
#define amdgpu_dpm_get_sclk(adev, l) \ #define amdgpu_dpm_get_sclk(adev, l) \
((adev)->pp_enabled ? \ ((adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)))
(adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
(adev)->pm.funcs->get_sclk((adev), (l)))
#define amdgpu_dpm_get_mclk(adev, l) \ #define amdgpu_dpm_get_mclk(adev, l) \
((adev)->pp_enabled ? \ ((adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)))
(adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
(adev)->pm.funcs->get_mclk((adev), (l)))
#define amdgpu_dpm_force_performance_level(adev, l) \ #define amdgpu_dpm_force_performance_level(adev, l) \
((adev)->pp_enabled ? \ ((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)))
(adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
(adev)->pm.funcs->force_performance_level((adev), (l)))
#define amdgpu_dpm_powergate_uvd(adev, g) \ #define amdgpu_dpm_powergate_uvd(adev, g) \
((adev)->pp_enabled ? \ ((adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)))
(adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
(adev)->pm.funcs->powergate_uvd((adev), (g)))
#define amdgpu_dpm_powergate_vce(adev, g) \ #define amdgpu_dpm_powergate_vce(adev, g) \
((adev)->pp_enabled ? \ ((adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)))
(adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
(adev)->pm.funcs->powergate_vce((adev), (g)))
#define amdgpu_dpm_get_current_power_state(adev) \ #define amdgpu_dpm_get_current_power_state(adev) \
(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle) ((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
#define amdgpu_dpm_get_pp_num_states(adev, data) \ #define amdgpu_dpm_get_pp_num_states(adev, data) \
(adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data) ((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data))
#define amdgpu_dpm_get_pp_table(adev, table) \ #define amdgpu_dpm_get_pp_table(adev, table) \
(adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table) ((adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table))
#define amdgpu_dpm_set_pp_table(adev, buf, size) \ #define amdgpu_dpm_set_pp_table(adev, buf, size) \
(adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size) ((adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size))
#define amdgpu_dpm_print_clock_levels(adev, type, buf) \ #define amdgpu_dpm_print_clock_levels(adev, type, buf) \
(adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf) ((adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf))
#define amdgpu_dpm_force_clock_level(adev, type, level) \ #define amdgpu_dpm_force_clock_level(adev, type, level) \
(adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level) ((adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level))
#define amdgpu_dpm_get_sclk_od(adev) \ #define amdgpu_dpm_get_sclk_od(adev) \
(adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle) ((adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_sclk_od(adev, value) \ #define amdgpu_dpm_set_sclk_od(adev, value) \
(adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value) ((adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value))
#define amdgpu_dpm_get_mclk_od(adev) \ #define amdgpu_dpm_get_mclk_od(adev) \
((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle)) ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_mclk_od(adev, value) \ #define amdgpu_dpm_set_mclk_od(adev, value) \
((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value)) ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \ #define amdgpu_dpm_dispatch_task(adev, task_id, input, output) \
(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output)) ((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (input), (output))
#define amgdpu_dpm_check_state_equal(adev, cps, rps, equal) (adev)->pm.funcs->check_state_equal((adev), (cps),(rps),(equal)) #define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
#define amdgpu_dpm_get_vce_clock_state(adev, i) \ #define amdgpu_dpm_get_vce_clock_state(adev, i) \
((adev)->pp_enabled ? \ ((adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)))
(adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)) : \
(adev)->pm.funcs->get_vce_clock_state((adev), (i)))
#define amdgpu_dpm_get_performance_level(adev) \ #define amdgpu_dpm_get_performance_level(adev) \
((adev)->pp_enabled ? \ ((adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle))
(adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) : \
(adev)->pm.dpm.forced_level)
#define amdgpu_dpm_reset_power_profile_state(adev, request) \ #define amdgpu_dpm_reset_power_profile_state(adev, request) \
((adev)->powerplay.pp_funcs->reset_power_profile_state(\ ((adev)->powerplay.pp_funcs->reset_power_profile_state(\
(adev)->powerplay.pp_handle, request)) (adev)->powerplay.pp_handle, request))
#define amdgpu_dpm_get_power_profile_state(adev, query) \ #define amdgpu_dpm_get_power_profile_state(adev, query) \
((adev)->powerplay.pp_funcs->get_power_profile_state(\ ((adev)->powerplay.pp_funcs->get_power_profile_state(\
(adev)->powerplay.pp_handle, query)) (adev)->powerplay.pp_handle, query))
#define amdgpu_dpm_set_power_profile_state(adev, request) \ #define amdgpu_dpm_set_power_profile_state(adev, request) \
((adev)->powerplay.pp_funcs->set_power_profile_state(\ ((adev)->powerplay.pp_funcs->set_power_profile_state(\
(adev)->powerplay.pp_handle, request)) (adev)->powerplay.pp_handle, request))
#define amdgpu_dpm_switch_power_profile(adev, type) \ #define amdgpu_dpm_switch_power_profile(adev, type) \
((adev)->powerplay.pp_funcs->switch_power_profile(\ ((adev)->powerplay.pp_funcs->switch_power_profile(\
(adev)->powerplay.pp_handle, type)) (adev)->powerplay.pp_handle, type))
#define amdgpu_dpm_set_clockgating_by_smu(adev, msg_id) \
((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
(adev)->powerplay.pp_handle, msg_id))
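After this rework every amdgpu_dpm_* macro dispatches through adev->powerplay.pp_funcs instead of the removed amdgpu_dpm_funcs table, so callers are expected to NULL-check the function pointer before invoking the macro, as the sensor-read path earlier in this series now does. A hedged caller sketch of that pattern:

/*
 * Hedged caller sketch: guard on the powerplay function pointer before using
 * the dispatch macro, mirroring the read_sensor change in amdgpu_device.c.
 */
static int example_read_gpu_temp(struct amdgpu_device *adev, int *temp)
{
	if (!adev->powerplay.pp_funcs ||
	    !adev->powerplay.pp_funcs->get_temperature)
		return -EINVAL;

	*temp = amdgpu_dpm_get_temperature(adev);
	return 0;
}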
struct amdgpu_dpm { struct amdgpu_dpm {
struct amdgpu_ps *ps; struct amdgpu_ps *ps;
/* number of valid power states */ /* number of valid power states */
@ -485,7 +431,6 @@ struct amdgpu_pm {
struct amdgpu_dpm dpm; struct amdgpu_dpm dpm;
const struct firmware *fw; /* SMC firmware */ const struct firmware *fw; /* SMC firmware */
uint32_t fw_version; uint32_t fw_version;
const struct amdgpu_dpm_funcs *funcs;
uint32_t pcie_gen_mask; uint32_t pcie_gen_mask;
uint32_t pcie_mlw_mask; uint32_t pcie_mlw_mask;
struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */ struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
@ -551,6 +496,6 @@ u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
u8 amdgpu_encode_pci_lane_width(u32 lanes); u8 amdgpu_encode_pci_lane_width(u32 lanes);
struct amd_vce_state* struct amd_vce_state*
amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx); amdgpu_get_vce_clock_state(void *handle, u32 idx);
#endif #endif


@ -69,9 +69,13 @@
* - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS. * - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS.
* - 3.18.0 - Export gpu always on cu bitmap * - 3.18.0 - Export gpu always on cu bitmap
* - 3.19.0 - Add support for UVD MJPEG decode * - 3.19.0 - Add support for UVD MJPEG decode
* - 3.20.0 - Add support for local BOs
* - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
* - 3.22.0 - Add DRM_AMDGPU_SCHED ioctl
* - 3.23.0 - Add query for VRAM lost counter
*/ */
#define KMS_DRIVER_MAJOR 3 #define KMS_DRIVER_MAJOR 3
#define KMS_DRIVER_MINOR 19 #define KMS_DRIVER_MINOR 23
#define KMS_DRIVER_PATCHLEVEL 0 #define KMS_DRIVER_PATCHLEVEL 0
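The minor bump from 19 to 23 is how userspace discovers the new interfaces. A short sketch, using libdrm's drmGetVersion(), of gating the fence-to-handle path on the advertised KMS version:

/*
 * Hedged userspace sketch: only use DRM_AMDGPU_FENCE_TO_HANDLE (added in
 * 3.21 per the comment above) when the driver reports a new enough version.
 */
#include <stdbool.h>
#include <xf86drm.h>

static bool supports_fence_to_handle(int drm_fd)
{
	drmVersionPtr ver = drmGetVersion(drm_fd);
	bool ok;

	if (!ver)
		return false;
	ok = ver->version_major > 3 ||
	     (ver->version_major == 3 && ver->version_minor >= 21);
	drmFreeVersion(ver);
	return ok;
}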
int amdgpu_vram_limit = 0; int amdgpu_vram_limit = 0;
@ -91,7 +95,7 @@ int amdgpu_dpm = -1;
int amdgpu_fw_load_type = -1; int amdgpu_fw_load_type = -1;
int amdgpu_aspm = -1; int amdgpu_aspm = -1;
int amdgpu_runtime_pm = -1; int amdgpu_runtime_pm = -1;
unsigned amdgpu_ip_block_mask = 0xffffffff; uint amdgpu_ip_block_mask = 0xffffffff;
int amdgpu_bapm = -1; int amdgpu_bapm = -1;
int amdgpu_deep_color = 0; int amdgpu_deep_color = 0;
int amdgpu_vm_size = -1; int amdgpu_vm_size = -1;
@ -106,14 +110,14 @@ int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2; int amdgpu_sched_hw_submission = 2;
int amdgpu_no_evict = 0; int amdgpu_no_evict = 0;
int amdgpu_direct_gma_size = 0; int amdgpu_direct_gma_size = 0;
unsigned amdgpu_pcie_gen_cap = 0; uint amdgpu_pcie_gen_cap = 0;
unsigned amdgpu_pcie_lane_cap = 0; uint amdgpu_pcie_lane_cap = 0;
unsigned amdgpu_cg_mask = 0xffffffff; uint amdgpu_cg_mask = 0xffffffff;
unsigned amdgpu_pg_mask = 0xffffffff; uint amdgpu_pg_mask = 0xffffffff;
unsigned amdgpu_sdma_phase_quantum = 32; uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL; char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL; char *amdgpu_virtual_display = NULL;
unsigned amdgpu_pp_feature_mask = 0xffffffff; uint amdgpu_pp_feature_mask = 0xffffffff;
int amdgpu_ngg = 0; int amdgpu_ngg = 0;
int amdgpu_prim_buf_per_se = 0; int amdgpu_prim_buf_per_se = 0;
int amdgpu_pos_buf_per_se = 0; int amdgpu_pos_buf_per_se = 0;
@ -121,6 +125,7 @@ int amdgpu_cntl_sb_buf_per_se = 0;
int amdgpu_param_buf_per_se = 0; int amdgpu_param_buf_per_se = 0;
int amdgpu_job_hang_limit = 0; int amdgpu_job_hang_limit = 0;
int amdgpu_lbpw = -1; int amdgpu_lbpw = -1;
int amdgpu_compute_multipipe = -1;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@ -264,6 +269,9 @@ module_param_named(job_hang_limit, amdgpu_job_hang_limit, int ,0444);
MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable, -1 = auto)"); MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(lbpw, amdgpu_lbpw, int, 0444); module_param_named(lbpw, amdgpu_lbpw, int, 0444);
MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
#ifdef CONFIG_DRM_AMDGPU_SI #ifdef CONFIG_DRM_AMDGPU_SI
#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE) #if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
@ -608,6 +616,8 @@ amdgpu_pci_remove(struct pci_dev *pdev)
drm_dev_unregister(dev); drm_dev_unregister(dev);
drm_dev_unref(dev); drm_dev_unref(dev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
} }
static void static void
@ -852,6 +862,7 @@ static struct drm_driver kms_driver = {
.gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table, .gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
.gem_prime_vmap = amdgpu_gem_prime_vmap, .gem_prime_vmap = amdgpu_gem_prime_vmap,
.gem_prime_vunmap = amdgpu_gem_prime_vunmap, .gem_prime_vunmap = amdgpu_gem_prime_vunmap,
.gem_prime_mmap = amdgpu_gem_prime_mmap,
.name = DRIVER_NAME, .name = DRIVER_NAME,
.desc = DRIVER_DESC, .desc = DRIVER_DESC,


@ -149,7 +149,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_VRAM_CLEARED, AMDGPU_GEM_CREATE_VRAM_CLEARED,
true, &gobj); true, NULL, &gobj);
if (ret) { if (ret) {
pr_err("failed to allocate framebuffer (%d)\n", aligned_size); pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
return -ENOMEM; return -ENOMEM;
@ -303,10 +303,10 @@ static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfb
if (rfb->obj) { if (rfb->obj) {
amdgpufb_destroy_pinned_object(rfb->obj); amdgpufb_destroy_pinned_object(rfb->obj);
rfb->obj = NULL; rfb->obj = NULL;
drm_framebuffer_unregister_private(&rfb->base);
drm_framebuffer_cleanup(&rfb->base);
} }
drm_fb_helper_fini(&rfbdev->helper); drm_fb_helper_fini(&rfbdev->helper);
drm_framebuffer_unregister_private(&rfb->base);
drm_framebuffer_cleanup(&rfb->base);
return 0; return 0;
} }


@ -168,6 +168,32 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
return 0; return 0;
} }
/**
* amdgpu_fence_emit_polling - emit a fence on the requested ring
*
* @ring: ring the fence is associated with
* @s: resulting sequence number
*
* Emits a fence command on the requested ring (all asics).
* Used For polling fence.
* Returns 0 on success, -ENOMEM on failure.
*/
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
{
uint32_t seq;
if (!s)
return -EINVAL;
seq = ++ring->fence_drv.sync_seq;
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, AMDGPU_FENCE_FLAG_INT);
*s = seq;
return 0;
}
/** /**
* amdgpu_fence_schedule_fallback - schedule fallback check * amdgpu_fence_schedule_fallback - schedule fallback check
* *
@ -281,6 +307,30 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
return r; return r;
} }
/**
* amdgpu_fence_wait_polling - busy wait for a given sequence number
*
* @ring: ring index the fence is associated with
* @wait_seq: sequence number to wait for
* @timeout: the timeout for waiting in usecs
*
* Busy-wait for the given sequence number on the requested ring (all asics).
* Returns the remaining time if the sequence signaled before the timeout,
* or 0 if the wait timed out.
*/
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
uint32_t wait_seq,
signed long timeout)
{
uint32_t seq;
do {
seq = amdgpu_fence_read(ring);
udelay(5);
timeout -= 5;
} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);
return timeout > 0 ? timeout : 0;
}
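The two new polling helpers pair up for paths that cannot sleep and cannot rely on fence interrupts. A hedged sketch of the intended pairing; ring space allocation and the ring commit are assumed to be handled by the surrounding code:

/*
 * Hedged sketch: emit a polled fence and busy-wait for it in a context that
 * must not sleep. Ring alloc/commit are assumed to happen around this.
 */
static int example_emit_and_poll(struct amdgpu_ring *ring)
{
	uint32_t seq;
	signed long left;
	int r;

	r = amdgpu_fence_emit_polling(ring, &seq);
	if (r)
		return r;

	/* ... caller commits the ring here ... */

	left = amdgpu_fence_wait_polling(ring, seq, 2 * 1000000 /* usecs */);
	return left ? 0 : -ETIMEDOUT;
}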
/** /**
* amdgpu_fence_count_emitted - get the count of emitted fences * amdgpu_fence_count_emitted - get the count of emitted fences
* *
@ -641,6 +691,19 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
atomic_read(&ring->fence_drv.last_seq)); atomic_read(&ring->fence_drv.last_seq));
seq_printf(m, "Last emitted 0x%08x\n", seq_printf(m, "Last emitted 0x%08x\n",
ring->fence_drv.sync_seq); ring->fence_drv.sync_seq);
if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
continue;
/* set in CP_VMID_PREEMPT and preemption occurred */
seq_printf(m, "Last preempted 0x%08x\n",
le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
/* set in CP_VMID_RESET and reset occurred */
seq_printf(m, "Last reset 0x%08x\n",
le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
/* Both preemption and reset occurred */
seq_printf(m, "Last both 0x%08x\n",
le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
} }
return 0; return 0;
} }


@ -332,12 +332,13 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
adev->gart.pages[p] = pagelist[i]; adev->gart.pages[p] = pagelist[i];
#endif #endif
if (adev->gart.ptr) { if (!adev->gart.ptr)
r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags, return 0;
adev->gart.ptr);
if (r) r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
return r; adev->gart.ptr);
} if (r)
return r;
mb(); mb();
amdgpu_gart_flush_gpu_tlb(adev, 0); amdgpu_gart_flush_gpu_tlb(adev, 0);


@ -44,11 +44,12 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
} }
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
int alignment, u32 initial_domain, int alignment, u32 initial_domain,
u64 flags, bool kernel, u64 flags, bool kernel,
struct drm_gem_object **obj) struct reservation_object *resv,
struct drm_gem_object **obj)
{ {
struct amdgpu_bo *robj; struct amdgpu_bo *bo;
int r; int r;
*obj = NULL; *obj = NULL;
@ -59,7 +60,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
retry: retry:
r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
flags, NULL, NULL, 0, &robj); flags, NULL, resv, 0, &bo);
if (r) { if (r) {
if (r != -ERESTARTSYS) { if (r != -ERESTARTSYS) {
if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
@ -71,7 +72,7 @@ retry:
} }
return r; return r;
} }
*obj = &robj->gem_base; *obj = &bo->gem_base;
return 0; return 0;
} }
@ -112,7 +113,17 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
struct amdgpu_fpriv *fpriv = file_priv->driver_priv; struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va *bo_va; struct amdgpu_bo_va *bo_va;
struct mm_struct *mm;
int r; int r;
mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
if (mm && mm != current->mm)
return -EPERM;
if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
abo->tbo.resv != vm->root.base.bo->tbo.resv)
return -EPERM;
r = amdgpu_bo_reserve(abo, false); r = amdgpu_bo_reserve(abo, false);
if (r) if (r)
return r; return r;
@ -127,35 +138,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
return 0; return 0;
} }
static int amdgpu_gem_vm_check(void *param, struct amdgpu_bo *bo)
{
/* if anything is swapped out don't swap it in here,
just abort and wait for the next CS */
if (!amdgpu_bo_gpu_accessible(bo))
return -ERESTARTSYS;
if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
return -ERESTARTSYS;
return 0;
}
static bool amdgpu_gem_vm_ready(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct list_head *list)
{
struct ttm_validate_buffer *entry;
list_for_each_entry(entry, list, head) {
struct amdgpu_bo *bo =
container_of(entry->bo, struct amdgpu_bo, tbo);
if (amdgpu_gem_vm_check(NULL, bo))
return false;
}
return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_vm_check, NULL);
}
void amdgpu_gem_object_close(struct drm_gem_object *obj, void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv) struct drm_file *file_priv)
{ {
@ -165,13 +147,14 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_list_entry vm_pd; struct amdgpu_bo_list_entry vm_pd;
struct list_head list; struct list_head list, duplicates;
struct ttm_validate_buffer tv; struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket; struct ww_acquire_ctx ticket;
struct amdgpu_bo_va *bo_va; struct amdgpu_bo_va *bo_va;
int r; int r;
INIT_LIST_HEAD(&list); INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&duplicates);
tv.bo = &bo->tbo; tv.bo = &bo->tbo;
tv.shared = true; tv.shared = true;
@ -179,7 +162,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd); amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL); r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
if (r) { if (r) {
dev_err(adev->dev, "leaking bo va because " dev_err(adev->dev, "leaking bo va because "
"we fail to reserve bo (%d)\n", r); "we fail to reserve bo (%d)\n", r);
@ -189,7 +172,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
if (bo_va && --bo_va->ref_count == 0) { if (bo_va && --bo_va->ref_count == 0) {
amdgpu_vm_bo_rmv(adev, bo_va); amdgpu_vm_bo_rmv(adev, bo_va);
if (amdgpu_gem_vm_ready(adev, vm, &list)) { if (amdgpu_vm_ready(vm)) {
struct dma_fence *fence = NULL; struct dma_fence *fence = NULL;
r = amdgpu_vm_clear_freed(adev, vm, &fence); r = amdgpu_vm_clear_freed(adev, vm, &fence);
@ -214,18 +197,24 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp) struct drm_file *filp)
{ {
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
union drm_amdgpu_gem_create *args = data; union drm_amdgpu_gem_create *args = data;
uint64_t flags = args->in.domain_flags;
uint64_t size = args->in.bo_size; uint64_t size = args->in.bo_size;
struct reservation_object *resv = NULL;
struct drm_gem_object *gobj; struct drm_gem_object *gobj;
uint32_t handle; uint32_t handle;
bool kernel = false;
int r; int r;
/* reject invalid gem flags */ /* reject invalid gem flags */
if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_NO_CPU_ACCESS | AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_CPU_GTT_USWC | AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_VRAM_CLEARED)) AMDGPU_GEM_CREATE_VRAM_CLEARED |
AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
return -EINVAL; return -EINVAL;
/* reject invalid gem domains */ /* reject invalid gem domains */
@ -240,7 +229,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
/* create a gem object to contain this object in */ /* create a gem object to contain this object in */
if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS | if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) { AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
kernel = true; flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS) if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
size = size << AMDGPU_GDS_SHIFT; size = size << AMDGPU_GDS_SHIFT;
else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS) else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
@ -252,10 +241,25 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
} }
size = roundup(size, PAGE_SIZE); size = roundup(size, PAGE_SIZE);
if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
r = amdgpu_bo_reserve(vm->root.base.bo, false);
if (r)
return r;
resv = vm->root.base.bo->tbo.resv;
}
r = amdgpu_gem_object_create(adev, size, args->in.alignment, r = amdgpu_gem_object_create(adev, size, args->in.alignment,
(u32)(0xffffffff & args->in.domains), (u32)(0xffffffff & args->in.domains),
args->in.domain_flags, flags, false, resv, &gobj);
kernel, &gobj); if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
if (!r) {
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
abo->parent = amdgpu_bo_ref(vm->root.base.bo);
}
amdgpu_bo_unreserve(vm->root.base.bo);
}
if (r) if (r)
return r; return r;
@ -297,9 +301,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
} }
/* create a gem object to contain this object in */ /* create a gem object to contain this object in */
r = amdgpu_gem_object_create(adev, args->size, 0, r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
AMDGPU_GEM_DOMAIN_CPU, 0, 0, 0, NULL, &gobj);
0, &gobj);
if (r) if (r)
return r; return r;
@ -317,8 +320,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
} }
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
down_read(&current->mm->mmap_sem);
r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
bo->tbo.ttm->pages); bo->tbo.ttm->pages);
if (r) if (r)
@ -333,8 +334,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
amdgpu_bo_unreserve(bo); amdgpu_bo_unreserve(bo);
if (r) if (r)
goto free_pages; goto free_pages;
up_read(&current->mm->mmap_sem);
} }
r = drm_gem_handle_create(filp, gobj, &handle); r = drm_gem_handle_create(filp, gobj, &handle);
@@ -511,10 +510,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct list_head *list,
				    uint32_t operation)
 {
-	int r = -ERESTARTSYS;
+	int r;
 
-	if (!amdgpu_gem_vm_ready(adev, vm, list))
-		goto error;
+	if (!amdgpu_vm_ready(vm))
+		return;
 
 	r = amdgpu_vm_update_directories(adev, vm);
 	if (r)
@ -551,7 +550,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_bo_list_entry vm_pd; struct amdgpu_bo_list_entry vm_pd;
struct ttm_validate_buffer tv; struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket; struct ww_acquire_ctx ticket;
struct list_head list; struct list_head list, duplicates;
uint64_t va_flags; uint64_t va_flags;
int r = 0; int r = 0;
@ -580,13 +579,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->operation); args->operation);
return -EINVAL; return -EINVAL;
} }
if ((args->operation == AMDGPU_VA_OP_MAP) ||
(args->operation == AMDGPU_VA_OP_REPLACE)) {
if (amdgpu_kms_vram_lost(adev, fpriv))
return -ENODEV;
}
INIT_LIST_HEAD(&list); INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&duplicates);
if ((args->operation != AMDGPU_VA_OP_CLEAR) && if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
!(args->flags & AMDGPU_VM_PAGE_PRT)) { !(args->flags & AMDGPU_VM_PAGE_PRT)) {
gobj = drm_gem_object_lookup(filp, args->handle); gobj = drm_gem_object_lookup(filp, args->handle);
@ -603,7 +598,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd); amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r) if (r)
goto error_unref; goto error_unref;
@ -669,6 +664,7 @@ error_unref:
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp) struct drm_file *filp)
{ {
struct amdgpu_device *adev = dev->dev_private;
struct drm_amdgpu_gem_op *args = data; struct drm_amdgpu_gem_op *args = data;
struct drm_gem_object *gobj; struct drm_gem_object *gobj;
struct amdgpu_bo *robj; struct amdgpu_bo *robj;
@ -716,6 +712,9 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
amdgpu_vm_bo_invalidate(adev, robj, true);
amdgpu_bo_unreserve(robj); amdgpu_bo_unreserve(robj);
break; break;
default: default:
@ -745,8 +744,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
r = amdgpu_gem_object_create(adev, args->size, 0, r = amdgpu_gem_object_create(adev, args->size, 0,
AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
ttm_bo_type_device, false, NULL, &gobj);
&gobj);
if (r) if (r)
return -ENOMEM; return -ENOMEM;

@@ -109,9 +109,26 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_s
 	}
 }
 
+static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
+{
+	if (amdgpu_compute_multipipe != -1) {
+		DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
+			 amdgpu_compute_multipipe);
+		return amdgpu_compute_multipipe == 1;
+	}
+
+	/* FIXME: spreading the queues across pipes causes perf regressions
+	 * on POLARIS11 compute workloads */
+	if (adev->asic_type == CHIP_POLARIS11)
+		return false;
+
+	return adev->gfx.mec.num_mec > 1;
+}
+
 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
 {
 	int i, queue, pipe, mec;
+	bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
 
 	/* policy for amdgpu compute queue ownership */
 	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
@@ -125,8 +142,7 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
 		if (mec >= adev->gfx.mec.num_mec)
 			break;
 
-		/* FIXME: spreading the queues across pipes causes perf regressions */
-		if (0) {
+		if (multipipe_policy) {
 			/* policy: amdgpu owns the first two queues of the first MEC */
 			if (mec == 0 && queue < 2)
 				set_bit(i, adev->gfx.mec.queue_bitmap);
@@ -185,7 +201,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 	int r = 0;
 
-	mutex_init(&kiq->ring_mutex);
+	spin_lock_init(&kiq->ring_lock);
 
 	r = amdgpu_wb_get(adev, &adev->virt.reg_val_offs);
 	if (r)
@@ -260,8 +276,13 @@ int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev,
 	/* create MQD for KIQ */
 	ring = &adev->gfx.kiq.ring;
 	if (!ring->mqd_obj) {
+		/* Originally the KIQ MQD was put in the GTT domain, but for SR-IOV
+		 * the VRAM domain is a must, otherwise the hypervisor triggers
+		 * SAVE_VF failures after driver unload, which means the MQD is
+		 * deallocated and gart_unbind is called.  To avoid divergence, use
+		 * the VRAM domain for the KIQ MQD on both SR-IOV and bare metal. */
 		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
-					    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+					    AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
					    &ring->mqd_gpu_addr, &ring->mqd_ptr);
 		if (r) {
 			dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);

@@ -169,7 +169,8 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
 	int r;
 
 	spin_lock(&mgr->lock);
-	if (atomic64_read(&mgr->available) < mem->num_pages) {
+	if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
+	    atomic64_read(&mgr->available) < mem->num_pages) {
 		spin_unlock(&mgr->lock);
 		return 0;
 	}
@@ -244,8 +245,9 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
 uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
 {
 	struct amdgpu_gtt_mgr *mgr = man->priv;
+	s64 result = man->size - atomic64_read(&mgr->available);
 
-	return (u64)(man->size - atomic64_read(&mgr->available)) * PAGE_SIZE;
+	return (result > 0 ? result : 0) * PAGE_SIZE;
 }
 
 /**
@@ -265,7 +267,7 @@ static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
 	drm_mm_print(&mgr->mm, printer);
 	spin_unlock(&mgr->lock);
 
-	drm_printf(printer, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n",
+	drm_printf(printer, "man size:%llu pages, gtt available:%lld pages, usage:%lluMB\n",
		   man->size, (u64)atomic64_read(&mgr->available),
		   amdgpu_gtt_mgr_usage(man) >> 20);
 }

@ -169,6 +169,12 @@ restart_ih:
while (adev->irq.ih.rptr != wptr) { while (adev->irq.ih.rptr != wptr) {
u32 ring_index = adev->irq.ih.rptr >> 2; u32 ring_index = adev->irq.ih.rptr >> 2;
/* Prescreening of high-frequency interrupts */
if (!amdgpu_ih_prescreen_iv(adev)) {
adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
continue;
}
/* Before dispatching irq to IP blocks, send it to amdkfd */ /* Before dispatching irq to IP blocks, send it to amdkfd */
amdgpu_amdkfd_interrupt(adev, amdgpu_amdkfd_interrupt(adev,
(const void *) &adev->irq.ih.ring[ring_index]); (const void *) &adev->irq.ih.ring[ring_index]);
@ -190,3 +196,79 @@ restart_ih:
return IRQ_HANDLED; return IRQ_HANDLED;
} }
/**
* amdgpu_ih_add_fault - Add a page fault record
*
* @adev: amdgpu device pointer
* @key: 64-bit encoding of PASID and address
*
* This should be called when a retry page fault interrupt is
* received. If this is a new page fault, it will be added to a hash
* table. The return value indicates whether this is a new fault, or
* a fault that was already known and is already being handled.
*
* If there are too many pending page faults, this will fail. Retry
* interrupts should be ignored in this case until there is enough
* free space.
*
* Returns 0 if the fault was added, 1 if the fault was already known,
* -ENOSPC if there are too many pending faults.
*/
int amdgpu_ih_add_fault(struct amdgpu_device *adev, u64 key)
{
unsigned long flags;
int r = -ENOSPC;
if (WARN_ON_ONCE(!adev->irq.ih.faults))
/* Should be allocated in <IP>_ih_sw_init on GPUs that
* support retry faults and require retry filtering.
*/
return r;
spin_lock_irqsave(&adev->irq.ih.faults->lock, flags);
/* Only let the hash table fill up to 50% for best performance */
if (adev->irq.ih.faults->count >= (1 << (AMDGPU_PAGEFAULT_HASH_BITS-1)))
goto unlock_out;
r = chash_table_copy_in(&adev->irq.ih.faults->hash, key, NULL);
if (!r)
adev->irq.ih.faults->count++;
/* chash_table_copy_in should never fail unless we're losing count */
WARN_ON_ONCE(r < 0);
unlock_out:
spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags);
return r;
}
/**
* amdgpu_ih_clear_fault - Remove a page fault record
*
* @adev: amdgpu device pointer
* @key: 64-bit encoding of PASID and address
*
* This should be called when a page fault has been handled. Any
* future interrupt with this key will be processed as a new
* page fault.
*/
void amdgpu_ih_clear_fault(struct amdgpu_device *adev, u64 key)
{
unsigned long flags;
int r;
if (!adev->irq.ih.faults)
return;
spin_lock_irqsave(&adev->irq.ih.faults->lock, flags);
r = chash_table_remove(&adev->irq.ih.faults->hash, key, NULL);
if (!WARN_ON_ONCE(r < 0)) {
adev->irq.ih.faults->count--;
WARN_ON_ONCE(adev->irq.ih.faults->count < 0);
}
spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags);
}
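
For illustration only, and not part of the pull request: an IP-specific interrupt-handler prescreen path could pair these two helpers roughly as in the sketch below. The wrapper name and the PASID/address key encoding are hypothetical; only amdgpu_ih_add_fault() and amdgpu_ih_clear_fault() come from the change above.

static bool example_prescreen_retry_fault(struct amdgpu_device *adev,
					  u16 pasid, u64 addr)
{
	/* hypothetical key encoding: PASID in the high bits, page frame below */
	u64 key = ((u64)pasid << 48) | (addr >> PAGE_SHIFT);

	switch (amdgpu_ih_add_fault(adev, key)) {
	case 0:		/* new fault, let the interrupt through for handling */
		return true;
	case 1:		/* already known and being handled, drop the duplicate */
		return false;
	default:	/* -ENOSPC, table too full, ignore retries for now */
		return false;
	}
}

/* once the fault has been resolved: amdgpu_ih_clear_fault(adev, key); */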

@ -24,6 +24,8 @@
#ifndef __AMDGPU_IH_H__ #ifndef __AMDGPU_IH_H__
#define __AMDGPU_IH_H__ #define __AMDGPU_IH_H__
#include <linux/chash.h>
struct amdgpu_device; struct amdgpu_device;
/* /*
* vega10+ IH clients * vega10+ IH clients
@ -69,6 +71,13 @@ enum amdgpu_ih_clientid
#define AMDGPU_IH_CLIENTID_LEGACY 0 #define AMDGPU_IH_CLIENTID_LEGACY 0
#define AMDGPU_PAGEFAULT_HASH_BITS 8
struct amdgpu_retryfault_hashtable {
DECLARE_CHASH_TABLE(hash, AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
spinlock_t lock;
int count;
};
/* /*
* R6xx+ IH ring * R6xx+ IH ring
*/ */
@ -87,6 +96,7 @@ struct amdgpu_ih_ring {
bool use_doorbell; bool use_doorbell;
bool use_bus_addr; bool use_bus_addr;
dma_addr_t rb_dma_addr; /* only used when use_bus_addr = true */ dma_addr_t rb_dma_addr; /* only used when use_bus_addr = true */
struct amdgpu_retryfault_hashtable *faults;
}; };
#define AMDGPU_IH_SRC_DATA_MAX_SIZE_DW 4 #define AMDGPU_IH_SRC_DATA_MAX_SIZE_DW 4
@ -109,5 +119,7 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
bool use_bus_addr); bool use_bus_addr);
void amdgpu_ih_ring_fini(struct amdgpu_device *adev); void amdgpu_ih_ring_fini(struct amdgpu_device *adev);
int amdgpu_ih_process(struct amdgpu_device *adev); int amdgpu_ih_process(struct amdgpu_device *adev);
int amdgpu_ih_add_fault(struct amdgpu_device *adev, u64 key);
void amdgpu_ih_clear_fault(struct amdgpu_device *adev, u64 key);
#endif #endif

@ -65,6 +65,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
amdgpu_sync_create(&(*job)->sync); amdgpu_sync_create(&(*job)->sync);
amdgpu_sync_create(&(*job)->dep_sync); amdgpu_sync_create(&(*job)->dep_sync);
amdgpu_sync_create(&(*job)->sched_sync); amdgpu_sync_create(&(*job)->sched_sync);
(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
return 0; return 0;
} }
@ -103,6 +104,7 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
{ {
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
amdgpu_ring_priority_put(job->ring, amd_sched_get_job_priority(s_job));
dma_fence_put(job->fence); dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync); amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->dep_sync); amdgpu_sync_free(&job->dep_sync);
@ -139,6 +141,8 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
job->fence_ctx = entity->fence_context; job->fence_ctx = entity->fence_context;
*f = dma_fence_get(&job->base.s_fence->finished); *f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job); amdgpu_job_free_resources(job);
amdgpu_ring_priority_get(job->ring,
amd_sched_get_job_priority(&job->base));
amd_sched_entity_push_job(&job->base); amd_sched_entity_push_job(&job->base);
return 0; return 0;
@ -177,8 +181,8 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job) static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{ {
struct dma_fence *fence = NULL; struct dma_fence *fence = NULL;
struct amdgpu_device *adev;
struct amdgpu_job *job; struct amdgpu_job *job;
struct amdgpu_fpriv *fpriv = NULL;
int r; int r;
if (!sched_job) { if (!sched_job) {
@ -186,23 +190,25 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
return NULL; return NULL;
} }
job = to_amdgpu_job(sched_job); job = to_amdgpu_job(sched_job);
adev = job->adev;
BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL)); BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
trace_amdgpu_sched_run_job(job); trace_amdgpu_sched_run_job(job);
if (job->vm)
fpriv = container_of(job->vm, struct amdgpu_fpriv, vm);
/* skip ib schedule when vram is lost */ /* skip ib schedule when vram is lost */
if (fpriv && amdgpu_kms_vram_lost(job->adev, fpriv)) if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter)) {
dma_fence_set_error(&job->base.s_fence->finished, -ECANCELED);
DRM_ERROR("Skip scheduling IBs!\n"); DRM_ERROR("Skip scheduling IBs!\n");
else { } else {
r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, &fence); r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
&fence);
if (r) if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r); DRM_ERROR("Error scheduling IBs (%d)\n", r);
} }
/* if gpu reset, hw fence will be replaced here */ /* if gpu reset, hw fence will be replaced here */
dma_fence_put(job->fence); dma_fence_put(job->fence);
job->fence = dma_fence_get(fence); job->fence = dma_fence_get(fence);
amdgpu_job_free_resources(job); amdgpu_job_free_resources(job);
return fence; return fence;
} }

@ -28,6 +28,7 @@
#include <drm/drmP.h> #include <drm/drmP.h>
#include "amdgpu.h" #include "amdgpu.h"
#include <drm/amdgpu_drm.h> #include <drm/amdgpu_drm.h>
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h" #include "amdgpu_uvd.h"
#include "amdgpu_vce.h" #include "amdgpu_vce.h"
@ -269,7 +270,6 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{ {
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct drm_amdgpu_info *info = data; struct drm_amdgpu_info *info = data;
struct amdgpu_mode_info *minfo = &adev->mode_info; struct amdgpu_mode_info *minfo = &adev->mode_info;
void __user *out = (void __user *)(uintptr_t)info->return_pointer; void __user *out = (void __user *)(uintptr_t)info->return_pointer;
@ -282,8 +282,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (!info->return_size || !info->return_pointer) if (!info->return_size || !info->return_pointer)
return -EINVAL; return -EINVAL;
if (amdgpu_kms_vram_lost(adev, fpriv))
return -ENODEV;
switch (info->query) { switch (info->query) {
case AMDGPU_INFO_ACCEL_WORKING: case AMDGPU_INFO_ACCEL_WORKING:
@ -765,6 +763,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
} }
return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0; return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
} }
case AMDGPU_INFO_VRAM_LOST_COUNTER:
ui32 = atomic_read(&adev->vram_lost_counter);
return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
default: default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query); DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL; return -EINVAL;
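
For illustration only, and not part of the pull request: userspace could read the new counter through the existing AMDGPU_INFO ioctl roughly as sketched below. The helper name is made up; the request layout follows the drm_amdgpu_info UAPI, and a change in the returned value since the last query indicates that VRAM contents may have been lost across a GPU reset.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int example_query_vram_lost_counter(int drm_fd, uint32_t *counter)
{
	struct drm_amdgpu_info request;

	/* the kernel writes the 32-bit counter to return_pointer */
	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)counter;
	request.return_size = sizeof(*counter);
	request.query = AMDGPU_INFO_VRAM_LOST_COUNTER;

	return ioctl(drm_fd, DRM_IOCTL_AMDGPU_INFO, &request);
}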
@ -791,12 +792,6 @@ void amdgpu_driver_lastclose_kms(struct drm_device *dev)
vga_switcheroo_process_delayed_switch(); vga_switcheroo_process_delayed_switch();
} }
bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv)
{
return fpriv->vram_lost_counter != atomic_read(&adev->vram_lost_counter);
}
/** /**
* amdgpu_driver_open_kms - drm callback for open * amdgpu_driver_open_kms - drm callback for open
* *
@ -825,7 +820,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
} }
r = amdgpu_vm_init(adev, &fpriv->vm, r = amdgpu_vm_init(adev, &fpriv->vm,
AMDGPU_VM_CONTEXT_GFX); AMDGPU_VM_CONTEXT_GFX, 0);
if (r) { if (r) {
kfree(fpriv); kfree(fpriv);
goto out_suspend; goto out_suspend;
@ -841,8 +836,11 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (amdgpu_sriov_vf(adev)) { if (amdgpu_sriov_vf(adev)) {
r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va); r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
if (r) if (r) {
amdgpu_vm_fini(adev, &fpriv->vm);
kfree(fpriv);
goto out_suspend; goto out_suspend;
}
} }
mutex_init(&fpriv->bo_list_lock); mutex_init(&fpriv->bo_list_lock);
@ -850,7 +848,6 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
amdgpu_ctx_mgr_init(&fpriv->ctx_mgr); amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
fpriv->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
file_priv->driver_priv = fpriv; file_priv->driver_priv = fpriv;
out_suspend: out_suspend:
@ -1020,7 +1017,9 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
/* KMS */ /* KMS */
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),

@@ -50,8 +50,10 @@ struct amdgpu_mn {
 	struct hlist_node node;
 
 	/* objects protected by lock */
-	struct mutex lock;
+	struct rw_semaphore lock;
 	struct rb_root_cached objects;
+	struct mutex read_lock;
+	atomic_t recursion;
 };
 
 struct amdgpu_mn_node {
@ -74,7 +76,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
struct amdgpu_bo *bo, *next_bo; struct amdgpu_bo *bo, *next_bo;
mutex_lock(&adev->mn_lock); mutex_lock(&adev->mn_lock);
mutex_lock(&rmn->lock); down_write(&rmn->lock);
hash_del(&rmn->node); hash_del(&rmn->node);
rbtree_postorder_for_each_entry_safe(node, next_node, rbtree_postorder_for_each_entry_safe(node, next_node,
&rmn->objects.rb_root, it.rb) { &rmn->objects.rb_root, it.rb) {
@ -84,7 +86,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
} }
kfree(node); kfree(node);
} }
mutex_unlock(&rmn->lock); up_write(&rmn->lock);
mutex_unlock(&adev->mn_lock); mutex_unlock(&adev->mn_lock);
mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm); mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
kfree(rmn); kfree(rmn);
@ -106,6 +108,53 @@ static void amdgpu_mn_release(struct mmu_notifier *mn,
schedule_work(&rmn->work); schedule_work(&rmn->work);
} }
/**
* amdgpu_mn_lock - take the write side lock for this mn
*/
void amdgpu_mn_lock(struct amdgpu_mn *mn)
{
if (mn)
down_write(&mn->lock);
}
/**
* amdgpu_mn_unlock - drop the write side lock for this mn
*/
void amdgpu_mn_unlock(struct amdgpu_mn *mn)
{
if (mn)
up_write(&mn->lock);
}
/**
* amdgpu_mn_read_lock - take the rmn read lock
*
* @rmn: our notifier
*
* Take the rmn read side lock.
*/
static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn)
{
mutex_lock(&rmn->read_lock);
if (atomic_inc_return(&rmn->recursion) == 1)
down_read_non_owner(&rmn->lock);
mutex_unlock(&rmn->read_lock);
}
/**
* amdgpu_mn_read_unlock - drop the rmn read lock
*
* @rmn: our notifier
*
* Drop the rmn read side lock.
*/
static void amdgpu_mn_read_unlock(struct amdgpu_mn *rmn)
{
if (atomic_dec_return(&rmn->recursion) == 0)
up_read_non_owner(&rmn->lock);
}
/** /**
* amdgpu_mn_invalidate_node - unmap all BOs of a node * amdgpu_mn_invalidate_node - unmap all BOs of a node
* *
@ -126,23 +175,12 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end)) if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
continue; continue;
r = amdgpu_bo_reserve(bo, true);
if (r) {
DRM_ERROR("(%ld) failed to reserve user bo\n", r);
continue;
}
r = reservation_object_wait_timeout_rcu(bo->tbo.resv, r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
true, false, MAX_SCHEDULE_TIMEOUT); true, false, MAX_SCHEDULE_TIMEOUT);
if (r <= 0) if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r); DRM_ERROR("(%ld) failed to wait for user bo\n", r);
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); amdgpu_ttm_tt_mark_user_pages(bo->tbo.ttm);
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (r)
DRM_ERROR("(%ld) failed to validate user bo\n", r);
amdgpu_bo_unreserve(bo);
} }
} }
@ -168,7 +206,7 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
/* notification is exclusive, but interval is inclusive */ /* notification is exclusive, but interval is inclusive */
end -= 1; end -= 1;
mutex_lock(&rmn->lock); amdgpu_mn_read_lock(rmn);
it = interval_tree_iter_first(&rmn->objects, start, end); it = interval_tree_iter_first(&rmn->objects, start, end);
while (it) { while (it) {
@ -179,13 +217,32 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
amdgpu_mn_invalidate_node(node, start, end); amdgpu_mn_invalidate_node(node, start, end);
} }
}
mutex_unlock(&rmn->lock); /**
* amdgpu_mn_invalidate_range_end - callback to notify about mm change
*
* @mn: our notifier
* @mn: the mm this callback is about
* @start: start of updated range
* @end: end of updated range
*
* Release the lock again to allow new command submissions.
*/
static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
amdgpu_mn_read_unlock(rmn);
} }
static const struct mmu_notifier_ops amdgpu_mn_ops = { static const struct mmu_notifier_ops amdgpu_mn_ops = {
.release = amdgpu_mn_release, .release = amdgpu_mn_release,
.invalidate_range_start = amdgpu_mn_invalidate_range_start, .invalidate_range_start = amdgpu_mn_invalidate_range_start,
.invalidate_range_end = amdgpu_mn_invalidate_range_end,
}; };
/** /**
@ -195,7 +252,7 @@ static const struct mmu_notifier_ops amdgpu_mn_ops = {
* *
* Creates a notifier context for current->mm. * Creates a notifier context for current->mm.
*/ */
static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
{ {
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
struct amdgpu_mn *rmn; struct amdgpu_mn *rmn;
@ -220,8 +277,10 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
rmn->adev = adev; rmn->adev = adev;
rmn->mm = mm; rmn->mm = mm;
rmn->mn.ops = &amdgpu_mn_ops; rmn->mn.ops = &amdgpu_mn_ops;
mutex_init(&rmn->lock); init_rwsem(&rmn->lock);
rmn->objects = RB_ROOT_CACHED; rmn->objects = RB_ROOT_CACHED;
mutex_init(&rmn->read_lock);
atomic_set(&rmn->recursion, 0);
r = __mmu_notifier_register(&rmn->mn, mm); r = __mmu_notifier_register(&rmn->mn, mm);
if (r) if (r)
@ -267,7 +326,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
INIT_LIST_HEAD(&bos); INIT_LIST_HEAD(&bos);
mutex_lock(&rmn->lock); down_write(&rmn->lock);
while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) { while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
kfree(node); kfree(node);
@ -281,7 +340,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
if (!node) { if (!node) {
node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL); node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
if (!node) { if (!node) {
mutex_unlock(&rmn->lock); up_write(&rmn->lock);
return -ENOMEM; return -ENOMEM;
} }
} }
@ -296,7 +355,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
interval_tree_insert(&node->it, &rmn->objects); interval_tree_insert(&node->it, &rmn->objects);
mutex_unlock(&rmn->lock); up_write(&rmn->lock);
return 0; return 0;
} }
@ -322,7 +381,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
return; return;
} }
mutex_lock(&rmn->lock); down_write(&rmn->lock);
/* save the next list entry for later */ /* save the next list entry for later */
head = bo->mn_list.next; head = bo->mn_list.next;
@ -337,6 +396,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
kfree(node); kfree(node);
} }
mutex_unlock(&rmn->lock); up_write(&rmn->lock);
mutex_unlock(&adev->mn_lock); mutex_unlock(&adev->mn_lock);
} }

@ -0,0 +1,52 @@
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König
*/
#ifndef __AMDGPU_MN_H__
#define __AMDGPU_MN_H__
/*
* MMU Notifier
*/
struct amdgpu_mn;
#if defined(CONFIG_MMU_NOTIFIER)
void amdgpu_mn_lock(struct amdgpu_mn *mn);
void amdgpu_mn_unlock(struct amdgpu_mn *mn);
struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev);
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_mn_unregister(struct amdgpu_bo *bo);
#else
static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {}
static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {}
static inline struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
{
return NULL;
}
static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
return -ENODEV;
}
static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
#endif
#endif

@ -40,9 +40,7 @@
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{ {
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo; struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
bo = container_of(tbo, struct amdgpu_bo, tbo);
amdgpu_bo_kunmap(bo); amdgpu_bo_kunmap(bo);
@ -64,11 +62,12 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
return false; return false;
} }
static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
struct ttm_placement *placement,
struct ttm_place *places,
u32 domain, u64 flags)
{ {
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
struct ttm_placement *placement = &abo->placement;
struct ttm_place *places = abo->placements;
u64 flags = abo->flags;
u32 c = 0; u32 c = 0;
if (domain & AMDGPU_GEM_DOMAIN_VRAM) { if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
@ -151,27 +150,6 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
placement->busy_placement = places; placement->busy_placement = places;
} }
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
domain, abo->flags);
}
static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
struct ttm_placement *placement)
{
BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));
memcpy(bo->placements, placement->placement,
placement->num_placement * sizeof(struct ttm_place));
bo->placement.num_placement = placement->num_placement;
bo->placement.num_busy_placement = placement->num_busy_placement;
bo->placement.placement = bo->placements;
bo->placement.busy_placement = bo->placements;
}
/** /**
* amdgpu_bo_create_reserved - create reserved BO for kernel use * amdgpu_bo_create_reserved - create reserved BO for kernel use
* *
@ -303,14 +281,13 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
*cpu_addr = NULL; *cpu_addr = NULL;
} }
int amdgpu_bo_create_restricted(struct amdgpu_device *adev, static int amdgpu_bo_do_create(struct amdgpu_device *adev,
unsigned long size, int byte_align, unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags, bool kernel, u32 domain, u64 flags,
struct sg_table *sg, struct sg_table *sg,
struct ttm_placement *placement, struct reservation_object *resv,
struct reservation_object *resv, uint64_t init_value,
uint64_t init_value, struct amdgpu_bo **bo_ptr)
struct amdgpu_bo **bo_ptr)
{ {
struct amdgpu_bo *bo; struct amdgpu_bo *bo;
enum ttm_bo_type type; enum ttm_bo_type type;
@ -384,13 +361,17 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC; bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#endif #endif
amdgpu_fill_placement_to_bo(bo, placement); bo->tbo.bdev = &adev->mman.bdev;
/* Kernel allocation are uninterruptible */ amdgpu_ttm_placement_from_domain(bo, domain);
initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
/* Kernel allocation are uninterruptible */
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type, r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, !kernel, NULL, &bo->placement, page_align, !kernel, NULL,
acc_size, sg, resv, &amdgpu_ttm_bo_destroy); acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
if (unlikely(r != 0))
return r;
bytes_moved = atomic64_read(&adev->num_bytes_moved) - bytes_moved = atomic64_read(&adev->num_bytes_moved) -
initial_bytes_moved; initial_bytes_moved;
if (adev->mc.visible_vram_size < adev->mc.real_vram_size && if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
@ -400,9 +381,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
else else
amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0); amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);
if (unlikely(r != 0))
return r;
if (kernel) if (kernel)
bo->tbo.priority = 1; bo->tbo.priority = 1;
@ -442,27 +420,17 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
unsigned long size, int byte_align, unsigned long size, int byte_align,
struct amdgpu_bo *bo) struct amdgpu_bo *bo)
{ {
struct ttm_placement placement = {0};
struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
int r; int r;
if (bo->shadow) if (bo->shadow)
return 0; return 0;
memset(&placements, 0, sizeof(placements)); r = amdgpu_bo_do_create(adev, size, byte_align, true,
amdgpu_ttm_placement_init(adev, &placement, placements, AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_DOMAIN_GTT, AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_CPU_GTT_USWC | AMDGPU_GEM_CREATE_SHADOW,
AMDGPU_GEM_CREATE_SHADOW); NULL, bo->tbo.resv, 0,
&bo->shadow);
r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_SHADOW,
NULL, &placement,
bo->tbo.resv,
0,
&bo->shadow);
if (!r) { if (!r) {
bo->shadow->parent = amdgpu_bo_ref(bo); bo->shadow->parent = amdgpu_bo_ref(bo);
mutex_lock(&adev->shadow_list_lock); mutex_lock(&adev->shadow_list_lock);
@ -484,18 +452,11 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
uint64_t init_value, uint64_t init_value,
struct amdgpu_bo **bo_ptr) struct amdgpu_bo **bo_ptr)
{ {
struct ttm_placement placement = {0};
struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW; uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
int r; int r;
memset(&placements, 0, sizeof(placements)); r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain,
amdgpu_ttm_placement_init(adev, &placement, placements, parent_flags, sg, resv, init_value, bo_ptr);
domain, parent_flags);
r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, domain,
parent_flags, sg, &placement, resv,
init_value, bo_ptr);
if (r) if (r)
return r; return r;
@ -672,7 +633,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
{ {
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r, i; int r, i;
unsigned fpfn, lpfn;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
return -EPERM; return -EPERM;
@ -704,22 +664,16 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
} }
bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
/* force to pin into visible video ram */
if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
amdgpu_ttm_placement_from_domain(bo, domain); amdgpu_ttm_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) { for (i = 0; i < bo->placement.num_placement; i++) {
/* force to pin into visible video ram */ unsigned fpfn, lpfn;
if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) && fpfn = min_offset >> PAGE_SHIFT;
(!max_offset || max_offset > lpfn = max_offset >> PAGE_SHIFT;
adev->mc.visible_vram_size)) {
if (WARN_ON_ONCE(min_offset >
adev->mc.visible_vram_size))
return -EINVAL;
fpfn = min_offset >> PAGE_SHIFT;
lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
} else {
fpfn = min_offset >> PAGE_SHIFT;
lpfn = max_offset >> PAGE_SHIFT;
}
if (fpfn > bo->placements[i].fpfn) if (fpfn > bo->placements[i].fpfn)
bo->placements[i].fpfn = fpfn; bo->placements[i].fpfn = fpfn;
if (!bo->placements[i].lpfn || if (!bo->placements[i].lpfn ||
@ -928,8 +882,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
return; return;
abo = container_of(bo, struct amdgpu_bo, tbo); abo = ttm_to_amdgpu_bo(bo);
amdgpu_vm_bo_invalidate(adev, abo); amdgpu_vm_bo_invalidate(adev, abo, evict);
amdgpu_bo_kunmap(abo); amdgpu_bo_kunmap(abo);
@ -955,7 +909,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
return 0; return 0;
abo = container_of(bo, struct amdgpu_bo, tbo); abo = ttm_to_amdgpu_bo(bo);
/* Remember that this BO was accessed by the CPU */ /* Remember that this BO was accessed by the CPU */
abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

@ -35,6 +35,7 @@
/* bo virtual addresses in a vm */ /* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping { struct amdgpu_bo_va_mapping {
struct amdgpu_bo_va *bo_va;
struct list_head list; struct list_head list;
struct rb_node rb; struct rb_node rb;
uint64_t start; uint64_t start;
@ -49,12 +50,17 @@ struct amdgpu_bo_va {
struct amdgpu_vm_bo_base base; struct amdgpu_vm_bo_base base;
/* protected by bo being reserved */ /* protected by bo being reserved */
struct dma_fence *last_pt_update;
unsigned ref_count; unsigned ref_count;
/* all other members protected by the VM PD being reserved */
struct dma_fence *last_pt_update;
/* mappings for this bo_va */ /* mappings for this bo_va */
struct list_head invalids; struct list_head invalids;
struct list_head valids; struct list_head valids;
/* If the mappings are cleared or filled */
bool cleared;
}; };
struct amdgpu_bo { struct amdgpu_bo {
@ -88,6 +94,11 @@ struct amdgpu_bo {
}; };
}; };
static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
return container_of(tbo, struct amdgpu_bo, tbo);
}
/** /**
* amdgpu_mem_type_to_domain - return domain corresponding to mem_type * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
* @mem_type: ttm memory type * @mem_type: ttm memory type
@ -182,6 +193,14 @@ static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
} }
} }
/**
* amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
*/
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}
int amdgpu_bo_create(struct amdgpu_device *adev, int amdgpu_bo_create(struct amdgpu_device *adev,
unsigned long size, int byte_align, unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags, bool kernel, u32 domain, u64 flags,
@ -189,14 +208,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
struct reservation_object *resv, struct reservation_object *resv,
uint64_t init_value, uint64_t init_value,
struct amdgpu_bo **bo_ptr); struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
struct sg_table *sg,
struct ttm_placement *placement,
struct reservation_object *resv,
uint64_t init_value,
struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev, int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
unsigned long size, int align, unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr, u32 domain, struct amdgpu_bo **bo_ptr,

@@ -64,17 +64,13 @@ static const struct cg_flag_name clocks[] = {
 
 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
 {
-	if (adev->pp_enabled)
-		/* TODO */
-		return;
-
 	if (adev->pm.dpm_enabled) {
 		mutex_lock(&adev->pm.mutex);
 		if (power_supply_is_system_supplied() > 0)
 			adev->pm.dpm.ac_power = true;
 		else
 			adev->pm.dpm.ac_power = false;
-		if (adev->pm.funcs->enable_bapm)
+		if (adev->powerplay.pp_funcs->enable_bapm)
 			amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
 		mutex_unlock(&adev->pm.mutex);
 	}
@@ -88,9 +84,9 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	enum amd_pm_state_type pm;
 
-	if (adev->pp_enabled) {
+	if (adev->powerplay.pp_funcs->get_current_power_state)
 		pm = amdgpu_dpm_get_current_power_state(adev);
-	} else
+	else
 		pm = adev->pm.dpm.user_state;
 
 	return snprintf(buf, PAGE_SIZE, "%s\n",
@ -118,8 +114,8 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
goto fail; goto fail;
} }
if (adev->pp_enabled) { if (adev->powerplay.pp_funcs->dispatch_tasks) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL); amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL);
} else { } else {
mutex_lock(&adev->pm.mutex); mutex_lock(&adev->pm.mutex);
adev->pm.dpm.user_state = state; adev->pm.dpm.user_state = state;
@ -140,13 +136,17 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
{ {
struct drm_device *ddev = dev_get_drvdata(dev); struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private; struct amdgpu_device *adev = ddev->dev_private;
enum amd_dpm_forced_level level; enum amd_dpm_forced_level level = 0xff;
if ((adev->flags & AMD_IS_PX) && if ((adev->flags & AMD_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON)) (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return snprintf(buf, PAGE_SIZE, "off\n"); return snprintf(buf, PAGE_SIZE, "off\n");
level = amdgpu_dpm_get_performance_level(adev); if (adev->powerplay.pp_funcs->get_performance_level)
level = amdgpu_dpm_get_performance_level(adev);
else
level = adev->pm.dpm.forced_level;
return snprintf(buf, PAGE_SIZE, "%s\n", return snprintf(buf, PAGE_SIZE, "%s\n",
(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
@ -167,7 +167,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev); struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private; struct amdgpu_device *adev = ddev->dev_private;
enum amd_dpm_forced_level level; enum amd_dpm_forced_level level;
enum amd_dpm_forced_level current_level; enum amd_dpm_forced_level current_level = 0xff;
int ret = 0; int ret = 0;
/* Can't force performance level when the card is off */ /* Can't force performance level when the card is off */
@ -175,7 +175,8 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON)) (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL; return -EINVAL;
current_level = amdgpu_dpm_get_performance_level(adev); if (adev->powerplay.pp_funcs->get_performance_level)
current_level = amdgpu_dpm_get_performance_level(adev);
if (strncmp("low", buf, strlen("low")) == 0) { if (strncmp("low", buf, strlen("low")) == 0) {
level = AMD_DPM_FORCED_LEVEL_LOW; level = AMD_DPM_FORCED_LEVEL_LOW;
@ -203,9 +204,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
if (current_level == level) if (current_level == level)
return count; return count;
if (adev->pp_enabled) if (adev->powerplay.pp_funcs->force_performance_level) {
amdgpu_dpm_force_performance_level(adev, level);
else {
mutex_lock(&adev->pm.mutex); mutex_lock(&adev->pm.mutex);
if (adev->pm.dpm.thermal_active) { if (adev->pm.dpm.thermal_active) {
count = -EINVAL; count = -EINVAL;
@ -233,7 +232,7 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
struct pp_states_info data; struct pp_states_info data;
int i, buf_len; int i, buf_len;
if (adev->pp_enabled) if (adev->powerplay.pp_funcs->get_pp_num_states)
amdgpu_dpm_get_pp_num_states(adev, &data); amdgpu_dpm_get_pp_num_states(adev, &data);
buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums); buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
@ -257,8 +256,8 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
enum amd_pm_state_type pm = 0; enum amd_pm_state_type pm = 0;
int i = 0; int i = 0;
if (adev->pp_enabled) { if (adev->powerplay.pp_funcs->get_current_power_state
&& adev->powerplay.pp_funcs->get_pp_num_states) {
pm = amdgpu_dpm_get_current_power_state(adev); pm = amdgpu_dpm_get_current_power_state(adev);
amdgpu_dpm_get_pp_num_states(adev, &data); amdgpu_dpm_get_pp_num_states(adev, &data);
@ -280,25 +279,10 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
{ {
struct drm_device *ddev = dev_get_drvdata(dev); struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private; struct amdgpu_device *adev = ddev->dev_private;
struct pp_states_info data;
enum amd_pm_state_type pm = 0;
int i;
if (adev->pp_force_state_enabled && adev->pp_enabled) { if (adev->pp_force_state_enabled)
pm = amdgpu_dpm_get_current_power_state(adev); return amdgpu_get_pp_cur_state(dev, attr, buf);
amdgpu_dpm_get_pp_num_states(adev, &data); else
for (i = 0; i < data.nums; i++) {
if (pm == data.states[i])
break;
}
if (i == data.nums)
i = -EINVAL;
return snprintf(buf, PAGE_SIZE, "%d\n", i);
} else
return snprintf(buf, PAGE_SIZE, "\n"); return snprintf(buf, PAGE_SIZE, "\n");
} }
@ -315,7 +299,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
if (strlen(buf) == 1) if (strlen(buf) == 1)
adev->pp_force_state_enabled = false; adev->pp_force_state_enabled = false;
else if (adev->pp_enabled) { else if (adev->powerplay.pp_funcs->dispatch_tasks &&
adev->powerplay.pp_funcs->get_pp_num_states) {
struct pp_states_info data; struct pp_states_info data;
ret = kstrtoul(buf, 0, &idx); ret = kstrtoul(buf, 0, &idx);
@ -330,7 +315,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
if (state != POWER_STATE_TYPE_INTERNAL_BOOT && if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
state != POWER_STATE_TYPE_DEFAULT) { state != POWER_STATE_TYPE_DEFAULT) {
amdgpu_dpm_dispatch_task(adev, amdgpu_dpm_dispatch_task(adev,
AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL); AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL);
adev->pp_force_state_enabled = true; adev->pp_force_state_enabled = true;
} }
} }
@ -347,7 +332,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
char *table = NULL; char *table = NULL;
int size; int size;
if (adev->pp_enabled) if (adev->powerplay.pp_funcs->get_pp_table)
size = amdgpu_dpm_get_pp_table(adev, &table); size = amdgpu_dpm_get_pp_table(adev, &table);
else else
return 0; return 0;
@ -368,7 +353,7 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev); struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private; struct amdgpu_device *adev = ddev->dev_private;
if (adev->pp_enabled) if (adev->powerplay.pp_funcs->set_pp_table)
amdgpu_dpm_set_pp_table(adev, buf, count); amdgpu_dpm_set_pp_table(adev, buf, count);
return count; return count;
@ -380,14 +365,11 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
{ {
struct drm_device *ddev = dev_get_drvdata(dev); struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private; struct amdgpu_device *adev = ddev->dev_private;
ssize_t size = 0;
if (adev->pp_enabled) if (adev->powerplay.pp_funcs->print_clock_levels)
size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
else if (adev->pm.funcs->print_clock_levels) else
size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf); return snprintf(buf, PAGE_SIZE, "\n");
return size;
} }
static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
@ -416,10 +398,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
mask |= 1 << level; mask |= 1 << level;
} }
if (adev->pp_enabled) if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
else if (adev->pm.funcs->force_clock_level)
adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask);
fail: fail:
return count; return count;
} }
@ -430,14 +411,11 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
{ {
struct drm_device *ddev = dev_get_drvdata(dev); struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private; struct amdgpu_device *adev = ddev->dev_private;
ssize_t size = 0;
if (adev->pp_enabled) if (adev->powerplay.pp_funcs->print_clock_levels)
size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
else if (adev->pm.funcs->print_clock_levels) else
size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf); return snprintf(buf, PAGE_SIZE, "\n");
return size;
} }
static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
@ -465,11 +443,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
} }
mask |= 1 << level; mask |= 1 << level;
} }
if (adev->powerplay.pp_funcs->force_clock_level)
if (adev->pp_enabled)
amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
else if (adev->pm.funcs->force_clock_level)
adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask);
fail: fail:
return count; return count;
} }
@@ -480,14 +456,11 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
-	ssize_t size = 0;
-
-	if (adev->pp_enabled)
-		size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
-	else if (adev->pm.funcs->print_clock_levels)
-		size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf);
-
-	return size;
+	if (adev->powerplay.pp_funcs->print_clock_levels)
+		return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
+	else
+		return snprintf(buf, PAGE_SIZE, "\n");
 }

 static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
@@ -515,11 +488,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
 		}
 		mask |= 1 << level;
 	}
-
-	if (adev->pp_enabled)
+	if (adev->powerplay.pp_funcs->force_clock_level)
 		amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
-	else if (adev->pm.funcs->force_clock_level)
-		adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask);

 fail:
 	return count;
 }
@@ -532,10 +503,8 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	uint32_t value = 0;

-	if (adev->pp_enabled)
+	if (adev->powerplay.pp_funcs->get_sclk_od)
 		value = amdgpu_dpm_get_sclk_od(adev);
-	else if (adev->pm.funcs->get_sclk_od)
-		value = adev->pm.funcs->get_sclk_od(adev);

 	return snprintf(buf, PAGE_SIZE, "%d\n", value);
 }
@@ -556,12 +525,12 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 		count = -EINVAL;
 		goto fail;
 	}
+	if (adev->powerplay.pp_funcs->set_sclk_od)
+		amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);

-	if (adev->pp_enabled) {
-		amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
-		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
-	} else if (adev->pm.funcs->set_sclk_od) {
-		adev->pm.funcs->set_sclk_od(adev, (uint32_t)value);
+	if (adev->powerplay.pp_funcs->dispatch_tasks) {
+		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
+	} else {
 		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
 		amdgpu_pm_compute_clocks(adev);
 	}
@@ -578,10 +547,8 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	uint32_t value = 0;

-	if (adev->pp_enabled)
+	if (adev->powerplay.pp_funcs->get_mclk_od)
 		value = amdgpu_dpm_get_mclk_od(adev);
-	else if (adev->pm.funcs->get_mclk_od)
-		value = adev->pm.funcs->get_mclk_od(adev);

 	return snprintf(buf, PAGE_SIZE, "%d\n", value);
 }
@@ -602,12 +569,12 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 		count = -EINVAL;
 		goto fail;
 	}
+	if (adev->powerplay.pp_funcs->set_mclk_od)
+		amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);

-	if (adev->pp_enabled) {
-		amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
-		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
-	} else if (adev->pm.funcs->set_mclk_od) {
-		adev->pm.funcs->set_mclk_od(adev, (uint32_t)value);
+	if (adev->powerplay.pp_funcs->dispatch_tasks) {
+		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
+	} else {
 		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
 		amdgpu_pm_compute_clocks(adev);
 	}
@ -621,14 +588,11 @@ static ssize_t amdgpu_get_pp_power_profile(struct device *dev,
{ {
struct drm_device *ddev = dev_get_drvdata(dev); struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private; struct amdgpu_device *adev = ddev->dev_private;
int ret = 0; int ret = 0xff;
if (adev->pp_enabled) if (adev->powerplay.pp_funcs->get_power_profile_state)
ret = amdgpu_dpm_get_power_profile_state( ret = amdgpu_dpm_get_power_profile_state(
adev, query); adev, query);
else if (adev->pm.funcs->get_power_profile_state)
ret = adev->pm.funcs->get_power_profile_state(
adev, query);
if (ret) if (ret)
return ret; return ret;
@ -675,15 +639,12 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
char *sub_str, buf_cpy[128], *tmp_str; char *sub_str, buf_cpy[128], *tmp_str;
const char delimiter[3] = {' ', '\n', '\0'}; const char delimiter[3] = {' ', '\n', '\0'};
long int value; long int value;
int ret = 0; int ret = 0xff;
if (strncmp("reset", buf, strlen("reset")) == 0) { if (strncmp("reset", buf, strlen("reset")) == 0) {
if (adev->pp_enabled) if (adev->powerplay.pp_funcs->reset_power_profile_state)
ret = amdgpu_dpm_reset_power_profile_state( ret = amdgpu_dpm_reset_power_profile_state(
adev, request); adev, request);
else if (adev->pm.funcs->reset_power_profile_state)
ret = adev->pm.funcs->reset_power_profile_state(
adev, request);
if (ret) { if (ret) {
count = -EINVAL; count = -EINVAL;
goto fail; goto fail;
@ -692,12 +653,10 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
} }
if (strncmp("set", buf, strlen("set")) == 0) { if (strncmp("set", buf, strlen("set")) == 0) {
if (adev->pp_enabled) if (adev->powerplay.pp_funcs->set_power_profile_state)
ret = amdgpu_dpm_set_power_profile_state( ret = amdgpu_dpm_set_power_profile_state(
adev, request); adev, request);
else if (adev->pm.funcs->set_power_profile_state)
ret = adev->pm.funcs->set_power_profile_state(
adev, request);
if (ret) { if (ret) {
count = -EINVAL; count = -EINVAL;
goto fail; goto fail;
@ -745,13 +704,8 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
loop++; loop++;
} }
if (adev->powerplay.pp_funcs->set_power_profile_state)
if (adev->pp_enabled) ret = amdgpu_dpm_set_power_profile_state(adev, request);
ret = amdgpu_dpm_set_power_profile_state(
adev, request);
else if (adev->pm.funcs->set_power_profile_state)
ret = adev->pm.funcs->set_power_profile_state(
adev, request);
if (ret) if (ret)
count = -EINVAL; count = -EINVAL;
@ -831,7 +785,7 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON)) (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL; return -EINVAL;
-	if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
+	if (!adev->powerplay.pp_funcs->get_temperature)
temp = 0; temp = 0;
else else
temp = amdgpu_dpm_get_temperature(adev); temp = amdgpu_dpm_get_temperature(adev);
@ -862,7 +816,7 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
struct amdgpu_device *adev = dev_get_drvdata(dev); struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 pwm_mode = 0; u32 pwm_mode = 0;
-	if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode)
+	if (!adev->powerplay.pp_funcs->get_fan_control_mode)
return -EINVAL; return -EINVAL;
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
@ -879,7 +833,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
int err; int err;
int value; int value;
-	if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode)
+	if (!adev->powerplay.pp_funcs->set_fan_control_mode)
return -EINVAL; return -EINVAL;
err = kstrtoint(buf, 10, &value); err = kstrtoint(buf, 10, &value);
@ -919,9 +873,11 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
value = (value * 100) / 255; value = (value * 100) / 255;
err = amdgpu_dpm_set_fan_speed_percent(adev, value); if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
if (err) err = amdgpu_dpm_set_fan_speed_percent(adev, value);
return err; if (err)
return err;
}
return count; return count;
} }
@ -932,11 +888,13 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
{ {
struct amdgpu_device *adev = dev_get_drvdata(dev); struct amdgpu_device *adev = dev_get_drvdata(dev);
int err; int err;
u32 speed; u32 speed = 0;
err = amdgpu_dpm_get_fan_speed_percent(adev, &speed); if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
if (err) err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
return err; if (err)
return err;
}
speed = (speed * 255) / 100; speed = (speed * 255) / 100;
@ -949,11 +907,13 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
{ {
struct amdgpu_device *adev = dev_get_drvdata(dev); struct amdgpu_device *adev = dev_get_drvdata(dev);
int err; int err;
u32 speed; u32 speed = 0;
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
if (err) err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
return err; if (err)
return err;
}
return sprintf(buf, "%i\n", speed); return sprintf(buf, "%i\n", speed);
} }
@ -996,9 +956,6 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
return 0; return 0;
if (adev->pp_enabled)
return effective_mode;
/* Skip fan attributes if fan is not present */ /* Skip fan attributes if fan is not present */
if (adev->pm.no_fan && if (adev->pm.no_fan &&
(attr == &sensor_dev_attr_pwm1.dev_attr.attr || (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
@ -1008,21 +965,21 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
return 0; return 0;
/* mask fan attributes if we have no bindings for this asic to expose */ /* mask fan attributes if we have no bindings for this asic to expose */
if ((!adev->pm.funcs->get_fan_speed_percent && if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */ attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
(!adev->pm.funcs->get_fan_control_mode && (!adev->powerplay.pp_funcs->get_fan_control_mode &&
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
effective_mode &= ~S_IRUGO; effective_mode &= ~S_IRUGO;
if ((!adev->pm.funcs->set_fan_speed_percent && if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */ attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
(!adev->pm.funcs->set_fan_control_mode && (!adev->powerplay.pp_funcs->set_fan_control_mode &&
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
effective_mode &= ~S_IWUSR; effective_mode &= ~S_IWUSR;
/* hide max/min values if we can't both query and manage the fan */ /* hide max/min values if we can't both query and manage the fan */
if ((!adev->pm.funcs->set_fan_speed_percent && if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
!adev->pm.funcs->get_fan_speed_percent) && !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
(attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
return 0; return 0;
@ -1055,7 +1012,7 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
if (!adev->pm.dpm_enabled) if (!adev->pm.dpm_enabled)
return; return;
-	if (adev->pm.funcs->get_temperature) {
+	if (adev->powerplay.pp_funcs->get_temperature) {
int temp = amdgpu_dpm_get_temperature(adev); int temp = amdgpu_dpm_get_temperature(adev);
if (temp < adev->pm.dpm.thermal.min_temp) if (temp < adev->pm.dpm.thermal.min_temp)
@ -1087,7 +1044,7 @@ static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
true : false; true : false;
/* check if the vblank period is too short to adjust the mclk */ /* check if the vblank period is too short to adjust the mclk */
-	if (single_display && adev->pm.funcs->vblank_too_short) {
+	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
if (amdgpu_dpm_vblank_too_short(adev)) if (amdgpu_dpm_vblank_too_short(adev))
single_display = false; single_display = false;
} }
@ -1216,7 +1173,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
struct amdgpu_ps *ps; struct amdgpu_ps *ps;
enum amd_pm_state_type dpm_state; enum amd_pm_state_type dpm_state;
int ret; int ret;
-	bool equal;
+	bool equal = false;
/* if dpm init failed */ /* if dpm init failed */
if (!adev->pm.dpm_enabled) if (!adev->pm.dpm_enabled)
@ -1236,7 +1193,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
else else
return; return;
-	if (amdgpu_dpm == 1) {
+	if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
 		printk("switching from power state:\n");
 		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
 		printk("switching to power state:\n");
@@ -1245,15 +1202,17 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 	/* update whether vce is active */
 	ps->vce_active = adev->pm.dpm.vce_active;

+	if (adev->powerplay.pp_funcs->display_configuration_changed)
 		amdgpu_dpm_display_configuration_changed(adev);

 	ret = amdgpu_dpm_pre_set_power_state(adev);
 	if (ret)
 		return;

-	if ((0 != amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)))
-		equal = false;
+	if (adev->powerplay.pp_funcs->check_state_equal) {
+		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
+			equal = false;
+	}
if (equal) if (equal)
return; return;
@ -1264,7 +1223,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
-	if (adev->pm.funcs->force_performance_level) {
+	if (adev->powerplay.pp_funcs->force_performance_level) {
if (adev->pm.dpm.thermal_active) { if (adev->pm.dpm.thermal_active) {
enum amd_dpm_forced_level level = adev->pm.dpm.forced_level; enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
/* force low perf level for thermal */ /* force low perf level for thermal */
@ -1280,7 +1239,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{ {
-	if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) {
+	if (adev->powerplay.pp_funcs->powergate_uvd) {
/* enable/disable UVD */ /* enable/disable UVD */
mutex_lock(&adev->pm.mutex); mutex_lock(&adev->pm.mutex);
amdgpu_dpm_powergate_uvd(adev, !enable); amdgpu_dpm_powergate_uvd(adev, !enable);
@ -1302,7 +1261,7 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{ {
-	if (adev->pp_enabled || adev->pm.funcs->powergate_vce) {
+	if (adev->powerplay.pp_funcs->powergate_vce) {
/* enable/disable VCE */ /* enable/disable VCE */
mutex_lock(&adev->pm.mutex); mutex_lock(&adev->pm.mutex);
amdgpu_dpm_powergate_vce(adev, !enable); amdgpu_dpm_powergate_vce(adev, !enable);
@ -1337,8 +1296,7 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{ {
int i; int i;
-	if (adev->pp_enabled)
-		/* TO DO */
+	if (adev->powerplay.pp_funcs->print_power_state == NULL)
 		return;
for (i = 0; i < adev->pm.dpm.num_ps; i++) for (i = 0; i < adev->pm.dpm.num_ps; i++)
@ -1353,10 +1311,11 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
if (adev->pm.sysfs_initialized) if (adev->pm.sysfs_initialized)
return 0; return 0;
-	if (!adev->pp_enabled) {
-		if (adev->pm.funcs->get_temperature == NULL)
-			return 0;
-	}
+	if (adev->pm.dpm_enabled == 0)
+		return 0;
+
+	if (adev->powerplay.pp_funcs->get_temperature == NULL)
+		return 0;
adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
DRIVER_NAME, adev, DRIVER_NAME, adev,
@ -1379,27 +1338,26 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
return ret; return ret;
} }
if (adev->pp_enabled) {
ret = device_create_file(adev->dev, &dev_attr_pp_num_states); ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
if (ret) { if (ret) {
DRM_ERROR("failed to create device file pp_num_states\n"); DRM_ERROR("failed to create device file pp_num_states\n");
return ret; return ret;
} }
ret = device_create_file(adev->dev, &dev_attr_pp_cur_state); ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
if (ret) { if (ret) {
DRM_ERROR("failed to create device file pp_cur_state\n"); DRM_ERROR("failed to create device file pp_cur_state\n");
return ret; return ret;
} }
ret = device_create_file(adev->dev, &dev_attr_pp_force_state); ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
if (ret) { if (ret) {
DRM_ERROR("failed to create device file pp_force_state\n"); DRM_ERROR("failed to create device file pp_force_state\n");
return ret; return ret;
} }
ret = device_create_file(adev->dev, &dev_attr_pp_table); ret = device_create_file(adev->dev, &dev_attr_pp_table);
if (ret) { if (ret) {
DRM_ERROR("failed to create device file pp_table\n"); DRM_ERROR("failed to create device file pp_table\n");
return ret; return ret;
}
} }
ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
@ -1455,16 +1413,19 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{ {
if (adev->pm.dpm_enabled == 0)
return;
if (adev->pm.int_hwmon_dev) if (adev->pm.int_hwmon_dev)
hwmon_device_unregister(adev->pm.int_hwmon_dev); hwmon_device_unregister(adev->pm.int_hwmon_dev);
device_remove_file(adev->dev, &dev_attr_power_dpm_state); device_remove_file(adev->dev, &dev_attr_power_dpm_state);
device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level); device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
if (adev->pp_enabled) {
device_remove_file(adev->dev, &dev_attr_pp_num_states); device_remove_file(adev->dev, &dev_attr_pp_num_states);
device_remove_file(adev->dev, &dev_attr_pp_cur_state); device_remove_file(adev->dev, &dev_attr_pp_cur_state);
device_remove_file(adev->dev, &dev_attr_pp_force_state); device_remove_file(adev->dev, &dev_attr_pp_force_state);
device_remove_file(adev->dev, &dev_attr_pp_table); device_remove_file(adev->dev, &dev_attr_pp_table);
}
device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie); device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
@ -1495,8 +1456,8 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
amdgpu_fence_wait_empty(ring); amdgpu_fence_wait_empty(ring);
} }
-	if (adev->pp_enabled) {
-		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
+	if (adev->powerplay.pp_funcs->dispatch_tasks) {
+		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL, NULL);
} else { } else {
mutex_lock(&adev->pm.mutex); mutex_lock(&adev->pm.mutex);
adev->pm.dpm.new_active_crtcs = 0; adev->pm.dpm.new_active_crtcs = 0;
@@ -1630,15 +1591,15 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
 	if ((adev->flags & AMD_IS_PX) &&
 	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
 		seq_printf(m, "PX asic powered off\n");
-	} else if (adev->pp_enabled) {
-		return amdgpu_debugfs_pm_info_pp(m, adev);
-	} else {
+	} else if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
 		mutex_lock(&adev->pm.mutex);
-		if (adev->pm.funcs->debugfs_print_current_performance_level)
-			adev->pm.funcs->debugfs_print_current_performance_level(adev, m);
+		if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
+			adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
 		else
 			seq_printf(m, "Debugfs support not implemented for this asic\n");
 		mutex_unlock(&adev->pm.mutex);
+	} else {
+		return amdgpu_debugfs_pm_info_pp(m, adev);
 	}

 	return 0;


@ -34,24 +34,6 @@
#include "cik_dpm.h" #include "cik_dpm.h"
#include "vi_dpm.h" #include "vi_dpm.h"
static int amdgpu_create_pp_handle(struct amdgpu_device *adev)
{
struct amd_pp_init pp_init;
struct amd_powerplay *amd_pp;
int ret;
amd_pp = &(adev->powerplay);
pp_init.chip_family = adev->family;
pp_init.chip_id = adev->asic_type;
pp_init.pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
pp_init.feature_mask = amdgpu_pp_feature_mask;
pp_init.device = amdgpu_cgs_create_device(adev);
ret = amd_powerplay_create(&pp_init, &(amd_pp->pp_handle));
if (ret)
return -EINVAL;
return 0;
}
static int amdgpu_pp_early_init(void *handle) static int amdgpu_pp_early_init(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@ -59,7 +41,6 @@ static int amdgpu_pp_early_init(void *handle)
int ret = 0; int ret = 0;
amd_pp = &(adev->powerplay); amd_pp = &(adev->powerplay);
adev->pp_enabled = false;
amd_pp->pp_handle = (void *)adev; amd_pp->pp_handle = (void *)adev;
switch (adev->asic_type) { switch (adev->asic_type) {
@ -73,9 +54,7 @@ static int amdgpu_pp_early_init(void *handle)
case CHIP_STONEY: case CHIP_STONEY:
case CHIP_VEGA10: case CHIP_VEGA10:
case CHIP_RAVEN: case CHIP_RAVEN:
-		adev->pp_enabled = true;
-		if (amdgpu_create_pp_handle(adev))
-			return -EINVAL;
+		amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
amd_pp->ip_funcs = &pp_ip_funcs; amd_pp->ip_funcs = &pp_ip_funcs;
amd_pp->pp_funcs = &pp_dpm_funcs; amd_pp->pp_funcs = &pp_dpm_funcs;
break; break;
@ -87,17 +66,26 @@ static int amdgpu_pp_early_init(void *handle)
case CHIP_OLAND: case CHIP_OLAND:
case CHIP_HAINAN: case CHIP_HAINAN:
amd_pp->ip_funcs = &si_dpm_ip_funcs; amd_pp->ip_funcs = &si_dpm_ip_funcs;
amd_pp->pp_funcs = &si_dpm_funcs;
break; break;
#endif #endif
#ifdef CONFIG_DRM_AMDGPU_CIK #ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE: case CHIP_BONAIRE:
case CHIP_HAWAII: case CHIP_HAWAII:
-		amd_pp->ip_funcs = &ci_dpm_ip_funcs;
+		if (amdgpu_dpm == -1) {
+			amd_pp->ip_funcs = &ci_dpm_ip_funcs;
+			amd_pp->pp_funcs = &ci_dpm_funcs;
+		} else {
+			amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
+			amd_pp->ip_funcs = &pp_ip_funcs;
+			amd_pp->pp_funcs = &pp_dpm_funcs;
+		}
break; break;
case CHIP_KABINI: case CHIP_KABINI:
case CHIP_MULLINS: case CHIP_MULLINS:
case CHIP_KAVERI: case CHIP_KAVERI:
amd_pp->ip_funcs = &kv_dpm_ip_funcs; amd_pp->ip_funcs = &kv_dpm_ip_funcs;
amd_pp->pp_funcs = &kv_dpm_funcs;
break; break;
#endif #endif
default: default:
@ -107,12 +95,9 @@ static int amdgpu_pp_early_init(void *handle)
 	if (adev->powerplay.ip_funcs->early_init)
 		ret = adev->powerplay.ip_funcs->early_init(
-					adev->powerplay.pp_handle);
-
-	if (ret == PP_DPM_DISABLED) {
-		adev->pm.dpm_enabled = false;
-		return 0;
-	}
+					amd_pp->cgs_device ? amd_pp->cgs_device :
+					amd_pp->pp_handle);
return ret; return ret;
} }
@ -126,11 +111,6 @@ static int amdgpu_pp_late_init(void *handle)
ret = adev->powerplay.ip_funcs->late_init( ret = adev->powerplay.ip_funcs->late_init(
adev->powerplay.pp_handle); adev->powerplay.pp_handle);
if (adev->pp_enabled && adev->pm.dpm_enabled) {
amdgpu_pm_sysfs_init(adev);
amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
}
return ret; return ret;
} }
@ -165,21 +145,13 @@ static int amdgpu_pp_hw_init(void *handle)
int ret = 0; int ret = 0;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

-	if (adev->pp_enabled && adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
 		amdgpu_ucode_init_bo(adev);

 	if (adev->powerplay.ip_funcs->hw_init)
 		ret = adev->powerplay.ip_funcs->hw_init(
 					adev->powerplay.pp_handle);

-	if (ret == PP_DPM_DISABLED) {
-		adev->pm.dpm_enabled = false;
-		return 0;
-	}
-
-	if ((amdgpu_dpm != 0) && !amdgpu_sriov_vf(adev))
-		adev->pm.dpm_enabled = true;
-
 	return ret;
} }
@ -188,14 +160,11 @@ static int amdgpu_pp_hw_fini(void *handle)
int ret = 0; int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pp_enabled && adev->pm.dpm_enabled)
amdgpu_pm_sysfs_fini(adev);
if (adev->powerplay.ip_funcs->hw_fini) if (adev->powerplay.ip_funcs->hw_fini)
ret = adev->powerplay.ip_funcs->hw_fini( ret = adev->powerplay.ip_funcs->hw_fini(
adev->powerplay.pp_handle); adev->powerplay.pp_handle);
-	if (adev->pp_enabled && adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
 		amdgpu_ucode_fini_bo(adev);
return ret; return ret;
@ -209,9 +178,8 @@ static void amdgpu_pp_late_fini(void *handle)
adev->powerplay.ip_funcs->late_fini( adev->powerplay.ip_funcs->late_fini(
adev->powerplay.pp_handle); adev->powerplay.pp_handle);
-	if (adev->pp_enabled)
-		amd_powerplay_destroy(adev->powerplay.pp_handle);
+	if (adev->powerplay.cgs_device)
+		amdgpu_cgs_destroy_device(adev->powerplay.cgs_device);
} }
static int amdgpu_pp_suspend(void *handle) static int amdgpu_pp_suspend(void *handle)


@ -57,6 +57,40 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
ttm_bo_kunmap(&bo->dma_buf_vmap); ttm_bo_kunmap(&bo->dma_buf_vmap);
} }
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
unsigned asize = amdgpu_bo_size(bo);
int ret;
if (!vma->vm_file)
return -ENODEV;
if (adev == NULL)
return -ENODEV;
/* Check for valid size. */
if (asize < vma->vm_end - vma->vm_start)
return -EINVAL;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
return -EPERM;
}
vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;
/* prime mmap does not need to check access, so allow here */
ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
if (ret)
return ret;
ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);
return ret;
}
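
amdgpu_gem_prime_mmap() is the new prime mmap entry point behind the "Prime mmap support" item in the summary; it validates the size and access flags, then forwards to TTM's mmap path around a temporary drm_vma_node_allow(). It is not called directly: it is reached when userspace mmap()s a dma-buf fd exported for the BO. A hedged libdrm-style sketch of that consumer side (names from libdrm; error handling trimmed):

#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>
#include <xf86drm.h>

void *map_exported_bo(int drm_fd, uint32_t gem_handle, size_t size)
{
	int dmabuf_fd;
	void *ptr;

	/* Export the GEM handle as a dma-buf fd (PRIME). */
	if (drmPrimeHandleToFD(drm_fd, gem_handle, DRM_CLOEXEC | DRM_RDWR,
			       &dmabuf_fd))
		return NULL;

	/* This mmap is what lands in the driver's gem_prime_mmap hook. */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   dmabuf_fd, 0);
	return ptr == MAP_FAILED ? NULL : ptr;
}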
struct drm_gem_object * struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev, amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct dma_buf_attachment *attach,
@ -136,7 +170,8 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
{ {
struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj); struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
-	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
+	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
+	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
return ERR_PTR(-EPERM); return ERR_PTR(-EPERM);
return drm_gem_prime_export(dev, gobj, flags); return drm_gem_prime_export(dev, gobj, flags);


@ -57,21 +57,23 @@ static int psp_sw_init(void *handle)
psp->prep_cmd_buf = psp_v3_1_prep_cmd_buf; psp->prep_cmd_buf = psp_v3_1_prep_cmd_buf;
psp->ring_init = psp_v3_1_ring_init; psp->ring_init = psp_v3_1_ring_init;
psp->ring_create = psp_v3_1_ring_create; psp->ring_create = psp_v3_1_ring_create;
psp->ring_stop = psp_v3_1_ring_stop;
psp->ring_destroy = psp_v3_1_ring_destroy; psp->ring_destroy = psp_v3_1_ring_destroy;
psp->cmd_submit = psp_v3_1_cmd_submit; psp->cmd_submit = psp_v3_1_cmd_submit;
psp->compare_sram_data = psp_v3_1_compare_sram_data; psp->compare_sram_data = psp_v3_1_compare_sram_data;
psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk; psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk;
psp->mode1_reset = psp_v3_1_mode1_reset;
break; break;
case CHIP_RAVEN: case CHIP_RAVEN:
#if 0
psp->init_microcode = psp_v10_0_init_microcode; psp->init_microcode = psp_v10_0_init_microcode;
#endif
psp->prep_cmd_buf = psp_v10_0_prep_cmd_buf; psp->prep_cmd_buf = psp_v10_0_prep_cmd_buf;
psp->ring_init = psp_v10_0_ring_init; psp->ring_init = psp_v10_0_ring_init;
psp->ring_create = psp_v10_0_ring_create; psp->ring_create = psp_v10_0_ring_create;
psp->ring_stop = psp_v10_0_ring_stop;
psp->ring_destroy = psp_v10_0_ring_destroy; psp->ring_destroy = psp_v10_0_ring_destroy;
psp->cmd_submit = psp_v10_0_cmd_submit; psp->cmd_submit = psp_v10_0_cmd_submit;
psp->compare_sram_data = psp_v10_0_compare_sram_data; psp->compare_sram_data = psp_v10_0_compare_sram_data;
psp->mode1_reset = psp_v10_0_mode1_reset;
break; break;
default: default:
return -EINVAL; return -EINVAL;
@ -90,6 +92,12 @@ static int psp_sw_init(void *handle)
static int psp_sw_fini(void *handle) static int psp_sw_fini(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
release_firmware(adev->psp.sos_fw);
adev->psp.sos_fw = NULL;
release_firmware(adev->psp.asd_fw);
adev->psp.asd_fw = NULL;
return 0; return 0;
} }
@ -253,15 +261,18 @@ static int psp_asd_load(struct psp_context *psp)
static int psp_hw_start(struct psp_context *psp) static int psp_hw_start(struct psp_context *psp)
{ {
struct amdgpu_device *adev = psp->adev;
int ret; int ret;
ret = psp_bootloader_load_sysdrv(psp); if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
if (ret) ret = psp_bootloader_load_sysdrv(psp);
return ret; if (ret)
return ret;
ret = psp_bootloader_load_sos(psp); ret = psp_bootloader_load_sos(psp);
if (ret) if (ret)
return ret; return ret;
}
ret = psp_ring_create(psp, PSP_RING_TYPE__KM); ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
if (ret) if (ret)
@ -453,6 +464,16 @@ static int psp_hw_fini(void *handle)
static int psp_suspend(void *handle) static int psp_suspend(void *handle)
{ {
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
if (ret) {
DRM_ERROR("PSP ring stop failed\n");
return ret;
}
return 0; return 0;
} }
@ -487,6 +508,22 @@ failed:
return ret; return ret;
} }
static bool psp_check_reset(void* handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->flags & AMD_IS_APU)
return true;
return false;
}
static int psp_reset(void* handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return psp_mode1_reset(&adev->psp);
}
static bool psp_check_fw_loading_status(struct amdgpu_device *adev, static bool psp_check_fw_loading_status(struct amdgpu_device *adev,
enum AMDGPU_UCODE_ID ucode_type) enum AMDGPU_UCODE_ID ucode_type)
{ {
@ -530,8 +567,9 @@ const struct amd_ip_funcs psp_ip_funcs = {
.suspend = psp_suspend, .suspend = psp_suspend,
.resume = psp_resume, .resume = psp_resume,
.is_idle = NULL, .is_idle = NULL,
.check_soft_reset = psp_check_reset,
.wait_for_idle = NULL, .wait_for_idle = NULL,
.soft_reset = NULL, .soft_reset = psp_reset,
.set_clockgating_state = psp_set_clockgating_state, .set_clockgating_state = psp_set_clockgating_state,
.set_powergating_state = psp_set_powergating_state, .set_powergating_state = psp_set_powergating_state,
}; };


@ -66,6 +66,8 @@ struct psp_context
struct psp_gfx_cmd_resp *cmd); struct psp_gfx_cmd_resp *cmd);
int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type); int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
int (*ring_create)(struct psp_context *psp, enum psp_ring_type ring_type); int (*ring_create)(struct psp_context *psp, enum psp_ring_type ring_type);
int (*ring_stop)(struct psp_context *psp,
enum psp_ring_type ring_type);
int (*ring_destroy)(struct psp_context *psp, int (*ring_destroy)(struct psp_context *psp,
enum psp_ring_type ring_type); enum psp_ring_type ring_type);
int (*cmd_submit)(struct psp_context *psp, struct amdgpu_firmware_info *ucode, int (*cmd_submit)(struct psp_context *psp, struct amdgpu_firmware_info *ucode,
@ -74,6 +76,7 @@ struct psp_context
struct amdgpu_firmware_info *ucode, struct amdgpu_firmware_info *ucode,
enum AMDGPU_UCODE_ID ucode_type); enum AMDGPU_UCODE_ID ucode_type);
bool (*smu_reload_quirk)(struct psp_context *psp); bool (*smu_reload_quirk)(struct psp_context *psp);
int (*mode1_reset)(struct psp_context *psp);
/* fence buffer */ /* fence buffer */
struct amdgpu_bo *fw_pri_bo; struct amdgpu_bo *fw_pri_bo;
@ -123,6 +126,7 @@ struct amdgpu_psp_funcs {
#define psp_prep_cmd_buf(ucode, type) (psp)->prep_cmd_buf((ucode), (type)) #define psp_prep_cmd_buf(ucode, type) (psp)->prep_cmd_buf((ucode), (type))
#define psp_ring_init(psp, type) (psp)->ring_init((psp), (type)) #define psp_ring_init(psp, type) (psp)->ring_init((psp), (type))
#define psp_ring_create(psp, type) (psp)->ring_create((psp), (type)) #define psp_ring_create(psp, type) (psp)->ring_create((psp), (type))
#define psp_ring_stop(psp, type) (psp)->ring_stop((psp), (type))
#define psp_ring_destroy(psp, type) ((psp)->ring_destroy((psp), (type))) #define psp_ring_destroy(psp, type) ((psp)->ring_destroy((psp), (type)))
#define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \ #define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \
(psp)->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index)) (psp)->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index))
@ -136,6 +140,8 @@ struct amdgpu_psp_funcs {
((psp)->bootloader_load_sos ? (psp)->bootloader_load_sos((psp)) : 0) ((psp)->bootloader_load_sos ? (psp)->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \ #define psp_smu_reload_quirk(psp) \
((psp)->smu_reload_quirk ? (psp)->smu_reload_quirk((psp)) : false) ((psp)->smu_reload_quirk ? (psp)->smu_reload_quirk((psp)) : false)
#define psp_mode1_reset(psp) \
((psp)->mode1_reset ? (psp)->mode1_reset((psp)) : false)
extern const struct amd_ip_funcs psp_ip_funcs; extern const struct amd_ip_funcs psp_ip_funcs;


@ -121,7 +121,7 @@ static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip)
static int amdgpu_lru_map(struct amdgpu_device *adev, static int amdgpu_lru_map(struct amdgpu_device *adev,
struct amdgpu_queue_mapper *mapper, struct amdgpu_queue_mapper *mapper,
-			  int user_ring,
+			  int user_ring, bool lru_pipe_order,
struct amdgpu_ring **out_ring) struct amdgpu_ring **out_ring)
{ {
int r, i, j; int r, i, j;
@ -139,7 +139,7 @@ static int amdgpu_lru_map(struct amdgpu_device *adev,
} }
r = amdgpu_ring_lru_get(adev, ring_type, ring_blacklist, r = amdgpu_ring_lru_get(adev, ring_type, ring_blacklist,
-				j, out_ring);
+				j, lru_pipe_order, out_ring);
if (r) if (r)
return r; return r;
@ -284,8 +284,10 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
r = amdgpu_identity_map(adev, mapper, ring, out_ring); r = amdgpu_identity_map(adev, mapper, ring, out_ring);
break; break;
case AMDGPU_HW_IP_DMA: case AMDGPU_HW_IP_DMA:
+		r = amdgpu_lru_map(adev, mapper, ring, false, out_ring);
+		break;
 	case AMDGPU_HW_IP_COMPUTE:
-		r = amdgpu_lru_map(adev, mapper, ring, out_ring);
+		r = amdgpu_lru_map(adev, mapper, ring, true, out_ring);
break; break;
default: default:
*out_ring = NULL; *out_ring = NULL;


@ -136,7 +136,8 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
if (ring->funcs->end_use) if (ring->funcs->end_use)
ring->funcs->end_use(ring); ring->funcs->end_use(ring);
-	amdgpu_ring_lru_touch(ring->adev, ring);
+	if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ)
+		amdgpu_ring_lru_touch(ring->adev, ring);
} }
/** /**
@ -154,6 +155,75 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
ring->funcs->end_use(ring); ring->funcs->end_use(ring);
} }
/**
* amdgpu_ring_priority_put - restore a ring's priority
*
* @ring: amdgpu_ring structure holding the information
* @priority: target priority
*
* Release a request for executing at @priority
*/
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
enum amd_sched_priority priority)
{
int i;
if (!ring->funcs->set_priority)
return;
if (atomic_dec_return(&ring->num_jobs[priority]) > 0)
return;
/* no need to restore if the job is already at the lowest priority */
if (priority == AMD_SCHED_PRIORITY_NORMAL)
return;
mutex_lock(&ring->priority_mutex);
/* something higher prio is executing, no need to decay */
if (ring->priority > priority)
goto out_unlock;
/* decay priority to the next level with a job available */
for (i = priority; i >= AMD_SCHED_PRIORITY_MIN; i--) {
if (i == AMD_SCHED_PRIORITY_NORMAL
|| atomic_read(&ring->num_jobs[i])) {
ring->priority = i;
ring->funcs->set_priority(ring, i);
break;
}
}
out_unlock:
mutex_unlock(&ring->priority_mutex);
}
/**
* amdgpu_ring_priority_get - change the ring's priority
*
* @ring: amdgpu_ring structure holding the information
* @priority: target priority
*
* Request a ring's priority to be raised to @priority (refcounted).
*/
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
enum amd_sched_priority priority)
{
if (!ring->funcs->set_priority)
return;
atomic_inc(&ring->num_jobs[priority]);
mutex_lock(&ring->priority_mutex);
if (priority <= ring->priority)
goto out_unlock;
ring->priority = priority;
ring->funcs->set_priority(ring, priority);
out_unlock:
mutex_unlock(&ring->priority_mutex);
}
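
The get/put pair above is a refcounted request scheme: each outstanding job at a given priority pins the ring at least that high, and the put side decays the ring to the highest priority that still has queued jobs. A short illustrative pairing (a sketch only, not the exact call sites this series wires into the job and context code):

/* Sketch only: bracket a job's lifetime with a priority request. */
static void example_run_job(struct amdgpu_ring *ring,
			    enum amd_sched_priority prio)
{
	amdgpu_ring_priority_get(ring, prio);	/* raise while work is pending */

	/* ... submit the job and wait for it to retire ... */

	amdgpu_ring_priority_put(ring, prio);	/* decay once it retires */
}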
/** /**
* amdgpu_ring_init - init driver ring struct. * amdgpu_ring_init - init driver ring struct.
* *
@ -169,7 +239,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned max_dw, struct amdgpu_irq_src *irq_src, unsigned max_dw, struct amdgpu_irq_src *irq_src,
unsigned irq_type) unsigned irq_type)
{ {
int r; int r, i;
int sched_hw_submission = amdgpu_sched_hw_submission; int sched_hw_submission = amdgpu_sched_hw_submission;
/* Set the hw submission limit higher for KIQ because /* Set the hw submission limit higher for KIQ because
@ -247,9 +317,14 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
} }
ring->max_dw = max_dw; ring->max_dw = max_dw;
ring->priority = AMD_SCHED_PRIORITY_NORMAL;
mutex_init(&ring->priority_mutex);
INIT_LIST_HEAD(&ring->lru_list); INIT_LIST_HEAD(&ring->lru_list);
amdgpu_ring_lru_touch(adev, ring); amdgpu_ring_lru_touch(adev, ring);
for (i = 0; i < AMD_SCHED_PRIORITY_MAX; ++i)
atomic_set(&ring->num_jobs[i], 0);
if (amdgpu_debugfs_ring_init(adev, ring)) { if (amdgpu_debugfs_ring_init(adev, ring)) {
DRM_ERROR("Failed to register debugfs file for rings !\n"); DRM_ERROR("Failed to register debugfs file for rings !\n");
} }
@ -315,14 +390,16 @@ static bool amdgpu_ring_is_blacklisted(struct amdgpu_ring *ring,
* @type: amdgpu_ring_type enum * @type: amdgpu_ring_type enum
* @blacklist: blacklisted ring ids array * @blacklist: blacklisted ring ids array
* @num_blacklist: number of entries in @blacklist * @num_blacklist: number of entries in @blacklist
* @lru_pipe_order: find a ring from the least recently used pipe
* @ring: output ring * @ring: output ring
* *
* Retrieve the amdgpu_ring structure for the least recently used ring of * Retrieve the amdgpu_ring structure for the least recently used ring of
* a specific IP block (all asics). * a specific IP block (all asics).
* Returns 0 on success, error on failure. * Returns 0 on success, error on failure.
*/ */
-int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist,
-			int num_blacklist, struct amdgpu_ring **ring)
+int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type,
+			int *blacklist, int num_blacklist,
+			bool lru_pipe_order, struct amdgpu_ring **ring)
{ {
struct amdgpu_ring *entry; struct amdgpu_ring *entry;
@ -337,10 +414,23 @@ int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist,
if (amdgpu_ring_is_blacklisted(entry, blacklist, num_blacklist)) if (amdgpu_ring_is_blacklisted(entry, blacklist, num_blacklist))
continue; continue;
-		*ring = entry;
-		amdgpu_ring_lru_touch_locked(adev, *ring);
-		break;
+		if (!*ring) {
+			*ring = entry;
+
+			/* We are done for ring LRU */
+			if (!lru_pipe_order)
+				break;
+		}
+
+		/* Move all rings on the same pipe to the end of the list */
+		if (entry->pipe == (*ring)->pipe)
+			amdgpu_ring_lru_touch_locked(adev, entry);
 	}

+	/* Move the ring we found to the end of the list */
+	if (*ring)
+		amdgpu_ring_lru_touch_locked(adev, *ring);
+
 	spin_unlock(&adev->ring_lru_list_lock);
if (!*ring) { if (!*ring) {


@ -24,6 +24,7 @@
#ifndef __AMDGPU_RING_H__ #ifndef __AMDGPU_RING_H__
#define __AMDGPU_RING_H__ #define __AMDGPU_RING_H__
#include <drm/amdgpu_drm.h>
#include "gpu_scheduler.h" #include "gpu_scheduler.h"
/* max number of rings */ /* max number of rings */
@ -56,6 +57,7 @@ struct amdgpu_device;
struct amdgpu_ring; struct amdgpu_ring;
struct amdgpu_ib; struct amdgpu_ib;
struct amdgpu_cs_parser; struct amdgpu_cs_parser;
struct amdgpu_job;
/* /*
* Fences. * Fences.
@ -88,8 +90,12 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev); void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev); void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence); int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s);
void amdgpu_fence_process(struct amdgpu_ring *ring); void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
uint32_t wait_seq,
signed long timeout);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
/* /*
@ -147,6 +153,9 @@ struct amdgpu_ring_funcs {
void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg); void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void (*emit_tmz)(struct amdgpu_ring *ring, bool start); void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
/* priority functions */
void (*set_priority) (struct amdgpu_ring *ring,
enum amd_sched_priority priority);
}; };
struct amdgpu_ring { struct amdgpu_ring {
@ -187,6 +196,12 @@ struct amdgpu_ring {
volatile u32 *cond_exe_cpu_addr; volatile u32 *cond_exe_cpu_addr;
unsigned vm_inv_eng; unsigned vm_inv_eng;
bool has_compute_vm_bug; bool has_compute_vm_bug;
atomic_t num_jobs[AMD_SCHED_PRIORITY_MAX];
struct mutex priority_mutex;
/* protected by priority_mutex */
int priority;
#if defined(CONFIG_DEBUG_FS) #if defined(CONFIG_DEBUG_FS)
struct dentry *ent; struct dentry *ent;
#endif #endif
@ -197,12 +212,17 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring); void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring); void amdgpu_ring_undo(struct amdgpu_ring *ring);
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
enum amd_sched_priority priority);
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
enum amd_sched_priority priority);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned ring_size, struct amdgpu_irq_src *irq_src, unsigned ring_size, struct amdgpu_irq_src *irq_src,
unsigned irq_type); unsigned irq_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring); void amdgpu_ring_fini(struct amdgpu_ring *ring);
-int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist,
-			int num_blacklist, struct amdgpu_ring **ring);
+int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type,
+			int *blacklist, int num_blacklist,
+			bool lru_pipe_order, struct amdgpu_ring **ring);
void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring); void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring);
static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring) static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{ {


@ -0,0 +1,109 @@
/*
* Copyright 2017 Valve Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Andres Rodriguez <andresx7@gmail.com>
*/
#include <linux/fdtable.h>
#include <linux/pid.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
{
switch (amdgpu_priority) {
case AMDGPU_CTX_PRIORITY_VERY_HIGH:
return AMD_SCHED_PRIORITY_HIGH_HW;
case AMDGPU_CTX_PRIORITY_HIGH:
return AMD_SCHED_PRIORITY_HIGH_SW;
case AMDGPU_CTX_PRIORITY_NORMAL:
return AMD_SCHED_PRIORITY_NORMAL;
case AMDGPU_CTX_PRIORITY_LOW:
case AMDGPU_CTX_PRIORITY_VERY_LOW:
return AMD_SCHED_PRIORITY_LOW;
case AMDGPU_CTX_PRIORITY_UNSET:
return AMD_SCHED_PRIORITY_UNSET;
default:
WARN(1, "Invalid context priority %d\n", amdgpu_priority);
return AMD_SCHED_PRIORITY_INVALID;
}
}
static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
int fd,
enum amd_sched_priority priority)
{
struct file *filp = fcheck(fd);
struct drm_file *file;
struct pid *pid;
struct amdgpu_fpriv *fpriv;
struct amdgpu_ctx *ctx;
uint32_t id;
if (!filp)
return -EINVAL;
pid = get_pid(((struct drm_file *)filp->private_data)->pid);
mutex_lock(&adev->ddev->filelist_mutex);
list_for_each_entry(file, &adev->ddev->filelist, lhead) {
if (file->pid != pid)
continue;
fpriv = file->driver_priv;
idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
amdgpu_ctx_priority_override(ctx, priority);
}
mutex_unlock(&adev->ddev->filelist_mutex);
put_pid(pid);
return 0;
}
int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
union drm_amdgpu_sched *args = data;
struct amdgpu_device *adev = dev->dev_private;
enum amd_sched_priority priority;
int r;
priority = amdgpu_to_sched_priority(args->in.priority);
if (args->in.flags || priority == AMD_SCHED_PRIORITY_INVALID)
return -EINVAL;
switch (args->in.op) {
case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE:
r = amdgpu_sched_process_priority_override(adev,
args->in.fd,
priority);
break;
default:
DRM_ERROR("Invalid sched op specified: %d\n", args->in.op);
r = -EINVAL;
break;
}
return r;
}
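
amdgpu_sched_ioctl() exposes the "user defined priorities" feature from the summary: given another process's DRM fd, it overrides the scheduler priority of every context owned by that fd. A hedged sketch of the caller side, assuming the uapi names added alongside this code (union drm_amdgpu_sched, AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE) and that the caller holds the required privilege (DRM master / CAP_SYS_NICE):

#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

int override_process_priority(int privileged_fd, int target_fd, int priority)
{
	union drm_amdgpu_sched args;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE;
	args.in.fd = target_fd;		/* DRM fd of the process to adjust */
	args.in.priority = priority;	/* e.g. AMDGPU_CTX_PRIORITY_HIGH */

	return drmCommandWrite(privileged_fd, DRM_AMDGPU_SCHED,
			       &args, sizeof(args));
}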


@@ -1,5 +1,5 @@
 /*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2017 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -19,16 +19,16 @@
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
+ * Authors: Andres Rodriguez <andresx7@gmail.com>
 */

-#ifndef _EVENTINIT_H_
-#define _EVENTINIT_H_
+#ifndef __AMDGPU_SCHED_H__
+#define __AMDGPU_SCHED_H__

-#define PEM_CURRENT_POWERPLAY_FEATURE_VERSION 4
+#include <drm/drmP.h>

-void pem_init_feature_info(struct pp_eventmgr *eventmgr);
-void pem_uninit_featureInfo(struct pp_eventmgr *eventmgr);
-int pem_register_interrupts(struct pp_eventmgr *eventmgr);
-int pem_unregister_interrupts(struct pp_eventmgr *eventmgr);
+enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority);
+int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *filp);

-#endif /* _EVENTINIT_H_ */
+#endif // __AMDGPU_SCHED_H__


@ -169,14 +169,14 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
* *
* @sync: sync object to add fences from reservation object to * @sync: sync object to add fences from reservation object to
* @resv: reservation object with embedded fence * @resv: reservation object with embedded fence
- * @shared: true if we should only sync to the exclusive fence
+ * @explicit_sync: true if we should only sync to the exclusive fence
* *
* Sync to the fence * Sync to the fence
*/ */
int amdgpu_sync_resv(struct amdgpu_device *adev, int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync, struct amdgpu_sync *sync,
struct reservation_object *resv, struct reservation_object *resv,
-		     void *owner)
+		     void *owner, bool explicit_sync)
{ {
struct reservation_object_list *flist; struct reservation_object_list *flist;
struct dma_fence *f; struct dma_fence *f;
@ -191,6 +191,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
f = reservation_object_get_excl(resv); f = reservation_object_get_excl(resv);
r = amdgpu_sync_fence(adev, sync, f); r = amdgpu_sync_fence(adev, sync, f);
if (explicit_sync)
return r;
flist = reservation_object_get_list(resv); flist = reservation_object_get_list(resv);
if (!flist || r) if (!flist || r)
return r; return r;
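
The new explicit_sync argument short-circuits the shared fences in a reservation object so the driver only waits on the exclusive fence; it backs the "BO flag to allow buffers to opt out of implicit sync" item in the summary. A hedged userspace sketch of opting a buffer out, assuming libdrm_amdgpu and the AMDGPU_GEM_CREATE_EXPLICIT_SYNC flag introduced with this work:

#include <stdint.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

int alloc_explicitly_synced_bo(amdgpu_device_handle dev, uint64_t size,
			       amdgpu_bo_handle *bo)
{
	struct amdgpu_bo_alloc_request req = {
		.alloc_size = size,
		.phys_alignment = 4096,
		.preferred_heap = AMDGPU_GEM_DOMAIN_VRAM,
		/* Opt out of implicit sync: the kernel then only waits on the
		 * exclusive fence for this BO (see amdgpu_sync_resv above). */
		.flags = AMDGPU_GEM_CREATE_EXPLICIT_SYNC,
	};

	return amdgpu_bo_alloc(dev, &req, bo);
}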


@ -45,7 +45,8 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
int amdgpu_sync_resv(struct amdgpu_device *adev, int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync, struct amdgpu_sync *sync,
struct reservation_object *resv, struct reservation_object *resv,
-		     void *owner);
+		     void *owner,
+		     bool explicit_sync);
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring); struct amdgpu_ring *ring);
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);


@ -15,62 +15,6 @@
#define AMDGPU_JOB_GET_TIMELINE_NAME(job) \ #define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished) job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
TRACE_EVENT(amdgpu_ttm_tt_populate,
TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address),
TP_ARGS(adev, dma_address, phys_address),
TP_STRUCT__entry(
__field(uint16_t, domain)
__field(uint8_t, bus)
__field(uint8_t, slot)
__field(uint8_t, func)
__field(uint64_t, dma)
__field(uint64_t, phys)
),
TP_fast_assign(
__entry->domain = pci_domain_nr(adev->pdev->bus);
__entry->bus = adev->pdev->bus->number;
__entry->slot = PCI_SLOT(adev->pdev->devfn);
__entry->func = PCI_FUNC(adev->pdev->devfn);
__entry->dma = dma_address;
__entry->phys = phys_address;
),
TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx",
(unsigned)__entry->domain,
(unsigned)__entry->bus,
(unsigned)__entry->slot,
(unsigned)__entry->func,
(unsigned long long)__entry->dma,
(unsigned long long)__entry->phys)
);
TRACE_EVENT(amdgpu_ttm_tt_unpopulate,
TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address),
TP_ARGS(adev, dma_address, phys_address),
TP_STRUCT__entry(
__field(uint16_t, domain)
__field(uint8_t, bus)
__field(uint8_t, slot)
__field(uint8_t, func)
__field(uint64_t, dma)
__field(uint64_t, phys)
),
TP_fast_assign(
__entry->domain = pci_domain_nr(adev->pdev->bus);
__entry->bus = adev->pdev->bus->number;
__entry->slot = PCI_SLOT(adev->pdev->devfn);
__entry->func = PCI_FUNC(adev->pdev->devfn);
__entry->dma = dma_address;
__entry->phys = phys_address;
),
TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx",
(unsigned)__entry->domain,
(unsigned)__entry->bus,
(unsigned)__entry->slot,
(unsigned)__entry->func,
(unsigned long long)__entry->dma,
(unsigned long long)__entry->phys)
);
TRACE_EVENT(amdgpu_mm_rreg, TRACE_EVENT(amdgpu_mm_rreg,
TP_PROTO(unsigned did, uint32_t reg, uint32_t value), TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
TP_ARGS(did, reg, value), TP_ARGS(did, reg, value),
@ -474,5 +418,5 @@ TRACE_EVENT(amdgpu_ttm_bo_move,
/* This part must be outside protection */ /* This part must be outside protection */
#undef TRACE_INCLUDE_PATH #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/amd/amdgpu
#include <trace/define_trace.h> #include <trace/define_trace.h>


@ -1,5 +1,24 @@
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
/* Copyright Red Hat Inc 2010. /* Copyright Red Hat Inc 2010.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Author : Dave Airlie <airlied@redhat.com> * Author : Dave Airlie <airlied@redhat.com>
*/ */
#include <drm/drmP.h> #include <drm/drmP.h>


@ -42,7 +42,9 @@
#include <linux/swap.h> #include <linux/swap.h>
#include <linux/pagemap.h> #include <linux/pagemap.h>
#include <linux/debugfs.h> #include <linux/debugfs.h>
#include <linux/iommu.h>
#include "amdgpu.h" #include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h" #include "amdgpu_trace.h"
#include "bif/bif_4_1_d.h" #include "bif/bif_4_1_d.h"
@ -208,7 +210,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
placement->num_busy_placement = 1; placement->num_busy_placement = 1;
return; return;
} }
-	abo = container_of(bo, struct amdgpu_bo, tbo);
+	abo = ttm_to_amdgpu_bo(bo);
switch (bo->mem.mem_type) { switch (bo->mem.mem_type) {
case TTM_PL_VRAM: case TTM_PL_VRAM:
if (adev->mman.buffer_funcs && if (adev->mman.buffer_funcs &&
@ -256,7 +258,7 @@ gtt:
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{ {
-	struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
+	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
if (amdgpu_ttm_tt_get_usermm(bo->ttm)) if (amdgpu_ttm_tt_get_usermm(bo->ttm))
return -EPERM; return -EPERM;
@@ -288,97 +290,177 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
    return addr;
}

/**
 * amdgpu_find_mm_node - Helper function finds the drm_mm_node
 * corresponding to @offset. It also modifies the offset to be
 * within the drm_mm_node returned
 */
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
                                               unsigned long *offset)
{
    struct drm_mm_node *mm_node = mem->mm_node;

    while (*offset >= (mm_node->size << PAGE_SHIFT)) {
        *offset -= (mm_node->size << PAGE_SHIFT);
        ++mm_node;
    }
    return mm_node;
}

/**
 * amdgpu_copy_ttm_mem_to_mem - Helper function for copy
 *
 * The function copies @size bytes from {src->mem + src->offset} to
 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
 * move and different for a BO to BO copy.
 *
 * @f: Returns the last fence if multiple jobs are submitted.
 */
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
                               struct amdgpu_copy_mem *src,
                               struct amdgpu_copy_mem *dst,
                               uint64_t size,
                               struct reservation_object *resv,
                               struct dma_fence **f)
{
    struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
    struct drm_mm_node *src_mm, *dst_mm;
    uint64_t src_node_start, dst_node_start, src_node_size,
             dst_node_size, src_page_offset, dst_page_offset;
    struct dma_fence *fence = NULL;
    int r = 0;
    const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
                                    AMDGPU_GPU_PAGE_SIZE);

    if (!ring->ready) {
        DRM_ERROR("Trying to move memory with ring turned off.\n");
        return -EINVAL;
    }

    src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
    src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
                     src->offset;
    src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
    src_page_offset = src_node_start & (PAGE_SIZE - 1);

    dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
    dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
                     dst->offset;
    dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
    dst_page_offset = dst_node_start & (PAGE_SIZE - 1);

    mutex_lock(&adev->mman.gtt_window_lock);

    while (size) {
        unsigned long cur_size;
        uint64_t from = src_node_start, to = dst_node_start;
        struct dma_fence *next;

        /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
         * begins at an offset, then adjust the size accordingly
         */
        cur_size = min3(min(src_node_size, dst_node_size), size,
                        GTT_MAX_BYTES);
        if (cur_size + src_page_offset > GTT_MAX_BYTES ||
            cur_size + dst_page_offset > GTT_MAX_BYTES)
            cur_size -= max(src_page_offset, dst_page_offset);

        /* Map only what needs to be accessed. Map src to window 0 and
         * dst to window 1
         */
        if (src->mem->mem_type == TTM_PL_TT &&
            !amdgpu_gtt_mgr_is_allocated(src->mem)) {
            r = amdgpu_map_buffer(src->bo, src->mem,
                                  PFN_UP(cur_size + src_page_offset),
                                  src_node_start, 0, ring,
                                  &from);
            if (r)
                goto error;
            /* Adjust the offset because amdgpu_map_buffer returns
             * start of mapped page
             */
            from += src_page_offset;
        }

        if (dst->mem->mem_type == TTM_PL_TT &&
            !amdgpu_gtt_mgr_is_allocated(dst->mem)) {
            r = amdgpu_map_buffer(dst->bo, dst->mem,
                                  PFN_UP(cur_size + dst_page_offset),
                                  dst_node_start, 1, ring,
                                  &to);
            if (r)
                goto error;
            to += dst_page_offset;
        }

        r = amdgpu_copy_buffer(ring, from, to, cur_size,
                               resv, &next, false, true);
        if (r)
            goto error;

        dma_fence_put(fence);
        fence = next;

        size -= cur_size;
        if (!size)
            break;

        src_node_size -= cur_size;
        if (!src_node_size) {
            src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
                                                 src->mem);
            src_node_size = (src_mm->size << PAGE_SHIFT);
        } else {
            src_node_start += cur_size;
            src_page_offset = src_node_start & (PAGE_SIZE - 1);
        }
        dst_node_size -= cur_size;
        if (!dst_node_size) {
            dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
                                                 dst->mem);
            dst_node_size = (dst_mm->size << PAGE_SHIFT);
        } else {
            dst_node_start += cur_size;
            dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
        }
    }
error:
    mutex_unlock(&adev->mman.gtt_window_lock);
    if (f)
        *f = dma_fence_get(fence);
    dma_fence_put(fence);
    return r;
}

static int amdgpu_move_blit(struct ttm_buffer_object *bo,
                            bool evict, bool no_wait_gpu,
                            struct ttm_mem_reg *new_mem,
                            struct ttm_mem_reg *old_mem)
{
    struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
    struct amdgpu_copy_mem src, dst;
    struct dma_fence *fence = NULL;
    int r;

    src.bo = bo;
    dst.bo = bo;
    src.mem = old_mem;
    dst.mem = new_mem;
    src.offset = 0;
    dst.offset = 0;

    r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
                                   new_mem->num_pages << PAGE_SHIFT,
                                   bo->resv, &fence);
    if (r)
        goto error;

    r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
    dma_fence_put(fence);
    return r;

error:
    if (fence)
        dma_fence_wait(fence, false);
    dma_fence_put(fence);
@ -483,7 +565,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
int r; int r;
/* Can't move a pinned BO */ /* Can't move a pinned BO */
    abo = ttm_to_amdgpu_bo(bo);
if (WARN_ON_ONCE(abo->pin_count > 0)) if (WARN_ON_ONCE(abo->pin_count > 0))
return -EINVAL; return -EINVAL;
@@ -581,13 +663,12 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
                                           unsigned long page_offset)
{
    struct drm_mm_node *mm;
    unsigned long offset = (page_offset << PAGE_SHIFT);

    mm = amdgpu_find_mm_node(&bo->mem, &offset);
    return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
        (offset >> PAGE_SHIFT);
}
/* /*
@ -608,6 +689,7 @@ struct amdgpu_ttm_tt {
spinlock_t guptasklock; spinlock_t guptasklock;
struct list_head guptasks; struct list_head guptasks;
atomic_t mmu_invalidations; atomic_t mmu_invalidations;
uint32_t last_set_pages;
struct list_head list; struct list_head list;
}; };
@ -621,6 +703,8 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY)) if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
flags |= FOLL_WRITE; flags |= FOLL_WRITE;
down_read(&current->mm->mmap_sem);
if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) { if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
/* check that we only use anonymous memory /* check that we only use anonymous memory
to prevent problems with writeback */ to prevent problems with writeback */
@ -628,8 +712,10 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
struct vm_area_struct *vma; struct vm_area_struct *vma;
vma = find_vma(gtt->usermm, gtt->userptr); vma = find_vma(gtt->usermm, gtt->userptr);
        if (!vma || vma->vm_file || vma->vm_end < end) {
            up_read(&current->mm->mmap_sem);
            return -EPERM;
        }
} }
do { do {
@ -656,42 +742,44 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
} while (pinned < ttm->num_pages); } while (pinned < ttm->num_pages);
up_read(&current->mm->mmap_sem);
return 0; return 0;
release_pages: release_pages:
release_pages(pages, pinned); release_pages(pages, pinned);
up_read(&current->mm->mmap_sem);
return r; return r;
} }
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
    struct amdgpu_ttm_tt *gtt = (void *)ttm;
    unsigned i;

    gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
    for (i = 0; i < ttm->num_pages; ++i) {
        if (ttm->pages[i])
            put_page(ttm->pages[i]);

        ttm->pages[i] = pages ? pages[i] : NULL;
    }
}

void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
{
    struct amdgpu_ttm_tt *gtt = (void *)ttm;
    unsigned i;

    for (i = 0; i < ttm->num_pages; ++i) {
        struct page *page = ttm->pages[i];

        if (!page)
            continue;

        if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
            set_page_dirty(page);

        mark_page_accessed(page);
    }
}
@ -721,8 +809,6 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address, ttm->num_pages); gtt->ttm.dma_address, ttm->num_pages);
amdgpu_trace_dma_map(ttm);
return 0; return 0;
release_sg: release_sg:
@ -734,7 +820,6 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{ {
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm; struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct sg_page_iter sg_iter;
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ? enum dma_data_direction direction = write ?
@ -747,16 +832,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
/* free the sg table and pages again */ /* free the sg table and pages again */
dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction); dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
    amdgpu_ttm_tt_mark_user_pages(ttm);
sg_free_table(ttm->sg); sg_free_table(ttm->sg);
} }
@ -818,7 +894,6 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_tt *ttm = bo->ttm; struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg tmp; struct ttm_mem_reg tmp;
struct ttm_placement placement; struct ttm_placement placement;
struct ttm_place placements; struct ttm_place placements;
int r; int r;
@ -834,7 +909,8 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
placement.busy_placement = &placements; placement.busy_placement = &placements;
placements.fpfn = 0; placements.fpfn = 0;
placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT; placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
    placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
                       TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp, true, false); r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
if (unlikely(r)) if (unlikely(r))
@ -941,8 +1017,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
{ {
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm; struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned i;
int r;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (ttm->state != tt_unpopulated) if (ttm->state != tt_unpopulated)
@@ -962,52 +1036,26 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
        drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
                                         gtt->ttm.dma_address, ttm->num_pages);
        ttm->state = tt_unbound;
        return 0;
    }

#ifdef CONFIG_SWIOTLB
    if (swiotlb_nr_tbl()) {
        return ttm_dma_populate(&gtt->ttm, adev->dev);
    }
#endif

    return ttm_populate_and_map_pages(adev->dev, &gtt->ttm);
}
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{ {
struct amdgpu_device *adev; struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt = (void *)ttm; struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (gtt && gtt->userptr) { if (gtt && gtt->userptr) {
amdgpu_ttm_tt_set_user_pages(ttm, NULL);
kfree(ttm->sg); kfree(ttm->sg);
ttm->page_flags &= ~TTM_PAGE_FLAG_SG; ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
return; return;
@ -1018,8 +1066,6 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
adev = amdgpu_ttm_adev(ttm->bdev); adev = amdgpu_ttm_adev(ttm->bdev);
amdgpu_trace_dma_unmap(ttm);
#ifdef CONFIG_SWIOTLB #ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) { if (swiotlb_nr_tbl()) {
ttm_dma_unpopulate(&gtt->ttm, adev->dev); ttm_dma_unpopulate(&gtt->ttm, adev->dev);
@@ -1027,14 +1073,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
    }
#endif

    ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
}
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
@ -1051,6 +1090,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
spin_lock_init(&gtt->guptasklock); spin_lock_init(&gtt->guptasklock);
INIT_LIST_HEAD(&gtt->guptasks); INIT_LIST_HEAD(&gtt->guptasks);
atomic_set(&gtt->mmu_invalidations, 0); atomic_set(&gtt->mmu_invalidations, 0);
gtt->last_set_pages = 0;
return 0; return 0;
} }
@ -1103,6 +1143,16 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
return prev_invalidated != *last_invalidated; return prev_invalidated != *last_invalidated;
} }
bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
if (gtt == NULL || !gtt->userptr)
return false;
return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
}
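The check above pairs with amdgpu_ttm_tt_get_user_pages()/amdgpu_ttm_tt_set_user_pages(); a minimal sketch of how a caller might use it follows (the wrapper name and the page array argument are hypothetical, not part of this series):

static int amdgpu_userptr_pages_refresh_example(struct amdgpu_bo *bo,
                                                struct page **user_pages)
{
    int r = 0;

    /* only re-pin when the MMU notifier invalidated the old array */
    if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
        r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, user_pages);
        if (!r)
            amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, user_pages);
    }
    return r;
}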
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{ {
struct amdgpu_ttm_tt *gtt = (void *)ttm; struct amdgpu_ttm_tt *gtt = (void *)ttm;
@ -1143,9 +1193,6 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
unsigned long num_pages = bo->mem.num_pages; unsigned long num_pages = bo->mem.num_pages;
struct drm_mm_node *node = bo->mem.mm_node; struct drm_mm_node *node = bo->mem.mm_node;
if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
return ttm_bo_eviction_valuable(bo, place);
switch (bo->mem.mem_type) { switch (bo->mem.mem_type) {
case TTM_PL_TT: case TTM_PL_TT:
return true; return true;
@ -1160,7 +1207,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
num_pages -= node->size; num_pages -= node->size;
++node; ++node;
} }
        return false;
default: default:
break; break;
@ -1173,9 +1220,9 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
unsigned long offset, unsigned long offset,
void *buf, int len, int write) void *buf, int len, int write)
{ {
    struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
    struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
    struct drm_mm_node *nodes;
uint32_t value = 0; uint32_t value = 0;
int ret = 0; int ret = 0;
uint64_t pos; uint64_t pos;
@ -1184,10 +1231,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
if (bo->mem.mem_type != TTM_PL_VRAM) if (bo->mem.mem_type != TTM_PL_VRAM)
return -EIO; return -EIO;
    nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
    pos = (nodes->start << PAGE_SHIFT) + offset;
while (len && pos < adev->mc.mc_vram_size) { while (len && pos < adev->mc.mc_vram_size) {
@ -1202,14 +1246,14 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
} }
spin_lock_irqsave(&adev->mmio_idx_lock, flags); spin_lock_irqsave(&adev->mmio_idx_lock, flags);
WREG32(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000); WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
WREG32(mmMM_INDEX_HI, aligned_pos >> 31); WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
if (!write || mask != 0xffffffff) if (!write || mask != 0xffffffff)
value = RREG32(mmMM_DATA); value = RREG32_NO_KIQ(mmMM_DATA);
if (write) { if (write) {
value &= ~mask; value &= ~mask;
value |= (*(uint32_t *)buf << shift) & mask; value |= (*(uint32_t *)buf << shift) & mask;
WREG32(mmMM_DATA, value); WREG32_NO_KIQ(mmMM_DATA, value);
} }
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
if (!write) { if (!write) {
@ -1286,6 +1330,15 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Change the size here instead of the init above so only lpfn is affected */ /* Change the size here instead of the init above so only lpfn is affected */
amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
/*
*The reserved vram for firmware must be pinned to the specified
*place on the VRAM, so reserve it early.
*/
r = amdgpu_fw_reserve_vram_init(adev);
if (r) {
return r;
}
r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE, r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_DOMAIN_VRAM,
&adev->stolen_vga_memory, &adev->stolen_vga_memory,
@ -1510,7 +1563,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
job->vm_needs_flush = vm_needs_flush; job->vm_needs_flush = vm_needs_flush;
if (resv) { if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv, r = amdgpu_sync_resv(adev, &job->sync, resv,
                             AMDGPU_FENCE_OWNER_UNDEFINED,
                             false);
if (r) { if (r) {
DRM_ERROR("sync failed (%d).\n", r); DRM_ERROR("sync failed (%d).\n", r);
goto error_free; goto error_free;
@ -1557,8 +1611,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
struct dma_fence **fence) struct dma_fence **fence)
{ {
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
    uint32_t max_bytes = 8 *
        adev->vm_manager.vm_pte_funcs->set_max_nums_pte_pde;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct drm_mm_node *mm_node; struct drm_mm_node *mm_node;
@ -1590,8 +1644,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
++mm_node; ++mm_node;
} }
    /* num of dwords for each SDMA_OP_PTEPDE cmd */
    num_dw = num_loops * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw;
/* for IB padding */ /* for IB padding */
num_dw += 64; num_dw += 64;
@ -1602,7 +1656,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
if (resv) { if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv, r = amdgpu_sync_resv(adev, &job->sync, resv,
                             AMDGPU_FENCE_OWNER_UNDEFINED, false);
if (r) { if (r) {
DRM_ERROR("sync failed (%d).\n", r); DRM_ERROR("sync failed (%d).\n", r);
goto error_free; goto error_free;
@ -1697,9 +1751,9 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
return result; return result;
spin_lock_irqsave(&adev->mmio_idx_lock, flags); spin_lock_irqsave(&adev->mmio_idx_lock, flags);
WREG32(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000); WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
WREG32(mmMM_INDEX_HI, *pos >> 31); WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
value = RREG32(mmMM_DATA); value = RREG32_NO_KIQ(mmMM_DATA);
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
r = put_user(value, (uint32_t *)buf); r = put_user(value, (uint32_t *)buf);
@ -1715,10 +1769,50 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
return result; return result;
} }
static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
if (*pos >= adev->mc.mc_vram_size)
return -ENXIO;
while (size) {
unsigned long flags;
uint32_t value;
if (*pos >= adev->mc.mc_vram_size)
return result;
r = get_user(value, (uint32_t *)buf);
if (r)
return r;
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
WREG32_NO_KIQ(mmMM_DATA, value);
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
result += 4;
buf += 4;
*pos += 4;
size -= 4;
}
return result;
}
static const struct file_operations amdgpu_ttm_vram_fops = {
    .owner = THIS_MODULE,
    .read = amdgpu_ttm_vram_read,
    .write = amdgpu_ttm_vram_write,
    .llseek = default_llseek,
};
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
@ -1770,6 +1864,53 @@ static const struct file_operations amdgpu_ttm_gtt_fops = {
#endif #endif
static ssize_t amdgpu_iova_to_phys_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = file_inode(f)->i_private;
int r;
uint64_t phys;
struct iommu_domain *dom;
// always return 8 bytes
if (size != 8)
return -EINVAL;
// only accept page addresses
if (*pos & 0xFFF)
return -EINVAL;
dom = iommu_get_domain_for_dev(adev->dev);
if (dom)
phys = iommu_iova_to_phys(dom, *pos);
else
phys = *pos;
r = copy_to_user(buf, &phys, 8);
if (r)
return -EFAULT;
return 8;
}
static const struct file_operations amdgpu_ttm_iova_fops = {
.owner = THIS_MODULE,
.read = amdgpu_iova_to_phys_read,
.llseek = default_llseek
};
static const struct {
char *name;
const struct file_operations *fops;
int domain;
} ttm_debugfs_entries[] = {
{ "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
#endif
{ "amdgpu_iova", &amdgpu_ttm_iova_fops, TTM_PL_SYSTEM },
};
#endif #endif
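For reference, a user-space sketch of how the new amdgpu_iova file can be exercised; the debugfs path and DRI minor number are assumptions, only the page-aligned, 8-byte read contract comes from the code above:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    uint64_t iova = 0x100000;   /* page-aligned DMA address to translate */
    uint64_t phys = 0;
    int fd = open("/sys/kernel/debug/dri/0/amdgpu_iova", O_RDONLY);

    if (fd < 0)
        return 1;
    /* the file accepts only page-aligned offsets and returns exactly 8 bytes */
    if (pread(fd, &phys, sizeof(phys), iova) == sizeof(phys))
        printf("iova 0x%llx -> phys 0x%llx\n",
               (unsigned long long)iova, (unsigned long long)phys);
    close(fd);
    return 0;
}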
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
@ -1780,22 +1921,21 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
struct drm_minor *minor = adev->ddev->primary; struct drm_minor *minor = adev->ddev->primary;
struct dentry *ent, *root = minor->debugfs_root; struct dentry *ent, *root = minor->debugfs_root;
ent = debugfs_create_file("amdgpu_vram", S_IFREG | S_IRUGO, root, for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
adev, &amdgpu_ttm_vram_fops); ent = debugfs_create_file(
if (IS_ERR(ent)) ttm_debugfs_entries[count].name,
return PTR_ERR(ent); S_IFREG | S_IRUGO, root,
i_size_write(ent->d_inode, adev->mc.mc_vram_size); adev,
adev->mman.vram = ent; ttm_debugfs_entries[count].fops);
if (IS_ERR(ent))
return PTR_ERR(ent);
if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
i_size_write(ent->d_inode, adev->mc.mc_vram_size);
else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
i_size_write(ent->d_inode, adev->mc.gart_size);
adev->mman.debugfs_entries[count] = ent;
}
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
adev, &amdgpu_ttm_gtt_fops);
if (IS_ERR(ent))
return PTR_ERR(ent);
i_size_write(ent->d_inode, adev->mc.gart_size);
adev->mman.gtt = ent;
#endif
count = ARRAY_SIZE(amdgpu_ttm_debugfs_list); count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
#ifdef CONFIG_SWIOTLB #ifdef CONFIG_SWIOTLB
@ -1805,7 +1945,6 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count); return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else #else
return 0; return 0;
#endif #endif
} }
@ -1813,14 +1952,9 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev) static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{ {
#if defined(CONFIG_DEBUG_FS) #if defined(CONFIG_DEBUG_FS)
unsigned i;
    for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
        debugfs_remove(adev->mman.debugfs_entries[i]);
#endif
} }


@ -24,6 +24,7 @@
#ifndef __AMDGPU_TTM_H__ #ifndef __AMDGPU_TTM_H__
#define __AMDGPU_TTM_H__ #define __AMDGPU_TTM_H__
#include "amdgpu.h"
#include "gpu_scheduler.h" #include "gpu_scheduler.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0) #define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
@ -45,8 +46,7 @@ struct amdgpu_mman {
bool initialized; bool initialized;
#if defined(CONFIG_DEBUG_FS) #if defined(CONFIG_DEBUG_FS)
    struct dentry *debugfs_entries[8];
#endif #endif
/* buffer handling */ /* buffer handling */
@ -58,6 +58,12 @@ struct amdgpu_mman {
struct amd_sched_entity entity; struct amd_sched_entity entity;
}; };
struct amdgpu_copy_mem {
struct ttm_buffer_object *bo;
struct ttm_mem_reg *mem;
unsigned long offset;
};
extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func; extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
@ -72,6 +78,12 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
struct reservation_object *resv, struct reservation_object *resv,
struct dma_fence **fence, bool direct_submit, struct dma_fence **fence, bool direct_submit,
bool vm_needs_flush); bool vm_needs_flush);
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
struct amdgpu_copy_mem *src,
struct amdgpu_copy_mem *dst,
uint64_t size,
struct reservation_object *resv,
struct dma_fence **f);
int amdgpu_fill_buffer(struct amdgpu_bo *bo, int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint64_t src_data, uint64_t src_data,
struct reservation_object *resv, struct reservation_object *resv,
@ -82,4 +94,20 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem); int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem);
int amdgpu_ttm_recover_gart(struct amdgpu_device *adev); int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end);
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
int *last_invalidated);
bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem);
#endif #endif


@ -270,12 +270,8 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
else else
return AMDGPU_FW_LOAD_SMU; return AMDGPU_FW_LOAD_SMU;
    case CHIP_VEGA10:
    case CHIP_RAVEN:
        if (!load_type)
            return AMDGPU_FW_LOAD_DIRECT;
        else
            return AMDGPU_FW_LOAD_PSP;
@ -364,8 +360,6 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
int amdgpu_ucode_init_bo(struct amdgpu_device *adev) int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
{ {
struct amdgpu_bo **bo = &adev->firmware.fw_buf; struct amdgpu_bo **bo = &adev->firmware.fw_buf;
uint64_t fw_mc_addr;
void *fw_buf_ptr = NULL;
uint64_t fw_offset = 0; uint64_t fw_offset = 0;
int i, err; int i, err;
struct amdgpu_firmware_info *ucode = NULL; struct amdgpu_firmware_info *ucode = NULL;
@ -376,37 +370,39 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
return 0; return 0;
} }
    if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
        err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
                               amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
                               AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
                               NULL, NULL, 0, bo);
        if (err) {
            dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
            goto failed;
        }

        err = amdgpu_bo_reserve(*bo, false);
        if (err) {
            dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
            goto failed_reserve;
        }

        err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
                            &adev->firmware.fw_buf_mc);
        if (err) {
            dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
            goto failed_pin;
        }

        err = amdgpu_bo_kmap(*bo, &adev->firmware.fw_buf_ptr);
        if (err) {
            dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
            goto failed_kmap;
        }

        amdgpu_bo_unreserve(*bo);
    }

    memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);
/* /*
* if SMU loaded firmware, it needn't add SMC, UVD, and VCE * if SMU loaded firmware, it needn't add SMC, UVD, and VCE
@ -425,14 +421,14 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
ucode = &adev->firmware.ucode[i]; ucode = &adev->firmware.ucode[i];
if (ucode->fw) { if (ucode->fw) {
header = (const struct common_firmware_header *)ucode->fw->data; header = (const struct common_firmware_header *)ucode->fw->data;
            amdgpu_ucode_init_single_fw(adev, ucode, adev->firmware.fw_buf_mc + fw_offset,
                                        adev->firmware.fw_buf_ptr + fw_offset);
if (i == AMDGPU_UCODE_ID_CP_MEC1 && if (i == AMDGPU_UCODE_ID_CP_MEC1 &&
adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
const struct gfx_firmware_header_v1_0 *cp_hdr; const struct gfx_firmware_header_v1_0 *cp_hdr;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
                amdgpu_ucode_patch_jt(ucode, adev->firmware.fw_buf_mc + fw_offset,
                                      adev->firmware.fw_buf_ptr + fw_offset);
fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE); fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
} }
fw_offset += ALIGN(ucode->ucode_size, PAGE_SIZE); fw_offset += ALIGN(ucode->ucode_size, PAGE_SIZE);


@ -269,6 +269,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{ {
int i;
kfree(adev->uvd.saved_bo); kfree(adev->uvd.saved_bo);
amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
@ -279,6 +280,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
amdgpu_ring_fini(&adev->uvd.ring); amdgpu_ring_fini(&adev->uvd.ring);
for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
release_firmware(adev->uvd.fw); release_firmware(adev->uvd.fw);
return 0; return 0;
@ -410,10 +414,10 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx); uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
int r = 0; int r = 0;
    r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
    if (r) {
        DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
        return r;
    }
if (!ctx->parser->adev->uvd.address_64_bit) { if (!ctx->parser->adev->uvd.address_64_bit) {
@ -737,10 +741,10 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx); uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
int r; int r;
    r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
    if (r) {
        DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
        return r;
    }
start = amdgpu_bo_gpu_offset(bo); start = amdgpu_bo_gpu_offset(bo);
@ -917,10 +921,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
return -EINVAL; return -EINVAL;
} }
r = amdgpu_cs_sysvm_access_required(parser);
if (r)
return r;
ctx.parser = parser; ctx.parser = parser;
ctx.buf_sizes = buf_sizes; ctx.buf_sizes = buf_sizes;
ctx.ib_idx = ib_idx; ctx.ib_idx = ib_idx;


@ -559,6 +559,7 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
struct amdgpu_bo_va_mapping *mapping; struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo; struct amdgpu_bo *bo;
uint64_t addr; uint64_t addr;
int r;
if (index == 0xffffffff) if (index == 0xffffffff)
index = 0; index = 0;
@ -567,11 +568,11 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32; ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
addr += ((uint64_t)size) * ((uint64_t)index); addr += ((uint64_t)size) * ((uint64_t)index);
    r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
    if (r) {
        DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
                  addr, lo, hi, size, index);
        return r;
    }
if ((addr + (uint64_t)size) > if ((addr + (uint64_t)size) >
@ -647,15 +648,11 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
uint32_t allocated = 0; uint32_t allocated = 0;
uint32_t tmp, handle = 0; uint32_t tmp, handle = 0;
uint32_t *size = &tmp; uint32_t *size = &tmp;
    int i, r = 0, idx = 0;
p->job->vm = NULL; p->job->vm = NULL;
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
r = amdgpu_cs_sysvm_access_required(p);
if (r)
return r;
while (idx < ib->length_dw) { while (idx < ib->length_dw) {
uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx); uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1); uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);


@ -25,30 +25,26 @@
#include "amdgpu_vf_error.h" #include "amdgpu_vf_error.h"
#include "mxgpu_ai.h" #include "mxgpu_ai.h"
void amdgpu_vf_error_put(struct amdgpu_device *adev,
                         uint16_t sub_error_code,
                         uint16_t error_flags,
                         uint64_t error_data)
{
    int index;
    uint16_t error_code;

    if (!amdgpu_sriov_vf(adev))
        return;

    error_code = AMDGIM_ERROR_CODE(AMDGIM_ERROR_CATEGORY_VF, sub_error_code);

    mutex_lock(&adev->virt.vf_errors.lock);
    index = adev->virt.vf_errors.write_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
    adev->virt.vf_errors.code[index] = error_code;
    adev->virt.vf_errors.flags[index] = error_flags;
    adev->virt.vf_errors.data[index] = error_data;
    adev->virt.vf_errors.write_count++;
    mutex_unlock(&adev->virt.vf_errors.lock);
}
@ -58,7 +54,8 @@ void amdgpu_vf_error_trans_all(struct amdgpu_device *adev)
u32 data1, data2, data3; u32 data1, data2, data3;
int index; int index;
    if ((NULL == adev) || (!amdgpu_sriov_vf(adev)) ||
        (!adev->virt.ops) || (!adev->virt.ops->trans_msg)) {
return; return;
} }
/* /*
@ -68,18 +65,22 @@ void amdgpu_vf_error_trans_all(struct amdgpu_device *adev)
return; return;
} }
*/ */
    mutex_lock(&adev->virt.vf_errors.lock);
    /* The errors are overlay of array, correct read_count as full. */
    if (adev->virt.vf_errors.write_count - adev->virt.vf_errors.read_count > AMDGPU_VF_ERROR_ENTRY_SIZE) {
        adev->virt.vf_errors.read_count = adev->virt.vf_errors.write_count - AMDGPU_VF_ERROR_ENTRY_SIZE;
    }

    while (adev->virt.vf_errors.read_count < adev->virt.vf_errors.write_count) {
        index = adev->virt.vf_errors.read_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
        data1 = AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX(adev->virt.vf_errors.code[index],
                                                   adev->virt.vf_errors.flags[index]);
        data2 = adev->virt.vf_errors.data[index] & 0xFFFFFFFF;
        data3 = (adev->virt.vf_errors.data[index] >> 32) & 0xFFFFFFFF;

        adev->virt.ops->trans_msg(adev, IDH_LOG_VF_ERROR, data1, data2, data3);

        adev->virt.vf_errors.read_count++;
    }
    mutex_unlock(&adev->virt.vf_errors.lock);
}


@ -56,7 +56,10 @@ enum AMDGIM_ERROR_CATEGORY {
AMDGIM_ERROR_CATEGORY_MAX AMDGIM_ERROR_CATEGORY_MAX
}; };
void amdgpu_vf_error_put(struct amdgpu_device *adev,
uint16_t sub_error_code,
uint16_t error_flags,
uint64_t error_data);
void amdgpu_vf_error_trans_all (struct amdgpu_device *adev); void amdgpu_vf_error_trans_all (struct amdgpu_device *adev);
#endif /* __VF_ERROR_H__ */ #endif /* __VF_ERROR_H__ */


@ -22,7 +22,7 @@
*/ */
#include "amdgpu.h" #include "amdgpu.h"
#define MAX_KIQ_REG_WAIT    100000000 /* in usecs */
int amdgpu_allocate_static_csa(struct amdgpu_device *adev) int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{ {
@ -114,27 +114,25 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{ {
    signed long r;
    unsigned long flags;
    uint32_t val, seq;
    struct amdgpu_kiq *kiq = &adev->gfx.kiq;
    struct amdgpu_ring *ring = &kiq->ring;

    BUG_ON(!ring->funcs->emit_rreg);

    spin_lock_irqsave(&kiq->ring_lock, flags);
    amdgpu_ring_alloc(ring, 32);
    amdgpu_ring_emit_rreg(ring, reg);
    amdgpu_fence_emit_polling(ring, &seq);
    amdgpu_ring_commit(ring);
    spin_unlock_irqrestore(&kiq->ring_lock, flags);

    r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
    if (r < 1) {
        DRM_ERROR("wait for kiq fence error: %ld\n", r);
        return ~0;
    }
    val = adev->wb.wb[adev->virt.reg_val_offs];

    return val;
@ -143,23 +141,23 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{ {
    signed long r;
    unsigned long flags;
    uint32_t seq;
    struct amdgpu_kiq *kiq = &adev->gfx.kiq;
    struct amdgpu_ring *ring = &kiq->ring;

    BUG_ON(!ring->funcs->emit_wreg);

    spin_lock_irqsave(&kiq->ring_lock, flags);
    amdgpu_ring_alloc(ring, 32);
    amdgpu_ring_emit_wreg(ring, reg, v);
    amdgpu_fence_emit_polling(ring, &seq);
    amdgpu_ring_commit(ring);
    spin_unlock_irqrestore(&kiq->ring_lock, flags);

    r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
    if (r < 1)
        DRM_ERROR("wait for kiq fence error: %ld\n", r);
}
/** /**
@ -274,3 +272,80 @@ void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
(void *)&adev->virt.mm_table.cpu_addr); (void *)&adev->virt.mm_table.cpu_addr);
adev->virt.mm_table.gpu_addr = 0; adev->virt.mm_table.gpu_addr = 0;
} }
int amdgpu_virt_fw_reserve_get_checksum(void *obj,
unsigned long obj_size,
unsigned int key,
unsigned int chksum)
{
unsigned int ret = key;
unsigned long i = 0;
unsigned char *pos;
pos = (char *)obj;
/* calculate checksum */
for (i = 0; i < obj_size; ++i)
ret += *(pos + i);
/* minus the chksum itself */
pos = (char *)&chksum;
for (i = 0; i < sizeof(chksum); ++i)
ret -= *(pos + i);
return ret;
}
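A small worked example of the checksum above (the values and the helper name are made up): the key is added to the byte sum of the object, and the bytes of the checksum passed in are subtracted back out, so recomputing over a structure that already carries its checksum reproduces the stored value.

static unsigned int amdgpu_virt_checksum_example(void)
{
    unsigned char obj[4] = { 0x01, 0x02, 0x03, 0x04 };

    /* 0x10 + (0x01 + 0x02 + 0x03 + 0x04) - 0 = 0x1a */
    return amdgpu_virt_fw_reserve_get_checksum(obj, sizeof(obj), 0x10, 0);
}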
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
uint32_t pf2vf_ver = 0;
uint32_t pf2vf_size = 0;
uint32_t checksum = 0;
uint32_t checkval;
char *str;
adev->virt.fw_reserve.p_pf2vf = NULL;
adev->virt.fw_reserve.p_vf2pf = NULL;
if (adev->fw_vram_usage.va != NULL) {
adev->virt.fw_reserve.p_pf2vf =
(struct amdgim_pf2vf_info_header *)(
adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
pf2vf_ver = adev->virt.fw_reserve.p_pf2vf->version;
AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
/* pf2vf message must be in 4K */
if (pf2vf_size > 0 && pf2vf_size < 4096) {
checkval = amdgpu_virt_fw_reserve_get_checksum(
adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
adev->virt.fw_reserve.checksum_key, checksum);
if (checkval == checksum) {
adev->virt.fw_reserve.p_vf2pf =
((void *)adev->virt.fw_reserve.p_pf2vf +
pf2vf_size);
memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
sizeof(amdgim_vf2pf_info));
AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
AMDGPU_FW_VRAM_VF2PF_VER);
AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
sizeof(amdgim_vf2pf_info));
AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
&str);
#ifdef MODULE
if (THIS_MODULE->version != NULL)
strcpy(str, THIS_MODULE->version);
else
#endif
strcpy(str, "N/A");
AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
0);
AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
amdgpu_virt_fw_reserve_get_checksum(
adev->virt.fw_reserve.p_vf2pf,
pf2vf_size,
adev->virt.fw_reserve.checksum_key, 0));
}
}
}
}


@ -36,6 +36,18 @@ struct amdgpu_mm_table {
uint64_t gpu_addr; uint64_t gpu_addr;
}; };
#define AMDGPU_VF_ERROR_ENTRY_SIZE 16
/* struct error_entry - amdgpu VF error information. */
struct amdgpu_vf_error_buffer {
struct mutex lock;
int read_count;
int write_count;
uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE];
uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE];
uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
};
/** /**
* struct amdgpu_virt_ops - amdgpu device virt operations * struct amdgpu_virt_ops - amdgpu device virt operations
*/ */
@ -46,6 +58,179 @@ struct amdgpu_virt_ops {
void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3); void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
}; };
/*
* Firmware Reserve Frame buffer
*/
struct amdgpu_virt_fw_reserve {
struct amdgim_pf2vf_info_header *p_pf2vf;
struct amdgim_vf2pf_info_header *p_vf2pf;
unsigned int checksum_key;
};
/*
* Defination between PF and VF
* Structures forcibly aligned to 4 to keep the same style as PF.
*/
#define AMDGIM_DATAEXCHANGE_OFFSET (64 * 1024)
#define AMDGIM_GET_STRUCTURE_RESERVED_SIZE(total, u8, u16, u32, u64) \
(total - (((u8)+3) / 4 + ((u16)+1) / 2 + (u32) + (u64)*2))
enum AMDGIM_FEATURE_FLAG {
/* GIM supports feature of Error log collecting */
AMDGIM_FEATURE_ERROR_LOG_COLLECT = 0x1,
/* GIM supports feature of loading uCodes */
AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
};
struct amdgim_pf2vf_info_header {
/* the total structure size in byte. */
uint32_t size;
/* version of this structure, written by the GIM */
uint32_t version;
} __aligned(4);
struct amdgim_pf2vf_info_v1 {
/* header contains size and version */
struct amdgim_pf2vf_info_header header;
/* max_width * max_height */
unsigned int uvd_enc_max_pixels_count;
/* 16x16 pixels/sec, codec independent */
unsigned int uvd_enc_max_bandwidth;
/* max_width * max_height */
unsigned int vce_enc_max_pixels_count;
/* 16x16 pixels/sec, codec independent */
unsigned int vce_enc_max_bandwidth;
/* MEC FW position in kb from the start of visible frame buffer */
unsigned int mecfw_kboffset;
/* The features flags of the GIM driver supports. */
unsigned int feature_flags;
/* use private key from mailbox 2 to create chueksum */
unsigned int checksum;
} __aligned(4);
struct amdgim_pf2vf_info_v2 {
/* header contains size and version */
struct amdgim_pf2vf_info_header header;
/* use private key from mailbox 2 to create chueksum */
uint32_t checksum;
/* The features flags of the GIM driver supports. */
uint32_t feature_flags;
/* max_width * max_height */
uint32_t uvd_enc_max_pixels_count;
/* 16x16 pixels/sec, codec independent */
uint32_t uvd_enc_max_bandwidth;
/* max_width * max_height */
uint32_t vce_enc_max_pixels_count;
/* 16x16 pixels/sec, codec independent */
uint32_t vce_enc_max_bandwidth;
/* MEC FW position in kb from the start of VF visible frame buffer */
uint64_t mecfw_kboffset;
/* MEC FW size in KB */
uint32_t mecfw_ksize;
/* UVD FW position in kb from the start of VF visible frame buffer */
uint64_t uvdfw_kboffset;
/* UVD FW size in KB */
uint32_t uvdfw_ksize;
/* VCE FW position in kb from the start of VF visible frame buffer */
uint64_t vcefw_kboffset;
/* VCE FW size in KB */
uint32_t vcefw_ksize;
uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amdgim_pf2vf_info_header)/sizeof(uint32_t)), 3)];
} __aligned(4);
struct amdgim_vf2pf_info_header {
/* the total structure size in byte. */
uint32_t size;
/*version of this structure, written by the guest */
uint32_t version;
} __aligned(4);
struct amdgim_vf2pf_info_v1 {
/* header contains size and version */
struct amdgim_vf2pf_info_header header;
/* driver version */
char driver_version[64];
/* driver certification, 1=WHQL, 0=None */
unsigned int driver_cert;
/* guest OS type and version: need a define */
unsigned int os_info;
/* in the unit of 1M */
unsigned int fb_usage;
/* guest gfx engine usage percentage */
unsigned int gfx_usage;
/* guest gfx engine health percentage */
unsigned int gfx_health;
/* guest compute engine usage percentage */
unsigned int compute_usage;
/* guest compute engine health percentage */
unsigned int compute_health;
/* guest vce engine usage percentage. 0xffff means N/A. */
unsigned int vce_enc_usage;
/* guest vce engine health percentage. 0xffff means N/A. */
unsigned int vce_enc_health;
/* guest uvd engine usage percentage. 0xffff means N/A. */
unsigned int uvd_enc_usage;
/* guest uvd engine usage percentage. 0xffff means N/A. */
unsigned int uvd_enc_health;
unsigned int checksum;
} __aligned(4);
struct amdgim_vf2pf_info_v2 {
/* header contains size and version */
struct amdgim_vf2pf_info_header header;
uint32_t checksum;
/* driver version */
uint8_t driver_version[64];
/* driver certification, 1=WHQL, 0=None */
uint32_t driver_cert;
/* guest OS type and version: need a define */
uint32_t os_info;
/* in the unit of 1M */
uint32_t fb_usage;
/* guest gfx engine usage percentage */
uint32_t gfx_usage;
/* guest gfx engine health percentage */
uint32_t gfx_health;
/* guest compute engine usage percentage */
uint32_t compute_usage;
/* guest compute engine health percentage */
uint32_t compute_health;
/* guest vce engine usage percentage. 0xffff means N/A. */
uint32_t vce_enc_usage;
/* guest vce engine health percentage. 0xffff means N/A. */
uint32_t vce_enc_health;
/* guest uvd engine usage percentage. 0xffff means N/A. */
uint32_t uvd_enc_usage;
/* guest uvd engine usage percentage. 0xffff means N/A. */
uint32_t uvd_enc_health;
uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amdgim_vf2pf_info_header)/sizeof(uint32_t)), 0)];
} __aligned(4);
#define AMDGPU_FW_VRAM_VF2PF_VER 2
typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info ;
#define AMDGPU_FW_VRAM_VF2PF_WRITE(adev, field, val) \
do { \
((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field = (val); \
} while (0)
#define AMDGPU_FW_VRAM_VF2PF_READ(adev, field, val) \
do { \
(*val) = ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field; \
} while (0)
#define AMDGPU_FW_VRAM_PF2VF_READ(adev, field, val) \
do { \
if (!adev->virt.fw_reserve.p_pf2vf) \
*(val) = 0; \
else { \
if (adev->virt.fw_reserve.p_pf2vf->version == 1) \
*(val) = ((struct amdgim_pf2vf_info_v1 *)adev->virt.fw_reserve.p_pf2vf)->field; \
if (adev->virt.fw_reserve.p_pf2vf->version == 2) \
*(val) = ((struct amdgim_pf2vf_info_v2 *)adev->virt.fw_reserve.p_pf2vf)->field; \
} \
} while (0)
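A minimal sketch of how these accessors are meant to be used (the function name and values are illustrative only): the read macro dispatches on the version field, so a caller does not need to know whether the host exposed a v1 or v2 pf2vf block.

static void amdgpu_virt_pf2vf_example(struct amdgpu_device *adev)
{
    uint32_t feature_flags = 0;

    /* yields 0 when no pf2vf block is mapped */
    AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &feature_flags);

    if (feature_flags & AMDGIM_FEATURE_ERROR_LOG_COLLECT)
        AMDGPU_FW_VRAM_VF2PF_WRITE(adev, fb_usage, 1024 /* MB, made up */);
}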
/* GPU virtualization */ /* GPU virtualization */
struct amdgpu_virt { struct amdgpu_virt {
uint32_t caps; uint32_t caps;
@ -59,6 +244,8 @@ struct amdgpu_virt {
struct work_struct flr_work; struct work_struct flr_work;
struct amdgpu_mm_table mm_table; struct amdgpu_mm_table mm_table;
const struct amdgpu_virt_ops *ops; const struct amdgpu_virt_ops *ops;
struct amdgpu_vf_error_buffer vf_errors;
struct amdgpu_virt_fw_reserve fw_reserve;
}; };
#define AMDGPU_CSA_SIZE (8 * 1024) #define AMDGPU_CSA_SIZE (8 * 1024)
@ -101,5 +288,9 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job); int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job);
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev); int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev); void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
unsigned int key,
unsigned int chksum);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
#endif #endif

Diff for this file not shown because of its size.


@ -25,6 +25,7 @@
#define __AMDGPU_VM_H__ #define __AMDGPU_VM_H__
#include <linux/rbtree.h> #include <linux/rbtree.h>
#include <linux/idr.h>
#include "gpu_scheduler.h" #include "gpu_scheduler.h"
#include "amdgpu_sync.h" #include "amdgpu_sync.h"
@ -72,6 +73,16 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57) #define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57)
#define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL) #define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL)
/* For Raven */
#define AMDGPU_MTYPE_CC 2
#define AMDGPU_PTE_DEFAULT_ATC (AMDGPU_PTE_SYSTEM \
| AMDGPU_PTE_SNOOPED \
| AMDGPU_PTE_EXECUTABLE \
| AMDGPU_PTE_READABLE \
| AMDGPU_PTE_WRITEABLE \
| AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_CC))
/* How to programm VM fault handling */ /* How to programm VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER 0 #define AMDGPU_VM_FAULT_STOP_NEVER 0
#define AMDGPU_VM_FAULT_STOP_FIRST 1 #define AMDGPU_VM_FAULT_STOP_FIRST 1
@ -105,17 +116,24 @@ struct amdgpu_vm_bo_base {
/* protected by spinlock */ /* protected by spinlock */
struct list_head vm_status; struct list_head vm_status;
/* protected by the BO being reserved */
bool moved;
}; };
struct amdgpu_vm_pt { struct amdgpu_vm_pt {
    struct amdgpu_vm_bo_base    base;
uint64_t addr; uint64_t addr;
/* array of page tables, one for each directory entry */ /* array of page tables, one for each directory entry */
struct amdgpu_vm_pt *entries; struct amdgpu_vm_pt *entries;
unsigned last_entry_used; unsigned last_entry_used;
}; };
#define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr))
#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
#define AMDGPU_VM_FAULT_ADDR(fault) ((u64)(fault) & 0xfffffffff000ULL)
struct amdgpu_vm { struct amdgpu_vm {
/* tree of virtual addresses mapped */ /* tree of virtual addresses mapped */
struct rb_root_cached va; struct rb_root_cached va;
@ -123,19 +141,21 @@ struct amdgpu_vm {
/* protecting invalidated */ /* protecting invalidated */
spinlock_t status_lock; spinlock_t status_lock;
/* BOs who needs a validation */
struct list_head evicted;
/* PT BOs which relocated and their parent need an update */
struct list_head relocated;
/* BOs moved, but not yet updated in the PT */ /* BOs moved, but not yet updated in the PT */
struct list_head moved; struct list_head moved;
/* BOs cleared in the PT because of a move */
struct list_head cleared;
/* BO mappings freed, but not yet updated in the PT */ /* BO mappings freed, but not yet updated in the PT */
struct list_head freed; struct list_head freed;
/* contains the page directory */ /* contains the page directory */
struct amdgpu_vm_pt root; struct amdgpu_vm_pt root;
struct dma_fence *last_dir_update; struct dma_fence *last_update;
uint64_t last_eviction_counter;
/* protecting freed */ /* protecting freed */
spinlock_t freed_lock; spinlock_t freed_lock;
@ -143,8 +163,9 @@ struct amdgpu_vm {
/* Scheduler entity for page table updates */ /* Scheduler entity for page table updates */
struct amd_sched_entity entity; struct amd_sched_entity entity;
/* client id */ /* client id and PASID (TODO: replace client_id with PASID) */
u64 client_id; u64 client_id;
unsigned int pasid;
/* dedicated to vm */ /* dedicated to vm */
struct amdgpu_vm_id *reserved_vmid[AMDGPU_MAX_VMHUBS]; struct amdgpu_vm_id *reserved_vmid[AMDGPU_MAX_VMHUBS];
@ -153,6 +174,12 @@ struct amdgpu_vm {
/* Flag to indicate ATS support from PTE for GFX9 */ /* Flag to indicate ATS support from PTE for GFX9 */
bool pte_support_ats; bool pte_support_ats;
/* Up to 128 pending retry page faults */
DECLARE_KFIFO(faults, u64, 128);
/* Limit non-retry fault storms */
unsigned int fault_credit;
}; };
struct amdgpu_vm_id { struct amdgpu_vm_id {
@ -215,16 +242,27 @@ struct amdgpu_vm_manager {
* BIT1[= 0] Compute updated by SDMA [= 1] by CPU * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
*/ */
int vm_update_mode; int vm_update_mode;
/* PASID to VM mapping, will be used in interrupt context to
* look up VM of a page fault
*/
struct idr pasid_idr;
spinlock_t pasid_lock;
}; };
int amdgpu_vm_alloc_pasid(unsigned int bits);
void amdgpu_vm_free_pasid(unsigned int pasid);
void amdgpu_vm_manager_init(struct amdgpu_device *adev); void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev); void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int vm_context); int vm_context, unsigned int pasid);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
unsigned int pasid);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated, struct list_head *validated,
struct amdgpu_bo_list_entry *entry); struct amdgpu_bo_list_entry *entry);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*callback)(void *p, struct amdgpu_bo *bo), int (*callback)(void *p, struct amdgpu_bo *bo),
void *param); void *param);
@ -243,13 +281,13 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
int amdgpu_vm_clear_freed(struct amdgpu_device *adev, int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm, struct amdgpu_vm *vm,
struct dma_fence **fence); struct dma_fence **fence);
int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm, int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
struct amdgpu_sync *sync); struct amdgpu_vm *vm);
int amdgpu_vm_bo_update(struct amdgpu_device *adev, int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va, struct amdgpu_bo_va *bo_va,
bool clear); bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo); struct amdgpu_bo *bo, bool evicted);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo); struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
@ -269,6 +307,8 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_vm *vm, struct amdgpu_vm *vm,
uint64_t saddr, uint64_t size); uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va); struct amdgpu_bo_va *bo_va);
void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev,
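The three AMDGPU_VM_FAULT* macros added above pack one page-fault record into a single u64: the PASID goes into the top 16 bits and the page-aligned fault address into the low 48. A standalone round-trip illustration (the macros are copied locally, uint64_t stands in for the kernel's u64, and the values are made up):

#include <inttypes.h>
#include <stdio.h>

#define AMDGPU_VM_FAULT(pasid, addr)  (((uint64_t)(pasid) << 48) | (addr))
#define AMDGPU_VM_FAULT_PASID(fault)  ((uint64_t)(fault) >> 48)
#define AMDGPU_VM_FAULT_ADDR(fault)   ((uint64_t)(fault) & 0xfffffffff000ULL)

int main(void)
{
    /* Pack a fault key, then recover both halves from it. */
    uint64_t key = AMDGPU_VM_FAULT(0x1234, 0x0000007fffabc000ULL);

    printf("pasid = 0x%" PRIx64 "\n", AMDGPU_VM_FAULT_PASID(key)); /* 0x1234 */
    printf("addr  = 0x%" PRIx64 "\n", AMDGPU_VM_FAULT_ADDR(key));  /* 0x7fffabc000 */
    return 0;
}

This is what lets a fault be queued as a single entry in the new 128-slot faults kfifo above.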


@@ -1343,8 +1343,11 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
idx = 0x80;
str = CSTR(idx);
-if (*str != '\0')
+if (*str != '\0') {
pr_info("ATOM BIOS: %s\n", str);
+strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version));
+}
return ctx;
}


@@ -140,6 +140,7 @@ struct atom_context {
int io_mode;
uint32_t *scratch;
int scratch_size_bytes;
+char vbios_version[20];
};
extern int amdgpu_atom_debug;


@@ -307,7 +307,6 @@ static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
u32 target_tdp);
static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
-static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);
static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
@@ -883,8 +882,9 @@ static int ci_power_control_set_level(struct amdgpu_device *adev)
return ret;
}
-static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
+static void ci_dpm_powergate_uvd(void *handle, bool gate)
{
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
pi->uvd_power_gated = gate;
@@ -901,8 +901,9 @@ static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
}
}
-static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
+static bool ci_dpm_vblank_too_short(void *handle)
{
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
@@ -1210,11 +1211,12 @@ static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
}
}
-static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
+static int ci_dpm_get_fan_speed_percent(void *handle,
u32 *speed)
{
u32 duty, duty100;
u64 tmp64;
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pm.no_fan)
return -ENOENT;
@@ -1237,12 +1239,13 @@ static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
return 0;
}
-static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
+static int ci_dpm_set_fan_speed_percent(void *handle,
u32 speed)
{
u32 tmp;
u32 duty, duty100;
u64 tmp64;
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (adev->pm.no_fan)
@@ -1271,8 +1274,10 @@ static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
return 0;
}
-static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
+static void ci_dpm_set_fan_control_mode(void *handle, u32 mode)
{
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
switch (mode) {
case AMD_FAN_CTRL_NONE:
if (adev->pm.dpm.fan.ucode_fan_control)
@@ -1292,8 +1297,9 @@ static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
}
}
-static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
+static u32 ci_dpm_get_fan_control_mode(void *handle)
{
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (pi->fan_is_controlled_by_smc)
@@ -4378,9 +4384,10 @@ static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
}
-static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
+static int ci_dpm_force_performance_level(void *handle,
enum amd_dpm_forced_level level)
{
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
u32 tmp, levels, i;
int ret;
@@ -5291,8 +5298,9 @@ static void ci_update_requested_ps(struct amdgpu_device *adev,
adev->pm.dpm.requested_ps = &pi->requested_rps;
}
-static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
+static int ci_dpm_pre_set_power_state(void *handle)
{
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
struct amdgpu_ps *new_ps = &requested_ps;
@@ -5304,8 +5312,9 @@ static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
return 0;
}
-static void ci_dpm_post_set_power_state(struct amdgpu_device *adev)
+static void ci_dpm_post_set_power_state(void *handle)
{
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amdgpu_ps *new_ps = &pi->requested_rps;
@@ -5479,8 +5488,9 @@ static void ci_dpm_disable(struct amdgpu_device *adev)
ci_update_current_ps(adev, boot_ps);
}
-static int ci_dpm_set_power_state(struct amdgpu_device *adev)
+static int ci_dpm_set_power_state(void *handle)
{
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amdgpu_ps *new_ps = &pi->requested_rps;
struct amdgpu_ps *old_ps = &pi->current_rps;
@@ -5551,8 +5561,10 @@ static void ci_dpm_reset_asic(struct amdgpu_device *adev)
}
#endif
-static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev)
+static void ci_dpm_display_configuration_changed(void *handle)
{
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
ci_program_display_gap(adev);
}
@@ -6105,9 +6117,10 @@ static int ci_dpm_init(struct amdgpu_device *adev)
}
static void
-ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+ci_dpm_debugfs_print_current_performance_level(void *handle,
struct seq_file *m)
{
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amdgpu_ps *rps = &pi->current_rps;
u32 sclk = ci_get_average_sclk_freq(adev);
@@ -6131,12 +6144,13 @@ ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
seq_printf(m, "GPU load: %u %%\n", activity_percent);
}
-static void ci_dpm_print_power_state(struct amdgpu_device *adev,
-struct amdgpu_ps *rps)
+static void ci_dpm_print_power_state(void *handle, void *current_ps)
{
+struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
struct ci_ps *ps = ci_get_ps(rps);
struct ci_pl *pl;
int i;
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_dpm_print_class_info(rps->class, rps->class2);
amdgpu_dpm_print_cap_info(rps->caps);
@@ -6158,20 +6172,23 @@ static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
(ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
}
-static int ci_check_state_equal(struct amdgpu_device *adev,
-struct amdgpu_ps *cps,
-struct amdgpu_ps *rps,
+static int ci_check_state_equal(void *handle,
+void *current_ps,
+void *request_ps,
bool *equal)
{
struct ci_ps *ci_cps;
struct ci_ps *ci_rps;
int i;
+struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
+struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
return -EINVAL;
-ci_cps = ci_get_ps(cps);
-ci_rps = ci_get_ps(rps);
+ci_cps = ci_get_ps((struct amdgpu_ps *)cps);
+ci_rps = ci_get_ps((struct amdgpu_ps *)rps);
if (ci_cps == NULL) {
*equal = false;
@@ -6199,8 +6216,9 @@ static int ci_check_state_equal(struct amdgpu_device *adev,
return 0;
}
-static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+static u32 ci_dpm_get_sclk(void *handle, bool low)
{
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
@@ -6210,8 +6228,9 @@ static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}
-static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+static u32 ci_dpm_get_mclk(void *handle, bool low)
{
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
@@ -6222,10 +6241,11 @@ static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
}
/* get temperature in millidegrees */
-static int ci_dpm_get_temp(struct amdgpu_device *adev)
+static int ci_dpm_get_temp(void *handle)
{
u32 temp;
int actual_temp = 0;
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
@@ -6261,7 +6281,6 @@ static int ci_dpm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-ci_dpm_set_dpm_funcs(adev);
ci_dpm_set_irq_funcs(adev);
return 0;
@@ -6346,7 +6365,6 @@ static int ci_dpm_sw_fini(void *handle)
flush_work(&adev->pm.dpm.thermal.work);
mutex_lock(&adev->pm.mutex);
-amdgpu_pm_sysfs_fini(adev);
ci_dpm_fini(adev);
mutex_unlock(&adev->pm.mutex);
@@ -6551,9 +6569,10 @@ static int ci_dpm_set_powergating_state(void *handle,
return 0;
}
-static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
+static int ci_dpm_print_clock_levels(void *handle,
enum pp_clock_type type, char *buf)
{
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
@@ -6618,9 +6637,10 @@ static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
return size;
}
-static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
+static int ci_dpm_force_clock_level(void *handle,
enum pp_clock_type type, uint32_t mask)
{
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
@@ -6664,8 +6684,9 @@ static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
return 0;
}
-static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
+static int ci_dpm_get_sclk_od(void *handle)
{
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
struct ci_single_dpm_table *golden_sclk_table =
@@ -6680,8 +6701,9 @@ static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
return value;
}
-static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
+static int ci_dpm_set_sclk_od(void *handle, uint32_t value)
{
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
struct ci_single_dpm_table *golden_sclk_table =
@@ -6698,8 +6720,9 @@ static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
return 0;
}
-static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
+static int ci_dpm_get_mclk_od(void *handle)
{
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
struct ci_single_dpm_table *golden_mclk_table =
@@ -6714,8 +6737,9 @@ static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
return value;
}
-static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
+static int ci_dpm_set_mclk_od(void *handle, uint32_t value)
{
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
struct ci_single_dpm_table *golden_mclk_table =
@@ -6732,9 +6756,10 @@ static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
return 0;
}
-static int ci_dpm_get_power_profile_state(struct amdgpu_device *adev,
+static int ci_dpm_get_power_profile_state(void *handle,
struct amd_pp_profile *query)
{
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (!pi || !query)
@@ -6851,9 +6876,10 @@ static int ci_set_power_profile_state(struct amdgpu_device *adev,
return result;
}
-static int ci_dpm_set_power_profile_state(struct amdgpu_device *adev,
+static int ci_dpm_set_power_profile_state(void *handle,
struct amd_pp_profile *request)
{
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
int ret = -1;
@@ -6906,9 +6932,10 @@ static int ci_dpm_set_power_profile_state(struct amdgpu_device *adev,
return 0;
}
-static int ci_dpm_reset_power_profile_state(struct amdgpu_device *adev,
+static int ci_dpm_reset_power_profile_state(void *handle,
struct amd_pp_profile *request)
{
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (!pi || !request)
@@ -6927,9 +6954,10 @@ static int ci_dpm_reset_power_profile_state(struct amdgpu_device *adev,
return -EINVAL;
}
-static int ci_dpm_switch_power_profile(struct amdgpu_device *adev,
+static int ci_dpm_switch_power_profile(void *handle,
enum amd_pp_profile_type type)
{
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amd_pp_profile request = {0};
@@ -6944,11 +6972,12 @@ static int ci_dpm_switch_power_profile(struct amdgpu_device *adev,
return 0;
}
-static int ci_dpm_read_sensor(struct amdgpu_device *adev, int idx,
+static int ci_dpm_read_sensor(void *handle, int idx,
void *value, int *size)
{
u32 activity_percent = 50;
int ret;
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* size must be at least 4 bytes for all sensors */
if (*size < 4)
@@ -7003,7 +7032,7 @@ const struct amd_ip_funcs ci_dpm_ip_funcs = {
.set_powergating_state = ci_dpm_set_powergating_state,
};
-static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
+const struct amd_pm_funcs ci_dpm_funcs = {
.get_temperature = &ci_dpm_get_temp,
.pre_set_power_state = &ci_dpm_pre_set_power_state,
.set_power_state = &ci_dpm_set_power_state,
@@ -7035,12 +7064,6 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
.read_sensor = ci_dpm_read_sensor,
};
-static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
-{
-if (adev->pm.funcs == NULL)
-adev->pm.funcs = &ci_dpm_funcs;
-}
static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
.set = ci_dpm_set_interrupt_state,
.process = ci_dpm_process_interrupt,
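Most of the ci_dpm.c churn above is a calling-convention change: the entry points now take an opaque void *handle and cast it back to struct amdgpu_device, so they can be exported in the shared amd_pm_funcs table instead of the removed per-driver amdgpu_dpm_funcs hookup. A minimal standalone sketch of that pattern, with made-up names standing in for the driver's structures:

#include <stdio.h>

struct demo_device {            /* stands in for struct amdgpu_device */
    int temperature;
};

struct demo_pm_funcs {          /* stands in for struct amd_pm_funcs */
    int (*get_temp)(void *handle);
};

static int demo_get_temp(void *handle)
{
    /* Each backend recovers its own device type from the opaque handle. */
    struct demo_device *dev = (struct demo_device *)handle;

    return dev->temperature;
}

static const struct demo_pm_funcs demo_funcs = {
    .get_temp = demo_get_temp,
};

int main(void)
{
    struct demo_device dev = { .temperature = 42000 };

    /* The caller only ever sees the ops table and the opaque handle. */
    printf("temp: %d mC\n", demo_funcs.get_temp(&dev));
    return 0;
}

The benefit is that one powerplay front end can drive very different backends (legacy dpm, powerplay) through the same table without knowing their internal types.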


@@ -26,5 +26,6 @@
extern const struct amd_ip_funcs ci_dpm_ip_funcs;
extern const struct amd_ip_funcs kv_dpm_ip_funcs;
+extern const struct amd_pm_funcs ci_dpm_funcs;
+extern const struct amd_pm_funcs kv_dpm_funcs;
#endif


@@ -228,6 +228,34 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
* [127:96] - reserved
*/
+/**
+ * cik_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool cik_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+u32 ring_index = adev->irq.ih.rptr >> 2;
+u16 pasid;
+switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
+case 146:
+case 147:
+pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
+if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
+return true;
+break;
+default:
+/* Not a VM fault */
+return true;
+}
+adev->irq.ih.rptr += 16;
+return false;
+}
/**
* cik_ih_decode_iv - decode an interrupt vector
*
@@ -433,6 +461,7 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = {
static const struct amdgpu_ih_funcs cik_ih_funcs = {
.get_wptr = cik_ih_get_wptr,
+.prescreen_iv = cik_ih_prescreen_iv,
.decode_iv = cik_ih_decode_iv,
.set_rptr = cik_ih_set_rptr
};
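cik_ih_prescreen_iv() above filters source-ID 146/147 (VM fault) ring entries before they are decoded: once amdgpu_vm_pasid_fault_credit() reports that a VM has exhausted its credit, the 16-byte IV entry is simply skipped, so a storm of non-retry faults no longer floods the handler. A standalone sketch of the credit idea only (the real lookup goes through pasid_idr; the array, initial credit and names below are made up for the example):

#include <stdbool.h>
#include <stdio.h>

#define MAX_VMS      4
#define FAULT_CREDIT 16        /* initial per-VM credit (assumed value) */

static unsigned int fault_credit[MAX_VMS];

static void vm_init(unsigned int vm)
{
    fault_credit[vm] = FAULT_CREDIT;
}

/* Returns true if this fault should still be processed. */
static bool vm_fault_credit(unsigned int vm)
{
    if (!fault_credit[vm])
        return false;          /* storm: drop further faults */
    fault_credit[vm]--;
    return true;
}

int main(void)
{
    unsigned int handled = 0;

    vm_init(0);
    for (int i = 0; i < 100; i++)   /* simulated fault storm */
        if (vm_fault_credit(0))
            handled++;

    printf("processed %u of 100 faults\n", handled);
    return 0;
}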


@@ -1387,8 +1387,13 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
+.copy_pte_num_dw = 7,
.copy_pte = cik_sdma_vm_copy_pte,
.write_pte = cik_sdma_vm_write_pte,
+.set_max_nums_pte_pde = 0x1fffff >> 3,
+.set_pte_pde_num_dw = 10,
.set_pte_pde = cik_sdma_vm_set_pte_pde,
};


@@ -207,6 +207,34 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev)
return (wptr & adev->irq.ih.ptr_mask);
}
+/**
+ * cz_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool cz_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+u32 ring_index = adev->irq.ih.rptr >> 2;
+u16 pasid;
+switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
+case 146:
+case 147:
+pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
+if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
+return true;
+break;
+default:
+/* Not a VM fault */
+return true;
+}
+adev->irq.ih.rptr += 16;
+return false;
+}
/**
* cz_ih_decode_iv - decode an interrupt vector
*
@@ -414,6 +442,7 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = {
static const struct amdgpu_ih_funcs cz_ih_funcs = {
.get_wptr = cz_ih_get_wptr,
+.prescreen_iv = cz_ih_prescreen_iv,
.decode_iv = cz_ih_decode_iv,
.set_rptr = cz_ih_set_rptr
};


@@ -288,7 +288,7 @@ dce_virtual_encoder(struct drm_connector *connector)
if (connector->encoder_ids[i] == 0)
break;
-encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -298,7 +298,7 @@ dce_virtual_encoder(struct drm_connector *connector)
/* pick the first one */
if (enc_id)
-return drm_encoder_find(connector->dev, enc_id);
+return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}


@@ -20,6 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include <linux/kernel.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
@@ -125,24 +126,39 @@ MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
@@ -918,8 +934,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
BUG();
}
-snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
-err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp_2.bin", chip_name);
+err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+if (err == -ENOENT) {
+snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
+err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+}
+} else {
+snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
+err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+}
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
@@ -929,8 +954,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
-snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
-err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me_2.bin", chip_name);
+err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+if (err == -ENOENT) {
+snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
+err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+}
+} else {
+snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
+err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+}
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.me_fw);
@@ -941,8 +975,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
-snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
-err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce_2.bin", chip_name);
+err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+if (err == -ENOENT) {
+snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
+err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+}
+} else {
+snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
+err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+}
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.ce_fw);
@@ -1012,8 +1055,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
-snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
-err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec_2.bin", chip_name);
+err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+if (err == -ENOENT) {
+snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+}
+} else {
+snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+}
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.mec_fw);
@@ -1025,8 +1077,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if ((adev->asic_type != CHIP_STONEY) &&
(adev->asic_type != CHIP_TOPAZ)) {
-snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
-err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2_2.bin", chip_name);
+err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+if (err == -ENOENT) {
+snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+}
+} else {
+snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+}
if (!err) {
err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
if (err)
@@ -2053,6 +2114,7 @@ static int gfx_v8_0_sw_fini(void *handle)
amdgpu_gfx_compute_mqd_sw_fini(adev);
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
amdgpu_gfx_kiq_fini(adev);
+amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
gfx_v8_0_mec_fini(adev);
gfx_v8_0_rlc_fini(adev);
@@ -3891,10 +3953,10 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
adev->gfx.rlc.reg_list_format_size_bytes >> 2,
unique_indices,
&indices_count,
-sizeof(unique_indices) / sizeof(int),
+ARRAY_SIZE(unique_indices),
indirect_start_offsets,
&offset_count,
-sizeof(indirect_start_offsets)/sizeof(int));
+ARRAY_SIZE(indirect_start_offsets));
/* save and restore list */
WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);
@@ -3916,14 +3978,14 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
/* starting offsets starts */
WREG32(mmRLC_GPM_SCRATCH_ADDR,
adev->gfx.rlc.starting_offsets_start);
-for (i = 0; i < sizeof(indirect_start_offsets)/sizeof(int); i++)
+for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
WREG32(mmRLC_GPM_SCRATCH_DATA,
indirect_start_offsets[i]);
/* unique indices */
temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
data = mmRLC_SRM_INDEX_CNTL_DATA_0;
-for (i = 0; i < sizeof(unique_indices) / sizeof(int); i++) {
+for (i = 0; i < ARRAY_SIZE(unique_indices); i++) {
if (unique_indices[i] != 0) {
WREG32(temp + i, unique_indices[i] & 0x3FFFF);
WREG32(data + i, unique_indices[i] >> 20);
@@ -4071,18 +4133,12 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
gfx_v8_0_rlc_reset(adev);
gfx_v8_0_init_pg(adev);
-if (!adev->pp_enabled) {
-if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
+if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
/* legacy rlc firmware loading */
r = gfx_v8_0_rlc_load_microcode(adev);
if (r)
return r;
-} else {
-r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-AMDGPU_UCODE_ID_RLC_G);
-if (r)
-return -EINVAL;
-}
}
gfx_v8_0_rlc_start(adev);
@@ -4577,12 +4633,10 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
mqd->compute_misc_reserved = 0x00000003;
-if (!(adev->flags & AMD_IS_APU)) {
-mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
-+ offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
-mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
-+ offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
-}
+mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
++ offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
+mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
++ offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
eop_base_addr = ring->eop_gpu_addr >> 8;
mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
@@ -4753,7 +4807,7 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
gfx_v8_0_kiq_setting(ring);
-if (adev->gfx.in_reset) { /* for GPU_RESET case */
+if (adev->in_sriov_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -4790,7 +4844,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
struct vi_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
-if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
+if (!adev->in_sriov_reset && !adev->gfx.in_suspend) {
memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -4802,7 +4856,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
-} else if (adev->gfx.in_reset) { /* for GPU_RESET case */
+} else if (adev->in_sriov_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -4900,43 +4954,15 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
if (!(adev->flags & AMD_IS_APU))
gfx_v8_0_enable_gui_idle_interrupt(adev, false);
-if (!adev->pp_enabled) {
-if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
+if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
/* legacy firmware loading */
r = gfx_v8_0_cp_gfx_load_microcode(adev);
if (r)
return r;
r = gfx_v8_0_cp_compute_load_microcode(adev);
if (r)
return r;
-} else {
-r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-AMDGPU_UCODE_ID_CP_CE);
-if (r)
-return -EINVAL;
-r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-AMDGPU_UCODE_ID_CP_PFP);
-if (r)
-return -EINVAL;
-r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-AMDGPU_UCODE_ID_CP_ME);
-if (r)
-return -EINVAL;
-if (adev->asic_type == CHIP_TOPAZ) {
-r = gfx_v8_0_cp_compute_load_microcode(adev);
-if (r)
-return r;
-} else {
-r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-AMDGPU_UCODE_ID_CP_MEC1);
-if (r)
-return -EINVAL;
-}
-}
}
r = gfx_v8_0_cp_gfx_resume(adev);
@@ -4975,12 +5001,69 @@ static int gfx_v8_0_hw_init(void *handle)
return r;
}
static int gfx_v8_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = kiq_ring->adev;
uint32_t scratch, tmp = 0;
int r, i;
r = amdgpu_gfx_scratch_get(adev, &scratch);
if (r) {
DRM_ERROR("Failed to get scratch reg (%d).\n", r);
return r;
}
WREG32(scratch, 0xCAFEDEAD);
r = amdgpu_ring_alloc(kiq_ring, 10);
if (r) {
DRM_ERROR("Failed to lock KIQ (%d).\n", r);
amdgpu_gfx_scratch_free(adev, scratch);
return r;
}
/* unmap queues */
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
amdgpu_ring_write(kiq_ring, 0);
amdgpu_ring_write(kiq_ring, 0);
amdgpu_ring_write(kiq_ring, 0);
/* write to scratch for completion */
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
amdgpu_ring_commit(kiq_ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
break;
DRM_UDELAY(1);
}
if (i >= adev->usec_timeout) {
DRM_ERROR("KCQ disabled failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp);
r = -EINVAL;
}
amdgpu_gfx_scratch_free(adev, scratch);
return r;
}
static int gfx_v8_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+int i;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+/* disable KCQ to avoid CPC touch memory not valid anymore */
+for (i = 0; i < adev->gfx.num_compute_rings; i++)
+gfx_v8_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
if (amdgpu_sriov_vf(adev)) {
pr_debug("For SRIOV client, shouldn't do anything.\n");
return 0;
@@ -5902,7 +5985,6 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
{
uint32_t msg_id, pp_state = 0;
uint32_t pp_support_state = 0;
-void *pp_handle = adev->powerplay.pp_handle;
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
@@ -5920,7 +6002,8 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_CG,
pp_support_state,
pp_state);
-amd_set_clockgating_by_smu(pp_handle, msg_id);
+if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
@@ -5941,7 +6024,8 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_MG,
pp_support_state,
pp_state);
-amd_set_clockgating_by_smu(pp_handle, msg_id);
+if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
return 0;
@@ -5953,7 +6037,6 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
uint32_t msg_id, pp_state = 0;
uint32_t pp_support_state = 0;
-void *pp_handle = adev->powerplay.pp_handle;
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
@@ -5971,7 +6054,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_CG,
pp_support_state,
pp_state);
-amd_set_clockgating_by_smu(pp_handle, msg_id);
+if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
@@ -5990,7 +6074,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_3D,
pp_support_state,
pp_state);
-amd_set_clockgating_by_smu(pp_handle, msg_id);
+if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
@@ -6011,7 +6096,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_MG,
pp_support_state,
pp_state);
-amd_set_clockgating_by_smu(pp_handle, msg_id);
+if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
@@ -6026,7 +6112,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_RLC,
pp_support_state,
pp_state);
-amd_set_clockgating_by_smu(pp_handle, msg_id);
+if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
@@ -6040,7 +6127,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_CP,
pp_support_state,
pp_state);
-amd_set_clockgating_by_smu(pp_handle, msg_id);
+if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
return 0;
@@ -6307,6 +6395,104 @@ static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
}
static void gfx_v8_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
bool acquire)
{
struct amdgpu_device *adev = ring->adev;
int pipe_num, tmp, reg;
int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
/* first me only has 2 entries, GFX and HP3D */
if (ring->me > 0)
pipe_num -= 2;
reg = mmSPI_WCL_PIPE_PERCENT_GFX + pipe_num;
tmp = RREG32(reg);
tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
WREG32(reg, tmp);
}
static void gfx_v8_0_pipe_reserve_resources(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
bool acquire)
{
int i, pipe;
bool reserve;
struct amdgpu_ring *iring;
mutex_lock(&adev->gfx.pipe_reserve_mutex);
pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
if (acquire)
set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
else
clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
/* Clear all reservations - everyone reacquires all resources */
for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
gfx_v8_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
true);
for (i = 0; i < adev->gfx.num_compute_rings; ++i)
gfx_v8_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
true);
} else {
/* Lower all pipes without a current reservation */
for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
iring = &adev->gfx.gfx_ring[i];
pipe = amdgpu_gfx_queue_to_bit(adev,
iring->me,
iring->pipe,
0);
reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
gfx_v8_0_ring_set_pipe_percent(iring, reserve);
}
for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
iring = &adev->gfx.compute_ring[i];
pipe = amdgpu_gfx_queue_to_bit(adev,
iring->me,
iring->pipe,
0);
reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
gfx_v8_0_ring_set_pipe_percent(iring, reserve);
}
}
mutex_unlock(&adev->gfx.pipe_reserve_mutex);
}
static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
bool acquire)
{
uint32_t pipe_priority = acquire ? 0x2 : 0x0;
uint32_t queue_priority = acquire ? 0xf : 0x0;
mutex_lock(&adev->srbm_mutex);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
WREG32(mmCP_HQD_PIPE_PRIORITY, pipe_priority);
WREG32(mmCP_HQD_QUEUE_PRIORITY, queue_priority);
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
enum amd_sched_priority priority)
{
struct amdgpu_device *adev = ring->adev;
bool acquire = priority == AMD_SCHED_PRIORITY_HIGH_HW;
if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
return;
gfx_v8_0_hqd_set_priority(adev, ring, acquire);
gfx_v8_0_pipe_reserve_resources(adev, ring, acquire);
}
static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
u64 addr, u64 seq,
unsigned flags)
@@ -6752,6 +6938,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+.set_priority = gfx_v8_0_ring_set_priority_compute,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
@@ -6960,7 +7147,7 @@ static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
uint64_t ce_payload_addr;
int cnt_ce;
-static union {
+union {
struct vi_ce_ib_state regular;
struct vi_ce_ib_state_chained_ib chained;
} ce_payload = {};
@@ -6989,7 +7176,7 @@ static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
uint64_t de_payload_addr, gds_addr, csa_addr;
int cnt_de;
-static union {
+union {
struct vi_de_ib_state regular;
struct vi_de_ib_state_chained_ib chained;
} de_payload = {};
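Several of the gfx_v8_0.c hunks above repeat one pattern for the Polaris parts: request the newer "_2" firmware image first and fall back to the legacy file name only when the file is missing (-ENOENT), so that any other error still propagates. A kernel-style sketch of that pattern only; request_fw_with_fallback() is not a helper in this patch, just a distillation of the repeated open-coded blocks:

#include <linux/firmware.h>
#include <linux/kernel.h>

static int request_fw_with_fallback(const struct firmware **fw,
                                    struct device *dev,
                                    const char *chip, const char *block)
{
        char fw_name[40];
        int err;

        /* Prefer the newer "_2" image... */
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_%s_2.bin", chip, block);
        err = request_firmware(fw, fw_name, dev);
        /* ...but only a missing file falls back to the legacy name. */
        if (err == -ENOENT) {
                snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_%s.bin", chip, block);
                err = request_firmware(fw, fw_name, dev);
        }
        return err;
}

Restricting the fallback to -ENOENT keeps genuine I/O or validation failures visible instead of silently masking them with the older image.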


@ -20,6 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE. * OTHER DEALINGS IN THE SOFTWARE.
* *
*/ */
#include <linux/kernel.h>
#include <linux/firmware.h> #include <linux/firmware.h>
#include <drm/drmP.h> #include <drm/drmP.h>
#include "amdgpu.h" #include "amdgpu.h"
@@ -66,38 +67,70 @@ MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE),
  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0),
  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE),
  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE),
  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1),
  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE),
  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE),
  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2),
  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE),
  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE),
  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3),
  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE),
  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE),
  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4),
  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE),
  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE),
  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5),
  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE),
  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE),
  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6),
  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE),
  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE),
  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7),
  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12)},
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15) }
};
static const u32 golden_settings_gc_9_0[] =
@@ -352,6 +385,25 @@ err1:
    return r;
}
static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
{
release_firmware(adev->gfx.pfp_fw);
adev->gfx.pfp_fw = NULL;
release_firmware(adev->gfx.me_fw);
adev->gfx.me_fw = NULL;
release_firmware(adev->gfx.ce_fw);
adev->gfx.ce_fw = NULL;
release_firmware(adev->gfx.rlc_fw);
adev->gfx.rlc_fw = NULL;
release_firmware(adev->gfx.mec_fw);
adev->gfx.mec_fw = NULL;
release_firmware(adev->gfx.mec2_fw);
adev->gfx.mec2_fw = NULL;
kfree(adev->gfx.rlc.register_list_format);
}
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
    const char *chip_name;
@@ -1120,30 +1172,22 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
{
    struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
    int r;
-   u32 data;
-   u32 size;
-   u32 base;
+   u32 data, base;
    if (!amdgpu_ngg)
        return 0;
    /* Program buffer size */
-   data = 0;
-   size = adev->gfx.ngg.buf[NGG_PRIM].size / 256;
-   data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE, size);
-   size = adev->gfx.ngg.buf[NGG_POS].size / 256;
-   data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE, size);
+   data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
+                        adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
+   data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
+                        adev->gfx.ngg.buf[NGG_POS].size >> 8);
    WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);
-   data = 0;
-   size = adev->gfx.ngg.buf[NGG_CNTL].size / 256;
-   data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE, size);
-   size = adev->gfx.ngg.buf[NGG_PARAM].size / 1024;
-   data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE, size);
+   data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
+                        adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
+   data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
+                        adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
    WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);
    /* Program buffer base address */
@@ -1306,7 +1350,10 @@ static int gfx_v9_0_sw_init(void *handle)
    for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
        ring = &adev->gfx.gfx_ring[i];
        ring->ring_obj = NULL;
-       sprintf(ring->name, "gfx");
+       if (!i)
+           sprintf(ring->name, "gfx");
+       else
+           sprintf(ring->name, "gfx_%d", i);
        ring->use_doorbell = true;
        ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
        r = amdgpu_ring_init(adev, ring, 1024,
@@ -1346,7 +1393,7 @@ static int gfx_v9_0_sw_init(void *handle)
        return r;
    /* create MQD for all compute queues as wel as KIQ for SRIOV case */
-   r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd));
+   r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
    if (r)
        return r;
@@ -1398,9 +1445,11 @@ static int gfx_v9_0_sw_fini(void *handle)
    amdgpu_gfx_compute_mqd_sw_fini(adev);
    amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
    amdgpu_gfx_kiq_fini(adev);
+   amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
    gfx_v9_0_mec_fini(adev);
    gfx_v9_0_ngg_fini(adev);
+   gfx_v9_0_free_microcode(adev);
    return 0;
}
@@ -1682,10 +1731,10 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
        adev->gfx.rlc.reg_list_format_size_bytes >> 2,
        unique_indirect_regs,
        &unique_indirect_reg_count,
-       sizeof(unique_indirect_regs)/sizeof(int),
+       ARRAY_SIZE(unique_indirect_regs),
        indirect_start_offsets,
        &indirect_start_offsets_count,
-       sizeof(indirect_start_offsets)/sizeof(int));
+       ARRAY_SIZE(indirect_start_offsets));
    /* enable auto inc in case it is disabled */
    tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
@@ -1722,12 +1771,12 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
    /* write the starting offsets to RLC scratch ram */
    WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
        adev->gfx.rlc.starting_offsets_start);
-   for (i = 0; i < sizeof(indirect_start_offsets)/sizeof(int); i++)
+   for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
        WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
            indirect_start_offsets[i]);
    /* load unique indirect regs*/
-   for (i = 0; i < sizeof(unique_indirect_regs)/sizeof(int); i++) {
+   for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
        WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) + i,
            unique_indirect_regs[i] & 0x3FFFF);
        WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) + i,
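For reference: the two hunks above swap open-coded sizeof()/sizeof() element counts for the kernel's ARRAY_SIZE() macro (declared in <linux/kernel.h>, which an earlier hunk adds as an include). A minimal sketch of the difference, with a made-up array name:

    #include <linux/kernel.h>   /* ARRAY_SIZE() */

    static int unique_regs[16];

    /* Open-coded count: silently wrong if the element type ever changes. */
    static const size_t n_old = sizeof(unique_regs) / sizeof(int);

    /* ARRAY_SIZE() derives the count from the array itself and (via
     * __must_be_array) breaks the build if a pointer is passed instead. */
    static const size_t n_new = ARRAY_SIZE(unique_regs);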
@@ -1740,11 +1789,7 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
{
-   u32 tmp = 0;
-   tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
-   tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
-   WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
+   WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
}
static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
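For reference: this file repeatedly collapses open-coded read-modify-write sequences into the soc15 helpers, as in the hunk above. REG_SET_FIELD() masks and shifts a named field within a cached register value, and WREG32_FIELD15() wraps the read, field update and write-back of a single field. A sketch of the pattern (the function name here is made up; the macros are the driver's own):

    static void sketch_enable_srm(struct amdgpu_device *adev, bool enable)
    {
        u32 data;

        /* old style: fetch, OR/AND an explicit *_MASK, write back */
        data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
        if (enable)
            data |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
        else
            data &= ~RLC_SRM_CNTL__SRM_ENABLE_MASK;
        WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), data);

        /* new style: name the register and field, let the macro mask/shift */
        data = REG_SET_FIELD(data, RLC_SRM_CNTL, SRM_ENABLE, enable ? 1 : 0);
        WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), data);

        /* or, for a single field, fold the whole read-modify-write into one call */
        WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, enable ? 1 : 0);
    }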
@@ -1822,16 +1867,11 @@ static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev
    uint32_t default_data = 0;
    default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
-   if (enable == true) {
-       data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
-       if (default_data != data)
-           WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
-   } else {
-       data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
-       if(default_data != data)
-           WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
-   }
+   data = REG_SET_FIELD(data, RLC_PG_CNTL,
+                SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
+                enable ? 1 : 0);
+   if (default_data != data)
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
@@ -1841,16 +1881,11 @@ static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *ad
    uint32_t default_data = 0;
    default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
-   if (enable == true) {
-       data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
-       if(default_data != data)
-           WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
-   } else {
-       data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
-       if(default_data != data)
-           WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
-   }
+   data = REG_SET_FIELD(data, RLC_PG_CNTL,
+                SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
+                enable ? 1 : 0);
+   if(default_data != data)
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
@@ -1860,16 +1895,11 @@ static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
    uint32_t default_data = 0;
    default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
-   if (enable == true) {
-       data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK;
-       if(default_data != data)
-           WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
-   } else {
-       data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK;
-       if(default_data != data)
-           WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
-   }
+   data = REG_SET_FIELD(data, RLC_PG_CNTL,
+                CP_PG_DISABLE,
+                enable ? 0 : 1);
+   if(default_data != data)
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
@@ -1878,10 +1908,9 @@ static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
    uint32_t data, default_data;
    default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
-   if (enable == true)
-       data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
-   else
-       data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+   data = REG_SET_FIELD(data, RLC_PG_CNTL,
+                GFX_POWER_GATING_ENABLE,
+                enable ? 1 : 0);
    if(default_data != data)
        WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
@@ -1892,10 +1921,9 @@ static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
    uint32_t data, default_data;
    default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
-   if (enable == true)
-       data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
-   else
-       data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
+   data = REG_SET_FIELD(data, RLC_PG_CNTL,
+                GFX_PIPELINE_PG_ENABLE,
+                enable ? 1 : 0);
    if(default_data != data)
        WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
@@ -1910,10 +1938,9 @@ static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade
    uint32_t data, default_data;
    default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
-   if (enable == true)
-       data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
-   else
-       data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
+   data = REG_SET_FIELD(data, RLC_PG_CNTL,
+                STATIC_PER_CU_PG_ENABLE,
+                enable ? 1 : 0);
    if(default_data != data)
        WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
@@ -1924,10 +1951,9 @@ static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *ad
    uint32_t data, default_data;
    default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
-   if (enable == true)
-       data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
-   else
-       data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
+   data = REG_SET_FIELD(data, RLC_PG_CNTL,
+                DYN_PER_CU_PG_ENABLE,
+                enable ? 1 : 0);
    if(default_data != data)
        WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
@@ -1967,13 +1993,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
{
-   u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
-   tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
-   WREG32_SOC15(GC, 0, mmRLC_CNTL, tmp);
+   WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
    gfx_v9_0_enable_gui_idle_interrupt(adev, false);
    gfx_v9_0_wait_for_rlc_serdes(adev);
}
@@ -2045,8 +2066,10 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
{
    int r;
-   if (amdgpu_sriov_vf(adev))
+   if (amdgpu_sriov_vf(adev)) {
+       gfx_v9_0_init_csb(adev);
        return 0;
+   }
    gfx_v9_0_rlc_stop(adev);
@@ -2463,6 +2486,13 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
    mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
    mqd->compute_misc_reserved = 0x00000003;
+   mqd->dynamic_cu_mask_addr_lo =
+       lower_32_bits(ring->mqd_gpu_addr
+               + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
+   mqd->dynamic_cu_mask_addr_hi =
+       upper_32_bits(ring->mqd_gpu_addr
+               + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
    eop_base_addr = ring->eop_gpu_addr >> 8;
    mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
    mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
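For reference: the sizeof(*mqd) -> sizeof(struct v9_mqd_allocation) switch in the hunks below, together with the dynamic_cu_mask_addr_lo/hi programming above, indicates the MQD buffer is now allocated as a wrapper that appends the dynamic CU/RB masks behind the hardware MQD, so the descriptor can point back into its own allocation via offsetof(). Presumably something like the following layout (a sketch; see v9_structs.h for the authoritative definition):

    struct v9_mqd_allocation {
        struct v9_mqd mqd;          /* hardware-visible queue descriptor */
        uint32_t wptr_poll_mem;
        uint32_t rptr_report_mem;
        uint32_t dynamic_cu_mask;   /* initialized to 0xFFFFFFFF below */
        uint32_t dynamic_rb_mask;
    };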
@@ -2486,10 +2516,10 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
            DOORBELL_SOURCE, 0);
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
            DOORBELL_HIT, 0);
-   }
-   else
+   } else {
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
            DOORBELL_EN, 0);
+   }
    mqd->cp_hqd_pq_doorbell_control = tmp;
@@ -2692,10 +2722,10 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
    gfx_v9_0_kiq_setting(ring);
-   if (adev->gfx.in_reset) { /* for GPU_RESET case */
+   if (adev->in_sriov_reset) { /* for GPU_RESET case */
        /* reset MQD to a clean status */
        if (adev->gfx.mec.mqd_backup[mqd_idx])
-           memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+           memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
        /* reset ring buffer */
        ring->wptr = 0;
@@ -2707,7 +2737,9 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
        soc15_grbm_select(adev, 0, 0, 0, 0);
        mutex_unlock(&adev->srbm_mutex);
    } else {
-       memset((void *)mqd, 0, sizeof(*mqd));
+       memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
+       ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
+       ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
        mutex_lock(&adev->srbm_mutex);
        soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
        gfx_v9_0_mqd_init(ring);
@@ -2716,7 +2748,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
        mutex_unlock(&adev->srbm_mutex);
        if (adev->gfx.mec.mqd_backup[mqd_idx])
-           memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+           memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
    }
    return 0;
@@ -2728,8 +2760,10 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
    struct v9_mqd *mqd = ring->mqd_ptr;
    int mqd_idx = ring - &adev->gfx.compute_ring[0];
-   if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
-       memset((void *)mqd, 0, sizeof(*mqd));
+   if (!adev->in_sriov_reset && !adev->gfx.in_suspend) {
+       memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
+       ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
+       ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
        mutex_lock(&adev->srbm_mutex);
        soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
        gfx_v9_0_mqd_init(ring);
@@ -2737,11 +2771,11 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
        mutex_unlock(&adev->srbm_mutex);
        if (adev->gfx.mec.mqd_backup[mqd_idx])
-           memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
-   } else if (adev->gfx.in_reset) { /* for GPU_RESET case */
+           memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
+   } else if (adev->in_sriov_reset) { /* for GPU_RESET case */
        /* reset MQD to a clean status */
        if (adev->gfx.mec.mqd_backup[mqd_idx])
-           memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+           memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
        /* reset ring buffer */
        ring->wptr = 0;
@@ -2882,12 +2916,70 @@ static int gfx_v9_0_hw_init(void *handle)
    return r;
}
static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = kiq_ring->adev;
uint32_t scratch, tmp = 0;
int r, i;
r = amdgpu_gfx_scratch_get(adev, &scratch);
if (r) {
DRM_ERROR("Failed to get scratch reg (%d).\n", r);
return r;
}
WREG32(scratch, 0xCAFEDEAD);
r = amdgpu_ring_alloc(kiq_ring, 10);
if (r) {
DRM_ERROR("Failed to lock KIQ (%d).\n", r);
amdgpu_gfx_scratch_free(adev, scratch);
return r;
}
/* unmap queues */
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
amdgpu_ring_write(kiq_ring, 0);
amdgpu_ring_write(kiq_ring, 0);
amdgpu_ring_write(kiq_ring, 0);
/* write to scratch for completion */
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
amdgpu_ring_commit(kiq_ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
break;
DRM_UDELAY(1);
}
if (i >= adev->usec_timeout) {
DRM_ERROR("KCQ disabled failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp);
r = -EINVAL;
}
amdgpu_gfx_scratch_free(adev, scratch);
return r;
}
static int gfx_v9_0_hw_fini(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+   int i;
    amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
    amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+   /* disable KCQ to avoid CPC touch memory not valid anymore */
+   for (i = 0; i < adev->gfx.num_compute_rings; i++)
+       gfx_v9_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
    if (amdgpu_sriov_vf(adev)) {
        pr_debug("For SRIOV client, shouldn't do anything.\n");
        return 0;
@@ -2930,15 +3022,10 @@ static bool gfx_v9_0_is_idle(void *handle)
static int gfx_v9_0_wait_for_idle(void *handle)
{
    unsigned i;
-   u32 tmp;
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    for (i = 0; i < adev->usec_timeout; i++) {
-       /* read MC_STATUS */
-       tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS) &
-           GRBM_STATUS__GUI_ACTIVE_MASK;
-       if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
+       if (gfx_v9_0_is_idle(handle))
            return 0;
        udelay(1);
    }
@@ -3497,9 +3584,11 @@ static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
    u32 ref_and_mask, reg_mem_engine;
-   struct nbio_hdp_flush_reg *nbio_hf_reg;
+   const struct nbio_hdp_flush_reg *nbio_hf_reg;
-   if (ring->adev->asic_type == CHIP_VEGA10)
+   if (ring->adev->flags & AMD_IS_APU)
+       nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
+   else
        nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;
    if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
@@ -3528,7 +3617,7 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
    gfx_v9_0_write_data_to_reg(ring, 0, true,
-       SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 1);
+       SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}
static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
@@ -3718,7 +3807,7 @@ static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
-   static struct v9_ce_ib_state ce_payload = {0};
+   struct v9_ce_ib_state ce_payload = {0};
    uint64_t csa_addr;
    int cnt;
@@ -3737,7 +3826,7 @@ static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
-   static struct v9_de_ib_state de_payload = {0};
+   struct v9_de_ib_state de_payload = {0};
    uint64_t csa_addr, gds_addr;
    int cnt;
@@ -3757,6 +3846,12 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
    amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
}
static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
{
amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
}
static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
    uint32_t dw2 = 0;
@@ -3764,6 +3859,8 @@ static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
    if (amdgpu_sriov_vf(ring->adev))
        gfx_v9_0_ring_emit_ce_meta(ring);
+   gfx_v9_0_ring_emit_tmz(ring, true);
    dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
    if (flags & AMDGPU_HAVE_CTX_SWITCH) {
        /* set load_global_config & load_global_uconfig */
@@ -3814,12 +3911,6 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
    ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
}
-static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
-{
-   amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
-   amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
-}
static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
    struct amdgpu_device *adev = ring->adev;

---- next changed file ----

@@ -319,6 +319,12 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
        WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
    tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
        EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
if (!value) {
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
CRASH_ON_NO_RETRY_FAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
CRASH_ON_RETRY_FAULT, 1);
}
    WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}

---- next changed file ----

@@ -831,7 +831,7 @@ static int gmc_v6_0_sw_init(void *handle)
    if (r)
        return r;
-   amdgpu_vm_adjust_size(adev, 64, 4);
+   amdgpu_vm_adjust_size(adev, 64, 9);
    adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
    adev->mc.mc_mask = 0xffffffffffULL;
@@ -901,6 +901,8 @@ static int gmc_v6_0_sw_fini(void *handle)
    gmc_v6_0_gart_fini(adev);
    amdgpu_gem_force_release(adev);
    amdgpu_bo_fini(adev);
+   release_firmware(adev->mc.fw);
+   adev->mc.fw = NULL;
    return 0;
}

---- next changed file ----

@@ -970,7 +970,7 @@ static int gmc_v7_0_sw_init(void *handle)
    * Currently set to 4GB ((1 << 20) 4k pages).
    * Max GPUVM size for cayman and SI is 40 bits.
    */
-   amdgpu_vm_adjust_size(adev, 64, 4);
+   amdgpu_vm_adjust_size(adev, 64, 9);
    adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
    /* Set the internal MC address mask
@@ -1050,6 +1050,8 @@ static int gmc_v7_0_sw_fini(void *handle)
    gmc_v7_0_gart_fini(adev);
    amdgpu_gem_force_release(adev);
    amdgpu_bo_fini(adev);
+   release_firmware(adev->mc.fw);
+   adev->mc.fw = NULL;
    return 0;
}

---- next changed file ----

@@ -1067,7 +1067,7 @@ static int gmc_v8_0_sw_init(void *handle)
    * Currently set to 4GB ((1 << 20) 4k pages).
    * Max GPUVM size for cayman and SI is 40 bits.
    */
-   amdgpu_vm_adjust_size(adev, 64, 4);
+   amdgpu_vm_adjust_size(adev, 64, 9);
    adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
    /* Set the internal MC address mask
@@ -1147,6 +1147,8 @@ static int gmc_v8_0_sw_fini(void *handle)
    gmc_v8_0_gart_fini(adev);
    amdgpu_gem_force_release(adev);
    amdgpu_bo_fini(adev);
+   release_firmware(adev->mc.fw);
+   adev->mc.fw = NULL;
    return 0;
}

---- next changed file ----

@@ -32,6 +32,8 @@
#include "vega10/DC/dce_12_0_offset.h"
#include "vega10/DC/dce_12_0_sh_mask.h"
#include "vega10/vega10_enum.h"
+#include "vega10/MMHUB/mmhub_1_0_offset.h"
+#include "vega10/ATHUB/athub_1_0_offset.h"
#include "soc15_common.h"
@@ -71,13 +73,25 @@ static const u32 golden_settings_vega10_hdp[] =
    0xf6e, 0x0fffffff, 0x00000000,
};
static const u32 golden_settings_mmhub_1_0_0[] =
{
SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_WRCLI2), 0x00000007, 0xfe5fe0fa,
SOC15_REG_OFFSET(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0), 0x00000030, 0x55555565
};
static const u32 golden_settings_athub_1_0_0[] =
{
SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL), 0x0000ff00, 0x00000800,
SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL2), 0x00ff00ff, 0x00080008
};
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
                    struct amdgpu_irq_src *src,
                    unsigned type,
                    enum amdgpu_interrupt_state state)
{
    struct amdgpu_vmhub *hub;
-   u32 tmp, reg, bits, i;
+   u32 tmp, reg, bits, i, j;
    bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
        VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
@@ -89,43 +103,26 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
    switch (state) {
    case AMDGPU_IRQ_STATE_DISABLE:
        for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
            hub = &adev->vmhub[j];
            for (i = 0; i < 16; i++) {
                reg = hub->vm_context0_cntl + i;
                tmp = RREG32(reg);
                tmp &= ~bits;
                WREG32(reg, tmp);
            }
        }
        break;
    case AMDGPU_IRQ_STATE_ENABLE:
        for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
            hub = &adev->vmhub[j];
            for (i = 0; i < 16; i++) {
                reg = hub->vm_context0_cntl + i;
                tmp = RREG32(reg);
                tmp |= bits;
                WREG32(reg, tmp);
            }
        }
        break;
    default:
        break;
    }
@@ -682,8 +679,17 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
    switch (adev->asic_type) {
    case CHIP_VEGA10:
+       amdgpu_program_register_sequence(adev,
+                       golden_settings_mmhub_1_0_0,
+                       (const u32)ARRAY_SIZE(golden_settings_mmhub_1_0_0));
+       amdgpu_program_register_sequence(adev,
+                       golden_settings_athub_1_0_0,
+                       (const u32)ARRAY_SIZE(golden_settings_athub_1_0_0));
        break;
    case CHIP_RAVEN:
+       amdgpu_program_register_sequence(adev,
+                       golden_settings_athub_1_0_0,
+                       (const u32)ARRAY_SIZE(golden_settings_athub_1_0_0));
        break;
    default:
        break;
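For reference: the golden_settings_* tables added earlier in this file are flat triplets consumed by amdgpu_program_register_sequence(); by convention each entry is a register offset, an AND mask of bits to clear, and an OR value to apply. Roughly (an illustrative helper, not the driver's exact implementation):

    static void apply_golden_triplets(struct amdgpu_device *adev,
                                      const u32 *regs, u32 array_size)
    {
        u32 i, reg, and_mask, or_value, tmp;

        for (i = 0; i < array_size; i += 3) {
            reg      = regs[i + 0];
            and_mask = regs[i + 1];
            or_value = regs[i + 2];

            tmp = RREG32(reg);
            tmp &= ~and_mask;   /* clear the masked bits */
            tmp |= or_value;    /* then apply the golden value */
            WREG32(reg, tmp);
        }
    }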
@@ -713,12 +719,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
    if (r)
        return r;
-   /* After HDP is initialized, flush HDP.*/
-   if (adev->flags & AMD_IS_APU)
-       nbio_v7_0_hdp_flush(adev);
-   else
-       nbio_v6_1_hdp_flush(adev);
    switch (adev->asic_type) {
    case CHIP_RAVEN:
        mmhub_v1_0_initialize_power_gating(adev);
@@ -736,13 +736,16 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
    if (r)
        return r;
-   tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
-   tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
-   WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
+   WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
    tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
    WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
+   /* After HDP is initialized, flush HDP.*/
+   if (adev->flags & AMD_IS_APU)
+       nbio_v7_0_hdp_flush(adev);
+   else
+       nbio_v6_1_hdp_flush(adev);
    if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
        value = false;
@@ -751,7 +754,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
    gfxhub_v1_0_set_fault_enable_default(adev, value);
    mmhub_v1_0_set_fault_enable_default(adev, value);
    gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
    DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -770,17 +772,11 @@ static int gmc_v9_0_hw_init(void *handle)
    gmc_v9_0_init_golden_registers(adev);
    if (adev->mode_info.num_crtc) {
-       u32 tmp;
        /* Lockout access through VGA aperture*/
-       tmp = RREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL);
-       tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
-       WREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL, tmp);
+       WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
        /* disable VGA render */
-       tmp = RREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL);
-       tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
-       WREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL, tmp);
+       WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
    }
    r = gmc_v9_0_gart_enable(adev);
@@ -822,9 +818,7 @@ static int gmc_v9_0_suspend(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-   gmc_v9_0_hw_fini(adev);
-   return 0;
+   return gmc_v9_0_hw_fini(adev);
}
static int gmc_v9_0_resume(void *handle)

---- next changed file ----

@@ -207,6 +207,34 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev)
    return (wptr & adev->irq.ih.ptr_mask);
}
/**
* iceland_ih_prescreen_iv - prescreen an interrupt vector
*
* @adev: amdgpu_device pointer
*
* Returns true if the interrupt vector should be further processed.
*/
static bool iceland_ih_prescreen_iv(struct amdgpu_device *adev)
{
u32 ring_index = adev->irq.ih.rptr >> 2;
u16 pasid;
switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
case 146:
case 147:
pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
return true;
break;
default:
/* Not a VM fault */
return true;
}
adev->irq.ih.rptr += 16;
return false;
}
/**
 * iceland_ih_decode_iv - decode an interrupt vector
 *
@@ -412,6 +440,7 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = {
static const struct amdgpu_ih_funcs iceland_ih_funcs = {
    .get_wptr = iceland_ih_get_wptr,
+   .prescreen_iv = iceland_ih_prescreen_iv,
    .decode_iv = iceland_ih_decode_iv,
    .set_rptr = iceland_ih_set_rptr
};
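For reference: prescreen_iv is a new amdgpu_ih_funcs hook (used for the KFD per-PASID fault-credit handling); the IH ring walker is expected to call it before decode_iv and skip entries it rejects, with the callback advancing rptr past the rejected entry itself. A hedged sketch of such a call site (loop shape and names are illustrative, not taken from this patch):

    /* Illustrative only: how an IH processing loop would consume the hook. */
    while (adev->irq.ih.rptr != wptr) {
        /* e.g. drop a VM fault whose PASID has no fault credit left;
         * the callback bumps rptr past the rejected entry on its own */
        if (!adev->irq.ih_funcs->prescreen_iv(adev)) {
            adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
            continue;
        }

        adev->irq.ih_funcs->decode_iv(adev, &entry);    /* fill amdgpu_iv_entry */
        amdgpu_irq_dispatch(adev, &entry);              /* route to the IP handler */
    }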

---- next changed file ----

@@ -42,7 +42,6 @@
#define KV_MINIMUM_ENGINE_CLOCK 800
#define SMC_RAM_END 0x40000
-static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
static int kv_enable_nb_dpm(struct amdgpu_device *adev,
                bool enable);
@@ -64,7 +63,7 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
                    int min_temp, int max_temp);
static int kv_init_fps_limits(struct amdgpu_device *adev);
-static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
+static void kv_dpm_powergate_uvd(void *handle, bool gate);
static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
@@ -1245,8 +1244,9 @@ static void kv_update_requested_ps(struct amdgpu_device *adev,
    adev->pm.dpm.requested_ps = &pi->requested_rps;
}
-static void kv_dpm_enable_bapm(struct amdgpu_device *adev, bool enable)
+static void kv_dpm_enable_bapm(void *handle, bool enable)
{
+   struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    struct kv_power_info *pi = kv_get_pi(adev);
    int ret;
@@ -1672,8 +1672,9 @@ static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate)
    return kv_enable_acp_dpm(adev, !gate);
}
-static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
+static void kv_dpm_powergate_uvd(void *handle, bool gate)
{
+   struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    struct kv_power_info *pi = kv_get_pi(adev);
    int ret;
@@ -1868,10 +1869,11 @@ static int kv_enable_nb_dpm(struct amdgpu_device *adev,
    return ret;
}
-static int kv_dpm_force_performance_level(struct amdgpu_device *adev,
+static int kv_dpm_force_performance_level(void *handle,
                    enum amd_dpm_forced_level level)
{
    int ret;
+   struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
        ret = kv_force_dpm_highest(adev);
@@ -1892,8 +1894,9 @@ static int kv_dpm_force_performance_level(struct amdgpu_device *adev,
    return 0;
}
-static int kv_dpm_pre_set_power_state(struct amdgpu_device *adev)
+static int kv_dpm_pre_set_power_state(void *handle)
{
+   struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    struct kv_power_info *pi = kv_get_pi(adev);
    struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
    struct amdgpu_ps *new_ps = &requested_ps;
@@ -1907,8 +1910,9 @@ static int kv_dpm_pre_set_power_state(struct amdgpu_device *adev)
    return 0;
}
-static int kv_dpm_set_power_state(struct amdgpu_device *adev)
+static int kv_dpm_set_power_state(void *handle)
{
+   struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    struct kv_power_info *pi = kv_get_pi(adev);
    struct amdgpu_ps *new_ps = &pi->requested_rps;
    struct amdgpu_ps *old_ps = &pi->current_rps;
@@ -1981,8 +1985,9 @@ static int kv_dpm_set_power_state(struct amdgpu_device *adev)
    return 0;
}
-static void kv_dpm_post_set_power_state(struct amdgpu_device *adev)
+static void kv_dpm_post_set_power_state(void *handle)
{
+   struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    struct kv_power_info *pi = kv_get_pi(adev);
    struct amdgpu_ps *new_ps = &pi->requested_rps;
@@ -2848,9 +2853,10 @@ static int kv_dpm_init(struct amdgpu_device *adev)
}
static void
-kv_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+kv_dpm_debugfs_print_current_performance_level(void *handle,
                        struct seq_file *m)
{
+   struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    struct kv_power_info *pi = kv_get_pi(adev);
    u32 current_index =
        (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
@@ -2875,11 +2881,12 @@ kv_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
}
static void
-kv_dpm_print_power_state(struct amdgpu_device *adev,
-           struct amdgpu_ps *rps)
+kv_dpm_print_power_state(void *handle, void *request_ps)
{
    int i;
+   struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
    struct kv_ps *ps = kv_get_ps(rps);
+   struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    amdgpu_dpm_print_class_info(rps->class, rps->class2);
    amdgpu_dpm_print_cap_info(rps->caps);
@@ -2905,13 +2912,14 @@ static void kv_dpm_fini(struct amdgpu_device *adev)
    amdgpu_free_extended_power_table(adev);
}
-static void kv_dpm_display_configuration_changed(struct amdgpu_device *adev)
+static void kv_dpm_display_configuration_changed(void *handle)
{
}
-static u32 kv_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+static u32 kv_dpm_get_sclk(void *handle, bool low)
{
+   struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    struct kv_power_info *pi = kv_get_pi(adev);
    struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
@@ -2921,18 +2929,20 @@ static u32 kv_dpm_get_sclk(struct amdgpu_device *adev, bool low)
    return requested_state->levels[requested_state->num_levels - 1].sclk;
}
-static u32 kv_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+static u32 kv_dpm_get_mclk(void *handle, bool low)
{
+   struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    struct kv_power_info *pi = kv_get_pi(adev);
    return pi->sys_info.bootup_uma_clk;
}
/* get temperature in millidegrees */
-static int kv_dpm_get_temp(struct amdgpu_device *adev)
+static int kv_dpm_get_temp(void *handle)
{
    u32 temp;
    int actual_temp = 0;
+   struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    temp = RREG32_SMC(0xC0300E0C);
@@ -2950,7 +2960,6 @@ static int kv_dpm_early_init(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-   kv_dpm_set_dpm_funcs(adev);
    kv_dpm_set_irq_funcs(adev);
    return 0;
@@ -2960,16 +2969,10 @@ static int kv_dpm_late_init(void *handle)
{
    /* powerdown unused blocks for now */
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-   int ret;
    if (!amdgpu_dpm)
        return 0;
-   /* init the sysfs and debugfs files late */
-   ret = amdgpu_pm_sysfs_init(adev);
-   if (ret)
-       return ret;
    kv_dpm_powergate_acp(adev, true);
    kv_dpm_powergate_samu(adev, true);
@@ -3031,7 +3034,6 @@ static int kv_dpm_sw_fini(void *handle)
    flush_work(&adev->pm.dpm.thermal.work);
    mutex_lock(&adev->pm.mutex);
-   amdgpu_pm_sysfs_fini(adev);
    kv_dpm_fini(adev);
    mutex_unlock(&adev->pm.mutex);
@@ -3222,14 +3224,17 @@ static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1,
            (kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state));
}
-static int kv_check_state_equal(struct amdgpu_device *adev,
-               struct amdgpu_ps *cps,
-               struct amdgpu_ps *rps,
+static int kv_check_state_equal(void *handle,
+               void *current_ps,
+               void *request_ps,
                bool *equal)
{
    struct kv_ps *kv_cps;
    struct kv_ps *kv_rps;
    int i;
+   struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
+   struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
+   struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
        return -EINVAL;
@@ -3262,9 +3267,10 @@ static int kv_check_state_equal(struct amdgpu_device *adev,
    return 0;
}
-static int kv_dpm_read_sensor(struct amdgpu_device *adev, int idx,
+static int kv_dpm_read_sensor(void *handle, int idx,
                void *value, int *size)
{
+   struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    struct kv_power_info *pi = kv_get_pi(adev);
    uint32_t sclk;
    u32 pl_index =
@@ -3312,7 +3318,7 @@ const struct amd_ip_funcs kv_dpm_ip_funcs = {
    .set_powergating_state = kv_dpm_set_powergating_state,
};
-static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
+const struct amd_pm_funcs kv_dpm_funcs = {
    .get_temperature = &kv_dpm_get_temp,
    .pre_set_power_state = &kv_dpm_pre_set_power_state,
    .set_power_state = &kv_dpm_set_power_state,
@@ -3330,12 +3336,6 @@ static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
    .read_sensor = &kv_dpm_read_sensor,
};
-static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev)
-{
-   if (adev->pm.funcs == NULL)
-       adev->pm.funcs = &kv_dpm_funcs;
-}
static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
    .set = kv_dpm_set_interrupt_state,
    .process = kv_dpm_process_interrupt,
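For reference: the kv_dpm conversion above moves every dpm callback from struct amdgpu_device * to the opaque void *handle used by the exported struct amd_pm_funcs table (so the same table layout serves both the legacy dpm code and powerplay), and each callback now casts the handle back itself. The recurring boilerplate, as a sketch (the callback name is made up):

    static u32 sketch_get_mclk(void *handle, bool low)
    {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct kv_power_info *pi = kv_get_pi(adev);

        /* body unchanged: it keeps operating on adev / pi as before */
        return pi->sys_info.bootup_uma_clk;
    }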

---- next changed file ----

@@ -273,7 +273,7 @@ static const struct pctl_data pctl0_data[] = {
    {0x135, 0x12a810},
    {0x149, 0x7a82c}
};
-#define PCTL0_DATA_LEN (sizeof(pctl0_data)/sizeof(pctl0_data[0]))
+#define PCTL0_DATA_LEN (ARRAY_SIZE(pctl0_data))
#define PCTL0_RENG_EXEC_END_PTR 0x151
#define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE 0xa640
@@ -309,7 +309,7 @@ static const struct pctl_data pctl1_data[] = {
    {0x1f0, 0x5000a7f6},
    {0x1f1, 0x5000a7e4}
};
-#define PCTL1_DATA_LEN (sizeof(pctl1_data)/sizeof(pctl1_data[0]))
+#define PCTL1_DATA_LEN (ARRAY_SIZE(pctl1_data))
#define PCTL1_RENG_EXEC_END_PTR 0x1f1
#define PCTL1_STCTRL_REG_SAVE_RANGE0_BASE 0xa000
@@ -561,6 +561,13 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
        WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
    tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
        EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
if (!value) {
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
CRASH_ON_NO_RETRY_FAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
CRASH_ON_RETRY_FAULT, 1);
}
    WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}

Some files were not shown in this view because too many files changed in this diff.