drm fixes for 5.7-rc1

Merge tag 'drm-next-2020-06-08' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "These are the fixes from last week for the stuff merged in the merge
  window. It got a bunch of nouveau fixes for HDA audio on some new
  GPUs, some i915 and some amdgpu fixes.

  i915:
   - gvt: Fix one clang warning on debug only function
   - Use ARRAY_SIZE for coccicheck warning
   - Use after free fix for display global state
   - Whitelisting context-local timestamp on Gen9 and two scheduler
     fixes with deps (Cc: stable)
   - Removal of write flag from sysfs files where ineffective

  nouveau:
   - HDMI/DP audio HDA fixes
   - display hang fix for Volta/Turing
   - GK20A regression fix

  amdgpu:
   - Prevent hwmon accesses while GPU is in reset
   - CTF interrupt fix
   - Backlight fix for renoir
   - Fix for display sync groups
   - Display bandwidth validation workaround"

* tag 'drm-next-2020-06-08' of git://anongit.freedesktop.org/drm/drm: (28 commits)
  drm/nouveau/kms/nv50-: clear SW state of disabled windows harder
  drm/nouveau: gr/gk20a: Use firmware version 0
  drm/nouveau/disp/gm200-: detect and potentially disable HDA support on some SORs
  drm/nouveau/disp/gp100: split SOR implementation from gm200
  drm/nouveau/disp: modify OR allocation policy to account for HDA requirements
  drm/nouveau/disp: split part of OR allocation logic into a function
  drm/nouveau/disp: provide hint to OR allocation about HDA requirements
  drm/amd/display: Revalidate bandwidth before commiting DC updates
  drm/amdgpu/display: use blanked rather than plane state for sync groups
  drm/i915/params: fix i915.fake_lmem_start module param sysfs permissions
  drm/i915/params: don't expose inject_probe_failure in debugfs
  drm/i915: Whitelist context-local timestamp in the gen9 cmdparser
  drm/i915: Fix global state use-after-frees with a refcount
  drm/i915: Check for awaits on still currently executing requests
  drm/i915/gt: Do not schedule normal requests immediately along virtual
  drm/i915: Reorder await_execution before await_request
  drm/nouveau/kms/gt215-: fix race with audio driver runpm
  drm/nouveau/disp/gm200-: fix NV_PDISP_SOR_HDMI2_CTRL(n) selection
  Revert "drm/amd/display: disable dcn20 abm feature for bring up"
  drm/amd/powerplay: ack the SMUToHost interrupt on receive V2
  ...
Commit 107821669a
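For orientation before the raw diff: the bulk of the amdgpu portion adds the same guard to every power/hwmon sysfs handler, returning -EPERM while the GPU is in reset. The sketch below is illustrative only, a standalone user-space mock rather than the driver code; the struct and handler names are invented, and only the in_gpu_reset flag and the -EPERM behaviour are taken from the diff.

/*
 * Minimal mock of the guard pattern; "mock_adev" and "show_sensor"
 * are hypothetical stand-ins, not amdgpu APIs.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct mock_adev {
	bool in_gpu_reset;	/* mirrors adev->in_gpu_reset in the diff below */
};

static int show_sensor(struct mock_adev *adev, char *buf, size_t len)
{
	if (adev->in_gpu_reset)
		return -EPERM;	/* refuse access while the GPU resets */

	/* real handlers would take a runtime-PM reference and read HW here */
	return snprintf(buf, len, "42\n");
}

int main(void)
{
	struct mock_adev adev = { .in_gpu_reset = true };
	char buf[8];

	printf("in reset: %d\n", show_sensor(&adev, buf, sizeof(buf)));
	adev.in_gpu_reset = false;
	printf("normal:   %d\n", show_sensor(&adev, buf, sizeof(buf)));
	return 0;
}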
@@ -163,6 +163,9 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
 	enum amd_pm_state_type pm;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
 		return ret;
@@ -196,6 +199,9 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
 	enum amd_pm_state_type state;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	if (strncmp("battery", buf, strlen("battery")) == 0)
 		state = POWER_STATE_TYPE_BATTERY;
 	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
@@ -297,6 +303,9 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
 	enum amd_dpm_forced_level level = 0xff;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
 		return ret;
@@ -334,6 +343,9 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
 	enum amd_dpm_forced_level current_level = 0xff;
 	int ret = 0;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	if (strncmp("low", buf, strlen("low")) == 0) {
 		level = AMD_DPM_FORCED_LEVEL_LOW;
 	} else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -433,6 +445,9 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
 	struct pp_states_info data;
 	int i, buf_len, ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
 		return ret;
@@ -472,6 +487,9 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
 	enum amd_pm_state_type pm = 0;
 	int i = 0, ret = 0;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
 		return ret;
@@ -508,6 +526,9 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	if (adev->pp_force_state_enabled)
 		return amdgpu_get_pp_cur_state(dev, attr, buf);
 	else
@@ -525,6 +546,9 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
 	unsigned long idx;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	if (strlen(buf) == 1)
 		adev->pp_force_state_enabled = false;
 	else if (is_support_sw_smu(adev))
@@ -580,6 +604,9 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
 	char *table = NULL;
 	int size, ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
 		return ret;
@@ -619,6 +646,9 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	int ret = 0;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
 		return ret;
@@ -721,6 +751,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 	const char delimiter[3] = {' ', '\n', '\0'};
 	uint32_t type;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	if (count > 127)
 		return -EINVAL;
 
@@ -810,6 +843,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
 	ssize_t size;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
 		return ret;
@@ -859,6 +895,9 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
 	uint64_t featuremask;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = kstrtou64(buf, 0, &featuremask);
 	if (ret)
 		return -EINVAL;
@@ -899,6 +938,9 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
 	ssize_t size;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
 		return ret;
@@ -955,6 +997,9 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
 	ssize_t size;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
 		return ret;
@@ -1018,6 +1063,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
 	int ret;
 	uint32_t mask = 0;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
 		return ret;
@@ -1049,6 +1097,9 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
 	ssize_t size;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
 		return ret;
@@ -1076,6 +1127,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
 	uint32_t mask = 0;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
 		return ret;
@@ -1107,6 +1161,9 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
 	ssize_t size;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
 		return ret;
@@ -1134,6 +1191,9 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
 	int ret;
 	uint32_t mask = 0;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
 		return ret;
@@ -1167,6 +1227,9 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
 	ssize_t size;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
 		return ret;
@@ -1194,6 +1257,9 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
 	int ret;
 	uint32_t mask = 0;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
 		return ret;
@@ -1227,6 +1293,9 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
 	ssize_t size;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
 		return ret;
@@ -1254,6 +1323,9 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
 	int ret;
 	uint32_t mask = 0;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
 		return ret;
@@ -1287,6 +1359,9 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
 	ssize_t size;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
 		return ret;
@@ -1314,6 +1389,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
 	int ret;
 	uint32_t mask = 0;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
 		return ret;
@@ -1347,6 +1425,9 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
 	uint32_t value = 0;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
 		return ret;
@@ -1372,6 +1453,9 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 	int ret;
 	long int value;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = kstrtol(buf, 0, &value);
 
 	if (ret)
@@ -1410,6 +1494,9 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
 	uint32_t value = 0;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
 		return ret;
@@ -1435,6 +1522,9 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 	int ret;
 	long int value;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = kstrtol(buf, 0, &value);
 
 	if (ret)
@@ -1493,6 +1583,9 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
 	ssize_t size;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
 		return ret;
@@ -1528,6 +1621,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 	long int profile_mode = 0;
 	const char delimiter[3] = {' ', '\n', '\0'};
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	tmp[0] = *(buf);
 	tmp[1] = '\0';
 	ret = kstrtol(tmp, 0, &profile_mode);
@@ -1587,6 +1683,9 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	int r, value, size = sizeof(value);
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(ddev->dev);
 	if (r < 0)
 		return r;
@@ -1620,6 +1719,9 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	int r, value, size = sizeof(value);
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(ddev->dev);
 	if (r < 0)
 		return r;
@@ -1658,6 +1760,9 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
 	uint64_t count0 = 0, count1 = 0;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	if (adev->flags & AMD_IS_APU)
 		return -ENODATA;
 
@@ -1694,6 +1799,9 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	if (adev->unique_id)
 		return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
 
@@ -1888,6 +1996,9 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
 	int channel = to_sensor_dev_attr(attr)->index;
 	int r, temp = 0, size = sizeof(temp);
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	if (channel >= PP_TEMP_MAX)
 		return -EINVAL;
 
@@ -2019,6 +2130,9 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
 	u32 pwm_mode = 0;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(adev->ddev->dev);
 	if (ret < 0)
 		return ret;
@@ -2050,6 +2164,9 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
 	int err, ret;
 	int value;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	err = kstrtoint(buf, 10, &value);
 	if (err)
 		return err;
@@ -2099,6 +2216,9 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
 	u32 value;
 	u32 pwm_mode;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	err = pm_runtime_get_sync(adev->ddev->dev);
 	if (err < 0)
 		return err;
@@ -2148,6 +2268,9 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
 	int err;
 	u32 speed = 0;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	err = pm_runtime_get_sync(adev->ddev->dev);
 	if (err < 0)
 		return err;
@@ -2178,6 +2301,9 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
 	int err;
 	u32 speed = 0;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	err = pm_runtime_get_sync(adev->ddev->dev);
 	if (err < 0)
 		return err;
@@ -2207,6 +2333,9 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
 	u32 size = sizeof(min_rpm);
 	int r;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
 	if (r < 0)
 		return r;
@@ -2232,6 +2361,9 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
 	u32 size = sizeof(max_rpm);
 	int r;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
 	if (r < 0)
 		return r;
@@ -2256,6 +2388,9 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
 	int err;
 	u32 rpm = 0;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	err = pm_runtime_get_sync(adev->ddev->dev);
 	if (err < 0)
 		return err;
@@ -2285,6 +2420,9 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
 	u32 value;
 	u32 pwm_mode;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	err = pm_runtime_get_sync(adev->ddev->dev);
 	if (err < 0)
 		return err;
@@ -2331,6 +2469,9 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
 	u32 pwm_mode = 0;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(adev->ddev->dev);
 	if (ret < 0)
 		return ret;
@@ -2363,6 +2504,9 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
 	int value;
 	u32 pwm_mode;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	err = kstrtoint(buf, 10, &value);
 	if (err)
 		return err;
@@ -2403,6 +2547,9 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
 	u32 vddgfx;
 	int r, size = sizeof(vddgfx);
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
 	if (r < 0)
 		return r;
@@ -2435,6 +2582,9 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
 	u32 vddnb;
 	int r, size = sizeof(vddnb);
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	/* only APUs have vddnb */
 	if (!(adev->flags & AMD_IS_APU))
 		return -EINVAL;
@@ -2472,6 +2622,9 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
 	int r, size = sizeof(u32);
 	unsigned uw;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
 	if (r < 0)
 		return r;
@@ -2508,6 +2661,9 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
 	ssize_t size;
 	int r;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
 	if (r < 0)
 		return r;
@@ -2537,6 +2693,9 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
 	ssize_t size;
 	int r;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
 	if (r < 0)
 		return r;
@@ -2567,6 +2726,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
 	int err;
 	u32 value;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	if (amdgpu_sriov_vf(adev))
 		return -EINVAL;
 
@@ -2605,6 +2767,9 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
 	uint32_t sclk;
 	int r, size = sizeof(sclk);
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
 	if (r < 0)
 		return r;
@@ -2637,6 +2802,9 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
 	uint32_t mclk;
 	int r, size = sizeof(mclk);
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
 	if (r < 0)
 		return r;
@@ -3497,6 +3665,9 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
 	u32 flags = 0;
 	int r;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(dev->dev);
 	if (r < 0)
 		return r;
@@ -1356,7 +1356,7 @@ static int dm_late_init(void *handle)
 	unsigned int linear_lut[16];
 	int i;
 	struct dmcu *dmcu = NULL;
-	bool ret = false;
+	bool ret;
 
 	if (!adev->dm.fw_dmcu)
 		return detect_mst_link_for_all_connectors(adev->ddev);
@@ -1377,13 +1377,10 @@ static int dm_late_init(void *handle)
 	 */
 	params.min_abm_backlight = 0x28F;
 
-	/* todo will enable for navi10 */
-	if (adev->asic_type <= CHIP_RAVEN) {
-		ret = dmcu_load_iram(dmcu, params);
-
-		if (!ret)
-			return -EINVAL;
-	}
+	ret = dmcu_load_iram(dmcu, params);
+
+	if (!ret)
+		return -EINVAL;
 
 	return detect_mst_link_for_all_connectors(adev->ddev);
 }
@@ -1016,9 +1016,17 @@ static void program_timing_sync(
 			}
 		}
 
-		/* set first pipe with plane as master */
+		/* set first unblanked pipe as master */
 		for (j = 0; j < group_size; j++) {
-			if (pipe_set[j]->plane_state) {
+			bool is_blanked;
+
+			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
+				is_blanked =
+					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
+			else
+				is_blanked =
+					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
+			if (!is_blanked) {
 				if (j == 0)
 					break;
 
@@ -1039,9 +1047,17 @@ static void program_timing_sync(
 			status->timing_sync_info.master = false;
 
 		}
-		/* remove any other pipes with plane as they have already been synced */
+		/* remove any other unblanked pipes as they have already been synced */
 		for (j = j + 1; j < group_size; j++) {
-			if (pipe_set[j]->plane_state) {
+			bool is_blanked;
+
+			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
+				is_blanked =
+					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
+			else
+				is_blanked =
+					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
+			if (!is_blanked) {
 				group_size--;
 				pipe_set[j] = pipe_set[group_size];
 				j--;
@@ -2522,6 +2538,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
 
 	copy_stream_update_to_stream(dc, context, stream, stream_update);
 
+	if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+		DC_ERROR("Mode validation failed for stream update!\n");
+		dc_release_state(context);
+		return;
+	}
+
 	commit_planes_for_stream(
 				dc,
 				srf_updates,
@@ -1561,6 +1561,7 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
 	 * events for SMCToHost interrupt.
 	 */
 	uint32_t ctxid = entry->src_data[0];
+	uint32_t data;
 
 	if (client_id == SOC15_IH_CLIENTID_THM) {
 		switch (src_id) {
@@ -1590,6 +1591,11 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
 			orderly_poweroff(true);
 	} else if (client_id == SOC15_IH_CLIENTID_MP1) {
 		if (src_id == 0xfe) {
+			/* ACK SMUToHost interrupt */
+			data = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
+			data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
+			WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, data);
+
 			switch (ctxid) {
 			case 0x3:
 				dev_dbg(adev->dev, "Switched to AC mode!\n");
@@ -10,6 +10,28 @@
 #include "intel_display_types.h"
 #include "intel_global_state.h"
 
+static void __intel_atomic_global_state_free(struct kref *kref)
+{
+	struct intel_global_state *obj_state =
+		container_of(kref, struct intel_global_state, ref);
+	struct intel_global_obj *obj = obj_state->obj;
+
+	obj->funcs->atomic_destroy_state(obj, obj_state);
+}
+
+static void intel_atomic_global_state_put(struct intel_global_state *obj_state)
+{
+	kref_put(&obj_state->ref, __intel_atomic_global_state_free);
+}
+
+static struct intel_global_state *
+intel_atomic_global_state_get(struct intel_global_state *obj_state)
+{
+	kref_get(&obj_state->ref);
+
+	return obj_state;
+}
+
 void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
 				  struct intel_global_obj *obj,
 				  struct intel_global_state *state,
@@ -17,6 +39,10 @@ void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
 {
 	memset(obj, 0, sizeof(*obj));
 
+	state->obj = obj;
+
+	kref_init(&state->ref);
+
 	obj->state = state;
 	obj->funcs = funcs;
 	list_add_tail(&obj->head, &dev_priv->global_obj_list);
@@ -28,7 +54,9 @@ void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv)
 
 	list_for_each_entry_safe(obj, next, &dev_priv->global_obj_list, head) {
 		list_del(&obj->head);
-		obj->funcs->atomic_destroy_state(obj, obj->state);
+
+		drm_WARN_ON(&dev_priv->drm, kref_read(&obj->state->ref) != 1);
+		intel_atomic_global_state_put(obj->state);
 	}
 }
 
@@ -97,10 +125,14 @@ intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
 	if (!obj_state)
 		return ERR_PTR(-ENOMEM);
 
+	obj_state->obj = obj;
 	obj_state->changed = false;
 
+	kref_init(&obj_state->ref);
+
 	state->global_objs[index].state = obj_state;
-	state->global_objs[index].old_state = obj->state;
+	state->global_objs[index].old_state =
+		intel_atomic_global_state_get(obj->state);
 	state->global_objs[index].new_state = obj_state;
 	state->global_objs[index].ptr = obj;
 	obj_state->state = state;
@@ -163,7 +195,9 @@ void intel_atomic_swap_global_state(struct intel_atomic_state *state)
 		new_obj_state->state = NULL;
 
 		state->global_objs[i].state = old_obj_state;
-		obj->state = new_obj_state;
+
+		intel_atomic_global_state_put(obj->state);
+		obj->state = intel_atomic_global_state_get(new_obj_state);
 	}
 }
 
@@ -172,10 +206,9 @@ void intel_atomic_clear_global_state(struct intel_atomic_state *state)
 	int i;
 
 	for (i = 0; i < state->num_global_objs; i++) {
-		struct intel_global_obj *obj = state->global_objs[i].ptr;
+		intel_atomic_global_state_put(state->global_objs[i].old_state);
+		intel_atomic_global_state_put(state->global_objs[i].new_state);
 
-		obj->funcs->atomic_destroy_state(obj,
-						 state->global_objs[i].state);
 		state->global_objs[i].ptr = NULL;
 		state->global_objs[i].state = NULL;
 		state->global_objs[i].old_state = NULL;
@@ -6,6 +6,7 @@
 #ifndef __INTEL_GLOBAL_STATE_H__
 #define __INTEL_GLOBAL_STATE_H__
 
+#include <linux/kref.h>
 #include <linux/list.h>
 
 struct drm_i915_private;
@@ -54,7 +55,9 @@ struct intel_global_obj {
 	for_each_if(obj)
 
 struct intel_global_state {
+	struct intel_global_obj *obj;
 	struct intel_atomic_state *state;
+	struct kref ref;
 	bool changed;
 };
 
@@ -230,7 +230,7 @@ static void intel_context_set_gem(struct intel_context *ce,
 		ce->timeline = intel_timeline_get(ctx->timeline);
 
 	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
-	    intel_engine_has_semaphores(ce->engine))
+	    intel_engine_has_timeslices(ce->engine))
 		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
 }
 
@@ -1969,7 +1969,7 @@ static int __apply_priority(struct intel_context *ce, void *arg)
 {
 	struct i915_gem_context *ctx = arg;
 
-	if (!intel_engine_has_semaphores(ce->engine))
+	if (!intel_engine_has_timeslices(ce->engine))
 		return 0;
 
 	if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
@@ -39,7 +39,6 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
 	unsigned long last_pfn = 0; /* suppress gcc warning */
 	unsigned int max_segment = i915_sg_segment_size();
 	unsigned int sg_page_sizes;
-	struct pagevec pvec;
 	gfp_t noreclaim;
 	int ret;
 
@@ -192,13 +191,17 @@ err_sg:
 	sg_mark_end(sg);
 err_pages:
 	mapping_clear_unevictable(mapping);
-	pagevec_init(&pvec);
-	for_each_sgt_page(page, sgt_iter, st) {
-		if (!pagevec_add(&pvec, page))
-			check_release_pagevec(&pvec);
-	}
-	if (pagevec_count(&pvec))
-		check_release_pagevec(&pvec);
+	if (sg != st->sgl) {
+		struct pagevec pvec;
+
+		pagevec_init(&pvec);
+		for_each_sgt_page(page, sgt_iter, st) {
+			if (!pagevec_add(&pvec, page))
+				check_release_pagevec(&pvec);
+		}
+		if (pagevec_count(&pvec))
+			check_release_pagevec(&pvec);
+	}
 	sg_free_table(st);
 	kfree(st);
 
@@ -97,8 +97,6 @@ int __intel_context_do_pin(struct intel_context *ce)
 {
 	int err;
 
-	GEM_BUG_ON(intel_context_is_closed(ce));
-
 	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
 		err = intel_context_alloc_state(ce);
 		if (err)
@@ -124,7 +124,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
 	 */
 	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
 	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
-	num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]);
+	num_types = ARRAY_SIZE(vgpu_types);
 
 	gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
 			     GFP_KERNEL);
@@ -572,6 +572,9 @@ struct drm_i915_reg_descriptor {
 #define REG32(_reg, ...) \
 	{ .addr = (_reg), __VA_ARGS__ }
 
+#define REG32_IDX(_reg, idx) \
+	{ .addr = _reg(idx) }
+
 /*
  * Convenience macro for adding 64-bit registers.
  *
@@ -669,6 +672,7 @@ static const struct drm_i915_reg_descriptor gen9_blt_regs[] = {
 	REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
 	REG32(BCS_SWCTRL),
 	REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
+	REG32_IDX(RING_CTX_TIMESTAMP, BLT_RING_BASE),
 	REG64_IDX(BCS_GPR, 0),
 	REG64_IDX(BCS_GPR, 1),
 	REG64_IDX(BCS_GPR, 2),
@@ -173,7 +173,7 @@ i915_param_named(enable_gvt, bool, 0400,
 #endif
 
 #if IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)
-i915_param_named_unsafe(fake_lmem_start, ulong, 0600,
+i915_param_named_unsafe(fake_lmem_start, ulong, 0400,
 	"Fake LMEM start offset (default: 0)");
 #endif
 
@@ -64,7 +64,7 @@ struct drm_printer;
 	param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO), 0600) \
 	param(int, edp_vswing, 0, 0400) \
 	param(unsigned int, reset, 3, 0600) \
-	param(unsigned int, inject_probe_failure, 0, 0600) \
+	param(unsigned int, inject_probe_failure, 0, 0) \
 	param(int, fastboot, -1, 0600) \
 	param(int, enable_dpcd_backlight, -1, 0600) \
 	param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE, 0400) \
@@ -121,8 +121,39 @@ static void i915_fence_release(struct dma_fence *fence)
 	i915_sw_fence_fini(&rq->submit);
 	i915_sw_fence_fini(&rq->semaphore);
 
-	/* Keep one request on each engine for reserved use under mempressure */
-	if (!cmpxchg(&rq->engine->request_pool, NULL, rq))
+	/*
+	 * Keep one request on each engine for reserved use under mempressure
+	 *
+	 * We do not hold a reference to the engine here and so have to be
+	 * very careful in what rq->engine we poke. The virtual engine is
+	 * referenced via the rq->context and we released that ref during
+	 * i915_request_retire(), ergo we must not dereference a virtual
+	 * engine here. Not that we would want to, as the only consumer of
+	 * the reserved engine->request_pool is the power management parking,
+	 * which must-not-fail, and that is only run on the physical engines.
+	 *
+	 * Since the request must have been executed to be have completed,
+	 * we know that it will have been processed by the HW and will
+	 * not be unsubmitted again, so rq->engine and rq->execution_mask
+	 * at this point is stable. rq->execution_mask will be a single
+	 * bit if the last and _only_ engine it could execution on was a
+	 * physical engine, if it's multiple bits then it started on and
+	 * could still be on a virtual engine. Thus if the mask is not a
+	 * power-of-two we assume that rq->engine may still be a virtual
+	 * engine and so a dangling invalid pointer that we cannot dereference
+	 *
+	 * For example, consider the flow of a bonded request through a virtual
+	 * engine. The request is created with a wide engine mask (all engines
+	 * that we might execute on). On processing the bond, the request mask
+	 * is reduced to one or more engines. If the request is subsequently
+	 * bound to a single engine, it will then be constrained to only
+	 * execute on that engine and never returned to the virtual engine
+	 * after timeslicing away, see __unwind_incomplete_requests(). Thus we
+	 * know that if the rq->execution_mask is a single bit, rq->engine
+	 * can be a physical engine with the exact corresponding mask.
+	 */
+	if (is_power_of_2(rq->execution_mask) &&
+	    !cmpxchg(&rq->engine->request_pool, NULL, rq))
 		return;
 
 	kmem_cache_free(global.slab_requests, rq);
@@ -326,6 +357,53 @@ void i915_request_retire_upto(struct i915_request *rq)
 	} while (i915_request_retire(tmp) && tmp != rq);
 }
 
+static struct i915_request * const *
+__engine_active(struct intel_engine_cs *engine)
+{
+	return READ_ONCE(engine->execlists.active);
+}
+
+static bool __request_in_flight(const struct i915_request *signal)
+{
+	struct i915_request * const *port, *rq;
+	bool inflight = false;
+
+	if (!i915_request_is_ready(signal))
+		return false;
+
+	/*
+	 * Even if we have unwound the request, it may still be on
+	 * the GPU (preempt-to-busy). If that request is inside an
+	 * unpreemptible critical section, it will not be removed. Some
+	 * GPU functions may even be stuck waiting for the paired request
+	 * (__await_execution) to be submitted and cannot be preempted
+	 * until the bond is executing.
+	 *
+	 * As we know that there are always preemption points between
+	 * requests, we know that only the currently executing request
+	 * may be still active even though we have cleared the flag.
+	 * However, we can't rely on our tracking of ELSP[0] to known
+	 * which request is currently active and so maybe stuck, as
+	 * the tracking maybe an event behind. Instead assume that
+	 * if the context is still inflight, then it is still active
+	 * even if the active flag has been cleared.
+	 */
+	if (!intel_context_inflight(signal->context))
+		return false;
+
+	rcu_read_lock();
+	for (port = __engine_active(signal->engine); (rq = *port); port++) {
+		if (rq->context == signal->context) {
+			inflight = i915_seqno_passed(rq->fence.seqno,
+						     signal->fence.seqno);
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	return inflight;
+}
+
 static int
 __await_execution(struct i915_request *rq,
 		  struct i915_request *signal,
@@ -356,7 +434,7 @@ __await_execution(struct i915_request *rq,
 	}
 
 	spin_lock_irq(&signal->lock);
-	if (i915_request_is_active(signal)) {
+	if (i915_request_is_active(signal) || __request_in_flight(signal)) {
 		if (hook) {
 			hook(rq, &signal->fence);
 			i915_request_put(signal);
@@ -1022,148 +1100,6 @@ await_fence:
 					     I915_FENCE_GFP);
 }
 
-static int
-i915_request_await_request(struct i915_request *to, struct i915_request *from)
-{
-	int ret;
-
-	GEM_BUG_ON(to == from);
-	GEM_BUG_ON(to->timeline == from->timeline);
-
-	if (i915_request_completed(from)) {
-		i915_sw_fence_set_error_once(&to->submit, from->fence.error);
-		return 0;
-	}
-
-	if (to->engine->schedule) {
-		ret = i915_sched_node_add_dependency(&to->sched,
-						     &from->sched,
-						     I915_DEPENDENCY_EXTERNAL);
-		if (ret < 0)
-			return ret;
-	}
-
-	if (to->engine == from->engine)
-		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
-						       &from->submit,
-						       I915_FENCE_GFP);
-	else
-		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-static void mark_external(struct i915_request *rq)
-{
-	/*
-	 * The downside of using semaphores is that we lose metadata passing
-	 * along the signaling chain. This is particularly nasty when we
-	 * need to pass along a fatal error such as EFAULT or EDEADLK. For
-	 * fatal errors we want to scrub the request before it is executed,
-	 * which means that we cannot preload the request onto HW and have
-	 * it wait upon a semaphore.
-	 */
-	rq->sched.flags |= I915_SCHED_HAS_EXTERNAL_CHAIN;
-}
-
-static int
-__i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
-{
-	mark_external(rq);
-	return i915_sw_fence_await_dma_fence(&rq->submit, fence,
-					     i915_fence_context_timeout(rq->i915,
-									fence->context),
-					     I915_FENCE_GFP);
-}
-
-static int
-i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
-{
-	struct dma_fence *iter;
-	int err = 0;
-
-	if (!to_dma_fence_chain(fence))
-		return __i915_request_await_external(rq, fence);
-
-	dma_fence_chain_for_each(iter, fence) {
-		struct dma_fence_chain *chain = to_dma_fence_chain(iter);
-
-		if (!dma_fence_is_i915(chain->fence)) {
-			err = __i915_request_await_external(rq, iter);
-			break;
-		}
-
-		err = i915_request_await_dma_fence(rq, chain->fence);
-		if (err < 0)
-			break;
-	}
-
-	dma_fence_put(iter);
-	return err;
-}
-
-int
-i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
-{
-	struct dma_fence **child = &fence;
-	unsigned int nchild = 1;
-	int ret;
-
-	/*
-	 * Note that if the fence-array was created in signal-on-any mode,
-	 * we should *not* decompose it into its individual fences. However,
-	 * we don't currently store which mode the fence-array is operating
-	 * in. Fortunately, the only user of signal-on-any is private to
-	 * amdgpu and we should not see any incoming fence-array from
-	 * sync-file being in signal-on-any mode.
-	 */
-	if (dma_fence_is_array(fence)) {
-		struct dma_fence_array *array = to_dma_fence_array(fence);
-
-		child = array->fences;
-		nchild = array->num_fences;
-		GEM_BUG_ON(!nchild);
-	}
-
-	do {
-		fence = *child++;
-		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
-			i915_sw_fence_set_error_once(&rq->submit, fence->error);
-			continue;
-		}
-
-		/*
-		 * Requests on the same timeline are explicitly ordered, along
-		 * with their dependencies, by i915_request_add() which ensures
-		 * that requests are submitted in-order through each ring.
-		 */
-		if (fence->context == rq->fence.context)
-			continue;
-
-		/* Squash repeated waits to the same timelines */
-		if (fence->context &&
-		    intel_timeline_sync_is_later(i915_request_timeline(rq),
-						 fence))
-			continue;
-
-		if (dma_fence_is_i915(fence))
-			ret = i915_request_await_request(rq, to_request(fence));
-		else
-			ret = i915_request_await_external(rq, fence);
-		if (ret < 0)
-			return ret;
-
-		/* Record the latest fence used against each timeline */
-		if (fence->context)
-			intel_timeline_sync_set(i915_request_timeline(rq),
-						fence);
-	} while (--nchild);
-
-	return 0;
-}
-
 static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
 					  struct dma_fence *fence)
 {
@@ -1251,6 +1187,55 @@ __i915_request_await_execution(struct i915_request *to,
                                              &from->fence);
 }
 
+static void mark_external(struct i915_request *rq)
+{
+    /*
+     * The downside of using semaphores is that we lose metadata passing
+     * along the signaling chain. This is particularly nasty when we
+     * need to pass along a fatal error such as EFAULT or EDEADLK. For
+     * fatal errors we want to scrub the request before it is executed,
+     * which means that we cannot preload the request onto HW and have
+     * it wait upon a semaphore.
+     */
+    rq->sched.flags |= I915_SCHED_HAS_EXTERNAL_CHAIN;
+}
+
+static int
+__i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
+{
+    mark_external(rq);
+    return i915_sw_fence_await_dma_fence(&rq->submit, fence,
+                                         i915_fence_context_timeout(rq->i915,
+                                                                    fence->context),
+                                         I915_FENCE_GFP);
+}
+
+static int
+i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
+{
+    struct dma_fence *iter;
+    int err = 0;
+
+    if (!to_dma_fence_chain(fence))
+        return __i915_request_await_external(rq, fence);
+
+    dma_fence_chain_for_each(iter, fence) {
+        struct dma_fence_chain *chain = to_dma_fence_chain(iter);
+
+        if (!dma_fence_is_i915(chain->fence)) {
+            err = __i915_request_await_external(rq, iter);
+            break;
+        }
+
+        err = i915_request_await_dma_fence(rq, chain->fence);
+        if (err < 0)
+            break;
+    }
+
+    dma_fence_put(iter);
+    return err;
+}
+
 int
 i915_request_await_execution(struct i915_request *rq,
                              struct dma_fence *fence,
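As an aside, the chain walk added above decomposes a dma_fence_chain into per-link awaits and stops at the first link whose fence is not an i915 fence, falling back to one opaque external wait on the remainder. A minimal userspace-style sketch of that idea, using hypothetical types rather than the kernel's dma_fence API:

    /* Illustrative sketch only: models the chain-walk fallback above with
     * hypothetical types; this is not the kernel dma_fence_chain API. */
    #include <stdbool.h>
    #include <stdio.h>

    struct fence {
        const char *name;
        bool native;          /* analogous to dma_fence_is_i915() */
        struct fence *next;   /* analogous to the chain's previous link */
    };

    static int await_native(struct fence *f)  { printf("await native  %s\n", f->name); return 0; }
    static int await_foreign(struct fence *f) { printf("await foreign %s (stop walk)\n", f->name); return 0; }

    static int await_chain(struct fence *chain)
    {
        int err = 0;

        for (struct fence *it = chain; it; it = it->next) {
            if (!it->native)
                return await_foreign(it); /* cannot decompose any further */
            err = await_native(it);
            if (err < 0)
                break;
        }
        return err;
    }

    int main(void)
    {
        struct fence c = { "c", false, NULL };
        struct fence b = { "b", true, &c };
        struct fence a = { "a", true, &b };

        return await_chain(&a);
    }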
@@ -1299,6 +1284,116 @@ i915_request_await_execution(struct i915_request *rq,
     return 0;
 }
 
+static int
+await_request_submit(struct i915_request *to, struct i915_request *from)
+{
+    /*
+     * If we are waiting on a virtual engine, then it may be
+     * constrained to execute on a single engine *prior* to submission.
+     * When it is submitted, it will be first submitted to the virtual
+     * engine and then passed to the physical engine. We cannot allow
+     * the waiter to be submitted immediately to the physical engine
+     * as it may then bypass the virtual request.
+     */
+    if (to->engine == READ_ONCE(from->engine))
+        return i915_sw_fence_await_sw_fence_gfp(&to->submit,
+                                                &from->submit,
+                                                I915_FENCE_GFP);
+    else
+        return __i915_request_await_execution(to, from, NULL);
+}
+
+static int
+i915_request_await_request(struct i915_request *to, struct i915_request *from)
+{
+    int ret;
+
+    GEM_BUG_ON(to == from);
+    GEM_BUG_ON(to->timeline == from->timeline);
+
+    if (i915_request_completed(from)) {
+        i915_sw_fence_set_error_once(&to->submit, from->fence.error);
+        return 0;
+    }
+
+    if (to->engine->schedule) {
+        ret = i915_sched_node_add_dependency(&to->sched,
+                                             &from->sched,
+                                             I915_DEPENDENCY_EXTERNAL);
+        if (ret < 0)
+            return ret;
+    }
+
+    if (is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
+        ret = await_request_submit(to, from);
+    else
+        ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
+    if (ret < 0)
+        return ret;
+
+    return 0;
+}
+
+int
+i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
+{
+    struct dma_fence **child = &fence;
+    unsigned int nchild = 1;
+    int ret;
+
+    /*
+     * Note that if the fence-array was created in signal-on-any mode,
+     * we should *not* decompose it into its individual fences. However,
+     * we don't currently store which mode the fence-array is operating
+     * in. Fortunately, the only user of signal-on-any is private to
+     * amdgpu and we should not see any incoming fence-array from
+     * sync-file being in signal-on-any mode.
+     */
+    if (dma_fence_is_array(fence)) {
+        struct dma_fence_array *array = to_dma_fence_array(fence);
+
+        child = array->fences;
+        nchild = array->num_fences;
+        GEM_BUG_ON(!nchild);
+    }
+
+    do {
+        fence = *child++;
+        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+            i915_sw_fence_set_error_once(&rq->submit, fence->error);
+            continue;
+        }
+
+        /*
+         * Requests on the same timeline are explicitly ordered, along
+         * with their dependencies, by i915_request_add() which ensures
+         * that requests are submitted in-order through each ring.
+         */
+        if (fence->context == rq->fence.context)
+            continue;
+
+        /* Squash repeated waits to the same timelines */
+        if (fence->context &&
+            intel_timeline_sync_is_later(i915_request_timeline(rq),
+                                         fence))
+            continue;
+
+        if (dma_fence_is_i915(fence))
+            ret = i915_request_await_request(rq, to_request(fence));
+        else
+            ret = i915_request_await_external(rq, fence);
+        if (ret < 0)
+            return ret;
+
+        /* Record the latest fence used against each timeline */
+        if (fence->context)
+            intel_timeline_sync_set(i915_request_timeline(rq),
+                                    fence);
+    } while (--nchild);
+
+    return 0;
+}
+
 /**
  * i915_request_await_object - set this request to (async) wait upon a bo
  * @to: request we are wishing to use
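One detail worth spelling out from the i915_request_await_request() added above: the union of the two execution masks is a power of two only when both requests are confined to the same single physical engine, in which case the code routes through await_request_submit() instead of emitting a semaphore wait. A small standalone illustration of that mask test (plain C, not the driver code):

    /* Illustrates the execution-mask test above: a power-of-two union
     * means both requests can only ever run on one and the same engine. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool is_power_of_2(unsigned long n)
    {
        return n != 0 && (n & (n - 1)) == 0;
    }

    int main(void)
    {
        /* Each bit selects a physical engine the request may run on. */
        unsigned long pinned_engine0 = 0x1; /* only engine 0 */
        unsigned long virtual_1_or_2 = 0x6; /* engine 1 or 2 */

        /* 1: both pinned to engine 0 -> await_request_submit() path */
        printf("%d\n", is_power_of_2(pinned_engine0 | pinned_engine0));

        /* 0: the pair may end up on different engines -> semaphore wait */
        printf("%d\n", is_power_of_2(pinned_engine0 | virtual_1_or_2));
        return 0;
    }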
@@ -209,14 +209,6 @@ static void kick_submission(struct intel_engine_cs *engine,
     if (!inflight)
         goto unlock;
 
-    ENGINE_TRACE(engine,
-                 "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
-                 prio,
-                 rq->fence.context, rq->fence.seqno,
-                 inflight->fence.context, inflight->fence.seqno,
-                 inflight->sched.attr.priority);
-    engine->execlists.queue_priority_hint = prio;
-
     /*
      * If we are already the currently executing context, don't
      * bother evaluating if we should preempt ourselves.
@@ -224,6 +216,14 @@ static void kick_submission(struct intel_engine_cs *engine,
     if (inflight->context == rq->context)
         goto unlock;
 
+    ENGINE_TRACE(engine,
+                 "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
+                 prio,
+                 rq->fence.context, rq->fence.seqno,
+                 inflight->fence.context, inflight->fence.seqno,
+                 inflight->sched.attr.priority);
+
+    engine->execlists.queue_priority_hint = prio;
     if (need_preempt(prio, rq_prio(inflight)))
         tasklet_hi_schedule(&engine->execlists.tasklet);
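The two hunks above are one logical change: the queue-priority-hint bump (and its trace) moves below the "already the inflight context" early-out, so the hint is only raised for a request that could actually preempt something else. A toy model of the resulting control flow, illustrative only and with a simplified preemption test standing in for need_preempt():

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of the reordered kick: the hint is only bumped for requests
     * from a different context than the one already running on the engine. */
    struct engine { int queue_priority_hint; };

    static bool kick(struct engine *e, int prio, int inflight_prio, bool same_ctx)
    {
        if (same_ctx)
            return false;                  /* early-out: hint left untouched */
        e->queue_priority_hint = prio;     /* bump happens after the check   */
        return prio > inflight_prio;       /* whether to kick the tasklet    */
    }

    int main(void)
    {
        struct engine e = { .queue_priority_hint = -100 };

        printf("kick=%d hint=%d\n", kick(&e, 2, 1, true),  e.queue_priority_hint);
        printf("kick=%d hint=%d\n", kick(&e, 2, 1, false), e.queue_priority_hint);
        return 0;
    }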
@@ -277,7 +277,7 @@ nv50_outp_release(struct nouveau_encoder *nv_encoder)
 }
 
 static int
-nv50_outp_acquire(struct nouveau_encoder *nv_encoder)
+nv50_outp_acquire(struct nouveau_encoder *nv_encoder, bool hda)
 {
     struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
     struct nv50_disp *disp = nv50_disp(drm->dev);
@@ -289,6 +289,7 @@ nv50_outp_acquire(struct nouveau_encoder *nv_encoder)
         .base.method = NV50_DISP_MTHD_V1_ACQUIRE,
         .base.hasht = nv_encoder->dcb->hasht,
         .base.hashm = nv_encoder->dcb->hashm,
+        .info.hda = hda,
     };
     int ret;
 
@@ -393,7 +394,7 @@ nv50_dac_enable(struct drm_encoder *encoder)
     struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
     struct nv50_core *core = nv50_disp(encoder->dev)->core;
 
-    nv50_outp_acquire(nv_encoder);
+    nv50_outp_acquire(nv_encoder, false);
 
     core->func->dac->ctrl(core, nv_encoder->or, 1 << nv_crtc->index, asyh);
     asyh->or.depth = 0;
@@ -510,7 +511,7 @@ nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
         if (!nv_connector || !nv_crtc || nv_encoder->or != port ||
             nv_crtc->index != dev_id)
             continue;
-        *enabled = drm_detect_monitor_audio(nv_connector->edid);
+        *enabled = nv_encoder->audio;
         if (*enabled) {
             ret = drm_eld_size(nv_connector->base.eld);
             memcpy(buf, nv_connector->base.eld,
@@ -600,6 +601,7 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
                        (0x0100 << nv_crtc->index),
     };
 
+    nv_encoder->audio = false;
     nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
 
     nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
@@ -636,6 +638,7 @@ nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
 
     nvif_mthd(&disp->disp->object, 0, &args,
               sizeof(args.base) + drm_eld_size(args.data));
+    nv_encoder->audio = true;
 
     nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
                                     nv_crtc->index);
@@ -966,7 +969,7 @@ nv50_msto_enable(struct drm_encoder *encoder)
         DRM_DEBUG_KMS("Failed to allocate VCPI\n");
 
     if (!mstm->links++)
-        nv50_outp_acquire(mstm->outp);
+        nv50_outp_acquire(mstm->outp, false /*XXX: MST audio.*/);
 
     if (mstm->outp->link & 1)
         proto = 0x8;
@@ -1560,12 +1563,18 @@ nv50_sor_enable(struct drm_encoder *encoder)
     struct nouveau_drm *drm = nouveau_drm(dev);
     struct nouveau_connector *nv_connector;
     struct nvbios *bios = &drm->vbios;
+    bool hda = false;
     u8 proto = 0xf;
     u8 depth = 0x0;
 
     nv_connector = nouveau_encoder_connector_get(nv_encoder);
     nv_encoder->crtc = encoder->crtc;
-    nv50_outp_acquire(nv_encoder);
+
+    if ((disp->disp->object.oclass == GT214_DISP ||
+         disp->disp->object.oclass >= GF110_DISP) &&
+        drm_detect_monitor_audio(nv_connector->edid))
+        hda = true;
+    nv50_outp_acquire(nv_encoder, hda);
 
     switch (nv_encoder->dcb->type) {
     case DCB_OUTPUT_TMDS:
@@ -1775,7 +1784,7 @@ nv50_pior_enable(struct drm_encoder *encoder)
     u8 owner = 1 << nv_crtc->index;
     u8 proto;
 
-    nv50_outp_acquire(nv_encoder);
+    nv50_outp_acquire(nv_encoder, false);
 
     switch (asyh->or.bpc) {
     case 10: asyh->or.depth = 0x6; break;
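The nv50_sor_enable() hunk above is where the new hda flag gets decided: an HDA-capable OR is only requested when the display class is new enough for HDMI/DP audio and the sink actually reports audio support. A standalone sketch of that decision; the helper names and class values are illustrative stand-ins, not the driver's:

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch of the "do we want an HDA-capable OR?" decision above.
     * disp_class_has_audio() stands in for the oclass check and the second
     * argument for drm_detect_monitor_audio(); values are illustrative only. */
    static bool disp_class_has_audio(unsigned int oclass)
    {
        const unsigned int GT214_DISP = 0x8570;
        const unsigned int GF110_DISP = 0x9070;
        return oclass == GT214_DISP || oclass >= GF110_DISP;
    }

    static bool want_hda(unsigned int oclass, bool sink_reports_audio)
    {
        return disp_class_has_audio(oclass) && sink_reports_audio;
    }

    int main(void)
    {
        printf("%d\n", want_hda(0x9070, true)); /* 1: request an HDA-capable SOR */
        printf("%d\n", want_hda(0x8270, true)); /* 0: display class too old      */
        return 0;
    }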
@@ -192,6 +192,8 @@ nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
     wndw->func->release(wndw, asyw, asyh);
     asyw->ntfy.handle = 0;
     asyw->sema.handle = 0;
+    asyw->xlut.handle = 0;
+    memset(asyw->image.handle, 0x00, sizeof(asyw->image.handle));
 }
 
 static int
@@ -519,7 +521,8 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
             return PTR_ERR(ctxdma);
         }
 
-        asyw->image.handle[0] = ctxdma->object.handle;
+        if (asyw->visible)
+            asyw->image.handle[0] = ctxdma->object.handle;
     }
 
     asyw->state.fence = dma_resv_get_excl_rcu(nvbo->bo.base.resv);
@@ -46,7 +46,8 @@ struct nv50_disp_acquire_v0 {
     __u8 version;
     __u8 or;
     __u8 link;
-    __u8 pad03[5];
+    __u8 hda;
+    __u8 pad04[4];
 };
 
 struct nv50_disp_dac_load_v0 {
@@ -52,6 +52,7 @@ struct nouveau_encoder {
      * actually programmed on the hw, not the proposed crtc */
     struct drm_crtc *crtc;
     u32 ctrl;
+    bool audio;
 
     struct drm_display_mode mode;
     int last_dpms;
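Note how the nvif interface change above stays layout-compatible: one byte of the old pad03[5] becomes the hda flag and the remaining four bytes are renamed pad04[4], so the structure's size and the offsets of the surrounding fields are unchanged. A small check of that invariant, using local copies of the two layouts purely for illustration:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Local copies of the old and new nv50_disp_acquire_v0 layouts, to show
     * that splitting pad03[5] into hda + pad04[4] preserves size and offsets. */
    struct acquire_v0_old {
        uint8_t version;
        uint8_t or_;
        uint8_t link;
        uint8_t pad03[5];
    };

    struct acquire_v0_new {
        uint8_t version;
        uint8_t or_;
        uint8_t link;
        uint8_t hda;
        uint8_t pad04[4];
    };

    int main(void)
    {
        _Static_assert(sizeof(struct acquire_v0_old) == sizeof(struct acquire_v0_new),
                       "layout size must not change");
        _Static_assert(offsetof(struct acquire_v0_new, hda) ==
                       offsetof(struct acquire_v0_old, pad03),
                       "hda reuses the first padding byte");
        return 0;
    }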
@@ -39,6 +39,7 @@ nvkm-y += nvkm/engine/disp/sorgf119.o
 nvkm-y += nvkm/engine/disp/sorgk104.o
 nvkm-y += nvkm/engine/disp/sorgm107.o
 nvkm-y += nvkm/engine/disp/sorgm200.o
+nvkm-y += nvkm/engine/disp/sorgp100.o
 nvkm-y += nvkm/engine/disp/sorgv100.o
 nvkm-y += nvkm/engine/disp/sortu102.o
 
@@ -36,7 +36,7 @@ gp100_disp = {
     .super = gf119_disp_super,
     .root = &gp100_disp_root_oclass,
     .head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
-    .sor = { .cnt = gf119_sor_cnt, .new = gm200_sor_new },
+    .sor = { .cnt = gf119_sor_cnt, .new = gp100_sor_new },
 };
 
 int
@@ -63,7 +63,7 @@ gp102_disp = {
     .super = gf119_disp_super,
     .root = &gp102_disp_root_oclass,
     .head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
-    .sor = { .cnt = gf119_sor_cnt, .new = gm200_sor_new },
+    .sor = { .cnt = gf119_sor_cnt, .new = gp100_sor_new },
 };
 
 int
@@ -27,10 +27,10 @@ void
 gm200_hdmi_scdc(struct nvkm_ior *ior, int head, u8 scdc)
 {
     struct nvkm_device *device = ior->disp->engine.subdev.device;
-    const u32 hoff = head * 0x800;
+    const u32 soff = nv50_ior_base(ior);
     const u32 ctrl = scdc & 0x3;
 
-    nvkm_mask(device, 0x61c5bc + hoff, 0x00000003, ctrl);
+    nvkm_mask(device, 0x61c5bc + soff, 0x00000003, ctrl);
 
     ior->tmds.high_speed = !!(scdc & 0x2);
 }
@@ -201,6 +201,7 @@ int gf119_sor_new(struct nvkm_disp *, int);
 int gk104_sor_new(struct nvkm_disp *, int);
 int gm107_sor_new(struct nvkm_disp *, int);
 int gm200_sor_new(struct nvkm_disp *, int);
+int gp100_sor_new(struct nvkm_disp *, int);
 
 int gv100_sor_cnt(struct nvkm_disp *, unsigned long *);
 int gv100_sor_new(struct nvkm_disp *, int);
@@ -111,8 +111,44 @@ nvkm_outp_acquire_ior(struct nvkm_outp *outp, u8 user, struct nvkm_ior *ior)
     return 0;
 }
 
+static inline int
+nvkm_outp_acquire_hda(struct nvkm_outp *outp, enum nvkm_ior_type type,
+                      u8 user, bool hda)
+{
+    struct nvkm_ior *ior;
+
+    /* First preference is to reuse the OR that is currently armed
+     * on HW, if any, in order to prevent unnecessary switching.
+     */
+    list_for_each_entry(ior, &outp->disp->ior, head) {
+        if (!ior->identity && !!ior->func->hda.hpd == hda &&
+            !ior->asy.outp && ior->arm.outp == outp)
+            return nvkm_outp_acquire_ior(outp, user, ior);
+    }
+
+    /* Failing that, a completely unused OR is the next best thing. */
+    list_for_each_entry(ior, &outp->disp->ior, head) {
+        if (!ior->identity && !!ior->func->hda.hpd == hda &&
+            !ior->asy.outp && ior->type == type && !ior->arm.outp &&
+            (ior->func->route.set || ior->id == __ffs(outp->info.or)))
+            return nvkm_outp_acquire_ior(outp, user, ior);
+    }
+
+    /* Last resort is to assign an OR that's already active on HW,
+     * but will be released during the next modeset.
+     */
+    list_for_each_entry(ior, &outp->disp->ior, head) {
+        if (!ior->identity && !!ior->func->hda.hpd == hda &&
+            !ior->asy.outp && ior->type == type &&
+            (ior->func->route.set || ior->id == __ffs(outp->info.or)))
+            return nvkm_outp_acquire_ior(outp, user, ior);
+    }
+
+    return -ENOSPC;
+}
+
 int
-nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
+nvkm_outp_acquire(struct nvkm_outp *outp, u8 user, bool hda)
 {
     struct nvkm_ior *ior = outp->ior;
     enum nvkm_ior_proto proto;
@@ -137,32 +173,25 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
         return nvkm_outp_acquire_ior(outp, user, ior);
     }
 
-    /* First preference is to reuse the OR that is currently armed
-     * on HW, if any, in order to prevent unnecessary switching.
-     */
-    list_for_each_entry(ior, &outp->disp->ior, head) {
-        if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp)
-            return nvkm_outp_acquire_ior(outp, user, ior);
-    }
-
-    /* Failing that, a completely unused OR is the next best thing. */
-    list_for_each_entry(ior, &outp->disp->ior, head) {
-        if (!ior->identity &&
-            !ior->asy.outp && ior->type == type && !ior->arm.outp &&
-            (ior->func->route.set || ior->id == __ffs(outp->info.or)))
-            return nvkm_outp_acquire_ior(outp, user, ior);
-    }
-
-    /* Last resort is to assign an OR that's already active on HW,
-     * but will be released during the next modeset.
-     */
-    list_for_each_entry(ior, &outp->disp->ior, head) {
-        if (!ior->identity && !ior->asy.outp && ior->type == type &&
-            (ior->func->route.set || ior->id == __ffs(outp->info.or)))
-            return nvkm_outp_acquire_ior(outp, user, ior);
-    }
-
-    return -ENOSPC;
+    /* If we don't need HDA, first try to acquire an OR that doesn't
+     * support it to leave free the ones that do.
+     */
+    if (!hda) {
+        if (!nvkm_outp_acquire_hda(outp, type, user, false))
+            return 0;
+
+        /* Use a HDA-supporting SOR anyway. */
+        return nvkm_outp_acquire_hda(outp, type, user, true);
+    }
+
+    /* We want HDA, try to acquire an OR that supports it. */
+    if (!nvkm_outp_acquire_hda(outp, type, user, true))
+        return 0;
+
+    /* There weren't any free ORs that support HDA, grab one that
+     * doesn't and at least allow display to work still.
+     */
+    return nvkm_outp_acquire_hda(outp, type, user, false);
 }
 
 void
@@ -32,7 +32,7 @@ int nvkm_outp_new(struct nvkm_disp *, int index, struct dcb_output *,
 void nvkm_outp_del(struct nvkm_outp **);
 void nvkm_outp_init(struct nvkm_outp *);
 void nvkm_outp_fini(struct nvkm_outp *);
-int nvkm_outp_acquire(struct nvkm_outp *, u8 user);
+int nvkm_outp_acquire(struct nvkm_outp *, u8 user, bool hda);
 void nvkm_outp_release(struct nvkm_outp *, u8 user);
 void nvkm_outp_route(struct nvkm_disp *);
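The outp.c rework above factors the old three-pass search into nvkm_outp_acquire_hda() and layers an HDA policy on top: callers that don't need audio first try ORs without HDA support, keeping the audio-capable ones free, while callers that do need audio only fall back to a non-HDA OR as a last resort so the display still lights up. A condensed illustration of that selection order, written as plain C over a toy OR table rather than the nvkm structures:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of the HDA-aware OR allocation policy described above. */
    struct toy_or { const char *name; bool has_hda; bool busy; };

    static struct toy_or *pick(struct toy_or *ors, int n, bool hda)
    {
        /* one pass stands in for the driver's three preference passes */
        for (int i = 0; i < n; i++)
            if (!ors[i].busy && ors[i].has_hda == hda)
                return &ors[i];
        return NULL;
    }

    static struct toy_or *acquire(struct toy_or *ors, int n, bool want_hda)
    {
        struct toy_or *or_;

        if (!want_hda) {
            /* prefer leaving HDA-capable ORs free for audio users */
            if ((or_ = pick(ors, n, false)) || (or_ = pick(ors, n, true)))
                return or_;
            return NULL;
        }
        /* want audio: take an HDA OR if possible, otherwise whatever works */
        if ((or_ = pick(ors, n, true)) || (or_ = pick(ors, n, false)))
            return or_;
        return NULL;
    }

    int main(void)
    {
        struct toy_or ors[] = {
            { "SOR0 (no HDA)", false, false },
            { "SOR1 (HDA)",    true,  false },
        };

        struct toy_or *a = acquire(ors, 2, false);
        if (a) { a->busy = true; printf("no-audio head -> %s\n", a->name); }

        struct toy_or *b = acquire(ors, 2, true);
        if (b) { b->busy = true; printf("audio head    -> %s\n", b->name); }
        return 0;
    }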
@@ -99,7 +99,7 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
         } *args = data;
         int ret = -ENOSYS;
         if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
-            ret = nvkm_outp_acquire(outp, NVKM_OUTP_USER);
+            ret = nvkm_outp_acquire(outp, NVKM_OUTP_USER, args->v0.hda);
             if (ret == 0) {
                 args->v0.or = outp->ior->id;
                 args->v0.link = outp->ior->asy.link;
@@ -119,7 +119,7 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
         if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
             if (args->v0.data & 0xfff00000)
                 return -EINVAL;
-            ret = nvkm_outp_acquire(outp, NVKM_OUTP_PRIV);
+            ret = nvkm_outp_acquire(outp, NVKM_OUTP_PRIV, false);
             if (ret)
                 return ret;
             ret = outp->ior->func->sense(outp->ior, args->v0.data);
@@ -89,7 +89,7 @@ gm200_sor_route_get(struct nvkm_outp *outp, int *link)
 }
 
 static const struct nvkm_ior_func
-gm200_sor = {
+gm200_sor_hda = {
     .route = {
         .get = gm200_sor_route_get,
         .set = gm200_sor_route_set,
@@ -119,8 +119,42 @@ gm200_sor = {
     },
 };
 
+static const struct nvkm_ior_func
+gm200_sor = {
+    .route = {
+        .get = gm200_sor_route_get,
+        .set = gm200_sor_route_set,
+    },
+    .state = gf119_sor_state,
+    .power = nv50_sor_power,
+    .clock = gf119_sor_clock,
+    .hdmi = {
+        .ctrl = gk104_hdmi_ctrl,
+        .scdc = gm200_hdmi_scdc,
+    },
+    .dp = {
+        .lanes = { 0, 1, 2, 3 },
+        .links = gf119_sor_dp_links,
+        .power = g94_sor_dp_power,
+        .pattern = gm107_sor_dp_pattern,
+        .drive = gm200_sor_dp_drive,
+        .vcpi = gf119_sor_dp_vcpi,
+        .audio = gf119_sor_dp_audio,
+        .audio_sym = gf119_sor_dp_audio_sym,
+        .watermark = gf119_sor_dp_watermark,
+    },
+};
+
 int
 gm200_sor_new(struct nvkm_disp *disp, int id)
 {
+    struct nvkm_device *device = disp->engine.subdev.device;
+    u32 hda;
+
+    if (!((hda = nvkm_rd32(device, 0x08a15c)) & 0x40000000))
+        hda = nvkm_rd32(device, 0x101034);
+
+    if (hda & BIT(id))
+        return nvkm_ior_new_(&gm200_sor_hda, disp, SOR, id);
     return nvkm_ior_new_(&gm200_sor, disp, SOR, id);
 }
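gm200_sor_new() above, like the gp100/gv100/tu102 variants that follow, now chooses between two function tables per SOR: a register is read as a bitmask of HDA-capable SORs, and bit id decides which nvkm_ior_func to register. The selection itself is just a bit test, as in this small sketch (the mask value is made up):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    /* Stand-ins for the two per-SOR function tables chosen in gm200_sor_new(). */
    static const char *sor_hda = "sor_hda";
    static const char *sor_plain = "sor";

    static const char *pick_sor_funcs(uint32_t hda_mask, int id)
    {
        return (hda_mask & BIT(id)) ? sor_hda : sor_plain;
    }

    int main(void)
    {
        uint32_t hda_mask = 0x5; /* made up: SOR0 and SOR2 wired to the HDA block */

        for (int id = 0; id < 4; id++)
            printf("SOR%d -> %s\n", id, pick_sor_funcs(hda_mask, id));
        return 0;
    }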
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2020 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ior.h"
+
+static const struct nvkm_ior_func
+gp100_sor_hda = {
+    .route = {
+        .get = gm200_sor_route_get,
+        .set = gm200_sor_route_set,
+    },
+    .state = gf119_sor_state,
+    .power = nv50_sor_power,
+    .clock = gf119_sor_clock,
+    .hdmi = {
+        .ctrl = gk104_hdmi_ctrl,
+        .scdc = gm200_hdmi_scdc,
+    },
+    .dp = {
+        .lanes = { 0, 1, 2, 3 },
+        .links = gf119_sor_dp_links,
+        .power = g94_sor_dp_power,
+        .pattern = gm107_sor_dp_pattern,
+        .drive = gm200_sor_dp_drive,
+        .vcpi = gf119_sor_dp_vcpi,
+        .audio = gf119_sor_dp_audio,
+        .audio_sym = gf119_sor_dp_audio_sym,
+        .watermark = gf119_sor_dp_watermark,
+    },
+    .hda = {
+        .hpd = gf119_hda_hpd,
+        .eld = gf119_hda_eld,
+        .device_entry = gf119_hda_device_entry,
+    },
+};
+
+static const struct nvkm_ior_func
+gp100_sor = {
+    .route = {
+        .get = gm200_sor_route_get,
+        .set = gm200_sor_route_set,
+    },
+    .state = gf119_sor_state,
+    .power = nv50_sor_power,
+    .clock = gf119_sor_clock,
+    .hdmi = {
+        .ctrl = gk104_hdmi_ctrl,
+        .scdc = gm200_hdmi_scdc,
+    },
+    .dp = {
+        .lanes = { 0, 1, 2, 3 },
+        .links = gf119_sor_dp_links,
+        .power = g94_sor_dp_power,
+        .pattern = gm107_sor_dp_pattern,
+        .drive = gm200_sor_dp_drive,
+        .vcpi = gf119_sor_dp_vcpi,
+        .audio = gf119_sor_dp_audio,
+        .audio_sym = gf119_sor_dp_audio_sym,
+        .watermark = gf119_sor_dp_watermark,
+    },
+};
+
+int
+gp100_sor_new(struct nvkm_disp *disp, int id)
+{
+    struct nvkm_device *device = disp->engine.subdev.device;
+    u32 hda;
+
+    if (!((hda = nvkm_rd32(device, 0x08a15c)) & 0x40000000))
+        hda = nvkm_rd32(device, 0x10ebb0) >> 8;
+
+    if (hda & BIT(id))
+        return nvkm_ior_new_(&gp100_sor_hda, disp, SOR, id);
+    return nvkm_ior_new_(&gp100_sor, disp, SOR, id);
+}
@@ -78,7 +78,7 @@ gv100_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
 }
 
 static const struct nvkm_ior_func
-gv100_sor = {
+gv100_sor_hda = {
     .route = {
         .get = gm200_sor_route_get,
         .set = gm200_sor_route_set,
@@ -107,9 +107,42 @@ gv100_sor = {
     },
 };
 
+static const struct nvkm_ior_func
+gv100_sor = {
+    .route = {
+        .get = gm200_sor_route_get,
+        .set = gm200_sor_route_set,
+    },
+    .state = gv100_sor_state,
+    .power = nv50_sor_power,
+    .clock = gf119_sor_clock,
+    .hdmi = {
+        .ctrl = gv100_hdmi_ctrl,
+        .scdc = gm200_hdmi_scdc,
+    },
+    .dp = {
+        .lanes = { 0, 1, 2, 3 },
+        .links = gf119_sor_dp_links,
+        .power = g94_sor_dp_power,
+        .pattern = gm107_sor_dp_pattern,
+        .drive = gm200_sor_dp_drive,
+        .audio = gv100_sor_dp_audio,
+        .audio_sym = gv100_sor_dp_audio_sym,
+        .watermark = gv100_sor_dp_watermark,
+    },
+};
+
 int
 gv100_sor_new(struct nvkm_disp *disp, int id)
 {
+    struct nvkm_device *device = disp->engine.subdev.device;
+    u32 hda;
+
+    if (!((hda = nvkm_rd32(device, 0x08a15c)) & 0x40000000))
+        hda = nvkm_rd32(device, 0x118fb0) >> 8;
+
+    if (hda & BIT(id))
+        return nvkm_ior_new_(&gv100_sor_hda, disp, SOR, id);
     return nvkm_ior_new_(&gv100_sor, disp, SOR, id);
 }
@@ -62,7 +62,7 @@ tu102_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
 }
 
 static const struct nvkm_ior_func
-tu102_sor = {
+tu102_sor_hda = {
     .route = {
         .get = gm200_sor_route_get,
        .set = gm200_sor_route_set,
@@ -92,8 +92,38 @@ tu102_sor = {
     },
 };
 
+static const struct nvkm_ior_func
+tu102_sor = {
+    .route = {
+        .get = gm200_sor_route_get,
+        .set = gm200_sor_route_set,
+    },
+    .state = gv100_sor_state,
+    .power = nv50_sor_power,
+    .clock = gf119_sor_clock,
+    .hdmi = {
+        .ctrl = gv100_hdmi_ctrl,
+        .scdc = gm200_hdmi_scdc,
+    },
+    .dp = {
+        .lanes = { 0, 1, 2, 3 },
+        .links = tu102_sor_dp_links,
+        .power = g94_sor_dp_power,
+        .pattern = gm107_sor_dp_pattern,
+        .drive = gm200_sor_dp_drive,
+        .vcpi = tu102_sor_dp_vcpi,
+        .audio = gv100_sor_dp_audio,
+        .audio_sym = gv100_sor_dp_audio_sym,
+        .watermark = gv100_sor_dp_watermark,
+    },
+};
+
 int
 tu102_sor_new(struct nvkm_disp *disp, int id)
 {
+    struct nvkm_device *device = disp->engine.subdev.device;
+    u32 hda = nvkm_rd32(device, 0x08a15c);
+
+    if (hda & BIT(id))
+        return nvkm_ior_new_(&tu102_sor_hda, disp, SOR, id);
     return nvkm_ior_new_(&tu102_sor, disp, SOR, id);
 }
@@ -352,7 +352,7 @@ gk20a_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
 
 static const struct gf100_gr_fwif
 gk20a_gr_fwif[] = {
-    { -1, gk20a_gr_load, &gk20a_gr },
+    { 0, gk20a_gr_load, &gk20a_gr },
     {}
 };
 
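Finally, the gk20a regression fix flips the fwif table entry's version from -1 to 0. As I read the nvkm firmware interface, a negative version appears to mark entries that run without external firmware files, while gk20a's graphics engine does need them loaded, so a non-negative version is required for the firmware lookup to actually happen. A generic sketch of that kind of table dispatch, with a hypothetical helper rather than the nvkm API:

    #include <stdio.h>

    /* Hypothetical model of a firmware-interface table: entries with a negative
     * version need no firmware files; entries with version >= 0 trigger a load. */
    struct fwif { int version; const char *name; };

    static int select_fwif(const struct fwif *fwif)
    {
        for (; fwif->name; fwif++) {
            if (fwif->version < 0) {
                printf("%s: no external firmware needed\n", fwif->name);
                return 0;
            }
            printf("%s: loading firmware set version %d\n", fwif->name, fwif->version);
            return 0; /* a real implementation would call the firmware loader here */
        }
        return -1;
    }

    int main(void)
    {
        const struct fwif gk20a[] = { { 0, "gk20a" }, { 0, NULL } };
        return select_fwif(gk20a);
    }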