Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atom.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v12_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 30
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 45
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h | 26
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 21
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h | 132
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 22
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 66
26 files changed, 311 insertions, 169 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index f85ace0384d2..1f5a296f5ed2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -147,6 +147,7 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
struct acpi_buffer *params)
{
acpi_status status;
+ union acpi_object *obj;
union acpi_object atif_arg_elements[2];
struct acpi_object_list atif_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -169,16 +170,24 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
status = acpi_evaluate_object(atif->handle, NULL, &atif_arg,
&buffer);
+ obj = (union acpi_object *)buffer.pointer;
- /* Fail only if calling the method fails and ATIF is supported */
+ /* Fail if calling the method fails and ATIF is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
acpi_format_exception(status));
- kfree(buffer.pointer);
+ kfree(obj);
return NULL;
}
- return buffer.pointer;
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ DRM_DEBUG_DRIVER("bad object returned from ATIF: %d\n",
+ obj->type);
+ kfree(obj);
+ return NULL;
+ }
+
+ return obj;
}
/**
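Note: the hunk above adds a type check on the object returned by acpi_evaluate_object() before passing it to callers. A minimal sketch of that pattern follows; evaluate_buffer_method() and its arguments are illustrative, not the driver's real entry point.

```c
/* Sketch only: validate an ACPI_TYPE_BUFFER result before handing it back. */
#include <linux/acpi.h>
#include <linux/slab.h>

static union acpi_object *evaluate_buffer_method(acpi_handle handle,
						 acpi_string method)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;

	status = acpi_evaluate_object(handle, method, NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		kfree(buffer.pointer);		/* may be NULL; kfree(NULL) is fine */
		return NULL;
	}

	obj = buffer.pointer;
	if (!obj || obj->type != ACPI_TYPE_BUFFER) {
		kfree(obj);			/* reject unexpected object types */
		return NULL;
	}

	return obj;				/* caller frees with kfree() */
}
```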
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index ce5ca304dba9..fa572ba7f9fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1439,8 +1439,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
list_add_tail(&vm->vm_list_node,
&(vm->process_info->vm_list_head));
vm->process_info->n_vms++;
-
- *ef = dma_fence_get(&vm->process_info->eviction_fence->base);
+ if (ef)
+ *ef = dma_fence_get(&vm->process_info->eviction_fence->base);
mutex_unlock(&vm->process_info->lock);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 1e475eb01417..d891ab779ca7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -265,7 +265,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
/* Only a single BO list is allowed to simplify handling. */
if (p->bo_list)
- ret = -EINVAL;
+ goto free_partial_kdata;
ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 83e54697f0ee..f1ffab5a1eae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -1635,11 +1635,9 @@ int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
{
int r;
- if (!amdgpu_sriov_vf(adev)) {
- r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
- if (r)
- return r;
- }
+ r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
+ if (r)
+ return r;
r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader);
if (r)
@@ -1650,8 +1648,7 @@ int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev)
{
- if (!amdgpu_sriov_vf(adev))
- device_remove_file(adev->dev, &dev_attr_enforce_isolation);
+ device_remove_file(adev->dev, &dev_attr_enforce_isolation);
device_remove_file(adev->dev, &dev_attr_run_cleaner_shader);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 10b61ff63802..7d4b540340e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -1203,8 +1203,10 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
- if (r)
+ if (r) {
+ amdgpu_mes_unlock(&adev->mes);
goto clean_up_memory;
+ }
amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);
@@ -1237,7 +1239,6 @@ clean_up_ring:
amdgpu_ring_fini(ring);
clean_up_memory:
kfree(ring);
- amdgpu_mes_unlock(&adev->mes);
return r;
}
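Note: the hunk above drops the mutex unlock from the shared cleanup label and performs it on the failing branch instead, so paths reaching the label without the lock held stay correct. A generic sketch of that locking shape, with illustrative types rather than the MES structures:

```c
/* Sketch: release the lock on the failing branch, keep the cleanup label lock-free. */
#include <linux/mutex.h>

struct ring_ctx {
	struct mutex lock;
	int nrings;
};

static int add_ring(struct ring_ctx *c, int (*init)(struct ring_ctx *))
{
	int r;

	mutex_lock(&c->lock);
	r = init(c);
	if (r) {
		mutex_unlock(&c->lock);	/* drop it here ... */
		goto clean_up;		/* ... so the label never needs it */
	}
	c->nrings++;
	mutex_unlock(&c->lock);
	return 0;

clean_up:
	/* teardown that must not run under c->lock goes here */
	return r;
}
```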
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index 09715b506468..81d195d366ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -27,7 +27,7 @@
#include <linux/slab.h>
#include <linux/string_helpers.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <drm/drm_util.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 8d27421689c9..a37a6801c9ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -621,7 +621,7 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
if (amdgpu_mes_log_enable) {
mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
- mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
+ mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr + pipe * AMDGPU_MES_LOG_BUFFER_SIZE;
}
return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
@@ -1336,7 +1336,7 @@ static int mes_v12_0_sw_init(void *handle)
adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini;
adev->mes.enable_legacy_queue_map = true;
- adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE;
+ adev->mes.event_log_size = adev->enable_uni_mes ? (AMDGPU_MAX_MES_PIPES * AMDGPU_MES_LOG_BUFFER_SIZE) : AMDGPU_MES_LOG_BUFFER_SIZE;
r = amdgpu_mes_init(adev);
if (r)
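Note: with unified MES, the hunks above size the event log for every pipe and point each pipe at its own slice of the allocation. A small sketch of that arithmetic; LOG_BUF_SIZE and MAX_PIPES are stand-ins for AMDGPU_MES_LOG_BUFFER_SIZE and AMDGPU_MAX_MES_PIPES, whose real values live in the driver headers.

```c
/* Sketch: carve one GPU allocation into fixed-size per-pipe log slices. */
#include <linux/types.h>

#define LOG_BUF_SIZE	0x4000		/* placeholder size */
#define MAX_PIPES	2		/* placeholder pipe count */

static inline u64 pipe_log_addr(u64 base, int pipe)
{
	/* pipe 0 starts at base, pipe 1 at base + LOG_BUF_SIZE, ... */
	return base + (u64)pipe * LOG_BUF_SIZE;
}

static inline u32 total_log_size(bool per_pipe)
{
	/* the allocation must cover every pipe's slice */
	return per_pipe ? MAX_PIPES * LOG_BUF_SIZE : LOG_BUF_SIZE;
}
```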
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index a8763496aed3..9288f37a3cc5 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -51,6 +51,12 @@ MODULE_FIRMWARE("amdgpu/sdma_7_0_1.bin");
#define SDMA0_HYP_DEC_REG_END 0x589a
#define SDMA1_HYP_DEC_REG_OFFSET 0x20
+/*define for compression field for sdma7*/
+#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_offset 0
+#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_mask 0x00000001
+#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_shift 16
+#define SDMA_PKT_CONSTANT_FILL_HEADER_COMPRESS(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_compress_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_compress_shift)
+
static const struct amdgpu_hwip_reg_entry sdma_reg_list_7_0[] = {
SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS_REG),
SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS1_REG),
@@ -1724,7 +1730,8 @@ static void sdma_v7_0_emit_fill_buffer(struct amdgpu_ib *ib,
uint64_t dst_offset,
uint32_t byte_count)
{
- ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_CONST_FILL);
+ ib->ptr[ib->length_dw++] = SDMA_PKT_CONSTANT_FILL_HEADER_OP(SDMA_OP_CONST_FILL) |
+ SDMA_PKT_CONSTANT_FILL_HEADER_COMPRESS(1);
ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = src_data;
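Note: the new COMPRESS() macro above masks its argument to one bit and shifts it to bit 16 of the packet header, which is then OR-ed with the opcode. A standalone sketch of how that dword is composed; the opcode value and the [7:0] opcode width are placeholders mirroring the macros, not taken from the SDMA packet spec.

```c
/* Sketch: composing the constant-fill header dword. */
#include <stdint.h>
#include <stdio.h>

#define FILL_HEADER_OP(op)	((uint32_t)(op) & 0xff)		/* opcode in bits [7:0] (assumed) */
#define FILL_HEADER_COMPRESS(x)	(((uint32_t)(x) & 0x1) << 16)	/* compression enable at bit 16 */

int main(void)
{
	uint32_t header = FILL_HEADER_OP(0x0b) | FILL_HEADER_COMPRESS(1);

	printf("header = 0x%08x\n", header);	/* prints 0x0001000b */
	return 0;
}
```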
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 9044bdb38cf4..3e6b4736a7fe 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1148,7 +1148,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM)
size >>= 1;
- WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size));
+ atomic64_add(PAGE_ALIGN(size), &pdd->vram_usage);
}
mutex_unlock(&p->mutex);
@@ -1219,7 +1219,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
kfd_process_device_remove_obj_handle(
pdd, GET_IDR_HANDLE(args->handle));
- WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);
+ atomic64_sub(size, &pdd->vram_usage);
err_unlock:
err_pdd:
@@ -2347,7 +2347,7 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
bo_bucket->restored_offset = offset;
/* Update the VRAM usage count */
- WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + bo_bucket->size);
+ atomic64_add(bo_bucket->size, &pdd->vram_usage);
}
return 0;
}
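Note: the hunks above (together with the kfd_priv.h change below) switch vram_usage from a plain 64-bit counter updated with WRITE_ONCE/READ_ONCE to an atomic64_t, so concurrent allocation and free paths cannot lose updates. A minimal sketch of that counter pattern; the struct and helpers are illustrative, not the KFD code.

```c
/* Sketch: an atomic64_t accumulator updated from concurrent paths without a lock. */
#include <linux/atomic.h>
#include <linux/types.h>

struct gpu_accounting {
	atomic64_t vram_usage;		/* bytes */
};

static void account_alloc(struct gpu_accounting *acct, u64 bytes)
{
	atomic64_add(bytes, &acct->vram_usage);
}

static void account_free(struct gpu_accounting *acct, u64 bytes)
{
	atomic64_sub(bytes, &acct->vram_usage);
}

static u64 account_read(struct gpu_accounting *acct)
{
	return atomic64_read(&acct->vram_usage);
}
```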
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d6530febabad..26e48fdc8728 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -775,7 +775,7 @@ struct kfd_process_device {
enum kfd_pdd_bound bound;
/* VRAM usage */
- uint64_t vram_usage;
+ atomic64_t vram_usage;
struct attribute attr_vram;
char vram_filename[MAX_SYSFS_FILENAME_LEN];
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index d07acf1b2f93..d4aa843aacfd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -332,7 +332,7 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
} else if (strncmp(attr->name, "vram_", 5) == 0) {
struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
attr_vram);
- return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
+ return snprintf(buffer, PAGE_SIZE, "%llu\n", atomic64_read(&pdd->vram_usage));
} else if (strncmp(attr->name, "sdma_", 5) == 0) {
struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
attr_sdma);
@@ -1625,7 +1625,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
pdd->bound = PDD_UNBOUND;
pdd->already_dequeued = false;
pdd->runtime_inuse = false;
- pdd->vram_usage = 0;
+ atomic64_set(&pdd->vram_usage, 0);
pdd->sdma_past_activity_counter = 0;
pdd->user_gpu_id = dev->id;
atomic64_set(&pdd->evict_duration_counter, 0);
@@ -1702,12 +1702,15 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm,
&p->kgd_process_info,
- &ef);
+ p->ef ? NULL : &ef);
if (ret) {
dev_err(dev->adev->dev, "Failed to create process VM object\n");
return ret;
}
- RCU_INIT_POINTER(p->ef, ef);
+
+ if (!p->ef)
+ RCU_INIT_POINTER(p->ef, ef);
+
pdd->drm_priv = drm_file->private_data;
ret = kfd_process_device_reserve_ib_mem(pdd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 04e746923697..1893c27746a5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -405,6 +405,27 @@ static void svm_range_bo_release(struct kref *kref)
spin_lock(&svm_bo->list_lock);
}
spin_unlock(&svm_bo->list_lock);
+
+ if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
+ struct kfd_process_device *pdd;
+ struct kfd_process *p;
+ struct mm_struct *mm;
+
+ mm = svm_bo->eviction_fence->mm;
+ /*
+ * The forked child process takes svm_bo device pages ref, svm_bo could be
+ * released after parent process is gone.
+ */
+ p = kfd_lookup_process_by_mm(mm);
+ if (p) {
+ pdd = kfd_get_process_device_data(svm_bo->node, p);
+ if (pdd)
+ atomic64_sub(amdgpu_bo_size(svm_bo->bo), &pdd->vram_usage);
+ kfd_unref_process(p);
+ }
+ mmput(mm);
+ }
+
if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
/* We're not in the eviction worker. Signal the fence. */
dma_fence_signal(&svm_bo->eviction_fence->base);
@@ -532,6 +553,7 @@ int
svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
bool clear)
{
+ struct kfd_process_device *pdd;
struct amdgpu_bo_param bp;
struct svm_range_bo *svm_bo;
struct amdgpu_bo_user *ubo;
@@ -623,6 +645,10 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
list_add(&prange->svm_bo_list, &svm_bo->range_list);
spin_unlock(&svm_bo->list_lock);
+ pdd = svm_range_get_pdd_by_node(prange, node);
+ if (pdd)
+ atomic64_add(amdgpu_bo_size(bo), &pdd->vram_usage);
+
return 0;
reserve_bo_failed:
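Note: svm_range_bo_release() above only touches the eviction fence's mm after mmget_not_zero() succeeds, since the owning process may already be gone when a forked child drops the last reference. A small sketch of that guard, using only the core mm APIs; the callback shape is illustrative.

```c
/* Sketch: dereference a saved mm_struct only while holding an mm_users reference. */
#include <linux/sched/mm.h>
#include <linux/mm_types.h>

/* Runs fn(mm) only if the owning task has not fully exited yet. */
static bool with_live_mm(struct mm_struct *mm, void (*fn)(struct mm_struct *))
{
	if (!mmget_not_zero(mm))	/* mm_users already hit zero */
		return false;

	fn(mm);				/* safe: we hold an mm_users reference */
	mmput(mm);
	return true;
}
```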
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 6e79028c5d78..13421a58210d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -770,6 +770,12 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
return;
}
+ /* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. */
+ if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) {
+ DRM_INFO("Skip DMUB HPD IRQ callback in suspend/resume\n");
+ return;
+ }
+
link_index = notify->link_index;
link = adev->dm.dc->links[link_index];
dev = adev->dm.ddev;
@@ -2026,7 +2032,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
}
- if (adev->dm.dc->caps.ips_support && adev->dm.dc->config.disable_ips == DMUB_IPS_ENABLE)
+ if (adev->dm.dc->caps.ips_support &&
+ adev->dm.dc->config.disable_ips != DMUB_IPS_DISABLE_ALL)
adev->dm.idle_workqueue = idle_create_workqueue(adev);
if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
@@ -2965,10 +2972,11 @@ static int dm_suspend(void *handle)
hpd_rx_irq_work_suspend(dm);
- if (adev->dm.dc->caps.ips_support)
- dc_allow_idle_optimizations(adev->dm.dc, true);
-
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
+
+ if (dm->dc->caps.ips_support && adev->in_s0ix)
+ dc_allow_idle_optimizations(dm->dc, true);
+
dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
return 0;
@@ -6735,12 +6743,21 @@ create_stream_for_sink(struct drm_connector *connector,
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
stream->signal == SIGNAL_TYPE_EDP) {
+ const struct dc_edid_caps *edid_caps;
+ unsigned int disable_colorimetry = 0;
+
+ if (aconnector->dc_sink) {
+ edid_caps = &aconnector->dc_sink->edid_caps;
+ disable_colorimetry = edid_caps->panel_patch.disable_colorimetry;
+ }
+
//
// should decide stream support vsc sdp colorimetry capability
// before building vsc info packet
//
stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
- stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;
+ stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED &&
+ !disable_colorimetry;
if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22)
tf = TRANSFER_FUNC_GAMMA_22;
@@ -8357,7 +8374,8 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
if (amdgpu_ip_version(adev, DCE_HWIP, 0) <
IP_VERSION(3, 5, 0) ||
acrtc_state->stream->link->psr_settings.psr_version <
- DC_PSR_VERSION_UNSUPPORTED) {
+ DC_PSR_VERSION_UNSUPPORTED ||
+ !(adev->flags & AMD_IS_APU)) {
timing = &acrtc_state->stream->timing;
/* at least 2 frames */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 50109d13d967..eea317dcbe8c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -44,6 +44,7 @@
#include "dm_helpers.h"
#include "ddc_service_types.h"
+#include "clk_mgr.h"
static u32 edid_extract_panel_id(struct edid *edid)
{
@@ -73,6 +74,10 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.remove_sink_ext_caps = true;
break;
+ case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
+ DRM_DEBUG_DRIVER("Disabling VSC on monitor with panel id %X\n", panel_id);
+ edid_caps->panel_patch.disable_colorimetry = true;
+ break;
default:
return;
}
@@ -1117,6 +1122,8 @@ bool dm_helpers_dp_handle_test_pattern_request(
struct pipe_ctx *pipe_ctx = NULL;
struct amdgpu_dm_connector *aconnector = link->priv;
struct drm_device *dev = aconnector->base.dev;
+ struct dc_state *dc_state = ctx->dc->current_state;
+ struct clk_mgr *clk_mgr = ctx->dc->clk_mgr;
int i;
for (i = 0; i < MAX_PIPES; i++) {
@@ -1217,6 +1224,16 @@ bool dm_helpers_dp_handle_test_pattern_request(
pipe_ctx->stream->test_pattern.type = test_pattern;
pipe_ctx->stream->test_pattern.color_space = test_pattern_color_space;
+ /* Temp W/A for compliance test failure */
+ dc_state->bw_ctx.bw.dcn.clk.p_state_change_support = false;
+ dc_state->bw_ctx.bw.dcn.clk.dramclk_khz = clk_mgr->dc_mode_softmax_enabled ?
+ clk_mgr->bw_params->dc_mode_softmax_memclk : clk_mgr->bw_params->max_memclk_mhz;
+ dc_state->bw_ctx.bw.dcn.clk.idle_dramclk_khz = dc_state->bw_ctx.bw.dcn.clk.dramclk_khz;
+ ctx->dc->clk_mgr->funcs->update_clocks(
+ ctx->dc->clk_mgr,
+ dc_state,
+ false);
+
dc_link_dp_set_test_pattern(
(struct dc_link *) link,
test_pattern,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 83a31b97e96b..a08e8a0b696c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -1027,6 +1027,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
int remaining_to_try = 0;
int ret;
uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
+ int var_pbn;
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled
@@ -1057,13 +1058,18 @@ static int try_disable_dsc(struct drm_atomic_state *state,
break;
DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index);
+ var_pbn = vars[next_index].pbn;
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn);
- if (ret < 0)
+ if (ret < 0) {
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC index #%d, failed to set pbn to the state, %d\n",
+ __func__, __LINE__, next_index, ret);
+ vars[next_index].pbn = var_pbn;
return ret;
+ }
ret = drm_dp_mst_atomic_check(state);
if (ret == 0) {
@@ -1071,14 +1077,17 @@ static int try_disable_dsc(struct drm_atomic_state *state,
vars[next_index].dsc_enabled = false;
vars[next_index].bpp_x16 = 0;
} else {
- DRM_DEBUG_DRIVER("MST_DSC index #%d, restore minimum compression\n", next_index);
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000);
+ DRM_DEBUG_DRIVER("MST_DSC index #%d, restore optimized pbn value\n", next_index);
+ vars[next_index].pbn = var_pbn;
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn);
- if (ret < 0)
+ if (ret < 0) {
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC index #%d, failed to set pbn to the state, %d\n",
+ __func__, __LINE__, next_index, ret);
return ret;
+ }
}
tried[next_index] = true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 5c39390ecbd5..a88f1b6ea64c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -5065,11 +5065,26 @@ static bool update_planes_and_stream_v3(struct dc *dc,
return true;
}
+static void clear_update_flags(struct dc_surface_update *srf_updates,
+ int surface_count, struct dc_stream_state *stream)
+{
+ int i;
+
+ if (stream)
+ stream->update_flags.raw = 0;
+
+ for (i = 0; i < surface_count; i++)
+ if (srf_updates[i].surface)
+ srf_updates[i].surface->update_flags.raw = 0;
+}
+
bool dc_update_planes_and_stream(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update)
{
+ bool ret = false;
+
dc_exit_ips_for_hw_access(dc);
/*
* update planes and stream version 3 separates FULL and FAST updates
@@ -5086,10 +5101,16 @@ bool dc_update_planes_and_stream(struct dc *dc,
* features as they are now transparent to the new sequence.
*/
if (dc->ctx->dce_version >= DCN_VERSION_4_01)
- return update_planes_and_stream_v3(dc, srf_updates,
+ ret = update_planes_and_stream_v3(dc, srf_updates,
surface_count, stream, stream_update);
- return update_planes_and_stream_v2(dc, srf_updates,
+ else
+ ret = update_planes_and_stream_v2(dc, srf_updates,
surface_count, stream, stream_update);
+
+ if (ret)
+ clear_update_flags(srf_updates, surface_count, stream);
+
+ return ret;
}
void dc_commit_updates_for_stream(struct dc *dc,
@@ -5099,6 +5120,8 @@ void dc_commit_updates_for_stream(struct dc *dc,
struct dc_stream_update *stream_update,
struct dc_state *state)
{
+ bool ret = false;
+
dc_exit_ips_for_hw_access(dc);
/* TODO: Since change commit sequence can have a huge impact,
* we decided to only enable it for DCN3x. However, as soon as
@@ -5106,17 +5129,17 @@ void dc_commit_updates_for_stream(struct dc *dc,
* the new sequence for all ASICs.
*/
if (dc->ctx->dce_version >= DCN_VERSION_4_01) {
- update_planes_and_stream_v3(dc, srf_updates, surface_count,
+ ret = update_planes_and_stream_v3(dc, srf_updates, surface_count,
stream, stream_update);
- return;
- }
- if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
- update_planes_and_stream_v2(dc, srf_updates, surface_count,
+ } else if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
+ ret = update_planes_and_stream_v2(dc, srf_updates, surface_count,
stream, stream_update);
- return;
- }
- update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
- stream_update, state);
+ } else
+ ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
+ stream_update, state);
+
+ if (ret)
+ clear_update_flags(srf_updates, surface_count, stream);
}
uint8_t dc_get_current_stream_count(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index fd6dca735714..6d7989b751e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -178,6 +178,7 @@ struct dc_panel_patch {
unsigned int skip_avmute;
unsigned int mst_start_top_delay;
unsigned int remove_sink_ext_caps;
+ unsigned int disable_colorimetry;
};
struct dc_edid_caps {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c
index c4c52173ef22..11c904ae2958 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c
@@ -303,7 +303,6 @@ void build_unoptimized_policy_settings(enum dml_project_id project, struct dml_m
if (project == dml_project_dcn35 ||
project == dml_project_dcn351) {
policy->DCCProgrammingAssumesScanDirectionUnknownFinal = false;
- policy->EnhancedPrefetchScheduleAccelerationFinal = 0;
policy->AllowForPStateChangeOrStutterInVBlankFinal = dml_prefetch_support_uclk_fclk_and_stutter_if_possible; /*new*/
policy->UseOnlyMaxPrefetchModes = 1;
}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index da9101b83e8c..70abd32ce2ad 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -766,6 +766,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dmub_reallow_idle = false,
.static_screen_wait_frames = 2,
.notify_dpia_hr_bw = true,
+ .min_disp_clk_khz = 50000,
};
static const struct dc_panel_config panel_config_defaults = {
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 3cd52e7a9c77..95838c7ab054 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -841,6 +841,8 @@ bool is_psr_su_specific_panel(struct dc_link *link)
isPSRSUSupported = false;
else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
isPSRSUSupported = false;
+ else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x01)
+ isPSRSUSupported = false;
else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1)
isPSRSUSupported = true;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index 9118fcddbf11..227bf0e84a13 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -60,7 +60,7 @@ struct vi_dpm_level {
struct vi_dpm_table {
uint32_t count;
- struct vi_dpm_level dpm_level[] __counted_by(count);
+ struct vi_dpm_level dpm_level[];
};
#define PCIE_PERF_REQ_REMOVE_REGISTRY 0
@@ -91,7 +91,7 @@ struct phm_set_power_state_input {
struct phm_clock_array {
uint32_t count;
- uint32_t values[] __counted_by(count);
+ uint32_t values[];
};
struct phm_clock_voltage_dependency_record {
@@ -123,7 +123,7 @@ struct phm_acpclock_voltage_dependency_record {
struct phm_clock_voltage_dependency_table {
uint32_t count;
- struct phm_clock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_clock_voltage_dependency_record entries[];
};
struct phm_phase_shedding_limits_record {
@@ -140,7 +140,7 @@ struct phm_uvd_clock_voltage_dependency_record {
struct phm_uvd_clock_voltage_dependency_table {
uint8_t count;
- struct phm_uvd_clock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_uvd_clock_voltage_dependency_record entries[];
};
struct phm_acp_clock_voltage_dependency_record {
@@ -150,7 +150,7 @@ struct phm_acp_clock_voltage_dependency_record {
struct phm_acp_clock_voltage_dependency_table {
uint32_t count;
- struct phm_acp_clock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_acp_clock_voltage_dependency_record entries[];
};
struct phm_vce_clock_voltage_dependency_record {
@@ -161,32 +161,32 @@ struct phm_vce_clock_voltage_dependency_record {
struct phm_phase_shedding_limits_table {
uint32_t count;
- struct phm_phase_shedding_limits_record entries[] __counted_by(count);
+ struct phm_phase_shedding_limits_record entries[];
};
struct phm_vceclock_voltage_dependency_table {
uint8_t count;
- struct phm_vceclock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_vceclock_voltage_dependency_record entries[];
};
struct phm_uvdclock_voltage_dependency_table {
uint8_t count;
- struct phm_uvdclock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_uvdclock_voltage_dependency_record entries[];
};
struct phm_samuclock_voltage_dependency_table {
uint8_t count;
- struct phm_samuclock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_samuclock_voltage_dependency_record entries[];
};
struct phm_acpclock_voltage_dependency_table {
uint32_t count;
- struct phm_acpclock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_acpclock_voltage_dependency_record entries[];
};
struct phm_vce_clock_voltage_dependency_table {
uint8_t count;
- struct phm_vce_clock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_vce_clock_voltage_dependency_record entries[];
};
@@ -393,7 +393,7 @@ union phm_cac_leakage_record {
struct phm_cac_leakage_table {
uint32_t count;
- union phm_cac_leakage_record entries[] __counted_by(count);
+ union phm_cac_leakage_record entries[];
};
struct phm_samu_clock_voltage_dependency_record {
@@ -404,7 +404,7 @@ struct phm_samu_clock_voltage_dependency_record {
struct phm_samu_clock_voltage_dependency_table {
uint8_t count;
- struct phm_samu_clock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_samu_clock_voltage_dependency_record entries[];
};
struct phm_cac_tdp_table {
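Note: the hunks above drop __counted_by() from these flexible-array tables. The attribute makes the count member a hard bound for fortified and UBSAN builds, so it is only safe when count is assigned before any array element is accessed; removing it suggests existing powerplay callers do not satisfy that contract. A sketch of the ordering the attribute expects, with an illustrative struct rather than the real tables:

```c
/* Sketch: the ordering __counted_by() expects from allocation code. */
#include <linux/overflow.h>
#include <linux/slab.h>

struct clk_table {
	u32 count;
	u32 values[] __counted_by(count);
};

static struct clk_table *clk_table_alloc(u32 n)
{
	struct clk_table *t = kzalloc(struct_size(t, values, n), GFP_KERNEL);

	if (!t)
		return NULL;

	t->count = n;			/* set the bound first ... */
	for (u32 i = 0; i < n; i++)	/* ... then index values[] */
		t->values[i] = 0;

	return t;
}
```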
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index bb3bc68dfc39..80e60ea2d11e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1234,6 +1234,14 @@ static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
}
}
+static bool smu_is_workload_profile_available(struct smu_context *smu,
+ u32 profile)
+{
+ if (profile >= PP_SMC_POWER_PROFILE_COUNT)
+ return false;
+ return smu->workload_map && smu->workload_map[profile].valid_mapping;
+}
+
static int smu_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1264,7 +1272,12 @@ static int smu_sw_init(void *handle)
smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
- smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+
+ if (smu->is_apu ||
+ !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
+ smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+ else
+ smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
@@ -2226,7 +2239,7 @@ static int smu_bump_power_profile_mode(struct smu_context *smu,
static int smu_adjust_power_state_dynamic(struct smu_context *smu,
enum amd_dpm_forced_level level,
bool skip_display_settings,
- bool force_update)
+ bool init)
{
int ret = 0;
int index = 0;
@@ -2255,7 +2268,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
}
}
- if (force_update || smu_dpm_ctx->dpm_level != level) {
+ if (smu_dpm_ctx->dpm_level != level) {
ret = smu_asic_set_performance_level(smu, level);
if (ret) {
dev_err(smu->adev->dev, "Failed to set performance level!");
@@ -2272,7 +2285,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
workload[0] = smu->workload_setting[index];
- if (force_update || smu->power_profile_mode != workload[0])
+ if (init || smu->power_profile_mode != workload[0])
smu_bump_power_profile_mode(smu, workload, 0);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h
index ee457a6f0813..c2fd0a4a13e5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h
@@ -25,7 +25,7 @@
#define SMU14_DRIVER_IF_V14_0_H
//Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x18
+#define PPTABLE_VERSION 0x1B
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8
@@ -145,7 +145,7 @@ typedef enum {
} FEATURE_BTC_e;
// Debug Overrides Bitmask
-#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000001
+#define DEBUG_OVERRIDE_NOT_USE 0x00000001
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_DCN_FCLK 0x00000002
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_MP0_FCLK 0x00000004
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_DCFCLK 0x00000008
@@ -161,6 +161,7 @@ typedef enum {
#define DEBUG_OVERRIDE_ENABLE_SOC_VF_BRINGUP_MODE 0x00002000
#define DEBUG_OVERRIDE_ENABLE_PER_WGP_RESIENCY 0x00004000
#define DEBUG_OVERRIDE_DISABLE_MEMORY_VOLTAGE_SCALING 0x00008000
+#define DEBUG_OVERRIDE_DFLL_BTC_FCW_LOG 0x00010000
// VR Mapping Bit Defines
#define VR_MAPPING_VR_SELECT_MASK 0x01
@@ -391,6 +392,21 @@ typedef struct {
EccInfo_t EccInfo[24];
} EccInfoTable_t;
+#define EPCS_HIGH_POWER 600
+#define EPCS_NORMAL_POWER 450
+#define EPCS_LOW_POWER 300
+#define EPCS_SHORTED_POWER 150
+#define EPCS_NO_BOOTUP 0
+
+typedef enum{
+ EPCS_SHORTED_LIMIT,
+ EPCS_LOW_POWER_LIMIT,
+ EPCS_NORMAL_POWER_LIMIT,
+ EPCS_HIGH_POWER_LIMIT,
+ EPCS_NOT_CONFIGURED,
+ EPCS_STATUS_COUNT,
+} EPCS_STATUS_e;
+
//D3HOT sequences
typedef enum {
BACO_SEQUENCE,
@@ -662,7 +678,7 @@ typedef enum {
} PP_GRTAVFS_FW_SEP_FUSE_e;
#define PP_NUM_RTAVFS_PWL_ZONES 5
-
+#define PP_NUM_PSM_DIDT_PWL_ZONES 3
// VBIOS or PPLIB configures telemetry slope and offset. Only slope expected to be set for SVI3
// Slope Q1.7, Offset Q1.2
@@ -746,10 +762,10 @@ typedef struct {
uint16_t Padding;
//Frequency changes
- int16_t GfxclkFmin; // MHz
- int16_t GfxclkFmax; // MHz
- uint16_t UclkFmin; // MHz
- uint16_t UclkFmax; // MHz
+ int16_t GfxclkFoffset;
+ uint16_t Padding1;
+ uint16_t UclkFmin;
+ uint16_t UclkFmax;
uint16_t FclkFmin;
uint16_t FclkFmax;
@@ -770,19 +786,23 @@ typedef struct {
uint8_t MaxOpTemp;
uint8_t AdvancedOdModeEnabled;
- uint8_t Padding1[3];
+ uint8_t Padding2[3];
uint16_t GfxVoltageFullCtrlMode;
uint16_t SocVoltageFullCtrlMode;
uint16_t GfxclkFullCtrlMode;
uint16_t UclkFullCtrlMode;
uint16_t FclkFullCtrlMode;
- uint16_t Padding2;
+ uint16_t Padding3;
int16_t GfxEdc;
int16_t GfxPccLimitControl;
- uint32_t Spare[10];
+ uint16_t GfxclkFmaxVmax;
+ uint8_t GfxclkFmaxVmaxTemperature;
+ uint8_t Padding4[1];
+
+ uint32_t Spare[9];
uint32_t MmHubPadding[8]; // SMU internal use. Adding here instead of external as a workaround
} OverDriveTable_t;
@@ -802,8 +822,8 @@ typedef struct {
uint16_t VddSocVmax;
//gfxclk
- int16_t GfxclkFmin; // MHz
- int16_t GfxclkFmax; // MHz
+ int16_t GfxclkFoffset;
+ uint16_t Padding;
//uclk
uint16_t UclkFmin; // MHz
uint16_t UclkFmax; // MHz
@@ -828,7 +848,7 @@ typedef struct {
uint8_t FanZeroRpmEnable;
//temperature
uint8_t MaxOpTemp;
- uint8_t Padding[2];
+ uint8_t Padding1[2];
//Full Ctrl
uint16_t GfxVoltageFullCtrlMode;
@@ -839,7 +859,7 @@ typedef struct {
//EDC
int16_t GfxEdc;
int16_t GfxPccLimitControl;
- int16_t Padding1;
+ int16_t Padding2;
uint32_t Spare[5];
} OverDriveLimits_t;
@@ -987,8 +1007,9 @@ typedef struct {
uint16_t BaseClockDc;
uint16_t GameClockDc;
uint16_t BoostClockDc;
-
- uint32_t Reserved[4];
+ uint16_t MaxReportedClock;
+ uint16_t Padding;
+ uint32_t Reserved[3];
} DriverReportedClocks_t;
typedef struct {
@@ -1132,7 +1153,7 @@ typedef struct {
uint32_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz
uint16_t GfxclkAibFmax;
- uint16_t GfxclkFreqCap;
+ uint16_t GfxDpmPadding;
//GFX Idle Power Settings
uint16_t GfxclkFgfxoffEntry; // Entry in RLC stage (PLL), in Mhz
@@ -1172,8 +1193,7 @@ typedef struct {
uint32_t DvoFmaxLowScaler; //Unitless float
// GFX DCS
- uint16_t DcsGfxOffVoltage; //Voltage in mV(Q2) applied to VDDGFX when entering DCS GFXOFF phase
- uint16_t PaddingDcs;
+ uint32_t PaddingDcs;
uint16_t DcsMinGfxOffTime; //Minimum amount of time PMFW shuts GFX OFF as part of GFX DCS phase
uint16_t DcsMaxGfxOffTime; //Maximum amount of time PMFW can shut GFX OFF as part of GFX DCS phase at a stretch.
@@ -1205,8 +1225,7 @@ typedef struct {
uint16_t DalDcModeMaxUclkFreq;
uint8_t PaddingsMem[2];
//FCLK Section
- uint16_t FclkDpmDisallowPstateFreq; //Frequency which FW will target when indicated that display config cannot support P-state. Set to 0 use FW calculated value
- uint16_t PaddingFclk;
+ uint32_t PaddingFclk;
// Link DPM Settings
uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 4:PciE-gen5
@@ -1215,12 +1234,19 @@ typedef struct {
// SECTION: VDD_GFX AVFS
uint8_t OverrideGfxAvfsFuses;
- uint8_t GfxAvfsPadding[3];
+ uint8_t GfxAvfsPadding[1];
+ uint16_t DroopGBStDev;
uint32_t SocHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //new added for Soc domain
uint32_t GfxL2HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //see fusedoc for encoding
//uint32_t GfxSeHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT];
- uint32_t spare_HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT];
+
+ uint16_t PsmDidt_Vcross[PP_NUM_PSM_DIDT_PWL_ZONES-1];
+ uint32_t PsmDidt_StaticDroop_A[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t PsmDidt_StaticDroop_B[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t PsmDidt_DynDroop_A[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t PsmDidt_DynDroop_B[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t spare_HwRtAvfsFuses[19];
uint32_t SocCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT];
uint32_t GfxCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT];
@@ -1246,11 +1272,7 @@ typedef struct {
uint32_t dGbV_dT_vmin;
uint32_t dGbV_dT_vmax;
- //Unused: PMFW-9370
- uint32_t V2F_vmin_range_low;
- uint32_t V2F_vmin_range_high;
- uint32_t V2F_vmax_range_low;
- uint32_t V2F_vmax_range_high;
+ uint32_t PaddingV2F[4];
AvfsDcBtcParams_t DcBtcGfxParams;
QuadraticInt_t SSCurve_GFX;
@@ -1327,18 +1349,18 @@ typedef struct {
uint16_t PsmDidtReleaseTimer;
uint32_t PsmDidtStallPattern; //Will be written to both pattern 1 and didt_static_level_prog
// CAC EDC
- uint32_t Leakage_C0; // in IEEE float
- uint32_t Leakage_C1; // in IEEE float
- uint32_t Leakage_C2; // in IEEE float
- uint32_t Leakage_C3; // in IEEE float
- uint32_t Leakage_C4; // in IEEE float
- uint32_t Leakage_C5; // in IEEE float
- uint32_t GFX_CLK_SCALAR; // in IEEE float
- uint32_t GFX_CLK_INTERCEPT; // in IEEE float
- uint32_t GFX_CAC_M; // in IEEE float
- uint32_t GFX_CAC_B; // in IEEE float
- uint32_t VDD_GFX_CurrentLimitGuardband; // in IEEE float
- uint32_t DynToTotalCacScalar; // in IEEE
+ uint32_t CacEdcCacLeakageC0;
+ uint32_t CacEdcCacLeakageC1;
+ uint32_t CacEdcCacLeakageC2;
+ uint32_t CacEdcCacLeakageC3;
+ uint32_t CacEdcCacLeakageC4;
+ uint32_t CacEdcCacLeakageC5;
+ uint32_t CacEdcGfxClkScalar;
+ uint32_t CacEdcGfxClkIntercept;
+ uint32_t CacEdcCac_m;
+ uint32_t CacEdcCac_b;
+ uint32_t CacEdcCurrLimitGuardband;
+ uint32_t CacEdcDynToTotalCacRatio;
// GFX EDC XVMIN
uint32_t XVmin_Gfx_EdcThreshScalar;
uint32_t XVmin_Gfx_EdcEnableFreq;
@@ -1467,7 +1489,7 @@ typedef struct {
uint8_t VddqOffEnabled;
uint8_t PaddingUmcFlags[2];
- uint32_t PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued
+ uint32_t Paddign1;
uint32_t BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS
uint8_t FuseWritePowerMuxPresent;
@@ -1530,7 +1552,7 @@ typedef struct {
int16_t FuzzyFan_ErrorSetDelta;
int16_t FuzzyFan_ErrorRateSetDelta;
int16_t FuzzyFan_PwmSetDelta;
- uint16_t FuzzyFan_Reserved;
+ uint16_t FanPadding2;
uint16_t FwCtfLimit[TEMP_COUNT];
@@ -1547,9 +1569,10 @@ typedef struct {
uint16_t FanSpare[1];
uint8_t FanIntakeSensorSupport;
uint8_t FanIntakePadding;
- uint32_t FanAmbientPerfBoostThreshold;
uint32_t FanSpare2[12];
+ uint32_t ODFeatureCtrlMask;
+
uint16_t TemperatureLimit_Hynix; // In degrees Celsius. Memory temperature limit associated with Hynix
uint16_t TemperatureLimit_Micron; // In degrees Celsius. Memory temperature limit associated with Micron
uint16_t TemperatureFwCtfLimit_Hynix;
@@ -1637,7 +1660,7 @@ typedef struct {
uint16_t AverageDclk0Frequency ;
uint16_t AverageVclk1Frequency ;
uint16_t AverageDclk1Frequency ;
- uint16_t PCIeBusy ;
+ uint16_t AveragePCIeBusy ;
uint16_t dGPU_W_MAX ;
uint16_t padding ;
@@ -1665,12 +1688,12 @@ typedef struct {
uint16_t AverageGfxActivity ;
uint16_t AverageUclkActivity ;
- uint16_t Vcn0ActivityPercentage ;
+ uint16_t AverageVcn0ActivityPercentage;
uint16_t Vcn1ActivityPercentage ;
uint32_t EnergyAccumulator;
uint16_t AverageSocketPower;
- uint16_t MovingAverageTotalBoardPower;
+ uint16_t AverageTotalBoardPower;
uint16_t AvgTemperature[TEMP_COUNT];
uint16_t AvgTemperatureFanIntake;
@@ -1684,7 +1707,8 @@ typedef struct {
uint8_t ThrottlingPercentage[THROTTLER_COUNT];
- uint8_t padding1[3];
+ uint8_t VmaxThrottlingPercentage;
+ uint8_t padding1[2];
//metrics for D3hot entry/exit and driver ARM msgs
uint32_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT];
@@ -1693,7 +1717,7 @@ typedef struct {
uint16_t ApuSTAPMSmartShiftLimit;
uint16_t ApuSTAPMLimit;
- uint16_t MovingAvgApuSocketPower;
+ uint16_t AvgApuSocketPower;
uint16_t AverageUclkActivity_MAX;
@@ -1823,6 +1847,17 @@ typedef struct {
#define TABLE_TRANSFER_FAILED 0xFF
#define TABLE_TRANSFER_PENDING 0xAB
+#define TABLE_PPT_FAILED 0x100
+#define TABLE_TDC_FAILED 0x200
+#define TABLE_TEMP_FAILED 0x400
+#define TABLE_FAN_TARGET_TEMP_FAILED 0x800
+#define TABLE_FAN_STOP_TEMP_FAILED 0x1000
+#define TABLE_FAN_START_TEMP_FAILED 0x2000
+#define TABLE_FAN_PWM_MIN_FAILED 0x4000
+#define TABLE_ACOUSTIC_TARGET_RPM_FAILED 0x8000
+#define TABLE_ACOUSTIC_LIMIT_RPM_FAILED 0x10000
+#define TABLE_MGPU_ACOUSTIC_TARGET_RPM_FAILED 0x20000
+
// Table types
#define TABLE_PPTABLE 0
#define TABLE_COMBO_PPTABLE 1
@@ -1849,5 +1884,6 @@ typedef struct {
#define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
#define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
#define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
+#define IH_INTERRUPT_CONTEXT_ID_DYNAMIC_TABLE 0xA
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
index 46b456590a08..727d5b405435 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
@@ -28,7 +28,7 @@
#define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6
-#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x26
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x2E
#define FEATURE_MASK(feature) (1ULL << feature)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 1d024b122b0c..cb923e33fd6f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2555,18 +2555,16 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
workload_mask = 1 << workload_type;
/* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
- if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
- ((smu->adev->pm.fw_version == 0x004e6601) ||
- (smu->adev->pm.fw_version >= 0x004e7300))) ||
- (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
- smu->adev->pm.fw_version >= 0x00504500)) {
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- PP_SMC_POWER_PROFILE_POWERSAVING);
- if (workload_type >= 0)
- workload_mask |= 1 << workload_type;
- }
+ if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
+ ((smu->adev->pm.fw_version == 0x004e6601) ||
+ (smu->adev->pm.fw_version >= 0x004e7300))) ||
+ (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+ smu->adev->pm.fw_version >= 0x00504500)) {
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ PP_SMC_POWER_PROFILE_POWERSAVING);
+ if (workload_type >= 0)
+ workload_mask |= 1 << workload_type;
}
ret = smu_cmn_send_smc_msg_with_param(smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 5899d01fa73d..e83ea2bc7f9c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -1077,12 +1077,9 @@ static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu,
switch (od_feature_bit) {
case PP_OD_FEATURE_GFXCLK_FMIN:
- od_min_setting = overdrive_lowerlimits->GfxclkFmin;
- od_max_setting = overdrive_upperlimits->GfxclkFmin;
- break;
case PP_OD_FEATURE_GFXCLK_FMAX:
- od_min_setting = overdrive_lowerlimits->GfxclkFmax;
- od_max_setting = overdrive_upperlimits->GfxclkFmax;
+ od_min_setting = overdrive_lowerlimits->GfxclkFoffset;
+ od_max_setting = overdrive_upperlimits->GfxclkFoffset;
break;
case PP_OD_FEATURE_UCLK_FMIN:
od_min_setting = overdrive_lowerlimits->UclkFmin;
@@ -1269,10 +1266,16 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
PP_OD_FEATURE_GFXCLK_BIT))
break;
- size += sysfs_emit_at(buf, size, "OD_SCLK:\n");
- size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
- od_table->OverDriveTable.GfxclkFmin,
- od_table->OverDriveTable.GfxclkFmax);
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ const OverDriveLimits_t * const overdrive_upperlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMax;
+ const OverDriveLimits_t * const overdrive_lowerlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMin;
+
+ size += sysfs_emit_at(buf, size, "OD_SCLK_OFFSET:\n");
+ size += sysfs_emit_at(buf, size, "0: %dMhz\n1: %uMhz\n",
+ overdrive_lowerlimits->GfxclkFoffset,
+ overdrive_upperlimits->GfxclkFoffset);
break;
case SMU_OD_MCLK:
@@ -1414,7 +1417,7 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
PP_OD_FEATURE_GFXCLK_FMAX,
NULL,
&max_value);
- size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+ size += sysfs_emit_at(buf, size, "SCLK_OFFSET: %7dMhz %10uMhz\n",
min_value, max_value);
}
@@ -1796,7 +1799,7 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
int workload_type, ret = 0;
-
+ uint32_t current_profile_mode = smu->power_profile_mode;
smu->power_profile_mode = input[size];
if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
@@ -1854,6 +1857,11 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
}
}
+ if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE)
+ smu_v14_0_deep_sleep_control(smu, false);
+ else if (current_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE)
+ smu_v14_0_deep_sleep_control(smu, true);
+
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
@@ -2158,7 +2166,7 @@ static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
- gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage,
+ gpu_metrics->average_mm_activity = max(metrics->AverageVcn0ActivityPercentage,
metrics->Vcn1ActivityPercentage);
gpu_metrics->average_socket_power = metrics->AverageSocketPower;
@@ -2217,8 +2225,7 @@ static void smu_v14_0_2_dump_od_table(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
- dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin,
- od_table->OverDriveTable.GfxclkFmax);
+ dev_dbg(adev->dev, "OD: Gfxclk offset: (%d)\n", od_table->OverDriveTable.GfxclkFoffset);
dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin,
od_table->OverDriveTable.UclkFmax);
}
@@ -2309,10 +2316,8 @@ static int smu_v14_0_2_set_default_od_settings(struct smu_context *smu)
memcpy(user_od_table,
boot_od_table,
sizeof(OverDriveTableExternal_t));
- user_od_table->OverDriveTable.GfxclkFmin =
- user_od_table_bak.OverDriveTable.GfxclkFmin;
- user_od_table->OverDriveTable.GfxclkFmax =
- user_od_table_bak.OverDriveTable.GfxclkFmax;
+ user_od_table->OverDriveTable.GfxclkFoffset =
+ user_od_table_bak.OverDriveTable.GfxclkFoffset;
user_od_table->OverDriveTable.UclkFmin =
user_od_table_bak.OverDriveTable.UclkFmin;
user_od_table->OverDriveTable.UclkFmax =
@@ -2441,22 +2446,6 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
}
switch (input[i]) {
- case 0:
- smu_v14_0_2_get_od_setting_limits(smu,
- PP_OD_FEATURE_GFXCLK_FMIN,
- &minimum,
- &maximum);
- if (input[i + 1] < minimum ||
- input[i + 1] > maximum) {
- dev_info(adev->dev, "GfxclkFmin (%ld) must be within [%u, %u]!\n",
- input[i + 1], minimum, maximum);
- return -EINVAL;
- }
-
- od_table->OverDriveTable.GfxclkFmin = input[i + 1];
- od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
- break;
-
case 1:
smu_v14_0_2_get_od_setting_limits(smu,
PP_OD_FEATURE_GFXCLK_FMAX,
@@ -2469,7 +2458,7 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
return -EINVAL;
}
- od_table->OverDriveTable.GfxclkFmax = input[i + 1];
+ od_table->OverDriveTable.GfxclkFoffset = input[i + 1];
od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
break;
@@ -2480,13 +2469,6 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
}
}
- if (od_table->OverDriveTable.GfxclkFmin > od_table->OverDriveTable.GfxclkFmax) {
- dev_err(adev->dev,
- "Invalid setting: GfxclkFmin(%u) is bigger than GfxclkFmax(%u)\n",
- (uint32_t)od_table->OverDriveTable.GfxclkFmin,
- (uint32_t)od_table->OverDriveTable.GfxclkFmax);
- return -EINVAL;
- }
break;
case PP_OD_EDIT_MCLK_VDDC_TABLE: