path: root/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/umc_v12_0.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v12_0.c  416
1 file changed, 278 insertions(+), 138 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index 77af4e25ff46..bfe61d86ee6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -28,27 +28,7 @@
#include "umc/umc_12_0_0_sh_mask.h"
#include "mp/mp_13_0_6_sh_mask.h"
-const uint32_t
- umc_v12_0_channel_idx_tbl[]
- [UMC_V12_0_UMC_INSTANCE_NUM]
- [UMC_V12_0_CHANNEL_INSTANCE_NUM] = {
- {{3, 7, 11, 15, 2, 6, 10, 14}, {1, 5, 9, 13, 0, 4, 8, 12},
- {19, 23, 27, 31, 18, 22, 26, 30}, {17, 21, 25, 29, 16, 20, 24, 28}},
- {{47, 43, 39, 35, 46, 42, 38, 34}, {45, 41, 37, 33, 44, 40, 36, 32},
- {63, 59, 55, 51, 62, 58, 54, 50}, {61, 57, 53, 49, 60, 56, 52, 48}},
- {{79, 75, 71, 67, 78, 74, 70, 66}, {77, 73, 69, 65, 76, 72, 68, 64},
- {95, 91, 87, 83, 94, 90, 86, 82}, {93, 89, 85, 81, 92, 88, 84, 80}},
- {{99, 103, 107, 111, 98, 102, 106, 110}, {97, 101, 105, 109, 96, 100, 104, 108},
- {115, 119, 123, 127, 114, 118, 122, 126}, {113, 117, 121, 125, 112, 116, 120, 124}}
- };
-
-/* mapping of MCA error address to normalized address */
-static const uint32_t umc_v12_0_ma2na_mapping[] = {
- 0, 5, 6, 8, 9, 14, 12, 13,
- 10, 11, 15, 16, 17, 18, 19, 20,
- 21, 22, 23, 24, 25, 26, 27, 28,
- 24, 7, 29, 30,
-};
+#define MAX_ECC_NUM_PER_RETIREMENT 32
static inline uint64_t get_umc_v12_0_reg_offset(struct amdgpu_device *adev,
uint32_t node_inst,
@@ -192,99 +172,74 @@ static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev,
umc_v12_0_reset_error_count(adev);
}
-static bool umc_v12_0_bit_wise_xor(uint32_t val)
+static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data,
+ struct ta_ras_query_address_input *addr_in)
{
- bool result = 0;
- int i;
+ uint32_t col, row, row_xor, bank, channel_index;
+ uint64_t soc_pa, retired_page, column, err_addr;
+ struct ta_ras_query_address_output addr_out;
- for (i = 0; i < 32; i++)
- result = result ^ ((val >> i) & 0x1);
+ err_addr = addr_in->ma.err_addr;
+ addr_in->addr_type = TA_RAS_MCA_TO_PA;
+ if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) {
+ dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
+ err_addr);
- return result;
-}
+ return;
+ }
+
+ soc_pa = addr_out.pa.pa;
+ bank = addr_out.pa.bank;
+ channel_index = addr_out.pa.channel_idx;
-static void umc_v12_0_mca_addr_to_pa(struct amdgpu_device *adev,
- uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst,
- uint32_t node_inst,
- struct ta_ras_query_address_output *addr_out)
-{
- uint32_t channel_index, i;
- uint64_t na, soc_pa;
- uint32_t bank_hash0, bank_hash1, bank_hash2, bank_hash3, col, row;
- uint32_t bank0, bank1, bank2, bank3, bank;
-
- bank_hash0 = (err_addr >> UMC_V12_0_MCA_B0_BIT) & 0x1ULL;
- bank_hash1 = (err_addr >> UMC_V12_0_MCA_B1_BIT) & 0x1ULL;
- bank_hash2 = (err_addr >> UMC_V12_0_MCA_B2_BIT) & 0x1ULL;
- bank_hash3 = (err_addr >> UMC_V12_0_MCA_B3_BIT) & 0x1ULL;
col = (err_addr >> 1) & 0x1fULL;
row = (err_addr >> 10) & 0x3fffULL;
+ row_xor = row ^ (0x1ULL << 13);
+ /* clear [C3 C2] in soc physical address */
+ soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
+ /* clear [C4] in soc physical address */
+ soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);
+
+ /* loop for all possibilities of [C4 C3 C2] */
+ for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
+ retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
+ retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
+ /* include column bit 0 and 1 */
+ col &= 0x3;
+ col |= (column << 2);
+ dev_info(adev->dev,
+ "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
+ retired_page, row, col, bank, channel_index);
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, addr_in->ma.umc_inst);
- /* apply bank hash algorithm */
- bank0 =
- bank_hash0 ^ (UMC_V12_0_XOR_EN0 &
- (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR0) ^
- (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR0))));
- bank1 =
- bank_hash1 ^ (UMC_V12_0_XOR_EN1 &
- (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR1) ^
- (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR1))));
- bank2 =
- bank_hash2 ^ (UMC_V12_0_XOR_EN2 &
- (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR2) ^
- (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR2))));
- bank3 =
- bank_hash3 ^ (UMC_V12_0_XOR_EN3 &
- (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR3) ^
- (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR3))));
-
- bank = bank0 | (bank1 << 1) | (bank2 << 2) | (bank3 << 3);
- err_addr &= ~0x3c0ULL;
- err_addr |= (bank << UMC_V12_0_MCA_B0_BIT);
-
- na = 0x0;
- /* convert mca error address to normalized address */
- for (i = 1; i < ARRAY_SIZE(umc_v12_0_ma2na_mapping); i++)
- na |= ((err_addr >> i) & 0x1ULL) << umc_v12_0_ma2na_mapping[i];
-
- channel_index =
- adev->umc.channel_idx_tbl[node_inst * adev->umc.umc_inst_num *
- adev->umc.channel_inst_num +
- umc_inst * adev->umc.channel_inst_num +
- ch_inst];
- /* translate umc channel address to soc pa, 3 parts are included */
- soc_pa = ADDR_OF_32KB_BLOCK(na) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(na);
-
- /* the umc channel bits are not original values, they are hashed */
- UMC_V12_0_SET_CHANNEL_HASH(channel_index, soc_pa);
-
- addr_out->pa.pa = soc_pa;
- addr_out->pa.bank = bank;
- addr_out->pa.channel_idx = channel_index;
+ /* shift R13 bit */
+ retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
+ dev_info(adev->dev,
+ "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
+ retired_page, row_xor, col, bank, channel_index);
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, addr_in->ma.umc_inst);
+ }
}
-static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
- struct ras_err_data *err_data, uint64_t err_addr,
- uint32_t ch_inst, uint32_t umc_inst,
- uint32_t node_inst)
+static int umc_v12_0_convert_err_addr(struct amdgpu_device *adev,
+ struct ta_ras_query_address_input *addr_in,
+ uint64_t *pfns, int len)
{
uint32_t col, row, row_xor, bank, channel_index;
- uint64_t soc_pa, retired_page, column;
- struct ta_ras_query_address_input addr_in;
+ uint64_t soc_pa, retired_page, column, err_addr;
struct ta_ras_query_address_output addr_out;
+ uint32_t pos = 0;
- addr_in.addr_type = TA_RAS_MCA_TO_PA;
- addr_in.ma.err_addr = err_addr;
- addr_in.ma.ch_inst = ch_inst;
- addr_in.ma.umc_inst = umc_inst;
- addr_in.ma.node_inst = node_inst;
-
- if (psp_ras_query_address(&adev->psp, &addr_in, &addr_out))
- /* fallback to old path if fail to get pa from psp */
- umc_v12_0_mca_addr_to_pa(adev, err_addr, ch_inst, umc_inst,
- node_inst, &addr_out);
+ err_addr = addr_in->ma.err_addr;
+ addr_in->addr_type = TA_RAS_MCA_TO_PA;
+ if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) {
+ dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
+ err_addr);
+ return 0;
+ }
soc_pa = addr_out.pa.pa;
bank = addr_out.pa.bank;
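
Worth noting on the deleted fallback above: umc_v12_0_bit_wise_xor() was a 32-bit parity reduction feeding the bank-hash math, and the entire driver-side MCA-to-PA path now yields to the PSP TA query. For reference, the removed loop collapses to a compiler builtin (GCC/Clang):

#include <stdint.h>
#include <stdbool.h>

/* XOR of all 32 bits of val, i.e. its odd parity */
static inline bool bit_wise_xor(uint32_t val)
{
        return __builtin_parity(val);
}
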
@@ -302,33 +257,42 @@ static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
+
+ if (pos >= len)
+ return 0;
+ pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+
/* include column bit 0 and 1 */
col &= 0x3;
col |= (column << 2);
dev_info(adev->dev,
"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
retired_page, row, col, bank, channel_index);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
/* shift R13 bit */
retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
+
+ if (pos >= len)
+ return 0;
+ pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+
dev_info(adev->dev,
"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
retired_page, row_xor, col, bank, channel_index);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
}
+
+ return pos;
}
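
To see what umc_v12_0_convert_err_addr() actually emits: the TA returns one translated soc PA, but column bits C2..C4 and row bit R13 cannot be recovered from the MCA record, so all 8 x 2 = 16 combinations are candidates for retirement. A standalone sketch of that fan-out, with bit positions as placeholder assumptions (the real values come from the UMC_V12_0_PA_* constants):

#include <stdint.h>
#include <stddef.h>

#define PA_C2_BIT      15   /* assumed position of column bit C2 */
#define PA_C4_BIT      21   /* assumed position of column bit C4 */
#define PA_R13_BIT     35   /* assumed position of row bit R13 */
#define NA_MAP_PA_NUM   8   /* 2^3 combinations of [C4 C3 C2] */
#define GPU_PAGE_SHIFT 12   /* assumed 4 KiB GPU page */

static size_t expand_retired_pfns(uint64_t soc_pa, uint64_t *pfns, size_t len)
{
        size_t pos = 0;
        uint64_t column;

        /* clear [C4 C3 C2] so the loop can set every combination */
        soc_pa &= ~(0x3ULL << PA_C2_BIT);
        soc_pa &= ~(0x1ULL << PA_C4_BIT);

        for (column = 0; column < NA_MAP_PA_NUM && pos + 2 <= len; column++) {
                uint64_t page = soc_pa | ((column & 0x3) << PA_C2_BIT) |
                                (((column & 0x4) >> 2) << PA_C4_BIT);

                pfns[pos++] = page >> GPU_PAGE_SHIFT;           /* R13 as-is */
                pfns[pos++] = (page ^ (1ULL << PA_R13_BIT))
                                >> GPU_PAGE_SHIFT;              /* R13 flipped */
        }
        return pos;     /* 16 pfns when len is large enough */
}
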
static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
uint32_t node_inst, uint32_t umc_inst,
uint32_t ch_inst, void *data)
{
+ struct ras_err_data *err_data = (struct ras_err_data *)data;
+ struct ta_ras_query_address_input addr_in;
uint64_t mc_umc_status_addr;
uint64_t mc_umc_status, err_addr;
uint64_t mc_umc_addrt0;
- struct ras_err_data *err_data = (struct ras_err_data *)data;
uint64_t umc_reg_offset =
get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);
@@ -357,8 +321,19 @@ static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- umc_v12_0_convert_error_address(adev, err_data, err_addr,
- ch_inst, umc_inst, node_inst);
+ if (!adev->aid_mask &&
+ adev->smuio.funcs &&
+ adev->smuio.funcs->get_socket_id)
+ addr_in.ma.socket_id = adev->smuio.funcs->get_socket_id(adev);
+ else
+ addr_in.ma.socket_id = 0;
+
+ addr_in.ma.err_addr = err_addr;
+ addr_in.ma.ch_inst = ch_inst;
+ addr_in.ma.umc_inst = umc_inst;
+ addr_in.ma.node_inst = node_inst;
+
+ umc_v12_0_convert_error_address(adev, err_data, &addr_in);
}
/* clear umc status */
@@ -401,13 +376,20 @@ static int umc_v12_0_err_cnt_init_per_channel(struct amdgpu_device *adev,
return 0;
}
+#ifdef TO_BE_REMOVED
static void umc_v12_0_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
+ struct ras_query_context qctx;
+
+ memset(&qctx, 0, sizeof(qctx));
+ qctx.event_id = amdgpu_ras_acquire_event_id(adev, amdgpu_ras_intr_triggered() ?
+ RAS_EVENT_TYPE_ISR : RAS_EVENT_TYPE_INVALID);
+
amdgpu_mca_smu_log_ras_error(adev,
- AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_CE, ras_error_status);
+ AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_CE, ras_error_status, &qctx);
amdgpu_mca_smu_log_ras_error(adev,
- AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_UE, ras_error_status);
+ AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_UE, ras_error_status, &qctx);
}
static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *adev,
@@ -418,12 +400,16 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade
struct ras_err_info *err_info;
struct ras_err_addr *mca_err_addr, *tmp;
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ struct ta_ras_query_address_input addr_in;
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
if (list_empty(&err_info->err_addr_list))
continue;
+ addr_in.ma.node_inst = err_info->mcm_info.die_id;
+ addr_in.ma.socket_id = err_info->mcm_info.socket_id;
+
list_for_each_entry_safe(mca_err_addr, tmp, &err_info->err_addr_list, node) {
mc_umc_status = mca_err_addr->err_status;
if (mc_umc_status &&
@@ -439,6 +425,10 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade
MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
InstanceIdLo = REG_GET_FIELD(mca_ipid, MCMP1_IPIDT0, InstanceIdLo);
+ addr_in.ma.err_addr = err_addr;
+ addr_in.ma.ch_inst = MCA_IPID_LO_2_UMC_CH(InstanceIdLo);
+ addr_in.ma.umc_inst = MCA_IPID_LO_2_UMC_INST(InstanceIdLo);
+
dev_info(adev->dev, "UMC:IPID:0x%llx, aid:%d, inst:%d, ch:%d, err_addr:0x%llx\n",
mca_ipid,
err_info->mcm_info.die_id,
@@ -447,10 +437,7 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade
err_addr);
umc_v12_0_convert_error_address(adev,
- err_data, err_addr,
- MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
- MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
- err_info->mcm_info.die_id);
+ err_data, &addr_in);
}
/* Delete error address node from list and free memory */
@@ -458,6 +445,7 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade
}
}
}
+#endif
static bool umc_v12_0_check_ecc_err_status(struct amdgpu_device *adev,
enum amdgpu_mca_error_type type, void *ras_error_status)
@@ -498,43 +486,49 @@ const struct amdgpu_ras_block_hw_ops umc_v12_0_ras_hw_ops = {
.query_ras_error_address = umc_v12_0_query_ras_error_address,
};
-static int umc_v12_0_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type,
- struct aca_bank_report *report, void *data)
+static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
{
struct amdgpu_device *adev = handle->adev;
- u64 status;
+ struct aca_bank_info info;
+ enum aca_error_type err_type;
+ u64 status, count;
+ u32 ext_error_code;
int ret;
- ret = aca_bank_info_decode(bank, &report->info);
+ status = bank->regs[ACA_REG_IDX_STATUS];
+ if (umc_v12_0_is_deferred_error(adev, status))
+ err_type = ACA_ERROR_TYPE_DEFERRED;
+ else if (umc_v12_0_is_uncorrectable_error(adev, status))
+ err_type = ACA_ERROR_TYPE_UE;
+ else if (umc_v12_0_is_correctable_error(adev, status))
+ err_type = ACA_ERROR_TYPE_CE;
+ else
+ return 0;
+
+ ret = aca_bank_info_decode(bank, &info);
if (ret)
return ret;
- status = bank->regs[ACA_REG_IDX_STATUS];
- switch (type) {
- case ACA_ERROR_TYPE_UE:
- if (umc_v12_0_is_uncorrectable_error(adev, status)) {
- report->count[type] = 1;
- }
- break;
- case ACA_ERROR_TYPE_CE:
- if (umc_v12_0_is_correctable_error(adev, status)) {
- report->count[type] = 1;
- }
- break;
- default:
- return -EINVAL;
- }
+ amdgpu_umc_update_ecc_status(adev,
+ bank->regs[ACA_REG_IDX_STATUS],
+ bank->regs[ACA_REG_IDX_IPID],
+ bank->regs[ACA_REG_IDX_ADDR]);
- return 0;
+ ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
+ count = ext_error_code == 0 ?
+ ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;
+
+ return aca_error_cache_log_bank_error(handle, &info, err_type, count);
}
static const struct aca_bank_ops umc_v12_0_aca_bank_ops = {
- .aca_bank_generate_report = umc_v12_0_aca_bank_generate_report,
+ .aca_bank_parser = umc_v12_0_aca_bank_parser,
};
const struct aca_info umc_v12_0_aca_info = {
.hwip = ACA_HWIP_TYPE_UMC,
- .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
+ .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK | ACA_ERROR_DEFERRED_MASK,
.bank_ops = &umc_v12_0_aca_bank_ops,
};
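
The ordering in umc_v12_0_aca_bank_parser() is deliberate: deferred (poisoned) banks are claimed before the UE and CE predicates get a chance, and a bank matching none of the three is simply skipped rather than reported. The count rule: a nonzero extended error code logs a single event, otherwise the MISC0 error counter is trusted. Distilled below, with the umc_v12_0_is_*_error() predicates reduced to booleans as stand-ins:

#include <stdint.h>

enum err_type { ERR_NONE, ERR_DEFERRED, ERR_UE, ERR_CE };

/* precedence: deferred > uncorrectable > correctable */
static enum err_type classify_bank(int deferred, int ue, int ce)
{
        if (deferred)
                return ERR_DEFERRED;
        if (ue)
                return ERR_UE;
        if (ce)
                return ERR_CE;
        return ERR_NONE;        /* bank skipped, nothing logged */
}

/* single event for nonzero ErrorCodeExt, else MISC0.ErrCnt */
static uint64_t bank_error_count(uint32_t ext_error_code, uint64_t misc0_errcnt)
{
        return ext_error_code == 0 ? misc0_errcnt : 1ULL;
}
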
@@ -554,6 +548,152 @@ static int umc_v12_0_ras_late_init(struct amdgpu_device *adev, struct ras_common
return 0;
}
+static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
+ uint64_t status, uint64_t ipid, uint64_t addr)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ uint16_t hwid, mcatype;
+ struct ta_ras_query_address_input addr_in;
+ uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL];
+ uint64_t err_addr, hash_val = 0;
+ struct ras_ecc_err *ecc_err;
+ int count;
+ int ret;
+
+ hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
+ mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);
+
+ if ((hwid != MCA_UMC_HWID_V12_0) || (mcatype != MCA_UMC_MCATYPE_V12_0))
+ return 0;
+
+ if (!status)
+ return 0;
+
+ if (!umc_v12_0_is_deferred_error(adev, status))
+ return 0;
+
+ err_addr = REG_GET_FIELD(addr,
+ MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+
+ dev_info(adev->dev,
+ "UMC:IPID:0x%llx, socket:%llu, aid:%llu, inst:%llu, ch:%llu, err_addr:0x%llx\n",
+ ipid,
+ MCA_IPID_2_SOCKET_ID(ipid),
+ MCA_IPID_2_DIE_ID(ipid),
+ MCA_IPID_2_UMC_INST(ipid),
+ MCA_IPID_2_UMC_CH(ipid),
+ err_addr);
+
+ memset(page_pfn, 0, sizeof(page_pfn));
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ addr_in.ma.err_addr = err_addr;
+ addr_in.ma.ch_inst = MCA_IPID_2_UMC_CH(ipid);
+ addr_in.ma.umc_inst = MCA_IPID_2_UMC_INST(ipid);
+ addr_in.ma.node_inst = MCA_IPID_2_DIE_ID(ipid);
+ addr_in.ma.socket_id = MCA_IPID_2_SOCKET_ID(ipid);
+
+ count = umc_v12_0_convert_err_addr(adev,
+ &addr_in, page_pfn, ARRAY_SIZE(page_pfn));
+ if (count <= 0) {
+ dev_warn(adev->dev, "Fail to convert error address! count:%d\n", count);
+ return 0;
+ }
+
+ ret = amdgpu_umc_build_pages_hash(adev,
+ page_pfn, count, &hash_val);
+ if (ret) {
+ dev_err(adev->dev, "Fail to build error pages hash\n");
+ return ret;
+ }
+
+ ecc_err = kzalloc(sizeof(*ecc_err), GFP_KERNEL);
+ if (!ecc_err)
+ return -ENOMEM;
+
+ ecc_err->err_pages.pfn = kcalloc(count, sizeof(*ecc_err->err_pages.pfn), GFP_KERNEL);
+ if (!ecc_err->err_pages.pfn) {
+ kfree(ecc_err);
+ return -ENOMEM;
+ }
+
+ memcpy(ecc_err->err_pages.pfn, page_pfn, count * sizeof(*ecc_err->err_pages.pfn));
+ ecc_err->err_pages.count = count;
+
+ ecc_err->hash_index = hash_val;
+ ecc_err->status = status;
+ ecc_err->ipid = ipid;
+ ecc_err->addr = addr;
+
+ ret = amdgpu_umc_logs_ecc_err(adev, &con->umc_ecc_log.de_page_tree, ecc_err);
+ if (ret) {
+ if (ret == -EEXIST)
+ con->umc_ecc_log.de_updated = true;
+ else
+ dev_err(adev->dev, "Fail to log ecc error! ret:%d\n", ret);
+
+ kfree(ecc_err->err_pages.pfn);
+ kfree(ecc_err);
+ return ret;
+ }
+
+ con->umc_ecc_log.de_updated = true;
+
+ return 0;
+}
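
umc_v12_0_update_ecc_status() is the producer half of the deferred-error pipeline: filter banks by hardware id and MCA type, require deferred status, expand the address into pfns, hash the pfn list into a dedup key, then log it into de_page_tree, where -EEXIST means the page set was already recorded and only de_updated needs flipping. The dedup idea in isolation, as a toy userspace model rather than the kernel radix tree:

#include <stdint.h>

#define LOG_SLOTS 64

struct ecc_log { uint64_t keys[LOG_SLOTS]; int used; };

/* returns 0 on first insert, 1 if already logged (kernel path: -EEXIST) */
static int log_once(struct ecc_log *log, uint64_t hash_key)
{
        int i;

        for (i = 0; i < log->used; i++)
                if (log->keys[i] == hash_key)
                        return 1;       /* duplicate: just mark de_updated */
        if (log->used < LOG_SLOTS)
                log->keys[log->used++] = hash_key;
        return 0;
}
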
+
+static int umc_v12_0_fill_error_record(struct amdgpu_device *adev,
+ struct ras_ecc_err *ecc_err, void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ uint32_t i = 0;
+ int ret = 0;
+
+ if (!err_data || !ecc_err)
+ return -EINVAL;
+
+ for (i = 0; i < ecc_err->err_pages.count; i++) {
+ ret = amdgpu_umc_fill_error_record(err_data,
+ ecc_err->addr,
+ ecc_err->err_pages.pfn[i] << AMDGPU_GPU_PAGE_SHIFT,
+ MCA_IPID_2_UMC_CH(ecc_err->ipid),
+ MCA_IPID_2_UMC_INST(ecc_err->ipid));
+ if (ret)
+ break;
+ }
+
+ err_data->de_count++;
+
+ return ret;
+}
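
Note the shift when replaying records: the tree stores page frame numbers, and umc_v12_0_fill_error_record() turns each back into a byte address. Assuming the usual 4 KiB GPU page (AMDGPU_GPU_PAGE_SHIFT == 12), the round trip is a plain shift:

#include <stdint.h>
#include <assert.h>

int main(void)
{
        uint64_t addr = 0x12345000ULL;          /* retired page address */
        uint64_t pfn  = addr >> 12;             /* stored in err_pages.pfn */

        assert((pfn << 12) == addr);            /* what fill_error_record emits */
        return 0;
}
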
+
+static void umc_v12_0_query_ras_ecc_err_addr(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_ecc_err *entries[MAX_ECC_NUM_PER_RETIREMENT];
+ struct radix_tree_root *ecc_tree;
+ int new_detected, ret, i;
+
+ ecc_tree = &con->umc_ecc_log.de_page_tree;
+
+ mutex_lock(&con->umc_ecc_log.lock);
+ new_detected = radix_tree_gang_lookup_tag(ecc_tree, (void **)entries,
+ 0, ARRAY_SIZE(entries), UMC_ECC_NEW_DETECTED_TAG);
+ for (i = 0; i < new_detected; i++) {
+ if (!entries[i])
+ continue;
+
+ ret = umc_v12_0_fill_error_record(adev, entries[i], ras_error_status);
+ if (ret) {
+ dev_err(adev->dev, "Fail to fill umc error record, ret:%d\n", ret);
+ break;
+ }
+ radix_tree_tag_clear(ecc_tree, entries[i]->hash_index, UMC_ECC_NEW_DETECTED_TAG);
+ }
+ mutex_unlock(&con->umc_ecc_log.lock);
+}
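
The consumer drains by tag rather than by deletion: radix_tree_gang_lookup_tag() collects up to MAX_ECC_NUM_PER_RETIREMENT entries marked UMC_ECC_NEW_DETECTED_TAG, each becomes a batch of error records, and only the tag is cleared, so the entry stays resident for future dedup. The lifecycle in miniature, as a kernel-style sketch with demo names (not the driver's own structures):

#include <linux/kernel.h>
#include <linux/radix-tree.h>

#define NEW_TAG 0       /* stand-in for UMC_ECC_NEW_DETECTED_TAG */

struct demo_entry { unsigned long key; };

static RADIX_TREE(demo_tree, GFP_KERNEL);

/* producer: insert once, then tag as newly detected */
static void demo_log(struct demo_entry *e)
{
        if (radix_tree_insert(&demo_tree, e->key, e) == 0)
                radix_tree_tag_set(&demo_tree, e->key, NEW_TAG);
}

/* consumer: batch-collect new entries, clear only the tag */
static void demo_drain(void)
{
        struct demo_entry *batch[32];
        unsigned int n, i;

        n = radix_tree_gang_lookup_tag(&demo_tree, (void **)batch, 0,
                                       ARRAY_SIZE(batch), NEW_TAG);
        for (i = 0; i < n; i++)
                radix_tree_tag_clear(&demo_tree, batch[i]->key, NEW_TAG);
}
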
+
struct amdgpu_umc_ras umc_v12_0_ras = {
.ras_block = {
.hw_ops = &umc_v12_0_ras_hw_ops,
@@ -561,8 +701,8 @@ struct amdgpu_umc_ras umc_v12_0_ras = {
},
.err_cnt_init = umc_v12_0_err_cnt_init,
.query_ras_poison_mode = umc_v12_0_query_ras_poison_mode,
- .ecc_info_query_ras_error_count = umc_v12_0_ecc_info_query_ras_error_count,
- .ecc_info_query_ras_error_address = umc_v12_0_ecc_info_query_ras_error_address,
+ .ecc_info_query_ras_error_address = umc_v12_0_query_ras_ecc_err_addr,
.check_ecc_err_status = umc_v12_0_check_ecc_err_status,
+ .update_ecc_status = umc_v12_0_update_ecc_status,
};