path: root/drivers/ras/amd/atl/dehash.c
author	Muralidhara M K <muralidhara.mk@amd.com>	2024-01-28 18:59:50 +0300
committer	Borislav Petkov (AMD) <bp@alien8.de>	2024-01-29 12:22:41 +0300
commit	453f0ae797328e675840466c80e5b268d7feb9ba (patch)
tree	4cf5c7f7f23d7252ecf12d7d3932f970733201f9 /drivers/ras/amd/atl/dehash.c
parent	1289c431641f8beacc47db506210154dcea2492a (diff)
download	linux-453f0ae797328e675840466c80e5b268d7feb9ba.tar.xz
RAS/AMD/ATL: Add MI300 support
AMD MI300 systems include on-die HBM3 memory and a unique topology, and they fall under Data Fabric version 4.5 in overall design.

Generally, topology information (IDs, etc.) is gathered from Data Fabric registers. However, the unique topology of MI300 means that some topology information is fixed in hardware and follows arbitrary mappings. Furthermore, not all hardware instances are software-visible, so register accesses must be adjusted.

Recognize and add helper functions for the new MI300 interleave modes. Add lookup tables for fixed values where appropriate. Adjust how Die and Node IDs are found and used. Also, fix some register bitmasks that were mislabeled.

Signed-off-by: Muralidhara M K <muralidhara.mk@amd.com>
Co-developed-by: Yazen Ghannam <yazen.ghannam@amd.com>
Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20240128155950.1434067-1-yazen.ghannam@amd.com
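For background on the change below: the channel hash that the new mi300_dehash_addr() helper reverses is a plain XOR of higher address bits into each interleave bit, so re-applying the same XOR recovers the pre-hash address. A minimal standalone sketch of that idea for COH_ST_Select[0] (address bit 8) follows; it assumes every hash-control enable (4K/64K/2M/1G/1T) is set, and the dehash_bit8() helper is purely illustrative, not part of the patch.

#include <stdint.h>

/*
 * Illustrative only: undo the MI300 channel hash on COH_ST_Select[0]
 * (address bit 8), assuming all hash-control enables are set. XOR is
 * its own inverse, so XOR-ing the same contribution bits back in
 * restores the pre-hash value of bit 8.
 */
static uint64_t dehash_bit8(uint64_t addr)
{
	uint64_t flip = 0;

	flip ^= (addr >> 12) & 1;	/* 4K contribution */
	flip ^= (addr >> 15) & 1;	/* 64K contribution */
	flip ^= (addr >> 22) & 1;	/* 2M contribution */
	flip ^= (addr >> 29) & 1;	/* 1G contribution */
	flip ^= (addr >> 36) & 1;	/* 1T contribution */
	flip ^= (addr >> 43) & 1;	/* 1T contribution */

	return addr ^ (flip << 8);	/* flip bit 8 if the XOR is 1 */
}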
Diffstat (limited to 'drivers/ras/amd/atl/dehash.c')
-rw-r--r--	drivers/ras/amd/atl/dehash.c	95
1 file changed, 94 insertions(+), 1 deletion(-)
diff --git a/drivers/ras/amd/atl/dehash.c b/drivers/ras/amd/atl/dehash.c
index 6f414926e6fe..4ea46262c4f5 100644
--- a/drivers/ras/amd/atl/dehash.c
+++ b/drivers/ras/amd/atl/dehash.c
@@ -253,7 +253,7 @@ static int df4p5_dehash_addr(struct addr_ctx *ctx)
hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl);
- hash_ctl_1T = FIELD_GET(DF4_HASH_CTL_1T, ctx->map.ctl);
+ hash_ctl_1T = FIELD_GET(DF4p5_HASH_CTL_1T, ctx->map.ctl);
/*
* Generate a unique address to determine which bits
@@ -343,6 +343,94 @@ static int df4p5_dehash_addr(struct addr_ctx *ctx)
return 0;
}
+/*
+ * MI300 hash bits
+ *                                      4K 64K  2M  1G  1T  1T
+ * COH_ST_Select[0]   = XOR of addr{8,  12, 15, 22, 29, 36, 43}
+ * COH_ST_Select[1]   = XOR of addr{9,  13, 16, 23, 30, 37, 44}
+ * COH_ST_Select[2]   = XOR of addr{10, 14, 17, 24, 31, 38, 45}
+ * COH_ST_Select[3]   = XOR of addr{11,     18, 25, 32, 39, 46}
+ * COH_ST_Select[4]   = XOR of addr{14,     19, 26, 33, 40, 47} aka Stack
+ * DieID[0]           = XOR of addr{12,     20, 27, 34, 41    }
+ * DieID[1]           = XOR of addr{13,     21, 28, 35, 42    }
+ */
+static int mi300_dehash_addr(struct addr_ctx *ctx)
+{
+ bool hash_ctl_4k, hash_ctl_64k, hash_ctl_2M, hash_ctl_1G, hash_ctl_1T;
+ bool hashed_bit, intlv_bit, test_bit;
+ u8 num_intlv_bits, base_bit, i;
+
+ if (!map_bits_valid(ctx, 8, 8, 4, 1))
+ return -EINVAL;
+
+ hash_ctl_4k = FIELD_GET(DF4p5_HASH_CTL_4K, ctx->map.ctl);
+ hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
+ hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
+ hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl);
+ hash_ctl_1T = FIELD_GET(DF4p5_HASH_CTL_1T, ctx->map.ctl);
+
+ /* Channel bits */
+ num_intlv_bits = ilog2(ctx->map.num_intlv_chan);
+
+ for (i = 0; i < num_intlv_bits; i++) {
+ base_bit = 8 + i;
+
+ /* COH_ST_Select[4] jumps to a base bit of 14. */
+ if (i == 4)
+ base_bit = 14;
+
+ intlv_bit = BIT_ULL(base_bit) & ctx->ret_addr;
+
+ hashed_bit = intlv_bit;
+
+ /* 4k hash bit only applies to the first 3 bits. */
+ if (i <= 2) {
+ test_bit = BIT_ULL(12 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_4k;
+ }
+
+ /* Use temporary 'test_bit' value to avoid Sparse warnings. */
+ test_bit = BIT_ULL(15 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_64k;
+ test_bit = BIT_ULL(22 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_2M;
+ test_bit = BIT_ULL(29 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_1G;
+ test_bit = BIT_ULL(36 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_1T;
+ test_bit = BIT_ULL(43 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_1T;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(base_bit);
+ }
+
+ /* Die bits */
+ num_intlv_bits = ilog2(ctx->map.num_intlv_dies);
+
+ for (i = 0; i < num_intlv_bits; i++) {
+ base_bit = 12 + i;
+
+ intlv_bit = BIT_ULL(base_bit) & ctx->ret_addr;
+
+ hashed_bit = intlv_bit;
+
+ test_bit = BIT_ULL(20 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_64k;
+ test_bit = BIT_ULL(27 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_2M;
+ test_bit = BIT_ULL(34 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_1G;
+ test_bit = BIT_ULL(41 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_1T;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(base_bit);
+ }
+
+ return 0;
+}
+
int dehash_address(struct addr_ctx *ctx)
{
switch (ctx->map.intlv_mode) {
@@ -400,6 +488,11 @@ int dehash_address(struct addr_ctx *ctx)
case DF4p5_NPS1_16CHAN_2K_HASH:
return df4p5_dehash_addr(ctx);
+ case MI3_HASH_8CHAN:
+ case MI3_HASH_16CHAN:
+ case MI3_HASH_32CHAN:
+ return mi300_dehash_addr(ctx);
+
default:
atl_debug_on_bad_intlv_mode(ctx);
return -EINVAL;
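For reference, the three new MI3_HASH_* cases above all route to mi300_dehash_addr(), which sizes its channel loop from the map: with 8, 16, or 32 interleaved channels it dehashes ilog2() = 3, 4, or 5 COH_ST_Select bits at base bits 8-11, the fifth bit jumping to base bit 14 per the "aka Stack" row of the comment table, followed by up to two die bits starting at bit 12.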