author     Linus Torvalds <torvalds@linux-foundation.org>  2024-05-16 00:32:27 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-05-16 00:32:27 +0300
commit     2e9250022e9f2c9cde3b98fd26dcad1c2a9aedf3
tree       d2878cdf4393aade483d87041c1c86da0cbed064  /drivers/cxl/core
parent     c405aa3ea36c1f973a9f10bbcfabc9aeeb38040c
parent     d99f13843237cf9dbdc1bd873a901662b4aee16f
Merge tag 'cxl-for-6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl
Pull CXL updates from Dave Jiang:

 - Three CXL mailbox passthrough commands are added to support the
   populating and clearing of vendor debug logs:
     - Get Log Capabilities
     - Get Supported Log Sub-List Commands
     - Clear Log

 - Add support for Device Physical Address (DPA) to Host Physical
   Address (HPA) translation for CXL events of cxl_dram and
   cxl_general_media. This allows user space to figure out which CXL
   region an event occurred in via the trace event.

 - Connect CXL to CPER reporting. If a device is configured for
   firmware first, CXL event records are not sent directly to the
   host. Those records are reported through EFI Common Platform Error
   Records (CPER). Add support to route the CPER records through the
   CXL subsystem in order to provide DPA to HPA translation and also
   event decoding and tracing. This is useful for users to determine
   which system issues may correspond to specific hardware events.

 - A number of misc cleanups and fixes:
     - Fix for compile warning of cxl_security_ops
     - Add debug message for invalid interleave granularity
     - Enhancement to cxl-test event testing
     - Add dev_warn() on unsupported mixed mode decoder
     - Fix use of phys_to_target_node() for x86
     - Use helper function for decoder enum instead of open coding
     - Include missing headers for cxl-event
     - Fix MAINTAINERS file entry
     - Fix cxlr_pmem memory leak
     - Cleanup __cxl_parse_cfmws() via scope-based resource management
     - Convert cxl_pmem_region_alloc() to scope-based resource management

* tag 'cxl-for-6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl: (21 commits)
  cxl/cper: Remove duplicated GUID defines
  cxl/cper: Fix non-ACPI-APEI-GHES build
  cxl/pci: Process CPER events
  acpi/ghes: Process CXL Component Events
  cxl/region: Convert cxl_pmem_region_alloc to scope-based resource management
  cxl/acpi: Cleanup __cxl_parse_cfmws()
  cxl/region: Fix cxlr_pmem leaks
  cxl/core: Add region info to cxl_general_media and cxl_dram events
  cxl/region: Move cxl_trace_hpa() work to the region driver
  cxl/region: Move cxl_dpa_to_region() work to the region driver
  cxl/trace: Correct DPA field masks for general_media & dram events
  MAINTAINERS: repair file entry in COMPUTE EXPRESS LINK
  cxl/cxl-event: include missing <linux/types.h> and <linux/uuid.h>
  cxl/hdm: Debug, use decoder name function
  cxl: Fix use of phys_to_target_node() for x86
  cxl/hdm: dev_warn() on unsupported mixed mode decoder
  cxl/test: Enhance event testing
  cxl/hdm: Add debug message for invalid interleave granularity
  cxl: Fix compile warning for cxl_security_ops extern
  cxl/mbox: Add Clear Log mailbox command
  ...
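The three new log commands are reachable through the existing CXL mailbox passthrough ioctl on a memdev character device. Below is a minimal user-space sketch of issuing Clear Log against the vendor debug log. The CXL_MEM_SEND_COMMAND ioctl and struct cxl_send_command come from include/uapi/linux/cxl_mem.h, but the CXL_MEM_COMMAND_ID_CLEAR_LOG name and the vendor debug log UUID bytes here are assumptions to verify against your tree and the CXL 3.0 spec. The kernel accepts only this one UUID for Clear Log (see the uuid_equal() check in the mbox.c hunk below).

    /* Hedged sketch: clear the vendor debug log via the mailbox
     * passthrough. Command ID name and UUID bytes are assumptions;
     * verify against include/uapi/linux/cxl_mem.h in your tree. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/cxl_mem.h>

    int main(void)
    {
    	/* Vendor debug log UUID 5e1819d9-11a9-400c-811f-d60719403d86
    	 * (assumed from the CXL 3.0 spec's log identifier table) */
    	unsigned char uuid[16] = {
    		0x5e, 0x18, 0x19, 0xd9, 0x11, 0xa9, 0x40, 0x0c,
    		0x81, 0x1f, 0xd6, 0x07, 0x19, 0x40, 0x3d, 0x86,
    	};
    	struct cxl_send_command cmd = {
    		.id = CXL_MEM_COMMAND_ID_CLEAR_LOG,
    		.in.size = sizeof(uuid),  /* 0x10, matching CXL_CMD() below */
    		.in.payload = (uintptr_t)uuid,
    	};
    	int fd = open("/dev/cxl/mem0", O_RDWR);

    	if (fd < 0 || ioctl(fd, CXL_MEM_SEND_COMMAND, &cmd) < 0) {
    		perror("CLEAR_LOG");
    		return 1;
    	}
    	printf("mailbox return code: %u\n", cmd.retval);
    	return 0;
    }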
Diffstat (limited to 'drivers/cxl/core')
-rw-r--r--  drivers/cxl/core/core.h     14
-rw-r--r--  drivers/cxl/core/hdm.c      13
-rw-r--r--  drivers/cxl/core/mbox.c     48
-rw-r--r--  drivers/cxl/core/memdev.c   44
-rw-r--r--  drivers/cxl/core/region.c  177
-rw-r--r--  drivers/cxl/core/trace.c    91
-rw-r--r--  drivers/cxl/core/trace.h    50
7 files changed, 253 insertions, 184 deletions
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index bc5a95665aa0..625394486459 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -27,7 +27,21 @@ void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled);
int cxl_region_init(void);
void cxl_region_exit(void);
int cxl_get_poison_by_endpoint(struct cxl_port *port);
+struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa);
+u64 cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
+ u64 dpa);
+
#else
+static inline u64
+cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd, u64 dpa)
+{
+ return ULLONG_MAX;
+}
+static inline
+struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
+{
+ return NULL;
+}
static inline int cxl_get_poison_by_endpoint(struct cxl_port *port)
{
return 0;
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 7d97790b893d..784843fa2a22 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -319,8 +319,8 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
else if (resource_contains(&cxlds->ram_res, res))
cxled->mode = CXL_DECODER_RAM;
else {
- dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
- cxled->cxld.id, cxled->dpa_res);
+ dev_warn(dev, "decoder%d.%d: %pr mixed mode not supported\n",
+ port->id, cxled->cxld.id, cxled->dpa_res);
cxled->mode = CXL_DECODER_MIXED;
}
@@ -519,8 +519,7 @@ int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
if (size > avail) {
dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
- cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
- &avail);
+ cxl_decoder_mode_name(cxled->mode), &avail);
rc = -ENOSPC;
goto out;
}
@@ -888,8 +887,12 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
}
rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
&cxld->interleave_granularity);
- if (rc)
+ if (rc) {
+ dev_warn(&port->dev,
+ "decoder%d.%d: Invalid interleave granularity (ctrl: %#x)\n",
+ port->id, cxld->id, ctrl);
return rc;
+ }
dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n",
port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end,
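The new dev_warn() in init_hdm_decoder() fires when eig_to_granularity() rejects the encoded interleave granularity (IG) pulled out of the decoder control register. A standalone sketch of that encoding, assuming the helper keeps its current shape in cxl.h (granularity = 256 << eig, with encodings above 6, i.e. 16K, rejected):

    /* Hedged sketch mirroring eig_to_granularity(); constants assume
     * CXL_DECODER_MIN_GRANULARITY == 256 and a max encoded IG of 6. */
    #include <stdio.h>

    static int eig_to_granularity(unsigned int eig, unsigned int *granularity)
    {
    	if (eig > 6)
    		return -1;  /* the kernel returns -EINVAL here */
    	*granularity = 256u << eig;
    	return 0;
    }

    int main(void)
    {
    	unsigned int ig;

    	for (unsigned int eig = 0; eig <= 7; eig++) {
    		if (eig_to_granularity(eig, &ig))
    			printf("eig %u: invalid (would trigger the new dev_warn)\n", eig);
    		else
    			printf("eig %u: %u bytes\n", eig, ig);
    	}
    	return 0;
    }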
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 65185c9fa001..2626f3fff201 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -56,6 +56,9 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
+ CXL_CMD(GET_LOG_CAPS, 0x10, 0x4, 0),
+ CXL_CMD(CLEAR_LOG, 0x10, 0, 0),
+ CXL_CMD(GET_SUP_LOG_SUBLIST, 0x2, CXL_VARIABLE_PAYLOAD, 0),
CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
@@ -331,6 +334,15 @@ static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
return false;
break;
}
+ case CXL_MBOX_OP_CLEAR_LOG: {
+ const uuid_t *uuid = (uuid_t *)payload_in;
+
+ /*
+ * Restrict the 'Clear log' action to only apply to
+ * Vendor debug logs.
+ */
+ return uuid_equal(uuid, &DEFINE_CXL_VENDOR_DEBUG_UUID);
+ }
default:
break;
}
@@ -842,14 +854,38 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
enum cxl_event_type event_type,
const uuid_t *uuid, union cxl_event *evt)
{
- if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
- trace_cxl_general_media(cxlmd, type, &evt->gen_media);
- else if (event_type == CXL_CPER_EVENT_DRAM)
- trace_cxl_dram(cxlmd, type, &evt->dram);
- else if (event_type == CXL_CPER_EVENT_MEM_MODULE)
+ if (event_type == CXL_CPER_EVENT_MEM_MODULE) {
trace_cxl_memory_module(cxlmd, type, &evt->mem_module);
- else
+ return;
+ }
+ if (event_type == CXL_CPER_EVENT_GENERIC) {
trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
+ return;
+ }
+
+ if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) {
+ u64 dpa, hpa = ULLONG_MAX;
+ struct cxl_region *cxlr;
+
+ /*
+ * These trace points are annotated with HPA and region
+ * translations. Take topology mutation locks and lookup
+ * { HPA, REGION } from { DPA, MEMDEV } in the event record.
+ */
+ guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_dpa_rwsem);
+
+ dpa = le64_to_cpu(evt->common.phys_addr) & CXL_DPA_MASK;
+ cxlr = cxl_dpa_to_region(cxlmd, dpa);
+ if (cxlr)
+ hpa = cxl_trace_hpa(cxlr, cxlmd, dpa);
+
+ if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
+ trace_cxl_general_media(cxlmd, type, cxlr, hpa,
+ &evt->gen_media);
+ else if (event_type == CXL_CPER_EVENT_DRAM)
+ trace_cxl_dram(cxlmd, type, cxlr, hpa, &evt->dram);
+ }
}
EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL);
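The reworked cxl_event_trace_record() takes cxl_region_rwsem and cxl_dpa_rwsem via guard() from <linux/cleanup.h>, so both read locks drop automatically on every return path. A condensed sketch of the scope-based idioms this pull adopts; the same __free()/no_free_ptr() pairing appears in cxl_pmem_region_alloc() in the region.c hunk below. The lock and allocation here are illustrative stand-ins, not CXL code:

    #include <linux/cleanup.h>
    #include <linux/rwsem.h>
    #include <linux/slab.h>

    static DECLARE_RWSEM(example_rwsem);  /* stand-in for cxl_region_rwsem */

    static void *example_alloc(size_t size)
    {
    	/* Dropped automatically at scope exit, on every return path */
    	guard(rwsem_read)(&example_rwsem);

    	/* Freed automatically on early return ... */
    	void *buf __free(kfree) = kzalloc(size, GFP_KERNEL);

    	if (!buf)
    		return NULL;

    	/* ... unless ownership is transferred out with no_free_ptr() */
    	return no_free_ptr(buf);
    }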
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index d4e259f3a7e9..0277726afd04 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -251,50 +251,6 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
}
EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, CXL);
-struct cxl_dpa_to_region_context {
- struct cxl_region *cxlr;
- u64 dpa;
-};
-
-static int __cxl_dpa_to_region(struct device *dev, void *arg)
-{
- struct cxl_dpa_to_region_context *ctx = arg;
- struct cxl_endpoint_decoder *cxled;
- u64 dpa = ctx->dpa;
-
- if (!is_endpoint_decoder(dev))
- return 0;
-
- cxled = to_cxl_endpoint_decoder(dev);
- if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
- return 0;
-
- if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
- return 0;
-
- dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
- dev_name(&cxled->cxld.region->dev));
-
- ctx->cxlr = cxled->cxld.region;
-
- return 1;
-}
-
-static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa)
-{
- struct cxl_dpa_to_region_context ctx;
- struct cxl_port *port;
-
- ctx = (struct cxl_dpa_to_region_context) {
- .dpa = dpa,
- };
- port = cxlmd->endpoint;
- if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
- device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
-
- return ctx.cxlr;
-}
-
static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 5c186e0a39b9..00a9f0eef8dd 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -2679,28 +2679,158 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port)
return rc;
}
+struct cxl_dpa_to_region_context {
+ struct cxl_region *cxlr;
+ u64 dpa;
+};
+
+static int __cxl_dpa_to_region(struct device *dev, void *arg)
+{
+ struct cxl_dpa_to_region_context *ctx = arg;
+ struct cxl_endpoint_decoder *cxled;
+ u64 dpa = ctx->dpa;
+
+ if (!is_endpoint_decoder(dev))
+ return 0;
+
+ cxled = to_cxl_endpoint_decoder(dev);
+ if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
+ return 0;
+
+ if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
+ return 0;
+
+ dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
+ dev_name(&cxled->cxld.region->dev));
+
+ ctx->cxlr = cxled->cxld.region;
+
+ return 1;
+}
+
+struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
+{
+ struct cxl_dpa_to_region_context ctx;
+ struct cxl_port *port;
+
+ ctx = (struct cxl_dpa_to_region_context) {
+ .dpa = dpa,
+ };
+ port = cxlmd->endpoint;
+ if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
+ device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
+
+ return ctx.cxlr;
+}
+
+static bool cxl_is_hpa_in_range(u64 hpa, struct cxl_region *cxlr, int pos)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ int gran = p->interleave_granularity;
+ int ways = p->interleave_ways;
+ u64 offset;
+
+ /* Is the hpa within this region at all */
+ if (hpa < p->res->start || hpa > p->res->end) {
+ dev_dbg(&cxlr->dev,
+ "Addr trans fail: hpa 0x%llx not in region\n", hpa);
+ return false;
+ }
+
+ /* Is the hpa in an expected chunk for its pos(-ition) */
+ offset = hpa - p->res->start;
+ offset = do_div(offset, gran * ways);
+ if ((offset >= pos * gran) && (offset < (pos + 1) * gran))
+ return true;
+
+ dev_dbg(&cxlr->dev,
+ "Addr trans fail: hpa 0x%llx not in expected chunk\n", hpa);
+
+ return false;
+}
+
+static u64 cxl_dpa_to_hpa(u64 dpa, struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled)
+{
+ u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa;
+ struct cxl_region_params *p = &cxlr->params;
+ int pos = cxled->pos;
+ u16 eig = 0;
+ u8 eiw = 0;
+
+ ways_to_eiw(p->interleave_ways, &eiw);
+ granularity_to_eig(p->interleave_granularity, &eig);
+
+ /*
+ * The device position in the region interleave set was removed
+ * from the offset at HPA->DPA translation. To reconstruct the
+ * HPA, place the 'pos' in the offset.
+ *
+ * The placement of 'pos' in the HPA is determined by interleave
+ * ways and granularity and is defined in the CXL Spec 3.0 Section
+ * 8.2.4.19.13 Implementation Note: Device Decode Logic
+ */
+
+ /* Remove the dpa base */
+ dpa_offset = dpa - cxl_dpa_resource_start(cxled);
+
+ mask_upper = GENMASK_ULL(51, eig + 8);
+
+ if (eiw < 8) {
+ hpa_offset = (dpa_offset & mask_upper) << eiw;
+ hpa_offset |= pos << (eig + 8);
+ } else {
+ bits_upper = (dpa_offset & mask_upper) >> (eig + 8);
+ bits_upper = bits_upper * 3;
+ hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8);
+ }
+
+ /* The lower bits remain unchanged */
+ hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0);
+
+ /* Apply the hpa_offset to the region base address */
+ hpa = hpa_offset + p->res->start;
+
+ if (!cxl_is_hpa_in_range(hpa, cxlr, cxled->pos))
+ return ULLONG_MAX;
+
+ return hpa;
+}
+
+u64 cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
+ u64 dpa)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_endpoint_decoder *cxled = NULL;
+
+ for (int i = 0; i < p->nr_targets; i++) {
+ cxled = p->targets[i];
+ if (cxlmd == cxled_to_memdev(cxled))
+ break;
+ }
+ if (!cxled || cxlmd != cxled_to_memdev(cxled))
+ return ULLONG_MAX;
+
+ return cxl_dpa_to_hpa(dpa, cxlr, cxled);
+}
+
static struct lock_class_key cxl_pmem_region_key;
-static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
+static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
{
struct cxl_region_params *p = &cxlr->params;
struct cxl_nvdimm_bridge *cxl_nvb;
- struct cxl_pmem_region *cxlr_pmem;
struct device *dev;
int i;
- down_read(&cxl_region_rwsem);
- if (p->state != CXL_CONFIG_COMMIT) {
- cxlr_pmem = ERR_PTR(-ENXIO);
- goto out;
- }
+ guard(rwsem_read)(&cxl_region_rwsem);
+ if (p->state != CXL_CONFIG_COMMIT)
+ return -ENXIO;
- cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets),
- GFP_KERNEL);
- if (!cxlr_pmem) {
- cxlr_pmem = ERR_PTR(-ENOMEM);
- goto out;
- }
+ struct cxl_pmem_region *cxlr_pmem __free(kfree) =
+ kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets), GFP_KERNEL);
+ if (!cxlr_pmem)
+ return -ENOMEM;
cxlr_pmem->hpa_range.start = p->res->start;
cxlr_pmem->hpa_range.end = p->res->end;
@@ -2718,10 +2848,8 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
*/
if (i == 0) {
cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
- if (!cxl_nvb) {
- cxlr_pmem = ERR_PTR(-ENODEV);
- goto out;
- }
+ if (!cxl_nvb)
+ return -ENODEV;
cxlr->cxl_nvb = cxl_nvb;
}
m->cxlmd = cxlmd;
@@ -2732,18 +2860,16 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
}
dev = &cxlr_pmem->dev;
- cxlr_pmem->cxlr = cxlr;
- cxlr->cxlr_pmem = cxlr_pmem;
device_initialize(dev);
lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
device_set_pm_not_required(dev);
dev->parent = &cxlr->dev;
dev->bus = &cxl_bus_type;
dev->type = &cxl_pmem_region_type;
-out:
- up_read(&cxl_region_rwsem);
+ cxlr_pmem->cxlr = cxlr;
+ cxlr->cxlr_pmem = no_free_ptr(cxlr_pmem);
- return cxlr_pmem;
+ return 0;
}
static void cxl_dax_region_release(struct device *dev)
@@ -2860,9 +2986,10 @@ static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
struct device *dev;
int rc;
- cxlr_pmem = cxl_pmem_region_alloc(cxlr);
- if (IS_ERR(cxlr_pmem))
- return PTR_ERR(cxlr_pmem);
+ rc = cxl_pmem_region_alloc(cxlr);
+ if (rc)
+ return rc;
+ cxlr_pmem = cxlr->cxlr_pmem;
cxl_nvb = cxlr->cxl_nvb;
dev = &cxlr_pmem->dev;
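For the eiw < 8 case above, cxl_dpa_to_hpa() widens the upper DPA-offset bits by eiw and drops the device position into the vacated interleave slot. A standalone rendering of that branch; the 2-way, 256-byte-granularity worked example is ours, not from the patch:

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK_ULL(h, l) \
    	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

    static uint64_t dpa_offset_to_hpa_offset(uint64_t dpa_offset, int pos,
    					 unsigned int eig, unsigned int eiw)
    {
    	uint64_t mask_upper = GENMASK_ULL(51, eig + 8);
    	uint64_t hpa_offset;

    	/* Stretch the upper bits apart by eiw, re-insert 'pos' */
    	hpa_offset = (dpa_offset & mask_upper) << eiw;
    	hpa_offset |= (uint64_t)pos << (eig + 8);

    	/* Bits within one interleave granule are unchanged */
    	hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0);

    	return hpa_offset;
    }

    int main(void)
    {
    	/* 2 ways (eiw=1), 256B granularity (eig=0): DPA offset 0x100
    	 * on the device at pos=1 maps to HPA offset 0x300. */
    	printf("hpa_offset=%#llx\n", (unsigned long long)
    	       dpa_offset_to_hpa_offset(0x100, 1, 0, 1));
    	return 0;
    }

Running the forward decode backwards checks out: HPA offset 0x300 is chunk 3, 3 % 2 selects the device at pos 1, and 3 / 2 granules give DPA offset 0x100.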
diff --git a/drivers/cxl/core/trace.c b/drivers/cxl/core/trace.c
index d0403dc3c8ab..7f2a9dd0d0e3 100644
--- a/drivers/cxl/core/trace.c
+++ b/drivers/cxl/core/trace.c
@@ -6,94 +6,3 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
-
-static bool cxl_is_hpa_in_range(u64 hpa, struct cxl_region *cxlr, int pos)
-{
- struct cxl_region_params *p = &cxlr->params;
- int gran = p->interleave_granularity;
- int ways = p->interleave_ways;
- u64 offset;
-
- /* Is the hpa within this region at all */
- if (hpa < p->res->start || hpa > p->res->end) {
- dev_dbg(&cxlr->dev,
- "Addr trans fail: hpa 0x%llx not in region\n", hpa);
- return false;
- }
-
- /* Is the hpa in an expected chunk for its pos(-ition) */
- offset = hpa - p->res->start;
- offset = do_div(offset, gran * ways);
- if ((offset >= pos * gran) && (offset < (pos + 1) * gran))
- return true;
-
- dev_dbg(&cxlr->dev,
- "Addr trans fail: hpa 0x%llx not in expected chunk\n", hpa);
-
- return false;
-}
-
-static u64 cxl_dpa_to_hpa(u64 dpa, struct cxl_region *cxlr,
- struct cxl_endpoint_decoder *cxled)
-{
- u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa;
- struct cxl_region_params *p = &cxlr->params;
- int pos = cxled->pos;
- u16 eig = 0;
- u8 eiw = 0;
-
- ways_to_eiw(p->interleave_ways, &eiw);
- granularity_to_eig(p->interleave_granularity, &eig);
-
- /*
- * The device position in the region interleave set was removed
- * from the offset at HPA->DPA translation. To reconstruct the
- * HPA, place the 'pos' in the offset.
- *
- * The placement of 'pos' in the HPA is determined by interleave
- * ways and granularity and is defined in the CXL Spec 3.0 Section
- * 8.2.4.19.13 Implementation Note: Device Decode Logic
- */
-
- /* Remove the dpa base */
- dpa_offset = dpa - cxl_dpa_resource_start(cxled);
-
- mask_upper = GENMASK_ULL(51, eig + 8);
-
- if (eiw < 8) {
- hpa_offset = (dpa_offset & mask_upper) << eiw;
- hpa_offset |= pos << (eig + 8);
- } else {
- bits_upper = (dpa_offset & mask_upper) >> (eig + 8);
- bits_upper = bits_upper * 3;
- hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8);
- }
-
- /* The lower bits remain unchanged */
- hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0);
-
- /* Apply the hpa_offset to the region base address */
- hpa = hpa_offset + p->res->start;
-
- if (!cxl_is_hpa_in_range(hpa, cxlr, cxled->pos))
- return ULLONG_MAX;
-
- return hpa;
-}
-
-u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *cxlmd,
- u64 dpa)
-{
- struct cxl_region_params *p = &cxlr->params;
- struct cxl_endpoint_decoder *cxled = NULL;
-
- for (int i = 0; i < p->nr_targets; i++) {
- cxled = p->targets[i];
- if (cxlmd == cxled_to_memdev(cxled))
- break;
- }
- if (!cxled || cxlmd != cxled_to_memdev(cxled))
- return ULLONG_MAX;
-
- return cxl_dpa_to_hpa(dpa, cxlr, cxled);
-}
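With the translation helpers moved to the region driver, trace.c is reduced to CREATE_TRACE_POINTS. The enriched cxl_dram/cxl_general_media records can be consumed from tracefs; a minimal sketch, assuming the usual /sys/kernel/tracing mount point (older systems expose it under /sys/kernel/debug/tracing):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	int fd = open("/sys/kernel/tracing/events/cxl/cxl_dram/enable",
    		      O_WRONLY);
    	char buf[4096];
    	ssize_t n;

    	if (fd < 0 || write(fd, "1", 1) != 1) {
    		perror("enable cxl_dram");
    		return 1;
    	}
    	close(fd);

    	/* Records now end with hpa=..., region=..., region_uuid=... */
    	fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
    	if (fd < 0) {
    		perror("trace_pipe");
    		return 1;
    	}
    	while ((n = read(fd, buf, sizeof(buf))) > 0)
    		fwrite(buf, 1, n, stdout);
    	return 0;
    }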
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index e5f13260fc52..07a0394b1d99 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -253,8 +253,8 @@ TRACE_EVENT(cxl_generic_event,
* DRAM Event Record
* CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
*/
-#define CXL_DPA_FLAGS_MASK 0x3F
-#define CXL_DPA_MASK (~CXL_DPA_FLAGS_MASK)
+#define CXL_DPA_FLAGS_MASK GENMASK(1, 0)
+#define CXL_DPA_MASK GENMASK_ULL(63, 6)
#define CXL_DPA_VOLATILE BIT(0)
#define CXL_DPA_NOT_REPAIRABLE BIT(1)
@@ -316,9 +316,9 @@ TRACE_EVENT(cxl_generic_event,
TRACE_EVENT(cxl_general_media,
TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
- struct cxl_event_gen_media *rec),
+ struct cxl_region *cxlr, u64 hpa, struct cxl_event_gen_media *rec),
- TP_ARGS(cxlmd, log, rec),
+ TP_ARGS(cxlmd, log, cxlr, hpa, rec),
TP_STRUCT__entry(
CXL_EVT_TP_entry
@@ -330,10 +330,13 @@ TRACE_EVENT(cxl_general_media,
__field(u8, channel)
__field(u32, device)
__array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
- __field(u16, validity_flags)
/* Following are out of order to pack trace record */
+ __field(u64, hpa)
+ __field_struct(uuid_t, region_uuid)
+ __field(u16, validity_flags)
__field(u8, rank)
__field(u8, dpa_flags)
+ __string(region_name, cxlr ? dev_name(&cxlr->dev) : "")
),
TP_fast_assign(
@@ -354,18 +357,28 @@ TRACE_EVENT(cxl_general_media,
memcpy(__entry->comp_id, &rec->component_id,
CXL_EVENT_GEN_MED_COMP_ID_SIZE);
__entry->validity_flags = get_unaligned_le16(&rec->validity_flags);
+ __entry->hpa = hpa;
+ if (cxlr) {
+ __assign_str(region_name, dev_name(&cxlr->dev));
+ uuid_copy(&__entry->region_uuid, &cxlr->params.uuid);
+ } else {
+ __assign_str(region_name, "");
+ uuid_copy(&__entry->region_uuid, &uuid_null);
+ }
),
CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' " \
"descriptor='%s' type='%s' transaction_type='%s' channel=%u rank=%u " \
- "device=%x comp_id=%s validity_flags='%s'",
+ "device=%x comp_id=%s validity_flags='%s' " \
+ "hpa=%llx region=%s region_uuid=%pUb",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
show_mem_event_type(__entry->type),
show_trans_type(__entry->transaction_type),
__entry->channel, __entry->rank, __entry->device,
__print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
- show_valid_flags(__entry->validity_flags)
+ show_valid_flags(__entry->validity_flags),
+ __entry->hpa, __get_str(region_name), &__entry->region_uuid
)
);
@@ -400,9 +413,9 @@ TRACE_EVENT(cxl_general_media,
TRACE_EVENT(cxl_dram,
TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
- struct cxl_event_dram *rec),
+ struct cxl_region *cxlr, u64 hpa, struct cxl_event_dram *rec),
- TP_ARGS(cxlmd, log, rec),
+ TP_ARGS(cxlmd, log, cxlr, hpa, rec),
TP_STRUCT__entry(
CXL_EVT_TP_entry
@@ -417,10 +430,13 @@ TRACE_EVENT(cxl_dram,
__field(u32, nibble_mask)
__field(u32, row)
__array(u8, cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE)
+ __field(u64, hpa)
+ __field_struct(uuid_t, region_uuid)
__field(u8, rank) /* Out of order to pack trace record */
__field(u8, bank_group) /* Out of order to pack trace record */
__field(u8, bank) /* Out of order to pack trace record */
__field(u8, dpa_flags) /* Out of order to pack trace record */
+ __string(region_name, cxlr ? dev_name(&cxlr->dev) : "")
),
TP_fast_assign(
@@ -444,12 +460,21 @@ TRACE_EVENT(cxl_dram,
__entry->column = get_unaligned_le16(rec->column);
memcpy(__entry->cor_mask, &rec->correction_mask,
CXL_EVENT_DER_CORRECTION_MASK_SIZE);
+ __entry->hpa = hpa;
+ if (cxlr) {
+ __assign_str(region_name, dev_name(&cxlr->dev));
+ uuid_copy(&__entry->region_uuid, &cxlr->params.uuid);
+ } else {
+ __assign_str(region_name, "");
+ uuid_copy(&__entry->region_uuid, &uuid_null);
+ }
),
CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' " \
"transaction_type='%s' channel=%u rank=%u nibble_mask=%x " \
"bank_group=%u bank=%u row=%u column=%u cor_mask=%s " \
- "validity_flags='%s'",
+ "validity_flags='%s' " \
+ "hpa=%llx region=%s region_uuid=%pUb",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
show_mem_event_type(__entry->type),
@@ -458,7 +483,8 @@ TRACE_EVENT(cxl_dram,
__entry->bank_group, __entry->bank,
__entry->row, __entry->column,
__print_hex(__entry->cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE),
- show_dram_valid_flags(__entry->validity_flags)
+ show_dram_valid_flags(__entry->validity_flags),
+ __entry->hpa, __get_str(region_name), &__entry->region_uuid
)
);
@@ -642,8 +668,6 @@ TRACE_EVENT(cxl_memory_module,
#define cxl_poison_overflow(flags, time) \
(flags & CXL_POISON_FLAG_OVERFLOW ? le64_to_cpu(time) : 0)
-u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *memdev, u64 dpa);
-
TRACE_EVENT(cxl_poison,
TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *cxlr,