author     Ira Weiny <ira.weiny@intel.com>            2023-12-21 03:17:36 +0300
committer  Dan Williams <dan.j.williams@intel.com>    2024-01-10 02:41:23 +0300
commit     dc97f6344f205b0dfa144e1b3e16d6dc05383d57 (patch)
tree       1d8e0ea26554f22f8385a329096c1d17c4dbaa84 /drivers/cxl/core
parent     ced085ef369af7a2b6da962ec2fbd01339f60693 (diff)
cxl/pci: Register for and process CPER events
If the firmware has configured CXL event support to be firmware first,
the OS can process those events through CPER records. The CXL layer has
unique DPA-to-HPA knowledge and standard event trace parsing in place.

CPER records contain Bus, Device, Function information which can be
used to identify the PCI device which is sending the event.

Change the PCI driver registration to include registration of a CXL
CPER callback to process events through the trace subsystem.

Use new scope-based management to simplify the handling of the PCI
device object.

Tested-by: Smita-Koralahalli <Smita.KoralahalliChannabasappa@amd.com>
Reviewed-by: Smita-Koralahalli <Smita.KoralahalliChannabasappa@amd.com>
Link: https://lore.kernel.org/r/20231220-cxl-cper-v5-9-1bb8a4ca2c7a@intel.com
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
[djbw: use new pci_dev guard, flip init order]
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
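For reference, a minimal sketch of the registration pattern the message
describes, i.e. what the drivers/cxl/pci.c side of this patch (outside
the 'drivers/cxl/core' diffstat below) plausibly looks like. The names
cxl_cper_register_callback(), cxl_cper_unregister_callback(), and
cxl_cper_event_call() are assumptions inferred from the commit message
and the rest of this series, not taken from the hunks shown here:

/*
 * Sketch only, not the verbatim patch. Per the commit message, the CPER
 * callback is registered before the PCI driver ("flip init order"), so
 * no device can be bound while the callback is unregistered.
 */
static int __init cxl_pci_driver_init(void)
{
	int rc;

	/* Assumed helper from earlier in this series */
	rc = cxl_cper_register_callback(cxl_cper_event_call);
	if (rc)
		return rc;

	rc = pci_register_driver(&cxl_pci_driver);
	if (rc)
		cxl_cper_unregister_callback(cxl_cper_event_call);

	return rc;
}

static void __exit cxl_pci_driver_exit(void)
{
	/* Teardown in the reverse order of init */
	pci_unregister_driver(&cxl_pci_driver);
	cxl_cper_unregister_callback(cxl_cper_event_call);
}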
Diffstat (limited to 'drivers/cxl/core')
-rw-r--r--   drivers/cxl/core/mbox.c   40
1 file changed, 28 insertions(+), 12 deletions(-)
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 06957696247b..23021920aace 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -836,21 +836,37 @@ out:
}
EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);
-static void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
- enum cxl_event_log_type type,
- struct cxl_event_record_raw *record)
+void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
+ enum cxl_event_log_type type,
+ enum cxl_event_type event_type,
+ const uuid_t *uuid, union cxl_event *evt)
{
- union cxl_event *evt = &record->event;
- uuid_t *id = &record->id;
-
- if (uuid_equal(id, &CXL_EVENT_GEN_MEDIA_UUID))
+ if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
trace_cxl_general_media(cxlmd, type, &evt->gen_media);
- else if (uuid_equal(id, &CXL_EVENT_DRAM_UUID))
+ else if (event_type == CXL_CPER_EVENT_DRAM)
trace_cxl_dram(cxlmd, type, &evt->dram);
- else if (uuid_equal(id, &CXL_EVENT_MEM_MODULE_UUID))
+ else if (event_type == CXL_CPER_EVENT_MEM_MODULE)
trace_cxl_memory_module(cxlmd, type, &evt->mem_module);
else
- trace_cxl_generic_event(cxlmd, type, id, &evt->generic);
+ trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL);
+
+static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
+ enum cxl_event_log_type type,
+ struct cxl_event_record_raw *record)
+{
+ enum cxl_event_type ev_type = CXL_CPER_EVENT_GENERIC;
+ const uuid_t *uuid = &record->id;
+
+ if (uuid_equal(uuid, &CXL_EVENT_GEN_MEDIA_UUID))
+ ev_type = CXL_CPER_EVENT_GEN_MEDIA;
+ else if (uuid_equal(uuid, &CXL_EVENT_DRAM_UUID))
+ ev_type = CXL_CPER_EVENT_DRAM;
+ else if (uuid_equal(uuid, &CXL_EVENT_MEM_MODULE_UUID))
+ ev_type = CXL_CPER_EVENT_MEM_MODULE;
+
+ cxl_event_trace_record(cxlmd, type, ev_type, uuid, &record->event);
}
static int cxl_clear_event_record(struct cxl_memdev_state *mds,
@@ -961,8 +977,8 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
break;
for (i = 0; i < nr_rec; i++)
- cxl_event_trace_record(cxlmd, type,
- &payload->records[i]);
+ __cxl_event_trace_record(cxlmd, type,
+ &payload->records[i]);
if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
trace_cxl_overflow(cxlmd, type, payload);