author    Dan Williams <dan.j.williams@intel.com>    2022-02-01 23:24:30 +0300
committer Dan Williams <dan.j.williams@intel.com>    2022-02-09 09:57:30 +0300
commit    d17d0540a0dbf109210f7b57a37571e2978da0fa (patch)
tree      d7f969f8c73c1134c480484ebb13a72a2950769e /drivers/cxl/core/hdm.c
parent    98d2d3a264543680281fd8a4e6ae490ca26b4f85 (diff)
download  linux-d17d0540a0dbf109210f7b57a37571e2978da0fa.tar.xz
cxl/core/hdm: Add CXL standard decoder enumeration to the core
Unlike the decoder enumeration for "root decoders" described by platform firmware, standard decoders can be enumerated from the component registers space once the base address has been identified (via PCI, ACPI, or another mechanism).

Add common infrastructure for HDM (Host Managed Device Memory) Decoder enumeration and share it between host-bridge, upstream switch port, and cxl_test defined decoders.

The locking model for switch level decoders is to hold the port lock over the enumeration. This facilitates moving the dport and decoder enumeration to a 'port' driver. For now, the only enumerator of decoder resources is the cxl_acpi root driver.

Co-developed-by: Ben Widawsky <ben.widawsky@intel.com>
Signed-off-by: Ben Widawsky <ben.widawsky@intel.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/164374688404.395335.9239248252443123526.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
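For context, the call sequence this infrastructure sets up for a future port driver is roughly the sketch below. example_enumerate_port() is an illustrative name, not part of this patch; the two helpers and the expectation that the port device lock is held over enumeration come from the code in this commit:

#include <linux/device.h>

#include "cxlmem.h" /* devm_cxl_setup_hdm(), devm_cxl_enumerate_decoders() */

/* hypothetical caller, invoked with the port device lock held */
static int example_enumerate_port(struct device *host, struct cxl_port *port)
{
	struct cxl_hdm *cxlhdm;

	/* map the component registers and parse the HDM decoder capability */
	cxlhdm = devm_cxl_setup_hdm(host, port);
	if (IS_ERR(cxlhdm))
		return PTR_ERR(cxlhdm);

	/* register one cxl_decoder object per HDM decoder register set */
	return devm_cxl_enumerate_decoders(host, cxlhdm);
}

Single-dport host bridges that publish no decoder capability are covered by devm_cxl_add_passthrough_decoder() instead.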
Diffstat (limited to 'drivers/cxl/core/hdm.c')
-rw-r--r-- | drivers/cxl/core/hdm.c | 248
1 file changed, 248 insertions, 0 deletions
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
new file mode 100644
index 000000000000..84f4ed288a88
--- /dev/null
+++ b/drivers/cxl/core/hdm.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+
+#include "cxlmem.h"
+#include "core.h"
+
+/**
+ * DOC: cxl core hdm
+ *
+ * Compute Express Link Host Managed Device Memory, starting with the
+ * CXL 2.0 specification, is managed by an array of HDM Decoder register
+ * instances per CXL port and per CXL endpoint. Define common helpers
+ * for enumerating these registers and capabilities.
+ */
+
+static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
+ int *target_map)
+{
+ int rc;
+
+ rc = cxl_decoder_add_locked(cxld, target_map);
+ if (rc) {
+ put_device(&cxld->dev);
+ dev_err(&port->dev, "Failed to add decoder\n");
+ return rc;
+ }
+
+ rc = cxl_decoder_autoremove(&port->dev, cxld);
+ if (rc)
+ return rc;
+
+ dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
+
+ return 0;
+}
+
+/*
+ * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
+ * single ported host-bridges need not publish a decoder capability when a
+ * passthrough decode can be assumed, i.e. all transactions that the uport sees
+ * are claimed and passed to the single dport. Disable the range until the first
+ * CXL region is enumerated / activated.
+ */
+int devm_cxl_add_passthrough_decoder(struct device *host, struct cxl_port *port)
+{
+ struct cxl_decoder *cxld;
+ struct cxl_dport *dport;
+ int single_port_map[1];
+
+ cxld = cxl_switch_decoder_alloc(port, 1);
+ if (IS_ERR(cxld))
+ return PTR_ERR(cxld);
+
+ device_lock_assert(&port->dev);
+
+ dport = list_first_entry(&port->dports, typeof(*dport), list);
+ single_port_map[0] = dport->port_id;
+
+ return add_hdm_decoder(port, cxld, single_port_map);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);
+
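+/*
+ * Cache the decoder instance count, the per-decoder target count, and
+ * the address bits available for interleave math, all as advertised by
+ * the HDM Decoder Capability register.
+ */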
+static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
+{
+ u32 hdm_cap;
+
+ hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
+ cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
+ cxlhdm->target_count =
+ FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);
+ if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
+ cxlhdm->interleave_mask |= GENMASK(11, 8);
+ if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
+ cxlhdm->interleave_mask |= GENMASK(14, 12);
+}
+
+static void __iomem *map_hdm_decoder_regs(struct cxl_port *port,
+ void __iomem *crb)
+{
+ struct cxl_component_reg_map map;
+
+ cxl_probe_component_regs(&port->dev, crb, &map);
+ if (!map.hdm_decoder.valid) {
+ dev_err(&port->dev, "HDM decoder registers invalid\n");
+ return IOMEM_ERR_PTR(-ENXIO);
+ }
+
+ return crb + map.hdm_decoder.offset;
+}
+
+/**
+ * devm_cxl_setup_hdm - map HDM decoder component registers
+ * @host: devm context for allocations
+ * @port: cxl_port to map
+ */
+struct cxl_hdm *devm_cxl_setup_hdm(struct device *host, struct cxl_port *port)
+{
+ struct device *dev = &port->dev;
+ void __iomem *crb, *hdm;
+ struct cxl_hdm *cxlhdm;
+
+ cxlhdm = devm_kzalloc(host, sizeof(*cxlhdm), GFP_KERNEL);
+ if (!cxlhdm)
+ return ERR_PTR(-ENOMEM);
+
+ cxlhdm->port = port;
+ crb = devm_cxl_iomap_block(host, port->component_reg_phys,
+ CXL_COMPONENT_REG_BLOCK_SIZE);
+ if (!crb) {
+ dev_err(dev, "No component registers mapped\n");
+ return ERR_PTR(-ENXIO);
+ }
+
+ hdm = map_hdm_decoder_regs(port, crb);
+ if (IS_ERR(hdm))
+ return ERR_CAST(hdm);
+ cxlhdm->regs.hdm_decoder = hdm;
+
+ parse_hdm_decoder_caps(cxlhdm);
+ if (cxlhdm->decoder_count == 0) {
+ dev_err(dev, "Spec violation. Caps invalid\n");
+ return ERR_PTR(-ENXIO);
+ }
+
+ return cxlhdm;
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);
+
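+/*
+ * The encoded interleave granularity is a power-of-2 multiple of 256
+ * bytes: granularity = 256 << encoded value.
+ */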
+static int to_interleave_granularity(u32 ctrl)
+{
+ int val = FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl);
+
+ return 256 << val;
+}
+
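+/*
+ * Encodings 0..4 select the power-of-2 interleave ways (1, 2, 4, 8,
+ * 16); encodings 8..10 select the 3, 6, and 12 way options. Reserved
+ * encodings decode to 0 ways.
+ */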
+static int to_interleave_ways(u32 ctrl)
+{
+ int val = FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl);
+
+ switch (val) {
+ case 0 ... 4:
+ return 1 << val;
+ case 8 ... 10:
+ return 3 << (val - 8);
+ default:
+ return 0;
+ }
+}
+
+static void init_hdm_decoder(struct cxl_decoder *cxld, int *target_map,
+ void __iomem *hdm, int which)
+{
+ u64 size, base;
+ u32 ctrl;
+ int i;
+ union {
+ u64 value;
+ unsigned char target_id[8];
+ } target_list;
+
+ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
+ base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
+ size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
+
+ if (!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED))
+ size = 0;
+
+ cxld->decoder_range = (struct range) {
+ .start = base,
+ .end = base + size - 1,
+ };
+
+ /* switch decoders are always enabled if committed */
+ if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED) {
+ cxld->flags |= CXL_DECODER_F_ENABLE;
+ if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
+ cxld->flags |= CXL_DECODER_F_LOCK;
+ }
+ cxld->interleave_ways = to_interleave_ways(ctrl);
+ cxld->interleave_granularity = to_interleave_granularity(ctrl);
+
+ if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
+ cxld->target_type = CXL_DECODER_EXPANDER;
+ else
+ cxld->target_type = CXL_DECODER_ACCELERATOR;
+
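+ /* the target list register packs one dport id per byte */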
+ target_list.value =
+ ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
+ for (i = 0; i < cxld->interleave_ways; i++)
+ target_map[i] = target_list.target_id[i];
+}
+
+/**
+ * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
+ * @host: devm allocation context
+ * @cxlhdm: HDM capability context established by devm_cxl_setup_hdm()
+ */
+int devm_cxl_enumerate_decoders(struct device *host, struct cxl_hdm *cxlhdm)
+{
+ void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+ struct cxl_port *port = cxlhdm->port;
+ int i, committed;
+ u32 ctrl;
+
+ /*
+ * Since the register resource was recently claimed via request_region(),
+ * be careful about trusting the "not-committed" status until the commit
+ * timeout has elapsed. The commit timeout is 10ms (CXL 2.0
+ * 8.2.5.12.20), but double it to be tolerant of any clock skew between
+ * host and target.
+ */
+ for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
+ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
+ if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
+ committed++;
+ }
+
+ /* ensure that future checks of committed can be trusted */
+ if (committed != cxlhdm->decoder_count)
+ msleep(20);
+
+ for (i = 0; i < cxlhdm->decoder_count; i++) {
+ int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
+ int rc, target_count = cxlhdm->target_count;
+ struct cxl_decoder *cxld;
+
+ cxld = cxl_switch_decoder_alloc(port, target_count);
+ if (IS_ERR(cxld)) {
+ dev_warn(&port->dev,
+ "Failed to allocate the decoder\n");
+ return PTR_ERR(cxld);
+ }
+
+ init_hdm_decoder(cxld, target_map, cxlhdm->regs.hdm_decoder, i);
+ rc = add_hdm_decoder(port, cxld, target_map);
+ if (rc) {
+ dev_warn(&port->dev,
+ "Failed to add decoder to port\n");
+ return rc;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);