author: Dan Williams <dan.j.williams@intel.com> 2022-05-24 22:04:58 +0300
committer: Dan Williams <dan.j.williams@intel.com> 2022-07-22 03:19:23 +0300
commit: 0c33b3935265cd5aafa18904363bab0c545adeee (patch)
tree: 71c79965e07a1ccec583a3e52b4c66067ce2a539 /drivers/cxl/core/hdm.c
parent: 2c8669033f16f5d791e10a5bdd42e39c7380da57 (diff)
download: linux-0c33b3935265cd5aafa18904363bab0c545adeee.tar.xz
cxl/hdm: Track next decoder to allocate
The CXL specification enforces that endpoint decoders are committed in hw instance id order. In preparation for adding dynamic DPA allocation, record the hw instance id in endpoint decoders, and enforce allocations to occur in hw instance id order.

Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/165784328827.1758207.9627538529944559954.stgit@dwillia2-xfh.jf.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
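For illustration, the following is a minimal userspace sketch (not the driver code itself) of the ordering rule this patch introduces: the port remembers the last decoder that received an allocation in hdm_end, accepts a reservation for decoder N only when N == hdm_end + 1, and walks hdm_end back on release. The names struct port, reserve_decoder() and release_decoder() are hypothetical stand-ins for the driver's cxl_port, __cxl_dpa_reserve() and __cxl_dpa_release(); only the hdm_end bookkeeping mirrors the patch.

/*
 * Simplified model of the commit-order rule from CXL 8.2.5.12.20:
 * decoder[m] must be allocated/committed before decoder[m+1].
 */
#include <stdio.h>

struct port {
	int id;
	int hdm_end;	/* last decoder with an allocation, -1 if none */
};

/* Accept an allocation only in hardware instance id order. */
static int reserve_decoder(struct port *port, int decoder_id)
{
	if (port->hdm_end + 1 != decoder_id) {
		printf("decoder%d.%d: expected decoder%d.%d\n", port->id,
		       decoder_id, port->id, port->hdm_end + 1);
		return -1;	/* -EBUSY in the real driver */
	}
	port->hdm_end++;
	return 0;
}

/* Releasing the most recent allocation moves hdm_end back down. */
static void release_decoder(struct port *port)
{
	port->hdm_end--;
}

int main(void)
{
	struct port port = { .id = 0, .hdm_end = -1 };

	reserve_decoder(&port, 0);	/* accepted */
	reserve_decoder(&port, 2);	/* rejected: decoder0.1 expected */
	reserve_decoder(&port, 1);	/* accepted */
	release_decoder(&port);		/* hdm_end back to 0 */
	return 0;
}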
Diffstat (limited to 'drivers/cxl/core/hdm.c')
-rw-r--r-- drivers/cxl/core/hdm.c | 15
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index c2cff5783fda..14354f4cd92e 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -160,6 +160,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);
static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *port = cxled_to_port(cxled);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct resource *res = cxled->dpa_res;
resource_size_t skip_start;
@@ -173,6 +174,7 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
__release_region(&cxlds->dpa_res, skip_start, cxled->skip);
cxled->skip = 0;
cxled->dpa_res = NULL;
+ port->hdm_end--;
}
static void cxl_dpa_release(void *cxled)
@@ -203,6 +205,18 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
return -EBUSY;
}
+ if (port->hdm_end + 1 != cxled->cxld.id) {
+ /*
+ * Assumes alloc and commit order is always in hardware instance
+ * order per expectations from 8.2.5.12.20 Committing Decoder
+ * Programming that enforce decoder[m] committed before
+ * decoder[m+1] commit start.
+ */
+ dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
+ cxled->cxld.id, port->id, port->hdm_end + 1);
+ return -EBUSY;
+ }
+
if (skipped) {
res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
dev_name(&cxled->cxld.dev), 0);
@@ -235,6 +249,7 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
cxled->cxld.id, cxled->dpa_res);
cxled->mode = CXL_DECODER_MIXED;
}
+ port->hdm_end++;
return 0;
}