author	Farah Kassabri <fkassabri@habana.ai>	2023-11-02 12:53:29 +0300
committer	Oded Gabbay <ogabbay@kernel.org>	2024-02-26 10:30:40 +0300
commit	f728c17fc97aea7a33151d9ba64106291c62bb02 (patch)
tree	ef840d707bbfa0b3c8135eb6efda8e890fd2cd75 /drivers/accel/habanalabs/common/hw_queue.c
parent	246d8b6cfb80a31e3cc287e3c1db6a5515b7c20a (diff)
download	linux-f728c17fc97aea7a33151d9ba64106291c62bb02.tar.xz
accel/habanalabs/gaudi2: move HMMU page tables to device memory
Currently the HMMU page tables reside in host memory, which causes the device to access the host for every page walk. This can affect PCIe bandwidth in certain scenarios.

To prevent that, move the HMMU page tables to device memory, so that a miss transaction reads the hops from there instead of going to the host.

Signed-off-by: Farah Kassabri <fkassabri@habana.ai>
Reviewed-by: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
Diffstat (limited to 'drivers/accel/habanalabs/common/hw_queue.c')
-rw-r--r--	drivers/accel/habanalabs/common/hw_queue.c	| 17
1 file changed, 17 insertions(+), 0 deletions(-)
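For readers outside the driver, here is a minimal, self-contained sketch of the pattern introduced by the first hunk below: when a queue is flagged as keeping its buffer descriptors (BDs) in device DRAM, the submit path mirrors each 16-byte BD into the DRAM copy as two 64-bit writes before ringing the doorbell. All names in the sketch (struct bd, struct hw_queue, dev_mem_write64()) are illustrative stand-ins, not the driver's API; the real code goes through hdev->asic_funcs->access_dev_mem() with PCI_REGION_DRAM and DEBUGFS_WRITE64, as shown in the diff.

/* Sketch only: hypothetical types, not the habanalabs driver code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define QUEUE_LEN 8			/* ring entries, power of two */

struct bd {				/* 16-byte buffer descriptor */
	uint32_t ctl;
	uint32_t len;
	uint64_t ptr;
};

struct hw_queue {
	struct bd ring[QUEUE_LEN];	/* host (kernel) copy of the ring */
	uint64_t dram_base;		/* device DRAM address of the ring mirror */
	uint32_t pi;			/* producer index */
	int dram_bd;			/* BDs must also be written to device DRAM */
};

/* Stand-in for the ASIC callback that writes one u64 to device memory. */
static void dev_mem_write64(uint64_t addr, uint64_t val)
{
	printf("DRAM[0x%llx] <= 0x%llx\n",
	       (unsigned long long)addr, (unsigned long long)val);
}

static void submit_bd(struct hw_queue *q, uint32_t ctl, uint32_t len, uint64_t ptr)
{
	uint32_t slot = q->pi & (QUEUE_LEN - 1);
	struct bd *bd = &q->ring[slot];

	bd->ctl = ctl;
	bd->len = len;
	bd->ptr = ptr;

	if (q->dram_bd) {
		/* Mirror the BD into device DRAM as two 64-bit writes,
		 * matching the i = 0..1 loop in the patch.
		 */
		uint64_t words[2];

		memcpy(words, bd, sizeof(words));
		for (int i = 0; i < 2; i++)
			dev_mem_write64(q->dram_base + slot * sizeof(*bd) +
					i * sizeof(uint64_t), words[i]);
	}

	q->pi++;	/* the real driver rings the doorbell with the new PI here */
}

int main(void)
{
	struct hw_queue q = { .dram_base = 0x100000, .dram_bd = 1 };

	submit_bd(&q, 0x1, 64, 0xdeadbeef);
	return 0;
}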
diff --git a/drivers/accel/habanalabs/common/hw_queue.c b/drivers/accel/habanalabs/common/hw_queue.c
index d0087c0ec48c..3d04a7507cce 100644
--- a/drivers/accel/habanalabs/common/hw_queue.c
+++ b/drivers/accel/habanalabs/common/hw_queue.c
@@ -84,6 +84,8 @@ void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
 			u32 ctl, u32 len, u64 ptr)
 {
 	struct hl_bd *bd;
+	u64 addr;
+	int i;
 
 	bd = q->kernel_address;
 	bd += hl_pi_2_offset(q->pi);
@@ -91,7 +93,16 @@ void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
 	bd->len = cpu_to_le32(len);
 	bd->ptr = cpu_to_le64(ptr);
 
+	if (q->dram_bd)
+		for (i = 0 ; i < 2 ; i++) {
+			addr = q->pq_dram_address +
+				((hl_pi_2_offset(q->pi) * sizeof(struct hl_bd)) + (i * sizeof(u64)));
+			hdev->asic_funcs->access_dev_mem(hdev, PCI_REGION_DRAM, addr,
+							(u64 *)(bd) + i, DEBUGFS_WRITE64);
+		}
+
 	q->pi = hl_queue_inc_ptr(q->pi);
+
 	hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
 }
@@ -1087,12 +1098,18 @@ int hl_hw_queues_create(struct hl_device *hdev)
 		q->supports_sync_stream =
 				asic->hw_queues_props[i].supports_sync_stream;
 		q->collective_mode = asic->hw_queues_props[i].collective_mode;
+		q->dram_bd = asic->hw_queues_props[i].dram_bd;
+
 		rc = queue_init(hdev, q, i);
 		if (rc) {
 			dev_err(hdev->dev,
 				"failed to initialize queue %d\n", i);
 			goto release_queues;
 		}
+
+		/* Set DRAM PQ address for the queue if it should be at DRAM */
+		if (q->dram_bd)
+			q->pq_dram_address = asic->hw_queues_props[i].q_dram_bd_address;
 	}
 
 	return 0;
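One design note on the second hunk: the DRAM-resident BD behavior is opt-in per queue. hl_hw_queues_create() copies dram_bd and, if set, q_dram_bd_address from the ASIC's hw_queues_props entry into the queue, so only queues the ASIC marks as DRAM-backed pay the extra device-memory writes on each submission; all other queues keep the existing host-only path.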