author     Leonid Ravich <Leonid.Ravich@emc.com>	2020-04-16 20:06:21 +0300
committer  Vinod Koul <vkoul@kernel.org>	2020-04-17 14:58:39 +0300
commit     bd2bf302eef21aafa6da2cf829b87a9e33150658 (patch)
tree       1887924dc5c14c2c3e5eec3d3f8289474e184c99 /drivers/dma/ioat
parent     2fea2906b5cbeffe49911e4735451af62fc121b4 (diff)
dmaengine: ioat: fixing chunk sizing macros dependency
Change the macros that assume a 2M chunk size so they are derived from the chunk size, which may be a different value, in preparation for changing the allocation chunk size.

Acked-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Leonid Ravich <Leonid.Ravich@emc.com>
Link: https://lore.kernel.org/r/20200416170628.16196-1-leonid.ravich@dell.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
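For context, the renamed macros keep their old values as long as the chunk size stays at 2M; only the dependency changes. A minimal sketch of the arithmetic (not part of the patch; it assumes IOAT_DESC_SZ is 64 bytes, which is defined elsewhere in dma.h and not shown in this diff):

#include <stdio.h>

/* Assumed values for illustration only: SZ_2M and IOAT_DESC_SZ come from
 * elsewhere in the kernel tree and are not part of this patch. */
#define SZ_2M			(2 * 1024 * 1024)
#define IOAT_DESC_SZ		64	/* assumed hardware descriptor size */

/* The macros as they read after this patch (dma.h): */
#define IOAT_MAX_ORDER		16
#define IOAT_MAX_DESCS		(1 << IOAT_MAX_ORDER)
#define IOAT_CHUNK_SIZE		(SZ_2M)
#define IOAT_DESCS_PER_CHUNK	(IOAT_CHUNK_SIZE / IOAT_DESC_SZ)

int main(void)
{
	/* With a 2M chunk these reproduce the old constants:
	 * 65536 descriptors total, 32768 per chunk, descs[2]. */
	printf("IOAT_MAX_DESCS       = %d\n", IOAT_MAX_DESCS);
	printf("IOAT_DESCS_PER_CHUNK = %d\n", IOAT_DESCS_PER_CHUNK);
	printf("descs[] entries      = %d\n",
	       IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK);
	return 0;
}

If the chunk size is later reduced or enlarged, IOAT_DESCS_PER_CHUNK and the descs[] array size follow automatically instead of relying on hard-coded 2M constants.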
Diffstat (limited to 'drivers/dma/ioat')
-rw-r--r--	drivers/dma/ioat/dma.c	14
-rw-r--r--	drivers/dma/ioat/dma.h	10
-rw-r--r--	drivers/dma/ioat/init.c	2
3 files changed, 15 insertions, 11 deletions
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 18c011e57592..1e0e6c1d5533 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -332,8 +332,8 @@ ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
u8 *pos;
off_t offs;
- chunk = idx / IOAT_DESCS_PER_2M;
- idx &= (IOAT_DESCS_PER_2M - 1);
+ chunk = idx / IOAT_DESCS_PER_CHUNK;
+ idx &= (IOAT_DESCS_PER_CHUNK - 1);
offs = idx * IOAT_DESC_SZ;
pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
phys = ioat_chan->descs[chunk].hw + offs;
@@ -370,7 +370,8 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
if (!ring)
return NULL;
- ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;
+ chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE;
+ ioat_chan->desc_chunks = chunks;
for (i = 0; i < chunks; i++) {
struct ioat_descs *descs = &ioat_chan->descs[i];
@@ -382,8 +383,9 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
for (idx = 0; idx < i; idx++) {
descs = &ioat_chan->descs[idx];
- dma_free_coherent(to_dev(ioat_chan), SZ_2M,
- descs->virt, descs->hw);
+ dma_free_coherent(to_dev(ioat_chan),
+ IOAT_CHUNK_SIZE,
+ descs->virt, descs->hw);
descs->virt = NULL;
descs->hw = 0;
}
@@ -404,7 +406,7 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
dma_free_coherent(to_dev(ioat_chan),
- SZ_2M,
+ IOAT_CHUNK_SIZE,
ioat_chan->descs[idx].virt,
ioat_chan->descs[idx].hw);
ioat_chan->descs[idx].virt = NULL;
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index b8e8e0b9693c..5216c6b7c99e 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -81,6 +81,11 @@ struct ioatdma_device {
u32 msixpba;
};
+#define IOAT_MAX_ORDER 16
+#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER)
+#define IOAT_CHUNK_SIZE (SZ_2M)
+#define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ)
+
struct ioat_descs {
void *virt;
dma_addr_t hw;
@@ -128,7 +133,7 @@ struct ioatdma_chan {
u16 produce;
struct ioat_ring_ent **ring;
spinlock_t prep_lock;
- struct ioat_descs descs[2];
+ struct ioat_descs descs[IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK];
int desc_chunks;
int intr_coalesce;
int prev_intr_coalesce;
@@ -301,9 +306,6 @@ static inline bool is_ioat_bug(unsigned long err)
return !!err;
}
-#define IOAT_MAX_ORDER 16
-#define IOAT_MAX_DESCS 65536
-#define IOAT_DESCS_PER_2M 32768
static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 60e9afbb896c..58d13564f88b 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -651,7 +651,7 @@ static void ioat_free_chan_resources(struct dma_chan *c)
}
for (i = 0; i < ioat_chan->desc_chunks; i++) {
- dma_free_coherent(to_dev(ioat_chan), SZ_2M,
+ dma_free_coherent(to_dev(ioat_chan), IOAT_CHUNK_SIZE,
ioat_chan->descs[i].virt,
ioat_chan->descs[i].hw);
ioat_chan->descs[i].virt = NULL;