author    Takashi Iwai <tiwai@suse.de>  2024-09-10 14:30:59 +0300
committer Takashi Iwai <tiwai@suse.de>  2024-09-11 09:31:19 +0300
commit    9408ace468c317d60159c011b863b2982dae5e05 (patch)
tree      4bbf843ebe0ac2ce3788bd86d20fcab93dd7014b
parent    0ccbc99e05ec80a7a2348e12132f8341f5b890aa (diff)
ALSA: memalloc: Drop Xen PV workaround again
In the recent commit e469e2045f1b ("ALSA: memalloc: Let IOMMU handle
S/G primarily"), the SG buffer allocation code was modified to use the
standard DMA code primarily, applying the fallback only in limited
cases. This made the Xen PV specific workaround we took in commit
53466ebdec61 ("ALSA: memalloc: Workaround for Xen PV") rather
superfluous. It was a hackish workaround for a regression at that time,
and it seems to be causing other issues (reportedly memory
corruptions). So it's better to clean it up, after all.

Link: https://lore.kernel.org/20240906184209.25423-1-ariadne@ariadne.space
Cc: Ariadne Conill <ariadne@ariadne.space>
Link: https://patch.msgid.link/20240910113100.32542-1-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
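[Editor's note] The key functional change sits in the last hunk of the
diff below: the Xen PV short-circuit in snd_dma_sg_alloc() is deleted,
so every system now tries the standard DMA API path first and reaches
the x86 fallback allocator only when that fails. A minimal userspace
sketch of the resulting dispatch; standard_dma_sg_alloc() and
fallback_sg_alloc() are hypothetical stubs, not the kernel source:

  /*
   * Illustration only: models the post-patch control flow with malloc()
   * standing in for both the DMA API path and the x86 fallback.
   */
  #include <stdio.h>
  #include <stdlib.h>

  static void *standard_dma_sg_alloc(size_t size) { return malloc(size); }
  static void *fallback_sg_alloc(size_t size)     { return malloc(size); }

  static void *sg_alloc(size_t size)
  {
          /* No Xen PV special case any more: always try the standard path... */
          void *p = standard_dma_sg_alloc(size);

          if (p)
                  return p;
          /* ...and fall back only when it fails. */
          return fallback_sg_alloc(size);
  }

  int main(void)
  {
          void *buf = sg_alloc(4096);

          printf("sg_alloc: %s\n", buf ? "ok" : "failed");
          free(buf);
          return 0;
  }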
-rw-r--r--  sound/core/memalloc.c | 19
1 file changed, 3 insertions(+), 16 deletions(-)
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 7237d77713be..b21dba4b374a 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -667,7 +667,6 @@ static const struct snd_malloc_ops snd_dma_noncontig_ops = {
 #ifdef CONFIG_SND_DMA_SGBUF
 /* Fallback SG-buffer allocations for x86 */
 struct snd_dma_sg_fallback {
-        bool use_dma_alloc_coherent;
         size_t count;
         struct page **pages;
         /* DMA address array; the first page contains #pages in ~PAGE_MASK */
@@ -687,13 +686,8 @@ static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
                 size = sgbuf->addrs[i] & ~PAGE_MASK;
                 if (WARN_ON(!size))
                         break;
-                if (sgbuf->use_dma_alloc_coherent)
-                        dma_free_coherent(dmab->dev.dev, size << PAGE_SHIFT,
-                                          page_address(sgbuf->pages[i]),
-                                          sgbuf->addrs[i] & PAGE_MASK);
-                else
-                        do_free_pages(page_address(sgbuf->pages[i]),
-                                      size << PAGE_SHIFT, false);
+                do_free_pages(page_address(sgbuf->pages[i]),
+                              size << PAGE_SHIFT, false);
                 i += size;
         }
 }
@@ -715,7 +709,6 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
         sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
         if (!sgbuf)
                 return NULL;
-        sgbuf->use_dma_alloc_coherent = cpu_feature_enabled(X86_FEATURE_XENPV);
         size = PAGE_ALIGN(size);
         sgbuf->count = size >> PAGE_SHIFT;
         sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
@@ -728,10 +721,7 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
         chunk = (PAGE_SIZE - 1) << PAGE_SHIFT; /* to fit in low bits in addrs */
         while (size > 0) {
                 chunk = min(size, chunk);
-                if (sgbuf->use_dma_alloc_coherent)
-                        p = dma_alloc_coherent(dmab->dev.dev, chunk, &addr, DEFAULT_GFP);
-                else
-                        p = do_alloc_pages(dmab->dev.dev, chunk, &addr, false);
+                p = do_alloc_pages(dmab->dev.dev, chunk, &addr, false);
                 if (!p) {
                         if (chunk <= PAGE_SIZE)
                                 goto error;
@@ -803,9 +793,6 @@ static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
         int type = dmab->dev.type;
         void *p;
 
-        if (cpu_feature_enabled(X86_FEATURE_XENPV))
-                return snd_dma_sg_fallback_alloc(dmab, size);
-
         /* try the standard DMA API allocation at first */
         if (type == SNDRV_DMA_TYPE_DEV_WC_SG)
                 dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC;
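
[Editor's note] A note on the bookkeeping that the surviving fallback
path relies on (see the "#pages in ~PAGE_MASK" comment and the
"chunk = (PAGE_SIZE - 1) << PAGE_SHIFT" cap above): each chunk's DMA
address is page-aligned, so the low bits of the chunk's first addrs[]
entry are free to carry the chunk's page count, which is exactly what
__snd_dma_sg_fallback_free() reads back with "addrs[i] & ~PAGE_MASK".
A runnable userspace sketch of that packing, assuming 4 KiB pages
(illustration only, not the kernel source):

  #include <stdint.h>
  #include <stdio.h>

  #define PAGE_SHIFT 12
  #define PAGE_SIZE  (1UL << PAGE_SHIFT)
  #define PAGE_MASK  (~(PAGE_SIZE - 1))

  int main(void)
  {
          uint64_t addrs[8] = {0};

          /* Two chunks: 3 pages at 0x10000, then 5 pages at 0x20000.
           * The page count rides in the bits cleared by PAGE_MASK. */
          addrs[0] = 0x10000 | 3;
          addrs[3] = 0x20000 | 5;

          /* Walk the array the way __snd_dma_sg_fallback_free() does. */
          for (size_t i = 0; i < 8; ) {
                  uint64_t npages = addrs[i] & ~PAGE_MASK;

                  if (!npages)    /* mirrors the WARN_ON(!size) bail-out */
                          break;
                  printf("chunk at %#llx: %llu pages (%llu bytes)\n",
                         (unsigned long long)(addrs[i] & PAGE_MASK),
                         (unsigned long long)npages,
                         (unsigned long long)(npages << PAGE_SHIFT));
                  i += npages;    /* skip to the next chunk head */
          }
          return 0;
  }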