author		Will Deacon <will@kernel.org>	2021-07-20 16:38:26 +0300
committer	Konrad Rzeszutek Wilk <konrad@kernel.org>	2021-07-24 03:18:02 +0300
commit		ad6c00283163cb7ad52cdf97d2850547446f7d98 (patch)
tree		cd71502ab4a48cc3887907e07276359d94a01b71 /kernel/dma
parent		1efd3fc0ccf52e1aa5f0bf5b0d82847180d20951 (diff)
download	linux-ad6c00283163cb7ad52cdf97d2850547446f7d98.tar.xz
swiotlb: Free tbl memory in swiotlb_exit()
Although swiotlb_exit() frees the 'slots' metadata array referenced by
'io_tlb_default_mem', it leaves the underlying buffer pages allocated
despite no longer being usable.

Extend swiotlb_exit() to free the buffer pages as well as the slots
array.

Cc: Claire Chang <tientzu@chromium.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-by: Nathan Chancellor <nathan@kernel.org>
Tested-by: Claire Chang <tientzu@chromium.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Konrad Rzeszutek Wilk <konrad@kernel.org>
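For context, a simplified sketch of the pool descriptor the message refers to (field subset only; not a verbatim copy of include/linux/swiotlb.h, other members omitted):

/*
 * Simplified view of the default-pool descriptor.  The bounce-buffer
 * pages live at [start, end); 'slots' points to the per-slot metadata
 * array, which is the only part the old swiotlb_exit() released.
 */
struct io_tlb_mem {
	phys_addr_t start;		/* bounce-buffer pages: [start, end) */
	phys_addr_t end;
	unsigned long nslabs;		/* number of IO TLB slabs */
	bool late_alloc;		/* pool was allocated after boot */
	struct io_tlb_slot *slots;	/* per-slot metadata array */
};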
Diffstat (limited to 'kernel/dma')
-rw-r--r--	kernel/dma/swiotlb.c | 21 +++++++++++++++------
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index b3c793ed9e64..87c40517e822 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -328,18 +328,27 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 
 void __init swiotlb_exit(void)
 {
-	size_t size;
 	struct io_tlb_mem *mem = &io_tlb_default_mem;
+	unsigned long tbl_vaddr;
+	size_t tbl_size, slots_size;
 
 	if (!mem->nslabs)
 		return;
 
 	pr_info("tearing down default memory pool\n");
-	size = array_size(sizeof(*mem->slots), mem->nslabs);
-	if (mem->late_alloc)
-		free_pages((unsigned long)mem->slots, get_order(size));
-	else
-		memblock_free_late(__pa(mem->slots), PAGE_ALIGN(size));
+	tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
+	tbl_size = PAGE_ALIGN(mem->end - mem->start);
+	slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));
+
+	set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
+	if (mem->late_alloc) {
+		free_pages(tbl_vaddr, get_order(tbl_size));
+		free_pages((unsigned long)mem->slots, get_order(slots_size));
+	} else {
+		memblock_free_late(mem->start, tbl_size);
+		memblock_free_late(__pa(mem->slots), slots_size);
+	}
+
 	memset(mem, 0, sizeof(*mem));
 }
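The new code picks the free primitive that matches how the pool was originally obtained: a pool set up during boot comes from memblock and goes back via memblock_free_late(), while a pool set up later (mem->late_alloc) comes from the page allocator and is returned with free_pages(). The added set_memory_encrypted() call re-marks the bounce buffer as encrypted before it is freed, undoing the set_memory_decrypted() done when the pool was set up on memory-encryption (e.g. SME/SEV) configurations. Below is a minimal kernel-style sketch of that mirrored alloc/free pairing; it is illustrative only, and the helper names tlb_buf_alloc/tlb_buf_free and their signatures are invented here, not part of the patch:

#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/mm.h>

/*
 * Illustrative sketch, not kernel code: the free path must mirror the
 * allocation path, which is exactly what the mem->late_alloc test in
 * the hunk above selects.
 */
static void *tlb_buf_alloc(size_t bytes, bool late_alloc)
{
	if (late_alloc)
		/* Post-boot path: undone with free_pages(). */
		return (void *)__get_free_pages(GFP_KERNEL, get_order(bytes));

	/* Boot-time path: undone with memblock_free_late(). */
	return memblock_alloc(bytes, PAGE_SIZE);
}

static void tlb_buf_free(void *vaddr, size_t bytes, bool late_alloc)
{
	if (late_alloc)
		free_pages((unsigned long)vaddr, get_order(bytes));
	else
		memblock_free_late(__pa(vaddr), PAGE_ALIGN(bytes));
}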