author		Eric Farman <farman@linux.ibm.com>	2022-10-21 17:59:13 +0300
committer	Heiko Carstens <hca@linux.ibm.com>	2023-01-09 16:34:08 +0300
commit		62a97a56a64c97c3865e55d702babc22f3b2ea6a (patch)
tree		6ced03ad5e881ce28234ca5105c6907336ef81a4 /drivers/s390/cio
parent		4b946d65b8aa0071dbbc54b35b8502fa99c1ee22 (diff)
download	linux-62a97a56a64c97c3865e55d702babc22f3b2ea6a.tar.xz
vfio/ccw: pass page count to page_array struct
The allocation of our page_array struct calculates the number of 4K pages
that would be needed to hold a certain number of bytes. But since the number
of pages that will be pinned is already derived from the length of the IDAL,
this math is unnecessary. Let's pass that page count in directly and avoid
the calculation within the allocator.

Also, let's make this two allocations instead of one, to make it more
apparent what is happening here.

Signed-off-by: Eric Farman <farman@linux.ibm.com>
Reviewed-by: Matthew Rosato <mjrosato@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
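To make the reasoning above concrete, here is a minimal standalone sketch (plain C, not kernel code) of the page-count arithmetic the old allocator performed and that IDAL sizing already performs on the caller's side. The SKETCH_* constants and the idaw_count_4k() helper are illustrative stand-ins for the kernel's PAGE_* macros, not real APIs.

/*
 * Standalone sketch: number of 4K pages (IDAWs) needed to cover a byte
 * range that may start mid-page. This mirrors the rounding the old
 * page_array_alloc() did internally and the IDAL sizing already does,
 * which is why the allocator can simply take the count instead.
 */
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12
#define SKETCH_PAGE_SIZE  (1UL << SKETCH_PAGE_SHIFT)
#define SKETCH_PAGE_MASK  (~(SKETCH_PAGE_SIZE - 1))

/* Pages needed to cover len bytes starting at guest address iova. */
static unsigned int idaw_count_4k(unsigned long iova, unsigned int len)
{
	return ((iova & ~SKETCH_PAGE_MASK) + len + (SKETCH_PAGE_SIZE - 1))
		>> SKETCH_PAGE_SHIFT;
}

int main(void)
{
	/* A transfer starting mid-page and ending mid-page spans 3 pages. */
	unsigned long iova = 0x201800;	/* offset 0x800 into a 4K page */
	unsigned int bytes = 0x2100;	/* 8448 bytes */

	printf("pages to pin: %u\n", idaw_count_4k(iova, bytes)); /* -> 3 */
	return 0;
}

Because both sides round the same way, the caller can hand this count to page_array_alloc() directly instead of the byte length.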
Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--	drivers/s390/cio/vfio_ccw_cp.c	23
1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 99332c6f6010..b1436736b7b6 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -43,7 +43,7 @@ struct ccwchain {
* page_array_alloc() - alloc memory for page array
* @pa: page_array on which to perform the operation
* @iova: target guest physical address
- * @len: number of bytes that should be pinned from @iova
+ * @len: number of pages that should be pinned from @iova
*
* Attempt to allocate memory for page array.
*
@@ -63,18 +63,20 @@ static int page_array_alloc(struct page_array *pa, u64 iova, unsigned int len)
	if (pa->pa_nr || pa->pa_iova)
		return -EINVAL;

-	pa->pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-	if (!pa->pa_nr)
+	if (len == 0)
		return -EINVAL;

-	pa->pa_iova = kcalloc(pa->pa_nr,
-			      sizeof(*pa->pa_iova) + sizeof(*pa->pa_page),
-			      GFP_KERNEL);
-	if (unlikely(!pa->pa_iova)) {
-		pa->pa_nr = 0;
+	pa->pa_nr = len;
+
+	pa->pa_iova = kcalloc(len, sizeof(*pa->pa_iova), GFP_KERNEL);
+	if (!pa->pa_iova)
+		return -ENOMEM;
+
+	pa->pa_page = kcalloc(len, sizeof(*pa->pa_page), GFP_KERNEL);
+	if (!pa->pa_page) {
+		kfree(pa->pa_iova);
		return -ENOMEM;
	}
-	pa->pa_page = (struct page **)&pa->pa_iova[pa->pa_nr];

	pa->pa_iova[0] = iova;
	pa->pa_page[0] = NULL;
@@ -167,6 +169,7 @@ err_out:
static void page_array_unpin_free(struct page_array *pa, struct vfio_device *vdev)
{
	page_array_unpin(pa, vdev, pa->pa_nr);
+	kfree(pa->pa_page);
	kfree(pa->pa_iova);
}
@@ -545,7 +548,7 @@ static int ccwchain_fetch_ccw(struct ccw1 *ccw,
	 * required for the data transfer, since we only support
	 * 4K IDAWs today.
	 */
-	ret = page_array_alloc(pa, iova, bytes);
+	ret = page_array_alloc(pa, iova, idaw_nr);
	if (ret < 0)
		goto out_free_idaws;
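For reference, this is how page_array_alloc() reads once the hunks above are applied. It is a sketch reassembled from this diff, not a full copy of the file: kernel definitions (struct page_array, u64, kcalloc(), GFP_KERNEL) are assumed from the surrounding source, and the unchanged tail of the function is elided.

/* Sketch assembled from the hunks above; kernel context assumed. */
static int page_array_alloc(struct page_array *pa, u64 iova, unsigned int len)
{
	/* Refuse a page_array that was already set up. */
	if (pa->pa_nr || pa->pa_iova)
		return -EINVAL;

	if (len == 0)
		return -EINVAL;

	pa->pa_nr = len;

	/* Two separate allocations: one for the iova list, one for the pages. */
	pa->pa_iova = kcalloc(len, sizeof(*pa->pa_iova), GFP_KERNEL);
	if (!pa->pa_iova)
		return -ENOMEM;

	pa->pa_page = kcalloc(len, sizeof(*pa->pa_page), GFP_KERNEL);
	if (!pa->pa_page) {
		kfree(pa->pa_iova);
		return -ENOMEM;
	}

	pa->pa_iova[0] = iova;
	pa->pa_page[0] = NULL;

	/* The rest of the array initialization is unchanged by this patch
	 * and omitted here. */
	return 0;
}

With the arrays now allocated separately, page_array_unpin_free() frees both pa_page and pa_iova, as the third hunk above shows.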