author      Eric Farman <farman@linux.ibm.com>    2022-10-21 18:02:48 +0300
committer   Heiko Carstens <hca@linux.ibm.com>    2023-01-09 16:34:08 +0300
commit      61783394f4eb3a8a0944005ea2761c011788a9c3 (patch)
tree        a96479ffcfb2be7092010909f69ae4b3b945bc2a /drivers/s390/cio
parent      62a97a56a64c97c3865e55d702babc22f3b2ea6a (diff)
download    linux-61783394f4eb3a8a0944005ea2761c011788a9c3.tar.xz
vfio/ccw: populate page_array struct inline
There are two possible ways the list of addresses that gets passed to vfio is calculated. One is from a guest IDAL, which would be an array of (probably) non-contiguous addresses. The other is built from contiguous pages that follow the starting address provided by ccw->cda.

page_array_alloc() attempts to simplify things by pre-populating this array from the starting address, but that's not needed for a CCW with an IDAL anyway, so it doesn't need to be in the allocator. Move it to the caller in the non-IDAL case, since it will be overwritten when reading the guest IDAL.

Remove the initialization of the pa_page output pointers, since it won't be explicitly needed for either case.

Signed-off-by: Eric Farman <farman@linux.ibm.com>
Reviewed-by: Matthew Rosato <mjrosato@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
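To make the two address-calculation paths concrete, here is a minimal user-space sketch, not the kernel code: the struct, the helper names, and the example ccw->cda and IDAW values are simplified assumptions. It builds the iova list either by copying a guest-supplied IDAL or by stepping through contiguous 4K pages from a starting address, which is the part this patch moves into the caller.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096ULL

/* Simplified stand-in for the kernel's struct page_array (assumption). */
struct pa_sketch {
	uint64_t *pa_iova;      /* one guest physical address per page */
	unsigned int pa_nr;     /* number of pages to pin */
};

/* IDAL case: the guest supplies an explicit, possibly non-contiguous list. */
static void fill_from_idal(struct pa_sketch *pa, const uint64_t *idaws)
{
	for (unsigned int i = 0; i < pa->pa_nr; i++)
		pa->pa_iova[i] = idaws[i];
}

/* Non-IDAL case: contiguous 4K pages following the address in ccw->cda. */
static void fill_contiguous(struct pa_sketch *pa, uint64_t cda)
{
	pa->pa_iova[0] = cda;
	for (unsigned int i = 1; i < pa->pa_nr; i++)
		pa->pa_iova[i] = pa->pa_iova[i - 1] + PAGE_SIZE;
}

int main(void)
{
	const uint64_t idaws[3] = { 0x80000, 0x31000, 0x55000 }; /* made-up IDAWs */
	struct pa_sketch pa = { .pa_nr = 3 };

	pa.pa_iova = calloc(pa.pa_nr, sizeof(*pa.pa_iova));
	if (!pa.pa_iova)
		return 1;

	fill_contiguous(&pa, 0x20000);  /* hypothetical ccw->cda value */
	for (unsigned int i = 0; i < pa.pa_nr; i++)
		printf("contiguous[%u] = 0x%llx\n", i, (unsigned long long)pa.pa_iova[i]);

	fill_from_idal(&pa, idaws);     /* overwrites with the guest-provided list */
	for (unsigned int i = 0; i < pa.pa_nr; i++)
		printf("idal[%u]       = 0x%llx\n", i, (unsigned long long)pa.pa_iova[i]);

	free(pa.pa_iova);
	return 0;
}

The contiguous case prints 0x20000, 0x21000, 0x22000 (one entry per 4K page); the IDAL case simply copies whatever addresses the guest supplied, which is why any pre-population done in the allocator would be overwritten anyway.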
Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--    drivers/s390/cio/vfio_ccw_cp.c    | 22
1 file changed, 5 insertions(+), 17 deletions(-)
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index b1436736b7b6..f448aa93007f 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -42,7 +42,6 @@ struct ccwchain {
/*
* page_array_alloc() - alloc memory for page array
* @pa: page_array on which to perform the operation
- * @iova: target guest physical address
* @len: number of pages that should be pinned from @iova
*
* Attempt to allocate memory for page array.
@@ -56,10 +55,8 @@ struct ccwchain {
* -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova is not NULL
* -ENOMEM if alloc failed
*/
-static int page_array_alloc(struct page_array *pa, u64 iova, unsigned int len)
+static int page_array_alloc(struct page_array *pa, unsigned int len)
{
- int i;
-
if (pa->pa_nr || pa->pa_iova)
return -EINVAL;
@@ -78,13 +75,6 @@ static int page_array_alloc(struct page_array *pa, u64 iova, unsigned int len)
return -ENOMEM;
}
- pa->pa_iova[0] = iova;
- pa->pa_page[0] = NULL;
- for (i = 1; i < pa->pa_nr; i++) {
- pa->pa_iova[i] = pa->pa_iova[i - 1] + PAGE_SIZE;
- pa->pa_page[i] = NULL;
- }
-
return 0;
}
@@ -548,7 +538,7 @@ static int ccwchain_fetch_ccw(struct ccw1 *ccw,
* required for the data transfer, since we only support
* 4K IDAWs today.
*/
- ret = page_array_alloc(pa, iova, idaw_nr);
+ ret = page_array_alloc(pa, idaw_nr);
if (ret < 0)
goto out_free_idaws;
@@ -565,11 +555,9 @@ static int ccwchain_fetch_ccw(struct ccw1 *ccw,
for (i = 0; i < idaw_nr; i++)
pa->pa_iova[i] = idaws[i];
} else {
- /*
- * No action is required here; the iova addresses in page_array
- * were initialized sequentially in page_array_alloc() beginning
- * with the contents of ccw->cda.
- */
+ pa->pa_iova[0] = iova;
+ for (i = 1; i < pa->pa_nr; i++)
+ pa->pa_iova[i] = pa->pa_iova[i - 1] + PAGE_SIZE;
}
if (ccw_does_data_transfer(ccw)) {
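For reference, a sketch of what the allocator's contract reduces to after this change: allocate the array, reject reuse, report allocation failure, and leave populating pa_iova entirely to the caller. This is a simplified user-space model under the same assumptions as the sketch above, not the kernel source (which uses kernel allocators and also manages the separate pa_page array):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

struct pa_sketch {
	uint64_t *pa_iova;      /* filled in later by the caller */
	unsigned int pa_nr;
};

/* Allocation only: no pre-population of pa_iova. */
static int pa_sketch_alloc(struct pa_sketch *pa, unsigned int len)
{
	if (pa->pa_nr || pa->pa_iova)
		return -EINVAL;         /* already in use */
	if (!len)
		return -EINVAL;         /* nothing to allocate (sketch-only guard) */

	pa->pa_iova = calloc(len, sizeof(*pa->pa_iova));
	if (!pa->pa_iova)
		return -ENOMEM;

	pa->pa_nr = len;
	return 0;
}

int main(void)
{
	struct pa_sketch pa = { 0 };

	if (pa_sketch_alloc(&pa, 8))
		return 1;
	/* ...the caller now fills pa.pa_iova[0..7], as in the earlier sketch... */
	free(pa.pa_iova);
	return 0;
}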