From 035e3a4321f73c352b6408ec2153fa5bc3feb459 Mon Sep 17 00:00:00 2001
From: Oleksandr Tyshchenko
Date: Tue, 25 Oct 2022 19:20:03 +0300
Subject: xen/virtio: Optimize the setup of "xen-grant-dma" devices

This is needed to avoid having to parse the same device-tree several
times for a given device.

For this to work we need to install the xen_virtio_restricted_mem_acc
callback in Arm's xen_guest_init() (the same callback that x86's PV and
HVM modes already use; see the sketch below) and remove the manual
assignment in xen_setup_dma_ops(). Also we need to split the code that
initializes backend_domid into a separate function.

Prior to this patch we parsed the device-tree three times:
1. xen_setup_dma_ops()->...->xen_is_dt_grant_dma_device()
2. xen_setup_dma_ops()->...->xen_dt_grant_init_backend_domid()
3. xen_virtio_mem_acc()->...->xen_is_dt_grant_dma_device()

With this patch we parse the device-tree only once, in
xen_virtio_restricted_mem_acc()->...->xen_dt_grant_init_backend_domid()

Other benefits are:
- No divergence from x86 when setting up the Xen grant DMA ops
- Several global functions can be dropped
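For illustration, the Arm-side hookup mentioned above is expected to look
roughly like the sketch below. This is not the arch/arm diff itself: the
function name example_xen_guest_init() is made up, and the exact headers
providing virtio_set_mem_acc_cb() and xen_virtio_restricted_mem_acc() are
assumptions.

  #include <linux/virtio_anchor.h>
  #include <xen/xen-ops.h>

  static void __init example_xen_guest_init(void)
  {
          /*
           * Install the same restricted-memory-access callback that the
           * x86 PV/HVM paths already register, so the backend domid
           * lookup (and hence the device-tree parsing) happens exactly
           * once per virtio device.
           */
          virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc);

          /* ... the rest of the usual Xen guest initialization ... */
  }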
Signed-off-by: Oleksandr Tyshchenko
Reviewed-by: Xenia Ragiadakou
Reviewed-by: Stefano Stabellini
Link: https://lore.kernel.org/r/20221025162004.8501-2-olekstysh@gmail.com
Signed-off-by: Juergen Gross
---
 drivers/xen/grant-dma-ops.c | 77 +++++++++++++++++----------------------------
 1 file changed, 28 insertions(+), 49 deletions(-)

(limited to 'drivers')

diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
index daa525df7bdc..1e797a043980 100644
--- a/drivers/xen/grant-dma-ops.c
+++ b/drivers/xen/grant-dma-ops.c
@@ -292,50 +292,20 @@ static const struct dma_map_ops xen_grant_dma_ops = {
         .dma_supported = xen_grant_dma_supported,
 };
 
-static bool xen_is_dt_grant_dma_device(struct device *dev)
-{
-        struct device_node *iommu_np;
-        bool has_iommu;
-
-        iommu_np = of_parse_phandle(dev->of_node, "iommus", 0);
-        has_iommu = iommu_np &&
-                    of_device_is_compatible(iommu_np, "xen,grant-dma");
-        of_node_put(iommu_np);
-
-        return has_iommu;
-}
-
-bool xen_is_grant_dma_device(struct device *dev)
-{
-        /* XXX Handle only DT devices for now */
-        if (dev->of_node)
-                return xen_is_dt_grant_dma_device(dev);
-
-        return false;
-}
-
-bool xen_virtio_mem_acc(struct virtio_device *dev)
-{
-        if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain())
-                return true;
-
-        return xen_is_grant_dma_device(dev->dev.parent);
-}
-
 static int xen_dt_grant_init_backend_domid(struct device *dev,
-                                           struct xen_grant_dma_data *data)
+                                           domid_t *backend_domid)
 {
         struct of_phandle_args iommu_spec;
 
         if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
                                        0, &iommu_spec)) {
-                dev_err(dev, "Cannot parse iommus property\n");
+                dev_dbg(dev, "Cannot parse iommus property\n");
                 return -ESRCH;
         }
 
         if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
             iommu_spec.args_count != 1) {
-                dev_err(dev, "Incompatible IOMMU node\n");
+                dev_dbg(dev, "Incompatible IOMMU node\n");
                 of_node_put(iommu_spec.np);
                 return -ESRCH;
         }
@@ -346,12 +316,28 @@ static int xen_dt_grant_init_backend_domid(struct device *dev,
          * The endpoint ID here means the ID of the domain where the
          * corresponding backend is running
          */
-        data->backend_domid = iommu_spec.args[0];
+        *backend_domid = iommu_spec.args[0];
 
         return 0;
 }
 
-void xen_grant_setup_dma_ops(struct device *dev)
+static int xen_grant_init_backend_domid(struct device *dev,
+                                        domid_t *backend_domid)
+{
+        int ret = -ENODEV;
+
+        if (dev->of_node) {
+                ret = xen_dt_grant_init_backend_domid(dev, backend_domid);
+        } else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain()) {
+                dev_info(dev, "Using dom0 as backend\n");
+                *backend_domid = 0;
+                ret = 0;
+        }
+
+        return ret;
+}
+
+static void xen_grant_setup_dma_ops(struct device *dev, domid_t backend_domid)
 {
         struct xen_grant_dma_data *data;
 
@@ -365,16 +351,7 @@ void xen_grant_setup_dma_ops(struct device *dev)
         if (!data)
                 goto err;
 
-        if (dev->of_node) {
-                if (xen_dt_grant_init_backend_domid(dev, data))
-                        goto err;
-        } else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) {
-                dev_info(dev, "Using dom0 as backend\n");
-                data->backend_domid = 0;
-        } else {
-                /* XXX ACPI device unsupported for now */
-                goto err;
-        }
+        data->backend_domid = backend_domid;
 
         if (store_xen_grant_dma_data(dev, data)) {
                 dev_err(dev, "Cannot store Xen grant DMA data\n");
@@ -392,12 +369,14 @@ err:
 
 bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
 {
-        bool ret = xen_virtio_mem_acc(dev);
+        domid_t backend_domid;
 
-        if (ret)
-                xen_grant_setup_dma_ops(dev->dev.parent);
+        if (!xen_grant_init_backend_domid(dev->dev.parent, &backend_domid)) {
+                xen_grant_setup_dma_ops(dev->dev.parent, backend_domid);
+                return true;
+        }
 
-        return ret;
+        return false;
 }
 
 MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
--
cgit v1.2.3


From ef8ae384b4c9ccefecf4754f34644bd9fb0105b7 Mon Sep 17 00:00:00 2001
From: Oleksandr Tyshchenko
Date: Tue, 25 Oct 2022 19:20:04 +0300
Subject: xen/virtio: Handle PCI devices whose Host controller is described in DT

Use the same "xen-grant-dma" device concept for PCI devices behind a
device-tree based PCI Host controller, but with one modification.
Unlike platform devices, we cannot use the generic IOMMU bindings
(iommus property), as we need to support a more flexible configuration.
The problem is that PCI devices under a single PCI Host controller may
have their backends running in different Xen domains and thus have
different endpoint IDs (backend domain IDs).

Add the ability to deal with the generic PCI-IOMMU bindings (iommu-map/
iommu-map-mask properties), which allow us to describe the relationship
between PCI devices and backend domain IDs properly.

To avoid having to look up the PCI Host bridge twice and to reduce the
number of checks, pass an extra struct device_node *np to
xen_dt_grant_init_backend_domid().

So with this patch the code expects an iommus property for platform
devices and iommu-map/iommu-map-mask properties for PCI devices.

An example of the iommu-map property generated by the toolstack for two
PCI devices, 0000:00:01.0 and 0000:00:02.0, whose backends run in
different Xen domains with IDs 1 and 2 respectively:

iommu-map = <0x08 0xfde9 0x01 0x08 0x10 0xfde9 0x02 0x08>;
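To make the translation concrete, the following standalone sketch decodes
the property above the same way the kernel's iommu-map walk does. It is
plain user-space C written only for this illustration (the struct name,
field names and the pci_rid() helper are made up): each 4-cell entry is
<rid-base iommu-phandle iommu-base length>, and a requester ID in
[rid-base, rid-base + length) maps to iommu-base + (rid - rid-base),
which here is the backend domid.

  #include <stdio.h>

  struct iommu_map_entry {
          unsigned int rid_base;  /* first requester ID covered by the entry */
          unsigned int phandle;   /* phandle of the "xen,grant-dma" node */
          unsigned int out_base;  /* backend domid assigned to rid_base */
          unsigned int length;    /* number of consecutive RIDs covered */
  };

  /* iommu-map = <0x08 0xfde9 0x01 0x08 0x10 0xfde9 0x02 0x08>; */
  static const struct iommu_map_entry map[] = {
          { 0x08, 0xfde9, 0x01, 0x08 },
          { 0x10, 0xfde9, 0x02, 0x08 },
  };

  /* Requester ID: bus number in bits 15..8, devfn in bits 7..0 */
  static unsigned int pci_rid(unsigned int bus, unsigned int devfn)
  {
          return (bus << 8) | devfn;
  }

  int main(void)
  {
          /* 0000:00:01.0 -> devfn 0x08, 0000:00:02.0 -> devfn 0x10 */
          unsigned int rids[] = { pci_rid(0, 0x08), pci_rid(0, 0x10) };

          for (int i = 0; i < 2; i++)
                  for (int j = 0; j < 2; j++)
                          if (rids[i] >= map[j].rid_base &&
                              rids[i] < map[j].rid_base + map[j].length)
                                  printf("RID 0x%02x -> backend domid %u\n",
                                         rids[i],
                                         map[j].out_base + rids[i] - map[j].rid_base);

          /* Prints: RID 0x08 -> backend domid 1, RID 0x10 -> backend domid 2 */
          return 0;
  }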
Signed-off-by: Oleksandr Tyshchenko
Reviewed-by: Xenia Ragiadakou
Reviewed-by: Stefano Stabellini
Link: https://lore.kernel.org/r/20221025162004.8501-3-olekstysh@gmail.com
Signed-off-by: Juergen Gross
---
 drivers/xen/grant-dma-ops.c | 46 ++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 39 insertions(+), 7 deletions(-)

(limited to 'drivers')

diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
index 1e797a043980..9784a77fa3c9 100644
--- a/drivers/xen/grant-dma-ops.c
+++ b/drivers/xen/grant-dma-ops.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/dma-map-ops.h>
 #include <linux/of.h>
+#include <linux/pci.h>
 #include <linux/pfn.h>
 #include <linux/xarray.h>
 #include <linux/virtio_anchor.h>
@@ -292,15 +293,43 @@ static const struct dma_map_ops xen_grant_dma_ops = {
         .dma_supported = xen_grant_dma_supported,
 };
 
+static struct device_node *xen_dt_get_node(struct device *dev)
+{
+        if (dev_is_pci(dev)) {
+                struct pci_dev *pdev = to_pci_dev(dev);
+                struct pci_bus *bus = pdev->bus;
+
+                /* Walk up to the root bus to look for PCI Host controller */
+                while (!pci_is_root_bus(bus))
+                        bus = bus->parent;
+
+                return of_node_get(bus->bridge->parent->of_node);
+        }
+
+        return of_node_get(dev->of_node);
+}
+
 static int xen_dt_grant_init_backend_domid(struct device *dev,
+                                           struct device_node *np,
                                            domid_t *backend_domid)
 {
-        struct of_phandle_args iommu_spec;
+        struct of_phandle_args iommu_spec = { .args_count = 1 };
 
-        if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
-                                       0, &iommu_spec)) {
-                dev_dbg(dev, "Cannot parse iommus property\n");
-                return -ESRCH;
+        if (dev_is_pci(dev)) {
+                struct pci_dev *pdev = to_pci_dev(dev);
+                u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+
+                if (of_map_id(np, rid, "iommu-map", "iommu-map-mask", &iommu_spec.np,
+                                iommu_spec.args)) {
+                        dev_dbg(dev, "Cannot translate ID\n");
+                        return -ESRCH;
+                }
+        } else {
+                if (of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
+                                               0, &iommu_spec)) {
+                        dev_dbg(dev, "Cannot parse iommus property\n");
+                        return -ESRCH;
+                }
         }
 
         if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
@@ -324,10 +353,13 @@ static int xen_dt_grant_init_backend_domid(struct device *dev,
 static int xen_grant_init_backend_domid(struct device *dev,
                                         domid_t *backend_domid)
 {
+        struct device_node *np;
         int ret = -ENODEV;
 
-        if (dev->of_node) {
-                ret = xen_dt_grant_init_backend_domid(dev, backend_domid);
+        np = xen_dt_get_node(dev);
+        if (np) {
+                ret = xen_dt_grant_init_backend_domid(dev, np, backend_domid);
+                of_node_put(np);
         } else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain()) {
                 dev_info(dev, "Using dom0 as backend\n");
                 *backend_domid = 0;
--
cgit v1.2.3


From 8b997b2bb2c53b76a6db6c195930e9ab8e4b0c79 Mon Sep 17 00:00:00 2001
From: Harshit Mogalapalli
Date: Fri, 25 Nov 2022 21:07:45 -0800
Subject: xen/privcmd: Fix a possible warning in privcmd_ioctl_mmap_resource()

As 'kdata.num' is user-controlled data, if the user tries to allocate
memory larger than or equal to MAX_ORDER, kcalloc() will fail; this
produces a stack trace and messes up dmesg with a warning.

Call trace:
-> privcmd_ioctl
--> privcmd_ioctl_mmap_resource

Add __GFP_NOWARN in order to avoid the too-large-allocation warning.

This is detected by static analysis using smatch.
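As a minimal sketch of the pattern the fix relies on (the helper below is
hypothetical, not the actual privcmd code): when the element count comes
straight from userspace, __GFP_NOWARN turns an impossibly large request
into a quiet NULL return, so the caller simply reports -ENOMEM instead of
dumping a warning splat into dmesg.

  #include <linux/slab.h>
  #include <linux/errno.h>

  /* Allocate an array whose length is controlled by userspace. */
  static int example_alloc_user_sized_array(unsigned int num,
                                            unsigned long **out)
  {
          unsigned long *buf;

          /* kcalloc() also guards against num * sizeof(*buf) overflowing */
          buf = kcalloc(num, sizeof(*buf), GFP_KERNEL | __GFP_NOWARN);
          if (!buf)
                  return -ENOMEM;

          *out = buf;
          return 0;
  }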
Fixes: 3ad0876554ca ("xen/privcmd: add IOCTL_PRIVCMD_MMAP_RESOURCE")
Signed-off-by: Harshit Mogalapalli
Reviewed-by: Juergen Gross
Link: https://lore.kernel.org/r/20221126050745.778967-1-harshit.m.mogalapalli@oracle.com
Signed-off-by: Juergen Gross
---
 drivers/xen/privcmd.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers')

diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index fae50a24630b..1edf45ee9890 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -760,7 +760,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
                 goto out;
         }
 
-        pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL);
+        pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN);
         if (!pfns) {
                 rc = -ENOMEM;
                 goto out;
--
cgit v1.2.3