From f40a1e3705be23e845fbeec66acf3a6582524145 Mon Sep 17 00:00:00 2001 From: Anson Huang Date: Wed, 24 Jan 2018 00:42:13 +0800 Subject: soc: imx: gpc: ARM power domain should be always-on ARM power domain does NOT support runtime off, always-on flag should be set to avoid incorrect power state in pm_genpd_summary: Before: root@imx6qpdlsolox:~# cat /sys/kernel/debug/pm_genpd/pm_genpd_summary domain status slaves /device runtime status ---------------------------------------------------------------------- ARM off-0 After: root@imx6qpdlsolox:~# cat /sys/kernel/debug/pm_genpd/pm_genpd_summary domain status slaves /device runtime status ---------------------------------------------------------------------- ARM on Signed-off-by: Anson Huang Reviewed-by: Lucas Stach Acked-by: Dong Aisheng Signed-off-by: Shawn Guo --- drivers/soc/imx/gpc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c index 53f7275d6cbd..6cafa9b60bc6 100644 --- a/drivers/soc/imx/gpc.c +++ b/drivers/soc/imx/gpc.c @@ -254,6 +254,7 @@ static struct imx_pm_domain imx_gpc_domains[] = { { .base = { .name = "ARM", + .flags = GENPD_FLAG_ALWAYS_ON, }, }, { .base = { -- cgit v1.2.3 From e17ee5f08b9849b9f80fadab96364121c6bdd207 Mon Sep 17 00:00:00 2001 From: Shawn Lin Date: Wed, 7 Feb 2018 14:20:03 +0800 Subject: soc: rockchip: disable jtag switching for RK3228/RK3229 SoCs Disable IO function switching between sdmmc and jtag for RK3228 and RK3229 SoCs. Signed-off-by: Shawn Lin Signed-off-by: Heiko Stuebner --- drivers/soc/rockchip/grf.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c index 15e71fd6c513..dd81b87d79f0 100644 --- a/drivers/soc/rockchip/grf.c +++ b/drivers/soc/rockchip/grf.c @@ -43,6 +43,17 @@ static const struct rockchip_grf_info rk3036_grf __initconst = { .num_values = ARRAY_SIZE(rk3036_defaults), }; +#define RK3228_GRF_SOC_CON6 0x418 + +static const struct rockchip_grf_value rk3228_defaults[] __initconst = { + { "jtag switching", RK3228_GRF_SOC_CON6, HIWORD_UPDATE(0, 1, 8) }, +}; + +static const struct rockchip_grf_info rk3228_grf __initconst = { + .values = rk3228_defaults, + .num_values = ARRAY_SIZE(rk3228_defaults), +}; + #define RK3288_GRF_SOC_CON0 0x244 static const struct rockchip_grf_value rk3288_defaults[] __initconst = { @@ -91,6 +102,9 @@ static const struct of_device_id rockchip_grf_dt_match[] __initconst = { { .compatible = "rockchip,rk3036-grf", .data = (void *)&rk3036_grf, + }, { + .compatible = "rockchip,rk3228-grf", + .data = (void *)&rk3228_grf, }, { .compatible = "rockchip,rk3288-grf", .data = (void *)&rk3288_grf, -- cgit v1.2.3 From f12bb91624f9511c1ee87e7c358e4dc11fb44ab3 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Tue, 9 Jan 2018 19:29:55 +0100 Subject: memory: samsung: Add SPDX license identifiers Replace GPL license statements with SPDX GPL-2.0 license identifiers. 
Signed-off-by: Krzysztof Kozlowski --- drivers/memory/samsung/Kconfig | 1 + drivers/memory/samsung/Makefile | 1 + drivers/memory/samsung/exynos-srom.c | 18 +++++++----------- drivers/memory/samsung/exynos-srom.h | 7 ++----- 4 files changed, 11 insertions(+), 16 deletions(-) diff --git a/drivers/memory/samsung/Kconfig b/drivers/memory/samsung/Kconfig index 9de12222061c..79ce7ea58903 100644 --- a/drivers/memory/samsung/Kconfig +++ b/drivers/memory/samsung/Kconfig @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 config SAMSUNG_MC bool "Samsung Exynos Memory Controller support" if COMPILE_TEST help diff --git a/drivers/memory/samsung/Makefile b/drivers/memory/samsung/Makefile index 9c554d5522ad..00587be66211 100644 --- a/drivers/memory/samsung/Makefile +++ b/drivers/memory/samsung/Makefile @@ -1 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_EXYNOS_SROM) += exynos-srom.o diff --git a/drivers/memory/samsung/exynos-srom.c b/drivers/memory/samsung/exynos-srom.c index bf827a666694..7edd7fb540f2 100644 --- a/drivers/memory/samsung/exynos-srom.c +++ b/drivers/memory/samsung/exynos-srom.c @@ -1,14 +1,10 @@ -/* - * Copyright (c) 2015 Samsung Electronics Co., Ltd. - * http://www.samsung.com/ - * - * EXYNOS - SROM Controller support - * Author: Pankaj Dubey - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2015 Samsung Electronics Co., Ltd. +// http://www.samsung.com/ +// +// EXYNOS - SROM Controller support +// Author: Pankaj Dubey #include #include diff --git a/drivers/memory/samsung/exynos-srom.h b/drivers/memory/samsung/exynos-srom.h index 34660c6a57a9..da612797f522 100644 --- a/drivers/memory/samsung/exynos-srom.h +++ b/drivers/memory/samsung/exynos-srom.h @@ -1,13 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2015 Samsung Electronics Co., Ltd. * http://www.samsung.com * * Exynos SROMC register definitions - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. -*/ + */ #ifndef __EXYNOS_SROM_H #define __EXYNOS_SROM_H __FILE__ -- cgit v1.2.3 From bcb41a53b0b075600cb821302e7177ca5ab62efd Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Tue, 9 Jan 2018 19:29:56 +0100 Subject: soc: samsung: Add SPDX license identifiers to headers Replace GPL license statements with SPDX GPL-2.0 license identifiers. Signed-off-by: Krzysztof Kozlowski --- include/linux/soc/samsung/exynos-pmu.h | 5 +---- include/linux/soc/samsung/exynos-regs-pmu.h | 6 +----- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/include/linux/soc/samsung/exynos-pmu.h b/include/linux/soc/samsung/exynos-pmu.h index e57eb4b6cc5a..fc0b445bb36b 100644 --- a/include/linux/soc/samsung/exynos-pmu.h +++ b/include/linux/soc/samsung/exynos-pmu.h @@ -1,12 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2014 Samsung Electronics Co., Ltd. * http://www.samsung.com * * Header for EXYNOS PMU Driver support - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #ifndef __LINUX_SOC_EXYNOS_PMU_H diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h index bebdde5dccd6..66dcb9ec273a 100644 --- a/include/linux/soc/samsung/exynos-regs-pmu.h +++ b/include/linux/soc/samsung/exynos-regs-pmu.h @@ -1,14 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2010-2015 Samsung Electronics Co., Ltd. * http://www.samsung.com * * EXYNOS - Power management unit definition * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * * Notice: * This is not a list of all Exynos Power Management Unit SFRs. * There are too many of them, not mentioning subtle differences -- cgit v1.2.3 From 3a2ad7bd3151cc282f06d18948dfb7a0e1138fb2 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Thu, 21 Dec 2017 20:40:52 +0100 Subject: soc: amlogic: meson-gx-pwrc-vpu: don't print error message on probe deferral The error message may be misleading in case of probe deferral (happens on my Odroid-C2). Therefore don't print it in this case. Fixes: 75fcb5ca4b46 "soc: amlogic: add Meson GX VPU Domains driver" Signed-off-by: Heiner Kallweit Acked-by: Neil Armstrong Signed-off-by: Kevin Hilman --- drivers/soc/amlogic/meson-gx-pwrc-vpu.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c index 2bdeebc48901..3adb2f2ecefd 100644 --- a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c +++ b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c @@ -184,7 +184,8 @@ static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev) rstc = devm_reset_control_array_get(&pdev->dev, false, false); if (IS_ERR(rstc)) { - dev_err(&pdev->dev, "failed to get reset lines\n"); + if (PTR_ERR(rstc) != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to get reset lines\n"); return PTR_ERR(rstc); } -- cgit v1.2.3 From 87f88732d25e6175cb4faa8070658f604660d720 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Thu, 21 Dec 2017 20:41:02 +0100 Subject: soc: amlogic: meson-gx-pwrc-vpu: fix error on shutdown when domain is powered off When operating the system headless, the domain is never powered on, leaving the clocks disabled. The shutdown function then tries to disable the already disabled clocks, resulting in errors. Therefore call meson_gx_pwrc_vpu_power_off() only if the domain is powered on. This patch fixes the described issue on my system (Odroid-C2).
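An aside on the probe-deferral idiom used in the previous commit: -EPROBE_DEFER only means that a resource provider has not been bound yet and that probe() will be retried later, so logging it as an error is noise. A minimal sketch of the idiom outside this driver (function name invented for illustration; the reset call mirrors the one in the diff above):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

/* Sketch: stay quiet on -EPROBE_DEFER, since probe() is retried once
 * the reset provider shows up; only real failures deserve a log line.
 */
static int example_get_resets(struct device *dev)
{
	struct reset_control *rstc;

	rstc = devm_reset_control_array_get(dev, false, false);
	if (IS_ERR(rstc)) {
		if (PTR_ERR(rstc) != -EPROBE_DEFER)
			dev_err(dev, "failed to get reset lines\n");
		return PTR_ERR(rstc);
	}
	return 0;
}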
Fixes: 339cd0ea0822 "soc: amlogic: meson-gx-pwrc-vpu: fix power-off when powered by bootloader" Signed-off-by: Heiner Kallweit Reviewed-by: Neil Armstrong Signed-off-by: Kevin Hilman --- drivers/soc/amlogic/meson-gx-pwrc-vpu.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c index 3adb2f2ecefd..6289965c42e9 100644 --- a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c +++ b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c @@ -225,7 +225,11 @@ static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev) static void meson_gx_pwrc_vpu_shutdown(struct platform_device *pdev) { - meson_gx_pwrc_vpu_power_off(&vpu_hdmi_pd.genpd); + bool powered_off; + + powered_off = meson_gx_pwrc_vpu_get_power(&vpu_hdmi_pd); + if (!powered_off) + meson_gx_pwrc_vpu_power_off(&vpu_hdmi_pd.genpd); } static const struct of_device_id meson_gx_pwrc_vpu_match_table[] = { -- cgit v1.2.3 From 82a759c91801d1f9851196d73516a504064e472c Mon Sep 17 00:00:00 2001 From: "weiyongjun (A)" Date: Wed, 10 Jan 2018 14:19:40 +0000 Subject: meson-mx-socinfo: Make local function meson_mx_socinfo_init() static Fixes the following sparse warnings: drivers/soc/amlogic/meson-mx-socinfo.c:107:12: warning: symbol 'meson_mx_socinfo_init' was not declared. Should it be static? Signed-off-by: Wei Yongjun Signed-off-by: Kevin Hilman --- drivers/soc/amlogic/meson-mx-socinfo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/amlogic/meson-mx-socinfo.c b/drivers/soc/amlogic/meson-mx-socinfo.c index 7bfff5ff22a2..78f0f1aeca57 100644 --- a/drivers/soc/amlogic/meson-mx-socinfo.c +++ b/drivers/soc/amlogic/meson-mx-socinfo.c @@ -104,7 +104,7 @@ static const struct of_device_id meson_mx_socinfo_analog_top_ids[] = { { /* sentinel */ } }; -int __init meson_mx_socinfo_init(void) +static int __init meson_mx_socinfo_init(void) { struct soc_device_attribute *soc_dev_attr; struct soc_device *soc_dev; -- cgit v1.2.3 From 01517dfc819f003855c1893d9382581cafe2877b Mon Sep 17 00:00:00 2001 From: "weiyongjun (A)" Date: Wed, 10 Jan 2018 14:19:48 +0000 Subject: meson-gx-socinfo: make local function meson_gx_socinfo_init static Fixes the following sparse warnings: drivers/soc/amlogic/meson-gx-socinfo.c:100:12: warning: symbol 'meson_gx_socinfo_init' was not declared. Should it be static? Signed-off-by: Wei Yongjun Acked-by: Neil Armstrong Signed-off-by: Kevin Hilman --- drivers/soc/amlogic/meson-gx-socinfo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c index f2d8c3c53ea4..ea091f1f7dae 100644 --- a/drivers/soc/amlogic/meson-gx-socinfo.c +++ b/drivers/soc/amlogic/meson-gx-socinfo.c @@ -97,7 +97,7 @@ static const char *socinfo_to_soc_id(u32 socinfo) return "Unknown"; } -int __init meson_gx_socinfo_init(void) +static int __init meson_gx_socinfo_init(void) { struct soc_device_attribute *soc_dev_attr; struct soc_device *soc_dev; -- cgit v1.2.3 From 7353c54620732797dcc3b4b1fc6f3cc0c0d9b6ef Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Tue, 30 Jan 2018 22:18:16 +0100 Subject: soc: samsung: pmu: Populate children syscon nodes The syscon poweroff and restart nodes logically belong to the Power Management Unit so populate possible children. This also requires providing compatibles for Exynos5410 and Exynos7 so the PMU device and its children will be instantiated for them as well. Just like Exynos5433, these chipsets are not yet supported by the PMU driver. 
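The probe hunk below leans on devm_of_platform_populate(), which creates platform devices for every child of dev->of_node and registers a devres action so they are depopulated automatically on unbind. A minimal sketch of the call shape (helper name invented for illustration):

#include <linux/device.h>
#include <linux/of_platform.h>

/* Sketch: instantiate the syscon-reboot/syscon-poweroff children from
 * probe; cleanup on driver unbind is handled by devres, so no matching
 * depopulate call is needed in remove().
 */
static void demo_populate_children(struct device *dev)
{
	if (devm_of_platform_populate(dev))
		dev_err(dev, "Error populating children\n");
}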
Signed-off-by: Krzysztof Kozlowski Reviewed-by: Rob Herring Tested-by: Marek Szyprowski --- Documentation/devicetree/bindings/arm/samsung/pmu.txt | 6 ++++++ drivers/soc/samsung/exynos-pmu.c | 7 +++++++ 2 files changed, 13 insertions(+) diff --git a/Documentation/devicetree/bindings/arm/samsung/pmu.txt b/Documentation/devicetree/bindings/arm/samsung/pmu.txt index 779f5614bcee..16685787d2bd 100644 --- a/Documentation/devicetree/bindings/arm/samsung/pmu.txt +++ b/Documentation/devicetree/bindings/arm/samsung/pmu.txt @@ -43,6 +43,12 @@ following properties: - interrupt-parent: a phandle indicating which interrupt controller this PMU signals interrupts to. + +Optional nodes: + +- nodes defining the restart and poweroff syscon children + + Example : pmu_system_controller: system-controller@10040000 { compatible = "samsung,exynos5250-pmu", "syscon"; diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c index f56adbd9fb8b..d34ca201b8b7 100644 --- a/drivers/soc/samsung/exynos-pmu.c +++ b/drivers/soc/samsung/exynos-pmu.c @@ -84,11 +84,15 @@ static const struct of_device_id exynos_pmu_of_device_ids[] = { }, { .compatible = "samsung,exynos5250-pmu", .data = exynos_pmu_data_arm_ptr(exynos5250_pmu_data), + }, { + .compatible = "samsung,exynos5410-pmu", }, { .compatible = "samsung,exynos5420-pmu", .data = exynos_pmu_data_arm_ptr(exynos5420_pmu_data), }, { .compatible = "samsung,exynos5433-pmu", + }, { + .compatible = "samsung,exynos7-pmu", }, { /*sentinel*/ }, }; @@ -126,6 +130,9 @@ static int exynos_pmu_probe(struct platform_device *pdev) platform_set_drvdata(pdev, pmu_context); + if (devm_of_platform_populate(dev)) + dev_err(dev, "Error populating children, reboot and poweroff might not work properly\n"); + dev_dbg(dev, "Exynos PMU Driver probe done\n"); return 0; } -- cgit v1.2.3 From 320da785db9b724cc099c79852de477f390b6cab Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Tue, 26 Dec 2017 12:50:41 +0100 Subject: reset: meson: enable level reset support on Meson8b Commit a5a10afe04ef ("reset: meson: add level reset support for GX SoC family") only enabled the level resets for the newer GX SoC family. However, the older 32-bit Meson SoCs (Meson8, Meson8b and Meson8m2) also support level resets using the same offset as the newer GX SoCs. This removes the separation between Meson8b and the GX SoCs from the reset-meson driver to enable the level resets also on Meson8b.
Signed-off-by: Martin Blumenstingl Reviewed-by: Neil Armstrong Signed-off-by: Philipp Zabel --- drivers/reset/reset-meson.c | 22 +++++----------------- 1 file changed, 5 insertions(+), 17 deletions(-) diff --git a/drivers/reset/reset-meson.c b/drivers/reset/reset-meson.c index 93cbee1ae8ef..5242e0679df7 100644 --- a/drivers/reset/reset-meson.c +++ b/drivers/reset/reset-meson.c @@ -124,29 +124,21 @@ static int meson_reset_deassert(struct reset_controller_dev *rcdev, return meson_reset_level(rcdev, id, false); } -static const struct reset_control_ops meson_reset_meson8_ops = { - .reset = meson_reset_reset, -}; - -static const struct reset_control_ops meson_reset_gx_ops = { +static const struct reset_control_ops meson_reset_ops = { .reset = meson_reset_reset, .assert = meson_reset_assert, .deassert = meson_reset_deassert, }; static const struct of_device_id meson_reset_dt_ids[] = { - { .compatible = "amlogic,meson8b-reset", - .data = &meson_reset_meson8_ops, }, - { .compatible = "amlogic,meson-gxbb-reset", - .data = &meson_reset_gx_ops, }, - { .compatible = "amlogic,meson-axg-reset", - .data = &meson_reset_gx_ops, }, + { .compatible = "amlogic,meson8b-reset" }, + { .compatible = "amlogic,meson-gxbb-reset" }, + { .compatible = "amlogic,meson-axg-reset" }, { /* sentinel */ }, }; static int meson_reset_probe(struct platform_device *pdev) { - const struct reset_control_ops *ops; struct meson_reset *data; struct resource *res; @@ -154,10 +146,6 @@ static int meson_reset_probe(struct platform_device *pdev) if (!data) return -ENOMEM; - ops = of_device_get_match_data(&pdev->dev); - if (!ops) - return -EINVAL; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); data->reg_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(data->reg_base)) @@ -169,7 +157,7 @@ static int meson_reset_probe(struct platform_device *pdev) data->rcdev.owner = THIS_MODULE; data->rcdev.nr_resets = REG_COUNT * BITS_PER_REG; - data->rcdev.ops = ops; + data->rcdev.ops = &meson_reset_ops; data->rcdev.of_node = pdev->dev.of_node; return devm_reset_controller_register(&pdev->dev, &data->rcdev); -- cgit v1.2.3 From 14b5057a2f84b9da246e5bda29c9fd914a8f691c Mon Sep 17 00:00:00 2001 From: Joel Stanley Date: Tue, 20 Feb 2018 12:13:28 +1030 Subject: dt-bindings: aspeed-lpc: Add reset controller This describes the reset controller present in the LPC address space. Reviewed-by: Rob Herring Signed-off-by: Joel Stanley [p.zabel@pengutronix.de: removed a space before tab in indent] Signed-off-by: Philipp Zabel --- .../devicetree/bindings/mfd/aspeed-lpc.txt | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/Documentation/devicetree/bindings/mfd/aspeed-lpc.txt b/Documentation/devicetree/bindings/mfd/aspeed-lpc.txt index 514d82ced95b..7136432f9905 100644 --- a/Documentation/devicetree/bindings/mfd/aspeed-lpc.txt +++ b/Documentation/devicetree/bindings/mfd/aspeed-lpc.txt @@ -135,3 +135,24 @@ lhc: lhc@20 { compatible = "aspeed,ast2500-lhc"; reg = <0x20 0x24 0x48 0x8>; }; + +LPC reset control +----------------- + +The UARTs present in the ASPEED SoC can have their resets tied to the reset +state of the LPC bus. Some systems may chose to modify this configuration. 
+ +Required properties: + + - compatible: "aspeed,ast2500-lpc-reset" or + "aspeed,ast2400-lpc-reset" + - reg: offset and length of the IP in the LHC memory region + - #reset-controller indicates the number of reset cells expected + +Example: + +lpc_reset: reset-controller@18 { + compatible = "aspeed,ast2500-lpc-reset"; + reg = <0x18 0x4>; + #reset-cells = <1>; +}; -- cgit v1.2.3 From 1d7592f84f92c6344978186fdbe547af044274b5 Mon Sep 17 00:00:00 2001 From: Joel Stanley Date: Tue, 20 Feb 2018 12:13:29 +1030 Subject: reset: simple: Enable for ASPEED systems ASPEED BMC SoCs have a reset controller in the LPC IP that can be controlled using this driver to release the UARTs from reset. No special configuration is required, so only the compatible string is added. Signed-off-by: Joel Stanley Signed-off-by: Philipp Zabel --- drivers/reset/Kconfig | 10 +++++++--- drivers/reset/reset-simple.c | 2 ++ 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index 7fc77696bb1e..18f152d251d7 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -83,14 +83,18 @@ config RESET_PISTACHIO config RESET_SIMPLE bool "Simple Reset Controller Driver" if COMPILE_TEST - default ARCH_SOCFPGA || ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX + default ARCH_SOCFPGA || ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED help This enables a simple reset controller driver for reset lines that that can be asserted and deasserted by toggling bits in a contiguous, exclusive register space. - Currently this driver supports Altera SoCFPGAs, the RCC reset - controller in STM32 MCUs, Allwinner SoCs, and ZTE's zx2967 family. + Currently this driver supports: + - Altera SoCFPGAs + - ASPEED BMC SoCs + - RCC reset controller in STM32 MCUs + - Allwinner SoCs + - ZTE's zx2967 family config RESET_SUNXI bool "Allwinner SoCs Reset Driver" if COMPILE_TEST && !ARCH_SUNXI diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c index 2d4f362ef025..f7ce8910a392 100644 --- a/drivers/reset/reset-simple.c +++ b/drivers/reset/reset-simple.c @@ -125,6 +125,8 @@ static const struct of_device_id reset_simple_dt_ids[] = { .data = &reset_simple_active_low }, { .compatible = "zte,zx296718-reset", .data = &reset_simple_active_low }, + { .compatible = "aspeed,ast2400-lpc-reset" }, + { .compatible = "aspeed,ast2500-lpc-reset" }, { /* sentinel */ }, }; -- cgit v1.2.3 From cd6f0602d2950efb488290571c2ae2ca92befe53 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 5 Dec 2017 23:16:35 +0100 Subject: firmware: arm_scpi: remove two unneeded devm_kfree's in scpi_remove Both memory areas are free'd anyway when the device is destroyed, so we don't have to do it manually. 
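For context on the devm_kfree() removal below: devm_*() allocations are tied to the device and released by the driver core when the device is unbound, which is why freeing them by hand in remove() is redundant. A standalone sketch of the lifetime rule:

#include <linux/device.h>
#include <linux/slab.h>

/* Sketch: devm_kzalloc() memory lives as long as the device is bound;
 * the core frees it after remove() returns, so remove() never needs a
 * manual devm_kfree() for it.
 */
static int demo_probe(struct device *dev)
{
	u32 *state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);

	if (!state)
		return -ENOMEM;
	dev_set_drvdata(dev, state);
	return 0;	/* no matching free anywhere */
}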
Tested-by: Kevin Hilman Signed-off-by: Heiner Kallweit Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scpi.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index 7da9f1b83ebe..2a30d255e750 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -918,8 +918,6 @@ static int scpi_remove(struct platform_device *pdev) kfree(info->dvfs[i]->opps); kfree(info->dvfs[i]); } - devm_kfree(dev, info->channels); - devm_kfree(dev, info); return 0; } -- cgit v1.2.3 From c14f1db41d0af210986468637f555dfdc72b0d57 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 5 Dec 2017 23:16:42 +0100 Subject: firmware: arm_scpi: make freeing mbox channels device-managed Make freeing the mbox channels device-managed, thus further simplifying scpi_remove and taking one further step towards getting rid of scpi_remove. Tested-by: Kevin Hilman Signed-off-by: Heiner Kallweit Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scpi.c | 37 ++++++++++++++++--------------------- 1 file changed, 16 insertions(+), 21 deletions(-) diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index 2a30d255e750..4447738d4b62 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -889,16 +889,13 @@ static struct attribute *versions_attrs[] = { }; ATTRIBUTE_GROUPS(versions); -static void -scpi_free_channels(struct device *dev, struct scpi_chan *pchan, int count) +static void scpi_free_channels(void *data) { + struct scpi_drvinfo *info = data; int i; - for (i = 0; i < count && pchan->chan; i++, pchan++) { - mbox_free_channel(pchan->chan); - devm_kfree(dev, pchan->xfers); - devm_iounmap(dev, pchan->rx_payload); - } + for (i = 0; i < info->num_chans; i++) + mbox_free_channel(info->channels[i].chan); } static int scpi_remove(struct platform_device *pdev) @@ -911,7 +908,6 @@ static int scpi_remove(struct platform_device *pdev) of_platform_depopulate(dev); sysfs_remove_groups(&dev->kobj, versions_groups); - scpi_free_channels(dev, info->channels, info->num_chans); platform_set_drvdata(pdev, NULL); for (i = 0; i < MAX_DVFS_DOMAINS && info->dvfs[i]; i++) { @@ -950,7 +946,6 @@ static int scpi_probe(struct platform_device *pdev) { int count, idx, ret; struct resource res; - struct scpi_chan *scpi_chan; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; @@ -967,13 +962,19 @@ static int scpi_probe(struct platform_device *pdev) return -ENODEV; } - scpi_chan = devm_kcalloc(dev, count, sizeof(*scpi_chan), GFP_KERNEL); - if (!scpi_chan) + scpi_info->channels = devm_kcalloc(dev, count, sizeof(struct scpi_chan), + GFP_KERNEL); + if (!scpi_info->channels) return -ENOMEM; - for (idx = 0; idx < count; idx++) { + ret = devm_add_action(dev, scpi_free_channels, scpi_info); + if (ret) + return ret; + + for (; scpi_info->num_chans < count; scpi_info->num_chans++) { resource_size_t size; - struct scpi_chan *pchan = scpi_chan + idx; + int idx = scpi_info->num_chans; + struct scpi_chan *pchan = scpi_info->channels + idx; struct mbox_client *cl = &pchan->cl; struct device_node *shmem = of_parse_phandle(np, "shmem", idx); @@ -981,15 +982,14 @@ static int scpi_probe(struct platform_device *pdev) of_node_put(shmem); if (ret) { dev_err(dev, "failed to get SCPI payload mem resource\n"); - goto err; + return ret; } size = resource_size(&res); pchan->rx_payload = devm_ioremap(dev, res.start, size); if (!pchan->rx_payload) { dev_err(dev, "failed to ioremap SCPI payload\n"); - ret = -EADDRNOTAVAIL; - goto err; + return -EADDRNOTAVAIL; }
pchan->tx_payload = pchan->rx_payload + (size >> 1); @@ -1015,14 +1015,9 @@ static int scpi_probe(struct platform_device *pdev) dev_err(dev, "failed to get channel%d err %d\n", idx, ret); } -err: - scpi_free_channels(dev, scpi_chan, idx); - scpi_info = NULL; return ret; } - scpi_info->channels = scpi_chan; - scpi_info->num_chans = count; scpi_info->commands = scpi_std_commands; platform_set_drvdata(pdev, scpi_info); -- cgit v1.2.3 From 5abc7935ed5bf70aa69a6001eee8495df43a17c7 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 5 Dec 2017 23:16:48 +0100 Subject: firmware: arm_scpi: make scpi_probe completely device-managed Replace two remaining functions in probe with their devm versions. Tested-by: Kevin Hilman Signed-off-by: Heiner Kallweit Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scpi.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index 4447738d4b62..a6f6039ee3f9 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -901,15 +901,10 @@ static int scpi_remove(struct platform_device *pdev) { int i; - struct device *dev = &pdev->dev; struct scpi_drvinfo *info = platform_get_drvdata(pdev); scpi_info = NULL; /* stop exporting SCPI ops through get_scpi_ops */ - of_platform_depopulate(dev); - sysfs_remove_groups(&dev->kobj, versions_groups); - platform_set_drvdata(pdev, NULL); - for (i = 0; i < MAX_DVFS_DOMAINS && info->dvfs[i]; i++) { kfree(info->dvfs[i]->opps); kfree(info->dvfs[i]); @@ -1036,7 +1031,6 @@ static int scpi_probe(struct platform_device *pdev) ret = scpi_init_versions(scpi_info); if (ret) { dev_err(dev, "incorrect or no SCP firmware found\n"); - scpi_remove(pdev); return ret; } @@ -1048,11 +1042,11 @@ static int scpi_probe(struct platform_device *pdev) scpi_info->scpi_ops = &scpi_ops; - ret = sysfs_create_groups(&dev->kobj, versions_groups); + ret = devm_device_add_groups(dev, versions_groups); if (ret) dev_err(dev, "unable to create sysfs version group\n"); - return of_platform_populate(dev->of_node, NULL, NULL, dev); + return devm_of_platform_populate(dev); } static const struct of_device_id scpi_of_match[] = { -- cgit v1.2.3 From a963d7c5264eaa544837ac8182a9eea55007a669 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 5 Dec 2017 23:16:52 +0100 Subject: firmware: arm_scpi: improve struct dvfs_info to make code better readable Making the header subfields members of struct dvfs_info makes the code more readable and avoids some macro magic. In addition remove a useless statement using info->latency.
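The struct change in the diff below works because of the reply's wire format: the first 32 bits carry the domain (bits 0-7), the OPP count (bits 8-15) and the latency (bits 16-31), all little endian, so u8/u8/__le16 members read exactly the bytes the old DVFS_OPP_COUNT()/DVFS_LATENCY() macros extracted. A standalone sketch of that equivalence (plain C; a little-endian host is assumed, as stated in the comment):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Sketch: member-wise view of the 32-bit DVFS header. On a
 * little-endian host (SCPI payloads are little endian) this matches
 * the shift-and-mask extraction it replaces.
 */
struct dvfs_hdr {
	uint8_t  domain;	/* bits  0..7  */
	uint8_t  opp_count;	/* bits  8..15 */
	uint16_t latency;	/* bits 16..31 */
};

int main(void)
{
	uint32_t header = 0x00300502;	/* latency 0x30, 5 OPPs, domain 2 */
	struct dvfs_hdr h;

	memcpy(&h, &header, sizeof(h));
	assert(h.domain == (header & 0xff));
	assert(h.opp_count == ((header >> 8) & 0xff));
	assert(h.latency == (header >> 16));
	return 0;
}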
Tested-by: Kevin Hilman Signed-off-by: Heiner Kallweit Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scpi.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index a6f6039ee3f9..9eeb53b766e0 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -72,8 +72,6 @@ #define MAX_DVFS_DOMAINS 8 #define MAX_DVFS_OPPS 16 -#define DVFS_LATENCY(hdr) (le32_to_cpu(hdr) >> 16) -#define DVFS_OPP_COUNT(hdr) ((le32_to_cpu(hdr) >> 8) & 0xff) #define PROTOCOL_REV_MINOR_BITS 16 #define PROTOCOL_REV_MINOR_MASK ((1U << PROTOCOL_REV_MINOR_BITS) - 1) @@ -328,7 +326,9 @@ struct legacy_clk_set_value { } __packed; struct dvfs_info { - __le32 header; + u8 domain; + u8 opp_count; + __le16 latency; struct { __le32 freq; __le32 m_volt; @@ -665,8 +665,8 @@ static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain) if (!info) return ERR_PTR(-ENOMEM); - info->count = DVFS_OPP_COUNT(buf.header); - info->latency = DVFS_LATENCY(buf.header) * 1000; /* uS to nS */ + info->count = buf.opp_count; + info->latency = le16_to_cpu(buf.latency) * 1000; /* uS to nS */ info->opps = kcalloc(info->count, sizeof(*opp), GFP_KERNEL); if (!info->opps) { @@ -713,9 +713,6 @@ static int scpi_dvfs_get_transition_latency(struct device *dev) if (IS_ERR(info)) return PTR_ERR(info); - if (!info->latency) - return 0; - return info->latency; } -- cgit v1.2.3 From 7cd49a264594251d87e504fe07dcb1fe2c99bbef Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 5 Dec 2017 23:16:55 +0100 Subject: firmware: arm_scpi: improve handling of protocol and firmware version subfields By using FIELD_GET and proper masks we can avoid quite a bit of shifting and masking macro magic and make the code more readable.
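FIELD_GET() pairs a GENMASK() with an implicit shift: it masks the word and shifts the field down to bit 0, with the shift derived at compile time from the mask itself, which is what makes the removed *_BITS/*_SHIFT constants unnecessary in the diff below. A small sketch of the pattern (DEMO_* names invented for illustration):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_REV_MAJOR_MASK	GENMASK(31, 16)
#define DEMO_REV_MINOR_MASK	GENMASK(15, 0)

/* Sketch: FIELD_GET(mask, reg) behaves like
 * (reg & mask) >> (position of the mask's lowest set bit).
 */
static unsigned long demo_major(u32 version)
{
	return FIELD_GET(DEMO_REV_MAJOR_MASK, version);
}

static unsigned long demo_minor(u32 version)
{
	return FIELD_GET(DEMO_REV_MINOR_MASK, version);
}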
Tested-by: Kevin Hilman Signed-off-by: Heiner Kallweit Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scpi.c | 43 +++++++++++++++++++------------------------ 1 file changed, 19 insertions(+), 24 deletions(-) diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index 9eeb53b766e0..63441e403f60 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -28,6 +28,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include #include #include #include @@ -73,18 +74,12 @@ #define MAX_DVFS_DOMAINS 8 #define MAX_DVFS_OPPS 16 -#define PROTOCOL_REV_MINOR_BITS 16 -#define PROTOCOL_REV_MINOR_MASK ((1U << PROTOCOL_REV_MINOR_BITS) - 1) -#define PROTOCOL_REV_MAJOR(x) ((x) >> PROTOCOL_REV_MINOR_BITS) -#define PROTOCOL_REV_MINOR(x) ((x) & PROTOCOL_REV_MINOR_MASK) +#define PROTO_REV_MAJOR_MASK GENMASK(31, 16) +#define PROTO_REV_MINOR_MASK GENMASK(15, 0) -#define FW_REV_MAJOR_BITS 24 -#define FW_REV_MINOR_BITS 16 -#define FW_REV_PATCH_MASK ((1U << FW_REV_MINOR_BITS) - 1) -#define FW_REV_MINOR_MASK ((1U << FW_REV_MAJOR_BITS) - 1) -#define FW_REV_MAJOR(x) ((x) >> FW_REV_MAJOR_BITS) -#define FW_REV_MINOR(x) (((x) & FW_REV_MINOR_MASK) >> FW_REV_MINOR_BITS) -#define FW_REV_PATCH(x) ((x) & FW_REV_PATCH_MASK) +#define FW_REV_MAJOR_MASK GENMASK(31, 24) +#define FW_REV_MINOR_MASK GENMASK(23, 16) +#define FW_REV_PATCH_MASK GENMASK(15, 0) #define MAX_RX_TIMEOUT (msecs_to_jiffies(30)) @@ -861,9 +856,9 @@ static ssize_t protocol_version_show(struct device *dev, { struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev); - return sprintf(buf, "%d.%d\n", - PROTOCOL_REV_MAJOR(scpi_info->protocol_version), - PROTOCOL_REV_MINOR(scpi_info->protocol_version)); + return sprintf(buf, "%lu.%lu\n", + FIELD_GET(PROTO_REV_MAJOR_MASK, scpi_info->protocol_version), + FIELD_GET(PROTO_REV_MINOR_MASK, scpi_info->protocol_version)); } static DEVICE_ATTR_RO(protocol_version); @@ -872,10 +867,10 @@ static ssize_t firmware_version_show(struct device *dev, { struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev); - return sprintf(buf, "%d.%d.%d\n", - FW_REV_MAJOR(scpi_info->firmware_version), - FW_REV_MINOR(scpi_info->firmware_version), - FW_REV_PATCH(scpi_info->firmware_version)); + return sprintf(buf, "%lu.%lu.%lu\n", + FIELD_GET(FW_REV_MAJOR_MASK, scpi_info->firmware_version), + FIELD_GET(FW_REV_MINOR_MASK, scpi_info->firmware_version), + FIELD_GET(FW_REV_PATCH_MASK, scpi_info->firmware_version)); } static DEVICE_ATTR_RO(firmware_version); @@ -1031,12 +1026,12 @@ static int scpi_probe(struct platform_device *pdev) return ret; } - _dev_info(dev, "SCP Protocol %d.%d Firmware %d.%d.%d version\n", - PROTOCOL_REV_MAJOR(scpi_info->protocol_version), - PROTOCOL_REV_MINOR(scpi_info->protocol_version), - FW_REV_MAJOR(scpi_info->firmware_version), - FW_REV_MINOR(scpi_info->firmware_version), - FW_REV_PATCH(scpi_info->firmware_version)); + dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n", + FIELD_GET(PROTO_REV_MAJOR_MASK, scpi_info->protocol_version), + FIELD_GET(PROTO_REV_MINOR_MASK, scpi_info->protocol_version), + FIELD_GET(FW_REV_MAJOR_MASK, scpi_info->firmware_version), + FIELD_GET(FW_REV_MINOR_MASK, scpi_info->firmware_version), + FIELD_GET(FW_REV_PATCH_MASK, scpi_info->firmware_version)); scpi_info->scpi_ops = &scpi_ops; ret = devm_device_add_groups(dev, versions_groups); -- cgit v1.2.3 From 83a6060c0cd07cafdf8baa08e26c602a06977a2c Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 5 Dec 2017 23:16:58 +0100 Subject: firmware: arm_scpi: improve struct sensor_value lo_val and 
hi_val together in this order form a little-endian 64-bit value. Therefore we can simplify struct sensor_value and the code by defining it as a __le64 value and by using le64_to_cpu. Tested-by: Kevin Hilman Signed-off-by: Heiner Kallweit Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scpi.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index 63441e403f60..4447af482fe9 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -347,9 +347,8 @@ struct _scpi_sensor_info { }; struct sensor_value { - __le32 lo_val; - __le32 hi_val; -} __packed; + __le64 val; +}; struct dev_pstate_set { __le16 dev_id; @@ -777,11 +776,10 @@ static int scpi_sensor_get_value(u16 sensor, u64 *val) return ret; if (scpi_info->is_legacy) - /* only 32-bits supported, hi_val can be junk */ - *val = le32_to_cpu(buf.lo_val); + /* only 32-bits supported, upper 32 bits can be junk */ + *val = le32_to_cpup((__le32 *)&buf.val); else - *val = (u64)le32_to_cpu(buf.hi_val) << 32 | - le32_to_cpu(buf.lo_val); + *val = le64_to_cpu(buf.val); return 0; } -- cgit v1.2.3 From 27901cccf8dd7b9e8d77c2637a2b306d731e567d Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Tue, 5 Dec 2017 23:17:03 +0100 Subject: firmware: arm_scpi: drop unnecessary type cast to scpi_shared_mem This patch drops the only present type cast of the SCPI payload pointer to scpi_shared_mem in order to align with other occurrences, IOW for consistency. Tested-by: Kevin Hilman Reviewed-by: Heiner Kallweit Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scpi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index 4447af482fe9..e02d58208a94 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -453,7 +453,7 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg) unsigned long flags; struct scpi_xfer *t = msg; struct scpi_chan *ch = container_of(c, struct scpi_chan, cl); - struct scpi_shared_mem *mem = (struct scpi_shared_mem *)ch->tx_payload; + struct scpi_shared_mem *mem = ch->tx_payload; if (t->tx_buf) { if (scpi_info->is_legacy) -- cgit v1.2.3 From c10bd41ab0b2508a98651ab3e4e9fbc85425eaad Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Tue, 5 Dec 2017 23:17:09 +0100 Subject: firmware: arm_scpi: remove all single element structures Both the clk_get_value and sensor_value structures contain a single element and hence need no packing, making the whole structure definition unnecessary. This patch gets rid of both those unnecessary structures.
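The reasoning behind this cleanup: a struct whose only member is a naturally aligned 32- or 64-bit integer has no internal padding for __packed to remove, so the wrapper adds nothing over the bare type. A compile-time sketch (plain C with the GNU attribute syntax, for illustration only):

#include <stdint.h>

/* Sketch: the packed single-member wrapper and the bare integer have
 * identical size and layout, so the wrapper can simply go away.
 */
struct clk_get_value_old {
	uint32_t rate;
} __attribute__((packed));

_Static_assert(sizeof(struct clk_get_value_old) == sizeof(uint32_t),
	       "single-member wrapper adds nothing to the layout");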
Tested-by: Kevin Hilman Reviewed-by: Heiner Kallweit Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scpi.c | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index e02d58208a94..3a722e5a6666 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -304,10 +304,6 @@ struct clk_get_info { u8 name[20]; } __packed; -struct clk_get_value { - __le32 rate; -} __packed; - struct clk_set_value { __le16 id; __le16 reserved; @@ -346,10 +342,6 @@ struct _scpi_sensor_info { char name[20]; }; -struct sensor_value { - __le64 val; -}; - struct dev_pstate_set { __le16 dev_id; u8 pstate; @@ -577,13 +569,13 @@ scpi_clk_get_range(u16 clk_id, unsigned long *min, unsigned long *max) static unsigned long scpi_clk_get_val(u16 clk_id) { int ret; - struct clk_get_value clk; + __le32 rate; __le16 le_clk_id = cpu_to_le16(clk_id); ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id, - sizeof(le_clk_id), &clk, sizeof(clk)); + sizeof(le_clk_id), &rate, sizeof(rate)); - return ret ? ret : le32_to_cpu(clk.rate); + return ret ? ret : le32_to_cpu(rate); } static int scpi_clk_set_val(u16 clk_id, unsigned long rate) @@ -767,19 +759,19 @@ static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info) static int scpi_sensor_get_value(u16 sensor, u64 *val) { __le16 id = cpu_to_le16(sensor); - struct sensor_value buf; + __le64 value; int ret; ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id), - &buf, sizeof(buf)); + &value, sizeof(value)); if (ret) return ret; if (scpi_info->is_legacy) /* only 32-bits supported, upper 32 bits can be junk */ - *val = le32_to_cpup((__le32 *)&buf.val); + *val = le32_to_cpup((__le32 *)&value); else - *val = le64_to_cpu(buf.val); + *val = le64_to_cpu(value); return 0; } -- cgit v1.2.3 From 5204abd35243760116efcf565b55880915452bab Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 5 Dec 2017 23:17:11 +0100 Subject: firmware: arm_scpi: fix incorrect __iomem accesses using correct accessors At several positions in the code sparse complains about incorrect access to __iomem annotated memory. Fix this and make sparse happy. 
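Background for the sparse fix whose diff follows: pointers tagged __iomem refer to device memory and must go through the MMIO accessors (ioread32(), iowrite32(), memcpy_fromio() and friends) rather than ordinary dereferences. A minimal sketch of the corrected access shape (struct and function names invented for illustration):

#include <linux/io.h>
#include <linux/types.h>

struct demo_shared_mem {
	__le32 command;
	__le32 status;
	u8 payload[];
};

/* Sketch: use the accessors so the right barriers and access widths
 * are emitted, and so sparse can type-check the __iomem pointer.
 */
static u32 demo_read_reply(struct demo_shared_mem __iomem *mem,
			   void *buf, size_t len)
{
	memcpy_fromio(buf, mem->payload, len);
	return ioread32(&mem->status);
}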
Tested-by: Kevin Hilman Signed-off-by: Heiner Kallweit [Sudeep Holla: changed the patch title to describe the change] Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scpi.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index 3a722e5a6666..b653ada42e1d 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -405,19 +405,20 @@ static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd) unsigned int len; if (scpi_info->is_legacy) { - struct legacy_scpi_shared_mem *mem = ch->rx_payload; + struct legacy_scpi_shared_mem __iomem *mem = + ch->rx_payload; /* RX Length is not replied by the legacy Firmware */ len = match->rx_len; - match->status = le32_to_cpu(mem->status); + match->status = ioread32(&mem->status); memcpy_fromio(match->rx_buf, mem->payload, len); } else { - struct scpi_shared_mem *mem = ch->rx_payload; + struct scpi_shared_mem __iomem *mem = ch->rx_payload; len = min(match->rx_len, CMD_SIZE(cmd)); - match->status = le32_to_cpu(mem->status); + match->status = ioread32(&mem->status); memcpy_fromio(match->rx_buf, mem->payload, len); } @@ -431,11 +432,11 @@ static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd) static void scpi_handle_remote_msg(struct mbox_client *c, void *msg) { struct scpi_chan *ch = container_of(c, struct scpi_chan, cl); - struct scpi_shared_mem *mem = ch->rx_payload; + struct scpi_shared_mem __iomem *mem = ch->rx_payload; u32 cmd = 0; if (!scpi_info->is_legacy) - cmd = le32_to_cpu(mem->command); + cmd = ioread32(&mem->command); scpi_process_cmd(ch, cmd); } @@ -445,7 +446,7 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg) unsigned long flags; struct scpi_xfer *t = msg; struct scpi_chan *ch = container_of(c, struct scpi_chan, cl); - struct scpi_shared_mem *mem = ch->tx_payload; + struct scpi_shared_mem __iomem *mem = ch->tx_payload; if (t->tx_buf) { if (scpi_info->is_legacy) @@ -464,7 +465,7 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg) } if (!scpi_info->is_legacy) - mem->command = cpu_to_le32(t->cmd); + iowrite32(t->cmd, &mem->command); } static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch) -- cgit v1.2.3 From 17431b787460d5c365d81224f95bf88b4685588b Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 5 Dec 2017 23:17:13 +0100 Subject: firmware: arm_scpi: remove struct sensor_capabilities One more single-element struct was left, remove it. 
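The capability reply read in the following diff is again a single little-endian field, fetched into a bare __le16 and converted with le16_to_cpu(). For readers unfamiliar with that conversion, a runnable sketch of its semantics (plain C, demo_ name invented):

#include <stdint.h>
#include <stdio.h>

/* Sketch: le16_to_cpu() by hand; byte 0 is the low byte. On a
 * little-endian host the kernel macro compiles to a no-op.
 */
static uint16_t demo_le16_to_cpu(const uint8_t raw[2])
{
	return (uint16_t)raw[0] | ((uint16_t)raw[1] << 8);
}

int main(void)
{
	const uint8_t reply[2] = { 0x10, 0x00 };	/* 16 sensors */

	printf("sensors = %u\n", demo_le16_to_cpu(reply));
	return 0;
}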
Tested-by: Kevin Hilman Signed-off-by: Heiner Kallweit Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scpi.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index b653ada42e1d..5695b1f4d6f9 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -331,10 +331,6 @@ struct dvfs_set { u8 index; } __packed; -struct sensor_capabilities { - __le16 sensors; -} __packed; - struct _scpi_sensor_info { __le16 sensor_id; u8 class; @@ -730,13 +726,13 @@ static int scpi_dvfs_add_opps_to_device(struct device *dev) static int scpi_sensor_get_capability(u16 *sensors) { - struct sensor_capabilities cap_buf; + __le16 cap; int ret; - ret = scpi_send_message(CMD_SENSOR_CAPABILITIES, NULL, 0, &cap_buf, - sizeof(cap_buf)); + ret = scpi_send_message(CMD_SENSOR_CAPABILITIES, NULL, 0, &cap, + sizeof(cap)); if (!ret) - *sensors = le16_to_cpu(cap_buf.sensors); + *sensors = le16_to_cpu(cap); return ret; } -- cgit v1.2.3 From 96fe77b6d4762604ba034d57319ad6c4ef071205 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 5 Dec 2017 23:17:15 +0100 Subject: firmware: arm_scpi: use FIELD_GET/_PREP to simplify macro definitions Macro definitions can be simplified by making use of the FIELD_GET/_PREP bitfield macros. Tested-by: Kevin Hilman Signed-off-by: Heiner Kallweit Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scpi.c | 38 +++++++++++++++----------------------- 1 file changed, 15 insertions(+), 23 deletions(-) diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index 5695b1f4d6f9..bc7055a6a617 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -46,27 +46,19 @@ #include #include -#define CMD_ID_SHIFT 0 -#define CMD_ID_MASK 0x7f -#define CMD_TOKEN_ID_SHIFT 8 -#define CMD_TOKEN_ID_MASK 0xff -#define CMD_DATA_SIZE_SHIFT 16 -#define CMD_DATA_SIZE_MASK 0x1ff -#define CMD_LEGACY_DATA_SIZE_SHIFT 20 -#define CMD_LEGACY_DATA_SIZE_MASK 0x1ff -#define PACK_SCPI_CMD(cmd_id, tx_sz) \ - ((((cmd_id) & CMD_ID_MASK) << CMD_ID_SHIFT) | \ - (((tx_sz) & CMD_DATA_SIZE_MASK) << CMD_DATA_SIZE_SHIFT)) -#define ADD_SCPI_TOKEN(cmd, token) \ - ((cmd) |= (((token) & CMD_TOKEN_ID_MASK) << CMD_TOKEN_ID_SHIFT)) -#define PACK_LEGACY_SCPI_CMD(cmd_id, tx_sz) \ - ((((cmd_id) & CMD_ID_MASK) << CMD_ID_SHIFT) | \ - (((tx_sz) & CMD_LEGACY_DATA_SIZE_MASK) << CMD_LEGACY_DATA_SIZE_SHIFT)) - -#define CMD_SIZE(cmd) (((cmd) >> CMD_DATA_SIZE_SHIFT) & CMD_DATA_SIZE_MASK) -#define CMD_LEGACY_SIZE(cmd) (((cmd) >> CMD_LEGACY_DATA_SIZE_SHIFT) & \ - CMD_LEGACY_DATA_SIZE_MASK) -#define CMD_UNIQ_MASK (CMD_TOKEN_ID_MASK << CMD_TOKEN_ID_SHIFT | CMD_ID_MASK) +#define CMD_ID_MASK GENMASK(6, 0) +#define CMD_TOKEN_ID_MASK GENMASK(15, 8) +#define CMD_DATA_SIZE_MASK GENMASK(24, 16) +#define CMD_LEGACY_DATA_SIZE_MASK GENMASK(28, 20) +#define PACK_SCPI_CMD(cmd_id, tx_sz) \ + (FIELD_PREP(CMD_ID_MASK, cmd_id) | \ + FIELD_PREP(CMD_DATA_SIZE_MASK, tx_sz)) +#define PACK_LEGACY_SCPI_CMD(cmd_id, tx_sz) \ + (FIELD_PREP(CMD_ID_MASK, cmd_id) | \ + FIELD_PREP(CMD_LEGACY_DATA_SIZE_MASK, tx_sz)) + +#define CMD_SIZE(cmd) FIELD_GET(CMD_DATA_SIZE_MASK, cmd) +#define CMD_UNIQ_MASK (CMD_TOKEN_ID_MASK | CMD_ID_MASK) #define CMD_XTRACT_UNIQ(cmd) ((cmd) & CMD_UNIQ_MASK) #define SCPI_SLOT 0 @@ -412,7 +404,7 @@ static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd) } else { struct scpi_shared_mem __iomem *mem = ch->rx_payload; - len = min(match->rx_len, CMD_SIZE(cmd)); + len = min_t(unsigned int, match->rx_len, CMD_SIZE(cmd)); 
match->status = ioread32(&mem->status); memcpy_fromio(match->rx_buf, mem->payload, len); @@ -454,7 +446,7 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg) if (t->rx_buf) { if (!(++ch->token)) ++ch->token; - ADD_SCPI_TOKEN(t->cmd, ch->token); + t->cmd |= FIELD_PREP(CMD_TOKEN_ID_MASK, ch->token); spin_lock_irqsave(&ch->rx_lock, flags); list_add_tail(&t->node, &ch->rx_pending); spin_unlock_irqrestore(&ch->rx_lock, flags); -- cgit v1.2.3 From 62c60efb63c868c1b1e54a1138c3295deae76eda Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 5 Dec 2017 23:17:19 +0100 Subject: firmware: arm_scpi: improve info message for pre-1.0 firmware On legacy pre-1.0 firmware versions so far the following message is printed which may cause some confusion: SCP Protocol 0.0 Firmware 0.0.0 version Therefore replace the message with the following if firmware doesn't provide usable version information: SCP Protocol legacy pre-1.0 firmware Tested-by: Kevin Hilman Signed-off-by: Heiner Kallweit Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scpi.c | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index bc7055a6a617..6d7a6c0a5e07 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -1005,12 +1005,21 @@ static int scpi_probe(struct platform_device *pdev) return ret; } - dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n", - FIELD_GET(PROTO_REV_MAJOR_MASK, scpi_info->protocol_version), - FIELD_GET(PROTO_REV_MINOR_MASK, scpi_info->protocol_version), - FIELD_GET(FW_REV_MAJOR_MASK, scpi_info->firmware_version), - FIELD_GET(FW_REV_MINOR_MASK, scpi_info->firmware_version), - FIELD_GET(FW_REV_PATCH_MASK, scpi_info->firmware_version)); + if (scpi_info->is_legacy && !scpi_info->protocol_version && + !scpi_info->firmware_version) + dev_info(dev, "SCP Protocol legacy pre-1.0 firmware\n"); + else + dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n", + FIELD_GET(PROTO_REV_MAJOR_MASK, + scpi_info->protocol_version), + FIELD_GET(PROTO_REV_MINOR_MASK, + scpi_info->protocol_version), + FIELD_GET(FW_REV_MAJOR_MASK, + scpi_info->firmware_version), + FIELD_GET(FW_REV_MINOR_MASK, + scpi_info->firmware_version), + FIELD_GET(FW_REV_PATCH_MASK, + scpi_info->firmware_version)); scpi_info->scpi_ops = &scpi_ops; ret = devm_device_add_groups(dev, versions_groups); -- cgit v1.2.3 From 07455e4e4321129af0053c61191707ccf8289fc7 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Mon, 26 Jun 2017 16:02:39 +0100 Subject: dt-bindings: mailbox: add support for mailbox client shared memory Many users of the mailbox controllers depend on the shared memory between the two end points to exchange the main data while using simple doorbell mechanism to alert the end points of the presence of a message. This patch defines device tree bindings to represent such shared memory in a generic way. Cc: Mark Rutland Acked-by: Rob Herring Signed-off-by: Sudeep Holla --- .../devicetree/bindings/mailbox/mailbox.txt | 28 ++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/Documentation/devicetree/bindings/mailbox/mailbox.txt b/Documentation/devicetree/bindings/mailbox/mailbox.txt index be05b9746c69..af8ecee2ac68 100644 --- a/Documentation/devicetree/bindings/mailbox/mailbox.txt +++ b/Documentation/devicetree/bindings/mailbox/mailbox.txt @@ -23,6 +23,11 @@ Required property: Optional property: - mbox-names: List of identifier strings for each mailbox channel. 
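Stepping back to the FIELD_GET/_PREP conversion just above: FIELD_PREP(mask, val) shifts val into the mask's position and masks the result, so packing a command word becomes a plain OR of prepared fields and the old *_SHIFT constants disappear. A small sketch (DEMO_* names invented for illustration; unpacking the size back out is FIELD_GET(DEMO_CMD_SIZE_MASK, cmd)):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_CMD_ID_MASK	GENMASK(6, 0)
#define DEMO_CMD_SIZE_MASK	GENMASK(24, 16)

/* Sketch: compose a command word from prepared fields. */
static u32 demo_pack_cmd(u8 cmd_id, u16 tx_sz)
{
	return FIELD_PREP(DEMO_CMD_ID_MASK, cmd_id) |
	       FIELD_PREP(DEMO_CMD_SIZE_MASK, tx_sz);
}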
+- shmem : List of phandle pointing to the shared memory(SHM) area between the + users of these mailboxes for IPC, one for each mailbox. This shared + memory can be part of any memory reserved for the purpose of this + communication between the mailbox client and the remote. + Example: pwr_cntrl: power { @@ -30,3 +35,26 @@ Example: mbox-names = "pwr-ctrl", "rpc"; mboxes = <&mailbox 0 &mailbox 1>; }; + +Example with shared memory(shmem): + + sram: sram@50000000 { + compatible = "mmio-sram"; + reg = <0x50000000 0x10000>; + + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x50000000 0x10000>; + + cl_shmem: shmem@0 { + compatible = "client-shmem"; + reg = <0x0 0x200>; + }; + }; + + client@2e000000 { + ... + mboxes = <&mailbox 0>; + shmem = <&cl_shmem>; + .. + }; -- cgit v1.2.3 From fe7be8b297b279189260f8ce084ea16fab0c2be0 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Mon, 5 Jun 2017 17:27:11 +0100 Subject: dt-bindings: arm: add support for ARM System Control and Management Interface(SCMI) protocol This patch adds devicetree binding for System Control and Management Interface (SCMI) Message Protocol used between the Application Cores(AP) and the System Control Processor(SCP). The MHU peripheral provides a mechanism for inter-processor communication between SCP's M3 processor and AP. SCP offers control and management of the core/cluster power states, various power domain DVFS including the core/cluster, certain system clocks configuration, thermal sensors and many others. SCMI protocol is developed as better replacement to the existing SCPI which is not flexible and easily extensible. Cc: Mark Rutland Acked-by: Rob Herring Signed-off-by: Sudeep Holla --- Documentation/devicetree/bindings/arm/arm,scmi.txt | 179 +++++++++++++++++++++ MAINTAINERS | 4 +- 2 files changed, 181 insertions(+), 2 deletions(-) create mode 100644 Documentation/devicetree/bindings/arm/arm,scmi.txt diff --git a/Documentation/devicetree/bindings/arm/arm,scmi.txt b/Documentation/devicetree/bindings/arm/arm,scmi.txt new file mode 100644 index 000000000000..5f3719ab7075 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/arm,scmi.txt @@ -0,0 +1,179 @@ +System Control and Management Interface (SCMI) Message Protocol +---------------------------------------------------------- + +The SCMI is intended to allow agents such as OSPM to manage various functions +that are provided by the hardware platform it is running on, including power +and performance functions. + +This binding is intended to define the interface the firmware implementing +the SCMI as described in ARM document number ARM DUI 0922B ("ARM System Control +and Management Interface Platform Design Document")[0] provide for OSPM in +the device tree. + +Required properties: + +The scmi node with the following properties shall be under the /firmware/ node. + +- compatible : shall be "arm,scmi" +- mboxes: List of phandle and mailbox channel specifiers. It should contain + exactly one or two mailboxes, one for transmitting messages("tx") + and another optional for receiving the notifications("rx") if + supported. +- shmem : List of phandle pointing to the shared memory(SHM) area as per + generic mailbox client binding. +- #address-cells : should be '1' if the device has sub-nodes, maps to + protocol identifier for a given sub-node. +- #size-cells : should be '0' as 'reg' property doesn't have any size + associated with it. + +Optional properties: + +- mbox-names: shall be "tx" or "rx" depending on mboxes entries. 
+ +See Documentation/devicetree/bindings/mailbox/mailbox.txt for more details +about the generic mailbox controller and client driver bindings. + +The mailbox is the only permitted method of calling the SCMI firmware. +Mailbox doorbell is used as a mechanism to alert the presence of a +messages and/or notification. + +Each protocol supported shall have a sub-node with corresponding compatible +as described in the following sections. If the platform supports dedicated +communication channel for a particular protocol, the 3 properties namely: +mboxes, mbox-names and shmem shall be present in the sub-node corresponding +to that protocol. + +Clock/Performance bindings for the clocks/OPPs based on SCMI Message Protocol +------------------------------------------------------------ + +This binding uses the common clock binding[1]. + +Required properties: +- #clock-cells : Should be 1. Contains the Clock ID value used by SCMI commands. + +Power domain bindings for the power domains based on SCMI Message Protocol +------------------------------------------------------------ + +This binding for the SCMI power domain providers uses the generic power +domain binding[2]. + +Required properties: + - #power-domain-cells : Should be 1. Contains the device or the power + domain ID value used by SCMI commands. + +Sensor bindings for the sensors based on SCMI Message Protocol +-------------------------------------------------------------- +SCMI provides an API to access the various sensors on the SoC. + +Required properties: +- #thermal-sensor-cells: should be set to 1. This property follows the + thermal device tree bindings[3]. + + Valid cell values are raw identifiers (Sensor ID) + as used by the firmware. Refer to platform details + for your implementation for the IDs to use. + +SRAM and Shared Memory for SCMI +------------------------------- + +A small area of SRAM is reserved for SCMI communication between application +processors and SCP. + +The properties should follow the generic mmio-sram description found in [4] + +Each sub-node represents the reserved area for SCMI. + +Required sub-node properties: +- reg : The base offset and size of the reserved area with the SRAM +- compatible : should be "arm,scmi-shmem" for Non-secure SRAM based + shared memory + +[0] http://infocenter.arm.com/help/topic/com.arm.doc.den0056a/index.html +[1] Documentation/devicetree/bindings/clock/clock-bindings.txt +[2] Documentation/devicetree/bindings/power/power_domain.txt +[3] Documentation/devicetree/bindings/thermal/thermal.txt +[4] Documentation/devicetree/bindings/sram/sram.txt + +Example: + +sram@50000000 { + compatible = "mmio-sram"; + reg = <0x0 0x50000000 0x0 0x10000>; + + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x0 0x50000000 0x10000>; + + cpu_scp_lpri: scp-shmem@0 { + compatible = "arm,scmi-shmem"; + reg = <0x0 0x200>; + }; + + cpu_scp_hpri: scp-shmem@200 { + compatible = "arm,scmi-shmem"; + reg = <0x200 0x200>; + }; +}; + +mailbox@40000000 { + .... + #mbox-cells = <1>; + reg = <0x0 0x40000000 0x0 0x10000>; +}; + +firmware { + + ... 
+ + scmi { + compatible = "arm,scmi"; + mboxes = <&mailbox 0 &mailbox 1>; + mbox-names = "tx", "rx"; + shmem = <&cpu_scp_lpri &cpu_scp_hpri>; + #address-cells = <1>; + #size-cells = <0>; + + scmi_devpd: protocol@11 { + reg = <0x11>; + #power-domain-cells = <1>; + }; + + scmi_dvfs: protocol@13 { + reg = <0x13>; + #clock-cells = <1>; + }; + + scmi_clk: protocol@14 { + reg = <0x14>; + #clock-cells = <1>; + }; + + scmi_sensors0: protocol@15 { + reg = <0x15>; + #thermal-sensor-cells = <1>; + }; + }; +}; + +cpu@0 { + ... + reg = <0 0>; + clocks = <&scmi_dvfs 0>; +}; + +hdlcd@7ff60000 { + ... + reg = <0 0x7ff60000 0 0x1000>; + clocks = <&scmi_clk 4>; + power-domains = <&scmi_devpd 1>; +}; + +thermal-zones { + soc_thermal { + polling-delay-passive = <100>; + polling-delay = <1000>; + /* sensor ID */ + thermal-sensors = <&scmi_sensors0 3>; + ... + }; +}; diff --git a/MAINTAINERS b/MAINTAINERS index 3bdc260e36b7..5c8c55ba22a3 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13379,11 +13379,11 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd.git S: Supported F: drivers/mfd/syscon.c -SYSTEM CONTROL & POWER INTERFACE (SCPI) Message Protocol drivers +SYSTEM CONTROL & POWER/MANAGEMENT INTERFACE (SCPI/SCMI) Message Protocol drivers M: Sudeep Holla L: linux-arm-kernel@lists.infradead.org S: Maintained -F: Documentation/devicetree/bindings/arm/arm,scpi.txt +F: Documentation/devicetree/bindings/arm/arm,sc[mp]i.txt F: drivers/clk/clk-scpi.c F: drivers/cpufreq/scpi-cpufreq.c F: drivers/firmware/arm_scpi.c -- cgit v1.2.3 From aa4f886f3893f88146e8e02fd1e9c5c9e43cbcc1 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Tue, 28 Mar 2017 11:36:07 +0100 Subject: firmware: arm_scmi: add basic driver infrastructure for SCMI The SCMI is intended to allow OSPM to manage various functions that are provided by the hardware platform it is running on, including power and performance functions. SCMI provides two levels of abstraction, protocols and transports. Protocols define individual groups of system control and management messages. A protocol specification describes the messages that it supports. Transports describe the method by which protocol messages are communicated between agents and the platform. This patch adds basic infrastructure to manage the message allocation, initialisation, packing/unpacking and shared memory management. 
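The infrastructure added here revolves around a single 32-bit message header; per the MSG_* masks quoted later in driver.c, the message id sits in bits 0-7, the message type in bits 8-9, the protocol id in bits 10-17 and the sequence token in bits 18-27. A standalone sketch of that packing (plain C, demo_ name invented; the driver's own helper may differ):

#include <stdint.h>

/* Sketch: compose an SCMI message header from its subfields,
 * mirroring the MSG_ID/MSG_TYPE/MSG_PROTOCOL_ID/MSG_TOKEN_ID
 * shift and mask definitions shown below.
 */
static uint32_t demo_pack_scmi_header(uint8_t id, uint8_t type,
				      uint8_t protocol_id, uint16_t seq)
{
	return (uint32_t)(id & 0xff) |
	       ((uint32_t)(type & 0x3) << 8) |
	       ((uint32_t)(protocol_id & 0xff) << 10) |
	       ((uint32_t)(seq & 0x3ff) << 18);
}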
Cc: Arnd Bergmann Cc: Greg Kroah-Hartman Signed-off-by: Sudeep Holla --- MAINTAINERS | 3 +- drivers/firmware/Kconfig | 21 ++ drivers/firmware/Makefile | 1 + drivers/firmware/arm_scmi/Makefile | 2 + drivers/firmware/arm_scmi/common.h | 66 ++++ drivers/firmware/arm_scmi/driver.c | 678 +++++++++++++++++++++++++++++++++++++ include/linux/scmi_protocol.h | 16 + 7 files changed, 786 insertions(+), 1 deletion(-) create mode 100644 drivers/firmware/arm_scmi/Makefile create mode 100644 drivers/firmware/arm_scmi/common.h create mode 100644 drivers/firmware/arm_scmi/driver.c create mode 100644 include/linux/scmi_protocol.h diff --git a/MAINTAINERS b/MAINTAINERS index 5c8c55ba22a3..7cede6e7dfed 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13387,7 +13387,8 @@ F: Documentation/devicetree/bindings/arm/arm,sc[mp]i.txt F: drivers/clk/clk-scpi.c F: drivers/cpufreq/scpi-cpufreq.c F: drivers/firmware/arm_scpi.c -F: include/linux/scpi_protocol.h +F: drivers/firmware/arm_scmi/ +F: include/linux/sc[mp]i_protocol.h SYSTEM RESET/SHUTDOWN DRIVERS M: Sebastian Reichel diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index b7c748248e53..704961e0473a 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -19,6 +19,27 @@ config ARM_PSCI_CHECKER on and off through hotplug, so for now torture tests and PSCI checker are mutually exclusive. +config ARM_SCMI_PROTOCOL + bool "ARM System Control and Management Interface (SCMI) Message Protocol" + depends on ARM || ARM64 || COMPILE_TEST + depends on MAILBOX + help + ARM System Control and Management Interface (SCMI) protocol is a + set of operating system-independent software interfaces that are + used in system management. SCMI is extensible and currently provides + interfaces for: Discovery and self-description of the interfaces + it supports, Power domain management which is the ability to place + a given device or domain into the various power-saving states that + it supports, Performance management which is the ability to control + the performance of a domain that is composed of compute engines + such as application processors and other accelerators, Clock + management which is the ability to set and inquire rates on platform + managed clocks and Sensor management which is the ability to read + sensor data, and be notified of sensor value. + + This protocol library provides interface for all the client drivers + making use of the features offered by the SCMI. 
+ config ARM_SCPI_PROTOCOL tristate "ARM System Control and Power Interface (SCPI) Message Protocol" depends on ARM || ARM64 || COMPILE_TEST diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index b248238ddc6a..e18a041cfc53 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o +obj-$(CONFIG_ARM_SCMI_PROTOCOL) += arm_scmi/ obj-y += broadcom/ obj-y += meson/ obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile new file mode 100644 index 000000000000..b2a24ba2b636 --- /dev/null +++ b/drivers/firmware/arm_scmi/Makefile @@ -0,0 +1,2 @@ +obj-y = scmi-driver.o +scmi-driver-y = driver.o diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h new file mode 100644 index 000000000000..d57eb1862f68 --- /dev/null +++ b/drivers/firmware/arm_scmi/common.h @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Message Protocol + * driver common header file containing some definitions, structures + * and function prototypes used in all the different SCMI protocols. + * + * Copyright (C) 2018 ARM Ltd. + */ + +#include +#include +#include + +/** + * struct scmi_msg_hdr - Message(Tx/Rx) header + * + * @id: The identifier of the command being sent + * @protocol_id: The identifier of the protocol used to send @id command + * @seq: The token to identify the message. when a message/command returns, + * the platform returns the whole message header unmodified including + * the token. + */ +struct scmi_msg_hdr { + u8 id; + u8 protocol_id; + u16 seq; + u32 status; + bool poll_completion; +}; + +/** + * struct scmi_msg - Message(Tx/Rx) structure + * + * @buf: Buffer pointer + * @len: Length of data in the Buffer + */ +struct scmi_msg { + void *buf; + size_t len; +}; + +/** + * struct scmi_xfer - Structure representing a message flow + * + * @hdr: Transmit message header + * @tx: Transmit message + * @rx: Receive message, the buffer should be pre-allocated to store + * message. If request-ACK protocol is used, we can reuse the same + * buffer for the rx path as we use for the tx path. + * @done: completion event + */ + +struct scmi_xfer { + void *con_priv; + struct scmi_msg_hdr hdr; + struct scmi_msg tx; + struct scmi_msg rx; + struct completion done; +}; + +void scmi_one_xfer_put(const struct scmi_handle *h, struct scmi_xfer *xfer); +int scmi_do_xfer(const struct scmi_handle *h, struct scmi_xfer *xfer); +int scmi_one_xfer_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id, + size_t tx_size, size_t rx_size, struct scmi_xfer **p); +int scmi_handle_put(const struct scmi_handle *handle); +struct scmi_handle *scmi_handle_get(struct device *dev); diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c new file mode 100644 index 000000000000..7824cce54373 --- /dev/null +++ b/drivers/firmware/arm_scmi/driver.c @@ -0,0 +1,678 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Message Protocol driver + * + * SCMI Message Protocol is used between the System Control Processor(SCP) + * and the Application Processors(AP). The Message Handling Unit(MHU) + * provides a mechanism for inter-processor communication between SCP's + * Cortex M3 and AP. 
+ * + * SCP offers control and management of the core/cluster power states, + * various power domain DVFS including the core/cluster, certain system + * clocks configuration, thermal sensors and many others. + * + * Copyright (C) 2018 ARM Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" + +#define MSG_ID_SHIFT 0 +#define MSG_ID_MASK 0xff +#define MSG_TYPE_SHIFT 8 +#define MSG_TYPE_MASK 0x3 +#define MSG_PROTOCOL_ID_SHIFT 10 +#define MSG_PROTOCOL_ID_MASK 0xff +#define MSG_TOKEN_ID_SHIFT 18 +#define MSG_TOKEN_ID_MASK 0x3ff +#define MSG_XTRACT_TOKEN(header) \ + (((header) >> MSG_TOKEN_ID_SHIFT) & MSG_TOKEN_ID_MASK) + +enum scmi_error_codes { + SCMI_SUCCESS = 0, /* Success */ + SCMI_ERR_SUPPORT = -1, /* Not supported */ + SCMI_ERR_PARAMS = -2, /* Invalid Parameters */ + SCMI_ERR_ACCESS = -3, /* Invalid access/permission denied */ + SCMI_ERR_ENTRY = -4, /* Not found */ + SCMI_ERR_RANGE = -5, /* Value out of range */ + SCMI_ERR_BUSY = -6, /* Device busy */ + SCMI_ERR_COMMS = -7, /* Communication Error */ + SCMI_ERR_GENERIC = -8, /* Generic Error */ + SCMI_ERR_HARDWARE = -9, /* Hardware Error */ + SCMI_ERR_PROTOCOL = -10,/* Protocol Error */ + SCMI_ERR_MAX +}; + +/* List of all SCMI devices active in system */ +static LIST_HEAD(scmi_list); +/* Protection for the entire list */ +static DEFINE_MUTEX(scmi_list_mutex); + +/** + * struct scmi_xfers_info - Structure to manage transfer information + * + * @xfer_block: Preallocated Message array + * @xfer_alloc_table: Bitmap table for allocated messages. + * Index of this bitmap table is also used for message + * sequence identifier. + * @xfer_lock: Protection for message allocation + */ +struct scmi_xfers_info { + struct scmi_xfer *xfer_block; + unsigned long *xfer_alloc_table; + /* protect transfer allocation */ + spinlock_t xfer_lock; +}; + +/** + * struct scmi_desc - Description of SoC integration + * + * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds) + * @max_msg: Maximum number of messages that can be pending + * simultaneously in the system + * @max_msg_size: Maximum size of data per message that can be handled. + */ +struct scmi_desc { + int max_rx_timeout_ms; + int max_msg; + int max_msg_size; +}; + +/** + * struct scmi_info - Structure representing a SCMI instance + * + * @dev: Device pointer + * @desc: SoC description for this instance + * @handle: Instance of SCMI handle to send to clients + * @cl: Mailbox Client + * @tx_chan: Transmit mailbox channel + * @tx_payload: Transmit mailbox channel payload area + * @minfo: Message info + * @node: list head + * @users: Number of users of this instance + */ +struct scmi_info { + struct device *dev; + const struct scmi_desc *desc; + struct scmi_handle handle; + struct mbox_client cl; + struct mbox_chan *tx_chan; + void __iomem *tx_payload; + struct scmi_xfers_info minfo; + struct list_head node; + int users; +}; + +#define client_to_scmi_info(c) container_of(c, struct scmi_info, cl) +#define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle) + +/* + * SCMI specification requires all parameters, message headers, return + * arguments or any protocol data to be expressed in little endian + * format only. 
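+ *
+ * The shared memory layout below therefore uses __le32 fields and is
+ * accessed only through ioread32()/iowrite32() and the
+ * memcpy_fromio()/memcpy_toio() helpers.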
+ */ +struct scmi_shared_mem { + __le32 reserved; + __le32 channel_status; +#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR BIT(1) +#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE BIT(0) + __le32 reserved1[2]; + __le32 flags; +#define SCMI_SHMEM_FLAG_INTR_ENABLED BIT(0) + __le32 length; + __le32 msg_header; + u8 msg_payload[0]; +}; + +static const int scmi_linux_errmap[] = { + /* better than switch case as long as return value is continuous */ + 0, /* SCMI_SUCCESS */ + -EOPNOTSUPP, /* SCMI_ERR_SUPPORT */ + -EINVAL, /* SCMI_ERR_PARAM */ + -EACCES, /* SCMI_ERR_ACCESS */ + -ENOENT, /* SCMI_ERR_ENTRY */ + -ERANGE, /* SCMI_ERR_RANGE */ + -EBUSY, /* SCMI_ERR_BUSY */ + -ECOMM, /* SCMI_ERR_COMMS */ + -EIO, /* SCMI_ERR_GENERIC */ + -EREMOTEIO, /* SCMI_ERR_HARDWARE */ + -EPROTO, /* SCMI_ERR_PROTOCOL */ +}; + +static inline int scmi_to_linux_errno(int errno) +{ + if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX) + return scmi_linux_errmap[-errno]; + return -EIO; +} + +/** + * scmi_dump_header_dbg() - Helper to dump a message header. + * + * @dev: Device pointer corresponding to the SCMI entity + * @hdr: pointer to header. + */ +static inline void scmi_dump_header_dbg(struct device *dev, + struct scmi_msg_hdr *hdr) +{ + dev_dbg(dev, "Command ID: %x Sequence ID: %x Protocol: %x\n", + hdr->id, hdr->seq, hdr->protocol_id); +} + +static void scmi_fetch_response(struct scmi_xfer *xfer, + struct scmi_shared_mem __iomem *mem) +{ + xfer->hdr.status = ioread32(mem->msg_payload); + /* Skip the length of header and statues in payload area i.e 8 bytes*/ + xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8); + + /* Take a copy to the rx buffer.. */ + memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len); +} + +/** + * scmi_rx_callback() - mailbox client callback for receive messages + * + * @cl: client pointer + * @m: mailbox message + * + * Processes one received message to appropriate transfer information and + * signals completion of the transfer. + * + * NOTE: This function will be invoked in IRQ context, hence should be + * as optimal as possible. + */ +static void scmi_rx_callback(struct mbox_client *cl, void *m) +{ + u16 xfer_id; + struct scmi_xfer *xfer; + struct scmi_info *info = client_to_scmi_info(cl); + struct scmi_xfers_info *minfo = &info->minfo; + struct device *dev = info->dev; + struct scmi_shared_mem __iomem *mem = info->tx_payload; + + xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header)); + + /* + * Are we even expecting this? + */ + if (!test_bit(xfer_id, minfo->xfer_alloc_table)) { + dev_err(dev, "message for %d is not expected!\n", xfer_id); + return; + } + + xfer = &minfo->xfer_block[xfer_id]; + + scmi_dump_header_dbg(dev, &xfer->hdr); + /* Is the message of valid length? */ + if (xfer->rx.len > info->desc->max_msg_size) { + dev_err(dev, "unable to handle %zu xfer(max %d)\n", + xfer->rx.len, info->desc->max_msg_size); + return; + } + + scmi_fetch_response(xfer, mem); + complete(&xfer->done); +} + +/** + * pack_scmi_header() - packs and returns 32-bit header + * + * @hdr: pointer to header containing all the information on message id, + * protocol id and sequence id. 
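+ *
+ * Worked example (values invented for illustration): id 0x6,
+ * protocol_id 0x10 and seq 1 pack to
+ * (0x6 << 0) | (1 << 18) | (0x10 << 10) = 0x44006.
+ *
+ * Return: 32-bit packed message header to be sent to the platform.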
+ */ +static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr) +{ + return ((hdr->id & MSG_ID_MASK) << MSG_ID_SHIFT) | + ((hdr->seq & MSG_TOKEN_ID_MASK) << MSG_TOKEN_ID_SHIFT) | + ((hdr->protocol_id & MSG_PROTOCOL_ID_MASK) << MSG_PROTOCOL_ID_SHIFT); +} + +/** + * scmi_tx_prepare() - mailbox client callback to prepare for the transfer + * + * @cl: client pointer + * @m: mailbox message + * + * This function prepares the shared memory which contains the header and the + * payload. + */ +static void scmi_tx_prepare(struct mbox_client *cl, void *m) +{ + struct scmi_xfer *t = m; + struct scmi_info *info = client_to_scmi_info(cl); + struct scmi_shared_mem __iomem *mem = info->tx_payload; + + /* Mark channel busy + clear error */ + iowrite32(0x0, &mem->channel_status); + iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED, + &mem->flags); + iowrite32(sizeof(mem->msg_header) + t->tx.len, &mem->length); + iowrite32(pack_scmi_header(&t->hdr), &mem->msg_header); + if (t->tx.buf) + memcpy_toio(mem->msg_payload, t->tx.buf, t->tx.len); +} + +/** + * scmi_one_xfer_get() - Allocate one message + * + * @handle: SCMI entity handle + * + * Helper function which is used by various command functions that are + * exposed to clients of this driver for allocating a message traffic event. + * + * This function can sleep depending on pending requests already in the system + * for the SCMI entity. Further, this also holds a spinlock to maintain + * integrity of internal data structures. + * + * Return: 0 if all went fine, else corresponding error. + */ +static struct scmi_xfer *scmi_one_xfer_get(const struct scmi_handle *handle) +{ + u16 xfer_id; + struct scmi_xfer *xfer; + unsigned long flags, bit_pos; + struct scmi_info *info = handle_to_scmi_info(handle); + struct scmi_xfers_info *minfo = &info->minfo; + + /* Keep the locked section as small as possible */ + spin_lock_irqsave(&minfo->xfer_lock, flags); + bit_pos = find_first_zero_bit(minfo->xfer_alloc_table, + info->desc->max_msg); + if (bit_pos == info->desc->max_msg) { + spin_unlock_irqrestore(&minfo->xfer_lock, flags); + return ERR_PTR(-ENOMEM); + } + set_bit(bit_pos, minfo->xfer_alloc_table); + spin_unlock_irqrestore(&minfo->xfer_lock, flags); + + xfer_id = bit_pos; + + xfer = &minfo->xfer_block[xfer_id]; + xfer->hdr.seq = xfer_id; + reinit_completion(&xfer->done); + + return xfer; +} + +/** + * scmi_one_xfer_put() - Release a message + * + * @minfo: transfer info pointer + * @xfer: message that was reserved by scmi_one_xfer_get + * + * This holds a spinlock to maintain integrity of internal data structures. + */ +void scmi_one_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer) +{ + unsigned long flags; + struct scmi_info *info = handle_to_scmi_info(handle); + struct scmi_xfers_info *minfo = &info->minfo; + + /* + * Keep the locked section as small as possible + * NOTE: we might escape with smp_mb and no lock here.. + * but just be conservative and symmetric. + */ + spin_lock_irqsave(&minfo->xfer_lock, flags); + clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table); + spin_unlock_irqrestore(&minfo->xfer_lock, flags); +} + +/** + * scmi_do_xfer() - Do one transfer + * + * @info: Pointer to SCMI entity information + * @xfer: Transfer to initiate and wait for response + * + * Return: -ETIMEDOUT in case of no response, if transmit error, + * return corresponding error, else if all goes well, + * return 0. 
+ */ +int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer) +{ + int ret; + int timeout; + struct scmi_info *info = handle_to_scmi_info(handle); + struct device *dev = info->dev; + + ret = mbox_send_message(info->tx_chan, xfer); + if (ret < 0) { + dev_dbg(dev, "mbox send fail %d\n", ret); + return ret; + } + + /* mbox_send_message returns non-negative value on success, so reset */ + ret = 0; + + /* And we wait for the response. */ + timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); + if (!wait_for_completion_timeout(&xfer->done, timeout)) { + dev_err(dev, "mbox timed out in resp(caller: %pS)\n", + (void *)_RET_IP_); + ret = -ETIMEDOUT; + } else if (xfer->hdr.status) { + ret = scmi_to_linux_errno(xfer->hdr.status); + } + /* + * NOTE: we might prefer not to need the mailbox ticker to manage the + * transfer queueing since the protocol layer queues things by itself. + * Unfortunately, we have to kick the mailbox framework after we have + * received our message. + */ + mbox_client_txdone(info->tx_chan, ret); + + return ret; +} + +/** + * scmi_one_xfer_init() - Allocate and initialise one message + * + * @handle: SCMI entity handle + * @msg_id: Message identifier + * @msg_prot_id: Protocol identifier for the message + * @tx_size: transmit message size + * @rx_size: receive message size + * @p: pointer to the allocated and initialised message + * + * This function allocates the message using @scmi_one_xfer_get and + * initialise the header. + * + * Return: 0 if all went fine with @p pointing to message, else + * corresponding error. + */ +int scmi_one_xfer_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id, + size_t tx_size, size_t rx_size, struct scmi_xfer **p) +{ + int ret; + struct scmi_xfer *xfer; + struct scmi_info *info = handle_to_scmi_info(handle); + struct device *dev = info->dev; + + /* Ensure we have sane transfer sizes */ + if (rx_size > info->desc->max_msg_size || + tx_size > info->desc->max_msg_size) + return -ERANGE; + + xfer = scmi_one_xfer_get(handle); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "failed to get free message slot(%d)\n", ret); + return ret; + } + + xfer->tx.len = tx_size; + xfer->rx.len = rx_size ? : info->desc->max_msg_size; + xfer->hdr.id = msg_id; + xfer->hdr.protocol_id = prot_id; + xfer->hdr.poll_completion = false; + + *p = xfer; + return 0; +} + +/** + * scmi_handle_get() - Get the SCMI handle for a device + * + * @dev: pointer to device for which we want SCMI handle + * + * NOTE: The function does not track individual clients of the framework + * and is expected to be maintained by caller of SCMI protocol library. + * scmi_handle_put must be balanced with successful scmi_handle_get + * + * Return: pointer to handle if successful, NULL on error + */ +struct scmi_handle *scmi_handle_get(struct device *dev) +{ + struct list_head *p; + struct scmi_info *info; + struct scmi_handle *handle = NULL; + + mutex_lock(&scmi_list_mutex); + list_for_each(p, &scmi_list) { + info = list_entry(p, struct scmi_info, node); + if (dev->parent == info->dev) { + handle = &info->handle; + info->users++; + break; + } + } + mutex_unlock(&scmi_list_mutex); + + return handle; +} + +/** + * scmi_handle_put() - Release the handle acquired by scmi_handle_get + * + * @handle: handle acquired by scmi_handle_get + * + * NOTE: The function does not track individual clients of the framework + * and is expected to be maintained by caller of SCMI protocol library. 
+ * scmi_handle_put must be balanced with successful scmi_handle_get + * + * Return: 0 is successfully released + * if null was passed, it returns -EINVAL; + */ +int scmi_handle_put(const struct scmi_handle *handle) +{ + struct scmi_info *info; + + if (!handle) + return -EINVAL; + + info = handle_to_scmi_info(handle); + mutex_lock(&scmi_list_mutex); + if (!WARN_ON(!info->users)) + info->users--; + mutex_unlock(&scmi_list_mutex); + + return 0; +} + +static const struct scmi_desc scmi_generic_desc = { + .max_rx_timeout_ms = 30, /* we may increase this if required */ + .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */ + .max_msg_size = 128, +}; + +/* Each compatible listed below must have descriptor associated with it */ +static const struct of_device_id scmi_of_match[] = { + { .compatible = "arm,scmi", .data = &scmi_generic_desc }, + { /* Sentinel */ }, +}; + +MODULE_DEVICE_TABLE(of, scmi_of_match); + +static int scmi_xfer_info_init(struct scmi_info *sinfo) +{ + int i; + struct scmi_xfer *xfer; + struct device *dev = sinfo->dev; + const struct scmi_desc *desc = sinfo->desc; + struct scmi_xfers_info *info = &sinfo->minfo; + + /* Pre-allocated messages, no more than what hdr.seq can support */ + if (WARN_ON(desc->max_msg >= (MSG_TOKEN_ID_MASK + 1))) { + dev_err(dev, "Maximum message of %d exceeds supported %d\n", + desc->max_msg, MSG_TOKEN_ID_MASK + 1); + return -EINVAL; + } + + info->xfer_block = devm_kcalloc(dev, desc->max_msg, + sizeof(*info->xfer_block), GFP_KERNEL); + if (!info->xfer_block) + return -ENOMEM; + + info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg), + sizeof(long), GFP_KERNEL); + if (!info->xfer_alloc_table) + return -ENOMEM; + + bitmap_zero(info->xfer_alloc_table, desc->max_msg); + + /* Pre-initialize the buffer pointer to pre-allocated buffers */ + for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) { + xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size, + GFP_KERNEL); + if (!xfer->rx.buf) + return -ENOMEM; + + xfer->tx.buf = xfer->rx.buf; + init_completion(&xfer->done); + } + + spin_lock_init(&info->xfer_lock); + + return 0; +} + +static int scmi_mailbox_check(struct device_node *np) +{ + struct of_phandle_args arg; + + return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, &arg); +} + +static int scmi_mbox_free_channel(struct scmi_info *info) +{ + if (!IS_ERR_OR_NULL(info->tx_chan)) { + mbox_free_channel(info->tx_chan); + info->tx_chan = NULL; + } + + return 0; +} + +static int scmi_remove(struct platform_device *pdev) +{ + int ret = 0; + struct scmi_info *info = platform_get_drvdata(pdev); + + mutex_lock(&scmi_list_mutex); + if (info->users) + ret = -EBUSY; + else + list_del(&info->node); + mutex_unlock(&scmi_list_mutex); + + if (!ret) + /* Safe to free channels since no more users */ + return scmi_mbox_free_channel(info); + + return ret; +} + +static inline int scmi_mbox_chan_setup(struct scmi_info *info) +{ + int ret; + struct resource res; + resource_size_t size; + struct device *dev = info->dev; + struct device_node *shmem, *np = dev->of_node; + struct mbox_client *cl; + + cl = &info->cl; + cl->dev = dev; + cl->rx_callback = scmi_rx_callback; + cl->tx_prepare = scmi_tx_prepare; + cl->tx_block = false; + cl->knows_txdone = true; + + shmem = of_parse_phandle(np, "shmem", 0); + ret = of_address_to_resource(shmem, 0, &res); + of_node_put(shmem); + if (ret) { + dev_err(dev, "failed to get SCMI Tx payload mem resource\n"); + return ret; + } + + size = resource_size(&res); + info->tx_payload = devm_ioremap(dev, 
res.start, size); + if (!info->tx_payload) { + dev_err(dev, "failed to ioremap SCMI Tx payload\n"); + return -EADDRNOTAVAIL; + } + + /* Transmit channel is first entry i.e. index 0 */ + info->tx_chan = mbox_request_channel(cl, 0); + if (IS_ERR(info->tx_chan)) { + ret = PTR_ERR(info->tx_chan); + if (ret != -EPROBE_DEFER) + dev_err(dev, "failed to request SCMI Tx mailbox\n"); + return ret; + } + + return 0; +} + +static int scmi_probe(struct platform_device *pdev) +{ + int ret; + struct scmi_handle *handle; + const struct scmi_desc *desc; + struct scmi_info *info; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + + /* Only mailbox method supported, check for the presence of one */ + if (scmi_mailbox_check(np)) { + dev_err(dev, "no mailbox found in %pOF\n", np); + return -EINVAL; + } + + desc = of_match_device(scmi_of_match, dev)->data; + + info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->dev = dev; + info->desc = desc; + INIT_LIST_HEAD(&info->node); + + ret = scmi_xfer_info_init(info); + if (ret) + return ret; + + platform_set_drvdata(pdev, info); + + handle = &info->handle; + handle->dev = info->dev; + + ret = scmi_mbox_chan_setup(info); + if (ret) + return ret; + + mutex_lock(&scmi_list_mutex); + list_add_tail(&info->node, &scmi_list); + mutex_unlock(&scmi_list_mutex); + + return 0; +} + +static struct platform_driver scmi_driver = { + .driver = { + .name = "arm-scmi", + .of_match_table = scmi_of_match, + }, + .probe = scmi_probe, + .remove = scmi_remove, +}; + +module_platform_driver(scmi_driver); + +MODULE_ALIAS("platform: arm-scmi"); +MODULE_AUTHOR("Sudeep Holla "); +MODULE_DESCRIPTION("ARM SCMI protocol driver"); +MODULE_LICENSE("GPL v2"); diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h new file mode 100644 index 000000000000..1f0e89b270c6 --- /dev/null +++ b/include/linux/scmi_protocol.h @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SCMI Message Protocol driver header + * + * Copyright (C) 2018 ARM Ltd. + */ +#include + +/** + * struct scmi_handle - Handle returned to ARM SCMI clients for usage. + * + * @dev: pointer to the SCMI device + */ +struct scmi_handle { + struct device *dev; +}; -- cgit v1.2.3 From b6f20ff8bd94ad34032804a60bab5ee56752007e Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Tue, 6 Jun 2017 11:16:15 +0100 Subject: firmware: arm_scmi: add common infrastructure and support for base protocol The base protocol describes the properties of the implementation and provide generic error management. The base protocol provides commands to describe protocol version, discover implementation specific attributes and vendor/sub-vendor identification, list of protocols implemented and the various agents are in the system including OSPM and the platform. It also supports registering for notifications of platform errors. This protocol is mandatory. This patch adds support for the same along with some basic infrastructure to add support for other protocols. 
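For illustration only (this helper is not part of the patch), a caller
holding an initialised handle could consume the revision data that
scmi_base_protocol_init() fills in:

    static void example_dump_revision(const struct scmi_handle *handle)
    {
            const struct scmi_revision_info *rev = handle->version;

            /* every field below is populated by scmi_base_protocol_init() */
            pr_info("SCMI v%u.%u '%s:%s', impl 0x%x, %u protocol(s), %u agent(s)\n",
                    rev->major_ver, rev->minor_ver, rev->vendor_id,
                    rev->sub_vendor_id, rev->impl_ver, rev->num_protocols,
                    rev->num_agents);
    }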
Cc: Arnd Bergmann Cc: Greg Kroah-Hartman Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/Makefile | 3 +- drivers/firmware/arm_scmi/base.c | 253 +++++++++++++++++++++++++++++++++++++ drivers/firmware/arm_scmi/common.h | 37 ++++++ drivers/firmware/arm_scmi/driver.c | 53 ++++++++ include/linux/scmi_protocol.h | 37 ++++++ 5 files changed, 382 insertions(+), 1 deletion(-) create mode 100644 drivers/firmware/arm_scmi/base.c diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index b2a24ba2b636..5d9c7ef35f0f 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -1,2 +1,3 @@ -obj-y = scmi-driver.o +obj-y = scmi-driver.o scmi-protocols.o scmi-driver-y = driver.o +scmi-protocols-y = base.o diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c new file mode 100644 index 000000000000..0d3806c0d432 --- /dev/null +++ b/drivers/firmware/arm_scmi/base.c @@ -0,0 +1,253 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Base Protocol + * + * Copyright (C) 2018 ARM Ltd. + */ + +#include "common.h" + +enum scmi_base_protocol_cmd { + BASE_DISCOVER_VENDOR = 0x3, + BASE_DISCOVER_SUB_VENDOR = 0x4, + BASE_DISCOVER_IMPLEMENT_VERSION = 0x5, + BASE_DISCOVER_LIST_PROTOCOLS = 0x6, + BASE_DISCOVER_AGENT = 0x7, + BASE_NOTIFY_ERRORS = 0x8, +}; + +struct scmi_msg_resp_base_attributes { + u8 num_protocols; + u8 num_agents; + __le16 reserved; +}; + +/** + * scmi_base_attributes_get() - gets the implementation details + * that are associated with the base protocol. + * + * @handle - SCMI entity handle + * + * Return: 0 on success, else appropriate SCMI error. + */ +static int scmi_base_attributes_get(const struct scmi_handle *handle) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_resp_base_attributes *attr_info; + struct scmi_revision_info *rev = handle->version; + + ret = scmi_one_xfer_init(handle, PROTOCOL_ATTRIBUTES, + SCMI_PROTOCOL_BASE, 0, sizeof(*attr_info), &t); + if (ret) + return ret; + + ret = scmi_do_xfer(handle, t); + if (!ret) { + attr_info = t->rx.buf; + rev->num_protocols = attr_info->num_protocols; + rev->num_agents = attr_info->num_agents; + } + + scmi_one_xfer_put(handle, t); + return ret; +} + +/** + * scmi_base_vendor_id_get() - gets vendor/subvendor identifier ASCII string. + * + * @handle - SCMI entity handle + * @sub_vendor - specify true if sub-vendor ID is needed + * + * Return: 0 on success, else appropriate SCMI error. + */ +static int +scmi_base_vendor_id_get(const struct scmi_handle *handle, bool sub_vendor) +{ + u8 cmd; + int ret, size; + char *vendor_id; + struct scmi_xfer *t; + struct scmi_revision_info *rev = handle->version; + + if (sub_vendor) { + cmd = BASE_DISCOVER_SUB_VENDOR; + vendor_id = rev->sub_vendor_id; + size = ARRAY_SIZE(rev->sub_vendor_id); + } else { + cmd = BASE_DISCOVER_VENDOR; + vendor_id = rev->vendor_id; + size = ARRAY_SIZE(rev->vendor_id); + } + + ret = scmi_one_xfer_init(handle, cmd, SCMI_PROTOCOL_BASE, 0, size, &t); + if (ret) + return ret; + + ret = scmi_do_xfer(handle, t); + if (!ret) + memcpy(vendor_id, t->rx.buf, size); + + scmi_one_xfer_put(handle, t); + return ret; +} + +/** + * scmi_base_implementation_version_get() - gets a vendor-specific + * implementation 32-bit version. The format of the version number is + * vendor-specific + * + * @handle - SCMI entity handle + * + * Return: 0 on success, else appropriate SCMI error. 
+ */ +static int +scmi_base_implementation_version_get(const struct scmi_handle *handle) +{ + int ret; + __le32 *impl_ver; + struct scmi_xfer *t; + struct scmi_revision_info *rev = handle->version; + + ret = scmi_one_xfer_init(handle, BASE_DISCOVER_IMPLEMENT_VERSION, + SCMI_PROTOCOL_BASE, 0, sizeof(*impl_ver), &t); + if (ret) + return ret; + + ret = scmi_do_xfer(handle, t); + if (!ret) { + impl_ver = t->rx.buf; + rev->impl_ver = le32_to_cpu(*impl_ver); + } + + scmi_one_xfer_put(handle, t); + return ret; +} + +/** + * scmi_base_implementation_list_get() - gets the list of protocols it is + * OSPM is allowed to access + * + * @handle - SCMI entity handle + * @protocols_imp - pointer to hold the list of protocol identifiers + * + * Return: 0 on success, else appropriate SCMI error. + */ +static int scmi_base_implementation_list_get(const struct scmi_handle *handle, + u8 *protocols_imp) +{ + u8 *list; + int ret, loop; + struct scmi_xfer *t; + __le32 *num_skip, *num_ret; + u32 tot_num_ret = 0, loop_num_ret; + struct device *dev = handle->dev; + + ret = scmi_one_xfer_init(handle, BASE_DISCOVER_LIST_PROTOCOLS, + SCMI_PROTOCOL_BASE, sizeof(*num_skip), 0, &t); + if (ret) + return ret; + + num_skip = t->tx.buf; + num_ret = t->rx.buf; + list = t->rx.buf + sizeof(*num_ret); + + do { + /* Set the number of protocols to be skipped/already read */ + *num_skip = cpu_to_le32(tot_num_ret); + + ret = scmi_do_xfer(handle, t); + if (ret) + break; + + loop_num_ret = le32_to_cpu(*num_ret); + if (tot_num_ret + loop_num_ret > MAX_PROTOCOLS_IMP) { + dev_err(dev, "No. of Protocol > MAX_PROTOCOLS_IMP"); + break; + } + + for (loop = 0; loop < loop_num_ret; loop++) + protocols_imp[tot_num_ret + loop] = *(list + loop); + + tot_num_ret += loop_num_ret; + } while (loop_num_ret); + + scmi_one_xfer_put(handle, t); + return ret; +} + +/** + * scmi_base_discover_agent_get() - discover the name of an agent + * + * @handle - SCMI entity handle + * @id - Agent identifier + * @name - Agent identifier ASCII string + * + * An agent id of 0 is reserved to identify the platform itself. + * Generally operating system is represented as "OSPM" + * + * Return: 0 on success, else appropriate SCMI error. 
+ */ +static int scmi_base_discover_agent_get(const struct scmi_handle *handle, + int id, char *name) +{ + int ret; + struct scmi_xfer *t; + + ret = scmi_one_xfer_init(handle, BASE_DISCOVER_AGENT, + SCMI_PROTOCOL_BASE, sizeof(__le32), + SCMI_MAX_STR_SIZE, &t); + if (ret) + return ret; + + *(__le32 *)t->tx.buf = cpu_to_le32(id); + + ret = scmi_do_xfer(handle, t); + if (!ret) + memcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE); + + scmi_one_xfer_put(handle, t); + return ret; +} + +int scmi_base_protocol_init(struct scmi_handle *h) +{ + int id, ret; + u8 *prot_imp; + u32 version; + char name[SCMI_MAX_STR_SIZE]; + const struct scmi_handle *handle = h; + struct device *dev = handle->dev; + struct scmi_revision_info *rev = handle->version; + + ret = scmi_version_get(handle, SCMI_PROTOCOL_BASE, &version); + if (ret) + return ret; + + prot_imp = devm_kcalloc(dev, MAX_PROTOCOLS_IMP, sizeof(u8), GFP_KERNEL); + if (!prot_imp) + return -ENOMEM; + + rev->major_ver = PROTOCOL_REV_MAJOR(version), + rev->minor_ver = PROTOCOL_REV_MINOR(version); + + scmi_base_attributes_get(handle); + scmi_base_vendor_id_get(handle, false); + scmi_base_vendor_id_get(handle, true); + scmi_base_implementation_version_get(handle); + scmi_base_implementation_list_get(handle, prot_imp); + scmi_setup_protocol_implemented(handle, prot_imp); + + dev_info(dev, "SCMI Protocol v%d.%d '%s:%s' Firmware version 0x%x\n", + rev->major_ver, rev->minor_ver, rev->vendor_id, + rev->sub_vendor_id, rev->impl_ver); + dev_dbg(dev, "Found %d protocol(s) %d agent(s)\n", rev->num_protocols, + rev->num_agents); + + for (id = 0; id < rev->num_agents; id++) { + scmi_base_discover_agent_get(handle, id, name); + dev_dbg(dev, "Agent %d: %s\n", id, name); + } + + return 0; +} diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index d57eb1862f68..0fc9f5ae8684 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -8,9 +8,41 @@ */ #include +#include +#include +#include #include #include +#define PROTOCOL_REV_MINOR_BITS 16 +#define PROTOCOL_REV_MINOR_MASK ((1U << PROTOCOL_REV_MINOR_BITS) - 1) +#define PROTOCOL_REV_MAJOR(x) ((x) >> PROTOCOL_REV_MINOR_BITS) +#define PROTOCOL_REV_MINOR(x) ((x) & PROTOCOL_REV_MINOR_MASK) +#define MAX_PROTOCOLS_IMP 16 + +enum scmi_common_cmd { + PROTOCOL_VERSION = 0x0, + PROTOCOL_ATTRIBUTES = 0x1, + PROTOCOL_MESSAGE_ATTRIBUTES = 0x2, +}; + +/** + * struct scmi_msg_resp_prot_version - Response for a message + * + * @major_version: Major version of the ABI that firmware supports + * @minor_version: Minor version of the ABI that firmware supports + * + * In general, ABI version changes follow the rule that minor version increments + * are backward compatible. Major revision changes in ABI may not be + * backward compatible. 
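+ *
+ * As a worked example (value invented for illustration), a packed
+ * version word of 0x00020001 returned via scmi_version_get() decodes
+ * through PROTOCOL_REV_MAJOR()/PROTOCOL_REV_MINOR() above as
+ * major 2, minor 1.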
+ * + * Response to a generic message with message type SCMI_MSG_VERSION + */ +struct scmi_msg_resp_prot_version { + __le16 minor_version; + __le16 major_version; +}; + /** * struct scmi_msg_hdr - Message(Tx/Rx) header * @@ -64,3 +96,8 @@ int scmi_one_xfer_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id, size_t tx_size, size_t rx_size, struct scmi_xfer **p); int scmi_handle_put(const struct scmi_handle *handle); struct scmi_handle *scmi_handle_get(struct device *dev); +int scmi_version_get(const struct scmi_handle *h, u8 protocol, u32 *version); +void scmi_setup_protocol_implemented(const struct scmi_handle *handle, + u8 *prot_imp); + +int scmi_base_protocol_init(struct scmi_handle *h); diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 7824cce54373..49875cd68365 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -94,21 +94,27 @@ struct scmi_desc { * @dev: Device pointer * @desc: SoC description for this instance * @handle: Instance of SCMI handle to send to clients + * @version: SCMI revision information containing protocol version, + * implementation version and (sub-)vendor identification. * @cl: Mailbox Client * @tx_chan: Transmit mailbox channel * @tx_payload: Transmit mailbox channel payload area * @minfo: Message info + * @protocols_imp: list of protocols implemented, currently maximum of + * MAX_PROTOCOLS_IMP elements allocated by the base protocol * @node: list head * @users: Number of users of this instance */ struct scmi_info { struct device *dev; const struct scmi_desc *desc; + struct scmi_revision_info version; struct scmi_handle handle; struct mbox_client cl; struct mbox_chan *tx_chan; void __iomem *tx_payload; struct scmi_xfers_info minfo; + u8 *protocols_imp; struct list_head node; int users; }; @@ -421,6 +427,45 @@ int scmi_one_xfer_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id, return 0; } +/** + * scmi_version_get() - command to get the revision of the SCMI entity + * + * @handle: Handle to SCMI entity information + * + * Updates the SCMI information in the internal data structure. + * + * Return: 0 if all went fine, else return appropriate error. 
+ */ +int scmi_version_get(const struct scmi_handle *handle, u8 protocol, + u32 *version) +{ + int ret; + __le32 *rev_info; + struct scmi_xfer *t; + + ret = scmi_one_xfer_init(handle, PROTOCOL_VERSION, protocol, 0, + sizeof(*version), &t); + if (ret) + return ret; + + ret = scmi_do_xfer(handle, t); + if (!ret) { + rev_info = t->rx.buf; + *version = le32_to_cpu(*rev_info); + } + + scmi_one_xfer_put(handle, t); + return ret; +} + +void scmi_setup_protocol_implemented(const struct scmi_handle *handle, + u8 *prot_imp) +{ + struct scmi_info *info = handle_to_scmi_info(handle); + + info->protocols_imp = prot_imp; +} + /** * scmi_handle_get() - Get the SCMI handle for a device * @@ -649,11 +694,19 @@ static int scmi_probe(struct platform_device *pdev) handle = &info->handle; handle->dev = info->dev; + handle->version = &info->version; ret = scmi_mbox_chan_setup(info); if (ret) return ret; + ret = scmi_base_protocol_init(handle); + if (ret) { + dev_err(dev, "unable to communicate with SCMI(%d)\n", ret); + scmi_mbox_free_channel(info); + return ret; + } + mutex_lock(&scmi_list_mutex); list_add_tail(&info->node, &scmi_list); mutex_unlock(&scmi_list_mutex); diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 1f0e89b270c6..08fcc1dd0276 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -6,11 +6,48 @@ */ #include +#define SCMI_MAX_STR_SIZE 16 + +/** + * struct scmi_revision_info - version information structure + * + * @major_ver: Major ABI version. Change here implies risk of backward + * compatibility break. + * @minor_ver: Minor ABI version. Change here implies new feature addition, + * or compatible change in ABI. + * @num_protocols: Number of protocols that are implemented, excluding the + * base protocol. + * @num_agents: Number of agents in the system. + * @impl_ver: A vendor-specific implementation version. + * @vendor_id: A vendor identifier(Null terminated ASCII string) + * @sub_vendor_id: A sub-vendor identifier(Null terminated ASCII string) + */ +struct scmi_revision_info { + u16 major_ver; + u16 minor_ver; + u8 num_protocols; + u8 num_agents; + u32 impl_ver; + char vendor_id[SCMI_MAX_STR_SIZE]; + char sub_vendor_id[SCMI_MAX_STR_SIZE]; +}; + /** * struct scmi_handle - Handle returned to ARM SCMI clients for usage. * * @dev: pointer to the SCMI device + * @version: pointer to the structure containing SCMI version information */ struct scmi_handle { struct device *dev; + struct scmi_revision_info *version; +}; + +enum scmi_std_protocol { + SCMI_PROTOCOL_BASE = 0x10, + SCMI_PROTOCOL_POWER = 0x11, + SCMI_PROTOCOL_SYSTEM = 0x12, + SCMI_PROTOCOL_PERF = 0x13, + SCMI_PROTOCOL_CLOCK = 0x14, + SCMI_PROTOCOL_SENSOR = 0x15, }; -- cgit v1.2.3 From 933c504424a2bc784fdb4cd5c318049d55da20e0 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Mon, 30 Oct 2017 18:33:30 +0000 Subject: firmware: arm_scmi: add scmi protocol bus to enumerate protocol devices The SCMI specification encompasses various protocols. However, not every protocol has to be present on a given platform/implementation as not every protocol is relevant for it. Furthermore, the platform chooses which protocols it exposes to a given agent. The only protocol that must be implemented is the base protocol. The base protocol is used by an agent to discover which protocols are available to it. In order to enumerate the discovered implemented protocols, this patch adds support for a separate scmi protocol bus. It also adds mechanism to register support for different protocols. 
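To make the shape of a client concrete, here is a minimal sketch (the
names and the choice of protocol are placeholders; struct scmi_driver,
struct scmi_device_id and module_scmi_driver() are the interfaces this
patch introduces):

    static int example_probe(struct scmi_device *sdev)
    {
            /* the bus defers probing until a handle is available */
            const struct scmi_handle *handle = sdev->handle;

            dev_info(&sdev->dev, "probed against SCMI v%u.%u\n",
                     handle->version->major_ver, handle->version->minor_ver);
            return 0;
    }

    static const struct scmi_device_id example_id_table[] = {
            { SCMI_PROTOCOL_PERF }, /* match devices created for protocol 0x13 */
            { },
    };

    static struct scmi_driver example_driver = {
            .name           = "example-scmi-client",
            .probe          = example_probe,
            .id_table       = example_id_table,
    };
    module_scmi_driver(example_driver);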
Cc: Arnd Bergmann Cc: Greg Kroah-Hartman Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/Makefile | 3 +- drivers/firmware/arm_scmi/bus.c | 221 +++++++++++++++++++++++++++++++++++++ drivers/firmware/arm_scmi/common.h | 1 + include/linux/scmi_protocol.h | 64 +++++++++++ 4 files changed, 288 insertions(+), 1 deletion(-) create mode 100644 drivers/firmware/arm_scmi/bus.c diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index 5d9c7ef35f0f..5f4ec2613db6 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -1,3 +1,4 @@ -obj-y = scmi-driver.o scmi-protocols.o +obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o +scmi-bus-y = bus.o scmi-driver-y = driver.o scmi-protocols-y = base.o diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c new file mode 100644 index 000000000000..f2760a596c28 --- /dev/null +++ b/drivers/firmware/arm_scmi/bus.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Message Protocol bus layer + * + * Copyright (C) 2018 ARM Ltd. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +#include "common.h" + +static DEFINE_IDA(scmi_bus_id); +static DEFINE_IDR(scmi_protocols); +static DEFINE_SPINLOCK(protocol_lock); + +static const struct scmi_device_id * +scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv) +{ + const struct scmi_device_id *id = scmi_drv->id_table; + + if (!id) + return NULL; + + for (; id->protocol_id; id++) + if (id->protocol_id == scmi_dev->protocol_id) + return id; + + return NULL; +} + +static int scmi_dev_match(struct device *dev, struct device_driver *drv) +{ + struct scmi_driver *scmi_drv = to_scmi_driver(drv); + struct scmi_device *scmi_dev = to_scmi_dev(dev); + const struct scmi_device_id *id; + + id = scmi_dev_match_id(scmi_dev, scmi_drv); + if (id) + return 1; + + return 0; +} + +static int scmi_protocol_init(int protocol_id, struct scmi_handle *handle) +{ + scmi_prot_init_fn_t fn = idr_find(&scmi_protocols, protocol_id); + + if (unlikely(!fn)) + return -EINVAL; + return fn(handle); +} + +static int scmi_dev_probe(struct device *dev) +{ + struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver); + struct scmi_device *scmi_dev = to_scmi_dev(dev); + const struct scmi_device_id *id; + int ret; + + id = scmi_dev_match_id(scmi_dev, scmi_drv); + if (!id) + return -ENODEV; + + if (!scmi_dev->handle) + return -EPROBE_DEFER; + + ret = scmi_protocol_init(scmi_dev->protocol_id, scmi_dev->handle); + if (ret) + return ret; + + return scmi_drv->probe(scmi_dev); +} + +static int scmi_dev_remove(struct device *dev) +{ + struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver); + struct scmi_device *scmi_dev = to_scmi_dev(dev); + + if (scmi_drv->remove) + scmi_drv->remove(scmi_dev); + + return 0; +} + +static struct bus_type scmi_bus_type = { + .name = "scmi_protocol", + .match = scmi_dev_match, + .probe = scmi_dev_probe, + .remove = scmi_dev_remove, +}; + +int scmi_driver_register(struct scmi_driver *driver, struct module *owner, + const char *mod_name) +{ + int retval; + + driver->driver.bus = &scmi_bus_type; + driver->driver.name = driver->name; + driver->driver.owner = owner; + driver->driver.mod_name = mod_name; + + retval = driver_register(&driver->driver); + if (!retval) + pr_debug("registered new scmi driver %s\n", driver->name); + + return retval; +} +EXPORT_SYMBOL_GPL(scmi_driver_register); + +void 
scmi_driver_unregister(struct scmi_driver *driver) +{ + driver_unregister(&driver->driver); +} +EXPORT_SYMBOL_GPL(scmi_driver_unregister); + +struct scmi_device * +scmi_device_create(struct device_node *np, struct device *parent, int protocol) +{ + int id, retval; + struct scmi_device *scmi_dev; + + id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL); + if (id < 0) + return NULL; + + scmi_dev = kzalloc(sizeof(*scmi_dev), GFP_KERNEL); + if (!scmi_dev) + goto no_mem; + + scmi_dev->id = id; + scmi_dev->protocol_id = protocol; + scmi_dev->dev.parent = parent; + scmi_dev->dev.of_node = np; + scmi_dev->dev.bus = &scmi_bus_type; + dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id); + + retval = device_register(&scmi_dev->dev); + if (!retval) + return scmi_dev; + + put_device(&scmi_dev->dev); + kfree(scmi_dev); +no_mem: + ida_simple_remove(&scmi_bus_id, id); + return NULL; +} + +void scmi_device_destroy(struct scmi_device *scmi_dev) +{ + scmi_handle_put(scmi_dev->handle); + device_unregister(&scmi_dev->dev); + ida_simple_remove(&scmi_bus_id, scmi_dev->id); + kfree(scmi_dev); +} + +void scmi_set_handle(struct scmi_device *scmi_dev) +{ + scmi_dev->handle = scmi_handle_get(&scmi_dev->dev); +} + +int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn) +{ + int ret; + + spin_lock(&protocol_lock); + ret = idr_alloc(&scmi_protocols, fn, protocol_id, protocol_id + 1, + GFP_ATOMIC); + if (ret != protocol_id) + pr_err("unable to allocate SCMI idr slot, err %d\n", ret); + spin_unlock(&protocol_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(scmi_protocol_register); + +void scmi_protocol_unregister(int protocol_id) +{ + spin_lock(&protocol_lock); + idr_remove(&scmi_protocols, protocol_id); + spin_unlock(&protocol_lock); +} +EXPORT_SYMBOL_GPL(scmi_protocol_unregister); + +static int __scmi_devices_unregister(struct device *dev, void *data) +{ + struct scmi_device *scmi_dev = to_scmi_dev(dev); + + scmi_device_destroy(scmi_dev); + return 0; +} + +static void scmi_devices_unregister(void) +{ + bus_for_each_dev(&scmi_bus_type, NULL, NULL, __scmi_devices_unregister); +} + +static int __init scmi_bus_init(void) +{ + int retval; + + retval = bus_register(&scmi_bus_type); + if (retval) + pr_err("scmi protocol bus register failed (%d)\n", retval); + + return retval; +} +subsys_initcall(scmi_bus_init); + +static void __exit scmi_bus_exit(void) +{ + scmi_devices_unregister(); + bus_unregister(&scmi_bus_type); + ida_destroy(&scmi_bus_id); +} +module_exit(scmi_bus_exit); diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 0fc9f5ae8684..95053ed5ccb9 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -96,6 +96,7 @@ int scmi_one_xfer_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id, size_t tx_size, size_t rx_size, struct scmi_xfer **p); int scmi_handle_put(const struct scmi_handle *handle); struct scmi_handle *scmi_handle_get(struct device *dev); +void scmi_set_handle(struct scmi_device *scmi_dev); int scmi_version_get(const struct scmi_handle *h, u8 protocol, u32 *version); void scmi_setup_protocol_implemented(const struct scmi_handle *handle, u8 *prot_imp); diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 08fcc1dd0276..464086b9d8c5 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -4,6 +4,7 @@ * * Copyright (C) 2018 ARM Ltd. 
*/ +#include #include #define SCMI_MAX_STR_SIZE 16 @@ -51,3 +52,66 @@ enum scmi_std_protocol { SCMI_PROTOCOL_CLOCK = 0x14, SCMI_PROTOCOL_SENSOR = 0x15, }; + +struct scmi_device { + u32 id; + u8 protocol_id; + struct device dev; + struct scmi_handle *handle; +}; + +#define to_scmi_dev(d) container_of(d, struct scmi_device, dev) + +struct scmi_device * +scmi_device_create(struct device_node *np, struct device *parent, int protocol); +void scmi_device_destroy(struct scmi_device *scmi_dev); + +struct scmi_device_id { + u8 protocol_id; +}; + +struct scmi_driver { + const char *name; + int (*probe)(struct scmi_device *sdev); + void (*remove)(struct scmi_device *sdev); + const struct scmi_device_id *id_table; + + struct device_driver driver; +}; + +#define to_scmi_driver(d) container_of(d, struct scmi_driver, driver) + +#ifdef CONFIG_ARM_SCMI_PROTOCOL +int scmi_driver_register(struct scmi_driver *driver, + struct module *owner, const char *mod_name); +void scmi_driver_unregister(struct scmi_driver *driver); +#else +static inline int +scmi_driver_register(struct scmi_driver *driver, struct module *owner, + const char *mod_name) +{ + return -EINVAL; +} + +static inline void scmi_driver_unregister(struct scmi_driver *driver) {} +#endif /* CONFIG_ARM_SCMI_PROTOCOL */ + +#define scmi_register(driver) \ + scmi_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) +#define scmi_unregister(driver) \ + scmi_driver_unregister(driver) + +/** + * module_scmi_driver() - Helper macro for registering a scmi driver + * @__scmi_driver: scmi_driver structure + * + * Helper macro for scmi drivers to set up proper module init / exit + * functions. Replaces module_init() and module_exit() and keeps people from + * printing pointless things to the kernel log when their driver is loaded. + */ +#define module_scmi_driver(__scmi_driver) \ + module_driver(__scmi_driver, scmi_register, scmi_unregister) + +typedef int (*scmi_prot_init_fn_t)(struct scmi_handle *); +int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn); +void scmi_protocol_unregister(int protocol_id); -- cgit v1.2.3 From a9e3fbfaa0ff885aacafe6f33e72448a2993d072 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Tue, 6 Jun 2017 11:22:51 +0100 Subject: firmware: arm_scmi: add initial support for performance protocol The performance protocol is intended for the performance management of group(s) of device(s) that run in the same performance domain. It includes even the CPUs. A performance domain is defined by a set of devices that always have to run at the same performance level. For example, a set of CPUs that share a voltage domain, and have a common frequency control, is said to be in the same performance domain. The commands in this protocol provide functionality to describe the protocol version, describe various attribute flags, set and get the performance level of a domain. It also supports discovery of the list of performance levels supported by a performance domain, and the properties of each performance level. This patch adds basic support for the performance protocol. 
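As a sketch of how a consumer such as a cpufreq driver might use these
operations (the helper below is hypothetical; the ops and their
signatures are the ones added by this patch):

    static int example_set_device_freq(const struct scmi_handle *handle,
                                       struct device *dev, unsigned long freq)
    {
            int domain = handle->perf_ops->device_domain_id(dev);

            if (domain < 0)
                    return domain;

            /* freq_set() converts Hz to a level via the sustained mapping */
            return handle->perf_ops->freq_set(handle, domain, freq);
    }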
Cc: Arnd Bergmann Cc: Greg Kroah-Hartman Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/Makefile | 2 +- drivers/firmware/arm_scmi/common.h | 1 + drivers/firmware/arm_scmi/perf.c | 478 +++++++++++++++++++++++++++++++++++++ include/linux/scmi_protocol.h | 42 ++++ 4 files changed, 522 insertions(+), 1 deletion(-) create mode 100644 drivers/firmware/arm_scmi/perf.c diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index 5f4ec2613db6..687cbbfb3af6 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -1,4 +1,4 @@ obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o scmi-bus-y = bus.o scmi-driver-y = driver.o -scmi-protocols-y = base.o +scmi-protocols-y = base.o perf.o diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 95053ed5ccb9..0c30234f9098 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -19,6 +19,7 @@ #define PROTOCOL_REV_MAJOR(x) ((x) >> PROTOCOL_REV_MINOR_BITS) #define PROTOCOL_REV_MINOR(x) ((x) & PROTOCOL_REV_MINOR_MASK) #define MAX_PROTOCOLS_IMP 16 +#define MAX_OPPS 16 enum scmi_common_cmd { PROTOCOL_VERSION = 0x0, diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c new file mode 100644 index 000000000000..9c56ea503890 --- /dev/null +++ b/drivers/firmware/arm_scmi/perf.c @@ -0,0 +1,478 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Performance Protocol + * + * Copyright (C) 2018 ARM Ltd. + */ + +#include +#include +#include +#include + +#include "common.h" + +enum scmi_performance_protocol_cmd { + PERF_DOMAIN_ATTRIBUTES = 0x3, + PERF_DESCRIBE_LEVELS = 0x4, + PERF_LIMITS_SET = 0x5, + PERF_LIMITS_GET = 0x6, + PERF_LEVEL_SET = 0x7, + PERF_LEVEL_GET = 0x8, + PERF_NOTIFY_LIMITS = 0x9, + PERF_NOTIFY_LEVEL = 0xa, +}; + +struct scmi_opp { + u32 perf; + u32 power; + u32 trans_latency_us; +}; + +struct scmi_msg_resp_perf_attributes { + __le16 num_domains; + __le16 flags; +#define POWER_SCALE_IN_MILLIWATT(x) ((x) & BIT(0)) + __le32 stats_addr_low; + __le32 stats_addr_high; + __le32 stats_size; +}; + +struct scmi_msg_resp_perf_domain_attributes { + __le32 flags; +#define SUPPORTS_SET_LIMITS(x) ((x) & BIT(31)) +#define SUPPORTS_SET_PERF_LVL(x) ((x) & BIT(30)) +#define SUPPORTS_PERF_LIMIT_NOTIFY(x) ((x) & BIT(29)) +#define SUPPORTS_PERF_LEVEL_NOTIFY(x) ((x) & BIT(28)) + __le32 rate_limit_us; + __le32 sustained_freq_khz; + __le32 sustained_perf_level; + u8 name[SCMI_MAX_STR_SIZE]; +}; + +struct scmi_msg_perf_describe_levels { + __le32 domain; + __le32 level_index; +}; + +struct scmi_perf_set_limits { + __le32 domain; + __le32 max_level; + __le32 min_level; +}; + +struct scmi_perf_get_limits { + __le32 max_level; + __le32 min_level; +}; + +struct scmi_perf_set_level { + __le32 domain; + __le32 level; +}; + +struct scmi_perf_notify_level_or_limits { + __le32 domain; + __le32 notify_enable; +}; + +struct scmi_msg_resp_perf_describe_levels { + __le16 num_returned; + __le16 num_remaining; + struct { + __le32 perf_val; + __le32 power; + __le16 transition_latency_us; + __le16 reserved; + } opp[0]; +}; + +struct perf_dom_info { + bool set_limits; + bool set_perf; + bool perf_limit_notify; + bool perf_level_notify; + u32 opp_count; + u32 sustained_freq_khz; + u32 sustained_perf_level; + u32 mult_factor; + char name[SCMI_MAX_STR_SIZE]; + struct scmi_opp opp[MAX_OPPS]; +}; + +struct scmi_perf_info { + int num_domains; + bool power_scale_mw; + u64 stats_addr; + u32 
stats_size; + struct perf_dom_info *dom_info; +}; + +static int scmi_perf_attributes_get(const struct scmi_handle *handle, + struct scmi_perf_info *pi) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_resp_perf_attributes *attr; + + ret = scmi_one_xfer_init(handle, PROTOCOL_ATTRIBUTES, + SCMI_PROTOCOL_PERF, 0, sizeof(*attr), &t); + if (ret) + return ret; + + attr = t->rx.buf; + + ret = scmi_do_xfer(handle, t); + if (!ret) { + u16 flags = le16_to_cpu(attr->flags); + + pi->num_domains = le16_to_cpu(attr->num_domains); + pi->power_scale_mw = POWER_SCALE_IN_MILLIWATT(flags); + pi->stats_addr = le32_to_cpu(attr->stats_addr_low) | + (u64)le32_to_cpu(attr->stats_addr_high) << 32; + pi->stats_size = le32_to_cpu(attr->stats_size); + } + + scmi_one_xfer_put(handle, t); + return ret; +} + +static int +scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain, + struct perf_dom_info *dom_info) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_resp_perf_domain_attributes *attr; + + ret = scmi_one_xfer_init(handle, PERF_DOMAIN_ATTRIBUTES, + SCMI_PROTOCOL_PERF, sizeof(domain), + sizeof(*attr), &t); + if (ret) + return ret; + + *(__le32 *)t->tx.buf = cpu_to_le32(domain); + attr = t->rx.buf; + + ret = scmi_do_xfer(handle, t); + if (!ret) { + u32 flags = le32_to_cpu(attr->flags); + + dom_info->set_limits = SUPPORTS_SET_LIMITS(flags); + dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags); + dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags); + dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags); + dom_info->sustained_freq_khz = + le32_to_cpu(attr->sustained_freq_khz); + dom_info->sustained_perf_level = + le32_to_cpu(attr->sustained_perf_level); + dom_info->mult_factor = (dom_info->sustained_freq_khz * 1000) / + dom_info->sustained_perf_level; + memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); + } + + scmi_one_xfer_put(handle, t); + return ret; +} + +static int opp_cmp_func(const void *opp1, const void *opp2) +{ + const struct scmi_opp *t1 = opp1, *t2 = opp2; + + return t1->perf - t2->perf; +} + +static int +scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain, + struct perf_dom_info *perf_dom) +{ + int ret, cnt; + u32 tot_opp_cnt = 0; + u16 num_returned, num_remaining; + struct scmi_xfer *t; + struct scmi_opp *opp; + struct scmi_msg_perf_describe_levels *dom_info; + struct scmi_msg_resp_perf_describe_levels *level_info; + + ret = scmi_one_xfer_init(handle, PERF_DESCRIBE_LEVELS, + SCMI_PROTOCOL_PERF, sizeof(*dom_info), 0, &t); + if (ret) + return ret; + + dom_info = t->tx.buf; + level_info = t->rx.buf; + + do { + dom_info->domain = cpu_to_le32(domain); + /* Set the number of OPPs to be skipped/already read */ + dom_info->level_index = cpu_to_le32(tot_opp_cnt); + + ret = scmi_do_xfer(handle, t); + if (ret) + break; + + num_returned = le16_to_cpu(level_info->num_returned); + num_remaining = le16_to_cpu(level_info->num_remaining); + if (tot_opp_cnt + num_returned > MAX_OPPS) { + dev_err(handle->dev, "No. 
of OPPs exceeded MAX_OPPS"); + break; + } + + opp = &perf_dom->opp[tot_opp_cnt]; + for (cnt = 0; cnt < num_returned; cnt++, opp++) { + opp->perf = le32_to_cpu(level_info->opp[cnt].perf_val); + opp->power = le32_to_cpu(level_info->opp[cnt].power); + opp->trans_latency_us = le16_to_cpu + (level_info->opp[cnt].transition_latency_us); + + dev_dbg(handle->dev, "Level %d Power %d Latency %dus\n", + opp->perf, opp->power, opp->trans_latency_us); + } + + tot_opp_cnt += num_returned; + /* + * check for both returned and remaining to avoid infinite + * loop due to buggy firmware + */ + } while (num_returned && num_remaining); + + perf_dom->opp_count = tot_opp_cnt; + scmi_one_xfer_put(handle, t); + + sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL); + return ret; +} + +static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain, + u32 max_perf, u32 min_perf) +{ + int ret; + struct scmi_xfer *t; + struct scmi_perf_set_limits *limits; + + ret = scmi_one_xfer_init(handle, PERF_LIMITS_SET, SCMI_PROTOCOL_PERF, + sizeof(*limits), 0, &t); + if (ret) + return ret; + + limits = t->tx.buf; + limits->domain = cpu_to_le32(domain); + limits->max_level = cpu_to_le32(max_perf); + limits->min_level = cpu_to_le32(min_perf); + + ret = scmi_do_xfer(handle, t); + + scmi_one_xfer_put(handle, t); + return ret; +} + +static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain, + u32 *max_perf, u32 *min_perf) +{ + int ret; + struct scmi_xfer *t; + struct scmi_perf_get_limits *limits; + + ret = scmi_one_xfer_init(handle, PERF_LIMITS_GET, SCMI_PROTOCOL_PERF, + sizeof(__le32), 0, &t); + if (ret) + return ret; + + *(__le32 *)t->tx.buf = cpu_to_le32(domain); + + ret = scmi_do_xfer(handle, t); + if (!ret) { + limits = t->rx.buf; + + *max_perf = le32_to_cpu(limits->max_level); + *min_perf = le32_to_cpu(limits->min_level); + } + + scmi_one_xfer_put(handle, t); + return ret; +} + +static int +scmi_perf_level_set(const struct scmi_handle *handle, u32 domain, u32 level) +{ + int ret; + struct scmi_xfer *t; + struct scmi_perf_set_level *lvl; + + ret = scmi_one_xfer_init(handle, PERF_LEVEL_SET, SCMI_PROTOCOL_PERF, + sizeof(*lvl), 0, &t); + if (ret) + return ret; + + lvl = t->tx.buf; + lvl->domain = cpu_to_le32(domain); + lvl->level = cpu_to_le32(level); + + ret = scmi_do_xfer(handle, t); + + scmi_one_xfer_put(handle, t); + return ret; +} + +static int +scmi_perf_level_get(const struct scmi_handle *handle, u32 domain, u32 *level) +{ + int ret; + struct scmi_xfer *t; + + ret = scmi_one_xfer_init(handle, PERF_LEVEL_GET, SCMI_PROTOCOL_PERF, + sizeof(u32), sizeof(u32), &t); + if (ret) + return ret; + + *(__le32 *)t->tx.buf = cpu_to_le32(domain); + + ret = scmi_do_xfer(handle, t); + if (!ret) + *level = le32_to_cpu(*(__le32 *)t->rx.buf); + + scmi_one_xfer_put(handle, t); + return ret; +} + +/* Device specific ops */ +static int scmi_dev_domain_id(struct device *dev) +{ + struct of_phandle_args clkspec; + + if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells", + 0, &clkspec)) + return -EINVAL; + + return clkspec.args[0]; +} + +static int scmi_dvfs_add_opps_to_device(const struct scmi_handle *handle, + struct device *dev) +{ + int idx, ret, domain; + unsigned long freq; + struct scmi_opp *opp; + struct perf_dom_info *dom; + struct scmi_perf_info *pi = handle->perf_priv; + + domain = scmi_dev_domain_id(dev); + if (domain < 0) + return domain; + + dom = pi->dom_info + domain; + if (!dom) + return -EIO; + + for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) { + 
freq = opp->perf * dom->mult_factor; + + ret = dev_pm_opp_add(dev, freq, 0); + if (ret) { + dev_warn(dev, "failed to add opp %luHz\n", freq); + + while (idx-- > 0) { + freq = (--opp)->perf * dom->mult_factor; + dev_pm_opp_remove(dev, freq); + } + return ret; + } + } + return 0; +} + +static int scmi_dvfs_get_transition_latency(const struct scmi_handle *handle, + struct device *dev) +{ + struct perf_dom_info *dom; + struct scmi_perf_info *pi = handle->perf_priv; + int domain = scmi_dev_domain_id(dev); + + if (domain < 0) + return domain; + + dom = pi->dom_info + domain; + if (!dom) + return -EIO; + + /* uS to nS */ + return dom->opp[dom->opp_count - 1].trans_latency_us * 1000; +} + +static int scmi_dvfs_freq_set(const struct scmi_handle *handle, u32 domain, + unsigned long freq) +{ + struct scmi_perf_info *pi = handle->perf_priv; + struct perf_dom_info *dom = pi->dom_info + domain; + + return scmi_perf_level_set(handle, domain, freq / dom->mult_factor); +} + +static int scmi_dvfs_freq_get(const struct scmi_handle *handle, u32 domain, + unsigned long *freq) +{ + int ret; + u32 level; + struct scmi_perf_info *pi = handle->perf_priv; + struct perf_dom_info *dom = pi->dom_info + domain; + + ret = scmi_perf_level_get(handle, domain, &level); + if (!ret) + *freq = level * dom->mult_factor; + + return ret; +} + +static struct scmi_perf_ops perf_ops = { + .limits_set = scmi_perf_limits_set, + .limits_get = scmi_perf_limits_get, + .level_set = scmi_perf_level_set, + .level_get = scmi_perf_level_get, + .device_domain_id = scmi_dev_domain_id, + .get_transition_latency = scmi_dvfs_get_transition_latency, + .add_opps_to_device = scmi_dvfs_add_opps_to_device, + .freq_set = scmi_dvfs_freq_set, + .freq_get = scmi_dvfs_freq_get, +}; + +static int scmi_perf_protocol_init(struct scmi_handle *handle) +{ + int domain; + u32 version; + struct scmi_perf_info *pinfo; + + scmi_version_get(handle, SCMI_PROTOCOL_PERF, &version); + + dev_dbg(handle->dev, "Performance Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL); + if (!pinfo) + return -ENOMEM; + + scmi_perf_attributes_get(handle, pinfo); + + pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains, + sizeof(*pinfo->dom_info), GFP_KERNEL); + if (!pinfo->dom_info) + return -ENOMEM; + + for (domain = 0; domain < pinfo->num_domains; domain++) { + struct perf_dom_info *dom = pinfo->dom_info + domain; + + scmi_perf_domain_attributes_get(handle, domain, dom); + scmi_perf_describe_levels_get(handle, domain, dom); + } + + handle->perf_ops = &perf_ops; + handle->perf_priv = pinfo; + + return 0; +} + +static int __init scmi_perf_init(void) +{ + return scmi_protocol_register(SCMI_PROTOCOL_PERF, + &scmi_perf_protocol_init); +} +subsys_initcall(scmi_perf_init); diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 464086b9d8c5..57d4b1c099e5 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -33,15 +33,57 @@ struct scmi_revision_info { char sub_vendor_id[SCMI_MAX_STR_SIZE]; }; +struct scmi_handle; + +/** + * struct scmi_perf_ops - represents the various operations provided + * by SCMI Performance Protocol + * + * @limits_set: sets limits on the performance level of a domain + * @limits_get: gets limits on the performance level of a domain + * @level_set: sets the performance level of a domain + * @level_get: gets the performance level of a domain + * @device_domain_id: gets the scmi domain id for a given device + * 
@get_transition_latency: gets the DVFS transition latency for a given device + * @add_opps_to_device: adds all the OPPs for a given device + * @freq_set: sets the frequency for a given device using sustained frequency + * to sustained performance level mapping + * @freq_get: gets the frequency for a given device using sustained frequency + * to sustained performance level mapping + */ +struct scmi_perf_ops { + int (*limits_set)(const struct scmi_handle *handle, u32 domain, + u32 max_perf, u32 min_perf); + int (*limits_get)(const struct scmi_handle *handle, u32 domain, + u32 *max_perf, u32 *min_perf); + int (*level_set)(const struct scmi_handle *handle, u32 domain, + u32 level); + int (*level_get)(const struct scmi_handle *handle, u32 domain, + u32 *level); + int (*device_domain_id)(struct device *dev); + int (*get_transition_latency)(const struct scmi_handle *handle, + struct device *dev); + int (*add_opps_to_device)(const struct scmi_handle *handle, + struct device *dev); + int (*freq_set)(const struct scmi_handle *handle, u32 domain, + unsigned long rate); + int (*freq_get)(const struct scmi_handle *handle, u32 domain, + unsigned long *rate); +}; + /** * struct scmi_handle - Handle returned to ARM SCMI clients for usage. * * @dev: pointer to the SCMI device * @version: pointer to the structure containing SCMI version information + * @perf_ops: pointer to set of performance protocol operations */ struct scmi_handle { struct device *dev; struct scmi_revision_info *version; + struct scmi_perf_ops *perf_ops; + /* for protocol internal use */ + void *perf_priv; }; enum scmi_std_protocol { -- cgit v1.2.3 From 5f6c6430e904d21bfe5d0076b1ff3e8b9ed94ba0 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Tue, 6 Jun 2017 11:27:57 +0100 Subject: firmware: arm_scmi: add initial support for clock protocol The clock protocol is intended for management of clocks. It is used to enable or disable clocks, and to set and get the clock rates. This protocol provides commands to describe the protocol version, discover various implementation specific attributes, describe a clock, enable and disable a clock and get/set the rate of the clock synchronously or asynchronously. This patch adds initial support for the clock protocol. Cc: Arnd Bergmann Cc: Greg Kroah-Hartman Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/Makefile | 2 +- drivers/firmware/arm_scmi/clock.c | 342 +++++++++++++++++++++++++++++++++++++ include/linux/scmi_protocol.h | 44 +++++ 3 files changed, 387 insertions(+), 1 deletion(-) create mode 100644 drivers/firmware/arm_scmi/clock.c diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index 687cbbfb3af6..2130ee9ac825 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -1,4 +1,4 @@ obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o scmi-bus-y = bus.o scmi-driver-y = driver.o -scmi-protocols-y = base.o perf.o +scmi-protocols-y = base.o clock.o perf.o diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c new file mode 100644 index 000000000000..e8ffad33a0ff --- /dev/null +++ b/drivers/firmware/arm_scmi/clock.c @@ -0,0 +1,342 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Clock Protocol + * + * Copyright (C) 2018 ARM Ltd. 
+ */ + +#include "common.h" + +enum scmi_clock_protocol_cmd { + CLOCK_ATTRIBUTES = 0x3, + CLOCK_DESCRIBE_RATES = 0x4, + CLOCK_RATE_SET = 0x5, + CLOCK_RATE_GET = 0x6, + CLOCK_CONFIG_SET = 0x7, +}; + +struct scmi_msg_resp_clock_protocol_attributes { + __le16 num_clocks; + u8 max_async_req; + u8 reserved; +}; + +struct scmi_msg_resp_clock_attributes { + __le32 attributes; +#define CLOCK_ENABLE BIT(0) + u8 name[SCMI_MAX_STR_SIZE]; +}; + +struct scmi_clock_set_config { + __le32 id; + __le32 attributes; +}; + +struct scmi_msg_clock_describe_rates { + __le32 id; + __le32 rate_index; +}; + +struct scmi_msg_resp_clock_describe_rates { + __le32 num_rates_flags; +#define NUM_RETURNED(x) ((x) & 0xfff) +#define RATE_DISCRETE(x) !((x) & BIT(12)) +#define NUM_REMAINING(x) ((x) >> 16) + struct { + __le32 value_low; + __le32 value_high; + } rate[0]; +#define RATE_TO_U64(X) \ +({ \ + typeof(X) x = (X); \ + le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \ +}) +}; + +struct scmi_clock_set_rate { + __le32 flags; +#define CLOCK_SET_ASYNC BIT(0) +#define CLOCK_SET_DELAYED BIT(1) +#define CLOCK_SET_ROUND_UP BIT(2) +#define CLOCK_SET_ROUND_AUTO BIT(3) + __le32 id; + __le32 value_low; + __le32 value_high; +}; + +struct clock_info { + int num_clocks; + int max_async_req; + struct scmi_clock_info *clk; +}; + +static int scmi_clock_protocol_attributes_get(const struct scmi_handle *handle, + struct clock_info *ci) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_resp_clock_protocol_attributes *attr; + + ret = scmi_one_xfer_init(handle, PROTOCOL_ATTRIBUTES, + SCMI_PROTOCOL_CLOCK, 0, sizeof(*attr), &t); + if (ret) + return ret; + + attr = t->rx.buf; + + ret = scmi_do_xfer(handle, t); + if (!ret) { + ci->num_clocks = le16_to_cpu(attr->num_clocks); + ci->max_async_req = attr->max_async_req; + } + + scmi_one_xfer_put(handle, t); + return ret; +} + +static int scmi_clock_attributes_get(const struct scmi_handle *handle, + u32 clk_id, struct scmi_clock_info *clk) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_resp_clock_attributes *attr; + + ret = scmi_one_xfer_init(handle, CLOCK_ATTRIBUTES, SCMI_PROTOCOL_CLOCK, + sizeof(clk_id), sizeof(*attr), &t); + if (ret) + return ret; + + *(__le32 *)t->tx.buf = cpu_to_le32(clk_id); + attr = t->rx.buf; + + ret = scmi_do_xfer(handle, t); + if (!ret) + memcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE); + else + clk->name[0] = '\0'; + + scmi_one_xfer_put(handle, t); + return ret; +} + +static int +scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id, + struct scmi_clock_info *clk) +{ + u64 *rate; + int ret, cnt; + bool rate_discrete; + u32 tot_rate_cnt = 0, rates_flag; + u16 num_returned, num_remaining; + struct scmi_xfer *t; + struct scmi_msg_clock_describe_rates *clk_desc; + struct scmi_msg_resp_clock_describe_rates *rlist; + + ret = scmi_one_xfer_init(handle, CLOCK_DESCRIBE_RATES, + SCMI_PROTOCOL_CLOCK, sizeof(*clk_desc), 0, &t); + if (ret) + return ret; + + clk_desc = t->tx.buf; + rlist = t->rx.buf; + + do { + clk_desc->id = cpu_to_le32(clk_id); + /* Set the number of rates to be skipped/already read */ + clk_desc->rate_index = cpu_to_le32(tot_rate_cnt); + + ret = scmi_do_xfer(handle, t); + if (ret) + break; + + rates_flag = le32_to_cpu(rlist->num_rates_flags); + num_remaining = NUM_REMAINING(rates_flag); + rate_discrete = RATE_DISCRETE(rates_flag); + num_returned = NUM_RETURNED(rates_flag); + + if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) { + dev_err(handle->dev, "No. 
of rates > MAX_NUM_RATES"); + break; + } + + if (!rate_discrete) { + clk->range.min_rate = RATE_TO_U64(rlist->rate[0]); + clk->range.max_rate = RATE_TO_U64(rlist->rate[1]); + clk->range.step_size = RATE_TO_U64(rlist->rate[2]); + dev_dbg(handle->dev, "Min %llu Max %llu Step %llu Hz\n", + clk->range.min_rate, clk->range.max_rate, + clk->range.step_size); + break; + } + + rate = &clk->list.rates[tot_rate_cnt]; + for (cnt = 0; cnt < num_returned; cnt++, rate++) { + *rate = RATE_TO_U64(rlist->rate[cnt]); + dev_dbg(handle->dev, "Rate %llu Hz\n", *rate); + } + + tot_rate_cnt += num_returned; + /* + * check for both returned and remaining to avoid infinite + * loop due to buggy firmware + */ + } while (num_returned && num_remaining); + + if (rate_discrete) + clk->list.num_rates = tot_rate_cnt; + + scmi_one_xfer_put(handle, t); + return ret; +} + +static int +scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value) +{ + int ret; + struct scmi_xfer *t; + + ret = scmi_one_xfer_init(handle, CLOCK_RATE_GET, SCMI_PROTOCOL_CLOCK, + sizeof(__le32), sizeof(u64), &t); + if (ret) + return ret; + + *(__le32 *)t->tx.buf = cpu_to_le32(clk_id); + + ret = scmi_do_xfer(handle, t); + if (!ret) { + __le32 *pval = t->rx.buf; + + *value = le32_to_cpu(*pval); + *value |= (u64)le32_to_cpu(*(pval + 1)) << 32; + } + + scmi_one_xfer_put(handle, t); + return ret; +} + +static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id, + u32 config, u64 rate) +{ + int ret; + struct scmi_xfer *t; + struct scmi_clock_set_rate *cfg; + + ret = scmi_one_xfer_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK, + sizeof(*cfg), 0, &t); + if (ret) + return ret; + + cfg = t->tx.buf; + cfg->flags = cpu_to_le32(config); + cfg->id = cpu_to_le32(clk_id); + cfg->value_low = cpu_to_le32(rate & 0xffffffff); + cfg->value_high = cpu_to_le32(rate >> 32); + + ret = scmi_do_xfer(handle, t); + + scmi_one_xfer_put(handle, t); + return ret; +} + +static int +scmi_clock_config_set(const struct scmi_handle *handle, u32 clk_id, u32 config) +{ + int ret; + struct scmi_xfer *t; + struct scmi_clock_set_config *cfg; + + ret = scmi_one_xfer_init(handle, CLOCK_CONFIG_SET, SCMI_PROTOCOL_CLOCK, + sizeof(*cfg), 0, &t); + if (ret) + return ret; + + cfg = t->tx.buf; + cfg->id = cpu_to_le32(clk_id); + cfg->attributes = cpu_to_le32(config); + + ret = scmi_do_xfer(handle, t); + + scmi_one_xfer_put(handle, t); + return ret; +} + +static int scmi_clock_enable(const struct scmi_handle *handle, u32 clk_id) +{ + return scmi_clock_config_set(handle, clk_id, CLOCK_ENABLE); +} + +static int scmi_clock_disable(const struct scmi_handle *handle, u32 clk_id) +{ + return scmi_clock_config_set(handle, clk_id, 0); +} + +static int scmi_clock_count_get(const struct scmi_handle *handle) +{ + struct clock_info *ci = handle->clk_priv; + + return ci->num_clocks; +} + +static const struct scmi_clock_info * +scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id) +{ + struct clock_info *ci = handle->clk_priv; + struct scmi_clock_info *clk = ci->clk + clk_id; + + if (!clk->name || !clk->name[0]) + return NULL; + + return clk; +} + +static struct scmi_clk_ops clk_ops = { + .count_get = scmi_clock_count_get, + .info_get = scmi_clock_info_get, + .rate_get = scmi_clock_rate_get, + .rate_set = scmi_clock_rate_set, + .enable = scmi_clock_enable, + .disable = scmi_clock_disable, +}; + +static int scmi_clock_protocol_init(struct scmi_handle *handle) +{ + u32 version; + int clkid, ret; + struct clock_info *cinfo; + + scmi_version_get(handle, 
SCMI_PROTOCOL_CLOCK, &version); + + dev_dbg(handle->dev, "Clock Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + cinfo = devm_kzalloc(handle->dev, sizeof(*cinfo), GFP_KERNEL); + if (!cinfo) + return -ENOMEM; + + scmi_clock_protocol_attributes_get(handle, cinfo); + + cinfo->clk = devm_kcalloc(handle->dev, cinfo->num_clocks, + sizeof(*cinfo->clk), GFP_KERNEL); + if (!cinfo->clk) + return -ENOMEM; + + for (clkid = 0; clkid < cinfo->num_clocks; clkid++) { + struct scmi_clock_info *clk = cinfo->clk + clkid; + + ret = scmi_clock_attributes_get(handle, clkid, clk); + if (!ret) + scmi_clock_describe_rates_get(handle, clkid, clk); + } + + handle->clk_ops = &clk_ops; + handle->clk_priv = cinfo; + + return 0; +} + +static int __init scmi_clock_init(void) +{ + return scmi_protocol_register(SCMI_PROTOCOL_CLOCK, + &scmi_clock_protocol_init); +} +subsys_initcall(scmi_clock_init); diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 57d4b1c099e5..5a3092f05011 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -8,6 +8,7 @@ #include #define SCMI_MAX_STR_SIZE 16 +#define SCMI_MAX_NUM_RATES 16 /** * struct scmi_revision_info - version information structure @@ -33,8 +34,48 @@ struct scmi_revision_info { char sub_vendor_id[SCMI_MAX_STR_SIZE]; }; +struct scmi_clock_info { + char name[SCMI_MAX_STR_SIZE]; + bool rate_discrete; + union { + struct { + int num_rates; + u64 rates[SCMI_MAX_NUM_RATES]; + } list; + struct { + u64 min_rate; + u64 max_rate; + u64 step_size; + } range; + }; +}; + struct scmi_handle; +/** + * struct scmi_clk_ops - represents the various operations provided + * by SCMI Clock Protocol + * + * @count_get: get the count of clocks provided by SCMI + * @info_get: get the information of the specified clock + * @rate_get: request the current clock rate of a clock + * @rate_set: set the clock rate of a clock + * @enable: enables the specified clock + * @disable: disables the specified clock + */ +struct scmi_clk_ops { + int (*count_get)(const struct scmi_handle *handle); + + const struct scmi_clock_info *(*info_get) + (const struct scmi_handle *handle, u32 clk_id); + int (*rate_get)(const struct scmi_handle *handle, u32 clk_id, + u64 *rate); + int (*rate_set)(const struct scmi_handle *handle, u32 clk_id, + u32 config, u64 rate); + int (*enable)(const struct scmi_handle *handle, u32 clk_id); + int (*disable)(const struct scmi_handle *handle, u32 clk_id); +}; + /** * struct scmi_perf_ops - represents the various operations provided * by SCMI Performance Protocol @@ -77,13 +118,16 @@ struct scmi_perf_ops { * @dev: pointer to the SCMI device * @version: pointer to the structure containing SCMI version information * @perf_ops: pointer to set of performance protocol operations + * @clk_ops: pointer to set of clock protocol operations */ struct scmi_handle { struct device *dev; struct scmi_revision_info *version; struct scmi_perf_ops *perf_ops; + struct scmi_clk_ops *clk_ops; /* for protocol internal use */ void *perf_priv; + void *clk_priv; }; enum scmi_std_protocol { -- cgit v1.2.3 From 76a6550990e296a7acbb4d33201c9740be912a8c Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Tue, 6 Jun 2017 11:32:24 +0100 Subject: firmware: arm_scmi: add initial support for power protocol The power protocol is intended for management of power states of various power domains. 
The power domain management protocol provides commands to describe the protocol version, discover the implementation specific attributes, set and get the power state of a domain. This patch adds support for the above-mentioned features of the protocol. Cc: Arnd Bergmann Cc: Greg Kroah-Hartman Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/Makefile | 2 +- drivers/firmware/arm_scmi/power.c | 221 +++++++++++++++++++++++++++++++++++++ include/linux/scmi_protocol.h | 28 +++++ 3 files changed, 250 insertions(+), 1 deletion(-) create mode 100644 drivers/firmware/arm_scmi/power.c diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index 2130ee9ac825..420c761ced94 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -1,4 +1,4 @@ obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o scmi-bus-y = bus.o scmi-driver-y = driver.o -scmi-protocols-y = base.o clock.o perf.o +scmi-protocols-y = base.o clock.o perf.o power.o diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c new file mode 100644 index 000000000000..087c2876cdf2 --- /dev/null +++ b/drivers/firmware/arm_scmi/power.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Power Protocol + * + * Copyright (C) 2018 ARM Ltd. + */ + +#include "common.h" + +enum scmi_power_protocol_cmd { + POWER_DOMAIN_ATTRIBUTES = 0x3, + POWER_STATE_SET = 0x4, + POWER_STATE_GET = 0x5, + POWER_STATE_NOTIFY = 0x6, +}; + +struct scmi_msg_resp_power_attributes { + __le16 num_domains; + __le16 reserved; + __le32 stats_addr_low; + __le32 stats_addr_high; + __le32 stats_size; +}; + +struct scmi_msg_resp_power_domain_attributes { + __le32 flags; +#define SUPPORTS_STATE_SET_NOTIFY(x) ((x) & BIT(31)) +#define SUPPORTS_STATE_SET_ASYNC(x) ((x) & BIT(30)) +#define SUPPORTS_STATE_SET_SYNC(x) ((x) & BIT(29)) + u8 name[SCMI_MAX_STR_SIZE]; +}; + +struct scmi_power_set_state { + __le32 flags; +#define STATE_SET_ASYNC BIT(0) + __le32 domain; + __le32 state; +}; + +struct scmi_power_state_notify { + __le32 domain; + __le32 notify_enable; +}; + +struct power_dom_info { + bool state_set_sync; + bool state_set_async; + bool state_set_notify; + char name[SCMI_MAX_STR_SIZE]; +}; + +struct scmi_power_info { + int num_domains; + u64 stats_addr; + u32 stats_size; + struct power_dom_info *dom_info; +}; + +static int scmi_power_attributes_get(const struct scmi_handle *handle, + struct scmi_power_info *pi) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_resp_power_attributes *attr; + + ret = scmi_one_xfer_init(handle, PROTOCOL_ATTRIBUTES, + SCMI_PROTOCOL_POWER, 0, sizeof(*attr), &t); + if (ret) + return ret; + + attr = t->rx.buf; + + ret = scmi_do_xfer(handle, t); + if (!ret) { + pi->num_domains = le16_to_cpu(attr->num_domains); + pi->stats_addr = le32_to_cpu(attr->stats_addr_low) | + (u64)le32_to_cpu(attr->stats_addr_high) << 32; + pi->stats_size = le32_to_cpu(attr->stats_size); + } + + scmi_one_xfer_put(handle, t); + return ret; +} + +static int +scmi_power_domain_attributes_get(const struct scmi_handle *handle, u32 domain, + struct power_dom_info *dom_info) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_resp_power_domain_attributes *attr; + + ret =
scmi_one_xfer_init(handle, POWER_DOMAIN_ATTRIBUTES, + SCMI_PROTOCOL_POWER, sizeof(domain), + sizeof(*attr), &t); + if (ret) + return ret; + + *(__le32 *)t->tx.buf = cpu_to_le32(domain); + attr = t->rx.buf; + + ret = scmi_do_xfer(handle, t); + if (!ret) { + u32 flags = le32_to_cpu(attr->flags); + + dom_info->state_set_notify = SUPPORTS_STATE_SET_NOTIFY(flags); + dom_info->state_set_async = SUPPORTS_STATE_SET_ASYNC(flags); + dom_info->state_set_sync = SUPPORTS_STATE_SET_SYNC(flags); + memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); + } + + scmi_one_xfer_put(handle, t); + return ret; +} + +static int +scmi_power_state_set(const struct scmi_handle *handle, u32 domain, u32 state) +{ + int ret; + struct scmi_xfer *t; + struct scmi_power_set_state *st; + + ret = scmi_one_xfer_init(handle, POWER_STATE_SET, SCMI_PROTOCOL_POWER, + sizeof(*st), 0, &t); + if (ret) + return ret; + + st = t->tx.buf; + st->flags = cpu_to_le32(0); + st->domain = cpu_to_le32(domain); + st->state = cpu_to_le32(state); + + ret = scmi_do_xfer(handle, t); + + scmi_one_xfer_put(handle, t); + return ret; +} + +static int +scmi_power_state_get(const struct scmi_handle *handle, u32 domain, u32 *state) +{ + int ret; + struct scmi_xfer *t; + + ret = scmi_one_xfer_init(handle, POWER_STATE_GET, SCMI_PROTOCOL_POWER, + sizeof(u32), sizeof(u32), &t); + if (ret) + return ret; + + *(__le32 *)t->tx.buf = cpu_to_le32(domain); + + ret = scmi_do_xfer(handle, t); + if (!ret) + *state = le32_to_cpu(*(__le32 *)t->rx.buf); + + scmi_one_xfer_put(handle, t); + return ret; +} + +static int scmi_power_num_domains_get(const struct scmi_handle *handle) +{ + struct scmi_power_info *pi = handle->power_priv; + + return pi->num_domains; +} + +static char *scmi_power_name_get(const struct scmi_handle *handle, u32 domain) +{ + struct scmi_power_info *pi = handle->power_priv; + struct power_dom_info *dom = pi->dom_info + domain; + + return dom->name; +} + +static struct scmi_power_ops power_ops = { + .num_domains_get = scmi_power_num_domains_get, + .name_get = scmi_power_name_get, + .state_set = scmi_power_state_set, + .state_get = scmi_power_state_get, +}; + +static int scmi_power_protocol_init(struct scmi_handle *handle) +{ + int domain; + u32 version; + struct scmi_power_info *pinfo; + + scmi_version_get(handle, SCMI_PROTOCOL_POWER, &version); + + dev_dbg(handle->dev, "Power Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL); + if (!pinfo) + return -ENOMEM; + + scmi_power_attributes_get(handle, pinfo); + + pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains, + sizeof(*pinfo->dom_info), GFP_KERNEL); + if (!pinfo->dom_info) + return -ENOMEM; + + for (domain = 0; domain < pinfo->num_domains; domain++) { + struct power_dom_info *dom = pinfo->dom_info + domain; + + scmi_power_domain_attributes_get(handle, domain, dom); + } + + handle->power_ops = &power_ops; + handle->power_priv = pinfo; + + return 0; +} + +static int __init scmi_power_init(void) +{ + return scmi_protocol_register(SCMI_PROTOCOL_POWER, + &scmi_power_protocol_init); +} +subsys_initcall(scmi_power_init); diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 5a3092f05011..8cd0348787bc 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -112,11 +112,37 @@ struct scmi_perf_ops { unsigned long *rate); }; +/** + * struct scmi_power_ops - represents the various operations provided + * by SCMI Power Protocol + * + * @num_domains_get: get 
the count of power domains provided by SCMI + * @name_get: gets the name of a power domain + * @state_set: sets the power state of a power domain + * @state_get: gets the power state of a power domain + */ +struct scmi_power_ops { + int (*num_domains_get)(const struct scmi_handle *handle); + char *(*name_get)(const struct scmi_handle *handle, u32 domain); +#define SCMI_POWER_STATE_TYPE_SHIFT 30 +#define SCMI_POWER_STATE_ID_MASK (BIT(28) - 1) +#define SCMI_POWER_STATE_PARAM(type, id) \ + ((((type) & BIT(0)) << SCMI_POWER_STATE_TYPE_SHIFT) | \ + ((id) & SCMI_POWER_STATE_ID_MASK)) +#define SCMI_POWER_STATE_GENERIC_ON SCMI_POWER_STATE_PARAM(0, 0) +#define SCMI_POWER_STATE_GENERIC_OFF SCMI_POWER_STATE_PARAM(1, 0) + int (*state_set)(const struct scmi_handle *handle, u32 domain, + u32 state); + int (*state_get)(const struct scmi_handle *handle, u32 domain, + u32 *state); +}; + /** * struct scmi_handle - Handle returned to ARM SCMI clients for usage. * * @dev: pointer to the SCMI device * @version: pointer to the structure containing SCMI version information + * @power_ops: pointer to set of power protocol operations * @perf_ops: pointer to set of performance protocol operations * @clk_ops: pointer to set of clock protocol operations */ @@ -125,9 +151,11 @@ struct scmi_handle { struct scmi_revision_info *version; struct scmi_perf_ops *perf_ops; struct scmi_clk_ops *clk_ops; + struct scmi_power_ops *power_ops; /* for protocol internal use */ void *perf_priv; void *clk_priv; + void *power_priv; }; enum scmi_std_protocol { -- cgit v1.2.3 From 5179c523c1eae4b80fbafe9656bc24a375217cac Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Tue, 6 Jun 2017 11:38:10 +0100 Subject: firmware: arm_scmi: add initial support for sensor protocol The sensor protocol provides functions to manage platform sensors, and provides the commands to describe the protocol version and the various attribute flags. It also provides commands to discover various sensors implemented and managed by the platform, read any sensor synchronously or asynchronously as allowed by the platform, program sensor attributes and/or configurations, if applicable. This patch adds support for most of the above features. Cc: Arnd Bergmann Cc: Greg Kroah-Hartman Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/Makefile | 2 +- drivers/firmware/arm_scmi/sensors.c | 291 ++++++++++++++++++++++++++++++++++++ include/linux/scmi_protocol.h | 46 ++++++ 3 files changed, 338 insertions(+), 1 deletion(-) create mode 100644 drivers/firmware/arm_scmi/sensors.c diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index 420c761ced94..3236890905b9 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -1,4 +1,4 @@ obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o scmi-bus-y = bus.o scmi-driver-y = driver.o -scmi-protocols-y = base.o clock.o perf.o power.o +scmi-protocols-y = base.o clock.o perf.o power.o sensors.o diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c new file mode 100644 index 000000000000..bbb469fea0ed --- /dev/null +++ b/drivers/firmware/arm_scmi/sensors.c @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Sensor Protocol + * + * Copyright (C) 2018 ARM Ltd. 
+ */ + +#include "common.h" + +enum scmi_sensor_protocol_cmd { + SENSOR_DESCRIPTION_GET = 0x3, + SENSOR_CONFIG_SET = 0x4, + SENSOR_TRIP_POINT_SET = 0x5, + SENSOR_READING_GET = 0x6, +}; + +struct scmi_msg_resp_sensor_attributes { + __le16 num_sensors; + u8 max_requests; + u8 reserved; + __le32 reg_addr_low; + __le32 reg_addr_high; + __le32 reg_size; +}; + +struct scmi_msg_resp_sensor_description { + __le16 num_returned; + __le16 num_remaining; + struct { + __le32 id; + __le32 attributes_low; +#define SUPPORTS_ASYNC_READ(x) ((x) & BIT(31)) +#define NUM_TRIP_POINTS(x) (((x) >> 4) & 0xff) + __le32 attributes_high; +#define SENSOR_TYPE(x) ((x) & 0xff) +#define SENSOR_SCALE(x) (((x) >> 11) & 0x3f) +#define SENSOR_UPDATE_SCALE(x) (((x) >> 22) & 0x1f) +#define SENSOR_UPDATE_BASE(x) (((x) >> 27) & 0x1f) + u8 name[SCMI_MAX_STR_SIZE]; + } desc[0]; +}; + +struct scmi_msg_set_sensor_config { + __le32 id; + __le32 event_control; +}; + +struct scmi_msg_set_sensor_trip_point { + __le32 id; + __le32 event_control; +#define SENSOR_TP_EVENT_MASK (0x3) +#define SENSOR_TP_DISABLED 0x0 +#define SENSOR_TP_POSITIVE 0x1 +#define SENSOR_TP_NEGATIVE 0x2 +#define SENSOR_TP_BOTH 0x3 +#define SENSOR_TP_ID(x) (((x) & 0xff) << 4) + __le32 value_low; + __le32 value_high; +}; + +struct scmi_msg_sensor_reading_get { + __le32 id; + __le32 flags; +#define SENSOR_READ_ASYNC BIT(0) +}; + +struct sensors_info { + int num_sensors; + int max_requests; + u64 reg_addr; + u32 reg_size; + struct scmi_sensor_info *sensors; +}; + +static int scmi_sensor_attributes_get(const struct scmi_handle *handle, + struct sensors_info *si) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_resp_sensor_attributes *attr; + + ret = scmi_one_xfer_init(handle, PROTOCOL_ATTRIBUTES, + SCMI_PROTOCOL_SENSOR, 0, sizeof(*attr), &t); + if (ret) + return ret; + + attr = t->rx.buf; + + ret = scmi_do_xfer(handle, t); + if (!ret) { + si->num_sensors = le16_to_cpu(attr->num_sensors); + si->max_requests = attr->max_requests; + si->reg_addr = le32_to_cpu(attr->reg_addr_low) | + (u64)le32_to_cpu(attr->reg_addr_high) << 32; + si->reg_size = le32_to_cpu(attr->reg_size); + } + + scmi_one_xfer_put(handle, t); + return ret; +} + +static int scmi_sensor_description_get(const struct scmi_handle *handle, + struct sensors_info *si) +{ + int ret, cnt; + u32 desc_index = 0; + u16 num_returned, num_remaining; + struct scmi_xfer *t; + struct scmi_msg_resp_sensor_description *buf; + + ret = scmi_one_xfer_init(handle, SENSOR_DESCRIPTION_GET, + SCMI_PROTOCOL_SENSOR, sizeof(__le32), 0, &t); + if (ret) + return ret; + + buf = t->rx.buf; + + do { + /* Set the number of sensors to be skipped/already read */ + *(__le32 *)t->tx.buf = cpu_to_le32(desc_index); + + ret = scmi_do_xfer(handle, t); + if (ret) + break; + + num_returned = le16_to_cpu(buf->num_returned); + num_remaining = le16_to_cpu(buf->num_remaining); + + if (desc_index + num_returned > si->num_sensors) { + dev_err(handle->dev, "No. 
of sensors can't exceed %d", + si->num_sensors); + break; + } + + for (cnt = 0; cnt < num_returned; cnt++) { + u32 attrh; + struct scmi_sensor_info *s; + + attrh = le32_to_cpu(buf->desc[cnt].attributes_high); + s = &si->sensors[desc_index + cnt]; + s->id = le32_to_cpu(buf->desc[cnt].id); + s->type = SENSOR_TYPE(attrh); + memcpy(s->name, buf->desc[cnt].name, SCMI_MAX_STR_SIZE); + } + + desc_index += num_returned; + /* + * check for both returned and remaining to avoid infinite + * loop due to buggy firmware + */ + } while (num_returned && num_remaining); + + scmi_one_xfer_put(handle, t); + return ret; +} + +static int +scmi_sensor_configuration_set(const struct scmi_handle *handle, u32 sensor_id) +{ + int ret; + u32 evt_cntl = BIT(0); + struct scmi_xfer *t; + struct scmi_msg_set_sensor_config *cfg; + + ret = scmi_one_xfer_init(handle, SENSOR_CONFIG_SET, + SCMI_PROTOCOL_SENSOR, sizeof(*cfg), 0, &t); + if (ret) + return ret; + + cfg = t->tx.buf; + cfg->id = cpu_to_le32(sensor_id); + cfg->event_control = cpu_to_le32(evt_cntl); + + ret = scmi_do_xfer(handle, t); + + scmi_one_xfer_put(handle, t); + return ret; +} + +static int scmi_sensor_trip_point_set(const struct scmi_handle *handle, + u32 sensor_id, u8 trip_id, u64 trip_value) +{ + int ret; + u32 evt_cntl = SENSOR_TP_BOTH; + struct scmi_xfer *t; + struct scmi_msg_set_sensor_trip_point *trip; + + ret = scmi_one_xfer_init(handle, SENSOR_TRIP_POINT_SET, + SCMI_PROTOCOL_SENSOR, sizeof(*trip), 0, &t); + if (ret) + return ret; + + trip = t->tx.buf; + trip->id = cpu_to_le32(sensor_id); + trip->event_control = cpu_to_le32(evt_cntl | SENSOR_TP_ID(trip_id)); + trip->value_low = cpu_to_le32(trip_value & 0xffffffff); + trip->value_high = cpu_to_le32(trip_value >> 32); + + ret = scmi_do_xfer(handle, t); + + scmi_one_xfer_put(handle, t); + return ret; +} + +static int scmi_sensor_reading_get(const struct scmi_handle *handle, + u32 sensor_id, bool async, u64 *value) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_sensor_reading_get *sensor; + + ret = scmi_one_xfer_init(handle, SENSOR_READING_GET, + SCMI_PROTOCOL_SENSOR, sizeof(*sensor), + sizeof(u64), &t); + if (ret) + return ret; + + sensor = t->tx.buf; + sensor->id = cpu_to_le32(sensor_id); + sensor->flags = cpu_to_le32(async ? 
SENSOR_READ_ASYNC : 0); + + ret = scmi_do_xfer(handle, t); + if (!ret) { + __le32 *pval = t->rx.buf; + + *value = le32_to_cpu(*pval); + *value |= (u64)le32_to_cpu(*(pval + 1)) << 32; + } + + scmi_one_xfer_put(handle, t); + return ret; +} + +static const struct scmi_sensor_info * +scmi_sensor_info_get(const struct scmi_handle *handle, u32 sensor_id) +{ + struct sensors_info *si = handle->sensor_priv; + + return si->sensors + sensor_id; +} + +static int scmi_sensor_count_get(const struct scmi_handle *handle) +{ + struct sensors_info *si = handle->sensor_priv; + + return si->num_sensors; +} + +static struct scmi_sensor_ops sensor_ops = { + .count_get = scmi_sensor_count_get, + .info_get = scmi_sensor_info_get, + .configuration_set = scmi_sensor_configuration_set, + .trip_point_set = scmi_sensor_trip_point_set, + .reading_get = scmi_sensor_reading_get, +}; + +static int scmi_sensors_protocol_init(struct scmi_handle *handle) +{ + u32 version; + struct sensors_info *sinfo; + + scmi_version_get(handle, SCMI_PROTOCOL_SENSOR, &version); + + dev_dbg(handle->dev, "Sensor Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + sinfo = devm_kzalloc(handle->dev, sizeof(*sinfo), GFP_KERNEL); + if (!sinfo) + return -ENOMEM; + + scmi_sensor_attributes_get(handle, sinfo); + + sinfo->sensors = devm_kcalloc(handle->dev, sinfo->num_sensors, + sizeof(*sinfo->sensors), GFP_KERNEL); + if (!sinfo->sensors) + return -ENOMEM; + + scmi_sensor_description_get(handle, sinfo); + + handle->sensor_ops = &sensor_ops; + handle->sensor_priv = sinfo; + + return 0; +} + +static int __init scmi_sensors_init(void) +{ + return scmi_protocol_register(SCMI_PROTOCOL_SENSOR, + &scmi_sensors_protocol_init); +} +subsys_initcall(scmi_sensors_init); diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 8cd0348787bc..5d63da9435ba 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -137,6 +137,49 @@ struct scmi_power_ops { u32 *state); }; +struct scmi_sensor_info { + u32 id; + u8 type; + char name[SCMI_MAX_STR_SIZE]; +}; + +/* + * Partial list from Distributed Management Task Force (DMTF) specification: + * DSP0249 (Platform Level Data Model specification) + */ +enum scmi_sensor_class { + NONE = 0x0, + TEMPERATURE_C = 0x2, + VOLTAGE = 0x5, + CURRENT = 0x6, + POWER = 0x7, + ENERGY = 0x8, +}; + +/** + * struct scmi_sensor_ops - represents the various operations provided + * by SCMI Sensor Protocol + * + * @count_get: get the count of sensors provided by SCMI + * @info_get: get the information of the specified sensor + * @configuration_set: control notifications on cross-over events for + * the trip-points + * @trip_point_set: selects and configures a trip-point of interest + * @reading_get: gets the current value of the sensor + */ +struct scmi_sensor_ops { + int (*count_get)(const struct scmi_handle *handle); + + const struct scmi_sensor_info *(*info_get) + (const struct scmi_handle *handle, u32 sensor_id); + int (*configuration_set)(const struct scmi_handle *handle, + u32 sensor_id); + int (*trip_point_set)(const struct scmi_handle *handle, u32 sensor_id, + u8 trip_id, u64 trip_value); + int (*reading_get)(const struct scmi_handle *handle, u32 sensor_id, + bool async, u64 *value); +}; + /** * struct scmi_handle - Handle returned to ARM SCMI clients for usage. 
* @@ -145,6 +188,7 @@ struct scmi_power_ops { * @power_ops: pointer to set of power protocol operations * @perf_ops: pointer to set of performance protocol operations * @clk_ops: pointer to set of clock protocol operations + * @sensor_ops: pointer to set of sensor protocol operations */ struct scmi_handle { struct device *dev; @@ -152,10 +196,12 @@ struct scmi_handle { struct scmi_perf_ops *perf_ops; struct scmi_clk_ops *clk_ops; struct scmi_power_ops *power_ops; + struct scmi_sensor_ops *sensor_ops; /* for protocol internal use */ void *perf_priv; void *clk_priv; void *power_priv; + void *sensor_priv; }; enum scmi_std_protocol { -- cgit v1.2.3 From bc40081d9825c7ed34501ebfc0a533047a07b16c Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Tue, 6 Jun 2017 11:39:08 +0100 Subject: firmware: arm_scmi: probe and initialise all the supported protocols Now that we have basic support for all the protocols in the specification, let's probe them individually and initialise them. Cc: Arnd Bergmann Cc: Greg Kroah-Hartman Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/driver.c | 51 +++++++++++++++++++++++++++++++++++++- 1 file changed, 50 insertions(+), 1 deletion(-) diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 49875cd68365..f242b2e7c4b1 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -466,6 +466,21 @@ void scmi_setup_protocol_implemented(const struct scmi_handle *handle, info->protocols_imp = prot_imp; } +static bool +scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id) +{ + int i; + struct scmi_info *info = handle_to_scmi_info(handle); + + if (!info->protocols_imp) + return false; + + for (i = 0; i < MAX_PROTOCOLS_IMP; i++) + if (info->protocols_imp[i] == prot_id) + return true; + return false; +} + /** * scmi_handle_get() - Get the SCMI handle for a device * @@ -661,6 +676,23 @@ static inline int scmi_mbox_chan_setup(struct scmi_info *info) return 0; } +static inline void +scmi_create_protocol_device(struct device_node *np, struct scmi_info *info, + int prot_id) +{ + struct scmi_device *sdev; + + sdev = scmi_device_create(np, info->dev, prot_id); + if (!sdev) { + dev_err(info->dev, "failed to create %d protocol device\n", + prot_id); + return; + } + + /* setup handle now as the transport is ready */ + scmi_set_handle(sdev); +} + static int scmi_probe(struct platform_device *pdev) { int ret; @@ -668,7 +700,7 @@ static int scmi_probe(struct platform_device *pdev) const struct scmi_desc *desc; struct scmi_info *info; struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; + struct device_node *child, *np = dev->of_node; /* Only mailbox method supported, check for the presence of one */ if (scmi_mailbox_check(np)) { @@ -711,6 +743,23 @@ static int scmi_probe(struct platform_device *pdev) list_add_tail(&info->node, &scmi_list); mutex_unlock(&scmi_list_mutex); + for_each_available_child_of_node(np, child) { + u32 prot_id; + + if (of_property_read_u32(child, "reg", &prot_id)) + continue; + + prot_id &= MSG_PROTOCOL_ID_MASK; + + if (!scmi_is_protocol_implemented(handle, prot_id)) { + dev_err(dev, "SCMI protocol %d not implemented\n", + prot_id); + continue; + } + + scmi_create_protocol_device(child, info, prot_id); + } + return 0; } -- cgit v1.2.3 From d4c3751a8de2deeaae546b97650f895b62bbd1b4 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Thu, 20 Jul 2017 14:39:57 +0100 Subject: firmware: arm_scmi: add support for polling based SCMI transfers It would be useful to 
have the option to perform some SCMI transfers atomically by polling for the completion flag instead of being interrupt driven. The SCMI specification has an option to disable the interrupt and poll for the completion flag in the shared memory. This patch adds support for polling-based SCMI transfers using that option. This might be used for uninterrupted/atomic DVFS operations from the scheduler context. Cc: Arnd Bergmann Cc: Greg Kroah-Hartman Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/driver.c | 55 ++++++++++++++++++++++++++++++------ 1 file changed, 47 insertions(+), 8 deletions(-) diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index f242b2e7c4b1..cf8a1719d425 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -18,10 +18,12 @@ #include #include #include +#include #include #include #include #include +#include #include #include @@ -335,6 +337,30 @@ void scmi_one_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer) spin_unlock_irqrestore(&minfo->xfer_lock, flags); } +static bool +scmi_xfer_poll_done(const struct scmi_info *info, struct scmi_xfer *xfer) +{ + struct scmi_shared_mem *mem = info->tx_payload; + u16 xfer_id = MSG_XTRACT_TOKEN(le32_to_cpu(mem->msg_header)); + + if (xfer->hdr.seq != xfer_id) + return false; + + return le32_to_cpu(mem->channel_status) & + (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR | + SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); +} + +#define SCMI_MAX_POLL_TO_NS (100 * NSEC_PER_USEC) + +static bool scmi_xfer_done_no_timeout(const struct scmi_info *info, + struct scmi_xfer *xfer, ktime_t stop) +{ + ktime_t __cur = ktime_get(); + + return scmi_xfer_poll_done(info, xfer) || ktime_after(__cur, stop); +} + /** * scmi_do_xfer() - Do one transfer * @@ -361,15 +387,28 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer) /* mbox_send_message returns non-negative value on success, so reset */ ret = 0; - /* And we wait for the response. */ - timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); - if (!wait_for_completion_timeout(&xfer->done, timeout)) { - dev_err(dev, "mbox timed out in resp(caller: %pS)\n", - (void *)_RET_IP_); - ret = -ETIMEDOUT; - } else if (xfer->hdr.status) { - ret = scmi_to_linux_errno(xfer->hdr.status); + if (xfer->hdr.poll_completion) { + ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS); + + spin_until_cond(scmi_xfer_done_no_timeout(info, xfer, stop)); + + if (ktime_before(ktime_get(), stop)) + scmi_fetch_response(xfer, info->tx_payload); + else + ret = -ETIMEDOUT; + } else { + /* And we wait for the response. */ + timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); + if (!wait_for_completion_timeout(&xfer->done, timeout)) { + dev_err(dev, "mbox timed out in resp(caller: %pS)\n", + (void *)_RET_IP_); + ret = -ETIMEDOUT; + } } + + if (!ret && xfer->hdr.status) + ret = scmi_to_linux_errno(xfer->hdr.status); + /* * NOTE: we might prefer not to need the mailbox ticker to manage the * transfer queueing since the protocol layer queues things by itself. -- cgit v1.2.3 From 5c4ba3cc85296398855d621bf90b78866ea80444 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Fri, 21 Jul 2017 11:42:24 +0100 Subject: firmware: arm_scmi: add option for polling based performance domain operations In order to implement fast CPU DVFS switching, we need to perform all DVFS operations atomically.
Since SCMI transfers already provide the option to choose between polling and interrupt-driven (default) completion, we can opt for polling-based transfers for the set/get performance domain operations. This patch adds the option to choose between polling and interrupt-driven SCMI transfers for the set/get performance level operations. Cc: Arnd Bergmann Cc: Greg Kroah-Hartman Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/perf.c | 19 +++++++++++-------- include/linux/scmi_protocol.h | 8 ++++---- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index 9c56ea503890..987c64d19801 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -292,8 +292,8 @@ static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain, return ret; } -static int -scmi_perf_level_set(const struct scmi_handle *handle, u32 domain, u32 level) +static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain, + u32 level, bool poll) { int ret; struct scmi_xfer *t; @@ -304,6 +304,7 @@ scmi_perf_level_set(const struct scmi_handle *handle, u32 domain, u32 level) if (ret) return ret; + t->hdr.poll_completion = poll; lvl = t->tx.buf; lvl->domain = cpu_to_le32(domain); lvl->level = cpu_to_le32(level); @@ -314,8 +315,8 @@ scmi_perf_level_set(const struct scmi_handle *handle, u32 domain, u32 level) return ret; } -static int -scmi_perf_level_get(const struct scmi_handle *handle, u32 domain, u32 *level) +static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain, + u32 *level, bool poll) { int ret; struct scmi_xfer *t; @@ -325,6 +326,7 @@ scmi_perf_level_get(const struct scmi_handle *handle, u32 domain, u32 *level) if (ret) return ret; + t->hdr.poll_completion = poll; *(__le32 *)t->tx.buf = cpu_to_le32(domain); ret = scmi_do_xfer(handle, t); @@ -400,23 +402,24 @@ static int scmi_dvfs_get_transition_latency(const struct scmi_handle *handle, } static int scmi_dvfs_freq_set(const struct scmi_handle *handle, u32 domain, - unsigned long freq) + unsigned long freq, bool poll) { struct scmi_perf_info *pi = handle->perf_priv; struct perf_dom_info *dom = pi->dom_info + domain; - return scmi_perf_level_set(handle, domain, freq / dom->mult_factor); + return scmi_perf_level_set(handle, domain, freq / dom->mult_factor, + poll); } static int scmi_dvfs_freq_get(const struct scmi_handle *handle, u32 domain, - unsigned long *freq) + unsigned long *freq, bool poll) { int ret; u32 level; struct scmi_perf_info *pi = handle->perf_priv; struct perf_dom_info *dom = pi->dom_info + domain; - ret = scmi_perf_level_get(handle, domain, &level); + ret = scmi_perf_level_get(handle, domain, &level, poll); if (!ret) *freq = level * dom->mult_factor; diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 5d63da9435ba..b458c87b866c 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -98,18 +98,18 @@ struct scmi_perf_ops { int (*limits_get)(const struct scmi_handle *handle, u32 domain, u32 *max_perf, u32 *min_perf); int (*level_set)(const struct scmi_handle *handle, u32 domain, - u32 level); + u32 level, bool poll); int (*level_get)(const struct scmi_handle *handle, u32 domain, - u32 *level); + u32 *level, bool poll); int (*device_domain_id)(struct device *dev); int (*get_transition_latency)(const struct scmi_handle *handle, struct device *dev); int (*add_opps_to_device)(const struct scmi_handle *handle, struct device *dev); int (*freq_set)(const struct scmi_handle *handle,
u32 domain, - unsigned long rate); + unsigned long rate, bool poll); int (*freq_get)(const struct scmi_handle *handle, u32 domain, - unsigned long *rate); + unsigned long *rate, bool poll); }; /** -- cgit v1.2.3 From fbc4d81ad28545714a1e367963aaf2ffd9be5239 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Mon, 31 Jul 2017 15:25:32 +0100 Subject: firmware: arm_scmi: refactor in preparation to support per-protocol channels In order to support per-protocol channels if available, we need to factor all the mailbox channel information (Tx/Rx payload and channel handle) out of the main SCMI instance information structure. This patch refactors the existing channel information into a separate chan_info structure. Cc: Arnd Bergmann Cc: Greg Kroah-Hartman Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/driver.c | 90 ++++++++++++++++++++++++-------------- 1 file changed, 56 insertions(+), 34 deletions(-) diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index cf8a1719d425..82171ec2b7a5 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -90,6 +90,22 @@ struct scmi_desc { int max_msg_size; }; +/** + * struct scmi_chan_info - Structure representing a SCMI channel information + * + * @cl: Mailbox Client + * @chan: Transmit/Receive mailbox channel + * @payload: Transmit/Receive mailbox channel payload area + * @dev: Reference to device in the SCMI hierarchy corresponding to this + * channel + */ +struct scmi_chan_info { + struct mbox_client cl; + struct mbox_chan *chan; + void __iomem *payload; + struct device *dev; +}; + /** * struct scmi_info - Structure representing a SCMI instance * @@ -98,10 +114,8 @@ struct scmi_desc { * @handle: Instance of SCMI handle to send to clients * @version: SCMI revision information containing protocol version, * implementation version and (sub-)vendor identification.
- * @cl: Mailbox Client - * @tx_chan: Transmit mailbox channel - * @tx_payload: Transmit mailbox channel payload area * @minfo: Message info + * @tx_cinfo: Reference to SCMI channel information * @protocols_imp: list of protocols implemented, currently maximum of * MAX_PROTOCOLS_IMP elements allocated by the base protocol * @node: list head @@ -112,16 +126,14 @@ struct scmi_info { const struct scmi_desc *desc; struct scmi_revision_info version; struct scmi_handle handle; - struct mbox_client cl; - struct mbox_chan *tx_chan; - void __iomem *tx_payload; struct scmi_xfers_info minfo; + struct scmi_chan_info *tx_cinfo; u8 *protocols_imp; struct list_head node; int users; }; -#define client_to_scmi_info(c) container_of(c, struct scmi_info, cl) +#define client_to_scmi_chan_info(c) container_of(c, struct scmi_chan_info, cl) #define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle) /* @@ -204,10 +216,11 @@ static void scmi_rx_callback(struct mbox_client *cl, void *m) { u16 xfer_id; struct scmi_xfer *xfer; - struct scmi_info *info = client_to_scmi_info(cl); + struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl); + struct device *dev = cinfo->dev; + struct scmi_info *info = dev_get_drvdata(dev); struct scmi_xfers_info *minfo = &info->minfo; - struct device *dev = info->dev; - struct scmi_shared_mem __iomem *mem = info->tx_payload; + struct scmi_shared_mem __iomem *mem = cinfo->payload; xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header)); @@ -258,8 +271,8 @@ static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr) static void scmi_tx_prepare(struct mbox_client *cl, void *m) { struct scmi_xfer *t = m; - struct scmi_info *info = client_to_scmi_info(cl); - struct scmi_shared_mem __iomem *mem = info->tx_payload; + struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl); + struct scmi_shared_mem __iomem *mem = cinfo->payload; /* Mark channel busy + clear error */ iowrite32(0x0, &mem->channel_status); @@ -338,27 +351,27 @@ void scmi_one_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer) } static bool -scmi_xfer_poll_done(const struct scmi_info *info, struct scmi_xfer *xfer) +scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer) { - struct scmi_shared_mem *mem = info->tx_payload; - u16 xfer_id = MSG_XTRACT_TOKEN(le32_to_cpu(mem->msg_header)); + struct scmi_shared_mem __iomem *mem = cinfo->payload; + u16 xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header)); if (xfer->hdr.seq != xfer_id) return false; - return le32_to_cpu(mem->channel_status) & + return ioread32(&mem->channel_status) & (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR | SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); } #define SCMI_MAX_POLL_TO_NS (100 * NSEC_PER_USEC) -static bool scmi_xfer_done_no_timeout(const struct scmi_info *info, +static bool scmi_xfer_done_no_timeout(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer, ktime_t stop) { ktime_t __cur = ktime_get(); - return scmi_xfer_poll_done(info, xfer) || ktime_after(__cur, stop); + return scmi_xfer_poll_done(cinfo, xfer) || ktime_after(__cur, stop); } /** @@ -377,8 +390,9 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer) int timeout; struct scmi_info *info = handle_to_scmi_info(handle); struct device *dev = info->dev; + struct scmi_chan_info *cinfo = info->tx_cinfo; - ret = mbox_send_message(info->tx_chan, xfer); + ret = mbox_send_message(cinfo->chan, xfer); if (ret < 0) { dev_dbg(dev, "mbox send fail %d\n", ret); return ret; @@ -390,10 +404,10 @@ int scmi_do_xfer(const struct scmi_handle *handle, 
struct scmi_xfer *xfer) if (xfer->hdr.poll_completion) { ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS); - spin_until_cond(scmi_xfer_done_no_timeout(info, xfer, stop)); + spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop)); if (ktime_before(ktime_get(), stop)) - scmi_fetch_response(xfer, info->tx_payload); + scmi_fetch_response(xfer, cinfo->payload); else ret = -ETIMEDOUT; } else { @@ -415,7 +429,7 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer) * Unfortunately, we have to kick the mailbox framework after we have * received our message. */ - mbox_client_txdone(info->tx_chan, ret); + mbox_client_txdone(cinfo->chan, ret); return ret; } @@ -643,11 +657,11 @@ static int scmi_mailbox_check(struct device_node *np) return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, &arg); } -static int scmi_mbox_free_channel(struct scmi_info *info) +static int scmi_mbox_free_channel(struct scmi_chan_info *cinfo) { - if (!IS_ERR_OR_NULL(info->tx_chan)) { - mbox_free_channel(info->tx_chan); - info->tx_chan = NULL; + if (!IS_ERR_OR_NULL(cinfo->chan)) { + mbox_free_channel(cinfo->chan); + cinfo->chan = NULL; } return 0; @@ -667,7 +681,7 @@ static int scmi_remove(struct platform_device *pdev) if (!ret) /* Safe to free channels since no more users */ - return scmi_mbox_free_channel(info); + return scmi_mbox_free_channel(info->tx_cinfo); return ret; } @@ -679,9 +693,17 @@ static inline int scmi_mbox_chan_setup(struct scmi_info *info) resource_size_t size; struct device *dev = info->dev; struct device_node *shmem, *np = dev->of_node; + struct scmi_chan_info *cinfo; struct mbox_client *cl; - cl = &info->cl; + cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL); + if (!cinfo) + return -ENOMEM; + + info->tx_cinfo = cinfo; + cinfo->dev = dev; + + cl = &cinfo->cl; cl->dev = dev; cl->rx_callback = scmi_rx_callback; cl->tx_prepare = scmi_tx_prepare; @@ -697,16 +719,16 @@ static inline int scmi_mbox_chan_setup(struct scmi_info *info) } size = resource_size(&res); - info->tx_payload = devm_ioremap(dev, res.start, size); - if (!info->tx_payload) { + cinfo->payload = devm_ioremap(info->dev, res.start, size); + if (!cinfo->payload) { dev_err(dev, "failed to ioremap SCMI Tx payload\n"); return -EADDRNOTAVAIL; } /* Transmit channel is first entry i.e. index 0 */ - info->tx_chan = mbox_request_channel(cl, 0); - if (IS_ERR(info->tx_chan)) { - ret = PTR_ERR(info->tx_chan); + cinfo->chan = mbox_request_channel(cl, 0); + if (IS_ERR(cinfo->chan)) { + ret = PTR_ERR(cinfo->chan); if (ret != -EPROBE_DEFER) dev_err(dev, "failed to request SCMI Tx mailbox\n"); return ret; @@ -774,7 +796,7 @@ static int scmi_probe(struct platform_device *pdev) ret = scmi_base_protocol_init(handle); if (ret) { dev_err(dev, "unable to communicate with SCMI(%d)\n", ret); - scmi_mbox_free_channel(info); + scmi_mbox_free_channel(info->tx_cinfo); return ret; } -- cgit v1.2.3 From 907b6d14911db047e6e29979895d29daf2ec1e5f Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Mon, 31 Jul 2017 15:43:27 +0100 Subject: firmware: arm_scmi: add per-protocol channels support using idr objects In order to maintain the channel information per protocol, we need some sort of list or hashtable to hold all this information. IDR provides sparse array mapping of small integer ID numbers onto arbitrary pointers. In this case the arbitrary pointers can be pointers to the channel information. This patch adds support for per-protocol channels using those idr objects. 
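As a rough editorial sketch of that scheme (not part of the patch; the helper names scmi_register_channel() and scmi_lookup_channel() are invented for this illustration), the mapping boils down to keying each channel info by its protocol id in an IDR:

#include <linux/gfp.h>
#include <linux/idr.h>

/* Hypothetical helpers illustrating the IDR usage pattern adopted below. */
static int scmi_register_channel(struct idr *tx_idr,
				 struct scmi_chan_info *cinfo, int prot_id)
{
	/* Reserve exactly the slot [prot_id, prot_id + 1) for this channel */
	int id = idr_alloc(tx_idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);

	return id == prot_id ? 0 : -EINVAL;
}

static struct scmi_chan_info *scmi_lookup_channel(struct idr *tx_idr,
						  u8 prot_id)
{
	/* Sparse lookup; returns NULL if no channel was registered */
	return idr_find(tx_idr, prot_id);
}

A protocol with its own mailbox gets a dedicated slot under its protocol id, while one without reuses the base protocol's channel info, so the transmit path can always resolve a channel with a single idr_find() on the message's protocol id, as the idr_alloc()/idr_find() pair in the patch below does.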
Cc: Arnd Bergmann Cc: Greg Kroah-Hartman Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/driver.c | 54 +++++++++++++++++++++++++++++--------- 1 file changed, 42 insertions(+), 12 deletions(-) diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 82171ec2b7a5..14b147135a0c 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -104,6 +104,7 @@ struct scmi_chan_info { struct mbox_chan *chan; void __iomem *payload; struct device *dev; + struct scmi_handle *handle; }; /** @@ -115,7 +116,7 @@ struct scmi_chan_info { * @version: SCMI revision information containing protocol version, * implementation version and (sub-)vendor identification. * @minfo: Message info - * @tx_cinfo: Reference to SCMI channel information + * @tx_idr: IDR object to map protocol id to channel info pointer * @protocols_imp: list of protocols implemented, currently maximum of * MAX_PROTOCOLS_IMP elements allocated by the base protocol * @node: list head @@ -127,7 +128,7 @@ struct scmi_info { struct scmi_revision_info version; struct scmi_handle handle; struct scmi_xfers_info minfo; - struct scmi_chan_info *tx_cinfo; + struct idr tx_idr; u8 *protocols_imp; struct list_head node; int users; @@ -218,7 +219,7 @@ static void scmi_rx_callback(struct mbox_client *cl, void *m) struct scmi_xfer *xfer; struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl); struct device *dev = cinfo->dev; - struct scmi_info *info = dev_get_drvdata(dev); + struct scmi_info *info = handle_to_scmi_info(cinfo->handle); struct scmi_xfers_info *minfo = &info->minfo; struct scmi_shared_mem __iomem *mem = cinfo->payload; @@ -390,7 +391,11 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer) int timeout; struct scmi_info *info = handle_to_scmi_info(handle); struct device *dev = info->dev; - struct scmi_chan_info *cinfo = info->tx_cinfo; + struct scmi_chan_info *cinfo; + + cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id); + if (unlikely(!cinfo)) + return -EINVAL; ret = mbox_send_message(cinfo->chan, xfer); if (ret < 0) { @@ -657,13 +662,18 @@ static int scmi_mailbox_check(struct device_node *np) return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, &arg); } -static int scmi_mbox_free_channel(struct scmi_chan_info *cinfo) +static int scmi_mbox_free_channel(int id, void *p, void *data) { + struct scmi_chan_info *cinfo = p; + struct idr *idr = data; + if (!IS_ERR_OR_NULL(cinfo->chan)) { mbox_free_channel(cinfo->chan); cinfo->chan = NULL; } + idr_remove(idr, id); + return 0; } @@ -671,6 +681,7 @@ static int scmi_remove(struct platform_device *pdev) { int ret = 0; struct scmi_info *info = platform_get_drvdata(pdev); + struct idr *idr = &info->tx_idr; mutex_lock(&scmi_list_mutex); if (info->users) @@ -679,28 +690,34 @@ static int scmi_remove(struct platform_device *pdev) list_del(&info->node); mutex_unlock(&scmi_list_mutex); - if (!ret) + if (!ret) { /* Safe to free channels since no more users */ - return scmi_mbox_free_channel(info->tx_cinfo); + ret = idr_for_each(idr, scmi_mbox_free_channel, idr); + idr_destroy(&info->tx_idr); + } return ret; } -static inline int scmi_mbox_chan_setup(struct scmi_info *info) +static inline int +scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id) { int ret; struct resource res; resource_size_t size; - struct device *dev = info->dev; struct device_node *shmem, *np = dev->of_node; struct scmi_chan_info *cinfo; struct mbox_client *cl; + if (scmi_mailbox_check(np)) { + cinfo = 
idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE); + goto idr_alloc; + } + cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL); if (!cinfo) return -ENOMEM; - info->tx_cinfo = cinfo; cinfo->dev = dev; cl = &cinfo->cl; @@ -734,6 +751,14 @@ static inline int scmi_mbox_chan_setup(struct scmi_info *info) return ret; } +idr_alloc: + ret = idr_alloc(&info->tx_idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL); + if (ret != prot_id) { + dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret); + return ret; + } + + cinfo->handle = &info->handle; return 0; } @@ -750,6 +775,11 @@ scmi_create_protocol_device(struct device_node *np, struct scmi_info *info, return; } + if (scmi_mbox_chan_setup(info, &sdev->dev, prot_id)) { + dev_err(&sdev->dev, "failed to setup transport\n"); + scmi_device_destroy(sdev); + } + /* setup handle now as the transport is ready */ scmi_set_handle(sdev); } @@ -784,19 +814,19 @@ static int scmi_probe(struct platform_device *pdev) return ret; platform_set_drvdata(pdev, info); + idr_init(&info->tx_idr); handle = &info->handle; handle->dev = info->dev; handle->version = &info->version; - ret = scmi_mbox_chan_setup(info); + ret = scmi_mbox_chan_setup(info, dev, SCMI_PROTOCOL_BASE); if (ret) return ret; ret = scmi_base_protocol_init(handle); if (ret) { dev_err(dev, "unable to communicate with SCMI(%d)\n", ret); - scmi_mbox_free_channel(info->tx_cinfo); return ret; } -- cgit v1.2.3 From 898216c97ed2ebfffda659ce12388da43534de6c Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Wed, 14 Jun 2017 13:48:26 +0100 Subject: firmware: arm_scmi: add device power domain support using genpd This patch hooks up the support for device power domain provided by SCMI using the Linux generic power domain infrastructure. Cc: Kevin Hilman Reviewed-by: Ulf Hansson Signed-off-by: Sudeep Holla --- drivers/firmware/Kconfig | 13 +++ drivers/firmware/arm_scmi/Makefile | 1 + drivers/firmware/arm_scmi/scmi_pm_domain.c | 129 +++++++++++++++++++++++++++++ 3 files changed, 143 insertions(+) create mode 100644 drivers/firmware/arm_scmi/scmi_pm_domain.c diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 704961e0473a..6e83880046d7 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -40,6 +40,19 @@ config ARM_SCMI_PROTOCOL This protocol library provides interface for all the client drivers making use of the features offered by the SCMI. +config ARM_SCMI_POWER_DOMAIN + tristate "SCMI power domain driver" + depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF) + default y + select PM_GENERIC_DOMAINS if PM + help + This enables support for the SCMI power domains which can be + enabled or disabled via the SCP firmware + + This driver can also be built as a module. If so, the module + will be called scmi_pm_domain. Note this may needed early in boot + before rootfs may be available. 
+ config ARM_SCPI_PROTOCOL tristate "ARM System Control and Power Interface (SCPI) Message Protocol" depends on ARM || ARM64 || COMPILE_TEST diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index 3236890905b9..99e36c580fbc 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -2,3 +2,4 @@ obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o scmi-bus-y = bus.o scmi-driver-y = driver.o scmi-protocols-y = base.o clock.o perf.o power.o sensors.o +obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c new file mode 100644 index 000000000000..87f737e01473 --- /dev/null +++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SCMI Generic power domain support. + * + * Copyright (C) 2018 ARM Ltd. + */ + +#include +#include +#include +#include +#include + +struct scmi_pm_domain { + struct generic_pm_domain genpd; + const struct scmi_handle *handle; + const char *name; + u32 domain; +}; + +#define to_scmi_pd(gpd) container_of(gpd, struct scmi_pm_domain, genpd) + +static int scmi_pd_power(struct generic_pm_domain *domain, bool power_on) +{ + int ret; + u32 state, ret_state; + struct scmi_pm_domain *pd = to_scmi_pd(domain); + const struct scmi_power_ops *ops = pd->handle->power_ops; + + if (power_on) + state = SCMI_POWER_STATE_GENERIC_ON; + else + state = SCMI_POWER_STATE_GENERIC_OFF; + + ret = ops->state_set(pd->handle, pd->domain, state); + if (!ret) + ret = ops->state_get(pd->handle, pd->domain, &ret_state); + if (!ret && state != ret_state) + return -EIO; + + return ret; +} + +static int scmi_pd_power_on(struct generic_pm_domain *domain) +{ + return scmi_pd_power(domain, true); +} + +static int scmi_pd_power_off(struct generic_pm_domain *domain) +{ + return scmi_pd_power(domain, false); +} + +static int scmi_pm_domain_probe(struct scmi_device *sdev) +{ + int num_domains, i; + struct device *dev = &sdev->dev; + struct device_node *np = dev->of_node; + struct scmi_pm_domain *scmi_pd; + struct genpd_onecell_data *scmi_pd_data; + struct generic_pm_domain **domains; + const struct scmi_handle *handle = sdev->handle; + + if (!handle || !handle->power_ops) + return -ENODEV; + + num_domains = handle->power_ops->num_domains_get(handle); + if (num_domains < 0) { + dev_err(dev, "number of domains not found\n"); + return num_domains; + } + + scmi_pd = devm_kcalloc(dev, num_domains, sizeof(*scmi_pd), GFP_KERNEL); + if (!scmi_pd) + return -ENOMEM; + + scmi_pd_data = devm_kzalloc(dev, sizeof(*scmi_pd_data), GFP_KERNEL); + if (!scmi_pd_data) + return -ENOMEM; + + domains = devm_kcalloc(dev, num_domains, sizeof(*domains), GFP_KERNEL); + if (!domains) + return -ENOMEM; + + for (i = 0; i < num_domains; i++, scmi_pd++) { + u32 state; + + domains[i] = &scmi_pd->genpd; + + scmi_pd->domain = i; + scmi_pd->handle = handle; + scmi_pd->name = handle->power_ops->name_get(handle, i); + scmi_pd->genpd.name = scmi_pd->name; + scmi_pd->genpd.power_off = scmi_pd_power_off; + scmi_pd->genpd.power_on = scmi_pd_power_on; + + if (handle->power_ops->state_get(handle, i, &state)) { + dev_warn(dev, "failed to get state for domain %d\n", i); + continue; + } + + pm_genpd_init(&scmi_pd->genpd, NULL, + state == SCMI_POWER_STATE_GENERIC_OFF); + } + + scmi_pd_data->domains = domains; + scmi_pd_data->num_domains = num_domains; + + of_genpd_add_provider_onecell(np, scmi_pd_data); + + return 0; +} + +static const struct 
scmi_device_id scmi_id_table[] = { + { SCMI_PROTOCOL_POWER }, + { }, +}; +MODULE_DEVICE_TABLE(scmi, scmi_id_table); + +static struct scmi_driver scmi_power_domain_driver = { + .name = "scmi-power-domain", + .probe = scmi_pm_domain_probe, + .id_table = scmi_id_table, +}; +module_scmi_driver(scmi_power_domain_driver); + +MODULE_AUTHOR("Sudeep Holla "); +MODULE_DESCRIPTION("ARM SCMI power domain driver"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 6d6a1d82eaef705f150c441350559bf5daccb533 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Tue, 13 Jun 2017 17:19:36 +0100 Subject: clk: add support for clocks provided by SCMI On some ARM based systems, a separate Cortex-M based System Control Processor(SCP) provides the overall power, clock, reset and system control. System Control and Management Interface(SCMI) Message Protocol is defined for the communication between the Application Cores(AP) and the SCP. This patch adds support for the clocks provided by SCP using SCMI protocol. Cc: linux-clk@vger.kernel.org Cc: Michael Turquette Acked-by: Stephen Boyd Signed-off-by: Sudeep Holla --- MAINTAINERS | 2 +- drivers/clk/Kconfig | 10 +++ drivers/clk/Makefile | 1 + drivers/clk/clk-scmi.c | 202 +++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 214 insertions(+), 1 deletion(-) create mode 100644 drivers/clk/clk-scmi.c diff --git a/MAINTAINERS b/MAINTAINERS index 7cede6e7dfed..d6e9e3eae5ac 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13384,7 +13384,7 @@ M: Sudeep Holla L: linux-arm-kernel@lists.infradead.org S: Maintained F: Documentation/devicetree/bindings/arm/arm,sc[mp]i.txt -F: drivers/clk/clk-scpi.c +F: drivers/clk/clk-sc[mp]i.c F: drivers/cpufreq/scpi-cpufreq.c F: drivers/firmware/arm_scpi.c F: drivers/firmware/arm_scmi/ diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 98ce9fc6e6c0..7ae23b25b406 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -62,6 +62,16 @@ config COMMON_CLK_HI655X multi-function device has one fixed-rate oscillator, clocked at 32KHz. +config COMMON_CLK_SCMI + tristate "Clock driver controlled via SCMI interface" + depends on ARM_SCMI_PROTOCOL || COMPILE_TEST + ---help--- + This driver provides support for clocks that are controlled + by firmware that implements the SCMI interface. + + This driver uses SCMI Message Protocol to interact with the + firmware providing all the clock controls. + config COMMON_CLK_SCPI tristate "Clock driver controlled via SCPI interface" depends on ARM_SCPI_PROTOCOL || COMPILE_TEST diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index 71ec41e6364f..6605513eaa94 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -41,6 +41,7 @@ obj-$(CONFIG_CLK_QORIQ) += clk-qoriq.o obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o obj-$(CONFIG_COMMON_CLK_HI655X) += clk-hi655x.o obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o +obj-$(CONFIG_COMMON_CLK_SCMI) += clk-scmi.o obj-$(CONFIG_COMMON_CLK_SCPI) += clk-scpi.o obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o obj-$(CONFIG_COMMON_CLK_SI514) += clk-si514.o diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c new file mode 100644 index 000000000000..26f1476d4a79 --- /dev/null +++ b/drivers/clk/clk-scmi.c @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Power Interface (SCMI) Protocol based clock driver + * + * Copyright (C) 2018 ARM Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +struct scmi_clk { + u32 id; + struct clk_hw hw; + const struct scmi_clock_info *info; + const struct scmi_handle *handle; +}; + +#define to_scmi_clk(clk) container_of(clk, struct scmi_clk, hw) + +static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + int ret; + u64 rate; + struct scmi_clk *clk = to_scmi_clk(hw); + + ret = clk->handle->clk_ops->rate_get(clk->handle, clk->id, &rate); + if (ret) + return 0; + return rate; +} + +static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + int step; + u64 fmin, fmax, ftmp; + struct scmi_clk *clk = to_scmi_clk(hw); + + /* + * We can't figure out what rate it will be, so just return the + * rate back to the caller. scmi_clk_recalc_rate() will be called + * after the rate is set and we'll know what rate the clock is + * running at then. + */ + if (clk->info->rate_discrete) + return rate; + + fmin = clk->info->range.min_rate; + fmax = clk->info->range.max_rate; + if (rate <= fmin) + return fmin; + else if (rate >= fmax) + return fmax; + + ftmp = rate - fmin; + ftmp += clk->info->range.step_size - 1; /* to round up */ + step = do_div(ftmp, clk->info->range.step_size); + + return step * clk->info->range.step_size + fmin; +} + +static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct scmi_clk *clk = to_scmi_clk(hw); + + return clk->handle->clk_ops->rate_set(clk->handle, clk->id, 0, rate); +} + +static int scmi_clk_enable(struct clk_hw *hw) +{ + struct scmi_clk *clk = to_scmi_clk(hw); + + return clk->handle->clk_ops->enable(clk->handle, clk->id); +} + +static void scmi_clk_disable(struct clk_hw *hw) +{ + struct scmi_clk *clk = to_scmi_clk(hw); + + clk->handle->clk_ops->disable(clk->handle, clk->id); +} + +static const struct clk_ops scmi_clk_ops = { + .recalc_rate = scmi_clk_recalc_rate, + .round_rate = scmi_clk_round_rate, + .set_rate = scmi_clk_set_rate, + /* + * We can't provide enable/disable callback as we can't perform the same + * in atomic context. Since the clock framework provides standard API + * clk_prepare_enable that helps cases using clk_enable in non-atomic + * context, it should be fine providing prepare/unprepare. 
+ */ + .prepare = scmi_clk_enable, + .unprepare = scmi_clk_disable, +}; + +static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk) +{ + int ret; + struct clk_init_data init = { + .flags = CLK_GET_RATE_NOCACHE, + .num_parents = 0, + .ops = &scmi_clk_ops, + .name = sclk->info->name, + }; + + sclk->hw.init = &init; + ret = devm_clk_hw_register(dev, &sclk->hw); + if (!ret) + clk_hw_set_rate_range(&sclk->hw, sclk->info->range.min_rate, + sclk->info->range.max_rate); + return ret; +} + +static int scmi_clocks_probe(struct scmi_device *sdev) +{ + int idx, count, err; + struct clk_hw **hws; + struct clk_hw_onecell_data *clk_data; + struct device *dev = &sdev->dev; + struct device_node *np = dev->of_node; + const struct scmi_handle *handle = sdev->handle; + + if (!handle || !handle->clk_ops) + return -ENODEV; + + count = handle->clk_ops->count_get(handle); + if (count < 0) { + dev_err(dev, "%s: invalid clock output count\n", np->name); + return -EINVAL; + } + + clk_data = devm_kzalloc(dev, sizeof(*clk_data) + + sizeof(*clk_data->hws) * count, GFP_KERNEL); + if (!clk_data) + return -ENOMEM; + + clk_data->num = count; + hws = clk_data->hws; + + for (idx = 0; idx < count; idx++) { + struct scmi_clk *sclk; + + sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL); + if (!sclk) + return -ENOMEM; + + sclk->info = handle->clk_ops->info_get(handle, idx); + if (!sclk->info) { + dev_dbg(dev, "invalid clock info for idx %d\n", idx); + continue; + } + + sclk->id = idx; + sclk->handle = handle; + + err = scmi_clk_ops_init(dev, sclk); + if (err) { + dev_err(dev, "failed to register clock %d\n", idx); + devm_kfree(dev, sclk); + hws[idx] = NULL; + } else { + dev_dbg(dev, "Registered clock:%s\n", sclk->info->name); + hws[idx] = &sclk->hw; + } + } + + return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data); +} + +static void scmi_clocks_remove(struct scmi_device *sdev) +{ + struct device *dev = &sdev->dev; + struct device_node *np = dev->of_node; + + of_clk_del_provider(np); +} + +static const struct scmi_device_id scmi_id_table[] = { + { SCMI_PROTOCOL_CLOCK }, + { }, +}; +MODULE_DEVICE_TABLE(scmi, scmi_id_table); + +static struct scmi_driver scmi_clocks_driver = { + .name = "scmi-clocks", + .probe = scmi_clocks_probe, + .remove = scmi_clocks_remove, + .id_table = scmi_id_table, +}; +module_scmi_driver(scmi_clocks_driver); + +MODULE_AUTHOR("Sudeep Holla "); +MODULE_DESCRIPTION("ARM SCMI clock driver"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From d57538004b2e57be6a5d8583b65d1b049245abf7 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Wed, 27 Sep 2017 16:20:50 +0100 Subject: hwmon: (core) Add hwmon_max to hwmon_sensor_types enumeration It's useful to know the maximum types of sensor supported by hwmon framework. It can be used to allocate some data structures when sorting the monitors based on their type. This will be used by scmi hwmon support. 
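Since hwmon_max sits one past the last valid sensor type, it can bound loops and size per-type bookkeeping arrays directly. A small illustration of the idea, with made-up names (the real use appears in the scmi-hwmon driver later in this series):

#include <linux/hwmon.h>

struct sensor_counts {
	unsigned int nr[hwmon_max];	/* one counter per sensor type */
};

static unsigned int total_sensors(const struct sensor_counts *c)
{
	enum hwmon_sensor_types t;
	unsigned int sum = 0;

	for (t = 0; t < hwmon_max; t++)
		sum += c->nr[t];

	return sum;
}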
Cc: linux-hwmon@vger.kernel.org Acked-by: Guenter Roeck Signed-off-by: Sudeep Holla --- include/linux/hwmon.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index ceb751987c40..e5fd2707b6df 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -29,6 +29,7 @@ enum hwmon_sensor_types { hwmon_humidity, hwmon_fan, hwmon_pwm, + hwmon_max, }; enum hwmon_chip_attributes { -- cgit v1.2.3 From b23688aefb8b2c5dd024c172f3143e8a99d2cf17 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Thu, 15 Jun 2017 10:53:17 +0100 Subject: hwmon: add support for sensors exported via ARM SCMI Create a driver to add support for SoC sensors exported by the System Control Processor (SCP) via the System Control and Management Interface (SCMI). The supported sensor types is one of voltage, temperature, current, and power. The sensor labels and values provided by the SCP are exported via the hwmon sysfs interface. Cc: linux-hwmon@vger.kernel.org Acked-by: Guenter Roeck Signed-off-by: Sudeep Holla --- drivers/hwmon/Kconfig | 12 +++ drivers/hwmon/Makefile | 1 + drivers/hwmon/scmi-hwmon.c | 225 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 238 insertions(+) create mode 100644 drivers/hwmon/scmi-hwmon.c diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index ef23553ff5cb..033e57366d56 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -317,6 +317,18 @@ config SENSORS_APPLESMC Say Y here if you have an applicable laptop and want to experience the awesome power of applesmc. +config SENSORS_ARM_SCMI + tristate "ARM SCMI Sensors" + depends on ARM_SCMI_PROTOCOL + depends on THERMAL || !THERMAL_OF + help + This driver provides support for temperature, voltage, current + and power sensors available on SCMI based platforms. The actual + number and type of sensors exported depend on the platform. + + This driver can also be built as a module. If so, the module + will be called scmi-hwmon. + config SENSORS_ARM_SCPI tristate "ARM SCPI Sensors" depends on ARM_SCPI_PROTOCOL diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index f814b4ace138..e7d52a36e6c4 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -46,6 +46,7 @@ obj-$(CONFIG_SENSORS_ADT7462) += adt7462.o obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o obj-$(CONFIG_SENSORS_ADT7475) += adt7475.o obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o +obj-$(CONFIG_SENSORS_ARM_SCMI) += scmi-hwmon.o obj-$(CONFIG_SENSORS_ARM_SCPI) += scpi-hwmon.o obj-$(CONFIG_SENSORS_ASC7621) += asc7621.o obj-$(CONFIG_SENSORS_ASPEED) += aspeed-pwm-tacho.o diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c new file mode 100644 index 000000000000..32e750373ced --- /dev/null +++ b/drivers/hwmon/scmi-hwmon.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface(SCMI) based hwmon sensor driver + * + * Copyright (C) 2018 ARM Ltd. 
+ * Sudeep Holla + */ + +#include +#include +#include +#include +#include +#include + +struct scmi_sensors { + const struct scmi_handle *handle; + const struct scmi_sensor_info **info[hwmon_max]; +}; + +static int scmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + int ret; + u64 value; + const struct scmi_sensor_info *sensor; + struct scmi_sensors *scmi_sensors = dev_get_drvdata(dev); + const struct scmi_handle *h = scmi_sensors->handle; + + sensor = *(scmi_sensors->info[type] + channel); + ret = h->sensor_ops->reading_get(h, sensor->id, false, &value); + if (!ret) + *val = value; + + return ret; +} + +static int +scmi_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, const char **str) +{ + const struct scmi_sensor_info *sensor; + struct scmi_sensors *scmi_sensors = dev_get_drvdata(dev); + + sensor = *(scmi_sensors->info[type] + channel); + *str = sensor->name; + + return 0; +} + +static umode_t +scmi_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct scmi_sensor_info *sensor; + const struct scmi_sensors *scmi_sensors = drvdata; + + sensor = *(scmi_sensors->info[type] + channel); + if (sensor && sensor->name) + return S_IRUGO; + + return 0; +} + +static const struct hwmon_ops scmi_hwmon_ops = { + .is_visible = scmi_hwmon_is_visible, + .read = scmi_hwmon_read, + .read_string = scmi_hwmon_read_string, +}; + +static struct hwmon_chip_info scmi_chip_info = { + .ops = &scmi_hwmon_ops, + .info = NULL, +}; + +static int scmi_hwmon_add_chan_info(struct hwmon_channel_info *scmi_hwmon_chan, + struct device *dev, int num, + enum hwmon_sensor_types type, u32 config) +{ + int i; + u32 *cfg = devm_kcalloc(dev, num + 1, sizeof(*cfg), GFP_KERNEL); + + if (!cfg) + return -ENOMEM; + + scmi_hwmon_chan->type = type; + scmi_hwmon_chan->config = cfg; + for (i = 0; i < num; i++, cfg++) + *cfg = config; + + return 0; +} + +static enum hwmon_sensor_types scmi_types[] = { + [TEMPERATURE_C] = hwmon_temp, + [VOLTAGE] = hwmon_in, + [CURRENT] = hwmon_curr, + [POWER] = hwmon_power, + [ENERGY] = hwmon_energy, +}; + +static u32 hwmon_attributes[] = { + [hwmon_chip] = HWMON_C_REGISTER_TZ, + [hwmon_temp] = HWMON_T_INPUT | HWMON_T_LABEL, + [hwmon_in] = HWMON_I_INPUT | HWMON_I_LABEL, + [hwmon_curr] = HWMON_C_INPUT | HWMON_C_LABEL, + [hwmon_power] = HWMON_P_INPUT | HWMON_P_LABEL, + [hwmon_energy] = HWMON_E_INPUT | HWMON_E_LABEL, +}; + +static int scmi_hwmon_probe(struct scmi_device *sdev) +{ + int i, idx; + u16 nr_sensors; + enum hwmon_sensor_types type; + struct scmi_sensors *scmi_sensors; + const struct scmi_sensor_info *sensor; + int nr_count[hwmon_max] = {0}, nr_types = 0; + const struct hwmon_chip_info *chip_info; + struct device *hwdev, *dev = &sdev->dev; + struct hwmon_channel_info *scmi_hwmon_chan; + const struct hwmon_channel_info **ptr_scmi_ci; + const struct scmi_handle *handle = sdev->handle; + + if (!handle || !handle->sensor_ops) + return -ENODEV; + + nr_sensors = handle->sensor_ops->count_get(handle); + if (!nr_sensors) + return -EIO; + + scmi_sensors = devm_kzalloc(dev, sizeof(*scmi_sensors), GFP_KERNEL); + if (!scmi_sensors) + return -ENOMEM; + + scmi_sensors->handle = handle; + + for (i = 0; i < nr_sensors; i++) { + sensor = handle->sensor_ops->info_get(handle, i); + if (!sensor) + return PTR_ERR(sensor); + + switch (sensor->type) { + case TEMPERATURE_C: + case VOLTAGE: + case CURRENT: + case POWER: + case ENERGY: + type = scmi_types[sensor->type]; + if 
(!nr_count[type]) + nr_types++; + nr_count[type]++; + break; + } + } + + if (nr_count[hwmon_temp]) + nr_count[hwmon_chip]++, nr_types++; + + scmi_hwmon_chan = devm_kcalloc(dev, nr_types, sizeof(*scmi_hwmon_chan), + GFP_KERNEL); + if (!scmi_hwmon_chan) + return -ENOMEM; + + ptr_scmi_ci = devm_kcalloc(dev, nr_types + 1, sizeof(*ptr_scmi_ci), + GFP_KERNEL); + if (!ptr_scmi_ci) + return -ENOMEM; + + scmi_chip_info.info = ptr_scmi_ci; + chip_info = &scmi_chip_info; + + for (type = 0; type < hwmon_max && nr_count[type]; type++) { + scmi_hwmon_add_chan_info(scmi_hwmon_chan, dev, nr_count[type], + type, hwmon_attributes[type]); + *ptr_scmi_ci++ = scmi_hwmon_chan++; + + scmi_sensors->info[type] = + devm_kcalloc(dev, nr_count[type], + sizeof(*scmi_sensors->info), GFP_KERNEL); + if (!scmi_sensors->info[type]) + return -ENOMEM; + } + + for (i = nr_sensors - 1; i >= 0 ; i--) { + sensor = handle->sensor_ops->info_get(handle, i); + if (!sensor) + continue; + + switch (sensor->type) { + case TEMPERATURE_C: + case VOLTAGE: + case CURRENT: + case POWER: + case ENERGY: + type = scmi_types[sensor->type]; + idx = --nr_count[type]; + *(scmi_sensors->info[type] + idx) = sensor; + break; + } + } + + hwdev = devm_hwmon_device_register_with_info(dev, "scmi_sensors", + scmi_sensors, chip_info, + NULL); + + return PTR_ERR_OR_ZERO(hwdev); +} + +static const struct scmi_device_id scmi_id_table[] = { + { SCMI_PROTOCOL_SENSOR }, + { }, +}; +MODULE_DEVICE_TABLE(scmi, scmi_id_table); + +static struct scmi_driver scmi_hwmon_drv = { + .name = "scmi-hwmon", + .probe = scmi_hwmon_probe, + .id_table = scmi_id_table, +}; +module_scmi_driver(scmi_hwmon_drv); + +MODULE_AUTHOR("Sudeep Holla "); +MODULE_DESCRIPTION("ARM SCMI HWMON interface driver"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 99d6bdf3387734d75e3e34e94a58b8a355b7a9c8 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Sun, 18 Jun 2017 09:38:11 +0100 Subject: cpufreq: add support for CPU DVFS based on SCMI message protocol On some ARM based systems, a separate Cortex-M based System Control Processor(SCP) provides the overall power, clock, reset and system control including CPU DVFS. SCMI Message Protocol is used to communicate with the SCP. This patch adds a cpufreq driver for such systems using SCMI interface to drive CPU DVFS. Cc: linux-pm@vger.kernel.org Acked-by: Rafael J. 
Wysocki Acked-by: Viresh Kumar Signed-off-by: Sudeep Holla --- MAINTAINERS | 2 +- drivers/cpufreq/Kconfig.arm | 11 ++ drivers/cpufreq/Makefile | 1 + drivers/cpufreq/scmi-cpufreq.c | 246 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 259 insertions(+), 1 deletion(-) create mode 100644 drivers/cpufreq/scmi-cpufreq.c diff --git a/MAINTAINERS b/MAINTAINERS index d6e9e3eae5ac..ef766eb17f8d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13385,7 +13385,7 @@ L: linux-arm-kernel@lists.infradead.org S: Maintained F: Documentation/devicetree/bindings/arm/arm,sc[mp]i.txt F: drivers/clk/clk-sc[mp]i.c -F: drivers/cpufreq/scpi-cpufreq.c +F: drivers/cpufreq/sc[mp]i-cpufreq.c F: drivers/firmware/arm_scpi.c F: drivers/firmware/arm_scmi/ F: include/linux/sc[mp]i_protocol.h diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 3a88e33b0cfe..2e49673a9e21 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -238,6 +238,17 @@ config ARM_SA1100_CPUFREQ config ARM_SA1110_CPUFREQ bool +config ARM_SCMI_CPUFREQ + tristate "SCMI based CPUfreq driver" + depends on ARM_SCMI_PROTOCOL || COMPILE_TEST + select PM_OPP + help + This adds the CPUfreq driver support for ARM platforms using SCMI + protocol for CPU power management. + + This driver uses SCMI Message Protocol driver to interact with the + firmware providing the CPU DVFS functionality. + config ARM_SPEAR_CPUFREQ bool "SPEAr CPUFreq support" depends on PLAT_SPEAR diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index c60c1e141d9d..4987227b67df 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -75,6 +75,7 @@ obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o +obj-$(CONFIG_ARM_SCMI_CPUFREQ) += scmi-cpufreq.o obj-$(CONFIG_ARM_SCPI_CPUFREQ) += scpi-cpufreq.o obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o obj-$(CONFIG_ARM_STI_CPUFREQ) += sti-cpufreq.o diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c new file mode 100644 index 000000000000..b04f07f1b1d2 --- /dev/null +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Power Interface (SCMI) based CPUFreq Interface driver + * + * Copyright (C) 2018 ARM Ltd. 
+ * Sudeep Holla + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct scmi_data { + int domain_id; + struct device *cpu_dev; + struct thermal_cooling_device *cdev; +}; + +static const struct scmi_handle *handle; + +static unsigned int scmi_cpufreq_get_rate(unsigned int cpu) +{ + struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); + struct scmi_perf_ops *perf_ops = handle->perf_ops; + struct scmi_data *priv = policy->driver_data; + unsigned long rate; + int ret; + + ret = perf_ops->freq_get(handle, priv->domain_id, &rate, false); + if (ret) + return 0; + return rate / 1000; +} + +/* + * perf_ops->freq_set is not a synchronous, the actual OPP change will + * happen asynchronously and can get notified if the events are + * subscribed for by the SCMI firmware + */ +static int +scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) +{ + int ret; + struct scmi_data *priv = policy->driver_data; + struct scmi_perf_ops *perf_ops = handle->perf_ops; + u64 freq = policy->freq_table[index].frequency * 1000; + + ret = perf_ops->freq_set(handle, priv->domain_id, freq, false); + if (!ret) + arch_set_freq_scale(policy->related_cpus, freq, + policy->cpuinfo.max_freq); + return ret; +} + +static int +scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) +{ + int cpu, domain, tdomain; + struct device *tcpu_dev; + + domain = handle->perf_ops->device_domain_id(cpu_dev); + if (domain < 0) + return domain; + + for_each_possible_cpu(cpu) { + if (cpu == cpu_dev->id) + continue; + + tcpu_dev = get_cpu_device(cpu); + if (!tcpu_dev) + continue; + + tdomain = handle->perf_ops->device_domain_id(tcpu_dev); + if (tdomain == domain) + cpumask_set_cpu(cpu, cpumask); + } + + return 0; +} + +static int scmi_cpufreq_init(struct cpufreq_policy *policy) +{ + int ret; + unsigned int latency; + struct device *cpu_dev; + struct scmi_data *priv; + struct cpufreq_frequency_table *freq_table; + + cpu_dev = get_cpu_device(policy->cpu); + if (!cpu_dev) { + pr_err("failed to get cpu%d device\n", policy->cpu); + return -ENODEV; + } + + ret = handle->perf_ops->add_opps_to_device(handle, cpu_dev); + if (ret) { + dev_warn(cpu_dev, "failed to add opps to the device\n"); + return ret; + } + + ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus); + if (ret) { + dev_warn(cpu_dev, "failed to get sharing cpumask\n"); + return ret; + } + + ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus); + if (ret) { + dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n", + __func__, ret); + return ret; + } + + ret = dev_pm_opp_get_opp_count(cpu_dev); + if (ret <= 0) { + dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n"); + ret = -EPROBE_DEFER; + goto out_free_opp; + } + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto out_free_opp; + } + + ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); + if (ret) { + dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); + goto out_free_priv; + } + + priv->cpu_dev = cpu_dev; + priv->domain_id = handle->perf_ops->device_domain_id(cpu_dev); + + policy->driver_data = priv; + + ret = cpufreq_table_validate_and_show(policy, freq_table); + if (ret) { + dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__, + ret); + goto out_free_cpufreq_table; + } + + /* SCMI allows DVFS request for any domain from any CPU */ + policy->dvfs_possible_from_any_cpu = true; + + latency = 
handle->perf_ops->get_transition_latency(handle, cpu_dev); + if (!latency) + latency = CPUFREQ_ETERNAL; + + policy->cpuinfo.transition_latency = latency; + + return 0; + +out_free_cpufreq_table: + dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); +out_free_priv: + kfree(priv); +out_free_opp: + dev_pm_opp_cpumask_remove_table(policy->cpus); + + return ret; +} + +static int scmi_cpufreq_exit(struct cpufreq_policy *policy) +{ + struct scmi_data *priv = policy->driver_data; + + cpufreq_cooling_unregister(priv->cdev); + dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); + kfree(priv); + dev_pm_opp_cpumask_remove_table(policy->related_cpus); + + return 0; +} + +static void scmi_cpufreq_ready(struct cpufreq_policy *policy) +{ + struct scmi_data *priv = policy->driver_data; + + priv->cdev = of_cpufreq_cooling_register(policy); +} + +static struct cpufreq_driver scmi_cpufreq_driver = { + .name = "scmi", + .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY | + CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .attr = cpufreq_generic_attr, + .target_index = scmi_cpufreq_set_target, + .get = scmi_cpufreq_get_rate, + .init = scmi_cpufreq_init, + .exit = scmi_cpufreq_exit, + .ready = scmi_cpufreq_ready, +}; + +static int scmi_cpufreq_probe(struct scmi_device *sdev) +{ + int ret; + + handle = sdev->handle; + + if (!handle || !handle->perf_ops) + return -ENODEV; + + ret = cpufreq_register_driver(&scmi_cpufreq_driver); + if (ret) { + dev_err(&sdev->dev, "%s: registering cpufreq failed, err: %d\n", + __func__, ret); + } + + return ret; +} + +static void scmi_cpufreq_remove(struct scmi_device *sdev) +{ + cpufreq_unregister_driver(&scmi_cpufreq_driver); +} + +static const struct scmi_device_id scmi_id_table[] = { + { SCMI_PROTOCOL_PERF }, + { }, +}; +MODULE_DEVICE_TABLE(scmi, scmi_id_table); + +static struct scmi_driver scmi_cpufreq_drv = { + .name = "scmi-cpufreq", + .probe = scmi_cpufreq_probe, + .remove = scmi_cpufreq_remove, + .id_table = scmi_id_table, +}; +module_scmi_driver(scmi_cpufreq_drv); + +MODULE_AUTHOR("Sudeep Holla "); +MODULE_DESCRIPTION("ARM SCMI CPUFreq interface driver"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 02f208c5c60549039445402505dea284e15f0f4f Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Thu, 20 Jul 2017 14:43:07 +0100 Subject: cpufreq: scmi: add support for fast frequency switching The cpufreq core provides option for drivers to implement fast_switch callback which is invoked for frequency switching from interrupt context. This patch adds support for fast_switch callback in SCMI cpufreq driver by making use of polling based SCMI transfer. It also sets the flag fast_switch_possible. Cc: linux-pm@vger.kernel.org Acked-by: Rafael J. 
Wysocki Acked-by: Viresh Kumar Signed-off-by: Sudeep Holla --- drivers/cpufreq/scmi-cpufreq.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index b04f07f1b1d2..959a1dbe3835 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -61,6 +61,22 @@ scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) return ret; } +static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct scmi_data *priv = policy->driver_data; + struct scmi_perf_ops *perf_ops = handle->perf_ops; + + if (!perf_ops->freq_set(handle, priv->domain_id, + target_freq * 1000, true)) { + arch_set_freq_scale(policy->related_cpus, target_freq, + policy->cpuinfo.max_freq); + return target_freq; + } + + return 0; +} + static int scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) { @@ -160,6 +176,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) policy->cpuinfo.transition_latency = latency; + policy->fast_switch_possible = true; return 0; out_free_cpufreq_table: @@ -198,6 +215,7 @@ static struct cpufreq_driver scmi_cpufreq_driver = { .verify = cpufreq_generic_frequency_table_verify, .attr = cpufreq_generic_attr, .target_index = scmi_cpufreq_set_target, + .fast_switch = scmi_cpufreq_fast_switch, .get = scmi_cpufreq_get_rate, .init = scmi_cpufreq_init, .exit = scmi_cpufreq_exit, -- cgit v1.2.3 From 6e61c891a0bbe3cede4157b4ecdfbd1617098b5a Mon Sep 17 00:00:00 2001 From: Xiao Yao Date: Thu, 18 Jan 2018 17:56:45 +0800 Subject: soc: rockchip: disable jtag switching for RK3128 SoCs Disable IO function switching between sdmmc and jtag for RK3128 SoCs. Signed-off-by: Xiao Yao Reviewed-by: Shawn Lin Signed-off-by: Heiko Stuebner --- drivers/soc/rockchip/grf.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c index dd81b87d79f0..96882ffde67e 100644 --- a/drivers/soc/rockchip/grf.c +++ b/drivers/soc/rockchip/grf.c @@ -43,6 +43,17 @@ static const struct rockchip_grf_info rk3036_grf __initconst = { .num_values = ARRAY_SIZE(rk3036_defaults), }; +#define RK3128_GRF_SOC_CON0 0x140 + +static const struct rockchip_grf_value rk3128_defaults[] __initconst = { + { "jtag switching", RK3128_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 8) }, +}; + +static const struct rockchip_grf_info rk3128_grf __initconst = { + .values = rk3128_defaults, + .num_values = ARRAY_SIZE(rk3128_defaults), +}; + #define RK3228_GRF_SOC_CON6 0x418 static const struct rockchip_grf_value rk3228_defaults[] __initconst = { @@ -102,6 +113,9 @@ static const struct of_device_id rockchip_grf_dt_match[] __initconst = { { .compatible = "rockchip,rk3036-grf", .data = (void *)&rk3036_grf, + }, { + .compatible = "rockchip,rk3128-grf", + .data = (void *)&rk3128_grf, }, { .compatible = "rockchip,rk3228-grf", .data = (void *)&rk3228_grf, -- cgit v1.2.3 From d909072d0521a84e67fbe5cce602d7befffabf7e Mon Sep 17 00:00:00 2001 From: Jeffy Chen Date: Wed, 28 Feb 2018 20:41:43 +0800 Subject: soc: rockchip: power-domain: use clk_bulk APIs Use clk_bulk APIs, and also add error handling for clk enable. 
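For context, the clk_bulk helpers operate on an array of struct clk_bulk_data and replace the old per-clock loops with single calls that unwind cleanly on failure (clk_bulk_enable() disables any clocks it already enabled before returning an error). A minimal sketch of the prepare-at-probe / enable-at-power-transition split the driver adopts; the names are illustrative:

#include <linux/clk.h>

struct demo_pd {
	int num_clks;
	struct clk_bulk_data *clks;	/* filled via of_clk_get() etc. */
};

static int demo_pd_probe(struct demo_pd *pd)
{
	/* clk_bulk_prepare() may sleep, so do it once up front */
	return clk_bulk_prepare(pd->num_clks, pd->clks);
}

static int demo_pd_power(struct demo_pd *pd)
{
	int ret = clk_bulk_enable(pd->num_clks, pd->clks);

	if (ret)
		return ret;	/* partial enables already unwound */

	/* ... program the power-domain registers ... */

	clk_bulk_disable(pd->num_clks, pd->clks);
	return 0;
}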
Signed-off-by: Jeffy Chen Signed-off-by: Heiko Stuebner --- drivers/soc/rockchip/pm_domains.c | 90 ++++++++++++++++++--------------------- 1 file changed, 42 insertions(+), 48 deletions(-) diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c index 5c342167b9db..ad96ddeaeb78 100644 --- a/drivers/soc/rockchip/pm_domains.c +++ b/drivers/soc/rockchip/pm_domains.c @@ -67,7 +67,7 @@ struct rockchip_pm_domain { struct regmap **qos_regmap; u32 *qos_save_regs[MAX_QOS_REGS_NUM]; int num_clks; - struct clk *clks[]; + struct clk_bulk_data *clks; }; struct rockchip_pmu { @@ -274,13 +274,18 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd, static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on) { - int i; + struct rockchip_pmu *pmu = pd->pmu; + int ret; - mutex_lock(&pd->pmu->mutex); + mutex_lock(&pmu->mutex); if (rockchip_pmu_domain_is_on(pd) != power_on) { - for (i = 0; i < pd->num_clks; i++) - clk_enable(pd->clks[i]); + ret = clk_bulk_enable(pd->num_clks, pd->clks); + if (ret < 0) { + dev_err(pmu->dev, "failed to enable clocks\n"); + mutex_unlock(&pmu->mutex); + return ret; + } if (!power_on) { rockchip_pmu_save_qos(pd); @@ -298,11 +303,10 @@ static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on) rockchip_pmu_restore_qos(pd); } - for (i = pd->num_clks - 1; i >= 0; i--) - clk_disable(pd->clks[i]); + clk_bulk_disable(pd->num_clks, pd->clks); } - mutex_unlock(&pd->pmu->mutex); + mutex_unlock(&pmu->mutex); return 0; } @@ -364,8 +368,6 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu, const struct rockchip_domain_info *pd_info; struct rockchip_pm_domain *pd; struct device_node *qos_node; - struct clk *clk; - int clk_cnt; int i, j; u32 id; int error; @@ -391,41 +393,36 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu, return -EINVAL; } - clk_cnt = of_count_phandle_with_args(node, "clocks", "#clock-cells"); - pd = devm_kzalloc(pmu->dev, - sizeof(*pd) + clk_cnt * sizeof(pd->clks[0]), - GFP_KERNEL); + pd = devm_kzalloc(pmu->dev, sizeof(*pd), GFP_KERNEL); if (!pd) return -ENOMEM; pd->info = pd_info; pd->pmu = pmu; - for (i = 0; i < clk_cnt; i++) { - clk = of_clk_get(node, i); - if (IS_ERR(clk)) { - error = PTR_ERR(clk); + pd->num_clks = of_count_phandle_with_args(node, "clocks", + "#clock-cells"); + + pd->clks = devm_kcalloc(pmu->dev, pd->num_clks, sizeof(*pd->clks), + GFP_KERNEL); + if (!pd->clks) + return -ENOMEM; + + for (i = 0; i < pd->num_clks; i++) { + pd->clks[i].clk = of_clk_get(node, i); + if (IS_ERR(pd->clks[i].clk)) { + error = PTR_ERR(pd->clks[i].clk); dev_err(pmu->dev, "%s: failed to get clk at index %d: %d\n", node->name, i, error); - goto err_out; - } - - error = clk_prepare(clk); - if (error) { - dev_err(pmu->dev, - "%s: failed to prepare clk %pC (index %d): %d\n", - node->name, clk, i, error); - clk_put(clk); - goto err_out; + return error; } - - pd->clks[pd->num_clks++] = clk; - - dev_dbg(pmu->dev, "added clock '%pC' to domain '%s'\n", - clk, node->name); } + error = clk_bulk_prepare(pd->num_clks, pd->clks); + if (error) + goto err_put_clocks; + pd->num_qos = of_count_phandle_with_args(node, "pm_qos", NULL); @@ -435,7 +432,7 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu, GFP_KERNEL); if (!pd->qos_regmap) { error = -ENOMEM; - goto err_out; + goto err_unprepare_clocks; } for (j = 0; j < MAX_QOS_REGS_NUM; j++) { @@ -445,7 +442,7 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu, GFP_KERNEL); if (!pd->qos_save_regs[j]) { error = 
-ENOMEM; - goto err_out; + goto err_unprepare_clocks; } } @@ -453,13 +450,13 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu, qos_node = of_parse_phandle(node, "pm_qos", j); if (!qos_node) { error = -ENODEV; - goto err_out; + goto err_unprepare_clocks; } pd->qos_regmap[j] = syscon_node_to_regmap(qos_node); if (IS_ERR(pd->qos_regmap[j])) { error = -ENODEV; of_node_put(qos_node); - goto err_out; + goto err_unprepare_clocks; } of_node_put(qos_node); } @@ -470,7 +467,7 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu, dev_err(pmu->dev, "failed to power on domain '%s': %d\n", node->name, error); - goto err_out; + goto err_unprepare_clocks; } pd->genpd.name = node->name; @@ -486,17 +483,16 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu, pmu->genpd_data.domains[id] = &pd->genpd; return 0; -err_out: - while (--i >= 0) { - clk_unprepare(pd->clks[i]); - clk_put(pd->clks[i]); - } +err_unprepare_clocks: + clk_bulk_unprepare(pd->num_clks, pd->clks); +err_put_clocks: + clk_bulk_put(pd->num_clks, pd->clks); return error; } static void rockchip_pm_remove_one_domain(struct rockchip_pm_domain *pd) { - int i, ret; + int ret; /* * We're in the error cleanup already, so we only complain, @@ -507,10 +503,8 @@ static void rockchip_pm_remove_one_domain(struct rockchip_pm_domain *pd) dev_err(pd->pmu->dev, "failed to remove domain '%s' : %d - state may be inconsistent\n", pd->genpd.name, ret); - for (i = 0; i < pd->num_clks; i++) { - clk_unprepare(pd->clks[i]); - clk_put(pd->clks[i]); - } + clk_bulk_unprepare(pd->num_clks, pd->clks); + clk_bulk_put(pd->num_clks, pd->clks); /* protect the zeroing of pm->num_clks */ mutex_lock(&pd->pmu->mutex); -- cgit v1.2.3 From b1271993aa3855bda5073c6061a095fd6e6febc6 Mon Sep 17 00:00:00 2001 From: Jeffy Chen Date: Mon, 5 Mar 2018 17:17:22 +0800 Subject: soc: rockchip: power-domain: Add a sanity check on pd->num_clks The of_count_phandle_with_args() can fail and return error(for example, rk3399 pd_vio doesn't have clocks). That would break the pd probe. Add a sanity check on pd->num_clks to avoid that. Fixes: 65084121d59d ("soc: rockchip: power-domain: use clk_bulk APIs") Reported-by: Shawn Lin Signed-off-by: Jeffy Chen Tested-by: Shawn Lin Signed-off-by: Heiko Stuebner --- drivers/soc/rockchip/pm_domains.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c index ad96ddeaeb78..53efc386b1ad 100644 --- a/drivers/soc/rockchip/pm_domains.c +++ b/drivers/soc/rockchip/pm_domains.c @@ -402,11 +402,16 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu, pd->num_clks = of_count_phandle_with_args(node, "clocks", "#clock-cells"); - - pd->clks = devm_kcalloc(pmu->dev, pd->num_clks, sizeof(*pd->clks), - GFP_KERNEL); - if (!pd->clks) - return -ENOMEM; + if (pd->num_clks > 0) { + pd->clks = devm_kcalloc(pmu->dev, pd->num_clks, + sizeof(*pd->clks), GFP_KERNEL); + if (!pd->clks) + return -ENOMEM; + } else { + dev_dbg(pmu->dev, "%s: doesn't have clocks: %d\n", + node->name, pd->num_clks); + pd->num_clks = 0; + } for (i = 0; i < pd->num_clks; i++) { pd->clks[i].clk = of_clk_get(node, i); -- cgit v1.2.3 From d363a88b31fc03268eada426f5940ef685b8df21 Mon Sep 17 00:00:00 2001 From: SF Markus Elfring Date: Mon, 5 Mar 2018 16:21:26 -0800 Subject: memory-EMIF: Use seq_putc() in emif_regdump_show() A single character (line break) should be put into a sequence. Thus use the corresponding function "seq_putc". 
This issue was detected by using the Coccinelle software. Signed-off-by: Markus Elfring Signed-off-by: Santosh Shilimkar --- drivers/memory/emif.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c index 04644e7b42b1..2f214440008c 100644 --- a/drivers/memory/emif.c +++ b/drivers/memory/emif.c @@ -127,7 +127,7 @@ static int emif_regdump_show(struct seq_file *s, void *unused) for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) { do_emif_regdump_show(s, emif, regs_cache[i]); - seq_printf(s, "\n"); + seq_putc(s, '\n'); } return 0; -- cgit v1.2.3 From a817e5d82a2dc5daaa0a8a66cfab268fdf2f6bb6 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Mon, 5 Mar 2018 16:21:31 -0800 Subject: memory: ti-emif-sram: remove redundant dev_err call in ti_emif_probe() There is a error message within devm_ioremap_resource already, so remove the dev_err call to avoid redundant error message. Signed-off-by: Wei Yongjun Signed-off-by: Santosh Shilimkar --- drivers/memory/ti-emif-pm.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/memory/ti-emif-pm.c b/drivers/memory/ti-emif-pm.c index 62a86c4bcd0b..632651f4b6e8 100644 --- a/drivers/memory/ti-emif-pm.c +++ b/drivers/memory/ti-emif-pm.c @@ -271,7 +271,6 @@ static int ti_emif_probe(struct platform_device *pdev) emif_data->pm_data.ti_emif_base_addr_virt = devm_ioremap_resource(dev, res); if (IS_ERR(emif_data->pm_data.ti_emif_base_addr_virt)) { - dev_err(dev, "could not ioremap emif mem\n"); ret = PTR_ERR(emif_data->pm_data.ti_emif_base_addr_virt); return ret; } -- cgit v1.2.3 From 7dd003aec2016e90d33f25f90ad4cebb12224a8a Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Mon, 15 Jan 2018 17:27:35 +0800 Subject: tee: correct max value for id allocation The privileged dev id range is [TEE_NUM_DEVICES / 2, TEE_NUM_DEVICES). The non-privileged dev id range is [0, TEE_NUM_DEVICES / 2). So when finding a slot for them, need to use different max value. 
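In other words, the device-id bitmap is carved into two halves and each half must be searched within its own [offs, max_id) window; bounding both searches by TEE_NUM_DEVICES let a non-privileged allocation spill into the privileged range. A compact sketch of the corrected logic (constants simplified and locking omitted; the driver holds driver_lock around the search):

#include <linux/bitmap.h>
#include <linux/errno.h>

#define NUM_IDS		32
static DECLARE_BITMAP(id_mask, NUM_IDS);

static int id_alloc(bool privileged)
{
	int offs = privileged ? NUM_IDS / 2 : 0;
	int max_id = privileged ? NUM_IDS : NUM_IDS / 2;
	int id;

	id = find_next_zero_bit(id_mask, max_id, offs);
	if (id >= max_id)
		return -ENOMEM;	/* range exhausted, as in the driver */

	set_bit(id, id_mask);
	return id;
}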
Signed-off-by: Peng Fan Signed-off-by: Jens Wiklander --- drivers/tee/tee_core.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index 6c4b200a4560..0124a91c8d71 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -693,7 +693,7 @@ struct tee_device *tee_device_alloc(const struct tee_desc *teedesc, { struct tee_device *teedev; void *ret; - int rc; + int rc, max_id; int offs = 0; if (!teedesc || !teedesc->name || !teedesc->ops || @@ -707,16 +707,20 @@ struct tee_device *tee_device_alloc(const struct tee_desc *teedesc, goto err; } - if (teedesc->flags & TEE_DESC_PRIVILEGED) + max_id = TEE_NUM_DEVICES / 2; + + if (teedesc->flags & TEE_DESC_PRIVILEGED) { offs = TEE_NUM_DEVICES / 2; + max_id = TEE_NUM_DEVICES; + } spin_lock(&driver_lock); - teedev->id = find_next_zero_bit(dev_mask, TEE_NUM_DEVICES, offs); - if (teedev->id < TEE_NUM_DEVICES) + teedev->id = find_next_zero_bit(dev_mask, max_id, offs); + if (teedev->id < max_id) set_bit(teedev->id, dev_mask); spin_unlock(&driver_lock); - if (teedev->id >= TEE_NUM_DEVICES) { + if (teedev->id >= max_id) { ret = ERR_PTR(-ENOMEM); goto err; } -- cgit v1.2.3 From 6e112de0427874500fb9c373595481653ae4078d Mon Sep 17 00:00:00 2001 From: Jérôme Forissier Date: Fri, 24 Nov 2017 15:47:17 +0100 Subject: tee: optee: GET_OS_REVISION: document a2 as a build identifier In the OPTEE_SMC_CALL_GET_OS_REVISION request, the previously reserved parameter a2 is now documented as being an optional build identifier (such as an SCM revision or commit ID, for instance). A new structure optee_smc_call_get_os_revision_result is introduced to be used when querying the secure OS version, instead of re-using the struct defined for OPTEE_SMC_CALLS_REVISION. Signed-off-by: Jerome Forissier Reviewed-by: Matthias Brugger Signed-off-by: Jens Wiklander --- drivers/tee/optee/optee_smc.h | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h index 7cd327243ada..bbf0cf028c16 100644 --- a/drivers/tee/optee/optee_smc.h +++ b/drivers/tee/optee/optee_smc.h @@ -112,12 +112,20 @@ struct optee_smc_calls_revision_result { * Trusted OS, not of the API. * * Returns revision in a0-1 in the same way as OPTEE_SMC_CALLS_REVISION - * described above. + * described above. May optionally return a 32-bit build identifier in a2, + * with zero meaning unspecified. */ #define OPTEE_SMC_FUNCID_GET_OS_REVISION OPTEE_MSG_FUNCID_GET_OS_REVISION #define OPTEE_SMC_CALL_GET_OS_REVISION \ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_REVISION) +struct optee_smc_call_get_os_revision_result { + unsigned long major; + unsigned long minor; + unsigned long build_id; + unsigned long reserved1; +}; + /* * Call with struct optee_msg_arg as argument * -- cgit v1.2.3 From 5c5f80307ab27c53b56569245a0b12f4e3b577de Mon Sep 17 00:00:00 2001 From: Jérôme Forissier Date: Fri, 24 Nov 2017 15:47:18 +0100 Subject: tee: optee: report OP-TEE revision information When the driver initializes, report the following information about the OP-TEE OS: - major and minor version, - build identifier (if available). 
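With the format strings added below, the extra line in the kernel log would look roughly like this (version and build-id values purely illustrative, and subject to the driver's pr_fmt prefix, if any):

    revision 2.6 (09fa2d31)

or, when the firmware reports a zero build identifier, simply:

    revision 2.6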
Signed-off-by: Jerome Forissier Reviewed-by: Matthias Brugger Signed-off-by: Jens Wiklander --- drivers/tee/optee/core.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index e9843c53fe31..e5fd5ed217da 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -356,6 +356,27 @@ static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn) return false; } +static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn) +{ + union { + struct arm_smccc_res smccc; + struct optee_smc_call_get_os_revision_result result; + } res = { + .result = { + .build_id = 0 + } + }; + + invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0, + &res.smccc); + + if (res.result.build_id) + pr_info("revision %lu.%lu (%08lx)", res.result.major, + res.result.minor, res.result.build_id); + else + pr_info("revision %lu.%lu", res.result.major, res.result.minor); +} + static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn) { union { @@ -547,6 +568,8 @@ static struct optee *optee_probe(struct device_node *np) return ERR_PTR(-EINVAL); } + optee_msg_get_os_revision(invoke_fn); + if (!optee_msg_api_revision_is_compatible(invoke_fn)) { pr_warn("api revision mismatch\n"); return ERR_PTR(-EINVAL); -- cgit v1.2.3 From 1888d3ddc3d6a2511be86045cfb2e7ea5fc67c44 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 15 Feb 2018 18:51:41 +0000 Subject: drivers/bus: Move Arm CCN PMU driver The arm-ccn driver is purely a perf driver for the CCN PMU, not a bus driver in the sense of the other residents of drivers/bus/, so let's move it to the appropriate place for SoC PMU drivers. Not to mention moving the documentation accordingly as well. Acked-by: Pawel Moll Acked-by: Will Deacon Signed-off-by: Robin Murphy Signed-off-by: Arnd Bergmann --- Documentation/arm/CCN.txt | 59 - Documentation/devicetree/bindings/arm/ccn.txt | 22 - Documentation/devicetree/bindings/perf/arm-ccn.txt | 22 + Documentation/perf/arm-ccn.txt | 59 + drivers/bus/Kconfig | 8 - drivers/bus/Makefile | 2 - drivers/bus/arm-ccn.c | 1597 -------------------- drivers/perf/Kconfig | 7 + drivers/perf/Makefile | 1 + drivers/perf/arm-ccn.c | 1597 ++++++++++++++++++++ 10 files changed, 1686 insertions(+), 1688 deletions(-) delete mode 100644 Documentation/arm/CCN.txt delete mode 100644 Documentation/devicetree/bindings/arm/ccn.txt create mode 100644 Documentation/devicetree/bindings/perf/arm-ccn.txt create mode 100644 Documentation/perf/arm-ccn.txt delete mode 100644 drivers/bus/arm-ccn.c create mode 100644 drivers/perf/arm-ccn.c diff --git a/Documentation/arm/CCN.txt b/Documentation/arm/CCN.txt deleted file mode 100644 index 15cdb7bc57c3..000000000000 --- a/Documentation/arm/CCN.txt +++ /dev/null @@ -1,59 +0,0 @@ -ARM Cache Coherent Network -========================== - -CCN-504 is a ring-bus interconnect consisting of 11 crosspoints -(XPs), with each crosspoint supporting up to two device ports, -so nodes (devices) 0 and 1 are connected to crosspoint 0, -nodes 2 and 3 to crosspoint 1 etc. - -PMU (perf) driver ------------------ - -The CCN driver registers a perf PMU driver, which provides -description of available events and configuration options -in sysfs, see /sys/bus/event_source/devices/ccn*. - -The "format" directory describes format of the config, config1 -and config2 fields of the perf_event_attr structure. The "events" -directory provides configuration templates for all documented -events, that can be used with perf tool. 
For example "xp_valid_flit" -is an equivalent of "type=0x8,event=0x4". Other parameters must be -explicitly specified. - -For events originating from device, "node" defines its index. - -Crosspoint PMU events require "xp" (index), "bus" (bus number) -and "vc" (virtual channel ID). - -Crosspoint watchpoint-based events (special "event" value 0xfe) -require "xp" and "vc" as as above plus "port" (device port index), -"dir" (transmit/receive direction), comparator values ("cmp_l" -and "cmp_h") and "mask", being index of the comparator mask. -Masks are defined separately from the event description -(due to limited number of the config values) in the "cmp_mask" -directory, with first 8 configurable by user and additional -4 hardcoded for the most frequent use cases. - -Cycle counter is described by a "type" value 0xff and does -not require any other settings. - -The driver also provides a "cpumask" sysfs attribute, which contains -a single CPU ID, of the processor which will be used to handle all -the CCN PMU events. It is recommended that the user space tools -request the events on this processor (if not, the perf_event->cpu value -will be overwritten anyway). In case of this processor being offlined, -the events are migrated to another one and the attribute is updated. - -Example of perf tool use: - -/ # perf list | grep ccn - ccn/cycles/ [Kernel PMU event] -<...> - ccn/xp_valid_flit,xp=?,port=?,vc=?,dir=?/ [Kernel PMU event] -<...> - -/ # perf stat -a -e ccn/cycles/,ccn/xp_valid_flit,xp=1,port=0,vc=1,dir=1/ \ - sleep 1 - -The driver does not support sampling, therefore "perf record" will -not work. Per-task (without "-a") perf sessions are not supported. diff --git a/Documentation/devicetree/bindings/arm/ccn.txt b/Documentation/devicetree/bindings/arm/ccn.txt deleted file mode 100644 index 43b5a71a5a9d..000000000000 --- a/Documentation/devicetree/bindings/arm/ccn.txt +++ /dev/null @@ -1,22 +0,0 @@ -* ARM CCN (Cache Coherent Network) - -Required properties: - -- compatible: (standard compatible string) should be one of: - "arm,ccn-502" - "arm,ccn-504" - "arm,ccn-508" - -- reg: (standard registers property) physical address and size - (16MB) of the configuration registers block - -- interrupts: (standard interrupt property) single interrupt - generated by the control block - -Example: - - ccn@2000000000 { - compatible = "arm,ccn-504"; - reg = <0x20 0x00000000 0 0x1000000>; - interrupts = <0 181 4>; - }; diff --git a/Documentation/devicetree/bindings/perf/arm-ccn.txt b/Documentation/devicetree/bindings/perf/arm-ccn.txt new file mode 100644 index 000000000000..43b5a71a5a9d --- /dev/null +++ b/Documentation/devicetree/bindings/perf/arm-ccn.txt @@ -0,0 +1,22 @@ +* ARM CCN (Cache Coherent Network) + +Required properties: + +- compatible: (standard compatible string) should be one of: + "arm,ccn-502" + "arm,ccn-504" + "arm,ccn-508" + +- reg: (standard registers property) physical address and size + (16MB) of the configuration registers block + +- interrupts: (standard interrupt property) single interrupt + generated by the control block + +Example: + + ccn@2000000000 { + compatible = "arm,ccn-504"; + reg = <0x20 0x00000000 0 0x1000000>; + interrupts = <0 181 4>; + }; diff --git a/Documentation/perf/arm-ccn.txt b/Documentation/perf/arm-ccn.txt new file mode 100644 index 000000000000..15cdb7bc57c3 --- /dev/null +++ b/Documentation/perf/arm-ccn.txt @@ -0,0 +1,59 @@ +ARM Cache Coherent Network +========================== + +CCN-504 is a ring-bus interconnect consisting of 11 crosspoints +(XPs), with each 
crosspoint supporting up to two device ports, +so nodes (devices) 0 and 1 are connected to crosspoint 0, +nodes 2 and 3 to crosspoint 1 etc. + +PMU (perf) driver +----------------- + +The CCN driver registers a perf PMU driver, which provides +description of available events and configuration options +in sysfs, see /sys/bus/event_source/devices/ccn*. + +The "format" directory describes format of the config, config1 +and config2 fields of the perf_event_attr structure. The "events" +directory provides configuration templates for all documented +events, that can be used with perf tool. For example "xp_valid_flit" +is an equivalent of "type=0x8,event=0x4". Other parameters must be +explicitly specified. + +For events originating from device, "node" defines its index. + +Crosspoint PMU events require "xp" (index), "bus" (bus number) +and "vc" (virtual channel ID). + +Crosspoint watchpoint-based events (special "event" value 0xfe) +require "xp" and "vc" as as above plus "port" (device port index), +"dir" (transmit/receive direction), comparator values ("cmp_l" +and "cmp_h") and "mask", being index of the comparator mask. +Masks are defined separately from the event description +(due to limited number of the config values) in the "cmp_mask" +directory, with first 8 configurable by user and additional +4 hardcoded for the most frequent use cases. + +Cycle counter is described by a "type" value 0xff and does +not require any other settings. + +The driver also provides a "cpumask" sysfs attribute, which contains +a single CPU ID, of the processor which will be used to handle all +the CCN PMU events. It is recommended that the user space tools +request the events on this processor (if not, the perf_event->cpu value +will be overwritten anyway). In case of this processor being offlined, +the events are migrated to another one and the attribute is updated. + +Example of perf tool use: + +/ # perf list | grep ccn + ccn/cycles/ [Kernel PMU event] +<...> + ccn/xp_valid_flit,xp=?,port=?,vc=?,dir=?/ [Kernel PMU event] +<...> + +/ # perf stat -a -e ccn/cycles/,ccn/xp_valid_flit,xp=1,port=0,vc=1,dir=1/ \ + sleep 1 + +The driver does not support sampling, therefore "perf record" will +not work. Per-task (without "-a") perf sessions are not supported. diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 57e011d36a79..116446c42c6b 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -48,14 +48,6 @@ config ARM_CCI5xx_PMU If unsure, say Y -config ARM_CCN - tristate "ARM CCN driver support" - depends on ARM || ARM64 - depends on PERF_EVENTS - help - PMU (perf) driver supporting the ARM CCN (Cache Coherent Network) - interconnect. - config BRCMSTB_GISB_ARB bool "Broadcom STB GISB bus arbiter" depends on ARM || ARM64 || MIPS diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile index 9bcd0bf3954b..19733afddd0a 100644 --- a/drivers/bus/Makefile +++ b/drivers/bus/Makefile @@ -5,8 +5,6 @@ # Interconnect bus drivers for ARM platforms obj-$(CONFIG_ARM_CCI) += arm-cci.o -obj-$(CONFIG_ARM_CCN) += arm-ccn.o - obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o obj-$(CONFIG_IMX_WEIM) += imx-weim.o obj-$(CONFIG_MIPS_CDMM) += mips_cdmm.o diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c deleted file mode 100644 index b52332e52ca5..000000000000 --- a/drivers/bus/arm-ccn.c +++ /dev/null @@ -1,1597 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * Copyright (C) 2014 ARM Limited - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define CCN_NUM_XP_PORTS 2 -#define CCN_NUM_VCS 4 -#define CCN_NUM_REGIONS 256 -#define CCN_REGION_SIZE 0x10000 - -#define CCN_ALL_OLY_ID 0xff00 -#define CCN_ALL_OLY_ID__OLY_ID__SHIFT 0 -#define CCN_ALL_OLY_ID__OLY_ID__MASK 0x1f -#define CCN_ALL_OLY_ID__NODE_ID__SHIFT 8 -#define CCN_ALL_OLY_ID__NODE_ID__MASK 0x3f - -#define CCN_MN_ERRINT_STATUS 0x0008 -#define CCN_MN_ERRINT_STATUS__INTREQ__DESSERT 0x11 -#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__ENABLE 0x02 -#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLED 0x20 -#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE 0x22 -#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_ENABLE 0x04 -#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLED 0x40 -#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLE 0x44 -#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE 0x08 -#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED 0x80 -#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE 0x88 -#define CCN_MN_OLY_COMP_LIST_63_0 0x01e0 -#define CCN_MN_ERR_SIG_VAL_63_0 0x0300 -#define CCN_MN_ERR_SIG_VAL_63_0__DT (1 << 1) - -#define CCN_DT_ACTIVE_DSM 0x0000 -#define CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(n) ((n) * 8) -#define CCN_DT_ACTIVE_DSM__DSM_ID__MASK 0xff -#define CCN_DT_CTL 0x0028 -#define CCN_DT_CTL__DT_EN (1 << 0) -#define CCN_DT_PMEVCNT(n) (0x0100 + (n) * 0x8) -#define CCN_DT_PMCCNTR 0x0140 -#define CCN_DT_PMCCNTRSR 0x0190 -#define CCN_DT_PMOVSR 0x0198 -#define CCN_DT_PMOVSR_CLR 0x01a0 -#define CCN_DT_PMOVSR_CLR__MASK 0x1f -#define CCN_DT_PMCR 0x01a8 -#define CCN_DT_PMCR__OVFL_INTR_EN (1 << 6) -#define CCN_DT_PMCR__PMU_EN (1 << 0) -#define CCN_DT_PMSR 0x01b0 -#define CCN_DT_PMSR_REQ 0x01b8 -#define CCN_DT_PMSR_CLR 0x01c0 - -#define CCN_HNF_PMU_EVENT_SEL 0x0600 -#define CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4) -#define CCN_HNF_PMU_EVENT_SEL__ID__MASK 0xf - -#define CCN_XP_DT_CONFIG 0x0300 -#define CCN_XP_DT_CONFIG__DT_CFG__SHIFT(n) ((n) * 4) -#define CCN_XP_DT_CONFIG__DT_CFG__MASK 0xf -#define CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH 0x0 -#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT_0_OR_1 0x1 -#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(n) (0x2 + (n)) -#define CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(n) (0x4 + (n)) -#define CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(d, n) (0x8 + (d) * 4 + (n)) -#define CCN_XP_DT_INTERFACE_SEL 0x0308 -#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(n) (0 + (n) * 8) -#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK 0x1 -#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(n) (1 + (n) * 8) -#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK 0x1 -#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(n) (2 + (n) * 8) -#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK 0x3 -#define CCN_XP_DT_CMP_VAL_L(n) (0x0310 + (n) * 0x40) -#define CCN_XP_DT_CMP_VAL_H(n) (0x0318 + (n) * 0x40) -#define CCN_XP_DT_CMP_MASK_L(n) (0x0320 + (n) * 0x40) -#define CCN_XP_DT_CMP_MASK_H(n) (0x0328 + (n) * 0x40) -#define CCN_XP_DT_CONTROL 0x0370 -#define CCN_XP_DT_CONTROL__DT_ENABLE (1 << 0) -#define CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(n) (12 + (n) * 4) -#define CCN_XP_DT_CONTROL__WP_ARM_SEL__MASK 0xf -#define CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS 0xf -#define CCN_XP_PMU_EVENT_SEL 0x0600 
-#define CCN_XP_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 7) -#define CCN_XP_PMU_EVENT_SEL__ID__MASK 0x3f - -#define CCN_SBAS_PMU_EVENT_SEL 0x0600 -#define CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4) -#define CCN_SBAS_PMU_EVENT_SEL__ID__MASK 0xf - -#define CCN_RNI_PMU_EVENT_SEL 0x0600 -#define CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4) -#define CCN_RNI_PMU_EVENT_SEL__ID__MASK 0xf - -#define CCN_TYPE_MN 0x01 -#define CCN_TYPE_DT 0x02 -#define CCN_TYPE_HNF 0x04 -#define CCN_TYPE_HNI 0x05 -#define CCN_TYPE_XP 0x08 -#define CCN_TYPE_SBSX 0x0c -#define CCN_TYPE_SBAS 0x10 -#define CCN_TYPE_RNI_1P 0x14 -#define CCN_TYPE_RNI_2P 0x15 -#define CCN_TYPE_RNI_3P 0x16 -#define CCN_TYPE_RND_1P 0x18 /* RN-D = RN-I + DVM */ -#define CCN_TYPE_RND_2P 0x19 -#define CCN_TYPE_RND_3P 0x1a -#define CCN_TYPE_CYCLES 0xff /* Pseudotype */ - -#define CCN_EVENT_WATCHPOINT 0xfe /* Pseudoevent */ - -#define CCN_NUM_PMU_EVENTS 4 -#define CCN_NUM_XP_WATCHPOINTS 2 /* See DT.dbg_id.num_watchpoints */ -#define CCN_NUM_PMU_EVENT_COUNTERS 8 /* See DT.dbg_id.num_pmucntr */ -#define CCN_IDX_PMU_CYCLE_COUNTER CCN_NUM_PMU_EVENT_COUNTERS - -#define CCN_NUM_PREDEFINED_MASKS 4 -#define CCN_IDX_MASK_ANY (CCN_NUM_PMU_EVENT_COUNTERS + 0) -#define CCN_IDX_MASK_EXACT (CCN_NUM_PMU_EVENT_COUNTERS + 1) -#define CCN_IDX_MASK_ORDER (CCN_NUM_PMU_EVENT_COUNTERS + 2) -#define CCN_IDX_MASK_OPCODE (CCN_NUM_PMU_EVENT_COUNTERS + 3) - -struct arm_ccn_component { - void __iomem *base; - u32 type; - - DECLARE_BITMAP(pmu_events_mask, CCN_NUM_PMU_EVENTS); - union { - struct { - DECLARE_BITMAP(dt_cmp_mask, CCN_NUM_XP_WATCHPOINTS); - } xp; - }; -}; - -#define pmu_to_arm_ccn(_pmu) container_of(container_of(_pmu, \ - struct arm_ccn_dt, pmu), struct arm_ccn, dt) - -struct arm_ccn_dt { - int id; - void __iomem *base; - - spinlock_t config_lock; - - DECLARE_BITMAP(pmu_counters_mask, CCN_NUM_PMU_EVENT_COUNTERS + 1); - struct { - struct arm_ccn_component *source; - struct perf_event *event; - } pmu_counters[CCN_NUM_PMU_EVENT_COUNTERS + 1]; - - struct { - u64 l, h; - } cmp_mask[CCN_NUM_PMU_EVENT_COUNTERS + CCN_NUM_PREDEFINED_MASKS]; - - struct hrtimer hrtimer; - - cpumask_t cpu; - struct hlist_node node; - - struct pmu pmu; -}; - -struct arm_ccn { - struct device *dev; - void __iomem *base; - unsigned int irq; - - unsigned sbas_present:1; - unsigned sbsx_present:1; - - int num_nodes; - struct arm_ccn_component *node; - - int num_xps; - struct arm_ccn_component *xp; - - struct arm_ccn_dt dt; - int mn_id; -}; - -static int arm_ccn_node_to_xp(int node) -{ - return node / CCN_NUM_XP_PORTS; -} - -static int arm_ccn_node_to_xp_port(int node) -{ - return node % CCN_NUM_XP_PORTS; -} - - -/* - * Bit shifts and masks in these defines must be kept in sync with - * arm_ccn_pmu_config_set() and CCN_FORMAT_ATTRs below! 
- */ -#define CCN_CONFIG_NODE(_config) (((_config) >> 0) & 0xff) -#define CCN_CONFIG_XP(_config) (((_config) >> 0) & 0xff) -#define CCN_CONFIG_TYPE(_config) (((_config) >> 8) & 0xff) -#define CCN_CONFIG_EVENT(_config) (((_config) >> 16) & 0xff) -#define CCN_CONFIG_PORT(_config) (((_config) >> 24) & 0x3) -#define CCN_CONFIG_BUS(_config) (((_config) >> 24) & 0x3) -#define CCN_CONFIG_VC(_config) (((_config) >> 26) & 0x7) -#define CCN_CONFIG_DIR(_config) (((_config) >> 29) & 0x1) -#define CCN_CONFIG_MASK(_config) (((_config) >> 30) & 0xf) - -static void arm_ccn_pmu_config_set(u64 *config, u32 node_xp, u32 type, u32 port) -{ - *config &= ~((0xff << 0) | (0xff << 8) | (0x3 << 24)); - *config |= (node_xp << 0) | (type << 8) | (port << 24); -} - -static ssize_t arm_ccn_pmu_format_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dev_ext_attribute *ea = container_of(attr, - struct dev_ext_attribute, attr); - - return snprintf(buf, PAGE_SIZE, "%s\n", (char *)ea->var); -} - -#define CCN_FORMAT_ATTR(_name, _config) \ - struct dev_ext_attribute arm_ccn_pmu_format_attr_##_name = \ - { __ATTR(_name, S_IRUGO, arm_ccn_pmu_format_show, \ - NULL), _config } - -static CCN_FORMAT_ATTR(node, "config:0-7"); -static CCN_FORMAT_ATTR(xp, "config:0-7"); -static CCN_FORMAT_ATTR(type, "config:8-15"); -static CCN_FORMAT_ATTR(event, "config:16-23"); -static CCN_FORMAT_ATTR(port, "config:24-25"); -static CCN_FORMAT_ATTR(bus, "config:24-25"); -static CCN_FORMAT_ATTR(vc, "config:26-28"); -static CCN_FORMAT_ATTR(dir, "config:29-29"); -static CCN_FORMAT_ATTR(mask, "config:30-33"); -static CCN_FORMAT_ATTR(cmp_l, "config1:0-62"); -static CCN_FORMAT_ATTR(cmp_h, "config2:0-59"); - -static struct attribute *arm_ccn_pmu_format_attrs[] = { - &arm_ccn_pmu_format_attr_node.attr.attr, - &arm_ccn_pmu_format_attr_xp.attr.attr, - &arm_ccn_pmu_format_attr_type.attr.attr, - &arm_ccn_pmu_format_attr_event.attr.attr, - &arm_ccn_pmu_format_attr_port.attr.attr, - &arm_ccn_pmu_format_attr_bus.attr.attr, - &arm_ccn_pmu_format_attr_vc.attr.attr, - &arm_ccn_pmu_format_attr_dir.attr.attr, - &arm_ccn_pmu_format_attr_mask.attr.attr, - &arm_ccn_pmu_format_attr_cmp_l.attr.attr, - &arm_ccn_pmu_format_attr_cmp_h.attr.attr, - NULL -}; - -static const struct attribute_group arm_ccn_pmu_format_attr_group = { - .name = "format", - .attrs = arm_ccn_pmu_format_attrs, -}; - - -struct arm_ccn_pmu_event { - struct device_attribute attr; - u32 type; - u32 event; - int num_ports; - int num_vcs; - const char *def; - int mask; -}; - -#define CCN_EVENT_ATTR(_name) \ - __ATTR(_name, S_IRUGO, arm_ccn_pmu_event_show, NULL) - -/* - * Events defined in TRM for MN, HN-I and SBSX are actually watchpoints set on - * their ports in XP they are connected to. For the sake of usability they are - * explicitly defined here (and translated into a relevant watchpoint in - * arm_ccn_pmu_event_init()) so the user can easily request them without deep - * knowledge of the flit format. 
- */ - -#define CCN_EVENT_MN(_name, _def, _mask) { .attr = CCN_EVENT_ATTR(mn_##_name), \ - .type = CCN_TYPE_MN, .event = CCN_EVENT_WATCHPOINT, \ - .num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, \ - .def = _def, .mask = _mask, } - -#define CCN_EVENT_HNI(_name, _def, _mask) { \ - .attr = CCN_EVENT_ATTR(hni_##_name), .type = CCN_TYPE_HNI, \ - .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \ - .num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, } - -#define CCN_EVENT_SBSX(_name, _def, _mask) { \ - .attr = CCN_EVENT_ATTR(sbsx_##_name), .type = CCN_TYPE_SBSX, \ - .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \ - .num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, } - -#define CCN_EVENT_HNF(_name, _event) { .attr = CCN_EVENT_ATTR(hnf_##_name), \ - .type = CCN_TYPE_HNF, .event = _event, } - -#define CCN_EVENT_XP(_name, _event) { .attr = CCN_EVENT_ATTR(xp_##_name), \ - .type = CCN_TYPE_XP, .event = _event, \ - .num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, } - -/* - * RN-I & RN-D (RN-D = RN-I + DVM) nodes have different type ID depending - * on configuration. One of them is picked to represent the whole group, - * as they all share the same event types. - */ -#define CCN_EVENT_RNI(_name, _event) { .attr = CCN_EVENT_ATTR(rni_##_name), \ - .type = CCN_TYPE_RNI_3P, .event = _event, } - -#define CCN_EVENT_SBAS(_name, _event) { .attr = CCN_EVENT_ATTR(sbas_##_name), \ - .type = CCN_TYPE_SBAS, .event = _event, } - -#define CCN_EVENT_CYCLES(_name) { .attr = CCN_EVENT_ATTR(_name), \ - .type = CCN_TYPE_CYCLES } - - -static ssize_t arm_ccn_pmu_event_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); - struct arm_ccn_pmu_event *event = container_of(attr, - struct arm_ccn_pmu_event, attr); - ssize_t res; - - res = snprintf(buf, PAGE_SIZE, "type=0x%x", event->type); - if (event->event) - res += snprintf(buf + res, PAGE_SIZE - res, ",event=0x%x", - event->event); - if (event->def) - res += snprintf(buf + res, PAGE_SIZE - res, ",%s", - event->def); - if (event->mask) - res += snprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x", - event->mask); - - /* Arguments required by an event */ - switch (event->type) { - case CCN_TYPE_CYCLES: - break; - case CCN_TYPE_XP: - res += snprintf(buf + res, PAGE_SIZE - res, - ",xp=?,vc=?"); - if (event->event == CCN_EVENT_WATCHPOINT) - res += snprintf(buf + res, PAGE_SIZE - res, - ",port=?,dir=?,cmp_l=?,cmp_h=?,mask=?"); - else - res += snprintf(buf + res, PAGE_SIZE - res, - ",bus=?"); - - break; - case CCN_TYPE_MN: - res += snprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id); - break; - default: - res += snprintf(buf + res, PAGE_SIZE - res, ",node=?"); - break; - } - - res += snprintf(buf + res, PAGE_SIZE - res, "\n"); - - return res; -} - -static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj, - struct attribute *attr, int index) -{ - struct device *dev = kobj_to_dev(kobj); - struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); - struct device_attribute *dev_attr = container_of(attr, - struct device_attribute, attr); - struct arm_ccn_pmu_event *event = container_of(dev_attr, - struct arm_ccn_pmu_event, attr); - - if (event->type == CCN_TYPE_SBAS && !ccn->sbas_present) - return 0; - if (event->type == CCN_TYPE_SBSX && !ccn->sbsx_present) - return 0; - - return attr->mode; -} - -static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = { - CCN_EVENT_MN(eobarrier, "dir=1,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE), - 
CCN_EVENT_MN(ecbarrier, "dir=1,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE), - CCN_EVENT_MN(dvmop, "dir=1,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE), - CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY), - CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY), - CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY), - CCN_EVENT_HNI(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY), - CCN_EVENT_HNI(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000", - CCN_IDX_MASK_ORDER), - CCN_EVENT_SBSX(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY), - CCN_EVENT_SBSX(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY), - CCN_EVENT_SBSX(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY), - CCN_EVENT_SBSX(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY), - CCN_EVENT_SBSX(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000", - CCN_IDX_MASK_ORDER), - CCN_EVENT_HNF(cache_miss, 0x1), - CCN_EVENT_HNF(l3_sf_cache_access, 0x02), - CCN_EVENT_HNF(cache_fill, 0x3), - CCN_EVENT_HNF(pocq_retry, 0x4), - CCN_EVENT_HNF(pocq_reqs_recvd, 0x5), - CCN_EVENT_HNF(sf_hit, 0x6), - CCN_EVENT_HNF(sf_evictions, 0x7), - CCN_EVENT_HNF(snoops_sent, 0x8), - CCN_EVENT_HNF(snoops_broadcast, 0x9), - CCN_EVENT_HNF(l3_eviction, 0xa), - CCN_EVENT_HNF(l3_fill_invalid_way, 0xb), - CCN_EVENT_HNF(mc_retries, 0xc), - CCN_EVENT_HNF(mc_reqs, 0xd), - CCN_EVENT_HNF(qos_hh_retry, 0xe), - CCN_EVENT_RNI(rdata_beats_p0, 0x1), - CCN_EVENT_RNI(rdata_beats_p1, 0x2), - CCN_EVENT_RNI(rdata_beats_p2, 0x3), - CCN_EVENT_RNI(rxdat_flits, 0x4), - CCN_EVENT_RNI(txdat_flits, 0x5), - CCN_EVENT_RNI(txreq_flits, 0x6), - CCN_EVENT_RNI(txreq_flits_retried, 0x7), - CCN_EVENT_RNI(rrt_full, 0x8), - CCN_EVENT_RNI(wrt_full, 0x9), - CCN_EVENT_RNI(txreq_flits_replayed, 0xa), - CCN_EVENT_XP(upload_starvation, 0x1), - CCN_EVENT_XP(download_starvation, 0x2), - CCN_EVENT_XP(respin, 0x3), - CCN_EVENT_XP(valid_flit, 0x4), - CCN_EVENT_XP(watchpoint, CCN_EVENT_WATCHPOINT), - CCN_EVENT_SBAS(rdata_beats_p0, 0x1), - CCN_EVENT_SBAS(rxdat_flits, 0x4), - CCN_EVENT_SBAS(txdat_flits, 0x5), - CCN_EVENT_SBAS(txreq_flits, 0x6), - CCN_EVENT_SBAS(txreq_flits_retried, 0x7), - CCN_EVENT_SBAS(rrt_full, 0x8), - CCN_EVENT_SBAS(wrt_full, 0x9), - CCN_EVENT_SBAS(txreq_flits_replayed, 0xa), - CCN_EVENT_CYCLES(cycles), -}; - -/* Populated in arm_ccn_init() */ -static struct attribute - *arm_ccn_pmu_events_attrs[ARRAY_SIZE(arm_ccn_pmu_events) + 1]; - -static const struct attribute_group arm_ccn_pmu_events_attr_group = { - .name = "events", - .is_visible = arm_ccn_pmu_events_is_visible, - .attrs = arm_ccn_pmu_events_attrs, -}; - - -static u64 *arm_ccn_pmu_get_cmp_mask(struct arm_ccn *ccn, const char *name) -{ - unsigned long i; - - if (WARN_ON(!name || !name[0] || !isxdigit(name[0]) || !name[1])) - return NULL; - i = isdigit(name[0]) ? name[0] - '0' : 0xa + tolower(name[0]) - 'a'; - - switch (name[1]) { - case 'l': - return &ccn->dt.cmp_mask[i].l; - case 'h': - return &ccn->dt.cmp_mask[i].h; - default: - return NULL; - } -} - -static ssize_t arm_ccn_pmu_cmp_mask_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); - u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name); - - return mask ? 
snprintf(buf, PAGE_SIZE, "0x%016llx\n", *mask) : -EINVAL; -} - -static ssize_t arm_ccn_pmu_cmp_mask_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); - u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name); - int err = -EINVAL; - - if (mask) - err = kstrtoull(buf, 0, mask); - - return err ? err : count; -} - -#define CCN_CMP_MASK_ATTR(_name) \ - struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \ - __ATTR(_name, S_IRUGO | S_IWUSR, \ - arm_ccn_pmu_cmp_mask_show, arm_ccn_pmu_cmp_mask_store) - -#define CCN_CMP_MASK_ATTR_RO(_name) \ - struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \ - __ATTR(_name, S_IRUGO, arm_ccn_pmu_cmp_mask_show, NULL) - -static CCN_CMP_MASK_ATTR(0l); -static CCN_CMP_MASK_ATTR(0h); -static CCN_CMP_MASK_ATTR(1l); -static CCN_CMP_MASK_ATTR(1h); -static CCN_CMP_MASK_ATTR(2l); -static CCN_CMP_MASK_ATTR(2h); -static CCN_CMP_MASK_ATTR(3l); -static CCN_CMP_MASK_ATTR(3h); -static CCN_CMP_MASK_ATTR(4l); -static CCN_CMP_MASK_ATTR(4h); -static CCN_CMP_MASK_ATTR(5l); -static CCN_CMP_MASK_ATTR(5h); -static CCN_CMP_MASK_ATTR(6l); -static CCN_CMP_MASK_ATTR(6h); -static CCN_CMP_MASK_ATTR(7l); -static CCN_CMP_MASK_ATTR(7h); -static CCN_CMP_MASK_ATTR_RO(8l); -static CCN_CMP_MASK_ATTR_RO(8h); -static CCN_CMP_MASK_ATTR_RO(9l); -static CCN_CMP_MASK_ATTR_RO(9h); -static CCN_CMP_MASK_ATTR_RO(al); -static CCN_CMP_MASK_ATTR_RO(ah); -static CCN_CMP_MASK_ATTR_RO(bl); -static CCN_CMP_MASK_ATTR_RO(bh); - -static struct attribute *arm_ccn_pmu_cmp_mask_attrs[] = { - &arm_ccn_pmu_cmp_mask_attr_0l.attr, &arm_ccn_pmu_cmp_mask_attr_0h.attr, - &arm_ccn_pmu_cmp_mask_attr_1l.attr, &arm_ccn_pmu_cmp_mask_attr_1h.attr, - &arm_ccn_pmu_cmp_mask_attr_2l.attr, &arm_ccn_pmu_cmp_mask_attr_2h.attr, - &arm_ccn_pmu_cmp_mask_attr_3l.attr, &arm_ccn_pmu_cmp_mask_attr_3h.attr, - &arm_ccn_pmu_cmp_mask_attr_4l.attr, &arm_ccn_pmu_cmp_mask_attr_4h.attr, - &arm_ccn_pmu_cmp_mask_attr_5l.attr, &arm_ccn_pmu_cmp_mask_attr_5h.attr, - &arm_ccn_pmu_cmp_mask_attr_6l.attr, &arm_ccn_pmu_cmp_mask_attr_6h.attr, - &arm_ccn_pmu_cmp_mask_attr_7l.attr, &arm_ccn_pmu_cmp_mask_attr_7h.attr, - &arm_ccn_pmu_cmp_mask_attr_8l.attr, &arm_ccn_pmu_cmp_mask_attr_8h.attr, - &arm_ccn_pmu_cmp_mask_attr_9l.attr, &arm_ccn_pmu_cmp_mask_attr_9h.attr, - &arm_ccn_pmu_cmp_mask_attr_al.attr, &arm_ccn_pmu_cmp_mask_attr_ah.attr, - &arm_ccn_pmu_cmp_mask_attr_bl.attr, &arm_ccn_pmu_cmp_mask_attr_bh.attr, - NULL -}; - -static const struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = { - .name = "cmp_mask", - .attrs = arm_ccn_pmu_cmp_mask_attrs, -}; - -static ssize_t arm_ccn_pmu_cpumask_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); - - return cpumap_print_to_pagebuf(true, buf, &ccn->dt.cpu); -} - -static struct device_attribute arm_ccn_pmu_cpumask_attr = - __ATTR(cpumask, S_IRUGO, arm_ccn_pmu_cpumask_show, NULL); - -static struct attribute *arm_ccn_pmu_cpumask_attrs[] = { - &arm_ccn_pmu_cpumask_attr.attr, - NULL, -}; - -static const struct attribute_group arm_ccn_pmu_cpumask_attr_group = { - .attrs = arm_ccn_pmu_cpumask_attrs, -}; - -/* - * Default poll period is 10ms, which is way over the top anyway, - * as in the worst case scenario (an event every cycle), with 1GHz - * clocked bus, the smallest, 32 bit counter will overflow in - * more than 4s. 
- */ -static unsigned int arm_ccn_pmu_poll_period_us = 10000; -module_param_named(pmu_poll_period_us, arm_ccn_pmu_poll_period_us, uint, - S_IRUGO | S_IWUSR); - -static ktime_t arm_ccn_pmu_timer_period(void) -{ - return ns_to_ktime((u64)arm_ccn_pmu_poll_period_us * 1000); -} - - -static const struct attribute_group *arm_ccn_pmu_attr_groups[] = { - &arm_ccn_pmu_events_attr_group, - &arm_ccn_pmu_format_attr_group, - &arm_ccn_pmu_cmp_mask_attr_group, - &arm_ccn_pmu_cpumask_attr_group, - NULL -}; - - -static int arm_ccn_pmu_alloc_bit(unsigned long *bitmap, unsigned long size) -{ - int bit; - - do { - bit = find_first_zero_bit(bitmap, size); - if (bit >= size) - return -EAGAIN; - } while (test_and_set_bit(bit, bitmap)); - - return bit; -} - -/* All RN-I and RN-D nodes have identical PMUs */ -static int arm_ccn_pmu_type_eq(u32 a, u32 b) -{ - if (a == b) - return 1; - - switch (a) { - case CCN_TYPE_RNI_1P: - case CCN_TYPE_RNI_2P: - case CCN_TYPE_RNI_3P: - case CCN_TYPE_RND_1P: - case CCN_TYPE_RND_2P: - case CCN_TYPE_RND_3P: - switch (b) { - case CCN_TYPE_RNI_1P: - case CCN_TYPE_RNI_2P: - case CCN_TYPE_RNI_3P: - case CCN_TYPE_RND_1P: - case CCN_TYPE_RND_2P: - case CCN_TYPE_RND_3P: - return 1; - } - break; - } - - return 0; -} - -static int arm_ccn_pmu_event_alloc(struct perf_event *event) -{ - struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); - struct hw_perf_event *hw = &event->hw; - u32 node_xp, type, event_id; - struct arm_ccn_component *source; - int bit; - - node_xp = CCN_CONFIG_NODE(event->attr.config); - type = CCN_CONFIG_TYPE(event->attr.config); - event_id = CCN_CONFIG_EVENT(event->attr.config); - - /* Allocate the cycle counter */ - if (type == CCN_TYPE_CYCLES) { - if (test_and_set_bit(CCN_IDX_PMU_CYCLE_COUNTER, - ccn->dt.pmu_counters_mask)) - return -EAGAIN; - - hw->idx = CCN_IDX_PMU_CYCLE_COUNTER; - ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event; - - return 0; - } - - /* Allocate an event counter */ - hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask, - CCN_NUM_PMU_EVENT_COUNTERS); - if (hw->idx < 0) { - dev_dbg(ccn->dev, "No more counters available!\n"); - return -EAGAIN; - } - - if (type == CCN_TYPE_XP) - source = &ccn->xp[node_xp]; - else - source = &ccn->node[node_xp]; - ccn->dt.pmu_counters[hw->idx].source = source; - - /* Allocate an event source or a watchpoint */ - if (type == CCN_TYPE_XP && event_id == CCN_EVENT_WATCHPOINT) - bit = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask, - CCN_NUM_XP_WATCHPOINTS); - else - bit = arm_ccn_pmu_alloc_bit(source->pmu_events_mask, - CCN_NUM_PMU_EVENTS); - if (bit < 0) { - dev_dbg(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n", - node_xp); - clear_bit(hw->idx, ccn->dt.pmu_counters_mask); - return -EAGAIN; - } - hw->config_base = bit; - - ccn->dt.pmu_counters[hw->idx].event = event; - - return 0; -} - -static void arm_ccn_pmu_event_release(struct perf_event *event) -{ - struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); - struct hw_perf_event *hw = &event->hw; - - if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) { - clear_bit(CCN_IDX_PMU_CYCLE_COUNTER, ccn->dt.pmu_counters_mask); - } else { - struct arm_ccn_component *source = - ccn->dt.pmu_counters[hw->idx].source; - - if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP && - CCN_CONFIG_EVENT(event->attr.config) == - CCN_EVENT_WATCHPOINT) - clear_bit(hw->config_base, source->xp.dt_cmp_mask); - else - clear_bit(hw->config_base, source->pmu_events_mask); - clear_bit(hw->idx, ccn->dt.pmu_counters_mask); - } - - ccn->dt.pmu_counters[hw->idx].source = NULL; - 
ccn->dt.pmu_counters[hw->idx].event = NULL; -} - -static int arm_ccn_pmu_event_init(struct perf_event *event) -{ - struct arm_ccn *ccn; - struct hw_perf_event *hw = &event->hw; - u32 node_xp, type, event_id; - int valid; - int i; - struct perf_event *sibling; - - if (event->attr.type != event->pmu->type) - return -ENOENT; - - ccn = pmu_to_arm_ccn(event->pmu); - - if (hw->sample_period) { - dev_warn(ccn->dev, "Sampling not supported!\n"); - return -EOPNOTSUPP; - } - - if (has_branch_stack(event) || event->attr.exclude_user || - event->attr.exclude_kernel || event->attr.exclude_hv || - event->attr.exclude_idle || event->attr.exclude_host || - event->attr.exclude_guest) { - dev_warn(ccn->dev, "Can't exclude execution levels!\n"); - return -EINVAL; - } - - if (event->cpu < 0) { - dev_warn(ccn->dev, "Can't provide per-task data!\n"); - return -EOPNOTSUPP; - } - /* - * Many perf core operations (eg. events rotation) operate on a - * single CPU context. This is obvious for CPU PMUs, where one - * expects the same sets of events being observed on all CPUs, - * but can lead to issues for off-core PMUs, like CCN, where each - * event could be theoretically assigned to a different CPU. To - * mitigate this, we enforce CPU assignment to one, selected - * processor (the one described in the "cpumask" attribute). - */ - event->cpu = cpumask_first(&ccn->dt.cpu); - - node_xp = CCN_CONFIG_NODE(event->attr.config); - type = CCN_CONFIG_TYPE(event->attr.config); - event_id = CCN_CONFIG_EVENT(event->attr.config); - - /* Validate node/xp vs topology */ - switch (type) { - case CCN_TYPE_MN: - if (node_xp != ccn->mn_id) { - dev_warn(ccn->dev, "Invalid MN ID %d!\n", node_xp); - return -EINVAL; - } - break; - case CCN_TYPE_XP: - if (node_xp >= ccn->num_xps) { - dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp); - return -EINVAL; - } - break; - case CCN_TYPE_CYCLES: - break; - default: - if (node_xp >= ccn->num_nodes) { - dev_warn(ccn->dev, "Invalid node ID %d!\n", node_xp); - return -EINVAL; - } - if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) { - dev_warn(ccn->dev, "Invalid type 0x%x for node %d!\n", - type, node_xp); - return -EINVAL; - } - break; - } - - /* Validate event ID vs available for the type */ - for (i = 0, valid = 0; i < ARRAY_SIZE(arm_ccn_pmu_events) && !valid; - i++) { - struct arm_ccn_pmu_event *e = &arm_ccn_pmu_events[i]; - u32 port = CCN_CONFIG_PORT(event->attr.config); - u32 vc = CCN_CONFIG_VC(event->attr.config); - - if (!arm_ccn_pmu_type_eq(type, e->type)) - continue; - if (event_id != e->event) - continue; - if (e->num_ports && port >= e->num_ports) { - dev_warn(ccn->dev, "Invalid port %d for node/XP %d!\n", - port, node_xp); - return -EINVAL; - } - if (e->num_vcs && vc >= e->num_vcs) { - dev_warn(ccn->dev, "Invalid vc %d for node/XP %d!\n", - vc, node_xp); - return -EINVAL; - } - valid = 1; - } - if (!valid) { - dev_warn(ccn->dev, "Invalid event 0x%x for node/XP %d!\n", - event_id, node_xp); - return -EINVAL; - } - - /* Watchpoint-based event for a node is actually set on XP */ - if (event_id == CCN_EVENT_WATCHPOINT && type != CCN_TYPE_XP) { - u32 port; - - type = CCN_TYPE_XP; - port = arm_ccn_node_to_xp_port(node_xp); - node_xp = arm_ccn_node_to_xp(node_xp); - - arm_ccn_pmu_config_set(&event->attr.config, - node_xp, type, port); - } - - /* - * We must NOT create groups containing mixed PMUs, although software - * events are acceptable (for example to create a CCN group - * periodically read when a hrtimer aka cpu-clock leader triggers). 
- */ - if (event->group_leader->pmu != event->pmu && - !is_software_event(event->group_leader)) - return -EINVAL; - - list_for_each_entry(sibling, &event->group_leader->sibling_list, - group_entry) - if (sibling->pmu != event->pmu && - !is_software_event(sibling)) - return -EINVAL; - - return 0; -} - -static u64 arm_ccn_pmu_read_counter(struct arm_ccn *ccn, int idx) -{ - u64 res; - - if (idx == CCN_IDX_PMU_CYCLE_COUNTER) { -#ifdef readq - res = readq(ccn->dt.base + CCN_DT_PMCCNTR); -#else - /* 40 bit counter, can do snapshot and read in two parts */ - writel(0x1, ccn->dt.base + CCN_DT_PMSR_REQ); - while (!(readl(ccn->dt.base + CCN_DT_PMSR) & 0x1)) - ; - writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR); - res = readl(ccn->dt.base + CCN_DT_PMCCNTRSR + 4) & 0xff; - res <<= 32; - res |= readl(ccn->dt.base + CCN_DT_PMCCNTRSR); -#endif - } else { - res = readl(ccn->dt.base + CCN_DT_PMEVCNT(idx)); - } - - return res; -} - -static void arm_ccn_pmu_event_update(struct perf_event *event) -{ - struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); - struct hw_perf_event *hw = &event->hw; - u64 prev_count, new_count, mask; - - do { - prev_count = local64_read(&hw->prev_count); - new_count = arm_ccn_pmu_read_counter(ccn, hw->idx); - } while (local64_xchg(&hw->prev_count, new_count) != prev_count); - - mask = (1LLU << (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER ? 40 : 32)) - 1; - - local64_add((new_count - prev_count) & mask, &event->count); -} - -static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable) -{ - struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); - struct hw_perf_event *hw = &event->hw; - struct arm_ccn_component *xp; - u32 val, dt_cfg; - - /* Nothing to do for cycle counter */ - if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) - return; - - if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) - xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)]; - else - xp = &ccn->xp[arm_ccn_node_to_xp( - CCN_CONFIG_NODE(event->attr.config))]; - - if (enable) - dt_cfg = hw->event_base; - else - dt_cfg = CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH; - - spin_lock(&ccn->dt.config_lock); - - val = readl(xp->base + CCN_XP_DT_CONFIG); - val &= ~(CCN_XP_DT_CONFIG__DT_CFG__MASK << - CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx)); - val |= dt_cfg << CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx); - writel(val, xp->base + CCN_XP_DT_CONFIG); - - spin_unlock(&ccn->dt.config_lock); -} - -static void arm_ccn_pmu_event_start(struct perf_event *event, int flags) -{ - struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); - struct hw_perf_event *hw = &event->hw; - - local64_set(&event->hw.prev_count, - arm_ccn_pmu_read_counter(ccn, hw->idx)); - hw->state = 0; - - /* Set the DT bus input, engaging the counter */ - arm_ccn_pmu_xp_dt_config(event, 1); -} - -static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags) -{ - struct hw_perf_event *hw = &event->hw; - - /* Disable counting, setting the DT bus to pass-through mode */ - arm_ccn_pmu_xp_dt_config(event, 0); - - if (flags & PERF_EF_UPDATE) - arm_ccn_pmu_event_update(event); - - hw->state |= PERF_HES_STOPPED; -} - -static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event) -{ - struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); - struct hw_perf_event *hw = &event->hw; - struct arm_ccn_component *source = - ccn->dt.pmu_counters[hw->idx].source; - unsigned long wp = hw->config_base; - u32 val; - u64 cmp_l = event->attr.config1; - u64 cmp_h = event->attr.config2; - u64 mask_l = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].l; - u64 mask_h = 
ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].h; - - hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(wp); - - /* Direction (RX/TX), device (port) & virtual channel */ - val = readl(source->base + CCN_XP_DT_INTERFACE_SEL); - val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK << - CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp)); - val |= CCN_CONFIG_DIR(event->attr.config) << - CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp); - val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK << - CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp)); - val |= CCN_CONFIG_PORT(event->attr.config) << - CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp); - val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK << - CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp)); - val |= CCN_CONFIG_VC(event->attr.config) << - CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp); - writel(val, source->base + CCN_XP_DT_INTERFACE_SEL); - - /* Comparison values */ - writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp)); - writel((cmp_l >> 32) & 0x7fffffff, - source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4); - writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp)); - writel((cmp_h >> 32) & 0x0fffffff, - source->base + CCN_XP_DT_CMP_VAL_H(wp) + 4); - - /* Mask */ - writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp)); - writel((mask_l >> 32) & 0x7fffffff, - source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4); - writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp)); - writel((mask_h >> 32) & 0x0fffffff, - source->base + CCN_XP_DT_CMP_MASK_H(wp) + 4); -} - -static void arm_ccn_pmu_xp_event_config(struct perf_event *event) -{ - struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); - struct hw_perf_event *hw = &event->hw; - struct arm_ccn_component *source = - ccn->dt.pmu_counters[hw->idx].source; - u32 val, id; - - hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base); - - id = (CCN_CONFIG_VC(event->attr.config) << 4) | - (CCN_CONFIG_BUS(event->attr.config) << 3) | - (CCN_CONFIG_EVENT(event->attr.config) << 0); - - val = readl(source->base + CCN_XP_PMU_EVENT_SEL); - val &= ~(CCN_XP_PMU_EVENT_SEL__ID__MASK << - CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base)); - val |= id << CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base); - writel(val, source->base + CCN_XP_PMU_EVENT_SEL); -} - -static void arm_ccn_pmu_node_event_config(struct perf_event *event) -{ - struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); - struct hw_perf_event *hw = &event->hw; - struct arm_ccn_component *source = - ccn->dt.pmu_counters[hw->idx].source; - u32 type = CCN_CONFIG_TYPE(event->attr.config); - u32 val, port; - - port = arm_ccn_node_to_xp_port(CCN_CONFIG_NODE(event->attr.config)); - hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(port, - hw->config_base); - - /* These *_event_sel regs should be identical, but let's make sure... 
*/ - BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL != CCN_SBAS_PMU_EVENT_SEL); - BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL != CCN_RNI_PMU_EVENT_SEL); - BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(1) != - CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1)); - BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1) != - CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(1)); - BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__MASK != - CCN_SBAS_PMU_EVENT_SEL__ID__MASK); - BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__MASK != - CCN_RNI_PMU_EVENT_SEL__ID__MASK); - if (WARN_ON(type != CCN_TYPE_HNF && type != CCN_TYPE_SBAS && - !arm_ccn_pmu_type_eq(type, CCN_TYPE_RNI_3P))) - return; - - /* Set the event id for the pre-allocated counter */ - val = readl(source->base + CCN_HNF_PMU_EVENT_SEL); - val &= ~(CCN_HNF_PMU_EVENT_SEL__ID__MASK << - CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base)); - val |= CCN_CONFIG_EVENT(event->attr.config) << - CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base); - writel(val, source->base + CCN_HNF_PMU_EVENT_SEL); -} - -static void arm_ccn_pmu_event_config(struct perf_event *event) -{ - struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); - struct hw_perf_event *hw = &event->hw; - u32 xp, offset, val; - - /* Cycle counter requires no setup */ - if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) - return; - - if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) - xp = CCN_CONFIG_XP(event->attr.config); - else - xp = arm_ccn_node_to_xp(CCN_CONFIG_NODE(event->attr.config)); - - spin_lock(&ccn->dt.config_lock); - - /* Set the DT bus "distance" register */ - offset = (hw->idx / 4) * 4; - val = readl(ccn->dt.base + CCN_DT_ACTIVE_DSM + offset); - val &= ~(CCN_DT_ACTIVE_DSM__DSM_ID__MASK << - CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4)); - val |= xp << CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4); - writel(val, ccn->dt.base + CCN_DT_ACTIVE_DSM + offset); - - if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) { - if (CCN_CONFIG_EVENT(event->attr.config) == - CCN_EVENT_WATCHPOINT) - arm_ccn_pmu_xp_watchpoint_config(event); - else - arm_ccn_pmu_xp_event_config(event); - } else { - arm_ccn_pmu_node_event_config(event); - } - - spin_unlock(&ccn->dt.config_lock); -} - -static int arm_ccn_pmu_active_counters(struct arm_ccn *ccn) -{ - return bitmap_weight(ccn->dt.pmu_counters_mask, - CCN_NUM_PMU_EVENT_COUNTERS + 1); -} - -static int arm_ccn_pmu_event_add(struct perf_event *event, int flags) -{ - int err; - struct hw_perf_event *hw = &event->hw; - struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); - - err = arm_ccn_pmu_event_alloc(event); - if (err) - return err; - - /* - * Pin the timer, so that the overflows are handled by the chosen - * event->cpu (this is the same one as presented in "cpumask" - * attribute). 
- */ - if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 1) - hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(), - HRTIMER_MODE_REL_PINNED); - - arm_ccn_pmu_event_config(event); - - hw->state = PERF_HES_STOPPED; - - if (flags & PERF_EF_START) - arm_ccn_pmu_event_start(event, PERF_EF_UPDATE); - - return 0; -} - -static void arm_ccn_pmu_event_del(struct perf_event *event, int flags) -{ - struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); - - arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE); - - arm_ccn_pmu_event_release(event); - - if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 0) - hrtimer_cancel(&ccn->dt.hrtimer); -} - -static void arm_ccn_pmu_event_read(struct perf_event *event) -{ - arm_ccn_pmu_event_update(event); -} - -static void arm_ccn_pmu_enable(struct pmu *pmu) -{ - struct arm_ccn *ccn = pmu_to_arm_ccn(pmu); - - u32 val = readl(ccn->dt.base + CCN_DT_PMCR); - val |= CCN_DT_PMCR__PMU_EN; - writel(val, ccn->dt.base + CCN_DT_PMCR); -} - -static void arm_ccn_pmu_disable(struct pmu *pmu) -{ - struct arm_ccn *ccn = pmu_to_arm_ccn(pmu); - - u32 val = readl(ccn->dt.base + CCN_DT_PMCR); - val &= ~CCN_DT_PMCR__PMU_EN; - writel(val, ccn->dt.base + CCN_DT_PMCR); -} - -static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt) -{ - u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR); - int idx; - - if (!pmovsr) - return IRQ_NONE; - - writel(pmovsr, dt->base + CCN_DT_PMOVSR_CLR); - - BUILD_BUG_ON(CCN_IDX_PMU_CYCLE_COUNTER != CCN_NUM_PMU_EVENT_COUNTERS); - - for (idx = 0; idx < CCN_NUM_PMU_EVENT_COUNTERS + 1; idx++) { - struct perf_event *event = dt->pmu_counters[idx].event; - int overflowed = pmovsr & BIT(idx); - - WARN_ON_ONCE(overflowed && !event && - idx != CCN_IDX_PMU_CYCLE_COUNTER); - - if (!event || !overflowed) - continue; - - arm_ccn_pmu_event_update(event); - } - - return IRQ_HANDLED; -} - -static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer) -{ - struct arm_ccn_dt *dt = container_of(hrtimer, struct arm_ccn_dt, - hrtimer); - unsigned long flags; - - local_irq_save(flags); - arm_ccn_pmu_overflow_handler(dt); - local_irq_restore(flags); - - hrtimer_forward_now(hrtimer, arm_ccn_pmu_timer_period()); - return HRTIMER_RESTART; -} - - -static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) -{ - struct arm_ccn_dt *dt = hlist_entry_safe(node, struct arm_ccn_dt, node); - struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt); - unsigned int target; - - if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu)) - return 0; - target = cpumask_any_but(cpu_online_mask, cpu); - if (target >= nr_cpu_ids) - return 0; - perf_pmu_migrate_context(&dt->pmu, cpu, target); - cpumask_set_cpu(target, &dt->cpu); - if (ccn->irq) - WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0); - return 0; -} - -static DEFINE_IDA(arm_ccn_pmu_ida); - -static int arm_ccn_pmu_init(struct arm_ccn *ccn) -{ - int i; - char *name; - int err; - - /* Initialize DT subsystem */ - ccn->dt.base = ccn->base + CCN_REGION_SIZE; - spin_lock_init(&ccn->dt.config_lock); - writel(CCN_DT_PMOVSR_CLR__MASK, ccn->dt.base + CCN_DT_PMOVSR_CLR); - writel(CCN_DT_CTL__DT_EN, ccn->dt.base + CCN_DT_CTL); - writel(CCN_DT_PMCR__OVFL_INTR_EN | CCN_DT_PMCR__PMU_EN, - ccn->dt.base + CCN_DT_PMCR); - writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR); - for (i = 0; i < ccn->num_xps; i++) { - writel(0, ccn->xp[i].base + CCN_XP_DT_CONFIG); - writel((CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS << - CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(0)) | - (CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS << - 
CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(1)) | - CCN_XP_DT_CONTROL__DT_ENABLE, - ccn->xp[i].base + CCN_XP_DT_CONTROL); - } - ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].l = ~0; - ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].h = ~0; - ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].l = 0; - ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].h = 0; - ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].l = ~0; - ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].h = ~(0x1 << 15); - ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].l = ~0; - ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].h = ~(0x1f << 9); - - /* Get a convenient /sys/event_source/devices/ name */ - ccn->dt.id = ida_simple_get(&arm_ccn_pmu_ida, 0, 0, GFP_KERNEL); - if (ccn->dt.id == 0) { - name = "ccn"; - } else { - name = devm_kasprintf(ccn->dev, GFP_KERNEL, "ccn_%d", - ccn->dt.id); - if (!name) { - err = -ENOMEM; - goto error_choose_name; - } - } - - /* Perf driver registration */ - ccn->dt.pmu = (struct pmu) { - .module = THIS_MODULE, - .attr_groups = arm_ccn_pmu_attr_groups, - .task_ctx_nr = perf_invalid_context, - .event_init = arm_ccn_pmu_event_init, - .add = arm_ccn_pmu_event_add, - .del = arm_ccn_pmu_event_del, - .start = arm_ccn_pmu_event_start, - .stop = arm_ccn_pmu_event_stop, - .read = arm_ccn_pmu_event_read, - .pmu_enable = arm_ccn_pmu_enable, - .pmu_disable = arm_ccn_pmu_disable, - }; - - /* No overflow interrupt? Have to use a timer instead. */ - if (!ccn->irq) { - dev_info(ccn->dev, "No access to interrupts, using timer.\n"); - hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC, - HRTIMER_MODE_REL); - ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler; - } - - /* Pick one CPU which we will use to collect data from CCN... */ - cpumask_set_cpu(get_cpu(), &ccn->dt.cpu); - - /* Also make sure that the overflow interrupt is handled by this CPU */ - if (ccn->irq) { - err = irq_set_affinity_hint(ccn->irq, &ccn->dt.cpu); - if (err) { - dev_err(ccn->dev, "Failed to set interrupt affinity!\n"); - goto error_set_affinity; - } - } - - err = perf_pmu_register(&ccn->dt.pmu, name, -1); - if (err) - goto error_pmu_register; - - cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE, - &ccn->dt.node); - put_cpu(); - return 0; - -error_pmu_register: -error_set_affinity: - put_cpu(); -error_choose_name: - ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); - for (i = 0; i < ccn->num_xps; i++) - writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); - writel(0, ccn->dt.base + CCN_DT_PMCR); - return err; -} - -static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn) -{ - int i; - - cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE, - &ccn->dt.node); - if (ccn->irq) - irq_set_affinity_hint(ccn->irq, NULL); - for (i = 0; i < ccn->num_xps; i++) - writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); - writel(0, ccn->dt.base + CCN_DT_PMCR); - perf_pmu_unregister(&ccn->dt.pmu); - ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); -} - -static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn, - int (*callback)(struct arm_ccn *ccn, int region, - void __iomem *base, u32 type, u32 id)) -{ - int region; - - for (region = 0; region < CCN_NUM_REGIONS; region++) { - u32 val, type, id; - void __iomem *base; - int err; - - val = readl(ccn->base + CCN_MN_OLY_COMP_LIST_63_0 + - 4 * (region / 32)); - if (!(val & (1 << (region % 32)))) - continue; - - base = ccn->base + region * CCN_REGION_SIZE; - val = readl(base + CCN_ALL_OLY_ID); - type = (val >> CCN_ALL_OLY_ID__OLY_ID__SHIFT) & - CCN_ALL_OLY_ID__OLY_ID__MASK; - id = (val >> CCN_ALL_OLY_ID__NODE_ID__SHIFT) & - CCN_ALL_OLY_ID__NODE_ID__MASK; - - err = callback(ccn, region, base, type, id); - 
if (err) - return err; - } - - return 0; -} - -static int arm_ccn_get_nodes_num(struct arm_ccn *ccn, int region, - void __iomem *base, u32 type, u32 id) -{ - - if (type == CCN_TYPE_XP && id >= ccn->num_xps) - ccn->num_xps = id + 1; - else if (id >= ccn->num_nodes) - ccn->num_nodes = id + 1; - - return 0; -} - -static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region, - void __iomem *base, u32 type, u32 id) -{ - struct arm_ccn_component *component; - - dev_dbg(ccn->dev, "Region %d: id=%u, type=0x%02x\n", region, id, type); - - switch (type) { - case CCN_TYPE_MN: - ccn->mn_id = id; - return 0; - case CCN_TYPE_DT: - return 0; - case CCN_TYPE_XP: - component = &ccn->xp[id]; - break; - case CCN_TYPE_SBSX: - ccn->sbsx_present = 1; - component = &ccn->node[id]; - break; - case CCN_TYPE_SBAS: - ccn->sbas_present = 1; - /* Fall-through */ - default: - component = &ccn->node[id]; - break; - } - - component->base = base; - component->type = type; - - return 0; -} - - -static irqreturn_t arm_ccn_error_handler(struct arm_ccn *ccn, - const u32 *err_sig_val) -{ - /* This should be really handled by firmware... */ - dev_err(ccn->dev, "Error reported in %08x%08x%08x%08x%08x%08x.\n", - err_sig_val[5], err_sig_val[4], err_sig_val[3], - err_sig_val[2], err_sig_val[1], err_sig_val[0]); - dev_err(ccn->dev, "Disabling interrupt generation for all errors.\n"); - writel(CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE, - ccn->base + CCN_MN_ERRINT_STATUS); - - return IRQ_HANDLED; -} - - -static irqreturn_t arm_ccn_irq_handler(int irq, void *dev_id) -{ - irqreturn_t res = IRQ_NONE; - struct arm_ccn *ccn = dev_id; - u32 err_sig_val[6]; - u32 err_or; - int i; - - /* PMU overflow is a special case */ - err_or = err_sig_val[0] = readl(ccn->base + CCN_MN_ERR_SIG_VAL_63_0); - if (err_or & CCN_MN_ERR_SIG_VAL_63_0__DT) { - err_or &= ~CCN_MN_ERR_SIG_VAL_63_0__DT; - res = arm_ccn_pmu_overflow_handler(&ccn->dt); - } - - /* Have to read all err_sig_vals to clear them */ - for (i = 1; i < ARRAY_SIZE(err_sig_val); i++) { - err_sig_val[i] = readl(ccn->base + - CCN_MN_ERR_SIG_VAL_63_0 + i * 4); - err_or |= err_sig_val[i]; - } - if (err_or) - res |= arm_ccn_error_handler(ccn, err_sig_val); - - if (res != IRQ_NONE) - writel(CCN_MN_ERRINT_STATUS__INTREQ__DESSERT, - ccn->base + CCN_MN_ERRINT_STATUS); - - return res; -} - - -static int arm_ccn_probe(struct platform_device *pdev) -{ - struct arm_ccn *ccn; - struct resource *res; - unsigned int irq; - int err; - - ccn = devm_kzalloc(&pdev->dev, sizeof(*ccn), GFP_KERNEL); - if (!ccn) - return -ENOMEM; - ccn->dev = &pdev->dev; - platform_set_drvdata(pdev, ccn); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -EINVAL; - - if (!devm_request_mem_region(ccn->dev, res->start, - resource_size(res), pdev->name)) - return -EBUSY; - - ccn->base = devm_ioremap(ccn->dev, res->start, - resource_size(res)); - if (!ccn->base) - return -EFAULT; - - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res) - return -EINVAL; - irq = res->start; - - /* Check if we can use the interrupt */ - writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE, - ccn->base + CCN_MN_ERRINT_STATUS); - if (readl(ccn->base + CCN_MN_ERRINT_STATUS) & - CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED) { - /* Can set 'disable' bits, so can acknowledge interrupts */ - writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE, - ccn->base + CCN_MN_ERRINT_STATUS); - err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler, - IRQF_NOBALANCING | IRQF_NO_THREAD, - dev_name(ccn->dev), ccn); - if (err) - return err; - - 
ccn->irq = irq; - } - - - /* Build topology */ - - err = arm_ccn_for_each_valid_region(ccn, arm_ccn_get_nodes_num); - if (err) - return err; - - ccn->node = devm_kcalloc(ccn->dev, ccn->num_nodes, sizeof(*ccn->node), - GFP_KERNEL); - ccn->xp = devm_kcalloc(ccn->dev, ccn->num_xps, sizeof(*ccn->node), - GFP_KERNEL); - if (!ccn->node || !ccn->xp) - return -ENOMEM; - - err = arm_ccn_for_each_valid_region(ccn, arm_ccn_init_nodes); - if (err) - return err; - - return arm_ccn_pmu_init(ccn); -} - -static int arm_ccn_remove(struct platform_device *pdev) -{ - struct arm_ccn *ccn = platform_get_drvdata(pdev); - - arm_ccn_pmu_cleanup(ccn); - - return 0; -} - -static const struct of_device_id arm_ccn_match[] = { - { .compatible = "arm,ccn-502", }, - { .compatible = "arm,ccn-504", }, - {}, -}; -MODULE_DEVICE_TABLE(of, arm_ccn_match); - -static struct platform_driver arm_ccn_driver = { - .driver = { - .name = "arm-ccn", - .of_match_table = arm_ccn_match, - }, - .probe = arm_ccn_probe, - .remove = arm_ccn_remove, -}; - -static int __init arm_ccn_init(void) -{ - int i, ret; - - ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCN_ONLINE, - "perf/arm/ccn:online", NULL, - arm_ccn_pmu_offline_cpu); - if (ret) - return ret; - - for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++) - arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr; - - ret = platform_driver_register(&arm_ccn_driver); - if (ret) - cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE); - return ret; -} - -static void __exit arm_ccn_exit(void) -{ - platform_driver_unregister(&arm_ccn_driver); - cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE); -} - -module_init(arm_ccn_init); -module_exit(arm_ccn_exit); - -MODULE_AUTHOR("Pawel Moll "); -MODULE_LICENSE("GPL"); diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index da5724cd89cf..331b6d992b5a 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -5,6 +5,13 @@ menu "Performance monitor support" depends on PERF_EVENTS +config ARM_CCN + tristate "ARM CCN driver support" + depends on ARM || ARM64 + help + PMU (perf) driver supporting the ARM CCN (Cache Coherent Network) + interconnect. + config ARM_PMU depends on ARM || ARM64 bool "ARM PMU framework" diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index c2f27419bdf0..5004abee0f3a 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_ARM_CCN) += arm-ccn.o obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c new file mode 100644 index 000000000000..b52332e52ca5 --- /dev/null +++ b/drivers/perf/arm-ccn.c @@ -0,0 +1,1597 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2014 ARM Limited + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CCN_NUM_XP_PORTS 2 +#define CCN_NUM_VCS 4 +#define CCN_NUM_REGIONS 256 +#define CCN_REGION_SIZE 0x10000 + +#define CCN_ALL_OLY_ID 0xff00 +#define CCN_ALL_OLY_ID__OLY_ID__SHIFT 0 +#define CCN_ALL_OLY_ID__OLY_ID__MASK 0x1f +#define CCN_ALL_OLY_ID__NODE_ID__SHIFT 8 +#define CCN_ALL_OLY_ID__NODE_ID__MASK 0x3f + +#define CCN_MN_ERRINT_STATUS 0x0008 +#define CCN_MN_ERRINT_STATUS__INTREQ__DESSERT 0x11 +#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__ENABLE 0x02 +#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLED 0x20 +#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE 0x22 +#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_ENABLE 0x04 +#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLED 0x40 +#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLE 0x44 +#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE 0x08 +#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED 0x80 +#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE 0x88 +#define CCN_MN_OLY_COMP_LIST_63_0 0x01e0 +#define CCN_MN_ERR_SIG_VAL_63_0 0x0300 +#define CCN_MN_ERR_SIG_VAL_63_0__DT (1 << 1) + +#define CCN_DT_ACTIVE_DSM 0x0000 +#define CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(n) ((n) * 8) +#define CCN_DT_ACTIVE_DSM__DSM_ID__MASK 0xff +#define CCN_DT_CTL 0x0028 +#define CCN_DT_CTL__DT_EN (1 << 0) +#define CCN_DT_PMEVCNT(n) (0x0100 + (n) * 0x8) +#define CCN_DT_PMCCNTR 0x0140 +#define CCN_DT_PMCCNTRSR 0x0190 +#define CCN_DT_PMOVSR 0x0198 +#define CCN_DT_PMOVSR_CLR 0x01a0 +#define CCN_DT_PMOVSR_CLR__MASK 0x1f +#define CCN_DT_PMCR 0x01a8 +#define CCN_DT_PMCR__OVFL_INTR_EN (1 << 6) +#define CCN_DT_PMCR__PMU_EN (1 << 0) +#define CCN_DT_PMSR 0x01b0 +#define CCN_DT_PMSR_REQ 0x01b8 +#define CCN_DT_PMSR_CLR 0x01c0 + +#define CCN_HNF_PMU_EVENT_SEL 0x0600 +#define CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4) +#define CCN_HNF_PMU_EVENT_SEL__ID__MASK 0xf + +#define CCN_XP_DT_CONFIG 0x0300 +#define CCN_XP_DT_CONFIG__DT_CFG__SHIFT(n) ((n) * 4) +#define CCN_XP_DT_CONFIG__DT_CFG__MASK 0xf +#define CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH 0x0 +#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT_0_OR_1 0x1 +#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(n) (0x2 + (n)) +#define CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(n) (0x4 + (n)) +#define CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(d, n) (0x8 + (d) * 4 + (n)) +#define CCN_XP_DT_INTERFACE_SEL 0x0308 +#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(n) (0 + (n) * 8) +#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK 0x1 +#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(n) (1 + (n) * 8) +#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK 0x1 +#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(n) (2 + (n) * 8) +#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK 0x3 +#define CCN_XP_DT_CMP_VAL_L(n) (0x0310 + (n) * 0x40) +#define CCN_XP_DT_CMP_VAL_H(n) (0x0318 + (n) * 0x40) +#define CCN_XP_DT_CMP_MASK_L(n) (0x0320 + (n) * 0x40) +#define CCN_XP_DT_CMP_MASK_H(n) (0x0328 + (n) * 0x40) +#define CCN_XP_DT_CONTROL 0x0370 +#define CCN_XP_DT_CONTROL__DT_ENABLE (1 << 0) +#define CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(n) (12 + (n) * 4) +#define CCN_XP_DT_CONTROL__WP_ARM_SEL__MASK 0xf +#define CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS 0xf +#define CCN_XP_PMU_EVENT_SEL 0x0600 +#define CCN_XP_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 7) +#define CCN_XP_PMU_EVENT_SEL__ID__MASK 0x3f + +#define CCN_SBAS_PMU_EVENT_SEL 0x0600 +#define CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4) +#define CCN_SBAS_PMU_EVENT_SEL__ID__MASK 0xf + +#define 
CCN_RNI_PMU_EVENT_SEL 0x0600 +#define CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4) +#define CCN_RNI_PMU_EVENT_SEL__ID__MASK 0xf + +#define CCN_TYPE_MN 0x01 +#define CCN_TYPE_DT 0x02 +#define CCN_TYPE_HNF 0x04 +#define CCN_TYPE_HNI 0x05 +#define CCN_TYPE_XP 0x08 +#define CCN_TYPE_SBSX 0x0c +#define CCN_TYPE_SBAS 0x10 +#define CCN_TYPE_RNI_1P 0x14 +#define CCN_TYPE_RNI_2P 0x15 +#define CCN_TYPE_RNI_3P 0x16 +#define CCN_TYPE_RND_1P 0x18 /* RN-D = RN-I + DVM */ +#define CCN_TYPE_RND_2P 0x19 +#define CCN_TYPE_RND_3P 0x1a +#define CCN_TYPE_CYCLES 0xff /* Pseudotype */ + +#define CCN_EVENT_WATCHPOINT 0xfe /* Pseudoevent */ + +#define CCN_NUM_PMU_EVENTS 4 +#define CCN_NUM_XP_WATCHPOINTS 2 /* See DT.dbg_id.num_watchpoints */ +#define CCN_NUM_PMU_EVENT_COUNTERS 8 /* See DT.dbg_id.num_pmucntr */ +#define CCN_IDX_PMU_CYCLE_COUNTER CCN_NUM_PMU_EVENT_COUNTERS + +#define CCN_NUM_PREDEFINED_MASKS 4 +#define CCN_IDX_MASK_ANY (CCN_NUM_PMU_EVENT_COUNTERS + 0) +#define CCN_IDX_MASK_EXACT (CCN_NUM_PMU_EVENT_COUNTERS + 1) +#define CCN_IDX_MASK_ORDER (CCN_NUM_PMU_EVENT_COUNTERS + 2) +#define CCN_IDX_MASK_OPCODE (CCN_NUM_PMU_EVENT_COUNTERS + 3) + +struct arm_ccn_component { + void __iomem *base; + u32 type; + + DECLARE_BITMAP(pmu_events_mask, CCN_NUM_PMU_EVENTS); + union { + struct { + DECLARE_BITMAP(dt_cmp_mask, CCN_NUM_XP_WATCHPOINTS); + } xp; + }; +}; + +#define pmu_to_arm_ccn(_pmu) container_of(container_of(_pmu, \ + struct arm_ccn_dt, pmu), struct arm_ccn, dt) + +struct arm_ccn_dt { + int id; + void __iomem *base; + + spinlock_t config_lock; + + DECLARE_BITMAP(pmu_counters_mask, CCN_NUM_PMU_EVENT_COUNTERS + 1); + struct { + struct arm_ccn_component *source; + struct perf_event *event; + } pmu_counters[CCN_NUM_PMU_EVENT_COUNTERS + 1]; + + struct { + u64 l, h; + } cmp_mask[CCN_NUM_PMU_EVENT_COUNTERS + CCN_NUM_PREDEFINED_MASKS]; + + struct hrtimer hrtimer; + + cpumask_t cpu; + struct hlist_node node; + + struct pmu pmu; +}; + +struct arm_ccn { + struct device *dev; + void __iomem *base; + unsigned int irq; + + unsigned sbas_present:1; + unsigned sbsx_present:1; + + int num_nodes; + struct arm_ccn_component *node; + + int num_xps; + struct arm_ccn_component *xp; + + struct arm_ccn_dt dt; + int mn_id; +}; + +static int arm_ccn_node_to_xp(int node) +{ + return node / CCN_NUM_XP_PORTS; +} + +static int arm_ccn_node_to_xp_port(int node) +{ + return node % CCN_NUM_XP_PORTS; +} + + +/* + * Bit shifts and masks in these defines must be kept in sync with + * arm_ccn_pmu_config_set() and CCN_FORMAT_ATTRs below! 
+ */ +#define CCN_CONFIG_NODE(_config) (((_config) >> 0) & 0xff) +#define CCN_CONFIG_XP(_config) (((_config) >> 0) & 0xff) +#define CCN_CONFIG_TYPE(_config) (((_config) >> 8) & 0xff) +#define CCN_CONFIG_EVENT(_config) (((_config) >> 16) & 0xff) +#define CCN_CONFIG_PORT(_config) (((_config) >> 24) & 0x3) +#define CCN_CONFIG_BUS(_config) (((_config) >> 24) & 0x3) +#define CCN_CONFIG_VC(_config) (((_config) >> 26) & 0x7) +#define CCN_CONFIG_DIR(_config) (((_config) >> 29) & 0x1) +#define CCN_CONFIG_MASK(_config) (((_config) >> 30) & 0xf) + +static void arm_ccn_pmu_config_set(u64 *config, u32 node_xp, u32 type, u32 port) +{ + *config &= ~((0xff << 0) | (0xff << 8) | (0x3 << 24)); + *config |= (node_xp << 0) | (type << 8) | (port << 24); +} + +static ssize_t arm_ccn_pmu_format_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *ea = container_of(attr, + struct dev_ext_attribute, attr); + + return snprintf(buf, PAGE_SIZE, "%s\n", (char *)ea->var); +} + +#define CCN_FORMAT_ATTR(_name, _config) \ + struct dev_ext_attribute arm_ccn_pmu_format_attr_##_name = \ + { __ATTR(_name, S_IRUGO, arm_ccn_pmu_format_show, \ + NULL), _config } + +static CCN_FORMAT_ATTR(node, "config:0-7"); +static CCN_FORMAT_ATTR(xp, "config:0-7"); +static CCN_FORMAT_ATTR(type, "config:8-15"); +static CCN_FORMAT_ATTR(event, "config:16-23"); +static CCN_FORMAT_ATTR(port, "config:24-25"); +static CCN_FORMAT_ATTR(bus, "config:24-25"); +static CCN_FORMAT_ATTR(vc, "config:26-28"); +static CCN_FORMAT_ATTR(dir, "config:29-29"); +static CCN_FORMAT_ATTR(mask, "config:30-33"); +static CCN_FORMAT_ATTR(cmp_l, "config1:0-62"); +static CCN_FORMAT_ATTR(cmp_h, "config2:0-59"); + +static struct attribute *arm_ccn_pmu_format_attrs[] = { + &arm_ccn_pmu_format_attr_node.attr.attr, + &arm_ccn_pmu_format_attr_xp.attr.attr, + &arm_ccn_pmu_format_attr_type.attr.attr, + &arm_ccn_pmu_format_attr_event.attr.attr, + &arm_ccn_pmu_format_attr_port.attr.attr, + &arm_ccn_pmu_format_attr_bus.attr.attr, + &arm_ccn_pmu_format_attr_vc.attr.attr, + &arm_ccn_pmu_format_attr_dir.attr.attr, + &arm_ccn_pmu_format_attr_mask.attr.attr, + &arm_ccn_pmu_format_attr_cmp_l.attr.attr, + &arm_ccn_pmu_format_attr_cmp_h.attr.attr, + NULL +}; + +static const struct attribute_group arm_ccn_pmu_format_attr_group = { + .name = "format", + .attrs = arm_ccn_pmu_format_attrs, +}; + + +struct arm_ccn_pmu_event { + struct device_attribute attr; + u32 type; + u32 event; + int num_ports; + int num_vcs; + const char *def; + int mask; +}; + +#define CCN_EVENT_ATTR(_name) \ + __ATTR(_name, S_IRUGO, arm_ccn_pmu_event_show, NULL) + +/* + * Events defined in TRM for MN, HN-I and SBSX are actually watchpoints set on + * their ports in XP they are connected to. For the sake of usability they are + * explicitly defined here (and translated into a relevant watchpoint in + * arm_ccn_pmu_event_init()) so the user can easily request them without deep + * knowledge of the flit format. 
+ */ + +#define CCN_EVENT_MN(_name, _def, _mask) { .attr = CCN_EVENT_ATTR(mn_##_name), \ + .type = CCN_TYPE_MN, .event = CCN_EVENT_WATCHPOINT, \ + .num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, \ + .def = _def, .mask = _mask, } + +#define CCN_EVENT_HNI(_name, _def, _mask) { \ + .attr = CCN_EVENT_ATTR(hni_##_name), .type = CCN_TYPE_HNI, \ + .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \ + .num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, } + +#define CCN_EVENT_SBSX(_name, _def, _mask) { \ + .attr = CCN_EVENT_ATTR(sbsx_##_name), .type = CCN_TYPE_SBSX, \ + .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \ + .num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, } + +#define CCN_EVENT_HNF(_name, _event) { .attr = CCN_EVENT_ATTR(hnf_##_name), \ + .type = CCN_TYPE_HNF, .event = _event, } + +#define CCN_EVENT_XP(_name, _event) { .attr = CCN_EVENT_ATTR(xp_##_name), \ + .type = CCN_TYPE_XP, .event = _event, \ + .num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, } + +/* + * RN-I & RN-D (RN-D = RN-I + DVM) nodes have different type ID depending + * on configuration. One of them is picked to represent the whole group, + * as they all share the same event types. + */ +#define CCN_EVENT_RNI(_name, _event) { .attr = CCN_EVENT_ATTR(rni_##_name), \ + .type = CCN_TYPE_RNI_3P, .event = _event, } + +#define CCN_EVENT_SBAS(_name, _event) { .attr = CCN_EVENT_ATTR(sbas_##_name), \ + .type = CCN_TYPE_SBAS, .event = _event, } + +#define CCN_EVENT_CYCLES(_name) { .attr = CCN_EVENT_ATTR(_name), \ + .type = CCN_TYPE_CYCLES } + + +static ssize_t arm_ccn_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); + struct arm_ccn_pmu_event *event = container_of(attr, + struct arm_ccn_pmu_event, attr); + ssize_t res; + + res = snprintf(buf, PAGE_SIZE, "type=0x%x", event->type); + if (event->event) + res += snprintf(buf + res, PAGE_SIZE - res, ",event=0x%x", + event->event); + if (event->def) + res += snprintf(buf + res, PAGE_SIZE - res, ",%s", + event->def); + if (event->mask) + res += snprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x", + event->mask); + + /* Arguments required by an event */ + switch (event->type) { + case CCN_TYPE_CYCLES: + break; + case CCN_TYPE_XP: + res += snprintf(buf + res, PAGE_SIZE - res, + ",xp=?,vc=?"); + if (event->event == CCN_EVENT_WATCHPOINT) + res += snprintf(buf + res, PAGE_SIZE - res, + ",port=?,dir=?,cmp_l=?,cmp_h=?,mask=?"); + else + res += snprintf(buf + res, PAGE_SIZE - res, + ",bus=?"); + + break; + case CCN_TYPE_MN: + res += snprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id); + break; + default: + res += snprintf(buf + res, PAGE_SIZE - res, ",node=?"); + break; + } + + res += snprintf(buf + res, PAGE_SIZE - res, "\n"); + + return res; +} + +static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj, + struct attribute *attr, int index) +{ + struct device *dev = kobj_to_dev(kobj); + struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); + struct device_attribute *dev_attr = container_of(attr, + struct device_attribute, attr); + struct arm_ccn_pmu_event *event = container_of(dev_attr, + struct arm_ccn_pmu_event, attr); + + if (event->type == CCN_TYPE_SBAS && !ccn->sbas_present) + return 0; + if (event->type == CCN_TYPE_SBSX && !ccn->sbsx_present) + return 0; + + return attr->mode; +} + +static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = { + CCN_EVENT_MN(eobarrier, "dir=1,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE), + 
CCN_EVENT_MN(ecbarrier, "dir=1,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE), + CCN_EVENT_MN(dvmop, "dir=1,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE), + CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY), + CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY), + CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY), + CCN_EVENT_HNI(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY), + CCN_EVENT_HNI(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000", + CCN_IDX_MASK_ORDER), + CCN_EVENT_SBSX(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY), + CCN_EVENT_SBSX(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY), + CCN_EVENT_SBSX(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY), + CCN_EVENT_SBSX(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY), + CCN_EVENT_SBSX(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000", + CCN_IDX_MASK_ORDER), + CCN_EVENT_HNF(cache_miss, 0x1), + CCN_EVENT_HNF(l3_sf_cache_access, 0x02), + CCN_EVENT_HNF(cache_fill, 0x3), + CCN_EVENT_HNF(pocq_retry, 0x4), + CCN_EVENT_HNF(pocq_reqs_recvd, 0x5), + CCN_EVENT_HNF(sf_hit, 0x6), + CCN_EVENT_HNF(sf_evictions, 0x7), + CCN_EVENT_HNF(snoops_sent, 0x8), + CCN_EVENT_HNF(snoops_broadcast, 0x9), + CCN_EVENT_HNF(l3_eviction, 0xa), + CCN_EVENT_HNF(l3_fill_invalid_way, 0xb), + CCN_EVENT_HNF(mc_retries, 0xc), + CCN_EVENT_HNF(mc_reqs, 0xd), + CCN_EVENT_HNF(qos_hh_retry, 0xe), + CCN_EVENT_RNI(rdata_beats_p0, 0x1), + CCN_EVENT_RNI(rdata_beats_p1, 0x2), + CCN_EVENT_RNI(rdata_beats_p2, 0x3), + CCN_EVENT_RNI(rxdat_flits, 0x4), + CCN_EVENT_RNI(txdat_flits, 0x5), + CCN_EVENT_RNI(txreq_flits, 0x6), + CCN_EVENT_RNI(txreq_flits_retried, 0x7), + CCN_EVENT_RNI(rrt_full, 0x8), + CCN_EVENT_RNI(wrt_full, 0x9), + CCN_EVENT_RNI(txreq_flits_replayed, 0xa), + CCN_EVENT_XP(upload_starvation, 0x1), + CCN_EVENT_XP(download_starvation, 0x2), + CCN_EVENT_XP(respin, 0x3), + CCN_EVENT_XP(valid_flit, 0x4), + CCN_EVENT_XP(watchpoint, CCN_EVENT_WATCHPOINT), + CCN_EVENT_SBAS(rdata_beats_p0, 0x1), + CCN_EVENT_SBAS(rxdat_flits, 0x4), + CCN_EVENT_SBAS(txdat_flits, 0x5), + CCN_EVENT_SBAS(txreq_flits, 0x6), + CCN_EVENT_SBAS(txreq_flits_retried, 0x7), + CCN_EVENT_SBAS(rrt_full, 0x8), + CCN_EVENT_SBAS(wrt_full, 0x9), + CCN_EVENT_SBAS(txreq_flits_replayed, 0xa), + CCN_EVENT_CYCLES(cycles), +}; + +/* Populated in arm_ccn_init() */ +static struct attribute + *arm_ccn_pmu_events_attrs[ARRAY_SIZE(arm_ccn_pmu_events) + 1]; + +static const struct attribute_group arm_ccn_pmu_events_attr_group = { + .name = "events", + .is_visible = arm_ccn_pmu_events_is_visible, + .attrs = arm_ccn_pmu_events_attrs, +}; + + +static u64 *arm_ccn_pmu_get_cmp_mask(struct arm_ccn *ccn, const char *name) +{ + unsigned long i; + + if (WARN_ON(!name || !name[0] || !isxdigit(name[0]) || !name[1])) + return NULL; + i = isdigit(name[0]) ? name[0] - '0' : 0xa + tolower(name[0]) - 'a'; + + switch (name[1]) { + case 'l': + return &ccn->dt.cmp_mask[i].l; + case 'h': + return &ccn->dt.cmp_mask[i].h; + default: + return NULL; + } +} + +static ssize_t arm_ccn_pmu_cmp_mask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); + u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name); + + return mask ? 
snprintf(buf, PAGE_SIZE, "0x%016llx\n", *mask) : -EINVAL; +} + +static ssize_t arm_ccn_pmu_cmp_mask_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); + u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name); + int err = -EINVAL; + + if (mask) + err = kstrtoull(buf, 0, mask); + + return err ? err : count; +} + +#define CCN_CMP_MASK_ATTR(_name) \ + struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \ + __ATTR(_name, S_IRUGO | S_IWUSR, \ + arm_ccn_pmu_cmp_mask_show, arm_ccn_pmu_cmp_mask_store) + +#define CCN_CMP_MASK_ATTR_RO(_name) \ + struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \ + __ATTR(_name, S_IRUGO, arm_ccn_pmu_cmp_mask_show, NULL) + +static CCN_CMP_MASK_ATTR(0l); +static CCN_CMP_MASK_ATTR(0h); +static CCN_CMP_MASK_ATTR(1l); +static CCN_CMP_MASK_ATTR(1h); +static CCN_CMP_MASK_ATTR(2l); +static CCN_CMP_MASK_ATTR(2h); +static CCN_CMP_MASK_ATTR(3l); +static CCN_CMP_MASK_ATTR(3h); +static CCN_CMP_MASK_ATTR(4l); +static CCN_CMP_MASK_ATTR(4h); +static CCN_CMP_MASK_ATTR(5l); +static CCN_CMP_MASK_ATTR(5h); +static CCN_CMP_MASK_ATTR(6l); +static CCN_CMP_MASK_ATTR(6h); +static CCN_CMP_MASK_ATTR(7l); +static CCN_CMP_MASK_ATTR(7h); +static CCN_CMP_MASK_ATTR_RO(8l); +static CCN_CMP_MASK_ATTR_RO(8h); +static CCN_CMP_MASK_ATTR_RO(9l); +static CCN_CMP_MASK_ATTR_RO(9h); +static CCN_CMP_MASK_ATTR_RO(al); +static CCN_CMP_MASK_ATTR_RO(ah); +static CCN_CMP_MASK_ATTR_RO(bl); +static CCN_CMP_MASK_ATTR_RO(bh); + +static struct attribute *arm_ccn_pmu_cmp_mask_attrs[] = { + &arm_ccn_pmu_cmp_mask_attr_0l.attr, &arm_ccn_pmu_cmp_mask_attr_0h.attr, + &arm_ccn_pmu_cmp_mask_attr_1l.attr, &arm_ccn_pmu_cmp_mask_attr_1h.attr, + &arm_ccn_pmu_cmp_mask_attr_2l.attr, &arm_ccn_pmu_cmp_mask_attr_2h.attr, + &arm_ccn_pmu_cmp_mask_attr_3l.attr, &arm_ccn_pmu_cmp_mask_attr_3h.attr, + &arm_ccn_pmu_cmp_mask_attr_4l.attr, &arm_ccn_pmu_cmp_mask_attr_4h.attr, + &arm_ccn_pmu_cmp_mask_attr_5l.attr, &arm_ccn_pmu_cmp_mask_attr_5h.attr, + &arm_ccn_pmu_cmp_mask_attr_6l.attr, &arm_ccn_pmu_cmp_mask_attr_6h.attr, + &arm_ccn_pmu_cmp_mask_attr_7l.attr, &arm_ccn_pmu_cmp_mask_attr_7h.attr, + &arm_ccn_pmu_cmp_mask_attr_8l.attr, &arm_ccn_pmu_cmp_mask_attr_8h.attr, + &arm_ccn_pmu_cmp_mask_attr_9l.attr, &arm_ccn_pmu_cmp_mask_attr_9h.attr, + &arm_ccn_pmu_cmp_mask_attr_al.attr, &arm_ccn_pmu_cmp_mask_attr_ah.attr, + &arm_ccn_pmu_cmp_mask_attr_bl.attr, &arm_ccn_pmu_cmp_mask_attr_bh.attr, + NULL +}; + +static const struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = { + .name = "cmp_mask", + .attrs = arm_ccn_pmu_cmp_mask_attrs, +}; + +static ssize_t arm_ccn_pmu_cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, &ccn->dt.cpu); +} + +static struct device_attribute arm_ccn_pmu_cpumask_attr = + __ATTR(cpumask, S_IRUGO, arm_ccn_pmu_cpumask_show, NULL); + +static struct attribute *arm_ccn_pmu_cpumask_attrs[] = { + &arm_ccn_pmu_cpumask_attr.attr, + NULL, +}; + +static const struct attribute_group arm_ccn_pmu_cpumask_attr_group = { + .attrs = arm_ccn_pmu_cpumask_attrs, +}; + +/* + * Default poll period is 10ms, which is way over the top anyway, + * as in the worst case scenario (an event every cycle), with 1GHz + * clocked bus, the smallest, 32 bit counter will overflow in + * more than 4s. 
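 *
 * (2^32 events at 10^9 events/s take ~4.29s to wrap, so a 10ms poll
 * samples each counter roughly 430 times per wrap.)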
+ */ +static unsigned int arm_ccn_pmu_poll_period_us = 10000; +module_param_named(pmu_poll_period_us, arm_ccn_pmu_poll_period_us, uint, + S_IRUGO | S_IWUSR); + +static ktime_t arm_ccn_pmu_timer_period(void) +{ + return ns_to_ktime((u64)arm_ccn_pmu_poll_period_us * 1000); +} + + +static const struct attribute_group *arm_ccn_pmu_attr_groups[] = { + &arm_ccn_pmu_events_attr_group, + &arm_ccn_pmu_format_attr_group, + &arm_ccn_pmu_cmp_mask_attr_group, + &arm_ccn_pmu_cpumask_attr_group, + NULL +}; + + +static int arm_ccn_pmu_alloc_bit(unsigned long *bitmap, unsigned long size) +{ + int bit; + + do { + bit = find_first_zero_bit(bitmap, size); + if (bit >= size) + return -EAGAIN; + } while (test_and_set_bit(bit, bitmap)); + + return bit; +} + +/* All RN-I and RN-D nodes have identical PMUs */ +static int arm_ccn_pmu_type_eq(u32 a, u32 b) +{ + if (a == b) + return 1; + + switch (a) { + case CCN_TYPE_RNI_1P: + case CCN_TYPE_RNI_2P: + case CCN_TYPE_RNI_3P: + case CCN_TYPE_RND_1P: + case CCN_TYPE_RND_2P: + case CCN_TYPE_RND_3P: + switch (b) { + case CCN_TYPE_RNI_1P: + case CCN_TYPE_RNI_2P: + case CCN_TYPE_RNI_3P: + case CCN_TYPE_RND_1P: + case CCN_TYPE_RND_2P: + case CCN_TYPE_RND_3P: + return 1; + } + break; + } + + return 0; +} + +static int arm_ccn_pmu_event_alloc(struct perf_event *event) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + u32 node_xp, type, event_id; + struct arm_ccn_component *source; + int bit; + + node_xp = CCN_CONFIG_NODE(event->attr.config); + type = CCN_CONFIG_TYPE(event->attr.config); + event_id = CCN_CONFIG_EVENT(event->attr.config); + + /* Allocate the cycle counter */ + if (type == CCN_TYPE_CYCLES) { + if (test_and_set_bit(CCN_IDX_PMU_CYCLE_COUNTER, + ccn->dt.pmu_counters_mask)) + return -EAGAIN; + + hw->idx = CCN_IDX_PMU_CYCLE_COUNTER; + ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event; + + return 0; + } + + /* Allocate an event counter */ + hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask, + CCN_NUM_PMU_EVENT_COUNTERS); + if (hw->idx < 0) { + dev_dbg(ccn->dev, "No more counters available!\n"); + return -EAGAIN; + } + + if (type == CCN_TYPE_XP) + source = &ccn->xp[node_xp]; + else + source = &ccn->node[node_xp]; + ccn->dt.pmu_counters[hw->idx].source = source; + + /* Allocate an event source or a watchpoint */ + if (type == CCN_TYPE_XP && event_id == CCN_EVENT_WATCHPOINT) + bit = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask, + CCN_NUM_XP_WATCHPOINTS); + else + bit = arm_ccn_pmu_alloc_bit(source->pmu_events_mask, + CCN_NUM_PMU_EVENTS); + if (bit < 0) { + dev_dbg(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n", + node_xp); + clear_bit(hw->idx, ccn->dt.pmu_counters_mask); + return -EAGAIN; + } + hw->config_base = bit; + + ccn->dt.pmu_counters[hw->idx].event = event; + + return 0; +} + +static void arm_ccn_pmu_event_release(struct perf_event *event) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + + if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) { + clear_bit(CCN_IDX_PMU_CYCLE_COUNTER, ccn->dt.pmu_counters_mask); + } else { + struct arm_ccn_component *source = + ccn->dt.pmu_counters[hw->idx].source; + + if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP && + CCN_CONFIG_EVENT(event->attr.config) == + CCN_EVENT_WATCHPOINT) + clear_bit(hw->config_base, source->xp.dt_cmp_mask); + else + clear_bit(hw->config_base, source->pmu_events_mask); + clear_bit(hw->idx, ccn->dt.pmu_counters_mask); + } + + ccn->dt.pmu_counters[hw->idx].source = NULL; + 
ccn->dt.pmu_counters[hw->idx].event = NULL; +} + +static int arm_ccn_pmu_event_init(struct perf_event *event) +{ + struct arm_ccn *ccn; + struct hw_perf_event *hw = &event->hw; + u32 node_xp, type, event_id; + int valid; + int i; + struct perf_event *sibling; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + ccn = pmu_to_arm_ccn(event->pmu); + + if (hw->sample_period) { + dev_warn(ccn->dev, "Sampling not supported!\n"); + return -EOPNOTSUPP; + } + + if (has_branch_stack(event) || event->attr.exclude_user || + event->attr.exclude_kernel || event->attr.exclude_hv || + event->attr.exclude_idle || event->attr.exclude_host || + event->attr.exclude_guest) { + dev_warn(ccn->dev, "Can't exclude execution levels!\n"); + return -EINVAL; + } + + if (event->cpu < 0) { + dev_warn(ccn->dev, "Can't provide per-task data!\n"); + return -EOPNOTSUPP; + } + /* + * Many perf core operations (eg. events rotation) operate on a + * single CPU context. This is obvious for CPU PMUs, where one + * expects the same sets of events being observed on all CPUs, + * but can lead to issues for off-core PMUs, like CCN, where each + * event could be theoretically assigned to a different CPU. To + * mitigate this, we enforce CPU assignment to one, selected + * processor (the one described in the "cpumask" attribute). + */ + event->cpu = cpumask_first(&ccn->dt.cpu); + + node_xp = CCN_CONFIG_NODE(event->attr.config); + type = CCN_CONFIG_TYPE(event->attr.config); + event_id = CCN_CONFIG_EVENT(event->attr.config); + + /* Validate node/xp vs topology */ + switch (type) { + case CCN_TYPE_MN: + if (node_xp != ccn->mn_id) { + dev_warn(ccn->dev, "Invalid MN ID %d!\n", node_xp); + return -EINVAL; + } + break; + case CCN_TYPE_XP: + if (node_xp >= ccn->num_xps) { + dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp); + return -EINVAL; + } + break; + case CCN_TYPE_CYCLES: + break; + default: + if (node_xp >= ccn->num_nodes) { + dev_warn(ccn->dev, "Invalid node ID %d!\n", node_xp); + return -EINVAL; + } + if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) { + dev_warn(ccn->dev, "Invalid type 0x%x for node %d!\n", + type, node_xp); + return -EINVAL; + } + break; + } + + /* Validate event ID vs available for the type */ + for (i = 0, valid = 0; i < ARRAY_SIZE(arm_ccn_pmu_events) && !valid; + i++) { + struct arm_ccn_pmu_event *e = &arm_ccn_pmu_events[i]; + u32 port = CCN_CONFIG_PORT(event->attr.config); + u32 vc = CCN_CONFIG_VC(event->attr.config); + + if (!arm_ccn_pmu_type_eq(type, e->type)) + continue; + if (event_id != e->event) + continue; + if (e->num_ports && port >= e->num_ports) { + dev_warn(ccn->dev, "Invalid port %d for node/XP %d!\n", + port, node_xp); + return -EINVAL; + } + if (e->num_vcs && vc >= e->num_vcs) { + dev_warn(ccn->dev, "Invalid vc %d for node/XP %d!\n", + vc, node_xp); + return -EINVAL; + } + valid = 1; + } + if (!valid) { + dev_warn(ccn->dev, "Invalid event 0x%x for node/XP %d!\n", + event_id, node_xp); + return -EINVAL; + } + + /* Watchpoint-based event for a node is actually set on XP */ + if (event_id == CCN_EVENT_WATCHPOINT && type != CCN_TYPE_XP) { + u32 port; + + type = CCN_TYPE_XP; + port = arm_ccn_node_to_xp_port(node_xp); + node_xp = arm_ccn_node_to_xp(node_xp); + + arm_ccn_pmu_config_set(&event->attr.config, + node_xp, type, port); + } + + /* + * We must NOT create groups containing mixed PMUs, although software + * events are acceptable (for example to create a CCN group + * periodically read when a hrtimer aka cpu-clock leader triggers). 
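 *
 * E.g. grouping ccn/cycles/ under a cpu-clock (software) leader is
 * accepted, while grouping it under a CPU PMU leader such as the
 * core "cycles" event is rejected with -EINVAL below.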
+ */ + if (event->group_leader->pmu != event->pmu && + !is_software_event(event->group_leader)) + return -EINVAL; + + list_for_each_entry(sibling, &event->group_leader->sibling_list, + group_entry) + if (sibling->pmu != event->pmu && + !is_software_event(sibling)) + return -EINVAL; + + return 0; +} + +static u64 arm_ccn_pmu_read_counter(struct arm_ccn *ccn, int idx) +{ + u64 res; + + if (idx == CCN_IDX_PMU_CYCLE_COUNTER) { +#ifdef readq + res = readq(ccn->dt.base + CCN_DT_PMCCNTR); +#else + /* 40 bit counter, can do snapshot and read in two parts */ + writel(0x1, ccn->dt.base + CCN_DT_PMSR_REQ); + while (!(readl(ccn->dt.base + CCN_DT_PMSR) & 0x1)) + ; + writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR); + res = readl(ccn->dt.base + CCN_DT_PMCCNTRSR + 4) & 0xff; + res <<= 32; + res |= readl(ccn->dt.base + CCN_DT_PMCCNTRSR); +#endif + } else { + res = readl(ccn->dt.base + CCN_DT_PMEVCNT(idx)); + } + + return res; +} + +static void arm_ccn_pmu_event_update(struct perf_event *event) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + u64 prev_count, new_count, mask; + + do { + prev_count = local64_read(&hw->prev_count); + new_count = arm_ccn_pmu_read_counter(ccn, hw->idx); + } while (local64_xchg(&hw->prev_count, new_count) != prev_count); + + mask = (1LLU << (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER ? 40 : 32)) - 1; + + local64_add((new_count - prev_count) & mask, &event->count); +} + +static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + struct arm_ccn_component *xp; + u32 val, dt_cfg; + + /* Nothing to do for cycle counter */ + if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) + return; + + if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) + xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)]; + else + xp = &ccn->xp[arm_ccn_node_to_xp( + CCN_CONFIG_NODE(event->attr.config))]; + + if (enable) + dt_cfg = hw->event_base; + else + dt_cfg = CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH; + + spin_lock(&ccn->dt.config_lock); + + val = readl(xp->base + CCN_XP_DT_CONFIG); + val &= ~(CCN_XP_DT_CONFIG__DT_CFG__MASK << + CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx)); + val |= dt_cfg << CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx); + writel(val, xp->base + CCN_XP_DT_CONFIG); + + spin_unlock(&ccn->dt.config_lock); +} + +static void arm_ccn_pmu_event_start(struct perf_event *event, int flags) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + + local64_set(&event->hw.prev_count, + arm_ccn_pmu_read_counter(ccn, hw->idx)); + hw->state = 0; + + /* Set the DT bus input, engaging the counter */ + arm_ccn_pmu_xp_dt_config(event, 1); +} + +static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hw = &event->hw; + + /* Disable counting, setting the DT bus to pass-through mode */ + arm_ccn_pmu_xp_dt_config(event, 0); + + if (flags & PERF_EF_UPDATE) + arm_ccn_pmu_event_update(event); + + hw->state |= PERF_HES_STOPPED; +} + +static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + struct arm_ccn_component *source = + ccn->dt.pmu_counters[hw->idx].source; + unsigned long wp = hw->config_base; + u32 val; + u64 cmp_l = event->attr.config1; + u64 cmp_h = event->attr.config2; + u64 mask_l = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].l; + u64 mask_h = 
ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].h; + + hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(wp); + + /* Direction (RX/TX), device (port) & virtual channel */ + val = readl(source->base + CCN_XP_DT_INTERFACE_SEL); + val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK << + CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp)); + val |= CCN_CONFIG_DIR(event->attr.config) << + CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp); + val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK << + CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp)); + val |= CCN_CONFIG_PORT(event->attr.config) << + CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp); + val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK << + CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp)); + val |= CCN_CONFIG_VC(event->attr.config) << + CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp); + writel(val, source->base + CCN_XP_DT_INTERFACE_SEL); + + /* Comparison values */ + writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp)); + writel((cmp_l >> 32) & 0x7fffffff, + source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4); + writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp)); + writel((cmp_h >> 32) & 0x0fffffff, + source->base + CCN_XP_DT_CMP_VAL_H(wp) + 4); + + /* Mask */ + writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp)); + writel((mask_l >> 32) & 0x7fffffff, + source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4); + writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp)); + writel((mask_h >> 32) & 0x0fffffff, + source->base + CCN_XP_DT_CMP_MASK_H(wp) + 4); +} + +static void arm_ccn_pmu_xp_event_config(struct perf_event *event) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + struct arm_ccn_component *source = + ccn->dt.pmu_counters[hw->idx].source; + u32 val, id; + + hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base); + + id = (CCN_CONFIG_VC(event->attr.config) << 4) | + (CCN_CONFIG_BUS(event->attr.config) << 3) | + (CCN_CONFIG_EVENT(event->attr.config) << 0); + + val = readl(source->base + CCN_XP_PMU_EVENT_SEL); + val &= ~(CCN_XP_PMU_EVENT_SEL__ID__MASK << + CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base)); + val |= id << CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base); + writel(val, source->base + CCN_XP_PMU_EVENT_SEL); +} + +static void arm_ccn_pmu_node_event_config(struct perf_event *event) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + struct arm_ccn_component *source = + ccn->dt.pmu_counters[hw->idx].source; + u32 type = CCN_CONFIG_TYPE(event->attr.config); + u32 val, port; + + port = arm_ccn_node_to_xp_port(CCN_CONFIG_NODE(event->attr.config)); + hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(port, + hw->config_base); + + /* These *_event_sel regs should be identical, but let's make sure... 
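 * (the BUILD_BUG_ON()s below turn that assumption into a compile-time
 * check, so the CCN_HNF_* accessors can safely stand in for the SBAS
 * and RN-I/RN-D variants in the register update that follows)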
*/ + BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL != CCN_SBAS_PMU_EVENT_SEL); + BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL != CCN_RNI_PMU_EVENT_SEL); + BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(1) != + CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1)); + BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1) != + CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(1)); + BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__MASK != + CCN_SBAS_PMU_EVENT_SEL__ID__MASK); + BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__MASK != + CCN_RNI_PMU_EVENT_SEL__ID__MASK); + if (WARN_ON(type != CCN_TYPE_HNF && type != CCN_TYPE_SBAS && + !arm_ccn_pmu_type_eq(type, CCN_TYPE_RNI_3P))) + return; + + /* Set the event id for the pre-allocated counter */ + val = readl(source->base + CCN_HNF_PMU_EVENT_SEL); + val &= ~(CCN_HNF_PMU_EVENT_SEL__ID__MASK << + CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base)); + val |= CCN_CONFIG_EVENT(event->attr.config) << + CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base); + writel(val, source->base + CCN_HNF_PMU_EVENT_SEL); +} + +static void arm_ccn_pmu_event_config(struct perf_event *event) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + u32 xp, offset, val; + + /* Cycle counter requires no setup */ + if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) + return; + + if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) + xp = CCN_CONFIG_XP(event->attr.config); + else + xp = arm_ccn_node_to_xp(CCN_CONFIG_NODE(event->attr.config)); + + spin_lock(&ccn->dt.config_lock); + + /* Set the DT bus "distance" register */ + offset = (hw->idx / 4) * 4; + val = readl(ccn->dt.base + CCN_DT_ACTIVE_DSM + offset); + val &= ~(CCN_DT_ACTIVE_DSM__DSM_ID__MASK << + CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4)); + val |= xp << CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4); + writel(val, ccn->dt.base + CCN_DT_ACTIVE_DSM + offset); + + if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) { + if (CCN_CONFIG_EVENT(event->attr.config) == + CCN_EVENT_WATCHPOINT) + arm_ccn_pmu_xp_watchpoint_config(event); + else + arm_ccn_pmu_xp_event_config(event); + } else { + arm_ccn_pmu_node_event_config(event); + } + + spin_unlock(&ccn->dt.config_lock); +} + +static int arm_ccn_pmu_active_counters(struct arm_ccn *ccn) +{ + return bitmap_weight(ccn->dt.pmu_counters_mask, + CCN_NUM_PMU_EVENT_COUNTERS + 1); +} + +static int arm_ccn_pmu_event_add(struct perf_event *event, int flags) +{ + int err; + struct hw_perf_event *hw = &event->hw; + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + + err = arm_ccn_pmu_event_alloc(event); + if (err) + return err; + + /* + * Pin the timer, so that the overflows are handled by the chosen + * event->cpu (this is the same one as presented in "cpumask" + * attribute). 
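 *
 * HRTIMER_MODE_REL_PINNED keeps the timer on the CPU that armed it;
 * it is armed when the first counter becomes active and cancelled in
 * arm_ccn_pmu_event_del() once the last counter is released.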
+ */ + if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 1) + hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(), + HRTIMER_MODE_REL_PINNED); + + arm_ccn_pmu_event_config(event); + + hw->state = PERF_HES_STOPPED; + + if (flags & PERF_EF_START) + arm_ccn_pmu_event_start(event, PERF_EF_UPDATE); + + return 0; +} + +static void arm_ccn_pmu_event_del(struct perf_event *event, int flags) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + + arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE); + + arm_ccn_pmu_event_release(event); + + if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 0) + hrtimer_cancel(&ccn->dt.hrtimer); +} + +static void arm_ccn_pmu_event_read(struct perf_event *event) +{ + arm_ccn_pmu_event_update(event); +} + +static void arm_ccn_pmu_enable(struct pmu *pmu) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(pmu); + + u32 val = readl(ccn->dt.base + CCN_DT_PMCR); + val |= CCN_DT_PMCR__PMU_EN; + writel(val, ccn->dt.base + CCN_DT_PMCR); +} + +static void arm_ccn_pmu_disable(struct pmu *pmu) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(pmu); + + u32 val = readl(ccn->dt.base + CCN_DT_PMCR); + val &= ~CCN_DT_PMCR__PMU_EN; + writel(val, ccn->dt.base + CCN_DT_PMCR); +} + +static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt) +{ + u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR); + int idx; + + if (!pmovsr) + return IRQ_NONE; + + writel(pmovsr, dt->base + CCN_DT_PMOVSR_CLR); + + BUILD_BUG_ON(CCN_IDX_PMU_CYCLE_COUNTER != CCN_NUM_PMU_EVENT_COUNTERS); + + for (idx = 0; idx < CCN_NUM_PMU_EVENT_COUNTERS + 1; idx++) { + struct perf_event *event = dt->pmu_counters[idx].event; + int overflowed = pmovsr & BIT(idx); + + WARN_ON_ONCE(overflowed && !event && + idx != CCN_IDX_PMU_CYCLE_COUNTER); + + if (!event || !overflowed) + continue; + + arm_ccn_pmu_event_update(event); + } + + return IRQ_HANDLED; +} + +static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer) +{ + struct arm_ccn_dt *dt = container_of(hrtimer, struct arm_ccn_dt, + hrtimer); + unsigned long flags; + + local_irq_save(flags); + arm_ccn_pmu_overflow_handler(dt); + local_irq_restore(flags); + + hrtimer_forward_now(hrtimer, arm_ccn_pmu_timer_period()); + return HRTIMER_RESTART; +} + + +static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct arm_ccn_dt *dt = hlist_entry_safe(node, struct arm_ccn_dt, node); + struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt); + unsigned int target; + + if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu)) + return 0; + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + perf_pmu_migrate_context(&dt->pmu, cpu, target); + cpumask_set_cpu(target, &dt->cpu); + if (ccn->irq) + WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0); + return 0; +} + +static DEFINE_IDA(arm_ccn_pmu_ida); + +static int arm_ccn_pmu_init(struct arm_ccn *ccn) +{ + int i; + char *name; + int err; + + /* Initialize DT subsystem */ + ccn->dt.base = ccn->base + CCN_REGION_SIZE; + spin_lock_init(&ccn->dt.config_lock); + writel(CCN_DT_PMOVSR_CLR__MASK, ccn->dt.base + CCN_DT_PMOVSR_CLR); + writel(CCN_DT_CTL__DT_EN, ccn->dt.base + CCN_DT_CTL); + writel(CCN_DT_PMCR__OVFL_INTR_EN | CCN_DT_PMCR__PMU_EN, + ccn->dt.base + CCN_DT_PMCR); + writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR); + for (i = 0; i < ccn->num_xps; i++) { + writel(0, ccn->xp[i].base + CCN_XP_DT_CONFIG); + writel((CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS << + CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(0)) | + (CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS << + 
CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(1)) | + CCN_XP_DT_CONTROL__DT_ENABLE, + ccn->xp[i].base + CCN_XP_DT_CONTROL); + } + ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].l = ~0; + ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].h = ~0; + ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].l = 0; + ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].h = 0; + ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].l = ~0; + ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].h = ~(0x1 << 15); + ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].l = ~0; + ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].h = ~(0x1f << 9); + + /* Get a convenient /sys/event_source/devices/ name */ + ccn->dt.id = ida_simple_get(&arm_ccn_pmu_ida, 0, 0, GFP_KERNEL); + if (ccn->dt.id == 0) { + name = "ccn"; + } else { + name = devm_kasprintf(ccn->dev, GFP_KERNEL, "ccn_%d", + ccn->dt.id); + if (!name) { + err = -ENOMEM; + goto error_choose_name; + } + } + + /* Perf driver registration */ + ccn->dt.pmu = (struct pmu) { + .module = THIS_MODULE, + .attr_groups = arm_ccn_pmu_attr_groups, + .task_ctx_nr = perf_invalid_context, + .event_init = arm_ccn_pmu_event_init, + .add = arm_ccn_pmu_event_add, + .del = arm_ccn_pmu_event_del, + .start = arm_ccn_pmu_event_start, + .stop = arm_ccn_pmu_event_stop, + .read = arm_ccn_pmu_event_read, + .pmu_enable = arm_ccn_pmu_enable, + .pmu_disable = arm_ccn_pmu_disable, + }; + + /* No overflow interrupt? Have to use a timer instead. */ + if (!ccn->irq) { + dev_info(ccn->dev, "No access to interrupts, using timer.\n"); + hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler; + } + + /* Pick one CPU which we will use to collect data from CCN... */ + cpumask_set_cpu(get_cpu(), &ccn->dt.cpu); + + /* Also make sure that the overflow interrupt is handled by this CPU */ + if (ccn->irq) { + err = irq_set_affinity_hint(ccn->irq, &ccn->dt.cpu); + if (err) { + dev_err(ccn->dev, "Failed to set interrupt affinity!\n"); + goto error_set_affinity; + } + } + + err = perf_pmu_register(&ccn->dt.pmu, name, -1); + if (err) + goto error_pmu_register; + + cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE, + &ccn->dt.node); + put_cpu(); + return 0; + +error_pmu_register: +error_set_affinity: + put_cpu(); +error_choose_name: + ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); + for (i = 0; i < ccn->num_xps; i++) + writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); + writel(0, ccn->dt.base + CCN_DT_PMCR); + return err; +} + +static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn) +{ + int i; + + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE, + &ccn->dt.node); + if (ccn->irq) + irq_set_affinity_hint(ccn->irq, NULL); + for (i = 0; i < ccn->num_xps; i++) + writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); + writel(0, ccn->dt.base + CCN_DT_PMCR); + perf_pmu_unregister(&ccn->dt.pmu); + ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); +} + +static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn, + int (*callback)(struct arm_ccn *ccn, int region, + void __iomem *base, u32 type, u32 id)) +{ + int region; + + for (region = 0; region < CCN_NUM_REGIONS; region++) { + u32 val, type, id; + void __iomem *base; + int err; + + val = readl(ccn->base + CCN_MN_OLY_COMP_LIST_63_0 + + 4 * (region / 32)); + if (!(val & (1 << (region % 32)))) + continue; + + base = ccn->base + region * CCN_REGION_SIZE; + val = readl(base + CCN_ALL_OLY_ID); + type = (val >> CCN_ALL_OLY_ID__OLY_ID__SHIFT) & + CCN_ALL_OLY_ID__OLY_ID__MASK; + id = (val >> CCN_ALL_OLY_ID__NODE_ID__SHIFT) & + CCN_ALL_OLY_ID__NODE_ID__MASK; + + err = callback(ccn, region, base, type, id); + 
if (err) + return err; + } + + return 0; +} + +static int arm_ccn_get_nodes_num(struct arm_ccn *ccn, int region, + void __iomem *base, u32 type, u32 id) +{ + + if (type == CCN_TYPE_XP && id >= ccn->num_xps) + ccn->num_xps = id + 1; + else if (id >= ccn->num_nodes) + ccn->num_nodes = id + 1; + + return 0; +} + +static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region, + void __iomem *base, u32 type, u32 id) +{ + struct arm_ccn_component *component; + + dev_dbg(ccn->dev, "Region %d: id=%u, type=0x%02x\n", region, id, type); + + switch (type) { + case CCN_TYPE_MN: + ccn->mn_id = id; + return 0; + case CCN_TYPE_DT: + return 0; + case CCN_TYPE_XP: + component = &ccn->xp[id]; + break; + case CCN_TYPE_SBSX: + ccn->sbsx_present = 1; + component = &ccn->node[id]; + break; + case CCN_TYPE_SBAS: + ccn->sbas_present = 1; + /* Fall-through */ + default: + component = &ccn->node[id]; + break; + } + + component->base = base; + component->type = type; + + return 0; +} + + +static irqreturn_t arm_ccn_error_handler(struct arm_ccn *ccn, + const u32 *err_sig_val) +{ + /* This should be really handled by firmware... */ + dev_err(ccn->dev, "Error reported in %08x%08x%08x%08x%08x%08x.\n", + err_sig_val[5], err_sig_val[4], err_sig_val[3], + err_sig_val[2], err_sig_val[1], err_sig_val[0]); + dev_err(ccn->dev, "Disabling interrupt generation for all errors.\n"); + writel(CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE, + ccn->base + CCN_MN_ERRINT_STATUS); + + return IRQ_HANDLED; +} + + +static irqreturn_t arm_ccn_irq_handler(int irq, void *dev_id) +{ + irqreturn_t res = IRQ_NONE; + struct arm_ccn *ccn = dev_id; + u32 err_sig_val[6]; + u32 err_or; + int i; + + /* PMU overflow is a special case */ + err_or = err_sig_val[0] = readl(ccn->base + CCN_MN_ERR_SIG_VAL_63_0); + if (err_or & CCN_MN_ERR_SIG_VAL_63_0__DT) { + err_or &= ~CCN_MN_ERR_SIG_VAL_63_0__DT; + res = arm_ccn_pmu_overflow_handler(&ccn->dt); + } + + /* Have to read all err_sig_vals to clear them */ + for (i = 1; i < ARRAY_SIZE(err_sig_val); i++) { + err_sig_val[i] = readl(ccn->base + + CCN_MN_ERR_SIG_VAL_63_0 + i * 4); + err_or |= err_sig_val[i]; + } + if (err_or) + res |= arm_ccn_error_handler(ccn, err_sig_val); + + if (res != IRQ_NONE) + writel(CCN_MN_ERRINT_STATUS__INTREQ__DESSERT, + ccn->base + CCN_MN_ERRINT_STATUS); + + return res; +} + + +static int arm_ccn_probe(struct platform_device *pdev) +{ + struct arm_ccn *ccn; + struct resource *res; + unsigned int irq; + int err; + + ccn = devm_kzalloc(&pdev->dev, sizeof(*ccn), GFP_KERNEL); + if (!ccn) + return -ENOMEM; + ccn->dev = &pdev->dev; + platform_set_drvdata(pdev, ccn); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + if (!devm_request_mem_region(ccn->dev, res->start, + resource_size(res), pdev->name)) + return -EBUSY; + + ccn->base = devm_ioremap(ccn->dev, res->start, + resource_size(res)); + if (!ccn->base) + return -EFAULT; + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res) + return -EINVAL; + irq = res->start; + + /* Check if we can use the interrupt */ + writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE, + ccn->base + CCN_MN_ERRINT_STATUS); + if (readl(ccn->base + CCN_MN_ERRINT_STATUS) & + CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED) { + /* Can set 'disable' bits, so can acknowledge interrupts */ + writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE, + ccn->base + CCN_MN_ERRINT_STATUS); + err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler, + IRQF_NOBALANCING | IRQF_NO_THREAD, + dev_name(ccn->dev), ccn); + if (err) + return err; + + 
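		/*
		 * Getting here means the "disable" bit written above stuck,
		 * i.e. the ERRINT logic is writable from this world and the
		 * overflow interrupt can be acknowledged; recording the IRQ
		 * here lets arm_ccn_pmu_init() skip the hrtimer polling
		 * fallback.
		 */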
ccn->irq = irq; + } + + + /* Build topology */ + + err = arm_ccn_for_each_valid_region(ccn, arm_ccn_get_nodes_num); + if (err) + return err; + + ccn->node = devm_kcalloc(ccn->dev, ccn->num_nodes, sizeof(*ccn->node), + GFP_KERNEL); + ccn->xp = devm_kcalloc(ccn->dev, ccn->num_xps, sizeof(*ccn->node), + GFP_KERNEL); + if (!ccn->node || !ccn->xp) + return -ENOMEM; + + err = arm_ccn_for_each_valid_region(ccn, arm_ccn_init_nodes); + if (err) + return err; + + return arm_ccn_pmu_init(ccn); +} + +static int arm_ccn_remove(struct platform_device *pdev) +{ + struct arm_ccn *ccn = platform_get_drvdata(pdev); + + arm_ccn_pmu_cleanup(ccn); + + return 0; +} + +static const struct of_device_id arm_ccn_match[] = { + { .compatible = "arm,ccn-502", }, + { .compatible = "arm,ccn-504", }, + {}, +}; +MODULE_DEVICE_TABLE(of, arm_ccn_match); + +static struct platform_driver arm_ccn_driver = { + .driver = { + .name = "arm-ccn", + .of_match_table = arm_ccn_match, + }, + .probe = arm_ccn_probe, + .remove = arm_ccn_remove, +}; + +static int __init arm_ccn_init(void) +{ + int i, ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCN_ONLINE, + "perf/arm/ccn:online", NULL, + arm_ccn_pmu_offline_cpu); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++) + arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr; + + ret = platform_driver_register(&arm_ccn_driver); + if (ret) + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE); + return ret; +} + +static void __exit arm_ccn_exit(void) +{ + platform_driver_unregister(&arm_ccn_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE); +} + +module_init(arm_ccn_init); +module_exit(arm_ccn_exit); + +MODULE_AUTHOR("Pawel Moll "); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 3de6be7a3dd8934e59d85fc60a170d4ab2f0a0f2 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 15 Feb 2018 18:51:42 +0000 Subject: drivers/bus: Split Arm CCI driver The arm-cci driver is really two entirely separate drivers; one for MCPM port control and the other for the performance monitors. Since they are already relatively self-contained, let's take the plunge and move the PMU parts out to drivers/perf where they belong these days. For non-MCPM systems this leaves a small dependency on the remaining "bus" stub for initial probing and discovery, but we end up with something that still fits the general pattern of its fellow system PMU drivers to ease future maintenance. Moving code to a new file also offers a perfect excuse to modernise the license/copyright headers and clean up some funky linewraps on the way. 
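As a rough sketch of the resulting shape (illustrative; only the bus-side
definition appears in the hunks below):

	/* drivers/bus/arm-cci.c: keeps discovery and MCPM port control */
	void __iomem *cci_ctrl_base;		/* mapped once at probe time */

	/* drivers/perf/arm-cci.c: the relocated PMU driver */
	extern void __iomem *cci_ctrl_base;	/* referenced read-only */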
Cc: Lorenzo Pieralisi Reviewed-by: Suzuki Poulose Acked-by: Punit Agrawal Signed-off-by: Robin Murphy Signed-off-by: Arnd Bergmann --- drivers/bus/Kconfig | 28 - drivers/bus/arm-cci.c | 1745 +---------------------------------------------- drivers/perf/Kconfig | 26 + drivers/perf/Makefile | 1 + drivers/perf/arm-cci.c | 1747 ++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 1776 insertions(+), 1771 deletions(-) create mode 100644 drivers/perf/arm-cci.c diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 116446c42c6b..39ddb63be993 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -8,25 +8,10 @@ menu "Bus devices" config ARM_CCI bool -config ARM_CCI_PMU - bool - select ARM_CCI - config ARM_CCI400_COMMON bool select ARM_CCI -config ARM_CCI400_PMU - bool "ARM CCI400 PMU support" - depends on (ARM && CPU_V7) || ARM64 - depends on PERF_EVENTS - select ARM_CCI400_COMMON - select ARM_CCI_PMU - help - Support for PMU events monitoring on the ARM CCI-400 (cache coherent - interconnect). CCI-400 supports counting events related to the - connected slave/master interfaces. - config ARM_CCI400_PORT_CTRL bool depends on ARM && OF && CPU_V7 @@ -35,19 +20,6 @@ config ARM_CCI400_PORT_CTRL Low level power management driver for CCI400 cache coherent interconnect for ARM platforms. -config ARM_CCI5xx_PMU - bool "ARM CCI-500/CCI-550 PMU support" - depends on (ARM && CPU_V7) || ARM64 - depends on PERF_EVENTS - select ARM_CCI_PMU - help - Support for PMU events monitoring on the ARM CCI-500/CCI-550 cache - coherent interconnects. Both of them provide 8 independent event counters, - which can count events pertaining to the slave/master interfaces as well - as the internal events to the CCI. - - If unsure, say Y - config BRCMSTB_GISB_ARB bool "Broadcom STB GISB bus arbiter" depends on ARM || ARM64 || MIPS diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c index 5426c04fe24b..503c1789dd02 100644 --- a/drivers/bus/arm-cci.c +++ b/drivers/bus/arm-cci.c @@ -16,20 +16,17 @@ #include #include -#include #include #include -#include #include -#include #include #include -#include #include #include -static void __iomem *cci_ctrl_base; +/* Referenced read-only by the PMU driver; see drivers/perf/arm-cci.c */ +void __iomem *cci_ctrl_base; static unsigned long cci_ctrl_phys; #ifdef CONFIG_ARM_CCI400_PORT_CTRL @@ -59,1716 +56,7 @@ static const struct of_device_id arm_cci_matches[] = { {}, }; -#ifdef CONFIG_ARM_CCI_PMU - #define DRIVER_NAME "ARM-CCI" -#define DRIVER_NAME_PMU DRIVER_NAME " PMU" - -#define CCI_PMCR 0x0100 -#define CCI_PID2 0x0fe8 - -#define CCI_PMCR_CEN 0x00000001 -#define CCI_PMCR_NCNT_MASK 0x0000f800 -#define CCI_PMCR_NCNT_SHIFT 11 - -#define CCI_PID2_REV_MASK 0xf0 -#define CCI_PID2_REV_SHIFT 4 - -#define CCI_PMU_EVT_SEL 0x000 -#define CCI_PMU_CNTR 0x004 -#define CCI_PMU_CNTR_CTRL 0x008 -#define CCI_PMU_OVRFLW 0x00c - -#define CCI_PMU_OVRFLW_FLAG 1 - -#define CCI_PMU_CNTR_SIZE(model) ((model)->cntr_size) -#define CCI_PMU_CNTR_BASE(model, idx) ((idx) * CCI_PMU_CNTR_SIZE(model)) -#define CCI_PMU_CNTR_MASK ((1ULL << 32) -1) -#define CCI_PMU_CNTR_LAST(cci_pmu) (cci_pmu->num_cntrs - 1) - -#define CCI_PMU_MAX_HW_CNTRS(model) \ - ((model)->num_hw_cntrs + (model)->fixed_hw_cntrs) - -/* Types of interfaces that can generate events */ -enum { - CCI_IF_SLAVE, - CCI_IF_MASTER, -#ifdef CONFIG_ARM_CCI5xx_PMU - CCI_IF_GLOBAL, -#endif - CCI_IF_MAX, -}; - -struct event_range { - u32 min; - u32 max; -}; - -struct cci_pmu_hw_events { - struct perf_event **events; - unsigned long *used_mask; 
- raw_spinlock_t pmu_lock; -}; - -struct cci_pmu; -/* - * struct cci_pmu_model: - * @fixed_hw_cntrs - Number of fixed event counters - * @num_hw_cntrs - Maximum number of programmable event counters - * @cntr_size - Size of an event counter mapping - */ -struct cci_pmu_model { - char *name; - u32 fixed_hw_cntrs; - u32 num_hw_cntrs; - u32 cntr_size; - struct attribute **format_attrs; - struct attribute **event_attrs; - struct event_range event_ranges[CCI_IF_MAX]; - int (*validate_hw_event)(struct cci_pmu *, unsigned long); - int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long); - void (*write_counters)(struct cci_pmu *, unsigned long *); -}; - -static struct cci_pmu_model cci_pmu_models[]; - -struct cci_pmu { - void __iomem *base; - struct pmu pmu; - int nr_irqs; - int *irqs; - unsigned long active_irqs; - const struct cci_pmu_model *model; - struct cci_pmu_hw_events hw_events; - struct platform_device *plat_device; - int num_cntrs; - atomic_t active_events; - struct mutex reserve_mutex; - struct hlist_node node; - cpumask_t cpus; -}; - -#define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu)) - -enum cci_models { -#ifdef CONFIG_ARM_CCI400_PMU - CCI400_R0, - CCI400_R1, -#endif -#ifdef CONFIG_ARM_CCI5xx_PMU - CCI500_R0, - CCI550_R0, -#endif - CCI_MODEL_MAX -}; - -static void pmu_write_counters(struct cci_pmu *cci_pmu, - unsigned long *mask); -static ssize_t cci_pmu_format_show(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t cci_pmu_event_show(struct device *dev, - struct device_attribute *attr, char *buf); - -#define CCI_EXT_ATTR_ENTRY(_name, _func, _config) \ - &((struct dev_ext_attribute[]) { \ - { __ATTR(_name, S_IRUGO, _func, NULL), (void *)_config } \ - })[0].attr.attr - -#define CCI_FORMAT_EXT_ATTR_ENTRY(_name, _config) \ - CCI_EXT_ATTR_ENTRY(_name, cci_pmu_format_show, (char *)_config) -#define CCI_EVENT_EXT_ATTR_ENTRY(_name, _config) \ - CCI_EXT_ATTR_ENTRY(_name, cci_pmu_event_show, (unsigned long)_config) - -/* CCI400 PMU Specific definitions */ - -#ifdef CONFIG_ARM_CCI400_PMU - -/* Port ids */ -#define CCI400_PORT_S0 0 -#define CCI400_PORT_S1 1 -#define CCI400_PORT_S2 2 -#define CCI400_PORT_S3 3 -#define CCI400_PORT_S4 4 -#define CCI400_PORT_M0 5 -#define CCI400_PORT_M1 6 -#define CCI400_PORT_M2 7 - -#define CCI400_R1_PX 5 - -/* - * Instead of an event id to monitor CCI cycles, a dedicated counter is - * provided. Use 0xff to represent CCI cycles and hope that no future revisions - * make use of this event in hardware. - */ -enum cci400_perf_events { - CCI400_PMU_CYCLES = 0xff -}; - -#define CCI400_PMU_CYCLE_CNTR_IDX 0 -#define CCI400_PMU_CNTR0_IDX 1 - -/* - * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8 - * ports and bits 4:0 are event codes. There are different event codes - * associated with each port type. - * - * Additionally, the range of events associated with the port types changed - * between Rev0 and Rev1. - * - * The constants below define the range of valid codes for each port type for - * the different revisions and are used to validate the event to be monitored. 
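 *
 * For example, event 0x63 decodes as source (0x63 >> 5) & 0x7 = 3
 * (slave port S3) with code 0x63 & 0x1f = 0x03, i.e. the Rev0
 * "si_rrq_hs_inner_or_outershareable" event listed below.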
- */ - -#define CCI400_PMU_EVENT_MASK 0xffUL -#define CCI400_PMU_EVENT_SOURCE_SHIFT 5 -#define CCI400_PMU_EVENT_SOURCE_MASK 0x7 -#define CCI400_PMU_EVENT_CODE_SHIFT 0 -#define CCI400_PMU_EVENT_CODE_MASK 0x1f -#define CCI400_PMU_EVENT_SOURCE(event) \ - ((event >> CCI400_PMU_EVENT_SOURCE_SHIFT) & \ - CCI400_PMU_EVENT_SOURCE_MASK) -#define CCI400_PMU_EVENT_CODE(event) \ - ((event >> CCI400_PMU_EVENT_CODE_SHIFT) & CCI400_PMU_EVENT_CODE_MASK) - -#define CCI400_R0_SLAVE_PORT_MIN_EV 0x00 -#define CCI400_R0_SLAVE_PORT_MAX_EV 0x13 -#define CCI400_R0_MASTER_PORT_MIN_EV 0x14 -#define CCI400_R0_MASTER_PORT_MAX_EV 0x1a - -#define CCI400_R1_SLAVE_PORT_MIN_EV 0x00 -#define CCI400_R1_SLAVE_PORT_MAX_EV 0x14 -#define CCI400_R1_MASTER_PORT_MIN_EV 0x00 -#define CCI400_R1_MASTER_PORT_MAX_EV 0x11 - -#define CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(_name, _config) \ - CCI_EXT_ATTR_ENTRY(_name, cci400_pmu_cycle_event_show, \ - (unsigned long)_config) - -static ssize_t cci400_pmu_cycle_event_show(struct device *dev, - struct device_attribute *attr, char *buf); - -static struct attribute *cci400_pmu_format_attrs[] = { - CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"), - CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-7"), - NULL -}; - -static struct attribute *cci400_r0_pmu_event_attrs[] = { - /* Slave events */ - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9), - CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA), - CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13), - /* Master events */ - CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x14), - CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_addr_hazard, 0x15), - CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_id_hazard, 0x16), - CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_tt_full, 0x17), - CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x18), - CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x19), - CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_tt_full, 0x1A), - /* Special event for cycles counter */ - CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff), - NULL -}; - -static struct attribute *cci400_r1_pmu_event_attrs[] = { - /* Slave events */ - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7), 
- CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9), - CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA), - CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_slave_id_hazard, 0x14), - /* Master events */ - CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x0), - CCI_EVENT_EXT_ATTR_ENTRY(mi_stall_cycle_addr_hazard, 0x1), - CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_master_id_hazard, 0x2), - CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_hi_prio_rtq_full, 0x3), - CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x4), - CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x5), - CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_wtq_full, 0x6), - CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_low_prio_rtq_full, 0x7), - CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_mid_prio_rtq_full, 0x8), - CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn0, 0x9), - CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn1, 0xA), - CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn2, 0xB), - CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn3, 0xC), - CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn0, 0xD), - CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn1, 0xE), - CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn2, 0xF), - CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn3, 0x10), - CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_unique_or_line_unique_addr_hazard, 0x11), - /* Special event for cycles counter */ - CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff), - NULL -}; - -static ssize_t cci400_pmu_cycle_event_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dev_ext_attribute *eattr = container_of(attr, - struct dev_ext_attribute, attr); - return snprintf(buf, PAGE_SIZE, "config=0x%lx\n", (unsigned long)eattr->var); -} - -static int cci400_get_event_idx(struct cci_pmu *cci_pmu, - struct cci_pmu_hw_events *hw, - unsigned long cci_event) -{ - int idx; - - /* cycles event idx is fixed */ - if (cci_event == CCI400_PMU_CYCLES) { - if (test_and_set_bit(CCI400_PMU_CYCLE_CNTR_IDX, hw->used_mask)) - return -EAGAIN; - - return CCI400_PMU_CYCLE_CNTR_IDX; - } - - for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx) - if (!test_and_set_bit(idx, hw->used_mask)) - return idx; - - /* No counters available */ - return -EAGAIN; -} - -static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event) -{ - u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event); - u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event); - int if_type; - - if (hw_event & ~CCI400_PMU_EVENT_MASK) - return -ENOENT; - - if (hw_event == CCI400_PMU_CYCLES) - return hw_event; - - switch (ev_source) { - case CCI400_PORT_S0: - case CCI400_PORT_S1: - case CCI400_PORT_S2: - case CCI400_PORT_S3: - case CCI400_PORT_S4: - /* Slave Interface */ - if_type = CCI_IF_SLAVE; - break; - case CCI400_PORT_M0: - case CCI400_PORT_M1: - case CCI400_PORT_M2: - /* Master Interface */ - if_type = CCI_IF_MASTER; - break; - default: - return -ENOENT; - } - - if (ev_code >= cci_pmu->model->event_ranges[if_type].min && - ev_code <= 
cci_pmu->model->event_ranges[if_type].max) - return hw_event; - - return -ENOENT; -} - -static int probe_cci400_revision(void) -{ - int rev; - rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK; - rev >>= CCI_PID2_REV_SHIFT; - - if (rev < CCI400_R1_PX) - return CCI400_R0; - else - return CCI400_R1; -} - -static const struct cci_pmu_model *probe_cci_model(struct platform_device *pdev) -{ - if (platform_has_secure_cci_access()) - return &cci_pmu_models[probe_cci400_revision()]; - return NULL; -} -#else /* !CONFIG_ARM_CCI400_PMU */ -static inline struct cci_pmu_model *probe_cci_model(struct platform_device *pdev) -{ - return NULL; -} -#endif /* CONFIG_ARM_CCI400_PMU */ - -#ifdef CONFIG_ARM_CCI5xx_PMU - -/* - * CCI5xx PMU event id is an 9-bit value made of two parts. - * bits [8:5] - Source for the event - * bits [4:0] - Event code (specific to type of interface) - * - * - */ - -/* Port ids */ -#define CCI5xx_PORT_S0 0x0 -#define CCI5xx_PORT_S1 0x1 -#define CCI5xx_PORT_S2 0x2 -#define CCI5xx_PORT_S3 0x3 -#define CCI5xx_PORT_S4 0x4 -#define CCI5xx_PORT_S5 0x5 -#define CCI5xx_PORT_S6 0x6 - -#define CCI5xx_PORT_M0 0x8 -#define CCI5xx_PORT_M1 0x9 -#define CCI5xx_PORT_M2 0xa -#define CCI5xx_PORT_M3 0xb -#define CCI5xx_PORT_M4 0xc -#define CCI5xx_PORT_M5 0xd -#define CCI5xx_PORT_M6 0xe - -#define CCI5xx_PORT_GLOBAL 0xf - -#define CCI5xx_PMU_EVENT_MASK 0x1ffUL -#define CCI5xx_PMU_EVENT_SOURCE_SHIFT 0x5 -#define CCI5xx_PMU_EVENT_SOURCE_MASK 0xf -#define CCI5xx_PMU_EVENT_CODE_SHIFT 0x0 -#define CCI5xx_PMU_EVENT_CODE_MASK 0x1f - -#define CCI5xx_PMU_EVENT_SOURCE(event) \ - ((event >> CCI5xx_PMU_EVENT_SOURCE_SHIFT) & CCI5xx_PMU_EVENT_SOURCE_MASK) -#define CCI5xx_PMU_EVENT_CODE(event) \ - ((event >> CCI5xx_PMU_EVENT_CODE_SHIFT) & CCI5xx_PMU_EVENT_CODE_MASK) - -#define CCI5xx_SLAVE_PORT_MIN_EV 0x00 -#define CCI5xx_SLAVE_PORT_MAX_EV 0x1f -#define CCI5xx_MASTER_PORT_MIN_EV 0x00 -#define CCI5xx_MASTER_PORT_MAX_EV 0x06 -#define CCI5xx_GLOBAL_PORT_MIN_EV 0x00 -#define CCI5xx_GLOBAL_PORT_MAX_EV 0x0f - - -#define CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(_name, _config) \ - CCI_EXT_ATTR_ENTRY(_name, cci5xx_pmu_global_event_show, \ - (unsigned long) _config) - -static ssize_t cci5xx_pmu_global_event_show(struct device *dev, - struct device_attribute *attr, char *buf); - -static struct attribute *cci5xx_pmu_format_attrs[] = { - CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"), - CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-8"), - NULL, -}; - -static struct attribute *cci5xx_pmu_event_attrs[] = { - /* Slave events */ - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_arvalid, 0x0), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_dev, 0x1), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_nonshareable, 0x2), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_non_alloc, 0x3), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_alloc, 0x4), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_invalidate, 0x5), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maint, 0x6), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rval, 0x8), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rlast_snoop, 0x9), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_awalid, 0xA), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_dev, 0xB), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_non_shareable, 0xC), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wb, 0xD), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wlu, 0xE), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wunique, 0xF), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_evict, 0x10), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_wrevict, 0x11), - CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_beat, 0x12), - 
CCI_EVENT_EXT_ATTR_ENTRY(si_srq_acvalid, 0x13), - CCI_EVENT_EXT_ATTR_ENTRY(si_srq_read, 0x14), - CCI_EVENT_EXT_ATTR_ENTRY(si_srq_clean, 0x15), - CCI_EVENT_EXT_ATTR_ENTRY(si_srq_data_transfer_low, 0x16), - CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_arvalid, 0x17), - CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall, 0x18), - CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall, 0x19), - CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_stall, 0x1A), - CCI_EVENT_EXT_ATTR_ENTRY(si_w_resp_stall, 0x1B), - CCI_EVENT_EXT_ATTR_ENTRY(si_srq_stall, 0x1C), - CCI_EVENT_EXT_ATTR_ENTRY(si_s_data_stall, 0x1D), - CCI_EVENT_EXT_ATTR_ENTRY(si_rq_stall_ot_limit, 0x1E), - CCI_EVENT_EXT_ATTR_ENTRY(si_r_stall_arbit, 0x1F), - - /* Master events */ - CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_beat_any, 0x0), - CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_beat_any, 0x1), - CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall, 0x2), - CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_stall, 0x3), - CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall, 0x4), - CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_stall, 0x5), - CCI_EVENT_EXT_ATTR_ENTRY(mi_w_resp_stall, 0x6), - - /* Global events */ - CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_0_1, 0x0), - CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_2_3, 0x1), - CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_4_5, 0x2), - CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_6_7, 0x3), - CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_0_1, 0x4), - CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_2_3, 0x5), - CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_4_5, 0x6), - CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_6_7, 0x7), - CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_back_invalidation, 0x8), - CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_alloc_busy, 0x9), - CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_tt_full, 0xA), - CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB), - CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC), - CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD), - CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_stall_tt_full, 0xE), - CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF), - NULL -}; - -static ssize_t cci5xx_pmu_global_event_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dev_ext_attribute *eattr = container_of(attr, - struct dev_ext_attribute, attr); - /* Global events have single fixed source code */ - return snprintf(buf, PAGE_SIZE, "event=0x%lx,source=0x%x\n", - (unsigned long)eattr->var, CCI5xx_PORT_GLOBAL); -} - -/* - * CCI500 provides 8 independent event counters that can count - * any of the events available. 
- * CCI500 PMU event source ids - * 0x0-0x6 - Slave interfaces - * 0x8-0xD - Master interfaces - * 0xf - Global Events - * 0x7,0xe - Reserved - */ -static int cci500_validate_hw_event(struct cci_pmu *cci_pmu, - unsigned long hw_event) -{ - u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event); - u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event); - int if_type; - - if (hw_event & ~CCI5xx_PMU_EVENT_MASK) - return -ENOENT; - - switch (ev_source) { - case CCI5xx_PORT_S0: - case CCI5xx_PORT_S1: - case CCI5xx_PORT_S2: - case CCI5xx_PORT_S3: - case CCI5xx_PORT_S4: - case CCI5xx_PORT_S5: - case CCI5xx_PORT_S6: - if_type = CCI_IF_SLAVE; - break; - case CCI5xx_PORT_M0: - case CCI5xx_PORT_M1: - case CCI5xx_PORT_M2: - case CCI5xx_PORT_M3: - case CCI5xx_PORT_M4: - case CCI5xx_PORT_M5: - if_type = CCI_IF_MASTER; - break; - case CCI5xx_PORT_GLOBAL: - if_type = CCI_IF_GLOBAL; - break; - default: - return -ENOENT; - } - - if (ev_code >= cci_pmu->model->event_ranges[if_type].min && - ev_code <= cci_pmu->model->event_ranges[if_type].max) - return hw_event; - - return -ENOENT; -} - -/* - * CCI550 provides 8 independent event counters that can count - * any of the events available. - * CCI550 PMU event source ids - * 0x0-0x6 - Slave interfaces - * 0x8-0xe - Master interfaces - * 0xf - Global Events - * 0x7 - Reserved - */ -static int cci550_validate_hw_event(struct cci_pmu *cci_pmu, - unsigned long hw_event) -{ - u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event); - u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event); - int if_type; - - if (hw_event & ~CCI5xx_PMU_EVENT_MASK) - return -ENOENT; - - switch (ev_source) { - case CCI5xx_PORT_S0: - case CCI5xx_PORT_S1: - case CCI5xx_PORT_S2: - case CCI5xx_PORT_S3: - case CCI5xx_PORT_S4: - case CCI5xx_PORT_S5: - case CCI5xx_PORT_S6: - if_type = CCI_IF_SLAVE; - break; - case CCI5xx_PORT_M0: - case CCI5xx_PORT_M1: - case CCI5xx_PORT_M2: - case CCI5xx_PORT_M3: - case CCI5xx_PORT_M4: - case CCI5xx_PORT_M5: - case CCI5xx_PORT_M6: - if_type = CCI_IF_MASTER; - break; - case CCI5xx_PORT_GLOBAL: - if_type = CCI_IF_GLOBAL; - break; - default: - return -ENOENT; - } - - if (ev_code >= cci_pmu->model->event_ranges[if_type].min && - ev_code <= cci_pmu->model->event_ranges[if_type].max) - return hw_event; - - return -ENOENT; -} - -#endif /* CONFIG_ARM_CCI5xx_PMU */ - -/* - * Program the CCI PMU counters which have PERF_HES_ARCH set - * with the event period and mark them ready before we enable - * PMU. - */ -static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu) -{ - int i; - struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events; - - DECLARE_BITMAP(mask, cci_pmu->num_cntrs); - - bitmap_zero(mask, cci_pmu->num_cntrs); - for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) { - struct perf_event *event = cci_hw->events[i]; - - if (WARN_ON(!event)) - continue; - - /* Leave the events which are not counting */ - if (event->hw.state & PERF_HES_STOPPED) - continue; - if (event->hw.state & PERF_HES_ARCH) { - set_bit(i, mask); - event->hw.state &= ~PERF_HES_ARCH; - } - } - - pmu_write_counters(cci_pmu, mask); -} - -/* Should be called with cci_pmu->hw_events->pmu_lock held */ -static void __cci_pmu_enable_nosync(struct cci_pmu *cci_pmu) -{ - u32 val; - - /* Enable all the PMU counters. 
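-	 * Note: CCI_PMCR_CEN gates every counter at once; each counter
-	 * additionally has its own enable bit in CCI_PMU_CNTR_CTRL.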
*/
-	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
-	writel(val, cci_ctrl_base + CCI_PMCR);
-}
-
-/* Should be called with cci_pmu->hw_events->pmu_lock held */
-static void __cci_pmu_enable_sync(struct cci_pmu *cci_pmu)
-{
-	cci_pmu_sync_counters(cci_pmu);
-	__cci_pmu_enable_nosync(cci_pmu);
-}
-
-/* Should be called with cci_pmu->hw_events->pmu_lock held */
-static void __cci_pmu_disable(void)
-{
-	u32 val;
-
-	/* Disable all the PMU counters. */
-	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
-	writel(val, cci_ctrl_base + CCI_PMCR);
-}
-
-static ssize_t cci_pmu_format_show(struct device *dev,
-			struct device_attribute *attr, char *buf)
-{
-	struct dev_ext_attribute *eattr = container_of(attr,
-				struct dev_ext_attribute, attr);
-	return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var);
-}
-
-static ssize_t cci_pmu_event_show(struct device *dev,
-			struct device_attribute *attr, char *buf)
-{
-	struct dev_ext_attribute *eattr = container_of(attr,
-				struct dev_ext_attribute, attr);
-	/* source parameter is mandatory for normal PMU events */
-	return snprintf(buf, PAGE_SIZE, "source=?,event=0x%lx\n",
-			 (unsigned long)eattr->var);
-}
-
-static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
-{
-	return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu);
-}
-
-static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset)
-{
-	return readl_relaxed(cci_pmu->base +
-			     CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
-}
-
-static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value,
-			       int idx, unsigned int offset)
-{
-	writel_relaxed(value, cci_pmu->base +
-		       CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
-}
-
-static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx)
-{
-	pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL);
-}
-
-static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx)
-{
-	pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL);
-}
-
-static bool __maybe_unused
-pmu_counter_is_enabled(struct cci_pmu *cci_pmu, int idx)
-{
-	return (pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR_CTRL) & 0x1) != 0;
-}
-
-static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event)
-{
-	pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL);
-}
-
-/*
- * For all counters on the CCI-PMU, disable any 'enabled' counters,
- * saving the changed counters in the mask, so that we can restore
- * them later using pmu_restore_counters(). The mask is private to the
- * caller. We cannot rely on the used_mask maintained by the CCI_PMU
- * as it only tells us if the counter is assigned to a perf_event or not.
- * The state of the perf_event cannot be locked by the PMU layer, hence
- * we check the individual counter status (which can be locked by
- * cci_pmu->hw_events->pmu_lock).
- *
- * @mask should be initialised to empty by the caller.
- */
-static void __maybe_unused
-pmu_save_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
-{
-	int i;
-
-	for (i = 0; i < cci_pmu->num_cntrs; i++) {
-		if (pmu_counter_is_enabled(cci_pmu, i)) {
-			set_bit(i, mask);
-			pmu_disable_counter(cci_pmu, i);
-		}
-	}
-}
-
-/*
- * Restore the status of the counters. This is the reversal of pmu_save_counters().
- * For each counter set in the mask, enable the counter again.
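- * The counter values themselves are untouched here; only the per-counter
- * enable bits are written.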
- */ -static void __maybe_unused -pmu_restore_counters(struct cci_pmu *cci_pmu, unsigned long *mask) -{ - int i; - - for_each_set_bit(i, mask, cci_pmu->num_cntrs) - pmu_enable_counter(cci_pmu, i); -} - -/* - * Returns the number of programmable counters actually implemented - * by the cci - */ -static u32 pmu_get_max_counters(void) -{ - return (readl_relaxed(cci_ctrl_base + CCI_PMCR) & - CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT; -} - -static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event) -{ - struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); - unsigned long cci_event = event->hw.config_base; - int idx; - - if (cci_pmu->model->get_event_idx) - return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event); - - /* Generic code to find an unused idx from the mask */ - for(idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) - if (!test_and_set_bit(idx, hw->used_mask)) - return idx; - - /* No counters available */ - return -EAGAIN; -} - -static int pmu_map_event(struct perf_event *event) -{ - struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); - - if (event->attr.type < PERF_TYPE_MAX || - !cci_pmu->model->validate_hw_event) - return -ENOENT; - - return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config); -} - -static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler) -{ - int i; - struct platform_device *pmu_device = cci_pmu->plat_device; - - if (unlikely(!pmu_device)) - return -ENODEV; - - if (cci_pmu->nr_irqs < 1) { - dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n"); - return -ENODEV; - } - - /* - * Register all available CCI PMU interrupts. In the interrupt handler - * we iterate over the counters checking for interrupt source (the - * overflowing counter) and clear it. - * - * This should allow handling of non-unique interrupt for the counters. 
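-	 * The lines are requested with IRQF_SHARED, so several counters can
-	 * share one overflow interrupt line and still all be serviced.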
- */ - for (i = 0; i < cci_pmu->nr_irqs; i++) { - int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED, - "arm-cci-pmu", cci_pmu); - if (err) { - dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n", - cci_pmu->irqs[i]); - return err; - } - - set_bit(i, &cci_pmu->active_irqs); - } - - return 0; -} - -static void pmu_free_irq(struct cci_pmu *cci_pmu) -{ - int i; - - for (i = 0; i < cci_pmu->nr_irqs; i++) { - if (!test_and_clear_bit(i, &cci_pmu->active_irqs)) - continue; - - free_irq(cci_pmu->irqs[i], cci_pmu); - } -} - -static u32 pmu_read_counter(struct perf_event *event) -{ - struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); - struct hw_perf_event *hw_counter = &event->hw; - int idx = hw_counter->idx; - u32 value; - - if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { - dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); - return 0; - } - value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR); - - return value; -} - -static void pmu_write_counter(struct cci_pmu *cci_pmu, u32 value, int idx) -{ - pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR); -} - -static void __pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask) -{ - int i; - struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events; - - for_each_set_bit(i, mask, cci_pmu->num_cntrs) { - struct perf_event *event = cci_hw->events[i]; - - if (WARN_ON(!event)) - continue; - pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i); - } -} - -static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask) -{ - if (cci_pmu->model->write_counters) - cci_pmu->model->write_counters(cci_pmu, mask); - else - __pmu_write_counters(cci_pmu, mask); -} - -#ifdef CONFIG_ARM_CCI5xx_PMU - -/* - * CCI-500/CCI-550 has advanced power saving policies, which could gate the - * clocks to the PMU counters, which makes the writes to them ineffective. - * The only way to write to those counters is when the global counters - * are enabled and the particular counter is enabled. - * - * So we do the following : - * - * 1) Disable all the PMU counters, saving their current state - * 2) Enable the global PMU profiling, now that all counters are - * disabled. - * - * For each counter to be programmed, repeat steps 3-7: - * - * 3) Write an invalid event code to the event control register for the - counter, so that the counters are not modified. - * 4) Enable the counter control for the counter. - * 5) Set the counter value - * 6) Disable the counter - * 7) Restore the event in the target counter - * - * 8) Disable the global PMU. - * 9) Restore the status of the rest of the counters. - * - * We choose an event which for CCI-5xx is guaranteed not to count. - * We use the highest possible event code (0x1f) for the master interface 0. 
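- *
- * Concretely, CCI5xx_INVALID_EVENT below encodes source M0 with code 0x1f:
- * (0x8 << 5) | 0x1f == 0x11f. Master interfaces only implement event codes
- * up to 0x06, so this event can never count.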
- */
-#define CCI5xx_INVALID_EVENT	((CCI5xx_PORT_M0 << CCI5xx_PMU_EVENT_SOURCE_SHIFT) | \
-				 (CCI5xx_PMU_EVENT_CODE_MASK << CCI5xx_PMU_EVENT_CODE_SHIFT))
-static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
-{
-	int i;
-	DECLARE_BITMAP(saved_mask, cci_pmu->num_cntrs);
-
-	bitmap_zero(saved_mask, cci_pmu->num_cntrs);
-	pmu_save_counters(cci_pmu, saved_mask);
-
-	/*
-	 * Now that all the counters are disabled, we can safely turn the PMU on,
-	 * without syncing the status of the counters.
-	 */
-	__cci_pmu_enable_nosync(cci_pmu);
-
-	for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
-		struct perf_event *event = cci_pmu->hw_events.events[i];
-
-		if (WARN_ON(!event))
-			continue;
-
-		pmu_set_event(cci_pmu, i, CCI5xx_INVALID_EVENT);
-		pmu_enable_counter(cci_pmu, i);
-		pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
-		pmu_disable_counter(cci_pmu, i);
-		pmu_set_event(cci_pmu, i, event->hw.config_base);
-	}
-
-	__cci_pmu_disable();
-
-	pmu_restore_counters(cci_pmu, saved_mask);
-}
-
-#endif	/* CONFIG_ARM_CCI5xx_PMU */
-
-static u64 pmu_event_update(struct perf_event *event)
-{
-	struct hw_perf_event *hwc = &event->hw;
-	u64 delta, prev_raw_count, new_raw_count;
-
-	do {
-		prev_raw_count = local64_read(&hwc->prev_count);
-		new_raw_count = pmu_read_counter(event);
-	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
-		 new_raw_count) != prev_raw_count);
-
-	delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;
-
-	local64_add(delta, &event->count);
-
-	return new_raw_count;
-}
-
-static void pmu_read(struct perf_event *event)
-{
-	pmu_event_update(event);
-}
-
-static void pmu_event_set_period(struct perf_event *event)
-{
-	struct hw_perf_event *hwc = &event->hw;
-	/*
-	 * The CCI PMU counters have a period of 2^32. To account for the
-	 * possibility of extreme interrupt latency we program for a period of
-	 * half that. Hopefully we can handle the interrupt before another 2^31
-	 * events occur and the counter overtakes its previous value.
-	 */
-	u64 val = 1ULL << 31;
-	local64_set(&hwc->prev_count, val);
-
-	/*
-	 * CCI PMU uses PERF_HES_ARCH to keep track of the counters, whose
-	 * values need to be synced with the s/w state before the PMU is
-	 * enabled.
-	 * Mark this counter for sync.
-	 */
-	hwc->state |= PERF_HES_ARCH;
-}
-
-static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
-{
-	unsigned long flags;
-	struct cci_pmu *cci_pmu = dev;
-	struct cci_pmu_hw_events *events = &cci_pmu->hw_events;
-	int idx, handled = IRQ_NONE;
-
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
-
-	/* Disable the PMU while we walk through the counters */
-	__cci_pmu_disable();
-	/*
-	 * Iterate over counters and update the corresponding perf events.
-	 * This should work regardless of whether we have per-counter overflow
-	 * interrupt or a combined overflow interrupt.
-	 */
-	for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
-		struct perf_event *event = events->events[idx];
-
-		if (!event)
-			continue;
-
-		/* Did this counter overflow?
*/
-		if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) &
-		      CCI_PMU_OVRFLW_FLAG))
-			continue;
-
-		pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx,
-				   CCI_PMU_OVRFLW);
-
-		pmu_event_update(event);
-		pmu_event_set_period(event);
-		handled = IRQ_HANDLED;
-	}
-
-	/* Enable the PMU and sync possibly overflowed counters */
-	__cci_pmu_enable_sync(cci_pmu);
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-
-	return IRQ_RETVAL(handled);
-}
-
-static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
-{
-	int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
-	if (ret) {
-		pmu_free_irq(cci_pmu);
-		return ret;
-	}
-	return 0;
-}
-
-static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
-{
-	pmu_free_irq(cci_pmu);
-}
-
-static void hw_perf_event_destroy(struct perf_event *event)
-{
-	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
-	atomic_t *active_events = &cci_pmu->active_events;
-	struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;
-
-	if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
-		cci_pmu_put_hw(cci_pmu);
-		mutex_unlock(reserve_mutex);
-	}
-}
-
-static void cci_pmu_enable(struct pmu *pmu)
-{
-	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
-	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
-	int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_cntrs);
-	unsigned long flags;
-
-	if (!enabled)
-		return;
-
-	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
-	__cci_pmu_enable_sync(cci_pmu);
-	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
-}
-
-static void cci_pmu_disable(struct pmu *pmu)
-{
-	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
-	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
-	__cci_pmu_disable();
-	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
-}
-
-/*
- * Check if the idx represents a non-programmable counter.
- * All the fixed event counters are mapped before the programmable
- * counters.
- */
-static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx)
-{
-	return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs);
-}
-
-static void cci_pmu_start(struct perf_event *event, int pmu_flags)
-{
-	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
-	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
-	struct hw_perf_event *hwc = &event->hw;
-	int idx = hwc->idx;
-	unsigned long flags;
-
-	/*
-	 * To handle interrupt latency, we always reprogram the period
-	 * regardless of PERF_EF_RELOAD.
- */ - if (pmu_flags & PERF_EF_RELOAD) - WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); - - hwc->state = 0; - - if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { - dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); - return; - } - - raw_spin_lock_irqsave(&hw_events->pmu_lock, flags); - - /* Configure the counter unless you are counting a fixed event */ - if (!pmu_fixed_hw_idx(cci_pmu, idx)) - pmu_set_event(cci_pmu, idx, hwc->config_base); - - pmu_event_set_period(event); - pmu_enable_counter(cci_pmu, idx); - - raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); -} - -static void cci_pmu_stop(struct perf_event *event, int pmu_flags) -{ - struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; - int idx = hwc->idx; - - if (hwc->state & PERF_HES_STOPPED) - return; - - if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { - dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); - return; - } - - /* - * We always reprogram the counter, so ignore PERF_EF_UPDATE. See - * cci_pmu_start() - */ - pmu_disable_counter(cci_pmu, idx); - pmu_event_update(event); - hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; -} - -static int cci_pmu_add(struct perf_event *event, int flags) -{ - struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); - struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; - struct hw_perf_event *hwc = &event->hw; - int idx; - int err = 0; - - perf_pmu_disable(event->pmu); - - /* If we don't have a space for the counter then finish early. */ - idx = pmu_get_event_idx(hw_events, event); - if (idx < 0) { - err = idx; - goto out; - } - - event->hw.idx = idx; - hw_events->events[idx] = event; - - hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; - if (flags & PERF_EF_START) - cci_pmu_start(event, PERF_EF_RELOAD); - - /* Propagate our changes to the userspace mapping. */ - perf_event_update_userpage(event); - -out: - perf_pmu_enable(event->pmu); - return err; -} - -static void cci_pmu_del(struct perf_event *event, int flags) -{ - struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); - struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; - struct hw_perf_event *hwc = &event->hw; - int idx = hwc->idx; - - cci_pmu_stop(event, PERF_EF_UPDATE); - hw_events->events[idx] = NULL; - clear_bit(idx, hw_events->used_mask); - - perf_event_update_userpage(event); -} - -static int -validate_event(struct pmu *cci_pmu, - struct cci_pmu_hw_events *hw_events, - struct perf_event *event) -{ - if (is_software_event(event)) - return 1; - - /* - * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The - * core perf code won't check that the pmu->ctx == leader->ctx - * until after pmu->event_init(event). - */ - if (event->pmu != cci_pmu) - return 0; - - if (event->state < PERF_EVENT_STATE_OFF) - return 1; - - if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) - return 1; - - return pmu_get_event_idx(hw_events, event) >= 0; -} - -static int -validate_group(struct perf_event *event) -{ - struct perf_event *sibling, *leader = event->group_leader; - struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); - unsigned long mask[BITS_TO_LONGS(cci_pmu->num_cntrs)]; - struct cci_pmu_hw_events fake_pmu = { - /* - * Initialise the fake PMU. We only need to populate the - * used_mask for the purposes of validation. 
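-		 * Running pmu_get_event_idx() against this fake used_mask
-		 * lets us check that the whole group fits on the counters
-		 * without touching the live PMU state.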
- */ - .used_mask = mask, - }; - memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long)); - - if (!validate_event(event->pmu, &fake_pmu, leader)) - return -EINVAL; - - list_for_each_entry(sibling, &leader->sibling_list, group_entry) { - if (!validate_event(event->pmu, &fake_pmu, sibling)) - return -EINVAL; - } - - if (!validate_event(event->pmu, &fake_pmu, event)) - return -EINVAL; - - return 0; -} - -static int -__hw_perf_event_init(struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - int mapping; - - mapping = pmu_map_event(event); - - if (mapping < 0) { - pr_debug("event %x:%llx not supported\n", event->attr.type, - event->attr.config); - return mapping; - } - - /* - * We don't assign an index until we actually place the event onto - * hardware. Use -1 to signify that we haven't decided where to put it - * yet. - */ - hwc->idx = -1; - hwc->config_base = 0; - hwc->config = 0; - hwc->event_base = 0; - - /* - * Store the event encoding into the config_base field. - */ - hwc->config_base |= (unsigned long)mapping; - - /* - * Limit the sample_period to half of the counter width. That way, the - * new counter value is far less likely to overtake the previous one - * unless you have some serious IRQ latency issues. - */ - hwc->sample_period = CCI_PMU_CNTR_MASK >> 1; - hwc->last_period = hwc->sample_period; - local64_set(&hwc->period_left, hwc->sample_period); - - if (event->group_leader != event) { - if (validate_group(event) != 0) - return -EINVAL; - } - - return 0; -} - -static int cci_pmu_event_init(struct perf_event *event) -{ - struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); - atomic_t *active_events = &cci_pmu->active_events; - int err = 0; - int cpu; - - if (event->attr.type != event->pmu->type) - return -ENOENT; - - /* Shared by all CPUs, no meaningful state to sample */ - if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) - return -EOPNOTSUPP; - - /* We have no filtering of any kind */ - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest) - return -EINVAL; - - /* - * Following the example set by other "uncore" PMUs, we accept any CPU - * and rewrite its affinity dynamically rather than having perf core - * handle cpu == -1 and pid == -1 for this case. - * - * The perf core will pin online CPUs for the duration of this call and - * the event being installed into its context, so the PMU's CPU can't - * change under our feet. 
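-	 * The CPU kept in cci_pmu->cpus is updated by the hotplug callback
-	 * (cci_pmu_offline_cpu()) if it ever goes offline.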
- */ - cpu = cpumask_first(&cci_pmu->cpus); - if (event->cpu < 0 || cpu < 0) - return -EINVAL; - event->cpu = cpu; - - event->destroy = hw_perf_event_destroy; - if (!atomic_inc_not_zero(active_events)) { - mutex_lock(&cci_pmu->reserve_mutex); - if (atomic_read(active_events) == 0) - err = cci_pmu_get_hw(cci_pmu); - if (!err) - atomic_inc(active_events); - mutex_unlock(&cci_pmu->reserve_mutex); - } - if (err) - return err; - - err = __hw_perf_event_init(event); - if (err) - hw_perf_event_destroy(event); - - return err; -} - -static ssize_t pmu_cpumask_attr_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct pmu *pmu = dev_get_drvdata(dev); - struct cci_pmu *cci_pmu = to_cci_pmu(pmu); - - int n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl", - cpumask_pr_args(&cci_pmu->cpus)); - buf[n++] = '\n'; - buf[n] = '\0'; - return n; -} - -static struct device_attribute pmu_cpumask_attr = - __ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL); - -static struct attribute *pmu_attrs[] = { - &pmu_cpumask_attr.attr, - NULL, -}; - -static struct attribute_group pmu_attr_group = { - .attrs = pmu_attrs, -}; - -static struct attribute_group pmu_format_attr_group = { - .name = "format", - .attrs = NULL, /* Filled in cci_pmu_init_attrs */ -}; - -static struct attribute_group pmu_event_attr_group = { - .name = "events", - .attrs = NULL, /* Filled in cci_pmu_init_attrs */ -}; - -static const struct attribute_group *pmu_attr_groups[] = { - &pmu_attr_group, - &pmu_format_attr_group, - &pmu_event_attr_group, - NULL -}; - -static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) -{ - const struct cci_pmu_model *model = cci_pmu->model; - char *name = model->name; - u32 num_cntrs; - - pmu_event_attr_group.attrs = model->event_attrs; - pmu_format_attr_group.attrs = model->format_attrs; - - cci_pmu->pmu = (struct pmu) { - .name = cci_pmu->model->name, - .task_ctx_nr = perf_invalid_context, - .pmu_enable = cci_pmu_enable, - .pmu_disable = cci_pmu_disable, - .event_init = cci_pmu_event_init, - .add = cci_pmu_add, - .del = cci_pmu_del, - .start = cci_pmu_start, - .stop = cci_pmu_stop, - .read = pmu_read, - .attr_groups = pmu_attr_groups, - }; - - cci_pmu->plat_device = pdev; - num_cntrs = pmu_get_max_counters(); - if (num_cntrs > cci_pmu->model->num_hw_cntrs) { - dev_warn(&pdev->dev, - "PMU implements more counters(%d) than supported by" - " the model(%d), truncated.", - num_cntrs, cci_pmu->model->num_hw_cntrs); - num_cntrs = cci_pmu->model->num_hw_cntrs; - } - cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs; - - return perf_pmu_register(&cci_pmu->pmu, name, -1); -} - -static int cci_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) -{ - struct cci_pmu *cci_pmu = hlist_entry_safe(node, struct cci_pmu, node); - unsigned int target; - - if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus)) - return 0; - target = cpumask_any_but(cpu_online_mask, cpu); - if (target >= nr_cpu_ids) - return 0; - /* - * TODO: migrate context once core races on event->ctx have - * been fixed. 
- */ - cpumask_set_cpu(target, &cci_pmu->cpus); - return 0; -} - -static struct cci_pmu_model cci_pmu_models[] = { -#ifdef CONFIG_ARM_CCI400_PMU - [CCI400_R0] = { - .name = "CCI_400", - .fixed_hw_cntrs = 1, /* Cycle counter */ - .num_hw_cntrs = 4, - .cntr_size = SZ_4K, - .format_attrs = cci400_pmu_format_attrs, - .event_attrs = cci400_r0_pmu_event_attrs, - .event_ranges = { - [CCI_IF_SLAVE] = { - CCI400_R0_SLAVE_PORT_MIN_EV, - CCI400_R0_SLAVE_PORT_MAX_EV, - }, - [CCI_IF_MASTER] = { - CCI400_R0_MASTER_PORT_MIN_EV, - CCI400_R0_MASTER_PORT_MAX_EV, - }, - }, - .validate_hw_event = cci400_validate_hw_event, - .get_event_idx = cci400_get_event_idx, - }, - [CCI400_R1] = { - .name = "CCI_400_r1", - .fixed_hw_cntrs = 1, /* Cycle counter */ - .num_hw_cntrs = 4, - .cntr_size = SZ_4K, - .format_attrs = cci400_pmu_format_attrs, - .event_attrs = cci400_r1_pmu_event_attrs, - .event_ranges = { - [CCI_IF_SLAVE] = { - CCI400_R1_SLAVE_PORT_MIN_EV, - CCI400_R1_SLAVE_PORT_MAX_EV, - }, - [CCI_IF_MASTER] = { - CCI400_R1_MASTER_PORT_MIN_EV, - CCI400_R1_MASTER_PORT_MAX_EV, - }, - }, - .validate_hw_event = cci400_validate_hw_event, - .get_event_idx = cci400_get_event_idx, - }, -#endif -#ifdef CONFIG_ARM_CCI5xx_PMU - [CCI500_R0] = { - .name = "CCI_500", - .fixed_hw_cntrs = 0, - .num_hw_cntrs = 8, - .cntr_size = SZ_64K, - .format_attrs = cci5xx_pmu_format_attrs, - .event_attrs = cci5xx_pmu_event_attrs, - .event_ranges = { - [CCI_IF_SLAVE] = { - CCI5xx_SLAVE_PORT_MIN_EV, - CCI5xx_SLAVE_PORT_MAX_EV, - }, - [CCI_IF_MASTER] = { - CCI5xx_MASTER_PORT_MIN_EV, - CCI5xx_MASTER_PORT_MAX_EV, - }, - [CCI_IF_GLOBAL] = { - CCI5xx_GLOBAL_PORT_MIN_EV, - CCI5xx_GLOBAL_PORT_MAX_EV, - }, - }, - .validate_hw_event = cci500_validate_hw_event, - .write_counters = cci5xx_pmu_write_counters, - }, - [CCI550_R0] = { - .name = "CCI_550", - .fixed_hw_cntrs = 0, - .num_hw_cntrs = 8, - .cntr_size = SZ_64K, - .format_attrs = cci5xx_pmu_format_attrs, - .event_attrs = cci5xx_pmu_event_attrs, - .event_ranges = { - [CCI_IF_SLAVE] = { - CCI5xx_SLAVE_PORT_MIN_EV, - CCI5xx_SLAVE_PORT_MAX_EV, - }, - [CCI_IF_MASTER] = { - CCI5xx_MASTER_PORT_MIN_EV, - CCI5xx_MASTER_PORT_MAX_EV, - }, - [CCI_IF_GLOBAL] = { - CCI5xx_GLOBAL_PORT_MIN_EV, - CCI5xx_GLOBAL_PORT_MAX_EV, - }, - }, - .validate_hw_event = cci550_validate_hw_event, - .write_counters = cci5xx_pmu_write_counters, - }, -#endif -}; - -static const struct of_device_id arm_cci_pmu_matches[] = { -#ifdef CONFIG_ARM_CCI400_PMU - { - .compatible = "arm,cci-400-pmu", - .data = NULL, - }, - { - .compatible = "arm,cci-400-pmu,r0", - .data = &cci_pmu_models[CCI400_R0], - }, - { - .compatible = "arm,cci-400-pmu,r1", - .data = &cci_pmu_models[CCI400_R1], - }, -#endif -#ifdef CONFIG_ARM_CCI5xx_PMU - { - .compatible = "arm,cci-500-pmu,r0", - .data = &cci_pmu_models[CCI500_R0], - }, - { - .compatible = "arm,cci-550-pmu,r0", - .data = &cci_pmu_models[CCI550_R0], - }, -#endif - {}, -}; - -static inline const struct cci_pmu_model *get_cci_model(struct platform_device *pdev) -{ - const struct of_device_id *match = of_match_node(arm_cci_pmu_matches, - pdev->dev.of_node); - if (!match) - return NULL; - if (match->data) - return match->data; - - dev_warn(&pdev->dev, "DEPRECATED compatible property," - "requires secure access to CCI registers"); - return probe_cci_model(pdev); -} - -static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs) -{ - int i; - - for (i = 0; i < nr_irqs; i++) - if (irq == irqs[i]) - return true; - - return false; -} - -static struct cci_pmu *cci_pmu_alloc(struct platform_device *pdev) -{ - struct 
cci_pmu *cci_pmu; - const struct cci_pmu_model *model; - - /* - * All allocations are devm_* hence we don't have to free - * them explicitly on an error, as it would end up in driver - * detach. - */ - model = get_cci_model(pdev); - if (!model) { - dev_warn(&pdev->dev, "CCI PMU version not supported\n"); - return ERR_PTR(-ENODEV); - } - - cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*cci_pmu), GFP_KERNEL); - if (!cci_pmu) - return ERR_PTR(-ENOMEM); - - cci_pmu->model = model; - cci_pmu->irqs = devm_kcalloc(&pdev->dev, CCI_PMU_MAX_HW_CNTRS(model), - sizeof(*cci_pmu->irqs), GFP_KERNEL); - if (!cci_pmu->irqs) - return ERR_PTR(-ENOMEM); - cci_pmu->hw_events.events = devm_kcalloc(&pdev->dev, - CCI_PMU_MAX_HW_CNTRS(model), - sizeof(*cci_pmu->hw_events.events), - GFP_KERNEL); - if (!cci_pmu->hw_events.events) - return ERR_PTR(-ENOMEM); - cci_pmu->hw_events.used_mask = devm_kcalloc(&pdev->dev, - BITS_TO_LONGS(CCI_PMU_MAX_HW_CNTRS(model)), - sizeof(*cci_pmu->hw_events.used_mask), - GFP_KERNEL); - if (!cci_pmu->hw_events.used_mask) - return ERR_PTR(-ENOMEM); - - return cci_pmu; -} - - -static int cci_pmu_probe(struct platform_device *pdev) -{ - struct resource *res; - struct cci_pmu *cci_pmu; - int i, ret, irq; - - cci_pmu = cci_pmu_alloc(pdev); - if (IS_ERR(cci_pmu)) - return PTR_ERR(cci_pmu); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - cci_pmu->base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(cci_pmu->base)) - return -ENOMEM; - - /* - * CCI PMU has one overflow interrupt per counter; but some may be tied - * together to a common interrupt. - */ - cci_pmu->nr_irqs = 0; - for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) { - irq = platform_get_irq(pdev, i); - if (irq < 0) - break; - - if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs)) - continue; - - cci_pmu->irqs[cci_pmu->nr_irqs++] = irq; - } - - /* - * Ensure that the device tree has as many interrupts as the number - * of counters. 
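-	 * Note that the check below uses the loop index 'i', not nr_irqs,
-	 * since duplicate lines are skipped without incrementing nr_irqs.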
- */
-	if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) {
-		dev_warn(&pdev->dev, "Incorrect number of interrupts: %d, should be %d\n",
-			i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model));
-		return -EINVAL;
-	}
-
-	raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
-	mutex_init(&cci_pmu->reserve_mutex);
-	atomic_set(&cci_pmu->active_events, 0);
-	cpumask_set_cpu(get_cpu(), &cci_pmu->cpus);
-
-	ret = cci_pmu_init(cci_pmu, pdev);
-	if (ret) {
-		put_cpu();
-		return ret;
-	}
-
-	cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
-					 &cci_pmu->node);
-	put_cpu();
-	pr_info("ARM %s PMU driver probed\n", cci_pmu->model->name);
-	return 0;
-}

 static int cci_platform_probe(struct platform_device *pdev)
 {
@@ -1778,14 +66,6 @@ static int cci_platform_probe(struct platform_device *pdev)
 	return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
 }

-static struct platform_driver cci_pmu_driver = {
-	.driver = {
-		   .name = DRIVER_NAME_PMU,
-		   .of_match_table = arm_cci_pmu_matches,
-		  },
-	.probe = cci_pmu_probe,
-};
-
 static struct platform_driver cci_platform_driver = {
 	.driver = {
 		   .name = DRIVER_NAME,
@@ -1796,30 +76,9 @@ static struct platform_driver cci_platform_driver = {

 static int __init cci_platform_init(void)
 {
-	int ret;
-
-	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCI_ONLINE,
-				      "perf/arm/cci:online", NULL,
-				      cci_pmu_offline_cpu);
-	if (ret)
-		return ret;
-
-	ret = platform_driver_register(&cci_pmu_driver);
-	if (ret)
-		return ret;
-
 	return platform_driver_register(&cci_platform_driver);
 }

-#else /* !CONFIG_ARM_CCI_PMU */
-
-static int __init cci_platform_init(void)
-{
-	return 0;
-}
-
-#endif /* CONFIG_ARM_CCI_PMU */
-
 #ifdef CONFIG_ARM_CCI400_PORT_CTRL

 #define CCI_PORT_CTRL		0x0
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 331b6d992b5a..28bb5a029558 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -5,6 +5,32 @@
 menu "Performance monitor support"
 	depends on PERF_EVENTS

+config ARM_CCI_PMU
+	bool
+	select ARM_CCI
+
+config ARM_CCI400_PMU
+	bool "ARM CCI400 PMU support"
+	depends on (ARM && CPU_V7) || ARM64
+	select ARM_CCI400_COMMON
+	select ARM_CCI_PMU
+	help
+	  Support for PMU events monitoring on the ARM CCI-400 (cache coherent
+	  interconnect). CCI-400 supports counting events related to the
+	  connected slave/master interfaces.
+
+config ARM_CCI5xx_PMU
+	bool "ARM CCI-500/CCI-550 PMU support"
+	depends on (ARM && CPU_V7) || ARM64
+	select ARM_CCI_PMU
+	help
+	  Support for PMU events monitoring on the ARM CCI-500/CCI-550 cache
+	  coherent interconnects. Both provide 8 independent event counters,
+	  which can count events pertaining to the slave/master interfaces as
+	  well as the CCI's internal events.
+
+	  If unsure, say Y.
+
 config ARM_CCN
 	tristate "ARM CCN driver support"
 	depends on ARM || ARM64
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 5004abee0f3a..b3902bd37d53 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_ARM_CCI_PMU) += arm-cci.o
 obj-$(CONFIG_ARM_CCN) += arm-ccn.o
 obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o
 obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
new file mode 100644
index 000000000000..d5f8c750fd41
--- /dev/null
+++ b/drivers/perf/arm-cci.c
@@ -0,0 +1,1747 @@
+// SPDX-License-Identifier: GPL-2.0
+// CCI Cache Coherent Interconnect PMU driver
+// Copyright (C) 2013-2018 Arm Ltd.
+// Author: Punit Agrawal , Suzuki Poulose + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern void __iomem *const cci_ctrl_base; + +#define DRIVER_NAME "ARM-CCI PMU" + +#define CCI_PMCR 0x0100 +#define CCI_PID2 0x0fe8 + +#define CCI_PMCR_CEN 0x00000001 +#define CCI_PMCR_NCNT_MASK 0x0000f800 +#define CCI_PMCR_NCNT_SHIFT 11 + +#define CCI_PID2_REV_MASK 0xf0 +#define CCI_PID2_REV_SHIFT 4 + +#define CCI_PMU_EVT_SEL 0x000 +#define CCI_PMU_CNTR 0x004 +#define CCI_PMU_CNTR_CTRL 0x008 +#define CCI_PMU_OVRFLW 0x00c + +#define CCI_PMU_OVRFLW_FLAG 1 + +#define CCI_PMU_CNTR_SIZE(model) ((model)->cntr_size) +#define CCI_PMU_CNTR_BASE(model, idx) ((idx) * CCI_PMU_CNTR_SIZE(model)) +#define CCI_PMU_CNTR_MASK ((1ULL << 32) -1) +#define CCI_PMU_CNTR_LAST(cci_pmu) (cci_pmu->num_cntrs - 1) + +#define CCI_PMU_MAX_HW_CNTRS(model) \ + ((model)->num_hw_cntrs + (model)->fixed_hw_cntrs) + +/* Types of interfaces that can generate events */ +enum { + CCI_IF_SLAVE, + CCI_IF_MASTER, +#ifdef CONFIG_ARM_CCI5xx_PMU + CCI_IF_GLOBAL, +#endif + CCI_IF_MAX, +}; + +struct event_range { + u32 min; + u32 max; +}; + +struct cci_pmu_hw_events { + struct perf_event **events; + unsigned long *used_mask; + raw_spinlock_t pmu_lock; +}; + +struct cci_pmu; +/* + * struct cci_pmu_model: + * @fixed_hw_cntrs - Number of fixed event counters + * @num_hw_cntrs - Maximum number of programmable event counters + * @cntr_size - Size of an event counter mapping + */ +struct cci_pmu_model { + char *name; + u32 fixed_hw_cntrs; + u32 num_hw_cntrs; + u32 cntr_size; + struct attribute **format_attrs; + struct attribute **event_attrs; + struct event_range event_ranges[CCI_IF_MAX]; + int (*validate_hw_event)(struct cci_pmu *, unsigned long); + int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long); + void (*write_counters)(struct cci_pmu *, unsigned long *); +}; + +static struct cci_pmu_model cci_pmu_models[]; + +struct cci_pmu { + void __iomem *base; + struct pmu pmu; + int nr_irqs; + int *irqs; + unsigned long active_irqs; + const struct cci_pmu_model *model; + struct cci_pmu_hw_events hw_events; + struct platform_device *plat_device; + int num_cntrs; + atomic_t active_events; + struct mutex reserve_mutex; + struct hlist_node node; + cpumask_t cpus; +}; + +#define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu)) + +enum cci_models { +#ifdef CONFIG_ARM_CCI400_PMU + CCI400_R0, + CCI400_R1, +#endif +#ifdef CONFIG_ARM_CCI5xx_PMU + CCI500_R0, + CCI550_R0, +#endif + CCI_MODEL_MAX +}; + +static void pmu_write_counters(struct cci_pmu *cci_pmu, + unsigned long *mask); +static ssize_t cci_pmu_format_show(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t cci_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *buf); + +#define CCI_EXT_ATTR_ENTRY(_name, _func, _config) \ + &((struct dev_ext_attribute[]) { \ + { __ATTR(_name, S_IRUGO, _func, NULL), (void *)_config } \ + })[0].attr.attr + +#define CCI_FORMAT_EXT_ATTR_ENTRY(_name, _config) \ + CCI_EXT_ATTR_ENTRY(_name, cci_pmu_format_show, (char *)_config) +#define CCI_EVENT_EXT_ATTR_ENTRY(_name, _config) \ + CCI_EXT_ATTR_ENTRY(_name, cci_pmu_event_show, (unsigned long)_config) + +/* CCI400 PMU Specific definitions */ + +#ifdef CONFIG_ARM_CCI400_PMU + +/* Port ids */ +#define CCI400_PORT_S0 0 +#define CCI400_PORT_S1 1 +#define CCI400_PORT_S2 2 +#define CCI400_PORT_S3 3 +#define CCI400_PORT_S4 4 +#define CCI400_PORT_M0 5 +#define CCI400_PORT_M1 6 +#define 
CCI400_PORT_M2 7 + +#define CCI400_R1_PX 5 + +/* + * Instead of an event id to monitor CCI cycles, a dedicated counter is + * provided. Use 0xff to represent CCI cycles and hope that no future revisions + * make use of this event in hardware. + */ +enum cci400_perf_events { + CCI400_PMU_CYCLES = 0xff +}; + +#define CCI400_PMU_CYCLE_CNTR_IDX 0 +#define CCI400_PMU_CNTR0_IDX 1 + +/* + * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8 + * ports and bits 4:0 are event codes. There are different event codes + * associated with each port type. + * + * Additionally, the range of events associated with the port types changed + * between Rev0 and Rev1. + * + * The constants below define the range of valid codes for each port type for + * the different revisions and are used to validate the event to be monitored. + */ + +#define CCI400_PMU_EVENT_MASK 0xffUL +#define CCI400_PMU_EVENT_SOURCE_SHIFT 5 +#define CCI400_PMU_EVENT_SOURCE_MASK 0x7 +#define CCI400_PMU_EVENT_CODE_SHIFT 0 +#define CCI400_PMU_EVENT_CODE_MASK 0x1f +#define CCI400_PMU_EVENT_SOURCE(event) \ + ((event >> CCI400_PMU_EVENT_SOURCE_SHIFT) & \ + CCI400_PMU_EVENT_SOURCE_MASK) +#define CCI400_PMU_EVENT_CODE(event) \ + ((event >> CCI400_PMU_EVENT_CODE_SHIFT) & CCI400_PMU_EVENT_CODE_MASK) + +#define CCI400_R0_SLAVE_PORT_MIN_EV 0x00 +#define CCI400_R0_SLAVE_PORT_MAX_EV 0x13 +#define CCI400_R0_MASTER_PORT_MIN_EV 0x14 +#define CCI400_R0_MASTER_PORT_MAX_EV 0x1a + +#define CCI400_R1_SLAVE_PORT_MIN_EV 0x00 +#define CCI400_R1_SLAVE_PORT_MAX_EV 0x14 +#define CCI400_R1_MASTER_PORT_MIN_EV 0x00 +#define CCI400_R1_MASTER_PORT_MAX_EV 0x11 + +#define CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(_name, _config) \ + CCI_EXT_ATTR_ENTRY(_name, cci400_pmu_cycle_event_show, \ + (unsigned long)_config) + +static ssize_t cci400_pmu_cycle_event_show(struct device *dev, + struct device_attribute *attr, char *buf); + +static struct attribute *cci400_pmu_format_attrs[] = { + CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"), + CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-7"), + NULL +}; + +static struct attribute *cci400_r0_pmu_event_attrs[] = { + /* Slave events */ + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9), + CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA), + CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13), + /* Master events */ + CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x14), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_addr_hazard, 0x15), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_id_hazard, 0x16), + 
CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_tt_full, 0x17), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x18), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x19), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_tt_full, 0x1A), + /* Special event for cycles counter */ + CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff), + NULL +}; + +static struct attribute *cci400_r1_pmu_event_attrs[] = { + /* Slave events */ + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9), + CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA), + CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_slave_id_hazard, 0x14), + /* Master events */ + CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x0), + CCI_EVENT_EXT_ATTR_ENTRY(mi_stall_cycle_addr_hazard, 0x1), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_master_id_hazard, 0x2), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_hi_prio_rtq_full, 0x3), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x4), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x5), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_wtq_full, 0x6), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_low_prio_rtq_full, 0x7), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_mid_prio_rtq_full, 0x8), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn0, 0x9), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn1, 0xA), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn2, 0xB), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn3, 0xC), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn0, 0xD), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn1, 0xE), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn2, 0xF), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn3, 0x10), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_unique_or_line_unique_addr_hazard, 0x11), + /* Special event for cycles counter */ + CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff), + NULL +}; + +static ssize_t cci400_pmu_cycle_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr = container_of(attr, + struct dev_ext_attribute, attr); + return snprintf(buf, PAGE_SIZE, "config=0x%lx\n", (unsigned long)eattr->var); +} + +static int cci400_get_event_idx(struct cci_pmu *cci_pmu, + struct cci_pmu_hw_events *hw, + unsigned long cci_event) +{ + int idx; + + /* cycles event idx is fixed */ + if (cci_event == CCI400_PMU_CYCLES) { + if (test_and_set_bit(CCI400_PMU_CYCLE_CNTR_IDX, hw->used_mask)) + return -EAGAIN; + + return CCI400_PMU_CYCLE_CNTR_IDX; + } + + for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); 
++idx) + if (!test_and_set_bit(idx, hw->used_mask)) + return idx; + + /* No counters available */ + return -EAGAIN; +} + +static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event) +{ + u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event); + u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event); + int if_type; + + if (hw_event & ~CCI400_PMU_EVENT_MASK) + return -ENOENT; + + if (hw_event == CCI400_PMU_CYCLES) + return hw_event; + + switch (ev_source) { + case CCI400_PORT_S0: + case CCI400_PORT_S1: + case CCI400_PORT_S2: + case CCI400_PORT_S3: + case CCI400_PORT_S4: + /* Slave Interface */ + if_type = CCI_IF_SLAVE; + break; + case CCI400_PORT_M0: + case CCI400_PORT_M1: + case CCI400_PORT_M2: + /* Master Interface */ + if_type = CCI_IF_MASTER; + break; + default: + return -ENOENT; + } + + if (ev_code >= cci_pmu->model->event_ranges[if_type].min && + ev_code <= cci_pmu->model->event_ranges[if_type].max) + return hw_event; + + return -ENOENT; +} + +static int probe_cci400_revision(void) +{ + int rev; + rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK; + rev >>= CCI_PID2_REV_SHIFT; + + if (rev < CCI400_R1_PX) + return CCI400_R0; + else + return CCI400_R1; +} + +static const struct cci_pmu_model *probe_cci_model(struct platform_device *pdev) +{ + if (platform_has_secure_cci_access()) + return &cci_pmu_models[probe_cci400_revision()]; + return NULL; +} +#else /* !CONFIG_ARM_CCI400_PMU */ +static inline struct cci_pmu_model *probe_cci_model(struct platform_device *pdev) +{ + return NULL; +} +#endif /* CONFIG_ARM_CCI400_PMU */ + +#ifdef CONFIG_ARM_CCI5xx_PMU + +/* + * CCI5xx PMU event id is an 9-bit value made of two parts. + * bits [8:5] - Source for the event + * bits [4:0] - Event code (specific to type of interface) + * + * + */ + +/* Port ids */ +#define CCI5xx_PORT_S0 0x0 +#define CCI5xx_PORT_S1 0x1 +#define CCI5xx_PORT_S2 0x2 +#define CCI5xx_PORT_S3 0x3 +#define CCI5xx_PORT_S4 0x4 +#define CCI5xx_PORT_S5 0x5 +#define CCI5xx_PORT_S6 0x6 + +#define CCI5xx_PORT_M0 0x8 +#define CCI5xx_PORT_M1 0x9 +#define CCI5xx_PORT_M2 0xa +#define CCI5xx_PORT_M3 0xb +#define CCI5xx_PORT_M4 0xc +#define CCI5xx_PORT_M5 0xd +#define CCI5xx_PORT_M6 0xe + +#define CCI5xx_PORT_GLOBAL 0xf + +#define CCI5xx_PMU_EVENT_MASK 0x1ffUL +#define CCI5xx_PMU_EVENT_SOURCE_SHIFT 0x5 +#define CCI5xx_PMU_EVENT_SOURCE_MASK 0xf +#define CCI5xx_PMU_EVENT_CODE_SHIFT 0x0 +#define CCI5xx_PMU_EVENT_CODE_MASK 0x1f + +#define CCI5xx_PMU_EVENT_SOURCE(event) \ + ((event >> CCI5xx_PMU_EVENT_SOURCE_SHIFT) & CCI5xx_PMU_EVENT_SOURCE_MASK) +#define CCI5xx_PMU_EVENT_CODE(event) \ + ((event >> CCI5xx_PMU_EVENT_CODE_SHIFT) & CCI5xx_PMU_EVENT_CODE_MASK) + +#define CCI5xx_SLAVE_PORT_MIN_EV 0x00 +#define CCI5xx_SLAVE_PORT_MAX_EV 0x1f +#define CCI5xx_MASTER_PORT_MIN_EV 0x00 +#define CCI5xx_MASTER_PORT_MAX_EV 0x06 +#define CCI5xx_GLOBAL_PORT_MIN_EV 0x00 +#define CCI5xx_GLOBAL_PORT_MAX_EV 0x0f + + +#define CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(_name, _config) \ + CCI_EXT_ATTR_ENTRY(_name, cci5xx_pmu_global_event_show, \ + (unsigned long) _config) + +static ssize_t cci5xx_pmu_global_event_show(struct device *dev, + struct device_attribute *attr, char *buf); + +static struct attribute *cci5xx_pmu_format_attrs[] = { + CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"), + CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-8"), + NULL, +}; + +static struct attribute *cci5xx_pmu_event_attrs[] = { + /* Slave events */ + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_arvalid, 0x0), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_dev, 0x1), + 
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_nonshareable, 0x2), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_non_alloc, 0x3), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_alloc, 0x4), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_invalidate, 0x5), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maint, 0x6), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rval, 0x8), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rlast_snoop, 0x9), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_awalid, 0xA), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_dev, 0xB), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_non_shareable, 0xC), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wb, 0xD), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wlu, 0xE), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wunique, 0xF), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_evict, 0x10), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_wrevict, 0x11), + CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_beat, 0x12), + CCI_EVENT_EXT_ATTR_ENTRY(si_srq_acvalid, 0x13), + CCI_EVENT_EXT_ATTR_ENTRY(si_srq_read, 0x14), + CCI_EVENT_EXT_ATTR_ENTRY(si_srq_clean, 0x15), + CCI_EVENT_EXT_ATTR_ENTRY(si_srq_data_transfer_low, 0x16), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_arvalid, 0x17), + CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall, 0x18), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall, 0x19), + CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_stall, 0x1A), + CCI_EVENT_EXT_ATTR_ENTRY(si_w_resp_stall, 0x1B), + CCI_EVENT_EXT_ATTR_ENTRY(si_srq_stall, 0x1C), + CCI_EVENT_EXT_ATTR_ENTRY(si_s_data_stall, 0x1D), + CCI_EVENT_EXT_ATTR_ENTRY(si_rq_stall_ot_limit, 0x1E), + CCI_EVENT_EXT_ATTR_ENTRY(si_r_stall_arbit, 0x1F), + + /* Master events */ + CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_beat_any, 0x0), + CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_beat_any, 0x1), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall, 0x2), + CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_stall, 0x3), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall, 0x4), + CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_stall, 0x5), + CCI_EVENT_EXT_ATTR_ENTRY(mi_w_resp_stall, 0x6), + + /* Global events */ + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_0_1, 0x0), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_2_3, 0x1), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_4_5, 0x2), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_6_7, 0x3), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_0_1, 0x4), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_2_3, 0x5), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_4_5, 0x6), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_6_7, 0x7), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_back_invalidation, 0x8), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_alloc_busy, 0x9), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_tt_full, 0xA), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_stall_tt_full, 0xE), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF), + NULL +}; + +static ssize_t cci5xx_pmu_global_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr = container_of(attr, + struct dev_ext_attribute, attr); + /* Global events have single fixed source code */ + return snprintf(buf, PAGE_SIZE, "event=0x%lx,source=0x%x\n", + (unsigned long)eattr->var, CCI5xx_PORT_GLOBAL); +} + +/* + * CCI500 provides 8 
independent event counters that can count + * any of the events available. + * CCI500 PMU event source ids + * 0x0-0x6 - Slave interfaces + * 0x8-0xD - Master interfaces + * 0xf - Global Events + * 0x7,0xe - Reserved + */ +static int cci500_validate_hw_event(struct cci_pmu *cci_pmu, + unsigned long hw_event) +{ + u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event); + u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event); + int if_type; + + if (hw_event & ~CCI5xx_PMU_EVENT_MASK) + return -ENOENT; + + switch (ev_source) { + case CCI5xx_PORT_S0: + case CCI5xx_PORT_S1: + case CCI5xx_PORT_S2: + case CCI5xx_PORT_S3: + case CCI5xx_PORT_S4: + case CCI5xx_PORT_S5: + case CCI5xx_PORT_S6: + if_type = CCI_IF_SLAVE; + break; + case CCI5xx_PORT_M0: + case CCI5xx_PORT_M1: + case CCI5xx_PORT_M2: + case CCI5xx_PORT_M3: + case CCI5xx_PORT_M4: + case CCI5xx_PORT_M5: + if_type = CCI_IF_MASTER; + break; + case CCI5xx_PORT_GLOBAL: + if_type = CCI_IF_GLOBAL; + break; + default: + return -ENOENT; + } + + if (ev_code >= cci_pmu->model->event_ranges[if_type].min && + ev_code <= cci_pmu->model->event_ranges[if_type].max) + return hw_event; + + return -ENOENT; +} + +/* + * CCI550 provides 8 independent event counters that can count + * any of the events available. + * CCI550 PMU event source ids + * 0x0-0x6 - Slave interfaces + * 0x8-0xe - Master interfaces + * 0xf - Global Events + * 0x7 - Reserved + */ +static int cci550_validate_hw_event(struct cci_pmu *cci_pmu, + unsigned long hw_event) +{ + u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event); + u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event); + int if_type; + + if (hw_event & ~CCI5xx_PMU_EVENT_MASK) + return -ENOENT; + + switch (ev_source) { + case CCI5xx_PORT_S0: + case CCI5xx_PORT_S1: + case CCI5xx_PORT_S2: + case CCI5xx_PORT_S3: + case CCI5xx_PORT_S4: + case CCI5xx_PORT_S5: + case CCI5xx_PORT_S6: + if_type = CCI_IF_SLAVE; + break; + case CCI5xx_PORT_M0: + case CCI5xx_PORT_M1: + case CCI5xx_PORT_M2: + case CCI5xx_PORT_M3: + case CCI5xx_PORT_M4: + case CCI5xx_PORT_M5: + case CCI5xx_PORT_M6: + if_type = CCI_IF_MASTER; + break; + case CCI5xx_PORT_GLOBAL: + if_type = CCI_IF_GLOBAL; + break; + default: + return -ENOENT; + } + + if (ev_code >= cci_pmu->model->event_ranges[if_type].min && + ev_code <= cci_pmu->model->event_ranges[if_type].max) + return hw_event; + + return -ENOENT; +} + +#endif /* CONFIG_ARM_CCI5xx_PMU */ + +/* + * Program the CCI PMU counters which have PERF_HES_ARCH set + * with the event period and mark them ready before we enable + * PMU. + */ +static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu) +{ + int i; + struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events; + + DECLARE_BITMAP(mask, cci_pmu->num_cntrs); + + bitmap_zero(mask, cci_pmu->num_cntrs); + for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) { + struct perf_event *event = cci_hw->events[i]; + + if (WARN_ON(!event)) + continue; + + /* Leave the events which are not counting */ + if (event->hw.state & PERF_HES_STOPPED) + continue; + if (event->hw.state & PERF_HES_ARCH) { + set_bit(i, mask); + event->hw.state &= ~PERF_HES_ARCH; + } + } + + pmu_write_counters(cci_pmu, mask); +} + +/* Should be called with cci_pmu->hw_events->pmu_lock held */ +static void __cci_pmu_enable_nosync(struct cci_pmu *cci_pmu) +{ + u32 val; + + /* Enable all the PMU counters. 
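+	 * Note: CCI_PMCR_CEN gates every counter at once; each counter
+	 * additionally has its own enable bit in CCI_PMU_CNTR_CTRL.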
*/
+	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
+	writel(val, cci_ctrl_base + CCI_PMCR);
+}
+
+/* Should be called with cci_pmu->hw_events->pmu_lock held */
+static void __cci_pmu_enable_sync(struct cci_pmu *cci_pmu)
+{
+	cci_pmu_sync_counters(cci_pmu);
+	__cci_pmu_enable_nosync(cci_pmu);
+}
+
+/* Should be called with cci_pmu->hw_events->pmu_lock held */
+static void __cci_pmu_disable(void)
+{
+	u32 val;
+
+	/* Disable all the PMU counters. */
+	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
+	writel(val, cci_ctrl_base + CCI_PMCR);
+}
+
+static ssize_t cci_pmu_format_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct dev_ext_attribute *eattr = container_of(attr,
+				struct dev_ext_attribute, attr);
+	return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var);
+}
+
+static ssize_t cci_pmu_event_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct dev_ext_attribute *eattr = container_of(attr,
+				struct dev_ext_attribute, attr);
+	/* source parameter is mandatory for normal PMU events */
+	return snprintf(buf, PAGE_SIZE, "source=?,event=0x%lx\n",
+			 (unsigned long)eattr->var);
+}
+
+static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
+{
+	return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu);
+}
+
+static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset)
+{
+	return readl_relaxed(cci_pmu->base +
+			     CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
+}
+
+static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value,
+			       int idx, unsigned int offset)
+{
+	writel_relaxed(value, cci_pmu->base +
+		       CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
+}
+
+static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx)
+{
+	pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL);
+}
+
+static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx)
+{
+	pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL);
+}
+
+static bool __maybe_unused
+pmu_counter_is_enabled(struct cci_pmu *cci_pmu, int idx)
+{
+	return (pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR_CTRL) & 0x1) != 0;
+}
+
+static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event)
+{
+	pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL);
+}
+
+/*
+ * For all counters on the CCI-PMU, disable any 'enabled' counters,
+ * saving the changed counters in the mask, so that we can restore
+ * them later using pmu_restore_counters(). The mask is private to the
+ * caller. We cannot rely on the used_mask maintained by the CCI_PMU
+ * as it only tells us if the counter is assigned to a perf_event or not.
+ * The state of the perf_event cannot be locked by the PMU layer, hence
+ * we check the individual counter status (which can be locked by
+ * cci_pmu->hw_events->pmu_lock).
+ *
+ * @mask should be initialised to empty by the caller.
+ */
+static void __maybe_unused
+pmu_save_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
+{
+	int i;
+
+	for (i = 0; i < cci_pmu->num_cntrs; i++) {
+		if (pmu_counter_is_enabled(cci_pmu, i)) {
+			set_bit(i, mask);
+			pmu_disable_counter(cci_pmu, i);
+		}
+	}
+}
+
+/*
+ * Restore the status of the counters. This is the reversal of pmu_save_counters().
+ * For each counter set in the mask, enable the counter again.
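+ * The counter values themselves are untouched here; only the per-counter
+ * enable bits are written.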
+ */ +static void __maybe_unused +pmu_restore_counters(struct cci_pmu *cci_pmu, unsigned long *mask) +{ + int i; + + for_each_set_bit(i, mask, cci_pmu->num_cntrs) + pmu_enable_counter(cci_pmu, i); +} + +/* + * Returns the number of programmable counters actually implemented + * by the cci + */ +static u32 pmu_get_max_counters(void) +{ + return (readl_relaxed(cci_ctrl_base + CCI_PMCR) & + CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT; +} + +static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event) +{ + struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); + unsigned long cci_event = event->hw.config_base; + int idx; + + if (cci_pmu->model->get_event_idx) + return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event); + + /* Generic code to find an unused idx from the mask */ + for(idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) + if (!test_and_set_bit(idx, hw->used_mask)) + return idx; + + /* No counters available */ + return -EAGAIN; +} + +static int pmu_map_event(struct perf_event *event) +{ + struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); + + if (event->attr.type < PERF_TYPE_MAX || + !cci_pmu->model->validate_hw_event) + return -ENOENT; + + return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config); +} + +static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler) +{ + int i; + struct platform_device *pmu_device = cci_pmu->plat_device; + + if (unlikely(!pmu_device)) + return -ENODEV; + + if (cci_pmu->nr_irqs < 1) { + dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n"); + return -ENODEV; + } + + /* + * Register all available CCI PMU interrupts. In the interrupt handler + * we iterate over the counters checking for interrupt source (the + * overflowing counter) and clear it. + * + * This should allow handling of non-unique interrupt for the counters. 
+ */
+	for (i = 0; i < cci_pmu->nr_irqs; i++) {
+		int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED,
+				      "arm-cci-pmu", cci_pmu);
+		if (err) {
+			dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
+				cci_pmu->irqs[i]);
+			return err;
+		}
+
+		set_bit(i, &cci_pmu->active_irqs);
+	}
+
+	return 0;
+}
+
+static void pmu_free_irq(struct cci_pmu *cci_pmu)
+{
+	int i;
+
+	for (i = 0; i < cci_pmu->nr_irqs; i++) {
+		if (!test_and_clear_bit(i, &cci_pmu->active_irqs))
+			continue;
+
+		free_irq(cci_pmu->irqs[i], cci_pmu);
+	}
+}
+
+static u32 pmu_read_counter(struct perf_event *event)
+{
+	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+	struct hw_perf_event *hw_counter = &event->hw;
+	int idx = hw_counter->idx;
+	u32 value;
+
+	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+		return 0;
+	}
+	value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR);
+
+	return value;
+}
+
+static void pmu_write_counter(struct cci_pmu *cci_pmu, u32 value, int idx)
+{
+	pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR);
+}
+
+static void __pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
+{
+	int i;
+	struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
+
+	for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
+		struct perf_event *event = cci_hw->events[i];
+
+		if (WARN_ON(!event))
+			continue;
+		pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
+	}
+}
+
+static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
+{
+	if (cci_pmu->model->write_counters)
+		cci_pmu->model->write_counters(cci_pmu, mask);
+	else
+		__pmu_write_counters(cci_pmu, mask);
+}
+
+#ifdef CONFIG_ARM_CCI5xx_PMU
+
+/*
+ * CCI-500/CCI-550 have advanced power saving policies, which could gate the
+ * clocks to the PMU counters, which makes the writes to them ineffective.
+ * The only way to write to those counters is when the global counters
+ * are enabled and the particular counter is enabled.
+ *
+ * So we do the following:
+ *
+ * 1) Disable all the PMU counters, saving their current state
+ * 2) Enable the global PMU profiling, now that all counters are
+ *    disabled.
+ *
+ * For each counter to be programmed, repeat steps 3-7:
+ *
+ * 3) Write an invalid event code to the event control register for the
+ *    counter, so that the counters are not modified.
+ * 4) Enable the counter control for the counter.
+ * 5) Set the counter value
+ * 6) Disable the counter
+ * 7) Restore the event in the target counter
+ *
+ * 8) Disable the global PMU.
+ * 9) Restore the status of the rest of the counters.
+ *
+ * We choose an event which for CCI-5xx is guaranteed not to count.
+ * We use the highest possible event code (0x1f) for the master interface 0.
+ */
+#define CCI5xx_INVALID_EVENT	((CCI5xx_PORT_M0 << CCI5xx_PMU_EVENT_SOURCE_SHIFT) | \
+				 (CCI5xx_PMU_EVENT_CODE_MASK << CCI5xx_PMU_EVENT_CODE_SHIFT))
+static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
+{
+	int i;
+	DECLARE_BITMAP(saved_mask, cci_pmu->num_cntrs);
+
+	bitmap_zero(saved_mask, cci_pmu->num_cntrs);
+	pmu_save_counters(cci_pmu, saved_mask);
+
+	/*
+	 * Now that all the counters are disabled, we can safely turn the PMU on,
+	 * without syncing the status of the counters
+	 */
+	__cci_pmu_enable_nosync(cci_pmu);
+
+	for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
+		struct perf_event *event = cci_pmu->hw_events.events[i];
+
+		if (WARN_ON(!event))
+			continue;
+
+		pmu_set_event(cci_pmu, i, CCI5xx_INVALID_EVENT);
+		pmu_enable_counter(cci_pmu, i);
+		pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
+		pmu_disable_counter(cci_pmu, i);
+		pmu_set_event(cci_pmu, i, event->hw.config_base);
+	}
+
+	__cci_pmu_disable();
+
+	pmu_restore_counters(cci_pmu, saved_mask);
+}
+
+#endif	/* CONFIG_ARM_CCI5xx_PMU */
+
+static u64 pmu_event_update(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	u64 delta, prev_raw_count, new_raw_count;
+
+	do {
+		prev_raw_count = local64_read(&hwc->prev_count);
+		new_raw_count = pmu_read_counter(event);
+	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+		 new_raw_count) != prev_raw_count);
+
+	delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;
+
+	local64_add(delta, &event->count);
+
+	return new_raw_count;
+}
+
+static void pmu_read(struct perf_event *event)
+{
+	pmu_event_update(event);
+}
+
+static void pmu_event_set_period(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	/*
+	 * The CCI PMU counters have a period of 2^32. To account for the
+	 * possibility of extreme interrupt latency we program for a period of
+	 * half that. Hopefully we can handle the interrupt before another 2^31
+	 * events occur and the counter overtakes its previous value.
+	 */
+	u64 val = 1ULL << 31;
+	local64_set(&hwc->prev_count, val);
+
+	/*
+	 * CCI PMU uses PERF_HES_ARCH to keep track of the counters whose
+	 * values need to be synced with the s/w state before the PMU is
+	 * enabled.
+	 * Mark this counter for sync.
+	 */
+	hwc->state |= PERF_HES_ARCH;
+}
+
+static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
+{
+	unsigned long flags;
+	struct cci_pmu *cci_pmu = dev;
+	struct cci_pmu_hw_events *events = &cci_pmu->hw_events;
+	int idx, handled = IRQ_NONE;
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+	/* Disable the PMU while we walk through the counters */
+	__cci_pmu_disable();
+	/*
+	 * Iterate over counters and update the corresponding perf events.
+	 * This should work regardless of whether we have per-counter overflow
+	 * interrupt or a combined overflow interrupt.
+	 */
+	for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
+		struct perf_event *event = events->events[idx];
+
+		if (!event)
+			continue;
+
+		/* Did this counter overflow? */
+		if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) &
+		      CCI_PMU_OVRFLW_FLAG))
+			continue;
+
+		pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx,
+				   CCI_PMU_OVRFLW);
+
+		pmu_event_update(event);
+		pmu_event_set_period(event);
+		handled = IRQ_HANDLED;
+	}
+
+	/* Enable the PMU and sync possibly overflowed counters */
+	__cci_pmu_enable_sync(cci_pmu);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+
+	return IRQ_RETVAL(handled);
+}
+
+static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
+{
+	int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
+	if (ret) {
+		pmu_free_irq(cci_pmu);
+		return ret;
+	}
+	return 0;
+}
+
+static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
+{
+	pmu_free_irq(cci_pmu);
+}
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+	atomic_t *active_events = &cci_pmu->active_events;
+	struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;
+
+	if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
+		cci_pmu_put_hw(cci_pmu);
+		mutex_unlock(reserve_mutex);
+	}
+}
+
+static void cci_pmu_enable(struct pmu *pmu)
+{
+	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
+	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
+	int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_cntrs);
+	unsigned long flags;
+
+	if (!enabled)
+		return;
+
+	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
+	__cci_pmu_enable_sync(cci_pmu);
+	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
+}
+
+static void cci_pmu_disable(struct pmu *pmu)
+{
+	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
+	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
+	__cci_pmu_disable();
+	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
+}
+
+/*
+ * Check if the idx represents a non-programmable counter.
+ * All the fixed event counters are mapped before the programmable
+ * counters.
+ */
+static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx)
+{
+	return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs);
+}
+
+static void cci_pmu_start(struct perf_event *event, int pmu_flags)
+{
+	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+	unsigned long flags;
+
+	/*
+	 * To handle interrupt latency, we always reprogram the period
+	 * regardless of PERF_EF_RELOAD.
+ */ + if (pmu_flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + + hwc->state = 0; + + if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { + dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); + return; + } + + raw_spin_lock_irqsave(&hw_events->pmu_lock, flags); + + /* Configure the counter unless you are counting a fixed event */ + if (!pmu_fixed_hw_idx(cci_pmu, idx)) + pmu_set_event(cci_pmu, idx, hwc->config_base); + + pmu_event_set_period(event); + pmu_enable_counter(cci_pmu, idx); + + raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); +} + +static void cci_pmu_stop(struct perf_event *event, int pmu_flags) +{ + struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (hwc->state & PERF_HES_STOPPED) + return; + + if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { + dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); + return; + } + + /* + * We always reprogram the counter, so ignore PERF_EF_UPDATE. See + * cci_pmu_start() + */ + pmu_disable_counter(cci_pmu, idx); + pmu_event_update(event); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; +} + +static int cci_pmu_add(struct perf_event *event, int flags) +{ + struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); + struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; + struct hw_perf_event *hwc = &event->hw; + int idx; + int err = 0; + + perf_pmu_disable(event->pmu); + + /* If we don't have a space for the counter then finish early. */ + idx = pmu_get_event_idx(hw_events, event); + if (idx < 0) { + err = idx; + goto out; + } + + event->hw.idx = idx; + hw_events->events[idx] = event; + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + if (flags & PERF_EF_START) + cci_pmu_start(event, PERF_EF_RELOAD); + + /* Propagate our changes to the userspace mapping. */ + perf_event_update_userpage(event); + +out: + perf_pmu_enable(event->pmu); + return err; +} + +static void cci_pmu_del(struct perf_event *event, int flags) +{ + struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); + struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + cci_pmu_stop(event, PERF_EF_UPDATE); + hw_events->events[idx] = NULL; + clear_bit(idx, hw_events->used_mask); + + perf_event_update_userpage(event); +} + +static int validate_event(struct pmu *cci_pmu, + struct cci_pmu_hw_events *hw_events, + struct perf_event *event) +{ + if (is_software_event(event)) + return 1; + + /* + * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The + * core perf code won't check that the pmu->ctx == leader->ctx + * until after pmu->event_init(event). + */ + if (event->pmu != cci_pmu) + return 0; + + if (event->state < PERF_EVENT_STATE_OFF) + return 1; + + if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) + return 1; + + return pmu_get_event_idx(hw_events, event) >= 0; +} + +static int validate_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); + unsigned long mask[BITS_TO_LONGS(cci_pmu->num_cntrs)]; + struct cci_pmu_hw_events fake_pmu = { + /* + * Initialise the fake PMU. We only need to populate the + * used_mask for the purposes of validation. 
+ */ + .used_mask = mask, + }; + memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long)); + + if (!validate_event(event->pmu, &fake_pmu, leader)) + return -EINVAL; + + list_for_each_entry(sibling, &leader->sibling_list, group_entry) { + if (!validate_event(event->pmu, &fake_pmu, sibling)) + return -EINVAL; + } + + if (!validate_event(event->pmu, &fake_pmu, event)) + return -EINVAL; + + return 0; +} + +static int __hw_perf_event_init(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int mapping; + + mapping = pmu_map_event(event); + + if (mapping < 0) { + pr_debug("event %x:%llx not supported\n", event->attr.type, + event->attr.config); + return mapping; + } + + /* + * We don't assign an index until we actually place the event onto + * hardware. Use -1 to signify that we haven't decided where to put it + * yet. + */ + hwc->idx = -1; + hwc->config_base = 0; + hwc->config = 0; + hwc->event_base = 0; + + /* + * Store the event encoding into the config_base field. + */ + hwc->config_base |= (unsigned long)mapping; + + /* + * Limit the sample_period to half of the counter width. That way, the + * new counter value is far less likely to overtake the previous one + * unless you have some serious IRQ latency issues. + */ + hwc->sample_period = CCI_PMU_CNTR_MASK >> 1; + hwc->last_period = hwc->sample_period; + local64_set(&hwc->period_left, hwc->sample_period); + + if (event->group_leader != event) { + if (validate_group(event) != 0) + return -EINVAL; + } + + return 0; +} + +static int cci_pmu_event_init(struct perf_event *event) +{ + struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); + atomic_t *active_events = &cci_pmu->active_events; + int err = 0; + int cpu; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* Shared by all CPUs, no meaningful state to sample */ + if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) + return -EOPNOTSUPP; + + /* We have no filtering of any kind */ + if (event->attr.exclude_user || + event->attr.exclude_kernel || + event->attr.exclude_hv || + event->attr.exclude_idle || + event->attr.exclude_host || + event->attr.exclude_guest) + return -EINVAL; + + /* + * Following the example set by other "uncore" PMUs, we accept any CPU + * and rewrite its affinity dynamically rather than having perf core + * handle cpu == -1 and pid == -1 for this case. + * + * The perf core will pin online CPUs for the duration of this call and + * the event being installed into its context, so the PMU's CPU can't + * change under our feet. 
+ */ + cpu = cpumask_first(&cci_pmu->cpus); + if (event->cpu < 0 || cpu < 0) + return -EINVAL; + event->cpu = cpu; + + event->destroy = hw_perf_event_destroy; + if (!atomic_inc_not_zero(active_events)) { + mutex_lock(&cci_pmu->reserve_mutex); + if (atomic_read(active_events) == 0) + err = cci_pmu_get_hw(cci_pmu); + if (!err) + atomic_inc(active_events); + mutex_unlock(&cci_pmu->reserve_mutex); + } + if (err) + return err; + + err = __hw_perf_event_init(event); + if (err) + hw_perf_event_destroy(event); + + return err; +} + +static ssize_t pmu_cpumask_attr_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pmu *pmu = dev_get_drvdata(dev); + struct cci_pmu *cci_pmu = to_cci_pmu(pmu); + + int n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl", + cpumask_pr_args(&cci_pmu->cpus)); + buf[n++] = '\n'; + buf[n] = '\0'; + return n; +} + +static struct device_attribute pmu_cpumask_attr = + __ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL); + +static struct attribute *pmu_attrs[] = { + &pmu_cpumask_attr.attr, + NULL, +}; + +static struct attribute_group pmu_attr_group = { + .attrs = pmu_attrs, +}; + +static struct attribute_group pmu_format_attr_group = { + .name = "format", + .attrs = NULL, /* Filled in cci_pmu_init_attrs */ +}; + +static struct attribute_group pmu_event_attr_group = { + .name = "events", + .attrs = NULL, /* Filled in cci_pmu_init_attrs */ +}; + +static const struct attribute_group *pmu_attr_groups[] = { + &pmu_attr_group, + &pmu_format_attr_group, + &pmu_event_attr_group, + NULL +}; + +static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) +{ + const struct cci_pmu_model *model = cci_pmu->model; + char *name = model->name; + u32 num_cntrs; + + pmu_event_attr_group.attrs = model->event_attrs; + pmu_format_attr_group.attrs = model->format_attrs; + + cci_pmu->pmu = (struct pmu) { + .name = cci_pmu->model->name, + .task_ctx_nr = perf_invalid_context, + .pmu_enable = cci_pmu_enable, + .pmu_disable = cci_pmu_disable, + .event_init = cci_pmu_event_init, + .add = cci_pmu_add, + .del = cci_pmu_del, + .start = cci_pmu_start, + .stop = cci_pmu_stop, + .read = pmu_read, + .attr_groups = pmu_attr_groups, + }; + + cci_pmu->plat_device = pdev; + num_cntrs = pmu_get_max_counters(); + if (num_cntrs > cci_pmu->model->num_hw_cntrs) { + dev_warn(&pdev->dev, + "PMU implements more counters(%d) than supported by" + " the model(%d), truncated.", + num_cntrs, cci_pmu->model->num_hw_cntrs); + num_cntrs = cci_pmu->model->num_hw_cntrs; + } + cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs; + + return perf_pmu_register(&cci_pmu->pmu, name, -1); +} + +static int cci_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct cci_pmu *cci_pmu = hlist_entry_safe(node, struct cci_pmu, node); + unsigned int target; + + if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus)) + return 0; + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + /* + * TODO: migrate context once core races on event->ctx have + * been fixed. 
+ */ + cpumask_set_cpu(target, &cci_pmu->cpus); + return 0; +} + +static struct cci_pmu_model cci_pmu_models[] = { +#ifdef CONFIG_ARM_CCI400_PMU + [CCI400_R0] = { + .name = "CCI_400", + .fixed_hw_cntrs = 1, /* Cycle counter */ + .num_hw_cntrs = 4, + .cntr_size = SZ_4K, + .format_attrs = cci400_pmu_format_attrs, + .event_attrs = cci400_r0_pmu_event_attrs, + .event_ranges = { + [CCI_IF_SLAVE] = { + CCI400_R0_SLAVE_PORT_MIN_EV, + CCI400_R0_SLAVE_PORT_MAX_EV, + }, + [CCI_IF_MASTER] = { + CCI400_R0_MASTER_PORT_MIN_EV, + CCI400_R0_MASTER_PORT_MAX_EV, + }, + }, + .validate_hw_event = cci400_validate_hw_event, + .get_event_idx = cci400_get_event_idx, + }, + [CCI400_R1] = { + .name = "CCI_400_r1", + .fixed_hw_cntrs = 1, /* Cycle counter */ + .num_hw_cntrs = 4, + .cntr_size = SZ_4K, + .format_attrs = cci400_pmu_format_attrs, + .event_attrs = cci400_r1_pmu_event_attrs, + .event_ranges = { + [CCI_IF_SLAVE] = { + CCI400_R1_SLAVE_PORT_MIN_EV, + CCI400_R1_SLAVE_PORT_MAX_EV, + }, + [CCI_IF_MASTER] = { + CCI400_R1_MASTER_PORT_MIN_EV, + CCI400_R1_MASTER_PORT_MAX_EV, + }, + }, + .validate_hw_event = cci400_validate_hw_event, + .get_event_idx = cci400_get_event_idx, + }, +#endif +#ifdef CONFIG_ARM_CCI5xx_PMU + [CCI500_R0] = { + .name = "CCI_500", + .fixed_hw_cntrs = 0, + .num_hw_cntrs = 8, + .cntr_size = SZ_64K, + .format_attrs = cci5xx_pmu_format_attrs, + .event_attrs = cci5xx_pmu_event_attrs, + .event_ranges = { + [CCI_IF_SLAVE] = { + CCI5xx_SLAVE_PORT_MIN_EV, + CCI5xx_SLAVE_PORT_MAX_EV, + }, + [CCI_IF_MASTER] = { + CCI5xx_MASTER_PORT_MIN_EV, + CCI5xx_MASTER_PORT_MAX_EV, + }, + [CCI_IF_GLOBAL] = { + CCI5xx_GLOBAL_PORT_MIN_EV, + CCI5xx_GLOBAL_PORT_MAX_EV, + }, + }, + .validate_hw_event = cci500_validate_hw_event, + .write_counters = cci5xx_pmu_write_counters, + }, + [CCI550_R0] = { + .name = "CCI_550", + .fixed_hw_cntrs = 0, + .num_hw_cntrs = 8, + .cntr_size = SZ_64K, + .format_attrs = cci5xx_pmu_format_attrs, + .event_attrs = cci5xx_pmu_event_attrs, + .event_ranges = { + [CCI_IF_SLAVE] = { + CCI5xx_SLAVE_PORT_MIN_EV, + CCI5xx_SLAVE_PORT_MAX_EV, + }, + [CCI_IF_MASTER] = { + CCI5xx_MASTER_PORT_MIN_EV, + CCI5xx_MASTER_PORT_MAX_EV, + }, + [CCI_IF_GLOBAL] = { + CCI5xx_GLOBAL_PORT_MIN_EV, + CCI5xx_GLOBAL_PORT_MAX_EV, + }, + }, + .validate_hw_event = cci550_validate_hw_event, + .write_counters = cci5xx_pmu_write_counters, + }, +#endif +}; + +static const struct of_device_id arm_cci_pmu_matches[] = { +#ifdef CONFIG_ARM_CCI400_PMU + { + .compatible = "arm,cci-400-pmu", + .data = NULL, + }, + { + .compatible = "arm,cci-400-pmu,r0", + .data = &cci_pmu_models[CCI400_R0], + }, + { + .compatible = "arm,cci-400-pmu,r1", + .data = &cci_pmu_models[CCI400_R1], + }, +#endif +#ifdef CONFIG_ARM_CCI5xx_PMU + { + .compatible = "arm,cci-500-pmu,r0", + .data = &cci_pmu_models[CCI500_R0], + }, + { + .compatible = "arm,cci-550-pmu,r0", + .data = &cci_pmu_models[CCI550_R0], + }, +#endif + {}, +}; + +static inline const struct cci_pmu_model *get_cci_model(struct platform_device *pdev) +{ + const struct of_device_id *match = of_match_node(arm_cci_pmu_matches, + pdev->dev.of_node); + if (!match) + return NULL; + if (match->data) + return match->data; + + dev_warn(&pdev->dev, "DEPRECATED compatible property," + "requires secure access to CCI registers"); + return probe_cci_model(pdev); +} + +static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs) +{ + int i; + + for (i = 0; i < nr_irqs; i++) + if (irq == irqs[i]) + return true; + + return false; +} + +static struct cci_pmu *cci_pmu_alloc(struct platform_device *pdev) +{ + struct 
cci_pmu *cci_pmu;
+	const struct cci_pmu_model *model;
+
+	/*
+	 * All allocations are devm_* hence we don't have to free
+	 * them explicitly on an error, as it would end up in driver
+	 * detach.
+	 */
+	model = get_cci_model(pdev);
+	if (!model) {
+		dev_warn(&pdev->dev, "CCI PMU version not supported\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*cci_pmu), GFP_KERNEL);
+	if (!cci_pmu)
+		return ERR_PTR(-ENOMEM);
+
+	cci_pmu->model = model;
+	cci_pmu->irqs = devm_kcalloc(&pdev->dev, CCI_PMU_MAX_HW_CNTRS(model),
+				     sizeof(*cci_pmu->irqs), GFP_KERNEL);
+	if (!cci_pmu->irqs)
+		return ERR_PTR(-ENOMEM);
+	cci_pmu->hw_events.events = devm_kcalloc(&pdev->dev,
+						 CCI_PMU_MAX_HW_CNTRS(model),
+						 sizeof(*cci_pmu->hw_events.events),
+						 GFP_KERNEL);
+	if (!cci_pmu->hw_events.events)
+		return ERR_PTR(-ENOMEM);
+	cci_pmu->hw_events.used_mask = devm_kcalloc(&pdev->dev,
+						    BITS_TO_LONGS(CCI_PMU_MAX_HW_CNTRS(model)),
+						    sizeof(*cci_pmu->hw_events.used_mask),
+						    GFP_KERNEL);
+	if (!cci_pmu->hw_events.used_mask)
+		return ERR_PTR(-ENOMEM);
+
+	return cci_pmu;
+}
+
+static int cci_pmu_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct cci_pmu *cci_pmu;
+	int i, ret, irq;
+
+	cci_pmu = cci_pmu_alloc(pdev);
+	if (IS_ERR(cci_pmu))
+		return PTR_ERR(cci_pmu);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	cci_pmu->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(cci_pmu->base))
+		return -ENOMEM;
+
+	/*
+	 * CCI PMU has one overflow interrupt per counter; but some may be tied
+	 * together to a common interrupt.
+	 */
+	cci_pmu->nr_irqs = 0;
+	for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) {
+		irq = platform_get_irq(pdev, i);
+		if (irq < 0)
+			break;
+
+		if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs))
+			continue;
+
+		cci_pmu->irqs[cci_pmu->nr_irqs++] = irq;
+	}
+
+	/*
+	 * Ensure that the device tree has as many interrupts as the number
+	 * of counters.
+	 */
+	if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) {
+		dev_warn(&pdev->dev, "Incorrect number of interrupts: %d, should be %d\n",
+			i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model));
+		return -EINVAL;
+	}
+
+	raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
+	mutex_init(&cci_pmu->reserve_mutex);
+	atomic_set(&cci_pmu->active_events, 0);
+	cpumask_set_cpu(get_cpu(), &cci_pmu->cpus);
+
+	ret = cci_pmu_init(cci_pmu, pdev);
+	if (ret) {
+		put_cpu();
+		return ret;
+	}
+
+	cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
+					 &cci_pmu->node);
+	put_cpu();
+	pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
+	return 0;
+}
+
+static struct platform_driver cci_pmu_driver = {
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = arm_cci_pmu_matches,
+	},
+	.probe = cci_pmu_probe,
+};
+
+static int __init cci_platform_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCI_ONLINE,
+				      "perf/arm/cci:online", NULL,
+				      cci_pmu_offline_cpu);
+	if (ret)
+		return ret;
+
+	return platform_driver_register(&cci_pmu_driver);
+}
+
+device_initcall(cci_platform_init);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ARM CCI PMU support");
-- cgit v1.2.3

From 03057f2626e955ebea88a668a6d7d699f836e5c0 Mon Sep 17 00:00:00 2001
From: Robin Murphy
Date: Thu, 15 Feb 2018 18:51:43 +0000
Subject: perf/arm-cci: Simplify CPU hotplug

Realistically, systems with multiple CCIs are unlikely to ever exist,
and since the driver only actually supports a single instance anyway
there's really no need to do the multi-instance hotplug state dance.
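
In outline, the single-instance scheme just remembers the one CPU that
owns the PMU context and migrates that context when the CPU goes away.
A condensed sketch of the pattern, reduced from the diff below (error
handling and state registration omitted):

	static struct cci_pmu *g_cci_pmu;

	static int cci_pmu_offline_cpu(unsigned int cpu)
	{
		int target;

		/* Nothing to do unless the outgoing CPU owns the context */
		if (!g_cci_pmu || cpu != g_cci_pmu->cpu)
			return 0;

		target = cpumask_any_but(cpu_online_mask, cpu);
		if (target >= nr_cpu_ids)
			return 0;

		/* Hand the events over to a surviving CPU */
		perf_pmu_migrate_context(&g_cci_pmu->pmu, cpu, target);
		g_cci_pmu->cpu = target;
		return 0;
	}
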
Take the opportunity to simplify the hotplug-related code all over, addressing the context-migration TODO in the process for good measure. Acked-by: Punit Agrawal Acked-by: Will Deacon Signed-off-by: Robin Murphy Signed-off-by: Arnd Bergmann --- drivers/perf/arm-cci.c | 56 +++++++++++++++++--------------------------------- 1 file changed, 19 insertions(+), 37 deletions(-) diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c index d5f8c750fd41..242623fbce1f 100644 --- a/drivers/perf/arm-cci.c +++ b/drivers/perf/arm-cci.c @@ -90,6 +90,7 @@ static struct cci_pmu_model cci_pmu_models[]; struct cci_pmu { void __iomem *base; struct pmu pmu; + int cpu; int nr_irqs; int *irqs; unsigned long active_irqs; @@ -99,12 +100,12 @@ struct cci_pmu { int num_cntrs; atomic_t active_events; struct mutex reserve_mutex; - struct hlist_node node; - cpumask_t cpus; }; #define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu)) +static struct cci_pmu *g_cci_pmu; + enum cci_models { #ifdef CONFIG_ARM_CCI400_PMU CCI400_R0, @@ -1325,7 +1326,6 @@ static int cci_pmu_event_init(struct perf_event *event) struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); atomic_t *active_events = &cci_pmu->active_events; int err = 0; - int cpu; if (event->attr.type != event->pmu->type) return -ENOENT; @@ -1352,10 +1352,9 @@ static int cci_pmu_event_init(struct perf_event *event) * the event being installed into its context, so the PMU's CPU can't * change under our feet. */ - cpu = cpumask_first(&cci_pmu->cpus); - if (event->cpu < 0 || cpu < 0) + if (event->cpu < 0) return -EINVAL; - event->cpu = cpu; + event->cpu = cci_pmu->cpu; event->destroy = hw_perf_event_destroy; if (!atomic_inc_not_zero(active_events)) { @@ -1382,11 +1381,7 @@ static ssize_t pmu_cpumask_attr_show(struct device *dev, struct pmu *pmu = dev_get_drvdata(dev); struct cci_pmu *cci_pmu = to_cci_pmu(pmu); - int n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl", - cpumask_pr_args(&cci_pmu->cpus)); - buf[n++] = '\n'; - buf[n] = '\0'; - return n; + return cpumap_print_to_pagebuf(true, buf, cpumask_of(cci_pmu->cpu)); } static struct device_attribute pmu_cpumask_attr = @@ -1455,21 +1450,19 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) return perf_pmu_register(&cci_pmu->pmu, name, -1); } -static int cci_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +static int cci_pmu_offline_cpu(unsigned int cpu) { - struct cci_pmu *cci_pmu = hlist_entry_safe(node, struct cci_pmu, node); - unsigned int target; + int target; - if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus)) + if (!g_cci_pmu || cpu != g_cci_pmu->cpu) return 0; + target = cpumask_any_but(cpu_online_mask, cpu); if (target >= nr_cpu_ids) return 0; - /* - * TODO: migrate context once core races on event->ctx have - * been fixed. 
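
The net result is the usual two-step lookup; roughly, as condensed from
the diff below:

	model = of_device_get_match_data(dev);
	if (!model) {
		/* old DT without a per-revision compatible: probe the hardware */
		model = probe_cci_model();
	}
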
- */ - cpumask_set_cpu(target, &cci_pmu->cpus); + + perf_pmu_migrate_context(&g_cci_pmu->pmu, cpu, target); + g_cci_pmu->cpu = target; return 0; } @@ -1706,7 +1699,7 @@ static int cci_pmu_probe(struct platform_device *pdev) raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock); mutex_init(&cci_pmu->reserve_mutex); atomic_set(&cci_pmu->active_events, 0); - cpumask_set_cpu(get_cpu(), &cci_pmu->cpus); + cci_pmu->cpu = get_cpu(); ret = cci_pmu_init(cci_pmu, pdev); if (ret) { @@ -1714,9 +1707,11 @@ static int cci_pmu_probe(struct platform_device *pdev) return ret; } - cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE, - &cci_pmu->node); + cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE, + "perf/arm/cci:online", NULL, + cci_pmu_offline_cpu); put_cpu(); + g_cci_pmu = cci_pmu; pr_info("ARM %s PMU driver probed", cci_pmu->model->name); return 0; } @@ -1729,19 +1724,6 @@ static struct platform_driver cci_pmu_driver = { .probe = cci_pmu_probe, }; -static int __init cci_platform_init(void) -{ - int ret; - - ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCI_ONLINE, - "perf/arm/cci:online", NULL, - cci_pmu_offline_cpu); - if (ret) - return ret; - - return platform_driver_register(&cci_pmu_driver); -} - -device_initcall(cci_platform_init); +builtin_platform_driver(cci_pmu_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("ARM CCI PMU support"); -- cgit v1.2.3 From 3ee5e821f3a1affd406ff1031f4cf6d7b4f81ab3 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 15 Feb 2018 18:51:44 +0000 Subject: bus/arm-cci: Streamline devicetree handling a bit Rather than iterating over child nodes explicitly testing for availability, we can just use the other helper which already subsumes that check. Also, the availability check is already NULL-safe, so get rid of a redundant check in cci_probe(), too. Acked-by: Punit Agrawal Acked-by: Will Deacon Signed-off-by: Robin Murphy Signed-off-by: Arnd Bergmann --- drivers/bus/arm-cci.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c index 503c1789dd02..23dc0b890d0c 100644 --- a/drivers/bus/arm-cci.c +++ b/drivers/bus/arm-cci.c @@ -448,13 +448,10 @@ static int cci_probe_ports(struct device_node *np) if (!ports) return -ENOMEM; - for_each_child_of_node(np, cp) { + for_each_available_child_of_node(np, cp) { if (!of_match_node(arm_cci_ctrl_if_matches, cp)) continue; - if (!of_device_is_available(cp)) - continue; - i = nb_ace + nb_ace_lite; if (i >= nb_cci_ports) @@ -534,7 +531,7 @@ static int cci_probe(void) struct resource res; np = of_find_matching_node(NULL, arm_cci_matches); - if(!np || !of_device_is_available(np)) + if (!of_device_is_available(np)) return -ENODEV; ret = of_address_to_resource(np, 0, &res); -- cgit v1.2.3 From 32837954db462ecc28051923109ef6e4a221f2b2 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 15 Feb 2018 18:51:45 +0000 Subject: perf/arm-cci: Clean up model discovery Since I am the self-appointed of_device_get_match_data() police, it's only right that I should clean up this driver while I'm otherwise touching it. This also reveals that we're passing around a struct platform_device in places where we only ever care about its regular device, so straighten that out in the process. 
Acked-by: Punit Agrawal Acked-by: Will Deacon Signed-off-by: Robin Murphy Signed-off-by: Arnd Bergmann --- drivers/perf/arm-cci.c | 40 ++++++++++++++++------------------------ 1 file changed, 16 insertions(+), 24 deletions(-) diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c index 242623fbce1f..336f1455cf96 100644 --- a/drivers/perf/arm-cci.c +++ b/drivers/perf/arm-cci.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -371,14 +372,14 @@ static int probe_cci400_revision(void) return CCI400_R1; } -static const struct cci_pmu_model *probe_cci_model(struct platform_device *pdev) +static const struct cci_pmu_model *probe_cci_model(void) { if (platform_has_secure_cci_access()) return &cci_pmu_models[probe_cci400_revision()]; return NULL; } #else /* !CONFIG_ARM_CCI400_PMU */ -static inline struct cci_pmu_model *probe_cci_model(struct platform_device *pdev) +static inline struct cci_pmu_model *probe_cci_model(void) { return NULL; } @@ -1589,20 +1590,6 @@ static const struct of_device_id arm_cci_pmu_matches[] = { {}, }; -static inline const struct cci_pmu_model *get_cci_model(struct platform_device *pdev) -{ - const struct of_device_id *match = of_match_node(arm_cci_pmu_matches, - pdev->dev.of_node); - if (!match) - return NULL; - if (match->data) - return match->data; - - dev_warn(&pdev->dev, "DEPRECATED compatible property," - "requires secure access to CCI registers"); - return probe_cci_model(pdev); -} - static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs) { int i; @@ -1614,7 +1601,7 @@ static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs) return false; } -static struct cci_pmu *cci_pmu_alloc(struct platform_device *pdev) +static struct cci_pmu *cci_pmu_alloc(struct device *dev) { struct cci_pmu *cci_pmu; const struct cci_pmu_model *model; @@ -1624,28 +1611,33 @@ static struct cci_pmu *cci_pmu_alloc(struct platform_device *pdev) * them explicitly on an error, as it would end up in driver * detach. 
*/ - model = get_cci_model(pdev); + model = of_device_get_match_data(dev); + if (!model) { + dev_warn(dev, + "DEPRECATED compatible property, requires secure access to CCI registers"); + model = probe_cci_model(); + } if (!model) { - dev_warn(&pdev->dev, "CCI PMU version not supported\n"); + dev_warn(dev, "CCI PMU version not supported\n"); return ERR_PTR(-ENODEV); } - cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*cci_pmu), GFP_KERNEL); + cci_pmu = devm_kzalloc(dev, sizeof(*cci_pmu), GFP_KERNEL); if (!cci_pmu) return ERR_PTR(-ENOMEM); cci_pmu->model = model; - cci_pmu->irqs = devm_kcalloc(&pdev->dev, CCI_PMU_MAX_HW_CNTRS(model), + cci_pmu->irqs = devm_kcalloc(dev, CCI_PMU_MAX_HW_CNTRS(model), sizeof(*cci_pmu->irqs), GFP_KERNEL); if (!cci_pmu->irqs) return ERR_PTR(-ENOMEM); - cci_pmu->hw_events.events = devm_kcalloc(&pdev->dev, + cci_pmu->hw_events.events = devm_kcalloc(dev, CCI_PMU_MAX_HW_CNTRS(model), sizeof(*cci_pmu->hw_events.events), GFP_KERNEL); if (!cci_pmu->hw_events.events) return ERR_PTR(-ENOMEM); - cci_pmu->hw_events.used_mask = devm_kcalloc(&pdev->dev, + cci_pmu->hw_events.used_mask = devm_kcalloc(dev, BITS_TO_LONGS(CCI_PMU_MAX_HW_CNTRS(model)), sizeof(*cci_pmu->hw_events.used_mask), GFP_KERNEL); @@ -1661,7 +1653,7 @@ static int cci_pmu_probe(struct platform_device *pdev) struct cci_pmu *cci_pmu; int i, ret, irq; - cci_pmu = cci_pmu_alloc(pdev); + cci_pmu = cci_pmu_alloc(&pdev->dev); if (IS_ERR(cci_pmu)) return PTR_ERR(cci_pmu); -- cgit v1.2.3 From e9c112c94b014b581380d370d3fa2f1d23d07cc0 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 15 Feb 2018 18:51:46 +0000 Subject: perf/arm-cci: Untangle global cci_ctrl_base Depending directly on the bus driver's global cci_ctrl_base variable is a little unpleasant, and exporting it to allow the PMU driver to be modular would be even more so. Let's make things a little better abstracted by adding the control register block to the cci_pmu instance data alongside the PMU register block, and communicating the mapped address from the bus driver via platform data. It's not practical to try the same thing for the bus driver itself, given that the globals are entangled with the hairy assembly code for port control, so we leave them be there. It would however be prudent to move them to the __ro_after_init section in passing, since the addresses really should never be changing once set. 
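
The handoff itself is small: the bus driver attaches the address of its
mapped control block as platform data while populating the PMU device,
and the PMU driver dereferences it again at probe time. A trimmed
sketch, with names as in the diff below:

	/* bus driver: associate the control base with the PMU device */
	static const struct of_dev_auxdata arm_cci_auxdata[] = {
		OF_DEV_AUXDATA("arm,cci-400-pmu", 0, NULL, &cci_ctrl_base),
		/* ... one entry per supported PMU compatible ... */
		{}
	};

	of_platform_populate(pdev->dev.of_node, NULL, arm_cci_auxdata,
			     &pdev->dev);

	/* PMU driver: pick the pointer back up in cci_pmu_alloc() */
	cci_pmu->ctrl_base = *(void __iomem **)dev->platform_data;
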
Signed-off-by: Robin Murphy Signed-off-by: Arnd Bergmann --- drivers/bus/arm-cci.c | 17 +++++++++++++---- drivers/perf/arm-cci.c | 47 ++++++++++++++++++++++++----------------------- 2 files changed, 37 insertions(+), 27 deletions(-) diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c index 23dc0b890d0c..443e4c3fd357 100644 --- a/drivers/bus/arm-cci.c +++ b/drivers/bus/arm-cci.c @@ -25,9 +25,8 @@ #include #include -/* Referenced read-only by the PMU driver; see drivers/perf/arm-cci.c */ -void __iomem *cci_ctrl_base; -static unsigned long cci_ctrl_phys; +static void __iomem *cci_ctrl_base __ro_after_init; +static unsigned long cci_ctrl_phys __ro_after_init; #ifdef CONFIG_ARM_CCI400_PORT_CTRL struct cci_nb_ports { @@ -56,6 +55,15 @@ static const struct of_device_id arm_cci_matches[] = { {}, }; +static const struct of_dev_auxdata arm_cci_auxdata[] = { + OF_DEV_AUXDATA("arm,cci-400-pmu", 0, NULL, &cci_ctrl_base), + OF_DEV_AUXDATA("arm,cci-400-pmu,r0", 0, NULL, &cci_ctrl_base), + OF_DEV_AUXDATA("arm,cci-400-pmu,r1", 0, NULL, &cci_ctrl_base), + OF_DEV_AUXDATA("arm,cci-500-pmu,r0", 0, NULL, &cci_ctrl_base), + OF_DEV_AUXDATA("arm,cci-550-pmu,r0", 0, NULL, &cci_ctrl_base), + {} +}; + #define DRIVER_NAME "ARM-CCI" static int cci_platform_probe(struct platform_device *pdev) @@ -63,7 +71,8 @@ static int cci_platform_probe(struct platform_device *pdev) if (!cci_probed()) return -ENODEV; - return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); + return of_platform_populate(pdev->dev.of_node, NULL, + arm_cci_auxdata, &pdev->dev); } static struct platform_driver cci_platform_driver = { diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c index 336f1455cf96..67a74c48c7c2 100644 --- a/drivers/perf/arm-cci.c +++ b/drivers/perf/arm-cci.c @@ -16,8 +16,6 @@ #include #include -extern void __iomem *const cci_ctrl_base; - #define DRIVER_NAME "ARM-CCI PMU" #define CCI_PMCR 0x0100 @@ -90,6 +88,7 @@ static struct cci_pmu_model cci_pmu_models[]; struct cci_pmu { void __iomem *base; + void __iomem *ctrl_base; struct pmu pmu; int cpu; int nr_irqs; @@ -360,10 +359,10 @@ static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_ev return -ENOENT; } -static int probe_cci400_revision(void) +static int probe_cci400_revision(struct cci_pmu *cci_pmu) { int rev; - rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK; + rev = readl_relaxed(cci_pmu->ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK; rev >>= CCI_PID2_REV_SHIFT; if (rev < CCI400_R1_PX) @@ -372,14 +371,14 @@ static int probe_cci400_revision(void) return CCI400_R1; } -static const struct cci_pmu_model *probe_cci_model(void) +static const struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu) { if (platform_has_secure_cci_access()) - return &cci_pmu_models[probe_cci400_revision()]; + return &cci_pmu_models[probe_cci400_revision(cci_pmu)]; return NULL; } #else /* !CONFIG_ARM_CCI400_PMU */ -static inline struct cci_pmu_model *probe_cci_model(void) +static inline struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu) { return NULL; } @@ -662,8 +661,8 @@ static void __cci_pmu_enable_nosync(struct cci_pmu *cci_pmu) u32 val; /* Enable all the PMU counters. 
*/ - val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN; - writel(val, cci_ctrl_base + CCI_PMCR); + val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) | CCI_PMCR_CEN; + writel(val, cci_pmu->ctrl_base + CCI_PMCR); } /* Should be called with cci_pmu->hw_events->pmu_lock held */ @@ -674,13 +673,13 @@ static void __cci_pmu_enable_sync(struct cci_pmu *cci_pmu) } /* Should be called with cci_pmu->hw_events->pmu_lock held */ -static void __cci_pmu_disable(void) +static void __cci_pmu_disable(struct cci_pmu *cci_pmu) { u32 val; /* Disable all the PMU counters. */ - val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN; - writel(val, cci_ctrl_base + CCI_PMCR); + val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN; + writel(val, cci_pmu->ctrl_base + CCI_PMCR); } static ssize_t cci_pmu_format_show(struct device *dev, @@ -782,9 +781,9 @@ pmu_restore_counters(struct cci_pmu *cci_pmu, unsigned long *mask) * Returns the number of programmable counters actually implemented * by the cci */ -static u32 pmu_get_max_counters(void) +static u32 pmu_get_max_counters(struct cci_pmu *cci_pmu) { - return (readl_relaxed(cci_ctrl_base + CCI_PMCR) & + return (readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) & CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT; } @@ -965,7 +964,7 @@ static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *ma pmu_set_event(cci_pmu, i, event->hw.config_base); } - __cci_pmu_disable(); + __cci_pmu_disable(cci_pmu); pmu_restore_counters(cci_pmu, saved_mask); } @@ -1026,7 +1025,7 @@ static irqreturn_t pmu_handle_irq(int irq_num, void *dev) raw_spin_lock_irqsave(&events->pmu_lock, flags); /* Disable the PMU while we walk through the counters */ - __cci_pmu_disable(); + __cci_pmu_disable(cci_pmu); /* * Iterate over counters and update the corresponding perf events. * This should work regardless of whether we have per-counter overflow @@ -1108,7 +1107,7 @@ static void cci_pmu_disable(struct pmu *pmu) unsigned long flags; raw_spin_lock_irqsave(&hw_events->pmu_lock, flags); - __cci_pmu_disable(); + __cci_pmu_disable(cci_pmu); raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); } @@ -1438,7 +1437,7 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) }; cci_pmu->plat_device = pdev; - num_cntrs = pmu_get_max_counters(); + num_cntrs = pmu_get_max_counters(cci_pmu); if (num_cntrs > cci_pmu->model->num_hw_cntrs) { dev_warn(&pdev->dev, "PMU implements more counters(%d) than supported by" @@ -1611,21 +1610,23 @@ static struct cci_pmu *cci_pmu_alloc(struct device *dev) * them explicitly on an error, as it would end up in driver * detach. 
 */
+	cci_pmu = devm_kzalloc(dev, sizeof(*cci_pmu), GFP_KERNEL);
+	if (!cci_pmu)
+		return ERR_PTR(-ENOMEM);
+
+	cci_pmu->ctrl_base = *(void __iomem **)dev->platform_data;
+
 	model = of_device_get_match_data(dev);
 	if (!model) {
 		dev_warn(dev,
 			 "DEPRECATED compatible property, requires secure access to CCI registers");
-		model = probe_cci_model();
+		model = probe_cci_model(cci_pmu);
 	}
 	if (!model) {
 		dev_warn(dev, "CCI PMU version not supported\n");
 		return ERR_PTR(-ENODEV);
 	}
 
-	cci_pmu = devm_kzalloc(dev, sizeof(*cci_pmu), GFP_KERNEL);
-	if (!cci_pmu)
-		return ERR_PTR(-ENOMEM);
-
 	cci_pmu->model = model;
 	cci_pmu->irqs = devm_kcalloc(dev, CCI_PMU_MAX_HW_CNTRS(model),
 				     sizeof(*cci_pmu->irqs), GFP_KERNEL);
-- cgit v1.2.3

From e0270c8089ae65f7e5180b56a9fe1fdc7435a326 Mon Sep 17 00:00:00 2001
From: Sudeep Holla
Date: Tue, 6 Mar 2018 11:47:49 +0000
Subject: firmware: meson-sm: rework meson_sm_init to use module_platform_driver_probe

Commit 3aa0582fdb82 ("of: platform: populate /firmware/ node from
of_platform_default_populate_init()") takes care of populating all the
devices under the /firmware/ node in of_platform_default_populate_init().

Rework meson_sm_init to use module_platform_driver_probe, now that the
platform device is populated for us.

Cc: Carlo Caione
Cc: Kevin Hilman
Cc: linux-amlogic@lists.infradead.org
Signed-off-by: Sudeep Holla
Signed-off-by: Kevin Hilman
---
 drivers/firmware/meson/meson_sm.c | 25 ++++++++++++-------------
 1 file changed, 12 insertions(+), 13 deletions(-)

diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
index ff204421117b..0ec2ca87318c 100644
--- a/drivers/firmware/meson/meson_sm.c
+++ b/drivers/firmware/meson/meson_sm.c
@@ -17,8 +17,10 @@
 #include
 #include
 #include
+#include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -217,21 +219,11 @@ static const struct of_device_id meson_sm_ids[] = {
 	{ /* sentinel */ },
 };
 
-int __init meson_sm_init(void)
+static int __init meson_sm_probe(struct platform_device *pdev)
 {
 	const struct meson_sm_chip *chip;
-	const struct of_device_id *matched_np;
-	struct device_node *np;
 
-	np = of_find_matching_node_and_match(NULL, meson_sm_ids, &matched_np);
-	if (!np)
-		return -ENODEV;
-
-	chip = matched_np->data;
-	if (!chip) {
-		pr_err("unable to setup secure-monitor data\n");
-		goto out;
-	}
+	chip = of_match_device(meson_sm_ids, &pdev->dev)->data;
 
 	if (chip->cmd_shmem_in_base) {
 		fw.sm_shmem_in_base = meson_sm_map_shmem(chip->cmd_shmem_in_base,
@@ -257,4 +249,11 @@ out_in_base:
 out:
 	return -EINVAL;
 }
-device_initcall(meson_sm_init);
+
+static struct platform_driver meson_sm_driver = {
+	.driver = {
+		.name = "meson-sm",
+		.of_match_table = of_match_ptr(meson_sm_ids),
+	},
+};
+module_platform_driver_probe(meson_sm_driver, meson_sm_probe);
-- cgit v1.2.3

From 1abb081e41a718d73183b0e1b76bfff66e92f7e1 Mon Sep 17 00:00:00 2001
From: Mikko Perttunen
Date: Tue, 20 Feb 2018 13:58:06 +0200
Subject: firmware: tegra: Simplify channel management

The Tegra194 BPMP only implements 5 channels (4 to BPMP, 1 to CCPLEX),
and they are not placed contiguously in memory. The current channel
management in the BPMP driver does not support this.

Simplify and refactor the channel management such that only one atomic
transmit channel and one receive channel are supported, and channels
are not required to be placed contiguously in memory. The same
configuration also works on T186, so we end up with less code.
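
After the refactoring the channels are tracked as three separate
references rather than one contiguous array; roughly, with fields as in
the diff below:

	struct tegra_bpmp {
		/* ... */
		spinlock_t atomic_tx_lock;
		struct tegra_bpmp_channel *tx_channel, *rx_channel,
					  *threaded_channels;
		/* ... */
	};

Each channel is then initialised individually from its own offset via
tegra_bpmp_channel_init(), so nothing requires the channels to be
adjacent in memory.
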
Signed-off-by: Mikko Perttunen Signed-off-by: Thierry Reding --- drivers/firmware/tegra/bpmp.c | 142 +++++++++++++++++++----------------------- include/soc/tegra/bpmp.h | 4 +- 2 files changed, 66 insertions(+), 80 deletions(-) diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c index a7f461f2e650..81bc2dce8626 100644 --- a/drivers/firmware/tegra/bpmp.c +++ b/drivers/firmware/tegra/bpmp.c @@ -70,57 +70,20 @@ void tegra_bpmp_put(struct tegra_bpmp *bpmp) } EXPORT_SYMBOL_GPL(tegra_bpmp_put); -static int tegra_bpmp_channel_get_index(struct tegra_bpmp_channel *channel) -{ - return channel - channel->bpmp->channels; -} - static int tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel) { struct tegra_bpmp *bpmp = channel->bpmp; - unsigned int offset, count; + unsigned int count; int index; - offset = bpmp->soc->channels.thread.offset; count = bpmp->soc->channels.thread.count; - index = tegra_bpmp_channel_get_index(channel); - if (index < 0) - return index; - - if (index < offset || index >= offset + count) + index = channel - channel->bpmp->threaded_channels; + if (index < 0 || index >= count) return -EINVAL; - return index - offset; -} - -static struct tegra_bpmp_channel * -tegra_bpmp_channel_get_thread(struct tegra_bpmp *bpmp, unsigned int index) -{ - unsigned int offset = bpmp->soc->channels.thread.offset; - unsigned int count = bpmp->soc->channels.thread.count; - - if (index >= count) - return NULL; - - return &bpmp->channels[offset + index]; -} - -static struct tegra_bpmp_channel * -tegra_bpmp_channel_get_tx(struct tegra_bpmp *bpmp) -{ - unsigned int offset = bpmp->soc->channels.cpu_tx.offset; - - return &bpmp->channels[offset + smp_processor_id()]; -} - -static struct tegra_bpmp_channel * -tegra_bpmp_channel_get_rx(struct tegra_bpmp *bpmp) -{ - unsigned int offset = bpmp->soc->channels.cpu_rx.offset; - - return &bpmp->channels[offset]; + return index; } static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg) @@ -271,11 +234,7 @@ tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq, goto unlock; } - channel = tegra_bpmp_channel_get_thread(bpmp, index); - if (!channel) { - err = -EINVAL; - goto unlock; - } + channel = &bpmp->threaded_channels[index]; if (!tegra_bpmp_master_free(channel)) { err = -EBUSY; @@ -328,12 +287,18 @@ int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp, if (!tegra_bpmp_message_valid(msg)) return -EINVAL; - channel = tegra_bpmp_channel_get_tx(bpmp); + channel = bpmp->tx_channel; + + spin_lock(&bpmp->atomic_tx_lock); err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK, msg->tx.data, msg->tx.size); - if (err < 0) + if (err < 0) { + spin_unlock(&bpmp->atomic_tx_lock); return err; + } + + spin_unlock(&bpmp->atomic_tx_lock); err = mbox_send_message(bpmp->mbox.channel, NULL); if (err < 0) @@ -607,7 +572,7 @@ static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data) unsigned int i, count; unsigned long *busy; - channel = tegra_bpmp_channel_get_rx(bpmp); + channel = bpmp->rx_channel; count = bpmp->soc->channels.thread.count; busy = bpmp->threaded.busy; @@ -619,9 +584,7 @@ static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data) for_each_set_bit(i, busy, count) { struct tegra_bpmp_channel *channel; - channel = tegra_bpmp_channel_get_thread(bpmp, i); - if (!channel) - continue; + channel = &bpmp->threaded_channels[i]; if (tegra_bpmp_master_acked(channel)) { tegra_bpmp_channel_signal(channel); @@ -698,7 +661,6 @@ static void tegra_bpmp_channel_cleanup(struct 
tegra_bpmp_channel *channel) static int tegra_bpmp_probe(struct platform_device *pdev) { - struct tegra_bpmp_channel *channel; struct tegra_bpmp *bpmp; unsigned int i; char tag[32]; @@ -758,24 +720,45 @@ static int tegra_bpmp_probe(struct platform_device *pdev) goto free_rx; } - bpmp->num_channels = bpmp->soc->channels.cpu_tx.count + - bpmp->soc->channels.thread.count + - bpmp->soc->channels.cpu_rx.count; + spin_lock_init(&bpmp->atomic_tx_lock); + bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel), + GFP_KERNEL); + if (!bpmp->tx_channel) { + err = -ENOMEM; + goto free_rx; + } - bpmp->channels = devm_kcalloc(&pdev->dev, bpmp->num_channels, - sizeof(*channel), GFP_KERNEL); - if (!bpmp->channels) { + bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel), + GFP_KERNEL); + if (!bpmp->rx_channel) { err = -ENOMEM; goto free_rx; } - /* message channel initialization */ - for (i = 0; i < bpmp->num_channels; i++) { - struct tegra_bpmp_channel *channel = &bpmp->channels[i]; + bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count, + sizeof(*bpmp->threaded_channels), + GFP_KERNEL); + if (!bpmp->threaded_channels) { + err = -ENOMEM; + goto free_rx; + } - err = tegra_bpmp_channel_init(channel, bpmp, i); + err = tegra_bpmp_channel_init(bpmp->tx_channel, bpmp, + bpmp->soc->channels.cpu_tx.offset); + if (err < 0) + goto free_rx; + + err = tegra_bpmp_channel_init(bpmp->rx_channel, bpmp, + bpmp->soc->channels.cpu_rx.offset); + if (err < 0) + goto cleanup_tx_channel; + + for (i = 0; i < bpmp->threaded.count; i++) { + err = tegra_bpmp_channel_init( + &bpmp->threaded_channels[i], bpmp, + bpmp->soc->channels.thread.offset + i); if (err < 0) - goto cleanup_channels; + goto cleanup_threaded_channels; } /* mbox registration */ @@ -788,15 +771,14 @@ static int tegra_bpmp_probe(struct platform_device *pdev) if (IS_ERR(bpmp->mbox.channel)) { err = PTR_ERR(bpmp->mbox.channel); dev_err(&pdev->dev, "failed to get HSP mailbox: %d\n", err); - goto cleanup_channels; + goto cleanup_threaded_channels; } /* reset message channels */ - for (i = 0; i < bpmp->num_channels; i++) { - struct tegra_bpmp_channel *channel = &bpmp->channels[i]; - - tegra_bpmp_channel_reset(channel); - } + tegra_bpmp_channel_reset(bpmp->tx_channel); + tegra_bpmp_channel_reset(bpmp->rx_channel); + for (i = 0; i < bpmp->threaded.count; i++) + tegra_bpmp_channel_reset(&bpmp->threaded_channels[i]); err = tegra_bpmp_request_mrq(bpmp, MRQ_PING, tegra_bpmp_mrq_handle_ping, bpmp); @@ -845,9 +827,15 @@ free_mrq: tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp); free_mbox: mbox_free_channel(bpmp->mbox.channel); -cleanup_channels: - while (i--) - tegra_bpmp_channel_cleanup(&bpmp->channels[i]); +cleanup_threaded_channels: + for (i = 0; i < bpmp->threaded.count; i++) { + if (bpmp->threaded_channels[i].bpmp) + tegra_bpmp_channel_cleanup(&bpmp->threaded_channels[i]); + } + + tegra_bpmp_channel_cleanup(bpmp->rx_channel); +cleanup_tx_channel: + tegra_bpmp_channel_cleanup(bpmp->tx_channel); free_rx: gen_pool_free(bpmp->rx.pool, (unsigned long)bpmp->rx.virt, 4096); free_tx: @@ -858,18 +846,16 @@ free_tx: static const struct tegra_bpmp_soc tegra186_soc = { .channels = { .cpu_tx = { - .offset = 0, - .count = 6, + .offset = 3, .timeout = 60 * USEC_PER_SEC, }, .thread = { - .offset = 6, - .count = 7, + .offset = 0, + .count = 3, .timeout = 600 * USEC_PER_SEC, }, .cpu_rx = { .offset = 13, - .count = 1, .timeout = 0, }, }, diff --git a/include/soc/tegra/bpmp.h b/include/soc/tegra/bpmp.h index aeae4466dd25..e69e4c4d80ae 100644 --- 
a/include/soc/tegra/bpmp.h +++ b/include/soc/tegra/bpmp.h @@ -75,8 +75,8 @@ struct tegra_bpmp { struct mbox_chan *channel; } mbox; - struct tegra_bpmp_channel *channels; - unsigned int num_channels; + spinlock_t atomic_tx_lock; + struct tegra_bpmp_channel *tx_channel, *rx_channel, *threaded_channels; struct { unsigned long *allocated; -- cgit v1.2.3 From 1320f76897c5e4893aff68d0bfc1797a5ba543ff Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 27 Jan 2018 15:28:28 +0100 Subject: firmware: tegra: adjust tested variable Check the variable that was most recently initialized. The semantic match that finds this problem is as follows: (http://coccinelle.lip6.fr/) // @@ expression x, y, f, g, e, m; statement S1,S2,S3,S4; @@ x = f(...); if (\(<+...x...+>\&e\)) S1 else S2 ( x = g(...); | m = g(...,&x,...); | y = g(...); *if (e) S3 else S4 ) // Signed-off-by: Julia Lawall Reviewed-by: Mikko Perttunen Signed-off-by: Thierry Reding --- drivers/firmware/tegra/bpmp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c index 81bc2dce8626..14a456afa379 100644 --- a/drivers/firmware/tegra/bpmp.c +++ b/drivers/firmware/tegra/bpmp.c @@ -694,7 +694,7 @@ static int tegra_bpmp_probe(struct platform_device *pdev) } bpmp->rx.virt = gen_pool_dma_alloc(bpmp->rx.pool, 4096, &bpmp->rx.phys); - if (!bpmp->rx.pool) { + if (!bpmp->rx.virt) { dev_err(&pdev->dev, "failed to allocate from RX pool\n"); err = -ENOMEM; goto free_tx; -- cgit v1.2.3 From 6f9ed07fde03870ec247dcaf244313fe32521545 Mon Sep 17 00:00:00 2001 From: Mikko Perttunen Date: Tue, 20 Feb 2018 13:58:07 +0200 Subject: soc/tegra: Add Tegra194 SoC configuration option Add the configuration option to enable support for the Tegra194 system- on-chip. Signed-off-by: Mikko Perttunen Signed-off-by: Thierry Reding --- drivers/soc/tegra/Kconfig | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig index 89ebe22a3e27..fe4481676da6 100644 --- a/drivers/soc/tegra/Kconfig +++ b/drivers/soc/tegra/Kconfig @@ -104,6 +104,16 @@ config ARCH_TEGRA_186_SOC multi-format support, ISP for image capture processing and BPMP for power management. +config ARCH_TEGRA_194_SOC + bool "NVIDIA Tegra194 SoC" + select MAILBOX + select TEGRA_BPMP + select TEGRA_HSP_MBOX + select TEGRA_IVC + select SOC_TEGRA_PMC + help + Enable support for the NVIDIA Tegra194 SoC. + endif endif -- cgit v1.2.3 From 56327f54d95c05791fd01e59a9dc03e3a4eafd21 Mon Sep 17 00:00:00 2001 From: Mikko Perttunen Date: Tue, 20 Feb 2018 13:58:08 +0200 Subject: soc/tegra: pmc: Add Tegra194 compatibility string The Tegra194 PMC is mostly compatible with Tegra186, including in all currently supported features. As such, add a new compatibility string but point to the existing Tegra186 SoC data for now. 
Signed-off-by: Mikko Perttunen Signed-off-by: Thierry Reding --- drivers/soc/tegra/pmc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index ce62a47a6647..a2df230bf51a 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -1920,6 +1920,7 @@ static const struct tegra_pmc_soc tegra186_pmc_soc = { }; static const struct of_device_id tegra_pmc_match[] = { + { .compatible = "nvidia,tegra194-pmc", .data = &tegra186_pmc_soc }, { .compatible = "nvidia,tegra186-pmc", .data = &tegra186_pmc_soc }, { .compatible = "nvidia,tegra210-pmc", .data = &tegra210_pmc_soc }, { .compatible = "nvidia,tegra132-pmc", .data = &tegra124_pmc_soc }, -- cgit v1.2.3 From a263394a096e3578eb8dd77039b961c84a7392e7 Mon Sep 17 00:00:00 2001 From: Peter De Schrijver Date: Thu, 25 Jan 2018 16:00:13 +0200 Subject: soc/tegra: pmc: MBIST work around for Tegra210 Apply the memory built-in self test work around when ungating certain Tegra210 power domains. Signed-off-by: Peter De Schrijver Reviewed-by: Jon Hunter Tested-by: Jon Hunter Tested-by: Hector Martin Tested-by: Andre Heider Tested-by: Mikko Perttunen Signed-off-by: Thierry Reding --- drivers/soc/tegra/pmc.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index a2df230bf51a..010fbc928cd3 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -153,6 +153,7 @@ struct tegra_pmc_soc { bool has_tsense_reset; bool has_gpu_clamps; + bool needs_mbist_war; const struct tegra_io_pad_soc *io_pads; unsigned int num_io_pads; @@ -396,6 +397,11 @@ static int tegra_powergate_reset_deassert(struct tegra_powergate *pg) return 0; } +int __weak tegra210_clk_handle_mbist_war(unsigned int id) +{ + return 0; +} + static int tegra_powergate_power_up(struct tegra_powergate *pg, bool disable_clocks) { @@ -431,6 +437,11 @@ static int tegra_powergate_power_up(struct tegra_powergate *pg, usleep_range(10, 20); + if (pg->pmc->soc->needs_mbist_war) + err = tegra210_clk_handle_mbist_war(pg->id); + if (err) + goto disable_clks; + if (disable_clocks) tegra_powergate_disable_clocks(pg); @@ -1815,6 +1826,7 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = { .cpu_powergates = tegra210_cpu_powergates, .has_tsense_reset = true, .has_gpu_clamps = true, + .needs_mbist_war = true, .num_io_pads = ARRAY_SIZE(tegra210_io_pads), .io_pads = tegra210_io_pads, .regs = &tegra20_pmc_regs, -- cgit v1.2.3 From 507c655a06c8553a67122c371608b024b997b0e0 Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Mon, 12 Feb 2018 16:09:43 +0100 Subject: soc/tegra: pmc: Pass PMC to tegra_powergate_power_up() tegra_powergate_sequence_power_up() makes up a struct tegra_powergate from scratch in order to reuse the same code as used by the generic PM domain implementation. However, subsequent patches will need to access the struct tegra_pmc * embedded in the powergate structure, so we need to make sure we always pass it in. 
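
The essential change is a single extra assignment when the on-stack
powergate is put together, as the diff below shows:

	pg.num_clks = 1;
	pg.resets = &rst;
	pg.num_resets = 1;
	pg.pmc = pmc;

	err = tegra_powergate_power_up(&pg, false);
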
Tested-by: Hector Martin Tested-by: Andre Heider Tested-by: Mikko Perttunen Signed-off-by: Thierry Reding --- drivers/soc/tegra/pmc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 010fbc928cd3..0efdc4ec019f 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -599,6 +599,7 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk, pg.num_clks = 1; pg.resets = &rst; pg.num_resets = 1; + pg.pmc = pmc; err = tegra_powergate_power_up(&pg, false); if (err) -- cgit v1.2.3 From fa65f8045137d061134c15233d35a089cc85b87e Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Mon, 12 Feb 2018 17:37:23 -0800 Subject: soc: qcom: rmtfs-mem: Add support for assigning memory to remote On some platforms the remote processor's memory map is not statically configured in TrustZone, so each memory region that is to be accessed by the remote needs a call into TrustZone to set up the remote's permissions. Implement this for the rmtfs memory driver, to give the modem on 8996 access to the shared file system buffers. Signed-off-by: Bjorn Andersson Signed-off-by: Andy Gross --- drivers/soc/qcom/Kconfig | 1 + drivers/soc/qcom/rmtfs_mem.c | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index e050eb83341d..a993d19fa562 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -47,6 +47,7 @@ config QCOM_QMI_HELPERS config QCOM_RMTFS_MEM tristate "Qualcomm Remote Filesystem memory driver" depends on ARCH_QCOM + select QCOM_SCM help The Qualcomm remote filesystem memory driver is used for allocating and exposing regions of shared memory with remote processors for the diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c index 0a43b2e8906f..c8999e38b005 100644 --- a/drivers/soc/qcom/rmtfs_mem.c +++ b/drivers/soc/qcom/rmtfs_mem.c @@ -37,6 +37,8 @@ struct qcom_rmtfs_mem { phys_addr_t size; unsigned int client_id; + + unsigned int perms; }; static ssize_t qcom_rmtfs_mem_show(struct device *dev, @@ -151,9 +153,11 @@ static void qcom_rmtfs_mem_release_device(struct device *dev) static int qcom_rmtfs_mem_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; + struct qcom_scm_vmperm perms[2]; struct reserved_mem *rmem; struct qcom_rmtfs_mem *rmtfs_mem; u32 client_id; + u32 vmid; int ret; rmem = of_reserved_mem_lookup(node); @@ -204,10 +208,31 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev) rmtfs_mem->dev.release = qcom_rmtfs_mem_release_device; + ret = of_property_read_u32(node, "qcom,vmid", &vmid); + if (ret < 0 && ret != -EINVAL) { + dev_err(&pdev->dev, "failed to parse qcom,vmid\n"); + goto remove_cdev; + } else if (!ret) { + perms[0].vmid = QCOM_SCM_VMID_HLOS; + perms[0].perm = QCOM_SCM_PERM_RW; + perms[1].vmid = vmid; + perms[1].perm = QCOM_SCM_PERM_RW; + + rmtfs_mem->perms = BIT(QCOM_SCM_VMID_HLOS); + ret = qcom_scm_assign_mem(rmtfs_mem->addr, rmtfs_mem->size, + &rmtfs_mem->perms, perms, 2); + if (ret < 0) { + dev_err(&pdev->dev, "assign memory failed\n"); + goto remove_cdev; + } + } + dev_set_drvdata(&pdev->dev, rmtfs_mem); return 0; +remove_cdev: + cdev_device_del(&rmtfs_mem->cdev, &rmtfs_mem->dev); put_device: put_device(&rmtfs_mem->dev); @@ -217,6 +242,15 @@ put_device: static int qcom_rmtfs_mem_remove(struct platform_device *pdev) { struct qcom_rmtfs_mem *rmtfs_mem = dev_get_drvdata(&pdev->dev); + struct qcom_scm_vmperm perm; + + if (rmtfs_mem->perms) { +
perm.vmid = QCOM_SCM_VMID_HLOS; + perm.perm = QCOM_SCM_PERM_RW; + + qcom_scm_assign_mem(rmtfs_mem->addr, rmtfs_mem->size, + &rmtfs_mem->perms, &perm, 1); + } cdev_device_del(&rmtfs_mem->cdev, &rmtfs_mem->dev); put_device(&rmtfs_mem->dev); -- cgit v1.2.3 From 90c29ed7627b6b4aeb603ee197650173c8434512 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Tue, 27 Feb 2018 16:45:25 -0800 Subject: soc: qcom: wcnss_ctrl: Fix increment in NV upload hdr.len includes both the size of the header and the fragment, so using this when stepping through the firmware causes us to skip 16 bytes every chunk of 3072 bytes, causing only the first fragment to actually contain valid data. Instead, step through the firmware blob in fragment-sized (NV_FRAGMENT_SIZE) steps. Fixes: ea7a1f275cf0 ("soc: qcom: Introduce WCNSS_CTRL SMD client") Reported-by: Will Newton Signed-off-by: Bjorn Andersson Signed-off-by: Andy Gross --- drivers/soc/qcom/wcnss_ctrl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c index d008e5b82db4..df3ccb30bc2d 100644 --- a/drivers/soc/qcom/wcnss_ctrl.c +++ b/drivers/soc/qcom/wcnss_ctrl.c @@ -249,7 +249,7 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc) /* Increment for next fragment */ req->seq++; - data += req->hdr.len; + data += NV_FRAGMENT_SIZE; left -= NV_FRAGMENT_SIZE; } while (left > 0); -- cgit v1.2.3 From b6853f821658e660dc1f0d694f04d64c1bdbdb7e Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Wed, 7 Feb 2018 18:22:47 +0800 Subject: dt-bindings: soc: add SCPSYS binding for MT7623 and MT7623A SoC Document the binding for enabling SCPSYS on MediaTek MT7623 and MT7623A SoCs. The MT7623 SoC has the same power domain definitions as MT2701, so it is fine to use the MT2701 compatible as MT7623's fallback. Signed-off-by: Sean Wang Reviewed-by: Rob Herring Reviewed-by: Ulf Hansson Signed-off-by: Matthias Brugger --- Documentation/devicetree/bindings/soc/mediatek/scpsys.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt b/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt index 76bf45b893fa..d6fe16f094af 100644 --- a/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt +++ b/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt @@ -21,6 +21,8 @@ Required properties: - "mediatek,mt2712-scpsys" - "mediatek,mt6797-scpsys" - "mediatek,mt7622-scpsys" + - "mediatek,mt7623-scpsys", "mediatek,mt2701-scpsys": For MT7623 SoC + - "mediatek,mt7623a-scpsys": For MT7623A SoC - "mediatek,mt8173-scpsys" - #power-domain-cells: Must be 1 - reg: Address range of the SCPSYS unit @@ -28,10 +30,11 @@ Required properties: - clock, clock-names: clocks according to the common clock binding. These are clocks which hardware needs to be enabled before enabling certain power domains.
- Required clocks for MT2701: "mm", "mfg", "ethif" + Required clocks for MT2701 or MT7623: "mm", "mfg", "ethif" Required clocks for MT2712: "mm", "mfg", "venc", "jpgdec", "audio", "vdec" Required clocks for MT6797: "mm", "mfg", "vdec" Required clocks for MT7622: "hif_sel" + Required clocks for MT7623A: "ethif" Required clocks for MT8173: "mm", "mfg", "venc", "venc_lt" Optional properties: -- cgit v1.2.3 From 0c1a2c17f60fc120cd6a6033ffdb5c336078a0a3 Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Wed, 7 Feb 2018 18:22:48 +0800 Subject: dt-bindings: soc: add header files required for MT7623A SCPSYS dt-binding Add the header files required for the dt-bindings of SCPSYS power domain control for the subsystems found on the MT7623A SoC. Signed-off-by: Sean Wang Cc: Rob Herring Reviewed-by: Rob Herring Reviewed-by: Ulf Hansson [mb: clean-up commit message] Signed-off-by: Matthias Brugger --- include/dt-bindings/power/mt7623a-power.h | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 include/dt-bindings/power/mt7623a-power.h diff --git a/include/dt-bindings/power/mt7623a-power.h b/include/dt-bindings/power/mt7623a-power.h new file mode 100644 index 000000000000..2544822aa76b --- /dev/null +++ b/include/dt-bindings/power/mt7623a-power.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DT_BINDINGS_POWER_MT7623A_POWER_H +#define _DT_BINDINGS_POWER_MT7623A_POWER_H + +#define MT7623A_POWER_DOMAIN_CONN 0 +#define MT7623A_POWER_DOMAIN_ETH 1 +#define MT7623A_POWER_DOMAIN_HIF 2 +#define MT7623A_POWER_DOMAIN_IFR_MSC 3 + +#endif /* _DT_BINDINGS_POWER_MT7623A_POWER_H */ -- cgit v1.2.3 From c59c9c85e36aa09cfd901cc15a0d8d3772c18195 Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Wed, 7 Feb 2018 18:22:49 +0800 Subject: soc: mediatek: avoid hardcoded value with bus_prot_mask Use meaningful definitions for bus_prot_mask instead of hardcoded magic values.
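As a quick sanity check (using the BIT() values from the infracfg.h hunk below), the named definitions expand to exactly the old magic numbers:

	/* MT2701_TOP_AXI_PROT_EN_CONN_M | MT2701_TOP_AXI_PROT_EN_CONN_S
	 *	== BIT(2) | BIT(8) == 0x0004 | 0x0100 == 0x0104
	 * MT2701_TOP_AXI_PROT_EN_MM_M0
	 *	== BIT(1) == 0x0002
	 */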
Signed-off-by: Sean Wang Reviewed-by: Ulf Hansson Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mtk-scpsys.c | 5 +++-- include/linux/soc/mediatek/infracfg.h | 4 ++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c index 435ce5ec648a..5346f33dd70b 100644 --- a/drivers/soc/mediatek/mtk-scpsys.c +++ b/drivers/soc/mediatek/mtk-scpsys.c @@ -518,7 +518,8 @@ static const struct scp_domain_data scp_domain_data_mt2701[] = { .name = "conn", .sta_mask = PWR_STATUS_CONN, .ctl_offs = SPM_CONN_PWR_CON, - .bus_prot_mask = 0x0104, + .bus_prot_mask = MT2701_TOP_AXI_PROT_EN_CONN_M | + MT2701_TOP_AXI_PROT_EN_CONN_S, .clk_id = {CLK_NONE}, .active_wakeup = true, }, @@ -528,7 +529,7 @@ .ctl_offs = SPM_DIS_PWR_CON, .sram_pdn_bits = GENMASK(11, 8), .clk_id = {CLK_MM}, - .bus_prot_mask = 0x0002, + .bus_prot_mask = MT2701_TOP_AXI_PROT_EN_MM_M0, .active_wakeup = true, }, [MT2701_POWER_DOMAIN_MFG] = { diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h index b0a507d356ef..fd25f0148566 100644 --- a/include/linux/soc/mediatek/infracfg.h +++ b/include/linux/soc/mediatek/infracfg.h @@ -21,6 +21,10 @@ #define MT8173_TOP_AXI_PROT_EN_MFG_M1 BIT(22) #define MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT BIT(23) +#define MT2701_TOP_AXI_PROT_EN_MM_M0 BIT(1) +#define MT2701_TOP_AXI_PROT_EN_CONN_M BIT(2) +#define MT2701_TOP_AXI_PROT_EN_CONN_S BIT(8) + #define MT7622_TOP_AXI_PROT_EN_ETHSYS (BIT(3) | BIT(17)) #define MT7622_TOP_AXI_PROT_EN_HIF0 (BIT(24) | BIT(25)) #define MT7622_TOP_AXI_PROT_EN_HIF1 (BIT(26) | BIT(27) | \ -- cgit v1.2.3 From c932ba8c38ba59d430a6e1d9600c0b8c4a029cce Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Wed, 7 Feb 2018 18:22:50 +0800 Subject: soc: mediatek: add SCPSYS power domain driver for MediaTek MT7623A SoC Add a SCPSYS power domain driver for the MT7623A SoC. The MT7623A's power domains are a subset of the MT7623 SoC's: MT7623 is a full-featured SoC, whereas MT7623A is designed just for router applications, so it doesn't include the power domains that the multimedia functions belong to. To avoid the errors that would inevitably occur when registering those power domains on the MT7623A SoC with the existing MT7623 SCPSYS setup, a separate setup has to be defined specifically for MT7623A.
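A rough sketch of the per-SoC selection this enables (illustrative; the driver resolves its match data along these lines, helper details elided):

	const struct scp_soc_data *soc = of_device_get_match_data(&pdev->dev);
	/* soc == &mt7623a_data on MT7623A boards */

	/* only the CONN/ETH/HIF/IFR_MSC entries of scp_domain_data_mt7623a[]
	 * are registered, so the absent multimedia domains can never be
	 * instantiated by mistake
	 */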
Signed-off-by: Sean Wang Reviewed-by: Ulf Hansson Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mtk-scpsys.c | 55 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c index 5346f33dd70b..fc55faa17867 100644 --- a/drivers/soc/mediatek/mtk-scpsys.c +++ b/drivers/soc/mediatek/mtk-scpsys.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #define SPM_VDE_PWR_CON 0x0210 @@ -794,6 +795,47 @@ static const struct scp_domain_data scp_domain_data_mt7622[] = { }, }; +/* + * MT7623A power domain support + */ + +static const struct scp_domain_data scp_domain_data_mt7623a[] = { + [MT7623A_POWER_DOMAIN_CONN] = { + .name = "conn", + .sta_mask = PWR_STATUS_CONN, + .ctl_offs = SPM_CONN_PWR_CON, + .bus_prot_mask = MT2701_TOP_AXI_PROT_EN_CONN_M | + MT2701_TOP_AXI_PROT_EN_CONN_S, + .clk_id = {CLK_NONE}, + .active_wakeup = true, + }, + [MT7623A_POWER_DOMAIN_ETH] = { + .name = "eth", + .sta_mask = PWR_STATUS_ETH, + .ctl_offs = SPM_ETH_PWR_CON, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(15, 12), + .clk_id = {CLK_ETHIF}, + .active_wakeup = true, + }, + [MT7623A_POWER_DOMAIN_HIF] = { + .name = "hif", + .sta_mask = PWR_STATUS_HIF, + .ctl_offs = SPM_HIF_PWR_CON, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(15, 12), + .clk_id = {CLK_ETHIF}, + .active_wakeup = true, + }, + [MT7623A_POWER_DOMAIN_IFR_MSC] = { + .name = "ifr_msc", + .sta_mask = PWR_STATUS_IFR_MSC, + .ctl_offs = SPM_IFR_MSC_PWR_CON, + .clk_id = {CLK_NONE}, + .active_wakeup = true, + }, +}; + /* * MT8173 power domain support */ @@ -935,6 +977,16 @@ static const struct scp_soc_data mt7622_data = { .bus_prot_reg_update = true, }; +static const struct scp_soc_data mt7623a_data = { + .domains = scp_domain_data_mt7623a, + .num_domains = ARRAY_SIZE(scp_domain_data_mt7623a), + .regs = { + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND + }, + .bus_prot_reg_update = true, +}; + static const struct scp_soc_data mt8173_data = { .domains = scp_domain_data_mt8173, .num_domains = ARRAY_SIZE(scp_domain_data_mt8173), @@ -964,6 +1016,9 @@ static const struct of_device_id of_scpsys_match_tbl[] = { }, { .compatible = "mediatek,mt7622-scpsys", .data = &mt7622_data, + }, { + .compatible = "mediatek,mt7623a-scpsys", + .data = &mt7623a_data, }, { .compatible = "mediatek,mt8173-scpsys", .data = &mt8173_data, -- cgit v1.2.3 From 73ce2ce129783813e1ebc37d2c757fe5e0fab1ef Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Fri, 9 Feb 2018 02:07:59 +0800 Subject: soc: mediatek: fix the mistaken pointer accessed when subdomains are added Fix the pointer to struct scp_subdomain not being moved forward when each sub-domain is expected to be iteratively added through the pm_genpd_add_subdomain() call.
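In other words, a minimal before/after sketch of the loop (see the hunk below):

	/* before: sd never advances, every iteration re-reads subdomains[0] */
	for (i = 0, sd = soc->subdomains; i < soc->num_subdomains; i++)
		ret = pm_genpd_add_subdomain(pd_data->domains[sd->origin],
					     pd_data->domains[sd->subdomain]);

	/* after: sd steps through the array together with i */
	for (i = 0, sd = soc->subdomains; i < soc->num_subdomains; i++, sd++)
		ret = pm_genpd_add_subdomain(pd_data->domains[sd->origin],
					     pd_data->domains[sd->subdomain]);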
Cc: stable@vger.kernel.org Fixes: 53fddb1a66dd ("soc: mediatek: reduce code duplication of scpsys_probe across all SoCs") Reported-by: Weiyi Lu Signed-off-by: Sean Wang Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mtk-scpsys.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c index fc55faa17867..9de801e9e0e0 100644 --- a/drivers/soc/mediatek/mtk-scpsys.c +++ b/drivers/soc/mediatek/mtk-scpsys.c @@ -1048,7 +1048,7 @@ static int scpsys_probe(struct platform_device *pdev) pd_data = &scp->pd_data; - for (i = 0, sd = soc->subdomains ; i < soc->num_subdomains ; i++) { + for (i = 0, sd = soc->subdomains; i < soc->num_subdomains; i++, sd++) { ret = pm_genpd_add_subdomain(pd_data->domains[sd->origin], pd_data->domains[sd->subdomain]); if (ret && IS_ENABLED(CONFIG_PM)) -- cgit v1.2.3 From 697a3a873c2cd085f8ee18e20f993e3c731e1b8d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 13 Mar 2018 10:37:33 +0100 Subject: cpufreq: scmi: add thermal dependency A built-in scmi cpufreq driver cannot link against a modular thermal framework: drivers/cpufreq/scmi-cpufreq.o: In function `scmi_cpufreq_ready': scmi-cpufreq.c:(.text+0x40): undefined reference to `of_cpufreq_cooling_register' drivers/cpufreq/scmi-cpufreq.o: In function `scmi_cpufreq_exit': scmi-cpufreq.c:(.text+0x88): undefined reference to `cpufreq_cooling_unregister' This adds a Kconfig dependency that makes sure this configuration is not possible, while allowing all configurations that can work. Note that disabling CPU_THERMAL means we don't care about the THERMAL dependency. Acked-by: Sudeep Holla Signed-off-by: Arnd Bergmann --- drivers/cpufreq/Kconfig.arm | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 9bbb5b39d18a..d16d109a4560 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -241,6 +241,7 @@ config ARM_SA1110_CPUFREQ config ARM_SCMI_CPUFREQ tristate "SCMI based CPUfreq driver" depends on ARM_SCMI_PROTOCOL || COMPILE_TEST + depends on !CPU_THERMAL || THERMAL select PM_OPP help This adds the CPUfreq driver support for ARM platforms using SCMI -- cgit v1.2.3 From 1390515aed5e5eea8d6c2c5c08ef6d04ba4a4a50 Mon Sep 17 00:00:00 2001 From: "weiyi.lu@mediatek.com" Date: Mon, 12 Mar 2018 15:03:38 +0800 Subject: dt-bindings: soc: update MT2712 power dt-bindings Add new power domains (MFG_SC1/MFG_SC2/MFG_SC3) for MT2712 according to the ECO design change. Signed-off-by: Weiyi Lu Reviewed-by: Rob Herring Signed-off-by: Matthias Brugger --- include/dt-bindings/power/mt2712-power.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/dt-bindings/power/mt2712-power.h b/include/dt-bindings/power/mt2712-power.h index 92b46d772fae..2c147817efc2 100644 --- a/include/dt-bindings/power/mt2712-power.h +++ b/include/dt-bindings/power/mt2712-power.h @@ -22,5 +22,8 @@ #define MT2712_POWER_DOMAIN_USB 5 #define MT2712_POWER_DOMAIN_USB2 6 #define MT2712_POWER_DOMAIN_MFG 7 +#define MT2712_POWER_DOMAIN_MFG_SC1 8 +#define MT2712_POWER_DOMAIN_MFG_SC2 9 +#define MT2712_POWER_DOMAIN_MFG_SC3 10 #endif /* _DT_BINDINGS_POWER_MT2712_POWER_H */ -- cgit v1.2.3 From 9f9971266110add19b512f7b10a6d922e741368e Mon Sep 17 00:00:00 2001 From: "weiyi.lu@mediatek.com" Date: Mon, 12 Mar 2018 15:03:39 +0800 Subject: soc: mediatek: update power domain data of MT2712 1. split MFG power domain into MFG/MFG_SC1/MFG_SC2/MFG_SC3 according to MT2712 ECO design change 2.
add subdomain support for MT2712 Signed-off-by: Weiyi Lu Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mtk-scpsys.c | 42 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 40 insertions(+), 2 deletions(-) diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c index 9de801e9e0e0..d762a46d434f 100644 --- a/drivers/soc/mediatek/mtk-scpsys.c +++ b/drivers/soc/mediatek/mtk-scpsys.c @@ -666,12 +666,48 @@ static const struct scp_domain_data scp_domain_data_mt2712[] = { .name = "mfg", .sta_mask = PWR_STATUS_MFG, .ctl_offs = SPM_MFG_PWR_CON, - .sram_pdn_bits = GENMASK(11, 8), - .sram_pdn_ack_bits = GENMASK(19, 16), + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(16, 16), .clk_id = {CLK_MFG}, .bus_prot_mask = BIT(14) | BIT(21) | BIT(23), .active_wakeup = true, }, + [MT2712_POWER_DOMAIN_MFG_SC1] = { + .name = "mfg_sc1", + .sta_mask = BIT(22), + .ctl_offs = 0x02c0, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(16, 16), + .clk_id = {CLK_NONE}, + .active_wakeup = true, + }, + [MT2712_POWER_DOMAIN_MFG_SC2] = { + .name = "mfg_sc2", + .sta_mask = BIT(23), + .ctl_offs = 0x02c4, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(16, 16), + .clk_id = {CLK_NONE}, + .active_wakeup = true, + }, + [MT2712_POWER_DOMAIN_MFG_SC3] = { + .name = "mfg_sc3", + .sta_mask = BIT(30), + .ctl_offs = 0x01f8, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(16, 16), + .clk_id = {CLK_NONE}, + .active_wakeup = true, + }, +}; + +static const struct scp_subdomain scp_subdomain_mt2712[] = { + {MT2712_POWER_DOMAIN_MM, MT2712_POWER_DOMAIN_VDEC}, + {MT2712_POWER_DOMAIN_MM, MT2712_POWER_DOMAIN_VENC}, + {MT2712_POWER_DOMAIN_MM, MT2712_POWER_DOMAIN_ISP}, + {MT2712_POWER_DOMAIN_MFG, MT2712_POWER_DOMAIN_MFG_SC1}, + {MT2712_POWER_DOMAIN_MFG_SC1, MT2712_POWER_DOMAIN_MFG_SC2}, + {MT2712_POWER_DOMAIN_MFG_SC2, MT2712_POWER_DOMAIN_MFG_SC3}, }; /* @@ -948,6 +984,8 @@ static const struct scp_soc_data mt2701_data = { static const struct scp_soc_data mt2712_data = { .domains = scp_domain_data_mt2712, .num_domains = ARRAY_SIZE(scp_domain_data_mt2712), + .subdomains = scp_subdomain_mt2712, + .num_subdomains = ARRAY_SIZE(scp_subdomain_mt2712), .regs = { .pwr_sta_offs = SPM_PWR_STATUS, .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND -- cgit v1.2.3 From 4c817ccf73abaf7b06a2630e8352d82648c8bc70 Mon Sep 17 00:00:00 2001 From: Vivek Gautam Date: Wed, 19 Jul 2017 17:59:08 +0200 Subject: soc/tegra: pmc: Use the new reset APIs to manage reset controllers Make use of of_reset_control_array_get_exclusive() to manage an array of reset controllers available with the device. 
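The consolidated pattern looks roughly like this (a sketch; the real call sites are in the diff below):

	/* one handle now represents every "resets" phandle of the node */
	pg->reset = of_reset_control_array_get_exclusive(np);
	if (IS_ERR(pg->reset))
		return PTR_ERR(pg->reset);

	err = reset_control_assert(pg->reset);	/* asserts every line */
	/* ... power sequencing ... */
	err = reset_control_deassert(pg->reset);	/* releases every line */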
Cc: Jon Hunter Cc: Thierry Reding Signed-off-by: Vivek Gautam [p.zabel@pengutronix.de: switch to hidden reset control array] Signed-off-by: Philipp Zabel Acked-by: Jon Hunter Signed-off-by: Thierry Reding --- drivers/soc/tegra/pmc.c | 92 ++++++++++--------------------------------------- 1 file changed, 18 insertions(+), 74 deletions(-) diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 0efdc4ec019f..d9fcdb592b39 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -127,8 +127,7 @@ struct tegra_powergate { unsigned int id; struct clk **clks; unsigned int num_clks; - struct reset_control **resets; - unsigned int num_resets; + struct reset_control *reset; }; struct tegra_io_pad_soc { @@ -369,34 +368,6 @@ out: return err; } -static int tegra_powergate_reset_assert(struct tegra_powergate *pg) -{ - unsigned int i; - int err; - - for (i = 0; i < pg->num_resets; i++) { - err = reset_control_assert(pg->resets[i]); - if (err) - return err; - } - - return 0; -} - -static int tegra_powergate_reset_deassert(struct tegra_powergate *pg) -{ - unsigned int i; - int err; - - for (i = 0; i < pg->num_resets; i++) { - err = reset_control_deassert(pg->resets[i]); - if (err) - return err; - } - - return 0; -} - int __weak tegra210_clk_handle_mbist_war(unsigned int id) { return 0; @@ -407,7 +378,7 @@ static int tegra_powergate_power_up(struct tegra_powergate *pg, { int err; - err = tegra_powergate_reset_assert(pg); + err = reset_control_assert(pg->reset); if (err) return err; @@ -431,7 +402,7 @@ static int tegra_powergate_power_up(struct tegra_powergate *pg, usleep_range(10, 20); - err = tegra_powergate_reset_deassert(pg); + err = reset_control_deassert(pg->reset); if (err) goto powergate_off; @@ -467,7 +438,7 @@ static int tegra_powergate_power_down(struct tegra_powergate *pg) usleep_range(10, 20); - err = tegra_powergate_reset_assert(pg); + err = reset_control_assert(pg->reset); if (err) goto disable_clks; @@ -486,7 +457,7 @@ static int tegra_powergate_power_down(struct tegra_powergate *pg) assert_resets: tegra_powergate_enable_clocks(pg); usleep_range(10, 20); - tegra_powergate_reset_deassert(pg); + reset_control_deassert(pg->reset); usleep_range(10, 20); disable_clks: @@ -597,8 +568,7 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk, pg.id = id; pg.clks = &clk; pg.num_clks = 1; - pg.resets = &rst; - pg.num_resets = 1; + pg.reset = rst; pg.pmc = pmc; err = tegra_powergate_power_up(&pg, false); @@ -787,45 +757,22 @@ err: static int tegra_powergate_of_get_resets(struct tegra_powergate *pg, struct device_node *np, bool off) { - struct reset_control *rst; - unsigned int i, count; int err; - count = of_count_phandle_with_args(np, "resets", "#reset-cells"); - if (count == 0) - return -ENODEV; - - pg->resets = kcalloc(count, sizeof(rst), GFP_KERNEL); - if (!pg->resets) - return -ENOMEM; - - for (i = 0; i < count; i++) { - pg->resets[i] = of_reset_control_get_by_index(np, i); - if (IS_ERR(pg->resets[i])) { - err = PTR_ERR(pg->resets[i]); - goto error; - } - - if (off) - err = reset_control_assert(pg->resets[i]); - else - err = reset_control_deassert(pg->resets[i]); - - if (err) { - reset_control_put(pg->resets[i]); - goto error; - } + pg->reset = of_reset_control_array_get_exclusive(np); + if (IS_ERR(pg->reset)) { + err = PTR_ERR(pg->reset); + pr_err("failed to get device resets: %d\n", err); + return err; } - pg->num_resets = count; - - return 0; - -error: - while (i--) - reset_control_put(pg->resets[i]); + if (off) + err = reset_control_assert(pg->reset); + 
else + err = reset_control_deassert(pg->reset); - kfree(pg->resets); + if (err) + reset_control_put(pg->reset); return err; } @@ -917,10 +864,7 @@ remove_genpd: pm_genpd_remove(&pg->genpd); remove_resets: - while (pg->num_resets--) - reset_control_put(pg->resets[pg->num_resets]); - - kfree(pg->resets); + reset_control_put(pg->reset); remove_clks: while (pg->num_clks--) -- cgit v1.2.3 From f842c41adc044e4586dd232c6e889f9d46180fa8 Mon Sep 17 00:00:00 2001 From: Neil Armstrong Date: Mon, 12 Mar 2018 12:17:40 +0100 Subject: amlogic: meson-gx-socinfo: Update soc ids Update the Amlogic Meson SoC IDs for the ARMv8-based SoCs. This includes the new families and packages. Signed-off-by: Neil Armstrong Signed-off-by: Kevin Hilman --- drivers/soc/amlogic/meson-gx-socinfo.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c index ea091f1f7dae..37ea0a1c24c8 100644 --- a/drivers/soc/amlogic/meson-gx-socinfo.c +++ b/drivers/soc/amlogic/meson-gx-socinfo.c @@ -33,6 +33,10 @@ static const struct meson_gx_soc_id { { "GXL", 0x21 }, { "GXM", 0x22 }, { "TXL", 0x23 }, + { "TXLX", 0x24 }, + { "AXG", 0x25 }, + { "GXLX", 0x26 }, + { "TXHD", 0x27 }, }; static const struct meson_gx_package_id { @@ -41,12 +45,18 @@ static const struct meson_gx_package_id { unsigned int pack_id; } soc_packages[] = { { "S905", 0x1f, 0 }, + { "S905H", 0x1f, 0x13 }, { "S905M", 0x1f, 0x20 }, { "S905D", 0x21, 0 }, { "S905X", 0x21, 0x80 }, + { "S905W", 0x21, 0xa0 }, { "S905L", 0x21, 0xc0 }, { "S905M2", 0x21, 0xe0 }, { "S912", 0x22, 0 }, + { "962X", 0x24, 0x10 }, + { "962E", 0x24, 0x20 }, + { "A113X", 0x25, 0x37 }, + { "A113D", 0x25, 0x22 }, }; static inline unsigned int socinfo_to_major(u32 socinfo) -- cgit v1.2.3 From c09880cef78d0ddf149cdfeb733027a806465ba2 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Thu, 15 Mar 2018 16:54:18 +0000 Subject: hwmon: (scmi) return -EINVAL when sensor information is unavailable Passing a NULL pointer to PTR_ERR will result in a return value of 0, indicating success, which is clearly not what is intended here. This patch returns -EINVAL instead when the sensor information is not available.
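The pitfall, spelled out: PTR_ERR() simply casts the pointer back to a long, so PTR_ERR(NULL) evaluates to 0 and the probe path would "succeed" with no sensor data. Sketch of the broken and fixed checks:

	sensor = handle->sensor_ops->info_get(handle, i);

	/* before */
	if (!sensor)
		return PTR_ERR(sensor);	/* PTR_ERR(NULL) == 0 -> "success" */

	/* after */
	if (!sensor)
		return -EINVAL;		/* explicit error code */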
Fixes: b23688aefb8b ("hwmon: add support for sensors exported via ARM SCMI") Reported-by: Dan Carpenter Acked-by: Guenter Roeck Cc: linux-hwmon@vger.kernel.org Signed-off-by: Sudeep Holla --- drivers/hwmon/scmi-hwmon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c index 32e750373ced..363bf56eb0f2 100644 --- a/drivers/hwmon/scmi-hwmon.c +++ b/drivers/hwmon/scmi-hwmon.c @@ -138,7 +138,7 @@ static int scmi_hwmon_probe(struct scmi_device *sdev) for (i = 0; i < nr_sensors; i++) { sensor = handle->sensor_ops->info_get(handle, i); if (!sensor) - return PTR_ERR(sensor); + return -EINVAL; switch (sensor->type) { case TEMPERATURE_C: -- cgit v1.2.3 From b5e817dcdd556da2b74c66af1cf0f44840283102 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Fri, 16 Mar 2018 12:44:42 +0100 Subject: firmware: arm_scmi: prevent accessing rate_discrete uninitialized gcc-5.3 and earlier warn that rate_discrete may be used uninitialized: ../drivers/firmware/arm_scmi/clock.c:185:5: warning: 'rate_discrete' may be used uninitialized in this function [-Wmaybe-uninitialized] if (rate_discrete) ^ ../drivers/firmware/arm_scmi/clock.c:128:7: note: 'rate_discrete' was declared here bool rate_discrete; ^ This patch fixes the warning by initialising rate_discrete and also by using a goto label for the error path. Fixes: 5f6c6430e904 ("firmware: arm_scmi: add initial support for clock protocol") Suggested-by: Arnd Bergmann Signed-off-by: Anders Roxell [sudeep.holla: added one line description to the commit message] Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/clock.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index e8ffad33a0ff..e6f17825db79 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -125,7 +125,7 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id, { u64 *rate; int ret, cnt; - bool rate_discrete; + bool rate_discrete = false; u32 tot_rate_cnt = 0, rates_flag; u16 num_returned, num_remaining; struct scmi_xfer *t; @@ -147,7 +147,7 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id, ret = scmi_do_xfer(handle, t); if (ret) - break; + goto err; rates_flag = le32_to_cpu(rlist->num_rates_flags); num_remaining = NUM_REMAINING(rates_flag); @@ -185,6 +185,7 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id, if (rate_discrete) clk->list.num_rates = tot_rate_cnt; +err: scmi_one_xfer_put(handle, t); return ret; } -- cgit v1.2.3 From 7f9badfceffc6d2ae331050f1cd155a633130437 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Tue, 20 Mar 2018 11:22:48 +0000 Subject: clk: scmi: use devm_of_clk_add_hw_provider() API and drop scmi_clocks_remove Commit aa795c41d9cd ("clk: Add devm_of_clk_add_hw_provider()/del_provider() APIs") adds devm_of_clk_add_hw_provider which takes care of deleting the clock provider when the clock provider's device is removed.
This patch makes use of devm_of_clk_add_hw_provider() instead of of_clk_add_hw_provider() so that we can eliminate the need for an explicit scmi_clocks_remove that just does of_clk_del_provider(). Acked-by: Stephen Boyd Signed-off-by: Sudeep Holla --- drivers/clk/clk-scmi.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c index 26f1476d4a79..488c21376b55 100644 --- a/drivers/clk/clk-scmi.c +++ b/drivers/clk/clk-scmi.c @@ -172,15 +172,8 @@ static int scmi_clocks_probe(struct scmi_device *sdev) } } - return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data); -} - -static void scmi_clocks_remove(struct scmi_device *sdev) -{ - struct device *dev = &sdev->dev; - struct device_node *np = dev->of_node; - - of_clk_del_provider(np); + return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, + clk_data); } static const struct scmi_device_id scmi_id_table[] = { @@ -192,7 +185,6 @@ MODULE_DEVICE_TABLE(scmi, scmi_id_table); static struct scmi_driver scmi_clocks_driver = { .name = "scmi-clocks", .probe = scmi_clocks_probe, - .remove = scmi_clocks_remove, .id_table = scmi_id_table, }; module_scmi_driver(scmi_clocks_driver); -- cgit v1.2.3 From 6691dffab0ab6301bb7b489b1dcf9f5efdef202f Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Wed, 28 Feb 2018 14:08:57 +0100 Subject: reset: add support for non-DT systems The reset framework only supports device-tree. There are some platforms, however, which need to use it even in legacy, board-file-based mode. An example of such an architecture is the DaVinci family of SoCs, which supports both device tree and legacy boot modes, and we don't want to introduce any regressions. We're currently working on converting the platform from its hand-crafted clock API to using the common clock framework. Part of the overhaul will be representing the chip's power sleep controller's reset lines using the reset framework. This changeset extends the core reset code with a new reset lookup entry structure. It contains data allowing the reset core to associate reset lines with devices by comparing the dev_id and con_id strings. It also provides a function allowing drivers to register lookup entries with the framework. The new lookup function is only called as a fallback in case the of_node field is NULL and doesn't change anything for current users. Tested with a dummy reset driver with several lookup entries. An example lookup table registration from a driver can be found below: static struct reset_control_lookup foobar_reset_lookup[] = { RESET_LOOKUP("foo.0", "foo", 15), RESET_LOOKUP("bar.0", NULL, 5), }; foobar_probe() { ... reset_controller_add_lookup(&rcdev, foobar_reset_lookup, ARRAY_SIZE(foobar_reset_lookup)); ...
} Cc: Sekhar Nori Cc: Kevin Hilman Cc: David Lechner Signed-off-by: Bartosz Golaszewski Signed-off-by: Philipp Zabel --- drivers/reset/core.c | 72 +++++++++++++++++++++++++++++++++++++++- include/linux/reset-controller.h | 28 ++++++++++++++++ 2 files changed, 99 insertions(+), 1 deletion(-) diff --git a/drivers/reset/core.c b/drivers/reset/core.c index da4292e9de97..06fa4907afc4 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -23,6 +23,9 @@ static DEFINE_MUTEX(reset_list_mutex); static LIST_HEAD(reset_controller_list); +static DEFINE_MUTEX(reset_lookup_mutex); +static LIST_HEAD(reset_lookup_list); + /** * struct reset_control - a reset control * @rcdev: a pointer to the reset controller device @@ -148,6 +151,36 @@ int devm_reset_controller_register(struct device *dev, } EXPORT_SYMBOL_GPL(devm_reset_controller_register); +/** + * reset_controller_add_lookup - register a set of lookup entries + * @rcdev: initialized reset controller device owning the reset line + * @lookup: array of reset lookup entries + * @num_entries: number of entries in the lookup array + */ +void reset_controller_add_lookup(struct reset_controller_dev *rcdev, + struct reset_control_lookup *lookup, + unsigned int num_entries) +{ + struct reset_control_lookup *entry; + unsigned int i; + + mutex_lock(&reset_lookup_mutex); + for (i = 0; i < num_entries; i++) { + entry = &lookup[i]; + + if (!entry->dev_id) { + pr_warn("%s(): reset lookup entry has no dev_id, skipping\n", + __func__); + continue; + } + + entry->rcdev = rcdev; + list_add_tail(&entry->list, &reset_lookup_list); + } + mutex_unlock(&reset_lookup_mutex); +} +EXPORT_SYMBOL_GPL(reset_controller_add_lookup); + static inline struct reset_control_array * rstc_to_array(struct reset_control *rstc) { return container_of(rstc, struct reset_control_array, base); @@ -493,6 +526,43 @@ struct reset_control *__of_reset_control_get(struct device_node *node, } EXPORT_SYMBOL_GPL(__of_reset_control_get); +static struct reset_control * +__reset_control_get_from_lookup(struct device *dev, const char *con_id, + bool shared, bool optional) +{ + const struct reset_control_lookup *lookup; + const char *dev_id = dev_name(dev); + struct reset_control *rstc = NULL; + + if (!dev) + return ERR_PTR(-EINVAL); + + mutex_lock(&reset_lookup_mutex); + + list_for_each_entry(lookup, &reset_lookup_list, list) { + if (strcmp(lookup->dev_id, dev_id)) + continue; + + if ((!con_id && !lookup->con_id) || + ((con_id && lookup->con_id) && + !strcmp(con_id, lookup->con_id))) { + mutex_lock(&reset_list_mutex); + rstc = __reset_control_get_internal(lookup->rcdev, + lookup->index, + shared); + mutex_unlock(&reset_list_mutex); + break; + } + } + + mutex_unlock(&reset_lookup_mutex); + + if (!rstc) + return optional ? NULL : ERR_PTR(-ENOENT); + + return rstc; +} + struct reset_control *__reset_control_get(struct device *dev, const char *id, int index, bool shared, bool optional) { @@ -500,7 +570,7 @@ struct reset_control *__reset_control_get(struct device *dev, const char *id, return __of_reset_control_get(dev->of_node, id, index, shared, optional); - return optional ? 
NULL : ERR_PTR(-EINVAL); + return __reset_control_get_from_lookup(dev, id, shared, optional); } EXPORT_SYMBOL_GPL(__reset_control_get); diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h index adb88f8cefbc..25698f6c1fae 100644 --- a/include/linux/reset-controller.h +++ b/include/linux/reset-controller.h @@ -26,6 +26,30 @@ struct module; struct device_node; struct of_phandle_args; +/** + * struct reset_control_lookup - represents a single lookup entry + * + * @list: internal list of all reset lookup entries + * @rcdev: reset controller device controlling this reset line + * @index: ID of the reset controller in the reset controller device + * @dev_id: name of the device associated with this reset line + * @con_id: name of the reset line (can be NULL) + */ +struct reset_control_lookup { + struct list_head list; + struct reset_controller_dev *rcdev; + unsigned int index; + const char *dev_id; + const char *con_id; +}; + +#define RESET_LOOKUP(_dev_id, _con_id, _index) \ + { \ + .dev_id = _dev_id, \ + .con_id = _con_id, \ + .index = _index, \ + } + /** * struct reset_controller_dev - reset controller entity that might * provide multiple reset controls @@ -58,4 +82,8 @@ struct device; int devm_reset_controller_register(struct device *dev, struct reset_controller_dev *rcdev); +void reset_controller_add_lookup(struct reset_controller_dev *rcdev, + struct reset_control_lookup *lookup, + unsigned int num_entries); + #endif -- cgit v1.2.3 From e2749bb998701e21cdb8b34486b82fc1c051ab41 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Fri, 23 Mar 2018 14:04:48 +0100 Subject: reset: modify the way reset lookup works for board files Commit 7af1bb19f1d7 ("reset: add support for non-DT systems") introduced a reset control lookup mechanism for boards that still use board files. The routine used to register lookup entries takes the corresponding reset_controller_dev structure as an argument. It's been determined, however, that for the first user of this new interface - the davinci psc driver - it will be easier to register the lookup entries using the reset controller device name. This patch changes the way lookup entries are added.
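With this change, the earlier foobar example would register its entries by provider name instead (a sketch using the reordered RESET_LOOKUP() arguments from the hunks below; "foobar-reset.0" stands in for the dev_name() of the reset controller device and is purely illustrative):

	static struct reset_control_lookup foobar_reset_lookup[] = {
		RESET_LOOKUP("foobar-reset.0", 15, "foo.0", "foo"),
		RESET_LOOKUP("foobar-reset.0", 5, "bar.0", NULL),
	};

	reset_controller_add_lookup(foobar_reset_lookup,
				    ARRAY_SIZE(foobar_reset_lookup));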
Signed-off-by: Bartosz Golaszewski [p.zabel@pengutronix.de: added missing ERR_PTR] Signed-off-by: Philipp Zabel --- drivers/reset/core.c | 38 +++++++++++++++++++++++++++++++------- include/linux/reset-controller.h | 14 ++++++++------ 2 files changed, 39 insertions(+), 13 deletions(-) diff --git a/drivers/reset/core.c b/drivers/reset/core.c index 06fa4907afc4..6488292e129c 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -153,12 +153,10 @@ EXPORT_SYMBOL_GPL(devm_reset_controller_register); /** * reset_controller_add_lookup - register a set of lookup entries - * @rcdev: initialized reset controller device owning the reset line * @lookup: array of reset lookup entries * @num_entries: number of entries in the lookup array */ -void reset_controller_add_lookup(struct reset_controller_dev *rcdev, - struct reset_control_lookup *lookup, +void reset_controller_add_lookup(struct reset_control_lookup *lookup, unsigned int num_entries) { struct reset_control_lookup *entry; @@ -168,13 +166,12 @@ void reset_controller_add_lookup(struct reset_controller_dev *rcdev, for (i = 0; i < num_entries; i++) { entry = &lookup[i]; - if (!entry->dev_id) { - pr_warn("%s(): reset lookup entry has no dev_id, skipping\n", + if (!entry->dev_id || !entry->provider) { + pr_warn("%s(): reset lookup entry badly specified, skipping\n", __func__); continue; } - entry->rcdev = rcdev; list_add_tail(&entry->list, &reset_lookup_list); } mutex_unlock(&reset_lookup_mutex); @@ -526,11 +523,30 @@ struct reset_control *__of_reset_control_get(struct device_node *node, } EXPORT_SYMBOL_GPL(__of_reset_control_get); +static struct reset_controller_dev * +__reset_controller_by_name(const char *name) +{ + struct reset_controller_dev *rcdev; + + lockdep_assert_held(&reset_list_mutex); + + list_for_each_entry(rcdev, &reset_controller_list, list) { + if (!rcdev->dev) + continue; + + if (!strcmp(name, dev_name(rcdev->dev))) + return rcdev; + } + + return NULL; +} + static struct reset_control * __reset_control_get_from_lookup(struct device *dev, const char *con_id, bool shared, bool optional) { const struct reset_control_lookup *lookup; + struct reset_controller_dev *rcdev; const char *dev_id = dev_name(dev); struct reset_control *rstc = NULL; @@ -547,7 +563,15 @@ __reset_control_get_from_lookup(struct device *dev, const char *con_id, ((con_id && lookup->con_id) && !strcmp(con_id, lookup->con_id))) { mutex_lock(&reset_list_mutex); - rstc = __reset_control_get_internal(lookup->rcdev, + rcdev = __reset_controller_by_name(lookup->provider); + if (!rcdev) { + mutex_unlock(&reset_list_mutex); + mutex_unlock(&reset_lookup_mutex); + /* Reset provider may not be ready yet. 
*/ + return ERR_PTR(-EPROBE_DEFER); + } + + rstc = __reset_control_get_internal(rcdev, lookup->index, shared); mutex_unlock(&reset_list_mutex); diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h index 25698f6c1fae..9326d671b6e6 100644 --- a/include/linux/reset-controller.h +++ b/include/linux/reset-controller.h @@ -30,24 +30,25 @@ struct of_phandle_args; * struct reset_control_lookup - represents a single lookup entry * * @list: internal list of all reset lookup entries - * @rcdev: reset controller device controlling this reset line + * @provider: name of the reset controller device controlling this reset line * @index: ID of the reset controller in the reset controller device * @dev_id: name of the device associated with this reset line * @con_id: name of the reset line (can be NULL) */ struct reset_control_lookup { struct list_head list; - struct reset_controller_dev *rcdev; + const char *provider; unsigned int index; const char *dev_id; const char *con_id; }; -#define RESET_LOOKUP(_dev_id, _con_id, _index) \ +#define RESET_LOOKUP(_provider, _index, _dev_id, _con_id) \ { \ + .provider = _provider, \ + .index = _index, \ .dev_id = _dev_id, \ .con_id = _con_id, \ - .index = _index, \ } /** @@ -57,6 +58,7 @@ struct reset_control_lookup { * @owner: kernel module of the reset controller driver * @list: internal list of reset controller devices * @reset_control_head: head of internal list of requested reset controls + * @dev: corresponding driver model device struct * @of_node: corresponding device tree node as phandle target * @of_reset_n_cells: number of cells in reset line specifiers * @of_xlate: translation function to translate from specifier as found in the @@ -68,6 +70,7 @@ struct reset_controller_dev { struct module *owner; struct list_head list; struct list_head reset_control_head; + struct device *dev; struct device_node *of_node; int of_reset_n_cells; int (*of_xlate)(struct reset_controller_dev *rcdev, @@ -82,8 +85,7 @@ struct device; int devm_reset_controller_register(struct device *dev, struct reset_controller_dev *rcdev); -void reset_controller_add_lookup(struct reset_controller_dev *rcdev, - struct reset_control_lookup *lookup, +void reset_controller_add_lookup(struct reset_control_lookup *lookup, unsigned int num_entries); #endif -- cgit v1.2.3 From 8fa566206efdef4d3ec7f13bed36302f9969f2ef Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 6 Mar 2018 20:15:11 +0900 Subject: reset: imx7: add 'depends on HAS_IOMEM' to fix unmet dependency This config selects MFD_SYSCON, but does not depend on HAS_IOMEM. Compile testing on an architecture without HAS_IOMEM causes "unmet direct dependencies" in the Kconfig phase. Detected by "make ARCH=score allyesconfig". Signed-off-by: Masahiro Yamada Signed-off-by: Philipp Zabel --- drivers/reset/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index 18f152d251d7..1efbc6cc8021 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -49,6 +49,7 @@ config RESET_HSDK config RESET_IMX7 bool "i.MX7 Reset Driver" if COMPILE_TEST + depends on HAS_IOMEM default SOC_IMX7D select MFD_SYSCON help -- cgit v1.2.3 From b06b631c8b67aafd57cfbc0313bdfec0db62cacb Mon Sep 17 00:00:00 2001 From: Katsuhiro Suzuki Date: Thu, 8 Mar 2018 17:09:29 +0900 Subject: reset: uniphier: add Pro4/Pro5/PXs2 audio systems reset control Add reset lines for the audio subsystem (AIO) on UniPhier Pro4/Pro5/PXs2 SoCs.
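For context, each table entry encodes (id, register offset, bit), and the UNIPHIER_RESETX() variant denotes an active-low line (the tables below mark the one active-high entry explicitly). A consumer then uses the ordinary reset API (a sketch; the phandle and index shown are illustrative):

	/* DT: resets = <&sys_rst 40>;  -> the new AIO line */
	rst = devm_reset_control_get(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);
	reset_control_deassert(rst);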
Signed-off-by: Katsuhiro Suzuki Acked-by: Masahiro Yamada Signed-off-by: Philipp Zabel --- drivers/reset/reset-uniphier.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c index e8bb023ff15e..0fa23129d343 100644 --- a/drivers/reset/reset-uniphier.c +++ b/drivers/reset/reset-uniphier.c @@ -63,6 +63,7 @@ static const struct uniphier_reset_data uniphier_pro4_sys_reset_data[] = { UNIPHIER_RESETX(12, 0x2000, 6), /* GIO (Ether, SATA, USB3) */ UNIPHIER_RESETX(14, 0x2000, 17), /* USB30 */ UNIPHIER_RESETX(15, 0x2004, 17), /* USB31 */ + UNIPHIER_RESETX(40, 0x2000, 13), /* AIO */ UNIPHIER_RESET_END, }; @@ -72,6 +73,7 @@ static const struct uniphier_reset_data uniphier_pro5_sys_reset_data[] = { UNIPHIER_RESETX(12, 0x2000, 6), /* GIO (PCIe, USB3) */ UNIPHIER_RESETX(14, 0x2000, 17), /* USB30 */ UNIPHIER_RESETX(15, 0x2004, 17), /* USB31 */ + UNIPHIER_RESETX(40, 0x2000, 13), /* AIO */ UNIPHIER_RESET_END, }; @@ -88,6 +90,7 @@ static const struct uniphier_reset_data uniphier_pxs2_sys_reset_data[] = { UNIPHIER_RESETX(21, 0x2014, 1), /* USB31-PHY1 */ UNIPHIER_RESETX(28, 0x2014, 12), /* SATA */ UNIPHIER_RESET(29, 0x2014, 8), /* SATA-PHY (active high) */ + UNIPHIER_RESETX(40, 0x2000, 13), /* AIO */ UNIPHIER_RESET_END, }; -- cgit v1.2.3 From dae5af9762c8f03233b68401ecc4fab4befae11c Mon Sep 17 00:00:00 2001 From: Gabriel Fernandez Date: Mon, 19 Mar 2018 08:25:50 +0100 Subject: dt-bindings: reset: add STM32MP1 resets This patch adds the reset binding entry for STM32MP1 Signed-off-by: Gabriel Fernandez Reviewed-by: Rob Herring Signed-off-by: Philipp Zabel --- .../devicetree/bindings/reset/st,stm32mp1-rcc.txt | 6 ++ include/dt-bindings/reset/stm32mp1-resets.h | 108 +++++++++++++++++++++ 2 files changed, 114 insertions(+) create mode 100644 Documentation/devicetree/bindings/reset/st,stm32mp1-rcc.txt create mode 100644 include/dt-bindings/reset/stm32mp1-resets.h diff --git a/Documentation/devicetree/bindings/reset/st,stm32mp1-rcc.txt b/Documentation/devicetree/bindings/reset/st,stm32mp1-rcc.txt new file mode 100644 index 000000000000..b4edaf7c7ff3 --- /dev/null +++ b/Documentation/devicetree/bindings/reset/st,stm32mp1-rcc.txt @@ -0,0 +1,6 @@ +STMicroelectronics STM32MP1 Peripheral Reset Controller +======================================================= + +The RCC IP is both a reset and a clock controller. + +Please see Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.txt diff --git a/include/dt-bindings/reset/stm32mp1-resets.h b/include/dt-bindings/reset/stm32mp1-resets.h new file mode 100644 index 000000000000..f0c3aaef67a0 --- /dev/null +++ b/include/dt-bindings/reset/stm32mp1-resets.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* + * Copyright (C) STMicroelectronics 2018 - All Rights Reserved + * Author: Gabriel Fernandez for STMicroelectronics. 
+ */ + +#ifndef _DT_BINDINGS_STM32MP1_RESET_H_ +#define _DT_BINDINGS_STM32MP1_RESET_H_ + +#define LTDC_R 3072 +#define DSI_R 3076 +#define DDRPERFM_R 3080 +#define USBPHY_R 3088 +#define SPI6_R 3136 +#define I2C4_R 3138 +#define I2C6_R 3139 +#define USART1_R 3140 +#define STGEN_R 3156 +#define GPIOZ_R 3200 +#define CRYP1_R 3204 +#define HASH1_R 3205 +#define RNG1_R 3206 +#define AXIM_R 3216 +#define GPU_R 3269 +#define ETHMAC_R 3274 +#define FMC_R 3276 +#define QSPI_R 3278 +#define SDMMC1_R 3280 +#define SDMMC2_R 3281 +#define CRC1_R 3284 +#define USBH_R 3288 +#define MDMA_R 3328 +#define MCU_R 8225 +#define TIM2_R 19456 +#define TIM3_R 19457 +#define TIM4_R 19458 +#define TIM5_R 19459 +#define TIM6_R 19460 +#define TIM7_R 19461 +#define TIM12_R 19462 +#define TIM13_R 19463 +#define TIM14_R 19464 +#define LPTIM1_R 19465 +#define SPI2_R 19467 +#define SPI3_R 19468 +#define USART2_R 19470 +#define USART3_R 19471 +#define UART4_R 19472 +#define UART5_R 19473 +#define UART7_R 19474 +#define UART8_R 19475 +#define I2C1_R 19477 +#define I2C2_R 19478 +#define I2C3_R 19479 +#define I2C5_R 19480 +#define SPDIF_R 19482 +#define CEC_R 19483 +#define DAC12_R 19485 +#define MDIO_R 19847 +#define TIM1_R 19520 +#define TIM8_R 19521 +#define TIM15_R 19522 +#define TIM16_R 19523 +#define TIM17_R 19524 +#define SPI1_R 19528 +#define SPI4_R 19529 +#define SPI5_R 19530 +#define USART6_R 19533 +#define SAI1_R 19536 +#define SAI2_R 19537 +#define SAI3_R 19538 +#define DFSDM_R 19540 +#define FDCAN_R 19544 +#define LPTIM2_R 19584 +#define LPTIM3_R 19585 +#define LPTIM4_R 19586 +#define LPTIM5_R 19587 +#define SAI4_R 19592 +#define SYSCFG_R 19595 +#define VREF_R 19597 +#define TMPSENS_R 19600 +#define PMBCTRL_R 19601 +#define DMA1_R 19648 +#define DMA2_R 19649 +#define DMAMUX_R 19650 +#define ADC12_R 19653 +#define USBO_R 19656 +#define SDMMC3_R 19664 +#define CAMITF_R 19712 +#define CRYP2_R 19716 +#define HASH2_R 19717 +#define RNG2_R 19718 +#define CRC2_R 19719 +#define HSEM_R 19723 +#define MBOX_R 19724 +#define GPIOA_R 19776 +#define GPIOB_R 19777 +#define GPIOC_R 19778 +#define GPIOD_R 19779 +#define GPIOE_R 19780 +#define GPIOF_R 19781 +#define GPIOG_R 19782 +#define GPIOH_R 19783 +#define GPIOI_R 19784 +#define GPIOJ_R 19785 +#define GPIOK_R 19786 + +#endif /* _DT_BINDINGS_STM32MP1_RESET_H_ */ -- cgit v1.2.3 From 197858b68532d415f21a5a92fb6661c67c6c9a97 Mon Sep 17 00:00:00 2001 From: Gabriel Fernandez Date: Mon, 19 Mar 2018 08:25:51 +0100 Subject: reset: stm32mp1: Enable stm32mp1 reset driver The stm32mp1 RCC IP has a reset SET register and a reset CLEAR register.
Writing '0' to the reset SET register has no effect, while writing '1' activates the reset of the corresponding peripheral. Likewise, writing '0' to the reset CLEAR register has no effect, while writing '1' releases the reset of the corresponding peripheral. See Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.txt Signed-off-by: Gabriel Fernandez Signed-off-by: Philipp Zabel --- drivers/reset/Kconfig | 6 +++ drivers/reset/Makefile | 1 + drivers/reset/reset-stm32mp1.c | 115 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 122 insertions(+) create mode 100644 drivers/reset/reset-stm32mp1.c diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index 1efbc6cc8021..c0b292be1b72 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -97,6 +97,12 @@ config RESET_SIMPLE - Allwinner SoCs - ZTE's zx2967 family +config RESET_STM32MP157 + bool "STM32MP157 Reset Driver" if COMPILE_TEST + default MACH_STM32MP157 + help + This enables the RCC reset controller driver for STM32 MPUs. + config RESET_SUNXI bool "Allwinner SoCs Reset Driver" if COMPILE_TEST && !ARCH_SUNXI default ARCH_SUNXI diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index 132c24f5ddb5..c1261dcfe9ad 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_RESET_MESON) += reset-meson.o obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o +obj-$(CONFIG_RESET_STM32MP157) += reset-stm32mp1.o obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o diff --git a/drivers/reset/reset-stm32mp1.c b/drivers/reset/reset-stm32mp1.c new file mode 100644 index 000000000000..b221a28041fa --- /dev/null +++ b/drivers/reset/reset-stm32mp1.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) STMicroelectronics 2018 - All Rights Reserved + * Author: Gabriel Fernandez for STMicroelectronics.
+ */ + +#include +#include +#include +#include +#include +#include + +#define CLR_OFFSET 0x4 + +struct stm32_reset_data { + struct reset_controller_dev rcdev; + void __iomem *membase; +}; + +static inline struct stm32_reset_data * +to_stm32_reset_data(struct reset_controller_dev *rcdev) +{ + return container_of(rcdev, struct stm32_reset_data, rcdev); +} + +static int stm32_reset_update(struct reset_controller_dev *rcdev, + unsigned long id, bool assert) +{ + struct stm32_reset_data *data = to_stm32_reset_data(rcdev); + int reg_width = sizeof(u32); + int bank = id / (reg_width * BITS_PER_BYTE); + int offset = id % (reg_width * BITS_PER_BYTE); + void __iomem *addr; + + addr = data->membase + (bank * reg_width); + if (!assert) + addr += CLR_OFFSET; + + writel(BIT(offset), addr); + + return 0; +} + +static int stm32_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return stm32_reset_update(rcdev, id, true); +} + +static int stm32_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return stm32_reset_update(rcdev, id, false); +} + +static int stm32_reset_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct stm32_reset_data *data = to_stm32_reset_data(rcdev); + int reg_width = sizeof(u32); + int bank = id / (reg_width * BITS_PER_BYTE); + int offset = id % (reg_width * BITS_PER_BYTE); + u32 reg; + + reg = readl(data->membase + (bank * reg_width)); + + return !!(reg & BIT(offset)); +} + +static const struct reset_control_ops stm32_reset_ops = { + .assert = stm32_reset_assert, + .deassert = stm32_reset_deassert, + .status = stm32_reset_status, +}; + +static const struct of_device_id stm32_reset_dt_ids[] = { + { .compatible = "st,stm32mp1-rcc"}, + { /* sentinel */ }, +}; + +static int stm32_reset_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct stm32_reset_data *data; + void __iomem *membase; + struct resource *res; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + membase = devm_ioremap_resource(dev, res); + if (IS_ERR(membase)) + return PTR_ERR(membase); + + data->membase = membase; + data->rcdev.owner = THIS_MODULE; + data->rcdev.nr_resets = resource_size(res) * BITS_PER_BYTE; + data->rcdev.ops = &stm32_reset_ops; + data->rcdev.of_node = dev->of_node; + + return devm_reset_controller_register(dev, &data->rcdev); +} + +static struct platform_driver stm32_reset_driver = { + .probe = stm32_reset_probe, + .driver = { + .name = "stm32mp1-reset", + .of_match_table = stm32_reset_dt_ids, + }, +}; + +builtin_platform_driver(stm32_reset_driver); -- cgit v1.2.3 From 5573fe85c7e6f94613a357bfcbcec2b57492a8ed Mon Sep 17 00:00:00 2001 From: Kunihiko Hayashi Date: Fri, 23 Mar 2018 14:12:34 +0900 Subject: reset: uniphier: add ethernet reset control support for PXs3 Add reset lines for ethernet controller on PXs3 SoC. 
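For completeness, a consumer-side sketch (the device node, phandle and index are illustrative, not taken from this patch):

	/* DT: resets = <&sys_rst 6>;  -> Ether0 */
	priv->rst = devm_reset_control_get_shared(dev, NULL);
	if (IS_ERR(priv->rst))
		return PTR_ERR(priv->rst);
	reset_control_deassert(priv->rst);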
Signed-off-by: Kunihiko Hayashi Acked-by: Masahiro Yamada Signed-off-by: Philipp Zabel --- drivers/reset/reset-uniphier.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c index 0fa23129d343..360e06b20c53 100644 --- a/drivers/reset/reset-uniphier.c +++ b/drivers/reset/reset-uniphier.c @@ -124,6 +124,8 @@ static const struct uniphier_reset_data uniphier_ld20_sys_reset_data[] = { static const struct uniphier_reset_data uniphier_pxs3_sys_reset_data[] = { UNIPHIER_RESETX(2, 0x200c, 0), /* NAND */ UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */ + UNIPHIER_RESETX(6, 0x200c, 9), /* Ether0 */ + UNIPHIER_RESETX(7, 0x200c, 10), /* Ether1 */ UNIPHIER_RESETX(8, 0x200c, 12), /* STDMAC */ UNIPHIER_RESETX(12, 0x200c, 4), /* USB30 link (GIO0) */ UNIPHIER_RESETX(13, 0x200c, 5), /* USB31 link (GIO1) */ -- cgit v1.2.3