summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAndy Hu <andy.hu@starfivetech.com>2024-01-05 10:24:19 +0300
committerAndy Hu <andy.hu@starfivetech.com>2024-01-05 10:24:19 +0300
commit0327819b586a701ae4d9d865cc9dcf30ee369b34 (patch)
treed252d7cc2bf7a4a20b431e9a72441e31a633fa56
parentf674c23c3266542cb68a58d15e9ee97b30a6f6f3 (diff)
parentfa86f41550a08f2c087b488fdedba4f64e0a3d16 (diff)
downloadlinux-0327819b586a701ae4d9d865cc9dcf30ee369b34.tar.xz
Merge tag 'JH7110_SDK_6.1_v5.10.4' into vf2-6.1.y-devel
-rw-r--r--MAINTAINERS6
-rwxr-xr-x[-rw-r--r--]arch/riscv/boot/dts/starfive/jh7110.dtsi0
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c114
-rw-r--r--drivers/gpu/drm/verisilicon/vs_crtc.c9
-rw-r--r--drivers/gpu/drm/verisilicon/vs_crtc.h2
-rw-r--r--drivers/gpu/drm/verisilicon/vs_dc.c65
-rw-r--r--drivers/gpu/drm/verisilicon/vs_dc_hw.c61
-rw-r--r--drivers/gpu/drm/verisilicon/vs_dc_hw.h2
-rw-r--r--drivers/perf/Kconfig10
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/sifive_u74_l2_pmu.c532
-rw-r--r--drivers/soc/sifive/sifive_ccache.c13
-rw-r--r--drivers/soc/sifive/sifive_pl2.h14
13 files changed, 679 insertions, 150 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 88fb5f651629..06b39f33ee7c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -22857,6 +22857,12 @@ L: linux-mm@kvack.org
S: Maintained
F: mm/zswap.c
+SIFIVE L2 CACHE PMU
+M: Minda.chen <minda.chen@starfivetech.com>
+S: Maintained
+F: drivers/perf/sifive_u74_l2_pmu.c
+F: drivers/soc/sifive/sifive_pl2.h
+
THE REST
M: Linus Torvalds <torvalds@linux-foundation.org>
L: linux-kernel@vger.kernel.org
diff --git a/arch/riscv/boot/dts/starfive/jh7110.dtsi b/arch/riscv/boot/dts/starfive/jh7110.dtsi
index 19928efe3b1a..19928efe3b1a 100644..100755
--- a/arch/riscv/boot/dts/starfive/jh7110.dtsi
+++ b/arch/riscv/boot/dts/starfive/jh7110.dtsi
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 02b4a7dc92f5..342b451cd77c 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1134,6 +1134,42 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i;
+ for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+ const struct drm_crtc_helper_funcs *funcs;
+ int ret;
+
+ /* Shut down everything that needs a full modeset. */
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ if (!crtc_needs_disable(old_crtc_state, new_crtc_state))
+ continue;
+
+ funcs = crtc->helper_private;
+
+ drm_dbg_atomic(dev, "disabling [CRTC:%d:%s]\n",
+ crtc->base.id, crtc->name);
+
+
+ /* Right function depends upon target state. */
+ if (new_crtc_state->enable && funcs->prepare)
+ funcs->prepare(crtc);
+ else if (funcs->atomic_disable)
+ funcs->atomic_disable(crtc, old_state);
+ else if (funcs->disable)
+ funcs->disable(crtc);
+ else if (funcs->dpms)
+ funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ if (!drm_dev_has_vblank(dev))
+ continue;
+
+ ret = drm_crtc_vblank_get(crtc);
+ WARN_ONCE(ret != -EINVAL, "driver forgot to call drm_crtc_vblank_off()\n");
+ if (ret == 0)
+ drm_crtc_vblank_put(crtc);
+ }
+
for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
@@ -1194,41 +1230,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
drm_atomic_bridge_chain_post_disable(bridge, old_state);
}
- for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
- const struct drm_crtc_helper_funcs *funcs;
- int ret;
-
- /* Shut down everything that needs a full modeset. */
- if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
- continue;
-
- if (!crtc_needs_disable(old_crtc_state, new_crtc_state))
- continue;
-
- funcs = crtc->helper_private;
-
- drm_dbg_atomic(dev, "disabling [CRTC:%d:%s]\n",
- crtc->base.id, crtc->name);
-
-
- /* Right function depends upon target state. */
- if (new_crtc_state->enable && funcs->prepare)
- funcs->prepare(crtc);
- else if (funcs->atomic_disable)
- funcs->atomic_disable(crtc, old_state);
- else if (funcs->disable)
- funcs->disable(crtc);
- else if (funcs->dpms)
- funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-
- if (!drm_dev_has_vblank(dev))
- continue;
- ret = drm_crtc_vblank_get(crtc);
- WARN_ONCE(ret != -EINVAL, "driver forgot to call drm_crtc_vblank_off()\n");
- if (ret == 0)
- drm_crtc_vblank_put(crtc);
- }
}
/**
@@ -1464,27 +1466,7 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
struct drm_connector_state *new_conn_state;
int i;
- for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
- const struct drm_crtc_helper_funcs *funcs;
-
- /* Need to filter out CRTCs where only planes change. */
- if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
- continue;
-
- if (!new_crtc_state->active)
- continue;
-
- funcs = crtc->helper_private;
- if (new_crtc_state->enable) {
- drm_dbg_atomic(dev, "enabling [CRTC:%d:%s]\n",
- crtc->base.id, crtc->name);
- if (funcs->atomic_enable)
- funcs->atomic_enable(crtc, old_state);
- else if (funcs->commit)
- funcs->commit(crtc);
- }
- }
for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
@@ -1523,6 +1505,28 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
drm_atomic_bridge_chain_enable(bridge, old_state);
}
+ for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+ const struct drm_crtc_helper_funcs *funcs;
+
+ /* Need to filter out CRTCs where only planes change. */
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ if (!new_crtc_state->active)
+ continue;
+
+ funcs = crtc->helper_private;
+
+ if (new_crtc_state->enable) {
+ drm_dbg_atomic(dev, "enabling [CRTC:%d:%s]\n",
+ crtc->base.id, crtc->name);
+ if (funcs->atomic_enable)
+ funcs->atomic_enable(crtc, old_state);
+ else if (funcs->commit)
+ funcs->commit(crtc);
+ }
+ }
+
drm_atomic_helper_commit_writebacks(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
@@ -1629,7 +1633,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
ret = wait_event_timeout(dev->vblank[i].queue,
old_state->crtcs[i].last_vblank_count !=
drm_crtc_vblank_count(crtc),
- msecs_to_jiffies(100));
+ msecs_to_jiffies(500));
WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
crtc->base.id, crtc->name);
diff --git a/drivers/gpu/drm/verisilicon/vs_crtc.c b/drivers/gpu/drm/verisilicon/vs_crtc.c
index 98f952a72e2e..cb9c0c3776f4 100644
--- a/drivers/gpu/drm/verisilicon/vs_crtc.c
+++ b/drivers/gpu/drm/verisilicon/vs_crtc.c
@@ -192,8 +192,10 @@ static int vs_crtc_late_register(struct drm_crtc *crtc)
static int vs_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct vs_crtc *vs_crtc = to_vs_crtc(crtc);
+ u32 ctrc_mask = 0;
- vs_crtc->funcs->enable_vblank(vs_crtc->dev, true);
+ ctrc_mask = drm_crtc_mask(crtc);
+ vs_crtc->funcs->enable_vblank(vs_crtc->dev, true, ctrc_mask);
return 0;
}
@@ -201,8 +203,9 @@ static int vs_crtc_enable_vblank(struct drm_crtc *crtc)
static void vs_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct vs_crtc *vs_crtc = to_vs_crtc(crtc);
-
- vs_crtc->funcs->enable_vblank(vs_crtc->dev, false);
+ u32 ctrc_mask = 0;
+ ctrc_mask = drm_crtc_mask(crtc);
+ vs_crtc->funcs->enable_vblank(vs_crtc->dev, false, ctrc_mask);
}
static const struct drm_crtc_funcs vs_crtc_funcs = {
diff --git a/drivers/gpu/drm/verisilicon/vs_crtc.h b/drivers/gpu/drm/verisilicon/vs_crtc.h
index a506ad00fb76..9292a4a060a6 100644
--- a/drivers/gpu/drm/verisilicon/vs_crtc.h
+++ b/drivers/gpu/drm/verisilicon/vs_crtc.h
@@ -21,7 +21,7 @@ struct vs_crtc_funcs {
struct drm_color_lut *lut, unsigned int size);
void (*enable_gamma)(struct device *dev, struct drm_crtc *crtc,
bool enable);
- void (*enable_vblank)(struct device *dev, bool enable);
+ void (*enable_vblank)(struct device *dev, bool enable, u32 ctrc_mask);
void (*commit)(struct device *dev);
};
diff --git a/drivers/gpu/drm/verisilicon/vs_dc.c b/drivers/gpu/drm/verisilicon/vs_dc.c
index 99db76c349cd..2fce762256c9 100644
--- a/drivers/gpu/drm/verisilicon/vs_dc.c
+++ b/drivers/gpu/drm/verisilicon/vs_dc.c
@@ -267,47 +267,6 @@ static inline u8 to_vs_display_id(struct vs_dc *dc, struct drm_crtc *crtc)
return 0;
}
-#if 0
-static int plda_clk_rst_init(struct device *dev)
-{
- int ret;
- struct vs_dc *dc = dev_get_drvdata(dev);
-
- dc->num_clks = devm_clk_bulk_get_all(dev, &dc->clks);
- if (dc->num_clks < 0) {
- dev_err(dev, "failed to get vout clocks\n");
- ret = -ENODEV;
- goto exit;
- }
- ret = clk_bulk_prepare_enable(dc->num_clks, dc->clks);
- if (ret) {
- dev_err(dev, "failed to enable clocks\n");
- goto exit;
- }
-
- dc->resets = devm_reset_control_array_get_exclusive(dev);
- if (IS_ERR(dc->resets)) {
- ret = PTR_ERR(dc->resets);
- dev_err(dev, "failed to get pcie resets");
- goto err_clk_init;
- }
- ret = reset_control_deassert(dc->resets);
- goto exit;
-
-err_clk_init:
- clk_bulk_disable_unprepare(dc->num_clks, dc->clks);
-exit:
- return ret;
-}
-
-static void plda_clk_rst_deinit(struct device *dev)
-{
- struct vs_dc *dc = dev_get_drvdata(dev);
-
- reset_control_assert(dc->resets);
- clk_bulk_disable_unprepare(dc->num_clks, dc->clks);
-}
-#endif
static int vs_dc_get_clock(struct device *dev, struct vs_dc *dc)
{
@@ -473,14 +432,6 @@ static void vs_vout_reset_deassert(struct vs_dc *dc)
//reset_control_deassert(dc->noc_disp);//ok
}
-/*
-static void vs_vout_reset_assert(struct vs_dc *dc)
-{
- reset_control_assert(dc->rst_vout_src);//no!
- reset_control_assert(dc->noc_disp);//ok
-}
-*/
-
static void vs_dc8200_reset_get(struct device *dev, struct vs_dc *dc)
{
dc->dc8200_rst_axi = reset_control_get_shared(dev, "rst_axi");
@@ -635,7 +586,7 @@ static void dc_deinit(struct device *dev)
{
struct vs_dc *dc = dev_get_drvdata(dev);
int ret;
- dc_hw_enable_interrupt(&dc->hw, 0);
+ dc_hw_enable_interrupt(&dc->hw, 0, 0);
dc_hw_deinit(&dc->hw);
vs_dc_dc8200_clock_disable(dc);
vs_dc_vouttop_clock_disable(dc);
@@ -920,11 +871,11 @@ static void vs_dc_enable_gamma(struct device *dev, struct drm_crtc *crtc,
dc_hw_enable_gamma(&dc->hw, id, enable);
}
-static void vs_dc_enable_vblank(struct device *dev, bool enable)
+static void vs_dc_enable_vblank(struct device *dev, bool enable, u32 ctrc_mask)
{
struct vs_dc *dc = dev_get_drvdata(dev);
- dc_hw_enable_interrupt(&dc->hw, enable);
+ dc_hw_enable_interrupt(&dc->hw, enable, ctrc_mask);
}
static u32 calc_factor(u32 src, u32 dest)
@@ -1008,11 +959,11 @@ static void update_fb(struct vs_plane *plane, u8 display_id,
update_swizzle(drm_fb->format->format, fb);
update_watermark(plane_state->watermark, fb);
- sifive_l2_flush64_range(fb->y_address, fb->height * fb->y_stride);
- if (fb->u_address)
- sifive_l2_flush64_range(fb->u_address, fb->height * fb->u_stride);
- if (fb->v_address)
- sifive_l2_flush64_range(fb->v_address, fb->height * fb->v_stride);
+ sifive_l2_flush64_range(fb->y_address, fb->height * fb->y_stride);
+ if (fb->u_address)
+ sifive_l2_flush64_range(fb->u_address, fb->height * fb->u_stride);
+ if (fb->v_address)
+ sifive_l2_flush64_range(fb->v_address, fb->height * fb->v_stride);
plane_state->status.tile_mode = fb->tile_mode;
}
diff --git a/drivers/gpu/drm/verisilicon/vs_dc_hw.c b/drivers/gpu/drm/verisilicon/vs_dc_hw.c
index 5ebc8abd6039..72203d7c8f32 100644
--- a/drivers/gpu/drm/verisilicon/vs_dc_hw.c
+++ b/drivers/gpu/drm/verisilicon/vs_dc_hw.c
@@ -1163,6 +1163,16 @@ static inline void hi_write(struct dc_hw *hw, u32 reg, u32 value)
writel(value, hw->hi_base + reg);
}
+static inline void dc_high_set_clear(struct dc_hw *hw, u32 reg, u32 set, u32 clear)
+{
+ u32 value = hi_read(hw, reg);
+
+ value &= ~clear;
+ value |= set;
+ hi_write(hw, reg, value);
+}
+
+
static inline void dc_write(struct dc_hw *hw, u32 reg, u32 value)
{
writel(value, hw->reg_base + reg - DC_REG_BASE);
@@ -1488,16 +1498,12 @@ void dc_hw_setup_display(struct dc_hw *hw, struct dc_hw_display *display)
hw->func->display(hw, display);
}
-void dc_hw_enable_interrupt(struct dc_hw *hw, bool enable)
+void dc_hw_enable_interrupt(struct dc_hw *hw, bool enable, u32 ctrc_mask)
{
- if (enable) {
- if (hw->out[1] == OUT_DPI)
- dc_set_clear(hw, DC_DISPLAY_PANEL_START, BIT(1), BIT(3));
-
- hi_write(hw, AQ_INTR_ENBL, 0xFFFFFFFF);
- } else {
- ;//hi_write(hw, AQ_INTR_ENBL, 0);
- }
+ if (enable)
+ dc_high_set_clear(hw, AQ_INTR_ENBL, ctrc_mask, 0);
+ else
+ dc_high_set_clear(hw, AQ_INTR_ENBL, 0, ctrc_mask);
}
u32 dc_hw_get_interrupt(struct dc_hw *hw)
@@ -1945,29 +1951,20 @@ static void setup_display(struct dc_hw *hw, struct dc_hw_display *display)
dc_write(hw, DC_DISPLAY_DITHER_CONFIG + offset, 0);
}
- dc_set_clear(hw, DC_DISPLAY_PANEL_CONFIG + offset, BIT(12), 0);
- if (hw->display[id].sync_enable)
- dc_set_clear(hw, DC_DISPLAY_PANEL_START, BIT(2) | BIT(3), 0);
- else if (id == 0)
- dc_set_clear(hw, DC_DISPLAY_PANEL_START, BIT(0), BIT(3));
- else
- if (hw->out[id] != OUT_DPI)
- dc_set_clear(hw, DC_DISPLAY_PANEL_START, BIT(1), BIT(3));
- } else {
- dc_set_clear(hw, DC_DISPLAY_PANEL_CONFIG + offset, 0, BIT(12));
- if (id == 0)
- dc_set_clear(hw, DC_DISPLAY_PANEL_START, 0, BIT(0) | BIT(2));
- else
- dc_set_clear(hw, DC_DISPLAY_PANEL_START, 0, BIT(1) | BIT(2));
-
- dc_set_clear(hw, DC_OVERLAY_CONFIG + 0x0, 0x0, BIT(24));
- dc_set_clear(hw, DC_OVERLAY_CONFIG + 0x4, 0x0, BIT(24));
- dc_set_clear(hw, DC_OVERLAY_CONFIG + 0x8, 0x0, BIT(24));
- dc_set_clear(hw, DC_OVERLAY_CONFIG + 0xc, 0x0, BIT(24));
-
- dc_set_clear(hw, DC_CURSOR_CONFIG + 0x0, BIT(3), 0x03);
- dc_set_clear(hw, DC_CURSOR_CONFIG + DC_CURSOR_OFFSET, BIT(3), 0x03);
- }
+ dc_set_clear(hw, DC_DISPLAY_PANEL_CONFIG + offset, BIT(12), 0);
+ if (hw->display[id].sync_enable)
+ dc_set_clear(hw, DC_DISPLAY_PANEL_START, BIT(2) | BIT(3), 0);
+ else if (id == 0)
+ dc_set_clear(hw, DC_DISPLAY_PANEL_START, BIT(0), BIT(3));
+ else
+ dc_set_clear(hw, DC_DISPLAY_PANEL_START, BIT(1), BIT(3));
+ } else {
+ dc_set_clear(hw, DC_DISPLAY_PANEL_CONFIG + offset, 0, BIT(12));
+ if (id == 0)
+ dc_set_clear(hw, DC_DISPLAY_PANEL_START, 0, BIT(0) | BIT(2));
+ else
+ dc_set_clear(hw, DC_DISPLAY_PANEL_START, 0, BIT(1) | BIT(2));
+ }
}
static void setup_display_ex(struct dc_hw *hw, struct dc_hw_display *display)
diff --git a/drivers/gpu/drm/verisilicon/vs_dc_hw.h b/drivers/gpu/drm/verisilicon/vs_dc_hw.h
index c6f467355597..7bc1c385eab3 100644
--- a/drivers/gpu/drm/verisilicon/vs_dc_hw.h
+++ b/drivers/gpu/drm/verisilicon/vs_dc_hw.h
@@ -558,7 +558,7 @@ void dc_hw_enable_gamma(struct dc_hw *hw, u8 id, bool enable);
void dc_hw_enable_dump(struct dc_hw *hw, u32 addr, u32 pitch);
void dc_hw_disable_dump(struct dc_hw *hw);
void dc_hw_setup_display(struct dc_hw *hw, struct dc_hw_display *display);
-void dc_hw_enable_interrupt(struct dc_hw *hw, bool enable);
+void dc_hw_enable_interrupt(struct dc_hw *hw, bool enable, u32 ctrc_mask);
u32 dc_hw_get_interrupt(struct dc_hw *hw);
bool dc_hw_check_underflow(struct dc_hw *hw);
void dc_hw_enable_shadow_register(struct dc_hw *hw, bool enable);
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 341010f20b77..ce11a126701f 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -199,4 +199,14 @@ config MARVELL_CN10K_DDR_PMU
Enable perf support for Marvell DDR Performance monitoring
event on CN10K platform.
+config SIFIVE_U74_L2_PMU
+ bool "Sifive U74 L2 Cache PMU"
+ depends on SIFIVE_CCACHE && PERF_EVENTS
+ default y
+ help
+ Support for the private L2 cache controller performance monitor unit
+	  (PMU) on SiFive platforms. The SiFive private L2 PMU can monitor
+	  each hart's L2 cache performance, and it consists of a set of event-
+ programmable counters and their event selector registers.
+
endmenu
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 050d04ee19dd..67c476588954 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -21,3 +21,4 @@ obj-$(CONFIG_MARVELL_CN10K_TAD_PMU) += marvell_cn10k_tad_pmu.o
obj-$(CONFIG_MARVELL_CN10K_DDR_PMU) += marvell_cn10k_ddr_pmu.o
obj-$(CONFIG_APPLE_M1_CPU_PMU) += apple_m1_cpu_pmu.o
obj-$(CONFIG_ALIBABA_UNCORE_DRW_PMU) += alibaba_uncore_drw_pmu.o
+obj-$(CONFIG_SIFIVE_U74_L2_PMU) += sifive_u74_l2_pmu.o
diff --git a/drivers/perf/sifive_u74_l2_pmu.c b/drivers/perf/sifive_u74_l2_pmu.c
new file mode 100644
index 000000000000..2ac6ae889243
--- /dev/null
+++ b/drivers/perf/sifive_u74_l2_pmu.c
@@ -0,0 +1,532 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SiFive private L2 cache controller PMU Driver
+ *
+ * Copyright (C) 2018-2023 SiFive, Inc.
+ */
+
+#define pr_fmt(fmt) "pL2CACHE_PMU: " fmt
+
+#include <linux/kprobes.h>
+#include <linux/kernel.h>
+#include <linux/kdebug.h>
+#include <linux/mutex.h>
+#include <linux/bitmap.h>
+#include <linux/perf_event.h>
+#include <linux/atomic.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/cpu_pm.h>
+
+#define SIFIVE_PL2_PMU_MAX_COUNTERS 64
+#define SIFIVE_PL2_SELECT_BASE_OFFSET 0x2000
+#define SIFIVE_PL2_COUNTER_BASE_OFFSET 0x3000
+
+#define SIFIVE_PL2_COUNTER_MASK GENMASK_ULL(63, 0)
+
+struct sifive_u74_l2_pmu_event {
+ struct perf_event **events;
+ void __iomem *event_counter_base;
+ void __iomem *event_select_base;
+ u32 counters;
+ DECLARE_BITMAP(used_mask, SIFIVE_PL2_PMU_MAX_COUNTERS);
+};
+
+struct sifive_u74_l2_pmu {
+ struct pmu *pmu;
+};
+
+static bool pl2pmu_init_done;
+static struct sifive_u74_l2_pmu sifive_u74_l2_pmu;
+static DEFINE_PER_CPU(struct sifive_u74_l2_pmu_event, sifive_u74_l2_pmu_event);
+
+#ifndef readq
+static inline unsigned long long readq(void __iomem *addr)
+{
+ return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(unsigned long long v, void __iomem *addr)
+{
+ writel(lower_32_bits(v), addr);
+ writel(upper_32_bits(v), addr + 4);
+}
+#endif
+
+/* formats */
+static ssize_t sifive_u74_l2_pmu_format_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ return sysfs_emit(buf, "%s\n", (char *)eattr->var);
+}
+
+#define SIFIVE_PL2_PMU_PMU_FORMAT_ATTR(_name, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { .attr = __ATTR(_name, 0444, sifive_u74_l2_pmu_format_show, NULL),\
+ .var = (void *)_config, } \
+ })[0].attr.attr)
+
+static struct attribute *sifive_u74_l2_pmu_formats[] = {
+ SIFIVE_PL2_PMU_PMU_FORMAT_ATTR(event, "config:0-63"),
+ NULL,
+};
+
+static struct attribute_group sifive_u74_l2_pmu_format_group = {
+ .name = "format",
+ .attrs = sifive_u74_l2_pmu_formats,
+};
+
+/* events */
+
+static ssize_t sifive_u74_l2_pmu_event_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+#define SET_EVENT_SELECT(_event, _set) (((u64)1 << ((_event) + 8)) | (_set))
+#define PL2_PMU_EVENT_ATTR(_name, _event, _set) \
+ PMU_EVENT_ATTR_ID(_name, sifive_u74_l2_pmu_event_show, \
+ SET_EVENT_SELECT(_event, _set))
+
+enum pl2_pmu_event_set1 {
+ INNER_PUT_FULL_DATA = 0,
+ INNER_PUT_PARTIAL_DATA,
+ INNER_ARITHMETIC_DATA,
+ INNER_GET,
+ INNER_PREFETCH_READ,
+ INNER_PREFETCH_WRITE,
+ INNER_ACQUIRE_BLOCK_NTOB,
+ INNER_ACQUIRE_BLOCK_NTOT,
+ INNER_ACQUIRE_BLOCK_BTOT,
+ INNER_ACQUIRE_PERM_NTOT,
+ INNER_ACQUIRE_PERM_BTOT,
+ INNER_RELEASE_TTOB,
+ INNER_RELEASE_TTON,
+ INNER_RELEASE_BTON,
+ INNER_RELEASE_DATA_TTOB,
+ INNER_RELEASE_DATA_TTON,
+ INNER_RELEASE_DATA_BTON,
+ OUTER_PROBE_BLOCK_TOT,
+ OUTER_PROBE_BLOCK_TOB,
+ OUTER_PROBE_BLOCK_TON,
+ PL2_PMU_MAX_EVENT1_IDX
+};
+
+enum pl2_pmu_event_set2 {
+ INNER_PUT_FULL_DATA_HIT = 0,
+ INNER_PUT_PARTIAL_DATA_HIT,
+ INNER_ARITHMETIC_DATA_HIT,
+ INNER_GET_HIT,
+ INNER_PREFETCH_READ_HIT,
+ INNER_ACQUIRE_BLOCK_NTOB_HIT,
+ INNER_ACQUIRE_PERM_NTOT_HIT,
+ INNER_RELEASE_TTOB_HIT,
+ INNER_RELEASE_DATA_TTOB_HIT,
+ OUTER_PROBE_BLOCK_TOT_HIT,
+ INNER_PUT_FULL_DATA_HIT_SHARED,
+ INNER_PUT_PARTIAL_DATA_HIT_SHARED,
+ INNER_ARITHMETIC_DATA_HIT_SHARED,
+ INNER_GET_HIT_SHARED,
+ INNER_PREFETCH_READ_HIT_SHARED,
+ INNER_ACQUIRE_BLOCK_HIT_SHARED,
+ INNER_ACQUIRE_PERM_NTOT_HIT_SHARED,
+ OUTER_PROBE_BLOCK_TOT_HIT_SHARED,
+ OUTER_PROBE_BLOCK_TOT_HIT_DIRTY,
+ PL2_PMU_MAX_EVENT2_IDX
+};
+
+enum pl2_pmu_event_set3 {
+ OUTER_ACQUIRE_BLOCK_NTOB,
+ OUTER_ACQUIRE_BLOCK_NTOT,
+ OUTER_ACQUIRE_BLOCK_BTOT,
+ OUTER_ACQUIRE_PERM_NTOT,
+ OUTER_ACQUIRE_PERM_BTOT,
+ OUTER_RELEARE_TTOB,
+ OUTER_RELEARE_TTON,
+ OUTER_RELEARE_BTON,
+ OUTER_RELEARE_DATA_TTOB,
+ OUTER_RELEARE_DATA_TTON,
+ OUTER_RELEARE_DATA_BTON,
+ INNER_PROBE_BLOCK_TOT,
+ INNER_PROBE_BLOCK_TOB,
+ INNER_PROBE_BLOCK_TON,
+ PL2_PMU_MAX_EVENT3_IDX
+};
+
+enum pl2_pmu_event_set4 {
+ INNER_HINT_HITS_MSHR = 0,
+ PL2_PMU_MAX_EVENT4_IDX
+};
+
+static struct attribute *sifive_u74_l2_pmu_events[] = {
+ PL2_PMU_EVENT_ATTR(inner_put_full_data, INNER_PUT_FULL_DATA, 1),
+ PL2_PMU_EVENT_ATTR(inner_put_partial_data, INNER_PUT_PARTIAL_DATA, 1),
+ PL2_PMU_EVENT_ATTR(inner_arithmetic_data, INNER_ARITHMETIC_DATA, 1),
+ PL2_PMU_EVENT_ATTR(inner_get, INNER_GET, 1),
+ PL2_PMU_EVENT_ATTR(inner_prefetch_read, INNER_PREFETCH_READ, 1),
+ PL2_PMU_EVENT_ATTR(inner_prefetch_write, INNER_PREFETCH_WRITE, 1),
+ PL2_PMU_EVENT_ATTR(inner_acquire_block_ntob, INNER_ACQUIRE_BLOCK_NTOB, 1),
+ PL2_PMU_EVENT_ATTR(inner_acquire_block_ntot, INNER_ACQUIRE_BLOCK_NTOT, 1),
+ PL2_PMU_EVENT_ATTR(inner_acquire_block_btot, INNER_ACQUIRE_BLOCK_BTOT, 1),
+ PL2_PMU_EVENT_ATTR(inner_acquire_perm_ntot, INNER_ACQUIRE_PERM_NTOT, 1),
+ PL2_PMU_EVENT_ATTR(inner_acquire_perm_btot, INNER_ACQUIRE_PERM_BTOT, 1),
+ PL2_PMU_EVENT_ATTR(inner_release_ttob, INNER_RELEASE_TTOB, 1),
+ PL2_PMU_EVENT_ATTR(inner_release_tton, INNER_RELEASE_TTON, 1),
+ PL2_PMU_EVENT_ATTR(inner_release_bton, INNER_RELEASE_BTON, 1),
+ PL2_PMU_EVENT_ATTR(inner_release_data_ttob, INNER_RELEASE_DATA_TTOB, 1),
+ PL2_PMU_EVENT_ATTR(inner_release_data_tton, INNER_RELEASE_DATA_TTON, 1),
+ PL2_PMU_EVENT_ATTR(inner_release_data_bton, INNER_RELEASE_DATA_BTON, 1),
+ PL2_PMU_EVENT_ATTR(outer_probe_block_tot, OUTER_PROBE_BLOCK_TOT, 1),
+ PL2_PMU_EVENT_ATTR(outer_probe_block_tob, OUTER_PROBE_BLOCK_TOB, 1),
+ PL2_PMU_EVENT_ATTR(outer_probe_block_ton, OUTER_PROBE_BLOCK_TON, 1),
+
+ PL2_PMU_EVENT_ATTR(inner_put_full_data_hit, INNER_PUT_FULL_DATA_HIT, 2),
+ PL2_PMU_EVENT_ATTR(inner_put_partial_data_hit, INNER_PUT_PARTIAL_DATA_HIT, 2),
+ PL2_PMU_EVENT_ATTR(inner_arithmetic_data_hit, INNER_ARITHMETIC_DATA_HIT, 2),
+ PL2_PMU_EVENT_ATTR(inner_get_hit, INNER_GET_HIT, 2),
+ PL2_PMU_EVENT_ATTR(inner_prefetch_read_hit, INNER_PREFETCH_READ_HIT, 2),
+ PL2_PMU_EVENT_ATTR(inner_acquire_block_ntob_hit, INNER_ACQUIRE_BLOCK_NTOB_HIT, 2),
+ PL2_PMU_EVENT_ATTR(inner_acquire_perm_ntot_hit, INNER_ACQUIRE_PERM_NTOT_HIT, 2),
+ PL2_PMU_EVENT_ATTR(inner_release_ttob_hit, INNER_RELEASE_TTOB_HIT, 2),
+ PL2_PMU_EVENT_ATTR(inner_release_data_ttob_hit, INNER_RELEASE_DATA_TTOB_HIT, 2),
+ PL2_PMU_EVENT_ATTR(outer_probe_block_tot_hit, OUTER_PROBE_BLOCK_TOT_HIT, 2),
+ PL2_PMU_EVENT_ATTR(inner_put_full_data_hit_shared, INNER_PUT_FULL_DATA_HIT_SHARED, 2),
+ PL2_PMU_EVENT_ATTR(inner_put_partial_data_hit_shared, INNER_PUT_PARTIAL_DATA_HIT_SHARED, 2),
+ PL2_PMU_EVENT_ATTR(inner_arithmetic_data_hit_shared, INNER_ARITHMETIC_DATA_HIT_SHARED, 2),
+ PL2_PMU_EVENT_ATTR(inner_get_hit_shared, INNER_GET_HIT_SHARED, 2),
+ PL2_PMU_EVENT_ATTR(inner_prefetch_read_hit_shared, INNER_PREFETCH_READ_HIT_SHARED, 2),
+ PL2_PMU_EVENT_ATTR(inner_acquire_block_hit_shared, INNER_ACQUIRE_BLOCK_HIT_SHARED, 2),
+ PL2_PMU_EVENT_ATTR(inner_acquire_perm_hit_shared, INNER_ACQUIRE_PERM_NTOT_HIT_SHARED, 2),
+ PL2_PMU_EVENT_ATTR(outer_probe_block_tot_hit_shared, OUTER_PROBE_BLOCK_TOT_HIT_SHARED, 2),
+ PL2_PMU_EVENT_ATTR(outer_probe_block_tot_hit_dirty, OUTER_PROBE_BLOCK_TOT_HIT_DIRTY, 2),
+
+ PL2_PMU_EVENT_ATTR(outer_acquire_block_ntob, OUTER_ACQUIRE_BLOCK_NTOB, 3),
+ PL2_PMU_EVENT_ATTR(outer_acquire_block_ntot, OUTER_ACQUIRE_BLOCK_NTOT, 3),
+ PL2_PMU_EVENT_ATTR(outer_acquire_block_btot, OUTER_ACQUIRE_BLOCK_BTOT, 3),
+ PL2_PMU_EVENT_ATTR(outer_acquire_perm_ntot, OUTER_ACQUIRE_PERM_NTOT, 3),
+ PL2_PMU_EVENT_ATTR(outer_acquire_perm_btot, OUTER_ACQUIRE_PERM_BTOT, 3),
+ PL2_PMU_EVENT_ATTR(outer_release_ttob, OUTER_RELEARE_TTOB, 3),
+ PL2_PMU_EVENT_ATTR(outer_release_tton, OUTER_RELEARE_TTON, 3),
+ PL2_PMU_EVENT_ATTR(outer_release_bton, OUTER_RELEARE_BTON, 3),
+ PL2_PMU_EVENT_ATTR(outer_release_data_ttob, OUTER_RELEARE_DATA_TTOB, 3),
+ PL2_PMU_EVENT_ATTR(outer_release_data_tton, OUTER_RELEARE_DATA_TTON, 3),
+ PL2_PMU_EVENT_ATTR(outer_release_data_bton, OUTER_RELEARE_DATA_BTON, 3),
+ PL2_PMU_EVENT_ATTR(inner_probe_block_tot, INNER_PROBE_BLOCK_TOT, 3),
+ PL2_PMU_EVENT_ATTR(inner_probe_block_tob, INNER_PROBE_BLOCK_TOB, 3),
+ PL2_PMU_EVENT_ATTR(inner_probe_block_ton, INNER_PROBE_BLOCK_TON, 3),
+
+ PL2_PMU_EVENT_ATTR(inner_hint_hits_mshr, INNER_HINT_HITS_MSHR, 4),
+ NULL
+};
+
+static struct attribute_group sifive_u74_l2_pmu_events_group = {
+ .name = "events",
+ .attrs = sifive_u74_l2_pmu_events,
+};
+
+/*
+ * Per PMU device attribute groups
+ */
+
+static const struct attribute_group *sifive_u74_l2_pmu_attr_grps[] = {
+ &sifive_u74_l2_pmu_format_group,
+ &sifive_u74_l2_pmu_events_group,
+ NULL,
+};
+
+/*
+ * Low-level functions: reading and writing counters
+ */
+
+static inline u64 read_counter(int idx)
+{
+ struct sifive_u74_l2_pmu_event *ptr = this_cpu_ptr(&sifive_u74_l2_pmu_event);
+
+ if (WARN_ON_ONCE(idx < 0 || idx > ptr->counters))
+ return -EINVAL;
+
+ return readq(ptr->event_counter_base + idx * 8);
+}
+
+static inline void write_counter(int idx, u64 val)
+{
+ struct sifive_u74_l2_pmu_event *ptr = this_cpu_ptr(&sifive_u74_l2_pmu_event);
+
+ writeq(val, ptr->event_counter_base + idx * 8);
+}
+
+/*
+ * pmu->read: read and update the counter
+ */
+static void sifive_u74_l2_pmu_read(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 prev_raw_count, new_raw_count;
+ u64 oldval;
+ int idx = hwc->idx;
+ u64 delta;
+
+ do {
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = read_counter(idx);
+
+ oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count);
+ } while (oldval != prev_raw_count);
+
+ /* delta is the value to update the counter we maintain in the kernel. */
+ delta = (new_raw_count - prev_raw_count) & SIFIVE_PL2_COUNTER_MASK;
+ local64_add(delta, &event->count);
+}
+
+/*
+ * State transition functions:
+ *
+ * stop()/start() & add()/del()
+ */
+
+/*
+ * pmu->stop: stop the counter
+ */
+static void sifive_u74_l2_pmu_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct sifive_u74_l2_pmu_event *ptr = this_cpu_ptr(&sifive_u74_l2_pmu_event);
+
+ /* Disable this counter to count events */
+ writeq(0, ptr->event_select_base + (hwc->idx * 8));
+
+ WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+ hwc->state |= PERF_HES_STOPPED;
+
+ if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+ sifive_u74_l2_pmu_read(event);
+ hwc->state |= PERF_HES_UPTODATE;
+ }
+}
+
+/*
+ * pmu->start: start the event.
+ */
+static void sifive_u74_l2_pmu_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct sifive_u74_l2_pmu_event *ptr = this_cpu_ptr(&sifive_u74_l2_pmu_event);
+
+ if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+ return;
+
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
+ hwc->state = 0;
+ perf_event_update_userpage(event);
+
+ /* Set initial value 0 */
+ local64_set(&hwc->prev_count, 0);
+ write_counter(hwc->idx, 0);
+
+ /* Enable counter to count these events */
+ writeq(hwc->config, ptr->event_select_base + (hwc->idx * 8));
+}
+
+/*
+ * pmu->add: add the event to PMU.
+ */
+static int sifive_u74_l2_pmu_add(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct sifive_u74_l2_pmu_event *ptr = this_cpu_ptr(&sifive_u74_l2_pmu_event);
+ int idx;
+ u64 config = event->attr.config;
+ u64 set = config & 0xff;
+ u64 ev_type = config >> 8;
+
+ /* Check if this is a valid set and event. */
+ switch (set) {
+ case 1:
+ if (ev_type >= (BIT_ULL(PL2_PMU_MAX_EVENT1_IDX)))
+ return -ENOENT;
+ break;
+ case 2:
+ if (ev_type >= (BIT_ULL(PL2_PMU_MAX_EVENT2_IDX)))
+ return -ENOENT;
+ break;
+ case 3:
+ if (ev_type >= (BIT_ULL(PL2_PMU_MAX_EVENT3_IDX)))
+ return -ENOENT;
+ break;
+ case 4:
+ if (ev_type >= (BIT_ULL(PL2_PMU_MAX_EVENT4_IDX)))
+ return -ENOENT;
+ break;
+ case 0:
+ default:
+ return -ENOENT;
+ }
+
+ idx = find_first_zero_bit(ptr->used_mask, ptr->counters);
+ /* The counters are all in use. */
+ if (idx == ptr->counters)
+ return -EAGAIN;
+
+ set_bit(idx, ptr->used_mask);
+
+ /* Found an available counter idx for this event. */
+ hwc->idx = idx;
+ ptr->events[hwc->idx] = event;
+
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START)
+ sifive_u74_l2_pmu_start(event, PERF_EF_RELOAD);
+
+ perf_event_update_userpage(event);
+ return 0;
+}
+
+/*
+ * pmu->del: delete the event from PMU.
+ */
+static void sifive_u74_l2_pmu_del(struct perf_event *event, int flags)
+{
+ struct sifive_u74_l2_pmu_event *ptr = this_cpu_ptr(&sifive_u74_l2_pmu_event);
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* Stop the counter and release this counter. */
+ ptr->events[hwc->idx] = NULL;
+ sifive_u74_l2_pmu_stop(event, PERF_EF_UPDATE);
+ clear_bit(hwc->idx, ptr->used_mask);
+ perf_event_update_userpage(event);
+}
+
+/*
+ * Event Initialization/Finalization
+ */
+
+static int sifive_u74_l2_pmu_event_init(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* Don't allocate hw counter yet. */
+ hwc->idx = -1;
+ hwc->config = event->attr.config;
+
+ return 0;
+}
+
+/*
+ * Initialization
+ */
+
+static struct pmu sifive_u74_l2_generic_pmu = {
+ .name = "sifive_u74_l2_pmu",
+ .task_ctx_nr = perf_sw_context,
+ .event_init = sifive_u74_l2_pmu_event_init,
+ .add = sifive_u74_l2_pmu_add,
+ .del = sifive_u74_l2_pmu_del,
+ .start = sifive_u74_l2_pmu_start,
+ .stop = sifive_u74_l2_pmu_stop,
+ .read = sifive_u74_l2_pmu_read,
+ .attr_groups = sifive_u74_l2_pmu_attr_grps,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
+};
+
+static struct sifive_u74_l2_pmu sifive_u74_l2_pmu = {
+ .pmu = &sifive_u74_l2_generic_pmu,
+};
+
+/*
+ * PM notifier for suspend to RAM
+ */
+#ifdef CONFIG_CPU_PM
+static int sifive_u74_l2_pmu_pm_notify(struct notifier_block *b, unsigned long cmd,
+ void *v)
+{
+ struct sifive_u74_l2_pmu_event *ptr = this_cpu_ptr(&sifive_u74_l2_pmu_event);
+ struct perf_event *event;
+ int idx;
+ int enabled_event = bitmap_weight(ptr->used_mask, ptr->counters);
+
+ if (!enabled_event)
+ return NOTIFY_OK;
+
+ for (idx = 0; idx < ptr->counters; idx++) {
+ event = ptr->events[idx];
+ if (!event)
+ continue;
+
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ /* Stop and update the counter */
+ sifive_u74_l2_pmu_stop(event, PERF_EF_UPDATE);
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ /* Restore and enable the counter */
+ sifive_u74_l2_pmu_start(event, PERF_EF_RELOAD);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block sifive_u74_l2_pmu_pm_notifier_block = {
+ .notifier_call = sifive_u74_l2_pmu_pm_notify,
+};
+
+static inline void sifive_u74_l2_pmu_pm_init(void)
+{
+ cpu_pm_register_notifier(&sifive_u74_l2_pmu_pm_notifier_block);
+}
+
+#else
+static inline void sifive_u74_l2_pmu_pm_init(void) { }
+#endif /* CONFIG_CPU_PM */
+
+int sifive_u74_l2_pmu_probe(struct device_node *pl2_node,
+ void __iomem *pl2_base, int cpu)
+{
+ struct sifive_u74_l2_pmu_event *ptr = per_cpu_ptr(&sifive_u74_l2_pmu_event, cpu);
+ int ret;
+
+ /* Get counter numbers. */
+ ptr->counters = SIFIVE_PL2_PMU_MAX_COUNTERS;
+ pr_info("perfmon-counters: 64 for CPU %d\n", cpu);
+
+ /* Allocate perf_event. */
+ ptr->events = kcalloc(ptr->counters, sizeof(struct perf_event), GFP_KERNEL);
+ if (!ptr->events)
+ return -ENOMEM;
+
+ ptr->event_select_base = pl2_base + SIFIVE_PL2_SELECT_BASE_OFFSET;
+ ptr->event_counter_base = pl2_base + SIFIVE_PL2_COUNTER_BASE_OFFSET;
+
+ if (!pl2pmu_init_done) {
+ ret = perf_pmu_register(sifive_u74_l2_pmu.pmu, sifive_u74_l2_pmu.pmu->name, -1);
+ if (ret) {
+ pr_err("Failed to register sifive_u74_l2_pmu.pmu: %d\n", ret);
+ return ret;
+ }
+ sifive_u74_l2_pmu_pm_init();
+ pl2pmu_init_done = true;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sifive_u74_l2_pmu_probe);
diff --git a/drivers/soc/sifive/sifive_ccache.c b/drivers/soc/sifive/sifive_ccache.c
index 42f88279366f..31e9977d24ba 100644
--- a/drivers/soc/sifive/sifive_ccache.c
+++ b/drivers/soc/sifive/sifive_ccache.c
@@ -18,6 +18,7 @@
#include <asm/cacheflush.h>
#include <asm/cacheinfo.h>
#include <asm/page.h>
+#include "sifive_pl2.h"
#include <soc/sifive/sifive_ccache.h>
#define SIFIVE_CCACHE_DIRECCFIX_LOW 0x100
@@ -263,7 +264,7 @@ static int __init sifive_ccache_init(void)
{
struct device_node *np;
struct resource res;
- int i, rc, intr_num;
+ int i, rc, intr_num, cpu;
u64 __maybe_unused offset;
np = of_find_matching_node(NULL, sifive_ccache_ids);
@@ -315,6 +316,16 @@ static int __init sifive_ccache_init(void)
ccache_config_read();
+ if (IS_ENABLED(CONFIG_SIFIVE_U74_L2_PMU)) {
+ for_each_cpu(cpu, cpu_possible_mask) {
+ rc = sifive_u74_l2_pmu_probe(np, ccache_base, cpu);
+ if (rc) {
+ pr_err("Failed to probe sifive_u74_l2_pmu driver.\n");
+ return -EINVAL;
+ }
+ }
+ }
+
ccache_cache_ops.get_priv_group = ccache_get_priv_group;
riscv_set_cacheinfo_ops(&ccache_cache_ops);
diff --git a/drivers/soc/sifive/sifive_pl2.h b/drivers/soc/sifive/sifive_pl2.h
new file mode 100644
index 000000000000..472b6a335bb1
--- /dev/null
+++ b/drivers/soc/sifive/sifive_pl2.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 SiFive, Inc.
+ *
+ */
+
+#ifndef _SIFIVE_PL2_H
+#define _SIFIVE_PL2_H
+
+#define SIFIVE_PL2_PMCLIENT_OFFSET 0x2800
+
+int sifive_u74_l2_pmu_probe(struct device_node *pl2_node,
+ void __iomem *pl2_base, int cpu);
+#endif /*_SIFIVE_PL2_H */