From fba9e07208c0f9d92d9f73761c99c8612039da44 Mon Sep 17 00:00:00 2001
From: John Stultz
Date: Wed, 11 Mar 2015 21:16:40 -0700
Subject: clocksource: Rename __clocksource_updatefreq_*() to __clocksource_update_freq_*()

Ingo requested this function be renamed to improve readability, so I've
renamed __clocksource_updatefreq_scale() as well as the
__clocksource_updatefreq_hz/khz() functions to avoid squishedtogethernames.

This touches some of the sh clocksources, which I've not tested. The
arch/arm/plat-omap change is just a comment change for consistency.

Signed-off-by: John Stultz
Cc: Daniel Lezcano
Cc: Dave Jones
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Prarit Bhargava
Cc: Richard Cochran
Cc: Stephen Boyd
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/1426133800-29329-13-git-send-email-john.stultz@linaro.org
Signed-off-by: Ingo Molnar
---
 arch/arm/plat-omap/counter_32k.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm')

diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index 61b4d705c267..43cf74561cfd 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -103,7 +103,7 @@ int __init omap_init_clocksource_32k(void __iomem *vbase)
 
 	/*
 	 * 120000 rough estimate from the calculations in
-	 * __clocksource_updatefreq_scale.
+	 * __clocksource_update_freq_scale.
 	 */
 	clocks_calc_mult_shift(&persistent_mult, &persistent_shift,
 			32768, NSEC_PER_SEC, 120000);
--
cgit v1.2.3


From 77e32c89a7117614ab3d66d20c1088de721abfaa Mon Sep 17 00:00:00 2001
From: Viresh Kumar
Date: Fri, 27 Feb 2015 17:21:33 +0530
Subject: clockevents: Manage device's state separately for the core

'enum clock_event_mode' is used for two purposes today:

 - to pass mode to the driver of clockevent device::set_mode().
 - for managing state of the device for clockevents core.

For supporting new modes/states we have moved away from the legacy
set_mode() callback to new per-mode/state callbacks. New modes/states
shouldn't be exposed to the legacy (now OBSOLETE) callbacks and so we
shouldn't add new states to 'enum clock_event_mode'.

Let's have separate enums for the two use cases mentioned above. Keep
using the earlier enum for the legacy set_mode() callback and mark it
OBSOLETE. Add another enum to clearly specify the possible states of a
clockevent device. This also renames the newly added per-mode callbacks
to reflect state changes.

We haven't got rid of the 'mode' member of 'struct clock_event_device',
as it is used by some of the clockevent drivers; it will go away
automatically once those drivers are migrated to the new interface. It
('mode') is now only updated for drivers using the legacy interface.
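As a quick illustration of the converted interface (not part of the patch:
the foo_* device and its hardware helpers are made up, only the callback and
field names exist after this change), a driver using the new per-state
callbacks ends up looking roughly like this:

#include <linux/clockchips.h>

/* Placeholder hardware hooks -- a real driver would program its registers here. */
static void foo_timer_stop(void) { }
static void foo_timer_program_periodic(void) { }

static int foo_set_next_event(unsigned long delta, struct clock_event_device *evt)
{
	/* arm the hardware to fire 'delta' clock cycles from now */
	return 0;
}

static int foo_set_state_shutdown(struct clock_event_device *evt)
{
	foo_timer_stop();
	return 0;
}

static int foo_set_state_periodic(struct clock_event_device *evt)
{
	foo_timer_program_periodic();
	return 0;
}

static int foo_set_state_oneshot(struct clock_event_device *evt)
{
	foo_timer_stop();	/* armed later through ->set_next_event() */
	return 0;
}

static struct clock_event_device foo_clockevent = {
	.name			= "foo-timer",
	.features		= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event		= foo_set_next_event,
	/* no ->set_mode(): the core drives the state callbacks instead */
	.set_state_shutdown	= foo_set_state_shutdown,
	.set_state_periodic	= foo_set_state_periodic,
	.set_state_oneshot	= foo_set_state_oneshot,
};

The clockevents_sanity_check() added in this patch only insists on
set_state_shutdown(), plus set_state_periodic()/set_state_oneshot() when the
corresponding feature flags are set; tick_resume() remains optional.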
Suggested-by: Peter Zijlstra Suggested-by: Ingo Molnar Signed-off-by: Viresh Kumar Acked-by: Peter Zijlstra Cc: Daniel Lezcano Cc: Frederic Weisbecker Cc: Kevin Hilman Cc: Preeti U Murthy Cc: linaro-kernel@lists.linaro.org Cc: linaro-networking@linaro.org Cc: linux-arm-kernel@lists.infradead.org Link: http://lkml.kernel.org/r/b6b0143a8a57bd58352ad35e08c25424c879c0cb.1425037853.git.viresh.kumar@linaro.org Signed-off-by: Ingo Molnar --- arch/arm/common/bL_switcher.c | 8 ++-- include/linux/clockchips.h | 44 +++++++++++++------ kernel/time/clockevents.c | 99 +++++++++++++++++++++++-------------------- kernel/time/tick-broadcast.c | 20 ++++----- kernel/time/tick-common.c | 7 +-- kernel/time/tick-oneshot.c | 6 +-- kernel/time/timer_list.c | 12 +++--- 7 files changed, 111 insertions(+), 85 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c index 6eaddc47c43d..d4f970a4d255 100644 --- a/arch/arm/common/bL_switcher.c +++ b/arch/arm/common/bL_switcher.c @@ -152,7 +152,7 @@ static int bL_switch_to(unsigned int new_cluster_id) unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster; struct completion inbound_alive; struct tick_device *tdev; - enum clock_event_mode tdev_mode; + enum clock_event_state tdev_state; long volatile *handshake_ptr; int ipi_nr, ret; @@ -223,8 +223,8 @@ static int bL_switch_to(unsigned int new_cluster_id) if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu))) tdev = NULL; if (tdev) { - tdev_mode = tdev->evtdev->mode; - clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN); + tdev_state = tdev->evtdev->state; + clockevents_set_state(tdev->evtdev, CLOCK_EVT_STATE_SHUTDOWN); } ret = cpu_pm_enter(); @@ -252,7 +252,7 @@ static int bL_switch_to(unsigned int new_cluster_id) ret = cpu_pm_exit(); if (tdev) { - clockevents_set_mode(tdev->evtdev, tdev_mode); + clockevents_set_state(tdev->evtdev, tdev_state); clockevents_program_event(tdev->evtdev, tdev->evtdev->next_event, 1); } diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index a41749543d48..e20232c3320a 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -32,15 +32,31 @@ enum clock_event_nofitiers { struct clock_event_device; struct module; -/* Clock event mode commands */ +/* Clock event mode commands for legacy ->set_mode(): OBSOLETE */ enum clock_event_mode { CLOCK_EVT_MODE_UNUSED = 0, CLOCK_EVT_MODE_SHUTDOWN, CLOCK_EVT_MODE_PERIODIC, CLOCK_EVT_MODE_ONESHOT, CLOCK_EVT_MODE_RESUME, +}; - /* Legacy ->set_mode() callback doesn't support below modes */ +/* + * Possible states of a clock event device. + * + * DETACHED: Device is not used by clockevents core. Initial state or can be + * reached from SHUTDOWN. + * SHUTDOWN: Device is powered-off. Can be reached from PERIODIC or ONESHOT. + * PERIODIC: Device is programmed to generate events periodically. Can be + * reached from DETACHED or SHUTDOWN. + * ONESHOT: Device is programmed to generate event only once. Can be reached + * from DETACHED or SHUTDOWN. 
+ */ +enum clock_event_state { + CLOCK_EVT_STATE_DETACHED = 0, + CLOCK_EVT_STATE_SHUTDOWN, + CLOCK_EVT_STATE_PERIODIC, + CLOCK_EVT_STATE_ONESHOT, }; /* @@ -80,13 +96,14 @@ enum clock_event_mode { * @min_delta_ns: minimum delta value in ns * @mult: nanosecond to cycles multiplier * @shift: nanoseconds to cycles divisor (power of two) - * @mode: operating mode assigned by the management code + * @mode: operating mode, relevant only to ->set_mode(), OBSOLETE + * @state: current state of the device, assigned by the core code * @features: features * @retries: number of forced programming retries * @set_mode: legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME. - * @set_mode_periodic: switch mode to periodic, if !set_mode - * @set_mode_oneshot: switch mode to oneshot, if !set_mode - * @set_mode_shutdown: switch mode to shutdown, if !set_mode + * @set_state_periodic: switch state to periodic, if !set_mode + * @set_state_oneshot: switch state to oneshot, if !set_mode + * @set_state_shutdown: switch state to shutdown, if !set_mode * @tick_resume: resume clkevt device, if !set_mode * @broadcast: function to broadcast events * @min_delta_ticks: minimum delta value in ticks stored for reconfiguration @@ -111,20 +128,21 @@ struct clock_event_device { u32 mult; u32 shift; enum clock_event_mode mode; + enum clock_event_state state; unsigned int features; unsigned long retries; /* - * Mode transition callback(s): Only one of the two groups should be + * State transition callback(s): Only one of the two groups should be * defined: * - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME. - * - set_mode_{shutdown|periodic|oneshot|resume}(). + * - set_state_{shutdown|periodic|oneshot}(), tick_resume(). */ void (*set_mode)(enum clock_event_mode mode, struct clock_event_device *); - int (*set_mode_periodic)(struct clock_event_device *); - int (*set_mode_oneshot)(struct clock_event_device *); - int (*set_mode_shutdown)(struct clock_event_device *); + int (*set_state_periodic)(struct clock_event_device *); + int (*set_state_oneshot)(struct clock_event_device *); + int (*set_state_shutdown)(struct clock_event_device *); int (*tick_resume)(struct clock_event_device *); void (*broadcast)(const struct cpumask *mask); @@ -177,8 +195,8 @@ extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq); extern void clockevents_exchange_device(struct clock_event_device *old, struct clock_event_device *new); -extern void clockevents_set_mode(struct clock_event_device *dev, - enum clock_event_mode mode); +extern void clockevents_set_state(struct clock_event_device *dev, + enum clock_event_state state); extern int clockevents_program_event(struct clock_event_device *dev, ktime_t expires, bool force); diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 1b0ea63de69c..6e53e9a0c2e8 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -94,44 +94,49 @@ u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt) } EXPORT_SYMBOL_GPL(clockevent_delta2ns); -static int __clockevents_set_mode(struct clock_event_device *dev, - enum clock_event_mode mode) +static int __clockevents_set_state(struct clock_event_device *dev, + enum clock_event_state state) { /* Transition with legacy set_mode() callback */ if (dev->set_mode) { /* Legacy callback doesn't support new modes */ - if (mode > CLOCK_EVT_MODE_ONESHOT) + if (state > CLOCK_EVT_STATE_ONESHOT) return -ENOSYS; - dev->set_mode(mode, dev); + /* + * 'clock_event_state' and 'clock_event_mode' have 1-to-1 + 
* mapping until *_ONESHOT, and so a simple cast will work. + */ + dev->set_mode((enum clock_event_mode)state, dev); + dev->mode = (enum clock_event_mode)state; return 0; } if (dev->features & CLOCK_EVT_FEAT_DUMMY) return 0; - /* Transition with new mode-specific callbacks */ - switch (mode) { - case CLOCK_EVT_MODE_UNUSED: + /* Transition with new state-specific callbacks */ + switch (state) { + case CLOCK_EVT_STATE_DETACHED: /* * This is an internal state, which is guaranteed to go from - * SHUTDOWN to UNUSED. No driver interaction required. + * SHUTDOWN to DETACHED. No driver interaction required. */ return 0; - case CLOCK_EVT_MODE_SHUTDOWN: - return dev->set_mode_shutdown(dev); + case CLOCK_EVT_STATE_SHUTDOWN: + return dev->set_state_shutdown(dev); - case CLOCK_EVT_MODE_PERIODIC: + case CLOCK_EVT_STATE_PERIODIC: /* Core internal bug */ if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC)) return -ENOSYS; - return dev->set_mode_periodic(dev); + return dev->set_state_periodic(dev); - case CLOCK_EVT_MODE_ONESHOT: + case CLOCK_EVT_STATE_ONESHOT: /* Core internal bug */ if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT)) return -ENOSYS; - return dev->set_mode_oneshot(dev); + return dev->set_state_oneshot(dev); default: return -ENOSYS; @@ -139,26 +144,26 @@ static int __clockevents_set_mode(struct clock_event_device *dev, } /** - * clockevents_set_mode - set the operating mode of a clock event device + * clockevents_set_state - set the operating state of a clock event device * @dev: device to modify - * @mode: new mode + * @state: new state * * Must be called with interrupts disabled ! */ -void clockevents_set_mode(struct clock_event_device *dev, - enum clock_event_mode mode) +void clockevents_set_state(struct clock_event_device *dev, + enum clock_event_state state) { - if (dev->mode != mode) { - if (__clockevents_set_mode(dev, mode)) + if (dev->state != state) { + if (__clockevents_set_state(dev, state)) return; - dev->mode = mode; + dev->state = state; /* * A nsec2cyc multiplicator of 0 is invalid and we'd crash * on it, so fix it up and emit a warning: */ - if (mode == CLOCK_EVT_MODE_ONESHOT) { + if (state == CLOCK_EVT_STATE_ONESHOT) { if (unlikely(!dev->mult)) { dev->mult = 1; WARN_ON(1); @@ -173,7 +178,7 @@ void clockevents_set_mode(struct clock_event_device *dev, */ void clockevents_shutdown(struct clock_event_device *dev) { - clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); + clockevents_set_state(dev, CLOCK_EVT_STATE_SHUTDOWN); dev->next_event.tv64 = KTIME_MAX; } @@ -185,13 +190,12 @@ int clockevents_tick_resume(struct clock_event_device *dev) { int ret = 0; - if (dev->set_mode) + if (dev->set_mode) { dev->set_mode(CLOCK_EVT_MODE_RESUME, dev); - else if (dev->tick_resume) - ret = dev->tick_resume(dev); - - if (likely(!ret)) dev->mode = CLOCK_EVT_MODE_RESUME; + } else if (dev->tick_resume) { + ret = dev->tick_resume(dev); + } return ret; } @@ -248,7 +252,7 @@ static int clockevents_program_min_delta(struct clock_event_device *dev) delta = dev->min_delta_ns; dev->next_event = ktime_add_ns(ktime_get(), delta); - if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN) + if (dev->state == CLOCK_EVT_STATE_SHUTDOWN) return 0; dev->retries++; @@ -285,7 +289,7 @@ static int clockevents_program_min_delta(struct clock_event_device *dev) delta = dev->min_delta_ns; dev->next_event = ktime_add_ns(ktime_get(), delta); - if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN) + if (dev->state == CLOCK_EVT_STATE_SHUTDOWN) return 0; dev->retries++; @@ -317,7 +321,7 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t 
expires, dev->next_event = expires; - if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN) + if (dev->state == CLOCK_EVT_STATE_SHUTDOWN) return 0; /* Shortcut for clockevent devices that can deal with ktime. */ @@ -362,7 +366,7 @@ static int clockevents_replace(struct clock_event_device *ced) struct clock_event_device *dev, *newdev = NULL; list_for_each_entry(dev, &clockevent_devices, list) { - if (dev == ced || dev->mode != CLOCK_EVT_MODE_UNUSED) + if (dev == ced || dev->state != CLOCK_EVT_STATE_DETACHED) continue; if (!tick_check_replacement(newdev, dev)) @@ -388,7 +392,7 @@ static int clockevents_replace(struct clock_event_device *ced) static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu) { /* Fast track. Device is unused */ - if (ced->mode == CLOCK_EVT_MODE_UNUSED) { + if (ced->state == CLOCK_EVT_STATE_DETACHED) { list_del_init(&ced->list); return 0; } @@ -438,30 +442,30 @@ int clockevents_unbind_device(struct clock_event_device *ced, int cpu) } EXPORT_SYMBOL_GPL(clockevents_unbind); -/* Sanity check of mode transition callbacks */ +/* Sanity check of state transition callbacks */ static int clockevents_sanity_check(struct clock_event_device *dev) { /* Legacy set_mode() callback */ if (dev->set_mode) { /* We shouldn't be supporting new modes now */ - WARN_ON(dev->set_mode_periodic || dev->set_mode_oneshot || - dev->set_mode_shutdown || dev->tick_resume); + WARN_ON(dev->set_state_periodic || dev->set_state_oneshot || + dev->set_state_shutdown || dev->tick_resume); return 0; } if (dev->features & CLOCK_EVT_FEAT_DUMMY) return 0; - /* New mode-specific callbacks */ - if (!dev->set_mode_shutdown) + /* New state-specific callbacks */ + if (!dev->set_state_shutdown) return -EINVAL; if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) && - !dev->set_mode_periodic) + !dev->set_state_periodic) return -EINVAL; if ((dev->features & CLOCK_EVT_FEAT_ONESHOT) && - !dev->set_mode_oneshot) + !dev->set_state_oneshot) return -EINVAL; return 0; @@ -478,6 +482,9 @@ void clockevents_register_device(struct clock_event_device *dev) BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); BUG_ON(clockevents_sanity_check(dev)); + /* Initialize state to DETACHED */ + dev->state = CLOCK_EVT_STATE_DETACHED; + if (!dev->cpumask) { WARN_ON(num_possible_cpus() > 1); dev->cpumask = cpumask_of(smp_processor_id()); @@ -541,11 +548,11 @@ int __clockevents_update_freq(struct clock_event_device *dev, u32 freq) { clockevents_config(dev, freq); - if (dev->mode == CLOCK_EVT_MODE_ONESHOT) + if (dev->state == CLOCK_EVT_STATE_ONESHOT) return clockevents_program_event(dev, dev->next_event, false); - if (dev->mode == CLOCK_EVT_MODE_PERIODIC) - return __clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC); + if (dev->state == CLOCK_EVT_STATE_PERIODIC) + return __clockevents_set_state(dev, CLOCK_EVT_STATE_PERIODIC); return 0; } @@ -601,13 +608,13 @@ void clockevents_exchange_device(struct clock_event_device *old, */ if (old) { module_put(old->owner); - clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED); + clockevents_set_state(old, CLOCK_EVT_STATE_DETACHED); list_del(&old->list); list_add(&old->list, &clockevents_released); } if (new) { - BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED); + BUG_ON(new->state != CLOCK_EVT_STATE_DETACHED); clockevents_shutdown(new); } local_irq_restore(flags); @@ -693,7 +700,7 @@ int clockevents_notify(unsigned long reason, void *arg) if (cpumask_test_cpu(cpu, dev->cpumask) && cpumask_weight(dev->cpumask) == 1 && !tick_is_broadcast_device(dev)) { - BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); + BUG_ON(dev->state != 
CLOCK_EVT_STATE_DETACHED); list_del(&dev->list); } } diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 542d5bb5c13d..f0f8ee9dbc28 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -303,7 +303,7 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev) /* * The device is in periodic mode. No reprogramming necessary: */ - if (dev->mode == CLOCK_EVT_MODE_PERIODIC) + if (dev->state == CLOCK_EVT_STATE_PERIODIC) goto unlock; /* @@ -532,8 +532,8 @@ static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu, { int ret; - if (bc->mode != CLOCK_EVT_MODE_ONESHOT) - clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); + if (bc->state != CLOCK_EVT_STATE_ONESHOT) + clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT); ret = clockevents_program_event(bc, expires, force); if (!ret) @@ -543,7 +543,7 @@ static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu, int tick_resume_broadcast_oneshot(struct clock_event_device *bc) { - clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); + clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT); return 0; } @@ -562,8 +562,8 @@ void tick_check_oneshot_broadcast_this_cpu(void) * switched over, leave the device alone. */ if (td->mode == TICKDEV_MODE_ONESHOT) { - clockevents_set_mode(td->evtdev, - CLOCK_EVT_MODE_ONESHOT); + clockevents_set_state(td->evtdev, + CLOCK_EVT_STATE_ONESHOT); } } } @@ -666,7 +666,7 @@ static void broadcast_shutdown_local(struct clock_event_device *bc, if (dev->next_event.tv64 < bc->next_event.tv64) return; } - clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); + clockevents_set_state(dev, CLOCK_EVT_STATE_SHUTDOWN); } static void broadcast_move_bc(int deadcpu) @@ -741,7 +741,7 @@ int tick_broadcast_oneshot_control(unsigned long reason) cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask); } else { if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) { - clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); + clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT); /* * The cpu which was handling the broadcast * timer marked this cpu in the broadcast @@ -842,7 +842,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) /* Set it up only once ! 
*/ if (bc->event_handler != tick_handle_oneshot_broadcast) { - int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; + int was_periodic = bc->state == CLOCK_EVT_STATE_PERIODIC; bc->event_handler = tick_handle_oneshot_broadcast; @@ -858,7 +858,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) tick_broadcast_oneshot_mask, tmpmask); if (was_periodic && !cpumask_empty(tmpmask)) { - clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); + clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT); tick_broadcast_init_next_event(tmpmask, tick_next_period); tick_broadcast_set_event(bc, cpu, tick_next_period, 1); diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 5c50664c21d7..a5b877130ae9 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -102,7 +102,7 @@ void tick_handle_periodic(struct clock_event_device *dev) tick_periodic(cpu); - if (dev->mode != CLOCK_EVT_MODE_ONESHOT) + if (dev->state != CLOCK_EVT_STATE_ONESHOT) return; for (;;) { /* @@ -140,7 +140,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) && !tick_broadcast_oneshot_active()) { - clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC); + clockevents_set_state(dev, CLOCK_EVT_STATE_PERIODIC); } else { unsigned long seq; ktime_t next; @@ -150,7 +150,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) next = tick_next_period; } while (read_seqretry(&jiffies_lock, seq)); - clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); + clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT); for (;;) { if (!clockevents_program_event(dev, next, false)) @@ -365,6 +365,7 @@ void tick_shutdown(unsigned int *cpup) * Prevent that the clock events layer tries to call * the set mode function! 
*/ + dev->state = CLOCK_EVT_STATE_DETACHED; dev->mode = CLOCK_EVT_MODE_UNUSED; clockevents_exchange_device(dev, NULL); dev->event_handler = clockevents_handle_noop; diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c index 7ce740e78e1b..67a64b1670bf 100644 --- a/kernel/time/tick-oneshot.c +++ b/kernel/time/tick-oneshot.c @@ -38,7 +38,7 @@ void tick_resume_oneshot(void) { struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); - clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); + clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT); clockevents_program_event(dev, ktime_get(), true); } @@ -50,7 +50,7 @@ void tick_setup_oneshot(struct clock_event_device *newdev, ktime_t next_event) { newdev->event_handler = handler; - clockevents_set_mode(newdev, CLOCK_EVT_MODE_ONESHOT); + clockevents_set_state(newdev, CLOCK_EVT_STATE_ONESHOT); clockevents_program_event(newdev, next_event, true); } @@ -81,7 +81,7 @@ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)) td->mode = TICKDEV_MODE_ONESHOT; dev->event_handler = handler; - clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); + clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT); tick_broadcast_switch_to_oneshot(); return 0; } diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index 2b3e9393034d..05aa5590106a 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c @@ -233,21 +233,21 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu) print_name_offset(m, dev->set_mode); SEQ_printf(m, "\n"); } else { - if (dev->set_mode_shutdown) { + if (dev->set_state_shutdown) { SEQ_printf(m, " shutdown: "); - print_name_offset(m, dev->set_mode_shutdown); + print_name_offset(m, dev->set_state_shutdown); SEQ_printf(m, "\n"); } - if (dev->set_mode_periodic) { + if (dev->set_state_periodic) { SEQ_printf(m, " periodic: "); - print_name_offset(m, dev->set_mode_periodic); + print_name_offset(m, dev->set_state_periodic); SEQ_printf(m, "\n"); } - if (dev->set_mode_oneshot) { + if (dev->set_state_oneshot) { SEQ_printf(m, " oneshot: "); - print_name_offset(m, dev->set_mode_oneshot); + print_name_offset(m, dev->set_state_oneshot); SEQ_printf(m, "\n"); } -- cgit v1.2.3 From 7270d11c56f594af4d166b2988421cd8ed933dc1 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 25 Mar 2015 13:11:52 +0100 Subject: arm/bL_switcher: Kill tick suspend hackery Use the new tick_suspend/resume_local() and get rid of the homebrewn implementation of these in the ARM bL switcher. The check for the cpumask is completely pointless. There is no harm to suspend a per cpu tick device unconditionally. If that's a real issue then we fix it proper at the core level and not with some completely undocumented hacks in some random core code. Move the tick internals to the core code, now that this nuisance is gone. Signed-off-by: Thomas Gleixner [ rjw: Rebase, changelog ] Signed-off-by: Rafael J. 
Wysocki Cc: Nicolas Pitre Cc: Peter Zijlstra Cc: Russell King Link: http://lkml.kernel.org/r/1655112.Ws17YsMfN7@vostro.rjw.lan Signed-off-by: Ingo Molnar --- arch/arm/common/bL_switcher.c | 16 ++-------------- include/linux/clockchips.h | 6 ------ include/linux/tick.h | 19 ++++--------------- kernel/time/tick-common.c | 2 +- kernel/time/tick-internal.h | 5 +++++ kernel/time/tick-sched.h | 10 ++++++++++ 6 files changed, 22 insertions(+), 36 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c index d4f970a4d255..37dc0fe1093f 100644 --- a/arch/arm/common/bL_switcher.c +++ b/arch/arm/common/bL_switcher.c @@ -151,8 +151,6 @@ static int bL_switch_to(unsigned int new_cluster_id) unsigned int mpidr, this_cpu, that_cpu; unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster; struct completion inbound_alive; - struct tick_device *tdev; - enum clock_event_state tdev_state; long volatile *handshake_ptr; int ipi_nr, ret; @@ -219,13 +217,7 @@ static int bL_switch_to(unsigned int new_cluster_id) /* redirect GIC's SGIs to our counterpart */ gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]); - tdev = tick_get_device(this_cpu); - if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu))) - tdev = NULL; - if (tdev) { - tdev_state = tdev->evtdev->state; - clockevents_set_state(tdev->evtdev, CLOCK_EVT_STATE_SHUTDOWN); - } + tick_suspend_local(); ret = cpu_pm_enter(); @@ -251,11 +243,7 @@ static int bL_switch_to(unsigned int new_cluster_id) ret = cpu_pm_exit(); - if (tdev) { - clockevents_set_state(tdev->evtdev, tdev_state); - clockevents_program_event(tdev->evtdev, - tdev->evtdev->next_event, 1); - } + tick_resume_local(); trace_cpu_migrate_finish(ktime_get_real_ns(), ib_mpidr); local_fiq_enable(); diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index 50ce9750754f..3ac7e2d90374 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -198,12 +198,6 @@ clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec) freq, minsec); } -/* Should be core only, but is abused by arm bl_switcher */ -extern void clockevents_set_state(struct clock_event_device *dev, - enum clock_event_state state); -extern int clockevents_program_event(struct clock_event_device *dev, - ktime_t expires, bool force); - extern void clockevents_suspend(void); extern void clockevents_resume(void); diff --git a/include/linux/tick.h b/include/linux/tick.h index a3d4d2840e7f..589868b09aff 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -11,30 +11,19 @@ #include #include -/* ARM BL switcher abuse support */ -#ifdef CONFIG_GENERIC_CLOCKEVENTS -enum tick_device_mode { - TICKDEV_MODE_PERIODIC, - TICKDEV_MODE_ONESHOT, -}; - -struct tick_device { - struct clock_event_device *evtdev; - enum tick_device_mode mode; -}; -extern struct tick_device *tick_get_device(int cpu); -#endif - #ifdef CONFIG_GENERIC_CLOCKEVENTS extern void __init tick_init(void); extern void tick_freeze(void); extern void tick_unfreeze(void); -/* Should be core only, but XEN resume magic requires this */ +/* Should be core only, but ARM BL switcher requires it */ +extern void tick_suspend_local(void); +/* Should be core only, but XEN resume magic and ARM BL switcher require it */ extern void tick_resume_local(void); #else /* CONFIG_GENERIC_CLOCKEVENTS */ static inline void tick_init(void) { } static inline void tick_freeze(void) { } static inline void tick_unfreeze(void) { } +static inline void tick_suspend_local(void) { 
} static inline void tick_resume_local(void) { } #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index da796d65d1fb..e28ba5c044c5 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -380,7 +380,7 @@ void tick_shutdown(unsigned int *cpup) * * No locks required. Nothing can change the per cpu device. */ -static void tick_suspend_local(void) +void tick_suspend_local(void) { struct tick_device *td = this_cpu_ptr(&tick_cpu_device); diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 6ba7bce732f2..5fc2dafabd58 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h @@ -28,6 +28,7 @@ extern bool tick_check_replacement(struct clock_event_device *curdev, struct clock_event_device *newdev); extern void tick_install_replacement(struct clock_event_device *dev); extern int tick_is_oneshot_available(void); +extern struct tick_device *tick_get_device(int cpu); extern int clockevents_tick_resume(struct clock_event_device *dev); /* Check, if the device is functional or a dummy for broadcast */ @@ -39,6 +40,10 @@ static inline int tick_device_is_functional(struct clock_event_device *dev) extern void clockevents_shutdown(struct clock_event_device *dev); extern void clockevents_exchange_device(struct clock_event_device *old, struct clock_event_device *new); +extern void clockevents_set_state(struct clock_event_device *dev, + enum clock_event_state state); +extern int clockevents_program_event(struct clock_event_device *dev, + ktime_t expires, bool force); extern void clockevents_handle_noop(struct clock_event_device *dev); extern int __clockevents_update_freq(struct clock_event_device *dev, u32 freq); extern ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt); diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h index 930743249127..28b5da3e1a17 100644 --- a/kernel/time/tick-sched.h +++ b/kernel/time/tick-sched.h @@ -3,6 +3,16 @@ #include +enum tick_device_mode { + TICKDEV_MODE_PERIODIC, + TICKDEV_MODE_ONESHOT, +}; + +struct tick_device { + struct clock_event_device *evtdev; + enum tick_device_mode mode; +}; + enum tick_nohz_mode { NOHZ_MODE_INACTIVE, NOHZ_MODE_LOWRES, -- cgit v1.2.3 From a451570c008b9e19592e29f15cfd295bdf818c7a Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Wed, 1 Apr 2015 20:34:24 -0700 Subject: ARM: OMAP: 32k counter: Provide y2038-safe omap_read_persistent_clock() replacement As part of addressing "y2038 problem" for in-kernel uses, this patch adds the y2038-safe omap_read_persistent_clock64() using timespec64. Because we rely on some subsequent changes to convert arm multiarch support, omap_read_persistent_clock() will be removed then. Also remove the needless spinlock, because read_persistent_clock() doesn't run simultaneously. Signed-off-by: Xunlei Pang Signed-off-by: John Stultz Acked-by: Tony Lindgren Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1427945681-29972-5-git-send-email-john.stultz@linaro.org Signed-off-by: Ingo Molnar --- arch/arm/plat-omap/counter_32k.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c index 43cf74561cfd..b7b7b0793228 100644 --- a/arch/arm/plat-omap/counter_32k.c +++ b/arch/arm/plat-omap/counter_32k.c @@ -44,24 +44,20 @@ static u64 notrace omap_32k_read_sched_clock(void) } /** - * omap_read_persistent_clock - Return time from a persistent clock. 
+ * omap_read_persistent_clock64 - Return time from a persistent clock. * * Reads the time from a source which isn't disabled during PM, the * 32k sync timer. Convert the cycles elapsed since last read into - * nsecs and adds to a monotonically increasing timespec. + * nsecs and adds to a monotonically increasing timespec64. */ -static struct timespec persistent_ts; +static struct timespec64 persistent_ts; static cycles_t cycles; static unsigned int persistent_mult, persistent_shift; -static DEFINE_SPINLOCK(read_persistent_clock_lock); -static void omap_read_persistent_clock(struct timespec *ts) +static void omap_read_persistent_clock64(struct timespec64 *ts) { unsigned long long nsecs; cycles_t last_cycles; - unsigned long flags; - - spin_lock_irqsave(&read_persistent_clock_lock, flags); last_cycles = cycles; cycles = sync32k_cnt_reg ? readl_relaxed(sync32k_cnt_reg) : 0; @@ -69,11 +65,17 @@ static void omap_read_persistent_clock(struct timespec *ts) nsecs = clocksource_cyc2ns(cycles - last_cycles, persistent_mult, persistent_shift); - timespec_add_ns(&persistent_ts, nsecs); + timespec64_add_ns(&persistent_ts, nsecs); *ts = persistent_ts; +} + +static void omap_read_persistent_clock(struct timespec *ts) +{ + struct timespec64 ts64; - spin_unlock_irqrestore(&read_persistent_clock_lock, flags); + omap_read_persistent_clock64(&ts64); + *ts = timespec64_to_timespec(ts64); } /** -- cgit v1.2.3 From cb850717b076d979058d52529e15f1736359d811 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Wed, 1 Apr 2015 20:34:26 -0700 Subject: ARM, clocksource/drivers: Provide read_boot_clock64() and read_persistent_clock64() and use them As part of addressing "y2038 problem" for in-kernel uses, this patch converts read_boot_clock() to read_boot_clock64() and read_persistent_clock() to read_persistent_clock64() using timespec64 by converting clock_access_fn to use timespec64. 
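To make the shape of the conversion concrete, here is a minimal, hypothetical
persistent-clock hook in the new timespec64 form. foo_read_counter() and the
mult/shift pair are stand-ins; the accumulation logic mirrors the OMAP code
changed below:

#include <linux/clocksource.h>
#include <linux/time64.h>

/* Hypothetical free-running counter read and its precomputed scaling. */
static cycles_t foo_read_counter(void)
{
	return 0;	/* read the hardware counter here */
}
static u32 foo_persistent_mult, foo_persistent_shift;

static struct timespec64 foo_persistent_ts;
static cycles_t foo_last_cycles;

static void foo_read_persistent_clock64(struct timespec64 *ts)
{
	cycles_t now = foo_read_counter();

	/* accumulate the nanoseconds elapsed since the previous read */
	timespec64_add_ns(&foo_persistent_ts,
			  clocksource_cyc2ns(now - foo_last_cycles,
					     foo_persistent_mult,
					     foo_persistent_shift));
	foo_last_cycles = now;
	*ts = foo_persistent_ts;
}

A platform would then hand this function to
register_persistent_clock(NULL, foo_read_persistent_clock64), exactly as the
OMAP and Tegra hunks below do.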
Signed-off-by: Xunlei Pang Signed-off-by: John Stultz Acked-by: Thierry Reding (for tegra part) Cc: Russell King Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1427945681-29972-7-git-send-email-john.stultz@linaro.org Signed-off-by: Ingo Molnar --- arch/arm/include/asm/mach/time.h | 3 +-- arch/arm/kernel/time.c | 6 +++--- arch/arm/plat-omap/counter_32k.c | 10 +--------- drivers/clocksource/tegra20_timer.c | 10 +--------- 4 files changed, 6 insertions(+), 23 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h index 90c12e1e695c..0f79e4dec7f9 100644 --- a/arch/arm/include/asm/mach/time.h +++ b/arch/arm/include/asm/mach/time.h @@ -12,8 +12,7 @@ extern void timer_tick(void); -struct timespec; -typedef void (*clock_access_fn)(struct timespec *); +typedef void (*clock_access_fn)(struct timespec64 *); extern int register_persistent_clock(clock_access_fn read_boot, clock_access_fn read_persistent); diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c index 0cc7e58c47cc..a66e37e211a9 100644 --- a/arch/arm/kernel/time.c +++ b/arch/arm/kernel/time.c @@ -76,7 +76,7 @@ void timer_tick(void) } #endif -static void dummy_clock_access(struct timespec *ts) +static void dummy_clock_access(struct timespec64 *ts) { ts->tv_sec = 0; ts->tv_nsec = 0; @@ -85,12 +85,12 @@ static void dummy_clock_access(struct timespec *ts) static clock_access_fn __read_persistent_clock = dummy_clock_access; static clock_access_fn __read_boot_clock = dummy_clock_access;; -void read_persistent_clock(struct timespec *ts) +void read_persistent_clock64(struct timespec64 *ts) { __read_persistent_clock(ts); } -void read_boot_clock(struct timespec *ts) +void read_boot_clock64(struct timespec64 *ts) { __read_boot_clock(ts); } diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c index b7b7b0793228..2438b96004c1 100644 --- a/arch/arm/plat-omap/counter_32k.c +++ b/arch/arm/plat-omap/counter_32k.c @@ -70,14 +70,6 @@ static void omap_read_persistent_clock64(struct timespec64 *ts) *ts = persistent_ts; } -static void omap_read_persistent_clock(struct timespec *ts) -{ - struct timespec64 ts64; - - omap_read_persistent_clock64(&ts64); - *ts = timespec64_to_timespec(ts64); -} - /** * omap_init_clocksource_32k - setup and register counter 32k as a * kernel clocksource @@ -118,7 +110,7 @@ int __init omap_init_clocksource_32k(void __iomem *vbase) } sched_clock_register(omap_32k_read_sched_clock, 32, 32768); - register_persistent_clock(NULL, omap_read_persistent_clock); + register_persistent_clock(NULL, omap_read_persistent_clock64); pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n"); return 0; diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c index 4a0a603edecc..5a112d72fc2d 100644 --- a/drivers/clocksource/tegra20_timer.c +++ b/drivers/clocksource/tegra20_timer.c @@ -141,14 +141,6 @@ static void tegra_read_persistent_clock64(struct timespec64 *ts) *ts = persistent_ts; } -static void tegra_read_persistent_clock(struct timespec *ts) -{ - struct timespec ts64; - - tegra_read_persistent_clock64(&ts64); - *ts = timespec64_to_timespec(ts64); -} - static unsigned long tegra_delay_timer_read_counter_long(void) { return readl(timer_reg_base + TIMERUS_CNTR_1US); @@ -259,7 +251,7 @@ static void __init tegra20_init_rtc(struct device_node *np) else clk_prepare_enable(clk); - register_persistent_clock(NULL, tegra_read_persistent_clock); + register_persistent_clock(NULL, tegra_read_persistent_clock64); } 
CLOCKSOURCE_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc); -- cgit v1.2.3 From fa8589fe3bfafadd80677c8eabae97dc5dab22c0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 3 Apr 2015 02:02:47 +0200 Subject: ARM: OMAP: Use explicit broadcast control function Replace the clockevents_notify() call with an explicit function call. Signed-off-by: Thomas Gleixner Signed-off-by: Rafael J. Wysocki Cc: Peter Zijlstra Cc: Tony Lindgren Link: http://lkml.kernel.org/r/2124877.3nbWGILHCV@vostro.rjw.lan Signed-off-by: Ingo Molnar --- arch/arm/mach-omap2/cpuidle44xx.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c index 01e398a868bc..9284dc547263 100644 --- a/arch/arm/mach-omap2/cpuidle44xx.c +++ b/arch/arm/mach-omap2/cpuidle44xx.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include @@ -184,8 +184,7 @@ fail: */ static void omap_setup_broadcast_timer(void *arg) { - int cpu = smp_processor_id(); - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu); + tick_broadcast_enable(); } static struct cpuidle_driver omap4_idle_driver = { -- cgit v1.2.3 From fb7f0398a98020def9429ddd7b4a8fc2d948b092 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 3 Apr 2015 02:31:29 +0200 Subject: ARM: OMAP: Use explicit broadcast oneshot control function Replace the clockevents_notify() call with an explicit function call. Signed-off-by: Thomas Gleixner Signed-off-by: Rafael J. Wysocki Cc: Peter Zijlstra Cc: Tony Lindgren Link: http://lkml.kernel.org/r/3123047.uVjevtxDV7@vostro.rjw.lan Signed-off-by: Ingo Molnar --- arch/arm/mach-omap2/cpuidle44xx.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c index 9284dc547263..57d429830e09 100644 --- a/arch/arm/mach-omap2/cpuidle44xx.c +++ b/arch/arm/mach-omap2/cpuidle44xx.c @@ -84,7 +84,6 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, { struct idle_statedata *cx = state_ptr + index; u32 mpuss_can_lose_context = 0; - int cpu_id = smp_processor_id(); /* * CPU0 has to wait and stay ON until CPU1 is OFF state. @@ -112,7 +111,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) && (cx->mpu_logic_state == PWRDM_POWER_OFF); - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id); + tick_broadcast_enter(); /* * Call idle CPU PM enter notifier chain so that @@ -169,7 +168,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, if (dev->cpu == 0 && mpuss_can_lose_context) cpu_cluster_pm_exit(); - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id); + tick_broadcast_exit(); fail: cpuidle_coupled_parallel_barrier(dev, &abort_barrier); -- cgit v1.2.3 From a0b4122447a3c1a467ce4e4f1bb863e1170394d5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 3 Apr 2015 02:32:14 +0200 Subject: ARM: Tegra: Use explicit broadcast oneshot control function Replace the clockevents_notify() call with an explicit function call. Signed-off-by: Thomas Gleixner Signed-off-by: Rafael J. 
Wysocki Cc: Alexandre Courbot Cc: Peter Zijlstra Cc: Stephen Warren Cc: Thierry Reding Link: http://lkml.kernel.org/r/2131111.rjxRLX1eZB@vostro.rjw.lan Signed-off-by: Ingo Molnar --- arch/arm/mach-tegra/cpuidle-tegra114.c | 6 +++--- arch/arm/mach-tegra/cpuidle-tegra20.c | 10 +++++----- arch/arm/mach-tegra/cpuidle-tegra30.c | 10 +++++----- 3 files changed, 13 insertions(+), 13 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-tegra/cpuidle-tegra114.c b/arch/arm/mach-tegra/cpuidle-tegra114.c index f2b586d7b15d..155807fa6fdd 100644 --- a/arch/arm/mach-tegra/cpuidle-tegra114.c +++ b/arch/arm/mach-tegra/cpuidle-tegra114.c @@ -15,7 +15,7 @@ */ #include -#include +#include #include #include #include @@ -44,7 +44,7 @@ static int tegra114_idle_power_down(struct cpuidle_device *dev, tegra_set_cpu_in_lp2(); cpu_pm_enter(); - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu); + tick_broadcast_enter(); call_firmware_op(prepare_idle); @@ -52,7 +52,7 @@ static int tegra114_idle_power_down(struct cpuidle_device *dev, if (call_firmware_op(do_idle, 0) == -ENOSYS) cpu_suspend(0, tegra30_sleep_cpu_secondary_finish); - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu); + tick_broadcast_exit(); cpu_pm_exit(); tegra_clear_cpu_in_lp2(); diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c index 4f25a7c7ca0f..48844ae6c3a1 100644 --- a/arch/arm/mach-tegra/cpuidle-tegra20.c +++ b/arch/arm/mach-tegra/cpuidle-tegra20.c @@ -20,7 +20,7 @@ */ #include -#include +#include #include #include #include @@ -136,11 +136,11 @@ static bool tegra20_cpu_cluster_power_down(struct cpuidle_device *dev, if (tegra20_reset_cpu_1() || !tegra_cpu_rail_off_ready()) return false; - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu); + tick_broadcast_enter(); tegra_idle_lp2_last(); - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu); + tick_broadcast_exit(); if (cpu_online(1)) tegra20_wake_cpu1_from_reset(); @@ -153,13 +153,13 @@ static bool tegra20_idle_enter_lp2_cpu_1(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu); + tick_broadcast_enter(); cpu_suspend(0, tegra20_sleep_cpu_secondary_finish); tegra20_cpu_clear_resettable(); - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu); + tick_broadcast_exit(); return true; } diff --git a/arch/arm/mach-tegra/cpuidle-tegra30.c b/arch/arm/mach-tegra/cpuidle-tegra30.c index f8815ed65d9d..84d809a3cba3 100644 --- a/arch/arm/mach-tegra/cpuidle-tegra30.c +++ b/arch/arm/mach-tegra/cpuidle-tegra30.c @@ -20,7 +20,7 @@ */ #include -#include +#include #include #include #include @@ -76,11 +76,11 @@ static bool tegra30_cpu_cluster_power_down(struct cpuidle_device *dev, return false; } - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu); + tick_broadcast_enter(); tegra_idle_lp2_last(); - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu); + tick_broadcast_exit(); return true; } @@ -90,13 +90,13 @@ static bool tegra30_cpu_core_power_down(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu); + tick_broadcast_enter(); smp_wmb(); cpu_suspend(0, tegra30_sleep_cpu_secondary_finish); - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu); + tick_broadcast_exit(); return true; } -- cgit v1.2.3
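Taken together, the broadcast conversions above all follow one pattern. A
condensed, hypothetical before/after of a cpuidle enter hook (foo_do_idle()
and the wrapper functions are illustrative only; the real changes also swap
the linux/clockchips.h include for linux/tick.h, as the hunks show):

#include <linux/cpuidle.h>
#include <linux/clockchips.h>	/* only needed by the old variant */
#include <linux/tick.h>
#include <linux/smp.h>

static void foo_do_idle(void) { }	/* stand-in for the platform low-power entry */

/* Old style: per-cpu notifier calls, needing the cpu id plumbed through. */
static int foo_enter_idle_old(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index)
{
	int cpu = smp_processor_id();

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
	foo_do_idle();
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
	return index;
}

/* New style: explicit broadcast control, no cpu id needed. */
static int foo_enter_idle_new(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index)
{
	tick_broadcast_enter();
	foo_do_idle();
	tick_broadcast_exit();
	return index;
}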