author		Anna-Maria Gleixner <anna-maria@linutronix.de>	2017-12-21 11:41:56 +0100
committer	Ingo Molnar <mingo@kernel.org>			2018-01-16 03:01:20 +0100
commit		c458b1d102036eaa2c70e03000c959bd491c2037 (patch)
tree		60311825bdc27386a846492cf9ef198a5a29a5af /kernel/time
parent		98ecadd4305d8677ba77162152485798d47dcc85 (diff)
hrtimer: Prepare handling of hard and softirq based hrtimers
Softirq-based hrtimers can utilize most of the existing hrtimer
functions, but need to operate on a different data set.

Add an 'active_mask' parameter to the various functions so that either the
hard or the soft bases can be selected. Fix up the existing callers and hand
in the HRTIMER_ACTIVE_HARD mask.
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: keescook@chromium.org
Link: http://lkml.kernel.org/r/20171221104205.7269-28-anna-maria@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/time')
-rw-r--r--	kernel/time/hrtimer.c	38
1 file changed, 29 insertions, 9 deletions
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index e2353f5cdf51..ba4674e9adc2 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -60,6 +60,15 @@
 #include "tick-internal.h"
 
 /*
+ * Masks for selecting the soft and hard context timers from
+ * cpu_base->active
+ */
+#define MASK_SHIFT		(HRTIMER_BASE_MONOTONIC_SOFT)
+#define HRTIMER_ACTIVE_HARD	((1U << MASK_SHIFT) - 1)
+#define HRTIMER_ACTIVE_SOFT	(HRTIMER_ACTIVE_HARD << MASK_SHIFT)
+#define HRTIMER_ACTIVE_ALL	(HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
+
+/*
  * The timer bases:
  *
  * There are more clockids than hrtimer bases. Thus, we index
@@ -507,13 +516,24 @@ static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
 	return expires_next;
 }
 
-static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
+/*
+ * Recomputes cpu_base::*next_timer and returns the earliest expires_next but
+ * does not set cpu_base::*expires_next, that is done by hrtimer_reprogram.
+ *
+ * @active_mask must be one of:
+ *  - HRTIMER_ACTIVE,
+ *  - HRTIMER_ACTIVE_SOFT, or
+ *  - HRTIMER_ACTIVE_HARD.
+ */
+static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base,
+					unsigned int active_mask)
 {
-	unsigned int active = cpu_base->active_bases;
+	unsigned int active;
 	ktime_t expires_next = KTIME_MAX;
 
 	cpu_base->next_timer = NULL;
+	active = cpu_base->active_bases & active_mask;
 	expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next);
 
 	return expires_next;
 }
@@ -553,7 +573,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 {
 	ktime_t expires_next;
 
-	expires_next = __hrtimer_get_next_event(cpu_base);
+	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
 
 	if (skip_equal && expires_next == cpu_base->expires_next)
 		return;
@@ -1074,7 +1094,7 @@ u64 hrtimer_get_next_event(void)
 	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 
 	if (!__hrtimer_hres_active(cpu_base))
-		expires = __hrtimer_get_next_event(cpu_base);
+		expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
 
 	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 
@@ -1248,10 +1268,10 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
 }
 
 static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
-				 unsigned long flags)
+				 unsigned long flags, unsigned int active_mask)
 {
 	struct hrtimer_clock_base *base;
-	unsigned int active = cpu_base->active_bases;
+	unsigned int active = cpu_base->active_bases & active_mask;
 
 	for_each_active_base(base, cpu_base, active) {
 		struct timerqueue_node *node;
@@ -1314,10 +1334,10 @@ retry:
 	 */
 	cpu_base->expires_next = KTIME_MAX;
 
-	__hrtimer_run_queues(cpu_base, now, flags);
+	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
 
 	/* Reevaluate the clock bases for the next expiry */
-	expires_next = __hrtimer_get_next_event(cpu_base);
+	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
 	/*
 	 * Store the new expiry value so the migration code can verify
 	 * against it.
@@ -1421,7 +1441,7 @@ void hrtimer_run_queues(void)
 
 	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 	now = hrtimer_update_base(cpu_base);
-	__hrtimer_run_queues(cpu_base, now, flags);
+	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
 	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 }
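To illustrate the selection scheme above: with the four hard-context clock bases enumerated ahead of the four soft-context ones (which is what MASK_SHIFT = HRTIMER_BASE_MONOTONIC_SOFT encodes, evaluating to 4 as of this series), HRTIMER_ACTIVE_HARD works out to 0x0f, HRTIMER_ACTIVE_SOFT to 0xf0 and HRTIMER_ACTIVE_ALL to 0xff. The following standalone sketch models how masking cpu_base->active_bases with one of these constants restricts the expiry walk to one group of bases. It is illustrative user-space C, not kernel code; the enum mirrors the kernel layout, but run_selected_bases() and main() are invented for the example.

/*
 * Minimal model of the active_mask scheme (illustrative only, not kernel
 * code). Hard bases occupy bits 0..3 of the bitmap, soft bases bits 4..7.
 */
#include <stdio.h>

enum hrtimer_base_index {
        HRTIMER_BASE_MONOTONIC,         /* hard bases: bits 0..3 */
        HRTIMER_BASE_REALTIME,
        HRTIMER_BASE_BOOTTIME,
        HRTIMER_BASE_TAI,
        HRTIMER_BASE_MONOTONIC_SOFT,    /* soft bases: bits 4..7 */
        HRTIMER_BASE_REALTIME_SOFT,
        HRTIMER_BASE_BOOTTIME_SOFT,
        HRTIMER_BASE_TAI_SOFT,
        HRTIMER_MAX_CLOCK_BASES,
};

#define MASK_SHIFT              (HRTIMER_BASE_MONOTONIC_SOFT)           /* 4    */
#define HRTIMER_ACTIVE_HARD     ((1U << MASK_SHIFT) - 1)                /* 0x0f */
#define HRTIMER_ACTIVE_SOFT     (HRTIMER_ACTIVE_HARD << MASK_SHIFT)     /* 0xf0 */
#define HRTIMER_ACTIVE_ALL      (HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)

/*
 * Same pattern as __hrtimer_run_queues()/__hrtimer_get_next_event() after
 * the patch: mask the bitmap of armed bases first, then walk only the bases
 * that survived. (run_selected_bases() is made up for this sketch; the
 * kernel iterates with for_each_active_base().)
 */
static void run_selected_bases(unsigned int active_bases, unsigned int active_mask)
{
        unsigned int active = active_bases & active_mask;
        unsigned int i;

        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                if (active & (1U << i))
                        printf("  expiring timers on base %u\n", i);
        }
}

int main(void)
{
        /* Pretend MONOTONIC (hard) and REALTIME_SOFT (soft) have armed timers. */
        unsigned int active_bases = (1U << HRTIMER_BASE_MONOTONIC) |
                                    (1U << HRTIMER_BASE_REALTIME_SOFT);

        printf("hard interrupt expiry (mask 0x%02x):\n", HRTIMER_ACTIVE_HARD);
        run_selected_bases(active_bases, HRTIMER_ACTIVE_HARD);  /* visits base 0 only */

        printf("softirq expiry (mask 0x%02x):\n", HRTIMER_ACTIVE_SOFT);
        run_selected_bases(active_bases, HRTIMER_ACTIVE_SOFT);  /* visits base 5 only */

        return 0;
}

Running the sketch visits base 0 for the hard mask and base 5 for the soft mask, mirroring how the hard-interrupt expiry paths in this patch hand in HRTIMER_ACTIVE_HARD while the softirq expiry path added later in this series hands in HRTIMER_ACTIVE_SOFT.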