author		Thomas Gleixner <tglx@linutronix.de>	2017-05-30 23:15:43 +0200
committer	Thomas Gleixner <tglx@linutronix.de>	2017-06-04 15:40:24 +0200
commit	80105cd0e62ba8a2caf8eebd52f42952c7c04046 (patch)
tree	07a96085c4edea3adb431799fc82f1967a37f601 /kernel/time
parent	af888d677a3f4473c198b4720319dd037f398b51 (diff)
posix-timers: Move interval out of the union
Preparatory patch to unify the alarm timer and hrtimer based posix interval timer handling. The interval is used as a criterion for rearming decisions, so moving it out of the clock specific data structures allows later unification.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: John Stultz <john.stultz@linaro.org>
Link: http://lkml.kernel.org/r/20170530211656.563922908@linutronix.de
Diffstat (limited to 'kernel/time')
-rw-r--r--	kernel/time/alarmtimer.c	| 13
-rw-r--r--	kernel/time/posix-timers.c	| 20
2 files changed, 16 insertions(+), 17 deletions(-)
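The struct change this patch relies on lives in include/linux/posix-timers.h and is therefore outside the 'kernel/time' diffstat shown here. The following is only an abridged sketch of struct k_itimer after the move, with most unrelated members omitted, to show where it_interval now sits:

	struct k_itimer {
		spinlock_t		it_lock;
		clockid_t		it_clock;
		timer_t			it_id;
		int			it_overrun;
		int			it_requeue_pending;
		int			it_sigev_notify;
		ktime_t			it_interval;	/* moved out of the union below */
		struct signal_struct	*it_signal;
		union {
			struct {
				struct hrtimer	timer;		/* previously also carried 'interval' */
			} real;
			struct {
				struct alarm	alarmtimer;	/* previously also carried 'interval' */
			} alarm;
			/* cpu timer and other clock specific members omitted */
		} it;
	};

With the interval in the common part of the structure, both the alarm timer code and the hrtimer based code below can read and write the same timr->it_interval field.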
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 36855d6..5b8cf4b 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -527,9 +527,8 @@ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
}
/* Re-add periodic timers */
- if (ptr->it.alarm.interval) {
- ptr->it_overrun += alarm_forward(alarm, now,
- ptr->it.alarm.interval);
+ if (ptr->it_interval) {
+ ptr->it_overrun += alarm_forward(alarm, now, ptr->it_interval);
result = ALARMTIMER_RESTART;
}
spin_unlock_irqrestore(&ptr->it_lock, flags);
@@ -613,7 +612,7 @@ static void alarm_timer_get(struct k_itimer *timr,
cur_setting->it_value.tv_nsec = 0;
}
- cur_setting->it_interval = ktime_to_timespec64(timr->it.alarm.interval);
+ cur_setting->it_interval = ktime_to_timespec64(timr->it_interval);
}
/**
@@ -662,14 +661,14 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
return TIMER_RETRY;
/* start the timer */
- timr->it.alarm.interval = timespec64_to_ktime(new_setting->it_interval);
+ timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
/*
* Rate limit to the tick as a hot fix to prevent DOS. Will be
* mopped up later.
*/
- if (timr->it.alarm.interval < TICK_NSEC)
- timr->it.alarm.interval = TICK_NSEC;
+ if (timr->it_interval < TICK_NSEC)
+ timr->it_interval = TICK_NSEC;
exp = timespec64_to_ktime(new_setting->it_value);
/* Convert (if necessary) to absolute time */
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 79a00e0..7dd992c 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -285,12 +285,12 @@ static void schedule_next_timer(struct k_itimer *timr)
{
struct hrtimer *timer = &timr->it.real.timer;
- if (timr->it.real.interval == 0)
+ if (!timr->it_interval)
return;
timr->it_overrun += (unsigned int) hrtimer_forward(timer,
timer->base->get_time(),
- timr->it.real.interval);
+ timr->it_interval);
hrtimer_restart(timer);
}
@@ -375,7 +375,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
timr = container_of(timer, struct k_itimer, it.real.timer);
spin_lock_irqsave(&timr->it_lock, flags);
- if (timr->it.real.interval != 0)
+ if (timr->it_interval != 0)
si_private = ++timr->it_requeue_pending;
if (posix_timer_event(timr, si_private)) {
@@ -384,7 +384,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
* we will not get a call back to restart it AND
* it should be restarted.
*/
- if (timr->it.real.interval != 0) {
+ if (timr->it_interval != 0) {
ktime_t now = hrtimer_cb_get_time(timer);
/*
@@ -413,13 +413,13 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
ktime_t kj = NSEC_PER_SEC / HZ;
- if (timr->it.real.interval < kj)
+ if (timr->it_interval < kj)
now = ktime_add(now, kj);
}
#endif
timr->it_overrun += (unsigned int)
hrtimer_forward(timer, now,
- timr->it.real.interval);
+ timr->it_interval);
ret = HRTIMER_RESTART;
++timr->it_requeue_pending;
}
@@ -631,7 +631,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
memset(cur_setting, 0, sizeof(*cur_setting));
- iv = timr->it.real.interval;
+ iv = timr->it_interval;
/* interval timer ? */
if (iv)
@@ -732,7 +732,7 @@ common_timer_set(struct k_itimer *timr, int flags,
common_timer_get(timr, old_setting);
/* disable the timer */
- timr->it.real.interval = 0;
+ timr->it_interval = 0;
/*
* careful here. If smp we could be in the "fire" routine which will
* be spinning as we hold the lock. But this is ONLY an SMP issue.
@@ -755,7 +755,7 @@ common_timer_set(struct k_itimer *timr, int flags,
hrtimer_set_expires(timer, timespec64_to_ktime(new_setting->it_value));
/* Convert interval */
- timr->it.real.interval = timespec64_to_ktime(new_setting->it_interval);
+ timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
/* SIGEV_NONE timers are not queued ! See common_timer_get */
if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
@@ -820,7 +820,7 @@ retry:
static int common_timer_del(struct k_itimer *timer)
{
- timer->it.real.interval = 0;
+ timer->it_interval = 0;
if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0)
return TIMER_RETRY;
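After this change both timer types rearm on the same common field. Condensed from the hunks above (the angle-bracket placeholders stand for the clock specific forward and restart calls and are not real identifiers):

	if (timr->it_interval) {
		timr->it_overrun += <clock>_forward(..., now, timr->it_interval);
		<restart the timer>;
	}

This shared shape is what the planned unification of the alarm timer and hrtimer based interval timer handling builds on.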