timekeeping: Use cached ntp_tick_length when accumulating error
author John Stultz <john.stultz@linaro.org>
Thu, 24 Apr 2014 03:53:29 +0000 (20:53 -0700)
committer John Stultz <john.stultz@linaro.org>
Wed, 23 Jul 2014 22:01:57 +0000 (15:01 -0700)
By caching the ntp_tick_length() when we correct the frequency error,
and then using that cached value to accumulate error, we avoid large
initial errors when the tick length is changed.

This makes convergence happen much faster in the simulator, since the
initial error doesn't have to be slowly whittled away.

This initially seems like an accounting error, but Miroslav pointed out
that ntp_tick_length() can change mid-tick, so when we apply it in the
error accumulation, we are applying any recent change to the entire tick.

This approach chooses to apply changes to ntp_tick_length() only to the
next tick, which allows us to calculate the freq correction before using
the new tick length, and avoids accumulating error.
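As an illustration of the pattern (a minimal standalone sketch, not the
kernel code; the struct, helper names and numbers below are made up for
demonstration), caching the tick length at frequency-correction time means
a mid-tick change to the live value cannot be charged to the tick already
in progress:

	/* Simplified sketch of the caching idea; "freq_adjust" and
	 * "accumulate" are illustrative stand-ins, not kernel functions. */
	#include <stdint.h>
	#include <stdio.h>

	#define SHIFT 8

	struct timekeeper {
		uint64_t ntp_tick;	/* tick length cached at freq-correction time */
		int64_t  ntp_error;	/* accumulated error, shifted nanoseconds */
	};

	/* The "live" NTP tick length; may change at any time, even mid-tick. */
	static uint64_t ntp_tick_length_now = 1000000000ULL << SHIFT;

	static void freq_adjust(struct timekeeper *tk)
	{
		/* Snapshot the tick length; the entire next tick uses this value. */
		tk->ntp_tick = ntp_tick_length_now;
	}

	static void accumulate(struct timekeeper *tk, uint64_t clock_interval)
	{
		/* Accumulate against the cached value, not the live one, so a
		 * mid-tick change cannot perturb the tick in progress. */
		tk->ntp_error += (int64_t)(tk->ntp_tick - (clock_interval << SHIFT));
	}

	int main(void)
	{
		struct timekeeper tk = { 0, 0 };

		freq_adjust(&tk);
		ntp_tick_length_now += 500 << SHIFT;	/* tick length changes mid-tick */
		accumulate(&tk, 1000000000ULL);		/* error stays 0 for this tick */
		printf("ntp_error = %lld\n", (long long)tk.ntp_error);
		return 0;
	}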

Credit to Miroslav for pointing this out and providing the original patch
this functionality was pulled out from, along with the rationale.

Cc: Miroslav Lichvar <mlichvar@redhat.com>
Cc: Richard Cochran <richardcochran@gmail.com>
Cc: Prarit Bhargava <prarit@redhat.com>
Reported-by: Miroslav Lichvar <mlichvar@redhat.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
include/linux/timekeeper_internal.h
kernel/time/timekeeping.c

index f7ac48d2edf50e3886b37cd22d3ba64fe3b919f5..e9660e52dc090610cd43a368d03e1cc442114dfe 100644 (file)
@@ -90,6 +90,15 @@ struct timekeeper {
        u64                     xtime_interval;
        s64                     xtime_remainder;
        u32                     raw_interval;
+       /* The ntp_tick_length() value currently being used.
+        * This cached copy ensures we consistently apply the tick
+        * length for an entire tick, as ntp_tick_length may change
+        * mid-tick, and we don't want to apply that new value to
+        * the tick in progress.
+        */
+       u64                     ntp_tick;
+       /* Difference between accumulated time and NTP time in ntp
+        * shifted nanoseconds. */
        s64                     ntp_error;
        u32                     ntp_error_shift;
        u32                     ntp_err_mult;
index 43c706a7a72895bcefd8fa9fc8c671e28c1a1009..f36b02838a4772a7ddaf4064e95a27ab22a6de3f 100644 (file)
@@ -171,6 +171,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 
        tk->ntp_error = 0;
        tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
+       tk->ntp_tick = ntpinterval << tk->ntp_error_shift;
 
        /*
         * The timekeeper keeps its own mult values for the currently
@@ -1352,6 +1353,8 @@ static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
        if (tk->ntp_err_mult)
                xinterval -= tk->cycle_interval;
 
+       tk->ntp_tick = ntp_tick_length();
+
        /* Calculate current error per tick */
        tick_error = ntp_tick_length() >> tk->ntp_error_shift;
        tick_error -= (xinterval + tk->xtime_remainder);
@@ -1497,7 +1500,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
        tk->raw_time.tv_nsec = raw_nsecs;
 
        /* Accumulate error between NTP and clock interval */
-       tk->ntp_error += ntp_tick_length() << shift;
+       tk->ntp_error += tk->ntp_tick << shift;
        tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
                                                (tk->ntp_error_shift + shift);
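
A small standalone sketch of the scale bookkeeping behind these two lines
(the constants below are illustrative stand-ins, not the kernel's values;
only the shift relationship matters): tk->ntp_tick carries nanoseconds
shifted left by NTP_SCALE_SHIFT, while xtime_interval carries nanoseconds
shifted by the clocksource shift, so shifting the latter by
ntp_error_shift (+ shift) puts both terms on the same scale before they
are subtracted:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative values only; the real constants live in the kernel. */
	#define NTP_SCALE_SHIFT	32
	#define CLOCK_SHIFT	24

	int main(void)
	{
		uint32_t ntp_error_shift = NTP_SCALE_SHIFT - CLOCK_SHIFT;
		uint64_t ntp_tick = 1000000ULL << NTP_SCALE_SHIFT;	/* ns << NTP_SCALE_SHIFT */
		uint64_t xtime_interval = 1000000ULL << CLOCK_SHIFT;	/* ns << clock->shift */
		int shift = 3;						/* accumulating 2^shift intervals */
		int64_t ntp_error = 0;

		/* Both terms end up as ns << NTP_SCALE_SHIFT, so they can be
		 * compared directly; with equal lengths the error is zero. */
		ntp_error += ntp_tick << shift;
		ntp_error -= xtime_interval << (ntp_error_shift + shift);

		printf("ntp_error = %lld\n", (long long)ntp_error);
		return 0;
	}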