diff options
-rw-r--r-- | kernel/time/timekeeping.c | 12 |
1 file changed, 8 insertions, 4 deletions
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index eff0b1e96331..59b896621bfd 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -988,7 +988,8 @@ static void timekeeping_adjust(s64 offset)
  *
  * Returns the unconsumed cycles.
  */
-static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
+static cycle_t logarithmic_accumulation(cycle_t offset, int shift,
+					unsigned int *clock_set)
 {
 	u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
 	u64 raw_nsecs;
@@ -1010,7 +1011,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
 		timekeeper.xtime.tv_sec += leap;
 		timekeeper.wall_to_monotonic.tv_sec -= leap;
 		if (leap)
-			clock_was_set_delayed();
+			*clock_set = 1;
 	}
 
 	/* Accumulate raw time */
@@ -1042,6 +1043,7 @@ static void update_wall_time(void)
 	struct clocksource *clock;
 	cycle_t offset;
 	int shift = 0, maxshift;
+	unsigned int clock_set = 0;
 	unsigned long flags;
 
 	write_seqlock_irqsave(&timekeeper.lock, flags);
@@ -1077,7 +1079,7 @@ static void update_wall_time(void)
 	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
 	shift = min(shift, maxshift);
 	while (offset >= timekeeper.cycle_interval) {
-		offset = logarithmic_accumulation(offset, shift);
+		offset = logarithmic_accumulation(offset, shift, &clock_set);
 		if(offset < timekeeper.cycle_interval<<shift)
 			shift--;
 	}
@@ -1131,7 +1133,7 @@ static void update_wall_time(void)
 		timekeeper.xtime.tv_sec += leap;
 		timekeeper.wall_to_monotonic.tv_sec -= leap;
 		if (leap)
-			clock_was_set_delayed();
+			clock_set = 1;
 	}
 
 	timekeeping_update(false);
@@ -1139,6 +1141,8 @@ static void update_wall_time(void)
 
 out:
 	write_sequnlock_irqrestore(&timekeeper.lock, flags);
+	if (clock_set)
+		clock_was_set_delayed();
 }
 
 /**