Move source tree closer to the 2.6.32.17 mainline
/arch/x86/kernel/pvclock.c
blob:03801f2f761fc312ba80353425c1cc7243a0105f -> blob:dfdfe4662e0508073fbc72f946d006c1ac299497
--- arch/x86/kernel/pvclock.c
+++ arch/x86/kernel/pvclock.c
@@ -109,11 +109,14 @@ unsigned long pvclock_tsc_khz(struct pvc
return pv_tsc_khz;
}
+static atomic64_t last_value = ATOMIC64_INIT(0);
+
cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
{
struct pvclock_shadow_time shadow;
unsigned version;
cycle_t ret, offset;
+ u64 last;
do {
version = pvclock_get_time_values(&shadow, src);
@@ -123,6 +126,27 @@ cycle_t pvclock_clocksource_read(struct
barrier();
} while (version != src->version);
+ /*
+  * Assumption here is that last_value, a global accumulator, always goes
+  * forward. If our reading is less than that, it should not be smaller by
+  * much: we assume there is an error margin we are inside, so the
+  * correction does not sacrifice accuracy.
+  *
+  * For reads: the global value may have changed between the test and the
+  * return, but that only means someone else updated the clock at a later
+  * time. We just need to make sure we are not seeing a backwards event.
+  *
+  * For updates: last_value = ret is not enough, since two vcpus could be
+  * updating at the same time, and one of them could be slightly behind,
+  * making the assumption that last_value always goes forward fail to hold.
+  */
+ last = atomic64_read(&last_value);
+ do {
+ if (ret < last)
+ return last;
+ last = atomic64_cmpxchg(&last_value, last, ret);
+ } while (unlikely(last != ret));
+
return ret;
}
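
For illustration only, the clamp added above can be read in isolation as the
following self-contained userspace sketch. It uses C11 atomics in place of the
kernel's atomic64_* helpers, and read_raw_clock() is an invented stand-in for
the per-vcpu time computed from the pvclock shadow copy; neither appears in
the patch itself.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t last_value;

/* Invented stand-in for the raw per-vcpu reading; it may run slightly
 * behind another vcpu's reading, which is the case the clamp handles. */
static uint64_t read_raw_clock(void)
{
	static uint64_t t = 100;
	return t += 3;
}

static uint64_t monotonic_read(void)
{
	uint64_t ret = read_raw_clock();
	uint64_t last = atomic_load(&last_value);

	for (;;) {
		/* Someone already published a later time: return that,
		 * so no caller ever observes the clock moving backwards. */
		if (ret < last)
			return last;
		/* Try to publish ret; on failure, compare_exchange reloads
		 * last with the current value and we retest, just like the
		 * kernel's atomic64_cmpxchg loop. */
		if (atomic_compare_exchange_weak(&last_value, &last, ret))
			return ret;
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("%llu\n", (unsigned long long)monotonic_read());
	return 0;
}

The compare-and-exchange loop, rather than a plain "last_value = ret" store,
is what makes the guarantee hold across vcpus: two readers may race to
publish, but last_value can only ever grow, so the comment's assumption that
the global accumulator moves forward is enforced rather than merely hoped for.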