Patch 2.6.32.33 to 2.6.32.35
/arch/powerpc/kernel/perf_event.c
blob:87f1663584b054182b3f7719fe96088e637fb78c -> blob:c3f7ac76231e5aec5919ea836abf172bff6fc74b
--- arch/powerpc/kernel/perf_event.c
+++ arch/powerpc/kernel/perf_event.c
@@ -1220,6 +1220,28 @@ unsigned long perf_instruction_pointer(s
 	return ip;
 }
 
+static bool pmc_overflow(unsigned long val)
+{
+	if ((int)val < 0)
+		return true;
+
+	/*
+	 * Events on POWER7 can roll back if a speculative event doesn't
+	 * eventually complete. Unfortunately in some rare cases they will
+	 * raise a performance monitor exception. We need to catch this to
+	 * ensure we reset the PMC. In all cases the PMC will be 256 or less
+	 * cycles from overflow.
+	 *
+	 * We only do this if the first pass fails to find any overflowing
+	 * PMCs because a user might set a period of less than 256 and we
+	 * don't want to mistakenly reset them.
+	 */
+	if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
+		return true;
+
+	return false;
+}
+
 /*
  * Performance monitor interrupt stuff
  */
@@ -1267,7 +1289,7 @@ static void perf_event_interrupt(struct
 			if (is_limited_pmc(i + 1))
 				continue;
 			val = read_pmc(i + 1);
-			if ((int)val < 0)
+			if (pmc_overflow(val))
 				write_pmc(i + 1, 0);
 		}
 	}
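
The patch replaces the plain sign-bit test with pmc_overflow(), which treats a counter as overflowed in two cases: the 32-bit value has already crossed 0x80000000 (bit 31 set, so the cast to int is negative), or, on POWER7 only, the value sits within 256 ticks of that boundary, since a rolled-back speculative event can still raise the performance monitor exception just short of overflow. Below is a minimal standalone sketch of that logic for illustration only; the helper name pmc_overflow_check, the is_power7 flag, and the sample values are assumptions made for this example, and the flag merely stands in for the kernel's __is_processor(PV_POWER7) test.

#include <stdbool.h>
#include <stdio.h>

/*
 * Standalone illustration of the overflow test added by the patch.
 * A PMC value is treated as overflowed if it has crossed 0x80000000,
 * or, on POWER7, if it is within 256 ticks of doing so (a speculative
 * event that rolled back can still raise the PMI in that window).
 * The is_power7 flag stands in for __is_processor(PV_POWER7).
 */
static bool pmc_overflow_check(unsigned long val, bool is_power7)
{
	if ((int)val < 0)		/* bit 31 set: already wrapped */
		return true;

	if (is_power7 && ((0x80000000 - val) <= 256))
		return true;		/* within 256 ticks of overflow */

	return false;
}

int main(void)
{
	/* Hypothetical counter values near and far from the 2^31 boundary. */
	unsigned long samples[] = { 0x7fffffff, 0x7fffff00, 0x80000001, 0x10 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("val=0x%08lx POWER7=%d other=%d\n", samples[i],
		       pmc_overflow_check(samples[i], true),
		       pmc_overflow_check(samples[i], false));
	return 0;
}

As the in-code comment in the patch notes, the relaxed 256-tick test is only applied in the second pass of perf_event_interrupt(), after no overflowed PMC was found by the normal sign-bit check, so counters programmed with a period smaller than 256 are not mistakenly reset.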