Patch 2.6.32.28 to 2.6.32.32
kernel/smp.c
blob:ea5dc8f139371d420d00d39705d9d216fa89c48a -> blob:c9d1c7835c2fa150e154ae95d35e16364b8e385a
--- kernel/smp.c
+++ kernel/smp.c
@@ -193,24 +193,6 @@ void generic_smp_call_function_interrupt
 	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
 		int refs;
-		/*
-		 * Since we walk the list without any locks, we might
-		 * see an entry that was completed, removed from the
-		 * list and is in the process of being reused.
-		 *
-		 * We must check that the cpu is in the cpumask before
-		 * checking the refs, and both must be set before
-		 * executing the callback on this cpu.
-		 */
-
-		if (!cpumask_test_cpu(cpu, data->cpumask))
-			continue;
-
-		smp_rmb();
-
-		if (atomic_read(&data->refs) == 0)
-			continue;
-
 		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
 			continue;
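
The block removed above was the reader-side half of a barrier pairing. generic_smp_call_function_interrupt() walks call_function.queue without taking a lock, so it can observe an entry that has completed and is being recycled for a new call; the deleted checks required this CPU's bit to be visible in data->cpumask before data->refs was trusted, with smp_rmb() ordering the two reads against the writer's smp_wmb(). The following is a minimal sketch of that reader-side pattern, not the kernel's actual code: struct cfd_sketch and cfd_claim() are hypothetical stand-ins for struct call_function_data and the loop body.

/* Hypothetical, simplified stand-in for struct call_function_data. */
struct cfd_sketch {
	struct list_head list;		/* linked on a global queue, walked under RCU */
	struct cpumask *cpumask;	/* CPUs that still have to run the callback */
	atomic_t refs;			/* CPUs remaining; entry is reusable at zero */
};

/* Reader side: decide whether this CPU may run the callback for @data. */
static bool cfd_claim(struct cfd_sketch *data, int cpu)
{
	/* The entry may be mid-reuse: check our cpumask bit first ... */
	if (!cpumask_test_cpu(cpu, data->cpumask))
		return false;

	/* ... and pair with the writer's smp_wmb() before trusting refs. */
	smp_rmb();

	if (atomic_read(&data->refs) == 0)
		return false;

	/* Claim our bit so the callback runs at most once on this CPU. */
	return cpumask_test_and_clear_cpu(cpu, data->cpumask);
}
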
@@ -219,8 +201,6 @@ void generic_smp_call_function_interrupt
 		refs = atomic_dec_return(&data->refs);
 		WARN_ON(refs < 0);
 		if (!refs) {
-			WARN_ON(!cpumask_empty(data->cpumask));
-
 			spin_lock(&call_function.lock);
 			list_del_rcu(&data->csd.list);
 			spin_unlock(&call_function.lock);
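
The WARN_ON() dropped in this hunk asserted an invariant of the completion path: when the last reference is put, every CPU must already have cleared its bit, so the mask is empty by the time the entry is unlinked for reuse. A hedged sketch of that path, reusing the hypothetical cfd_sketch type from above (queue_lock stands in for call_function.lock):

/* Completion side: the CPU that drops the last reference unlinks the
 * entry under the queue lock so it can be recycled for the next call. */
static void cfd_put(struct cfd_sketch *data, spinlock_t *queue_lock)
{
	int refs = atomic_dec_return(&data->refs);

	WARN_ON(refs < 0);
	if (refs)
		return;

	/* The removed assertion: zero refs implies an empty mask. */
	WARN_ON(!cpumask_empty(data->cpumask));

	spin_lock(queue_lock);
	list_del_rcu(&data->list);	/* entry may now be reused */
	spin_unlock(queue_lock);
}
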
@@ -421,21 +401,11 @@ void smp_call_function_many(const struct
 	data = &__get_cpu_var(cfd_data);
 	csd_lock(&data->csd);
-	BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
 	data->csd.func = func;
 	data->csd.info = info;
 	cpumask_and(data->cpumask, mask, cpu_online_mask);
 	cpumask_clear_cpu(this_cpu, data->cpumask);
-
-	/*
-	 * To ensure the interrupt handler gets a complete view
-	 * we order the cpumask and refs writes and order the read
-	 * of them in the interrupt handler. In addition we may
-	 * only clear our own cpu bit from the mask.
-	 */
-	smp_wmb();
-
 	atomic_set(&data->refs, cpumask_weight(data->cpumask));
 	spin_lock_irqsave(&call_function.lock, flags);
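
This final hunk removes the writer-side half of the pairing: the BUG_ON() that caught a still-live entry being rearmed, and the smp_wmb() that published the new cpumask before refs became non-zero. Without that barrier, an interrupt handler on another CPU could in principle observe refs > 0 while still reading a stale mask. A sketch of the removed setup ordering, again using the hypothetical cfd_sketch type (csd locking, queueing and the IPI send are omitted):

/* Writer side: arm an entry for a new cross-CPU function call. */
static void cfd_arm(struct cfd_sketch *data, const struct cpumask *mask,
		    int this_cpu)
{
	/* The removed BUG_ON: the entry must be idle before reuse. */
	BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));

	cpumask_and(data->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, data->cpumask);

	/* Publish the mask before the refcount that readers gate on;
	 * this smp_wmb() pairs with the reader's smp_rmb() above. */
	smp_wmb();

	atomic_set(&data->refs, cpumask_weight(data->cpumask));
}

Pairings like this only work as a pair: the writer's smp_wmb() guarantees nothing unless every lockless reader issues the matching smp_rmb(), which is why both halves appear, and are removed, together in this diff.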