--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1780,6 +1780,16 @@ config ALIGNMENT_TRAP
correct operation of some network protocols. With an IP-only
configuration it is safe to say N, otherwise say Y.
+config ALLOW_CPU_ALIGNMENT
+ bool "Allow CPU-based alignment handling"
+ default y if ARCH_MSM_SCORPION || ARCH_MSM_SCORPIONMP
+ help
+ Advanced ARM processors, such as the Cortex series and other
+ ARMv7-based CPUs, can perform many types of unaligned memory
+ accesses directly in hardware. Handling unaligned accesses on the
+ CPU is typically faster than trapping and fixing them up in
+ software. For best performance on advanced CPUs, say Y here.
+
config UACCESS_WITH_MEMCPY
bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user() (EXPERIMENTAL)"
depends on MMU && EXPERIMENTAL
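
The behavioural difference is easy to demonstrate from user space. A minimal
sketch, not part of the patch: the pointer cast below typically compiles to an
unaligned 32-bit LDR, which ARMv7 hardware completes directly when the SCTLR
A bit is clear (the CONFIG_ALLOW_CPU_ALIGNMENT case); with the A bit set it
faults and is either fixed up by the kernel's alignment handler or raises
SIGBUS, depending on the CONFIG_ALIGNMENT_TRAP fixup mode.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        uint32_t v1, v2;

        /* buf + 1 is not 4-byte aligned: on ARMv7 this usually becomes
         * an unaligned LDR, handled in hardware when SCTLR.A is clear
         * and trapped when it is set. (Strictly undefined C; used here
         * only to provoke the unaligned access.)
         */
        v1 = *(uint32_t *)(buf + 1);

        /* The portable alternative never issues an unaligned load. */
        memcpy(&v2, buf + 1, sizeof(v2));

        printf("cast=0x%08x memcpy=0x%08x\n", (unsigned)v1, (unsigned)v2);
        return 0;
    }
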
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -17,7 +17,7 @@ endif
OBJCOPYFLAGS :=-O binary -R .comment -S
GZFLAGS :=-9
-#KBUILD_CFLAGS +=-pipe
+KBUILD_CFLAGS +=-pipe
# Explicitly specify 32-bit ARM ISA since toolchain default can be -mthumb:
KBUILD_CFLAGS +=$(call cc-option,-marm,)
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -60,10 +60,10 @@ __after_proc_init:
* CP15 system control register value returned in r0 from
* the CPU init function.
*/
-#ifdef CONFIG_ALIGNMENT_TRAP
- orr r0, r0, #CR_A
-#else
+#ifdef CONFIG_ALLOW_CPU_ALIGNMENT
bic r0, r0, #CR_A
+#else
+ orr r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CR_C
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -348,10 +348,10 @@ __secondary_data:
* r13 = *virtual* address to jump to upon completion
*/
__enable_mmu:
-#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
- orr r0, r0, #CR_A
-#else
+#ifdef CONFIG_ALLOW_CPU_ALIGNMENT
bic r0, r0, #CR_A
+#else
+ orr r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CR_C
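
Both head.S changes flip the same SCTLR bit: CR_A (bit 1) enables strict
alignment checking, so the patch clears it when CONFIG_ALLOW_CPU_ALIGNMENT is
set and sets it otherwise. When the trap path is active, the software fixups
are counted in /proc/cpu/alignment (arch/arm/mm/alignment.c); a rough sketch
for watching those counters, again not part of the patch:

    #include <stdio.h>

    /* Dump the ARM alignment-fixup statistics. The file exists only on
     * kernels built with CONFIG_ALIGNMENT_TRAP; with
     * CONFIG_ALLOW_CPU_ALIGNMENT the counters should stay near zero,
     * since the CPU handles the accesses itself.
     */
    int main(void)
    {
        char line[128];
        FILE *f = fopen("/proc/cpu/alignment", "r");

        if (!f) {
            perror("/proc/cpu/alignment");
            return 1;
        }
        while (fgets(line, sizeof(line), f))
            fputs(line, stdout);
        fclose(f);
        return 0;
    }
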
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -143,6 +143,10 @@ SYSCALL_DEFINE1(syncfs, int, fd)
int ret;
int fput_needed;
+#ifdef CONFIG_DYNAMIC_FSYNC
+ if (!early_suspend_active)
+ return 0;
+#endif
file = fget_light(fd, &fput_needed);
if (!file)
return -EBADF;
@@ -261,6 +265,10 @@ SYSCALL_DEFINE1(fdatasync, unsigned int,
*/
int generic_write_sync(struct file *file, loff_t pos, loff_t count)
{
+#ifdef CONFIG_DYNAMIC_FSYNC
+ if (!early_suspend_active)
+ return 0;
+#endif
if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
return 0;
return vfs_fsync_range(file, pos, pos + count - 1,
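
Both fs/sync.c hunks gate on early_suspend_active, which this diff does not
define; it is presumably exported by the rest of the patch set via the
Android earlysuspend framework. A minimal sketch of how such a flag could be
maintained, assuming include/linux/earlysuspend.h is available and with the
dyn_fsync_* names invented for illustration:

    /* Sketch only: assumes the Android earlysuspend framework.
     * early_suspend_active is the flag tested by the hunks above,
     * defined by the unshown part of the patch set.
     */
    #include <linux/earlysuspend.h>
    #include <linux/module.h>

    bool early_suspend_active;

    static void dyn_fsync_early_suspend(struct early_suspend *h)
    {
        early_suspend_active = true;   /* screen off: allow real syncs */
    }

    static void dyn_fsync_late_resume(struct early_suspend *h)
    {
        early_suspend_active = false;  /* screen on: skip syncs */
    }

    static struct early_suspend dyn_fsync_suspend_handler = {
        .suspend = dyn_fsync_early_suspend,
        .resume  = dyn_fsync_late_resume,
    };

    static int __init dyn_fsync_init(void)
    {
        register_early_suspend(&dyn_fsync_suspend_handler);
        return 0;
    }
    late_initcall(dyn_fsync_init);

The trade-off is durability: while the screen is on, the sync calls shown
here become no-ops, so data written in that window can be lost if the device
crashes before the next suspend.
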
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -122,10 +122,10 @@ extern void get_avenrun(unsigned long *l
#define FSHIFT 11 /* nr of bits of precision */
#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
-#define LOAD_FREQ (5*HZ+1) /* 5 sec intervals */
-#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */
-#define EXP_5 2014 /* 1/exp(5sec/5min) */
-#define EXP_15 2037 /* 1/exp(5sec/15min) */
+#define LOAD_FREQ (4*HZ+61) /* 4.61 sec intervals */
+#define EXP_1 1896 /* 1/exp(4.61sec/1min) as fixed-point */
+#define EXP_5 2017 /* 1/exp(4.61sec/5min) */
+#define EXP_15 2038 /* 1/exp(4.61sec/15min) */
#define CALC_LOAD(load,exp,n) \
load *= exp; \
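
With HZ = 100, (4*HZ+61) is 461 ticks, i.e. 4.61 s between load-average
samples (on an HZ=1000 kernel the same expression would give 4.061 s). The
new EXP_* fixed-point decay constants follow from
EXP_n = FIXED_1 / e^(4.61 / (60 * n)). A throwaway check, not part of the
patch:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        const double fixed_1 = 2048.0;   /* FIXED_1 = 1 << FSHIFT */
        const double interval = 4.61;    /* seconds between samples */
        const int minutes[] = { 1, 5, 15 };

        /* EXP_n = FIXED_1 / exp(interval / (n * 60 s)) */
        for (int i = 0; i < 3; i++)
            printf("EXP_%d = %.1f\n", minutes[i],
                   fixed_1 * exp(-interval / (minutes[i] * 60.0)));
        return 0;
    }

This prints roughly 1896.5, 2016.8 and 2037.5, which the hunk stores as the
integer constants 1896, 2017 and 2038.
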
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -47,7 +47,7 @@ SCHED_FEAT(CACHE_HOT_BUDDY, 1)
/*
* Use arch dependent cpu power functions
*/
-SCHED_FEAT(ARCH_POWER, 0)
+SCHED_FEAT(ARCH_POWER, 1)
SCHED_FEAT(HRTICK, 0)
SCHED_FEAT(DOUBLE_TICK, 0)
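
Setting ARCH_POWER to 1 lets the load balancer use architecture-specific
cpu_power scaling instead of the generic defaults; in kernels of this vintage
the hook is the weak function arch_scale_freq_power() in kernel/sched.c. A
sketch of the kind of override this feature enables, where cpu_cur_khz() and
cpu_max_khz() are hypothetical helpers:

    /* Sketch only: overrides the __weak arch_scale_freq_power() so a
     * frequency-capped CPU advertises proportionally less capacity to
     * the load balancer.
     */
    #include <linux/sched.h>

    unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
    {
        unsigned long cur = cpu_cur_khz(cpu);  /* hypothetical helper */
        unsigned long max = cpu_max_khz(cpu);  /* hypothetical helper */

        return SCHED_LOAD_SCALE * cur / max;
    }
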
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2234,7 +2234,7 @@ EXPORT_SYMBOL(kmem_cache_free);
* take the list_lock.
*/
static int slub_min_order;
-static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
+static int slub_max_order;
static int slub_min_objects;
/*
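
Dropping the default slub_max_order from PAGE_ALLOC_COSTLY_ORDER (3) to 0
makes SLUB prefer single-page slabs, avoiding higher-order allocations that
can stall or fail under memory fragmentation, at the cost of fewer objects
per slab. The value stays tunable at boot: mm/slub.c already wires a
slub_max_order= kernel parameter, roughly as below (paraphrased from slub.c,
not part of this patch):

    /* Paraphrase of the existing slub_max_order= boot-parameter hook in
     * mm/slub.c; with this patch the compile-time default becomes 0,
     * and a system that benefits from larger slabs can still boot with,
     * e.g., slub_max_order=3.
     */
    static int __init setup_slub_max_order(char *str)
    {
        get_option(&str, &slub_max_order);

        return 1;
    }
    __setup("slub_max_order=", setup_slub_max_order);
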