--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -878,6 +878,7 @@ and is between 256 and 4096 characters.
i8042.panicblink=
[HW] Frequency with which keyboard LEDs should blink
when kernel panics (default is 0.5 sec)
+	i8042.notimeout	[HW] Ignore timeout condition signalled by controller
i8042.reset [HW] Reset the controller during init and cleanup
i8042.unlock [HW] Unlock (ignore) the keylock
@@ -2579,6 +2580,10 @@ and is between 256 and 4096 characters.
disables clocksource verification at runtime.
Used to enable high-resolution timer mode on older
hardware, and in virtualized environment.
+ [x86] noirqtime: Do not use TSC to do irq accounting.
+			Used to disable IRQ_TIME_ACCOUNTING at run time on
+ platforms where RDTSC is slow and this accounting
+ can add overhead.
turbografx.map[2|3]= [HW,JOY]
TurboGraFX parallel port interface
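
As an illustration of the two entries added above, both are kernel command-line options and would typically be passed at boot as, e.g.:

	i8042.notimeout tsc=noirqtime

i8042.notimeout is the bool module parameter introduced in drivers/input/serio/i8042.c later in this patch; noirqtime is parsed by tsc_setup() in arch/x86/kernel/tsc.c, where it keeps enable_sched_clock_irqtime() from being called.
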
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 32
-EXTRAVERSION = .28-ck2
+EXTRAVERSION = .32-ck2
NAME = Man-Eating Seals of Antiquity
# *DOCUMENTATION*
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -389,7 +389,9 @@ setup_return(struct pt_regs *regs, struc
unsigned long handler = (unsigned long)ka->sa.sa_handler;
unsigned long retcode;
int thumb = 0;
- unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;
+ unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
+
+ cpsr |= PSR_ENDSTATE;
/*
* Maybe we need to deliver a 32-bit signal to a 26-bit task.
--- a/arch/ia64/include/asm/system.h
+++ b/arch/ia64/include/asm/system.h
@@ -281,10 +281,6 @@ void cpu_idle_wait(void);
void default_idle(void);
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-extern void account_system_vtime(struct task_struct *);
-#endif
-
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -1126,15 +1126,13 @@ int pdc_iodc_print(const unsigned char *
unsigned int i;
unsigned long flags;
- for (i = 0; i < count && i < 79;) {
+ for (i = 0; i < count;) {
switch(str[i]) {
case '\n':
iodc_dbuf[i+0] = '\r';
iodc_dbuf[i+1] = '\n';
i += 2;
goto print;
- case '\b': /* BS */
- i--; /* overwrite last */
default:
iodc_dbuf[i] = str[i];
i++;
@@ -1142,15 +1140,6 @@ int pdc_iodc_print(const unsigned char *
}
}
- /* if we're at the end of line, and not already inserting a newline,
- * insert one anyway. iodc console doesn't claim to support >79 char
- * lines. don't account for this in the return value.
- */
- if (i == 79 && iodc_dbuf[i-1] != '\n') {
- iodc_dbuf[i+0] = '\r';
- iodc_dbuf[i+1] = '\n';
- }
-
print:
spin_lock_irqsave(&pdc_lock, flags);
real32_call(PAGE0->mem_cons.iodc_io,
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -540,10 +540,6 @@ extern void reloc_got2(unsigned long);
#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-extern void account_system_vtime(struct task_struct *);
-#endif
-
extern struct dentry *powerpc_debugfs_root;
#endif /* __KERNEL__ */
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -18,7 +18,7 @@
#include <asm/mmu.h>
_GLOBAL(__setup_cpu_603)
- mflr r4
+ mflr r5
BEGIN_MMU_FTR_SECTION
li r10,0
mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */
@@ -27,60 +27,60 @@ BEGIN_FTR_SECTION
bl __init_fpu_registers
END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
bl setup_common_caches
- mtlr r4
+ mtlr r5
blr
_GLOBAL(__setup_cpu_604)
- mflr r4
+ mflr r5
bl setup_common_caches
bl setup_604_hid0
- mtlr r4
+ mtlr r5
blr
_GLOBAL(__setup_cpu_750)
- mflr r4
+ mflr r5
bl __init_fpu_registers
bl setup_common_caches
bl setup_750_7400_hid0
- mtlr r4
+ mtlr r5
blr
_GLOBAL(__setup_cpu_750cx)
- mflr r4
+ mflr r5
bl __init_fpu_registers
bl setup_common_caches
bl setup_750_7400_hid0
bl setup_750cx
- mtlr r4
+ mtlr r5
blr
_GLOBAL(__setup_cpu_750fx)
- mflr r4
+ mflr r5
bl __init_fpu_registers
bl setup_common_caches
bl setup_750_7400_hid0
bl setup_750fx
- mtlr r4
+ mtlr r5
blr
_GLOBAL(__setup_cpu_7400)
- mflr r4
+ mflr r5
bl __init_fpu_registers
bl setup_7400_workarounds
bl setup_common_caches
bl setup_750_7400_hid0
- mtlr r4
+ mtlr r5
blr
_GLOBAL(__setup_cpu_7410)
- mflr r4
+ mflr r5
bl __init_fpu_registers
bl setup_7410_workarounds
bl setup_common_caches
bl setup_750_7400_hid0
li r3,0
mtspr SPRN_L2CR2,r3
- mtlr r4
+ mtlr r5
blr
_GLOBAL(__setup_cpu_745x)
- mflr r4
+ mflr r5
bl setup_common_caches
bl setup_745x_specifics
- mtlr r4
+ mtlr r5
blr
/* Enable caches for 603's, 604, 750 & 7400 */
@@ -194,10 +194,10 @@ setup_750cx:
cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
cror 4*cr0+eq,4*cr0+eq,4*cr2+eq
bnelr
- lwz r6,CPU_SPEC_FEATURES(r5)
+ lwz r6,CPU_SPEC_FEATURES(r4)
li r7,CPU_FTR_CAN_NAP
andc r6,r6,r7
- stw r6,CPU_SPEC_FEATURES(r5)
+ stw r6,CPU_SPEC_FEATURES(r4)
blr
/* 750fx specific
@@ -225,12 +225,12 @@ BEGIN_FTR_SECTION
andis. r11,r11,L3CR_L3E@h
beq 1f
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
- lwz r6,CPU_SPEC_FEATURES(r5)
+ lwz r6,CPU_SPEC_FEATURES(r4)
andi. r0,r6,CPU_FTR_L3_DISABLE_NAP
beq 1f
li r7,CPU_FTR_CAN_NAP
andc r6,r6,r7
- stw r6,CPU_SPEC_FEATURES(r5)
+ stw r6,CPU_SPEC_FEATURES(r4)
1:
mfspr r11,SPRN_HID0
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -832,7 +832,6 @@ fsl_rio_dbell_handler(int irq, void *dev
if (dsr & DOORBELL_DSR_QFI) {
pr_info("RIO: doorbell queue full\n");
out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI);
- goto out;
}
/* XXX Need to check/dispatch until queue empty */
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -150,11 +150,6 @@ extern int kernel_thread(int (*fn)(void
*/
extern unsigned long thread_saved_pc(struct task_struct *t);
-/*
- * Print register of task into buffer. Used in fs/proc/array.c.
- */
-extern void task_show_regs(struct seq_file *m, struct task_struct *task);
-
extern void show_code(struct pt_regs *regs);
unsigned long get_wchan(struct task_struct *p);
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -97,7 +97,6 @@ static inline void restore_access_regs(u
extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);
-extern void account_system_vtime(struct task_struct *);
#ifdef CONFIG_PFAULT
extern void pfault_irq_init(void);
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -7,7 +7,7 @@
#define VDSO32_LBASE 0
#define VDSO64_LBASE 0
-#define VDSO_VERSION_STRING LINUX_2.6.26
+#define VDSO_VERSION_STRING LINUX_2.6.29
#ifndef __ASSEMBLY__
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -243,43 +243,6 @@ void show_regs(struct pt_regs *regs)
show_last_breaking_event(regs);
}
-/* This is called from fs/proc/array.c */
-void task_show_regs(struct seq_file *m, struct task_struct *task)
-{
- struct pt_regs *regs;
-
- regs = task_pt_regs(task);
- seq_printf(m, "task: %p, ksp: %p\n",
- task, (void *)task->thread.ksp);
- seq_printf(m, "User PSW : %p %p\n",
- (void *) regs->psw.mask, (void *)regs->psw.addr);
-
- seq_printf(m, "User GPRS: " FOURLONG,
- regs->gprs[0], regs->gprs[1],
- regs->gprs[2], regs->gprs[3]);
- seq_printf(m, " " FOURLONG,
- regs->gprs[4], regs->gprs[5],
- regs->gprs[6], regs->gprs[7]);
- seq_printf(m, " " FOURLONG,
- regs->gprs[8], regs->gprs[9],
- regs->gprs[10], regs->gprs[11]);
- seq_printf(m, " " FOURLONG,
- regs->gprs[12], regs->gprs[13],
- regs->gprs[14], regs->gprs[15]);
- seq_printf(m, "User ACRS: %08x %08x %08x %08x\n",
- task->thread.acrs[0], task->thread.acrs[1],
- task->thread.acrs[2], task->thread.acrs[3]);
- seq_printf(m, " %08x %08x %08x %08x\n",
- task->thread.acrs[4], task->thread.acrs[5],
- task->thread.acrs[6], task->thread.acrs[7]);
- seq_printf(m, " %08x %08x %08x %08x\n",
- task->thread.acrs[8], task->thread.acrs[9],
- task->thread.acrs[10], task->thread.acrs[11]);
- seq_printf(m, " %08x %08x %08x %08x\n",
- task->thread.acrs[12], task->thread.acrs[13],
- task->thread.acrs[14], task->thread.acrs[15]);
-}
-
static DEFINE_SPINLOCK(die_lock);
void die(const char * str, struct pt_regs * regs, long err)
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -753,6 +753,17 @@ config SCHED_MC
making when dealing with multi-core CPU chips at a cost of slightly
increased overhead in some places. If unsure say N here.
+config IRQ_TIME_ACCOUNTING
+ bool "Fine granularity task level IRQ time accounting"
+ default n
+ ---help---
+ Select this option to enable fine granularity task irq time
+ accounting. This is done by reading a timestamp on each
+	  transition between softirq and hardirq state, so there can be a
+ small performance impact.
+
+ If in doubt, say N here.
+
source "kernel/Kconfig.preempt"
config X86_UP_APIC
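
The help text above compresses the mechanism into one sentence. As a rough C sketch of the idea (hypothetical names; not the code this option enables, which lives in the scheduler and is switched on and off by the enable_sched_clock_irqtime()/disable_sched_clock_irqtime() calls in the tsc.c hunk below):

	/* Hypothetical per-CPU counters, for illustration only. */
	static u64 irq_enter_ns[NR_CPUS];	/* timestamp taken on the transition into irq context */
	static u64 cpu_irq_time[NR_CPUS];	/* accumulated nanoseconds spent in irq context */

	static void irq_time_enter(int cpu)
	{
		irq_enter_ns[cpu] = sched_clock();	/* read a timestamp on entry */
	}

	static void irq_time_exit(int cpu)
	{
		/* charge the elapsed time to this CPU on the way out */
		cpu_irq_time[cpu] += sched_clock() - irq_enter_ns[cpu];
	}

The cost is one extra timestamp read per hardirq/softirq transition, which is why the companion "noirqtime" parameter exists for machines where RDTSC is slow.
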
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -89,6 +89,7 @@ extern int acpi_ht;
extern int acpi_pci_disabled;
extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
+extern int acpi_fix_pin2_polarity;
extern u8 acpi_sci_flags;
extern int acpi_sci_override_gsi;
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -36,8 +36,6 @@ static inline void switch_mm(struct mm_s
unsigned cpu = smp_processor_id();
if (likely(prev != next)) {
- /* stop flush ipis for the previous mm */
- cpumask_clear_cpu(cpu, mm_cpumask(prev));
#ifdef CONFIG_SMP
percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
percpu_write(cpu_tlbstate.active_mm, next);
@@ -47,6 +45,9 @@ static inline void switch_mm(struct mm_s
/* Re-load page tables */
load_cr3(next->pgd);
+ /* stop flush ipis for the previous mm */
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+
/*
* load the LDT, if the LDT is different:
*/
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -10,5 +10,6 @@ unsigned long pvclock_tsc_khz(struct pvc
void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
struct pvclock_vcpu_time_info *vcpu,
struct timespec *ts);
+void pvclock_resume(void);
#endif /* _ASM_X86_PVCLOCK_H */
--- a/arch/x86/include/asm/smpboot_hooks.h
+++ b/arch/x86/include/asm/smpboot_hooks.h
@@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_
*/
CMOS_WRITE(0, 0xf);
- *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
+ *((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0;
}
static inline void __init smpboot_setup_io_apic(void)
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -70,6 +70,7 @@ u8 acpi_sci_flags __initdata;
int acpi_sci_override_gsi __initdata;
int acpi_skip_timer_override __initdata;
int acpi_use_timer_override __initdata;
+int acpi_fix_pin2_polarity __initdata;
#ifdef CONFIG_X86_LOCAL_APIC
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
@@ -360,10 +361,15 @@ acpi_parse_int_src_ovr(struct acpi_subta
return 0;
}
- if (acpi_skip_timer_override &&
- intsrc->source_irq == 0 && intsrc->global_irq == 2) {
- printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
- return 0;
+ if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
+ if (acpi_skip_timer_override) {
+ printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
+ return 0;
+ }
+ if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
+ intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
+ printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
+ }
}
mp_override_legacy_irq(intsrc->source_irq,
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -762,13 +762,21 @@ void set_mtrr_aps_delayed_init(void)
}
/*
- * MTRR initialization for all AP's
+ * Delayed MTRR initialization for all AP's
*/
void mtrr_aps_init(void)
{
if (!use_intel())
return;
+ /*
+	 * Check if someone has requested delaying AP MTRR initialization by
+	 * calling set_mtrr_aps_delayed_init() prior to this point. If not,
+ * then we are done.
+ */
+ if (!mtrr_aps_delayed_init)
+ return;
+
set_mtrr(~0U, 0, 0, 0);
mtrr_aps_delayed_init = false;
}
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -145,15 +145,10 @@ static void __init ati_bugs(int num, int
static u32 __init ati_sbx00_rev(int num, int slot, int func)
{
- u32 old, d;
+ u32 d;
- d = read_pci_config(num, slot, func, 0x70);
- old = d;
- d &= ~(1<<8);
- write_pci_config(num, slot, func, 0x70, d);
d = read_pci_config(num, slot, func, 0x8);
d &= 0xff;
- write_pci_config(num, slot, func, 0x70, old);
return d;
}
@@ -162,13 +157,16 @@ static void __init ati_bugs_contd(int nu
{
u32 d, rev;
- if (acpi_use_timer_override)
- return;
-
rev = ati_sbx00_rev(num, slot, func);
+ if (rev >= 0x40)
+ acpi_fix_pin2_polarity = 1;
+
if (rev > 0x13)
return;
+ if (acpi_use_timer_override)
+ return;
+
/* check for IRQ0 interrupt swap */
d = read_pci_config(num, slot, func, 0x64);
if (!(d & (1<<14)))
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -606,6 +606,8 @@ static void hpet_msi_capability_lookup(u
if (hpet_msi_disable)
return;
+ if (boot_cpu_has(X86_FEATURE_ARAT))
+ return;
id = hpet_readl(HPET_ID);
num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
@@ -894,10 +896,8 @@ int __init hpet_enable(void)
if (id & HPET_ID_LEGSUP) {
hpet_legacy_clockevent_register();
- hpet_msi_capability_lookup(2);
return 1;
}
- hpet_msi_capability_lookup(0);
return 0;
out_nohpet:
@@ -930,12 +930,20 @@ static __init int hpet_late_init(void)
if (!hpet_virt_address)
return -ENODEV;
+ if (hpet_readl(HPET_ID) & HPET_ID_LEGSUP)
+ hpet_msi_capability_lookup(2);
+ else
+ hpet_msi_capability_lookup(0);
+
hpet_reserve_platform_timers(hpet_readl(HPET_ID));
hpet_print_config();
if (hpet_msi_disable)
return 0;
+ if (boot_cpu_has(X86_FEATURE_ARAT))
+ return 0;
+
for_each_online_cpu(cpu) {
hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
}
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -111,6 +111,11 @@ unsigned long pvclock_tsc_khz(struct pvc
static atomic64_t last_value = ATOMIC64_INIT(0);
+void pvclock_resume(void)
+{
+ atomic64_set(&last_value, 0);
+}
+
cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
{
struct pvclock_shadow_time shadow;
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -104,10 +104,14 @@ int __init notsc_setup(char *str)
__setup("notsc", notsc_setup);
+static int no_sched_irq_time;
+
static int __init tsc_setup(char *str)
{
if (!strcmp(str, "reliable"))
tsc_clocksource_reliable = 1;
+ if (!strncmp(str, "noirqtime", 9))
+ no_sched_irq_time = 1;
return 1;
}
@@ -802,6 +806,7 @@ void mark_tsc_unstable(char *reason)
if (!tsc_unstable) {
tsc_unstable = 1;
sched_clock_stable = 0;
+ disable_sched_clock_irqtime();
printk(KERN_INFO "Marking TSC unstable due to %s\n", reason);
/* Change only the rating, when not registered */
if (clocksource_tsc.mult)
@@ -990,6 +995,9 @@ void __init tsc_init(void)
/* now allow native_sched_clock() to use rdtsc */
tsc_disabled = 0;
+ if (!no_sched_irq_time)
+ enable_sched_clock_irqtime();
+
lpj = ((u64)tsc_khz * 1000);
do_div(lpj, HZ);
lpj_fine = lpj;
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -424,6 +424,8 @@ void xen_timer_resume(void)
{
int cpu;
+ pvclock_resume();
+
if (xen_clockevent != &xen_vcpuop_clockevent)
return;
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -610,7 +610,7 @@ static struct scsi_host_template mpc52xx
};
static struct ata_port_operations mpc52xx_ata_port_ops = {
- .inherits = &ata_sff_port_ops,
+ .inherits = &ata_bmdma_port_ops,
.sff_dev_select = mpc52xx_ata_dev_select,
.set_piomode = mpc52xx_ata_set_piomode,
.set_dmamode = mpc52xx_ata_set_dmamode,
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2408,7 +2408,7 @@ static void pkt_release_dev(struct pktcd
pkt_shrink_pktlist(pd);
}
-static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor)
+static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
if (dev_minor >= MAX_WRITERS)
return NULL;
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -139,6 +139,8 @@ struct hvc_iucv_private *hvc_iucv_get_pr
*
* This function allocates a new struct iucv_tty_buffer element and, optionally,
* allocates an internal data buffer with the specified size @size.
+ * The internal data buffer is always allocated with GFP_DMA which is
+ * required for receiving and sending data with IUCV.
* Note: The total message size arises from the internal buffer size and the
* members of the iucv_tty_msg structure.
* The function returns NULL if memory allocation has failed.
@@ -154,7 +156,7 @@ static struct iucv_tty_buffer *alloc_tty
if (size > 0) {
bufp->msg.length = MSG_SIZE(size);
- bufp->mbuf = kmalloc(bufp->msg.length, flags);
+ bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
if (!bufp->mbuf) {
mempool_free(bufp, hvc_iucv_mempool);
return NULL;
@@ -237,7 +239,7 @@ static int hvc_iucv_write(struct hvc_iuc
if (!rb->mbuf) { /* message not yet received ... */
/* allocate mem to store msg data; if no memory is available
* then leave the buffer on the list and re-try later */
- rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC);
+ rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
if (!rb->mbuf)
return -ENOMEM;
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -353,12 +353,14 @@ unsigned long tpm_calc_ordinal_duration(
tpm_protected_ordinal_duration[ordinal &
TPM_PROTECTED_ORDINAL_MASK];
- if (duration_idx != TPM_UNDEFINED)
+ if (duration_idx != TPM_UNDEFINED) {
duration = chip->vendor.duration[duration_idx];
- if (duration <= 0)
+ /* if duration is 0, it's because chip->vendor.duration wasn't */
+ /* filled yet, so we set the lowest timeout just to give enough */
+ /* time for tpm_get_timeouts() to succeed */
+ return (duration <= 0 ? HZ : duration);
+ } else
return 2 * 60 * HZ;
- else
- return duration;
}
EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
@@ -564,9 +566,11 @@ duration:
if (rc)
return;
- if (be32_to_cpu(tpm_cmd.header.out.return_code)
- != 3 * sizeof(u32))
+ if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
+ be32_to_cpu(tpm_cmd.header.out.length)
+ != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
return;
+
duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
chip->vendor.duration[TPM_SHORT] =
usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
@@ -910,6 +914,18 @@ ssize_t tpm_show_caps_1_2(struct device
}
EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
+ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d %d %d\n",
+ jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
+ jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
+ jiffies_to_usecs(chip->vendor.duration[TPM_LONG]));
+}
+EXPORT_SYMBOL_GPL(tpm_show_timeouts);
+
ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -56,6 +56,8 @@ extern ssize_t tpm_show_owned(struct dev
char *);
extern ssize_t tpm_show_temp_deactivated(struct device *,
struct device_attribute *attr, char *);
+extern ssize_t tpm_show_timeouts(struct device *,
+ struct device_attribute *attr, char *);
struct tpm_chip;
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -354,6 +354,7 @@ static DEVICE_ATTR(temp_deactivated, S_I
NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
+static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
static struct attribute *tis_attrs[] = {
&dev_attr_pubek.attr,
@@ -363,7 +364,8 @@ static struct attribute *tis_attrs[] = {
&dev_attr_owned.attr,
&dev_attr_temp_deactivated.attr,
&dev_attr_caps.attr,
- &dev_attr_cancel.attr, NULL,
+ &dev_attr_cancel.attr,
+ &dev_attr_timeouts.attr, NULL,
};
static struct attribute_group tis_attr_grp = {
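
Usage-wise (illustrative only; the exact sysfs path depends on how the TPM device is enumerated), the new read-only attribute registered above reports the three ordinal durations in microseconds on a single line, as formatted by tpm_show_timeouts():

	$ cat /sys/.../timeouts
	<short> <medium> <long>

The three values are chip->vendor.duration[TPM_SHORT], [TPM_MEDIUM] and [TPM_LONG], converted with jiffies_to_usecs().
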
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -92,7 +92,10 @@ config DRM_I830
config DRM_I915
tristate "i915 driver"
depends on AGP_INTEL
+ # we need shmfs for the swappable backing store, and in particular
+ # the shmem_readpage() which depends upon tmpfs
select SHMEM
+ select TMPFS
select DRM_KMS_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -540,7 +540,8 @@ int drm_modeset_ctl(struct drm_device *d
struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
- int crtc, ret = 0;
+ int ret = 0;
+ unsigned int crtc;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -884,6 +884,14 @@ static const struct dmi_system_id intel_
},
{
.callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen i915GMm-HFS",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
.ident = "Aopen i945GTt-VFA",
.matches = {
DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -152,6 +152,13 @@ static bool radeon_atom_apply_quirks(str
return false;
}
+ /* mac rv630, rv730, others */
+ if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
+ (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
+ *connector_type = DRM_MODE_CONNECTOR_9PinDIN;
+ *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
+ }
+
/* ASUS HD 3600 XT board lists the DVI port as HDMI */
if ((dev->pdev->device == 0x9598) &&
(dev->pdev->subsystem_vendor == 0x1043) &&
@@ -1210,7 +1217,7 @@ void radeon_atom_initialize_bios_scratch
bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
/* tell the bios not to handle mode switching */
- bios_6_scratch |= (ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ATOM_S6_ACC_MODE);
+ bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
if (rdev->family >= CHIP_R600) {
WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
@@ -1261,10 +1268,13 @@ void radeon_atom_output_lock(struct drm_
else
bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
- if (lock)
+ if (lock) {
bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
- else
+ bios_6_scratch &= ~ATOM_S6_ACC_MODE;
+ } else {
bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
+ bios_6_scratch |= ATOM_S6_ACC_MODE;
+ }
if (rdev->family >= CHIP_R600)
WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -540,6 +540,10 @@ void radeon_compute_pll(struct radeon_pl
*frac_fb_div_p = best_frac_feedback_div;
*ref_div_p = best_ref_div;
*post_div_p = best_post_div;
+ DRM_DEBUG_KMS("%d %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
+ freq, best_freq / 1000, best_feedback_div, best_frac_feedback_div,
+ best_ref_div, best_post_div);
+
}
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1297,6 +1297,7 @@ static const struct hid_device_id hid_bl
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
--- a/drivers/hid/hid-cypress.c
+++ b/drivers/hid/hid-cypress.c
@@ -126,6 +126,8 @@ static const struct hid_device_id cp_dev
.driver_data = CP_RDESC_SWAPPED_MIN_MAX },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2),
.driver_data = CP_RDESC_SWAPPED_MIN_MAX },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3),
+ .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE),
.driver_data = CP_2WHEEL_MOUSE_HACK },
{ }
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -148,6 +148,7 @@
#define USB_DEVICE_ID_CYPRESS_ULTRAMOUSE 0x7417
#define USB_DEVICE_ID_CYPRESS_BARCODE_1 0xde61
#define USB_DEVICE_ID_CYPRESS_BARCODE_2 0xde64
+#define USB_DEVICE_ID_CYPRESS_BARCODE_3 0xbca1
#define USB_VENDOR_ID_DEALEXTREAME 0x10c5
#define USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701 0x819a
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -687,6 +687,13 @@ static int __devexit via686a_remove(stru
return 0;
}
+static void via686a_update_fan_div(struct via686a_data *data)
+{
+ int reg = via686a_read_value(data, VIA686A_REG_FANDIV);
+ data->fan_div[0] = (reg >> 4) & 0x03;
+ data->fan_div[1] = reg >> 6;
+}
+
static void __devinit via686a_init_device(struct via686a_data *data)
{
u8 reg;
@@ -700,6 +707,9 @@ static void __devinit via686a_init_devic
via686a_write_value(data, VIA686A_REG_TEMP_MODE,
(reg & ~VIA686A_TEMP_MODE_MASK)
| VIA686A_TEMP_MODE_CONTINUOUS);
+
+ /* Pre-read fan clock divisor values */
+ via686a_update_fan_div(data);
}
static struct via686a_data *via686a_update_device(struct device *dev)
@@ -751,9 +761,7 @@ static struct via686a_data *via686a_upda
(via686a_read_value(data, VIA686A_REG_TEMP_LOW23) &
0xc0) >> 6;
- i = via686a_read_value(data, VIA686A_REG_FANDIV);
- data->fan_div[0] = (i >> 4) & 0x03;
- data->fan_div[1] = i >> 6;
+ via686a_update_fan_div(data);
data->alarms =
via686a_read_value(data,
VIA686A_REG_ALARM1) |
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -745,6 +745,14 @@ static int i2c_do_del_adapter(struct dev
static int __unregister_client(struct device *dev, void *dummy)
{
struct i2c_client *client = i2c_verify_client(dev);
+ if (client && strcmp(client->name, "dummy"))
+ i2c_unregister_device(client);
+ return 0;
+}
+
+static int __unregister_dummy(struct device *dev, void *dummy)
+{
+ struct i2c_client *client = i2c_verify_client(dev);
if (client)
i2c_unregister_device(client);
return 0;
@@ -793,8 +801,12 @@ int i2c_del_adapter(struct i2c_adapter *
}
/* Detach any active clients. This can't fail, thus we do not
- checking the returned value. */
+ * check the returned value. This is a two-pass process, because
+ * we can't remove the dummy devices during the first pass: they
+ * could have been instantiated by real devices wishing to clean
+ * them up properly, so we give them a chance to do that first. */
res = device_for_each_child(&adap->dev, NULL, __unregister_client);
+ res = device_for_each_child(&adap->dev, NULL, __unregister_dummy);
#ifdef CONFIG_I2C_COMPAT
class_compat_remove_link(i2c_adapter_compat_class, &adap->dev,
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -55,6 +55,14 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236
#define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237
#define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238
+/* MacbookAir3,2 (unibody), aka wellspring5 */
+#define USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI 0x023f
+#define USB_DEVICE_ID_APPLE_WELLSPRING4_ISO 0x0240
+#define USB_DEVICE_ID_APPLE_WELLSPRING4_JIS 0x0241
+/* MacbookAir3,1 (unibody), aka wellspring4 */
+#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI 0x0242
+#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO 0x0243
+#define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS 0x0244
#define BCM5974_DEVICE(prod) { \
.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
@@ -80,6 +88,14 @@ static const struct usb_device_id bcm597
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ISO),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_JIS),
+ /* MacbookAir3,2 */
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_JIS),
+ /* MacbookAir3,1 */
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS),
/* Terminating entry */
{}
};
@@ -233,6 +249,30 @@ static const struct bcm5974_config bcm59
{ DIM_X, DIM_X / SN_COORD, -4460, 5166 },
{ DIM_Y, DIM_Y / SN_COORD, -75, 6700 }
},
+ {
+ USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI,
+ USB_DEVICE_ID_APPLE_WELLSPRING4_ISO,
+ USB_DEVICE_ID_APPLE_WELLSPRING4_JIS,
+ HAS_INTEGRATED_BUTTON,
+ 0x84, sizeof(struct bt_data),
+ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
+ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+ { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
+ { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
+ },
+ {
+ USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI,
+ USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO,
+ USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS,
+ HAS_INTEGRATED_BUTTON,
+ 0x84, sizeof(struct bt_data),
+ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
+ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+ { DIM_X, DIM_X / SN_COORD, -4616, 5112 },
+ { DIM_Y, DIM_Y / SN_COORD, -142, 5234 }
+ },
{}
};
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -416,6 +416,13 @@ static const struct dmi_system_id __init
DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
},
},
+ {
+ /* Dell Vostro V13 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
+ },
+ },
{ }
};
@@ -537,6 +544,17 @@ static const struct dmi_system_id __init
};
#endif
+static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
+ {
+ /* Dell Vostro V13 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
+ },
+ },
+ { }
+};
+
/*
* Some Wistron based laptops need us to explicitly enable the 'Dritek
* keyboard extension' to make their extra keys start generating scancodes.
@@ -866,6 +884,9 @@ static int __init i8042_platform_init(vo
if (dmi_check_system(i8042_dmi_nomux_table))
i8042_nomux = true;
+ if (dmi_check_system(i8042_dmi_notimeout_table))
+ i8042_notimeout = true;
+
if (dmi_check_system(i8042_dmi_dritek_table))
i8042_dritek = true;
#endif /* CONFIG_X86 */
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -64,6 +64,10 @@ static unsigned int i8042_blink_frequenc
module_param_named(panicblink, i8042_blink_frequency, uint, 0600);
MODULE_PARM_DESC(panicblink, "Frequency with which keyboard LEDs should blink when kernel panics");
+static bool i8042_notimeout;
+module_param_named(notimeout, i8042_notimeout, bool, 0);
+MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042");
+
#ifdef CONFIG_X86
static bool i8042_dritek;
module_param_named(dritek, i8042_dritek, bool, 0);
@@ -434,7 +438,7 @@ static irqreturn_t i8042_interrupt(int i
} else {
dfl = ((str & I8042_STR_PARITY) ? SERIO_PARITY : 0) |
- ((str & I8042_STR_TIMEOUT) ? SERIO_TIMEOUT : 0);
+ ((str & I8042_STR_TIMEOUT && !i8042_notimeout) ? SERIO_TIMEOUT : 0);
port_no = (str & I8042_STR_AUXDATA) ?
I8042_AUX_PORT_NO : I8042_KBD_PORT_NO;
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -800,6 +800,16 @@ static void closecard(int cardnr)
ll_unload(csta);
}
+static irqreturn_t card_irq(int intno, void *dev_id)
+{
+ struct IsdnCardState *cs = dev_id;
+ irqreturn_t ret = cs->irq_func(intno, cs);
+
+ if (ret == IRQ_HANDLED)
+ cs->irq_cnt++;
+ return ret;
+}
+
static int init_card(struct IsdnCardState *cs)
{
int irq_cnt, cnt = 3, ret;
@@ -808,10 +818,10 @@ static int init_card(struct IsdnCardStat
ret = cs->cardmsg(cs, CARD_INIT, NULL);
return(ret);
}
- irq_cnt = kstat_irqs(cs->irq);
+ irq_cnt = cs->irq_cnt = 0;
printk(KERN_INFO "%s: IRQ %d count %d\n", CardType[cs->typ],
cs->irq, irq_cnt);
- if (request_irq(cs->irq, cs->irq_func, cs->irq_flags, "HiSax", cs)) {
+ if (request_irq(cs->irq, card_irq, cs->irq_flags, "HiSax", cs)) {
printk(KERN_WARNING "HiSax: couldn't get interrupt %d\n",
cs->irq);
return 1;
@@ -821,8 +831,8 @@ static int init_card(struct IsdnCardStat
/* Timeout 10ms */
msleep(10);
printk(KERN_INFO "%s: IRQ %d count %d\n",
- CardType[cs->typ], cs->irq, kstat_irqs(cs->irq));
- if (kstat_irqs(cs->irq) == irq_cnt) {
+ CardType[cs->typ], cs->irq, cs->irq_cnt);
+ if (cs->irq_cnt == irq_cnt) {
printk(KERN_WARNING
"%s: IRQ(%d) getting no interrupts during init %d\n",
CardType[cs->typ], cs->irq, 4 - cnt);
--- a/drivers/isdn/hisax/hisax.h
+++ b/drivers/isdn/hisax/hisax.h
@@ -959,6 +959,7 @@ struct IsdnCardState {
u_long event;
struct work_struct tqueue;
struct timer_list dbusytimer;
+ unsigned int irq_cnt;
#ifdef ERROR_STATISTIC
int err_crc;
int err_tx;
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -33,7 +33,6 @@ struct pgpath {
unsigned fail_count; /* Cumulative failure count */
struct dm_path path;
- struct work_struct deactivate_path;
struct work_struct activate_path;
};
@@ -113,7 +112,6 @@ static struct workqueue_struct *kmultipa
static void process_queued_ios(struct work_struct *work);
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
-static void deactivate_path(struct work_struct *work);
/*-----------------------------------------------
@@ -126,7 +124,6 @@ static struct pgpath *alloc_pgpath(void)
if (pgpath) {
pgpath->is_active = 1;
- INIT_WORK(&pgpath->deactivate_path, deactivate_path);
INIT_WORK(&pgpath->activate_path, activate_path);
}
@@ -138,14 +135,6 @@ static void free_pgpath(struct pgpath *p
kfree(pgpath);
}
-static void deactivate_path(struct work_struct *work)
-{
- struct pgpath *pgpath =
- container_of(work, struct pgpath, deactivate_path);
-
- blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
-}
-
static struct priority_group *alloc_priority_group(void)
{
struct priority_group *pg;
@@ -949,7 +938,6 @@ static int fail_path(struct pgpath *pgpa
pgpath->path.dev->name, m->nr_valid_paths);
schedule_work(&m->trigger_event);
- queue_work(kmultipathd, &pgpath->deactivate_path);
out:
spin_unlock_irqrestore(&m->lock, flags);
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -659,7 +659,7 @@ static void do_writes(struct mirror_set
/*
* Dispatch io.
*/
- if (unlikely(ms->log_failure)) {
+ if (unlikely(ms->log_failure) && errors_handled(ms)) {
spin_lock_irq(&ms->lock);
bio_list_merge(&ms->failures, &sync);
spin_unlock_irq(&ms->lock);
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -643,10 +643,9 @@ void dm_rh_recovery_end(struct dm_region
spin_lock_irq(&rh->region_lock);
if (success)
		list_add(&reg->list, &reg->rh->recovered_regions);
- else {
- reg->state = DM_RH_NOSYNC;
+ else
		list_add(&reg->list, &reg->rh->failed_recovered_regions);
- }
+
spin_unlock_irq(&rh->region_lock);
rh->wakeup_workers(rh->context);
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1925,13 +1925,14 @@ static void event_callback(void *context
wake_up(&md->eventq);
}
+/*
+ * Protected by md->suspend_lock obtained by dm_swap_table().
+ */
static void __set_size(struct mapped_device *md, sector_t size)
{
set_capacity(md->disk, size);
- mutex_lock(&md->bdev->bd_inode->i_mutex);
i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
- mutex_unlock(&md->bdev->bd_inode->i_mutex);
}
static int __bind(struct mapped_device *md, struct dm_table *t,
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -305,6 +305,9 @@ static mddev_t * mddev_find(dev_t unit)
{
mddev_t *mddev, *new = NULL;
+ if (unit && MAJOR(unit) != MD_MAJOR)
+ unit &= ~((1<<MdpMinorShift)-1);
+
retry:
spin_lock(&all_mddevs_lock);
@@ -4802,9 +4805,10 @@ static int add_new_disk(mddev_t * mddev,
/* set saved_raid_disk if appropriate */
if (!mddev->persistent) {
if (info->state & (1<<MD_DISK_SYNC) &&
- info->raid_disk < mddev->raid_disks)
+ info->raid_disk < mddev->raid_disks) {
rdev->raid_disk = info->raid_disk;
- else
+ set_bit(In_sync, &rdev->flags);
+ } else
rdev->raid_disk = -1;
} else
super_types[mddev->major_version].
--- a/drivers/media/dvb/ttpci/av7110_ca.c
+++ b/drivers/media/dvb/ttpci/av7110_ca.c
@@ -277,7 +277,7 @@ static int dvb_ca_ioctl(struct inode *in
{
ca_slot_info_t *info=(ca_slot_info_t *)parg;
- if (info->num > 1)
+ if (info->num < 0 || info->num > 1)
return -EINVAL;
av7110->ci_slot[info->num].num = info->num;
av7110->ci_slot[info->num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ?
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -31,7 +31,7 @@
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
#include <linux/ioport.h> /* request_region */
-#include <linux/delay.h> /* udelay */
+#include <linux/delay.h> /* msleep */
#include <linux/videodev2.h> /* kernel radio structs */
#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/io.h> /* outb, outb_p */
@@ -71,27 +71,17 @@ static struct rtrack rtrack_card;
/* local things */
-static void sleep_delay(long n)
-{
- /* Sleep nicely for 'n' uS */
- int d = n / msecs_to_jiffies(1000);
- if (!d)
- udelay(n);
- else
- msleep(jiffies_to_msecs(d));
-}
-
static void rt_decvol(struct rtrack *rt)
{
outb(0x58, rt->io); /* volume down + sigstr + on */
- sleep_delay(100000);
+ msleep(100);
outb(0xd8, rt->io); /* volume steady + sigstr + on */
}
static void rt_incvol(struct rtrack *rt)
{
outb(0x98, rt->io); /* volume up + sigstr + on */
- sleep_delay(100000);
+ msleep(100);
outb(0xd8, rt->io); /* volume steady + sigstr + on */
}
@@ -120,7 +110,7 @@ static int rt_setvol(struct rtrack *rt,
if (vol == 0) { /* volume = 0 means mute the card */
outb(0x48, rt->io); /* volume down but still "on" */
- sleep_delay(2000000); /* make sure it's totally down */
+ msleep(2000); /* make sure it's totally down */
outb(0xd0, rt->io); /* volume steady, off */
rt->curvol = 0; /* track the volume state! */
mutex_unlock(&rt->lock);
@@ -155,7 +145,7 @@ static void send_0_byte(struct rtrack *r
outb_p(128+64+16+8+ 1, rt->io); /* on + wr-enable + data low */
outb_p(128+64+16+8+2+1, rt->io); /* clock */
}
- sleep_delay(1000);
+ msleep(1);
}
static void send_1_byte(struct rtrack *rt)
@@ -169,7 +159,7 @@ static void send_1_byte(struct rtrack *r
outb_p(128+64+16+8+4+2+1, rt->io); /* clock */
}
- sleep_delay(1000);
+ msleep(1);
}
static int rt_setfreq(struct rtrack *rt, unsigned long freq)
@@ -423,7 +413,7 @@ static int __init rtrack_init(void)
/* this ensures that the volume is all the way down */
outb(0x48, rt->io); /* volume down but still "on" */
- sleep_delay(2000000); /* make sure it's totally down */
+ msleep(2000); /* make sure it's totally down */
outb(0xc0, rt->io); /* steady volume, mute card */
return 0;
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -1525,11 +1525,11 @@ struct em28xx_board em28xx_boards[] = {
.input = { {
.type = EM28XX_VMUX_COMPOSITE1,
.vmux = SAA7115_COMPOSITE0,
- .amux = EM28XX_AMUX_VIDEO2,
+ .amux = EM28XX_AMUX_LINE_IN,
}, {
.type = EM28XX_VMUX_SVIDEO,
.vmux = SAA7115_SVIDEO3,
- .amux = EM28XX_AMUX_VIDEO2,
+ .amux = EM28XX_AMUX_LINE_IN,
} },
},
[EM2860_BOARD_TERRATEC_AV350] = {
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -577,6 +577,13 @@ mptctl_event_process(MPT_ADAPTER *ioc, E
}
static int
+mptctl_release(struct inode *inode, struct file *filep)
+{
+ fasync_helper(-1, filep, 0, &async_queue);
+ return 0;
+}
+
+static int
mptctl_fasync(int fd, struct file *filep, int mode)
{
MPT_ADAPTER *ioc;
@@ -2778,6 +2785,7 @@ static const struct file_operations mptc
.llseek = no_llseek,
.fasync = mptctl_fasync,
.unlocked_ioctl = mptctl_ioctl,
+ .release = mptctl_release,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_mpctl_ioctl,
#endif
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1835,8 +1835,9 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
}
out:
- printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n",
- ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt);
+ printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n",
+ ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval,
+ SCpnt, SCpnt->serial_number);
return retval;
}
@@ -1873,7 +1874,7 @@ mptscsih_dev_reset(struct scsi_cmnd * SC
vdevice = SCpnt->device->hostdata;
if (!vdevice || !vdevice->vtarget) {
- retval = SUCCESS;
+ retval = 0;
goto out;
}
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -385,12 +385,18 @@ static int ucb1x00_ts_add(struct ucb1x00
idev->close = ucb1x00_ts_close;
__set_bit(EV_ABS, idev->evbit);
- __set_bit(ABS_X, idev->absbit);
- __set_bit(ABS_Y, idev->absbit);
- __set_bit(ABS_PRESSURE, idev->absbit);
input_set_drvdata(idev, ts);
+ ucb1x00_adc_enable(ts->ucb);
+ ts->x_res = ucb1x00_ts_read_xres(ts);
+ ts->y_res = ucb1x00_ts_read_yres(ts);
+ ucb1x00_adc_disable(ts->ucb);
+
+ input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0);
+ input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0);
+ input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0);
+
err = input_register_device(idev);
if (err)
goto fail;
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -165,6 +165,7 @@ struct be_drvr_stats {
ulong be_tx_jiffies;
u64 be_tx_bytes;
u64 be_tx_bytes_prev;
+ u64 be_tx_pkts;
u32 be_tx_rate;
u32 cache_barrier[16];
@@ -176,6 +177,7 @@ struct be_drvr_stats {
ulong be_rx_jiffies;
u64 be_rx_bytes;
u64 be_rx_bytes_prev;
+ u64 be_rx_pkts;
u32 be_rx_rate;
/* number of non ether type II frames dropped where
* frame len > length field of Mac Hdr */
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -146,13 +146,10 @@ void netdev_stats_update(struct be_adapt
struct net_device_stats *dev_stats = &adapter->stats.net_stats;
struct be_erx_stats *erx_stats = &hw_stats->erx;
- dev_stats->rx_packets = port_stats->rx_total_frames;
- dev_stats->tx_packets = port_stats->tx_unicastframes +
- port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
- dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
- (u64) port_stats->rx_bytes_lsd;
- dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
- (u64) port_stats->tx_bytes_lsd;
+ dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts;
+ dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
+ dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
+ dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
/* bad pkts received */
dev_stats->rx_errors = port_stats->rx_crc_errors +
@@ -309,12 +306,13 @@ static void be_tx_rate_update(struct be_
}
static void be_tx_stats_update(struct be_adapter *adapter,
- u32 wrb_cnt, u32 copied, bool stopped)
+ u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
struct be_drvr_stats *stats = drvr_stats(adapter);
stats->be_tx_reqs++;
stats->be_tx_wrbs += wrb_cnt;
stats->be_tx_bytes += copied;
+ stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
if (stopped)
stats->be_tx_stops++;
}
@@ -462,7 +460,8 @@ static netdev_tx_t be_xmit(struct sk_buf
be_txq_notify(adapter, txq->id, wrb_cnt);
- be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
+ be_tx_stats_update(adapter, wrb_cnt, copied,
+ skb_shinfo(skb)->gso_segs, stopped);
} else {
txq->head = start;
dev_kfree_skb_any(skb);
@@ -605,6 +604,7 @@ static void be_rx_stats_update(struct be
stats->be_rx_compl++;
stats->be_rx_frags += numfrags;
stats->be_rx_bytes += pktsize;
+ stats->be_rx_pkts++;
}
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
--- a/drivers/net/bonding/bond_ipv6.c
+++ b/drivers/net/bonding/bond_ipv6.c
@@ -66,6 +66,13 @@ static void bond_na_send(struct net_devi
};
struct sk_buff *skb;
+ /* The Ethernet header is built in ndisc_send_skb(), not
+ * ndisc_build_skb(), so we cannot insert a VLAN tag. Only an
+ * out-of-line tag inserted by the hardware will work.
+ */
+ if (vlan_id && !(slave_dev->features & NETIF_F_HW_VLAN_TX))
+ return;
+
icmp6h.icmp6_router = router;
icmp6h.icmp6_solicited = 0;
icmp6h.icmp6_override = 1;
@@ -84,7 +91,7 @@ static void bond_na_send(struct net_devi
}
if (vlan_id) {
- skb = vlan_put_tag(skb, vlan_id);
+ skb = __vlan_hwaccel_put_tag(skb, vlan_id);
if (!skb) {
pr_err(DRV_NAME ": failed to insert VLAN tag\n");
return;
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -5186,7 +5186,8 @@ static int __devinit e1000_probe(struct
/* APME bit in EEPROM is mapped to WUC.APME */
eeprom_data = er32(WUC);
eeprom_apme_mask = E1000_WUC_APME;
- if (eeprom_data & E1000_WUC_PHY_WAKE)
+ if ((hw->mac.type > e1000_ich10lan) &&
+ (eeprom_data & E1000_WUC_PHY_WAKE))
adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -338,6 +338,7 @@ static enum ixgbe_media_type ixgbe_get_m
media_type = ixgbe_media_type_backplane;
break;
case IXGBE_DEV_ID_82599_SFP:
+ case IXGBE_DEV_ID_82599_SFP_EM:
media_type = ixgbe_media_type_fiber;
break;
case IXGBE_DEV_ID_82599_CX4:
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -100,6 +100,8 @@ static struct pci_device_id ixgbe_pci_tb
board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
+ board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
@@ -791,6 +793,7 @@ static bool ixgbe_clean_rx_irq(struct ix
break;
(*work_done)++;
+ rmb(); /* read descriptor and rx_buffer_info after status DD */
if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -53,6 +53,7 @@
#define IXGBE_DEV_ID_82599_KR 0x1517
#define IXGBE_DEV_ID_82599_CX4 0x10F9
#define IXGBE_DEV_ID_82599_SFP 0x10FB
+#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -685,6 +685,9 @@ void netxen_p3_nic_set_multi(struct net_
struct list_head *head;
nx_mac_list_t *cur;
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return;
+
list_splice_tail_init(&adapter->mac_list, &del_list);
nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list);
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -23,6 +23,7 @@
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
+#include <linux/pci-aspm.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -3030,6 +3031,11 @@ rtl8169_init_one(struct pci_dev *pdev, c
mii->reg_num_mask = 0x1f;
mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
+	/* Disable ASPM completely, as it causes random devices to stop working
+	 * as well as full system hangs for some PCIe device users. */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pci_enable_device(pdev);
if (rc < 0) {
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -214,8 +214,8 @@ void ath_descdma_cleanup(struct ath_soft
/* returns delimiter padding required given the packet length */
#define ATH_AGGR_GET_NDELIM(_len) \
- (((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? \
- (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2)
+ (((_len) >= ATH_AGGR_MINPLEN) ? 0 : \
+ DIV_ROUND_UP(ATH_AGGR_MINPLEN - (_len), ATH_AGGR_DELIM_SZ))
#define BAW_WITHIN(_start, _bawsz, _seqno) \
((((_seqno) - (_start)) & 4095) < (_bawsz))
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -662,12 +662,6 @@ static int prism2_config(struct pcmcia_d
link->dev_node = &hw_priv->node;
/*
- * Make sure the IRQ handler cannot proceed until at least
- * dev->base_addr is initialized.
- */
- spin_lock_irqsave(&local->irq_init_lock, flags);
-
- /*
* Allocate an interrupt line. Note that this does not assign a
* handler to the interrupt, unless the 'Handler' member of the
* irq structure is initialized.
@@ -690,9 +684,10 @@ static int prism2_config(struct pcmcia_d
CS_CHECK(RequestConfiguration,
pcmcia_request_configuration(link, &link->conf));
+	/* IRQ handler cannot proceed until at least dev->base_addr is initialized */
+ spin_lock_irqsave(&local->irq_init_lock, flags);
dev->irq = link->irq.AssignedIRQ;
dev->base_addr = link->io.BasePort1;
-
spin_unlock_irqrestore(&local->irq_init_lock, flags);
/* Finally, report what we've done */
@@ -724,7 +719,6 @@ static int prism2_config(struct pcmcia_d
return ret;
cs_failed:
- spin_unlock_irqrestore(&local->irq_init_lock, flags);
cs_error(link, last_fn, last_ret);
failed:
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1044,6 +1044,9 @@ static void iwl_irq_tasklet_legacy(struc
		/* only Re-enable if disabled by irq */
if (test_bit(STATUS_INT_ENABLED, &priv->status))
iwl_enable_interrupts(priv);
+ /* Re-enable RF_KILL if it occurred */
+ else if (handled & CSR_INT_BIT_RF_KILL)
+ iwl_enable_rfkill_int(priv);
#ifdef CONFIG_IWLWIFI_DEBUG
if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
@@ -1245,6 +1248,9 @@ static void iwl_irq_tasklet(struct iwl_p
		/* only Re-enable if disabled by irq */
if (test_bit(STATUS_INT_ENABLED, &priv->status))
iwl_enable_interrupts(priv);
+ /* Re-enable RF_KILL if it occurred */
+ else if (handled & CSR_INT_BIT_RF_KILL)
+ iwl_enable_rfkill_int(priv);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -2358,9 +2364,10 @@ static void iwl_mac_stop(struct ieee8021
flush_workqueue(priv->workqueue);
- /* enable interrupts again in order to receive rfkill changes */
+	/* User space software may expect to get rfkill changes
+	 * even if the interface is down */
iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
- iwl_enable_interrupts(priv);
+ iwl_enable_rfkill_int(priv);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
@@ -3060,14 +3067,14 @@ static int iwl_pci_probe(struct pci_dev
* 8. Setup and register mac80211
**********************************/
- /* enable interrupts if needed: hw bug w/a */
+ /* enable rfkill interrupt: hw bug w/a */
pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
}
- iwl_enable_interrupts(priv);
+ iwl_enable_rfkill_int(priv);
err = iwl_setup_mac(priv);
if (err)
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -160,6 +160,12 @@ static inline void iwl_disable_interrupt
IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
}
+static inline void iwl_enable_rfkill_int(struct iwl_priv *priv)
+{
+ IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
+ iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
+}
+
static inline void iwl_enable_interrupts(struct iwl_priv *priv)
{
IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -198,6 +198,7 @@ static void p54p_check_rx_ring(struct ie
while (i != idx) {
u16 len;
struct sk_buff *skb;
+ dma_addr_t dma_addr;
desc = &ring[i];
len = le16_to_cpu(desc->len);
skb = rx_buf[i];
@@ -215,17 +216,20 @@ static void p54p_check_rx_ring(struct ie
len = priv->common.rx_mtu;
}
+ dma_addr = le32_to_cpu(desc->host_addr);
+ pci_dma_sync_single_for_cpu(priv->pdev, dma_addr,
+ priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
skb_put(skb, len);
if (p54_rx(dev, skb)) {
- pci_unmap_single(priv->pdev,
- le32_to_cpu(desc->host_addr),
- priv->common.rx_mtu + 32,
- PCI_DMA_FROMDEVICE);
+ pci_unmap_single(priv->pdev, dma_addr,
+ priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
rx_buf[i] = NULL;
- desc->host_addr = 0;
+ desc->host_addr = cpu_to_le32(0);
} else {
skb_trim(skb, 0);
+ pci_dma_sync_single_for_device(priv->pdev, dma_addr,
+ priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
}
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -95,6 +95,7 @@ static struct usb_device_id p54u_table[]
{USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */
{USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
{USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */
+ {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */
{USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
{USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */
{USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -617,7 +617,7 @@ static void p54_tx_80211_header(struct p
else
*burst_possible = false;
- if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
*flags |= P54_HDR_FLAG_DATA_OUT_SEQNR;
if (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2400,6 +2400,7 @@ static struct usb_device_id rt73usb_devi
{ USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) },
+ { USB_DEVICE(0x0812, 0x3101), USB_DEVICE_DATA(&rt73usb_ops) },
/* Qcom */
{ USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) },
--- a/drivers/pci/pci-stub.c
+++ b/drivers/pci/pci-stub.c
@@ -54,6 +54,9 @@ static int __init pci_stub_init(void)
subdevice = PCI_ANY_ID, class=0, class_mask=0;
int fields;
+ if (!strlen(id))
+ continue;
+
fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
&vendor, &device, &subvendor, &subdevice,
&class, &class_mask);
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -90,7 +90,7 @@ struct acer_quirks {
*/
#define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB"
#define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C"
-#define WMID_GUID1 "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3"
+#define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3"
#define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A"
MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB");
@@ -1065,7 +1065,7 @@ static ssize_t set_bool_threeg(struct de
return -EINVAL;
return count;
}
-static DEVICE_ATTR(threeg, S_IWUGO | S_IRUGO | S_IWUSR, show_bool_threeg,
+static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg,
set_bool_threeg);
static ssize_t show_interface(struct device *dev, struct device_attribute *attr,
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1001,14 +1001,8 @@ static int asus_hotk_add_fs(struct acpi_
struct proc_dir_entry *proc;
mode_t mode;
- /*
- * If parameter uid or gid is not changed, keep the default setting for
- * our proc entries (-rw-rw-rw-) else, it means we care about security,
- * and then set to -rw-rw----
- */
-
if ((asus_uid == 0) && (asus_gid == 0)) {
- mode = S_IFREG | S_IRUGO | S_IWUGO;
+ mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP;
} else {
mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP;
printk(KERN_WARNING " asus_uid and asus_gid parameters are "
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -177,7 +177,7 @@ set_bool_##value(struct device *dev, str
return -EINVAL; \
return count; \
} \
-static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \
+static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \
show_bool_##value, set_bool_##value);
show_set_bool(wireless, TC1100_INSTANCE_WIRELESS);
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -211,7 +211,7 @@ static int ds2760_battery_read_status(st
if (di->rem_capacity > 100)
di->rem_capacity = 100;
- if (di->current_uA >= 100L)
+ if (di->current_uA < -100L)
di->life_sec = -((di->accum_current_uAh - di->empty_uAh) * 36L)
/ (di->current_uA / 100L);
else
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -36,6 +36,7 @@
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/log2.h>
+#include <linux/pm.h>
/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
#include <asm-generic/rtc.h>
@@ -855,7 +856,7 @@ static void __exit cmos_do_remove(struct
#ifdef CONFIG_PM
-static int cmos_suspend(struct device *dev, pm_message_t mesg)
+static int cmos_suspend(struct device *dev)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned char tmp;
@@ -902,7 +903,7 @@ static int cmos_suspend(struct device *d
*/
static inline int cmos_poweroff(struct device *dev)
{
- return cmos_suspend(dev, PMSG_HIBERNATE);
+ return cmos_suspend(dev);
}
static int cmos_resume(struct device *dev)
@@ -949,9 +950,9 @@ static int cmos_resume(struct device *de
return 0;
}
+static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume);
+
#else
-#define cmos_suspend NULL
-#define cmos_resume NULL
static inline int cmos_poweroff(struct device *dev)
{
@@ -1087,7 +1088,7 @@ static void __exit cmos_pnp_remove(struc
static int cmos_pnp_suspend(struct pnp_dev *pnp, pm_message_t mesg)
{
- return cmos_suspend(&pnp->dev, mesg);
+ return cmos_suspend(&pnp->dev);
}
static int cmos_pnp_resume(struct pnp_dev *pnp)
@@ -1167,8 +1168,9 @@ static struct platform_driver cmos_platf
.shutdown = cmos_platform_shutdown,
.driver = {
.name = (char *) driver_name,
- .suspend = cmos_suspend,
- .resume = cmos_resume,
+#ifdef CONFIG_PM
+ .pm = &cmos_pm_ops,
+#endif
}
};
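The rtc-cmos hunks above convert the driver from the legacy bus-level .suspend/.resume callbacks (which took a pm_message_t) to a struct dev_pm_ops built with the SIMPLE_DEV_PM_OPS macro the patch itself uses. A minimal sketch of the same pattern for a hypothetical platform driver, assuming only the macro seen above:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* quiesce the device; note there is no pm_message_t argument */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* restore device state */
	return 0;
}

/* Defines a const struct dev_pm_ops named foo_pm_ops with the system
 * sleep callbacks pointed at the two functions above. */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
#ifdef CONFIG_PM
		.pm = &foo_pm_ops,
#endif
	},
};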
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -701,6 +701,8 @@ static const struct scsi_dh_devlist alua
{"IBM", "2145" },
{"Pillar", "Axiom" },
{"Intel", "Multi-Flex"},
+ {"NETAPP", "LUN"},
+ {"AIX", "NVDISK"},
{NULL, NULL}
};
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -648,6 +648,7 @@ void sas_scsi_recover_host(struct Scsi_H
spin_lock_irqsave(shost->host_lock, flags);
list_splice_init(&shost->eh_cmd_q, &eh_work_q);
+ shost->host_eh_scheduled = 0;
spin_unlock_irqrestore(shost->host_lock, flags);
SAS_DPRINTK("Enter %s\n", __func__);
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -1947,9 +1947,9 @@ _base_allocate_memory_pools(struct MPT2S
/* adjust hba_queue_depth, reply_free_queue_depth,
* and queue_size
*/
- ioc->hba_queue_depth -= queue_diff;
- ioc->reply_free_queue_depth -= queue_diff;
- queue_size -= queue_diff;
+ ioc->hba_queue_depth -= (queue_diff / 2);
+ ioc->reply_free_queue_depth -= (queue_diff / 2);
+ queue_size = facts->MaxReplyDescriptorPostQueueDepth;
}
ioc->reply_post_queue_depth = queue_size;
@@ -3595,6 +3595,8 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPT
static void
_base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
{
+ mpt2sas_scsih_reset_handler(ioc, reset_phase);
+ mpt2sas_ctl_reset_handler(ioc, reset_phase);
switch (reset_phase) {
case MPT2_IOC_PRE_RESET:
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
@@ -3625,8 +3627,6 @@ _base_reset_handler(struct MPT2SAS_ADAPT
"MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
break;
}
- mpt2sas_scsih_reset_handler(ioc, reset_phase);
- mpt2sas_ctl_reset_handler(ioc, reset_phase);
}
/**
@@ -3680,6 +3680,7 @@ mpt2sas_base_hard_reset_handler(struct M
{
int r;
unsigned long flags;
+ u8 pe_complete = ioc->wait_for_port_enable_to_complete;
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name,
__func__));
@@ -3701,6 +3702,14 @@ mpt2sas_base_hard_reset_handler(struct M
if (r)
goto out;
_base_reset_handler(ioc, MPT2_IOC_AFTER_RESET);
+
+ /* If this hard reset is called while port enable is active, then
+ * there is no reason to call make_ioc_operational
+ */
+ if (pe_complete) {
+ r = -EFAULT;
+ goto out;
+ }
r = _base_make_ioc_operational(ioc, sleep_flag);
if (!r)
_base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2585,9 +2585,6 @@ _scsih_check_topo_delete_events(struct M
u16 handle;
for (i = 0 ; i < event_data->NumEntries; i++) {
- if (event_data->PHY[i].PhyStatus &
- MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT)
- continue;
handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
if (!handle)
continue;
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1040,6 +1040,12 @@ static unsigned int sd_completed_bytes(s
u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
u64 bad_lba;
int info_valid;
+ /*
+ * resid is optional but mostly filled in. When it's unused,
+ * its value is zero, so we assume the whole buffer transferred
+ */
+ unsigned int transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
+ unsigned int good_bytes;
if (!blk_fs_request(scmd->request))
return 0;
@@ -1073,7 +1079,8 @@ static unsigned int sd_completed_bytes(s
/* This computation should always be done in terms of
* the resolution of the device's medium.
*/
- return (bad_lba - start_lba) * scmd->device->sector_size;
+ good_bytes = (bad_lba - start_lba) * scmd->device->sector_size;
+ return min(good_bytes, transferred);
}
/**
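The sd.c change above stops sd_completed_bytes() from reporting more good data than the HBA actually transferred: the residual count (when the low-level driver fills it in) bounds the bytes recovered before the first bad LBA. A standalone userspace illustration of that arithmetic, with made-up values (not part of the patch):

#include <stdio.h>
#include <stdint.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int bufflen     = 64 * 512;          /* scsi_bufflen()      */
	unsigned int resid       = 32 * 512;          /* scsi_get_resid()    */
	unsigned int transferred = bufflen - resid;   /* bytes the HBA moved */

	uint64_t start_lba = 1000, bad_lba = 1048;    /* medium error LBA    */
	unsigned int sector_size = 512;

	unsigned int good_bytes =
		(unsigned int)((bad_lba - start_lba) * sector_size);

	/* Without the clamp the driver could claim 48 completed sectors even
	 * though the HBA only moved 32 sectors' worth of data. */
	printf("good=%u transferred=%u completed=%u\n",
	       good_bytes, transferred, min_u(good_bytes, transferred));
	return 0;
}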
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -255,7 +255,8 @@ static const struct serial8250_config ua
.fifo_size = 128,
.tx_loadsz = 128,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
- .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+ /* UART_CAP_EFR breaks the Billionton CF Bluetooth card. */
+ .flags = UART_CAP_FIFO | UART_CAP_SLEEP,
},
[PORT_RSA] = {
.name = "RSA",
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -52,6 +52,7 @@ Devices: [JR3] PCI force sensor board (j
#define PCI_VENDOR_ID_JR3 0x1762
#define PCI_DEVICE_ID_JR3_1_CHANNEL 0x3111
+#define PCI_DEVICE_ID_JR3_1_CHANNEL_NEW 0x1111
#define PCI_DEVICE_ID_JR3_2_CHANNEL 0x3112
#define PCI_DEVICE_ID_JR3_3_CHANNEL 0x3113
#define PCI_DEVICE_ID_JR3_4_CHANNEL 0x3114
@@ -71,6 +72,8 @@ static DEFINE_PCI_DEVICE_TABLE(jr3_pci_p
{
PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
+ PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL_NEW,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_2_CHANNEL,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_3_CHANNEL,
@@ -807,6 +810,10 @@ static int jr3_pci_attach(struct comedi_
devpriv->n_channels = 1;
}
break;
+ case PCI_DEVICE_ID_JR3_1_CHANNEL_NEW:{
+ devpriv->n_channels = 1;
+ }
+ break;
case PCI_DEVICE_ID_JR3_2_CHANNEL:{
devpriv->n_channels = 2;
}
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -528,7 +528,8 @@ int labpc_common_attach(struct comedi_de
/* grab our IRQ */
if (irq) {
isr_flags = 0;
- if (thisboard->bustype == pci_bustype)
+ if (thisboard->bustype == pci_bustype
+ || thisboard->bustype == pcmcia_bustype)
isr_flags |= IRQF_SHARED;
if (request_irq(irq, labpc_interrupt, isr_flags,
driver_labpc.driver_name, dev)) {
--- a/drivers/staging/hv/blkvsc_drv.c
+++ b/drivers/staging/hv/blkvsc_drv.c
@@ -378,6 +378,7 @@ static int blkvsc_probe(struct device *d
blkdev->gd->first_minor = 0;
blkdev->gd->fops = &block_ops;
blkdev->gd->private_data = blkdev;
+ blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device);
sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);
blkvsc_do_inquiry(blkdev);
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -296,6 +296,7 @@ static void netvsc_linkstatus_callback(s
if (status == 1) {
netif_carrier_on(net);
netif_wake_queue(net);
+ netif_notify_peers(net);
} else {
netif_carrier_off(net);
netif_stop_queue(net);
--- a/drivers/staging/usbip/vhci.h
+++ b/drivers/staging/usbip/vhci.h
@@ -100,9 +100,6 @@ struct vhci_hcd {
* But, the index of this array begins from 0.
*/
struct vhci_device vdev[VHCI_NPORTS];
-
- /* vhci_device which has not been assiged its address yet */
- int pending_port;
};
@@ -119,6 +116,9 @@ void rh_port_disconnect(int rhport);
void vhci_rx_loop(struct usbip_task *ut);
void vhci_tx_loop(struct usbip_task *ut);
+struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
+ __u32 seqnum);
+
#define hardware (&the_controller->pdev.dev)
static inline struct vhci_device *port_to_vdev(__u32 port)
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -137,8 +137,6 @@ void rh_port_connect(int rhport, enum us
* the_controller->vdev[rhport].ud.status = VDEV_CONNECT;
* spin_unlock(&the_controller->vdev[rhport].ud.lock); */
- the_controller->pending_port = rhport;
-
spin_unlock_irqrestore(&the_controller->lock, flags);
usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
@@ -558,6 +556,7 @@ static int vhci_urb_enqueue(struct usb_h
struct device *dev = &urb->dev->dev;
int ret = 0;
unsigned long flags;
+ struct vhci_device *vdev;
usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
hcd, urb, mem_flags);
@@ -573,6 +572,18 @@ static int vhci_urb_enqueue(struct usb_h
return urb->status;
}
+ vdev = port_to_vdev(urb->dev->portnum-1);
+
+ /* refuse enqueue for dead connection */
+ spin_lock(&vdev->ud.lock);
+ if (vdev->ud.status == VDEV_ST_NULL || vdev->ud.status == VDEV_ST_ERROR) {
+ usbip_uerr("enqueue for inactive port %d\n", vdev->rhport);
+ spin_unlock(&vdev->ud.lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
+ return -ENODEV;
+ }
+ spin_unlock(&vdev->ud.lock);
+
ret = usb_hcd_link_urb_to_ep(hcd, urb);
if (ret)
goto no_need_unlink;
@@ -591,8 +602,6 @@ static int vhci_urb_enqueue(struct usb_h
__u8 type = usb_pipetype(urb->pipe);
struct usb_ctrlrequest *ctrlreq =
(struct usb_ctrlrequest *) urb->setup_packet;
- struct vhci_device *vdev =
- port_to_vdev(the_controller->pending_port);
if (type != PIPE_CONTROL || !ctrlreq) {
dev_err(dev, "invalid request to devnum 0\n");
@@ -606,7 +615,9 @@ static int vhci_urb_enqueue(struct usb_h
dev_info(dev, "SetAddress Request (%d) to port %d\n",
ctrlreq->wValue, vdev->rhport);
- vdev->udev = urb->dev;
+ if (vdev->udev)
+ usb_put_dev(vdev->udev);
+ vdev->udev = usb_get_dev(urb->dev);
spin_lock(&vdev->ud.lock);
vdev->ud.status = VDEV_ST_USED;
@@ -626,8 +637,9 @@ static int vhci_urb_enqueue(struct usb_h
"Get_Descriptor to device 0 "
"(get max pipe size)\n");
- /* FIXME: reference count? (usb_get_dev()) */
- vdev->udev = urb->dev;
+ if (vdev->udev)
+ usb_put_dev(vdev->udev);
+ vdev->udev = usb_get_dev(urb->dev);
goto out;
default:
@@ -798,27 +810,12 @@ static int vhci_urb_dequeue(struct usb_h
spin_unlock_irqrestore(&vdev->priv_lock, flags2);
}
-
- if (!vdev->ud.tcp_socket) {
- /* tcp connection is closed */
- usbip_uinfo("vhci_hcd: vhci_urb_dequeue() gives back urb %p\n",
- urb);
-
- usb_hcd_unlink_urb_from_ep(hcd, urb);
-
- spin_unlock_irqrestore(&the_controller->lock, flags);
- usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
- urb->status);
- spin_lock_irqsave(&the_controller->lock, flags);
- }
-
spin_unlock_irqrestore(&the_controller->lock, flags);
usbip_dbg_vhci_hc("leave\n");
return 0;
}
-
static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
{
struct vhci_unlink *unlink, *tmp;
@@ -826,11 +823,34 @@ static void vhci_device_unlink_cleanup(s
spin_lock(&vdev->priv_lock);
list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
+ usbip_uinfo("unlink cleanup tx %lu\n", unlink->unlink_seqnum);
list_del(&unlink->list);
kfree(unlink);
}
list_for_each_entry_safe(unlink, tmp, &vdev->unlink_rx, list) {
+ struct urb *urb;
+
+ /* give back URB of unanswered unlink request */
+ usbip_uinfo("unlink cleanup rx %lu\n", unlink->unlink_seqnum);
+
+ urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
+ if (!urb) {
+ usbip_uinfo("the urb (seqnum %lu) was already given back\n",
+ unlink->unlink_seqnum);
+ list_del(&unlink->list);
+ kfree(unlink);
+ continue;
+ }
+
+ urb->status = -ENODEV;
+
+ spin_lock(&the_controller->lock);
+ usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
+ spin_unlock(&the_controller->lock);
+
+ usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
+
list_del(&unlink->list);
kfree(unlink);
}
@@ -900,6 +920,10 @@ static void vhci_device_reset(struct usb
vdev->speed = 0;
vdev->devid = 0;
+ if (vdev->udev)
+ usb_put_dev(vdev->udev);
+ vdev->udev = NULL;
+
ud->tcp_socket = NULL;
ud->status = VDEV_ST_NULL;
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -21,16 +21,14 @@
#include "vhci.h"
-/* get URB from transmitted urb queue */
-static struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
+/* get URB from transmitted urb queue. caller must hold vdev->priv_lock */
+struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
__u32 seqnum)
{
struct vhci_priv *priv, *tmp;
struct urb *urb = NULL;
int status;
- spin_lock(&vdev->priv_lock);
-
list_for_each_entry_safe(priv, tmp, &vdev->priv_rx, list) {
if (priv->seqnum == seqnum) {
urb = priv->urb;
@@ -61,8 +59,6 @@ static struct urb *pickup_urb_and_free_p
}
}
- spin_unlock(&vdev->priv_lock);
-
return urb;
}
@@ -72,9 +68,11 @@ static void vhci_recv_ret_submit(struct
struct usbip_device *ud = &vdev->ud;
struct urb *urb;
+ spin_lock(&vdev->priv_lock);
urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
+ spin_unlock(&vdev->priv_lock);
if (!urb) {
usbip_uerr("cannot find a urb of seqnum %u\n",
@@ -159,7 +157,12 @@ static void vhci_recv_ret_unlink(struct
return;
}
+ spin_lock(&vdev->priv_lock);
+
urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
+
+ spin_unlock(&vdev->priv_lock);
+
if (!urb) {
/*
* I get the result of a unlink request. But, it seems that I
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1596,6 +1596,7 @@ static struct usb_device_id acm_ids[] =
{ NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
{ NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
{ NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */
{ SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
/* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -197,8 +197,10 @@ void usb_hcd_pci_shutdown(struct pci_dev
if (!hcd)
return;
- if (hcd->driver->shutdown)
+ if (hcd->driver->shutdown) {
hcd->driver->shutdown(hcd);
+ pci_disable_device(dev);
+ }
}
EXPORT_SYMBOL_GPL(usb_hcd_pci_shutdown);
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -648,6 +648,8 @@ static void hub_init_func3(struct work_s
static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
{
struct usb_device *hdev = hub->hdev;
+ struct usb_hcd *hcd;
+ int ret;
int port1;
int status;
bool need_debounce_delay = false;
@@ -686,6 +688,25 @@ static void hub_activate(struct usb_hub
atomic_set(&to_usb_interface(hub->intfdev)->
pm_usage_cnt, 1);
return; /* Continues at init2: below */
+ } else if (type == HUB_RESET_RESUME) {
+ /* The internal host controller state for the hub device
+ * may be gone after a host power loss on system resume.
+ * Update the device's info so the HW knows it's a hub.
+ */
+ hcd = bus_to_hcd(hdev->bus);
+ if (hcd->driver->update_hub_device) {
+ ret = hcd->driver->update_hub_device(hcd, hdev,
+ &hub->tt, GFP_NOIO);
+ if (ret < 0) {
+ dev_err(hub->intfdev, "Host not "
+ "accepting hub info "
+ "update.\n");
+ dev_err(hub->intfdev, "LS/FS devices "
+ "and hubs may not work "
+ "under this hub\n.");
+ }
+ }
+ hub_power_on(hub, true);
} else {
hub_power_on(hub, true);
}
@@ -2683,6 +2704,11 @@ hub_port_init (struct usb_hub *hub, stru
udev->ttport = hdev->ttport;
} else if (udev->speed != USB_SPEED_HIGH
&& hdev->speed == USB_SPEED_HIGH) {
+ if (!hub->tt.hub) {
+ dev_err(&udev->dev, "parent hub has no TT\n");
+ retval = -EINVAL;
+ goto fail;
+ }
udev->tt = &hub->tt;
udev->ttport = port1;
}
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -48,6 +48,10 @@ static const struct usb_device_id usb_qu
{ USB_DEVICE(0x04b4, 0x0526), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
+ /* Samsung Android phone modem - ID conflict with SPH-I500 */
+ { USB_DEVICE(0x04e8, 0x6601), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* Roland SC-8820 */
{ USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -68,6 +72,10 @@ static const struct usb_device_id usb_qu
/* M-Systems Flash Disk Pioneers */
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Keytouch QWERTY Panel keyboard */
+ { USB_DEVICE(0x0926, 0x3333), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -130,31 +130,31 @@ static struct printer_dev usb_printer_ga
* parameters are in UTF-8 (superset of ASCII's 7 bit characters).
*/
-static ushort __initdata idVendor;
+static ushort idVendor;
module_param(idVendor, ushort, S_IRUGO);
MODULE_PARM_DESC(idVendor, "USB Vendor ID");
-static ushort __initdata idProduct;
+static ushort idProduct;
module_param(idProduct, ushort, S_IRUGO);
MODULE_PARM_DESC(idProduct, "USB Product ID");
-static ushort __initdata bcdDevice;
+static ushort bcdDevice;
module_param(bcdDevice, ushort, S_IRUGO);
MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)");
-static char *__initdata iManufacturer;
+static char *iManufacturer;
module_param(iManufacturer, charp, S_IRUGO);
MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string");
-static char *__initdata iProduct;
+static char *iProduct;
module_param(iProduct, charp, S_IRUGO);
MODULE_PARM_DESC(iProduct, "USB Product string");
-static char *__initdata iSerialNum;
+static char *iSerialNum;
module_param(iSerialNum, charp, S_IRUGO);
MODULE_PARM_DESC(iSerialNum, "1");
-static char *__initdata iPNPstring;
+static char *iPNPstring;
module_param(iPNPstring, charp, S_IRUGO);
MODULE_PARM_DESC(iPNPstring, "MFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;");
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -103,6 +103,9 @@ MODULE_PARM_DESC (ignore_oc, "ignore bog
#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
+/* for ASPM quirk of ISOC on AMD SB800 */
+static struct pci_dev *amd_nb_dev;
+
/*-------------------------------------------------------------------------*/
#include "ehci.h"
@@ -502,6 +505,11 @@ static void ehci_stop (struct usb_hcd *h
spin_unlock_irq (&ehci->lock);
ehci_mem_cleanup (ehci);
+ if (amd_nb_dev) {
+ pci_dev_put(amd_nb_dev);
+ amd_nb_dev = NULL;
+ }
+
#ifdef EHCI_STATS
ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
@@ -537,6 +545,8 @@ static int ehci_init(struct usb_hcd *hcd
ehci->iaa_watchdog.function = ehci_iaa_watchdog;
ehci->iaa_watchdog.data = (unsigned long) ehci;
+ hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
+
/*
* hw default: 1K periodic list heads, one per frame.
* periodic_size can shrink by USBCMD update if hcc_params allows.
@@ -544,11 +554,20 @@ static int ehci_init(struct usb_hcd *hcd
ehci->periodic_size = DEFAULT_I_TDPS;
INIT_LIST_HEAD(&ehci->cached_itd_list);
INIT_LIST_HEAD(&ehci->cached_sitd_list);
+
+ if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+ /* periodic schedule size can be smaller than default */
+ switch (EHCI_TUNE_FLS) {
+ case 0: ehci->periodic_size = 1024; break;
+ case 1: ehci->periodic_size = 512; break;
+ case 2: ehci->periodic_size = 256; break;
+ default: BUG();
+ }
+ }
if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
return retval;
/* controllers may cache some of the periodic schedule ... */
- hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
if (HCC_ISOC_CACHE(hcc_params)) // full frame cache
ehci->i_thresh = 8;
else // N microframes cached
@@ -597,12 +616,6 @@ static int ehci_init(struct usb_hcd *hcd
/* periodic schedule size can be smaller than default */
temp &= ~(3 << 2);
temp |= (EHCI_TUNE_FLS << 2);
- switch (EHCI_TUNE_FLS) {
- case 0: ehci->periodic_size = 1024; break;
- case 1: ehci->periodic_size = 512; break;
- case 2: ehci->periodic_size = 256; break;
- default: BUG();
- }
}
ehci->command = temp;
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -41,6 +41,42 @@ static int ehci_pci_reinit(struct ehci_h
return 0;
}
+static int ehci_quirk_amd_hudson(struct ehci_hcd *ehci)
+{
+ struct pci_dev *amd_smbus_dev;
+ u8 rev = 0;
+
+ amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
+ if (amd_smbus_dev) {
+ pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
+ if (rev < 0x40) {
+ pci_dev_put(amd_smbus_dev);
+ amd_smbus_dev = NULL;
+ return 0;
+ }
+ } else {
+ amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x780b, NULL);
+ if (!amd_smbus_dev)
+ return 0;
+ pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
+ if (rev < 0x11 || rev > 0x18) {
+ pci_dev_put(amd_smbus_dev);
+ amd_smbus_dev = NULL;
+ return 0;
+ }
+ }
+
+ if (!amd_nb_dev)
+ amd_nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
+
+ ehci_info(ehci, "QUIRK: Enable exception for AMD Hudson ASPM\n");
+
+ pci_dev_put(amd_smbus_dev);
+ amd_smbus_dev = NULL;
+
+ return 1;
+}
+
/* called during probe() after chip reset completes */
static int ehci_pci_setup(struct usb_hcd *hcd)
{
@@ -99,6 +135,9 @@ static int ehci_pci_setup(struct usb_hcd
/* cache this readonly data; minimize chip reads */
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+ if (ehci_quirk_amd_hudson(ehci))
+ ehci->amd_l1_fix = 1;
+
retval = ehci_halt(ehci);
if (retval)
return retval;
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1576,6 +1576,63 @@ itd_link (struct ehci_hcd *ehci, unsigne
*hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
}
+#define AB_REG_BAR_LOW 0xe0
+#define AB_REG_BAR_HIGH 0xe1
+#define AB_INDX(addr) ((addr) + 0x00)
+#define AB_DATA(addr) ((addr) + 0x04)
+#define NB_PCIE_INDX_ADDR 0xe0
+#define NB_PCIE_INDX_DATA 0xe4
+#define NB_PIF0_PWRDOWN_0 0x01100012
+#define NB_PIF0_PWRDOWN_1 0x01100013
+
+static void ehci_quirk_amd_L1(struct ehci_hcd *ehci, int disable)
+{
+ u32 addr, addr_low, addr_high, val;
+
+ outb_p(AB_REG_BAR_LOW, 0xcd6);
+ addr_low = inb_p(0xcd7);
+ outb_p(AB_REG_BAR_HIGH, 0xcd6);
+ addr_high = inb_p(0xcd7);
+ addr = addr_high << 8 | addr_low;
+ outl_p(0x30, AB_INDX(addr));
+ outl_p(0x40, AB_DATA(addr));
+ outl_p(0x34, AB_INDX(addr));
+ val = inl_p(AB_DATA(addr));
+
+ if (disable) {
+ val &= ~0x8;
+ val |= (1 << 4) | (1 << 9);
+ } else {
+ val |= 0x8;
+ val &= ~((1 << 4) | (1 << 9));
+ }
+ outl_p(val, AB_DATA(addr));
+
+ if (amd_nb_dev) {
+ addr = NB_PIF0_PWRDOWN_0;
+ pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_ADDR, addr);
+ pci_read_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, &val);
+ if (disable)
+ val &= ~(0x3f << 7);
+ else
+ val |= 0x3f << 7;
+
+ pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, val);
+
+ addr = NB_PIF0_PWRDOWN_1;
+ pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_ADDR, addr);
+ pci_read_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, &val);
+ if (disable)
+ val &= ~(0x3f << 7);
+ else
+ val |= 0x3f << 7;
+
+ pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, val);
+ }
+
+ return;
+}
+
/* fit urb's itds into the selected schedule slot; activate as needed */
static int
itd_link_urb (
@@ -1603,6 +1660,12 @@ itd_link_urb (
next_uframe >> 3, next_uframe & 0x7);
stream->start = jiffies;
}
+
+ if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+ if (ehci->amd_l1_fix == 1)
+ ehci_quirk_amd_L1(ehci, 1);
+ }
+
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
/* fill iTDs uframe by uframe */
@@ -1729,6 +1792,11 @@ itd_complete (
(void) disable_periodic(ehci);
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
+ if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+ if (ehci->amd_l1_fix == 1)
+ ehci_quirk_amd_L1(ehci, 0);
+ }
+
if (unlikely(list_is_singular(&stream->td_list))) {
ehci_to_hcd(ehci)->self.bandwidth_allocated
-= stream->bandwidth;
@@ -2016,6 +2084,12 @@ sitd_link_urb (
stream->interval, hc32_to_cpu(ehci, stream->splits));
stream->start = jiffies;
}
+
+ if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+ if (ehci->amd_l1_fix == 1)
+ ehci_quirk_amd_L1(ehci, 1);
+ }
+
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
/* fill sITDs frame by frame */
@@ -2118,6 +2192,11 @@ sitd_complete (
(void) disable_periodic(ehci);
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
+ if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+ if (ehci->amd_l1_fix == 1)
+ ehci_quirk_amd_L1(ehci, 0);
+ }
+
if (list_is_singular(&stream->td_list)) {
ehci_to_hcd(ehci)->self.bandwidth_allocated
-= stream->bandwidth;
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -130,6 +130,7 @@ struct ehci_hcd { /* one per controlle
unsigned has_amcc_usb23:1;
unsigned need_io_watchdog:1;
unsigned broken_periodic:1;
+ unsigned amd_l1_fix:1;
/* required for usb32 quirk */
#define OHCI_CTRL_HCFS (3 << 6)
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -398,7 +398,14 @@ ohci_shutdown (struct usb_hcd *hcd)
ohci = hcd_to_ohci (hcd);
ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
- ohci_usb_reset (ohci);
+ ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
+
+ /* If the SHUTDOWN quirk is set, don't put the controller in RESET */
+ ohci->hc_control &= (ohci->flags & OHCI_QUIRK_SHUTDOWN ?
+ OHCI_CTRL_RWC | OHCI_CTRL_HCFS :
+ OHCI_CTRL_RWC);
+ ohci_writel(ohci, ohci->hc_control, &ohci->regs->control);
+
/* flush the writes */
(void) ohci_readl (ohci, &ohci->regs->control);
}
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -201,6 +201,20 @@ static int ohci_quirk_amd700(struct usb_
return 0;
}
+/* nVidia controllers continue to drive Reset signalling on the bus
+ * even after system shutdown, wasting power. This flag tells the
+ * shutdown routine to leave the controller OPERATIONAL instead of RESET.
+ */
+static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+ ohci->flags |= OHCI_QUIRK_SHUTDOWN;
+ ohci_dbg(ohci, "enabled nVidia shutdown quirk\n");
+
+ return 0;
+}
+
/*
* The hardware normally enables the A-link power management feature, which
* lets the system lower the power consumption in idle states.
@@ -332,6 +346,10 @@ static const struct pci_device_id ohci_p
PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
.driver_data = (unsigned long)ohci_quirk_amd700,
},
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
+ .driver_data = (unsigned long) ohci_quirk_nvidia_shutdown,
+ },
/* FIXME for some of the early AMD 760 southbridges, OHCI
* won't work at all. blacklist them.
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -403,6 +403,7 @@ struct ohci_hcd {
#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */
#define OHCI_QUIRK_AMD_ISO 0x200 /* ISO transfers*/
#define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */
+#define OHCI_QUIRK_SHUTDOWN 0x800 /* nVidia power bug */
// there are also chip quirks/bugs in init logic
struct work_struct nec_work; /* Worker for NEC quirk */
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -169,6 +169,7 @@ static int __devinit mmio_resource_enabl
static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
{
void __iomem *base;
+ u32 control;
if (!mmio_resource_enabled(pdev, 0))
return;
@@ -177,10 +178,14 @@ static void __devinit quirk_usb_handoff_
if (base == NULL)
return;
+ control = readl(base + OHCI_CONTROL);
+
/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
-#ifndef __hppa__
-{
- u32 control = readl(base + OHCI_CONTROL);
+#ifdef __hppa__
+#define OHCI_CTRL_MASK (OHCI_CTRL_RWC | OHCI_CTRL_IR)
+#else
+#define OHCI_CTRL_MASK OHCI_CTRL_RWC
+
if (control & OHCI_CTRL_IR) {
int wait_time = 500; /* arbitrary; 5 seconds */
writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
@@ -194,13 +199,12 @@ static void __devinit quirk_usb_handoff_
dev_warn(&pdev->dev, "OHCI: BIOS handoff failed"
" (BIOS bug?) %08x\n",
readl(base + OHCI_CONTROL));
-
- /* reset controller, preserving RWC */
- writel(control & OHCI_CTRL_RWC, base + OHCI_CONTROL);
}
-}
#endif
+ /* reset controller, preserving RWC (and possibly IR) */
+ writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
+
/*
* disable interrupts
*/
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -391,8 +391,11 @@ void xhci_find_new_dequeue_state(struct
state->new_deq_seg = find_trb_seg(cur_td->start_seg,
dev->eps[ep_index].stopped_trb,
&state->new_cycle_state);
- if (!state->new_deq_seg)
- BUG();
+ if (!state->new_deq_seg) {
+ WARN_ON(1);
+ return;
+ }
+
/* Dig out the cycle state saved by the xHC during the stop ep cmd */
xhci_dbg(xhci, "Finding endpoint context\n");
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
@@ -403,8 +406,10 @@ void xhci_find_new_dequeue_state(struct
state->new_deq_seg = find_trb_seg(state->new_deq_seg,
state->new_deq_ptr,
&state->new_cycle_state);
- if (!state->new_deq_seg)
- BUG();
+ if (!state->new_deq_seg) {
+ WARN_ON(1);
+ return;
+ }
trb = &state->new_deq_ptr->generic;
if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
@@ -1485,12 +1490,13 @@ static unsigned int count_sg_trbs_needed
/* Scatter gather list entries may cross 64KB boundaries */
running_total = TRB_MAX_BUFF_SIZE -
- (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
+ running_total &= TRB_MAX_BUFF_SIZE - 1;
if (running_total != 0)
num_trbs++;
/* How many more 64KB chunks to transfer, how many more TRBs? */
- while (running_total < sg_dma_len(sg)) {
+ while (running_total < sg_dma_len(sg) && running_total < temp) {
num_trbs++;
running_total += TRB_MAX_BUFF_SIZE;
}
@@ -1515,11 +1521,11 @@ static unsigned int count_sg_trbs_needed
static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
if (num_trbs != 0)
- dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+ dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
"TRBs, %d left\n", __func__,
urb->ep->desc.bEndpointAddress, num_trbs);
if (running_total != urb->transfer_buffer_length)
- dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+ dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
"queued %#x (%d), asked for %#x (%d)\n",
__func__,
urb->ep->desc.bEndpointAddress,
@@ -1626,8 +1632,7 @@ static int queue_bulk_sg_tx(struct xhci_
sg = urb->sg->sg;
addr = (u64) sg_dma_address(sg);
this_sg_len = sg_dma_len(sg);
- trb_buff_len = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
@@ -1662,7 +1667,7 @@ static int queue_bulk_sg_tx(struct xhci_
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
(unsigned int) addr + trb_buff_len);
if (TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
+ (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
@@ -1700,7 +1705,7 @@ static int queue_bulk_sg_tx(struct xhci_
}
trb_buff_len = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (addr & (TRB_MAX_BUFF_SIZE - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (running_total + trb_buff_len > urb->transfer_buffer_length)
trb_buff_len =
@@ -1735,7 +1740,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
num_trbs = 0;
/* How much data is (potentially) left before the 64KB boundary? */
running_total = TRB_MAX_BUFF_SIZE -
- (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+ running_total &= TRB_MAX_BUFF_SIZE - 1;
/* If there's some data on this 64KB chunk, or we have to send a
* zero-length transfer, we need at least one TRB
@@ -1774,8 +1780,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
/* How much data is in the first TRB? */
addr = (u64) urb->transfer_dma;
trb_buff_len = TRB_MAX_BUFF_SIZE -
- (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
- if (urb->transfer_buffer_length < trb_buff_len)
+ (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+ if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
first_trb = true;
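The xhci-ring.c changes above mask buffer offsets with TRB_MAX_BUFF_SIZE - 1 and force running_total back to zero when a buffer starts exactly on a 64KB boundary, since no single TRB may cross such a boundary. A standalone sketch of the per-segment TRB count as it reads after the fix (userspace C, not part of the patch; the constant is assumed to be 64KB as in the driver):

#include <stdio.h>
#include <stdint.h>

#define TRB_MAX_BUFF_SIZE (1U << 16)	/* 64KB, one TRB's maximum payload */

static unsigned int count_trbs(uint64_t addr, unsigned int len)
{
	unsigned int num_trbs = 0;
	unsigned int running_total;

	/* bytes that fit in the first 64KB chunk */
	running_total = TRB_MAX_BUFF_SIZE -
			(unsigned int)(addr & (TRB_MAX_BUFF_SIZE - 1));
	running_total &= TRB_MAX_BUFF_SIZE - 1;	/* 0 when addr is aligned */
	if (running_total != 0)
		num_trbs++;

	/* one more TRB per additional 64KB chunk */
	while (running_total < len) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}
	return num_trbs;
}

int main(void)
{
	/* 64KB-aligned 128KB buffer: exactly two TRBs */
	printf("%u\n", count_trbs(0x100000, 2 * TRB_MAX_BUFF_SIZE));
	/* 2KB buffer that crosses a 64KB boundary: two TRBs */
	printf("%u\n", count_trbs(0x1fc00, 2048));
	return 0;
}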
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -373,7 +373,7 @@ static ssize_t iowarrior_write(struct fi
case USB_DEVICE_ID_CODEMERCS_IOWPV2:
case USB_DEVICE_ID_CODEMERCS_IOW40:
/* IOW24 and IOW40 use a synchronous call */
- buf = kmalloc(8, GFP_KERNEL); /* 8 bytes are enough for both products */
+ buf = kmalloc(count, GFP_KERNEL);
if (!buf) {
retval = -ENOMEM;
goto exit;
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -309,6 +309,7 @@ static int musb_platform_resume(struct m
int musb_platform_exit(struct musb *musb)
{
+ del_timer_sync(&musb_idle_timer);
omap_vbus_power(musb, 0 /*off*/, 1);
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -479,12 +479,22 @@ static void ch341_read_int_callback(stru
if (actual_length >= 4) {
struct ch341_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
+ u8 prev_line_status = priv->line_status;
spin_lock_irqsave(&priv->lock, flags);
priv->line_status = (~(data[2])) & CH341_BITS_MODEM_STAT;
if ((data[1] & CH341_MULT_STAT))
priv->multi_status_change = 1;
spin_unlock_irqrestore(&priv->lock, flags);
+
+ if ((priv->line_status ^ prev_line_status) & CH341_BIT_DCD) {
+ struct tty_struct *tty = tty_port_tty_get(&port->port);
+ if (tty)
+ usb_serial_handle_dcd_change(port, tty,
+ priv->line_status & CH341_BIT_DCD);
+ tty_kref_put(tty);
+ }
+
wake_up_interruptible(&priv->delta_msr_wait);
}
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -51,7 +51,6 @@ static void cp210x_break_ctl(struct tty_
static int cp210x_startup(struct usb_serial *);
static void cp210x_disconnect(struct usb_serial *);
static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
-static int cp210x_carrier_raised(struct usb_serial_port *p);
static int debug;
@@ -88,7 +87,6 @@ static struct usb_device_id id_table []
{ USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
{ USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
{ USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
- { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
{ USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
{ USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
@@ -111,7 +109,9 @@ static struct usb_device_id id_table []
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
{ USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
+ { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
+ { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
@@ -165,8 +165,7 @@ static struct usb_serial_driver cp210x_d
.tiocmset = cp210x_tiocmset,
.attach = cp210x_startup,
.disconnect = cp210x_disconnect,
- .dtr_rts = cp210x_dtr_rts,
- .carrier_raised = cp210x_carrier_raised
+ .dtr_rts = cp210x_dtr_rts
};
/* Config request types */
@@ -800,15 +799,6 @@ static int cp210x_tiocmget (struct tty_s
return result;
}
-static int cp210x_carrier_raised(struct usb_serial_port *p)
-{
- unsigned int control;
- cp210x_get_config(p, CP210X_GET_MDMSTS, &control, 1);
- if (control & CONTROL_DCD)
- return 1;
- return 0;
-}
-
static void cp210x_break_ctl (struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -455,7 +455,6 @@ static int digi_write_room(struct tty_st
static int digi_chars_in_buffer(struct tty_struct *tty);
static int digi_open(struct tty_struct *tty, struct usb_serial_port *port);
static void digi_close(struct usb_serial_port *port);
-static int digi_carrier_raised(struct usb_serial_port *port);
static void digi_dtr_rts(struct usb_serial_port *port, int on);
static int digi_startup_device(struct usb_serial *serial);
static int digi_startup(struct usb_serial *serial);
@@ -511,7 +510,6 @@ static struct usb_serial_driver digi_acc
.open = digi_open,
.close = digi_close,
.dtr_rts = digi_dtr_rts,
- .carrier_raised = digi_carrier_raised,
.write = digi_write,
.write_room = digi_write_room,
.write_bulk_callback = digi_write_bulk_callback,
@@ -1338,14 +1336,6 @@ static void digi_dtr_rts(struct usb_seri
digi_set_modem_signals(port, on * (TIOCM_DTR|TIOCM_RTS), 1);
}
-static int digi_carrier_raised(struct usb_serial_port *port)
-{
- struct digi_port *priv = usb_get_serial_port_data(port);
- if (priv->dp_modem_signals & TIOCM_CD)
- return 1;
- return 0;
-}
-
static int digi_open(struct tty_struct *tty, struct usb_serial_port *port)
{
int ret;
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -104,6 +104,7 @@ struct ftdi_sio_quirk {
static int ftdi_jtag_probe(struct usb_serial *serial);
static int ftdi_mtxorb_hack_setup(struct usb_serial *serial);
static int ftdi_NDI_device_setup(struct usb_serial *serial);
+static int ftdi_stmclite_probe(struct usb_serial *serial);
static void ftdi_USB_UIRT_setup(struct ftdi_private *priv);
static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv);
@@ -127,6 +128,10 @@ static struct ftdi_sio_quirk ftdi_HE_TIR
.port_probe = ftdi_HE_TIRA1_setup,
};
+static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
+ .probe = ftdi_stmclite_probe,
+};
+
/*
* The 8U232AM has the same API as the sio except for:
* - it can support MUCH higher baudrates; up to:
@@ -620,6 +625,7 @@ static struct usb_device_id id_table_com
{ USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) },
{ USB_DEVICE(TTI_VID, TTI_QL355P_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
+ { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) },
{ USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
@@ -681,7 +687,17 @@ static struct usb_device_id id_table_com
{ USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) },
- { USB_DEVICE(ICOM_ID1_VID, ICOM_ID1_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C2_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_RP2D_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VT_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VR_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVT_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVR_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVT_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVR_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
@@ -805,6 +821,8 @@ static struct usb_device_id id_table_com
{ USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
{ USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(ST_VID, ST_STMCLT1030_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
@@ -1738,6 +1756,25 @@ static int ftdi_jtag_probe(struct usb_se
}
/*
+ * The first and second ports on STMCLite adaptors are reserved for the JTAG
+ * interface and the fourth port for PIO
+ */
+static int ftdi_stmclite_probe(struct usb_serial *serial)
+{
+ struct usb_device *udev = serial->dev;
+ struct usb_interface *interface = serial->interface;
+
+ dbg("%s", __func__);
+
+ if (interface == udev->actconfig->interface[2])
+ return 0;
+
+ dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n");
+
+ return -ENODEV;
+}
+
+/*
* The Matrix Orbital VK204-25-USB has an invalid IN endpoint.
* We have to correct it if we want to read from it.
*/
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -518,6 +518,12 @@
#define RATOC_PRODUCT_ID_USB60F 0xb020
/*
+ * Acton Research Corp.
+ */
+#define ACTON_VID 0x0647 /* Vendor ID */
+#define ACTON_SPECTRAPRO_PID 0x0100
+
+/*
* Contec products (http://www.contec.com)
* Submitted by Daniel Sangorrin
*/
@@ -576,11 +582,23 @@
#define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */
/*
- * Icom ID-1 digital transceiver
+ * Definitions for Icom Inc. devices
*/
-
-#define ICOM_ID1_VID 0x0C26
-#define ICOM_ID1_PID 0x0004
+#define ICOM_VID 0x0C26 /* Icom vendor ID */
+/* Note: ID-1 is a communications transceiver for HAM-radio operators */
+#define ICOM_ID_1_PID 0x0004 /* ID-1 USB to RS-232 */
+/* Note: OPC is an optional cable to connect an Icom transceiver */
+#define ICOM_OPC_U_UC_PID 0x0018 /* OPC-478UC, OPC-1122U cloning cable */
+/* Note: ID-RP* devices are Icom Repeater Devices for HAM-radio */
+#define ICOM_ID_RP2C1_PID 0x0009 /* ID-RP2C Asset 1 to RS-232 */
+#define ICOM_ID_RP2C2_PID 0x000A /* ID-RP2C Asset 2 to RS-232 */
+#define ICOM_ID_RP2D_PID 0x000B /* ID-RP2D configuration port*/
+#define ICOM_ID_RP2VT_PID 0x000C /* ID-RP2V Transmit config port */
+#define ICOM_ID_RP2VR_PID 0x000D /* ID-RP2V Receive config port */
+#define ICOM_ID_RP4KVT_PID 0x0010 /* ID-RP4000V Transmit config port */
+#define ICOM_ID_RP4KVR_PID 0x0011 /* ID-RP4000V Receive config port */
+#define ICOM_ID_RP2KVT_PID 0x0012 /* ID-RP2000V Transmit config port */
+#define ICOM_ID_RP2KVR_PID 0x0013 /* ID-RP2000V Receive config port */
/*
* GN Otometrics (http://www.otometrics.com)
@@ -1029,6 +1047,12 @@
#define WHT_PID 0x0004 /* Wireless Handheld Terminal */
/*
+ * STMicroelectonics
+ */
+#define ST_VID 0x0483
+#define ST_STMCLT1030_PID 0x3747 /* ST Micro Connect Lite STMCLT1030 */
+
+/*
* Papouch products (http://www.papouch.com/)
* Submitted by Folkert van Heusden
*/
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -578,6 +578,26 @@ int usb_serial_handle_break(struct usb_s
}
EXPORT_SYMBOL_GPL(usb_serial_handle_break);
+/**
+ * usb_serial_handle_dcd_change - handle a change of carrier detect state
+ * @usb_port: usb_serial_port structure for the open port
+ * @tty: tty_struct structure for the port
+ * @status: new carrier detect status, nonzero if active
+ */
+void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
+ struct tty_struct *tty, unsigned int status)
+{
+ struct tty_port *port = &usb_port->port;
+
+ dbg("%s - port %d, status %d", __func__, usb_port->number, status);
+
+ if (status)
+ wake_up_interruptible(&port->open_wait);
+ else if (tty && !C_CLOCAL(tty))
+ tty_hangup(tty);
+}
+EXPORT_SYMBOL_GPL(usb_serial_handle_dcd_change);
+
int usb_serial_generic_resume(struct usb_serial *serial)
{
struct usb_serial_port *port;
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -2935,8 +2935,8 @@ static void load_application_firmware(st
dbg("%s %d.%d.%d", fw_info, rec->data[0], rec->data[1], build);
- edge_serial->product_info.FirmwareMajorVersion = fw->data[0];
- edge_serial->product_info.FirmwareMinorVersion = fw->data[1];
+ edge_serial->product_info.FirmwareMajorVersion = rec->data[0];
+ edge_serial->product_info.FirmwareMinorVersion = rec->data[1];
edge_serial->product_info.FirmwareBuildNumber = cpu_to_le16(build);
for (rec = ihex_next_binrec(rec); rec;
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -663,22 +663,6 @@ static void keyspan_pda_dtr_rts(struct u
}
}
-static int keyspan_pda_carrier_raised(struct usb_serial_port *port)
-{
- struct usb_serial *serial = port->serial;
- unsigned char modembits;
-
- /* If we can read the modem status and the DCD is low then
- carrier is not raised yet */
- if (keyspan_pda_get_modem_info(serial, &modembits) >= 0) {
- if (!(modembits & (1>>6)))
- return 0;
- }
- /* Carrier raised, or we failed (eg disconnected) so
- progress accordingly */
- return 1;
-}
-
static int keyspan_pda_open(struct tty_struct *tty,
struct usb_serial_port *port)
@@ -854,7 +838,6 @@ static struct usb_serial_driver keyspan_
.id_table = id_table_std,
.num_ports = 1,
.dtr_rts = keyspan_pda_dtr_rts,
- .carrier_raised = keyspan_pda_carrier_raised,
.open = keyspan_pda_open,
.close = keyspan_pda_close,
.write = keyspan_pda_write,
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -369,7 +369,16 @@ static int option_resume(struct usb_ser
#define HAIER_VENDOR_ID 0x201e
#define HAIER_PRODUCT_CE100 0x2009
-#define CINTERION_VENDOR_ID 0x0681
+/* Cinterion (formerly Siemens) products */
+#define SIEMENS_VENDOR_ID 0x0681
+#define CINTERION_VENDOR_ID 0x1e2d
+#define CINTERION_PRODUCT_HC25_MDM 0x0047
+#define CINTERION_PRODUCT_HC25_MDMNET 0x0040
+#define CINTERION_PRODUCT_HC28_MDM 0x004C
+#define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */
+#define CINTERION_PRODUCT_EU3_E 0x0051
+#define CINTERION_PRODUCT_EU3_P 0x0052
+#define CINTERION_PRODUCT_PH8 0x0053
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
@@ -895,7 +904,17 @@ static struct usb_device_id option_ids[]
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
- { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
+ /* Cinterion */
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
{ } /* Terminating entry */
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -59,6 +59,8 @@ static struct usb_device_id id_table []
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
@@ -955,9 +957,11 @@ static void pl2303_update_line_status(st
{
struct pl2303_private *priv = usb_get_serial_port_data(port);
+ struct tty_struct *tty;
unsigned long flags;
u8 status_idx = UART_STATE;
u8 length = UART_STATE + 1;
+ u8 prev_line_status;
u16 idv, idp;
idv = le16_to_cpu(port->serial->dev->descriptor.idVendor);
@@ -979,11 +983,20 @@ static void pl2303_update_line_status(st
/* Save off the uart status for others to look at */
spin_lock_irqsave(&priv->lock, flags);
+ prev_line_status = priv->line_status;
priv->line_status = data[status_idx];
spin_unlock_irqrestore(&priv->lock, flags);
if (priv->line_status & UART_BREAK_ERROR)
usb_serial_handle_break(port);
wake_up_interruptible(&priv->delta_msr_wait);
+
+ tty = tty_port_tty_get(&port->port);
+ if (!tty)
+ return;
+ if ((priv->line_status ^ prev_line_status) & UART_DCD)
+ usb_serial_handle_dcd_change(port, tty,
+ priv->line_status & UART_DCD);
+ tty_kref_put(tty);
}
static void pl2303_read_int_callback(struct urb *urb)
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -20,6 +20,8 @@
#define PL2303_PRODUCT_ID_ALDIGA 0x0611
#define PL2303_PRODUCT_ID_MMX 0x0612
#define PL2303_PRODUCT_ID_GPRS 0x0609
+#define PL2303_PRODUCT_ID_HCR331 0x331a
+#define PL2303_PRODUCT_ID_MOTOROLA 0x0307
#define ATEN_VENDOR_ID 0x0557
#define ATEN_VENDOR_ID2 0x0547
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -266,6 +266,9 @@ static struct usb_device_id id_table []
{ USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
},
+ { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */
+ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+ },
{ }
};
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -137,7 +137,7 @@ struct spcp8x5_usb_ctrl_arg {
/* how come ??? */
#define UART_STATE 0x08
-#define UART_STATE_TRANSIENT_MASK 0x74
+#define UART_STATE_TRANSIENT_MASK 0x75
#define UART_DCD 0x01
#define UART_DSR 0x02
#define UART_BREAK_ERROR 0x04
@@ -734,6 +734,10 @@ static void spcp8x5_read_bulk_callback(s
tty_insert_flip_char(tty, data[i], tty_flag);
tty_flip_buffer_push(tty);
}
+
+ if (status & UART_DCD)
+ usb_serial_handle_dcd_change(port, tty,
+ priv->line_status & MSR_STATUS_LINE_DCD);
tty_kref_put(tty);
/* Schedule the next read _if_ we are still open */
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -366,9 +366,9 @@ failed_1port:
static void __exit ti_exit(void)
{
+ usb_deregister(&ti_usb_driver);
usb_serial_deregister(&ti_1port_device);
usb_serial_deregister(&ti_2port_device);
- usb_deregister(&ti_usb_driver);
}
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -27,6 +27,7 @@
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
+#include <linux/usb/cdc.h>
#include "visor.h"
/*
@@ -758,6 +759,17 @@ static int visor_probe(struct usb_serial
dbg("%s", __func__);
+ /*
+ * some Samsung Android phones in modem mode have the same ID
+ * as SPH-I500, but they are ACM devices, so don't bind to them
+ */
+ if (id->idVendor == SAMSUNG_VENDOR_ID &&
+ id->idProduct == SAMSUNG_SPH_I500_ID &&
+ serial->dev->descriptor.bDeviceClass == USB_CLASS_COMM &&
+ serial->dev->descriptor.bDeviceSubClass ==
+ USB_CDC_SUBCLASS_ACM)
+ return -ENODEV;
+
if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
dev_err(&serial->dev->dev, "active config #%d != 1 ??\n",
serial->dev->actconfig->desc.bConfigurationValue);
--- a/drivers/usb/storage/unusual_cypress.h
+++ b/drivers/usb/storage/unusual_cypress.h
@@ -31,4 +31,9 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x
"Cypress ISD-300LP",
US_SC_CYP_ATACB, US_PR_DEVICE, NULL, 0),
+UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x9999,
+ "Super Top",
+ "USB 2.0 SATA BRIDGE",
+ US_SC_CYP_ATACB, US_PR_DEVICE, NULL, 0),
+
#endif /* defined(CONFIG_USB_STORAGE_CYPRESS_ATACB) || ... */
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1043,6 +1043,15 @@ UNUSUAL_DEV( 0x084d, 0x0011, 0x0110, 0x
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_BULK32),
+/* Reported by <ttkspam@free.fr>
+ * The device reports a vendor-specific device class, requiring an
+ * explicit vendor/product match.
+ */
+UNUSUAL_DEV( 0x0851, 0x1542, 0x0002, 0x0002,
+ "MagicPixel",
+ "FW_Omega2",
+ US_SC_DEVICE, US_PR_DEVICE, NULL, 0),
+
/* Andrew Lunn <andrew@lunn.ch>
* PanDigital Digital Picture Frame. Does not like ALLOW_MEDIUM_REMOVAL
* on LUN 4.
@@ -1401,6 +1410,13 @@ UNUSUAL_DEV( 0x0fca, 0x0006, 0x0001, 0x
US_FL_IGNORE_DEVICE ),
#endif
+/* Submitted by Nick Holloway */
+UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100,
+ "VTech",
+ "Kidizoom",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY ),
+
/* Reported by Michael Stattmann <michael@stattmann.com> */
UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
"Sony Ericsson",
@@ -1880,6 +1896,22 @@ UNUSUAL_DEV( 0x1908, 0x1320, 0x0000, 0x0
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_BAD_SENSE ),
+/* Patch by Richard Schütz <r.schtz@t-online.de>
+ * This external hard drive enclosure uses a JMicron chip which
+ * needs the US_FL_IGNORE_RESIDUE flag to work properly. */
+UNUSUAL_DEV( 0x1e68, 0x001b, 0x0000, 0x0000,
+ "TrekStor GmbH & Co. KG",
+ "DataStation maxi g.u",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
+
+/* Reported by Jasper Mackenzie <scarletpimpernal@hotmail.com> */
+UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000,
+ "Coby Electronics",
+ "MP3 Player",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
+
UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
"ST",
"2A",
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -95,11 +95,6 @@ static struct pci_device_id virtio_pci_i
MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
-/* A PCI device has it's own struct device and so does a virtio device so
- * we create a place for the virtio devices to show up in sysfs. I think it
- * would make more sense for virtio to not insist on having it's own device. */
-static struct device *virtio_pci_root;
-
/* Convert a generic virtio device to our structure */
static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
{
@@ -628,7 +623,7 @@ static int __devinit virtio_pci_probe(st
if (vp_dev == NULL)
return -ENOMEM;
- vp_dev->vdev.dev.parent = virtio_pci_root;
+ vp_dev->vdev.dev.parent = &pci_dev->dev;
vp_dev->vdev.dev.release = virtio_pci_release_dev;
vp_dev->vdev.config = &virtio_pci_config_ops;
vp_dev->pci_dev = pci_dev;
@@ -652,6 +647,7 @@ static int __devinit virtio_pci_probe(st
goto out_req_regions;
pci_set_drvdata(pci_dev, vp_dev);
+ pci_set_master(pci_dev);
/* we use the subsystem vendor/device id as the virtio vendor/device
* id. this allows us to use the same PCI vendor/device id for all
@@ -715,17 +711,7 @@ static struct pci_driver virtio_pci_driv
static int __init virtio_pci_init(void)
{
- int err;
-
- virtio_pci_root = root_device_register("virtio-pci");
- if (IS_ERR(virtio_pci_root))
- return PTR_ERR(virtio_pci_root);
-
- err = pci_register_driver(&virtio_pci_driver);
- if (err)
- root_device_unregister(virtio_pci_root);
-
- return err;
+ return pci_register_driver(&virtio_pci_driver);
}
module_init(virtio_pci_init);
@@ -733,7 +719,6 @@ module_init(virtio_pci_init);
static void __exit virtio_pci_exit(void)
{
pci_unregister_driver(&virtio_pci_driver);
- root_device_unregister(virtio_pci_root);
}
module_exit(virtio_pci_exit);
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -323,7 +323,7 @@ extern int CIFSSMBLock(const int xid, st
const __u16 netfid, const __u64 len,
const __u64 offset, const __u32 numUnlock,
const __u32 numLock, const __u8 lockType,
- const bool waitFlag);
+ const bool waitFlag, const __u8 oplock_level);
extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
const __u16 smb_file_id, const int get_flag,
const __u64 len, struct file_lock *,
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1641,7 +1641,8 @@ int
CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
const __u16 smb_file_id, const __u64 len,
const __u64 offset, const __u32 numUnlock,
- const __u32 numLock, const __u8 lockType, const bool waitFlag)
+ const __u32 numLock, const __u8 lockType,
+ const bool waitFlag, const __u8 oplock_level)
{
int rc = 0;
LOCK_REQ *pSMB = NULL;
@@ -1669,6 +1670,7 @@ CIFSSMBLock(const int xid, struct cifsTc
pSMB->NumberOfLocks = cpu_to_le16(numLock);
pSMB->NumberOfUnlocks = cpu_to_le16(numUnlock);
pSMB->LockType = lockType;
+ pSMB->OplockLevel = oplock_level;
pSMB->AndXCommand = 0xFF; /* none */
pSMB->Fid = smb_file_id; /* netfid stays le */
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -825,12 +825,12 @@ int cifs_lock(struct file *file, int cmd
/* BB we could chain these into one lock request BB */
rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
- 0, 1, lockType, 0 /* wait flag */ );
+ 0, 1, lockType, 0 /* wait flag */, 0);
if (rc == 0) {
rc = CIFSSMBLock(xid, tcon, netfid, length,
pfLock->fl_start, 1 /* numUnlock */ ,
0 /* numLock */ , lockType,
- 0 /* wait flag */ );
+ 0 /* wait flag */, 0);
pfLock->fl_type = F_UNLCK;
if (rc != 0)
cERROR(1, ("Error unlocking previously locked "
@@ -873,8 +873,8 @@ int cifs_lock(struct file *file, int cmd
if (numLock) {
rc = CIFSSMBLock(xid, tcon, netfid, length,
- pfLock->fl_start,
- 0, numLock, lockType, wait_flag);
+ pfLock->fl_start, 0, numLock, lockType,
+ wait_flag, 0);
if (rc == 0) {
/* For Windows locks we must store them. */
@@ -894,9 +894,9 @@ int cifs_lock(struct file *file, int cmd
(pfLock->fl_start + length) >=
(li->offset + li->length)) {
stored_rc = CIFSSMBLock(xid, tcon,
- netfid,
- li->length, li->offset,
- 1, 0, li->type, false);
+ netfid, li->length,
+ li->offset, 1, 0,
+ li->type, false, 0);
if (stored_rc)
rc = stored_rc;
@@ -2314,7 +2314,8 @@ cifs_oplock_break(struct slow_work *work
*/
if (!cfile->closePend && !cfile->oplock_break_cancelled) {
rc = CIFSSMBLock(0, cifs_sb->tcon, cfile->netfid, 0, 0, 0, 0,
- LOCKING_ANDX_OPLOCK_RELEASE, false);
+ LOCKING_ANDX_OPLOCK_RELEASE, false,
+ cinode->clientCanCacheRead ? 1 : 0);
cFYI(1, ("Oplock release rc = %d", rc));
}
}
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -988,6 +988,8 @@ int ecryptfs_getattr(struct vfsmount *mn
rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry),
ecryptfs_dentry_to_lower(dentry), &lower_stat);
if (!rc) {
+ fsstack_copy_attr_all(dentry->d_inode,
+ ecryptfs_inode_to_lower(dentry->d_inode), NULL);
generic_fillattr(dentry->d_inode, stat);
stat->blocks = lower_stat.blocks;
}
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -63,6 +63,13 @@
* cleanup path and it is also acquired by eventpoll_release_file()
* if a file has been pushed inside an epoll set and it is then
close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
+ * It is also acquired when inserting an epoll fd onto another epoll
+ * fd. We do this so that we walk the epoll tree and ensure that this
+ * insertion does not create a cycle of epoll file descriptors, which
+ * could lead to deadlock. We need a global mutex to prevent two
+ * simultaneous inserts (A into B and B into A) from racing and
+ * constructing a cycle without either insert observing that it is
+ * about to do so.
* It is possible to drop the "ep->mtx" and to use the global
* mutex "epmutex" (together with "ep->lock") to have it working,
* but having "ep->mtx" will make the interface more scalable.
@@ -227,6 +234,9 @@ static int max_user_watches __read_mostl
*/
static DEFINE_MUTEX(epmutex);
+/* Used to check for epoll file descriptor inclusion loops */
+static struct nested_calls poll_loop_ncalls;
+
/* Used for safe wake up implementation */
static struct nested_calls poll_safewake_ncalls;
@@ -1182,6 +1192,62 @@ retry:
return res;
}
+/**
+ * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
+ * API, to verify that adding an epoll file inside another
+ * epoll structure does not violate the constraints in
+ * terms of closed loops or too deep chains (which can
+ * result in excessive stack usage).
+ *
+ * @priv: Pointer to the epoll file to be currently checked.
+ * @cookie: Original cookie for this call. This is the top-of-the-chain epoll
+ * data structure pointer.
+ * @call_nests: Current depth of the @ep_call_nested() call stack.
+ *
+ * Returns: Returns zero if adding the epoll @file inside current epoll
+ * structure @ep does not violate the constraints, or -1 otherwise.
+ */
+static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
+{
+ int error = 0;
+ struct file *file = priv;
+ struct eventpoll *ep = file->private_data;
+ struct rb_node *rbp;
+ struct epitem *epi;
+
+ mutex_lock(&ep->mtx);
+ for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
+ epi = rb_entry(rbp, struct epitem, rbn);
+ if (unlikely(is_file_epoll(epi->ffd.file))) {
+ error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+ ep_loop_check_proc, epi->ffd.file,
+ epi->ffd.file->private_data, current);
+ if (error != 0)
+ break;
+ }
+ }
+ mutex_unlock(&ep->mtx);
+
+ return error;
+}
+
+/**
+ * ep_loop_check - Performs a check to verify that adding an epoll file (@file)
+ * inside another epoll file (represented by @ep) does not create
+ * closed loops or too deep chains.
+ *
+ * @ep: Pointer to the epoll private data structure.
+ * @file: Pointer to the epoll file to be checked.
+ *
+ * Returns: Returns zero if adding the epoll @file inside current epoll
+ * structure @ep does not violate the constraints, or -1 otherwise.
+ */
+static int ep_loop_check(struct eventpoll *ep, struct file *file)
+{
+ return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+ ep_loop_check_proc, file, ep, current);
+}
+
/*
* Open an eventpoll file descriptor.
*/
@@ -1230,6 +1296,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, in
struct epoll_event __user *, event)
{
int error;
+ int did_lock_epmutex = 0;
struct file *file, *tfile;
struct eventpoll *ep;
struct epitem *epi;
@@ -1271,6 +1338,25 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, in
*/
ep = file->private_data;
+ /*
+ * When we insert an epoll file descriptor inside another epoll file
+ * descriptor, there is the chance of creating closed loops, which are
+ * better handled here than in more critical paths.
+ *
+ * We hold epmutex across the loop check and the insert in this case, in
+ * order to prevent two separate inserts from racing and each doing the
+ * insert "at the same time" such that ep_loop_check passes on both
+ * before either one does the insert, thereby creating a cycle.
+ */
+ if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) {
+ mutex_lock(&epmutex);
+ did_lock_epmutex = 1;
+ error = -ELOOP;
+ if (ep_loop_check(ep, tfile) != 0)
+ goto error_tgt_fput;
+ }
+
+
mutex_lock(&ep->mtx);
/*
@@ -1306,6 +1392,9 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, in
mutex_unlock(&ep->mtx);
error_tgt_fput:
+ if (unlikely(did_lock_epmutex))
+ mutex_unlock(&epmutex);
+
fput(tfile);
error_fput:
fput(file);
@@ -1424,6 +1513,12 @@ static int __init eventpoll_init(void)
max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
EP_ITEM_COST;
+ /*
+ * Initialize the structure used to perform epoll file descriptor
+ * inclusion loops checks.
+ */
+ ep_nested_calls_init(&poll_loop_ncalls);
+
/* Initialize the structure used to perform safe poll wait head wake ups */
ep_nested_calls_init(&poll_safewake_ncalls);
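The cycle check added above is observable from userspace: once one epoll descriptor has been added to another, the reverse insertion must fail. A minimal sketch of the expected behaviour (illustrative only, not part of the patch):

	#include <errno.h>
	#include <stdio.h>
	#include <sys/epoll.h>

	int main(void)
	{
		int a = epoll_create(1);
		int b = epoll_create(1);
		struct epoll_event ev = { .events = EPOLLIN };

		ev.data.fd = a;
		/* Adding a into b is fine; no cycle yet. */
		if (epoll_ctl(b, EPOLL_CTL_ADD, a, &ev) == -1)
			perror("EPOLL_CTL_ADD a->b");

		ev.data.fd = b;
		/* Adding b into a would close the loop; expected to fail with ELOOP. */
		if (epoll_ctl(a, EPOLL_CTL_ADD, b, &ev) == -1 && errno == ELOOP)
			printf("cycle rejected with ELOOP, as intended\n");

		return 0;
	}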
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -327,7 +327,6 @@ static int ext2_rename (struct inode * o
new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page);
if (!new_de)
goto out_dir;
- inode_inc_link_count(old_inode);
ext2_set_link(new_dir, new_de, new_page, old_inode, 1);
new_inode->i_ctime = CURRENT_TIME_SEC;
if (dir_de)
@@ -339,12 +338,9 @@ static int ext2_rename (struct inode * o
if (new_dir->i_nlink >= EXT2_LINK_MAX)
goto out_dir;
}
- inode_inc_link_count(old_inode);
err = ext2_add_link(new_dentry, old_inode);
- if (err) {
- inode_dec_link_count(old_inode);
+ if (err)
goto out_dir;
- }
if (dir_de)
inode_inc_link_count(new_dir);
}
@@ -352,12 +348,11 @@ static int ext2_rename (struct inode * o
/*
* Like most other Unix systems, set the ctime for inodes on a
* rename.
- * inode_dec_link_count() will mark the inode dirty.
*/
old_inode->i_ctime = CURRENT_TIME_SEC;
+ mark_inode_dirty(old_inode);
ext2_delete_entry (old_de, old_page);
- inode_dec_link_count(old_inode);
if (dir_de) {
if (old_dir != new_dir)
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -121,13 +121,13 @@ struct file *get_empty_filp(void)
goto fail;
percpu_counter_inc(&nr_files);
+ f->f_cred = get_cred(cred);
if (security_file_alloc(f))
goto fail_sec;
INIT_LIST_HEAD(&f->f_u.fu_list);
atomic_long_set(&f->f_count, 1);
rwlock_init(&f->f_owner.lock);
- f->f_cred = get_cred(cred);
spin_lock_init(&f->f_lock);
eventpoll_init_file(f);
/* f->f_version: 0 */
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -86,18 +86,52 @@ struct fuse_file *fuse_file_get(struct f
return ff;
}
+static void fuse_release_async(struct work_struct *work)
+{
+ struct fuse_req *req;
+ struct fuse_conn *fc;
+ struct path path;
+
+ req = container_of(work, struct fuse_req, misc.release.work);
+ path = req->misc.release.path;
+ fc = get_fuse_conn(path.dentry->d_inode);
+
+ fuse_put_request(fc, req);
+ path_put(&path);
+}
+
static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
- path_put(&req->misc.release.path);
+ if (fc->destroy_req) {
+ /*
+ * If this is a fuseblk mount, then it's possible that
+ * releasing the path will result in releasing the
+ * super block and sending the DESTROY request. If
+ * the server is single threaded, this would hang.
+ * For this reason do the path_put() in a separate
+ * thread.
+ */
+ atomic_inc(&req->count);
+ INIT_WORK(&req->misc.release.work, fuse_release_async);
+ schedule_work(&req->misc.release.work);
+ } else {
+ path_put(&req->misc.release.path);
+ }
}
-static void fuse_file_put(struct fuse_file *ff)
+static void fuse_file_put(struct fuse_file *ff, bool sync)
{
if (atomic_dec_and_test(&ff->count)) {
struct fuse_req *req = ff->reserved_req;
- req->end = fuse_release_end;
- fuse_request_send_background(ff->fc, req);
+ if (sync) {
+ fuse_request_send(ff->fc, req);
+ path_put(&req->misc.release.path);
+ fuse_put_request(ff->fc, req);
+ } else {
+ req->end = fuse_release_end;
+ fuse_request_send_background(ff->fc, req);
+ }
kfree(ff);
}
}
@@ -219,8 +253,12 @@ void fuse_release_common(struct file *fi
* Normally this will send the RELEASE request, however if
* some asynchronous READ or WRITE requests are outstanding,
* the sending will be delayed.
+ *
+ * Make the release synchronous if this is a fuseblk mount;
+ * synchronous RELEASE is allowed (and desirable) in this case
+ * because the server can be trusted not to screw up.
*/
- fuse_file_put(ff);
+ fuse_file_put(ff, ff->fc->destroy_req != NULL);
}
static int fuse_open(struct inode *inode, struct file *file)
@@ -549,7 +587,7 @@ static void fuse_readpages_end(struct fu
unlock_page(page);
}
if (req->ff)
- fuse_file_put(req->ff);
+ fuse_file_put(req->ff, false);
}
static void fuse_send_readpages(struct fuse_req *req, struct file *file)
@@ -1129,7 +1167,7 @@ static ssize_t fuse_direct_write(struct
static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
__free_page(req->pages[0]);
- fuse_file_put(req->ff);
+ fuse_file_put(req->ff, false);
}
static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -21,6 +21,7 @@
#include <linux/rwsem.h>
#include <linux/rbtree.h>
#include <linux/poll.h>
+#include <linux/workqueue.h>
/** Max number of pages that can be used in a single read request */
#define FUSE_MAX_PAGES_PER_REQ 32
@@ -254,7 +255,10 @@ struct fuse_req {
union {
struct fuse_forget_in forget_in;
struct {
- struct fuse_release_in in;
+ union {
+ struct fuse_release_in in;
+ struct work_struct work;
+ };
struct path path;
} release;
struct fuse_init_in init_in;
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -541,7 +541,7 @@ static int gfs2_bmap_alloc(struct inode
*ptr++ = cpu_to_be64(bn++);
break;
}
- } while (state != ALLOC_DATA);
+ } while ((state != ALLOC_DATA) || !dblock);
ip->i_height = height;
gfs2_add_inode_blocks(&ip->i_inode, alloced);
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -401,15 +401,18 @@ static ssize_t nfs_direct_read_schedule_
pos += vec->iov_len;
}
+ /*
+ * If no bytes were started, return the error, and let the
+ * generic layer handle the completion.
+ */
+ if (requested_bytes == 0) {
+ nfs_direct_req_release(dreq);
+ return result < 0 ? result : -EIO;
+ }
+
if (put_dreq(dreq))
nfs_direct_complete(dreq);
-
- if (requested_bytes != 0)
- return 0;
-
- if (result < 0)
- return result;
- return -EIO;
+ return 0;
}
static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
@@ -829,15 +832,18 @@ static ssize_t nfs_direct_write_schedule
pos += vec->iov_len;
}
+ /*
+ * If no bytes were started, return the error, and let the
+ * generic layer handle the completion.
+ */
+ if (requested_bytes == 0) {
+ nfs_direct_req_release(dreq);
+ return result < 0 ? result : -EIO;
+ }
+
if (put_dreq(dreq))
nfs_direct_write_complete(dreq, dreq->inode);
-
- if (requested_bytes != 0)
- return 0;
-
- if (result < 0)
- return result;
- return -EIO;
+ return 0;
}
static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -220,7 +220,7 @@ static int nfs_do_fsync(struct nfs_open_
have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
if (have_error)
ret = xchg(&ctx->error, 0);
- if (!ret)
+ if (!ret && status < 0)
ret = status;
return ret;
}
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -323,8 +323,8 @@ nfsd4_decode_fattr(struct nfsd4_compound
READ_BUF(dummy32);
len += (XDR_QUADLEN(dummy32) << 2);
READMEM(buf, dummy32);
- if ((host_err = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid)))
- goto out_nfserr;
+ if ((status = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid)))
+ return status;
iattr->ia_valid |= ATTR_UID;
}
if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) {
@@ -334,8 +334,8 @@ nfsd4_decode_fattr(struct nfsd4_compound
READ_BUF(dummy32);
len += (XDR_QUADLEN(dummy32) << 2);
READMEM(buf, dummy32);
- if ((host_err = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid)))
- goto out_nfserr;
+ if ((status = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid)))
+ return status;
iattr->ia_valid |= ATTR_GID;
}
if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) {
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -823,7 +823,7 @@ nfsd_get_raparms(dev_t dev, ino_t ino)
if (ra->p_count == 0)
frap = rap;
}
- depth = nfsdstats.ra_size*11/10;
+ depth = nfsdstats.ra_size;
if (!frap) {
spin_unlock(&rab->pb_lock);
return NULL;
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -3236,7 +3236,7 @@ static int ocfs2_make_clusters_writable(
u32 num_clusters, unsigned int e_flags)
{
int ret, delete, index, credits = 0;
- u32 new_bit, new_len;
+ u32 new_bit, new_len, orig_num_clusters;
unsigned int set_len;
struct ocfs2_super *osb = OCFS2_SB(sb);
handle_t *handle;
@@ -3269,6 +3269,8 @@ static int ocfs2_make_clusters_writable(
goto out;
}
+ orig_num_clusters = num_clusters;
+
while (num_clusters) {
ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
p_cluster, num_clusters,
@@ -3356,7 +3358,8 @@ static int ocfs2_make_clusters_writable(
* in write-back mode.
*/
if (context->get_clusters == ocfs2_di_get_clusters) {
- ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters);
+ ret = ocfs2_cow_sync_writeback(sb, context, cpos,
+ orig_num_clusters);
if (ret)
mlog_errno(ret);
}
--- a/fs/ocfs2/stack_user.c
+++ b/fs/ocfs2/stack_user.c
@@ -191,7 +191,7 @@ static struct ocfs2_live_connection *ocf
return c;
}
- return c;
+ return NULL;
}
/*
--- a/fs/partitions/ldm.c
+++ b/fs/partitions/ldm.c
@@ -251,6 +251,11 @@ static bool ldm_parse_vmdb (const u8 *da
}
vm->vblk_size = get_unaligned_be32(data + 0x08);
+ if (vm->vblk_size == 0) {
+ ldm_error ("Illegal VBLK size");
+ return false;
+ }
+
vm->vblk_offset = get_unaligned_be32(data + 0x0C);
vm->last_vblk_seq = get_unaligned_be32(data + 0x04);
--- a/fs/partitions/mac.c
+++ b/fs/partitions/mac.c
@@ -29,10 +29,9 @@ static inline void mac_fix_string(char *
int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
{
- int slot = 1;
Sector sect;
unsigned char *data;
- int blk, blocks_in_map;
+ int slot, blocks_in_map;
unsigned secsize;
#ifdef CONFIG_PPC_PMAC
int found_root = 0;
@@ -59,10 +58,14 @@ int mac_partition(struct parsed_partitio
put_dev_sector(sect);
return 0; /* not a MacOS disk */
}
- printk(" [mac]");
blocks_in_map = be32_to_cpu(part->map_count);
- for (blk = 1; blk <= blocks_in_map; ++blk) {
- int pos = blk * secsize;
+ if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
+ put_dev_sector(sect);
+ return 0;
+ }
+ printk(" [mac]");
+ for (slot = 1; slot <= blocks_in_map; ++slot) {
+ int pos = slot * secsize;
put_dev_sector(sect);
data = read_dev_sector(bdev, pos/512, &sect);
if (!data)
@@ -113,13 +116,11 @@ int mac_partition(struct parsed_partitio
}
if (goodness > found_root_goodness) {
- found_root = blk;
+ found_root = slot;
found_root_goodness = goodness;
}
}
#endif /* CONFIG_PPC_PMAC */
-
- ++slot;
}
#ifdef CONFIG_PPC_PMAC
if (found_root_goodness)
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -172,7 +172,7 @@ static inline void task_state(struct seq
if (tracer)
tpid = task_pid_nr_ns(tracer, ns);
}
- cred = get_cred((struct cred *) __task_cred(p));
+ cred = get_task_cred(p);
seq_printf(m,
"State:\t%s\n"
"Tgid:\t%d\n"
@@ -336,9 +336,6 @@ int proc_pid_status(struct seq_file *m,
task_sig(m, task);
task_cap(m, task);
cpuset_task_status_allowed(m, task);
-#if defined(CONFIG_S390)
- task_show_regs(m, task);
-#endif
task_context_switch_counts(m, task);
return 0;
}
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -826,8 +826,6 @@ static ssize_t do_sendfile(int out_fd, i
if (!(out_file->f_mode & FMODE_WRITE))
goto fput_out;
retval = -EINVAL;
- if (!out_file->f_op || !out_file->f_op->sendpage)
- goto fput_out;
in_inode = in_file->f_path.dentry->d_inode;
out_inode = out_file->f_path.dentry->d_inode;
retval = rw_verify_area(WRITE, out_file, &out_file->f_pos, count);
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -638,9 +638,11 @@ static int pipe_to_sendpage(struct pipe_
ret = buf->ops->confirm(pipe, buf);
if (!ret) {
more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
-
- ret = file->f_op->sendpage(file, buf->page, buf->offset,
- sd->len, &pos, more);
+ if (file->f_op && file->f_op->sendpage)
+ ret = file->f_op->sendpage(file, buf->page, buf->offset,
+ sd->len, &pos, more);
+ else
+ ret = -EINVAL;
}
return ret;
@@ -1058,8 +1060,9 @@ static long do_splice_from(struct pipe_i
if (unlikely(ret < 0))
return ret;
- splice_write = out->f_op->splice_write;
- if (!splice_write)
+ if (out->f_op && out->f_op->splice_write)
+ splice_write = out->f_op->splice_write;
+ else
splice_write = default_file_splice_write;
return splice_write(pipe, out, ppos, len, flags);
@@ -1083,8 +1086,9 @@ static long do_splice_to(struct file *in
if (unlikely(ret < 0))
return ret;
- splice_read = in->f_op->splice_read;
- if (!splice_read)
+ if (in->f_op && in->f_op->splice_read)
+ splice_read = in->f_op->splice_read;
+ else
splice_read = default_file_splice_read;
return splice_read(in, ppos, pipe, len, flags);
@@ -1306,7 +1310,8 @@ static long do_splice(struct file *in, l
if (off_in)
return -ESPIPE;
if (off_out) {
- if (out->f_op->llseek == no_llseek)
+ if (!out->f_op || !out->f_op->llseek ||
+ out->f_op->llseek == no_llseek)
return -EINVAL;
if (copy_from_user(&offset, off_out, sizeof(loff_t)))
return -EFAULT;
@@ -1326,7 +1331,8 @@ static long do_splice(struct file *in, l
if (off_out)
return -ESPIPE;
if (off_in) {
- if (in->f_op->llseek == no_llseek)
+ if (!in->f_op || !in->f_op->llseek ||
+ in->f_op->llseek == no_llseek)
return -EINVAL;
if (copy_from_user(&offset, off_in, sizeof(loff_t)))
return -EFAULT;
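The splice hunks above all follow the same defensive pattern: never assume f_op or a specific method is present, and either fall back to a default implementation or return an error. A condensed sketch of that pattern (illustrative only):

	static ssize_t example_call_splice_write(struct pipe_inode_info *pipe,
						 struct file *out, loff_t *ppos,
						 size_t len, unsigned int flags)
	{
		ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
					loff_t *, size_t, unsigned int);

		/* Fall back when the method (or the whole f_op table) is missing. */
		if (out->f_op && out->f_op->splice_write)
			splice_write = out->f_op->splice_write;
		else
			splice_write = default_file_splice_write;

		return splice_write(pipe, out, ppos, len, flags);
	}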
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -127,13 +127,12 @@ xfs_nfs_get_inode(
return ERR_PTR(-ESTALE);
/*
- * The XFS_IGET_BULKSTAT means that an invalid inode number is just
- * fine and not an indication of a corrupted filesystem. Because
- * clients can send any kind of invalid file handle, e.g. after
- * a restore on the server we have to deal with this case gracefully.
+ * The XFS_IGET_UNTRUSTED flag means that an invalid inode number is just
+ * fine and not an indication of a corrupted filesystem, as clients can
+ * send invalid file handles and we have to handle them gracefully.
*/
- error = xfs_iget(mp, NULL, ino, XFS_IGET_BULKSTAT,
- XFS_ILOCK_SHARED, &ip, 0);
+ error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED,
+ XFS_ILOCK_SHARED, &ip);
if (error) {
/*
* EINVAL means the inode cluster doesn't exist anymore.
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -673,10 +673,9 @@ xfs_ioc_bulkstat(
error = xfs_bulkstat_single(mp, &inlast,
bulkreq.ubuffer, &done);
else /* XFS_IOC_FSBULKSTAT */
- error = xfs_bulkstat(mp, &inlast, &count,
- (bulkstat_one_pf)xfs_bulkstat_one, NULL,
- sizeof(xfs_bstat_t), bulkreq.ubuffer,
- BULKSTAT_FG_QUICK, &done);
+ error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one,
+ sizeof(xfs_bstat_t), bulkreq.ubuffer,
+ &done);
if (error)
return -error;
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -235,15 +235,12 @@ xfs_bulkstat_one_compat(
xfs_ino_t ino, /* inode number to get data for */
void __user *buffer, /* buffer to place output in */
int ubsize, /* size of buffer */
- void *private_data, /* my private data */
- xfs_daddr_t bno, /* starting bno of inode cluster */
int *ubused, /* bytes used by me */
- void *dibuff, /* on-disk inode buffer */
int *stat) /* BULKSTAT_RV_... */
{
return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
- xfs_bulkstat_one_fmt_compat, bno,
- ubused, dibuff, stat);
+ xfs_bulkstat_one_fmt_compat,
+ ubused, stat);
}
/* copied from xfs_ioctl.c */
@@ -296,13 +293,11 @@ xfs_compat_ioc_bulkstat(
int res;
error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer,
- sizeof(compat_xfs_bstat_t),
- NULL, 0, NULL, NULL, &res);
+ sizeof(compat_xfs_bstat_t), 0, &res);
} else if (cmd == XFS_IOC_FSBULKSTAT_32) {
error = xfs_bulkstat(mp, &inlast, &count,
- xfs_bulkstat_one_compat, NULL,
- sizeof(compat_xfs_bstat_t), bulkreq.ubuffer,
- BULKSTAT_FG_QUICK, &done);
+ xfs_bulkstat_one_compat, sizeof(compat_xfs_bstat_t),
+ bulkreq.ubuffer, &done);
} else
error = XFS_ERROR(EINVAL);
if (error)
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -1606,10 +1606,7 @@ xfs_qm_dqusage_adjust(
xfs_ino_t ino, /* inode number to get data for */
void __user *buffer, /* not used */
int ubsize, /* not used */
- void *private_data, /* not used */
- xfs_daddr_t bno, /* starting block of inode cluster */
int *ubused, /* not used */
- void *dip, /* on-disk inode pointer (not used) */
int *res) /* result code value */
{
xfs_inode_t *ip;
@@ -1634,7 +1631,7 @@ xfs_qm_dqusage_adjust(
* the case in all other instances. It's OK that we do this because
* quotacheck is done only at mount time.
*/
- if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip, bno))) {
+ if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip))) {
*res = BULKSTAT_RV_NOTHING;
return error;
}
@@ -1766,12 +1763,13 @@ xfs_qm_quotacheck(
* Iterate thru all the inodes in the file system,
* adjusting the corresponding dquot counters in core.
*/
- if ((error = xfs_bulkstat(mp, &lastino, &count,
- xfs_qm_dqusage_adjust, NULL,
- structsz, NULL, BULKSTAT_FG_IGET, &done)))
+ error = xfs_bulkstat(mp, &lastino, &count,
+ xfs_qm_dqusage_adjust,
+ structsz, NULL, &done);
+ if (error)
break;
- } while (! done);
+ } while (!done);
/*
* We've made all the changes that we need to make incore.
@@ -1859,14 +1857,14 @@ xfs_qm_init_quotainos(
mp->m_sb.sb_uquotino != NULLFSINO) {
ASSERT(mp->m_sb.sb_uquotino > 0);
if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
- 0, 0, &uip, 0)))
+ 0, 0, &uip)))
return XFS_ERROR(error);
}
if (XFS_IS_OQUOTA_ON(mp) &&
mp->m_sb.sb_gquotino != NULLFSINO) {
ASSERT(mp->m_sb.sb_gquotino > 0);
if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
- 0, 0, &gip, 0))) {
+ 0, 0, &gip))) {
if (uip)
IRELE(uip);
return XFS_ERROR(error);
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -266,7 +266,7 @@ xfs_qm_scall_trunc_qfiles(
}
if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) {
- error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip, 0);
+ error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip);
if (!error) {
error = xfs_truncate_file(mp, qip);
IRELE(qip);
@@ -275,7 +275,7 @@ xfs_qm_scall_trunc_qfiles(
if ((flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) &&
mp->m_sb.sb_gquotino != NULLFSINO) {
- error2 = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip, 0);
+ error2 = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip);
if (!error2) {
error2 = xfs_truncate_file(mp, qip);
IRELE(qip);
@@ -420,12 +420,12 @@ xfs_qm_scall_getqstat(
}
if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
- 0, 0, &uip, 0) == 0)
+ 0, 0, &uip) == 0)
tempuqip = B_TRUE;
}
if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
- 0, 0, &gip, 0) == 0)
+ 0, 0, &gip) == 0)
tempgqip = B_TRUE;
}
if (uip) {
@@ -1114,10 +1114,7 @@ xfs_qm_internalqcheck_adjust(
xfs_ino_t ino, /* inode number to get data for */
void __user *buffer, /* not used */
int ubsize, /* not used */
- void *private_data, /* not used */
- xfs_daddr_t bno, /* starting block of inode cluster */
int *ubused, /* not used */
- void *dip, /* not used */
int *res) /* bulkstat result code */
{
xfs_inode_t *ip;
@@ -1139,7 +1136,7 @@ xfs_qm_internalqcheck_adjust(
ipreleased = B_FALSE;
again:
lock_flags = XFS_ILOCK_SHARED;
- if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip, bno))) {
+ if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip))) {
*res = BULKSTAT_RV_NOTHING;
return (error);
}
@@ -1212,15 +1209,15 @@ xfs_qm_internalqcheck(
* Iterate thru all the inodes in the file system,
* adjusting the corresponding dquot counters
*/
- if ((error = xfs_bulkstat(mp, &lastino, &count,
- xfs_qm_internalqcheck_adjust, NULL,
- 0, NULL, BULKSTAT_FG_IGET, &done))) {
+ error = xfs_bulkstat(mp, &lastino, &count,
+ xfs_qm_internalqcheck_adjust,
+ 0, NULL, &done);
+ if (error) {
+ cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error);
break;
}
- } while (! done);
- if (error) {
- cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error);
- }
+ } while (!done);
+
cmn_err(CE_DEBUG, "Checking results against system dquots");
for (i = 0; i < qmtest_hashmask; i++) {
h1 = &qmtest_udqtab[i];
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -1206,6 +1206,69 @@ error0:
return error;
}
+STATIC int
+xfs_imap_lookup(
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ xfs_agnumber_t agno,
+ xfs_agino_t agino,
+ xfs_agblock_t agbno,
+ xfs_agblock_t *chunk_agbno,
+ xfs_agblock_t *offset_agbno,
+ int flags)
+{
+ struct xfs_inobt_rec_incore rec;
+ struct xfs_btree_cur *cur;
+ struct xfs_buf *agbp;
+ int error;
+ int i;
+
+ down_read(&mp->m_peraglock);
+ error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
+ up_read(&mp->m_peraglock);
+ if (error) {
+ xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
+ "xfs_ialloc_read_agi() returned "
+ "error %d, agno %d",
+ error, agno);
+ return error;
+ }
+
+ /*
+ * Lookup the inode record for the given agino. If the record cannot be
+ * found, then it's an invalid inode number and we should abort. Once
+ * we have a record, we need to ensure it contains the inode number
+ * we are looking up.
+ */
+ cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
+ error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
+ if (!error) {
+ if (i)
+ error = xfs_inobt_get_rec(cur, &rec, &i);
+ if (!error && i == 0)
+ error = EINVAL;
+ }
+
+ xfs_trans_brelse(tp, agbp);
+ xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+ if (error)
+ return error;
+
+ /* check that the returned record contains the required inode */
+ if (rec.ir_startino > agino ||
+ rec.ir_startino + XFS_IALLOC_INODES(mp) <= agino)
+ return EINVAL;
+
+ /* for untrusted inodes check it is allocated first */
+ if ((flags & XFS_IGET_UNTRUSTED) &&
+ (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)))
+ return EINVAL;
+
+ *chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino);
+ *offset_agbno = agbno - *chunk_agbno;
+ return 0;
+}
+
/*
* Return the location of the inode in imap, for mapping it into a buffer.
*/
@@ -1238,8 +1301,11 @@ xfs_imap(
if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks ||
ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
#ifdef DEBUG
- /* no diagnostics for bulkstat, ino comes from userspace */
- if (flags & XFS_IGET_BULKSTAT)
+ /*
+ * Don't output diagnostic information for untrusted inodes
+ * as they can be invalid without implying corruption.
+ */
+ if (flags & XFS_IGET_UNTRUSTED)
return XFS_ERROR(EINVAL);
if (agno >= mp->m_sb.sb_agcount) {
xfs_fs_cmn_err(CE_ALERT, mp,
@@ -1266,6 +1332,23 @@ xfs_imap(
return XFS_ERROR(EINVAL);
}
+ blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog;
+
+ /*
+ * For bulkstat and handle lookups, we have an untrusted inode number
+ * that we have to verify is valid. We cannot do this just by reading
+ * the inode buffer as it may have been unlinked and removed leaving
+ * inodes in stale state on disk. Hence we have to do a btree lookup
+ * in all cases where an untrusted inode number is passed.
+ */
+ if (flags & XFS_IGET_UNTRUSTED) {
+ error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
+ &chunk_agbno, &offset_agbno, flags);
+ if (error)
+ return error;
+ goto out_map;
+ }
+
/*
* If the inode cluster size is the same as the blocksize or
* smaller we get to the buffer by simple arithmetics.
@@ -1280,24 +1363,6 @@ xfs_imap(
return 0;
}
- blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog;
-
- /*
- * If we get a block number passed from bulkstat we can use it to
- * find the buffer easily.
- */
- if (imap->im_blkno) {
- offset = XFS_INO_TO_OFFSET(mp, ino);
- ASSERT(offset < mp->m_sb.sb_inopblock);
-
- cluster_agbno = xfs_daddr_to_agbno(mp, imap->im_blkno);
- offset += (agbno - cluster_agbno) * mp->m_sb.sb_inopblock;
-
- imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster);
- imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog);
- return 0;
- }
-
/*
* If the inode chunks are aligned then use simple maths to
* find the location. Otherwise we have to do a btree
@@ -1307,52 +1372,13 @@ xfs_imap(
offset_agbno = agbno & mp->m_inoalign_mask;
chunk_agbno = agbno - offset_agbno;
} else {
- xfs_btree_cur_t *cur; /* inode btree cursor */
- xfs_inobt_rec_incore_t chunk_rec;
- xfs_buf_t *agbp; /* agi buffer */
- int i; /* temp state */
-
- down_read(&mp->m_peraglock);
- error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
- up_read(&mp->m_peraglock);
- if (error) {
- xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
- "xfs_ialloc_read_agi() returned "
- "error %d, agno %d",
- error, agno);
- return error;
- }
-
- cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
- error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
- if (error) {
- xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
- "xfs_inobt_lookup() failed");
- goto error0;
- }
-
- error = xfs_inobt_get_rec(cur, &chunk_rec, &i);
- if (error) {
- xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
- "xfs_inobt_get_rec() failed");
- goto error0;
- }
- if (i == 0) {
-#ifdef DEBUG
- xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
- "xfs_inobt_get_rec() failed");
-#endif /* DEBUG */
- error = XFS_ERROR(EINVAL);
- }
- error0:
- xfs_trans_brelse(tp, agbp);
- xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+ error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
+ &chunk_agbno, &offset_agbno, flags);
if (error)
return error;
- chunk_agbno = XFS_AGINO_TO_AGBNO(mp, chunk_rec.ir_startino);
- offset_agbno = agbno - chunk_agbno;
}
+out_map:
ASSERT(agbno >= chunk_agbno);
cluster_agbno = chunk_agbno +
((offset_agbno / blks_per_cluster) * blks_per_cluster);
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -295,7 +295,6 @@ xfs_iget_cache_miss(
xfs_trans_t *tp,
xfs_ino_t ino,
struct xfs_inode **ipp,
- xfs_daddr_t bno,
int flags,
int lock_flags) __releases(pag->pag_ici_lock)
{
@@ -308,7 +307,7 @@ xfs_iget_cache_miss(
if (!ip)
return ENOMEM;
- error = xfs_iread(mp, tp, ip, bno, flags);
+ error = xfs_iread(mp, tp, ip, flags);
if (error)
goto out_destroy;
@@ -392,8 +391,6 @@ out_destroy:
* within the file system for the inode being requested.
* lock_flags -- flags indicating how to lock the inode. See the comment
* for xfs_ilock() for a list of valid values.
- * bno -- the block number starting the buffer containing the inode,
- * if known (as by bulkstat), else 0.
*/
int
xfs_iget(
@@ -402,8 +399,7 @@ xfs_iget(
xfs_ino_t ino,
uint flags,
uint lock_flags,
- xfs_inode_t **ipp,
- xfs_daddr_t bno)
+ xfs_inode_t **ipp)
{
xfs_inode_t *ip;
int error;
@@ -434,7 +430,7 @@ again:
read_unlock(&pag->pag_ici_lock);
XFS_STATS_INC(xs_ig_missed);
- error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno,
+ error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
flags, lock_flags);
if (error)
goto out_error_or_again;
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -177,7 +177,7 @@ xfs_imap_to_bp(
if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
XFS_ERRTAG_ITOBP_INOTOBP,
XFS_RANDOM_ITOBP_INOTOBP))) {
- if (iget_flags & XFS_IGET_BULKSTAT) {
+ if (iget_flags & XFS_IGET_UNTRUSTED) {
xfs_trans_brelse(tp, bp);
return XFS_ERROR(EINVAL);
}
@@ -787,7 +787,6 @@ xfs_iread(
xfs_mount_t *mp,
xfs_trans_t *tp,
xfs_inode_t *ip,
- xfs_daddr_t bno,
uint iget_flags)
{
xfs_buf_t *bp;
@@ -797,11 +796,9 @@ xfs_iread(
/*
* Fill in the location information in the in-core inode.
*/
- ip->i_imap.im_blkno = bno;
error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
if (error)
return error;
- ASSERT(bno == 0 || bno == ip->i_imap.im_blkno);
/*
* Get pointers to the on-disk inode and the buffer containing it.
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -468,7 +468,7 @@ static inline void xfs_ifunlock(xfs_inod
* xfs_iget.c prototypes.
*/
int xfs_iget(struct xfs_mount *, struct xfs_trans *, xfs_ino_t,
- uint, uint, xfs_inode_t **, xfs_daddr_t);
+ uint, uint, xfs_inode_t **);
void xfs_iput(xfs_inode_t *, uint);
void xfs_iput_new(xfs_inode_t *, uint);
void xfs_ilock(xfs_inode_t *, uint);
@@ -558,7 +558,7 @@ do { \
* Flags for xfs_iget()
*/
#define XFS_IGET_CREATE 0x1
-#define XFS_IGET_BULKSTAT 0x2
+#define XFS_IGET_UNTRUSTED 0x2
int xfs_inotobp(struct xfs_mount *, struct xfs_trans *,
xfs_ino_t, struct xfs_dinode **,
@@ -567,7 +567,7 @@ int xfs_itobp(struct xfs_mount *, struc
struct xfs_inode *, struct xfs_dinode **,
struct xfs_buf **, uint);
int xfs_iread(struct xfs_mount *, struct xfs_trans *,
- struct xfs_inode *, xfs_daddr_t, uint);
+ struct xfs_inode *, uint);
void xfs_dinode_to_disk(struct xfs_dinode *,
struct xfs_icdinode *);
void xfs_idestroy_fork(struct xfs_inode *, int);
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -49,24 +49,40 @@ xfs_internal_inum(
(ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino)));
}
-STATIC int
-xfs_bulkstat_one_iget(
- xfs_mount_t *mp, /* mount point for filesystem */
- xfs_ino_t ino, /* inode number to get data for */
- xfs_daddr_t bno, /* starting bno of inode cluster */
- xfs_bstat_t *buf, /* return buffer */
- int *stat) /* BULKSTAT_RV_... */
+/*
+ * Return stat information for one inode.
+ * Return 0 if ok, else errno.
+ */
+int
+xfs_bulkstat_one_int(
+ struct xfs_mount *mp, /* mount point for filesystem */
+ xfs_ino_t ino, /* inode to get data for */
+ void __user *buffer, /* buffer to place output in */
+ int ubsize, /* size of buffer */
+ bulkstat_one_fmt_pf formatter, /* formatter, copy to user */
+ int *ubused, /* bytes used by me */
+ int *stat) /* BULKSTAT_RV_... */
{
- xfs_icdinode_t *dic; /* dinode core info pointer */
- xfs_inode_t *ip; /* incore inode pointer */
- struct inode *inode;
- int error;
+ struct xfs_icdinode *dic; /* dinode core info pointer */
+ struct xfs_inode *ip; /* incore inode pointer */
+ struct inode *inode;
+ struct xfs_bstat *buf; /* return buffer */
+ int error = 0; /* error value */
+
+ *stat = BULKSTAT_RV_NOTHING;
+
+ if (!buffer || xfs_internal_inum(mp, ino))
+ return XFS_ERROR(EINVAL);
+
+ buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
+ if (!buf)
+ return XFS_ERROR(ENOMEM);
error = xfs_iget(mp, NULL, ino,
- XFS_IGET_BULKSTAT, XFS_ILOCK_SHARED, &ip, bno);
+ XFS_IGET_UNTRUSTED, XFS_ILOCK_SHARED, &ip);
if (error) {
*stat = BULKSTAT_RV_NOTHING;
- return error;
+ goto out_free;
}
ASSERT(ip != NULL);
@@ -126,76 +142,16 @@ xfs_bulkstat_one_iget(
buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
break;
}
-
xfs_iput(ip, XFS_ILOCK_SHARED);
- return error;
-}
-STATIC void
-xfs_bulkstat_one_dinode(
- xfs_mount_t *mp, /* mount point for filesystem */
- xfs_ino_t ino, /* inode number to get data for */
- xfs_dinode_t *dic, /* dinode inode pointer */
- xfs_bstat_t *buf) /* return buffer */
-{
- /*
- * The inode format changed when we moved the link count and
- * made it 32 bits long. If this is an old format inode,
- * convert it in memory to look like a new one. If it gets
- * flushed to disk we will convert back before flushing or
- * logging it. We zero out the new projid field and the old link
- * count field. We'll handle clearing the pad field (the remains
- * of the old uuid field) when we actually convert the inode to
- * the new format. We don't change the version number so that we
- * can distinguish this from a real new format inode.
- */
- if (dic->di_version == 1) {
- buf->bs_nlink = be16_to_cpu(dic->di_onlink);
- buf->bs_projid = 0;
- } else {
- buf->bs_nlink = be32_to_cpu(dic->di_nlink);
- buf->bs_projid = be16_to_cpu(dic->di_projid);
- }
+ error = formatter(buffer, ubsize, ubused, buf);
- buf->bs_ino = ino;
- buf->bs_mode = be16_to_cpu(dic->di_mode);
- buf->bs_uid = be32_to_cpu(dic->di_uid);
- buf->bs_gid = be32_to_cpu(dic->di_gid);
- buf->bs_size = be64_to_cpu(dic->di_size);
- buf->bs_atime.tv_sec = be32_to_cpu(dic->di_atime.t_sec);
- buf->bs_atime.tv_nsec = be32_to_cpu(dic->di_atime.t_nsec);
- buf->bs_mtime.tv_sec = be32_to_cpu(dic->di_mtime.t_sec);
- buf->bs_mtime.tv_nsec = be32_to_cpu(dic->di_mtime.t_nsec);
- buf->bs_ctime.tv_sec = be32_to_cpu(dic->di_ctime.t_sec);
- buf->bs_ctime.tv_nsec = be32_to_cpu(dic->di_ctime.t_nsec);
- buf->bs_xflags = xfs_dic2xflags(dic);
- buf->bs_extsize = be32_to_cpu(dic->di_extsize) << mp->m_sb.sb_blocklog;
- buf->bs_extents = be32_to_cpu(dic->di_nextents);
- buf->bs_gen = be32_to_cpu(dic->di_gen);
- memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
- buf->bs_dmevmask = be32_to_cpu(dic->di_dmevmask);
- buf->bs_dmstate = be16_to_cpu(dic->di_dmstate);
- buf->bs_aextents = be16_to_cpu(dic->di_anextents);
+ if (!error)
+ *stat = BULKSTAT_RV_DIDONE;
- switch (dic->di_format) {
- case XFS_DINODE_FMT_DEV:
- buf->bs_rdev = xfs_dinode_get_rdev(dic);
- buf->bs_blksize = BLKDEV_IOSIZE;
- buf->bs_blocks = 0;
- break;
- case XFS_DINODE_FMT_LOCAL:
- case XFS_DINODE_FMT_UUID:
- buf->bs_rdev = 0;
- buf->bs_blksize = mp->m_sb.sb_blocksize;
- buf->bs_blocks = 0;
- break;
- case XFS_DINODE_FMT_EXTENTS:
- case XFS_DINODE_FMT_BTREE:
- buf->bs_rdev = 0;
- buf->bs_blksize = mp->m_sb.sb_blocksize;
- buf->bs_blocks = be64_to_cpu(dic->di_nblocks);
- break;
- }
+ out_free:
+ kmem_free(buf);
+ return error;
}
/* Return 0 on success or positive error */
@@ -215,118 +171,17 @@ xfs_bulkstat_one_fmt(
return 0;
}
-/*
- * Return stat information for one inode.
- * Return 0 if ok, else errno.
- */
-int /* error status */
-xfs_bulkstat_one_int(
- xfs_mount_t *mp, /* mount point for filesystem */
- xfs_ino_t ino, /* inode number to get data for */
- void __user *buffer, /* buffer to place output in */
- int ubsize, /* size of buffer */
- bulkstat_one_fmt_pf formatter, /* formatter, copy to user */
- xfs_daddr_t bno, /* starting bno of inode cluster */
- int *ubused, /* bytes used by me */
- void *dibuff, /* on-disk inode buffer */
- int *stat) /* BULKSTAT_RV_... */
-{
- xfs_bstat_t *buf; /* return buffer */
- int error = 0; /* error value */
- xfs_dinode_t *dip; /* dinode inode pointer */
-
- dip = (xfs_dinode_t *)dibuff;
- *stat = BULKSTAT_RV_NOTHING;
-
- if (!buffer || xfs_internal_inum(mp, ino))
- return XFS_ERROR(EINVAL);
-
- buf = kmem_alloc(sizeof(*buf), KM_SLEEP);
-
- if (dip == NULL) {
- /* We're not being passed a pointer to a dinode. This happens
- * if BULKSTAT_FG_IGET is selected. Do the iget.
- */
- error = xfs_bulkstat_one_iget(mp, ino, bno, buf, stat);
- if (error)
- goto out_free;
- } else {
- xfs_bulkstat_one_dinode(mp, ino, dip, buf);
- }
-
- error = formatter(buffer, ubsize, ubused, buf);
- if (error)
- goto out_free;
-
- *stat = BULKSTAT_RV_DIDONE;
-
- out_free:
- kmem_free(buf);
- return error;
-}
-
int
xfs_bulkstat_one(
xfs_mount_t *mp, /* mount point for filesystem */
xfs_ino_t ino, /* inode number to get data for */
void __user *buffer, /* buffer to place output in */
int ubsize, /* size of buffer */
- void *private_data, /* my private data */
- xfs_daddr_t bno, /* starting bno of inode cluster */
int *ubused, /* bytes used by me */
- void *dibuff, /* on-disk inode buffer */
int *stat) /* BULKSTAT_RV_... */
{
return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
- xfs_bulkstat_one_fmt, bno,
- ubused, dibuff, stat);
-}
-
-/*
- * Test to see whether we can use the ondisk inode directly, based
- * on the given bulkstat flags, filling in dipp accordingly.
- * Returns zero if the inode is dodgey.
- */
-STATIC int
-xfs_bulkstat_use_dinode(
- xfs_mount_t *mp,
- int flags,
- xfs_buf_t *bp,
- int clustidx,
- xfs_dinode_t **dipp)
-{
- xfs_dinode_t *dip;
- unsigned int aformat;
-
- *dipp = NULL;
- if (!bp || (flags & BULKSTAT_FG_IGET))
- return 1;
- dip = (xfs_dinode_t *)
- xfs_buf_offset(bp, clustidx << mp->m_sb.sb_inodelog);
- /*
- * Check the buffer containing the on-disk inode for di_mode == 0.
- * This is to prevent xfs_bulkstat from picking up just reclaimed
- * inodes that have their in-core state initialized but not flushed
- * to disk yet. This is a temporary hack that would require a proper
- * fix in the future.
- */
- if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
- !XFS_DINODE_GOOD_VERSION(dip->di_version) ||
- !dip->di_mode)
- return 0;
- if (flags & BULKSTAT_FG_QUICK) {
- *dipp = dip;
- return 1;
- }
- /* BULKSTAT_FG_INLINE: if attr fork is local, or not there, use it */
- aformat = dip->di_aformat;
- if ((XFS_DFORK_Q(dip) == 0) ||
- (aformat == XFS_DINODE_FMT_LOCAL) ||
- (aformat == XFS_DINODE_FMT_EXTENTS && !dip->di_anextents)) {
- *dipp = dip;
- return 1;
- }
- return 1;
+ xfs_bulkstat_one_fmt, ubused, stat);
}
#define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size)
@@ -340,10 +195,8 @@ xfs_bulkstat(
xfs_ino_t *lastinop, /* last inode returned */
int *ubcountp, /* size of buffer/count returned */
bulkstat_one_pf formatter, /* func that'd fill a single buf */
- void *private_data,/* private data for formatter */
size_t statstruct_size, /* sizeof struct filling */
char __user *ubuffer, /* buffer with inode stats */
- int flags, /* defined in xfs_itable.h */
int *done) /* 1 if there are more stats to get */
{
xfs_agblock_t agbno=0;/* allocation group block number */
@@ -378,14 +231,12 @@ xfs_bulkstat(
int ubelem; /* spaces used in user's buffer */
int ubused; /* bytes used by formatter */
xfs_buf_t *bp; /* ptr to on-disk inode cluster buf */
- xfs_dinode_t *dip; /* ptr into bp for specific inode */
/*
* Get the last inode value, see if there's nothing to do.
*/
ino = (xfs_ino_t)*lastinop;
lastino = ino;
- dip = NULL;
agno = XFS_INO_TO_AGNO(mp, ino);
agino = XFS_INO_TO_AGINO(mp, ino);
if (agno >= mp->m_sb.sb_agcount ||
@@ -610,37 +461,6 @@ xfs_bulkstat(
irbp->ir_startino) +
((chunkidx & nimask) >>
mp->m_sb.sb_inopblog);
-
- if (flags & (BULKSTAT_FG_QUICK |
- BULKSTAT_FG_INLINE)) {
- int offset;
-
- ino = XFS_AGINO_TO_INO(mp, agno,
- agino);
- bno = XFS_AGB_TO_DADDR(mp, agno,
- agbno);
-
- /*
- * Get the inode cluster buffer
- */
- if (bp)
- xfs_buf_relse(bp);
-
- error = xfs_inotobp(mp, NULL, ino, &dip,
- &bp, &offset,
- XFS_IGET_BULKSTAT);
-
- if (!error)
- clustidx = offset / mp->m_sb.sb_inodesize;
- if (XFS_TEST_ERROR(error != 0,
- mp, XFS_ERRTAG_BULKSTAT_READ_CHUNK,
- XFS_RANDOM_BULKSTAT_READ_CHUNK)) {
- bp = NULL;
- ubleft = 0;
- rval = error;
- break;
- }
- }
}
ino = XFS_AGINO_TO_INO(mp, agno, agino);
bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
@@ -656,35 +476,13 @@ xfs_bulkstat(
* when the chunk is used up.
*/
irbp->ir_freecount++;
- if (!xfs_bulkstat_use_dinode(mp, flags, bp,
- clustidx, &dip)) {
- lastino = ino;
- continue;
- }
- /*
- * If we need to do an iget, cannot hold bp.
- * Drop it, until starting the next cluster.
- */
- if ((flags & BULKSTAT_FG_INLINE) && !dip) {
- if (bp)
- xfs_buf_relse(bp);
- bp = NULL;
- }
/*
* Get the inode and fill in a single buffer.
- * BULKSTAT_FG_QUICK uses dip to fill it in.
- * BULKSTAT_FG_IGET uses igets.
- * BULKSTAT_FG_INLINE uses dip if we have an
- * inline attr fork, else igets.
- * See: xfs_bulkstat_one & xfs_dm_bulkstat_one.
- * This is also used to count inodes/blks, etc
- * in xfs_qm_quotacheck.
*/
ubused = statstruct_size;
- error = formatter(mp, ino, ubufp,
- ubleft, private_data,
- bno, &ubused, dip, &fmterror);
+ error = formatter(mp, ino, ubufp, ubleft,
+ &ubused, &fmterror);
if (fmterror == BULKSTAT_RV_NOTHING) {
if (error && error != ENOENT &&
error != EINVAL) {
@@ -776,8 +574,7 @@ xfs_bulkstat_single(
*/
ino = (xfs_ino_t)*lastinop;
- error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t),
- NULL, 0, NULL, NULL, &res);
+ error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t), 0, &res);
if (error) {
/*
* Special case way failed, do it the "long" way
@@ -786,8 +583,7 @@ xfs_bulkstat_single(
(*lastinop)--;
count = 1;
if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one,
- NULL, sizeof(xfs_bstat_t), buffer,
- BULKSTAT_FG_IGET, done))
+ sizeof(xfs_bstat_t), buffer, done))
return error;
if (count == 0 || (xfs_ino_t)*lastinop != ino)
return error == EFSCORRUPTED ?
--- a/fs/xfs/xfs_itable.h
+++ b/fs/xfs/xfs_itable.h
@@ -27,10 +27,7 @@ typedef int (*bulkstat_one_pf)(struct xf
xfs_ino_t ino,
void __user *buffer,
int ubsize,
- void *private_data,
- xfs_daddr_t bno,
int *ubused,
- void *dip,
int *stat);
/*
@@ -41,13 +38,6 @@ typedef int (*bulkstat_one_pf)(struct xf
#define BULKSTAT_RV_GIVEUP 2
/*
- * Values for bulkstat flag argument.
- */
-#define BULKSTAT_FG_IGET 0x1 /* Go through the buffer cache */
-#define BULKSTAT_FG_QUICK 0x2 /* No iget, walk the dinode cluster */
-#define BULKSTAT_FG_INLINE 0x4 /* No iget if inline attrs */
-
-/*
* Return stat information in bulk (by-inode) for the filesystem.
*/
int /* error status */
@@ -56,10 +46,8 @@ xfs_bulkstat(
xfs_ino_t *lastino, /* last inode returned */
int *count, /* size of buffer/count returned */
bulkstat_one_pf formatter, /* func that'd fill a single buf */
- void *private_data, /* private data for formatter */
size_t statstruct_size,/* sizeof struct that we're filling */
char __user *ubuffer,/* buffer with inode stats */
- int flags, /* flag to control access method */
int *done); /* 1 if there are more stats to get */
int
@@ -82,9 +70,7 @@ xfs_bulkstat_one_int(
void __user *buffer,
int ubsize,
bulkstat_one_fmt_pf formatter,
- xfs_daddr_t bno,
int *ubused,
- void *dibuff,
int *stat);
int
@@ -93,10 +79,7 @@ xfs_bulkstat_one(
xfs_ino_t ino,
void __user *buffer,
int ubsize,
- void *private_data,
- xfs_daddr_t bno,
int *ubused,
- void *dibuff,
int *stat);
typedef int (*inumbers_fmt_pf)(
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -3209,7 +3209,7 @@ xlog_recover_process_one_iunlink(
int error;
ino = XFS_AGINO_TO_INO(mp, agno, agino);
- error = xfs_iget(mp, NULL, ino, 0, 0, &ip, 0);
+ error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
if (error)
goto fail;
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1207,7 +1207,7 @@ xfs_mountfs(
* Get and sanity-check the root inode.
* Save the pointer to it in the mount structure.
*/
- error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip, 0);
+ error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip);
if (error) {
cmn_err(CE_WARN, "XFS: failed to read root inode");
goto out_log_dealloc;
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -2274,12 +2274,12 @@ xfs_rtmount_inodes(
sbp = &mp->m_sb;
if (sbp->sb_rbmino == NULLFSINO)
return 0;
- error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, 0, &mp->m_rbmip, 0);
+ error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, 0, &mp->m_rbmip);
if (error)
return error;
ASSERT(mp->m_rbmip != NULL);
ASSERT(sbp->sb_rsumino != NULLFSINO);
- error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip, 0);
+ error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip);
if (error) {
IRELE(mp->m_rbmip);
return error;
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -62,7 +62,7 @@ xfs_trans_iget(
{
int error;
- error = xfs_iget(mp, tp, ino, flags, lock_flags, ipp, 0);
+ error = xfs_iget(mp, tp, ino, flags, lock_flags, ipp);
if (!error && tp)
xfs_trans_ijoin(tp, *ipp, lock_flags);
return error;
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -1371,7 +1371,7 @@ xfs_lookup(
if (error)
goto out;
- error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp, 0);
+ error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
if (error)
goto out_free_name;
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1016,7 +1016,7 @@ struct drm_device {
struct pci_controller *hose;
#endif
struct drm_sg_mem *sg; /**< Scatter gather memory */
- int num_crtcs; /**< Number of CRTCs on this device */
+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
void *dev_private; /**< device private data */
void *mm_private;
struct address_space *dev_mapping;
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -28,7 +28,6 @@
{0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
{0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
{0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
- {0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
{0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
{0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
{0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -153,6 +153,7 @@ struct cred {
extern void __put_cred(struct cred *);
extern void exit_creds(struct task_struct *);
extern int copy_creds(struct task_struct *, unsigned long);
+extern const struct cred *get_task_cred(struct task_struct *);
extern struct cred *cred_alloc_blank(void);
extern struct cred *prepare_creds(void);
extern struct cred *prepare_exec_creds(void);
@@ -283,26 +284,6 @@ static inline void put_cred(const struct
((const struct cred *)(rcu_dereference((task)->real_cred)))
/**
- * get_task_cred - Get another task's objective credentials
- * @task: The task to query
- *
- * Get the objective credentials of a task, pinning them so that they can't go
- * away. Accessing a task's credentials directly is not permitted.
- *
- * The caller must make sure task doesn't go away, either by holding a ref on
- * task or by holding tasklist_lock to prevent it from being unlinked.
- */
-#define get_task_cred(task) \
-({ \
- struct cred *__cred; \
- rcu_read_lock(); \
- __cred = (struct cred *) __task_cred((task)); \
- get_cred(__cred); \
- rcu_read_unlock(); \
- __cred; \
-})
-
-/**
* get_current_cred - Get the current task's subjective credentials
*
* Get the subjective credentials of the current task, pinning them so that
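With the macro above replaced by a real get_task_cred() function (see the kernel/cred.c hunks further down), callers keep the same pairing with put_cred(). A hedged usage sketch, illustrative only:

	static uid_t example_task_uid(struct task_struct *task)
	{
		const struct cred *cred = get_task_cred(task);	/* pins the creds */
		uid_t uid = cred->uid;

		put_cred(cred);					/* drop the reference */
		return uid;
	}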
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -64,6 +64,8 @@
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
+#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+
#ifndef PREEMPT_ACTIVE
#define PREEMPT_ACTIVE_BITS 1
#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
@@ -82,10 +84,13 @@
/*
* Are we doing bottom half or hardware interrupt processing?
* Are we in a softirq context? Interrupt context?
+ * in_softirq - Are we currently processing softirq or have bh disabled?
+ * in_serving_softirq - Are we currently processing softirq?
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
+#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
/*
* Are we in NMI context?
@@ -132,10 +137,12 @@ extern void synchronize_irq(unsigned int
struct task_struct;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING)
static inline void account_system_vtime(struct task_struct *tsk)
{
}
+#else
+extern void account_system_vtime(struct task_struct *tsk);
#endif
#if defined(CONFIG_NO_HZ)
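A small sketch (illustrative only) of the distinction documented above: in_softirq() is also true inside a local_bh_disable() section, while the new in_serving_softirq() is true only while a softirq handler is actually executing.

	static void example_context_probe(void)
	{
		local_bh_disable();
		/* Here: in_softirq() is non-zero, in_serving_softirq() is 0. */
		WARN_ON(in_serving_softirq());
		local_bh_enable();
	}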
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -872,7 +872,7 @@ struct ieee80211_ht_info {
/* block-ack parameters */
#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
-#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0
+#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0
#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
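The mask correction above follows from the field layout: in the ADDBA parameter set, the buffer-size subfield occupies bits 6..15, so its mask is ((1 << 10) - 1) << 6 = 0xFFC0; the old value 0xFFA0 dropped bit 6 and wrongly included bit 5, which belongs to the TID field. A sanity sketch, illustrative only:

	#define EXAMPLE_ADDBA_BUF_SIZE_MASK	(((1 << 10) - 1) << 6)	/* bits 6..15 */
	/* EXAMPLE_ADDBA_BUF_SIZE_MASK == 0xFFC0, matching the corrected define. */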
--- a/include/linux/klist.h
+++ b/include/linux/klist.h
@@ -22,7 +22,7 @@ struct klist {
struct list_head k_list;
void (*get)(struct klist_node *);
void (*put)(struct klist_node *);
-} __attribute__ ((aligned (4)));
+} __attribute__ ((aligned (sizeof(void *))));
#define KLIST_INIT(_name, _get, _put) \
{ .k_lock = __SPIN_LOCK_UNLOCKED(_name.k_lock), \
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -739,14 +739,6 @@ struct user_struct {
uid_t uid;
struct user_namespace *user_ns;
-#ifdef CONFIG_USER_SCHED
- struct task_group *tg;
-#ifdef CONFIG_SYSFS
- struct kobject kobj;
- struct delayed_work work;
-#endif
-#endif
-
#ifdef CONFIG_PERF_EVENTS
atomic_long_t locked_vm;
#endif
@@ -913,6 +905,7 @@ struct sched_group {
* single CPU.
*/
unsigned int cpu_power;
+ unsigned int group_weight;
/*
* The CPUs this group covers.
@@ -1132,7 +1125,7 @@ struct sched_class {
struct task_struct *task);
#ifdef CONFIG_FAIR_GROUP_SCHED
- void (*moved_group) (struct task_struct *p, int on_rq);
+ void (*task_move_group) (struct task_struct *p, int on_rq);
#endif
};
@@ -1836,8 +1829,7 @@ extern int task_free_unregister(struct n
/*
* Per process flags
*/
-#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */
- /* Not implemented yet, only for 486*/
+#define PF_KSOFTIRQD 0x00000001 /* I am ksoftirqd */
#define PF_STARTING 0x00000002 /* being created */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
@@ -1974,6 +1966,19 @@ extern void sched_clock_idle_wakeup_even
*/
extern unsigned long long cpu_clock(int cpu);
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+/*
+ * An interface for runtime opt-in to irq time accounting, based on sched_clock.
+ * The reason for this explicit opt-in is to avoid a performance penalty on
+ * platforms with slow sched_clocks.
+ */
+extern void enable_sched_clock_irqtime(void);
+extern void disable_sched_clock_irqtime(void);
+#else
+static inline void enable_sched_clock_irqtime(void) {}
+static inline void disable_sched_clock_irqtime(void) {}
+#endif
+
extern unsigned long long
task_sched_runtime(struct task_struct *task);
extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
@@ -2525,9 +2530,9 @@ extern int __cond_resched_lock(spinlock_
extern int __cond_resched_softirq(void);
-#define cond_resched_softirq() ({ \
- __might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET); \
- __cond_resched_softirq(); \
+#define cond_resched_softirq() ({ \
+ __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
+ __cond_resched_softirq(); \
})
/*
@@ -2616,13 +2621,9 @@ extern long sched_getaffinity(pid_t pid,
extern void normalize_rt_tasks(void);
-#ifdef CONFIG_GROUP_SCHED
+#ifdef CONFIG_CGROUP_SCHED
extern struct task_group init_task_group;
-#ifdef CONFIG_USER_SCHED
-extern struct task_group root_task_group;
-extern void set_tg_uid(struct user_struct *user);
-#endif
extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
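The enable_sched_clock_irqtime()/disable_sched_clock_irqtime() pair declared above is intended to be called from platform code once it knows whether its sched_clock() source is cheap. A minimal sketch, assuming a hypothetical architecture init path and predicate:

	static void __init example_arch_time_init(void)
	{
		/* example_sched_clock_is_fast() is a hypothetical helper. */
		if (example_sched_clock_is_fast())
			enable_sched_clock_irqtime();
		else
			disable_sched_clock_irqtime();
	}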
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -326,6 +326,9 @@ extern int usb_serial_handle_sysrq_char(
struct usb_serial_port *port,
unsigned int ch);
extern int usb_serial_handle_break(struct usb_serial_port *port);
+extern void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
+ struct tty_struct *tty,
+ unsigned int status);
extern int usb_serial_bus_register(struct usb_serial_driver *device);
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -107,6 +107,7 @@ typedef enum {
SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */
SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
SCTP_CMD_SEND_MSG, /* Send the whole user message */
+ SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
SCTP_CMD_LAST
} sctp_verb_t;
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -439,57 +439,6 @@ config LOG_BUF_SHIFT
config HAVE_UNSTABLE_SCHED_CLOCK
bool
-config GROUP_SCHED
- bool "Group CPU scheduler"
- depends on EXPERIMENTAL && !SCHED_BFS
- default n
- help
- This feature lets CPU scheduler recognize task groups and control CPU
- bandwidth allocation to such task groups.
- In order to create a group from arbitrary set of processes, use
- CONFIG_CGROUPS. (See Control Group support.)
-
-config FAIR_GROUP_SCHED
- bool "Group scheduling for SCHED_OTHER"
- depends on GROUP_SCHED
- default GROUP_SCHED
-
-config RT_GROUP_SCHED
- bool "Group scheduling for SCHED_RR/FIFO"
- depends on EXPERIMENTAL
- depends on GROUP_SCHED
- default n
- help
- This feature lets you explicitly allocate real CPU bandwidth
- to users or control groups (depending on the "Basis for grouping tasks"
- setting below. If enabled, it will also make it impossible to
- schedule realtime tasks for non-root users until you allocate
- realtime bandwidth for them.
- See Documentation/scheduler/sched-rt-group.txt for more information.
-
-choice
- depends on GROUP_SCHED
- prompt "Basis for grouping tasks"
- default USER_SCHED
-
-config USER_SCHED
- bool "user id"
- help
- This option will choose userid as the basis for grouping
- tasks, thus providing equal CPU bandwidth to each user.
-
-config CGROUP_SCHED
- bool "Control groups"
- depends on CGROUPS
- help
- This option allows you to create arbitrary task groups
- using the "cgroup" pseudo filesystem and control
- the cpu bandwidth allocated to each such task group.
- Refer to Documentation/cgroups/cgroups.txt for more
- information on "cgroup" pseudo filesystem.
-
-endchoice
-
menuconfig CGROUPS
boolean "Control Group support"
help
@@ -610,6 +559,35 @@ config CGROUP_MEM_RES_CTLR_SWAP
Now, memory usage of swap_cgroup is 2 bytes per entry. If swap page
size is 4096bytes, 512k per 1Gbytes of swap.
+menuconfig CGROUP_SCHED
+ bool "Group CPU scheduler"
+ depends on EXPERIMENTAL && CGROUPS
+ default n
+ help
+ This feature lets the CPU scheduler recognize task groups and control CPU
+ bandwidth allocation to such task groups. It uses cgroups to group
+ tasks.
+
+if CGROUP_SCHED
+config FAIR_GROUP_SCHED
+ bool "Group scheduling for SCHED_OTHER"
+ depends on CGROUP_SCHED
+ default CGROUP_SCHED
+
+config RT_GROUP_SCHED
+ bool "Group scheduling for SCHED_RR/FIFO"
+ depends on EXPERIMENTAL
+ depends on CGROUP_SCHED
+ default n
+ help
+ This feature lets you explicitly allocate real CPU bandwidth
+ to task groups. If enabled, it will also make it impossible to
+ schedule realtime tasks for non-root users until you allocate
+ realtime bandwidth for them.
+ See Documentation/scheduler/sched-rt-group.txt for more information.
+
+endif #CGROUP_SCHED
+
endif # CGROUPS
config SCHED_AUTOGROUP
--- a/init/calibrate.c
+++ b/init/calibrate.c
@@ -66,7 +66,7 @@ static unsigned long __cpuinit calibrate
pre_start = 0;
read_current_timer(&start);
start_jiffies = jiffies;
- while (jiffies <= (start_jiffies + 1)) {
+ while (time_before_eq(jiffies, start_jiffies + 1)) {
pre_start = start;
read_current_timer(&start);
}
@@ -74,8 +74,8 @@ static unsigned long __cpuinit calibrate
pre_end = 0;
end = post_start;
- while (jiffies <=
- (start_jiffies + 1 + DELAY_CALIBRATION_TICKS)) {
+ while (time_before_eq(jiffies, start_jiffies + 1 +
+ DELAY_CALIBRATION_TICKS)) {
pre_end = end;
read_current_timer(&end);
}
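
The calibrate_delay change above swaps raw jiffies comparisons for time_before_eq(), which keeps working when the jiffies counter wraps around. A rough standalone illustration of why (userspace C; demo_time_before_eq is a local stand-in for the real macro in include/linux/jiffies.h):

#include <stdio.h>

/* same trick as the kernel's time_before_eq(): compare via signed difference */
#define demo_time_before_eq(a, b)	((long)((a) - (b)) <= 0)

int main(void)
{
	unsigned long start = -2UL;	/* two ticks before wraparound */
	unsigned long now = start + 3;	/* wrapped to a tiny value */

	/* the naive test keeps "waiting" forever once the counter wraps */
	printf("naive     still waiting: %d\n", now <= start + 1);
	/* the wrap-safe test correctly sees that the window has passed */
	printf("wrap-safe still waiting: %d\n", demo_time_before_eq(now, start + 1));
	return 0;
}
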
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -15,7 +15,6 @@
#include <linux/syscalls.h>
#include <linux/pid_namespace.h>
#include <asm/uaccess.h>
-#include "cred-internals.h"
/*
* Leveraged for setting/resetting capabilities
--- a/kernel/cred-internals.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* Internal credentials stuff
- *
- * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-/*
- * user.c
- */
-static inline void sched_switch_user(struct task_struct *p)
-{
-#ifdef CONFIG_USER_SCHED
- sched_move_task(p);
-#endif /* CONFIG_USER_SCHED */
-}
-
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -16,7 +16,6 @@
#include <linux/init_task.h>
#include <linux/security.h>
#include <linux/cn_proc.h>
-#include "cred-internals.h"
#if 0
#define kdebug(FMT, ...) \
@@ -209,6 +208,31 @@ void exit_creds(struct task_struct *tsk)
}
}
+/**
+ * get_task_cred - Get another task's objective credentials
+ * @task: The task to query
+ *
+ * Get the objective credentials of a task, pinning them so that they can't go
+ * away. Accessing a task's credentials directly is not permitted.
+ *
+ * The caller must also make sure the task doesn't get deleted, either by
+ * holding a ref on the task or by holding tasklist_lock to prevent unlinking.
+ */
+const struct cred *get_task_cred(struct task_struct *task)
+{
+ const struct cred *cred;
+
+ rcu_read_lock();
+
+ do {
+ cred = __task_cred((task));
+ BUG_ON(!cred);
+ } while (!atomic_inc_not_zero(&((struct cred *)cred)->usage));
+
+ rcu_read_unlock();
+ return cred;
+}
+
/*
* Allocate blank credentials, such that the credentials can be filled in at a
* later date without risk of ENOMEM.
@@ -231,13 +255,13 @@ struct cred *cred_alloc_blank(void)
#endif
atomic_set(&new->usage, 1);
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ new->magic = CRED_MAGIC;
+#endif
if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
goto error;
-#ifdef CONFIG_DEBUG_CREDENTIALS
- new->magic = CRED_MAGIC;
-#endif
return new;
error:
@@ -553,8 +577,6 @@ int commit_creds(struct cred *new)
atomic_dec(&old->user->processes);
alter_cred_subscribers(old, -2);
- sched_switch_user(task);
-
/* send notifications */
if (new->uid != old->uid ||
new->euid != old->euid ||
@@ -696,6 +718,8 @@ struct cred *prepare_kernel_cred(struct
validate_creds(old);
*new = *old;
+ atomic_set(&new->usage, 1);
+ set_cred_subscribers(new, 0);
get_uid(new->user);
get_group_info(new->group_info);
@@ -713,8 +737,6 @@ struct cred *prepare_kernel_cred(struct
if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
goto error;
- atomic_set(&new->usage, 1);
- set_cred_subscribers(new, 0);
put_cred(old);
validate_creds(new);
return new;
@@ -787,7 +809,11 @@ bool creds_are_invalid(const struct cred
if (cred->magic != CRED_MAGIC)
return true;
#ifdef CONFIG_SECURITY_SELINUX
- if (selinux_is_enabled()) {
+ /*
+ * cred->security == NULL if security_cred_alloc_blank() or
+ * security_prepare_creds() returned an error.
+ */
+ if (selinux_is_enabled() && cred->security) {
if ((unsigned long) cred->security < PAGE_SIZE)
return true;
if ((*(u32 *)cred->security & 0xffffff00) ==
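
The new get_task_cred() above pairs rcu_read_lock() with atomic_inc_not_zero(): a reference is taken only while the cred's usage count is still non-zero, and if a concurrent release has already dropped it to zero the loop retries against whatever __task_cred() now returns. A loose userspace sketch of that take-a-reference-only-if-still-live idea using C11 atomics (struct obj and inc_not_zero are illustrative stand-ins, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int usage;		/* stands in for cred->usage */
};

/* only take a reference if the count has not already hit zero */
static bool inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0) {
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;			/* object is already on its way out */
}

int main(void)
{
	struct obj live = { .usage = 1 };
	struct obj dying = { .usage = 0 };

	printf("live object:  got ref = %d\n", inc_not_zero(&live.usage));	/* 1 */
	printf("dying object: got ref = %d\n", inc_not_zero(&dying.usage));	/* 0 */
	return 0;
}
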
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -54,7 +54,6 @@
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
-#include "cred-internals.h"
static void exit_mm(struct task_struct * tsk);
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1079,7 +1079,7 @@ int request_threaded_irq(unsigned int ir
if (retval)
kfree(action);
-#ifdef CONFIG_DEBUG_SHIRQ
+#ifdef CONFIG_DEBUG_SHIRQ_FIXME
if (irqflags & IRQF_SHARED) {
/*
* It's a shared IRQ -- the driver ought to be prepared for it
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -176,16 +176,8 @@ static int __init ksysfs_init(void)
goto group_exit;
}
- /* create the /sys/kernel/uids/ directory */
- error = uids_sysfs_init();
- if (error)
- goto notes_exit;
-
return 0;
-notes_exit:
- if (notes_size > 0)
- sysfs_remove_bin_file(kernel_kobj, &notes_attr);
group_exit:
sysfs_remove_group(kernel_kobj, &kernel_attr_group);
kset_exit:
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1466,11 +1466,8 @@ static int
swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
unsigned int nr_pages, unsigned int nr_highmem)
{
- int error = 0;
-
if (nr_highmem > 0) {
- error = get_highmem_buffer(PG_ANY);
- if (error)
+ if (get_highmem_buffer(PG_ANY))
goto err_out;
if (nr_highmem > alloc_highmem) {
nr_highmem -= alloc_highmem;
@@ -1493,7 +1490,7 @@ swsusp_alloc(struct memory_bitmap *orig_
err_out:
swsusp_free();
- return error;
+ return -ENOMEM;
}
asmlinkage int swsusp_save(void)
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -314,7 +314,7 @@ int ptrace_detach(struct task_struct *ch
child->exit_code = data;
dead = __ptrace_detach(current, child);
if (!child->exit_state)
- wake_up_process(child);
+ wake_up_state(child, TASK_TRACED | TASK_STOPPED);
}
write_unlock_irq(&tasklist_lock);
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -237,7 +237,7 @@ static void destroy_rt_bandwidth(struct
*/
static DEFINE_MUTEX(sched_domains_mutex);
-#ifdef CONFIG_GROUP_SCHED
+#ifdef CONFIG_CGROUP_SCHED
#include <linux/cgroup.h>
@@ -247,13 +247,7 @@ static LIST_HEAD(task_groups);
/* task group related information */
struct task_group {
-#ifdef CONFIG_CGROUP_SCHED
struct cgroup_subsys_state css;
-#endif
-
-#ifdef CONFIG_USER_SCHED
- uid_t uid;
-#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
/* schedulable entities of this group on each cpu */
@@ -278,35 +272,7 @@ struct task_group {
struct list_head children;
};
-#ifdef CONFIG_USER_SCHED
-
-/* Helper function to pass uid information to create_sched_user() */
-void set_tg_uid(struct user_struct *user)
-{
- user->tg->uid = user->uid;
-}
-
-/*
- * Root task group.
- * Every UID task group (including init_task_group aka UID-0) will
- * be a child to this group.
- */
-struct task_group root_task_group;
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-/* Default task group's sched entity on each cpu */
-static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
-/* Default task group's cfs_rq on each cpu */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
-#endif /* CONFIG_FAIR_GROUP_SCHED */
-
-#ifdef CONFIG_RT_GROUP_SCHED
-static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
-#endif /* CONFIG_RT_GROUP_SCHED */
-#else /* !CONFIG_USER_SCHED */
#define root_task_group init_task_group
-#endif /* CONFIG_USER_SCHED */
/* task_group_lock serializes add/remove of task groups and also changes to
* a task group's cpu shares.
@@ -322,11 +288,7 @@ static int root_task_group_empty(void)
}
#endif
-#ifdef CONFIG_USER_SCHED
-# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
-#else /* !CONFIG_USER_SCHED */
# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
-#endif /* CONFIG_USER_SCHED */
/*
* A weight of 0 or 1 can cause arithmetics problems.
@@ -352,13 +314,7 @@ static inline struct task_group *task_gr
{
struct task_group *tg;
-#ifdef CONFIG_USER_SCHED
- rcu_read_lock();
- tg = __task_cred(p)->user->tg;
- rcu_read_unlock();
-
- return tg;
-#elif defined(CONFIG_CGROUP_SCHED)
+#ifdef CONFIG_CGROUP_SCHED
struct cgroup_subsys_state *css;
css = task_subsys_state(p, cpu_cgroup_subsys_id);
@@ -394,7 +350,7 @@ static inline struct task_group *task_gr
return NULL;
}
-#endif /* CONFIG_GROUP_SCHED */
+#endif /* CONFIG_CGROUP_SCHED */
/* CFS-related fields in a runqueue */
struct cfs_rq {
@@ -578,6 +534,7 @@ struct rq {
struct mm_struct *prev_mm;
u64 clock;
+ u64 clock_task;
atomic_t nr_iowait;
@@ -585,6 +542,8 @@ struct rq {
struct root_domain *rd;
struct sched_domain *sd;
+ unsigned long cpu_power;
+
unsigned char idle_at_tick;
/* For active balancing */
int post_schedule;
@@ -605,6 +564,10 @@ struct rq {
u64 avg_idle;
#endif
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ u64 prev_irq_time;
+#endif
+
/* calc_load related fields */
unsigned long calc_load_update;
long calc_load_active;
@@ -642,11 +605,7 @@ struct rq {
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-static inline
-void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
-{
- rq->curr->sched_class->check_preempt_curr(rq, p, flags);
-}
+static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
static inline int cpu_of(struct rq *rq)
{
@@ -673,9 +632,20 @@ static inline int cpu_of(struct rq *rq)
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
#define raw_rq() (&__raw_get_cpu_var(runqueues))
+static u64 irq_time_cpu(int cpu);
+static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time);
+
inline void update_rq_clock(struct rq *rq)
{
+ int cpu = cpu_of(rq);
+ u64 irq_time;
+
rq->clock = sched_clock_cpu(cpu_of(rq));
+ irq_time = irq_time_cpu(cpu);
+ if (rq->clock - irq_time > rq->clock_task)
+ rq->clock_task = rq->clock - irq_time;
+
+ sched_irq_time_avg_update(rq, irq_time);
}
/*
@@ -1308,6 +1278,10 @@ static void resched_task(struct task_str
static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
}
+
+static void sched_avg_update(struct rq *rq)
+{
+}
#endif /* CONFIG_SMP */
#if BITS_PER_LONG == 32
@@ -1557,24 +1531,9 @@ static unsigned long target_load(int cpu
return max(rq->cpu_load[type-1], total);
}
-static struct sched_group *group_of(int cpu)
-{
- struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
-
- if (!sd)
- return NULL;
-
- return sd->groups;
-}
-
static unsigned long power_of(int cpu)
{
- struct sched_group *group = group_of(cpu);
-
- if (!group)
- return SCHED_LOAD_SCALE;
-
- return group->cpu_power;
+ return cpu_rq(cpu)->cpu_power;
}
static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
@@ -1856,6 +1815,94 @@ static inline void __set_task_cpu(struct
#endif
}
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+
+/*
+ * There are no locks covering percpu hardirq/softirq time.
+ * They are only modified in account_system_vtime, on the corresponding CPU
+ * with interrupts disabled. So, writes are safe.
+ * They are read and saved off onto struct rq in update_rq_clock().
+ * This may result in another CPU reading this CPU's irq time and can
+ * race with irq/account_system_vtime on this CPU. We would either get the
+ * old or the new value (or a semi-updated value on 32 bit) with a side
+ * effect of accounting a slice of irq time to the wrong task when an irq
+ * is in progress while we read rq->clock. That is a worthy compromise in
+ * place of having locks on each irq in account_system_time.
+ */
+static DEFINE_PER_CPU(u64, cpu_hardirq_time);
+static DEFINE_PER_CPU(u64, cpu_softirq_time);
+
+static DEFINE_PER_CPU(u64, irq_start_time);
+static int sched_clock_irqtime;
+
+void enable_sched_clock_irqtime(void)
+{
+ sched_clock_irqtime = 1;
+}
+
+void disable_sched_clock_irqtime(void)
+{
+ sched_clock_irqtime = 0;
+}
+
+static u64 irq_time_cpu(int cpu)
+{
+ if (!sched_clock_irqtime)
+ return 0;
+
+ return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
+}
+
+void account_system_vtime(struct task_struct *curr)
+{
+ unsigned long flags;
+ int cpu;
+ u64 now, delta;
+
+ if (!sched_clock_irqtime)
+ return;
+
+ local_irq_save(flags);
+
+ cpu = smp_processor_id();
+ now = sched_clock_cpu(cpu);
+ delta = now - per_cpu(irq_start_time, cpu);
+ per_cpu(irq_start_time, cpu) = now;
+ /*
+ * We do not account for softirq time from ksoftirqd here.
+ * We want to continue accounting softirq time to ksoftirqd thread
+ * in that case, so as not to confuse the scheduler with a special task
+ * that does not consume any time but still wants to run.
+ */
+ if (hardirq_count())
+ per_cpu(cpu_hardirq_time, cpu) += delta;
+ else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD))
+ per_cpu(cpu_softirq_time, cpu) += delta;
+
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(account_system_vtime);
+
+static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time)
+{
+ if (sched_clock_irqtime && sched_feat(NONIRQ_POWER)) {
+ u64 delta_irq = curr_irq_time - rq->prev_irq_time;
+ rq->prev_irq_time = curr_irq_time;
+ sched_rt_avg_update(rq, delta_irq);
+ }
+}
+
+#else
+
+static u64 irq_time_cpu(int cpu)
+{
+ return 0;
+}
+
+static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) { }
+
+#endif
+
#include "sched_stats.h"
#include "sched_idletask.c"
#include "sched_fair.c"
@@ -1882,8 +1929,8 @@ static void dec_nr_running(struct rq *rq
static void set_load_weight(struct task_struct *p)
{
if (task_has_rt_policy(p)) {
- p->se.load.weight = prio_to_weight[0] * 2;
- p->se.load.inv_weight = prio_to_wmult[0] >> 1;
+ p->se.load.weight = 0;
+ p->se.load.inv_weight = WMULT_CONST;
return;
}
@@ -2064,6 +2111,9 @@ task_hot(struct task_struct *p, u64 now,
if (p->sched_class != &fair_sched_class)
return 0;
+ if (unlikely(p->policy == SCHED_IDLE))
+ return 0;
+
/*
* Buddy candidates are cache hot:
*/
@@ -2335,6 +2385,24 @@ void task_oncpu_function_call(struct tas
preempt_enable();
}
+static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
+{
+ const struct sched_class *class;
+
+ if (p->sched_class == rq->curr->sched_class) {
+ rq->curr->sched_class->check_preempt_curr(rq, p, flags);
+ } else {
+ for_each_class(class) {
+ if (class == rq->curr->sched_class)
+ break;
+ if (class == p->sched_class) {
+ resched_task(rq->curr);
+ break;
+ }
+ }
+ }
+}
+
#ifdef CONFIG_SMP
/*
* ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
@@ -3156,6 +3224,8 @@ static void update_cpu_load(struct rq *t
this_rq->calc_load_update += LOAD_FREQ;
calc_load_account_active(this_rq);
}
+
+ sched_avg_update(this_rq);
}
#ifdef CONFIG_SMP
@@ -3287,7 +3357,7 @@ int can_migrate_task(struct task_struct
* 2) too many balance attempts have failed.
*/
- tsk_cache_hot = task_hot(p, rq->clock, sd);
+ tsk_cache_hot = task_hot(p, rq->clock_task, sd);
if (!tsk_cache_hot ||
sd->nr_balance_failed > sd->cache_nice_tries) {
#ifdef CONFIG_SCHEDSTATS
@@ -3470,12 +3540,17 @@ struct sd_lb_stats {
unsigned long this_load;
unsigned long this_load_per_task;
unsigned long this_nr_running;
+ unsigned long this_has_capacity;
+ unsigned int this_idle_cpus;
/* Statistics of the busiest group */
+ unsigned int busiest_idle_cpus;
unsigned long max_load;
unsigned long busiest_load_per_task;
unsigned long busiest_nr_running;
unsigned long busiest_group_capacity;
+ unsigned long busiest_has_capacity;
+ unsigned int busiest_group_weight;
int group_imb; /* Is there imbalance in this sd */
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -3497,7 +3572,10 @@ struct sg_lb_stats {
unsigned long sum_nr_running; /* Nr tasks running in the group */
unsigned long sum_weighted_load; /* Weighted load of group's tasks */
unsigned long group_capacity;
+ unsigned long idle_cpus;
+ unsigned long group_weight;
int group_imb; /* Is there an imbalance in the group ? */
+ int group_has_capacity; /* Is there extra capacity in the group? */
};
/**
@@ -3707,10 +3785,14 @@ unsigned long scale_rt_power(int cpu)
struct rq *rq = cpu_rq(cpu);
u64 total, available;
- sched_avg_update(rq);
-
total = sched_avg_period() + (rq->clock - rq->age_stamp);
- available = total - rq->rt_avg;
+
+ if (unlikely(total < rq->rt_avg)) {
+ /* Ensures that power won't end up being negative */
+ available = 0;
+ } else {
+ available = total - rq->rt_avg;
+ }
if (unlikely((s64)total < SCHED_LOAD_SCALE))
total = SCHED_LOAD_SCALE;
@@ -3748,6 +3830,7 @@ static void update_cpu_power(struct sche
if (!power)
power = 1;
+ cpu_rq(cpu)->cpu_power = power;
sdg->cpu_power = power;
}
@@ -3792,7 +3875,7 @@ static inline void update_sg_lb_stats(st
int local_group, const struct cpumask *cpus,
int *balance, struct sg_lb_stats *sgs)
{
- unsigned long load, max_cpu_load, min_cpu_load;
+ unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
int i;
unsigned int balance_cpu = -1, first_idle_cpu = 0;
unsigned long avg_load_per_task = 0;
@@ -3806,6 +3889,7 @@ static inline void update_sg_lb_stats(st
/* Tally up the load of all CPUs in the group */
max_cpu_load = 0;
min_cpu_load = ~0UL;
+ max_nr_running = 0;
for_each_cpu_and(i, sched_group_cpus(group), cpus) {
struct rq *rq = cpu_rq(i);
@@ -3823,8 +3907,10 @@ static inline void update_sg_lb_stats(st
load = target_load(i, load_idx);
} else {
load = source_load(i, load_idx);
- if (load > max_cpu_load)
+ if (load > max_cpu_load) {
max_cpu_load = load;
+ max_nr_running = rq->nr_running;
+ }
if (min_cpu_load > load)
min_cpu_load = load;
}
@@ -3832,7 +3918,8 @@ static inline void update_sg_lb_stats(st
sgs->group_load += load;
sgs->sum_nr_running += rq->nr_running;
sgs->sum_weighted_load += weighted_cpuload(i);
-
+ if (idle_cpu(i))
+ sgs->idle_cpus++;
}
/*
@@ -3862,11 +3949,14 @@ static inline void update_sg_lb_stats(st
if (sgs->sum_nr_running)
avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
- if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
+ if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task && max_nr_running > 1)
sgs->group_imb = 1;
- sgs->group_capacity =
- DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
+ sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
+ sgs->group_weight = group->group_weight;
+
+ if (sgs->group_capacity > sgs->sum_nr_running)
+ sgs->group_has_capacity = 1;
}
/**
@@ -3913,9 +4003,14 @@ static inline void update_sd_lb_stats(st
/*
* In case the child domain prefers tasks go to siblings
* first, lower the group capacity to one so that we'll try
- * and move all the excess tasks away.
+ * and move all the excess tasks away. We lower the capacity
+ * of a group only if the local group has the capacity to fit
+ * these excess tasks, i.e. nr_running < group_capacity. The
+ * extra check prevents the case where you always pull from the
+ * heaviest group when it is already under-utilized (possible when
+ * one large weight task outweighs the rest of the tasks on the system).
*/
- if (prefer_sibling)
+ if (prefer_sibling && !local_group && sds->this_has_capacity)
sgs.group_capacity = min(sgs.group_capacity, 1UL);
if (local_group) {
@@ -3923,14 +4018,19 @@ static inline void update_sd_lb_stats(st
sds->this = group;
sds->this_nr_running = sgs.sum_nr_running;
sds->this_load_per_task = sgs.sum_weighted_load;
+ sds->this_has_capacity = sgs.group_has_capacity;
+ sds->this_idle_cpus = sgs.idle_cpus;
} else if (sgs.avg_load > sds->max_load &&
(sgs.sum_nr_running > sgs.group_capacity ||
sgs.group_imb)) {
sds->max_load = sgs.avg_load;
sds->busiest = group;
sds->busiest_nr_running = sgs.sum_nr_running;
+ sds->busiest_idle_cpus = sgs.idle_cpus;
sds->busiest_group_capacity = sgs.group_capacity;
+ sds->busiest_group_weight = sgs.group_weight;
sds->busiest_load_per_task = sgs.sum_weighted_load;
+ sds->busiest_has_capacity = sgs.group_has_capacity;
sds->group_imb = sgs.group_imb;
}
@@ -4076,6 +4176,7 @@ static inline void calculate_imbalance(s
return fix_small_imbalance(sds, this_cpu, imbalance);
}
+
/******* find_busiest_group() helpers end here *********************/
/**
@@ -4127,6 +4228,11 @@ find_busiest_group(struct sched_domain *
* 4) This group is more busy than the avg busieness at this
* sched_domain.
* 5) The imbalance is within the specified limit.
+ *
+ * Note: when doing newidle balance, if the local group has excess
+ * capacity (i.e. nr_running < group_capacity) and the busiest group
+ * does not have any capacity, we force a load balance to pull tasks
+ * to the local group. In this case, we skip past checks 3, 4 and 5.
*/
if (balance && !(*balance))
goto ret;
@@ -4134,6 +4240,11 @@ find_busiest_group(struct sched_domain *
if (!sds.busiest || sds.busiest_nr_running == 0)
goto out_balanced;
+ /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
+ if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
+ !sds.busiest_has_capacity)
+ goto force_balance;
+
if (sds.this_load >= sds.max_load)
goto out_balanced;
@@ -4142,9 +4253,28 @@ find_busiest_group(struct sched_domain *
if (sds.this_load >= sds.avg_load)
goto out_balanced;
- if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
- goto out_balanced;
+ /*
+ * In the CPU_NEWLY_IDLE case, use imbalance_pct to be conservative.
+ * To check for busy balance, use !idle_cpu instead of CPU_NOT_IDLE,
+ * because HT siblings will report CPU_NOT_IDLE even when they are
+ * idle.
+ */
+ if (idle == CPU_NEWLY_IDLE || !idle_cpu(this_cpu)) {
+ if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
+ goto out_balanced;
+ } else {
+ /*
+ * This cpu is idle. If the busiest group doesn't have
+ * more tasks than the number of available cpus and there
+ * is no imbalance between this group and the busiest group
+ * with respect to idle cpus, it is balanced.
+ */
+ if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
+ sds.busiest_nr_running <= sds.busiest_group_weight)
+ goto out_balanced;
+ }
+force_balance:
/* Looks like there is an imbalance. Compute it */
calculate_imbalance(&sds, this_cpu, imbalance);
return sds.busiest;
@@ -4300,7 +4430,14 @@ redo:
if (!ld_moved) {
schedstat_inc(sd, lb_failed[idle]);
- sd->nr_balance_failed++;
+ /*
+ * Increment the failure counter only on periodic balance.
+ * We do not want newidle balance, which can be very
+ * frequent, to pollute the failure counter and cause
+ * excessive cache_hot migrations and active balances.
+ */
+ if (idle != CPU_NEWLY_IDLE)
+ sd->nr_balance_failed++;
if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
@@ -5045,7 +5182,7 @@ static u64 do_task_delta_exec(struct tas
if (task_current(rq, p)) {
update_rq_clock(rq);
- ns = rq->clock - p->se.exec_start;
+ ns = rq->clock_task - p->se.exec_start;
if ((s64)ns < 0)
ns = 0;
}
@@ -5189,7 +5326,7 @@ void account_system_time(struct task_str
tmp = cputime_to_cputime64(cputime);
if (hardirq_count() - hardirq_offset)
cpustat->irq = cputime64_add(cpustat->irq, tmp);
- else if (softirq_count())
+ else if (in_serving_softirq())
cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
else
cpustat->system = cputime64_add(cpustat->system, tmp);
@@ -7133,7 +7270,19 @@ void __cpuinit init_idle(struct task_str
idle->se.exec_start = sched_clock();
cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
+ /*
+ * We're having a chicken and egg problem: even though we are
+ * holding rq->lock, the cpu isn't yet set to this cpu so the
+ * lockdep check in task_group() will fail.
+ *
+ * This is a similar case to sched_fork(); alternatively we could
+ * use task_rq_lock() here and obtain the other rq->lock.
+ *
+ * Silence PROVE_RCU
+ */
+ rcu_read_lock();
__set_task_cpu(idle, cpu);
+ rcu_read_unlock();
rq->curr = rq->idle = idle;
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
@@ -8640,6 +8789,8 @@ static void init_sched_groups_power(int
if (cpu != group_first_cpu(sd->groups))
return;
+ sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
+
child = sd->child;
sd->groups->cpu_power = 0;
@@ -9523,9 +9674,6 @@ void __init sched_init(void)
#ifdef CONFIG_RT_GROUP_SCHED
alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
-#ifdef CONFIG_USER_SCHED
- alloc_size *= 2;
-#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
alloc_size += num_possible_cpus() * cpumask_size();
#endif
@@ -9543,13 +9691,6 @@ void __init sched_init(void)
init_task_group.cfs_rq = (struct cfs_rq **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
-#ifdef CONFIG_USER_SCHED
- root_task_group.se = (struct sched_entity **)ptr;
- ptr += nr_cpu_ids * sizeof(void **);
-
- root_task_group.cfs_rq = (struct cfs_rq **)ptr;
- ptr += nr_cpu_ids * sizeof(void **);
-#endif /* CONFIG_USER_SCHED */
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
init_task_group.rt_se = (struct sched_rt_entity **)ptr;
@@ -9558,13 +9699,6 @@ void __init sched_init(void)
init_task_group.rt_rq = (struct rt_rq **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
-#ifdef CONFIG_USER_SCHED
- root_task_group.rt_se = (struct sched_rt_entity **)ptr;
- ptr += nr_cpu_ids * sizeof(void **);
-
- root_task_group.rt_rq = (struct rt_rq **)ptr;
- ptr += nr_cpu_ids * sizeof(void **);
-#endif /* CONFIG_USER_SCHED */
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_CPUMASK_OFFSTACK
for_each_possible_cpu(i) {
@@ -9584,24 +9718,14 @@ void __init sched_init(void)
#ifdef CONFIG_RT_GROUP_SCHED
init_rt_bandwidth(&init_task_group.rt_bandwidth,
global_rt_period(), global_rt_runtime());
-#ifdef CONFIG_USER_SCHED
- init_rt_bandwidth(&root_task_group.rt_bandwidth,
- global_rt_period(), RUNTIME_INF);
-#endif /* CONFIG_USER_SCHED */
#endif /* CONFIG_RT_GROUP_SCHED */
-#ifdef CONFIG_GROUP_SCHED
+#ifdef CONFIG_CGROUP_SCHED
list_add(&init_task_group.list, &task_groups);
INIT_LIST_HEAD(&init_task_group.children);
-#ifdef CONFIG_USER_SCHED
- INIT_LIST_HEAD(&root_task_group.children);
- init_task_group.parent = &root_task_group;
- list_add(&init_task_group.siblings, &root_task_group.children);
-#endif /* CONFIG_USER_SCHED */
-
- autogroup_init(&init_task);
-#endif /* CONFIG_GROUP_SCHED */
+ autogroup_init(&init_task);
+#endif /* CONFIG_CGROUP_SCHED */
#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
@@ -9641,25 +9765,6 @@ void __init sched_init(void)
* directly in rq->cfs (i.e init_task_group->se[] = NULL).
*/
init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
-#elif defined CONFIG_USER_SCHED
- root_task_group.shares = NICE_0_LOAD;
- init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL);
- /*
- * In case of task-groups formed thr' the user id of tasks,
- * init_task_group represents tasks belonging to root user.
- * Hence it forms a sibling of all subsequent groups formed.
- * In this case, init_task_group gets only a fraction of overall
- * system cpu resource, based on the weight assigned to root
- * user's cpu share (INIT_TASK_GROUP_LOAD). This is accomplished
- * by letting tasks of init_task_group sit in a separate cfs_rq
- * (init_tg_cfs_rq) and having one entity represent this group of
- * tasks in rq->cfs (i.e init_task_group->se[] != NULL).
- */
- init_tg_cfs_entry(&init_task_group,
- &per_cpu(init_tg_cfs_rq, i),
- &per_cpu(init_sched_entity, i), i, 1,
- root_task_group.se[i]);
-
#endif
#endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -9682,6 +9787,7 @@ void __init sched_init(void)
#ifdef CONFIG_SMP
rq->sd = NULL;
rq->rd = NULL;
+ rq->cpu_power = SCHED_LOAD_SCALE;
rq->post_schedule = 0;
rq->active_balance = 0;
rq->next_balance = jiffies;
@@ -10065,7 +10171,7 @@ static inline void unregister_rt_sched_g
}
#endif /* CONFIG_RT_GROUP_SCHED */
-#ifdef CONFIG_GROUP_SCHED
+#ifdef CONFIG_CGROUP_SCHED
static void free_sched_group(struct task_group *tg)
{
free_fair_sched_group(tg);
@@ -10156,12 +10262,12 @@ void __sched_move_task(struct task_struc
if (unlikely(running))
tsk->sched_class->put_prev_task(rq, tsk);
- set_task_rq(tsk, task_cpu(tsk));
-
#ifdef CONFIG_FAIR_GROUP_SCHED
- if (tsk->sched_class->moved_group)
- tsk->sched_class->moved_group(tsk, on_rq);
+ if (tsk->sched_class->task_move_group)
+ tsk->sched_class->task_move_group(tsk, on_rq);
+ else
#endif
+ set_task_rq(tsk, task_cpu(tsk));
if (unlikely(running))
tsk->sched_class->set_curr_task(rq);
@@ -10179,7 +10285,7 @@ void sched_move_task(struct task_struct
task_rq_unlock(rq, &flags);
}
-#endif /* CONFIG_GROUP_SCHED */
+#endif /* CONFIG_CGROUP_SCHED */
#ifdef CONFIG_FAIR_GROUP_SCHED
static void __set_se_shares(struct sched_entity *se, unsigned long shares)
@@ -10321,13 +10427,6 @@ static int tg_schedulable(struct task_gr
runtime = d->rt_runtime;
}
-#ifdef CONFIG_USER_SCHED
- if (tg == &root_task_group) {
- period = global_rt_period();
- runtime = global_rt_runtime();
- }
-#endif
-
/*
* Cannot have more runtime than the period.
*/
@@ -11103,4 +11202,4 @@ void synchronize_sched_expedited(void)
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
#endif /* #else #ifndef CONFIG_SMP */
-#endif /* CONFIG_SCHED_BFS */
+#endif /* CONFIG_SCHED_BFS */
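
The scheduler changes above introduce rq->clock_task alongside rq->clock: update_rq_clock() subtracts the per-cpu hardirq/softirq time accumulated by account_system_vtime() and only ever advances clock_task, so exec_start deltas (and thus task runtime) stop charging interrupt time to whichever task happened to be running. A toy arithmetic sketch of that bookkeeping, with invented tick and irq values (plain userspace C, not the kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long long clock = 0, clock_task = 0, irq_time = 0;
	/* three 1000ns ticks, with 0, 300 and 100ns spent in irq context */
	unsigned long long tick[3] = { 1000, 1000, 1000 };
	unsigned long long irq[3] = { 0, 300, 100 };
	int i;

	for (i = 0; i < 3; i++) {
		clock += tick[i];
		irq_time += irq[i];
		/* mirror update_rq_clock(): clock_task only moves forward */
		if (clock - irq_time > clock_task)
			clock_task = clock - irq_time;
		printf("clock=%llu irq_time=%llu clock_task=%llu\n",
		       clock, irq_time, clock_task);
	}
	return 0;
}
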
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -173,11 +173,6 @@ void print_cfs_rq(struct seq_file *m, in
task_group_path(tg, path, sizeof(path));
SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
-#elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
- {
- uid_t uid = cfs_rq->tg->uid;
- SEQ_printf(m, "\ncfs_rq[%d] for UID: %u\n", cpu, uid);
- }
#else
SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -496,7 +496,7 @@ __update_curr(struct cfs_rq *cfs_rq, str
static void update_curr(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq->curr;
- u64 now = rq_of(cfs_rq)->clock;
+ u64 now = rq_of(cfs_rq)->clock_task;
unsigned long delta_exec;
if (unlikely(!curr))
@@ -579,7 +579,7 @@ update_stats_curr_start(struct cfs_rq *c
/*
* We are starting a new run period:
*/
- se->exec_start = rq_of(cfs_rq)->clock;
+ se->exec_start = rq_of(cfs_rq)->clock_task;
}
/**************************************************
@@ -1222,7 +1222,6 @@ static int wake_affine(struct sched_doma
unsigned long this_load, load;
int idx, this_cpu, prev_cpu;
unsigned long tl_per_task;
- unsigned int imbalance;
struct task_group *tg;
unsigned long weight;
int balanced;
@@ -1262,8 +1261,6 @@ static int wake_affine(struct sched_doma
tg = task_group(p);
weight = p->se.load.weight;
- imbalance = 100 + (sd->imbalance_pct - 100) / 2;
-
/*
* In low-load situations, where prev_cpu is idle and this_cpu is idle
* due to the sync cause above having dropped this_load to 0, we'll
@@ -1273,9 +1270,22 @@ static int wake_affine(struct sched_doma
* Otherwise check if either cpus are near enough in load to allow this
* task to be woken on this_cpu.
*/
- balanced = !this_load ||
- 100*(this_load + effective_load(tg, this_cpu, weight, weight)) <=
- imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
+ if (this_load) {
+ unsigned long this_eff_load, prev_eff_load;
+
+ this_eff_load = 100;
+ this_eff_load *= power_of(prev_cpu);
+ this_eff_load *= this_load +
+ effective_load(tg, this_cpu, weight, weight);
+
+ prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
+ prev_eff_load *= power_of(this_cpu);
+ prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
+
+ balanced = this_eff_load <= prev_eff_load;
+ } else
+ balanced = true;
+
rcu_read_unlock();
/*
@@ -1992,8 +2002,11 @@ static void task_fork_fair(struct task_s
update_rq_clock(rq);
- if (unlikely(task_cpu(p) != this_cpu))
+ if (unlikely(task_cpu(p) != this_cpu)) {
+ rcu_read_lock();
__set_task_cpu(p, this_cpu);
+ rcu_read_unlock();
+ }
update_curr(cfs_rq);
@@ -2065,13 +2078,26 @@ static void set_curr_task_fair(struct rq
}
#ifdef CONFIG_FAIR_GROUP_SCHED
-static void moved_group_fair(struct task_struct *p, int on_rq)
+static void task_move_group_fair(struct task_struct *p, int on_rq)
{
- struct cfs_rq *cfs_rq = task_cfs_rq(p);
-
- update_curr(cfs_rq);
+ /*
+ * If the task was not on the rq at the time of this cgroup movement
+ * it must have been asleep; sleeping tasks keep their ->vruntime
+ * absolute on their old rq until wakeup (needed for the fair sleeper
+ * bonus in place_entity()).
+ *
+ * If it was on the rq, we've just 'preempted' it, which does convert
+ * ->vruntime to a relative base.
+ *
+ * Make sure both cases convert their relative position when migrating
+ * to another cgroup's rq. This does somewhat interfere with the
+ * fair sleeper stuff for the first placement, but who cares.
+ */
+ if (!on_rq)
+ p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
+ set_task_rq(p, task_cpu(p));
if (!on_rq)
- place_entity(cfs_rq, &p->se, 1);
+ p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
}
#endif
@@ -2125,7 +2151,7 @@ static const struct sched_class fair_sch
.get_rr_interval = get_rr_interval_fair,
#ifdef CONFIG_FAIR_GROUP_SCHED
- .moved_group = moved_group_fair,
+ .task_move_group = task_move_group_fair,
#endif
};
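
In the wake_affine() rewrite above, the old percentage test is replaced by comparing two cross-scaled quantities: each side's load is multiplied by the other cpu's power, so pulling the wakee onto a weaker cpu has to clear a correspondingly higher bar. A simplified numeric sketch of that comparison (the effective_load() terms are omitted and every value below is invented for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long this_load = 800, prev_load = 1000;
	unsigned long this_power = 1024, prev_power = 512;	/* prev cpu is weaker */
	unsigned long imbalance_pct = 125;

	/* same shape as the new wake_affine() test, minus effective_load() */
	unsigned long this_eff_load = 100 * prev_power * this_load;
	unsigned long prev_eff_load = (100 + (imbalance_pct - 100) / 2)
				      * this_power * prev_load;

	printf("this_eff_load=%lu prev_eff_load=%lu -> %s\n",
	       this_eff_load, prev_eff_load,
	       this_eff_load <= prev_eff_load ? "affine wakeup allowed"
					      : "stay near prev_cpu");
	return 0;
}
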
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -121,3 +121,8 @@ SCHED_FEAT(ASYM_EFF_LOAD, 1)
* release the lock. Decreases scheduling overhead.
*/
SCHED_FEAT(OWNER_SPIN, 1)
+
+/*
+ * Decrement CPU power based on irq activity
+ */
+SCHED_FEAT(NONIRQ_POWER, 1)
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -603,7 +603,7 @@ static void update_curr_rt(struct rq *rq
if (!task_has_rt_policy(curr))
return;
- delta_exec = rq->clock - curr->se.exec_start;
+ delta_exec = rq->clock_task - curr->se.exec_start;
if (unlikely((s64)delta_exec < 0))
delta_exec = 0;
@@ -612,7 +612,7 @@ static void update_curr_rt(struct rq *rq
curr->se.sum_exec_runtime += delta_exec;
account_group_exec_runtime(curr, delta_exec);
- curr->se.exec_start = rq->clock;
+ curr->se.exec_start = rq->clock_task;
cpuacct_charge(curr, delta_exec);
sched_rt_avg_update(rq, delta_exec);
@@ -954,18 +954,19 @@ select_task_rq_rt(struct rq *rq, struct
* runqueue. Otherwise simply start this RT task
* on its current runqueue.
*
- * We want to avoid overloading runqueues. Even if
- * the RT task is of higher priority than the current RT task.
- * RT tasks behave differently than other tasks. If
- * one gets preempted, we try to push it off to another queue.
- * So trying to keep a preempting RT task on the same
- * cache hot CPU will force the running RT task to
- * a cold CPU. So we waste all the cache for the lower
- * RT task in hopes of saving some of a RT task
- * that is just being woken and probably will have
- * cold cache anyway.
+ * We want to avoid overloading runqueues. If the woken
+ * task is of higher priority, then it will stay on this CPU
+ * and the lower prio task should be moved to another CPU.
+ * Even though this will probably make the lower prio task
+ * lose its cache, we do not want to bounce a higher prio task
+ * around just because it gave up its CPU, perhaps for a
+ * lock?
+ *
+ * For equal prio tasks, we just let the scheduler sort it out.
*/
if (unlikely(rt_task(rq->curr)) &&
+ (rq->curr->rt.nr_cpus_allowed < 2 ||
+ rq->curr->prio < p->prio) &&
(p->rt.nr_cpus_allowed > 1)) {
int cpu = find_lowest_rq(p);
@@ -1068,7 +1069,7 @@ static struct task_struct *_pick_next_ta
} while (rt_rq);
p = rt_task_of(rt_se);
- p->se.exec_start = rq->clock;
+ p->se.exec_start = rq->clock_task;
return p;
}
@@ -1493,7 +1494,10 @@ static void task_woken_rt(struct rq *rq,
if (!task_running(rq, p) &&
!test_tsk_need_resched(rq->curr) &&
has_pushable_tasks(rq) &&
- p->rt.nr_cpus_allowed > 1)
+ p->rt.nr_cpus_allowed > 1 &&
+ rt_task(rq->curr) &&
+ (rq->curr->rt.nr_cpus_allowed < 2 ||
+ rq->curr->prio < p->prio))
push_rt_tasks(rq);
}
@@ -1731,7 +1735,7 @@ static void set_curr_task_rt(struct rq *
{
struct task_struct *p = rq->curr;
- p->se.exec_start = rq->clock;
+ p->se.exec_start = rq->clock_task;
/* The running task is never eligible for pushing */
dequeue_pushable_task(rq, p);
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -193,6 +193,24 @@ void generic_smp_call_function_interrupt
list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
int refs;
+ /*
+ * Since we walk the list without any locks, we might
+ * see an entry that was completed, removed from the
+ * list and is in the process of being reused.
+ *
+ * We must check that the cpu is in the cpumask before
+ * checking the refs, and both must be set before
+ * executing the callback on this cpu.
+ */
+
+ if (!cpumask_test_cpu(cpu, data->cpumask))
+ continue;
+
+ smp_rmb();
+
+ if (atomic_read(&data->refs) == 0)
+ continue;
+
if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
continue;
@@ -201,6 +219,8 @@ void generic_smp_call_function_interrupt
refs = atomic_dec_return(&data->refs);
WARN_ON(refs < 0);
if (!refs) {
+ WARN_ON(!cpumask_empty(data->cpumask));
+
spin_lock(&call_function.lock);
list_del_rcu(&data->csd.list);
spin_unlock(&call_function.lock);
@@ -401,11 +421,21 @@ void smp_call_function_many(const struct
data = &__get_cpu_var(cfd_data);
csd_lock(&data->csd);
+ BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
data->csd.func = func;
data->csd.info = info;
cpumask_and(data->cpumask, mask, cpu_online_mask);
cpumask_clear_cpu(this_cpu, data->cpumask);
+
+ /*
+ * To ensure the interrupt handler gets a complete view,
+ * we order the cpumask and refs writes and order the read
+ * of them in the interrupt handler. In addition we may
+ * only clear our own cpu bit from the mask.
+ */
+ smp_wmb();
+
atomic_set(&data->refs, cpumask_weight(data->cpumask));
spin_lock_irqsave(&call_function.lock, flags);
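
The smp.c hunks above pair an smp_wmb() on the sending side (the cpumask is filled in before refs is set) with an smp_rmb() in the interrupt handler (the cpumask bit is tested before refs is read), so a handler that observes a non-zero refs for the new request is guaranteed to also see the new cpumask. The single-threaded C11 sketch below is only an analogy for that publish ordering, not a model of generic_smp_call_function_interrupt() itself; payload and published are stand-ins for the cpumask and refs fields:

#include <stdatomic.h>
#include <stdio.h>

static int payload;			/* stands in for data->cpumask */
static atomic_int published;		/* stands in for data->refs */

static void sender(void)
{
	payload = 42;					/* write the payload first */
	atomic_store_explicit(&published, 1,		/* then publish it; release */
			      memory_order_release);	/* plays the smp_wmb() role */
}

static void receiver(void)
{
	/* acquire load plays the smp_rmb() role on the reading side */
	if (atomic_load_explicit(&published, memory_order_acquire))
		printf("payload seen as %d\n", payload);	/* guaranteed 42 */
	else
		printf("nothing published yet\n");
}

int main(void)
{
	sender();
	receiver();
	return 0;
}
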
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -77,11 +77,21 @@ void wakeup_softirqd(void)
}
/*
+ * preempt_count and SOFTIRQ_OFFSET usage:
+ * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
+ * softirq processing.
+ * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
+ * on local_bh_disable or local_bh_enable.
+ * This lets us distinguish between whether we are currently processing
+ * softirq and whether we just have bh disabled.
+ */
+
+/*
* This one is for softirq.c-internal use,
* where hardirqs are disabled legitimately:
*/
#ifdef CONFIG_TRACE_IRQFLAGS
-static void __local_bh_disable(unsigned long ip)
+static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
unsigned long flags;
@@ -95,32 +105,43 @@ static void __local_bh_disable(unsigned
* We must manually increment preempt_count here and manually
* call the trace_preempt_off later.
*/
- preempt_count() += SOFTIRQ_OFFSET;
+ preempt_count() += cnt;
/*
* Were softirqs turned off above:
*/
- if (softirq_count() == SOFTIRQ_OFFSET)
+ if (softirq_count() == cnt)
trace_softirqs_off(ip);
raw_local_irq_restore(flags);
- if (preempt_count() == SOFTIRQ_OFFSET)
+ if (preempt_count() == cnt)
trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
-static inline void __local_bh_disable(unsigned long ip)
+static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
- add_preempt_count(SOFTIRQ_OFFSET);
+ add_preempt_count(cnt);
barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */
void local_bh_disable(void)
{
- __local_bh_disable((unsigned long)__builtin_return_address(0));
+ __local_bh_disable((unsigned long)__builtin_return_address(0),
+ SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(local_bh_disable);
+static void __local_bh_enable(unsigned int cnt)
+{
+ WARN_ON_ONCE(in_irq());
+ WARN_ON_ONCE(!irqs_disabled());
+
+ if (softirq_count() == cnt)
+ trace_softirqs_on((unsigned long)__builtin_return_address(0));
+ sub_preempt_count(cnt);
+}
+
/*
* Special-case - softirqs can safely be enabled in
* cond_resched_softirq(), or by __do_softirq(),
@@ -128,12 +149,7 @@ EXPORT_SYMBOL(local_bh_disable);
*/
void _local_bh_enable(void)
{
- WARN_ON_ONCE(in_irq());
- WARN_ON_ONCE(!irqs_disabled());
-
- if (softirq_count() == SOFTIRQ_OFFSET)
- trace_softirqs_on((unsigned long)__builtin_return_address(0));
- sub_preempt_count(SOFTIRQ_OFFSET);
+ __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);
@@ -147,13 +163,13 @@ static inline void _local_bh_enable_ip(u
/*
* Are softirqs going to be turned on now:
*/
- if (softirq_count() == SOFTIRQ_OFFSET)
+ if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
trace_softirqs_on(ip);
/*
* Keep preemption disabled until we are done with
* softirq processing:
*/
- sub_preempt_count(SOFTIRQ_OFFSET - 1);
+ sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);
if (unlikely(!in_interrupt() && local_softirq_pending()))
do_softirq();
@@ -198,7 +214,8 @@ asmlinkage void __do_softirq(void)
pending = local_softirq_pending();
account_system_vtime(current);
- __local_bh_disable((unsigned long)__builtin_return_address(0));
+ __local_bh_disable((unsigned long)__builtin_return_address(0),
+ SOFTIRQ_OFFSET);
lockdep_softirq_enter();
cpu = smp_processor_id();
@@ -245,7 +262,7 @@ restart:
lockdep_softirq_exit();
account_system_vtime(current);
- _local_bh_enable();
+ __local_bh_enable(SOFTIRQ_OFFSET);
}
#ifndef __ARCH_HAS_DO_SOFTIRQ
@@ -279,10 +296,16 @@ void irq_enter(void)
rcu_irq_enter();
if (idle_cpu(cpu) && !in_interrupt()) {
- __irq_enter();
+ /*
+ * Prevent raise_softirq from needlessly waking up ksoftirqd
+ * here, as softirq will be serviced on return from interrupt.
+ */
+ local_bh_disable();
tick_check_idle(cpu);
- } else
- __irq_enter();
+ _local_bh_enable();
+ }
+
+ __irq_enter();
}
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
@@ -701,6 +724,7 @@ static int ksoftirqd(void * __bind_cpu)
{
set_current_state(TASK_INTERRUPTIBLE);
+ current->flags |= PF_KSOFTIRQD;
while (!kthread_should_stop()) {
preempt_disable();
if (!local_softirq_pending()) {
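
The softirq.c rework above relies on serving a softirq and merely disabling bottom halves bumping the softirq byte of preempt_count by different amounts, which is what lets in_serving_softirq() (used earlier in sched.c and net/sched/cls_cgroup.c) test only the low softirq bit. A compact userspace sketch of that counter split; the DEMO_* constants and macros are stand-ins for the real definitions in include/linux/hardirq.h:

#include <stdio.h>

#define DEMO_SOFTIRQ_OFFSET		0x100
#define DEMO_SOFTIRQ_DISABLE_OFFSET	(2 * DEMO_SOFTIRQ_OFFSET)
#define DEMO_SOFTIRQ_MASK		0xff00

static unsigned int preempt_count;

#define softirq_count()		(preempt_count & DEMO_SOFTIRQ_MASK)
#define in_serving_softirq()	(softirq_count() & DEMO_SOFTIRQ_OFFSET)

int main(void)
{
	preempt_count += DEMO_SOFTIRQ_DISABLE_OFFSET;	/* like local_bh_disable() */
	printf("bh disabled only: serving softirq = %d\n", !!in_serving_softirq());

	preempt_count += DEMO_SOFTIRQ_OFFSET;		/* like __do_softirq() entry */
	printf("inside softirq:   serving softirq = %d\n", !!in_serving_softirq());

	preempt_count -= DEMO_SOFTIRQ_OFFSET;		/* like __do_softirq() exit */
	preempt_count -= DEMO_SOFTIRQ_DISABLE_OFFSET;	/* like local_bh_enable() */
	return 0;
}
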
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -567,11 +567,6 @@ static int set_user(struct cred *new)
if (!new_user)
return -EAGAIN;
- if (!task_can_switch_user(new_user, current)) {
- free_uid(new_user);
- return -EINVAL;
- }
-
if (atomic_read(&new_user->processes) >=
current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
new_user != INIT_USER) {
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -600,4 +600,14 @@ int tick_broadcast_oneshot_active(void)
return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}
+/*
+ * Check whether the broadcast device supports oneshot.
+ */
+bool tick_broadcast_oneshot_available(void)
+{
+ struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
+ return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
+}
+
#endif
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -51,7 +51,11 @@ int tick_is_oneshot_available(void)
{
struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
- return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
+ if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
+ return 0;
+ if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
+ return 1;
+ return tick_broadcast_oneshot_available();
}
/*
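
With the tick-common.c change above, a per-cpu clock event device that stops in deep C-states (CLOCK_EVT_FEAT_C3STOP) is only reported as oneshot-capable when the broadcast device can take over, via the new tick_broadcast_oneshot_available(). A small truth-table sketch of that decision; FEAT_* and oneshot_available() below are illustrative stand-ins for the kernel's flags and logic:

#include <stdbool.h>
#include <stdio.h>

#define FEAT_ONESHOT	0x1
#define FEAT_C3STOP	0x2

static bool oneshot_available(unsigned int dev_features, bool broadcast_oneshot)
{
	if (!(dev_features & FEAT_ONESHOT))
		return false;			/* device can't do oneshot at all */
	if (!(dev_features & FEAT_C3STOP))
		return true;			/* device keeps ticking in deep idle */
	return broadcast_oneshot;		/* otherwise rely on the broadcast device */
}

int main(void)
{
	printf("%d\n", oneshot_available(FEAT_ONESHOT, false));		/* 1 */
	printf("%d\n", oneshot_available(FEAT_ONESHOT | FEAT_C3STOP, false));	/* 0 */
	printf("%d\n", oneshot_available(FEAT_ONESHOT | FEAT_C3STOP, true));	/* 1 */
	return 0;
}
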
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -37,6 +37,7 @@ extern void tick_shutdown_broadcast_ones
extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
extern int tick_broadcast_oneshot_active(void);
extern void tick_check_oneshot_broadcast(int cpu);
+bool tick_broadcast_oneshot_available(void);
# else /* BROADCAST */
static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
@@ -47,6 +48,7 @@ static inline void tick_broadcast_switch
static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
static inline int tick_broadcast_oneshot_active(void) { return 0; }
static inline void tick_check_oneshot_broadcast(int cpu) { }
+static inline bool tick_broadcast_oneshot_available(void) { return true; }
# endif /* !BROADCAST */
#else /* !ONESHOT */
@@ -77,6 +79,7 @@ static inline int tick_resume_broadcast_
return 0;
}
static inline int tick_broadcast_oneshot_active(void) { return 0; }
+static inline bool tick_broadcast_oneshot_available(void) { return false; }
#endif /* !TICK_ONESHOT */
/*
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -16,7 +16,6 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>
-#include "cred-internals.h"
struct user_namespace init_user_ns = {
.kref = {
@@ -56,9 +55,6 @@ struct user_struct root_user = {
.sigpending = ATOMIC_INIT(0),
.locked_shm = 0,
.user_ns = &init_user_ns,
-#ifdef CONFIG_USER_SCHED
- .tg = &init_task_group,
-#endif
};
/*
@@ -75,268 +71,6 @@ static void uid_hash_remove(struct user_
put_user_ns(up->user_ns);
}
-#ifdef CONFIG_USER_SCHED
-
-static void sched_destroy_user(struct user_struct *up)
-{
- sched_destroy_group(up->tg);
-}
-
-static int sched_create_user(struct user_struct *up)
-{
- int rc = 0;
-
- up->tg = sched_create_group(&root_task_group);
- if (IS_ERR(up->tg))
- rc = -ENOMEM;
-
- set_tg_uid(up);
-
- return rc;
-}
-
-#else /* CONFIG_USER_SCHED */
-
-static void sched_destroy_user(struct user_struct *up) { }
-static int sched_create_user(struct user_struct *up) { return 0; }
-
-#endif /* CONFIG_USER_SCHED */
-
-#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)
-
-static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
-{
- struct user_struct *user;
- struct hlist_node *h;
-
- hlist_for_each_entry(user, h, hashent, uidhash_node) {
- if (user->uid == uid) {
- /* possibly resurrect an "almost deleted" object */
- if (atomic_inc_return(&user->__count) == 1)
- cancel_delayed_work(&user->work);
- return user;
- }
- }
-
- return NULL;
-}
-
-static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
-static DEFINE_MUTEX(uids_mutex);
-
-static inline void uids_mutex_lock(void)
-{
- mutex_lock(&uids_mutex);
-}
-
-static inline void uids_mutex_unlock(void)
-{
- mutex_unlock(&uids_mutex);
-}
-
-/* uid directory attributes */
-#ifdef CONFIG_FAIR_GROUP_SCHED
-static ssize_t cpu_shares_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- struct user_struct *up = container_of(kobj, struct user_struct, kobj);
-
- return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
-}
-
-static ssize_t cpu_shares_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t size)
-{
- struct user_struct *up = container_of(kobj, struct user_struct, kobj);
- unsigned long shares;
- int rc;
-
- sscanf(buf, "%lu", &shares);
-
- rc = sched_group_set_shares(up->tg, shares);
-
- return (rc ? rc : size);
-}
-
-static struct kobj_attribute cpu_share_attr =
- __ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
-#endif
-
-#ifdef CONFIG_RT_GROUP_SCHED
-static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- struct user_struct *up = container_of(kobj, struct user_struct, kobj);
-
- return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg));
-}
-
-static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t size)
-{
- struct user_struct *up = container_of(kobj, struct user_struct, kobj);
- unsigned long rt_runtime;
- int rc;
-
- sscanf(buf, "%ld", &rt_runtime);
-
- rc = sched_group_set_rt_runtime(up->tg, rt_runtime);
-
- return (rc ? rc : size);
-}
-
-static struct kobj_attribute cpu_rt_runtime_attr =
- __ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);
-
-static ssize_t cpu_rt_period_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- struct user_struct *up = container_of(kobj, struct user_struct, kobj);
-
- return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
-}
-
-static ssize_t cpu_rt_period_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t size)
-{
- struct user_struct *up = container_of(kobj, struct user_struct, kobj);
- unsigned long rt_period;
- int rc;
-
- sscanf(buf, "%lu", &rt_period);
-
- rc = sched_group_set_rt_period(up->tg, rt_period);
-
- return (rc ? rc : size);
-}
-
-static struct kobj_attribute cpu_rt_period_attr =
- __ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
-#endif
-
-/* default attributes per uid directory */
-static struct attribute *uids_attributes[] = {
-#ifdef CONFIG_FAIR_GROUP_SCHED
- &cpu_share_attr.attr,
-#endif
-#ifdef CONFIG_RT_GROUP_SCHED
- &cpu_rt_runtime_attr.attr,
- &cpu_rt_period_attr.attr,
-#endif
- NULL
-};
-
-/* the lifetime of user_struct is not managed by the core (now) */
-static void uids_release(struct kobject *kobj)
-{
- return;
-}
-
-static struct kobj_type uids_ktype = {
- .sysfs_ops = &kobj_sysfs_ops,
- .default_attrs = uids_attributes,
- .release = uids_release,
-};
-
-/*
- * Create /sys/kernel/uids/<uid>/cpu_share file for this user
- * We do not create this file for users in a user namespace (until
- * sysfs tagging is implemented).
- *
- * See Documentation/scheduler/sched-design-CFS.txt for ramifications.
- */
-static int uids_user_create(struct user_struct *up)
-{
- struct kobject *kobj = &up->kobj;
- int error;
-
- memset(kobj, 0, sizeof(struct kobject));
- if (up->user_ns != &init_user_ns)
- return 0;
- kobj->kset = uids_kset;
- error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
- if (error) {
- kobject_put(kobj);
- goto done;
- }
-
- kobject_uevent(kobj, KOBJ_ADD);
-done:
- return error;
-}
-
-/* create these entries in sysfs:
- * "/sys/kernel/uids" directory
- * "/sys/kernel/uids/0" directory (for root user)
- * "/sys/kernel/uids/0/cpu_share" file (for root user)
- */
-int __init uids_sysfs_init(void)
-{
- uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
- if (!uids_kset)
- return -ENOMEM;
-
- return uids_user_create(&root_user);
-}
-
-/* delayed work function to remove sysfs directory for a user and free up
- * corresponding structures.
- */
-static void cleanup_user_struct(struct work_struct *w)
-{
- struct user_struct *up = container_of(w, struct user_struct, work.work);
- unsigned long flags;
- int remove_user = 0;
-
- /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
- * atomic.
- */
- uids_mutex_lock();
-
- spin_lock_irqsave(&uidhash_lock, flags);
- if (atomic_read(&up->__count) == 0) {
- uid_hash_remove(up);
- remove_user = 1;
- }
- spin_unlock_irqrestore(&uidhash_lock, flags);
-
- if (!remove_user)
- goto done;
-
- if (up->user_ns == &init_user_ns) {
- kobject_uevent(&up->kobj, KOBJ_REMOVE);
- kobject_del(&up->kobj);
- kobject_put(&up->kobj);
- }
-
- sched_destroy_user(up);
- key_put(up->uid_keyring);
- key_put(up->session_keyring);
- kmem_cache_free(uid_cachep, up);
-
-done:
- uids_mutex_unlock();
-}
-
-/* IRQs are disabled and uidhash_lock is held upon function entry.
- * IRQ state (as stored in flags) is restored and uidhash_lock released
- * upon function exit.
- */
-static void free_user(struct user_struct *up, unsigned long flags)
-{
- INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
- schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
- spin_unlock_irqrestore(&uidhash_lock, flags);
-}
-
-#else /* CONFIG_USER_SCHED && CONFIG_SYSFS */
-
static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
struct user_struct *user;
@@ -352,45 +86,20 @@ static struct user_struct *uid_hash_find
return NULL;
}
-int uids_sysfs_init(void) { return 0; }
-static inline int uids_user_create(struct user_struct *up) { return 0; }
-static inline void uids_mutex_lock(void) { }
-static inline void uids_mutex_unlock(void) { }
-
/* IRQs are disabled and uidhash_lock is held upon function entry.
* IRQ state (as stored in flags) is restored and uidhash_lock released
* upon function exit.
*/
static void free_user(struct user_struct *up, unsigned long flags)
+ __releases(&uidhash_lock)
{
uid_hash_remove(up);
spin_unlock_irqrestore(&uidhash_lock, flags);
- sched_destroy_user(up);
key_put(up->uid_keyring);
key_put(up->session_keyring);
kmem_cache_free(uid_cachep, up);
}
-#endif
-
-#if defined(CONFIG_RT_GROUP_SCHED) && defined(CONFIG_USER_SCHED)
-/*
- * We need to check if a setuid can take place. This function should be called
- * before successfully completing the setuid.
- */
-int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
-{
-
- return sched_rt_can_attach(up->tg, tsk);
-
-}
-#else
-int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
-{
- return 1;
-}
-#endif
-
/*
* Locate the user_struct for the passed UID. If found, take a ref on it. The
* caller must undo that ref with free_uid().
@@ -428,11 +137,7 @@ struct user_struct *alloc_uid(struct use
struct hlist_head *hashent = uidhashentry(ns, uid);
struct user_struct *up, *new;
- /* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
- * atomic.
- */
- uids_mutex_lock();
-
+ /* Make uid_hash_find() + uid_hash_insert() atomic. */
spin_lock_irq(&uidhash_lock);
up = uid_hash_find(uid, hashent);
spin_unlock_irq(&uidhash_lock);
@@ -445,14 +150,8 @@ struct user_struct *alloc_uid(struct use
new->uid = uid;
atomic_set(&new->__count, 1);
- if (sched_create_user(new) < 0)
- goto out_free_user;
-
new->user_ns = get_user_ns(ns);
- if (uids_user_create(new))
- goto out_destoy_sched;
-
/*
* Before adding this, check whether we raced
* on adding the same user already..
@@ -460,11 +159,6 @@ struct user_struct *alloc_uid(struct use
spin_lock_irq(&uidhash_lock);
up = uid_hash_find(uid, hashent);
if (up) {
- /* This case is not possible when CONFIG_USER_SCHED
- * is defined, since we serialize alloc_uid() using
- * uids_mutex. Hence no need to call
- * sched_destroy_user() or remove_user_sysfs_dir().
- */
key_put(new->uid_keyring);
key_put(new->session_keyring);
kmem_cache_free(uid_cachep, new);
@@ -475,17 +169,9 @@ struct user_struct *alloc_uid(struct use
spin_unlock_irq(&uidhash_lock);
}
- uids_mutex_unlock();
-
return up;
-out_destoy_sched:
- sched_destroy_user(new);
- put_user_ns(new->user_ns);
-out_free_user:
- kmem_cache_free(uid_cachep, new);
out_unlock:
- uids_mutex_unlock();
return NULL;
}
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -111,39 +111,41 @@ EXPORT_SYMBOL(sk_filter);
*/
unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
{
- struct sock_filter *fentry; /* We walk down these */
void *ptr;
u32 A = 0; /* Accumulator */
u32 X = 0; /* Index Register */
u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */
+ unsigned long memvalid = 0;
u32 tmp;
int k;
int pc;
+ BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);
/*
* Process array of filter instructions.
*/
for (pc = 0; pc < flen; pc++) {
- fentry = &filter[pc];
+ const struct sock_filter *fentry = &filter[pc];
+ u32 f_k = fentry->k;
switch (fentry->code) {
case BPF_ALU|BPF_ADD|BPF_X:
A += X;
continue;
case BPF_ALU|BPF_ADD|BPF_K:
- A += fentry->k;
+ A += f_k;
continue;
case BPF_ALU|BPF_SUB|BPF_X:
A -= X;
continue;
case BPF_ALU|BPF_SUB|BPF_K:
- A -= fentry->k;
+ A -= f_k;
continue;
case BPF_ALU|BPF_MUL|BPF_X:
A *= X;
continue;
case BPF_ALU|BPF_MUL|BPF_K:
- A *= fentry->k;
+ A *= f_k;
continue;
case BPF_ALU|BPF_DIV|BPF_X:
if (X == 0)
@@ -151,49 +153,49 @@ unsigned int sk_run_filter(struct sk_buf
A /= X;
continue;
case BPF_ALU|BPF_DIV|BPF_K:
- A /= fentry->k;
+ A /= f_k;
continue;
case BPF_ALU|BPF_AND|BPF_X:
A &= X;
continue;
case BPF_ALU|BPF_AND|BPF_K:
- A &= fentry->k;
+ A &= f_k;
continue;
case BPF_ALU|BPF_OR|BPF_X:
A |= X;
continue;
case BPF_ALU|BPF_OR|BPF_K:
- A |= fentry->k;
+ A |= f_k;
continue;
case BPF_ALU|BPF_LSH|BPF_X:
A <<= X;
continue;
case BPF_ALU|BPF_LSH|BPF_K:
- A <<= fentry->k;
+ A <<= f_k;
continue;
case BPF_ALU|BPF_RSH|BPF_X:
A >>= X;
continue;
case BPF_ALU|BPF_RSH|BPF_K:
- A >>= fentry->k;
+ A >>= f_k;
continue;
case BPF_ALU|BPF_NEG:
A = -A;
continue;
case BPF_JMP|BPF_JA:
- pc += fentry->k;
+ pc += f_k;
continue;
case BPF_JMP|BPF_JGT|BPF_K:
- pc += (A > fentry->k) ? fentry->jt : fentry->jf;
+ pc += (A > f_k) ? fentry->jt : fentry->jf;
continue;
case BPF_JMP|BPF_JGE|BPF_K:
- pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
+ pc += (A >= f_k) ? fentry->jt : fentry->jf;
continue;
case BPF_JMP|BPF_JEQ|BPF_K:
- pc += (A == fentry->k) ? fentry->jt : fentry->jf;
+ pc += (A == f_k) ? fentry->jt : fentry->jf;
continue;
case BPF_JMP|BPF_JSET|BPF_K:
- pc += (A & fentry->k) ? fentry->jt : fentry->jf;
+ pc += (A & f_k) ? fentry->jt : fentry->jf;
continue;
case BPF_JMP|BPF_JGT|BPF_X:
pc += (A > X) ? fentry->jt : fentry->jf;
@@ -208,7 +210,7 @@ unsigned int sk_run_filter(struct sk_buf
pc += (A & X) ? fentry->jt : fentry->jf;
continue;
case BPF_LD|BPF_W|BPF_ABS:
- k = fentry->k;
+ k = f_k;
load_w:
ptr = load_pointer(skb, k, 4, &tmp);
if (ptr != NULL) {
@@ -217,7 +219,7 @@ load_w:
}
break;
case BPF_LD|BPF_H|BPF_ABS:
- k = fentry->k;
+ k = f_k;
load_h:
ptr = load_pointer(skb, k, 2, &tmp);
if (ptr != NULL) {
@@ -226,7 +228,7 @@ load_h:
}
break;
case BPF_LD|BPF_B|BPF_ABS:
- k = fentry->k;
+ k = f_k;
load_b:
ptr = load_pointer(skb, k, 1, &tmp);
if (ptr != NULL) {
@@ -241,32 +243,34 @@ load_b:
X = skb->len;
continue;
case BPF_LD|BPF_W|BPF_IND:
- k = X + fentry->k;
+ k = X + f_k;
goto load_w;
case BPF_LD|BPF_H|BPF_IND:
- k = X + fentry->k;
+ k = X + f_k;
goto load_h;
case BPF_LD|BPF_B|BPF_IND:
- k = X + fentry->k;
+ k = X + f_k;
goto load_b;
case BPF_LDX|BPF_B|BPF_MSH:
- ptr = load_pointer(skb, fentry->k, 1, &tmp);
+ ptr = load_pointer(skb, f_k, 1, &tmp);
if (ptr != NULL) {
X = (*(u8 *)ptr & 0xf) << 2;
continue;
}
return 0;
case BPF_LD|BPF_IMM:
- A = fentry->k;
+ A = f_k;
continue;
case BPF_LDX|BPF_IMM:
- X = fentry->k;
+ X = f_k;
continue;
case BPF_LD|BPF_MEM:
- A = mem[fentry->k];
+ A = (memvalid & (1UL << f_k)) ?
+ mem[f_k] : 0;
continue;
case BPF_LDX|BPF_MEM:
- X = mem[fentry->k];
+ X = (memvalid & (1UL << f_k)) ?
+ mem[f_k] : 0;
continue;
case BPF_MISC|BPF_TAX:
X = A;
@@ -275,14 +279,16 @@ load_b:
A = X;
continue;
case BPF_RET|BPF_K:
- return fentry->k;
+ return f_k;
case BPF_RET|BPF_A:
return A;
case BPF_ST:
- mem[fentry->k] = A;
+ memvalid |= 1UL << f_k;
+ mem[f_k] = A;
continue;
case BPF_STX:
- mem[fentry->k] = X;
+ memvalid |= 1UL << f_k;
+ mem[f_k] = X;
continue;
default:
WARN_ON(1);
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -616,6 +616,9 @@ int dccp_rcv_state_process(struct sock *
/* Caller (dccp_v4_do_rcv) will send Reset */
dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
return 1;
+ } else if (sk->sk_state == DCCP_CLOSED) {
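+		/* a packet arriving in the CLOSED state also gets a Reset; the CLOSED case is removed from the switch below */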
+ dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
+ return 1;
}
if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) {
@@ -678,10 +681,6 @@ int dccp_rcv_state_process(struct sock *
}
switch (sk->sk_state) {
- case DCCP_CLOSED:
- dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
- return 1;
-
case DCCP_REQUESTING:
queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
if (queued >= 0)
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1032,6 +1032,21 @@ static inline bool inetdev_valid_mtu(uns
return mtu >= 68;
}
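+/* Send a gratuitous ARP announcing the device's primary address, if one is configured. */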
+static void inetdev_send_gratuitous_arp(struct net_device *dev,
+ struct in_device *in_dev)
+{
+ struct in_ifaddr *ifa = in_dev->ifa_list;
+
+ if (!ifa)
+ return;
+
+ arp_send(ARPOP_REQUEST, ETH_P_ARP,
+ ifa->ifa_address, dev,
+ ifa->ifa_address, NULL,
+ dev->dev_addr, NULL);
+}
+
/* Called only under RTNL semaphore */
static int inetdev_event(struct notifier_block *this, unsigned long event,
@@ -1083,18 +1098,13 @@ static int inetdev_event(struct notifier
}
ip_mc_up(in_dev);
/* fall through */
- case NETDEV_NOTIFY_PEERS:
case NETDEV_CHANGEADDR:
+ if (!IN_DEV_ARP_NOTIFY(in_dev))
+ break;
+ /* fall through */
+ case NETDEV_NOTIFY_PEERS:
/* Send gratuitous ARP to notify of link change */
- if (IN_DEV_ARP_NOTIFY(in_dev)) {
- struct in_ifaddr *ifa = in_dev->ifa_list;
-
- if (ifa)
- arp_send(ARPOP_REQUEST, ETH_P_ARP,
- ifa->ifa_address, dev,
- ifa->ifa_address, NULL,
- dev->dev_addr, NULL);
- }
+ inetdev_send_gratuitous_arp(dev, in_dev);
break;
case NETDEV_DOWN:
ip_mc_down(in_dev);
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2117,7 +2117,7 @@ static int do_tcp_setsockopt(struct sock
/* Values greater than interface MTU won't take effect. However
* at the point when this call is done we typically don't yet
* know which interface is going to be used */
- if (val < 8 || val > MAX_TCP_WINDOW) {
+ if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
err = -EINVAL;
break;
}
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -110,7 +110,7 @@ static int cls_cgroup_classify(struct sk
* calls by looking at the number of nested bh disable calls because
* softirqs always disable bh.
*/
- if (softirq_count() != SOFTIRQ_OFFSET)
+ if (in_serving_softirq())
return -1;
rcu_read_lock();
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -542,16 +542,20 @@ struct sctp_hmac *sctp_auth_asoc_get_hma
id = ntohs(hmacs->hmac_ids[i]);
/* Check the id is in the supported range */
- if (id > SCTP_AUTH_HMAC_ID_MAX)
+ if (id > SCTP_AUTH_HMAC_ID_MAX) {
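+			/* clear id so an unsupported value does not survive past the loop */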
+ id = 0;
continue;
+ }
/* See if we support the id. Supported IDs have name and
* length fields set, so that we can allocate and use
* them. We can safely just check for name, for without the
* name, we can't allocate the TFM.
*/
- if (!sctp_hmac_list[id].hmac_name)
+ if (!sctp_hmac_list[id].hmac_name) {
+ id = 0;
continue;
+ }
break;
}
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3361,21 +3361,6 @@ int sctp_process_asconf_ack(struct sctp_
sctp_chunk_free(asconf);
asoc->addip_last_asconf = NULL;
- /* Send the next asconf chunk from the addip chunk queue. */
- if (!list_empty(&asoc->addip_chunk_list)) {
- struct list_head *entry = asoc->addip_chunk_list.next;
- asconf = list_entry(entry, struct sctp_chunk, list);
-
- list_del_init(entry);
-
- /* Hold the chunk until an ASCONF_ACK is received. */
- sctp_chunk_hold(asconf);
- if (sctp_primitive_ASCONF(asoc, asconf))
- sctp_chunk_free(asconf);
- else
- asoc->addip_last_asconf = asconf;
- }
-
return retval;
}
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -997,6 +997,29 @@ static int sctp_cmd_send_msg(struct sctp
}
+/* Send the next ASCONF chunk currently stored in the association.
+ * This happens after the ASCONF_ACK was successfully processed.
+ */
+static void sctp_cmd_send_asconf(struct sctp_association *asoc)
+{
+ /* Send the next asconf chunk from the addip chunk
+ * queue.
+ */
+ if (!list_empty(&asoc->addip_chunk_list)) {
+ struct list_head *entry = asoc->addip_chunk_list.next;
+ struct sctp_chunk *asconf = list_entry(entry,
+ struct sctp_chunk, list);
+ list_del_init(entry);
+
+ /* Hold the chunk until an ASCONF_ACK is received. */
+ sctp_chunk_hold(asconf);
+ if (sctp_primitive_ASCONF(asoc, asconf))
+ sctp_chunk_free(asconf);
+ else
+ asoc->addip_last_asconf = asconf;
+ }
+}
+
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
@@ -1650,6 +1673,9 @@ static int sctp_cmd_interpreter(sctp_eve
}
error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
break;
+ case SCTP_CMD_SEND_NEXT_ASCONF:
+ sctp_cmd_send_asconf(asoc);
+ break;
default:
printk(KERN_WARNING "Impossible command: %u, %p\n",
cmd->verb, cmd->obj.ptr);
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3670,8 +3670,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
- asconf_ack))
+ asconf_ack)) {
+ /* Successfully processed ASCONF_ACK. We can
+ * release the next asconf if we have one.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
+ SCTP_NULL());
return SCTP_DISPOSITION_CONSUME;
+ }
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1234,8 +1234,10 @@ static int content_open(struct inode *in
if (!cd || !try_module_get(cd->owner))
return -EACCES;
han = __seq_open_private(file, &cache_content_op, sizeof(*han));
- if (han == NULL)
+ if (han == NULL) {
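+		/* balance the try_module_get() taken above */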
+ module_put(cd->owner);
return -ENOMEM;
+ }
han->cd = cd;
return 0;
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -391,8 +391,12 @@ void __exit x25_link_free(void)
write_lock_bh(&x25_neigh_list_lock);
list_for_each_safe(entry, tmp, &x25_neigh_list) {
+ struct net_device *dev;
+
nb = list_entry(entry, struct x25_neigh, node);
+ dev = nb->dev;
__x25_remove_neigh(nb);
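+		/* drop the device reference now that the neighbour entry is gone */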
+ dev_put(dev);
}
write_unlock_bh(&x25_neigh_list_lock);
}
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2601,7 +2601,10 @@ static int selinux_inode_init_security(s
sid = tsec->sid;
newsid = tsec->create_sid;
- if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
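+	/* superblocks using mountpoint labeling supply the mountpoint SID for new inodes */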
+ if ((sbsec->flags & SE_SBINITIALIZED) &&
+ (sbsec->behavior == SECURITY_FS_USE_MNTPOINT))
+ newsid = sbsec->mntpoint_sid;
+ else if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
rc = security_transition_sid(sid, dsec->sid,
inode_mode_to_security_class(inode->i_mode),
&newsid);
@@ -3259,7 +3262,11 @@ static void selinux_cred_free(struct cre
{
struct task_security_struct *tsec = cred->security;
- BUG_ON((unsigned long) cred->security < PAGE_SIZE);
+ /*
+ * cred->security == NULL if security_cred_alloc_blank() or
+ * security_prepare_creds() returned an error.
+ */
+ BUG_ON(cred->security && (unsigned long) cred->security < PAGE_SIZE);
cred->security = (void *) 0x7UL;
kfree(tsec);
}
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -66,6 +66,8 @@ static struct nlmsg_perm nlmsg_route_per
{ RTM_NEWADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
{ RTM_DELADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
{ RTM_GETADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_GETDCB, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_SETDCB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
};
static struct nlmsg_perm nlmsg_firewall_perms[] =
--- a/sound/core/hrtimer.c
+++ b/sound/core/hrtimer.c
@@ -44,12 +44,13 @@ static enum hrtimer_restart snd_hrtimer_
{
struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt);
struct snd_timer *t = stime->timer;
+ unsigned long oruns;
if (!atomic_read(&stime->running))
return HRTIMER_NORESTART;
- hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution));
- snd_timer_interrupt(stime->timer, t->sticks);
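+	/* hrtimer_forward_now() returns the number of overruns; account for all of them */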
+ oruns = hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution));
+ snd_timer_interrupt(stime->timer, t->sticks * oruns);
if (!atomic_read(&stime->running))
return HRTIMER_NORESTART;
--- a/sound/pci/au88x0/au88x0_pcm.c
+++ b/sound/pci/au88x0/au88x0_pcm.c
@@ -42,11 +42,7 @@ static struct snd_pcm_hardware snd_vorte
.rate_min = 5000,
.rate_max = 48000,
.channels_min = 1,
-#ifdef CHIP_AU8830
- .channels_max = 4,
-#else
.channels_max = 2,
-#endif
.buffer_bytes_max = 0x10000,
.period_bytes_min = 0x1,
.period_bytes_max = 0x1000,
@@ -115,6 +111,17 @@ static struct snd_pcm_hardware snd_vorte
.periods_max = 64,
};
#endif
+#ifdef CHIP_AU8830
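+/* AU8830 ADB playback handles 1, 2 or 4 channels; the constraint list keeps 3-channel setups from being negotiated */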
+static unsigned int au8830_channels[3] = {
+ 1, 2, 4,
+};
+
+static struct snd_pcm_hw_constraint_list hw_constraints_au8830_channels = {
+ .count = ARRAY_SIZE(au8830_channels),
+ .list = au8830_channels,
+ .mask = 0,
+};
+#endif
/* open callback */
static int snd_vortex_pcm_open(struct snd_pcm_substream *substream)
{
@@ -156,6 +163,15 @@ static int snd_vortex_pcm_open(struct sn
if (VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_ADB
|| VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_I2S)
runtime->hw = snd_vortex_playback_hw_adb;
+#ifdef CHIP_AU8830
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+ VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_ADB) {
+ runtime->hw.channels_max = 4;
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &hw_constraints_au8830_channels);
+ }
+#endif
substream->runtime->private_data = NULL;
}
#ifndef CHIP_AU8810
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -383,7 +383,7 @@ static void hdmi_show_short_audio_desc(s
snd_print_pcm_rates(a->rates, buf, sizeof(buf));
if (a->format == AUDIO_CODING_TYPE_LPCM)
- snd_print_pcm_bits(a->sample_bits, buf2 + 8, sizeof(buf2 - 8));
+ snd_print_pcm_bits(a->sample_bits, buf2 + 8, sizeof(buf2) - 8);
else if (a->max_bitrate)
snprintf(buf2, sizeof(buf2),
", max bitrate = %d", a->max_bitrate);
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2236,6 +2236,7 @@ static struct snd_pci_quirk position_fix
SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1043, 0x8410, "ASUS", POS_FIX_LPIB),
SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB),
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -366,10 +366,16 @@ static int conexant_add_jack(struct hda_
struct conexant_spec *spec;
struct conexant_jack *jack;
const char *name;
- int err;
+ int i, err;
spec = codec->spec;
snd_array_init(&spec->jacks, sizeof(*jack), 32);
+
+ jack = spec->jacks.list;
+ for (i = 0; i < spec->jacks.used; i++, jack++)
+ if (jack->nid == nid)
+			return 0; /* already present */
+
jack = snd_array_new(&spec->jacks);
name = (type == SND_JACK_HEADPHONE) ? "Headphone" : "Mic" ;
--- a/sound/soc/blackfin/bf5xx-ac97.c
+++ b/sound/soc/blackfin/bf5xx-ac97.c
@@ -260,9 +260,9 @@ static int bf5xx_ac97_suspend(struct snd
pr_debug("%s : sport %d\n", __func__, dai->id);
if (!dai->active)
return 0;
- if (dai->capture.active)
+ if (dai->capture_active)
sport_rx_stop(sport);
- if (dai->playback.active)
+ if (dai->playback_active)
sport_tx_stop(sport);
return 0;
}
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -1185,7 +1185,7 @@ static int wm8990_set_bias_level(struct
WM8990_VMIDTOG);
/* Delay to allow output caps to discharge */
- msleep(msecs_to_jiffies(300));
+ msleep(300);
/* Disable VMIDTOG */
snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
@@ -1197,17 +1197,17 @@ static int wm8990_set_bias_level(struct
/* Enable outputs */
snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1b00);
- msleep(msecs_to_jiffies(50));
+ msleep(50);
/* Enable VMID at 2x50k */
snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f02);
- msleep(msecs_to_jiffies(100));
+ msleep(100);
/* Enable VREF */
snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f03);
- msleep(msecs_to_jiffies(600));
+ msleep(600);
/* Enable BUFIOEN */
snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
@@ -1252,7 +1252,7 @@ static int wm8990_set_bias_level(struct
/* Disable VMID */
snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f01);
- msleep(msecs_to_jiffies(300));
+ msleep(300);
/* Enable all output discharge bits */
snd_soc_write(codec, WM8990_ANTIPOP1, WM8990_DIS_LLINE |
--- a/sound/usb/caiaq/audio.c
+++ b/sound/usb/caiaq/audio.c
@@ -639,7 +639,7 @@ int snd_usb_caiaq_audio_init(struct snd_
}
dev->pcm->private_data = dev;
- strcpy(dev->pcm->name, dev->product_name);
+ strlcpy(dev->pcm->name, dev->product_name, sizeof(dev->pcm->name));
memset(dev->sub_playback, 0, sizeof(dev->sub_playback));
memset(dev->sub_capture, 0, sizeof(dev->sub_capture));
--- a/sound/usb/caiaq/midi.c
+++ b/sound/usb/caiaq/midi.c
@@ -135,7 +135,7 @@ int snd_usb_caiaq_midi_init(struct snd_u
if (ret < 0)
return ret;
- strcpy(rmidi->name, device->product_name);
+ strlcpy(rmidi->name, device->product_name, sizeof(rmidi->name));
rmidi->info_flags = SNDRV_RAWMIDI_INFO_DUPLEX;
rmidi->private_data = device;
--- a/sound/usb/usx2y/us122l.c
+++ b/sound/usb/usx2y/us122l.c
@@ -234,29 +234,26 @@ static unsigned int usb_stream_hwdep_pol
struct file *file, poll_table *wait)
{
struct us122l *us122l = hw->private_data;
- struct usb_stream *s = us122l->sk.s;
unsigned *polled;
unsigned int mask;
poll_wait(file, &us122l->sk.sleep, wait);
- switch (s->state) {
- case usb_stream_ready:
- if (us122l->first == file)
- polled = &s->periods_polled;
- else
- polled = &us122l->second_periods_polled;
- if (*polled != s->periods_done) {
- *polled = s->periods_done;
- mask = POLLIN | POLLOUT | POLLWRNORM;
- break;
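+	/* default to ready|error; refined below only when the stream state can be inspected without blocking */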
+ mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR;
+ if (mutex_trylock(&us122l->mutex)) {
+ struct usb_stream *s = us122l->sk.s;
+ if (s && s->state == usb_stream_ready) {
+ if (us122l->first == file)
+ polled = &s->periods_polled;
+ else
+ polled = &us122l->second_periods_polled;
+ if (*polled != s->periods_done) {
+ *polled = s->periods_done;
+ mask = POLLIN | POLLOUT | POLLWRNORM;
+ } else
+ mask = 0;
}
- /* Fall through */
- mask = 0;
- break;
- default:
- mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR;
- break;
+ mutex_unlock(&us122l->mutex);
}
return mask;
}
@@ -342,6 +339,7 @@ static int usb_stream_hwdep_ioctl(struct
{
struct usb_stream_config *cfg;
struct us122l *us122l = hw->private_data;
+ struct usb_stream *s;
unsigned min_period_frames;
int err = 0;
bool high_speed;
@@ -387,18 +385,18 @@ static int usb_stream_hwdep_ioctl(struct
snd_power_wait(hw->card, SNDRV_CTL_POWER_D0);
mutex_lock(&us122l->mutex);
+ s = us122l->sk.s;
if (!us122l->master)
us122l->master = file;
else if (us122l->master != file) {
- if (memcmp(cfg, &us122l->sk.s->cfg, sizeof(*cfg))) {
+ if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg))) {
err = -EIO;
goto unlock;
}
us122l->slave = file;
}
- if (!us122l->sk.s ||
- memcmp(cfg, &us122l->sk.s->cfg, sizeof(*cfg)) ||
- us122l->sk.s->state == usb_stream_xrun) {
+ if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg)) ||
+ s->state == usb_stream_xrun) {
us122l_stop(us122l);
if (!us122l_start(us122l, cfg->sample_rate, cfg->period_frames))
err = -EIO;
@@ -409,6 +407,7 @@ unlock:
mutex_unlock(&us122l->mutex);
free:
kfree(cfg);
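+	/* wake any pollers so they re-check the stream state */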
+ wake_up_all(&us122l->sk.sleep);
return err;
}