Updated from 2.6.32.17 to 2.6.32.27 (stable series).

file:c57d3964e00c99a176279542b3deb37bcfe2ede3 -> file:b23a092ee64bcde576ac46ed8559b46da845193f
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1974,6 +1974,12 @@ W: http://acpi4asus.sf.net
S: Maintained
F: drivers/platform/x86/eeepc-laptop.c
+EFIFB FRAMEBUFFER DRIVER
+L: linux-fbdev@vger.kernel.org
+M: Peter Jones <pjones@redhat.com>
+S: Maintained
+F: drivers/video/efifb.c
+
EFS FILESYSTEM
W: http://aeschi.ch.eu.org/efs/
S: Orphan
file:c40e7dc93f6d07690eba581927ae23dd1559a3d1 -> file:c6fb55e5b381c6b93067cd0602b7d9980be41f7e
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 32
-EXTRAVERSION = .17
+EXTRAVERSION = .27
NAME = Man-Eating Seals of Antiquity
# *DOCUMENTATION*
file:52a79dfc13c6774ec1f88f2e288af1a42f774f51 -> file:5c905aaaeccd82861ea62d9186517475f0d7019c
--- a/arch/alpha/kernel/err_marvel.c
+++ b/arch/alpha/kernel/err_marvel.c
@@ -109,7 +109,7 @@ marvel_print_err_cyc(u64 err_cyc)
#define IO7__ERR_CYC__CYCLE__M (0x7)
printk("%s Packet In Error: %s\n"
- "%s Error in %s, cycle %ld%s%s\n",
+ "%s Error in %s, cycle %lld%s%s\n",
err_print_prefix,
packet_desc[EXTRACT(err_cyc, IO7__ERR_CYC__PACKET)],
err_print_prefix,
@@ -313,7 +313,7 @@ marvel_print_po7_ugbge_sym(u64 ugbge_sym
}
printk("%s Up Hose Garbage Symptom:\n"
- "%s Source Port: %ld - Dest PID: %ld - OpCode: %s\n",
+ "%s Source Port: %lld - Dest PID: %lld - OpCode: %s\n",
err_print_prefix,
err_print_prefix,
EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_SRC_PORT),
@@ -552,7 +552,7 @@ marvel_print_pox_spl_cmplt(u64 spl_cmplt
#define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__M (0xfff)
printk("%s Split Completion Error:\n"
- "%s Source (Bus:Dev:Func): %ld:%ld:%ld\n",
+ "%s Source (Bus:Dev:Func): %lld:%lld:%lld\n",
err_print_prefix,
err_print_prefix,
EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_BUS),
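
A note on the format-string fixes above: since the kernel-wide switch to int-ll64.h, u64 is "unsigned long long" on every architecture, including 64-bit alpha, so %lld/%llu is the specifier that matches what EXTRACT() yields. A minimal sketch of the portable idiom (function name is illustrative):

static void print_err_cycle(u64 cycle)
{
	/* u64 == unsigned long long everywhere; the cast keeps the
	 * idiom safe for other 64-bit types as well */
	printk(KERN_WARNING "cycle %lld\n", (long long)cycle);
}
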
file:0681707cbca2b59e2bd321ee35c33b745c17968a -> file:fe1b026882bc6b340ad2a1769f0c7f29b0b492ff
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -890,6 +890,18 @@ config ARM_ERRATA_460075
ACTLR register. Note that setting specific bits in the ACTLR register
may not be available in non-secure mode.
+config ARM_ERRATA_720789
+ bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
+ depends on CPU_V7 && SMP
+ help
+ This option enables the workaround for the 720789 Cortex-A9 (prior to
+ r2p0) erratum. A faulty ASID can be sent to the other CPUs for the
+ broadcasted CP15 TLB maintenance operations TLBIASIDIS and TLBIMVAIS.
+ As a consequence of this erratum, some TLB entries which should be
+ invalidated are not, resulting in an incoherency in the system page
+ tables. The workaround changes the TLB flushing routines to invalidate
+ entries regardless of the ASID.
+
endmenu
source "arch/arm/common/Kconfig"
file:00f46d9ce299310360ed89a8c19e2521db58e118 -> file:eea494790b56cfaf9e398e29780f6d4d3f84d0f0
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -215,7 +215,7 @@
@ Slightly optimised to avoid incrementing the pointer twice
usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
.if \rept == 2
- usraccoff \instr, \reg, \ptr, \inc, 4, \cond, \abort
+ usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
.endif
add\cond \ptr, #\rept * \inc
file:6674c42a1a27585c93d18146a0a3219aeea6191e -> file:1df645713d483fedd29e10bbd8cbc79678edb392
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -150,19 +150,24 @@ struct pt_regs {
*/
static inline int valid_user_regs(struct pt_regs *regs)
{
- long mode = regs->ARM_cpsr & MODE_MASK;
+ unsigned long mode = regs->ARM_cpsr & MODE_MASK;
- if (((mode == USR_MODE) ||
- ((elf_hwcap & HWCAP_26BIT) && (mode == USR26_MODE))) &&
- (regs->ARM_cpsr & PSR_I_BIT) == 0) {
- regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);
- return 1;
+ /*
+ * Always clear the F (FIQ) and A (delayed abort) bits
+ */
+ regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);
+
+ if ((regs->ARM_cpsr & PSR_I_BIT) == 0) {
+ if (mode == USR_MODE)
+ return 1;
+ if (elf_hwcap & HWCAP_26BIT && mode == USR26_MODE)
+ return 1;
}
/*
* Force CPSR to something logical...
*/
- regs->ARM_cpsr &= PSR_f | PSR_s | (PSR_x & ~PSR_A_BIT) | PSR_T_BIT | MODE32_BIT;
+ regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT;
if (!(elf_hwcap & HWCAP_26BIT))
regs->ARM_cpsr |= USR_MODE;
file:c2f1605de35902df4378b1e77898cc796441b4db -> file:00c1cba729cb3053d46908771fb8552d2442e830
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -369,7 +369,11 @@ static inline void local_flush_tlb_mm(st
if (tlb_flag(TLB_V6_I_ASID))
asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc");
if (tlb_flag(TLB_V7_UIS_ASID))
+#ifdef CONFIG_ARM_ERRATA_720789
+ asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc");
+#else
asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc");
+#endif
if (tlb_flag(TLB_BTB)) {
/* flush the branch target cache */
@@ -409,7 +413,11 @@ local_flush_tlb_page(struct vm_area_stru
if (tlb_flag(TLB_V6_I_PAGE))
asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
if (tlb_flag(TLB_V7_UIS_PAGE))
+#ifdef CONFIG_ARM_ERRATA_720789
+ asm("mcr p15, 0, %0, c8, c3, 3" : : "r" (uaddr & PAGE_MASK) : "cc");
+#else
asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc");
+#endif
if (tlb_flag(TLB_BTB)) {
/* flush the branch target cache */
file:2c1db77d78487cbdfa95d660f3abfa8acf6e215b -> file:a6c66f598d14e85fd7396a557de3ed1ac809ed46
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -382,11 +382,13 @@ ENDPROC(sys_clone_wrapper)
sys_sigreturn_wrapper:
add r0, sp, #S_OFF
+ mov why, #0 @ prevent syscall restart handling
b sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)
sys_rt_sigreturn_wrapper:
add r0, sp, #S_OFF
+ mov why, #0 @ prevent syscall restart handling
b sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
file:1e4cbd4e7be930f2515bece2a19c23f2f2a07845 -> file:64f6bc1a91326c79800bbb9205645a7d0e7e5e0c
--- a/arch/arm/lib/findbit.S
+++ b/arch/arm/lib/findbit.S
@@ -174,8 +174,8 @@ ENDPROC(_find_next_bit_be)
*/
.L_found:
#if __LINUX_ARM_ARCH__ >= 5
- rsb r1, r3, #0
- and r3, r3, r1
+ rsb r0, r3, #0
+ and r3, r3, r0
clz r3, r3
rsb r3, r3, #31
add r0, r2, r3
@@ -190,5 +190,7 @@ ENDPROC(_find_next_bit_be)
addeq r2, r2, #1
mov r0, r2
#endif
+ cmp r1, r0 @ Clamp to maxbit
+ movlo r0, r1
mov pc, lr
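
A rough C analogue of the fixed .L_found path, with helper and parameter names of my own choosing: isolate the lowest set bit, convert it to an index, and clamp so a stray bit in the padding past maxbit is reported as "not found" (== maxbit) instead of an out-of-range position:

static unsigned long report_found_bit(unsigned long word,
				      unsigned long base,
				      unsigned long maxbit)
{
	/* word is known non-zero here; word & -word isolates its
	 * lowest set bit, ctz converts that to a bit index */
	unsigned long bit = base + __builtin_ctzl(word & -word);

	return bit < maxbit ? bit : maxbit;	/* the new clamp */
}
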
file:332b784050b272592d402afddf077b0ec591d00b -> file:2f7e4980a1d8b8e4f49d5a925deb97bc18d320a7
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -46,7 +46,7 @@ static struct resource hdmac_resources[]
.end = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1,
.flags = IORESOURCE_MEM,
},
- [2] = {
+ [1] = {
.start = AT91SAM9G45_ID_DMA,
.end = AT91SAM9G45_ID_DMA,
.flags = IORESOURCE_IRQ,
file:cfc4a8b43e6a613f589c6d210d4226dc0721f5d3 -> file:c47aa88cc83e2515fe7f0280ef45326850716e9b
--- a/arch/arm/plat-mxc/gpio.c
+++ b/arch/arm/plat-mxc/gpio.c
@@ -223,13 +223,16 @@ static void _set_gpio_direction(struct g
struct mxc_gpio_port *port =
container_of(chip, struct mxc_gpio_port, chip);
u32 l;
+ unsigned long flags;
+ spin_lock_irqsave(&port->lock, flags);
l = __raw_readl(port->base + GPIO_GDIR);
if (dir)
l |= 1 << offset;
else
l &= ~(1 << offset);
__raw_writel(l, port->base + GPIO_GDIR);
+ spin_unlock_irqrestore(&port->lock, flags);
}
static void mxc_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -238,9 +241,12 @@ static void mxc_gpio_set(struct gpio_chi
container_of(chip, struct mxc_gpio_port, chip);
void __iomem *reg = port->base + GPIO_DR;
u32 l;
+ unsigned long flags;
+ spin_lock_irqsave(&port->lock, flags);
l = (__raw_readl(reg) & (~(1 << offset))) | (value << offset);
__raw_writel(l, reg);
+ spin_unlock_irqrestore(&port->lock, flags);
}
static int mxc_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -294,6 +300,8 @@ int __init mxc_gpio_init(struct mxc_gpio
port[i].chip.base = i * 32;
port[i].chip.ngpio = 32;
+ spin_lock_init(&port[i].lock);
+
/* its a serious configuration bug when it fails */
BUG_ON( gpiochip_add(&port[i].chip) < 0 );
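
The locking added above is the standard pattern for a register shared between task and interrupt context: the read-modify-write must be atomic, so it runs under a spinlock with interrupts disabled. A minimal sketch (names are illustrative, not the driver's):

static void rmw_set_bit(void __iomem *reg, spinlock_t *lock,
			unsigned offset, int on)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(lock, flags);	/* nothing can interleave the RMW */
	val = __raw_readl(reg);
	if (on)
		val |= 1 << offset;
	else
		val &= ~(1 << offset);
	__raw_writel(val, reg);
	spin_unlock_irqrestore(lock, flags);
}
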
file:894d2f87c85600c495d117a6d0f524060ee15aaa -> file:7a0dc5aa2479c4d46794279ebf54d425e2f0fc30
--- a/arch/arm/plat-mxc/include/mach/gpio.h
+++ b/arch/arm/plat-mxc/include/mach/gpio.h
@@ -19,6 +19,7 @@
#ifndef __ASM_ARCH_MXC_GPIO_H__
#define __ASM_ARCH_MXC_GPIO_H__
+#include <linux/spinlock.h>
#include <mach/hardware.h>
#include <asm-generic/gpio.h>
@@ -36,6 +37,7 @@ struct mxc_gpio_port {
int virtual_irq_start;
struct gpio_chip chip;
u32 both_edges;
+ spinlock_t lock;
};
int mxc_gpio_init(struct mxc_gpio_port*, int);
file:674a8374c6d9ec989616827a20cd1895dd5fbcbb -> file:01ae69be074a6794abc65b29155aa80fdebdc274
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -677,12 +677,19 @@ sba_alloc_range(struct ioc *ioc, struct
spin_unlock_irqrestore(&ioc->saved_lock, flags);
pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
- if (unlikely(pide >= (ioc->res_size << 3)))
- panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
- ioc->ioc_hpa);
+ if (unlikely(pide >= (ioc->res_size << 3))) {
+ printk(KERN_WARNING "%s: I/O MMU @ %p is "
+ "out of mapping resources, %u %u %lx\n",
+ __func__, ioc->ioc_hpa, ioc->res_size,
+ pages_needed, dma_get_seg_boundary(dev));
+ return -1;
+ }
#else
- panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
- ioc->ioc_hpa);
+ printk(KERN_WARNING "%s: I/O MMU @ %p is "
+ "out of mapping resources, %u %u %lx\n",
+ __func__, ioc->ioc_hpa, ioc->res_size,
+ pages_needed, dma_get_seg_boundary(dev));
+ return -1;
#endif
}
}
@@ -965,6 +972,8 @@ static dma_addr_t sba_map_page(struct de
#endif
pide = sba_alloc_range(ioc, dev, size);
+ if (pide < 0)
+ return 0;
iovp = (dma_addr_t) pide << iovp_shift;
@@ -1320,6 +1329,7 @@ sba_coalesce_chunks(struct ioc *ioc, str
unsigned long dma_offset, dma_len; /* start/len of DMA stream */
int n_mappings = 0;
unsigned int max_seg_size = dma_get_max_seg_size(dev);
+ int idx;
while (nents > 0) {
unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
@@ -1418,16 +1428,22 @@ sba_coalesce_chunks(struct ioc *ioc, str
vcontig_sg->dma_length = vcontig_len;
dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
ASSERT(dma_len <= DMA_CHUNK_SIZE);
- dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
- | (sba_alloc_range(ioc, dev, dma_len) << iovp_shift)
- | dma_offset);
+ idx = sba_alloc_range(ioc, dev, dma_len);
+ if (idx < 0) {
+ dma_sg->dma_length = 0;
+ return -1;
+ }
+ dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift)
+ | dma_offset);
n_mappings++;
}
return n_mappings;
}
-
+static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs);
/**
* sba_map_sg - map Scatter/Gather list
* @dev: instance of PCI owned by the driver that's asking.
@@ -1493,6 +1509,10 @@ static int sba_map_sg_attrs(struct devic
** Access to the virtual address is what forces a two pass algorithm.
*/
coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
+ if (coalesced < 0) {
+ sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
+ return 0;
+ }
/*
** Program the I/O Pdir
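
Reduced to its shape, the sba_iommu change above turns map-resource exhaustion from a panic() into a recoverable error: sba_alloc_range() reports -1, and each caller translates that into the DMA API's failure convention (a 0 dma_addr_t, or 0 mapped scatter-gather entries). A sketch with an illustrative helper name:

static dma_addr_t map_one_page(struct ioc *ioc, struct device *dev,
			       size_t size)
{
	int pide = sba_alloc_range(ioc, dev, size);

	if (pide < 0)
		return 0;	/* mapping failed; the driver may retry */
	return (dma_addr_t)pide << iovp_shift;
}
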
file:dfcf75b8426d8bde81dd8b7fda51ad5f12df137c -> file:c8662cd40fdc97aa070a6588db18d98ea135286e
--- a/arch/ia64/include/asm/compat.h
+++ b/arch/ia64/include/asm/compat.h
@@ -198,7 +198,7 @@ ptr_to_compat(void __user *uptr)
}
static __inline__ void __user *
-compat_alloc_user_space (long len)
+arch_compat_alloc_user_space (long len)
{
struct pt_regs *regs = task_pt_regs(current);
return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
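
Context for this rename, which repeats for every architecture below: the generic compat_alloc_user_space() becomes a common wrapper that validates the arch-supplied pointer before anything is copied through it (the fix for the 64-bit compat stack-pointer hole). A sketch of that wrapper as I understand the generic-side change; its actual body lives outside this diff:

void __user *compat_alloc_user_space(unsigned long len)
{
	void __user *ptr;

	/* reject lengths that could never fit the 32-bit compat space */
	if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
		return NULL;

	ptr = arch_compat_alloc_user_space(len);
	if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
		return NULL;	/* stack pointer outside the user range */

	return ptr;
}
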
file:3567d54f8cee7533ecba41847c5f9957d9481296 -> file:331d42bda77ae97f457b13f970aa83c40e11d4b0
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -420,22 +420,31 @@ EX(.fail_efault, ld8 r14=[r33]) // r14
;;
RSM_PSR_I(p0, r18, r19) // mask interrupt delivery
- mov ar.ccv=0
andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP
+ mov r8=EINVAL // default to EINVAL
#ifdef CONFIG_SMP
- mov r17=1
- ;;
- cmpxchg4.acq r18=[r31],r17,ar.ccv // try to acquire the lock
- mov r8=EINVAL // default to EINVAL
+ // __ticket_spin_trylock(r31)
+ ld4 r17=[r31]
;;
+ mov.m ar.ccv=r17
+ extr.u r9=r17,17,15
+ adds r19=1,r17
+ extr.u r18=r17,0,15
+ ;;
+ cmp.eq p6,p7=r9,r18
+ ;;
+(p6) cmpxchg4.acq r9=[r31],r19,ar.ccv
+(p6) dep.z r20=r19,1,15 // next serving ticket for unlock
+(p7) br.cond.spnt.many .lock_contention
+ ;;
+ cmp4.eq p0,p7=r9,r17
+ adds r31=2,r31
+(p7) br.cond.spnt.many .lock_contention
ld8 r3=[r2] // re-read current->blocked now that we hold the lock
- cmp4.ne p6,p0=r18,r0
-(p6) br.cond.spnt.many .lock_contention
;;
#else
ld8 r3=[r2] // re-read current->blocked now that we hold the lock
- mov r8=EINVAL // default to EINVAL
#endif
add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16
add r19=IA64_TASK_SIGNAL_OFFSET,r16
@@ -490,7 +499,9 @@ EX(.fail_efault, ld8 r14=[r33]) // r14
(p6) br.cond.spnt.few 1b // yes -> retry
#ifdef CONFIG_SMP
- st4.rel [r31]=r0 // release the lock
+ // __ticket_spin_unlock(r31)
+ st2.rel [r31]=r20
+ mov r20=0 // i must not leak kernel bits...
#endif
SSM_PSR_I(p0, p9, r31)
;;
@@ -512,7 +523,8 @@ EX(.fail_efault, (p15) st8 [r34]=r3)
.sig_pending:
#ifdef CONFIG_SMP
- st4.rel [r31]=r0 // release the lock
+ // __ticket_spin_unlock(r31)
+ st2.rel [r31]=r20 // release the lock
#endif
SSM_PSR_I(p0, p9, r17)
;;
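
The fsys.S rework hand-codes the kernel's ticket spinlock in place of the old xchg-style lock, so the fast syscall path queues fairly with C code taking the same lock. A C-level sketch of what the new assembly does, assuming the ia64 layout of the era (next ticket in bits 0-14, now-serving in bits 17-31, written as the half-word at byte offset 2 on unlock):

static int ticket_trylock(u32 *lock)
{
	u32 old = *lock;
	u32 ticket  = old & 0x7fff;		/* extr.u r18=r17,0,15 */
	u32 serving = (old >> 17) & 0x7fff;	/* extr.u r9=r17,17,15 */

	if (ticket != serving)
		return 0;	/* held: the asm branches to .lock_contention */

	/* claim the next ticket; cmpxchg fails if another CPU raced us */
	return cmpxchg(lock, old, old + 1) == old;
}

The matching unlock is the st2.rel above: a release-ordered store of the next now-serving value into the upper half-word.
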
file:6c89228560493df8d17d6e0f7d1b65d28ee543ad -> file:4a746ea838ff37b423f92f31a8eecc8207f6217a
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -25,7 +25,7 @@ static int ia64_set_msi_irq_affinity(uns
if (irq_prepare_move(irq, cpu))
return -1;
- read_msi_msg(irq, &msg);
+ get_cached_msi_msg(irq, &msg);
addr = msg.address_lo;
addr &= MSI_ADDR_DEST_ID_MASK;
file:4990495d753189933bebf64f18d1f52303b6e6cd -> file:a35c661e5e89a544b097f731af1e65486c3077c2
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -473,7 +473,7 @@ void update_vsyscall_tz(void)
{
}
-void update_vsyscall(struct timespec *wall, struct clocksource *c)
+void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult)
{
unsigned long flags;
@@ -481,7 +481,7 @@ void update_vsyscall(struct timespec *wa
/* copy fsyscall clock data */
fsyscall_gtod_data.clk_mask = c->mask;
- fsyscall_gtod_data.clk_mult = c->mult;
+ fsyscall_gtod_data.clk_mult = mult;
fsyscall_gtod_data.clk_shift = c->shift;
fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
fsyscall_gtod_data.clk_cycle_last = c->cycle_last;
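
This signature change (mirrored by the powerpc and s390 hunks later in this series) exists because the timekeeping core may run with an NTP-adjusted multiplier; the vsyscall/fsyscall copy must use the multiplier actually in effect, or user-space gettimeofday() drifts from the kernel clock. The conversion both sides must agree on:

static u64 cycles_to_ns(u64 cycles, u32 mult, u32 shift)
{
	/* mult is the (possibly NTP-adjusted) value passed to
	 * update_vsyscall(), not the raw clocksource field */
	return (cycles * mult) >> shift;
}
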
file:fbbfb970120128a29df68c549895187ea3e3013e -> file:9ab2617e46ec1a292988d412a60aa2812fcb9cc7
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -174,7 +174,7 @@ static int sn_set_msi_irq_affinity(unsig
* Release XIO resources for the old MSI PCI address
*/
- read_msi_msg(irq, &msg);
+ get_cached_msi_msg(irq, &msg);
sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
pdev = sn_pdev->pdi_linux_pcidev;
provider = SN_PCIDEV_BUSPROVIDER(pdev);
file:34187354304a6217d16ff3dab5315e605409ab05 -> file:f76c8581d747f90909f8f208c6054f0ae5589064
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -69,12 +69,16 @@ export MMU DTB
all: linux.bin
-BOOT_TARGETS = linux.bin linux.bin.gz simpleImage.%
+# With make 3.82 we cannot mix normal and wildcard targets
+BOOT_TARGETS1 = linux.bin linux.bin.gz
+BOOT_TARGETS2 = simpleImage.%
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
-$(BOOT_TARGETS): vmlinux
+$(BOOT_TARGETS1): vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+$(BOOT_TARGETS2): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
define archhelp
file:dd75d673447e37147c438991829b70c15c7c721b -> file:09e7128ec0f0d31afdb3d4574bf46a7032d3d85f
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -434,7 +434,7 @@ static __inline__ void atomic64_add(long
__asm__ __volatile__(
" .set mips3 \n"
"1: lld %0, %1 # atomic64_add \n"
- " addu %0, %2 \n"
+ " daddu %0, %2 \n"
" scd %0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
@@ -446,7 +446,7 @@ static __inline__ void atomic64_add(long
__asm__ __volatile__(
" .set mips3 \n"
"1: lld %0, %1 # atomic64_add \n"
- " addu %0, %2 \n"
+ " daddu %0, %2 \n"
" scd %0, %1 \n"
" beqz %0, 2f \n"
" .subsection 2 \n"
@@ -479,7 +479,7 @@ static __inline__ void atomic64_sub(long
__asm__ __volatile__(
" .set mips3 \n"
"1: lld %0, %1 # atomic64_sub \n"
- " subu %0, %2 \n"
+ " dsubu %0, %2 \n"
" scd %0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
@@ -491,7 +491,7 @@ static __inline__ void atomic64_sub(long
__asm__ __volatile__(
" .set mips3 \n"
"1: lld %0, %1 # atomic64_sub \n"
- " subu %0, %2 \n"
+ " dsubu %0, %2 \n"
" scd %0, %1 \n"
" beqz %0, 2f \n"
" .subsection 2 \n"
@@ -524,10 +524,10 @@ static __inline__ long atomic64_add_retu
__asm__ __volatile__(
" .set mips3 \n"
"1: lld %1, %2 # atomic64_add_return \n"
- " addu %0, %1, %3 \n"
+ " daddu %0, %1, %3 \n"
" scd %0, %2 \n"
" beqzl %0, 1b \n"
- " addu %0, %1, %3 \n"
+ " daddu %0, %1, %3 \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
@@ -538,10 +538,10 @@ static __inline__ long atomic64_add_retu
__asm__ __volatile__(
" .set mips3 \n"
"1: lld %1, %2 # atomic64_add_return \n"
- " addu %0, %1, %3 \n"
+ " daddu %0, %1, %3 \n"
" scd %0, %2 \n"
" beqz %0, 2f \n"
- " addu %0, %1, %3 \n"
+ " daddu %0, %1, %3 \n"
" .subsection 2 \n"
"2: b 1b \n"
" .previous \n"
@@ -576,10 +576,10 @@ static __inline__ long atomic64_sub_retu
__asm__ __volatile__(
" .set mips3 \n"
"1: lld %1, %2 # atomic64_sub_return \n"
- " subu %0, %1, %3 \n"
+ " dsubu %0, %1, %3 \n"
" scd %0, %2 \n"
" beqzl %0, 1b \n"
- " subu %0, %1, %3 \n"
+ " dsubu %0, %1, %3 \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
@@ -590,10 +590,10 @@ static __inline__ long atomic64_sub_retu
__asm__ __volatile__(
" .set mips3 \n"
"1: lld %1, %2 # atomic64_sub_return \n"
- " subu %0, %1, %3 \n"
+ " dsubu %0, %1, %3 \n"
" scd %0, %2 \n"
" beqz %0, 2f \n"
- " subu %0, %1, %3 \n"
+ " dsubu %0, %1, %3 \n"
" .subsection 2 \n"
"2: b 1b \n"
" .previous \n"
file:f58aed354bfd3a50095e30cbb879045b57f85410 -> file:27505bdc386e72b41727c05283f159c2ed1a2623
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -144,7 +144,7 @@ static inline compat_uptr_t ptr_to_compa
return (u32)(unsigned long)uptr;
}
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
{
struct pt_regs *regs = (struct pt_regs *)
((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1;
file:5adba4fc7c1015c6cf4e0638be471ac78e797ddd -> file:266c0036323a16d7ea768ddbb1af0bd0ff098042
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -725,10 +725,15 @@ static void __cpuinit build_r4000_tlb_re
* create the plain linear handler
*/
if (bcm1250_m3_war()) {
- UASM_i_MFC0(&p, K0, C0_BADVADDR);
- UASM_i_MFC0(&p, K1, C0_ENTRYHI);
+ unsigned int segbits = 44;
+
+ uasm_i_dmfc0(&p, K0, C0_BADVADDR);
+ uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
uasm_i_xor(&p, K0, K0, K1);
- UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
+ uasm_i_dsrl32(&p, K1, K0, 62 - 32);
+ uasm_i_dsrl(&p, K0, K0, 12 + 1);
+ uasm_i_dsll32(&p, K0, K0, 64 + 12 + 1 - segbits - 32);
+ uasm_i_or(&p, K0, K0, K1);
uasm_il_bnez(&p, &r, K0, label_leave);
/* No need for uasm_i_nop */
}
@@ -1242,10 +1247,15 @@ static void __cpuinit build_r4000_tlb_lo
memset(relocs, 0, sizeof(relocs));
if (bcm1250_m3_war()) {
- UASM_i_MFC0(&p, K0, C0_BADVADDR);
- UASM_i_MFC0(&p, K1, C0_ENTRYHI);
+ unsigned int segbits = 44;
+
+ uasm_i_dmfc0(&p, K0, C0_BADVADDR);
+ uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
uasm_i_xor(&p, K0, K0, K1);
- UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
+ uasm_i_dsrl32(&p, K1, K0, 62 - 32);
+ uasm_i_dsrl(&p, K0, K0, 12 + 1);
+ uasm_i_dsll32(&p, K0, K0, 64 + 12 + 1 - segbits - 32);
+ uasm_i_or(&p, K0, K0, K1);
uasm_il_bnez(&p, &r, K0, label_leave);
/* No need for uasm_i_nop */
}
file:f467199676a8526c67aff9464efd98d5a7c9b68c -> file:e1bd527377edc1860508da46c6a6d02753135d25
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -62,7 +62,7 @@ enum opcode {
insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl,
insn_dsrl32, insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr,
insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0,
- insn_mtc0, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd,
+ insn_mtc0, insn_or, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd,
insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw,
insn_tlbp, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori
};
@@ -116,6 +116,7 @@ static struct insn insn_table[] __cpuini
{ insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
{ insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
+ { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
{ insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
{ insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
@@ -362,6 +363,7 @@ I_u2s3u1(_lw)
I_u1u2u3(_mfc0)
I_u1u2u3(_mtc0)
I_u2u1u3(_ori)
+I_u3u1u2(_or)
I_u2s3u1(_pref)
I_0(_rfe)
I_u2s3u1(_sc)
file:c6d1e3dd82d490df1bb174da54756110c4ceb4fb -> file:5198ae5f64ee0c16ea16f9d042e6442127c54b47
--- a/arch/mips/mm/uasm.h
+++ b/arch/mips/mm/uasm.h
@@ -78,6 +78,7 @@ Ip_u2s3u1(_lw);
Ip_u1u2u3(_mfc0);
Ip_u1u2u3(_mtc0);
Ip_u2u1u3(_ori);
+Ip_u3u1u2(_or);
Ip_u2s3u1(_pref);
Ip_0(_rfe);
Ip_u2s3u1(_sc);
file:2fbfa1a8c3a9a96a4a2bc159a9eb8c986bc4297e -> file:bf80921f2f56c0b145e698fa2c673ff49c7c0636
--- a/arch/mips/mti-malta/malta-pci.c
+++ b/arch/mips/mti-malta/malta-pci.c
@@ -247,6 +247,8 @@ void __init mips_pcibios_init(void)
iomem_resource.end &= 0xfffffffffULL; /* 64 GB */
ioport_resource.end = controller->io_resource->end;
+ controller->io_map_base = mips_io_port_base;
+
register_pci_controller(controller);
}
file:eee4f3dfc410e248d5caf1e625b14dcbbb273ff3 -> file:98e86ddb86ccd6e07766d53a04998d7f3a413cc4
--- a/arch/mips/nxp/pnx8550/common/pci.c
+++ b/arch/mips/nxp/pnx8550/common/pci.c
@@ -44,6 +44,7 @@ extern struct pci_ops pnx8550_pci_ops;
static struct pci_controller pnx8550_controller = {
.pci_ops = &pnx8550_pci_ops,
+ .io_map_base = PNX8550_PORT_BASE,
.io_resource = &pci_io_resource,
.mem_resource = &pci_mem_resource,
};
file:2aed50fef10ff8b800a58c8cdc0551d654b8fbf8 -> file:64246c9c875c51d09e5c3861ca0e6f1096d50ac5
--- a/arch/mips/nxp/pnx8550/common/setup.c
+++ b/arch/mips/nxp/pnx8550/common/setup.c
@@ -113,7 +113,7 @@ void __init plat_mem_setup(void)
PNX8550_GLB2_ENAB_INTA_O = 0;
/* IO/MEM resources. */
- set_io_port_base(KSEG1);
+ set_io_port_base(PNX8550_PORT_BASE);
ioport_resource.start = 0;
ioport_resource.end = ~0;
iomem_resource.start = 0;
file:32548b5d68d6738ec7bf5709cf0a26f6055da3f4 -> file:421e1a0d1e29ddc795c629290b94698dd7a2cc07
--- a/arch/mips/pci/ops-pmcmsp.c
+++ b/arch/mips/pci/ops-pmcmsp.c
@@ -944,6 +944,7 @@ static struct pci_controller msp_pci_con
.pci_ops = &msp_pci_ops,
.mem_resource = &pci_mem_resource,
.mem_offset = 0,
+ .io_map_base = MSP_PCI_IOSPACE_BASE,
.io_resource = &pci_io_resource,
.io_offset = 0
};
file:0357946f30e6ec5d2719446eaf68a9b86e980b98 -> file:cf5e1a25cb7d7fb942c3d0ebe53e9b3c6e5e14c1
--- a/arch/mips/pci/pci-yosemite.c
+++ b/arch/mips/pci/pci-yosemite.c
@@ -54,6 +54,7 @@ static int __init pmc_yosemite_setup(voi
panic(ioremap_failed);
set_io_port_base(io_v_base);
+ py_controller.io_map_base = io_v_base;
TITAN_WRITE(RM9000x2_OCD_LKM7, TITAN_READ(RM9000x2_OCD_LKM7) | 1);
ioport_resource.end = TITAN_IO_SIZE - 1;
file:7f32611a7a5ebc1fe64e36716db7748df21c5070 -> file:7c77fa93ab33c29415e2876b92c137fecc3dd334
--- a/arch/parisc/include/asm/compat.h
+++ b/arch/parisc/include/asm/compat.h
@@ -146,7 +146,7 @@ static inline compat_uptr_t ptr_to_compa
return (u32)(unsigned long)uptr;
}
-static __inline__ void __user *compat_alloc_user_space(long len)
+static __inline__ void __user *arch_compat_alloc_user_space(long len)
{
struct pt_regs *regs = &current->thread.regs;
return (void __user *)regs->gr[30];
file:4c247e02d9b1b0e0655e47ebc15470801b4d9881 -> file:df971fa0c32f1fcbf411437a8e61ce07be5e57af
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -1123,7 +1123,6 @@ static char __attribute__((aligned(64)))
*/
int pdc_iodc_print(const unsigned char *str, unsigned count)
{
- static int posx; /* for simple TAB-Simulation... */
unsigned int i;
unsigned long flags;
@@ -1133,19 +1132,12 @@ int pdc_iodc_print(const unsigned char *
iodc_dbuf[i+0] = '\r';
iodc_dbuf[i+1] = '\n';
i += 2;
- posx = 0;
goto print;
- case '\t':
- while (posx & 7) {
- iodc_dbuf[i] = ' ';
- i++, posx++;
- }
- break;
case '\b': /* BS */
- posx -= 2;
+ i--; /* overwrite last */
default:
iodc_dbuf[i] = str[i];
- i++, posx++;
+ i++;
break;
}
}
file:1a54a3b3a3fa8da5397842d5cbbd2e370dc6d40b -> file:c107b74748908b3f3726d2eeaad470edda605111
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -158,9 +158,11 @@ drivers-$(CONFIG_OPROFILE) += arch/power
# Default to zImage, override when needed
all: zImage
-BOOT_TARGETS = zImage zImage.initrd uImage zImage% dtbImage% treeImage.% cuImage.% simpleImage.%
+# With make 3.82 we cannot mix normal and wildcard targets
+BOOT_TARGETS1 := zImage zImage.initrd uImage
+BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.%
-PHONY += $(BOOT_TARGETS)
+PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2)
boot := arch/$(ARCH)/boot
@@ -175,10 +177,16 @@ relocs_check: arch/powerpc/relocs_check.
zImage: relocs_check
endif
-$(BOOT_TARGETS): vmlinux
+$(BOOT_TARGETS1): vmlinux
+ $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+$(BOOT_TARGETS2): vmlinux
+ $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+
+
+bootwrapper_install:
$(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
-bootwrapper_install %.dtb:
+%.dtb:
$(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
define archhelp
file:4774c2f92232b8ce98177d555b4d2c592dc076e5 -> file:8d0fff39cdbae152bb03771914f1628109a5197d
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -133,7 +133,7 @@ static inline compat_uptr_t ptr_to_compa
return (u32)(unsigned long)uptr;
}
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
{
struct pt_regs *regs = current->thread.regs;
unsigned long usp = regs->gpr[1];
file:2828f9d0f66ddb1d66be47662c999282fc25fdaf -> file:fa6648aaf070524d1c2de8253b8ea35da16de3ff
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -137,6 +137,11 @@ struct device_node * find_device_pe(stru
void eeh_sysfs_add_device(struct pci_dev *pdev);
void eeh_sysfs_remove_device(struct pci_dev *pdev);
+static inline const char *eeh_pci_name(struct pci_dev *pdev)
+{
+ return pdev ? pci_name(pdev) : "<null>";
+}
+
#endif /* CONFIG_EEH */
#else /* CONFIG_PCI */
file:c38afdb45d7b066d2720d69a484f4c5a38ed71c0 -> file:0a3cf9eb4ca490459f2855c6d85a7031cd6120d9
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -563,15 +563,21 @@ __secondary_start:
/* Set thread priority to MEDIUM */
HMT_MEDIUM
- /* Do early setup for that CPU (stab, slb, hash table pointer) */
- bl .early_setup_secondary
-
/* Initialize the kernel stack. Just a repeat for iSeries. */
LOAD_REG_ADDR(r3, current_set)
sldi r28,r24,3 /* get current_set[cpu#] */
- ldx r1,r3,r28
- addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
- std r1,PACAKSAVE(r13)
+ ldx r14,r3,r28
+ addi r14,r14,THREAD_SIZE-STACK_FRAME_OVERHEAD
+ std r14,PACAKSAVE(r13)
+
+ /* Do early setup for that CPU (stab, slb, hash table pointer) */
+ bl .early_setup_secondary
+
+ /*
+ * setup the new stack pointer, but *don't* use this until
+ * translation is on.
+ */
+ mr r1, r14
/* Clear backchain so we get nice backtraces */
li r7,0
file:479574413a93fa5c7aa3b38e28537b2830517f90 -> file:ec9b95f635bbe52137c9bcd4dd45c65ca3894b2a
--- a/arch/powerpc/kernel/ppc970-pmu.c
+++ b/arch/powerpc/kernel/ppc970-pmu.c
@@ -173,9 +173,11 @@ static int p970_marked_instr_event(u64 e
switch (unit) {
case PM_VPU:
mask = 0x4c; /* byte 0 bits 2,3,6 */
+ break;
case PM_LSU0:
/* byte 2 bits 0,2,3,4,6; all of byte 1 */
mask = 0x085dff00;
+ break;
case PM_LSU1L:
mask = 0x50 << 24; /* byte 3 bits 4,6 */
break;
file:02aab7f90295b40ba03cdfc8469aeef00f6846cb -> file:7143d4ce1cd875e2a8ca38fc5fd1ba0286ddaaf0
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -864,7 +864,8 @@ static cycle_t timebase_read(struct cloc
return (cycle_t)get_tb();
}
-void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
+void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
+ u32 mult)
{
u64 t2x, stamp_xsec;
@@ -877,7 +878,7 @@ void update_vsyscall(struct timespec *wa
/* XXX this assumes clock->shift == 22 */
/* 4611686018 ~= 2^(20+64-22) / 1e9 */
- t2x = (u64) clock->mult * 4611686018ULL;
+ t2x = (u64) mult * 4611686018ULL;
stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
do_div(stamp_xsec, 1000000000);
stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
file:ccd8dd03b8c987e701e476fe43a8f84348901d13 -> file:3304f32fc7b897c5ce2183bd02f426fdaff522ab
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -491,7 +491,7 @@ int eeh_dn_check_failure(struct device_n
pdn->eeh_mode & EEH_MODE_NOCHECK) {
ignored_check++;
pr_debug("EEH: Ignored check (%x) for %s %s\n",
- pdn->eeh_mode, pci_name (dev), dn->full_name);
+ pdn->eeh_mode, eeh_pci_name(dev), dn->full_name);
return 0;
}
@@ -515,7 +515,7 @@ int eeh_dn_check_failure(struct device_n
printk (KERN_ERR "EEH: %d reads ignored for recovering device at "
"location=%s driver=%s pci addr=%s\n",
pdn->eeh_check_count, location,
- dev->driver->name, pci_name(dev));
+ dev->driver->name, eeh_pci_name(dev));
printk (KERN_ERR "EEH: Might be infinite loop in %s driver\n",
dev->driver->name);
dump_stack();
file:0e8db6771252a2f3dcb4c28891e0f6d2958389a0 -> file:52c4b4038cd7c6ee685359152694e13e9ec41a2e
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -353,7 +353,7 @@ struct pci_dn * handle_eeh_events (struc
location = location ? location : "unknown";
printk(KERN_ERR "EEH: Error: Cannot find partition endpoint "
"for location=%s pci addr=%s\n",
- location, pci_name(event->dev));
+ location, eeh_pci_name(event->dev));
return NULL;
}
@@ -384,7 +384,7 @@ struct pci_dn * handle_eeh_events (struc
pci_str = pci_name (frozen_pdn->pcidev);
drv_str = pcid_name (frozen_pdn->pcidev);
} else {
- pci_str = pci_name (event->dev);
+ pci_str = eeh_pci_name(event->dev);
drv_str = pcid_name (event->dev);
}
file:ddb80f5d850b77231783b7f05c1d7adadd6f9aed -> file:ec5df8f519c7417327923bcadafdbda76874ec04
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -80,7 +80,7 @@ static int eeh_event_handler(void * dumm
eeh_mark_slot(event->dn, EEH_MODE_RECOVERING);
printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n",
- pci_name(event->dev));
+ eeh_pci_name(event->dev));
pdn = handle_eeh_events(event);
file:01a08020bc0e2bb3757e783ca51ee35478bbbc52 -> file:0c940d38d6b7dc60e6a816991d3e4b8acc642774
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -180,7 +180,7 @@ static inline int is_compat_task(void)
#endif
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
{
unsigned long stack;
file:f23961ada7fb2ce8c99e3b240627d8e1016e1ba2 -> file:258ba88b7b5039edb8ed6b849774966d1e717bda
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -183,6 +183,7 @@ struct s390_idle_data {
unsigned long long idle_count;
unsigned long long idle_enter;
unsigned long long idle_time;
+ int nohz_delay;
};
DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
@@ -198,4 +199,11 @@ static inline void s390_idle_check(void)
vtime_start_cpu();
}
+static inline int s390_nohz_delay(int cpu)
+{
+ return per_cpu(s390_idle, cpu).nohz_delay != 0;
+}
+
+#define arch_needs_cpu(cpu) s390_nohz_delay(cpu)
+
#endif /* _S390_CPUTIME_H */
file:015e27da40eb100e5375d356c1160d4c3a98c552 -> file:24fd61132e35eea95497e49410b34cbd1f0831c7
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -95,7 +95,6 @@ EXPORT_SYMBOL_GPL(s390_handle_mcck);
static int notrace s390_revalidate_registers(struct mci *mci)
{
int kill_task;
- u64 tmpclock;
u64 zero;
void *fpt_save_area, *fpt_creg_save_area;
@@ -214,11 +213,10 @@ static int notrace s390_revalidate_regis
: "0", "cc");
#endif
/* Revalidate clock comparator register */
- asm volatile(
- " stck 0(%1)\n"
- " sckc 0(%1)"
- : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory");
-
+ if (S390_lowcore.clock_comparator == -1)
+ set_clock_comparator(get_clock());
+ else
+ set_clock_comparator(S390_lowcore.clock_comparator);
/* Check if old PSW is valid */
if (!mci->wp)
/*
file:0de305b598cee30a1352f67d4ded1eda4f0d3be0 -> file:59618bcd99b7a64bc8b9077b688d907ac73fc0d8
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -126,6 +126,8 @@ void __irq_entry do_extint(struct pt_reg
/* Serve timer interrupts first. */
clock_comparator_work();
kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
+ if (code != 0x1004)
+ __get_cpu_var(s390_idle).nohz_delay = 1;
index = ext_hash(code);
for (p = ext_int_hash[index]; p; p = p->next) {
if (likely(p->code == code))
file:34162a0b2caa6483f397306cc3e21a457b83b051 -> file:68e1ecf5ebabf657a6c7e053577752dc69d6046b
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -214,7 +214,8 @@ struct clocksource * __init clocksource_
return &clocksource_tod;
}
-void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
+void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
+ u32 mult)
{
if (clock != &clocksource_tod)
return;
file:c41bb0d416e1a25a56f50dde6d40f13a17cab125 -> file:b59a812a010e2b1bd0861e30ae3d662c45d3e04a
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -167,6 +167,8 @@ void vtime_stop_cpu(void)
/* Wait for external, I/O or machine check interrupt. */
psw.mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_IO | PSW_MASK_EXT;
+ idle->nohz_delay = 0;
+
/* Check if the CPU timer needs to be reprogrammed. */
if (vq->do_spt) {
__u64 vmax = VTIMER_MAX_SLICE;
file:752b362bf651d7e1bdf84a3f54601ddf863e8b12 -> file:7c37ec359ec29ad40fc2cf47642316ac3cbba8bc
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -29,17 +29,21 @@ static void __udelay_disabled(unsigned l
{
unsigned long mask, cr0, cr0_saved;
u64 clock_saved;
+ u64 end;
+ mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
+ end = get_clock() + (usecs << 12);
clock_saved = local_tick_disable();
- set_clock_comparator(get_clock() + (usecs << 12));
__ctl_store(cr0_saved, 0, 0);
cr0 = (cr0_saved & 0xffff00e0) | 0x00000800;
__ctl_load(cr0 , 0, 0);
- mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
lockdep_off();
- trace_hardirqs_on();
- __load_psw_mask(mask);
- local_irq_disable();
+ do {
+ set_clock_comparator(end);
+ trace_hardirqs_on();
+ __load_psw_mask(mask);
+ local_irq_disable();
+ } while (get_clock() < end);
lockdep_on();
__ctl_load(cr0_saved, 0, 0);
local_tick_enable(clock_saved);
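
The restructuring above copes with early wakeups: an external interrupt other than the clock comparator can end the enabled wait before the deadline, so the wait is now retried until the target TOD value is really reached (on s390, 1 microsecond is 4096 TOD units, hence usecs << 12). The shape of the loop, with wait_for_interrupt() as an illustrative stand-in for the __load_psw_mask() wait:

static void udelay_until(u64 end)
{
	do {
		set_clock_comparator(end);
		wait_for_interrupt();	/* may return before "end" */
	} while (get_clock() < end);	/* retry after a spurious wakeup */
}
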
file:f2e48009989e1e57f552066d8ae9890264590d5c -> file:f5cc06f44c174f973203ddad92d21d2d5773d878
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -20,14 +20,14 @@
#define atomic64_set(v, i) (((v)->counter) = i)
extern void atomic_add(int, atomic_t *);
-extern void atomic64_add(int, atomic64_t *);
+extern void atomic64_add(long, atomic64_t *);
extern void atomic_sub(int, atomic_t *);
-extern void atomic64_sub(int, atomic64_t *);
+extern void atomic64_sub(long, atomic64_t *);
extern int atomic_add_ret(int, atomic_t *);
-extern int atomic64_add_ret(int, atomic64_t *);
+extern long atomic64_add_ret(long, atomic64_t *);
extern int atomic_sub_ret(int, atomic_t *);
-extern int atomic64_sub_ret(int, atomic64_t *);
+extern long atomic64_sub_ret(long, atomic64_t *);
#define atomic_dec_return(v) atomic_sub_ret(1, v)
#define atomic64_dec_return(v) atomic64_sub_ret(1, v)
@@ -91,7 +91,7 @@ static inline int atomic_add_unless(atom
((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
long c, old;
c = atomic64_read(v);
file:0e706257918f35397b10657be3b95538ef23e45b -> file:612bb3862c6d4b2da47cf85de7afc66546dc2ac0
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -166,7 +166,7 @@ static inline compat_uptr_t ptr_to_compa
return (u32)(unsigned long)uptr;
}
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
{
struct pt_regs *regs = current_thread_info()->kregs;
unsigned long usp = regs->u_regs[UREG_I6];
file:679c7504625acc86deab57d1e1feada4ef408317 -> file:2889574608db72f36b3ae1ea61309f77e22e282f
--- a/arch/sparc/include/asm/io_32.h
+++ b/arch/sparc/include/asm/io_32.h
@@ -249,10 +249,14 @@ extern void iounmap(volatile void __iome
#define ioread8(X) readb(X)
#define ioread16(X) readw(X)
+#define ioread16be(X) __raw_readw(X)
#define ioread32(X) readl(X)
+#define ioread32be(X) __raw_readl(X)
#define iowrite8(val,X) writeb(val,X)
#define iowrite16(val,X) writew(val,X)
+#define iowrite16be(val,X) __raw_writew(val,X)
#define iowrite32(val,X) writel(val,X)
+#define iowrite32be(val,X) __raw_writel(val,X)
static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
{
file:4aee21dc9c6f10d8f6d0f908177f7c56f5095c72 -> file:9517d063c79c604d99d21967981f07b92cb1c70c
--- a/arch/sparc/include/asm/io_64.h
+++ b/arch/sparc/include/asm/io_64.h
@@ -468,10 +468,14 @@ static inline void iounmap(volatile void
#define ioread8(X) readb(X)
#define ioread16(X) readw(X)
+#define ioread16be(X) __raw_readw(X)
#define ioread32(X) readl(X)
+#define ioread32be(X) __raw_readl(X)
#define iowrite8(val,X) writeb(val,X)
#define iowrite16(val,X) writew(val,X)
+#define iowrite16be(val,X) __raw_writew(val,X)
#define iowrite32(val,X) writel(val,X)
+#define iowrite32be(val,X) __raw_writel(val,X)
/* Create a virtual mapping cookie for an IO port range */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
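
These mappings work because sparc is big-endian: the __raw_* accessors perform native (big-endian) loads and stores, so the *be variants need no byte swap, while ioread16()/ioread32() keep their little-endian semantics through readw()/readl(). Usage sketch, with a hypothetical register offset:

static u32 read_status(void __iomem *base)
{
	/* big-endian device register on a big-endian CPU: native load */
	return ioread32be(base + REG_STATUS);	/* REG_STATUS: illustrative */
}
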
file:a5db0317b5fbfecc523e82b7a27172252cb1f4a2 -> file:3e0b2d62303df55133d1f9547e8c984ba5dbbeb2
--- a/arch/sparc/include/asm/oplib_64.h
+++ b/arch/sparc/include/asm/oplib_64.h
@@ -185,9 +185,8 @@ extern int prom_getunumber(int syndrome_
char *buf, int buflen);
/* Retain physical memory to the caller across soft resets. */
-extern unsigned long prom_retain(const char *name,
- unsigned long pa_low, unsigned long pa_high,
- long size, long align);
+extern int prom_retain(const char *name, unsigned long size,
+ unsigned long align, unsigned long *paddr);
/* Load explicit I/D TLB entries into the calling processor. */
extern long prom_itlb_load(unsigned long index,
@@ -287,26 +286,6 @@ extern void prom_sun4v_guest_soft_state(
extern int prom_ihandle2path(int handle, char *buffer, int bufsize);
/* Client interface level routines. */
-extern long p1275_cmd(const char *, long, ...);
-
-#if 0
-#define P1275_SIZE(x) ((((long)((x) / 32)) << 32) | (x))
-#else
-#define P1275_SIZE(x) x
-#endif
-
-/* We support at most 16 input and 1 output argument */
-#define P1275_ARG_NUMBER 0
-#define P1275_ARG_IN_STRING 1
-#define P1275_ARG_OUT_BUF 2
-#define P1275_ARG_OUT_32B 3
-#define P1275_ARG_IN_FUNCTION 4
-#define P1275_ARG_IN_BUF 5
-#define P1275_ARG_IN_64B 6
-
-#define P1275_IN(x) ((x) & 0xf)
-#define P1275_OUT(x) (((x) << 4) & 0xf0)
-#define P1275_INOUT(i,o) (P1275_IN(i)|P1275_OUT(o))
-#define P1275_ARG(n,x) ((x) << ((n)*3 + 8))
+extern void p1275_cmd_direct(unsigned long *);
#endif /* !(__SPARC64_OPLIB_H) */
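
The replacement calling convention, used by every PROM call site rewritten below: a flat cell array where args[0] is the service name, args[1] the number of input cells, args[2] the number of output cells, then the inputs, then the output slots (pre-set to -1 so an unanswered call is detectable). For instance, the "open" service with one input and one output:

int example_devopen(const char *dstr)
{
	unsigned long args[5];

	args[0] = (unsigned long) "open";	/* service name */
	args[1] = 1;				/* # input cells */
	args[2] = 1;				/* # output cells */
	args[3] = (unsigned long) dstr;		/* input: device path */
	args[4] = (unsigned long) -1;		/* output slot */

	p1275_cmd_direct(args);

	return (int) args[4];			/* handle, or -1 on failure */
}
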
file:ff9ead640c4aa090f33de6f9576766b6d639e6d8 -> file:43cf002d480b09743fd0f056dd5ee9cec4abb408
--- a/arch/sparc/include/asm/parport.h
+++ b/arch/sparc/include/asm/parport.h
@@ -228,6 +228,10 @@ static const struct of_device_id ecpp_ma
.name = "parallel",
.compatible = "ns87317-ecpp",
},
+ {
+ .name = "parallel",
+ .compatible = "pnpALI,1533,3",
+ },
{},
};
file:a303c9d64d845989e313a326f4c5e92f283148a5 -> file:e4c61a18bb2843855c493bb86c45d7787e862a38
--- a/arch/sparc/include/asm/rwsem-const.h
+++ b/arch/sparc/include/asm/rwsem-const.h
@@ -5,7 +5,7 @@
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
#define RWSEM_ACTIVE_MASK 0x0000ffff
-#define RWSEM_WAITING_BIAS 0xffff0000
+#define RWSEM_WAITING_BIAS (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
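
This one-line change matters on 64-bit: the bias constants feed signed long arithmetic, and the literal 0xffff0000 is a positive value there, while the waiting bias must be negative. Demonstration:

static void bias_demo(void)
{
	long old_bias = 0xffff0000;	/* 4294901760L: positive, wrong */
	long new_bias = -0x00010000;	/* -65536L: sign-extends as intended */
}
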
file:ea22cd373c64f4bc371478d2b7be14f71cd32560 -> file:75fad425e249bc40559f98d14ead5699839bbbb8
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -453,8 +453,66 @@ static int save_fpu_state32(struct pt_re
return err;
}
-static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
- int signo, sigset_t *oldset)
+/* The I-cache flush instruction only works in the primary ASI, which
+ * right now is the nucleus, aka. kernel space.
+ *
+ * Therefore we have to kick the instructions out using the kernel
+ * side linear mapping of the physical address backing the user
+ * instructions.
+ */
+static void flush_signal_insns(unsigned long address)
+{
+ unsigned long pstate, paddr;
+ pte_t *ptep, pte;
+ pgd_t *pgdp;
+ pud_t *pudp;
+ pmd_t *pmdp;
+
+ /* Commit all stores of the instructions we are about to flush. */
+ wmb();
+
+ /* Disable cross-call reception. In this way even a very wide
+ * munmap() on another cpu can't tear down the page table
+ * hierarchy from underneath us, since that can't complete
+ * until the IPI tlb flush returns.
+ */
+
+ __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
+ __asm__ __volatile__("wrpr %0, %1, %%pstate"
+ : : "r" (pstate), "i" (PSTATE_IE));
+
+ pgdp = pgd_offset(current->mm, address);
+ if (pgd_none(*pgdp))
+ goto out_irqs_on;
+ pudp = pud_offset(pgdp, address);
+ if (pud_none(*pudp))
+ goto out_irqs_on;
+ pmdp = pmd_offset(pudp, address);
+ if (pmd_none(*pmdp))
+ goto out_irqs_on;
+
+ ptep = pte_offset_map(pmdp, address);
+ pte = *ptep;
+ if (!pte_present(pte))
+ goto out_unmap;
+
+ paddr = (unsigned long) page_address(pte_page(pte));
+
+ __asm__ __volatile__("flush %0 + %1"
+ : /* no outputs */
+ : "r" (paddr),
+ "r" (address & (PAGE_SIZE - 1))
+ : "memory");
+
+out_unmap:
+ pte_unmap(ptep);
+out_irqs_on:
+ __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
+
+}
+
+static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
+ int signo, sigset_t *oldset)
{
struct signal_frame32 __user *sf;
int sigframe_size;
@@ -547,13 +605,7 @@ static void setup_frame32(struct k_sigac
if (ka->ka_restorer) {
regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
} else {
- /* Flush instruction space. */
unsigned long address = ((unsigned long)&(sf->insns[0]));
- pgd_t *pgdp = pgd_offset(current->mm, address);
- pud_t *pudp = pud_offset(pgdp, address);
- pmd_t *pmdp = pmd_offset(pudp, address);
- pte_t *ptep;
- pte_t pte;
regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
@@ -562,34 +614,22 @@ static void setup_frame32(struct k_sigac
if (err)
goto sigsegv;
- preempt_disable();
- ptep = pte_offset_map(pmdp, address);
- pte = *ptep;
- if (pte_present(pte)) {
- unsigned long page = (unsigned long)
- page_address(pte_page(pte));
-
- wmb();
- __asm__ __volatile__("flush %0 + %1"
- : /* no outputs */
- : "r" (page),
- "r" (address & (PAGE_SIZE - 1))
- : "memory");
- }
- pte_unmap(ptep);
- preempt_enable();
+ flush_signal_insns(address);
}
- return;
+ return 0;
sigill:
do_exit(SIGILL);
+ return -EINVAL;
+
sigsegv:
force_sigsegv(signo, current);
+ return -EFAULT;
}
-static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
- unsigned long signr, sigset_t *oldset,
- siginfo_t *info)
+static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
+ unsigned long signr, sigset_t *oldset,
+ siginfo_t *info)
{
struct rt_signal_frame32 __user *sf;
int sigframe_size;
@@ -687,12 +727,7 @@ static void setup_rt_frame32(struct k_si
if (ka->ka_restorer)
regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
else {
- /* Flush instruction space. */
unsigned long address = ((unsigned long)&(sf->insns[0]));
- pgd_t *pgdp = pgd_offset(current->mm, address);
- pud_t *pudp = pud_offset(pgdp, address);
- pmd_t *pmdp = pmd_offset(pudp, address);
- pte_t *ptep;
regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
@@ -704,38 +739,32 @@ static void setup_rt_frame32(struct k_si
if (err)
goto sigsegv;
- preempt_disable();
- ptep = pte_offset_map(pmdp, address);
- if (pte_present(*ptep)) {
- unsigned long page = (unsigned long)
- page_address(pte_page(*ptep));
-
- wmb();
- __asm__ __volatile__("flush %0 + %1"
- : /* no outputs */
- : "r" (page),
- "r" (address & (PAGE_SIZE - 1))
- : "memory");
- }
- pte_unmap(ptep);
- preempt_enable();
+ flush_signal_insns(address);
}
- return;
+ return 0;
sigill:
do_exit(SIGILL);
+ return -EINVAL;
+
sigsegv:
force_sigsegv(signr, current);
+ return -EFAULT;
}
-static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
- siginfo_t *info,
- sigset_t *oldset, struct pt_regs *regs)
+static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
+ siginfo_t *info,
+ sigset_t *oldset, struct pt_regs *regs)
{
+ int err;
+
if (ka->sa.sa_flags & SA_SIGINFO)
- setup_rt_frame32(ka, regs, signr, oldset, info);
+ err = setup_rt_frame32(ka, regs, signr, oldset, info);
else
- setup_frame32(ka, regs, signr, oldset);
+ err = setup_frame32(ka, regs, signr, oldset);
+
+ if (err)
+ return err;
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -743,6 +772,10 @@ static inline void handle_signal32(unsig
sigaddset(&current->blocked,signr);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
+
+ tracehook_signal_handler(signr, info, ka, regs, 0);
+
+ return 0;
}
static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
@@ -789,16 +822,14 @@ void do_signal32(sigset_t *oldset, struc
if (signr > 0) {
if (restart_syscall)
syscall_restart32(orig_i0, regs, &ka.sa);
- handle_signal32(signr, &ka, &info, oldset, regs);
-
- /* A signal was successfully delivered; the saved
- * sigmask will have been stored in the signal frame,
- * and will be restored by sigreturn, so we can simply
- * clear the TS_RESTORE_SIGMASK flag.
- */
- current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-
- tracehook_signal_handler(signr, &info, &ka, regs, 0);
+ if (handle_signal32(signr, &ka, &info, oldset, regs) == 0) {
+ /* A signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TS_RESTORE_SIGMASK flag.
+ */
+ current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+ }
return;
}
if (restart_syscall &&
@@ -809,12 +840,14 @@ void do_signal32(sigset_t *oldset, struc
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
regs->tnpc -= 4;
+ pt_regs_clear_syscall(regs);
}
if (restart_syscall &&
regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
regs->tnpc -= 4;
+ pt_regs_clear_syscall(regs);
}
/* If there's no signal to deliver, we just put the saved sigmask
file:9882df92ba0a2c8b8da4639f7e181214930c8ed6 -> file:5e5c5fd03783c997f5c344025e8f4784182a0ddc
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -315,8 +315,8 @@ save_fpu_state(struct pt_regs *regs, __s
return err;
}
-static void setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
- int signo, sigset_t *oldset)
+static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
+ int signo, sigset_t *oldset)
{
struct signal_frame __user *sf;
int sigframe_size, err;
@@ -384,16 +384,19 @@ static void setup_frame(struct k_sigacti
/* Flush instruction space. */
flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
}
- return;
+ return 0;
sigill_and_return:
do_exit(SIGILL);
+ return -EINVAL;
+
sigsegv:
force_sigsegv(signo, current);
+ return -EFAULT;
}
-static void setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
- int signo, sigset_t *oldset, siginfo_t *info)
+static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
+ int signo, sigset_t *oldset, siginfo_t *info)
{
struct rt_signal_frame __user *sf;
int sigframe_size;
@@ -466,22 +469,30 @@ static void setup_rt_frame(struct k_siga
/* Flush instruction space. */
flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
}
- return;
+ return 0;
sigill:
do_exit(SIGILL);
+ return -EINVAL;
+
sigsegv:
force_sigsegv(signo, current);
+ return -EFAULT;
}
-static inline void
+static inline int
handle_signal(unsigned long signr, struct k_sigaction *ka,
siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
{
+ int err;
+
if (ka->sa.sa_flags & SA_SIGINFO)
- setup_rt_frame(ka, regs, signr, oldset, info);
+ err = setup_rt_frame(ka, regs, signr, oldset, info);
else
- setup_frame(ka, regs, signr, oldset);
+ err = setup_frame(ka, regs, signr, oldset);
+
+ if (err)
+ return err;
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -489,6 +500,10 @@ handle_signal(unsigned long signr, struc
sigaddset(&current->blocked, signr);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
+
+ tracehook_signal_handler(signr, info, ka, regs, 0);
+
+ return 0;
}
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -546,17 +561,15 @@ static void do_signal(struct pt_regs *re
if (signr > 0) {
if (restart_syscall)
syscall_restart(orig_i0, regs, &ka.sa);
- handle_signal(signr, &ka, &info, oldset, regs);
-
- /* a signal was successfully delivered; the saved
- * sigmask will have been stored in the signal frame,
- * and will be restored by sigreturn, so we can simply
- * clear the TIF_RESTORE_SIGMASK flag.
- */
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- clear_thread_flag(TIF_RESTORE_SIGMASK);
-
- tracehook_signal_handler(signr, &info, &ka, regs, 0);
+ if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
+ /* a signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TIF_RESTORE_SIGMASK flag.
+ */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ }
return;
}
if (restart_syscall &&
@@ -567,12 +580,14 @@ static void do_signal(struct pt_regs *re
regs->u_regs[UREG_I0] = orig_i0;
regs->pc -= 4;
regs->npc -= 4;
+ pt_regs_clear_syscall(regs);
}
if (restart_syscall &&
regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->pc -= 4;
regs->npc -= 4;
+ pt_regs_clear_syscall(regs);
}
/* if there's no signal to deliver, we just put the saved sigmask
file:9fa48c30037e5356c2f686be695ea8bcfb3613f3 -> file:006fe4515886dc6ae2a7a8e6cc9b6df9c16fda46
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -409,7 +409,7 @@ static inline void __user *get_sigframe(
return (void __user *) sp;
}
-static inline void
+static inline int
setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
int signo, sigset_t *oldset, siginfo_t *info)
{
@@ -483,26 +483,37 @@ setup_rt_frame(struct k_sigaction *ka, s
}
/* 4. return to kernel instructions */
regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
- return;
+ return 0;
sigill:
do_exit(SIGILL);
+ return -EINVAL;
+
sigsegv:
force_sigsegv(signo, current);
+ return -EFAULT;
}
-static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
- siginfo_t *info,
- sigset_t *oldset, struct pt_regs *regs)
+static inline int handle_signal(unsigned long signr, struct k_sigaction *ka,
+ siginfo_t *info,
+ sigset_t *oldset, struct pt_regs *regs)
{
- setup_rt_frame(ka, regs, signr, oldset,
- (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
+ int err;
+
+ err = setup_rt_frame(ka, regs, signr, oldset,
+ (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
+ if (err)
+ return err;
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NOMASK))
sigaddset(&current->blocked,signr);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
+
+ tracehook_signal_handler(signr, info, ka, regs, 0);
+
+ return 0;
}
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -571,16 +582,14 @@ static void do_signal(struct pt_regs *re
if (signr > 0) {
if (restart_syscall)
syscall_restart(orig_i0, regs, &ka.sa);
- handle_signal(signr, &ka, &info, oldset, regs);
-
- /* A signal was successfully delivered; the saved
- * sigmask will have been stored in the signal frame,
- * and will be restored by sigreturn, so we can simply
- * clear the TS_RESTORE_SIGMASK flag.
- */
- current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-
- tracehook_signal_handler(signr, &info, &ka, regs, 0);
+ if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
+ /* A signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TS_RESTORE_SIGMASK flag.
+ */
+ current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+ }
return;
}
if (restart_syscall &&
@@ -591,12 +600,14 @@ static void do_signal(struct pt_regs *re
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
regs->tnpc -= 4;
+ pt_regs_clear_syscall(regs);
}
if (restart_syscall &&
regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
regs->tnpc -= 4;
+ pt_regs_clear_syscall(regs);
}
/* If there's no signal to deliver, we just put the saved sigmask
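
Two fixes run through all three sparc signal paths above: the setup_*frame() helpers now return an error, so the caller commits the new blocked mask and clears the restore-sigmask flag only when a frame was actually written; and every syscall-restart branch calls pt_regs_clear_syscall(), so one interrupted syscall cannot be restarted a second time by a later signal. The calling side, reduced:

if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
	/* frame written: safe to drop the saved-sigmask state */
	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
}

/* restart path: rewind the PC, then forget the syscall state */
regs->tpc -= 4;
regs->tnpc -= 4;
pt_regs_clear_syscall(regs);
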
file:5f27ad779c0c578097218e44b005eaed770acf7d -> file:9c86b4b7d4290b75a5967c28790c59b3ee24b5c9
--- a/arch/sparc/prom/cif.S
+++ b/arch/sparc/prom/cif.S
@@ -9,18 +9,18 @@
#include <asm/thread_info.h>
.text
- .globl prom_cif_interface
-prom_cif_interface:
- sethi %hi(p1275buf), %o0
- or %o0, %lo(p1275buf), %o0
- ldx [%o0 + 0x010], %o1 ! prom_cif_stack
- save %o1, -192, %sp
- ldx [%i0 + 0x008], %l2 ! prom_cif_handler
+ .globl prom_cif_direct
+prom_cif_direct:
+ sethi %hi(p1275buf), %o1
+ or %o1, %lo(p1275buf), %o1
+ ldx [%o1 + 0x0010], %o2 ! prom_cif_stack
+ save %o2, -192, %sp
+ ldx [%i1 + 0x0008], %l2 ! prom_cif_handler
mov %g4, %l0
mov %g5, %l1
mov %g6, %l3
call %l2
- add %i0, 0x018, %o0 ! prom_args
+ mov %i0, %o0 ! prom_args
mov %l0, %g4
mov %l1, %g5
mov %l3, %g6
file:e1c3fc87484dd3978126050e60abc786cba8e0bb -> file:7b707b6a3fcaf6162e563bc7f7f6624fa1e047ca
--- a/arch/sparc/prom/console_64.c
+++ b/arch/sparc/prom/console_64.c
@@ -21,14 +21,22 @@ extern int prom_stdin, prom_stdout;
inline int
prom_nbgetchar(void)
{
+ unsigned long args[7];
char inc;
- if (p1275_cmd("read", P1275_ARG(1,P1275_ARG_OUT_BUF)|
- P1275_INOUT(3,1),
- prom_stdin, &inc, P1275_SIZE(1)) == 1)
+ args[0] = (unsigned long) "read";
+ args[1] = 3;
+ args[2] = 1;
+ args[3] = (unsigned int) prom_stdin;
+ args[4] = (unsigned long) &inc;
+ args[5] = 1;
+ args[6] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ if (args[6] == 1)
return inc;
- else
- return -1;
+ return -1;
}
/* Non blocking put character to console device, returns -1 if
@@ -37,12 +45,22 @@ prom_nbgetchar(void)
inline int
prom_nbputchar(char c)
{
+ unsigned long args[7];
char outc;
outc = c;
- if (p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)|
- P1275_INOUT(3,1),
- prom_stdout, &outc, P1275_SIZE(1)) == 1)
+
+ args[0] = (unsigned long) "write";
+ args[1] = 3;
+ args[2] = 1;
+ args[3] = (unsigned int) prom_stdout;
+ args[4] = (unsigned long) &outc;
+ args[5] = 1;
+ args[6] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ if (args[6] == 1)
return 0;
else
return -1;
@@ -68,7 +86,15 @@ prom_putchar(char c)
void
prom_puts(const char *s, int len)
{
- p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)|
- P1275_INOUT(3,1),
- prom_stdout, s, P1275_SIZE(len));
+ unsigned long args[7];
+
+ args[0] = (unsigned long) "write";
+ args[1] = 3;
+ args[2] = 1;
+ args[3] = (unsigned int) prom_stdout;
+ args[4] = (unsigned long) s;
+ args[5] = len;
+ args[6] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
}
file:9dbd803e46e1f4c460300c6f5f2485f3d6c54e7d -> file:a017119e7ef17c40ee8fa8328252f921c59a6ca8
--- a/arch/sparc/prom/devops_64.c
+++ b/arch/sparc/prom/devops_64.c
@@ -18,16 +18,32 @@
int
prom_devopen(const char *dstr)
{
- return p1275_cmd ("open", P1275_ARG(0,P1275_ARG_IN_STRING)|
- P1275_INOUT(1,1),
- dstr);
+ unsigned long args[5];
+
+ args[0] = (unsigned long) "open";
+ args[1] = 1;
+ args[2] = 1;
+ args[3] = (unsigned long) dstr;
+ args[4] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ return (int) args[4];
}
/* Close the device described by device handle 'dhandle'. */
int
prom_devclose(int dhandle)
{
- p1275_cmd ("close", P1275_INOUT(1,0), dhandle);
+ unsigned long args[4];
+
+ args[0] = (unsigned long) "close";
+ args[1] = 1;
+ args[2] = 0;
+ args[3] = (unsigned int) dhandle;
+
+ p1275_cmd_direct(args);
+
return 0;
}
@@ -37,5 +53,15 @@ prom_devclose(int dhandle)
void
prom_seek(int dhandle, unsigned int seekhi, unsigned int seeklo)
{
- p1275_cmd ("seek", P1275_INOUT(3,1), dhandle, seekhi, seeklo);
+ unsigned long args[7];
+
+ args[0] = (unsigned long) "seek";
+ args[1] = 3;
+ args[2] = 1;
+ args[3] = (unsigned int) dhandle;
+ args[4] = seekhi;
+ args[5] = seeklo;
+ args[6] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
}
file:39fc6af21b7c55ddc0e752c80c745ed6c018820a -> file:6cb1581d6aef507fca9c036708cebe24e5ab053c
--- a/arch/sparc/prom/misc_64.c
+++ b/arch/sparc/prom/misc_64.c
@@ -20,10 +20,17 @@
int prom_service_exists(const char *service_name)
{
- int err = p1275_cmd("test", P1275_ARG(0, P1275_ARG_IN_STRING) |
- P1275_INOUT(1, 1), service_name);
+ unsigned long args[5];
- if (err)
+ args[0] = (unsigned long) "test";
+ args[1] = 1;
+ args[2] = 1;
+ args[3] = (unsigned long) service_name;
+ args[4] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ if (args[4])
return 0;
return 1;
}
@@ -31,30 +38,47 @@ int prom_service_exists(const char *serv
void prom_sun4v_guest_soft_state(void)
{
const char *svc = "SUNW,soft-state-supported";
+ unsigned long args[3];
if (!prom_service_exists(svc))
return;
- p1275_cmd(svc, P1275_INOUT(0, 0));
+ args[0] = (unsigned long) svc;
+ args[1] = 0;
+ args[2] = 0;
+ p1275_cmd_direct(args);
}
/* Reset and reboot the machine with the command 'bcommand'. */
void prom_reboot(const char *bcommand)
{
+ unsigned long args[4];
+
#ifdef CONFIG_SUN_LDOMS
if (ldom_domaining_enabled)
ldom_reboot(bcommand);
#endif
- p1275_cmd("boot", P1275_ARG(0, P1275_ARG_IN_STRING) |
- P1275_INOUT(1, 0), bcommand);
+ args[0] = (unsigned long) "boot";
+ args[1] = 1;
+ args[2] = 0;
+ args[3] = (unsigned long) bcommand;
+
+ p1275_cmd_direct(args);
}
/* Forth evaluate the expression contained in 'fstring'. */
void prom_feval(const char *fstring)
{
+ unsigned long args[5];
+
if (!fstring || fstring[0] == 0)
return;
- p1275_cmd("interpret", P1275_ARG(0, P1275_ARG_IN_STRING) |
- P1275_INOUT(1, 1), fstring);
+ args[0] = (unsigned long) "interpret";
+ args[1] = 1;
+ args[2] = 1;
+ args[3] = (unsigned long) fstring;
+ args[4] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
}
EXPORT_SYMBOL(prom_feval);
@@ -68,6 +92,7 @@ extern void smp_release(void);
*/
void prom_cmdline(void)
{
+ unsigned long args[3];
unsigned long flags;
local_irq_save(flags);
@@ -76,7 +101,11 @@ void prom_cmdline(void)
smp_capture();
#endif
- p1275_cmd("enter", P1275_INOUT(0, 0));
+ args[0] = (unsigned long) "enter";
+ args[1] = 0;
+ args[2] = 0;
+
+ p1275_cmd_direct(args);
#ifdef CONFIG_SMP
smp_release();
@@ -90,22 +119,32 @@ void prom_cmdline(void)
*/
void notrace prom_halt(void)
{
+ unsigned long args[3];
+
#ifdef CONFIG_SUN_LDOMS
if (ldom_domaining_enabled)
ldom_power_off();
#endif
again:
- p1275_cmd("exit", P1275_INOUT(0, 0));
+ args[0] = (unsigned long) "exit";
+ args[1] = 0;
+ args[2] = 0;
+ p1275_cmd_direct(args);
goto again; /* PROM is out to get me -DaveM */
}
void prom_halt_power_off(void)
{
+ unsigned long args[3];
+
#ifdef CONFIG_SUN_LDOMS
if (ldom_domaining_enabled)
ldom_power_off();
#endif
- p1275_cmd("SUNW,power-off", P1275_INOUT(0, 0));
+ args[0] = (unsigned long) "SUNW,power-off";
+ args[1] = 0;
+ args[2] = 0;
+ p1275_cmd_direct(args);
/* if nothing else helps, we just halt */
prom_halt();
@@ -114,10 +153,15 @@ void prom_halt_power_off(void)
/* Set prom sync handler to call function 'funcp'. */
void prom_setcallback(callback_func_t funcp)
{
+ unsigned long args[5];
if (!funcp)
return;
- p1275_cmd("set-callback", P1275_ARG(0, P1275_ARG_IN_FUNCTION) |
- P1275_INOUT(1, 1), funcp);
+ args[0] = (unsigned long) "set-callback";
+ args[1] = 1;
+ args[2] = 1;
+ args[3] = (unsigned long) funcp;
+ args[4] = (unsigned long) -1;
+ p1275_cmd_direct(args);
}
/* Get the idprom and stuff it into buffer 'idbuf'. Returns the
@@ -173,57 +217,61 @@ static int prom_get_memory_ihandle(void)
}
/* Load explicit I/D TLB entries. */
+static long tlb_load(const char *type, unsigned long index,
+ unsigned long tte_data, unsigned long vaddr)
+{
+ unsigned long args[9];
+
+ args[0] = (unsigned long) prom_callmethod_name;
+ args[1] = 5;
+ args[2] = 1;
+ args[3] = (unsigned long) type;
+ args[4] = (unsigned int) prom_get_mmu_ihandle();
+ args[5] = vaddr;
+ args[6] = tte_data;
+ args[7] = index;
+ args[8] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ return (long) args[8];
+}
+
long prom_itlb_load(unsigned long index,
unsigned long tte_data,
unsigned long vaddr)
{
- return p1275_cmd(prom_callmethod_name,
- (P1275_ARG(0, P1275_ARG_IN_STRING) |
- P1275_ARG(2, P1275_ARG_IN_64B) |
- P1275_ARG(3, P1275_ARG_IN_64B) |
- P1275_INOUT(5, 1)),
- "SUNW,itlb-load",
- prom_get_mmu_ihandle(),
- /* And then our actual args are pushed backwards. */
- vaddr,
- tte_data,
- index);
+ return tlb_load("SUNW,itlb-load", index, tte_data, vaddr);
}
long prom_dtlb_load(unsigned long index,
unsigned long tte_data,
unsigned long vaddr)
{
- return p1275_cmd(prom_callmethod_name,
- (P1275_ARG(0, P1275_ARG_IN_STRING) |
- P1275_ARG(2, P1275_ARG_IN_64B) |
- P1275_ARG(3, P1275_ARG_IN_64B) |
- P1275_INOUT(5, 1)),
- "SUNW,dtlb-load",
- prom_get_mmu_ihandle(),
- /* And then our actual args are pushed backwards. */
- vaddr,
- tte_data,
- index);
+ return tlb_load("SUNW,dtlb-load", index, tte_data, vaddr);
}
int prom_map(int mode, unsigned long size,
unsigned long vaddr, unsigned long paddr)
{
- int ret = p1275_cmd(prom_callmethod_name,
- (P1275_ARG(0, P1275_ARG_IN_STRING) |
- P1275_ARG(3, P1275_ARG_IN_64B) |
- P1275_ARG(4, P1275_ARG_IN_64B) |
- P1275_ARG(6, P1275_ARG_IN_64B) |
- P1275_INOUT(7, 1)),
- prom_map_name,
- prom_get_mmu_ihandle(),
- mode,
- size,
- vaddr,
- 0,
- paddr);
+ unsigned long args[11];
+ int ret;
+ args[0] = (unsigned long) prom_callmethod_name;
+ args[1] = 7;
+ args[2] = 1;
+ args[3] = (unsigned long) prom_map_name;
+ args[4] = (unsigned int) prom_get_mmu_ihandle();
+ args[5] = (unsigned int) mode;
+ args[6] = size;
+ args[7] = vaddr;
+ args[8] = 0;
+ args[9] = paddr;
+ args[10] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ ret = (int) args[10];
if (ret == 0)
ret = -1;
return ret;
@@ -231,40 +279,51 @@ int prom_map(int mode, unsigned long siz
void prom_unmap(unsigned long size, unsigned long vaddr)
{
- p1275_cmd(prom_callmethod_name,
- (P1275_ARG(0, P1275_ARG_IN_STRING) |
- P1275_ARG(2, P1275_ARG_IN_64B) |
- P1275_ARG(3, P1275_ARG_IN_64B) |
- P1275_INOUT(4, 0)),
- prom_unmap_name,
- prom_get_mmu_ihandle(),
- size,
- vaddr);
+ unsigned long args[7];
+
+ args[0] = (unsigned long) prom_callmethod_name;
+ args[1] = 4;
+ args[2] = 0;
+ args[3] = (unsigned long) prom_unmap_name;
+ args[4] = (unsigned int) prom_get_mmu_ihandle();
+ args[5] = size;
+ args[6] = vaddr;
+
+ p1275_cmd_direct(args);
}
/* Set aside physical memory which is not touched or modified
* across soft resets.
*/
-unsigned long prom_retain(const char *name,
- unsigned long pa_low, unsigned long pa_high,
- long size, long align)
-{
- /* XXX I don't think we return multiple values correctly.
- * XXX OBP supposedly returns pa_low/pa_high here, how does
- * XXX it work?
- */
+int prom_retain(const char *name, unsigned long size,
+ unsigned long align, unsigned long *paddr)
+{
+ unsigned long args[11];
- /* If align is zero, the pa_low/pa_high args are passed,
- * else they are not.
+ args[0] = (unsigned long) prom_callmethod_name;
+ args[1] = 5;
+ args[2] = 3;
+ args[3] = (unsigned long) "SUNW,retain";
+ args[4] = (unsigned int) prom_get_memory_ihandle();
+ args[5] = align;
+ args[6] = size;
+ args[7] = (unsigned long) name;
+ args[8] = (unsigned long) -1;
+ args[9] = (unsigned long) -1;
+ args[10] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ if (args[8])
+ return (int) args[8];
+
+ /* Next we get "phys_high" then "phys_low". On 64-bit
+ * the phys_high cell is a don't-care since the phys_low
+ * cell has the full value.
*/
- if (align == 0)
- return p1275_cmd("SUNW,retain",
- (P1275_ARG(0, P1275_ARG_IN_BUF) | P1275_INOUT(5, 2)),
- name, pa_low, pa_high, size, align);
- else
- return p1275_cmd("SUNW,retain",
- (P1275_ARG(0, P1275_ARG_IN_BUF) | P1275_INOUT(3, 2)),
- name, size, align);
+ *paddr = args[10];
+
+ return 0;
}
/* Get "Unumber" string for the SIMM at the given
@@ -277,62 +336,129 @@ int prom_getunumber(int syndrome_code,
unsigned long phys_addr,
char *buf, int buflen)
{
- return p1275_cmd(prom_callmethod_name,
- (P1275_ARG(0, P1275_ARG_IN_STRING) |
- P1275_ARG(3, P1275_ARG_OUT_BUF) |
- P1275_ARG(6, P1275_ARG_IN_64B) |
- P1275_INOUT(8, 2)),
- "SUNW,get-unumber", prom_get_memory_ihandle(),
- buflen, buf, P1275_SIZE(buflen),
- 0, phys_addr, syndrome_code);
+ unsigned long args[12];
+
+ args[0] = (unsigned long) prom_callmethod_name;
+ args[1] = 7;
+ args[2] = 2;
+ args[3] = (unsigned long) "SUNW,get-unumber";
+ args[4] = (unsigned int) prom_get_memory_ihandle();
+ args[5] = buflen;
+ args[6] = (unsigned long) buf;
+ args[7] = 0;
+ args[8] = phys_addr;
+ args[9] = (unsigned int) syndrome_code;
+ args[10] = (unsigned long) -1;
+ args[11] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ return (int) args[10];
}
/* Power management extensions. */
void prom_sleepself(void)
{
- p1275_cmd("SUNW,sleep-self", P1275_INOUT(0, 0));
+ unsigned long args[3];
+
+ args[0] = (unsigned long) "SUNW,sleep-self";
+ args[1] = 0;
+ args[2] = 0;
+ p1275_cmd_direct(args);
}
int prom_sleepsystem(void)
{
- return p1275_cmd("SUNW,sleep-system", P1275_INOUT(0, 1));
+ unsigned long args[4];
+
+ args[0] = (unsigned long) "SUNW,sleep-system";
+ args[1] = 0;
+ args[2] = 1;
+ args[3] = (unsigned long) -1;
+ p1275_cmd_direct(args);
+
+ return (int) args[3];
}
int prom_wakeupsystem(void)
{
- return p1275_cmd("SUNW,wakeup-system", P1275_INOUT(0, 1));
+ unsigned long args[4];
+
+ args[0] = (unsigned long) "SUNW,wakeup-system";
+ args[1] = 0;
+ args[2] = 1;
+ args[3] = (unsigned long) -1;
+ p1275_cmd_direct(args);
+
+ return (int) args[3];
}
#ifdef CONFIG_SMP
void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg)
{
- p1275_cmd("SUNW,start-cpu", P1275_INOUT(3, 0), cpunode, pc, arg);
+ unsigned long args[6];
+
+ args[0] = (unsigned long) "SUNW,start-cpu";
+ args[1] = 3;
+ args[2] = 0;
+ args[3] = (unsigned int) cpunode;
+ args[4] = pc;
+ args[5] = arg;
+ p1275_cmd_direct(args);
}
void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg)
{
- p1275_cmd("SUNW,start-cpu-by-cpuid", P1275_INOUT(3, 0),
- cpuid, pc, arg);
+ unsigned long args[6];
+
+ args[0] = (unsigned long) "SUNW,start-cpu-by-cpuid";
+ args[1] = 3;
+ args[2] = 0;
+ args[3] = (unsigned int) cpuid;
+ args[4] = pc;
+ args[5] = arg;
+ p1275_cmd_direct(args);
}
void prom_stopcpu_cpuid(int cpuid)
{
- p1275_cmd("SUNW,stop-cpu-by-cpuid", P1275_INOUT(1, 0),
- cpuid);
+ unsigned long args[4];
+
+ args[0] = (unsigned long) "SUNW,stop-cpu-by-cpuid";
+ args[1] = 1;
+ args[2] = 0;
+ args[3] = (unsigned int) cpuid;
+ p1275_cmd_direct(args);
}
void prom_stopself(void)
{
- p1275_cmd("SUNW,stop-self", P1275_INOUT(0, 0));
+ unsigned long args[3];
+
+ args[0] = (unsigned long) "SUNW,stop-self";
+ args[1] = 0;
+ args[2] = 0;
+ p1275_cmd_direct(args);
}
void prom_idleself(void)
{
- p1275_cmd("SUNW,idle-self", P1275_INOUT(0, 0));
+ unsigned long args[3];
+
+ args[0] = (unsigned long) "SUNW,idle-self";
+ args[1] = 0;
+ args[2] = 0;
+ p1275_cmd_direct(args);
}
void prom_resumecpu(int cpunode)
{
- p1275_cmd("SUNW,resume-cpu", P1275_INOUT(1, 0), cpunode);
+ unsigned long args[4];
+
+ args[0] = (unsigned long) "SUNW,resume-cpu";
+ args[1] = 1;
+ args[2] = 0;
+ args[3] = (unsigned int) cpunode;
+ p1275_cmd_direct(args);
}
#endif
file:815cab6be5a39aeb47dc4734f44de977b95cde74 -> file:7ae5b5408d7c2103d7164f606d59678147e91304
--- a/arch/sparc/prom/p1275.c
+++ b/arch/sparc/prom/p1275.c
@@ -22,13 +22,11 @@ struct {
long prom_callback; /* 0x00 */
void (*prom_cif_handler)(long *); /* 0x08 */
unsigned long prom_cif_stack; /* 0x10 */
- unsigned long prom_args [23]; /* 0x18 */
- char prom_buffer [3000];
} p1275buf;
extern void prom_world(int);
-extern void prom_cif_interface(void);
+extern void prom_cif_direct(unsigned long *args);
extern void prom_cif_callback(void);
/*
@@ -36,114 +34,20 @@ extern void prom_cif_callback(void);
*/
DEFINE_SPINLOCK(prom_entry_lock);
-long p1275_cmd(const char *service, long fmt, ...)
+void p1275_cmd_direct(unsigned long *args)
{
- char *p, *q;
unsigned long flags;
- int nargs, nrets, i;
- va_list list;
- long attrs, x;
-
- p = p1275buf.prom_buffer;
raw_local_save_flags(flags);
raw_local_irq_restore(PIL_NMI);
spin_lock(&prom_entry_lock);
- p1275buf.prom_args[0] = (unsigned long)p; /* service */
- strcpy (p, service);
- p = (char *)(((long)(strchr (p, 0) + 8)) & ~7);
- p1275buf.prom_args[1] = nargs = (fmt & 0x0f); /* nargs */
- p1275buf.prom_args[2] = nrets = ((fmt & 0xf0) >> 4); /* nrets */
- attrs = fmt >> 8;
- va_start(list, fmt);
- for (i = 0; i < nargs; i++, attrs >>= 3) {
- switch (attrs & 0x7) {
- case P1275_ARG_NUMBER:
- p1275buf.prom_args[i + 3] =
- (unsigned)va_arg(list, long);
- break;
- case P1275_ARG_IN_64B:
- p1275buf.prom_args[i + 3] =
- va_arg(list, unsigned long);
- break;
- case P1275_ARG_IN_STRING:
- strcpy (p, va_arg(list, char *));
- p1275buf.prom_args[i + 3] = (unsigned long)p;
- p = (char *)(((long)(strchr (p, 0) + 8)) & ~7);
- break;
- case P1275_ARG_OUT_BUF:
- (void) va_arg(list, char *);
- p1275buf.prom_args[i + 3] = (unsigned long)p;
- x = va_arg(list, long);
- i++; attrs >>= 3;
- p = (char *)(((long)(p + (int)x + 7)) & ~7);
- p1275buf.prom_args[i + 3] = x;
- break;
- case P1275_ARG_IN_BUF:
- q = va_arg(list, char *);
- p1275buf.prom_args[i + 3] = (unsigned long)p;
- x = va_arg(list, long);
- i++; attrs >>= 3;
- memcpy (p, q, (int)x);
- p = (char *)(((long)(p + (int)x + 7)) & ~7);
- p1275buf.prom_args[i + 3] = x;
- break;
- case P1275_ARG_OUT_32B:
- (void) va_arg(list, char *);
- p1275buf.prom_args[i + 3] = (unsigned long)p;
- p += 32;
- break;
- case P1275_ARG_IN_FUNCTION:
- p1275buf.prom_args[i + 3] =
- (unsigned long)prom_cif_callback;
- p1275buf.prom_callback = va_arg(list, long);
- break;
- }
- }
- va_end(list);
-
prom_world(1);
- prom_cif_interface();
+ prom_cif_direct(args);
prom_world(0);
- attrs = fmt >> 8;
- va_start(list, fmt);
- for (i = 0; i < nargs; i++, attrs >>= 3) {
- switch (attrs & 0x7) {
- case P1275_ARG_NUMBER:
- (void) va_arg(list, long);
- break;
- case P1275_ARG_IN_STRING:
- (void) va_arg(list, char *);
- break;
- case P1275_ARG_IN_FUNCTION:
- (void) va_arg(list, long);
- break;
- case P1275_ARG_IN_BUF:
- (void) va_arg(list, char *);
- (void) va_arg(list, long);
- i++; attrs >>= 3;
- break;
- case P1275_ARG_OUT_BUF:
- p = va_arg(list, char *);
- x = va_arg(list, long);
- memcpy (p, (char *)(p1275buf.prom_args[i + 3]), (int)x);
- i++; attrs >>= 3;
- break;
- case P1275_ARG_OUT_32B:
- p = va_arg(list, char *);
- memcpy (p, (char *)(p1275buf.prom_args[i + 3]), 32);
- break;
- }
- }
- va_end(list);
- x = p1275buf.prom_args [nargs + 3];
-
spin_unlock(&prom_entry_lock);
raw_local_irq_restore(flags);
-
- return x;
}
void prom_cif_init(void *cif_handler, void *cif_stack)
file:8ea73ddc61dcb6fc69e62a235e98e5066fee29b8 -> file:6a05c76f58fdec24c135f6734ec5aaecc971b0a3
--- a/arch/sparc/prom/tree_64.c
+++ b/arch/sparc/prom/tree_64.c
@@ -16,22 +16,39 @@
#include <asm/oplib.h>
#include <asm/ldc.h>
+static int prom_node_to_node(const char *type, int node)
+{
+ unsigned long args[5];
+
+ args[0] = (unsigned long) type;
+ args[1] = 1;
+ args[2] = 1;
+ args[3] = (unsigned int) node;
+ args[4] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ return (int) args[4];
+}
+
/* Return the child of node 'node' or zero if this node has no
 * direct descendant.
*/
inline int __prom_getchild(int node)
{
- return p1275_cmd ("child", P1275_INOUT(1, 1), node);
+ return prom_node_to_node("child", node);
}
inline int prom_getchild(int node)
{
int cnode;
- if(node == -1) return 0;
+ if (node == -1)
+ return 0;
cnode = __prom_getchild(node);
- if(cnode == -1) return 0;
- return (int)cnode;
+ if (cnode == -1)
+ return 0;
+ return cnode;
}
EXPORT_SYMBOL(prom_getchild);
@@ -39,10 +56,12 @@ inline int prom_getparent(int node)
{
int cnode;
- if(node == -1) return 0;
- cnode = p1275_cmd ("parent", P1275_INOUT(1, 1), node);
- if(cnode == -1) return 0;
- return (int)cnode;
+ if (node == -1)
+ return 0;
+ cnode = prom_node_to_node("parent", node);
+ if (cnode == -1)
+ return 0;
+ return cnode;
}
/* Return the next sibling of node 'node' or zero if no more siblings
@@ -50,7 +69,7 @@ inline int prom_getparent(int node)
*/
inline int __prom_getsibling(int node)
{
- return p1275_cmd(prom_peer_name, P1275_INOUT(1, 1), node);
+ return prom_node_to_node(prom_peer_name, node);
}
inline int prom_getsibling(int node)
@@ -72,11 +91,21 @@ EXPORT_SYMBOL(prom_getsibling);
*/
inline int prom_getproplen(int node, const char *prop)
{
- if((!node) || (!prop)) return -1;
- return p1275_cmd ("getproplen",
- P1275_ARG(1,P1275_ARG_IN_STRING)|
- P1275_INOUT(2, 1),
- node, prop);
+ unsigned long args[6];
+
+ if (!node || !prop)
+ return -1;
+
+ args[0] = (unsigned long) "getproplen";
+ args[1] = 2;
+ args[2] = 1;
+ args[3] = (unsigned int) node;
+ args[4] = (unsigned long) prop;
+ args[5] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ return (int) args[5];
}
EXPORT_SYMBOL(prom_getproplen);
@@ -87,19 +116,25 @@ EXPORT_SYMBOL(prom_getproplen);
inline int prom_getproperty(int node, const char *prop,
char *buffer, int bufsize)
{
+ unsigned long args[8];
int plen;
plen = prom_getproplen(node, prop);
- if ((plen > bufsize) || (plen == 0) || (plen == -1)) {
+ if ((plen > bufsize) || (plen == 0) || (plen == -1))
return -1;
- } else {
- /* Ok, things seem all right. */
- return p1275_cmd(prom_getprop_name,
- P1275_ARG(1,P1275_ARG_IN_STRING)|
- P1275_ARG(2,P1275_ARG_OUT_BUF)|
- P1275_INOUT(4, 1),
- node, prop, buffer, P1275_SIZE(plen));
- }
+
+ args[0] = (unsigned long) prom_getprop_name;
+ args[1] = 4;
+ args[2] = 1;
+ args[3] = (unsigned int) node;
+ args[4] = (unsigned long) prop;
+ args[5] = (unsigned long) buffer;
+ args[6] = bufsize;
+ args[7] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ return (int) args[7];
}
EXPORT_SYMBOL(prom_getproperty);
@@ -110,7 +145,7 @@ inline int prom_getint(int node, const c
{
int intprop;
- if(prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1)
+ if (prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1)
return intprop;
return -1;
@@ -126,7 +161,8 @@ int prom_getintdefault(int node, const c
int retval;
retval = prom_getint(node, property);
- if(retval == -1) return deflt;
+ if (retval == -1)
+ return deflt;
return retval;
}
@@ -138,7 +174,8 @@ int prom_getbool(int node, const char *p
int retval;
retval = prom_getproplen(node, prop);
- if(retval == -1) return 0;
+ if (retval == -1)
+ return 0;
return 1;
}
EXPORT_SYMBOL(prom_getbool);
@@ -152,7 +189,8 @@ void prom_getstring(int node, const char
int len;
len = prom_getproperty(node, prop, user_buf, ubuf_size);
- if(len != -1) return;
+ if (len != -1)
+ return;
user_buf[0] = 0;
return;
}
@@ -165,7 +203,8 @@ int prom_nodematch(int node, const char
{
char namebuf[128];
prom_getproperty(node, "name", namebuf, sizeof(namebuf));
- if(strcmp(namebuf, name) == 0) return 1;
+ if (strcmp(namebuf, name) == 0)
+ return 1;
return 0;
}
@@ -191,16 +230,29 @@ int prom_searchsiblings(int node_start,
}
EXPORT_SYMBOL(prom_searchsiblings);
+static const char *prom_nextprop_name = "nextprop";
+
/* Return the first property type for node 'node'.
* buffer should be at least 32B in length
*/
inline char *prom_firstprop(int node, char *buffer)
{
+ unsigned long args[7];
+
*buffer = 0;
- if(node == -1) return buffer;
- p1275_cmd ("nextprop", P1275_ARG(2,P1275_ARG_OUT_32B)|
- P1275_INOUT(3, 0),
- node, (char *) 0x0, buffer);
+ if (node == -1)
+ return buffer;
+
+ args[0] = (unsigned long) prom_nextprop_name;
+ args[1] = 3;
+ args[2] = 1;
+ args[3] = (unsigned int) node;
+ args[4] = 0;
+ args[5] = (unsigned long) buffer;
+ args[6] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
return buffer;
}
EXPORT_SYMBOL(prom_firstprop);
@@ -211,9 +263,10 @@ EXPORT_SYMBOL(prom_firstprop);
*/
inline char *prom_nextprop(int node, const char *oprop, char *buffer)
{
+ unsigned long args[7];
char buf[32];
- if(node == -1) {
+ if (node == -1) {
*buffer = 0;
return buffer;
}
@@ -221,10 +274,17 @@ inline char *prom_nextprop(int node, con
strcpy (buf, oprop);
oprop = buf;
}
- p1275_cmd ("nextprop", P1275_ARG(1,P1275_ARG_IN_STRING)|
- P1275_ARG(2,P1275_ARG_OUT_32B)|
- P1275_INOUT(3, 0),
- node, oprop, buffer);
+
+ args[0] = (unsigned long) prom_nextprop_name;
+ args[1] = 3;
+ args[2] = 1;
+ args[3] = (unsigned int) node;
+ args[4] = (unsigned long) oprop;
+ args[5] = (unsigned long) buffer;
+ args[6] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
return buffer;
}
EXPORT_SYMBOL(prom_nextprop);
@@ -232,12 +292,19 @@ EXPORT_SYMBOL(prom_nextprop);
int
prom_finddevice(const char *name)
{
+ unsigned long args[5];
+
if (!name)
return 0;
- return p1275_cmd(prom_finddev_name,
- P1275_ARG(0,P1275_ARG_IN_STRING)|
- P1275_INOUT(1, 1),
- name);
+ args[0] = (unsigned long) "finddevice";
+ args[1] = 1;
+ args[2] = 1;
+ args[3] = (unsigned long) name;
+ args[4] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ return (int) args[4];
}
EXPORT_SYMBOL(prom_finddevice);
@@ -248,7 +315,7 @@ int prom_node_has_property(int node, con
*buf = 0;
do {
prom_nextprop(node, buf, buf);
- if(!strcmp(buf, prop))
+ if (!strcmp(buf, prop))
return 1;
} while (*buf);
return 0;
@@ -261,6 +328,8 @@ EXPORT_SYMBOL(prom_node_has_property);
int
prom_setprop(int node, const char *pname, char *value, int size)
{
+ unsigned long args[8];
+
if (size == 0)
return 0;
if ((pname == 0) || (value == 0))
@@ -272,19 +341,37 @@ prom_setprop(int node, const char *pname
return 0;
}
#endif
- return p1275_cmd ("setprop", P1275_ARG(1,P1275_ARG_IN_STRING)|
- P1275_ARG(2,P1275_ARG_IN_BUF)|
- P1275_INOUT(4, 1),
- node, pname, value, P1275_SIZE(size));
+ args[0] = (unsigned long) "setprop";
+ args[1] = 4;
+ args[2] = 1;
+ args[3] = (unsigned int) node;
+ args[4] = (unsigned long) pname;
+ args[5] = (unsigned long) value;
+ args[6] = size;
+ args[7] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ return (int) args[7];
}
EXPORT_SYMBOL(prom_setprop);
inline int prom_inst2pkg(int inst)
{
+ unsigned long args[5];
int node;
- node = p1275_cmd ("instance-to-package", P1275_INOUT(1, 1), inst);
- if (node == -1) return 0;
+ args[0] = (unsigned long) "instance-to-package";
+ args[1] = 1;
+ args[2] = 1;
+ args[3] = (unsigned int) inst;
+ args[4] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ node = (int) args[4];
+ if (node == -1)
+ return 0;
return node;
}
@@ -297,17 +384,28 @@ prom_pathtoinode(const char *path)
int node, inst;
inst = prom_devopen (path);
- if (inst == 0) return 0;
- node = prom_inst2pkg (inst);
- prom_devclose (inst);
- if (node == -1) return 0;
+ if (inst == 0)
+ return 0;
+ node = prom_inst2pkg(inst);
+ prom_devclose(inst);
+ if (node == -1)
+ return 0;
return node;
}
int prom_ihandle2path(int handle, char *buffer, int bufsize)
{
- return p1275_cmd("instance-to-path",
- P1275_ARG(1,P1275_ARG_OUT_BUF)|
- P1275_INOUT(3, 1),
- handle, buffer, P1275_SIZE(bufsize));
+ unsigned long args[7];
+
+ args[0] = (unsigned long) "instance-to-path";
+ args[1] = 3;
+ args[2] = 1;
+ args[3] = (unsigned int) handle;
+ args[4] = (unsigned long) buffer;
+ args[5] = bufsize;
+ args[6] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ return (int) args[6];
}
file:cf8a97f3451856665b11fdb6172b67953d6ee513 -> file:ec8a0eea13f707b1579f54b4846b1ed01644f17d
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -727,6 +727,9 @@ struct winch {
static void free_winch(struct winch *winch, int free_irq_ok)
{
+ if (free_irq_ok)
+ free_irq(WINCH_IRQ, winch);
+
list_del(&winch->list);
if (winch->pid != -1)
@@ -735,8 +738,6 @@ static void free_winch(struct winch *win
os_close_file(winch->fd);
if (winch->stack != 0)
free_stack(winch->stack, 0);
- if (free_irq_ok)
- free_irq(WINCH_IRQ, winch);
kfree(winch);
}
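
The free_winch() reordering above releases the IRQ before the winch is unlinked and its fd and stack are freed, so the WINCH handler can no longer fire against a half-destroyed structure. The same ordering rule as a runnable userspace analogue (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct winch_like {
	int fd;
};

/* Stand-in for free_irq(): after this returns, the handler can no
 * longer run against 'w'.
 */
static void detach_handler(struct winch_like *w)
{
	printf("handler detached (fd %d)\n", w->fd);
}

static void close_resources(struct winch_like *w)
{
	printf("fd %d closed\n", w->fd);
}

int main(void)
{
	struct winch_like *w = malloc(sizeof(*w));

	if (!w)
		return 1;
	w->fd = 3;

	detach_handler(w);	/* 1: stop handler invocations first  */
	close_resources(w);	/* 2: now safe to tear down its state */
	free(w);		/* 3: finally release the struct      */
	return 0;
}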
file:635d16d90a80d4158df7769ec11df5ab6c64c38d -> file:9fcf26c5216ee6595efcb600591b5c80dc204894
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -160,6 +160,7 @@ struct ubd {
struct scatterlist sg[MAX_SG];
struct request *request;
int start_sg, end_sg;
+ sector_t rq_pos;
};
#define DEFAULT_COW { \
@@ -184,6 +185,7 @@ struct ubd {
.request = NULL, \
.start_sg = 0, \
.end_sg = 0, \
+ .rq_pos = 0, \
}
/* Protected by ubd_lock */
@@ -1222,7 +1224,6 @@ static void do_ubd_request(struct reques
{
struct io_thread_req *io_req;
struct request *req;
- sector_t sector;
int n;
while(1){
@@ -1233,12 +1234,12 @@ static void do_ubd_request(struct reques
return;
dev->request = req;
+ dev->rq_pos = blk_rq_pos(req);
dev->start_sg = 0;
dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
}
req = dev->request;
- sector = blk_rq_pos(req);
while(dev->start_sg < dev->end_sg){
struct scatterlist *sg = &dev->sg[dev->start_sg];
@@ -1250,10 +1251,9 @@ static void do_ubd_request(struct reques
return;
}
prepare_request(req, io_req,
- (unsigned long long)sector << 9,
+ (unsigned long long)dev->rq_pos << 9,
sg->offset, sg->length, sg_page(sg));
- sector += sg->length >> 9;
n = os_write_file(thread_fd, &io_req,
sizeof(struct io_thread_req *));
if(n != sizeof(struct io_thread_req *)){
@@ -1266,6 +1266,7 @@ static void do_ubd_request(struct reques
return;
}
+ dev->rq_pos += sg->length >> 9;
dev->start_sg++;
}
dev->end_sg = 0;
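
The ubd change above fixes a resume bug: do_ubd_request() returns early when the I/O-thread pipe fills, and on re-entry it recomputed the position from blk_rq_pos(req), re-issuing sectors that were already queued. Keeping the cursor in struct ubd lets progress survive the early return. A toy model of the pattern (names hypothetical):

#include <stdio.h>

struct dev {
	unsigned long rq_pos;	/* persists across re-entry */
	int start_sg, end_sg;
};

/* Simulate a pipe that is full exactly once, at the third segment. */
static int pipe_full(int sg)
{
	static int stalled;

	if (sg == 2 && !stalled) {
		stalled = 1;
		return 1;
	}
	return 0;
}

static void issue(struct dev *d)
{
	while (d->start_sg < d->end_sg) {
		if (pipe_full(d->start_sg))
			return;		/* resume later, cursor kept   */
		printf("queue sector %lu\n", d->rq_pos);
		d->rq_pos += 8;		/* advance only after queueing */
		d->start_sg++;
	}
}

int main(void)
{
	struct dev d = { .rq_pos = 0, .start_sg = 0, .end_sg = 4 };

	issue(&d);	/* stalls after two segments   */
	issue(&d);	/* resumes at sector 16, not 0 */
	return 0;
}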
file:e7a6cca667aa5c0a6ec0a8df5392976d8d97aea9 -> file:664f94216eb739cdb799b4396f30a0d96d3dc0f4
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -22,7 +22,7 @@ SECTIONS
_text = .;
_stext = .;
__init_begin = .;
- INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_TEXT_SECTION(0)
. = ALIGN(PAGE_SIZE);
.text :
file:dec5678fc17f7b65ef5b1113d8f6246f910bab79 -> file:6e3359d6a8394c0d2f811a28a24f41e673e8793c
--- a/arch/um/os-Linux/time.c
+++ b/arch/um/os-Linux/time.c
@@ -60,7 +60,7 @@ static inline long long timeval_to_ns(co
long long disable_timer(void)
{
struct itimerval time = ((struct itimerval) { { 0, 0 }, { 0, 0 } });
- int remain, max = UM_NSEC_PER_SEC / UM_HZ;
+ long long remain, max = UM_NSEC_PER_SEC / UM_HZ;
if (setitimer(ITIMER_VIRTUAL, &time, &time) < 0)
printk(UM_KERN_ERR "disable_timer - setitimer failed, "
file:fbc161d05fed2c365bd3e1ae15021bf177002217 -> file:cb5a57c610751cc58692cc80d1c8b24a29c7182b
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -227,6 +227,11 @@ config X86_32_LAZY_GS
config KTIME_SCALAR
def_bool X86_32
+
+config ARCH_CPU_PROBE_RELEASE
+ def_bool y
+ depends on HOTPLUG_CPU
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
file:5294d84467328f5bb859dcaa2ec84483c355279a -> file:4edd8eb425cfe4ea29193e4d5afa6657ca743063
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -50,7 +50,12 @@
/*
* Reload arg registers from stack in case ptrace changed them.
* We don't reload %eax because syscall_trace_enter() returned
- * the value it wants us to use in the table lookup.
+ * the %rax value we should see. Instead, we just truncate that
+ * value to 32 bits again as we did on entry from user mode.
+ * If it's a new value set by user_regset during entry tracing,
+ * this matches the normal truncation of the user-mode value.
+ * If it's -1 to make us punt the syscall, then (u32)-1 is still
+ * an appropriately invalid value.
*/
.macro LOAD_ARGS32 offset, _r9=0
.if \_r9
@@ -60,6 +65,7 @@
movl \offset+48(%rsp),%edx
movl \offset+56(%rsp),%esi
movl \offset+64(%rsp),%edi
+ movl %eax,%eax /* zero extension */
.endm
.macro CFI_STARTPROC32 simple
@@ -153,7 +159,7 @@ ENTRY(ia32_sysenter_target)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
CFI_REMEMBER_STATE
jnz sysenter_tracesys
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
sysenter_do_call:
IA32_ARG_FIXUP
@@ -195,7 +201,7 @@ sysexit_from_sys_call:
movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
call audit_syscall_entry
movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
movl %ebx,%edi /* reload 1st syscall arg */
movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */
@@ -248,7 +254,7 @@ sysenter_tracesys:
call syscall_trace_enter
LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
RESTORE_REST
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
jmp sysenter_do_call
CFI_ENDPROC
@@ -314,7 +320,7 @@ ENTRY(ia32_cstar_target)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
CFI_REMEMBER_STATE
jnz cstar_tracesys
- cmpl $IA32_NR_syscalls-1,%eax
+ cmpq $IA32_NR_syscalls-1,%rax
ja ia32_badsys
cstar_do_call:
IA32_ARG_FIXUP 1
@@ -367,7 +373,7 @@ cstar_tracesys:
LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
RESTORE_REST
xchgl %ebp,%r9d
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
jmp cstar_do_call
END(ia32_cstar_target)
@@ -425,7 +431,7 @@ ENTRY(ia32_syscall)
orl $TS_COMPAT,TI_status(%r10)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
jnz ia32_tracesys
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
ia32_do_call:
IA32_ARG_FIXUP
@@ -444,7 +450,7 @@ ia32_tracesys:
call syscall_trace_enter
LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
RESTORE_REST
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
jmp ia32_do_call
END(ia32_syscall)
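
The cmpl-to-cmpq changes above close a hole where a tracer could leave the upper half of %rax set: the 32-bit compare on %eax passes the bounds check while the indirect call indexes the syscall table with the full 64-bit %rax; the added "movl %eax,%eax" re-truncates after ptrace may have rewritten the register. A userspace illustration of the flawed check (the bound is a placeholder, not the exact IA32_NR_syscalls value):

#include <stdint.h>
#include <stdio.h>

#define NR_SYSCALLS_MINUS_1 337U	/* placeholder bound */

int main(void)
{
	uint64_t rax = 0x100000000ULL;	/* upper half set, e.g. via ptrace */
	uint32_t eax = (uint32_t) rax;	/* what "cmpl $..., %eax" compares */

	if (eax <= NR_SYSCALLS_MINUS_1)	/* 32-bit check sees 0: passes */
		printf("table index would be %llu (out of bounds)\n",
		       (unsigned long long) rax);
	return 0;
}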
file:2a2cc7a78a81b6b06d460dfee51ad8e9093eb808 -> file:7beb491de20ef70d5bfb37a66fb4d51088ec5e23
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -305,6 +305,9 @@ struct amd_iommu {
/* capabilities of that IOMMU read from ACPI */
u32 cap;
+ /* flags read from acpi table */
+ u8 acpi_flags;
+
/*
* Capability pointer. There could be more than one IOMMU per PCI
* device function if there are more than one AMD IOMMU capability
@@ -348,6 +351,15 @@ struct amd_iommu {
/* default dma_ops domain for that IOMMU */
struct dma_ops_domain *default_dom;
+
+ /*
+ * This array is required to work around a potential BIOS bug.
+ * The BIOS may fail to restore parts of the PCI configuration
+ * space when the system resumes from S3. The result is that the
+ * IOMMU no longer executes commands, which leads to system
+ * failure.
+ */
+ u32 cache_cfg[4];
};
/*
@@ -469,4 +481,10 @@ static inline void amd_iommu_stats_init(
/* some function prototypes */
extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
+static inline bool is_rd890_iommu(struct pci_dev *pdev)
+{
+ return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
+ (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
+}
+
#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
file:ee1931be6593aba1d344b7644340a1422d7a4f6d -> file:9873a5f64676cd25098797b01ac59d123d50580a
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -17,60 +17,33 @@ struct __xchg_dummy {
#define __xg(x) ((struct __xchg_dummy *)(x))
/*
- * The semantics of XCHGCMP8B are a bit strange, this is why
- * there is a loop and the loading of %%eax and %%edx has to
- * be inside. This inlines well in most cases, the cached
- * cost is around ~38 cycles. (in the future we might want
- * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
- * might have an implicit FPU-save as a cost, so it's not
- * clear which path to go.)
+ * CMPXCHG8B only writes to the target if we had the previous
+ * value in registers, otherwise it acts as a read and gives us the
+ * "new previous" value. That is why there is a loop. Preloading
+ * EDX:EAX is a performance optimization: in the common case it means
+ * we need only one locked operation.
*
- * cmpxchg8b must be used with the lock prefix here to allow
- * the instruction to be executed atomically, see page 3-102
- * of the instruction set reference 24319102.pdf. We need
- * the reader side to see the coherent 64bit value.
+ * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
+ * least an FPU save and/or %cr0.ts manipulation.
+ *
+ * cmpxchg8b must be used with the lock prefix here to allow the
+ * instruction to be executed atomically. We need the reader
+ * side to see a coherent 64-bit value.
*/
-static inline void __set_64bit(unsigned long long *ptr,
- unsigned int low, unsigned int high)
+static inline void set_64bit(volatile u64 *ptr, u64 value)
{
+ u32 low = value;
+ u32 high = value >> 32;
+ u64 prev = *ptr;
+
asm volatile("\n1:\t"
- "movl (%0), %%eax\n\t"
- "movl 4(%0), %%edx\n\t"
- LOCK_PREFIX "cmpxchg8b (%0)\n\t"
+ LOCK_PREFIX "cmpxchg8b %0\n\t"
"jnz 1b"
- : /* no outputs */
- : "D"(ptr),
- "b"(low),
- "c"(high)
- : "ax", "dx", "memory");
-}
-
-static inline void __set_64bit_constant(unsigned long long *ptr,
- unsigned long long value)
-{
- __set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32));
-}
-
-#define ll_low(x) *(((unsigned int *)&(x)) + 0)
-#define ll_high(x) *(((unsigned int *)&(x)) + 1)
-
-static inline void __set_64bit_var(unsigned long long *ptr,
- unsigned long long value)
-{
- __set_64bit(ptr, ll_low(value), ll_high(value));
+ : "=m" (*ptr), "+A" (prev)
+ : "b" (low), "c" (high)
+ : "memory");
}
-#define set_64bit(ptr, value) \
- (__builtin_constant_p((value)) \
- ? __set_64bit_constant((ptr), (value)) \
- : __set_64bit_var((ptr), (value)))
-
-#define _set_64bit(ptr, value) \
- (__builtin_constant_p(value) \
- ? __set_64bit(ptr, (unsigned int)(value), \
- (unsigned int)((value) >> 32)) \
- : __set_64bit(ptr, ll_low((value)), ll_high((value))))
-
/*
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway
* Note 2: xchg has side effect, so that attribute volatile is necessary,
@@ -82,20 +55,20 @@ static inline unsigned long __xchg(unsig
switch (size) {
case 1:
asm volatile("xchgb %b0,%1"
- : "=q" (x)
- : "m" (*__xg(ptr)), "0" (x)
+ : "=q" (x), "+m" (*__xg(ptr))
+ : "0" (x)
: "memory");
break;
case 2:
asm volatile("xchgw %w0,%1"
- : "=r" (x)
- : "m" (*__xg(ptr)), "0" (x)
+ : "=r" (x), "+m" (*__xg(ptr))
+ : "0" (x)
: "memory");
break;
case 4:
asm volatile("xchgl %0,%1"
- : "=r" (x)
- : "m" (*__xg(ptr)), "0" (x)
+ : "=r" (x), "+m" (*__xg(ptr))
+ : "0" (x)
: "memory");
break;
}
@@ -139,21 +112,21 @@ static inline unsigned long __cmpxchg(vo
unsigned long prev;
switch (size) {
case 1:
- asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile(LOCK_PREFIX "cmpxchgb %b2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "q"(new), "0"(old)
: "memory");
return prev;
case 2:
- asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile(LOCK_PREFIX "cmpxchgw %w2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
case 4:
- asm volatile(LOCK_PREFIX "cmpxchgl %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile(LOCK_PREFIX "cmpxchgl %2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
}
@@ -172,21 +145,21 @@ static inline unsigned long __sync_cmpxc
unsigned long prev;
switch (size) {
case 1:
- asm volatile("lock; cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("lock; cmpxchgb %b2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "q"(new), "0"(old)
: "memory");
return prev;
case 2:
- asm volatile("lock; cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("lock; cmpxchgw %w2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
case 4:
- asm volatile("lock; cmpxchgl %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("lock; cmpxchgl %2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
}
@@ -200,21 +173,21 @@ static inline unsigned long __cmpxchg_lo
unsigned long prev;
switch (size) {
case 1:
- asm volatile("cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("cmpxchgb %b2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "q"(new), "0"(old)
: "memory");
return prev;
case 2:
- asm volatile("cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("cmpxchgw %w2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
case 4:
- asm volatile("cmpxchgl %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("cmpxchgl %2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
}
@@ -226,11 +199,10 @@ static inline unsigned long long __cmpxc
unsigned long long new)
{
unsigned long long prev;
- asm volatile(LOCK_PREFIX "cmpxchg8b %3"
- : "=A"(prev)
+ asm volatile(LOCK_PREFIX "cmpxchg8b %1"
+ : "=A"(prev), "+m" (*__xg(ptr))
: "b"((unsigned long)new),
"c"((unsigned long)(new >> 32)),
- "m"(*__xg(ptr)),
"0"(old)
: "memory");
return prev;
@@ -241,11 +213,10 @@ static inline unsigned long long __cmpxc
unsigned long long new)
{
unsigned long long prev;
- asm volatile("cmpxchg8b %3"
- : "=A"(prev)
+ asm volatile("cmpxchg8b %1"
+ : "=A"(prev), "+m"(*__xg(ptr))
: "b"((unsigned long)new),
"c"((unsigned long)(new >> 32)),
- "m"(*__xg(ptr)),
"0"(old)
: "memory");
return prev;
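
The rewritten set_64bit() relies on cmpxchg8b loading the "new previous" value into EDX:EAX on a mismatch, so preloading *ptr means the common case finishes in one locked operation. A userspace analogue of the loop using a GCC builtin (a sketch, not the kernel code; __sync_val_compare_and_swap compiles to a lock cmpxchg8b on 32-bit x86):

#include <stdint.h>
#include <stdio.h>

/* Retry the compare-exchange until the value we believed was in
 * memory really was there, at which point the 64-bit store has
 * happened atomically.
 */
static void set_64bit_emul(volatile uint64_t *ptr, uint64_t value)
{
	uint64_t prev = *ptr;	/* preload: usually already correct */

	for (;;) {
		uint64_t old = __sync_val_compare_and_swap(
				(uint64_t *) ptr, prev, value);
		if (old == prev)
			break;	/* exchange succeeded            */
		prev = old;	/* retry with the "new previous" */
	}
}

int main(void)
{
	uint64_t v = 1;

	set_64bit_emul(&v, 0x0123456789abcdefULL);
	printf("%llx\n", (unsigned long long) v);
	return 0;
}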
file:52de72e0de8c882c3673fcff25c315f270db977f -> file:e8cb051b8681e4be97c803076716f8c287ada567
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -8,13 +8,11 @@
#define __xg(x) ((volatile long *)(x))
-static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
+static inline void set_64bit(volatile u64 *ptr, u64 val)
{
*ptr = val;
}
-#define _set_64bit set_64bit
-
/*
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway
* Note 2: xchg has side effect, so that attribute volatile is necessary,
@@ -26,26 +24,26 @@ static inline unsigned long __xchg(unsig
switch (size) {
case 1:
asm volatile("xchgb %b0,%1"
- : "=q" (x)
- : "m" (*__xg(ptr)), "0" (x)
+ : "=q" (x), "+m" (*__xg(ptr))
+ : "0" (x)
: "memory");
break;
case 2:
asm volatile("xchgw %w0,%1"
- : "=r" (x)
- : "m" (*__xg(ptr)), "0" (x)
+ : "=r" (x), "+m" (*__xg(ptr))
+ : "0" (x)
: "memory");
break;
case 4:
asm volatile("xchgl %k0,%1"
- : "=r" (x)
- : "m" (*__xg(ptr)), "0" (x)
+ : "=r" (x), "+m" (*__xg(ptr))
+ : "0" (x)
: "memory");
break;
case 8:
asm volatile("xchgq %0,%1"
- : "=r" (x)
- : "m" (*__xg(ptr)), "0" (x)
+ : "=r" (x), "+m" (*__xg(ptr))
+ : "0" (x)
: "memory");
break;
}
@@ -66,27 +64,27 @@ static inline unsigned long __cmpxchg(vo
unsigned long prev;
switch (size) {
case 1:
- asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile(LOCK_PREFIX "cmpxchgb %b2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "q"(new), "0"(old)
: "memory");
return prev;
case 2:
- asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile(LOCK_PREFIX "cmpxchgw %w2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
case 4:
- asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile(LOCK_PREFIX "cmpxchgl %k2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
case 8:
- asm volatile(LOCK_PREFIX "cmpxchgq %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile(LOCK_PREFIX "cmpxchgq %2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
}
@@ -105,21 +103,27 @@ static inline unsigned long __sync_cmpxc
unsigned long prev;
switch (size) {
case 1:
- asm volatile("lock; cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("lock; cmpxchgb %b2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "q"(new), "0"(old)
: "memory");
return prev;
case 2:
- asm volatile("lock; cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("lock; cmpxchgw %w2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
case 4:
- asm volatile("lock; cmpxchgl %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("lock; cmpxchgl %k2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
+ : "memory");
+ return prev;
+ case 8:
+ asm volatile("lock; cmpxchgq %2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
}
@@ -133,27 +137,27 @@ static inline unsigned long __cmpxchg_lo
unsigned long prev;
switch (size) {
case 1:
- asm volatile("cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("cmpxchgb %b2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "q"(new), "0"(old)
: "memory");
return prev;
case 2:
- asm volatile("cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("cmpxchgw %w2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
case 4:
- asm volatile("cmpxchgl %k1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("cmpxchgl %k2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
case 8:
- asm volatile("cmpxchgq %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("cmpxchgq %2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
}
file:9a9c7bdc923dee66a411bdae3f30065e9e4ee09f -> file:c8c9a74d8ccc1e6f70ab807d0db9a6860a96117d
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -204,7 +204,7 @@ static inline compat_uptr_t ptr_to_compa
return (u32)(unsigned long)uptr;
}
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
{
struct pt_regs *regs = task_pt_regs(current);
return (void __user *)regs->sp - len;
file:2cbf0a279ab9de1a796cfa9adea053eff165b740 -> file:1efb1fae606fb41f7808684ac047701b2b05ad9b
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -150,7 +150,7 @@
#define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
#define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */
#define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */
-#define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */
+#define X86_FEATURE_XOP (6*32+11) /* extended AVX instructions */
#define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */
#define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */
#define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */
file:73739322b6d05675ca8155aac9f5126a5e701460 -> file:6a63b86c64a136872aa3c522fab6dfc93e5923ad
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -172,6 +172,7 @@ static inline void __iomem *ioremap(reso
extern void iounmap(volatile void __iomem *addr);
+extern void set_iounmap_nonlazy(void);
#ifdef CONFIG_X86_32
# include "io_32.h"
file:91139545b122a23b8f235be0160894bc057cab59 -> file:600807b3d3650420c1aac56b10b69cc9abd15deb
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -674,20 +674,6 @@ static inline struct kvm_mmu_page *page_
return (struct kvm_mmu_page *)page_private(page);
}
-static inline u16 kvm_read_fs(void)
-{
- u16 seg;
- asm("mov %%fs, %0" : "=g"(seg));
- return seg;
-}
-
-static inline u16 kvm_read_gs(void)
-{
- u16 seg;
- asm("mov %%gs, %0" : "=g"(seg));
- return seg;
-}
-
static inline u16 kvm_read_ldt(void)
{
u16 ldt;
@@ -695,16 +681,6 @@ static inline u16 kvm_read_ldt(void)
return ldt;
}
-static inline void kvm_load_fs(u16 sel)
-{
- asm("mov %0, %%fs" : : "rm"(sel));
-}
-
-static inline void kvm_load_gs(u16 sel)
-{
- asm("mov %0, %%gs" : : "rm"(sel));
-}
-
static inline void kvm_load_ldt(u16 sel)
{
asm("lldt %0" : : "rm"(sel));
file:01fd9461d323b89827afdc9a5a4c205a6bae3407 -> file:750f1bf1fab18747650710c8e3d64b1fc9049511
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -27,6 +27,7 @@ struct mm_struct;
struct vm_area_struct;
extern pgd_t swapper_pg_dir[1024];
+extern pgd_t trampoline_pg_dir[1024];
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }
file:4cfc908240684c561b4554281c36850643fc139f -> file:4c2f63c7fc1b2ec071d3d32465f389a6d9df24dd
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -50,7 +50,7 @@ struct smp_ops {
void (*smp_prepare_cpus)(unsigned max_cpus);
void (*smp_cpus_done)(unsigned max_cpus);
- void (*smp_send_stop)(void);
+ void (*stop_other_cpus)(int wait);
void (*smp_send_reschedule)(int cpu);
int (*cpu_up)(unsigned cpu);
@@ -73,7 +73,12 @@ extern struct smp_ops smp_ops;
static inline void smp_send_stop(void)
{
- smp_ops.smp_send_stop();
+ smp_ops.stop_other_cpus(0);
+}
+
+static inline void stop_other_cpus(void)
+{
+ smp_ops.stop_other_cpus(1);
}
static inline void smp_prepare_boot_cpu(void)
file:90f06c25221d792dab33f662c25012b7d0c39f59 -> file:ebace684c476effa7488ca2f678cb5e58fa53d6e
--- a/arch/x86/include/asm/trampoline.h
+++ b/arch/x86/include/asm/trampoline.h
@@ -13,15 +13,18 @@ extern unsigned char *trampoline_base;
extern unsigned long init_rsp;
extern unsigned long initial_code;
+extern unsigned long initial_page_table;
extern unsigned long initial_gs;
#define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
#define TRAMPOLINE_BASE 0x6000
extern unsigned long setup_trampoline(void);
+extern void __init setup_trampoline_page_table(void);
extern void __init reserve_trampoline_memory(void);
#else
-static inline void reserve_trampoline_memory(void) {};
+static inline void setup_trampoline_page_table(void) {}
+static inline void reserve_trampoline_memory(void) {}
#endif /* CONFIG_X86_TRAMPOLINE */
#endif /* __ASSEMBLY__ */
file:c0427295e8f58956e32f833c78c9ad75676778d2 -> file:1ca132fc0d039cbc8c3b7f605fa4dbbd91db7291
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -59,5 +59,7 @@ extern void check_tsc_sync_source(int cp
extern void check_tsc_sync_target(void);
extern int notsc_setup(char *);
+extern void save_sched_clock_state(void);
+extern void restore_sched_clock_state(void);
#endif /* _ASM_X86_TSC_H */
file:d8e5d0cdd678d3b4396c0e7f859b7c3f6ac0d212 -> file:d1911abac18056def432f5549d8ff882c03e6287
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -11,6 +11,8 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_tsc.o = -pg
CFLAGS_REMOVE_rtc.o = -pg
CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
+CFLAGS_REMOVE_pvclock.o = -pg
+CFLAGS_REMOVE_kvmclock.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_early_printk.o = -pg
endif
file:f0fa7a1782d59771207a795a274456653735768b -> file:7cd33f75a69c4c44360528bc44b00418cb1f29de
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1688,6 +1688,7 @@ static void __unmap_single(struct amd_io
size_t size,
int dir)
{
+ dma_addr_t flush_addr;
dma_addr_t i, start;
unsigned int pages;
@@ -1695,6 +1696,7 @@ static void __unmap_single(struct amd_io
(dma_addr + size > dma_dom->aperture_size))
return;
+ flush_addr = dma_addr;
pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
dma_addr &= PAGE_MASK;
start = dma_addr;
@@ -1709,7 +1711,7 @@ static void __unmap_single(struct amd_io
dma_ops_free_addresses(dma_dom, dma_addr, pages);
if (amd_iommu_unmap_flush || dma_dom->need_flush) {
- iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
+ iommu_flush_pages(iommu, dma_dom->domain.id, flush_addr, size);
dma_dom->need_flush = false;
}
}
file:3925adfba7654c1c40da35e5b30a96141d67ea5d -> file:400be996de7b0e5d49edadc9cd4ff4336d620cef
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -622,6 +622,13 @@ static void __init init_iommu_from_pci(s
iommu->last_device = calc_devid(MMIO_GET_BUS(range),
MMIO_GET_LD(range));
iommu->evt_msi_num = MMIO_MSI_NUM(misc);
+
+ if (is_rd890_iommu(iommu->dev)) {
+ pci_read_config_dword(iommu->dev, 0xf0, &iommu->cache_cfg[0]);
+ pci_read_config_dword(iommu->dev, 0xf4, &iommu->cache_cfg[1]);
+ pci_read_config_dword(iommu->dev, 0xf8, &iommu->cache_cfg[2]);
+ pci_read_config_dword(iommu->dev, 0xfc, &iommu->cache_cfg[3]);
+ }
}
/*
@@ -639,29 +646,9 @@ static void __init init_iommu_from_acpi(
struct ivhd_entry *e;
/*
- * First set the recommended feature enable bits from ACPI
- * into the IOMMU control registers
+ * First save the recommended feature enable bits from ACPI
*/
- h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
- iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
- iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
-
- h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
- iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
- iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
-
- h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
- iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
- iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
-
- h->flags & IVHD_FLAG_ISOC_EN_MASK ?
- iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
- iommu_feature_disable(iommu, CONTROL_ISOC_EN);
-
- /*
- * make IOMMU memory accesses cache coherent
- */
- iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+ iommu->acpi_flags = h->flags;
/*
* Done. Now parse the device entries
@@ -1089,6 +1076,40 @@ static void init_device_table(void)
}
}
+static void iommu_init_flags(struct amd_iommu *iommu)
+{
+ iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
+ iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
+
+ iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
+ iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
+
+ iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
+ iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
+
+ iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
+ iommu_feature_disable(iommu, CONTROL_ISOC_EN);
+
+ /*
+ * make IOMMU memory accesses cache coherent
+ */
+ iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+}
+
+static void iommu_apply_quirks(struct amd_iommu *iommu)
+{
+ if (is_rd890_iommu(iommu->dev)) {
+ pci_write_config_dword(iommu->dev, 0xf0, iommu->cache_cfg[0]);
+ pci_write_config_dword(iommu->dev, 0xf4, iommu->cache_cfg[1]);
+ pci_write_config_dword(iommu->dev, 0xf8, iommu->cache_cfg[2]);
+ pci_write_config_dword(iommu->dev, 0xfc, iommu->cache_cfg[3]);
+ }
+}
+
/*
* This function finally enables all IOMMUs found in the system after
* they have been initialized
@@ -1099,6 +1120,8 @@ static void enable_iommus(void)
for_each_iommu(iommu) {
iommu_disable(iommu);
+ iommu_apply_quirks(iommu);
+ iommu_init_flags(iommu);
iommu_set_device_table(iommu);
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
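
The two RD890 helpers above form a save-at-probe / restore-at-enable pattern: the dwords at config offsets 0xf0-0xfc are snapshotted while the BIOS-programmed values are intact, then written back whenever the IOMMU is enabled, including after S3 resume. The shape of the pattern as a runnable toy (the config space is simulated; the helpers stand in for pci_read_config_dword/pci_write_config_dword):

#include <stdio.h>

static unsigned int cfg_space[4] = { 0xaa, 0xbb, 0xcc, 0xdd };
static unsigned int saved[4];

static void probe(void)			/* init_iommu_from_pci()     */
{
	for (int i = 0; i < 4; i++)
		saved[i] = cfg_space[i];	/* snapshot BIOS values */
}

static void buggy_s3_resume(void)	/* BIOS forgets one register */
{
	cfg_space[1] = 0;
}

static void enable_device(void)		/* iommu_apply_quirks()      */
{
	for (int i = 0; i < 4; i++)
		cfg_space[i] = saved[i];	/* write values back    */
}

int main(void)
{
	probe();
	buggy_s3_resume();
	enable_device();
	printf("register restored to %#x\n", cfg_space[1]);
	return 0;
}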
file:bc9cd5aea79bd74e1ad230c003174a0a70e06627 -> file:6702ab74c58c7dfeea0c6743431e6b9259fe4927
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -51,6 +51,7 @@
#include <asm/smp.h>
#include <asm/mce.h>
#include <asm/kvm_para.h>
+#include <asm/tsc.h>
unsigned int num_processors;
@@ -1172,8 +1173,13 @@ static void __cpuinit lapic_setup_esr(vo
*/
void __cpuinit setup_local_APIC(void)
{
- unsigned int value;
- int i, j;
+ unsigned int value, queued;
+ int i, j, acked = 0;
+ unsigned long long tsc = 0, ntsc;
+ long long max_loops = cpu_khz;
+
+ if (cpu_has_tsc)
+ rdtscll(tsc);
if (disable_apic) {
arch_disable_smp_support();
@@ -1225,13 +1231,32 @@ void __cpuinit setup_local_APIC(void)
* the interrupt. Hence a vector might get locked. It was noticed
* for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
*/
- for (i = APIC_ISR_NR - 1; i >= 0; i--) {
- value = apic_read(APIC_ISR + i*0x10);
- for (j = 31; j >= 0; j--) {
- if (value & (1<<j))
- ack_APIC_irq();
+ do {
+ queued = 0;
+ for (i = APIC_ISR_NR - 1; i >= 0; i--)
+ queued |= apic_read(APIC_IRR + i*0x10);
+
+ for (i = APIC_ISR_NR - 1; i >= 0; i--) {
+ value = apic_read(APIC_ISR + i*0x10);
+ for (j = 31; j >= 0; j--) {
+ if (value & (1<<j)) {
+ ack_APIC_irq();
+ acked++;
+ }
+ }
}
- }
+ if (acked > 256) {
+ printk(KERN_ERR "LAPIC pending interrupts after %d EOI\n",
+ acked);
+ break;
+ }
+ if (cpu_has_tsc) {
+ rdtscll(ntsc);
+ max_loops = (cpu_khz << 10) - (ntsc - tsc);
+ } else
+ max_loops--;
+ } while (queued && max_loops > 0);
+ WARN_ON(max_loops <= 0);
/*
* Now that we are all set up, enable the APIC
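
The new drain loop keeps acking in-service vectors while the IRR shows queued interrupts, bounded twice: by an EOI budget of 256 and by a TSC budget of roughly one second, so a wedged source degrades to a warning instead of a boot hang. The control flow in miniature (registers simulated):

#include <stdio.h>

static unsigned int irr = 0x5;		/* two bits left by "firmware" */

static unsigned int read_irr(void)
{
	return irr;
}

static void ack_one(void)
{
	irr &= irr - 1;			/* clear lowest pending bit */
}

int main(void)
{
	int acked = 0;
	long max_loops = 1000;		/* stand-in for the TSC budget */
	unsigned int queued;

	do {
		queued = read_irr();
		while (read_irr()) {	/* mirror the ISR ack pass */
			ack_one();
			acked++;
		}
		if (acked > 256) {
			printf("pending interrupts after %d EOIs\n", acked);
			break;
		}
		max_loops--;
	} while (queued && max_loops > 0);

	printf("drained after %d EOIs\n", acked);
	return 0;
}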
file:dc4f486fc39e4e481b6942cb8bf94520311bf340 -> file:d850eeb19243c7e582a289ddc6c8bf4d878092b6
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -332,14 +332,19 @@ void arch_init_copy_chip_data(struct irq
old_cfg = old_desc->chip_data;
- memcpy(cfg, old_cfg, sizeof(struct irq_cfg));
+ cfg->vector = old_cfg->vector;
+ cfg->move_in_progress = old_cfg->move_in_progress;
+ cpumask_copy(cfg->domain, old_cfg->domain);
+ cpumask_copy(cfg->old_domain, old_cfg->old_domain);
init_copy_irq_2_pin(old_cfg, cfg, node);
}
-static void free_irq_cfg(struct irq_cfg *old_cfg)
+static void free_irq_cfg(struct irq_cfg *cfg)
{
- kfree(old_cfg);
+ free_cpumask_var(cfg->domain);
+ free_cpumask_var(cfg->old_domain);
+ kfree(cfg);
}
void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
@@ -1405,6 +1410,7 @@ int setup_ioapic_entry(int apic_id, int
irte.dlvry_mode = apic->irq_delivery_mode;
irte.vector = vector;
irte.dest_id = IRTE_DEST(destination);
+ irte.redir_hint = 1;
/* Set source-id of interrupt request */
set_ioapic_sid(&irte, apic_id);
@@ -1484,7 +1490,7 @@ static struct {
static void __init setup_IO_APIC_irqs(void)
{
- int apic_id = 0, pin, idx, irq;
+ int apic_id, pin, idx, irq;
int notcon = 0;
struct irq_desc *desc;
struct irq_cfg *cfg;
@@ -1492,14 +1498,7 @@ static void __init setup_IO_APIC_irqs(vo
apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
-#ifdef CONFIG_ACPI
- if (!acpi_disabled && acpi_ioapic) {
- apic_id = mp_find_ioapic(0);
- if (apic_id < 0)
- apic_id = 0;
- }
-#endif
-
+ for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
idx = find_irq_entry(apic_id, pin, mp_INT);
if (idx == -1) {
@@ -1521,6 +1520,9 @@ static void __init setup_IO_APIC_irqs(vo
irq = pin_2_irq(idx, apic_id, pin);
+ if ((apic_id > 0) && (irq > 16))
+ continue;
+
/*
* Skip the timer IRQ if there's a quirk handler
* installed and if it returns 1:
@@ -1740,6 +1742,8 @@ __apicdebuginit(void) print_IO_APIC(void
struct irq_pin_list *entry;
cfg = desc->chip_data;
+ if (!cfg)
+ continue;
entry = cfg->irq_2_pin;
if (!entry)
continue;
@@ -3286,6 +3290,7 @@ static int msi_compose_msg(struct pci_de
irte.dlvry_mode = apic->irq_delivery_mode;
irte.vector = cfg->vector;
irte.dest_id = IRTE_DEST(dest);
+ irte.redir_hint = 1;
/* Set source-id of interrupt request */
set_msi_sid(&irte, pdev);
@@ -3340,7 +3345,7 @@ static int set_msi_irq_affinity(unsigned
cfg = desc->chip_data;
- read_msi_msg_desc(desc, &msg);
+ get_cached_msi_msg_desc(desc, &msg);
msg.data &= ~MSI_DATA_VECTOR_MASK;
msg.data |= MSI_DATA_VECTOR(cfg->vector);
@@ -4083,27 +4088,23 @@ int acpi_get_override_irq(int bus_irq, i
#ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
{
- int pin, ioapic = 0, irq, irq_entry;
+ int pin, ioapic, irq, irq_entry;
struct irq_desc *desc;
const struct cpumask *mask;
if (skip_ioapic_setup == 1)
return;
-#ifdef CONFIG_ACPI
- if (!acpi_disabled && acpi_ioapic) {
- ioapic = mp_find_ioapic(0);
- if (ioapic < 0)
- ioapic = 0;
- }
-#endif
-
+ for (ioapic = 0; ioapic < nr_ioapics; ioapic++)
for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
irq_entry = find_irq_entry(ioapic, pin, mp_INT);
if (irq_entry == -1)
continue;
irq = pin_2_irq(irq_entry, ioapic, pin);
+ if ((ioapic > 0) && (irq > 16))
+ continue;
+
desc = irq_to_desc(irq);
/*
file:9ee87cfe0859fa042550a32ece8eed5ce2349324 -> file:c7ee9c9e02454fa5ab9bbfc025b8948a187d8847
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -595,9 +595,11 @@ void __init uv_system_init(void)
for (j = 0; j < 64; j++) {
if (!test_bit(j, &present))
continue;
- uv_blade_info[blade].pnode = (i * 64 + j);
+ pnode = (i * 64 + j);
+ uv_blade_info[blade].pnode = pnode;
uv_blade_info[blade].nr_possible_cpus = 0;
uv_blade_info[blade].nr_online_cpus = 0;
+ max_pnode = max(pnode, max_pnode);
blade++;
}
}
@@ -635,10 +637,6 @@ void __init uv_system_init(void)
uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
uv_node_to_blade[nid] = blade;
uv_cpu_to_blade[cpu] = blade;
- max_pnode = max(pnode, max_pnode);
-
- printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, lcpu %d, blade %d\n",
- cpu, apicid, pnode, nid, lcpu, blade);
}
/* Add blade/pnode info for nodes without cpus */
@@ -650,7 +648,6 @@ void __init uv_system_init(void)
pnode = (paddr >> m_val) & pnode_mask;
blade = boot_pnode_to_blade(pnode);
uv_node_to_blade[nid] = blade;
- max_pnode = max(pnode, max_pnode);
}
map_gru_high(max_pnode);
file:cc25c2b4a567c2ca3e020127cefe87b2778f02ee -> file:4e34d10b841f35a46e536acc962b177c29747ce4
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -540,7 +540,7 @@ void __cpuinit cpu_detect(struct cpuinfo
}
}
-static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
+void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
{
u32 tfms, xlvl;
u32 ebx;
@@ -579,6 +579,7 @@ static void __cpuinit get_cpu_cap(struct
if (c->extended_cpuid_level >= 0x80000007)
c->x86_power = cpuid_edx(0x80000007);
+ init_scattered_cpuid_features(c);
}
static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
@@ -727,7 +728,6 @@ static void __cpuinit generic_identify(s
get_model_name(c); /* Default name */
- init_scattered_cpuid_features(c);
detect_nopl(c);
}
file:6de9a908e4008bf6d99e56cb8a9ef909b1f3949c -> file:eb19c0800044b6b2b09e072e4679537f7823715f
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -33,5 +33,6 @@ extern const struct cpu_dev *const __x86
*const __x86_cpu_dev_end[];
extern void display_cacheinfo(struct cpuinfo_x86 *c);
+extern void get_cpu_cap(struct cpuinfo_x86 *c);
#endif
file:8b581d3905cb47214af854582e24a379fac444f3 -> file:acb0115f43dc0ed6b228c41d18b06082c8c5ac81
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -741,6 +741,7 @@ static int acpi_cpufreq_cpu_exit(struct
per_cpu(drv_data, policy->cpu) = NULL;
acpi_processor_unregister_performance(data->acpi_data,
policy->cpu);
+ kfree(data->freq_table);
kfree(data);
}
file:2f12d6dc012e5ab8d6251a2485f6b3afb79643c3 -> file:6a77ccaed1aa8198c7b7e7bb56d31ecd0922120b
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -40,6 +40,7 @@ static void __cpuinit early_init_intel(s
misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
c->cpuid_level = cpuid_eax(0);
+ get_cpu_cap(c);
}
}
file:83a3d1f4efca9ff447e0e2b2f902d4c7d4d16248 -> file:8387792a696b025a4b34acb3ea90bd50670a4aff
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -140,6 +140,7 @@ void mce_amd_feature_init(struct cpuinfo
address = (low & MASK_BLKPTR_LO) >> 21;
if (!address)
break;
+
address += MCG_XBLK_ADDR;
} else
++address;
@@ -147,12 +148,8 @@ void mce_amd_feature_init(struct cpuinfo
if (rdmsr_safe(address, &low, &high))
break;
- if (!(high & MASK_VALID_HI)) {
- if (block)
- continue;
- else
- break;
- }
+ if (!(high & MASK_VALID_HI))
+ continue;
if (!(high & MASK_CNTP_HI) ||
(high & MASK_LOCKED_HI))
file:73c86db5acbebacbbb5a58d9603237fd916d80f2 -> file:650c6a5bdae6afc6e23b985687319fa90fd25a47
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -948,7 +948,7 @@ int __init amd_special_default_mtrr(void
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return 0;
- if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
+ if (boot_cpu_data.x86 < 0xf)
return 0;
/* In case some hypervisor doesn't pass SYSCFG through: */
if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
file:1cbed97b59cf841d8c41b4466ac040cf1c794b03 -> file:9580152f581356151fd0a35eaac6d57ed1d3864f
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -22,6 +22,7 @@
*/
#include <linux/dmi.h>
+#include <linux/jiffies.h>
#include <asm/div64.h>
#include <asm/vmware.h>
#include <asm/x86_init.h>
@@ -50,7 +51,7 @@ static inline int __vmware_platform(void
static unsigned long vmware_get_tsc_khz(void)
{
- uint64_t tsc_hz;
+ uint64_t tsc_hz, lpj;
uint32_t eax, ebx, ecx, edx;
VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
@@ -61,6 +62,13 @@ static unsigned long vmware_get_tsc_khz(
printk(KERN_INFO "TSC freq read from hypervisor : %lu.%03lu MHz\n",
(unsigned long) tsc_hz / 1000,
(unsigned long) tsc_hz % 1000);
+
+ if (!preset_lpj) {
+ lpj = ((u64)tsc_hz * 1000);
+ do_div(lpj, HZ);
+ preset_lpj = lpj;
+ }
+
return tsc_hz;
}
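
The preset_lpj computation above seeds delay-loop calibration from the
hypervisor-reported TSC rate instead of measuring it: lpj is cycles per
jiffy, i.e. (tsc_khz * 1000) / HZ. A minimal userspace sketch of the same
arithmetic, assuming the reported rate is in kHz (as the MHz printout above
implies) and using illustrative values:

        /* Sketch: derive a loops-per-jiffy preset from a TSC rate in kHz. */
        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint64_t tsc_khz = 2494200;     /* illustrative hypervisor value */
                unsigned int hz = 250;          /* kernel tick rate (CONFIG_HZ) */
                uint64_t lpj = tsc_khz * 1000;  /* cycles per second */

                lpj /= hz;                      /* cycles per jiffy */
                printf("preset_lpj = %llu\n", (unsigned long long)lpj);
                return 0;
        }
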
file:045b36cada655370382231cb186d45d5d8820d95 -> file:994828899e098350d12ca73217235af843b0d497
--- a/arch/x86/kernel/crash_dump_64.c
+++ b/arch/x86/kernel/crash_dump_64.c
@@ -34,7 +34,7 @@ ssize_t copy_oldmem_page(unsigned long p
if (!csize)
return 0;
- vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+ vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
if (!vaddr)
return -ENOMEM;
@@ -46,6 +46,7 @@ ssize_t copy_oldmem_page(unsigned long p
} else
memcpy(buf, vaddr + offset, csize);
+ set_iounmap_nonlazy();
iounmap(vaddr);
return csize;
}
file:050c278481b187b6f76528261ea3ce6540c1d82f -> file:34c3308730f878dbe1e871bcd0e2ac17eaf0ba9a
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -324,7 +324,7 @@ ENTRY(startup_32_smp)
/*
* Enable paging
*/
- movl $pa(swapper_pg_dir),%eax
+ movl pa(initial_page_table), %eax
movl %eax,%cr3 /* set the page table pointer.. */
movl %cr0,%eax
orl $X86_CR0_PG,%eax
@@ -604,6 +604,8 @@ ignore_int:
.align 4
ENTRY(initial_code)
.long i386_start_kernel
+ENTRY(initial_page_table)
+ .long pa(swapper_pg_dir)
/*
* BSS section
@@ -619,6 +621,10 @@ ENTRY(swapper_pg_dir)
#endif
swapper_pg_fixmap:
.fill 1024,4,0
+#ifdef CONFIG_X86_TRAMPOLINE
+ENTRY(trampoline_pg_dir)
+ .fill 1024,4,0
+#endif
ENTRY(empty_zero_page)
.fill 4096,1,0
file:19528ef10d311f6bbd31402edfa39a4ae253eaeb -> file:c771e1a37b9de8d7b5f81d79dd6cd33f6ab28e72
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -497,7 +497,7 @@ static int hpet_assign_irq(struct hpet_d
{
unsigned int irq;
- irq = create_irq();
+ irq = create_irq_nr(0, -1);
if (!irq)
return -EINVAL;
file:4006c522adc780a0ce016b43b2d5a7ae93ca0f14 -> file:38faf7211d0b128a62394637005ccf3dd01ebf40
--- a/arch/x86/kernel/olpc.c
+++ b/arch/x86/kernel/olpc.c
@@ -115,6 +115,7 @@ int olpc_ec_cmd(unsigned char cmd, unsig
unsigned long flags;
int ret = -EIO;
int i;
+ int restarts = 0;
spin_lock_irqsave(&ec_lock, flags);
@@ -171,7 +172,9 @@ restart:
if (wait_on_obf(0x6c, 1)) {
printk(KERN_ERR "olpc-ec: timeout waiting for"
" EC to provide data!\n");
- goto restart;
+ if (restarts++ < 10)
+ goto restart;
+ goto err;
}
outbuf[i] = inb(0x68);
printk(KERN_DEBUG "olpc-ec: received 0x%x\n",
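
The olpc-ec change above puts a ceiling on a previously unbounded
"goto restart", so a wedged embedded controller can no longer hang the
kernel in this path. The shape of the fix as a small standalone sketch,
with a hypothetical try_read() standing in for the EC data wait:

        /* Sketch: bound a retry loop that could previously spin forever. */
        #include <stdio.h>

        static int try_read(int attempt)
        {
                return attempt >= 3 ? 0 : -1;   /* pretend try 3 succeeds */
        }

        int main(void)
        {
                int restarts = 0;
                int err;

        restart:
                err = try_read(restarts);
                if (err) {
                        if (restarts++ < 10)
                                goto restart;
                        fprintf(stderr, "giving up\n");
                        return 1;
                }
                printf("succeeded after %d restarts\n", restarts);
                return 0;
        }
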
file:0040164f1a8253c4832ace5e7908c77a73a031fa -> file:12e9feaa2f7aba947b65ca2fe3611be81321a8f1
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -512,6 +512,7 @@ static void __init quirk_amd_nb_node(str
{
struct pci_dev *nb_ht;
unsigned int devfn;
+ u32 node;
u32 val;
devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
@@ -520,7 +521,13 @@ static void __init quirk_amd_nb_node(str
return;
pci_read_config_dword(nb_ht, 0x60, &val);
- set_dev_node(&dev->dev, val & 7);
+ node = val & 7;
+ /*
+ * Some hardware may return an invalid node ID,
+ * so check it first:
+ */
+ if (node_online(node))
+ set_dev_node(&dev->dev, node);
pci_dev_put(nb_ht);
}
file:269c2a3cb43da0e0e41d2a578a6974ddcc7dc706 -> file:200fcde41aa24de89cd5a90021b7b7eb3b18866a
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -633,7 +633,7 @@ void native_machine_shutdown(void)
/* O.K Now that I'm on the appropriate processor,
* stop all of the others.
*/
- smp_send_stop();
+ stop_other_cpus();
#endif
lapic_shutdown();
file:d7a08884993893d4010311b0228956d21baa1089 -> file:5449a26982428bbd092868c89ce621fb4babd666
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -110,6 +110,7 @@
#include <asm/numa_64.h>
#endif
#include <asm/mce.h>
+#include <asm/trampoline.h>
/*
* end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
@@ -998,6 +999,8 @@ void __init setup_arch(char **cmdline_p)
paging_init();
x86_init.paging.pagetable_setup_done(swapper_pg_dir);
+ setup_trampoline_page_table();
+
tboot_probe();
#ifdef CONFIG_X86_64
file:ec1de97600e70638bcfdcb53da52a83bc1288829 -> file:29f0a78ec88743a59f29770d067fc4880ec36656
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -158,10 +158,10 @@ asmlinkage void smp_reboot_interrupt(voi
irq_exit();
}
-static void native_smp_send_stop(void)
+static void native_stop_other_cpus(int wait)
{
unsigned long flags;
- unsigned long wait;
+ unsigned long timeout;
if (reboot_force)
return;
@@ -178,9 +178,12 @@ static void native_smp_send_stop(void)
if (num_online_cpus() > 1) {
apic->send_IPI_allbutself(REBOOT_VECTOR);
- /* Don't wait longer than a second */
- wait = USEC_PER_SEC;
- while (num_online_cpus() > 1 && wait--)
+ /*
+ * Don't wait longer than a second if the caller
+ * didn't ask us to wait.
+ */
+ timeout = USEC_PER_SEC;
+ while (num_online_cpus() > 1 && (wait || timeout--))
udelay(1);
}
@@ -226,7 +229,7 @@ struct smp_ops smp_ops = {
.smp_prepare_cpus = native_smp_prepare_cpus,
.smp_cpus_done = native_smp_cpus_done,
- .smp_send_stop = native_smp_send_stop,
+ .stop_other_cpus = native_stop_other_cpus,
.smp_send_reschedule = native_smp_send_reschedule,
.cpu_up = native_cpu_up,
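
The rename from smp_send_stop() to stop_other_cpus() comes with a semantic
knob: the wait argument. With wait == 0 the sender polls for at most about a
second (the old behaviour); with wait != 0 the timeout is short-circuited and
the loop spins until every other CPU is actually offline. A userspace sketch
of the termination condition, with other_cpus_running() standing in for
num_online_cpus() > 1:

        /* Sketch: the wait/timeout loop used by native_stop_other_cpus(). */
        #include <stdio.h>

        static int remaining = 3;

        static int other_cpus_running(void)
        {
                if (remaining > 0)
                        remaining--;    /* pretend a CPU goes offline per poll */
                return remaining > 0;
        }

        static void stop_other_cpus(int wait)
        {
                unsigned long timeout = 1000000;        /* ~1s at 1us per poll */
                unsigned long polls = 0;

                while (other_cpus_running() && (wait || timeout--))
                        polls++;        /* the real code does udelay(1) here */

                printf("wait=%d: done after %lu polls\n", wait, polls);
        }

        int main(void)
        {
                stop_other_cpus(0);
                remaining = 3;
                stop_other_cpus(1);
                return 0;
        }
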
file:28e963d2bf2ce33ad83500d289a54d6b1d3c1521 -> file:7e8e905e2ccb98c1fe79c11fda27d122420401c5
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -70,7 +70,6 @@
#ifdef CONFIG_X86_32
u8 apicid_2_node[MAX_APICID];
-static int low_mappings;
#endif
/* State of each CPU */
@@ -88,6 +87,25 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
+
+/*
+ * We need this to protect trampoline_base from concurrent accesses
+ * when cores are being offlined and onlined in rapid succession.
+ */
+static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
+
+void cpu_hotplug_driver_lock()
+{
+ mutex_lock(&x86_cpu_hotplug_driver_mutex);
+}
+
+void cpu_hotplug_driver_unlock()
+{
+ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
+}
+
+ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
+ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
#else
static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
#define get_idle_for_cpu(x) (idle_thread_array[(x)])
@@ -273,6 +291,18 @@ notrace static void __cpuinit start_seco
* fragile that we want to limit the things done here to the
* most necessary things.
*/
+
+#ifdef CONFIG_X86_32
+ /*
+ * Switch away from the trampoline page-table
+ *
+ * Do this before cpu_init() because it needs to access per-cpu
+ * data which may not be mapped in the trampoline page-table.
+ */
+ load_cr3(swapper_pg_dir);
+ __flush_tlb_all();
+#endif
+
vmi_bringup();
cpu_init();
preempt_disable();
@@ -291,12 +321,6 @@ notrace static void __cpuinit start_seco
enable_8259A_irq(0);
}
-#ifdef CONFIG_X86_32
- while (low_mappings)
- cpu_relax();
- __flush_tlb_all();
-#endif
-
/* This must be done before setting cpu_online_mask */
set_cpu_sibling_map(raw_smp_processor_id());
wmb();
@@ -722,6 +746,7 @@ do_rest:
#ifdef CONFIG_X86_32
/* Stack for startup_32 can be just as for start_secondary onwards */
irq_ctx_init(cpu);
+ initial_page_table = __pa(&trampoline_pg_dir);
#else
clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
initial_gs = per_cpu_offset(cpu);
@@ -866,20 +891,8 @@ int __cpuinit native_cpu_up(unsigned int
per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
-#ifdef CONFIG_X86_32
- /* init low mem mapping */
- clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
- min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
- flush_tlb_all();
- low_mappings = 1;
-
err = do_boot_cpu(apicid, cpu);
- zap_low_mappings(false);
- low_mappings = 0;
-#else
- err = do_boot_cpu(apicid, cpu);
-#endif
if (err) {
pr_debug("do_boot_cpu failed %d\n", err);
return -EIO;
file:cd022121cab611629ec9b04de51677e44bef3567 -> file:0ac23a7bf6f162edd191e53c61fe2b0cf1da3a17
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -1,6 +1,7 @@
#include <linux/io.h>
#include <asm/trampoline.h>
+#include <asm/pgtable.h>
#include <asm/e820.h>
#if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
@@ -39,3 +40,19 @@ unsigned long __trampinit setup_trampoli
memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
return virt_to_phys(trampoline_base);
}
+
+void __init setup_trampoline_page_table(void)
+{
+#ifdef CONFIG_X86_32
+ /* Copy kernel address range */
+ clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+ KERNEL_PGD_PTRS);
+
+ /* Initialize low mappings */
+ clone_pgd_range(trampoline_pg_dir,
+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+ min_t(unsigned long, KERNEL_PGD_PTRS,
+ KERNEL_PGD_BOUNDARY));
+#endif
+}
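
setup_trampoline_page_table() populates the AP bring-up directory with two
copies of the kernel PGD entries: once at their normal slots, and once
aliased at index 0 so the trampoline's low, identity-mapped addresses
resolve during early secondary boot. A sketch of the two copies on plain
arrays, assuming a 1024-entry directory and a 3G/1G split
(KERNEL_PGD_BOUNDARY = 768); entry values are opaque here:

        /* Sketch: the two clone_pgd_range() copies, modeled on arrays. */
        #include <stdio.h>
        #include <string.h>

        #define PTRS_PER_PGD    1024
        #define BOUNDARY        768     /* pgd_index(PAGE_OFFSET), 3G/1G split */
        #define KERNEL_PTRS     (PTRS_PER_PGD - BOUNDARY)

        int main(void)
        {
                unsigned long swapper[PTRS_PER_PGD], trampoline[PTRS_PER_PGD] = { 0 };
                int i;

                for (i = 0; i < PTRS_PER_PGD; i++)
                        swapper[i] = i + 1;     /* fake pgd entries */

                /* Copy kernel address range to the same slots */
                memcpy(trampoline + BOUNDARY, swapper + BOUNDARY,
                       KERNEL_PTRS * sizeof(unsigned long));

                /* Alias the kernel entries at index 0: low identity mappings */
                memcpy(trampoline, swapper + BOUNDARY,
                       (KERNEL_PTRS < BOUNDARY ? KERNEL_PTRS : BOUNDARY) *
                       sizeof(unsigned long));

                printf("trampoline[0] == swapper[%d]? %s\n", BOUNDARY,
                       trampoline[0] == swapper[BOUNDARY] ? "yes" : "no");
                return 0;
        }
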
file:597683aa5ba0ba3ef800ab39179669ded4e3aa57 -> file:aaefa71888cbedde0947060496a38b4cbf63103b
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -626,6 +626,44 @@ static void set_cyc2ns_scale(unsigned lo
local_irq_restore(flags);
}
+static unsigned long long cyc2ns_suspend;
+
+void save_sched_clock_state(void)
+{
+ if (!sched_clock_stable)
+ return;
+
+ cyc2ns_suspend = sched_clock();
+}
+
+/*
+ * Even on processors with an invariant TSC, the TSC gets reset in some of the
+ * ACPI system sleep states. And on some systems the BIOS seems to reinit the
+ * TSC to an arbitrary value (still sync'd across CPUs) during resume from such
+ * sleep states. To cope with this, recompute the cyc2ns_offset for each cpu so
+ * that sched_clock() continues from the point where it was left off during
+ * suspend.
+ */
+void restore_sched_clock_state(void)
+{
+ unsigned long long offset;
+ unsigned long flags;
+ int cpu;
+
+ if (!sched_clock_stable)
+ return;
+
+ local_irq_save(flags);
+
+ __get_cpu_var(cyc2ns_offset) = 0;
+ offset = cyc2ns_suspend - sched_clock();
+
+ for_each_possible_cpu(cpu)
+ per_cpu(cyc2ns_offset, cpu) = offset;
+
+ local_irq_restore(flags);
+}
+
#ifdef CONFIG_CPU_FREQ
/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
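
The save/restore pair above keeps sched_clock() continuous when the TSC is
reset (or reinitialized by the BIOS) across suspend: the per-cpu offset is
recomputed so the first post-resume reading equals the last pre-suspend one.
The invariant, sketched with a fake cycle counter:

        /* Sketch: keep a derived clock continuous across a counter reset. */
        #include <stdio.h>

        static unsigned long long cycles;       /* stand-in for the TSC */
        static long long offset;

        static unsigned long long sched_clock(void)
        {
                return cycles + offset;         /* ns == cycles for simplicity */
        }

        int main(void)
        {
                unsigned long long saved;

                cycles = 5000000;               /* pre-suspend */
                saved = sched_clock();

                cycles = 42;                    /* counter reset on resume */
                offset = 0;
                offset = saved - sched_clock();

                printf("resumed clock: %llu (saved %llu)\n", sched_clock(), saved);
                return 0;
        }
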
file:8cb4974ff5990c19267077d473caeeb504fe5bae -> file:62f39d79b7754557f0725d5b80b958e5d7c245d8
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -73,7 +73,8 @@ void update_vsyscall_tz(void)
write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
-void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
+void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
+ u32 mult)
{
unsigned long flags;
@@ -82,7 +83,7 @@ void update_vsyscall(struct timespec *wa
vsyscall_gtod_data.clock.vread = clock->vread;
vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
vsyscall_gtod_data.clock.mask = clock->mask;
- vsyscall_gtod_data.clock.mult = clock->mult;
+ vsyscall_gtod_data.clock.mult = mult;
vsyscall_gtod_data.clock.shift = clock->shift;
vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
file:8faa821d1257522d9fe05f4dce5ab737c21cfbbd -> file:3bc270766f474452e05c78bafdae77bfb2025490
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -318,8 +318,32 @@ static u64 *FNAME(fetch)(struct kvm_vcpu
break;
}
- if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
- continue;
+ if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
+ struct kvm_mmu_page *child;
+ unsigned direct_access;
+
+ if (level != gw->level)
+ continue;
+
+ /*
+ * For the direct sp, if the guest pte's dirty bit
+ * changed from clean to dirty, it will corrupt the
+ * sp's access: allow writable in the read-only sp,
+ * so we should update the spte at this point to get
+ * a new sp with the correct access.
+ */
+ direct_access = gw->pt_access & gw->pte_access;
+ if (!is_dirty_gpte(gw->ptes[gw->level - 1]))
+ direct_access &= ~ACC_WRITE_MASK;
+
+ child = page_header(*sptep & PT64_BASE_ADDR_MASK);
+ if (child->role.access == direct_access)
+ continue;
+
+ mmu_page_remove_parent_pte(child, sptep);
+ __set_spte(sptep, shadow_trap_nonpresent_pte);
+ kvm_flush_remote_tlbs(vcpu->kvm);
+ }
if (is_large_pte(*sptep)) {
rmap_remove(vcpu->kvm, sptep);
@@ -336,6 +360,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu
/* advance table_gfn when emulating 1gb pages with 4k */
if (delta == 0)
table_gfn += PT_INDEX(addr, level);
+ access &= gw->pte_access;
} else {
direct = 0;
table_gfn = gw->table_gfn[level - 2];
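
The new shadow-page check above recomputes the access the direct sp should
have been built with: the intersection of the table and pte access bits,
with write permission stripped while the guest pte is still clean. The mask
arithmetic in isolation (the ACC_* encodings below are illustrative, not the
kernel's actual values):

        /* Sketch: the direct_access computation from the fetch path above. */
        #include <stdio.h>

        #define ACC_EXEC_MASK   (1 << 0)
        #define ACC_WRITE_MASK  (1 << 1)
        #define ACC_USER_MASK   (1 << 2)

        int main(void)
        {
                unsigned pt_access  = ACC_WRITE_MASK | ACC_USER_MASK | ACC_EXEC_MASK;
                unsigned pte_access = ACC_WRITE_MASK | ACC_USER_MASK;
                int pte_dirty = 0;
                unsigned direct_access = pt_access & pte_access;

                if (!pte_dirty)
                        direct_access &= ~ACC_WRITE_MASK;       /* clean: no write */

                printf("direct_access = %#x\n", direct_access);
                return 0;
        }
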
file:61ba66988aca6fcd0ec2226d51fb704c6c1703bb -> file:253153d2e3e2b18b0c448b71170afd9ea63220eb
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -621,7 +621,6 @@ static void init_vmcb(struct vcpu_svm *s
control->iopm_base_pa = iopm_base;
control->msrpm_base_pa = __pa(svm->msrpm);
- control->tsc_offset = 0;
control->int_ctl = V_INTR_MASKING_MASK;
init_seg(&save->es);
@@ -754,6 +753,7 @@ static struct kvm_vcpu *svm_create_vcpu(
svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
svm->asid_generation = 0;
init_vmcb(svm);
+ svm->vmcb->control.tsc_offset = 0-native_read_tsc();
fx_init(&svm->vcpu);
svm->vcpu.fpu_active = 1;
@@ -795,17 +795,18 @@ static void svm_vcpu_load(struct kvm_vcp
int i;
if (unlikely(cpu != vcpu->cpu)) {
- u64 tsc_this, delta;
+ u64 delta;
- /*
- * Make sure that the guest sees a monotonically
- * increasing TSC.
- */
- rdtscll(tsc_this);
- delta = vcpu->arch.host_tsc - tsc_this;
- svm->vmcb->control.tsc_offset += delta;
- if (is_nested(svm))
- svm->nested.hsave->control.tsc_offset += delta;
+ if (check_tsc_unstable()) {
+ /*
+ * Make sure that the guest sees a monotonically
+ * increasing TSC.
+ */
+ delta = vcpu->arch.host_tsc - native_read_tsc();
+ svm->vmcb->control.tsc_offset += delta;
+ if (is_nested(svm))
+ svm->nested.hsave->control.tsc_offset += delta;
+ }
vcpu->cpu = cpu;
kvm_migrate_timers(vcpu);
svm->asid_generation = 0;
@@ -2111,7 +2112,7 @@ static int cpuid_interception(struct vcp
static int iret_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
++svm->vcpu.stat.nmi_window_exits;
- svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
+ svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
svm->vcpu.arch.hflags |= HF_IRET_MASK;
return 1;
}
@@ -2506,7 +2507,7 @@ static void svm_inject_nmi(struct kvm_vc
svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
vcpu->arch.hflags |= HF_NMI_MASK;
- svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
+ svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
++vcpu->stat.nmi_injections;
}
@@ -2697,8 +2698,8 @@ static void svm_vcpu_run(struct kvm_vcpu
sync_lapic_to_cr8(vcpu);
save_host_msrs(vcpu);
- fs_selector = kvm_read_fs();
- gs_selector = kvm_read_gs();
+ savesegment(fs, fs_selector);
+ savesegment(gs, gs_selector);
ldt_selector = kvm_read_ldt();
svm->vmcb->save.cr2 = vcpu->arch.cr2;
/* required for live migration with NPT */
@@ -2785,10 +2786,15 @@ static void svm_vcpu_run(struct kvm_vcpu
vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
- kvm_load_fs(fs_selector);
- kvm_load_gs(gs_selector);
- kvm_load_ldt(ldt_selector);
load_host_msrs(vcpu);
+ loadsegment(fs, fs_selector);
+#ifdef CONFIG_X86_64
+ load_gs_index(gs_selector);
+ wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+#else
+ loadsegment(gs, gs_selector);
+#endif
+ kvm_load_ldt(ldt_selector);
reload_tss(vcpu);
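
The reworked svm_vcpu_load() only compensates when the host TSC is unstable.
Since the guest reads host_tsc + tsc_offset, adding
delta = old_host_tsc - new_host_tsc to the offset makes the guest-visible
TSC identical on both sides of the migration point, so it never jumps
backwards. A worked example with illustrative counter values:

        /* Sketch: guest TSC monotonicity across unsynchronized host CPUs. */
        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint64_t tsc_offset = (uint64_t)-1000000;       /* guest behind host */
                uint64_t old_host_tsc = 9000000;                /* at vcpu_put */
                uint64_t new_host_tsc = 2000000;                /* new CPU, smaller TSC */
                uint64_t delta = old_host_tsc - new_host_tsc;

                tsc_offset += delta;
                printf("guest tsc at load: %llu\n",
                       (unsigned long long)(new_host_tsc + tsc_offset));
                /* equals old_host_tsc + original offset == 8000000: no jump back */
                return 0;
        }
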
file:6a28d5d95e8fe88f7a5ee7f90ac0b2ae950ec55a -> file:d9c4fb6f0ff37545f95c29c520c3c9dde17c06c5
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -130,6 +130,7 @@ static u64 construct_eptp(unsigned long
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu);
+static DEFINE_PER_CPU(struct desc_ptr, host_gdt);
static unsigned long *vmx_io_bitmap_a;
static unsigned long *vmx_io_bitmap_b;
@@ -628,7 +629,7 @@ static void vmx_save_host_state(struct k
*/
vmx->host_state.ldt_sel = kvm_read_ldt();
vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
- vmx->host_state.fs_sel = kvm_read_fs();
+ savesegment(fs, vmx->host_state.fs_sel);
if (!(vmx->host_state.fs_sel & 7)) {
vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
vmx->host_state.fs_reload_needed = 0;
@@ -636,7 +637,7 @@ static void vmx_save_host_state(struct k
vmcs_write16(HOST_FS_SELECTOR, 0);
vmx->host_state.fs_reload_needed = 1;
}
- vmx->host_state.gs_sel = kvm_read_gs();
+ savesegment(gs, vmx->host_state.gs_sel);
if (!(vmx->host_state.gs_sel & 7))
vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
else {
@@ -653,10 +654,7 @@ static void vmx_save_host_state(struct k
#endif
#ifdef CONFIG_X86_64
- if (is_long_mode(&vmx->vcpu))
- save_msrs(vmx->host_msrs +
- vmx->msr_offset_kernel_gs_base, 1);
-
+ save_msrs(vmx->host_msrs + vmx->msr_offset_kernel_gs_base, 1);
#endif
load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
load_transition_efer(vmx);
@@ -664,32 +662,36 @@ static void vmx_save_host_state(struct k
static void __vmx_load_host_state(struct vcpu_vmx *vmx)
{
- unsigned long flags;
-
if (!vmx->host_state.loaded)
return;
++vmx->vcpu.stat.host_state_reload;
vmx->host_state.loaded = 0;
if (vmx->host_state.fs_reload_needed)
- kvm_load_fs(vmx->host_state.fs_sel);
+ loadsegment(fs, vmx->host_state.fs_sel);
+#ifdef CONFIG_X86_64
+ if (is_long_mode(&vmx->vcpu))
+ save_msrs(vmx->guest_msrs + vmx->msr_offset_kernel_gs_base, 1);
+#endif
if (vmx->host_state.gs_ldt_reload_needed) {
kvm_load_ldt(vmx->host_state.ldt_sel);
- /*
- * If we have to reload gs, we must take care to
- * preserve our gs base.
- */
- local_irq_save(flags);
- kvm_load_gs(vmx->host_state.gs_sel);
#ifdef CONFIG_X86_64
- wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
+ load_gs_index(vmx->host_state.gs_sel);
+#else
+ loadsegment(gs, vmx->host_state.gs_sel);
#endif
- local_irq_restore(flags);
}
reload_tss();
+#ifdef CONFIG_X86_64
+ save_msrs(vmx->guest_msrs, vmx->msr_offset_kernel_gs_base);
+ save_msrs(vmx->guest_msrs + vmx->msr_offset_kernel_gs_base + 1,
+ vmx->save_nmsrs - vmx->msr_offset_kernel_gs_base - 1);
+#else
save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
+#endif
load_msrs(vmx->host_msrs, vmx->save_nmsrs);
reload_host_efer(vmx);
+ load_gdt(&__get_cpu_var(host_gdt));
}
static void vmx_load_host_state(struct vcpu_vmx *vmx)
@@ -1176,6 +1178,8 @@ static void hardware_enable(void *garbag
asm volatile (ASM_VMX_VMXON_RAX
: : "a"(&phys_addr), "m"(phys_addr)
: "memory", "cc");
+
+ store_gdt(&__get_cpu_var(host_gdt));
}
static void vmclear_local_vcpus(void)
@@ -2338,8 +2342,8 @@ static int vmx_vcpu_setup(struct vcpu_vm
vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
- vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs()); /* 22.2.4 */
- vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs()); /* 22.2.4 */
+ vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */
+ vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */
vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
#ifdef CONFIG_X86_64
rdmsrl(MSR_FS_BASE, a);
file:281ac630d246f82d6b70583d31baa846c8128a50 -> file:b2c02a2b00381ca22e47df3180d03980fcf810c7
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1485,7 +1485,7 @@ static void do_cpuid_ent(struct kvm_cpui
const u32 kvm_supported_word6_x86_features =
F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
- F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
+ F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
0 /* SKINIT */ | 0 /* WDT */;
/* all calls to cpuid_count() should be made on the same cpu */
@@ -2102,6 +2102,7 @@ static int kvm_vm_ioctl_get_pit2(struct
sizeof(ps->channels));
ps->flags = kvm->arch.vpit->pit_state.flags;
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+ memset(&ps->reserved, 0, sizeof(ps->reserved));
return r;
}
@@ -2439,6 +2440,7 @@ long kvm_arch_vm_ioctl(struct file *filp
now_ns = timespec_to_ns(&now);
user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
user_ns.flags = 0;
+ memset(&user_ns.pad, 0, sizeof(user_ns.pad));
r = -EFAULT;
if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
@@ -2782,6 +2784,9 @@ int emulator_get_dr(struct x86_emulate_c
{
struct kvm_vcpu *vcpu = ctxt->vcpu;
+ if (!kvm_x86_ops->get_dr)
+ return X86EMUL_UNHANDLEABLE;
+
switch (dr) {
case 0 ... 3:
*dest = kvm_x86_ops->get_dr(vcpu, dr);
@@ -2797,6 +2802,9 @@ int emulator_set_dr(struct x86_emulate_c
unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
int exception;
+ if (!kvm_x86_ops->set_dr)
+ return X86EMUL_UNHANDLEABLE;
+
kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
if (exception) {
/* FIXME: better handling */
file:f4cee9028cf0b01e11951662b625f63371f627e6 -> file:1739358b444d834b1adecdd02681e45216ae0eed
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -801,8 +801,10 @@ do_sigbus(struct pt_regs *regs, unsigned
up_read(&mm->mmap_sem);
/* Kernel mode? Handle exceptions or die: */
- if (!(error_code & PF_USER))
+ if (!(error_code & PF_USER)) {
no_context(regs, error_code, address);
+ return;
+ }
/* User-space => ok to do another page fault: */
if (is_prefetch(regs, error_code, address))
file:5a4398a6006bcca5a0bce02e53f06601f399a09e -> file:7d095ad545358b5868a8e4d9331d181f8a9844bc
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -49,6 +49,7 @@
#include <asm/numa.h>
#include <asm/cacheflush.h>
#include <asm/init.h>
+#include <linux/bootmem.h>
static unsigned long dma_reserve __initdata;
@@ -615,6 +616,21 @@ void __init paging_init(void)
*/
#ifdef CONFIG_MEMORY_HOTPLUG
/*
+ * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
+ * updating.
+ */
+static void update_end_of_memory_vars(u64 start, u64 size)
+{
+ unsigned long end_pfn = PFN_UP(start + size);
+
+ if (end_pfn > max_pfn) {
+ max_pfn = end_pfn;
+ max_low_pfn = end_pfn;
+ high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
+ }
+}
+
+/*
* Memory is added always to NORMAL zone. This means you will never get
* additional DMA/DMA32 memory.
*/
@@ -633,6 +649,9 @@ int arch_add_memory(int nid, u64 start,
ret = __add_pages(nid, zone, start_pfn, nr_pages);
WARN_ON_ONCE(ret);
+ /* update max_pfn, max_low_pfn and high_memory */
+ update_end_of_memory_vars(start, size);
+
return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);
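
update_end_of_memory_vars() rounds the hot-added range up to a whole page
frame and only ever grows max_pfn/max_low_pfn. The rounding is the standard
PFN_UP, sketched here with 4K pages and an intentionally unaligned size:

        /* Sketch: PFN_UP rounding and the grow-only update after hotplug. */
        #include <stdio.h>

        #define PAGE_SHIFT      12
        #define PAGE_SIZE       (1UL << PAGE_SHIFT)
        #define PFN_UP(x)       (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

        static unsigned long long max_pfn = 0x100000;   /* 4GB of 4K pages */

        int main(void)
        {
                unsigned long long start = 0x100000000ULL;      /* hot-add at 4GB */
                unsigned long long size  = 0x40000001ULL;       /* 1GB + 1 byte */
                unsigned long long end_pfn = PFN_UP(start + size);

                if (end_pfn > max_pfn)
                        max_pfn = end_pfn;
                printf("max_pfn = %#llx\n", max_pfn);   /* one extra page, rounded up */
                return 0;
        }
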
file:f1fb411d9e4fa68948c2e5997759e3e47f69a06c -> file:ca6b33667f54c29d6f5f87126c632e9448d5bba2
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -518,8 +518,13 @@ static int __init init_sysfs(void)
int error;
error = sysdev_class_register(&oprofile_sysclass);
- if (!error)
- error = sysdev_register(&device_oprofile);
+ if (error)
+ return error;
+
+ error = sysdev_register(&device_oprofile);
+ if (error)
+ sysdev_class_unregister(&oprofile_sysclass);
+
return error;
}
@@ -530,8 +535,10 @@ static void exit_sysfs(void)
}
#else
-#define init_sysfs() do { } while (0)
-#define exit_sysfs() do { } while (0)
+
+static inline int init_sysfs(void) { return 0; }
+static inline void exit_sysfs(void) { }
+
#endif /* CONFIG_PM */
static int __init p4_init(char **cpu_type)
@@ -584,6 +591,18 @@ static int __init ppro_init(char **cpu_t
if (force_arch_perfmon && cpu_has_arch_perfmon)
return 0;
+ /*
+ * Documentation on identifying Intel processors by CPU family
+ * and model can be found in the Intel Software Developer's
+ * Manuals (SDM):
+ *
+ * http://www.intel.com/products/processor/manuals/
+ *
+ * As of May 2010 the documentation for this was in the:
+ * "Intel 64 and IA-32 Architectures Software Developer's
+ * Manual Volume 3B: System Programming Guide", "Table B-1
+ * CPUID Signature Values of DisplayFamily_DisplayModel".
+ */
switch (cpu_model) {
case 0 ... 2:
*cpu_type = "i386/ppro";
@@ -602,15 +621,19 @@ static int __init ppro_init(char **cpu_t
case 14:
*cpu_type = "i386/core";
break;
- case 15: case 23:
+ case 0x0f:
+ case 0x16:
+ case 0x17:
+ case 0x1d:
*cpu_type = "i386/core_2";
break;
+ case 0x1a:
+ case 0x1e:
case 0x2e:
- case 26:
spec = &op_arch_perfmon_spec;
*cpu_type = "i386/core_i7";
break;
- case 28:
+ case 0x1c:
*cpu_type = "i386/atom";
break;
default:
@@ -632,6 +655,8 @@ int __init op_nmi_init(struct oprofile_o
char *cpu_type = NULL;
int ret = 0;
+ using_nmi = 0;
+
if (!cpu_has_apic)
return -ENODEV;
@@ -714,7 +739,10 @@ int __init op_nmi_init(struct oprofile_o
mux_init(ops);
- init_sysfs();
+ ret = init_sysfs();
+ if (ret)
+ return ret;
+
using_nmi = 1;
printk(KERN_INFO "oprofile: using NMI interrupt.\n");
return 0;
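
The init_sysfs() rework above turns a silent best-effort registration into
the usual register-then-unwind pattern: if the second registration fails,
the first is undone and the error is propagated (op_nmi_init() now checks
it). The shape of the pattern, with stub register functions simulating a
failure:

        /* Sketch: register-then-unwind error handling, as in init_sysfs(). */
        #include <stdio.h>

        static int register_class(void)         { return 0; }
        static void unregister_class(void)      { puts("class unregistered"); }
        static int register_device(void)        { return -1; /* simulate failure */ }

        static int init_sysfs(void)
        {
                int error = register_class();

                if (error)
                        return error;

                error = register_device();
                if (error)
                        unregister_class();     /* undo the earlier step */

                return error;
        }

        int main(void)
        {
                printf("init_sysfs() = %d\n", init_sysfs());
                return 0;
        }
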
file:eeeb52276271e1e3d8759e0d59100ae9eff91405 -> file:fa0f651c573eb596eb4397d3fb95f55d5637b8f9
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -112,6 +112,7 @@ static void __save_processor_state(struc
void save_processor_state(void)
{
__save_processor_state(&saved_context);
+ save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
@@ -253,6 +254,7 @@ static void __restore_processor_state(st
void restore_processor_state(void)
{
__restore_processor_state(&saved_context);
+ restore_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
file:3578688ab670e4298e569021ed8a4495edf74568 -> file:0087b0098903a6c231d5184a78f07981329d6efe
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -924,7 +924,7 @@ static const struct pv_init_ops xen_init
};
static const struct pv_time_ops xen_time_ops __initdata = {
- .sched_clock = xen_sched_clock,
+ .sched_clock = xen_clocksource_read,
};
static const struct pv_cpu_ops xen_cpu_ops __initdata = {
@@ -997,10 +997,6 @@ static void xen_reboot(int reason)
{
struct sched_shutdown r = { .reason = reason };
-#ifdef CONFIG_SMP
- smp_send_stop();
-#endif
-
if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
BUG();
}
file:360f8d8c19cd2e933fb010f7f7cfa990fc042823 -> file:ca5f56e9aaa0af9754364bc33d3009aef0a913fc
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -396,9 +396,9 @@ static void stop_self(void *v)
BUG();
}
-static void xen_smp_send_stop(void)
+static void xen_stop_other_cpus(int wait)
{
- smp_call_function(stop_self, NULL, 0);
+ smp_call_function(stop_self, NULL, wait);
}
static void xen_smp_send_reschedule(int cpu)
@@ -466,7 +466,7 @@ static const struct smp_ops xen_smp_ops
.cpu_disable = xen_cpu_disable,
.play_dead = xen_play_dead,
- .smp_send_stop = xen_smp_send_stop,
+ .stop_other_cpus = xen_stop_other_cpus,
.smp_send_reschedule = xen_smp_send_reschedule,
.send_call_func_ipi = xen_smp_send_call_function_ipi,
file:9d1f853120d859cfc814e0f2eaa2e01b3f1575e8 -> file:8e04980d4697279ef461a6412053f48b48b429ce
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -154,45 +154,6 @@ static void do_stolen_accounting(void)
account_idle_ticks(ticks);
}
-/*
- * Xen sched_clock implementation. Returns the number of unstolen
- * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED
- * states.
- */
-unsigned long long xen_sched_clock(void)
-{
- struct vcpu_runstate_info state;
- cycle_t now;
- u64 ret;
- s64 offset;
-
- /*
- * Ideally sched_clock should be called on a per-cpu basis
- * anyway, so preempt should already be disabled, but that's
- * not current practice at the moment.
- */
- preempt_disable();
-
- now = xen_clocksource_read();
-
- get_runstate_snapshot(&state);
-
- WARN_ON(state.state != RUNSTATE_running);
-
- offset = now - state.state_entry_time;
- if (offset < 0)
- offset = 0;
-
- ret = state.time[RUNSTATE_blocked] +
- state.time[RUNSTATE_running] +
- offset;
-
- preempt_enable();
-
- return ret;
-}
-
-
/* Get the TSC speed from Xen */
unsigned long xen_tsc_khz(void)
{
file:9083cf0180cc8a296d2529a4ef9d9943ea5d05c1 -> file:30a7e51589305d9504b6689c5f2587d5234ce03f
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -205,6 +205,8 @@ int blk_rq_map_user_iov(struct request_q
unaligned = 1;
break;
}
+ if (!iov[i].iov_len)
+ return -EINVAL;
}
if (unaligned || (q->dma_pad_mask & len) || map_data)
file:9651c0a5abe0d7d371b3c0c15386c2c36874cb1a -> file:7c7b8c1c190f75faadb5318ded7cfa1febc71a8d
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -352,7 +352,7 @@ EXPORT_SYMBOL(blk_queue_logical_block_si
* hardware can operate on without reverting to read-modify-write
* operations.
*/
-void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
+void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
q->limits.physical_block_size = size;
file:0676301f16d0a619bd76261ebb5560040ce1a4e9 -> file:7154a7a7e9ca6240c30ccf4cf5d5213edd3d0c18
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -424,7 +424,7 @@ static int blk_complete_sgv4_hdr_rq(stru
/*
* fill in all the output members
*/
- hdr->device_status = status_byte(rq->errors);
+ hdr->device_status = rq->errors & 0xff;
hdr->transport_status = host_byte(rq->errors);
hdr->driver_status = driver_byte(rq->errors);
hdr->info = 0;
file:e5b10017a50b121a9288d6d0354bfe9ebb15e499 -> file:1d5a7805446b2dfd087df4538cacba86cea4d8ba
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -319,33 +319,47 @@ static int sg_io(struct request_queue *q
if (hdr->iovec_count) {
const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
size_t iov_data_len;
- struct sg_iovec *iov;
+ struct sg_iovec *sg_iov;
+ struct iovec *iov;
+ int i;
- iov = kmalloc(size, GFP_KERNEL);
- if (!iov) {
+ sg_iov = kmalloc(size, GFP_KERNEL);
+ if (!sg_iov) {
ret = -ENOMEM;
goto out;
}
- if (copy_from_user(iov, hdr->dxferp, size)) {
- kfree(iov);
+ if (copy_from_user(sg_iov, hdr->dxferp, size)) {
+ kfree(sg_iov);
ret = -EFAULT;
goto out;
}
+ /*
+ * Sum up the vecs, making sure they don't overflow
+ */
+ iov = (struct iovec *) sg_iov;
+ iov_data_len = 0;
+ for (i = 0; i < hdr->iovec_count; i++) {
+ if (iov_data_len + iov[i].iov_len < iov_data_len) {
+ kfree(sg_iov);
+ ret = -EINVAL;
+ goto out;
+ }
+ iov_data_len += iov[i].iov_len;
+ }
+
/* SG_IO howto says that the shorter of the two wins */
- iov_data_len = iov_length((struct iovec *)iov,
- hdr->iovec_count);
if (hdr->dxfer_len < iov_data_len) {
- hdr->iovec_count = iov_shorten((struct iovec *)iov,
+ hdr->iovec_count = iov_shorten(iov,
hdr->iovec_count,
hdr->dxfer_len);
iov_data_len = hdr->dxfer_len;
}
- ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count,
+ ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
iov_data_len, GFP_KERNEL);
- kfree(iov);
+ kfree(sg_iov);
} else if (hdr->dxfer_len)
ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
GFP_KERNEL);
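
sg_io() now sums the user-supplied iovec lengths itself so it can reject
wraparound: in unsigned arithmetic, total + len < total holds exactly when
the addition overflowed. The check in isolation, fed a hostile length:

        /* Sketch: overflow-safe summation of untrusted iovec lengths. */
        #include <stdio.h>
        #include <stddef.h>

        int main(void)
        {
                size_t lens[] = { 4096, (size_t)-4096, 512 };   /* hostile input */
                size_t total = 0;
                unsigned i;

                for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
                        if (total + lens[i] < total) {
                                puts("rejected: iovec length overflow");
                                return 1;
                        }
                        total += lens[i];
                }
                printf("total = %zu\n", total);
                return 0;
        }
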
file:6d5b746637be226da5d9aac78af39a9851188464 -> file:2a4106d37946e02f302842671f9b3e55c9b99d8d
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1477,9 +1477,54 @@ static int alg_test_cprng(const struct a
return err;
}
+static int alg_test_null(const struct alg_test_desc *desc,
+ const char *driver, u32 type, u32 mask)
+{
+ return 0;
+}
+
/* Please keep this list sorted by algorithm name. */
static const struct alg_test_desc alg_test_descs[] = {
{
+ .alg = "__driver-cbc-aes-aesni",
+ .test = alg_test_null,
+ .suite = {
+ .cipher = {
+ .enc = {
+ .vecs = NULL,
+ .count = 0
+ },
+ .dec = {
+ .vecs = NULL,
+ .count = 0
+ }
+ }
+ }
+ }, {
+ .alg = "__driver-ecb-aes-aesni",
+ .test = alg_test_null,
+ .suite = {
+ .cipher = {
+ .enc = {
+ .vecs = NULL,
+ .count = 0
+ },
+ .dec = {
+ .vecs = NULL,
+ .count = 0
+ }
+ }
+ }
+ }, {
+ .alg = "__ghash-pclmulqdqni",
+ .test = alg_test_null,
+ .suite = {
+ .hash = {
+ .vecs = NULL,
+ .count = 0
+ }
+ }
+ }, {
.alg = "ansi_cprng",
.test = alg_test_cprng,
.fips_allowed = 1,
@@ -1623,6 +1668,30 @@ static const struct alg_test_desc alg_te
}
}
}, {
+ .alg = "cryptd(__driver-ecb-aes-aesni)",
+ .test = alg_test_null,
+ .suite = {
+ .cipher = {
+ .enc = {
+ .vecs = NULL,
+ .count = 0
+ },
+ .dec = {
+ .vecs = NULL,
+ .count = 0
+ }
+ }
+ }
+ }, {
+ .alg = "cryptd(__ghash-pclmulqdqni)",
+ .test = alg_test_null,
+ .suite = {
+ .hash = {
+ .vecs = NULL,
+ .count = 0
+ }
+ }
+ }, {
.alg = "ctr(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
@@ -1668,6 +1737,21 @@ static const struct alg_test_desc alg_te
}
}
}
+ }, {
+ .alg = "ecb(__aes-aesni)",
+ .test = alg_test_null,
+ .suite = {
+ .cipher = {
+ .enc = {
+ .vecs = NULL,
+ .count = 0
+ },
+ .dec = {
+ .vecs = NULL,
+ .count = 0
+ }
+ }
+ }
}, {
.alg = "ecb(aes)",
.test = alg_test_skcipher,
file:81e64f478679be04dafdb1a1786e34e72906c310 -> file:d46e2565ae47244b0f64d5584ea5baef6ea64397
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -846,6 +846,7 @@ struct acpi_bit_register_info {
ACPI_BITMASK_POWER_BUTTON_STATUS | \
ACPI_BITMASK_SLEEP_BUTTON_STATUS | \
ACPI_BITMASK_RT_CLOCK_STATUS | \
+ ACPI_BITMASK_PCIEXP_WAKE_DISABLE | \
ACPI_BITMASK_WAKE_STATUS)
#define ACPI_BITMASK_TIMER_ENABLE 0x0001
file:23e5a0519af552d27ddcb9f8c14aa38062b434ea -> file:5624d7bc24af367361bcc05e70cee3d3aa1286d9
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -185,6 +185,12 @@ static int __init dmi_disable_osi_vista(
acpi_osi_setup("!Windows 2006");
return 0;
}
+static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
+{
+ printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
+ acpi_osi_setup("!Windows 2009");
+ return 0;
+}
static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
{
@@ -211,6 +217,38 @@ static struct dmi_system_id acpi_osi_dmi
DMI_MATCH(DMI_PRODUCT_NAME, "Sony VGN-SR290J"),
},
},
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Toshiba Satellite L355",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win7,
+ .ident = "ASUS K50IJ",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Toshiba P305D",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
+ },
+ },
/*
* BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
file:ec742a4e563505704dbc3e92137aecf324d04aaa -> file:71024740d62b178c784fd46c37b9ffca8c76f1a0
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -134,12 +134,6 @@ static int set_no_mwait(const struct dmi
static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
{
- set_no_mwait, "IFL91 board", {
- DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
- DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
- DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
- {
set_no_mwait, "Extensa 5220", {
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
file:8ba0ed0b9ddbc912684d398e2eda14357920aed4 -> file:40d395efec1e9dafab9db46b499956b64f314d42
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -356,7 +356,11 @@ static int acpi_processor_get_performanc
if (result)
goto update_bios;
- return 0;
+ /* We need to call _PPC once when cpufreq starts */
+ if (ignore_ppc != 1)
+ result = acpi_processor_get_platform_limit(pr);
+
+ return result;
/*
* Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that
file:cb05205da586192689b0dcce1ed3ea7aa04035d4 -> file:e3d9816b2a223cf465125bd89aef6c52c1b5fa9b
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -3037,6 +3037,16 @@ static int ahci_init_one(struct pci_dev
if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
return -ENODEV;
+ /*
+ * For some reason, MCP89 on MacBook 7,1 doesn't work with
+ * ahci, use ata_generic instead.
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
+ pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA &&
+ pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
+ pdev->subsystem_device == 0xcb89)
+ return -ENODEV;
+
/* acquire resources */
rc = pcim_enable_device(pdev);
if (rc)
file:ecfd22b4f1ce9743afb8d9c52c308a6cdfa556a4 -> file:99e719619e7d4f504150cfa95823154e884faad6
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -32,6 +32,11 @@
* A generic parallel ATA driver using libata
*/
+enum {
+ ATA_GEN_CLASS_MATCH = (1 << 0),
+ ATA_GEN_FORCE_DMA = (1 << 1),
+};
+
/**
* generic_set_mode - mode setting
* @link: link to set up
@@ -46,13 +51,17 @@
static int generic_set_mode(struct ata_link *link, struct ata_device **unused)
{
struct ata_port *ap = link->ap;
+ const struct pci_device_id *id = ap->host->private_data;
int dma_enabled = 0;
struct ata_device *dev;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- /* Bits 5 and 6 indicate if DMA is active on master/slave */
- if (ap->ioaddr.bmdma_addr)
+ if (id->driver_data & ATA_GEN_FORCE_DMA) {
+ dma_enabled = 0xff;
+ } else if (ap->ioaddr.bmdma_addr) {
+ /* Bits 5 and 6 indicate if DMA is active on master/slave */
dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+ }
if (pdev->vendor == PCI_VENDOR_ID_CENATEK)
dma_enabled = 0xFF;
@@ -126,7 +135,7 @@ static int ata_generic_init_one(struct p
const struct ata_port_info *ppi[] = { &info, NULL };
/* Don't use the generic entry unless instructed to do so */
- if (id->driver_data == 1 && all_generic_ide == 0)
+ if ((id->driver_data & ATA_GEN_CLASS_MATCH) && all_generic_ide == 0)
return -ENODEV;
/* Devices that need care */
@@ -155,7 +164,7 @@ static int ata_generic_init_one(struct p
return rc;
pcim_pin_device(dev);
}
- return ata_pci_sff_init_one(dev, ppi, &generic_sht, NULL);
+ return ata_pci_sff_init_one(dev, ppi, &generic_sht, (void *)id);
}
static struct pci_device_id ata_generic[] = {
@@ -167,12 +176,21 @@ static struct pci_device_id ata_generic[
{ PCI_DEVICE(PCI_VENDOR_ID_HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561), },
{ PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), },
- { PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE), },
+ { PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE),
+ .driver_data = ATA_GEN_FORCE_DMA },
+ /*
+ * For some reason, MCP89 on MacBook 7,1 doesn't work with
+ * ahci, use ata_generic instead.
+ */
+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA,
+ PCI_VENDOR_ID_APPLE, 0xcb89,
+ .driver_data = ATA_GEN_FORCE_DMA },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), },
/* Must come last. If you add entries adjust this table appropriately */
- { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1},
+ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL),
+ .driver_data = ATA_GEN_CLASS_MATCH },
{ 0, },
};
file:4f94e2275038d497b2c8acfb8931f516f849d826 -> file:c33591d3a5999a4e624578132b53eea00d820a98
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -157,6 +157,7 @@ struct piix_map_db {
struct piix_host_priv {
const int *map;
u32 saved_iocfg;
+ spinlock_t sidpr_lock; /* FIXME: remove once locking in EH is fixed */
void __iomem *sidpr;
};
@@ -948,12 +949,15 @@ static int piix_sidpr_scr_read(struct at
unsigned int reg, u32 *val)
{
struct piix_host_priv *hpriv = link->ap->host->private_data;
+ unsigned long flags;
if (reg >= ARRAY_SIZE(piix_sidx_map))
return -EINVAL;
+ spin_lock_irqsave(&hpriv->sidpr_lock, flags);
piix_sidpr_sel(link, reg);
*val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
+ spin_unlock_irqrestore(&hpriv->sidpr_lock, flags);
return 0;
}
@@ -961,12 +965,15 @@ static int piix_sidpr_scr_write(struct a
unsigned int reg, u32 val)
{
struct piix_host_priv *hpriv = link->ap->host->private_data;
+ unsigned long flags;
if (reg >= ARRAY_SIZE(piix_sidx_map))
return -EINVAL;
+ spin_lock_irqsave(&hpriv->sidpr_lock, flags);
piix_sidpr_sel(link, reg);
iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
+ spin_unlock_irqrestore(&hpriv->sidpr_lock, flags);
return 0;
}
@@ -1555,6 +1562,7 @@ static int __devinit piix_init_one(struc
hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv)
return -ENOMEM;
+ spin_lock_init(&hpriv->sidpr_lock);
/* Save IOCFG, this will be used for cable detection, quirk
* detection and restoration on detach. This is necessary
file:6a96da69c482abef92f6c0cc7ce0d2113706bd30 -> file:0963cd6a1425aa479231d028e6003bd30b3d86e2
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5504,6 +5504,7 @@ static int ata_host_request_pm(struct at
*/
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
+ unsigned int ehi_flags = ATA_EHI_QUIET;
int rc;
/*
@@ -5512,7 +5513,18 @@ int ata_host_suspend(struct ata_host *ho
*/
ata_lpm_enable(host);
- rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
+ /*
+ * On some hardware, the device fails to respond after being spun
+ * down for suspend. As the device won't be used before being
+ * resumed, we don't need to touch the device. Ask EH to skip
+ * the usual stuff and proceed directly to suspend.
+ *
+ * http://thread.gmane.org/gmane.linux.ide/46764
+ */
+ if (mesg.event == PM_EVENT_SUSPEND)
+ ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
+
+ rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
if (rc == 0)
host->dev->power.power_state = mesg;
return rc;
file:e30b9e74794b2966571b0fb660544bd329304c65 -> file:fa9bed06b397b3c845e75a3d1820df8e524a4a27
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -3149,6 +3149,10 @@ static int ata_eh_skip_recovery(struct a
if (link->flags & ATA_LFLAG_DISABLED)
return 1;
+ /* skip if explicitly requested */
+ if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
+ return 1;
+
/* thaw frozen port and recover failed devices */
if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
return 0;
file:b4ee28dec5218b19db9682e729458e4d6b30d2bb -> file:a158a6c925d07149fee6ca10f08e89ac65b0ed52
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2497,8 +2497,11 @@ static void atapi_qc_complete(struct ata
*
* If door lock fails, always clear sdev->locked to
* avoid this infinite loop.
+ *
+ * This may happen before SCSI scan is complete. Make
+ * sure qc->dev->sdev isn't NULL before dereferencing.
*/
- if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL)
+ if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev)
qc->dev->sdev->locked = 0;
qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
@@ -2825,7 +2828,7 @@ static unsigned int ata_scsi_pass_thru(s
* write indication (used for PIO/DMA setup), result TF is
* copied back and we don't whine too much about its failure.
*/
- tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
if (scmd->sc_data_direction == DMA_TO_DEVICE)
tf->flags |= ATA_TFLAG_WRITE;
file:2f3c9bed63d99925a02d20b2e93e780340508877 -> file:29111205185a155727b7f4a3a8f996cb928df8b0
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -2,7 +2,7 @@
* pata_pdc202xx_old.c - Promise PDC202xx PATA for new ATA layer
* (C) 2005 Red Hat Inc
* Alan Cox <alan@lxorguk.ukuu.org.uk>
- * (C) 2007,2009 Bartlomiej Zolnierkiewicz
+ * (C) 2007,2009,2010 Bartlomiej Zolnierkiewicz
*
* Based in part on linux/drivers/ide/pci/pdc202xx_old.c
*
@@ -35,6 +35,15 @@ static int pdc2026x_cable_detect(struct
return ATA_CBL_PATA80;
}
+static void pdc202xx_exec_command(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
+
+ iowrite8(tf->command, ap->ioaddr.command_addr);
+ ndelay(400);
+}
+
/**
* pdc202xx_configure_piomode - set chip PIO timing
* @ap: ATA interface
@@ -271,6 +280,8 @@ static struct ata_port_operations pdc202
.cable_detect = ata_cable_40wire,
.set_piomode = pdc202xx_set_piomode,
.set_dmamode = pdc202xx_set_dmamode,
+
+ .sff_exec_command = pdc202xx_exec_command,
};
static struct ata_port_operations pdc2026x_port_ops = {
@@ -284,6 +295,8 @@ static struct ata_port_operations pdc202
.dev_config = pdc2026x_dev_config,
.port_start = pdc2026x_port_start,
+
+ .sff_exec_command = pdc202xx_exec_command,
};
static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
file:6f5093b7c8c597ad63078d98763a222adf0d0a09 -> file:cf41126ff426d70c68f42e406e09db1cf5241021
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1879,19 +1879,25 @@ static void mv_bmdma_start(struct ata_qu
* LOCKING:
* Inherited from caller.
*/
-static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+static void mv_bmdma_stop_ap(struct ata_port *ap)
{
- struct ata_port *ap = qc->ap;
void __iomem *port_mmio = mv_ap_base(ap);
u32 cmd;
/* clear start/stop bit */
cmd = readl(port_mmio + BMDMA_CMD);
- cmd &= ~ATA_DMA_START;
- writelfl(cmd, port_mmio + BMDMA_CMD);
+ if (cmd & ATA_DMA_START) {
+ cmd &= ~ATA_DMA_START;
+ writelfl(cmd, port_mmio + BMDMA_CMD);
- /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
- ata_sff_dma_pause(ap);
+ /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+ ata_sff_dma_pause(ap);
+ }
+}
+
+static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ mv_bmdma_stop_ap(qc->ap);
}
/**
@@ -1915,8 +1921,21 @@ static u8 mv_bmdma_status(struct ata_por
reg = readl(port_mmio + BMDMA_STATUS);
if (reg & ATA_DMA_ACTIVE)
status = ATA_DMA_ACTIVE;
- else
+ else if (reg & ATA_DMA_ERR)
status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
+ else {
+ /*
+ * Just because DMA_ACTIVE is 0 (DMA completed),
+ * this does _not_ mean the device is "done".
+ * So we should not yet be signalling ATA_DMA_INTR
+ * in some cases. Eg. DSM/TRIM, and perhaps others.
+ */
+ mv_bmdma_stop_ap(ap);
+ if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
+ status = 0;
+ else
+ status = ATA_DMA_INTR;
+ }
return status;
}
@@ -1976,6 +1995,9 @@ static void mv_qc_prep(struct ata_queued
switch (tf->protocol) {
case ATA_PROT_DMA:
+ if (tf->command == ATA_CMD_DSM)
+ return;
+ /* fall-thru */
case ATA_PROT_NCQ:
break; /* continue below */
case ATA_PROT_PIO:
@@ -2075,6 +2097,8 @@ static void mv_qc_prep_iie(struct ata_qu
if ((tf->protocol != ATA_PROT_DMA) &&
(tf->protocol != ATA_PROT_NCQ))
return;
+ if (tf->command == ATA_CMD_DSM)
+ return; /* use bmdma for this */
/* Fill in Gen IIE command request block */
if (!(tf->flags & ATA_TFLAG_WRITE))
@@ -2270,6 +2294,12 @@ static unsigned int mv_qc_issue(struct a
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
+ if (qc->tf.command == ATA_CMD_DSM) {
+ if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */
+ return AC_ERR_OTHER;
+ break; /* use bmdma for this */
+ }
+ /* fall thru */
case ATA_PROT_NCQ:
mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
file:c5f5186d62a3160404a50b5819c440239b311870 -> file:a73f10278171ed4a8c3a6adb96a62f15d30dc4b5
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -774,7 +774,8 @@ static struct atm_vcc *find_vcc(struct a
sk_for_each(s, node, head) {
vcc = atm_sk(s);
if (vcc->dev == dev && vcc->vci == vci &&
- vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE)
+ vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE &&
+ test_bit(ATM_VF_READY, &vcc->flags))
goto out;
}
vcc = NULL;
@@ -900,6 +901,10 @@ static void pclose(struct atm_vcc *vcc)
clear_bit(ATM_VF_ADDR, &vcc->flags);
clear_bit(ATM_VF_READY, &vcc->flags);
+ /* Hold up vcc_destroy_socket() (our caller) until solos_bh() in the
+ tasklet has finished processing any incoming packets (and, more to
+ the point, using the vcc pointer). */
+ tasklet_unlock_wait(&card->tlet);
return;
}
file:bd112c8c7bcde9b3efdbceadc6fc89d65f6c9ea0 -> file:1c21a3f238689012e8cbcaca2d8a537472b2aef3
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -238,6 +238,8 @@ static int do_lo_send_aops(struct loop_d
if (ret)
goto fail;
+ file_update_time(file);
+
transfer_result = lo_do_transfer(lo, WRITE, page, offset,
bvec->bv_page, bv_offs, size, IV);
copied = size;
file:1be76318f516aadf4108b92221d732dfcaaca1fe -> file:0e9c5646c5981504e8278bf47b3d65c7e4e78903
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -59,6 +59,9 @@ static struct usb_device_id btusb_table[
/* Generic Bluetooth USB device */
{ USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
+ /* Apple iMac11,1 */
+ { USB_DEVICE(0x05ac, 0x8215) },
+
/* AVM BlueFRITZ! USB v2.0 */
{ USB_DEVICE(0x057c, 0x3800) },
file:4895f0e053229bc8c04b568912340ffdbc02a076 -> file:e3d4eda93ed95f915e186a2caaa7e48bf1524e6f
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -258,9 +258,16 @@ static int hci_uart_tty_open(struct tty_
BT_DBG("tty %p", tty);
+ /* FIXME: This btw is bogus, nothing requires the old ldisc to clear
+ the pointer */
if (hu)
return -EEXIST;
+ /* Error if the tty has no write op instead of leaving an exploitable
+ hole */
+ if (tty->ops->write == NULL)
+ return -EOPNOTSUPP;
+
if (!(hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL))) {
BT_ERR("Can't allocate control structure");
return -ENFILE;
file:6c3837a0184dd8ca0f56ca0c12f90243a0914dd8 -> file:95fdd4d3a6b1a310e1b987d5d2196626b148e37b
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -415,14 +415,6 @@ static struct pci_device_id agp_sis_pci_
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
- {
- .class = (PCI_CLASS_BRIDGE_HOST << 8),
- .class_mask = ~0,
- .vendor = PCI_VENDOR_ID_SI,
- .device = PCI_DEVICE_ID_SI_760,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- },
{ }
};
file:70a770ac013875a3e4b5200b954a21197623e2e7 -> file:006466d2a830a34850f6be195cf2c454266b2cf8
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -476,6 +476,21 @@ static int hpet_ioctl_ieon(struct hpet_d
if (irq) {
unsigned long irq_flags;
+ if (devp->hd_flags & HPET_SHARED_IRQ) {
+ /*
+ * To prevent the interrupt handler from seeing an
+ * unwanted interrupt status bit, program the timer
+ * so that it will not fire in the near future ...
+ */
+ writel(readl(&timer->hpet_config) & ~Tn_TYPE_CNF_MASK,
+ &timer->hpet_config);
+ write_counter(read_counter(&hpet->hpet_mc),
+ &timer->hpet_compare);
+ /* ... and clear any left-over status. */
+ isr = 1 << (devp - devp->hd_hpets->hp_dev);
+ writel(isr, &hpet->hpet_isr);
+ }
+
sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));
irq_flags = devp->hd_flags & HPET_SHARED_IRQ
? IRQF_SHARED : IRQF_DISABLED;
@@ -970,6 +985,8 @@ static int hpet_acpi_add(struct acpi_dev
return -ENODEV;
if (!data.hd_address || !data.hd_nirqs) {
+ if (data.hd_address)
+ iounmap(data.hd_address);
printk("%s: no address or irqs in _CRS\n", __func__);
return -ENODEV;
}
file:cd30853bd230f778926cab735a7d85c49979082c -> file:ef6fdb0357e91fbb053ccfea5c643af980de0402
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -837,10 +837,11 @@ static const struct file_operations zero
/*
* capabilities for /dev/zero
* - permits private mappings, "copies" are taken of the source of zeros
+ * - no writeback happens
*/
static struct backing_dev_info zero_bdi = {
.name = "char/mem",
- .capabilities = BDI_CAP_MAP_COPY,
+ .capabilities = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
static const struct file_operations full_fops = {
file:88cee4099be940c294127d0a172996d90c597912 -> file:71f0d72fc2f3af5e3e998b404e2a46d671be4251
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -265,10 +265,16 @@ static ssize_t nvram_write(struct file *
unsigned char contents[NVRAM_BYTES];
unsigned i = *ppos;
unsigned char *tmp;
- int len;
- len = (NVRAM_BYTES - i) < count ? (NVRAM_BYTES - i) : count;
- if (copy_from_user(contents, buf, len))
+ if (i >= NVRAM_BYTES)
+ return 0; /* Past EOF */
+
+ if (count > NVRAM_BYTES - i)
+ count = NVRAM_BYTES - i;
+ if (count > NVRAM_BYTES)
+ return -EFAULT; /* Can't happen, but prove it to gcc */
+
+ if (copy_from_user(contents, buf, count))
return -EFAULT;
spin_lock_irq(&rtc_lock);
@@ -276,7 +282,7 @@ static ssize_t nvram_write(struct file *
if (!__nvram_check_checksum())
goto checksum_err;
- for (tmp = contents; count-- > 0 && i < NVRAM_BYTES; ++i, ++tmp)
+ for (tmp = contents; count--; ++i, ++tmp)
__nvram_write_byte(*tmp, i);
__nvram_set_checksum();
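
The rewritten nvram_write() stops trusting count: a position at or past the
end returns 0 (EOF semantics), and a request crossing the end is clamped to
what fits, which also bounds the copy_from_user() into the on-stack buffer.
The same clamping on a fixed-size byte device (DEV_SIZE is illustrative):

        /* Sketch: clamping a write to a fixed-size device. */
        #include <stdio.h>
        #include <string.h>

        #define DEV_SIZE 128

        static char device[DEV_SIZE];

        static long dev_write(const char *buf, unsigned long count,
                              unsigned long pos)
        {
                if (pos >= DEV_SIZE)
                        return 0;                       /* past EOF */
                if (count > DEV_SIZE - pos)
                        count = DEV_SIZE - pos;         /* clamp to what fits */

                memcpy(device + pos, buf, count);       /* bounded copy */
                return count;
        }

        int main(void)
        {
                char junk[256];

                memset(junk, 'x', sizeof(junk));
                printf("wrote %ld bytes\n", dev_write(junk, sizeof(junk), 100));
                printf("wrote %ld bytes\n", dev_write(junk, sizeof(junk), 200));
                return 0;
        }
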
file:caf6e4d194696204fa41632e220369a2ca2876a7 -> file:a08c8994c89d9975257f3658d3376b54cd222334
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -4164,6 +4164,8 @@ static int hdlcdev_ioctl(struct net_devi
if (cmd != SIOCWANDEV)
return hdlc_ioctl(dev, ifr, cmd);
+ memset(&new_line, 0, size);
+
switch(ifr->ifr_settings.type) {
case IF_GET_IFACE: /* return current sync_serial_settings */
file:73b60a19fabcfcd08942f51449de5e0329ca4fae -> file:a510a4de9ca0d2b7c8d5a6fe4ff1ba046ed6488e
--- a/drivers/char/tty_buffer.c
+++ b/drivers/char/tty_buffer.c
@@ -412,7 +412,8 @@ static void flush_to_ldisc(struct work_s
spin_lock_irqsave(&tty->buf.lock, flags);
if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
- struct tty_buffer *head;
+ struct tty_buffer *head, *tail = tty->buf.tail;
+ int seen_tail = 0;
while ((head = tty->buf.head) != NULL) {
int count;
char *char_buf;
@@ -422,6 +423,15 @@ static void flush_to_ldisc(struct work_s
if (!count) {
if (head->next == NULL)
break;
+ /*
+ There's a possibility the tty might get a new buffer
+ added during the unlock window below. We could end up
+ spinning in here forever, hogging the CPU completely.
+ To avoid this, take a rest each time we have processed
+ the buffer that was the tail when this pass started.
+ */
+ if (tail == head)
+ seen_tail = 1;
tty->buf.head = head->next;
tty_buffer_free(tty, head);
continue;
@@ -431,7 +441,7 @@ static void flush_to_ldisc(struct work_s
line discipline as we want to empty the queue */
if (test_bit(TTY_FLUSHPENDING, &tty->flags))
break;
- if (!tty->receive_room) {
+ if (!tty->receive_room || seen_tail) {
schedule_delayed_work(&tty->buf.work, 1);
break;
}
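Note: the change above is a bounded-drain pattern: the worker snapshots the tail before starting, and once it has consumed the buffer that was the tail at that point it reschedules itself instead of chasing a producer forever. A self-contained sketch of the same idea, with all names hypothetical:

    #include <stdbool.h>
    #include <stddef.h>

    struct buf { struct buf *next; };
    struct queue { struct buf *head, *tail; };

    /* Consume at most one pass over the queue; stop after handling the
     * buffer that was the tail when the pass began, even if producers
     * appended more meanwhile. Returns true if the caller should
     * reschedule this work. */
    static bool drain_one_pass(struct queue *q, void (*consume)(struct buf *))
    {
            struct buf *tail = q->tail;       /* snapshot before starting */
            bool seen_tail = false;

            while (q->head && !seen_tail) {
                    struct buf *b = q->head;

                    if (b == tail)
                            seen_tail = true;
                    q->head = b->next;
                    consume(b);
            }
            return q->head != NULL;
    }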
file:c6fbc27069a460cd949add7b0f40ab749eae1dcb -> file:8b9f1a5c8be87e63ae41794317915d4744eb368a
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -45,6 +45,7 @@
static DEFINE_SPINLOCK(tty_ldisc_lock);
static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
+static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_idle);
/* Line disc dispatch table */
static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS];
@@ -81,6 +82,7 @@ static void put_ldisc(struct tty_ldisc *
return;
}
local_irq_restore(flags);
+ wake_up(&tty_ldisc_idle);
}
/**
@@ -442,9 +444,14 @@ static void tty_set_termios_ldisc(struct
static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
{
+ int ret;
+
WARN_ON(test_and_set_bit(TTY_LDISC_OPEN, &tty->flags));
- if (ld->ops->open)
- return ld->ops->open(tty);
+ if (ld->ops->open) {
+ ret = ld->ops->open(tty);
+ /* if the open fails, undo the TTY_LDISC_OPEN bit */
+ if (ret)
+ clear_bit(TTY_LDISC_OPEN, &tty->flags);
+ return ret;
+ }
return 0;
}
@@ -522,6 +529,23 @@ static int tty_ldisc_halt(struct tty_str
}
/**
+ * tty_ldisc_wait_idle - wait for the ldisc to become idle
+ * @tty: tty to wait for
+ *
+ * Wait for the line discipline to become idle. The discipline must
+ * have been halted for this to guarantee it remains idle.
+ */
+static int tty_ldisc_wait_idle(struct tty_struct *tty)
+{
+ int ret;
+ ret = wait_event_interruptible_timeout(tty_ldisc_idle,
+ atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
+ if (ret < 0)
+ return ret;
+ return ret > 0 ? 0 : -EBUSY;
+}
+
+/**
* tty_set_ldisc - set line discipline
* @tty: the terminal to set
* @ldisc: the line discipline
@@ -616,7 +640,16 @@ int tty_set_ldisc(struct tty_struct *tty
flush_scheduled_work();
+ retval = tty_ldisc_wait_idle(tty);
+
mutex_lock(&tty->ldisc_mutex);
+
+ /* handle wait idle failure locked */
+ if (retval) {
+ tty_ldisc_put(new_ldisc);
+ goto enable;
+ }
+
if (test_bit(TTY_HUPPED, &tty->flags)) {
/* We were raced by the hangup method. It will have stomped
the ldisc data and closed the ldisc down */
@@ -649,6 +682,7 @@ int tty_set_ldisc(struct tty_struct *tty
tty_ldisc_put(o_ldisc);
+enable:
/*
* Allow ldisc referencing to occur again
*/
@@ -693,9 +727,12 @@ static void tty_reset_termios(struct tty
* state closed
*/
-static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
+static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
{
- struct tty_ldisc *ld;
+ struct tty_ldisc *ld = tty_ldisc_get(ldisc);
+
+ if (IS_ERR(ld))
+ return -1;
tty_ldisc_close(tty, tty->ldisc);
tty_ldisc_put(tty->ldisc);
@@ -703,10 +740,10 @@ static void tty_ldisc_reinit(struct tty_
/*
* Switch the line discipline back
*/
- ld = tty_ldisc_get(ldisc);
- BUG_ON(IS_ERR(ld));
tty_ldisc_assign(tty, ld);
tty_set_termios_ldisc(tty, ldisc);
+
+ return 0;
}
/**
@@ -768,13 +805,16 @@ void tty_ldisc_hangup(struct tty_struct
a FIXME */
if (tty->ldisc) { /* Not yet closed */
if (reset == 0) {
- tty_ldisc_reinit(tty, tty->termios->c_line);
- err = tty_ldisc_open(tty, tty->ldisc);
+
+ if (!tty_ldisc_reinit(tty, tty->termios->c_line))
+ err = tty_ldisc_open(tty, tty->ldisc);
+ else
+ err = 1;
}
/* If the re-open fails or we reset then go to N_TTY. The
N_TTY open cannot fail */
if (reset || err) {
- tty_ldisc_reinit(tty, N_TTY);
+ BUG_ON(tty_ldisc_reinit(tty, N_TTY));
WARN_ON(tty_ldisc_open(tty, tty->ldisc));
}
tty_ldisc_enable(tty);
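Note: tty_ldisc_wait_idle() above relies on the documented tri-state return of wait_event_interruptible_timeout(): negative on signal, 0 on timeout, positive (remaining jiffies) on success. The mapping it applies can be stated on its own; a sketch of just that convention:

    /* Collapse the three outcomes of an interruptible timed wait into
     * an errno-style result: pass signals up, report a timeout as the
     * resource still being busy, and treat any time left as success. */
    static int idle_wait_result(long ret)
    {
            if (ret < 0)
                    return (int)ret;            /* e.g. -ERESTARTSYS */
            return ret > 0 ? 0 : -EBUSY;        /* 0 jiffies left = timeout */
    }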
file:6aa10284104aeb6e4bc3a22833ea347e67c5d976 -> file:6351a2625dfe3783d7eac259a4d5e89548a34594
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -503,6 +503,7 @@ int vt_ioctl(struct tty_struct *tty, str
struct kbd_struct * kbd;
unsigned int console;
unsigned char ucval;
+ unsigned int uival;
void __user *up = (void __user *)arg;
int i, perm;
int ret = 0;
@@ -657,7 +658,7 @@ int vt_ioctl(struct tty_struct *tty, str
break;
case KDGETMODE:
- ucval = vc->vc_mode;
+ uival = vc->vc_mode;
goto setint;
case KDMAPDISP:
@@ -695,7 +696,7 @@ int vt_ioctl(struct tty_struct *tty, str
break;
case KDGKBMODE:
- ucval = ((kbd->kbdmode == VC_RAW) ? K_RAW :
+ uival = ((kbd->kbdmode == VC_RAW) ? K_RAW :
(kbd->kbdmode == VC_MEDIUMRAW) ? K_MEDIUMRAW :
(kbd->kbdmode == VC_UNICODE) ? K_UNICODE :
K_XLATE);
@@ -717,9 +718,9 @@ int vt_ioctl(struct tty_struct *tty, str
break;
case KDGKBMETA:
- ucval = (vc_kbd_mode(kbd, VC_META) ? K_ESCPREFIX : K_METABIT);
+ uival = (vc_kbd_mode(kbd, VC_META) ? K_ESCPREFIX : K_METABIT);
setint:
- ret = put_user(ucval, (int __user *)arg);
+ ret = put_user(uival, (int __user *)arg);
break;
case KDGETKEYCODE:
@@ -949,7 +950,7 @@ int vt_ioctl(struct tty_struct *tty, str
for (i = 0; i < MAX_NR_CONSOLES; ++i)
if (! VT_IS_IN_USE(i))
break;
- ucval = i < MAX_NR_CONSOLES ? (i+1) : -1;
+ uival = i < MAX_NR_CONSOLES ? (i+1) : -1;
goto setint;
/*
file:961f5b5ef6a3584c558c45dbe20a05074fe325b1 -> file:c0732466fb8749a5a5f81d67e3e5245b48eb3227
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -199,16 +199,8 @@ static cycle_t sh_tmu_clocksource_read(s
static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
- int ret;
- ret = sh_tmu_enable(p);
- if (ret)
- return ret;
-
- /* TODO: calculate good shift from rate and counter bit width */
- cs->shift = 10;
- cs->mult = clocksource_hz2mult(p->rate, cs->shift);
- return 0;
+ return sh_tmu_enable(p);
}
static void sh_tmu_clocksource_disable(struct clocksource *cs)
@@ -228,6 +220,16 @@ static int sh_tmu_register_clocksource(s
cs->disable = sh_tmu_clocksource_disable;
cs->mask = CLOCKSOURCE_MASK(32);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ /* clk_get_rate() needs an enabled clock */
+ clk_enable(p->clk);
+ /* channel will be configured at parent clock / 4 */
+ p->rate = clk_get_rate(p->clk) / 4;
+ clk_disable(p->clk);
+ /* TODO: calculate good shift from rate and counter bit width */
+ cs->shift = 10;
+ cs->mult = clocksource_hz2mult(p->rate, cs->shift);
+
pr_info("sh_tmu: %s used as clock source\n", cs->name);
clocksource_register(cs);
return 0;
file:84c51e17726966c196ac53ce05671e972f65b98a -> file:71e64824e8f73c1f88262d04d970d169f1c64a68
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -285,7 +285,7 @@ static inline u8 *padlock_xcrypt_cbc(con
if (initial)
asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
: "+S" (input), "+D" (output), "+a" (iv)
- : "d" (control_word), "b" (key), "c" (count));
+ : "d" (control_word), "b" (key), "c" (initial));
asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
: "+S" (input), "+D" (output), "+a" (iv)
file:466ab10c1ff10de1d001178fde9fdce203c410e3 -> file:f2b44d51d826cec394890d1578606c67e2fa7d11
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -161,7 +161,7 @@ static int mv_is_err_intr(u32 intr_cause
static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
- u32 val = (1 << (1 + (chan->idx * 16)));
+ u32 val = ~(1 << (chan->idx * 16));
dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
__raw_writel(val, XOR_INTR_CAUSE(chan));
}
file:01bc8e232456a665675b183946ef0f056cb96548 -> file:2aa339e94968e5294cb22e2a570719d82639f18a
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -156,7 +156,7 @@ static int amd64_set_scrub_rate(struct m
default:
amd64_printk(KERN_ERR, "Unsupported family!\n");
- break;
+ return -EINVAL;
}
return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, *bandwidth,
min_scrubrate);
@@ -1491,7 +1491,7 @@ static inline u64 f10_get_base_addr_offs
u64 chan_off;
if (hi_range_sel) {
- if (!(dct_sel_base_addr & 0xFFFFF800) &&
+ if (!(dct_sel_base_addr & 0xFFFF0000) &&
hole_valid && (sys_addr >= 0x100000000ULL))
chan_off = hole_off << 16;
else
file:5089331544ed5336e682fdec039be94068314102 -> file:4560d8ffa171718b498480b66d8384be9c7deb8e
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1299,24 +1299,24 @@ static int dispatch_ioctl(struct client
int ret;
if (_IOC_TYPE(cmd) != '#' ||
- _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
+ _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
+ _IOC_SIZE(cmd) > sizeof(buffer))
return -EINVAL;
- if (_IOC_DIR(cmd) & _IOC_WRITE) {
- if (_IOC_SIZE(cmd) > sizeof(buffer) ||
- copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
+ if (_IOC_DIR(cmd) == _IOC_READ)
+ memset(&buffer, 0, _IOC_SIZE(cmd));
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE)
+ if (copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
return -EFAULT;
- }
ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
if (ret < 0)
return ret;
- if (_IOC_DIR(cmd) & _IOC_READ) {
- if (_IOC_SIZE(cmd) > sizeof(buffer) ||
- copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ if (copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
return -EFAULT;
- }
return ret;
}
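Note: the reworked dispatch above checks _IOC_SIZE once up front and zero-fills the bounce buffer for read-only ioctls, so no uninitialized kernel bytes can reach userspace via copy_to_user. The decoding macros involved are visible to userspace as well; a small standalone example (the SAMPLE_GET ioctl is made up for illustration):

    #include <linux/ioctl.h>
    #include <stdio.h>
    #include <string.h>

    struct sample { int a, b; };
    #define SAMPLE_GET _IOR('#', 3, struct sample)

    int main(void)
    {
            unsigned int cmd = SAMPLE_GET;
            char buffer[256];

            if (_IOC_SIZE(cmd) > sizeof(buffer))
                    return 1;               /* reject oversized commands */
            if (_IOC_DIR(cmd) == _IOC_READ)
                    memset(buffer, 0, _IOC_SIZE(cmd));  /* no stale bytes */

            printf("dir=%u size=%u\n", _IOC_DIR(cmd), _IOC_SIZE(cmd));
            return 0;
    }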
file:9d0dfcbe2c1c82561bf1f026bac88f08324b3037 -> file:17e2b1740202a3921edde10eeeec757cdaedd5e5
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -463,6 +463,7 @@ static int read_bus_info_block(struct fw
return -ENOMEM;
stack = &rom[READ_BIB_ROM_SIZE];
+ memset(rom, 0, sizeof(*rom) * READ_BIB_ROM_SIZE);
device->max_speed = SCODE_100;
file:720b39b0b4edb0da6b3501d311c9165e628ccca5 -> file:8e7a100a4b6d8c2174016abe8787b55f81008fc8
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -628,7 +628,7 @@ static void ar_context_tasklet(unsigned
d = &ab->descriptor;
if (d->res_count == 0) {
- size_t size, rest, offset;
+ size_t size, size2, rest, pktsize, size3, offset;
dma_addr_t start_bus;
void *start;
@@ -639,25 +639,61 @@ static void ar_context_tasklet(unsigned
*/
offset = offsetof(struct ar_buffer, data);
- start = buffer = ab;
+ start = ab;
start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
+ buffer = ab->data;
ab = ab->next;
d = &ab->descriptor;
- size = buffer + PAGE_SIZE - ctx->pointer;
+ size = start + PAGE_SIZE - ctx->pointer;
+ /* valid buffer data in the next page */
rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
+ /* what actually fits in this page */
+ size2 = min(rest, (size_t)PAGE_SIZE - offset - size);
memmove(buffer, ctx->pointer, size);
- memcpy(buffer + size, ab->data, rest);
- ctx->current_buffer = ab;
- ctx->pointer = (void *) ab->data + rest;
- end = buffer + size + rest;
-
- while (buffer < end)
- buffer = handle_ar_packet(ctx, buffer);
-
- dma_free_coherent(ohci->card.device, PAGE_SIZE,
- start, start_bus);
- ar_context_add_page(ctx);
+ memcpy(buffer + size, ab->data, size2);
+
+ while (size > 0) {
+ void *next = handle_ar_packet(ctx, buffer);
+ pktsize = next - buffer;
+ if (pktsize >= size) {
+ /*
+ * We have handled all the data that was
+ * originally in this page, so we can now
+ * continue in the next page.
+ */
+ buffer = next;
+ break;
+ }
+ /* move the next packet to the start of the buffer */
+ memmove(buffer, next, size + size2 - pktsize);
+ size -= pktsize;
+ /* fill up this page again */
+ size3 = min(rest - size2,
+ (size_t)PAGE_SIZE - offset - size - size2);
+ memcpy(buffer + size + size2,
+ (void *) ab->data + size2, size3);
+ size2 += size3;
+ }
+
+ if (rest > 0) {
+ /* handle the packets that are fully in the next page */
+ buffer = (void *) ab->data +
+ (buffer - (start + offset + size));
+ end = (void *) ab->data + rest;
+
+ while (buffer < end)
+ buffer = handle_ar_packet(ctx, buffer);
+
+ ctx->current_buffer = ab;
+ ctx->pointer = end;
+
+ dma_free_coherent(ohci->card.device, PAGE_SIZE,
+ start, start_bus);
+ ar_context_add_page(ctx);
+ } else {
+ ctx->pointer = start + PAGE_SIZE;
+ }
} else {
buffer = ctx->pointer;
ctx->pointer = end =
file:08173fcdda4d511bb7bdfad6ac512d845f7d832d -> file:1b8745dfd457cb09552a3d8265871c0dda23a65b
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -925,13 +925,13 @@ int drm_crtc_helper_set_config(struct dr
mode_changed = true;
if (mode_changed) {
- old_fb = set->crtc->fb;
- set->crtc->fb = set->fb;
set->crtc->enabled = (set->mode != NULL);
if (set->mode != NULL) {
DRM_DEBUG_KMS("attempting to set mode from"
" userspace\n");
drm_mode_debug_printmodeline(set->mode);
+ old_fb = set->crtc->fb;
+ set->crtc->fb = set->fb;
if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
set->x, set->y,
old_fb)) {
file:a75ca63deea65c09d5b5b0d46265daf9482157fe -> file:0e27d98b2b9fe02482383337530771b116e6115d
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -470,7 +470,9 @@ int drm_ioctl(struct inode *inode, struc
retcode = -EFAULT;
goto err_i1;
}
- }
+ } else
+ memset(kdata, 0, _IOC_SIZE(cmd));
+
retcode = func(dev, kdata, file_priv);
if (cmd & IOC_OUT) {
file:259ec21a93ea5f8bd00d99fa5159668053b334ea -> file:1097dece323cafadbf6c3532aa42cbe9b36a27b5
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -565,8 +565,8 @@ struct drm_display_mode *drm_mode_std(st
mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
false);
mode->hdisplay = 1366;
- mode->vsync_start = mode->vsync_start - 1;
- mode->vsync_end = mode->vsync_end - 1;
+ mode->hsync_start = mode->hsync_start - 1;
+ mode->hsync_end = mode->hsync_end - 1;
return mode;
}
mode = NULL;
file:9ecc907384ec339d312ca31c5ea26a2a0636aedf -> file:16dce8479e7fad5d0406776f1a828065fff58004
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -214,7 +214,7 @@ static enum drm_connector_status tfp410_
uint8_t ctl2;
if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
- if (ctl2 & TFP410_CTL_2_HTPLG)
+ if (ctl2 & TFP410_CTL_2_RSEN)
ret = connector_status_connected;
else
ret = connector_status_disconnected;
file:eaa1893b6e9b977c3ae8a63c618fb9a1dd7ebd5b -> file:c3aca5c4de996405d5149a0070b8a8a9a860caca
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -683,8 +683,10 @@ static int i915_batchbuffer(struct drm_d
ret = copy_from_user(cliprects, batch->cliprects,
batch->num_cliprects *
sizeof(struct drm_clip_rect));
- if (ret != 0)
+ if (ret != 0) {
+ ret = -EFAULT;
goto fail_free;
+ }
}
mutex_lock(&dev->struct_mutex);
@@ -725,8 +727,10 @@ static int i915_cmdbuffer(struct drm_dev
return -ENOMEM;
ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
- if (ret != 0)
+ if (ret != 0) {
+ ret = -EFAULT;
goto fail_batch_free;
+ }
if (cmdbuf->num_cliprects) {
cliprects = kcalloc(cmdbuf->num_cliprects,
@@ -737,8 +741,10 @@ static int i915_cmdbuffer(struct drm_dev
ret = copy_from_user(cliprects, cmdbuf->cliprects,
cmdbuf->num_cliprects *
sizeof(struct drm_clip_rect));
- if (ret != 0)
+ if (ret != 0) {
+ ret = -EFAULT;
goto fail_clip_free;
+ }
}
mutex_lock(&dev->struct_mutex);
@@ -1526,6 +1532,15 @@ int i915_driver_unload(struct drm_device
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /*
+ * free the memory space allocated for the child device
+ * config parsed from VBT
+ */
+ if (dev_priv->child_dev && dev_priv->child_dev_num) {
+ kfree(dev_priv->child_dev);
+ dev_priv->child_dev = NULL;
+ dev_priv->child_dev_num = 0;
+ }
drm_irq_uninstall(dev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
file:aafbef70de506b9bc9f177c57a81c3a316e44629 -> file:97163f7f2bad0fe33316c1bce3d397719b140a55
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -555,6 +555,8 @@ typedef struct drm_i915_private {
struct timer_list idle_timer;
bool busy;
u16 orig_clock;
+ int child_dev_num;
+ struct child_device_config *child_dev;
struct drm_connector *int_lvds_connector;
} drm_i915_private_t;
file:952c844713d50f6220cab92cc73d252d8bbea4e4 -> file:3ada62b8bd38190fc91b3e69bb6e2bcf3ae3a5c0
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2262,8 +2262,9 @@ i915_gem_object_get_pages(struct drm_gem
mapping = inode->i_mapping;
for (i = 0; i < page_count; i++) {
page = read_cache_page_gfp(mapping, i,
- mapping_gfp_mask (mapping) |
+ GFP_HIGHUSER |
__GFP_COLD |
+ __GFP_RECLAIMABLE |
gfpmask);
if (IS_ERR(page))
goto err_pages;
@@ -3666,6 +3667,7 @@ i915_gem_execbuffer(struct drm_device *d
if (ret != 0) {
DRM_ERROR("copy %d cliprects failed: %d\n",
args->num_cliprects, ret);
+ ret = -EFAULT;
goto pre_mutex_err;
}
}
file:97169ea7432317d6eada3f6373da6c97ff7ab002 -> file:d4b5b18f2f115d56d9cc58fcebc99f8c150e64c7
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -362,6 +362,70 @@ parse_driver_features(struct drm_i915_pr
dev_priv->render_reclock_avail = true;
}
+static void
+parse_device_mapping(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_general_definitions *p_defs;
+ struct child_device_config *p_child, *child_dev_ptr;
+ int i, child_device_num, count;
+ u16 block_size;
+
+ p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (!p_defs) {
+ DRM_DEBUG_KMS("No general definition block is found\n");
+ return;
+ }
+ /* Judge whether the size of the child device entries meets the
+  * requirement. If the entry size recorded in the general definition
+  * block differs from sizeof(struct child_device_config), skip
+  * parsing the child device info.
+  */
+ if (p_defs->child_dev_size != sizeof(*p_child)) {
+ /* different child dev size. Ignore it */
+ DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+ return;
+ }
+ /* get the block size of general definitions */
+ block_size = get_blocksize(p_defs);
+ /* get the number of child device */
+ child_device_num = (block_size - sizeof(*p_defs)) /
+ sizeof(*p_child);
+ count = 0;
+ /* get the number of child device that is present */
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ count++;
+ }
+ if (!count) {
+ DRM_DEBUG_KMS("no child dev is parsed from VBT \n");
+ return;
+ }
+ dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL);
+ if (!dev_priv->child_dev) {
+ DRM_DEBUG_KMS("No memory space for child device\n");
+ return;
+ }
+
+ dev_priv->child_dev_num = count;
+ count = 0;
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ child_dev_ptr = dev_priv->child_dev + count;
+ count++;
+ memcpy((void *)child_dev_ptr, (void *)p_child,
+ sizeof(*p_child));
+ }
+ return;
+}
/**
* intel_init_bios - initialize VBIOS settings & find VBT
* @dev: DRM device
@@ -413,6 +477,7 @@ intel_init_bios(struct drm_device *dev)
parse_lfp_panel_data(dev_priv, bdb);
parse_sdvo_panel_data(dev_priv, bdb);
parse_sdvo_device_mapping(dev_priv, bdb);
+ parse_device_mapping(dev_priv, bdb);
parse_driver_features(dev_priv, bdb);
pci_unmap_rom(pdev, bios);
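Note: parse_device_mapping() above uses a two-pass filter: count the valid entries first, allocate exactly that much, then copy in a second pass. The shape of that pattern in isolation (entry layout hypothetical):

    #include <stdlib.h>

    struct entry { unsigned short device_type; /* 0 means unused slot */ };

    /* Two passes: the first sizes the allocation exactly, the second
     * copies only the present entries. Returns NULL when nothing is
     * present or the allocation fails; *out_n gets the entry count. */
    static struct entry *copy_present(const struct entry *tbl, int n, int *out_n)
    {
            int i, count = 0;
            struct entry *dst;

            for (i = 0; i < n; i++)
                    if (tbl[i].device_type)
                            count++;
            *out_n = count;
            if (!count)
                    return NULL;
            dst = calloc(count, sizeof(*dst));
            if (!dst)
                    return NULL;
            for (i = 0, count = 0; i < n; i++)
                    if (tbl[i].device_type)
                            dst[count++] = tbl[i];
            return dst;
    }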
file:0f8e5f69ac7a315eb8eb8c4b9f377a87cc8e5f6c -> file:425ac9d7f724792cce3e36f90122f3e644aceca3
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -549,4 +549,21 @@ bool intel_init_bios(struct drm_device *
#define SWF14_APM_STANDBY 0x1
#define SWF14_APM_RESTORE 0x0
+/* Add the device class for LFP, TV, HDMI */
+#define DEVICE_TYPE_INT_LFP 0x1022
+#define DEVICE_TYPE_INT_TV 0x1009
+#define DEVICE_TYPE_HDMI 0x60D2
+#define DEVICE_TYPE_DP 0x68C6
+#define DEVICE_TYPE_eDP 0x78C6
+
+/* define the DVO port for HDMI output type */
+#define DVO_B 1
+#define DVO_C 2
+#define DVO_D 3
+
+/* define the PORT for DP output type */
+#define PORT_IDPB 7
+#define PORT_IDPC 8
+#define PORT_IDPD 9
+
#endif /* _I830_BIOS_H_ */
file:88d5e3ad48adfdef9bfa8a6ed0b9a5068e7d9e48 -> file:79cc437af3b8f7879f7e60ed4cc842b524d62973
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1402,6 +1402,7 @@ static void igdng_enable_pll_edp (struct
dpa_ctl = I915_READ(DP_A);
dpa_ctl |= DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
+ POSTING_READ(DP_A);
udelay(200);
}
@@ -1953,6 +1954,9 @@ static void intel_crtc_dpms(struct drm_c
int pipe = intel_crtc->pipe;
bool enabled;
+ if (intel_crtc->dpms_mode == mode)
+ return;
+
dev_priv->display.dpms(crtc, mode);
intel_crtc->dpms_mode = mode;
@@ -3999,7 +4003,7 @@ static void intel_crtc_init(struct drm_d
}
intel_crtc->cursor_addr = 0;
- intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
+ intel_crtc->dpms_mode = -1;
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
intel_crtc->busy = false;
file:bf0576298948311c48a0e7e37b1bbd256f022158 -> file:d5b736104fd8b8286ea9290bf6241a4424aa6205
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -901,64 +901,45 @@ static const struct dmi_system_id intel_
{ } /* terminating entry */
};
-#ifdef CONFIG_ACPI
/*
- * check_lid_device -- check whether @handle is an ACPI LID device.
- * @handle: ACPI device handle
- * @level : depth in the ACPI namespace tree
- * @context: the number of LID device when we find the device
- * @rv: a return value to fill if desired (Not use)
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the LVDS is present.
+ * If it is present, return 1.
+ * If it is not present, return 0.
+ * If no child dev is parsed from VBT, assume that the LVDS is present.
+ * Note: The addin_offset should also be checked for the LVDS panel.
+ * Only when it is non-zero is the panel assumed to be present.
*/
-static acpi_status
-check_lid_device(acpi_handle handle, u32 level, void *context,
- void **return_value)
+static int lvds_is_present_in_vbt(struct drm_device *dev)
{
- struct acpi_device *acpi_dev;
- int *lid_present = context;
-
- acpi_dev = NULL;
- /* Get the acpi device for device handle */
- if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) {
- /* If there is no ACPI device for handle, return */
- return AE_OK;
- }
-
- if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7))
- *lid_present = 1;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i, ret;
- return AE_OK;
-}
+ if (!dev_priv->child_dev_num)
+ return 1;
-/**
- * check whether there exists the ACPI LID device by enumerating the ACPI
- * device tree.
- */
-static int intel_lid_present(void)
-{
- int lid_present = 0;
+ ret = 0;
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ p_child = dev_priv->child_dev + i;
+ /*
+ * If the device type is not LFP, continue.
+ * If the device type is 0x22, it is also regarded as LFP.
+ */
+ if (p_child->device_type != DEVICE_TYPE_INT_LFP &&
+ p_child->device_type != DEVICE_TYPE_LFP)
+ continue;
- if (acpi_disabled) {
- /* If ACPI is disabled, there is no ACPI device tree to
- * check, so assume the LID device would have been present.
+ /* The addin_offset should be checked. Only when it is
+  * non-zero is the panel regarded as present.
*/
- return 1;
+ if (p_child->addin_offset) {
+ ret = 1;
+ break;
+ }
}
-
- acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- check_lid_device, &lid_present, NULL);
-
- return lid_present;
-}
-#else
-static int intel_lid_present(void)
-{
- /* In the absence of ACPI built in, assume that the LID device would
- * have been present.
- */
- return 1;
+ return ret;
}
-#endif
/**
* intel_lvds_init - setup LVDS connectors on this device
@@ -983,15 +964,10 @@ void intel_lvds_init(struct drm_device *
if (dmi_check_system(intel_no_lvds))
return;
- /* Assume that any device without an ACPI LID device also doesn't
- * have an integrated LVDS. We would be better off parsing the BIOS
- * to get a reliable indicator, but that code isn't written yet.
- *
- * In the case of all-in-one desktops using LVDS that we've seen,
- * they're using SDVO LVDS.
- */
- if (!intel_lid_present())
+ if (!lvds_is_present_in_vbt(dev)) {
+ DRM_DEBUG_KMS("LVDS is not present in VBT\n");
return;
+ }
if (IS_IGDNG(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
file:22ce4d6015e8f7700f50d443d64739287ec7426b -> file:7547ec6418bb5edd53608fdabd155848c302eec6
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -261,7 +261,7 @@ static uint8_t radeon_compute_pll_gain(u
if (!ref_div)
return 1;
- vcoFreq = ((unsigned)ref_freq & fb_div) / ref_div;
+ vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;
/*
* This is horribly crude: the VCO frequency range is divided into
file:c70927ecda2179ea0b616f0818330d605e54350e -> file:8cb88e7e0302f11405ba776865fb720b31f9e2df
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -330,6 +330,7 @@ static int ttm_buffer_object_transfer(st
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
fbo->vm_node = NULL;
+ atomic_set(&fbo->cpu_writers, 0);
fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
if (fbo->mem.mm_node)
file:cdd136942bcaa9e1435b1d2efc5444bf6f3317fc -> file:66579c0bf32821c7c418e2114fa524f9d896965f
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -105,11 +105,15 @@ out:
static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
unsigned int minor = iminor(file->f_path.dentry->d_inode);
- /* FIXME: What stops hidraw_table going NULL */
- struct hid_device *dev = hidraw_table[minor]->hid;
+ struct hid_device *dev;
__u8 *buf;
int ret = 0;
+ if (!hidraw_table[minor])
+ return -ENODEV;
+
+ dev = hidraw_table[minor]->hid;
+
if (!dev->hid_output_raw_report)
return -ENODEV;
@@ -237,11 +241,16 @@ static long hidraw_ioctl(struct file *fi
struct inode *inode = file->f_path.dentry->d_inode;
unsigned int minor = iminor(inode);
long ret = 0;
- /* FIXME: What stops hidraw_table going NULL */
- struct hidraw *dev = hidraw_table[minor];
+ struct hidraw *dev;
void __user *user_arg = (void __user*) arg;
lock_kernel();
+ dev = hidraw_table[minor];
+ if (!dev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
switch (cmd) {
case HIDIOCGRDESCSIZE:
if (put_user(dev->hid->rsize, (int __user *)arg))
@@ -314,6 +323,7 @@ static long hidraw_ioctl(struct file *fi
ret = -ENOTTY;
}
+out:
unlock_kernel();
return ret;
}
file:5d901f698838a7f19bd4b829eda2e4d832fd6173 -> file:d4f7d8509bc9cec316bbbcb527d47bd0e69cb266
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -976,16 +976,6 @@ static int usbhid_start(struct hid_devic
}
}
- init_waitqueue_head(&usbhid->wait);
- INIT_WORK(&usbhid->reset_work, hid_reset);
- INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues);
- setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
-
- spin_lock_init(&usbhid->lock);
-
- usbhid->intf = intf;
- usbhid->ifnum = interface->desc.bInterfaceNumber;
-
usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL);
if (!usbhid->urbctrl) {
ret = -ENOMEM;
@@ -1156,6 +1146,14 @@ static int usbhid_probe(struct usb_inter
hid->driver_data = usbhid;
usbhid->hid = hid;
+ usbhid->intf = intf;
+ usbhid->ifnum = interface->desc.bInterfaceNumber;
+
+ init_waitqueue_head(&usbhid->wait);
+ INIT_WORK(&usbhid->reset_work, hid_reset);
+ INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues);
+ setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
+ spin_lock_init(&usbhid->lock);
ret = hid_add_device(hid);
if (ret) {
file:e2107e533ede3371aeff3e4213bac1e06c624dfb -> file:afebc3439881cbec36aa917538ea9fe1dde4bc52
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -79,7 +79,7 @@ I2C_CLIENT_INSMOD_2(f75373, f75375);
#define F75375_REG_PWM2_DROP_DUTY 0x6C
#define FAN_CTRL_LINEAR(nr) (4 + nr)
-#define FAN_CTRL_MODE(nr) (5 + ((nr) * 2))
+#define FAN_CTRL_MODE(nr) (4 + ((nr) * 2))
/*
* Data structures and manipulation thereof
@@ -298,7 +298,7 @@ static int set_pwm_enable_direct(struct
return -EINVAL;
fanmode = f75375_read8(client, F75375_REG_FAN_TIMER);
- fanmode = ~(3 << FAN_CTRL_MODE(nr));
+ fanmode &= ~(3 << FAN_CTRL_MODE(nr));
switch (val) {
case 0: /* Full speed */
@@ -350,7 +350,7 @@ static ssize_t set_pwm_mode(struct devic
mutex_lock(&data->update_lock);
conf = f75375_read8(client, F75375_REG_CONFIG1);
- conf = ~(1 << FAN_CTRL_LINEAR(nr));
+ conf &= ~(1 << FAN_CTRL_LINEAR(nr));
if (val == 0)
conf |= (1 << FAN_CTRL_LINEAR(nr)) ;
file:f808d188afa3bfdbe4ae394ee72e8a6d14c5570c -> file:4f84d1a76d52ae10d76b7e2af85e16359df2f98d
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -143,6 +143,37 @@ static struct pci_device_id k8temp_ids[]
MODULE_DEVICE_TABLE(pci, k8temp_ids);
+static int __devinit is_rev_g_desktop(u8 model)
+{
+ u32 brandidx;
+
+ if (model < 0x69)
+ return 0;
+
+ if (model == 0xc1 || model == 0x6c || model == 0x7c)
+ return 0;
+
+ /*
+ * Differentiate between AM2 and ASB1.
+ * See "Constructing the processor Name String" in "Revision
+ * Guide for AMD NPT Family 0Fh Processors" (33610).
+ */
+ brandidx = cpuid_ebx(0x80000001);
+ brandidx = (brandidx >> 9) & 0x1f;
+
+ /* Single core */
+ if ((model == 0x6f || model == 0x7f) &&
+ (brandidx == 0x7 || brandidx == 0x9 || brandidx == 0xc))
+ return 0;
+
+ /* Dual core */
+ if (model == 0x6b &&
+ (brandidx == 0xb || brandidx == 0xc))
+ return 0;
+
+ return 1;
+}
+
static int __devinit k8temp_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -179,9 +210,7 @@ static int __devinit k8temp_probe(struct
"wrong - check erratum #141\n");
}
- if ((model >= 0x69) &&
- !(model == 0xc1 || model == 0x6c || model == 0x7c ||
- model == 0x6b || model == 0x6f || model == 0x7f)) {
+ if (is_rev_g_desktop(model)) {
/*
* RevG desktop CPUs (i.e. no socket S1G1 or
* ASB1 parts) need additional offset,
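Note: is_rev_g_desktop() above reads the BrandId field, bits 13:9 of CPUID leaf 0x80000001 EBX, to tell AM2 from ASB1 parts. The same field can be inspected from userspace; a sketch assuming an x86 build with GCC or Clang (<cpuid.h>):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
                    return 1;           /* leaf not supported */
            /* BrandId: EBX bits 13:9, per the family 0Fh revision guide */
            printf("brandidx = 0x%x\n", (ebx >> 9) & 0x1f);
            return 0;
    }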
file:6c53d987de1088dc5f4a199b6a0c44236b220ac4 -> file:b0d03640af4b55ab14a610b90788c7b0dfafb693
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -1286,6 +1286,7 @@ static int lm85_probe(struct i2c_client
switch (data->type) {
case adm1027:
case adt7463:
+ case adt7468:
case emc6d100:
case emc6d102:
data->freq_map = adm1027_freq_map;
file:4a64b85d4ec955d383932fe25d5edb726981a650 -> file:68e69a49633cc2ac5cbbf9572b829c8bc6aadd76
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -1610,11 +1610,8 @@ static struct pc87360_data *pc87360_upda
static int __init pc87360_device_add(unsigned short address)
{
- struct resource res = {
- .name = "pc87360",
- .flags = IORESOURCE_IO,
- };
- int err, i;
+ struct resource res[3];
+ int err, i, res_count;
pdev = platform_device_alloc("pc87360", address);
if (!pdev) {
@@ -1623,22 +1620,28 @@ static int __init pc87360_device_add(uns
goto exit;
}
+ memset(res, 0, 3 * sizeof(struct resource));
+ res_count = 0;
for (i = 0; i < 3; i++) {
if (!extra_isa[i])
continue;
- res.start = extra_isa[i];
- res.end = extra_isa[i] + PC87360_EXTENT - 1;
+ res[res_count].start = extra_isa[i];
+ res[res_count].end = extra_isa[i] + PC87360_EXTENT - 1;
+ res[res_count].name = "pc87360",
+ res[res_count].flags = IORESOURCE_IO,
- err = acpi_check_resource_conflict(&res);
+ err = acpi_check_resource_conflict(&res[res_count]);
if (err)
goto exit_device_put;
- err = platform_device_add_resources(pdev, &res, 1);
- if (err) {
- printk(KERN_ERR "pc87360: Device resource[%d] "
- "addition failed (%d)\n", i, err);
- goto exit_device_put;
- }
+ res_count++;
+ }
+
+ err = platform_device_add_resources(pdev, res, res_count);
+ if (err) {
+ printk(KERN_ERR "pc87360: Device resources addition failed "
+ "(%d)\n", err);
+ goto exit_device_put;
}
err = platform_device_add(pdev);
file:f7346a9bd95f21134fcdfd20508bc3317f75bdbf -> file:62a5ce527aad3ee5b0e738fb853a415321d55d94
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -71,8 +71,8 @@ static int pca_isa_readbyte(void *pd, in
static int pca_isa_waitforcompletion(void *pd)
{
- long ret = ~0;
unsigned long timeout;
+ long ret;
if (irq > -1) {
ret = wait_event_timeout(pca_wait,
@@ -81,11 +81,15 @@ static int pca_isa_waitforcompletion(voi
} else {
/* Do polling */
timeout = jiffies + pca_isa_ops.timeout;
- while (((pca_isa_readbyte(pd, I2C_PCA_CON)
- & I2C_PCA_CON_SI) == 0)
- && (ret = time_before(jiffies, timeout)))
+ do {
+ ret = time_before(jiffies, timeout);
+ if (pca_isa_readbyte(pd, I2C_PCA_CON)
+ & I2C_PCA_CON_SI)
+ break;
udelay(100);
+ } while (ret);
}
+
return ret > 0;
}
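Note: the rewritten polling loop computes the time_before() verdict first and only then samples the status bit; the old code's `ret = ~0` start value is -1 as a signed long, so a transfer that had already completed before the first pass fell through `return ret > 0` as a bogus timeout. A userspace analog of the corrected loop shape (clock source and delay are stand-ins for jiffies and udelay):

    #define _POSIX_C_SOURCE 199309L
    #include <stdbool.h>
    #include <time.h>

    static bool before(const struct timespec *a, const struct timespec *b)
    {
            return a->tv_sec < b->tv_sec ||
                   (a->tv_sec == b->tv_sec && a->tv_nsec < b->tv_nsec);
    }

    /* The timeout verdict is taken before the status check, so success
     * on the very first pass can no longer be reported as a timeout. */
    static bool poll_until(bool (*ready)(void), const struct timespec *deadline)
    {
            struct timespec now, delay = { .tv_nsec = 100 * 1000 };
            bool in_time;

            do {
                    clock_gettime(CLOCK_MONOTONIC, &now);
                    in_time = before(&now, deadline);
                    if (ready())
                            return true;
                    nanosleep(&delay, NULL);    /* udelay(100) stand-in */
            } while (in_time);
            return false;
    }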
file:5b2213df5ed039dfb008c0cdf3aa6d2019543630 -> file:fd295dd949861ecd6c024e836cc405ba3d7625f7
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -80,8 +80,8 @@ static void i2c_pca_pf_writebyte32(void
static int i2c_pca_pf_waitforcompletion(void *pd)
{
struct i2c_pca_pf_data *i2c = pd;
- long ret = ~0;
unsigned long timeout;
+ long ret;
if (i2c->irq) {
ret = wait_event_timeout(i2c->wait,
@@ -90,10 +90,13 @@ static int i2c_pca_pf_waitforcompletion(
} else {
/* Do polling */
timeout = jiffies + i2c->adap.timeout;
- while (((i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
- & I2C_PCA_CON_SI) == 0)
- && (ret = time_before(jiffies, timeout)))
+ do {
+ ret = time_before(jiffies, timeout);
+ if (i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
+ & I2C_PCA_CON_SI)
+ break;
udelay(100);
+ } while (ret);
}
return ret > 0;
@@ -221,7 +224,7 @@ static int __devinit i2c_pca_pf_probe(st
if (irq) {
ret = request_irq(irq, i2c_pca_pf_handler,
- IRQF_TRIGGER_FALLING, i2c->adap.name, i2c);
+ IRQF_TRIGGER_FALLING, pdev->name, i2c);
if (ret)
goto e_reqirq;
}
file:64207df8da82fe5627664777518617bd0facc990 -> file:2de76cc08f61d335a816d752c0bf3d9f955ee1bc
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -506,15 +506,22 @@ int ide_cd_queue_pc(ide_drive_t *drive,
return (flags & REQ_FAILED) ? -EIO : 0;
}
-static void ide_cd_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
+/*
+ * returns true if rq has been completed
+ */
+static bool ide_cd_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
{
unsigned int nr_bytes = cmd->nbytes - cmd->nleft;
if (cmd->tf_flags & IDE_TFLAG_WRITE)
nr_bytes -= cmd->last_xfer_len;
- if (nr_bytes > 0)
+ if (nr_bytes > 0) {
ide_complete_rq(drive, 0, nr_bytes);
+ return true;
+ }
+
+ return false;
}
static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
@@ -679,7 +686,8 @@ out_end:
}
if (uptodate == 0 && rq->bio)
- ide_cd_error_cmd(drive, cmd);
+ if (ide_cd_error_cmd(drive, cmd))
+ return ide_stopped;
/* make sure it's fully ended */
if (blk_fs_request(rq) == 0) {
file:66b41351910ad390d5ec70dbb18c5ab99a196f45 -> file:675fc042bc6003d2bcf291bddc63f579e53a0f4c
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -486,7 +486,8 @@ static int send_connect(struct iwch_ep *
V_MSS_IDX(mtu_idx) |
V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
- opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
+ opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
+ V_CONG_CONTROL_FLAVOR(cong_flavor);
skb->priority = CPL_PRIORITY_SETUP;
set_arp_failure_handler(skb, act_open_req_arp_failure);
@@ -1303,7 +1304,8 @@ static void accept_cr(struct iwch_ep *ep
V_MSS_IDX(mtu_idx) |
V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
- opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
+ opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
+ V_CONG_CONTROL_FLAVOR(cong_flavor);
rpl = cplhdr(skb);
rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
file:9758103b62847b4e557c6a5555745fcff77ad2e4 -> file:a60d4703d2bad5035c1c1cb37b0349ae954cc2ac
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -24,6 +24,7 @@
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/smp_lock.h>
+#include "input-compat.h"
MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
MODULE_DESCRIPTION("Input core");
@@ -760,6 +761,40 @@ static int input_attach_handler(struct i
return error;
}
+#ifdef CONFIG_COMPAT
+
+static int input_bits_to_string(char *buf, int buf_size,
+ unsigned long bits, bool skip_empty)
+{
+ int len = 0;
+
+ if (INPUT_COMPAT_TEST) {
+ u32 dword = bits >> 32;
+ if (dword || !skip_empty)
+ len += snprintf(buf, buf_size, "%x ", dword);
+
+ dword = bits & 0xffffffffUL;
+ if (dword || !skip_empty || len)
+ len += snprintf(buf + len, max(buf_size - len, 0),
+ "%x", dword);
+ } else {
+ if (bits || !skip_empty)
+ len += snprintf(buf, buf_size, "%lx", bits);
+ }
+
+ return len;
+}
+
+#else /* !CONFIG_COMPAT */
+
+static int input_bits_to_string(char *buf, int buf_size,
+ unsigned long bits, bool skip_empty)
+{
+ return bits || !skip_empty ?
+ snprintf(buf, buf_size, "%lx", bits) : 0;
+}
+
+#endif
#ifdef CONFIG_PROC_FS
@@ -828,14 +863,25 @@ static void input_seq_print_bitmap(struc
unsigned long *bitmap, int max)
{
int i;
-
- for (i = BITS_TO_LONGS(max) - 1; i > 0; i--)
- if (bitmap[i])
- break;
+ bool skip_empty = true;
+ char buf[18];
seq_printf(seq, "B: %s=", name);
- for (; i >= 0; i--)
- seq_printf(seq, "%lx%s", bitmap[i], i > 0 ? " " : "");
+
+ for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
+ if (input_bits_to_string(buf, sizeof(buf),
+ bitmap[i], skip_empty)) {
+ skip_empty = false;
+ seq_printf(seq, "%s%s", buf, i > 0 ? " " : "");
+ }
+ }
+
+ /*
+ * If no output was produced print a single 0.
+ */
+ if (skip_empty)
+ seq_puts(seq, "0");
+
seq_putc(seq, '\n');
}
@@ -1124,14 +1170,23 @@ static int input_print_bitmap(char *buf,
{
int i;
int len = 0;
+ bool skip_empty = true;
- for (i = BITS_TO_LONGS(max) - 1; i > 0; i--)
- if (bitmap[i])
- break;
+ for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
+ len += input_bits_to_string(buf + len, max(buf_size - len, 0),
+ bitmap[i], skip_empty);
+ if (len) {
+ skip_empty = false;
+ if (i > 0)
+ len += snprintf(buf + len, max(buf_size - len, 0), " ");
+ }
+ }
- for (; i >= 0; i--)
- len += snprintf(buf + len, max(buf_size - len, 0),
- "%lx%s", bitmap[i], i > 0 ? " " : "");
+ /*
+ * If no output was produced print a single 0.
+ */
+ if (len == 0)
+ len = snprintf(buf, buf_size, "%d", 0);
if (add_cr)
len += snprintf(buf + len, max(buf_size - len, 0), "\n");
@@ -1146,7 +1201,8 @@ static ssize_t input_dev_show_cap_##bm(s
{ \
struct input_dev *input_dev = to_input_dev(dev); \
int len = input_print_bitmap(buf, PAGE_SIZE, \
- input_dev->bm##bit, ev##_MAX, 1); \
+ input_dev->bm##bit, ev##_MAX, \
+ true); \
return min_t(int, len, PAGE_SIZE); \
} \
static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL)
@@ -1210,7 +1266,7 @@ static int input_add_uevent_bm_var(struc
len = input_print_bitmap(&env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen,
- bitmap, max, 0);
+ bitmap, max, false);
if (len >= (sizeof(env->buf) - env->buflen))
return -ENOMEM;
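Note: input_bits_to_string() above splits each native long into two 32-bit hex words when a 32-bit process reads the bitmaps, suppressing an empty high word unless earlier output makes it significant. The word-splitting logic on its own, as a runnable sketch:

    #include <stdio.h>

    /* Render a 64-bit bitmap word the way a 32-bit reader expects:
     * two 32-bit hex words, skipping an all-zero high half unless
     * something has already been emitted. */
    static int bits_to_string(char *buf, int size, unsigned long long bits,
                              int skip_empty)
    {
            int len = 0;
            unsigned int hi = bits >> 32, lo = bits & 0xffffffffu;

            if (hi || !skip_empty)
                    len += snprintf(buf, size, "%x ", hi);
            if (lo || !skip_empty || len)
                    len += snprintf(buf + len, size - len > 0 ? size - len : 0,
                                    "%x", lo);
            return len;
    }

    int main(void)
    {
            char buf[32];

            bits_to_string(buf, sizeof(buf), 0x10000ffffULL, 1);
            printf("%s\n", buf);        /* prints "1 ffff" */
            return 0;
    }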
file:b1bd6dd322864d0bd04c2f9a48c9f80b3217dcce -> file:93c60e0f2b8e6a2432f06aec1ea81da0fdd6af70
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -481,6 +481,9 @@ static int joydev_handle_JSIOCSAXMAP(str
memcpy(joydev->abspam, abspam, len);
+ for (i = 0; i < joydev->nabs; i++)
+ joydev->absmap[joydev->abspam[i]] = i;
+
out:
kfree(abspam);
return retval;
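Note: the joydev fix regenerates the reverse axis map whenever a new forward map is installed, restoring the invariant absmap[abspam[i]] == i. The same idea in isolation (array bound illustrative):

    #include <string.h>

    #define AXIS_SLOTS 64                   /* illustrative bound */

    struct axes {
            int nabs;
            unsigned char abspam[AXIS_SLOTS];   /* slot -> axis code */
            unsigned char absmap[AXIS_SLOTS];   /* axis code -> slot */
    };

    /* Install a new forward map and rebuild the inverse so lookups in
     * either direction stay consistent. */
    static void set_axis_map(struct axes *a, const unsigned char *pam, int n)
    {
            int i;

            memcpy(a->abspam, pam, n);
            a->nabs = n;
            for (i = 0; i < n; i++)
                    a->absmap[a->abspam[i]] = i;
    }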
file:ba09e4dd84928d37223e5b1ae66bc13a4a60a9c5 -> file:21ef4b59a8183d24abb7916b84420b38d8d4562d
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -329,6 +329,13 @@ static const struct dmi_system_id __init
},
},
{
+ /* Sony Vaio VPCZ122GX */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VPCZ122GX"),
+ },
+ },
+ {
/* Sony Vaio FS-115b */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
file:1df02d25aca5e4843cc6b02988f93dc0c05f2803 -> file:16f5ab2ee034be2a24b087e175ba4ea4e1d6bb17
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1412,8 +1412,8 @@ static int __init i8042_init(void)
static void __exit i8042_exit(void)
{
- platform_driver_unregister(&i8042_driver);
platform_device_unregister(i8042_platform_device);
+ platform_driver_unregister(&i8042_driver);
i8042_platform_exit();
panic_blink = NULL;
file:1081091bbfaf74e95285c02bc8bf85ab075e5018 -> file:2655e3aab8952603fc0e4a50f3e8879525d01b52
--- a/drivers/isdn/sc/ioctl.c
+++ b/drivers/isdn/sc/ioctl.c
@@ -174,7 +174,7 @@ int sc_ioctl(int card, scs_ioctl *data)
pr_debug("%s: SCIOGETSPID: ioctl received\n",
sc_adapter[card]->devicename);
- spid = kmalloc(SCIOC_SPIDSIZE, GFP_KERNEL);
+ spid = kzalloc(SCIOC_SPIDSIZE, GFP_KERNEL);
if (!spid) {
kfree(rcvmsg);
return -ENOMEM;
@@ -194,7 +194,7 @@ int sc_ioctl(int card, scs_ioctl *data)
kfree(rcvmsg);
return status;
}
- strcpy(spid, rcvmsg->msg_data.byte_array);
+ strlcpy(spid, rcvmsg->msg_data.byte_array, SCIOC_SPIDSIZE);
/*
* Package the switch type and send to user space
@@ -272,12 +272,12 @@ int sc_ioctl(int card, scs_ioctl *data)
return status;
}
- dn = kmalloc(SCIOC_DNSIZE, GFP_KERNEL);
+ dn = kzalloc(SCIOC_DNSIZE, GFP_KERNEL);
if (!dn) {
kfree(rcvmsg);
return -ENOMEM;
}
- strcpy(dn, rcvmsg->msg_data.byte_array);
+ strlcpy(dn, rcvmsg->msg_data.byte_array, SCIOC_DNSIZE);
kfree(rcvmsg);
/*
@@ -348,7 +348,7 @@ int sc_ioctl(int card, scs_ioctl *data)
pr_debug("%s: SCIOSTAT: ioctl received\n",
sc_adapter[card]->devicename);
- bi = kmalloc (sizeof(boardInfo), GFP_KERNEL);
+ bi = kzalloc(sizeof(boardInfo), GFP_KERNEL);
if (!bi) {
kfree(rcvmsg);
return -ENOMEM;
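Note: the ioctl fixes above pair kzalloc() with strlcpy() so a string shorter than the buffer cannot carry stale heap bytes out to userspace: the allocation starts zeroed and the copy is bounded and NUL-terminated. A userspace analog (glibc lacks strlcpy, so snprintf stands in; the size is illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    #define SPID_SIZE 32                    /* illustrative */

    /* Zero-filled allocation plus a bounded, always-terminated copy:
     * everything past the copied string is guaranteed to be zeros. */
    static char *dup_bounded(const char *src)
    {
            char *dst = calloc(1, SPID_SIZE);

            if (dst)
                    snprintf(dst, SPID_SIZE, "%s", src);
            return dst;
    }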
file:a5e5f2fbf96351baca4fb443a472f1f58692d379 -> file:47831536deb13a8a687d631f405b734eb639a88d
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1317,7 +1317,8 @@ void bitmap_endwrite(struct bitmap *bitm
{
if (!bitmap) return;
if (behind) {
- atomic_dec(&bitmap->behind_writes);
+ if (atomic_dec_and_test(&bitmap->behind_writes))
+ wake_up(&bitmap->behind_wait);
PRINTK(KERN_DEBUG "dec write-behind count %d/%d\n",
atomic_read(&bitmap->behind_writes), bitmap->max_write_behind);
}
@@ -1629,6 +1630,7 @@ int bitmap_create(mddev_t *mddev)
atomic_set(&bitmap->pending_writes, 0);
init_waitqueue_head(&bitmap->write_wait);
init_waitqueue_head(&bitmap->overflow_wait);
+ init_waitqueue_head(&bitmap->behind_wait);
bitmap->mddev = mddev;
file:7e38d13ddcacce54b47206f191af739ce84d436c -> file:86950bc527afd7c696f5fc2b89543b46cca33193
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -254,6 +254,9 @@ struct bitmap {
wait_queue_head_t write_wait;
wait_queue_head_t overflow_wait;
+#ifndef __GENKSYMS__
+ wait_queue_head_t behind_wait;
+#endif
};
/* the bitmap API */
file:d19854c98184de70a6ba7bb5432543eecd212a04 -> file:818b617ab3b28b1fcb687266f23a886ff4651e74
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -249,40 +249,46 @@ static void __hash_remove(struct hash_ce
static void dm_hash_remove_all(int keep_open_devices)
{
- int i, dev_skipped, dev_removed;
+ int i, dev_skipped;
struct hash_cell *hc;
- struct list_head *tmp, *n;
+ struct mapped_device *md;
+
+retry:
+ dev_skipped = 0;
down_write(&_hash_lock);
-retry:
- dev_skipped = dev_removed = 0;
for (i = 0; i < NUM_BUCKETS; i++) {
- list_for_each_safe (tmp, n, _name_buckets + i) {
- hc = list_entry(tmp, struct hash_cell, name_list);
+ list_for_each_entry(hc, _name_buckets + i, name_list) {
+ md = hc->md;
+ dm_get(md);
- if (keep_open_devices &&
- dm_lock_for_deletion(hc->md)) {
+ if (keep_open_devices && dm_lock_for_deletion(md)) {
+ dm_put(md);
dev_skipped++;
continue;
}
+
__hash_remove(hc);
- dev_removed = 1;
- }
- }
- /*
- * Some mapped devices may be using other mapped devices, so if any
- * still exist, repeat until we make no further progress.
- */
- if (dev_skipped) {
- if (dev_removed)
- goto retry;
+ up_write(&_hash_lock);
- DMWARN("remove_all left %d open device(s)", dev_skipped);
+ dm_put(md);
+
+ /*
+ * Some mapped devices may be using other mapped
+ * devices, so repeat until we make no further
+ * progress. If a new mapped device is created
+ * here it will also get removed.
+ */
+ goto retry;
+ }
}
up_write(&_hash_lock);
+
+ if (dev_skipped)
+ DMWARN("remove_all left %d open device(s)", dev_skipped);
}
static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
file:32d0b878ecccb5ac3b7878d68adc677ebc6aa880 -> file:f336c695908217610a0d4a66e0ae2e27a68e85a0
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -691,6 +691,7 @@ static struct priority_group *parse_prio
if (as->argc < nr_params) {
ti->error = "not enough path parameters";
+ r = -EINVAL;
goto bad;
}
file:01470794d9c31dc2368b3304560580904756b02c -> file:0352746824596940713760f57bff737e10aae0b8
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1122,7 +1122,7 @@ super_90_rdev_size_change(mdk_rdev_t *rd
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
md_super_wait(rdev->mddev);
- return num_sectors / 2; /* kB for sysfs */
+ return num_sectors;
}
@@ -1485,7 +1485,7 @@ super_1_rdev_size_change(mdk_rdev_t *rde
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
md_super_wait(rdev->mddev);
- return num_sectors / 2; /* kB for sysfs */
+ return num_sectors;
}
static struct super_type super_types[] = {
file:5ccad2822b115361c7c875430fb747755e4658ce -> file:968cb14b63c0dc82cc5a40dd9b6a451581c14241
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -845,6 +845,15 @@ static int make_request(struct request_q
}
mirror = conf->mirrors + rdisk;
+ if (test_bit(WriteMostly, &mirror->rdev->flags) &&
+ bitmap) {
+ /* Reading from a write-mostly device must
+ * take care not to over-take any writes
+ * that are 'behind'
+ */
+ wait_event(bitmap->behind_wait,
+ atomic_read(&bitmap->behind_writes) == 0);
+ }
r1_bio->read_disk = rdisk;
read_bio = bio_clone(bio, GFP_NOIO);
@@ -922,9 +931,13 @@ static int make_request(struct request_q
set_bit(R1BIO_Degraded, &r1_bio->state);
}
- /* do behind I/O ? */
+ /* do behind I/O ?
+ * Not if there are too many, or cannot allocate memory,
+ * or a reader on WriteMostly is waiting for behind writes
+ * to flush */
if (bitmap &&
atomic_read(&bitmap->behind_writes) < bitmap->max_write_behind &&
+ !waitqueue_active(&bitmap->behind_wait) &&
(behind_pages = alloc_behind_pages(bio)) != NULL)
set_bit(R1BIO_BehindIO, &r1_bio->state);
@@ -1175,6 +1188,7 @@ static int raid1_remove_disk(mddev_t *md
* is not possible.
*/
if (!test_bit(Faulty, &rdev->flags) &&
+ !mddev->recovery_disabled &&
mddev->degraded < conf->raid_disks) {
err = -EBUSY;
goto abort;
@@ -2105,15 +2119,13 @@ static int stop(mddev_t *mddev)
{
conf_t *conf = mddev->private;
struct bitmap *bitmap = mddev->bitmap;
- int behind_wait = 0;
/* wait for behind writes to complete */
- while (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
- behind_wait++;
- printk(KERN_INFO "raid1: behind writes in progress on device %s, waiting to stop (%d)\n", mdname(mddev), behind_wait);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ); /* wait a second */
+ if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
+ printk(KERN_INFO "raid1: behind writes in progress on device %s, waiting to stop.\n", mdname(mddev));
/* need to kick something here to make sure I/O goes? */
+ wait_event(bitmap->behind_wait,
+ atomic_read(&bitmap->behind_writes) == 0);
}
raise_barrier(conf);
file:6c2a35b42d42279b894358874bfd08a4558695f0 -> file:1b4e232bce3c77d75461592a7228a2f31aff8721
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -824,11 +824,29 @@ static int make_request(struct request_q
*/
bp = bio_split(bio,
chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
+
+ /* Each of these 'make_request' calls will call 'wait_barrier'.
+ * If the first succeeds but the second blocks due to the resync
+ * thread raising the barrier, we will deadlock because the
+ * IO to the underlying device will be queued in generic_make_request
+ * and will never complete, so will never reduce nr_pending.
+ * So increment nr_waiting here so no new raise_barriers will
+ * succeed, and so the second wait_barrier cannot block.
+ */
+ spin_lock_irq(&conf->resync_lock);
+ conf->nr_waiting++;
+ spin_unlock_irq(&conf->resync_lock);
+
if (make_request(q, &bp->bio1))
generic_make_request(&bp->bio1);
if (make_request(q, &bp->bio2))
generic_make_request(&bp->bio2);
+ spin_lock_irq(&conf->resync_lock);
+ conf->nr_waiting--;
+ wake_up(&conf->wait_barrier);
+ spin_unlock_irq(&conf->resync_lock);
+
bio_pair_release(bp);
return 0;
bad_map:
file:319c459459e0c5a0fb0f3323d0780b59a8ec65c6 -> file:dd30b9dad4a6ece711f09ff5cb8cd6464f681852
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -225,14 +225,16 @@ void cx231xx_pre_card_setup(struct cx231
dev->board.name, dev->model);
/* set the direction for GPIO pins */
- cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1);
- cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1);
- cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1);
+ if (dev->board.tuner_gpio) {
+ cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1);
+ cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1);
+ cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1);
- /* request some modules if any required */
+ /* request some modules if any required */
- /* reset the Tuner */
- cx231xx_gpio_set(dev, dev->board.tuner_gpio);
+ /* reset the Tuner */
+ cx231xx_gpio_set(dev, dev->board.tuner_gpio);
+ }
/* set the mode to Analog mode initially */
cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
file:fa6bb85cb4b06ebf7626f5dd6557b9d345107bee -> file:6b61bb67e9d1179c855b925099e9fc7d8720a604
--- a/drivers/media/video/ivtv/ivtvfb.c
+++ b/drivers/media/video/ivtv/ivtvfb.c
@@ -457,6 +457,8 @@ static int ivtvfb_ioctl(struct fb_info *
struct fb_vblank vblank;
u32 trace;
+ memset(&vblank, 0, sizeof(struct fb_vblank));
+
vblank.flags = FB_VBLANK_HAVE_COUNT |FB_VBLANK_HAVE_VCOUNT |
FB_VBLANK_HAVE_VSYNC;
trace = read_reg(0x028c0) >> 16;
file:f87757fccc72114bdafe8ad4454f8e9a3b49b20e -> file:09d42236f7486f89cba15d64067958fe9b975986
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -420,19 +420,6 @@ int saa7134_set_dmabits(struct saa7134_d
ctrl |= SAA7134_MAIN_CTRL_TE5;
irq |= SAA7134_IRQ1_INTE_RA2_1 |
SAA7134_IRQ1_INTE_RA2_0;
-
- /* dma: setup channel 5 (= TS) */
-
- saa_writeb(SAA7134_TS_DMA0, (dev->ts.nr_packets - 1) & 0xff);
- saa_writeb(SAA7134_TS_DMA1,
- ((dev->ts.nr_packets - 1) >> 8) & 0xff);
- /* TSNOPIT=0, TSCOLAP=0 */
- saa_writeb(SAA7134_TS_DMA2,
- (((dev->ts.nr_packets - 1) >> 16) & 0x3f) | 0x00);
- saa_writel(SAA7134_RS_PITCH(5), TS_PACKET_SIZE);
- saa_writel(SAA7134_RS_CONTROL(5), SAA7134_RS_CONTROL_BURST_16 |
- SAA7134_RS_CONTROL_ME |
- (dev->ts.pt_ts.dma >> 12));
}
/* set task conditions + field handling */
file:03488ba4c99c07f66a15e989414b700054ae1281 -> file:b9817d74943fa014b9f4861c58e002dec19dcd43
--- a/drivers/media/video/saa7134/saa7134-ts.c
+++ b/drivers/media/video/saa7134/saa7134-ts.c
@@ -250,6 +250,19 @@ int saa7134_ts_start(struct saa7134_dev
BUG_ON(dev->ts_started);
+ /* dma: setup channel 5 (= TS) */
+ saa_writeb(SAA7134_TS_DMA0, (dev->ts.nr_packets - 1) & 0xff);
+ saa_writeb(SAA7134_TS_DMA1,
+ ((dev->ts.nr_packets - 1) >> 8) & 0xff);
+ /* TSNOPIT=0, TSCOLAP=0 */
+ saa_writeb(SAA7134_TS_DMA2,
+ (((dev->ts.nr_packets - 1) >> 16) & 0x3f) | 0x00);
+ saa_writel(SAA7134_RS_PITCH(5), TS_PACKET_SIZE);
+ saa_writel(SAA7134_RS_CONTROL(5), SAA7134_RS_CONTROL_BURST_16 |
+ SAA7134_RS_CONTROL_ME |
+ (dev->ts.pt_ts.dma >> 12));
+
+ /* reset hardware TS buffers */
saa_writeb(SAA7134_TS_SERIAL1, 0x00);
saa_writeb(SAA7134_TS_SERIAL1, 0x03);
saa_writeb(SAA7134_TS_SERIAL1, 0x00);
file:450ac705c05735fda966fd0c8ed068104b2142fb -> file:eb2ce2630a771bea0a9f11c0c9570f4c69f1a3c9
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -436,7 +436,8 @@ static int uvc_parse_format(struct uvc_d
/* Parse the frame descriptors. Only uncompressed, MJPEG and frame
* based formats have frame descriptors.
*/
- while (buflen > 2 && buffer[2] == ftype) {
+ while (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE &&
+ buffer[2] == ftype) {
frame = &format->frame[format->nframes];
if (ftype != UVC_VS_FRAME_FRAME_BASED)
n = buflen > 25 ? buffer[25] : 0;
@@ -513,12 +514,14 @@ static int uvc_parse_format(struct uvc_d
buffer += buffer[0];
}
- if (buflen > 2 && buffer[2] == UVC_VS_STILL_IMAGE_FRAME) {
+ if (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE &&
+ buffer[2] == UVC_VS_STILL_IMAGE_FRAME) {
buflen -= buffer[0];
buffer += buffer[0];
}
- if (buflen > 2 && buffer[2] == UVC_VS_COLORFORMAT) {
+ if (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE &&
+ buffer[2] == UVC_VS_COLORFORMAT) {
if (buflen < 6) {
uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
"interface %d COLORFORMAT error\n",
@@ -759,6 +762,11 @@ static int uvc_parse_streaming(struct uv
buffer += buffer[0];
}
+ if (buflen)
+ uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming interface "
+ "%d has %u bytes of trailing descriptor garbage.\n",
+ dev->udev->devnum, alts->desc.bInterfaceNumber, buflen);
+
/* Parse the alternate settings to find the maximum bandwidth. */
for (i = 0; i < intf->num_altsetting; ++i) {
struct usb_host_endpoint *ep;
file:997975d5e024b06934e0a4d1d39bba4c091be85b -> file:64076ffec0e000bda9aa20027f3502305dd04d01
--- a/drivers/media/video/v4l2-compat-ioctl32.c
+++ b/drivers/media/video/v4l2-compat-ioctl32.c
@@ -193,17 +193,24 @@ static int put_video_window32(struct vid
struct video_code32 {
char loadwhat[16]; /* name or tag of file being passed */
compat_int_t datasize;
- unsigned char *data;
+ compat_uptr_t data;
};
-static int get_microcode32(struct video_code *kp, struct video_code32 __user *up)
+static struct video_code __user *get_microcode32(struct video_code32 *kp)
{
- if (!access_ok(VERIFY_READ, up, sizeof(struct video_code32)) ||
- copy_from_user(kp->loadwhat, up->loadwhat, sizeof(up->loadwhat)) ||
- get_user(kp->datasize, &up->datasize) ||
- copy_from_user(kp->data, up->data, up->datasize))
- return -EFAULT;
- return 0;
+ struct video_code __user *up;
+
+ up = compat_alloc_user_space(sizeof(*up));
+
+ /*
+ * NOTE! We don't actually care if these fail. If the
+ * user address is invalid, the native ioctl will do
+ * the error handling for us
+ */
+ (void) copy_to_user(up->loadwhat, kp->loadwhat, sizeof(up->loadwhat));
+ (void) put_user(kp->datasize, &up->datasize);
+ (void) put_user(compat_ptr(kp->data), &up->data);
+ return up;
}
#define VIDIOCGTUNER32 _IOWR('v', 4, struct video_tuner32)
@@ -741,7 +748,7 @@ static long do_video_ioctl(struct file *
struct video_tuner vt;
struct video_buffer vb;
struct video_window vw;
- struct video_code vc;
+ struct video_code32 vc;
struct video_audio va;
#endif
struct v4l2_format v2f;
@@ -820,8 +827,11 @@ static long do_video_ioctl(struct file *
break;
case VIDIOCSMICROCODE:
- err = get_microcode32(&karg.vc, up);
- compatible_arg = 0;
+ /* Copy the 32-bit "video_code32" to kernel space */
+ if (copy_from_user(&karg.vc, up, sizeof(karg.vc)))
+ return -EFAULT;
+ /* Convert the 32-bit version to a 64-bit version in user space */
+ up = get_microcode32(&karg.vc);
break;
case VIDIOCSFREQ:
file:bd83fa0a4970fb13a5ddef788d1c68a5a7b21300 -> file:46bd7e2a952ceec89217323ac8f03ad032aa9803
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1330,13 +1330,14 @@ static void mspro_block_remove(struct me
struct mspro_block_data *msb = memstick_get_drvdata(card);
unsigned long flags;
- del_gendisk(msb->disk);
- dev_dbg(&card->dev, "mspro block remove\n");
spin_lock_irqsave(&msb->q_lock, flags);
msb->eject = 1;
blk_start_queue(msb->queue);
spin_unlock_irqrestore(&msb->q_lock, flags);
+ del_gendisk(msb->disk);
+ dev_dbg(&card->dev, "mspro block remove\n");
+
blk_cleanup_queue(msb->queue);
msb->queue = NULL;
file:f6227340a5d0791324b2e26495e11e512bca5b5c -> file:56d98eb20a255c673f4491d64bf89e65c27d382e
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -2439,6 +2439,8 @@ mptscsih_slave_configure(struct scsi_dev
ioc->name,sdev->tagged_supported, sdev->simple_tags,
sdev->ordered_tags));
+ blk_queue_dma_alignment (sdev->request_queue, 512 - 1);
+
return 0;
}
file:1eac626e710a38dddab8673b1cc5f751a1df7366 -> file:68e4cd7d321ac7715a5bf47420ce1bc0c4bc8639
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -284,8 +284,11 @@ enclosure_component_register(struct encl
cdev->groups = enclosure_groups;
err = device_register(cdev);
- if (err)
- ERR_PTR(err);
+ if (err) {
+ ecomp->number = -1;
+ put_device(cdev);
+ return ERR_PTR(err);
+ }
return ecomp;
}
file:65877bc5edaae9e67dd391b665de91670556fd20 -> file:55748d6a62658b26926184d4ec55d3b9ec611dbc
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -433,18 +433,23 @@ xpc_discovery(void)
* nodes that can comprise an access protection grouping. The access
* protection is in regards to memory, IOI and IPI.
*/
- max_regions = 64;
region_size = xp_region_size;
- switch (region_size) {
- case 128:
- max_regions *= 2;
- case 64:
- max_regions *= 2;
- case 32:
- max_regions *= 2;
- region_size = 16;
- DBUG_ON(!is_shub2());
+ if (is_uv())
+ max_regions = 256;
+ else {
+ max_regions = 64;
+
+ switch (region_size) {
+ case 128:
+ max_regions *= 2;
+ case 64:
+ max_regions *= 2;
+ case 32:
+ max_regions *= 2;
+ region_size = 16;
+ DBUG_ON(!is_shub2());
+ }
}
for (region = 0; region < max_regions; region++) {
file:c76677afda1b50591b855759fa711253d99458ee -> file:8e08d71df1048b252fa2274b7925795ae839dc05
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -203,6 +203,7 @@ xpc_create_gru_mq_uv(unsigned int mq_siz
enum xp_retval xp_ret;
int ret;
int nid;
+ int nasid;
int pg_order;
struct page *page;
struct xpc_gru_mq_uv *mq;
@@ -258,9 +259,11 @@ xpc_create_gru_mq_uv(unsigned int mq_siz
goto out_5;
}
+ nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));
+
mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
- nid, mmr_value->vector, mmr_value->dest);
+ nasid, mmr_value->vector, mmr_value->dest);
if (ret != 0) {
dev_err(xpc_part, "gru_create_message_queue() returned "
"error=%d\n", ret);
@@ -409,6 +412,7 @@ xpc_process_activate_IRQ_rcvd_uv(void)
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
struct xpc_activate_mq_msghdr_uv *msg_hdr,
+ int part_setup,
int *wakeup_hb_checker)
{
unsigned long irq_flags;
@@ -473,6 +477,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc
case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_closerequest_uv,
hdr);
@@ -489,6 +496,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc
case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
struct xpc_activate_mq_msg_chctl_closereply_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_closereply_uv,
hdr);
@@ -503,6 +513,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc
case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_openrequest_uv,
hdr);
@@ -520,6 +533,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc
case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
struct xpc_activate_mq_msg_chctl_openreply_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_openreply_uv, hdr);
args = &part->remote_openclose_args[msg->ch_number];
@@ -537,6 +553,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc
case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_opencomplete_uv, hdr);
spin_lock_irqsave(&part->chctl_lock, irq_flags);
@@ -613,6 +632,7 @@ xpc_handle_activate_IRQ_uv(int irq, void
part_referenced = xpc_part_ref(part);
xpc_handle_activate_mq_msg_uv(part, msg_hdr,
+ part_referenced,
&wakeup_hb_checker);
if (part_referenced)
xpc_part_deref(part);
@@ -945,11 +965,13 @@ xpc_get_fifo_entry_uv(struct xpc_fifo_he
head->first = first->next;
if (head->first == NULL)
head->last = NULL;
+
+ head->n_entries--;
+ BUG_ON(head->n_entries < 0);
+
+ first->next = NULL;
}
- head->n_entries--;
- BUG_ON(head->n_entries < 0);
spin_unlock_irqrestore(&head->lock, irq_flags);
- first->next = NULL;
return first;
}
@@ -1018,7 +1040,8 @@ xpc_make_first_contact_uv(struct xpc_par
xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);
- while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) {
+ while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
+ (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {
dev_dbg(xpc_part, "waiting to make first contact with "
"partition %d\n", XPC_PARTID(part));
@@ -1421,7 +1444,6 @@ xpc_handle_notify_mq_msg_uv(struct xpc_p
msg_slot = ch_uv->recv_msg_slots +
(msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;
- BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number);
BUG_ON(msg_slot->hdr.size != 0);
memcpy(msg_slot, msg, msg->hdr.size);
@@ -1645,8 +1667,6 @@ xpc_received_payload_uv(struct xpc_chann
sizeof(struct xpc_notify_mq_msghdr_uv));
if (ret != xpSuccess)
XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
-
- msg->hdr.msg_slot_number += ch->remote_nentries;
}
static struct xpc_arch_operations xpc_arch_ops_uv = {
file:91991b460c4547c7a90a583ca987f087c6babd53 -> file:f43edfd064c147c0bcfe9f36a7b1e5a90a2bf61e
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -161,6 +161,7 @@ tmio_mmc_start_command(struct tmio_mmc_h
static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
struct mmc_data *data = host->data;
+ void *sg_virt;
unsigned short *buf;
unsigned int count;
unsigned long flags;
@@ -170,8 +171,8 @@ static inline void tmio_mmc_pio_irq(stru
return;
}
- buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) +
- host->sg_off);
+ sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
+ buf = (unsigned short *)(sg_virt + host->sg_off);
count = host->sg_ptr->length - host->sg_off;
if (count > data->blksz)
@@ -188,7 +189,7 @@ static inline void tmio_mmc_pio_irq(stru
host->sg_off += count;
- tmio_mmc_kunmap_atomic(host, &flags);
+ tmio_mmc_kunmap_atomic(sg_virt, &flags);
if (host->sg_off == host->sg_ptr->length)
tmio_mmc_next_sg(host);
file:9fa9985949743be7920eccb0a1627036ea8dcc37 -> file:ee8fa89b2b57b2b194e8ae442eee12d8716e3141
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -102,10 +102,7 @@
#define ack_mmc_irqs(host, i) \
do { \
- u32 mask;\
- mask = sd_ctrl_read32((host), CTL_STATUS); \
- mask &= ~((i) & TMIO_MASK_IRQ); \
- sd_ctrl_write32((host), CTL_STATUS, mask); \
+ sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
} while (0)
@@ -200,19 +197,17 @@ static inline int tmio_mmc_next_sg(struc
return --host->sg_len;
}
-static inline char *tmio_mmc_kmap_atomic(struct tmio_mmc_host *host,
+static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
unsigned long *flags)
{
- struct scatterlist *sg = host->sg_ptr;
-
local_irq_save(*flags);
return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
}
-static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host,
+static inline void tmio_mmc_kunmap_atomic(void *virt,
unsigned long *flags)
{
- kunmap_atomic(sg_page(host->sg_ptr), KM_BIO_SRC_IRQ);
+ kunmap_atomic(virt, KM_BIO_SRC_IRQ);
local_irq_restore(*flags);
}
file:6ea520ae2410a9113690641670d488321ac3da23 -> file:776183fb2b59c59d3a6edbf718e92598b59bb799
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -316,7 +316,7 @@ static struct pxa3xx_nand_flash *builtin
#define tAR_NDTR1(r) (((r) >> 0) & 0xf)
/* convert nano-seconds to nand flash controller clock cycles */
-#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) - 1)
+#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
/* convert nand flash controller clock cycles to nano-seconds */
#define cycle2ns(c, clk) ((((c) + 1) * 1000000 + clk / 500) / (clk / 1000))
file:c71e12d05f6ebe25715f5ee9784568aadb1d5f80 -> file:9fb3a0bd994a10f2ce1a63afe355711fc67e65b2
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -380,6 +380,12 @@ out:
return retval;
}
+static irqreturn_t el2_probe_interrupt(int irq, void *seen)
+{
+ *(bool *)seen = true;
+ return IRQ_HANDLED;
+}
+
static int
el2_open(struct net_device *dev)
{
@@ -391,22 +397,35 @@ el2_open(struct net_device *dev)
outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */
do {
- retval = request_irq(*irqp, NULL, 0, "bogus", dev);
- if (retval >= 0) {
+ bool seen;
+
+ retval = request_irq(*irqp, el2_probe_interrupt, 0,
+ dev->name, &seen);
+ if (retval == -EBUSY)
+ continue;
+ if (retval < 0)
+ goto err_disable;
+
/* Twinkle the interrupt, and check if it's seen. */
- unsigned long cookie = probe_irq_on();
+ seen = false;
+ smp_wmb();
outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
outb_p(0x00, E33G_IDCFR);
- if (*irqp == probe_irq_off(cookie) /* It's a good IRQ line! */
- && ((retval = request_irq(dev->irq = *irqp,
- eip_interrupt, 0, dev->name, dev)) == 0))
- break;
- } else {
- if (retval != -EBUSY)
- return retval;
- }
+ msleep(1);
+ free_irq(*irqp, el2_probe_interrupt);
+ if (!seen)
+ continue;
+
+ retval = request_irq(dev->irq = *irqp, eip_interrupt, 0,
+ dev->name, dev);
+ if (retval == -EBUSY)
+ continue;
+ if (retval < 0)
+ goto err_disable;
} while (*++irqp);
+
if (*irqp == 0) {
+ err_disable:
outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
return -EAGAIN;
}
file:00569dc1313cd5c2a1f7ead4603ac8deb7bec4ff -> file:403bfb6d13ee2cb6d982b6f7a0a0893ddbe16ecc
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -2856,10 +2856,11 @@ static int atl1_resume(struct pci_dev *p
pci_enable_wake(pdev, PCI_D3cold, 0);
atl1_reset_hw(&adapter->hw);
- adapter->cmb.cmb->int_stats = 0;
- if (netif_running(netdev))
+ if (netif_running(netdev)) {
+ adapter->cmb.cmb->int_stats = 0;
atl1_up(adapter);
+ }
netif_device_attach(netdev);
return 0;
file:4869adb695865a3f7dc4ff3bacd1ee5a260b296d -> file:137cb031df6cdc9b36e97abc06851cd62124e943
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -2175,8 +2175,6 @@ static int __devinit b44_init_one(struct
dev->irq = sdev->irq;
SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
- netif_carrier_off(dev);
-
err = ssb_bus_powerup(sdev->bus, 0);
if (err) {
dev_err(sdev->dev,
@@ -2216,6 +2214,8 @@ static int __devinit b44_init_one(struct
goto err_out_powerdown;
}
+ netif_carrier_off(dev);
+
ssb_set_drvdata(sdev, dev);
/* Chip reset provides power to the b44 MAC & PCI cores, which
file:a9aa957f3a7e6d347426e041ea7ca6bdeeedb764 -> file:4874b2bd6bbd6ffc501fc7263486b9bf705e332a
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -247,6 +247,9 @@ static const struct flash_spec flash_570
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
+static void bnx2_init_napi(struct bnx2 *bp);
+static void bnx2_del_napi(struct bnx2 *bp);
+
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
u32 diff;
@@ -6173,6 +6176,7 @@ bnx2_open(struct net_device *dev)
bnx2_disable_int(bp);
bnx2_setup_int_mode(bp, disable_msi);
+ bnx2_init_napi(bp);
bnx2_napi_enable(bp);
rc = bnx2_alloc_mem(bp);
if (rc)
@@ -6234,6 +6238,7 @@ open_err:
bnx2_free_skbs(bp);
bnx2_free_irq(bp);
bnx2_free_mem(bp);
+ bnx2_del_napi(bp);
return rc;
}
@@ -6441,6 +6446,7 @@ bnx2_close(struct net_device *dev)
bnx2_free_irq(bp);
bnx2_free_skbs(bp);
bnx2_free_mem(bp);
+ bnx2_del_napi(bp);
bp->link_up = 0;
netif_carrier_off(bp->dev);
bnx2_set_power_state(bp, PCI_D3hot);
@@ -8016,12 +8022,21 @@ bnx2_bus_string(struct bnx2 *bp, char *s
return str;
}
-static void __devinit
+static void
+bnx2_del_napi(struct bnx2 *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->irq_nvecs; i++)
+ netif_napi_del(&bp->bnx2_napi[i].napi);
+}
+
+static void
bnx2_init_napi(struct bnx2 *bp)
{
int i;
- for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
+ for (i = 0; i < bp->irq_nvecs; i++) {
struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
int (*poll)(struct napi_struct *, int);
@@ -8090,7 +8105,6 @@ bnx2_init_one(struct pci_dev *pdev, cons
dev->ethtool_ops = &bnx2_ethtool_ops;
bp = netdev_priv(dev);
- bnx2_init_napi(bp);
pci_set_drvdata(pdev, dev);
file:c3fa31c9f2a7440f893f60ba10e6007ac9150251 -> file:d3854ac22cbf3cb3fda6ec33c19c3dbe7a545c55
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2451,6 +2451,9 @@ int bond_3ad_lacpdu_recv(struct sk_buff
if (!(dev->flags & IFF_MASTER))
goto out;
+ if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
+ goto out;
+
read_lock(&bond->lock);
slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev),
orig_dev);
file:9b5936f072dcc48232e9dd4e9c580652ad6ca0dc -> file:71143751d8fd52a1ed78bc9c58a7a6f6ac03780d
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -370,6 +370,9 @@ static int rlb_arp_recv(struct sk_buff *
goto out;
}
+ if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
+ goto out;
+
if (skb->len < sizeof(struct arp_pkt)) {
pr_debug("Packet is too small to be an ARP\n");
goto out;
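
Both bonding receive hooks above gained a pskb_may_pull() check before any header fields are read: an skb's first bytes may sit in non-linear fragments, so the handler must make at least the header linear before casting skb->data. A hedged sketch of the idiom (my_hdr and the handler are hypothetical):

    #include <linux/errno.h>
    #include <linux/skbuff.h>
    #include <linux/types.h>

    struct my_hdr {                         /* illustrative protocol header */
            __be16 op;
            __be16 len;
    };

    static int my_proto_rcv(struct sk_buff *skb)
    {
            const struct my_hdr *hdr;

            /* May reallocate the skb head, so fetch pointers afterwards. */
            if (!pskb_may_pull(skb, sizeof(struct my_hdr)))
                    goto drop;

            hdr = (const struct my_hdr *)skb->data;
            /* ... hdr->op and hdr->len are now safe to read ... */
            return 0;

    drop:
            kfree_skb(skb);
            return -EINVAL;
    }
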
file:34e776c5f06b4391e1a7bf42ff1cffb72c8b9ce7 -> file:2b378e75b1b02389a848dc4e76542b09bd13e4df
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1274,6 +1274,7 @@ static void cxgb_down(struct adapter *ad
free_irq_resources(adapter);
quiesce_rx(adapter);
+ t3_sge_stop(adapter);
flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
}
@@ -2274,6 +2275,8 @@ static int cxgb_extension_ioctl(struct n
case CHELSIO_GET_QSET_NUM:{
struct ch_reg edata;
+ memset(&edata, 0, sizeof(struct ch_reg));
+
edata.cmd = CHELSIO_GET_QSET_NUM;
edata.val = pi->nqsets;
if (copy_to_user(useraddr, &edata, sizeof(edata)))
file:e8e87a723fada3a24da353d1938b7c974b203aae -> file:11f3b7c7422fc1a5b2c9a3bbdd5716f73574745e
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -304,7 +304,7 @@ enum e1e_registers {
#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
-#define E1000_KMRNCTRLSTA_K1_ENABLE 0x140E
+#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002
#define E1000_KMRNCTRLSTA_K1_DISABLE 0x1400
#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
file:433f4dde9ea86d2db5c69e2ece5f802901ca1f7f -> file:d177a02dd20103f4859e546467b57546ef086d95
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -3073,13 +3073,18 @@ static int e1000_test_msi(struct e1000_a
/* disable SERR in case the MSI write causes a master abort */
pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
- pci_write_config_word(adapter->pdev, PCI_COMMAND,
- pci_cmd & ~PCI_COMMAND_SERR);
+ if (pci_cmd & PCI_COMMAND_SERR)
+ pci_write_config_word(adapter->pdev, PCI_COMMAND,
+ pci_cmd & ~PCI_COMMAND_SERR);
err = e1000_test_msi_interrupt(adapter);
- /* restore previous setting of command word */
- pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
+ /* re-enable SERR */
+ if (pci_cmd & PCI_COMMAND_SERR) {
+ pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
+ pci_cmd |= PCI_COMMAND_SERR;
+ pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
+ }
/* success ! */
if (!err)
file:f5b96cadeb2544994c4aadeeb45f30113c082116 -> file:fd57fb421e912af88a39a54f253b4007e7e66079
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -554,6 +554,8 @@ static int eql_g_master_cfg(struct net_d
equalizer_t *eql;
master_config_t mc;
+ memset(&mc, 0, sizeof(master_config_t));
+
if (eql_is_master(dev)) {
eql = netdev_priv(dev);
mc.max_slaves = eql->max_slaves;
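
The cxgb3 and eql hunks (and the hso one further down) fix the same class of bug: a struct assembled on the kernel stack and handed to copy_to_user() must be zeroed first, otherwise padding bytes and any fields the code forgets to set leak stale kernel stack contents to userspace. A sketch of the safe pattern (struct reply and the helper are hypothetical):

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct reply {                          /* illustrative ioctl reply */
            u32 cmd;
            u16 val;                        /* 2 bytes of padding follow */
    };

    static int fill_reply(void __user *useraddr, u16 val)
    {
            struct reply r;

            memset(&r, 0, sizeof(r));       /* zero padding + unset fields */
            r.cmd = 42;
            r.val = val;
            if (copy_to_user(useraddr, &r, sizeof(r)))
                    return -EFAULT;
            return 0;
    }
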
file:5bf31f1509c93acca119f3e87175f97505bd4d31 -> file:934a28fde6118b99fa8aa959df9d972e302fd32b
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1621,7 +1621,7 @@ static int gfar_clean_tx_ring(struct net
if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size &&
skb_recycle_check(skb, priv->rx_buffer_size +
RXBUF_ALIGNMENT))
- __skb_queue_head(&priv->rx_recycle, skb);
+ skb_queue_head(&priv->rx_recycle, skb);
else
dev_kfree_skb_any(skb);
@@ -1703,7 +1703,7 @@ struct sk_buff * gfar_new_skb(struct net
struct gfar_private *priv = netdev_priv(dev);
struct sk_buff *skb = NULL;
- skb = __skb_dequeue(&priv->rx_recycle);
+ skb = skb_dequeue(&priv->rx_recycle);
if (!skb)
skb = netdev_alloc_skb(dev,
priv->rx_buffer_size + RXBUF_ALIGNMENT);
@@ -1862,7 +1862,7 @@ int gfar_clean_rx_ring(struct net_device
* recycle list.
*/
skb->data = skb->head + NET_SKB_PAD;
- __skb_queue_head(&priv->rx_recycle, skb);
+ skb_queue_head(&priv->rx_recycle, skb);
}
} else {
/* Increment the number of packets */
file:3bb3a6da36c26f57240cc2fbae42bce68b27315b -> file:a893f45db817498cdc4251e2bf955472fd88dd15
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -1578,6 +1578,16 @@ jme_free_irq(struct jme_adapter *jme)
}
}
+static inline void
+jme_phy_on(struct jme_adapter *jme)
+{
+ u32 bmcr;
+
+ bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
+ bmcr &= ~BMCR_PDOWN;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
+}
+
static int
jme_open(struct net_device *netdev)
{
@@ -1598,10 +1608,12 @@ jme_open(struct net_device *netdev)
jme_start_irq(jme);
- if (test_bit(JME_FLAG_SSET, &jme->flags))
+ if (test_bit(JME_FLAG_SSET, &jme->flags)) {
+ jme_phy_on(jme);
jme_set_settings(netdev, &jme->old_ecmd);
- else
+ } else {
jme_reset_phy_processor(jme);
+ }
jme_reset_link(jme);
@@ -3013,10 +3025,12 @@ jme_resume(struct pci_dev *pdev)
jme_clear_pm(jme);
pci_restore_state(pdev);
- if (test_bit(JME_FLAG_SSET, &jme->flags))
+ if (test_bit(JME_FLAG_SSET, &jme->flags)) {
+ jme_phy_on(jme);
jme_set_settings(netdev, &jme->old_ecmd);
- else
+ } else {
jme_reset_phy_processor(jme);
+ }
jme_start_irq(jme);
netif_device_attach(netdev);
file:8a0904368e0838027e7a23621918203ebdd02d5e -> file:0f3ae462d4317ec670a72423431eccebf5a4bf04
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -1199,7 +1199,6 @@ netxen_process_rcv(struct netxen_adapter
if (pkt_offset)
skb_pull(skb, pkt_offset);
- skb->truesize = skb->len + sizeof(struct sk_buff);
skb->protocol = eth_type_trans(skb, netdev);
napi_gro_receive(&sds_ring->napi, skb);
@@ -1261,8 +1260,6 @@ netxen_process_lro(struct netxen_adapter
skb_put(skb, lro_length + data_offset);
- skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
-
skb_pull(skb, l2_hdr_offset);
skb->protocol = eth_type_trans(skb, netdev);
file:8b14c6eda7c3379e67eda5fac94960e0e538b46b -> file:9ee9f01a929b99711c7c19e3dd9227aa525ff3fe
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -135,7 +135,7 @@
#define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor))
#define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor))
#define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */
-#define MCAST_MAX 4 /* Max number multicast addresses to filter */
+#define MCAST_MAX 3 /* Max number multicast addresses to filter */
/* Descriptor status */
#define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */
@@ -985,9 +985,6 @@ static void r6040_multicast_list(struct
crc >>= 26;
hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
}
- /* Write the index of the hash table */
- for (i = 0; i < 4; i++)
- iowrite16(hash_table[i] << 14, ioaddr + MCR1);
/* Fill the MAC hash tables with their values */
iowrite16(hash_table[0], ioaddr + MAR0);
iowrite16(hash_table[1], ioaddr + MAR1);
@@ -995,6 +992,7 @@ static void r6040_multicast_list(struct
iowrite16(hash_table[3], ioaddr + MAR3);
}
/* Multicast Address 1~4 case */
+ dmi = dev->mc_list;
for (i = 0, dmi; (i < dev->mc_count) && (i < MCAST_MAX); i++) {
adrp = (u16 *)dmi->dmi_addr;
iowrite16(adrp[0], ioaddr + MID_1L + 8*i);
@@ -1003,9 +1001,9 @@ static void r6040_multicast_list(struct
dmi = dmi->next;
}
for (i = dev->mc_count; i < MCAST_MAX; i++) {
- iowrite16(0xffff, ioaddr + MID_0L + 8*i);
- iowrite16(0xffff, ioaddr + MID_0M + 8*i);
- iowrite16(0xffff, ioaddr + MID_0H + 8*i);
+ iowrite16(0xffff, ioaddr + MID_1L + 8*i);
+ iowrite16(0xffff, ioaddr + MID_1M + 8*i);
+ iowrite16(0xffff, ioaddr + MID_1H + 8*i);
}
}
file:211b587195cc845ea89eefce9a044ccab4708081 -> file:7dd213239f2bd56dc47bc70133055161c9c89117
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -557,6 +557,11 @@ static void mdio_write(void __iomem *ioa
break;
udelay(25);
}
+ /*
+ * According to hardware specs a 20us delay is required after write
+ * complete indication, but before sending next command.
+ */
+ udelay(20);
}
static int mdio_read(void __iomem *ioaddr, int reg_addr)
@@ -576,6 +581,12 @@ static int mdio_read(void __iomem *ioadd
}
udelay(25);
}
+ /*
+ * According to hardware specs a 20us delay is required after read
+ * complete indication, but before sending next command.
+ */
+ udelay(20);
+
return value;
}
@@ -3988,7 +3999,7 @@ static inline void rtl8169_map_to_asic(s
static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
struct net_device *dev,
struct RxDesc *desc, int rx_buf_sz,
- unsigned int align)
+ unsigned int align, gfp_t gfp)
{
struct sk_buff *skb;
dma_addr_t mapping;
@@ -3996,7 +4007,7 @@ static struct sk_buff *rtl8169_alloc_rx_
pad = align ? align : NET_IP_ALIGN;
- skb = netdev_alloc_skb(dev, rx_buf_sz + pad);
+ skb = __netdev_alloc_skb(dev, rx_buf_sz + pad, gfp);
if (!skb)
goto err_out;
@@ -4027,7 +4038,7 @@ static void rtl8169_rx_clear(struct rtl8
}
static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
- u32 start, u32 end)
+ u32 start, u32 end, gfp_t gfp)
{
u32 cur;
@@ -4042,7 +4053,7 @@ static u32 rtl8169_rx_fill(struct rtl816
skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev,
tp->RxDescArray + i,
- tp->rx_buf_sz, tp->align);
+ tp->rx_buf_sz, tp->align, gfp);
if (!skb)
break;
@@ -4070,7 +4081,7 @@ static int rtl8169_init_ring(struct net_
memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
- if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
+ if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC, GFP_KERNEL) != NUM_RX_DESC)
goto err_out;
rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
@@ -4573,7 +4584,7 @@ static int rtl8169_rx_interrupt(struct n
count = cur_rx - tp->cur_rx;
tp->cur_rx = cur_rx;
- delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
+ delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx, GFP_ATOMIC);
if (!delta && count && netif_msg_intr(tp))
printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);
tp->dirty_rx += delta;
file:8f5414348e8629569a6fa12ff08f710d425abe6a -> file:5b07e002f4e9079aeab31b92967c679a390dbdd1
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -40,6 +40,7 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/mii.h>
+#include <linux/dmi.h>
#include <asm/irq.h>
#include "skge.h"
@@ -3890,6 +3891,8 @@ static void __devinit skge_show_addr(str
dev->name, dev->dev_addr);
}
+static int only_32bit_dma;
+
static int __devinit skge_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -3911,7 +3914,7 @@ static int __devinit skge_probe(struct p
pci_set_master(pdev);
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
using_dac = 1;
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
} else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
@@ -4168,8 +4171,21 @@ static struct pci_driver skge_driver = {
.shutdown = skge_shutdown,
};
+static struct dmi_system_id skge_32bit_dma_boards[] = {
+ {
+ .ident = "Gigabyte nForce boards",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"),
+ DMI_MATCH(DMI_BOARD_NAME, "nForce"),
+ },
+ },
+ {}
+};
+
static int __init skge_init_module(void)
{
+ if (dmi_check_system(skge_32bit_dma_boards))
+ only_32bit_dma = 1;
skge_debug_init();
return pci_register_driver(&skge_driver);
}
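
The skge hunk disables 64-bit DMA on specific boards by consulting the DMI tables once at module init. A minimal sketch of the dmi_check_system() idiom, assuming a hypothetical driver and board entry:

    #include <linux/dmi.h>
    #include <linux/init.h>

    static int force_32bit_dma;             /* consulted later in probe() */

    static const struct dmi_system_id broken_dma_boards[] = {
            {
                    .ident = "Example Vendor board",   /* hypothetical entry */
                    .matches = {
                            DMI_MATCH(DMI_BOARD_VENDOR, "Example Vendor"),
                            DMI_MATCH(DMI_BOARD_NAME, "Example Board"),
                    },
            },
            { }                             /* zeroed terminator is required */
    };

    static int __init mydrv_init(void)
    {
            if (dmi_check_system(broken_dma_boards))
                    force_32bit_dma = 1;
            return 0;
    }
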
file:f9cdcbcb77d4989efe930f1014a9c32128081216 -> file:b496fa6811e897554cab3c3fb43c8d52358f5be3
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -85,8 +85,7 @@ struct smsc911x_data {
*/
spinlock_t mac_lock;
- /* spinlock to ensure 16-bit accesses are serialised.
- * unused with a 32-bit bus */
+ /* spinlock to ensure register accesses are serialised */
spinlock_t dev_lock;
struct phy_device *phy_dev;
@@ -119,37 +118,33 @@ struct smsc911x_data {
unsigned int hashlo;
};
-/* The 16-bit access functions are significantly slower, due to the locking
- * necessary. If your bus hardware can be configured to do this for you
- * (in response to a single 32-bit operation from software), you should use
- * the 32-bit access functions instead. */
-
-static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
+static inline u32 __smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
{
if (pdata->config.flags & SMSC911X_USE_32BIT)
return readl(pdata->ioaddr + reg);
- if (pdata->config.flags & SMSC911X_USE_16BIT) {
- u32 data;
- unsigned long flags;
-
- /* these two 16-bit reads must be performed consecutively, so
- * must not be interrupted by our own ISR (which would start
- * another read operation) */
- spin_lock_irqsave(&pdata->dev_lock, flags);
- data = ((readw(pdata->ioaddr + reg) & 0xFFFF) |
+ if (pdata->config.flags & SMSC911X_USE_16BIT)
+ return ((readw(pdata->ioaddr + reg) & 0xFFFF) |
((readw(pdata->ioaddr + reg + 2) & 0xFFFF) << 16));
- spin_unlock_irqrestore(&pdata->dev_lock, flags);
-
- return data;
- }
BUG();
return 0;
}
-static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
- u32 val)
+static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
+{
+ u32 data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->dev_lock, flags);
+ data = __smsc911x_reg_read(pdata, reg);
+ spin_unlock_irqrestore(&pdata->dev_lock, flags);
+
+ return data;
+}
+
+static inline void __smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
+ u32 val)
{
if (pdata->config.flags & SMSC911X_USE_32BIT) {
writel(val, pdata->ioaddr + reg);
@@ -157,44 +152,54 @@ static inline void smsc911x_reg_write(st
}
if (pdata->config.flags & SMSC911X_USE_16BIT) {
- unsigned long flags;
-
- /* these two 16-bit writes must be performed consecutively, so
- * must not be interrupted by our own ISR (which would start
- * another read operation) */
- spin_lock_irqsave(&pdata->dev_lock, flags);
writew(val & 0xFFFF, pdata->ioaddr + reg);
writew((val >> 16) & 0xFFFF, pdata->ioaddr + reg + 2);
- spin_unlock_irqrestore(&pdata->dev_lock, flags);
return;
}
BUG();
}
+static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
+ u32 val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->dev_lock, flags);
+ __smsc911x_reg_write(pdata, reg, val);
+ spin_unlock_irqrestore(&pdata->dev_lock, flags);
+}
+
/* Writes a packet to the TX_DATA_FIFO */
static inline void
smsc911x_tx_writefifo(struct smsc911x_data *pdata, unsigned int *buf,
unsigned int wordcount)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->dev_lock, flags);
+
if (pdata->config.flags & SMSC911X_SWAP_FIFO) {
while (wordcount--)
- smsc911x_reg_write(pdata, TX_DATA_FIFO, swab32(*buf++));
- return;
+ __smsc911x_reg_write(pdata, TX_DATA_FIFO,
+ swab32(*buf++));
+ goto out;
}
if (pdata->config.flags & SMSC911X_USE_32BIT) {
writesl(pdata->ioaddr + TX_DATA_FIFO, buf, wordcount);
- return;
+ goto out;
}
if (pdata->config.flags & SMSC911X_USE_16BIT) {
while (wordcount--)
- smsc911x_reg_write(pdata, TX_DATA_FIFO, *buf++);
- return;
+ __smsc911x_reg_write(pdata, TX_DATA_FIFO, *buf++);
+ goto out;
}
BUG();
+out:
+ spin_unlock_irqrestore(&pdata->dev_lock, flags);
}
/* Reads a packet out of the RX_DATA_FIFO */
@@ -202,24 +207,31 @@ static inline void
smsc911x_rx_readfifo(struct smsc911x_data *pdata, unsigned int *buf,
unsigned int wordcount)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->dev_lock, flags);
+
if (pdata->config.flags & SMSC911X_SWAP_FIFO) {
while (wordcount--)
- *buf++ = swab32(smsc911x_reg_read(pdata, RX_DATA_FIFO));
- return;
+ *buf++ = swab32(__smsc911x_reg_read(pdata,
+ RX_DATA_FIFO));
+ goto out;
}
if (pdata->config.flags & SMSC911X_USE_32BIT) {
readsl(pdata->ioaddr + RX_DATA_FIFO, buf, wordcount);
- return;
+ goto out;
}
if (pdata->config.flags & SMSC911X_USE_16BIT) {
while (wordcount--)
- *buf++ = smsc911x_reg_read(pdata, RX_DATA_FIFO);
- return;
+ *buf++ = __smsc911x_reg_read(pdata, RX_DATA_FIFO);
+ goto out;
}
BUG();
+out:
+ spin_unlock_irqrestore(&pdata->dev_lock, flags);
}
/* waits for MAC not busy, with timeout. Only called by smsc911x_mac_read
file:4fdfa2ae5418e6c030be80b5568e3c12dcbbf3f9 -> file:0f77aca7280a2f1a7d780a910a9758853a6cb9ce
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1006,7 +1006,8 @@ static int tun_set_iff(struct net *net,
if (err < 0)
goto err_free_sk;
- if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
+ if (!net_eq(dev_net(tun->dev), &init_net) ||
+ device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
device_create_file(&tun->dev->dev, &dev_attr_owner) ||
device_create_file(&tun->dev->dev, &dev_attr_group))
printk(KERN_ERR "Failed to create tun sysfs files\n");
file:6ce7f775bb7482a362d4dc1320570847bcfe7511 -> file:e644f9afa32a494e1e68dcf5bcbdf5a8ecc5440c
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -54,6 +54,7 @@ static const char driver_name [] = "asix
#define AX_CMD_WRITE_IPG0 0x12
#define AX_CMD_WRITE_IPG1 0x13
#define AX_CMD_READ_NODE_ID 0x13
+#define AX_CMD_WRITE_NODE_ID 0x14
#define AX_CMD_WRITE_IPG2 0x14
#define AX_CMD_WRITE_MULTI_FILTER 0x16
#define AX88172_CMD_READ_NODE_ID 0x17
@@ -165,6 +166,7 @@ static const char driver_name [] = "asix
/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
struct asix_data {
u8 multi_filter[AX_MCAST_FILTER_SIZE];
+ u8 mac_addr[ETH_ALEN];
u8 phymode;
u8 ledmode;
u8 eeprom_len;
@@ -728,6 +730,30 @@ static int asix_ioctl (struct net_device
return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
}
+static int asix_set_mac_address(struct net_device *net, void *p)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct asix_data *data = (struct asix_data *)&dev->data;
+ struct sockaddr *addr = p;
+
+ if (netif_running(net))
+ return -EBUSY;
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+
+ /* We use the 20 byte dev->data
+ * for our 6 byte mac buffer
+ * to avoid allocating memory that
+ * is tricky to free later */
+ memcpy(data->mac_addr, addr->sa_data, ETH_ALEN);
+ asix_write_cmd_async(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
+ data->mac_addr);
+
+ return 0;
+}
+
/* We need to override some ethtool_ops so we require our
own structure so we don't interfere with other usbnet
devices that may be connected at the same time. */
@@ -915,7 +941,7 @@ static const struct net_device_ops ax887
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = asix_ioctl,
.ndo_set_multicast_list = asix_set_multicast,
@@ -1208,7 +1234,7 @@ static const struct net_device_ops ax881
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_multicast_list = asix_set_multicast,
.ndo_do_ioctl = asix_ioctl,
file:43bc3fcc0d8523e837a07dcd40f3b3ca738fe218 -> file:f450bc9a89ac23851a3889594e3bebb7b67a43c7
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1634,6 +1634,8 @@ static int hso_get_count(struct hso_seri
struct uart_icount cnow;
struct hso_tiocmget *tiocmget = serial->tiocmget;
+ memset(&icount, 0, sizeof(struct serial_icounter_struct));
+
if (!tiocmget)
return -ENOENT;
spin_lock_irq(&serial->serial_lock);
file:7ee61d426dbb5dedb31c9e3effa6057c898490e9 -> file:2c4914a52789db7824d314dda2cf4fb97c93898e
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -48,6 +48,7 @@
#include <linux/netdevice.h>
#include <linux/cache.h>
#include <linux/pci.h>
+#include <linux/pci-aspm.h>
#include <linux/ethtool.h>
#include <linux/uaccess.h>
@@ -448,6 +449,26 @@ ath5k_pci_probe(struct pci_dev *pdev,
int ret;
u8 csz;
+ /*
+ * L0s needs to be disabled on all ath5k cards.
+ *
+ * For distributions shipping with CONFIG_PCIEASPM (this will be enabled
+ * by default in the future in 2.6.36) this will also mean both L1 and
+ * L0s will be disabled when a pre 1.1 PCIe device is detected. We do
+ * know L1 works correctly even for all ath5k pre 1.1 PCIe devices,
+ * but we cannot currently undo the effect of a blacklist; for

+ * details you can read pcie_aspm_sanity_check() and see how it adjusts
+ * the device link capability.
+ *
+ * It may be possible in the future to implement some PCI API to allow
+ * drivers to override blacklists for pre 1.1 PCIe but for now it is
+ * best to accept that both L0s and L1 will be disabled completely for
+ * distributions shipping with CONFIG_PCIEASPM rather than having this
+ * issue present. Motivation for adding this new API will be to help
+ * with power consumption for some of these devices.
+ */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
+
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "can't enable device\n");
@@ -1267,6 +1288,10 @@ ath5k_txbuf_setup(struct ath5k_softc *sc
PCI_DMA_TODEVICE);
rate = ieee80211_get_tx_rate(sc->hw, info);
+ if (!rate) {
+ ret = -EINVAL;
+ goto err_unmap;
+ }
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
flags |= AR5K_TXDESC_NOACK;
file:4fe33f7eee9ddd9494db595285df623fe41d7245 -> file:a5daa0d318cb20ff8e0f6cea30d1688119e79d56
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -60,7 +60,7 @@
#define SD_NO_CTL 0xE0
#define NO_CTL 0xff
-#define CTL_MODE_M 7
+#define CTL_MODE_M 0xf
#define CTL_11A 0
#define CTL_11B 1
#define CTL_11G 2
file:4071fc91da0a94509b4ba3361da0f26278f89bc2 -> file:95105782794c2bfd1e57db98b1cb8824d581e409
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -714,7 +714,7 @@ static void ath9k_hw_get_def_gain_bounda
vpdTableI[i][sizeCurrVpdTable - 2]);
vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
- if (tgtIndex > maxIndex) {
+ if (tgtIndex >= maxIndex) {
while ((ss <= tgtIndex) &&
(k < (AR5416_NUM_PDADC_VALUES - 1))) {
tmpVal = (int16_t)((vpdTableI[i][sizeCurrVpdTable - 1] +
file:2185f9771f3835626aa68671be2989f4d9539da4 -> file:aed75eb4d2b7a209023ca86c173a78c2a0671ea7
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -929,7 +929,8 @@ int ath9k_hw_init(struct ath_hw *ah)
if (ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
- (AR_SREV_9280(ah) && !ah->is_pciexpress)) {
+ ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) &&
+ !ah->is_pciexpress)) {
ah->config.serialize_regmode =
SER_REG_MODE_ON;
} else {
@@ -2402,7 +2403,8 @@ int ath9k_hw_reset(struct ath_hw *ah, st
macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
/* For chips on which RTC reset is done, save TSF before it gets cleared */
- if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
+ if (AR_SREV_9100(ah) ||
+ (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)))
tsf = ath9k_hw_gettsf64(ah);
saveLedState = REG_READ(ah, AR_CFG_LED) &
@@ -2432,7 +2434,7 @@ int ath9k_hw_reset(struct ath_hw *ah, st
}
/* Restore TSF */
- if (tsf && AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
+ if (tsf)
ath9k_hw_settsf64(ah, tsf);
if (AR_SREV_9280_10_OR_LATER(ah))
@@ -2452,6 +2454,17 @@ int ath9k_hw_reset(struct ath_hw *ah, st
if (r)
return r;
+ /*
+ * Some AR91xx SoC devices frequently fail to accept TSF writes
+ * right after the chip reset. When that happens, write a new
+ * value after the initvals have been applied, with an offset
+ * based on measured time difference
+ */
+ if (AR_SREV_9100(ah) && (ath9k_hw_gettsf64(ah) < tsf)) {
+ tsf += 1500;
+ ath9k_hw_settsf64(ah, tsf);
+ }
+
/* Setup MFP options for CCMP */
if (AR_SREV_9280_20_OR_LATER(ah)) {
/* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt
file:e499fb6e00c40b3e447287c978ac615aff418021 -> file:6a04681631d1cf1605bf7316a6efc7462645fd53
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -423,6 +423,14 @@ static void ath_tx_complete_aggr(struct
bf = bf_next;
}
+ /* prepend un-acked frames to the beginning of the pending frame queue */
+ if (!list_empty(&bf_pending)) {
+ spin_lock_bh(&txq->axq_lock);
+ list_splice(&bf_pending, &tid->buf_q);
+ ath_tx_queue_tid(txq, tid);
+ spin_unlock_bh(&txq->axq_lock);
+ }
+
if (tid->state & AGGR_CLEANUP) {
if (tid->baw_head == tid->baw_tail) {
tid->state &= ~AGGR_ADDBA_COMPLETE;
@@ -435,14 +443,6 @@ static void ath_tx_complete_aggr(struct
return;
}
- /* prepend un-acked frames to the beginning of the pending frame queue */
- if (!list_empty(&bf_pending)) {
- spin_lock_bh(&txq->axq_lock);
- list_splice(&bf_pending, &tid->buf_q);
- ath_tx_queue_tid(txq, tid);
- spin_unlock_bh(&txq->axq_lock);
- }
-
rcu_read_unlock();
if (needreset)
file:c1dd857697a7f6bd75599f3e7dd02c024e780564 -> file:21cf5215f6f60e7273fcfb5f1814aaaf9d743e92
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -31,7 +31,6 @@ enum ctl_group {
#define NO_CTL 0xff
#define SD_NO_CTL 0xE0
#define NO_CTL 0xff
-#define CTL_MODE_M 7
#define CTL_11A 0
#define CTL_11B 1
#define CTL_11G 2
file:8fdd41f4b4f2121a65f70d322bfe3951d1e233e0 -> file:e131161137ae721871e4efa7bb3ba9f0a8ba1afa
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -329,6 +329,7 @@ static int prism2_pci_probe(struct pci_d
dev->irq = pdev->irq;
hw_priv->mem_start = mem;
+ dev->base_addr = (unsigned long) mem;
prism2_pci_cor_sreset(local);
file:db2946e0df5435d6675c4eb7edefea24993af644 -> file:faa286ff69391e0e1ca7de8a5a771fb7ed71fc03
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -497,11 +497,10 @@ void iwl_bg_scan_check(struct work_struc
return;
mutex_lock(&priv->mutex);
- if (test_bit(STATUS_SCANNING, &priv->status) ||
- test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan completion watchdog resetting "
- "adapter (%dms)\n",
- jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
+ if (test_bit(STATUS_SCANNING, &priv->status) &&
+ !test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan completion watchdog (%dms)\n",
+ jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
iwl_send_scan_abort(priv);
@@ -797,12 +796,11 @@ void iwl_bg_abort_scan(struct work_struc
!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
return;
- mutex_lock(&priv->mutex);
-
- cancel_delayed_work_sync(&priv->scan_check);
- set_bit(STATUS_SCAN_ABORTING, &priv->status);
- iwl_send_scan_abort(priv);
+ cancel_delayed_work(&priv->scan_check);
+ mutex_lock(&priv->mutex);
+ if (test_bit(STATUS_SCAN_ABORTING, &priv->status))
+ iwl_send_scan_abort(priv);
mutex_unlock(&priv->mutex);
}
EXPORT_SYMBOL(iwl_bg_abort_scan);
file:8e3818f6832e12987b6294eb4fdb304e0454ede1 -> file:2c31eb4c21a2e4850732184330cf6959d18f0a85
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -261,8 +261,10 @@ static int p54_generate_channel_lists(st
list->max_entries = max_channel_num;
list->channels = kzalloc(sizeof(struct p54_channel_entry) *
max_channel_num, GFP_KERNEL);
- if (!list->channels)
+ if (!list->channels) {
+ ret = -ENOMEM;
goto free;
+ }
for (i = 0; i < max_channel_num; i++) {
if (i < priv->iq_autocal_len) {
file:805284df3896be8fd0272646964fa52315c442d9 -> file:ab406c90172cea6aacfbf876ddc812b01161e0f6
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -32,8 +32,17 @@ MODULE_ALIAS("prism54usb");
MODULE_FIRMWARE("isl3886usb");
MODULE_FIRMWARE("isl3887usb");
+/*
+ * Note:
+ *
+ * Always update our wiki's device list (located at:
+ * http://wireless.kernel.org/en/users/Drivers/p54/devices ),
+ * whenever you add a new device.
+ */
+
static struct usb_device_id p54u_table[] __devinitdata = {
/* Version 1 devices (pci chip + net2280) */
+ {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */
{USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */
{USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */
{USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */
@@ -45,7 +54,9 @@ static struct usb_device_id p54u_table[]
{USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */
{USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */
{USB_DEVICE(0x0cde, 0x0006)}, /* Medion 40900, Roper Europe */
+ {USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */
{USB_DEVICE(0x124a, 0x4023)}, /* Shuttle PN15, Airvast WM168g, IOGear GWU513 */
+ {USB_DEVICE(0x1630, 0x0005)}, /* 2Wire 802.11g USB (v1) / Z-Com */
{USB_DEVICE(0x1915, 0x2234)}, /* Linksys WUSB54G OEM */
{USB_DEVICE(0x1915, 0x2235)}, /* Linksys WUSB54G Portable OEM */
{USB_DEVICE(0x2001, 0x3701)}, /* DLink DWL-G120 Spinnaker */
@@ -58,6 +69,7 @@ static struct usb_device_id p54u_table[]
{USB_DEVICE(0x050d, 0x7050)}, /* Belkin F5D7050 ver 1000 */
{USB_DEVICE(0x0572, 0x2000)}, /* Cohiba Proto board */
{USB_DEVICE(0x0572, 0x2002)}, /* Cohiba Proto board */
+ {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */
{USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
{USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
{USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
@@ -77,6 +89,7 @@ static struct usb_device_id p54u_table[]
{USB_DEVICE(0x13B1, 0x000C)}, /* Linksys WUSB54AG */
{USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */
{USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
+ {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */
{USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
{USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */
{USB_DEVICE(0x413c, 0x8102)}, /* Spinnaker DUT */
@@ -929,8 +942,8 @@ static int __devinit p54u_probe(struct u
#ifdef CONFIG_PM
/* ISL3887 needs a full reset on resume */
udev->reset_resume = 1;
+#endif /* CONFIG_PM */
err = p54u_device_reset(dev);
-#endif
priv->hw_type = P54U_3887;
dev->extra_tx_headroom += sizeof(struct lm87_tx_hdr);
file:9d147ded8741556e81124497c196d480c8b17dc5 -> file:0edd7b493aa7ea73a74c4faedf784464e86cafc9
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -445,7 +445,7 @@ static void p54_rx_frame_sent(struct p54
}
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
- (!payload->status))
+ !(payload->status & P54_TX_FAILED))
info->flags |= IEEE80211_TX_STAT_ACK;
if (payload->status & P54_TX_PSM_CANCELLED)
info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
file:baa051d5bfbe0bcd84bd9f28530ac73f6d4863d8 -> file:1a11d955f2151a92547344920c25886cafc7ff1f
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1619,6 +1619,7 @@ static void backend_changed(struct xenbu
if (xennet_connect(netdev) != 0)
break;
xenbus_switch_state(dev, XenbusStateConnected);
+ netif_notify_peers(netdev);
break;
case XenbusStateClosing:
file:c9e2ae90f19508db8f74f91440636689d50321d8 -> file:5c4df24eae4bb0066fa34a616d9324996f4d5c07
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -140,16 +140,6 @@ static struct notifier_block module_load
.notifier_call = module_load_notify,
};
-
-static void end_sync(void)
-{
- end_cpu_work();
- /* make sure we don't leak task structs */
- process_task_mortuary();
- process_task_mortuary();
-}
-
-
int sync_start(void)
{
int err;
@@ -157,7 +147,7 @@ int sync_start(void)
if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
return -ENOMEM;
- start_cpu_work();
+ mutex_lock(&buffer_mutex);
err = task_handoff_register(&task_free_nb);
if (err)
@@ -172,7 +162,10 @@ int sync_start(void)
if (err)
goto out4;
+ start_cpu_work();
+
out:
+ mutex_unlock(&buffer_mutex);
return err;
out4:
profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
@@ -181,7 +174,6 @@ out3:
out2:
task_handoff_unregister(&task_free_nb);
out1:
- end_sync();
free_cpumask_var(marked_cpus);
goto out;
}
@@ -189,11 +181,20 @@ out1:
void sync_stop(void)
{
+ /* flush buffers */
+ mutex_lock(&buffer_mutex);
+ end_cpu_work();
unregister_module_notifier(&module_load_nb);
profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
task_handoff_unregister(&task_free_nb);
- end_sync();
+ mutex_unlock(&buffer_mutex);
+ flush_scheduled_work();
+
+ /* make sure we don't leak task structs */
+ process_task_mortuary();
+ process_task_mortuary();
+
free_cpumask_var(marked_cpus);
}
file:1f1f5a8016a237b57e1d4aa01acf742718ba2659 -> file:5e2ac4aea949c381f492cf264f827dfe4fc59aae
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -121,8 +121,6 @@ void end_cpu_work(void)
cancel_delayed_work(&b->work);
}
-
- flush_scheduled_work();
}
/*
file:9581d3619450d609ca69e305d4fcad7148f9ab93 -> file:cc31baa31659782183de1980513265f3882edb31
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -182,16 +182,18 @@ static int led_proc_read(char *page, cha
static int led_proc_write(struct file *file, const char *buf,
unsigned long count, void *data)
{
- char *cur, lbuf[count + 1];
+ char *cur, lbuf[32];
int d;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- memset(lbuf, 0, count + 1);
+ if (count >= sizeof(lbuf))
+ count = sizeof(lbuf)-1;
if (copy_from_user(lbuf, buf, count))
return -EFAULT;
+ lbuf[count] = 0;
cur = lbuf;
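
The led.c fix replaces a user-controlled variable-length array (char lbuf[count + 1], an unbounded stack allocation) with a fixed buffer plus a clamp. The clamp/copy/NUL-terminate sequence is the general pattern for any write handler that parses a short string from userspace; a hedged sketch using the same old-style procfs write signature:

    #include <linux/fs.h>
    #include <linux/uaccess.h>

    static int my_proc_write(struct file *file, const char __user *buf,
                             unsigned long count, void *data)
    {
            char lbuf[32];

            if (count >= sizeof(lbuf))      /* clamp: never trust the caller */
                    count = sizeof(lbuf) - 1;
            if (copy_from_user(lbuf, buf, count))
                    return -EFAULT;
            lbuf[count] = '\0';             /* always terminate before parsing */

            /* ... parse lbuf ... */
            return count;
    }
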
file:2498602151e6cb081189d81ac62eb2850c52abfb -> file:ba83495fb5a6e458f75100c34d54adc13260686a
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -71,6 +71,49 @@
#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
+/* page table handling */
+#define LEVEL_STRIDE (9)
+#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
+
+static inline int agaw_to_level(int agaw)
+{
+ return agaw + 2;
+}
+
+static inline int agaw_to_width(int agaw)
+{
+ return 30 + agaw * LEVEL_STRIDE;
+}
+
+static inline int width_to_agaw(int width)
+{
+ return (width - 30) / LEVEL_STRIDE;
+}
+
+static inline unsigned int level_to_offset_bits(int level)
+{
+ return (level - 1) * LEVEL_STRIDE;
+}
+
+static inline int pfn_level_offset(unsigned long pfn, int level)
+{
+ return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
+}
+
+static inline unsigned long level_mask(int level)
+{
+ return -1UL << level_to_offset_bits(level);
+}
+
+static inline unsigned long level_size(int level)
+{
+ return 1UL << level_to_offset_bits(level);
+}
+
+static inline unsigned long align_to_level(unsigned long pfn, int level)
+{
+ return (pfn + level_size(level) - 1) & level_mask(level);
+}
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
are never going to work. */
@@ -449,8 +492,6 @@ void free_iova_mem(struct iova *iova)
}
-static inline int width_to_agaw(int width);
-
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
unsigned long sagaw;
@@ -664,51 +705,6 @@ out:
spin_unlock_irqrestore(&iommu->lock, flags);
}
-/* page table handling */
-#define LEVEL_STRIDE (9)
-#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
-
-static inline int agaw_to_level(int agaw)
-{
- return agaw + 2;
-}
-
-static inline int agaw_to_width(int agaw)
-{
- return 30 + agaw * LEVEL_STRIDE;
-
-}
-
-static inline int width_to_agaw(int width)
-{
- return (width - 30) / LEVEL_STRIDE;
-}
-
-static inline unsigned int level_to_offset_bits(int level)
-{
- return (level - 1) * LEVEL_STRIDE;
-}
-
-static inline int pfn_level_offset(unsigned long pfn, int level)
-{
- return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
-}
-
-static inline unsigned long level_mask(int level)
-{
- return -1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long level_size(int level)
-{
- return 1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long align_to_level(unsigned long pfn, int level)
-{
- return (pfn + level_size(level) - 1) & level_mask(level);
-}
-
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
unsigned long pfn)
{
file:f9cf3173b23dcc9e8bea22d1ff6679523bcb0944 -> file:0fb1d0542339281533895b53bbf3aba8988c64af
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -195,6 +195,9 @@ void unmask_msi_irq(unsigned int irq)
void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
struct msi_desc *entry = get_irq_desc_msi(desc);
+
+ BUG_ON(entry->dev->current_state != PCI_D0);
+
if (entry->msi_attrib.is_msix) {
void __iomem *base = entry->mask_base +
entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
@@ -228,10 +231,32 @@ void read_msi_msg(unsigned int irq, stru
read_msi_msg_desc(desc, msg);
}
+void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
+{
+ struct msi_desc *entry = get_irq_desc_msi(desc);
+
+ /* Assert that the cache is valid, assuming that
+ * valid messages are not all-zeroes. */
+ BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo |
+ entry->msg.data));
+
+ *msg = entry->msg;
+}
+
+void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ get_cached_msi_msg_desc(desc, msg);
+}
+
void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
struct msi_desc *entry = get_irq_desc_msi(desc);
- if (entry->msi_attrib.is_msix) {
+
+ if (entry->dev->current_state != PCI_D0) {
+ /* Don't touch the hardware now */
+ } else if (entry->msi_attrib.is_msix) {
void __iomem *base;
base = entry->mask_base +
entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
file:0f6382f090ee575f04add353066be31abbb05336 -> file:3a3b9110db3ea564d9587aba7a2a3fdd873c78c6
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -662,17 +662,21 @@ void pci_remove_legacy_files(struct pci_
#ifdef HAVE_PCI_MMAP
-int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma)
+int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
+ enum pci_mmap_api mmap_api)
{
- unsigned long nr, start, size;
+ unsigned long nr, start, size, pci_start;
+ if (pci_resource_len(pdev, resno) == 0)
+ return 0;
nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
start = vma->vm_pgoff;
size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
- if (start < size && size - start >= nr)
+ pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
+ pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0;
+ if (start >= pci_start && start < pci_start + size &&
+ start + nr <= pci_start + size)
return 1;
- WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n",
- current->comm, start, start+nr, pci_name(pdev), resno, size);
return 0;
}
@@ -702,8 +706,14 @@ pci_mmap_resource(struct kobject *kobj,
if (i >= PCI_ROM_RESOURCE)
return -ENODEV;
- if (!pci_mmap_fits(pdev, i, vma))
+ if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) {
+ WARN(1, "process \"%s\" tried to map 0x%08lx bytes "
+ "at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n",
+ current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff,
+ pci_name(pdev), i,
+ pci_resource_start(pdev, i), pci_resource_len(pdev, i));
return -EINVAL;
+ }
/* pci_mmap_page_range() expects the same kind of entry as coming
* from /proc/bus/pci/ which is a "user visible" value. If this is
file:d92d1954a2fb17ea0415772531ee27563ec02da3 -> file:bfc3337adcd1b170b2c23dc5bea3e9316d0e4807
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -13,8 +13,13 @@ extern int pci_create_sysfs_dev_files(st
extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
extern void pci_cleanup_rom(struct pci_dev *dev);
#ifdef HAVE_PCI_MMAP
+enum pci_mmap_api {
+ PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */
+ PCI_MMAP_PROCFS /* mmap on /proc/bus/pci/<BDF> */
+};
extern int pci_mmap_fits(struct pci_dev *pdev, int resno,
- struct vm_area_struct *vma);
+ struct vm_area_struct *vma,
+ enum pci_mmap_api mmap_api);
#endif
int pci_probe_reset_function(struct pci_dev *dev);
file:593bb844b8db68258a41647f709f1d5d6807640d -> file:a03ad8cfc6b99ed938b5b84cf970ec25de88ce78
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -259,7 +259,7 @@ static int proc_bus_pci_mmap(struct file
/* Make sure the caller is mapping a real resource for this device */
for (i = 0; i < PCI_ROM_RESOURCE; i++) {
- if (pci_mmap_fits(dev, i, vma))
+ if (pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS))
break;
}
file:c0de0b999c9128001489f2d4818709983bbe3bc9 -> file:4633fc228603b5477f91b7a9a7979659fedf4280
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -155,6 +155,26 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NE
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
/*
+ * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
+ * for some HT machines to use C4 w/o hanging.
+ */
+static void __devinit quirk_tigerpoint_bm_sts(struct pci_dev *dev)
+{
+ u32 pmbase;
+ u16 pm1a;
+
+ pci_read_config_dword(dev, 0x40, &pmbase);
+ pmbase = pmbase & 0xff80;
+ pm1a = inw(pmbase);
+
+ if (pm1a & 0x10) {
+ dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
+ outw(0x10, pmbase);
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
+
+/*
* Chipsets where PCI->PCI transfers vanish or hang
*/
static void __devinit quirk_nopcipci(struct pci_dev *dev)
@@ -2084,6 +2104,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AT
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
/* Disable MSI on chipsets that are known to not support it */
static void __devinit quirk_disable_msi(struct pci_dev *dev)
@@ -2356,6 +2377,9 @@ static void __devinit __nv_msi_ht_cap_qu
int pos;
int found;
+ if (!pci_msi_enabled())
+ return;
+
/* check if there is HT MSI cap or enabled on this device */
found = ht_check_msi_mapping(dev);
file:d919e96c0afd62f5bde18da2ca0a974b63b66079 -> file:7905285f707ee7a4c0823d809e53d1a3260b2b56
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -39,7 +39,7 @@ module_param(io_speed, int, 0444);
#ifdef CONFIG_PCMCIA_PROBE
#include <asm/irq.h>
/* mask of IRQs already reserved by other cards, we should avoid using them */
-static u8 pcmcia_used_irq[NR_IRQS];
+static u8 pcmcia_used_irq[32];
#endif
@@ -719,6 +719,9 @@ int pcmcia_request_irq(struct pcmcia_dev
for (try = 0; try < 64; try++) {
irq = try % 32;
+ if (irq > NR_IRQS)
+ continue;
+
/* marked as available by driver, and not blocked by userspace? */
if (!((mask >> irq) & 1))
continue;
file:936bae560fa1f730255bde8a94c9493c953efdfd -> file:dc628cb2e762803b6f3374d33a99feae5b08640c
--- a/drivers/power/apm_power.c
+++ b/drivers/power/apm_power.c
@@ -233,6 +233,7 @@ static int calculate_capacity(enum apm_s
empty_design_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN;
now_prop = POWER_SUPPLY_PROP_ENERGY_NOW;
avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG;
+ break;
case SOURCE_VOLTAGE:
full_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX;
empty_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN;
file:8fefe5a73558895f5bb0df35b865366ea02e8618 -> file:4b38eaa9f5f0c2b3f59b2429f763e1616dcdba0f
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -271,14 +271,14 @@ static int olpc_bat_get_property(struct
if (ret)
return ret;
- val->intval = (int)be16_to_cpu(ec_word) * 9760L / 32;
+ val->intval = (s16)be16_to_cpu(ec_word) * 9760L / 32;
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
ret = olpc_ec_cmd(EC_BAT_CURRENT, NULL, 0, (void *)&ec_word, 2);
if (ret)
return ret;
- val->intval = (int)be16_to_cpu(ec_word) * 15625L / 120;
+ val->intval = (s16)be16_to_cpu(ec_word) * 15625L / 120;
break;
case POWER_SUPPLY_PROP_CAPACITY:
ret = olpc_ec_cmd(EC_BAT_SOC, NULL, 0, &ec_byte, 1);
@@ -299,7 +299,7 @@ static int olpc_bat_get_property(struct
if (ret)
return ret;
- val->intval = (int)be16_to_cpu(ec_word) * 100 / 256;
+ val->intval = (s16)be16_to_cpu(ec_word) * 100 / 256;
break;
case POWER_SUPPLY_PROP_TEMP_AMBIENT:
ret = olpc_ec_cmd(EC_AMB_TEMP, NULL, 0, (void *)&ec_word, 2);
@@ -313,7 +313,7 @@ static int olpc_bat_get_property(struct
if (ret)
return ret;
- val->intval = (int)be16_to_cpu(ec_word) * 6250 / 15;
+ val->intval = (s16)be16_to_cpu(ec_word) * 6250 / 15;
break;
case POWER_SUPPLY_PROP_SERIAL_NUMBER:
ret = olpc_ec_cmd(EC_BAT_SERIAL, NULL, 0, (void *)&ser_buf, 8);
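
The olpc_battery casts matter because be16_to_cpu() returns an unsigned 16-bit value: casting it straight to int zero-extends, so a negative EC reading (e.g. current flowing out of the battery) shows up as a huge positive number, while going through (s16) first sign-extends correctly. A small runnable userspace illustration of the difference:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t raw = 0xFF38;              /* how the EC encodes -200 */

            printf("%d\n", (int)raw);           /* prints 65336 -- wrong */
            printf("%d\n", (int)(int16_t)raw);  /* prints -200  -- right */
            return 0;
    }
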
file:138124fcfcade55165aca7ee1088cad8d7ec74d7 -> file:126f240715a426ecdc115f3fc514173c1442d5cb
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -618,6 +618,7 @@ void __irq_entry do_IRQ(struct pt_regs *
old_regs = set_irq_regs(regs);
s390_idle_check();
irq_enter();
+ __get_cpu_var(s390_idle).nohz_delay = 1;
if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
/* Serve timer interrupts first. */
clock_comparator_work();
file:63b521d615f2bd992ffb030cbb2f04f2b817e18e -> file:3e89f8e06cbcb997c6f7cb9534dd715beddd1fb0
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -3171,13 +3171,16 @@ ahd_handle_nonpkt_busfree(struct ahd_sof
tinfo->curr.transport_version = 2;
tinfo->goal.transport_version = 2;
tinfo->goal.ppr_options = 0;
- /*
- * Remove any SCBs in the waiting for selection
- * queue that may also be for this target so
- * that command ordering is preserved.
- */
- ahd_freeze_devq(ahd, scb);
- ahd_qinfifo_requeue_tail(ahd, scb);
+ if (scb != NULL) {
+ /*
+ * Remove any SCBs in the waiting
+ * for selection queue that may
+ * also be for this target so that
+ * command ordering is preserved.
+ */
+ ahd_freeze_devq(ahd, scb);
+ ahd_qinfifo_requeue_tail(ahd, scb);
+ }
printerror = 0;
}
} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE)
@@ -3194,13 +3197,16 @@ ahd_handle_nonpkt_busfree(struct ahd_sof
MSG_EXT_WDTR_BUS_8_BIT,
AHD_TRANS_CUR|AHD_TRANS_GOAL,
/*paused*/TRUE);
- /*
- * Remove any SCBs in the waiting for selection
- * queue that may also be for this target so that
- * command ordering is preserved.
- */
- ahd_freeze_devq(ahd, scb);
- ahd_qinfifo_requeue_tail(ahd, scb);
+ if (scb != NULL) {
+ /*
+ * Remove any SCBs in the waiting for
+ * selection queue that may also be for
+ * this target so that command ordering
+ * is preserved.
+ */
+ ahd_freeze_devq(ahd, scb);
+ ahd_qinfifo_requeue_tail(ahd, scb);
+ }
printerror = 0;
} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE)
&& ppr_busfree == 0) {
@@ -3217,13 +3223,16 @@ ahd_handle_nonpkt_busfree(struct ahd_sof
/*ppr_options*/0,
AHD_TRANS_CUR|AHD_TRANS_GOAL,
/*paused*/TRUE);
- /*
- * Remove any SCBs in the waiting for selection
- * queue that may also be for this target so that
- * command ordering is preserved.
- */
- ahd_freeze_devq(ahd, scb);
- ahd_qinfifo_requeue_tail(ahd, scb);
+ if (scb != NULL) {
+ /*
+ * Remove any SCBs in the waiting for
+ * selection queue that may also be for
+ * this target so that command ordering
+ * is preserved.
+ */
+ ahd_freeze_devq(ahd, scb);
+ ahd_qinfifo_requeue_tail(ahd, scb);
+ }
printerror = 0;
} else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0
&& ahd_sent_msg(ahd, AHDMSG_1B,
@@ -3251,7 +3260,7 @@ ahd_handle_nonpkt_busfree(struct ahd_sof
* the message phases. We check it last in case we
* had to send some other message that caused a busfree.
*/
- if (printerror != 0
+ if (scb != NULL && printerror != 0
&& (lastphase == P_MESGIN || lastphase == P_MESGOUT)
&& ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) {
file:9e8fce0f0c1b3f1022b5658a2ad503fcc810abf6 -> file:bb96d7496215a9908029684d57fee6db730d36ea
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -4174,6 +4174,14 @@ static int ioc_general(void __user *arg,
ha = gdth_find_ha(gen.ionode);
if (!ha)
return -EFAULT;
+
+ if (gen.data_len > INT_MAX)
+ return -EINVAL;
+ if (gen.sense_len > INT_MAX)
+ return -EINVAL;
+ if (gen.data_len + gen.sense_len > INT_MAX)
+ return -EINVAL;
+
if (gen.data_len + gen.sense_len != 0) {
if (!(buf = gdth_ioctl_alloc(ha, gen.data_len + gen.sense_len,
FALSE, &paddr)))
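
Note: gen.data_len and gen.sense_len are user-controlled and their sum is used as an allocation size; the three guards keep each term and the sum within INT_MAX so the size arithmetic cannot wrap to a small value while the large originals are used later. A sketch of the check (field types assumed u32-like):

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

static int validate(uint32_t data_len, uint32_t sense_len)
{
	if (data_len > INT_MAX)
		return -1;
	if (sense_len > INT_MAX)
		return -1;
	if (data_len + sense_len > INT_MAX)
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", validate(0x80000000u, 0x10u));	/* rejected */
	printf("%d\n", validate(4096, 96));		/* accepted */
	return 0;
}
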
file:bb2c696c006acc6f59d82b0aeb1d06980e5dc45c -> file:2d66fac56180e930bda9fb66125ddb93da062713
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1969,7 +1969,7 @@ static int ibmvfc_wait_for_ops(struct ib
DECLARE_COMPLETION_ONSTACK(comp);
int wait;
unsigned long flags;
- signed long timeout = init_timeout * HZ;
+ signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
ENTER;
do {
@@ -2720,6 +2720,7 @@ static struct ibmvfc_async_crq *ibmvfc_n
if (crq->valid & 0x80) {
if (++async_crq->cur == async_crq->size)
async_crq->cur = 0;
+ rmb();
} else
crq = NULL;
@@ -2742,6 +2743,7 @@ static struct ibmvfc_crq *ibmvfc_next_cr
if (crq->valid & 0x80) {
if (++queue->cur == queue->size)
queue->cur = 0;
+ rmb();
} else
crq = NULL;
@@ -2790,12 +2792,14 @@ static void ibmvfc_tasklet(void *data)
while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
ibmvfc_handle_async(async, vhost);
async->valid = 0;
+ wmb();
}
/* Pull all the valid messages off the CRQ */
while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
ibmvfc_handle_crq(crq, vhost);
crq->valid = 0;
+ wmb();
}
vio_enable_interrupts(vdev);
@@ -2803,10 +2807,12 @@ static void ibmvfc_tasklet(void *data)
vio_disable_interrupts(vdev);
ibmvfc_handle_async(async, vhost);
async->valid = 0;
+ wmb();
} else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
vio_disable_interrupts(vdev);
ibmvfc_handle_crq(crq, vhost);
crq->valid = 0;
+ wmb();
} else
done = 1;
}
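
Note: the CRQ slots are written by the hypervisor; rmb() keeps reads of the entry payload from being speculated ahead of the valid-bit test, and wmb() makes the cleared valid byte visible before the slot can be reused. A consumer-side sketch, using __sync_synchronize() as a full-barrier stand-in for the kernel's rmb()/wmb() (slot layout hypothetical):

#include <stdio.h>
#include <stdint.h>

struct crq_slot {
	volatile uint8_t valid;		/* producer sets bit 0x80 */
	uint8_t payload[15];
};

static int consume(struct crq_slot *s, uint8_t *out)
{
	if (!(s->valid & 0x80))
		return 0;		/* nothing queued */

	__sync_synchronize();		/* rmb(): payload read only after valid test */
	*out = s->payload[0];

	s->valid = 0;			/* hand the slot back */
	__sync_synchronize();		/* wmb(): clear visible before slot reuse */
	return 1;
}

int main(void)
{
	struct crq_slot s = { .valid = 0x80, .payload = { 42 } };
	uint8_t v = 0;

	printf("%d %u\n", consume(&s, &v), v);	/* 1 42 */
	return 0;
}
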
file:007fa1c9ef14eedbab72484577bb5683fe9dda19 -> file:ef8e9f878973776de7992607c97f92d425c979b1
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -38,6 +38,7 @@
#define IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT \
(IBMVFC_ADISC_TIMEOUT + IBMVFC_ADISC_CANCEL_TIMEOUT)
#define IBMVFC_INIT_TIMEOUT 120
+#define IBMVFC_ABORT_WAIT_TIMEOUT 40
#define IBMVFC_MAX_REQUESTS_DEFAULT 100
#define IBMVFC_DEBUG 0
file:816ab97eb16da1c3269999e72912303dbe2f2c99 -> file:0ee989fbb0fc34959ae5cf2e017c4dfc4e1d6944
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -346,6 +346,7 @@ static int sas_ata_scr_read(struct ata_l
static struct ata_port_operations sas_sata_ops = {
.phy_reset = sas_ata_phy_reset,
.post_internal_cmd = sas_ata_post_internal,
+ .qc_defer = ata_std_qc_defer,
.qc_prep = ata_noop_qc_prep,
.qc_issue = sas_ata_qc_issue,
.qc_fill_rtf = sas_ata_qc_fill_rtf,
file:41d712e828ebe5b5fc199224477ddf335374d176 -> file:b87fc30fad6ba17f1f8158acdcb436e6e589afb8
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2432,7 +2432,8 @@ scsi_internal_device_unblock(struct scsi
sdev->sdev_state = SDEV_RUNNING;
else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
sdev->sdev_state = SDEV_CREATED;
- else
+ else if (sdev->sdev_state != SDEV_CANCEL &&
+ sdev->sdev_state != SDEV_OFFLINE)
return -EINVAL;
spin_lock_irqsave(q->queue_lock, flags);
file:392d8db33905cbe31f1e2f96644552d9b692eb27 -> file:ad136c2be5016d7bdd09387612c835d4edd10256
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -954,10 +954,11 @@ static void __scsi_remove_target(struct
list_for_each_entry(sdev, &shost->__devices, siblings) {
if (sdev->channel != starget->channel ||
sdev->id != starget->id ||
- sdev->sdev_state == SDEV_DEL)
+ scsi_device_get(sdev))
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_remove_device(sdev);
+ scsi_device_put(sdev);
spin_lock_irqsave(shost->host_lock, flags);
goto restart;
}
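
Note: skipping only SDEV_DEL devices left a window where the sdev could be freed once the host lock was dropped; taking a reference via scsi_device_get() (which fails for a dying device) pins it across the sleeping scsi_remove_device() call. A compilable sketch of the take-ref/drop-lock/put pattern (all names hypothetical):

#include <stdio.h>
#include <pthread.h>

struct dev {
	int refs;
	int dying;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int get_ref(struct dev *d)	/* fails for a dying device */
{
	if (d->dying)
		return -1;
	d->refs++;
	return 0;
}

static void put_ref(struct dev *d)
{
	d->refs--;
}

static void remove_dev(struct dev *d)
{
	pthread_mutex_lock(&lock);
	if (get_ref(d) == 0) {
		pthread_mutex_unlock(&lock);
		/* sleeping teardown runs here; d is pinned by the reference */
		pthread_mutex_lock(&lock);
		put_ref(d);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct dev d = { .refs = 1, .dying = 0 };

	remove_dev(&d);
	printf("refs=%d\n", d.refs);	/* 1: reference balanced */
	return 0;
}
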
file:7694a95bdb5361868a112ee6804bb2824f9f92c1 -> file:81a9d25ecaba4eac3dcd1e20cac06cf3e22d925d
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2049,11 +2049,10 @@ static void sd_probe_async(void *data, a
index = sdkp->index;
dev = &sdp->sdev_gendev;
- if (index < SD_MAX_DISKS) {
- gd->major = sd_major((index & 0xf0) >> 4);
- gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
- gd->minors = SD_MINORS;
- }
+ gd->major = sd_major((index & 0xf0) >> 4);
+ gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
+ gd->minors = SD_MINORS;
+
gd->fops = &sd_fops;
gd->private_data = &sdkp->driver;
gd->queue = sdkp->device->request_queue;
@@ -2142,6 +2141,12 @@ static int sd_probe(struct device *dev)
if (error)
goto out_put;
+ if (index >= SD_MAX_DISKS) {
+ error = -ENODEV;
+ sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name space exhausted.\n");
+ goto out_free_index;
+ }
+
error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
if (error)
goto out_free_index;
file:ef9c6a04ad8f01af0d402f9f0f03a08175c04524 -> file:744d3f6e470987c773f72a733dbbc146fa358706
--- a/drivers/ssb/b43_pci_bridge.c
+++ b/drivers/ssb/b43_pci_bridge.c
@@ -24,6 +24,7 @@ static const struct pci_device_id b43_pc
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4312) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4315) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4318) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BCM_GVC, 0x4318) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4319) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4320) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4321) },
file:8e194d557cace47e5efa2e12c1411544d410ba69 -> file:64abd11f6fbbef64184011c9b4b7d0e0fb182ea8
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -495,9 +495,9 @@ static void ssb_pmu_resources_init(struc
chipco_write32(cc, SSB_CHIPCO_PMU_MAXRES_MSK, max_msk);
}
-/* http://bcm-v4.sipsolutions.net/802.11/SSB/PmuInit */
void ssb_pmu_init(struct ssb_chipcommon *cc)
{
+ struct ssb_bus *bus = cc->dev->bus;
u32 pmucap;
if (!(cc->capabilities & SSB_CHIPCO_CAP_PMU))
@@ -509,12 +509,15 @@ void ssb_pmu_init(struct ssb_chipcommon
ssb_dprintk(KERN_DEBUG PFX "Found rev %u PMU (capabilities 0x%08X)\n",
cc->pmu.rev, pmucap);
- if (cc->pmu.rev == 1)
- chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
- ~SSB_CHIPCO_PMU_CTL_NOILPONW);
- else
- chipco_set32(cc, SSB_CHIPCO_PMU_CTL,
- SSB_CHIPCO_PMU_CTL_NOILPONW);
+ if (cc->pmu.rev >= 1) {
+ if ((bus->chip_id == 0x4325) && (bus->chip_rev < 2)) {
+ chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
+ ~SSB_CHIPCO_PMU_CTL_NOILPONW);
+ } else {
+ chipco_set32(cc, SSB_CHIPCO_PMU_CTL,
+ SSB_CHIPCO_PMU_CTL_NOILPONW);
+ }
+ }
ssb_pmu_pll_init(cc);
ssb_pmu_resources_init(cc);
}
file:17a178139548c781b4c78e55641b6e0455165b45 -> file:321d9ef17b965a252f6f385c80347238ed3d70ff
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -22,7 +22,6 @@
#include "ssb_private.h"
-bool ssb_is_sprom_available(struct ssb_bus *bus);
/* Define the following to 1 to enable a printk on each coreswitch. */
#define SSB_VERBOSE_PCICORESWITCH_DEBUG 0
@@ -168,7 +167,7 @@ err_pci:
}
/* Get the word-offset for a SSB_SPROM_XXX define. */
-#define SPOFF(offset) ((offset) / sizeof(u16))
+#define SPOFF(offset) (((offset) - SSB_SPROM_BASE1) / sizeof(u16))
/* Helper to extract some _offset, which is one of the SSB_SPROM_XXX defines. */
#define SPEX16(_outvar, _offset, _mask, _shift) \
out->_outvar = ((in[SPOFF(_offset)] & (_mask)) >> (_shift))
@@ -253,11 +252,6 @@ static int sprom_do_read(struct ssb_bus
{
int i;
- /* Check if SPROM can be read */
- if (ioread16(bus->mmio + bus->sprom_offset) == 0xFFFF) {
- ssb_printk(KERN_ERR PFX "Unable to read SPROM\n");
- return -ENODEV;
- }
for (i = 0; i < bus->sprom_size; i++)
sprom[i] = ioread16(bus->mmio + bus->sprom_offset + (i * 2));
@@ -652,23 +646,17 @@ static int ssb_pci_sprom_get(struct ssb_
if (!buf)
goto out;
bus->sprom_size = SSB_SPROMSIZE_WORDS_R123;
- err = sprom_do_read(bus, buf);
- if (err)
- goto out_free;
+ sprom_do_read(bus, buf);
err = sprom_check_crc(buf, bus->sprom_size);
if (err) {
/* try for a 440 byte SPROM - revision 4 and higher */
kfree(buf);
buf = kcalloc(SSB_SPROMSIZE_WORDS_R4, sizeof(u16),
GFP_KERNEL);
- if (!buf) {
- err = -ENOMEM;
+ if (!buf)
goto out;
- }
bus->sprom_size = SSB_SPROMSIZE_WORDS_R4;
- err = sprom_do_read(bus, buf);
- if (err)
- goto out_free;
+ sprom_do_read(bus, buf);
err = sprom_check_crc(buf, bus->sprom_size);
if (err) {
/* All CRC attempts failed.
file:43c57b7688abe658223c49834c56f6fb0be6d180 -> file:8a05725cb1d05ed6966eb8ce465bd194fe332820
--- a/drivers/staging/asus_oled/asus_oled.c
+++ b/drivers/staging/asus_oled/asus_oled.c
@@ -609,13 +609,13 @@ static ssize_t class_set_picture(struct
#define ASUS_OLED_DEVICE_ATTR(_file) dev_attr_asus_oled_##_file
-static DEVICE_ATTR(asus_oled_enabled, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(asus_oled_enabled, S_IWUSR | S_IRUGO,
get_enabled, set_enabled);
-static DEVICE_ATTR(asus_oled_picture, S_IWUGO , NULL, set_picture);
+static DEVICE_ATTR(asus_oled_picture, S_IWUSR , NULL, set_picture);
-static DEVICE_ATTR(enabled, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(enabled, S_IWUSR | S_IRUGO,
class_get_enabled, class_set_enabled);
-static DEVICE_ATTR(picture, S_IWUGO, NULL, class_set_picture);
+static DEVICE_ATTR(picture, S_IWUSR, NULL, class_set_picture);
static int asus_oled_probe(struct usb_interface *interface,
const struct usb_device_id *id)
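
Note: this hunk, and the matching S_IWUGO-to-S_IWUSR conversions in the tranzport, line6 and ueagle-atm hunks below, drop the group/other write bits from sysfs attributes; S_IWUGO made them world-writable. The mode arithmetic, as a user-space sketch (S_IRUGO/S_IWUGO are kernel-internal composites, defined here for illustration):

#include <stdio.h>
#include <sys/stat.h>

#define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)
#define S_IWUGO (S_IWUSR | S_IWGRP | S_IWOTH)

int main(void)
{
	printf("old: %04o\n", S_IWUGO | S_IRUGO);	/* 0666: anyone may write */
	printf("new: %04o\n", S_IWUSR | S_IRUGO);	/* 0644: owner-only write */
	return 0;
}
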
file:d63c889ce5574a6017cb627389eaf26b81fe47ee -> file:1d6834d271fea06d529bb50ec6f2045842a5eee5
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -16,6 +16,7 @@ config COMEDI_DEBUG
config COMEDI_PCI_DRIVERS
tristate "Comedi PCI drivers"
depends on COMEDI && PCI
+ select COMEDI_8255
default N
---help---
Enable lots of comedi PCI drivers to be built
@@ -23,6 +24,7 @@ config COMEDI_PCI_DRIVERS
config COMEDI_PCMCIA_DRIVERS
tristate "Comedi PCMCIA drivers"
depends on COMEDI && PCMCIA && PCCARD
+ select COMEDI_8255
default N
---help---
Enable lots of comedi PCMCIA and PCCARD drivers to be built
@@ -33,3 +35,6 @@ config COMEDI_USB_DRIVERS
default N
---help---
Enable lots of comedi USB drivers to be built
+
+config COMEDI_8255
+ tristate
file:df2854d543ccc3857cc6e0efc95bfe7994aaf1d5 -> file:33b1d526837668bee5c457ccc104cd38b7b1f4f6
--- a/drivers/staging/comedi/drivers/Makefile
+++ b/drivers/staging/comedi/drivers/Makefile
@@ -8,8 +8,10 @@ obj-$(CONFIG_COMEDI) += comedi_test.o
obj-$(CONFIG_COMEDI) += comedi_parport.o
obj-$(CONFIG_COMEDI) += pcm_common.o
+# Comedi 8255 module
+obj-$(CONFIG_COMEDI_8255) += 8255.o
+
# Comedi PCI drivers
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += 8255.o
obj-$(CONFIG_COMEDI_PCI_DRIVERS) += acl7225b.o
obj-$(CONFIG_COMEDI_PCI_DRIVERS) += addi_apci_035.o
obj-$(CONFIG_COMEDI_PCI_DRIVERS) += addi_apci_1032.o
file:ef8fcc8c67bd7db85cbc14607967a70e26487147 -> file:f6e04f83cd232cf0cbd3af979d02949eb8cfa432
--- a/drivers/staging/frontier/tranzport.c
+++ b/drivers/staging/frontier/tranzport.c
@@ -202,7 +202,7 @@ static void usb_tranzport_abort_transfer
t->value = temp; \
return count; \
} \
- static DEVICE_ATTR(value, S_IWUGO | S_IRUGO, show_##value, set_##value);
+ static DEVICE_ATTR(value, S_IWUSR | S_IRUGO, show_##value, set_##value);
show_int(enable);
show_int(offline);
file:f69ae33a91e3788a8db90f9d67f738a3a1430454 -> file:3a38103ecfbd7c74126650df841d6f5f535e4304
--- a/drivers/staging/hv/RingBuffer.c
+++ b/drivers/staging/hv/RingBuffer.c
@@ -192,7 +192,7 @@ Description:
static inline u64
GetRingBufferIndices(RING_BUFFER_INFO* RingInfo)
{
- return ((u64)RingInfo->RingBuffer->WriteIndex << 32) || RingInfo->RingBuffer->ReadIndex;
+ return (u64)RingInfo->RingBuffer->WriteIndex << 32;
}
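
Note: the deleted line used logical || where bitwise | was intended, so the function returned 1 whenever either index was non-zero; the replacement keeps only the write index in the upper 32 bits. A sketch of the difference (index values hypothetical):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t write = 0x10, read = 0x20;

	uint64_t logical = ((uint64_t)write << 32) || read;	/* 1: boolean OR */
	uint64_t bitwise = ((uint64_t)write << 32) | read;	/* packed indices */

	printf("%llx vs %llx\n",
	       (unsigned long long)logical, (unsigned long long)bitwise);
	return 0;
}
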
file:69c14066c479e1a7106e5a2dcacecc6f34f3515c -> file:3d8ff086fc7f40cc1c986234d394e90b789cddf6
--- a/drivers/staging/hv/StorVscApi.h
+++ b/drivers/staging/hv/StorVscApi.h
@@ -28,10 +28,10 @@
#include "VmbusApi.h"
/* Defines */
-#define STORVSC_RING_BUFFER_SIZE (10*PAGE_SIZE)
+#define STORVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
#define BLKVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
-#define STORVSC_MAX_IO_REQUESTS 64
+#define STORVSC_MAX_IO_REQUESTS 128
/*
* In Hyper-V, each port/path/target maps to 1 scsi host adapter. In
file:4c3c8bc4bc38984a47c791a88a21f6cb9edfc79d -> file:547261d2537c662da0a2a5e9eeeeab7d230fb76b
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -392,6 +392,9 @@ static const struct net_device_ops devic
.ndo_start_xmit = netvsc_start_xmit,
.ndo_get_stats = netvsc_get_stats,
.ndo_set_multicast_list = netvsc_set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
};
static int netvsc_probe(struct device *device)
file:d49dc21d4cb4a4117c570e5818dee8b2ec5a19eb -> file:2a4b147b0b388aa1b718eb2fa1a471bfca64e0f7
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
@@ -532,7 +532,7 @@ static unsigned int copy_to_bounce_buffe
ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE);
- if (j == 0)
+ if (bounce_addr == 0)
bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
while (srclen) {
@@ -593,7 +593,7 @@ static unsigned int copy_from_bounce_buf
destlen = orig_sgl[i].length;
ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE);
- if (j == 0)
+ if (bounce_addr == 0)
bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
while (destlen) {
@@ -652,6 +652,7 @@ static int storvsc_queuecommand(struct s
unsigned int request_size = 0;
int i;
struct scatterlist *sgl;
+ unsigned int sg_count = 0;
DPRINT_ENTER(STORVSC_DRV);
@@ -736,6 +737,7 @@ static int storvsc_queuecommand(struct s
request->DataBuffer.Length = scsi_bufflen(scmnd);
if (scsi_sg_count(scmnd)) {
sgl = (struct scatterlist *)scsi_sglist(scmnd);
+ sg_count = scsi_sg_count(scmnd);
/* check if we need to bounce the sgl */
if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
@@ -770,11 +772,12 @@ static int storvsc_queuecommand(struct s
scsi_sg_count(scmnd));
sgl = cmd_request->bounce_sgl;
+ sg_count = cmd_request->bounce_sgl_count;
}
request->DataBuffer.Offset = sgl[0].offset;
- for (i = 0; i < scsi_sg_count(scmnd); i++) {
+ for (i = 0; i < sg_count; i++) {
DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d \n",
i, sgl[i].length, sgl[i].offset);
request->DataBuffer.PfnArray[i] =
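
Note: two fixes here. kmap_atomic() now happens whenever no bounce mapping is held (bounce_addr == 0) rather than only for the first element, and once the bounce sgl is substituted, iterating with the original scsi_sg_count() would run past the shorter bounce list, so the count is substituted along with it. A toy sketch of the count/list pairing:

#include <stdio.h>

int main(void)
{
	int orig[8] = { 0 }, bounce[2] = { 10, 20 };
	int *sgl = orig, sg_count = 8;
	int i, need_bounce = 1;

	if (need_bounce) {
		sgl = bounce;
		sg_count = 2;	/* the fix: update the count with the list */
	}
	for (i = 0; i < sg_count; i++)
		printf("%d\n", sgl[i]);
	return 0;
}
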
file:7852d4a960c50fec42842826c209a135eb9dd71d -> file:bc1ffbed3c8a30ae3421633f52a48826a8c3319d
--- a/drivers/staging/line6/Kconfig
+++ b/drivers/staging/line6/Kconfig
@@ -2,6 +2,7 @@ config LINE6_USB
tristate "Line6 USB support"
depends on USB && SND
select SND_RAWMIDI
+ select SND_PCM
help
This is a driver for the guitar amp, cab, and effects modeller
PODxt Pro by Line6 (and similar devices), supporting the
file:23ad08e17f84c49fbf0ccbde4bb2227f7233d3e8 -> file:13cb4c06b3c28afee025757c1a2fe28774d4357a
--- a/drivers/staging/line6/control.c
+++ b/drivers/staging/line6/control.c
@@ -259,108 +259,108 @@ VARIAX_PARAM_R(float, mix2);
VARIAX_PARAM_R(float, mix1);
VARIAX_PARAM_R(int, pickup_wiring);
-static DEVICE_ATTR(tweak, S_IWUGO | S_IRUGO, pod_get_tweak, pod_set_tweak);
-static DEVICE_ATTR(wah_position, S_IWUGO | S_IRUGO, pod_get_wah_position, pod_set_wah_position);
-static DEVICE_ATTR(compression_gain, S_IWUGO | S_IRUGO, pod_get_compression_gain, pod_set_compression_gain);
-static DEVICE_ATTR(vol_pedal_position, S_IWUGO | S_IRUGO, pod_get_vol_pedal_position, pod_set_vol_pedal_position);
-static DEVICE_ATTR(compression_threshold, S_IWUGO | S_IRUGO, pod_get_compression_threshold, pod_set_compression_threshold);
-static DEVICE_ATTR(pan, S_IWUGO | S_IRUGO, pod_get_pan, pod_set_pan);
-static DEVICE_ATTR(amp_model_setup, S_IWUGO | S_IRUGO, pod_get_amp_model_setup, pod_set_amp_model_setup);
-static DEVICE_ATTR(amp_model, S_IWUGO | S_IRUGO, pod_get_amp_model, pod_set_amp_model);
-static DEVICE_ATTR(drive, S_IWUGO | S_IRUGO, pod_get_drive, pod_set_drive);
-static DEVICE_ATTR(bass, S_IWUGO | S_IRUGO, pod_get_bass, pod_set_bass);
-static DEVICE_ATTR(mid, S_IWUGO | S_IRUGO, pod_get_mid, pod_set_mid);
-static DEVICE_ATTR(lowmid, S_IWUGO | S_IRUGO, pod_get_lowmid, pod_set_lowmid);
-static DEVICE_ATTR(treble, S_IWUGO | S_IRUGO, pod_get_treble, pod_set_treble);
-static DEVICE_ATTR(highmid, S_IWUGO | S_IRUGO, pod_get_highmid, pod_set_highmid);
-static DEVICE_ATTR(chan_vol, S_IWUGO | S_IRUGO, pod_get_chan_vol, pod_set_chan_vol);
-static DEVICE_ATTR(reverb_mix, S_IWUGO | S_IRUGO, pod_get_reverb_mix, pod_set_reverb_mix);
-static DEVICE_ATTR(effect_setup, S_IWUGO | S_IRUGO, pod_get_effect_setup, pod_set_effect_setup);
-static DEVICE_ATTR(band_1_frequency, S_IWUGO | S_IRUGO, pod_get_band_1_frequency, pod_set_band_1_frequency);
-static DEVICE_ATTR(presence, S_IWUGO | S_IRUGO, pod_get_presence, pod_set_presence);
-static DEVICE_ATTR2(treble__bass, treble, S_IWUGO | S_IRUGO, pod_get_treble__bass, pod_set_treble__bass);
-static DEVICE_ATTR(noise_gate_enable, S_IWUGO | S_IRUGO, pod_get_noise_gate_enable, pod_set_noise_gate_enable);
-static DEVICE_ATTR(gate_threshold, S_IWUGO | S_IRUGO, pod_get_gate_threshold, pod_set_gate_threshold);
-static DEVICE_ATTR(gate_decay_time, S_IWUGO | S_IRUGO, pod_get_gate_decay_time, pod_set_gate_decay_time);
-static DEVICE_ATTR(stomp_enable, S_IWUGO | S_IRUGO, pod_get_stomp_enable, pod_set_stomp_enable);
-static DEVICE_ATTR(comp_enable, S_IWUGO | S_IRUGO, pod_get_comp_enable, pod_set_comp_enable);
-static DEVICE_ATTR(stomp_time, S_IWUGO | S_IRUGO, pod_get_stomp_time, pod_set_stomp_time);
-static DEVICE_ATTR(delay_enable, S_IWUGO | S_IRUGO, pod_get_delay_enable, pod_set_delay_enable);
-static DEVICE_ATTR(mod_param_1, S_IWUGO | S_IRUGO, pod_get_mod_param_1, pod_set_mod_param_1);
-static DEVICE_ATTR(delay_param_1, S_IWUGO | S_IRUGO, pod_get_delay_param_1, pod_set_delay_param_1);
-static DEVICE_ATTR(delay_param_1_note_value, S_IWUGO | S_IRUGO, pod_get_delay_param_1_note_value, pod_set_delay_param_1_note_value);
-static DEVICE_ATTR2(band_2_frequency__bass, band_2_frequency, S_IWUGO | S_IRUGO, pod_get_band_2_frequency__bass, pod_set_band_2_frequency__bass);
-static DEVICE_ATTR(delay_param_2, S_IWUGO | S_IRUGO, pod_get_delay_param_2, pod_set_delay_param_2);
-static DEVICE_ATTR(delay_volume_mix, S_IWUGO | S_IRUGO, pod_get_delay_volume_mix, pod_set_delay_volume_mix);
-static DEVICE_ATTR(delay_param_3, S_IWUGO | S_IRUGO, pod_get_delay_param_3, pod_set_delay_param_3);
-static DEVICE_ATTR(reverb_enable, S_IWUGO | S_IRUGO, pod_get_reverb_enable, pod_set_reverb_enable);
-static DEVICE_ATTR(reverb_type, S_IWUGO | S_IRUGO, pod_get_reverb_type, pod_set_reverb_type);
-static DEVICE_ATTR(reverb_decay, S_IWUGO | S_IRUGO, pod_get_reverb_decay, pod_set_reverb_decay);
-static DEVICE_ATTR(reverb_tone, S_IWUGO | S_IRUGO, pod_get_reverb_tone, pod_set_reverb_tone);
-static DEVICE_ATTR(reverb_pre_delay, S_IWUGO | S_IRUGO, pod_get_reverb_pre_delay, pod_set_reverb_pre_delay);
-static DEVICE_ATTR(reverb_pre_post, S_IWUGO | S_IRUGO, pod_get_reverb_pre_post, pod_set_reverb_pre_post);
-static DEVICE_ATTR(band_2_frequency, S_IWUGO | S_IRUGO, pod_get_band_2_frequency, pod_set_band_2_frequency);
-static DEVICE_ATTR2(band_3_frequency__bass, band_3_frequency, S_IWUGO | S_IRUGO, pod_get_band_3_frequency__bass, pod_set_band_3_frequency__bass);
-static DEVICE_ATTR(wah_enable, S_IWUGO | S_IRUGO, pod_get_wah_enable, pod_set_wah_enable);
-static DEVICE_ATTR(modulation_lo_cut, S_IWUGO | S_IRUGO, pod_get_modulation_lo_cut, pod_set_modulation_lo_cut);
-static DEVICE_ATTR(delay_reverb_lo_cut, S_IWUGO | S_IRUGO, pod_get_delay_reverb_lo_cut, pod_set_delay_reverb_lo_cut);
-static DEVICE_ATTR(volume_pedal_minimum, S_IWUGO | S_IRUGO, pod_get_volume_pedal_minimum, pod_set_volume_pedal_minimum);
-static DEVICE_ATTR(eq_pre_post, S_IWUGO | S_IRUGO, pod_get_eq_pre_post, pod_set_eq_pre_post);
-static DEVICE_ATTR(volume_pre_post, S_IWUGO | S_IRUGO, pod_get_volume_pre_post, pod_set_volume_pre_post);
-static DEVICE_ATTR(di_model, S_IWUGO | S_IRUGO, pod_get_di_model, pod_set_di_model);
-static DEVICE_ATTR(di_delay, S_IWUGO | S_IRUGO, pod_get_di_delay, pod_set_di_delay);
-static DEVICE_ATTR(mod_enable, S_IWUGO | S_IRUGO, pod_get_mod_enable, pod_set_mod_enable);
-static DEVICE_ATTR(mod_param_1_note_value, S_IWUGO | S_IRUGO, pod_get_mod_param_1_note_value, pod_set_mod_param_1_note_value);
-static DEVICE_ATTR(mod_param_2, S_IWUGO | S_IRUGO, pod_get_mod_param_2, pod_set_mod_param_2);
-static DEVICE_ATTR(mod_param_3, S_IWUGO | S_IRUGO, pod_get_mod_param_3, pod_set_mod_param_3);
-static DEVICE_ATTR(mod_param_4, S_IWUGO | S_IRUGO, pod_get_mod_param_4, pod_set_mod_param_4);
-static DEVICE_ATTR(mod_param_5, S_IWUGO | S_IRUGO, pod_get_mod_param_5, pod_set_mod_param_5);
-static DEVICE_ATTR(mod_volume_mix, S_IWUGO | S_IRUGO, pod_get_mod_volume_mix, pod_set_mod_volume_mix);
-static DEVICE_ATTR(mod_pre_post, S_IWUGO | S_IRUGO, pod_get_mod_pre_post, pod_set_mod_pre_post);
-static DEVICE_ATTR(modulation_model, S_IWUGO | S_IRUGO, pod_get_modulation_model, pod_set_modulation_model);
-static DEVICE_ATTR(band_3_frequency, S_IWUGO | S_IRUGO, pod_get_band_3_frequency, pod_set_band_3_frequency);
-static DEVICE_ATTR2(band_4_frequency__bass, band_4_frequency, S_IWUGO | S_IRUGO, pod_get_band_4_frequency__bass, pod_set_band_4_frequency__bass);
-static DEVICE_ATTR(mod_param_1_double_precision, S_IWUGO | S_IRUGO, pod_get_mod_param_1_double_precision, pod_set_mod_param_1_double_precision);
-static DEVICE_ATTR(delay_param_1_double_precision, S_IWUGO | S_IRUGO, pod_get_delay_param_1_double_precision, pod_set_delay_param_1_double_precision);
-static DEVICE_ATTR(eq_enable, S_IWUGO | S_IRUGO, pod_get_eq_enable, pod_set_eq_enable);
-static DEVICE_ATTR(tap, S_IWUGO | S_IRUGO, pod_get_tap, pod_set_tap);
-static DEVICE_ATTR(volume_tweak_pedal_assign, S_IWUGO | S_IRUGO, pod_get_volume_tweak_pedal_assign, pod_set_volume_tweak_pedal_assign);
-static DEVICE_ATTR(band_5_frequency, S_IWUGO | S_IRUGO, pod_get_band_5_frequency, pod_set_band_5_frequency);
-static DEVICE_ATTR(tuner, S_IWUGO | S_IRUGO, pod_get_tuner, pod_set_tuner);
-static DEVICE_ATTR(mic_selection, S_IWUGO | S_IRUGO, pod_get_mic_selection, pod_set_mic_selection);
-static DEVICE_ATTR(cabinet_model, S_IWUGO | S_IRUGO, pod_get_cabinet_model, pod_set_cabinet_model);
-static DEVICE_ATTR(stomp_model, S_IWUGO | S_IRUGO, pod_get_stomp_model, pod_set_stomp_model);
-static DEVICE_ATTR(roomlevel, S_IWUGO | S_IRUGO, pod_get_roomlevel, pod_set_roomlevel);
-static DEVICE_ATTR(band_4_frequency, S_IWUGO | S_IRUGO, pod_get_band_4_frequency, pod_set_band_4_frequency);
-static DEVICE_ATTR(band_6_frequency, S_IWUGO | S_IRUGO, pod_get_band_6_frequency, pod_set_band_6_frequency);
-static DEVICE_ATTR(stomp_param_1_note_value, S_IWUGO | S_IRUGO, pod_get_stomp_param_1_note_value, pod_set_stomp_param_1_note_value);
-static DEVICE_ATTR(stomp_param_2, S_IWUGO | S_IRUGO, pod_get_stomp_param_2, pod_set_stomp_param_2);
-static DEVICE_ATTR(stomp_param_3, S_IWUGO | S_IRUGO, pod_get_stomp_param_3, pod_set_stomp_param_3);
-static DEVICE_ATTR(stomp_param_4, S_IWUGO | S_IRUGO, pod_get_stomp_param_4, pod_set_stomp_param_4);
-static DEVICE_ATTR(stomp_param_5, S_IWUGO | S_IRUGO, pod_get_stomp_param_5, pod_set_stomp_param_5);
-static DEVICE_ATTR(stomp_param_6, S_IWUGO | S_IRUGO, pod_get_stomp_param_6, pod_set_stomp_param_6);
-static DEVICE_ATTR(amp_switch_select, S_IWUGO | S_IRUGO, pod_get_amp_switch_select, pod_set_amp_switch_select);
-static DEVICE_ATTR(delay_param_4, S_IWUGO | S_IRUGO, pod_get_delay_param_4, pod_set_delay_param_4);
-static DEVICE_ATTR(delay_param_5, S_IWUGO | S_IRUGO, pod_get_delay_param_5, pod_set_delay_param_5);
-static DEVICE_ATTR(delay_pre_post, S_IWUGO | S_IRUGO, pod_get_delay_pre_post, pod_set_delay_pre_post);
-static DEVICE_ATTR(delay_model, S_IWUGO | S_IRUGO, pod_get_delay_model, pod_set_delay_model);
-static DEVICE_ATTR(delay_verb_model, S_IWUGO | S_IRUGO, pod_get_delay_verb_model, pod_set_delay_verb_model);
-static DEVICE_ATTR(tempo_msb, S_IWUGO | S_IRUGO, pod_get_tempo_msb, pod_set_tempo_msb);
-static DEVICE_ATTR(tempo_lsb, S_IWUGO | S_IRUGO, pod_get_tempo_lsb, pod_set_tempo_lsb);
-static DEVICE_ATTR(wah_model, S_IWUGO | S_IRUGO, pod_get_wah_model, pod_set_wah_model);
-static DEVICE_ATTR(bypass_volume, S_IWUGO | S_IRUGO, pod_get_bypass_volume, pod_set_bypass_volume);
-static DEVICE_ATTR(fx_loop_on_off, S_IWUGO | S_IRUGO, pod_get_fx_loop_on_off, pod_set_fx_loop_on_off);
-static DEVICE_ATTR(tweak_param_select, S_IWUGO | S_IRUGO, pod_get_tweak_param_select, pod_set_tweak_param_select);
-static DEVICE_ATTR(amp1_engage, S_IWUGO | S_IRUGO, pod_get_amp1_engage, pod_set_amp1_engage);
-static DEVICE_ATTR(band_1_gain, S_IWUGO | S_IRUGO, pod_get_band_1_gain, pod_set_band_1_gain);
-static DEVICE_ATTR2(band_2_gain__bass, band_2_gain, S_IWUGO | S_IRUGO, pod_get_band_2_gain__bass, pod_set_band_2_gain__bass);
-static DEVICE_ATTR(band_2_gain, S_IWUGO | S_IRUGO, pod_get_band_2_gain, pod_set_band_2_gain);
-static DEVICE_ATTR2(band_3_gain__bass, band_3_gain, S_IWUGO | S_IRUGO, pod_get_band_3_gain__bass, pod_set_band_3_gain__bass);
-static DEVICE_ATTR(band_3_gain, S_IWUGO | S_IRUGO, pod_get_band_3_gain, pod_set_band_3_gain);
-static DEVICE_ATTR2(band_4_gain__bass, band_4_gain, S_IWUGO | S_IRUGO, pod_get_band_4_gain__bass, pod_set_band_4_gain__bass);
-static DEVICE_ATTR2(band_5_gain__bass, band_5_gain, S_IWUGO | S_IRUGO, pod_get_band_5_gain__bass, pod_set_band_5_gain__bass);
-static DEVICE_ATTR(band_4_gain, S_IWUGO | S_IRUGO, pod_get_band_4_gain, pod_set_band_4_gain);
-static DEVICE_ATTR2(band_6_gain__bass, band_6_gain, S_IWUGO | S_IRUGO, pod_get_band_6_gain__bass, pod_set_band_6_gain__bass);
+static DEVICE_ATTR(tweak, S_IWUSR | S_IRUGO, pod_get_tweak, pod_set_tweak);
+static DEVICE_ATTR(wah_position, S_IWUSR | S_IRUGO, pod_get_wah_position, pod_set_wah_position);
+static DEVICE_ATTR(compression_gain, S_IWUSR | S_IRUGO, pod_get_compression_gain, pod_set_compression_gain);
+static DEVICE_ATTR(vol_pedal_position, S_IWUSR | S_IRUGO, pod_get_vol_pedal_position, pod_set_vol_pedal_position);
+static DEVICE_ATTR(compression_threshold, S_IWUSR | S_IRUGO, pod_get_compression_threshold, pod_set_compression_threshold);
+static DEVICE_ATTR(pan, S_IWUSR | S_IRUGO, pod_get_pan, pod_set_pan);
+static DEVICE_ATTR(amp_model_setup, S_IWUSR | S_IRUGO, pod_get_amp_model_setup, pod_set_amp_model_setup);
+static DEVICE_ATTR(amp_model, S_IWUSR | S_IRUGO, pod_get_amp_model, pod_set_amp_model);
+static DEVICE_ATTR(drive, S_IWUSR | S_IRUGO, pod_get_drive, pod_set_drive);
+static DEVICE_ATTR(bass, S_IWUSR | S_IRUGO, pod_get_bass, pod_set_bass);
+static DEVICE_ATTR(mid, S_IWUSR | S_IRUGO, pod_get_mid, pod_set_mid);
+static DEVICE_ATTR(lowmid, S_IWUSR | S_IRUGO, pod_get_lowmid, pod_set_lowmid);
+static DEVICE_ATTR(treble, S_IWUSR | S_IRUGO, pod_get_treble, pod_set_treble);
+static DEVICE_ATTR(highmid, S_IWUSR | S_IRUGO, pod_get_highmid, pod_set_highmid);
+static DEVICE_ATTR(chan_vol, S_IWUSR | S_IRUGO, pod_get_chan_vol, pod_set_chan_vol);
+static DEVICE_ATTR(reverb_mix, S_IWUSR | S_IRUGO, pod_get_reverb_mix, pod_set_reverb_mix);
+static DEVICE_ATTR(effect_setup, S_IWUSR | S_IRUGO, pod_get_effect_setup, pod_set_effect_setup);
+static DEVICE_ATTR(band_1_frequency, S_IWUSR | S_IRUGO, pod_get_band_1_frequency, pod_set_band_1_frequency);
+static DEVICE_ATTR(presence, S_IWUSR | S_IRUGO, pod_get_presence, pod_set_presence);
+static DEVICE_ATTR2(treble__bass, treble, S_IWUSR | S_IRUGO, pod_get_treble__bass, pod_set_treble__bass);
+static DEVICE_ATTR(noise_gate_enable, S_IWUSR | S_IRUGO, pod_get_noise_gate_enable, pod_set_noise_gate_enable);
+static DEVICE_ATTR(gate_threshold, S_IWUSR | S_IRUGO, pod_get_gate_threshold, pod_set_gate_threshold);
+static DEVICE_ATTR(gate_decay_time, S_IWUSR | S_IRUGO, pod_get_gate_decay_time, pod_set_gate_decay_time);
+static DEVICE_ATTR(stomp_enable, S_IWUSR | S_IRUGO, pod_get_stomp_enable, pod_set_stomp_enable);
+static DEVICE_ATTR(comp_enable, S_IWUSR | S_IRUGO, pod_get_comp_enable, pod_set_comp_enable);
+static DEVICE_ATTR(stomp_time, S_IWUSR | S_IRUGO, pod_get_stomp_time, pod_set_stomp_time);
+static DEVICE_ATTR(delay_enable, S_IWUSR | S_IRUGO, pod_get_delay_enable, pod_set_delay_enable);
+static DEVICE_ATTR(mod_param_1, S_IWUSR | S_IRUGO, pod_get_mod_param_1, pod_set_mod_param_1);
+static DEVICE_ATTR(delay_param_1, S_IWUSR | S_IRUGO, pod_get_delay_param_1, pod_set_delay_param_1);
+static DEVICE_ATTR(delay_param_1_note_value, S_IWUSR | S_IRUGO, pod_get_delay_param_1_note_value, pod_set_delay_param_1_note_value);
+static DEVICE_ATTR2(band_2_frequency__bass, band_2_frequency, S_IWUSR | S_IRUGO, pod_get_band_2_frequency__bass, pod_set_band_2_frequency__bass);
+static DEVICE_ATTR(delay_param_2, S_IWUSR | S_IRUGO, pod_get_delay_param_2, pod_set_delay_param_2);
+static DEVICE_ATTR(delay_volume_mix, S_IWUSR | S_IRUGO, pod_get_delay_volume_mix, pod_set_delay_volume_mix);
+static DEVICE_ATTR(delay_param_3, S_IWUSR | S_IRUGO, pod_get_delay_param_3, pod_set_delay_param_3);
+static DEVICE_ATTR(reverb_enable, S_IWUSR | S_IRUGO, pod_get_reverb_enable, pod_set_reverb_enable);
+static DEVICE_ATTR(reverb_type, S_IWUSR | S_IRUGO, pod_get_reverb_type, pod_set_reverb_type);
+static DEVICE_ATTR(reverb_decay, S_IWUSR | S_IRUGO, pod_get_reverb_decay, pod_set_reverb_decay);
+static DEVICE_ATTR(reverb_tone, S_IWUSR | S_IRUGO, pod_get_reverb_tone, pod_set_reverb_tone);
+static DEVICE_ATTR(reverb_pre_delay, S_IWUSR | S_IRUGO, pod_get_reverb_pre_delay, pod_set_reverb_pre_delay);
+static DEVICE_ATTR(reverb_pre_post, S_IWUSR | S_IRUGO, pod_get_reverb_pre_post, pod_set_reverb_pre_post);
+static DEVICE_ATTR(band_2_frequency, S_IWUSR | S_IRUGO, pod_get_band_2_frequency, pod_set_band_2_frequency);
+static DEVICE_ATTR2(band_3_frequency__bass, band_3_frequency, S_IWUSR | S_IRUGO, pod_get_band_3_frequency__bass, pod_set_band_3_frequency__bass);
+static DEVICE_ATTR(wah_enable, S_IWUSR | S_IRUGO, pod_get_wah_enable, pod_set_wah_enable);
+static DEVICE_ATTR(modulation_lo_cut, S_IWUSR | S_IRUGO, pod_get_modulation_lo_cut, pod_set_modulation_lo_cut);
+static DEVICE_ATTR(delay_reverb_lo_cut, S_IWUSR | S_IRUGO, pod_get_delay_reverb_lo_cut, pod_set_delay_reverb_lo_cut);
+static DEVICE_ATTR(volume_pedal_minimum, S_IWUSR | S_IRUGO, pod_get_volume_pedal_minimum, pod_set_volume_pedal_minimum);
+static DEVICE_ATTR(eq_pre_post, S_IWUSR | S_IRUGO, pod_get_eq_pre_post, pod_set_eq_pre_post);
+static DEVICE_ATTR(volume_pre_post, S_IWUSR | S_IRUGO, pod_get_volume_pre_post, pod_set_volume_pre_post);
+static DEVICE_ATTR(di_model, S_IWUSR | S_IRUGO, pod_get_di_model, pod_set_di_model);
+static DEVICE_ATTR(di_delay, S_IWUSR | S_IRUGO, pod_get_di_delay, pod_set_di_delay);
+static DEVICE_ATTR(mod_enable, S_IWUSR | S_IRUGO, pod_get_mod_enable, pod_set_mod_enable);
+static DEVICE_ATTR(mod_param_1_note_value, S_IWUSR | S_IRUGO, pod_get_mod_param_1_note_value, pod_set_mod_param_1_note_value);
+static DEVICE_ATTR(mod_param_2, S_IWUSR | S_IRUGO, pod_get_mod_param_2, pod_set_mod_param_2);
+static DEVICE_ATTR(mod_param_3, S_IWUSR | S_IRUGO, pod_get_mod_param_3, pod_set_mod_param_3);
+static DEVICE_ATTR(mod_param_4, S_IWUSR | S_IRUGO, pod_get_mod_param_4, pod_set_mod_param_4);
+static DEVICE_ATTR(mod_param_5, S_IWUSR | S_IRUGO, pod_get_mod_param_5, pod_set_mod_param_5);
+static DEVICE_ATTR(mod_volume_mix, S_IWUSR | S_IRUGO, pod_get_mod_volume_mix, pod_set_mod_volume_mix);
+static DEVICE_ATTR(mod_pre_post, S_IWUSR | S_IRUGO, pod_get_mod_pre_post, pod_set_mod_pre_post);
+static DEVICE_ATTR(modulation_model, S_IWUSR | S_IRUGO, pod_get_modulation_model, pod_set_modulation_model);
+static DEVICE_ATTR(band_3_frequency, S_IWUSR | S_IRUGO, pod_get_band_3_frequency, pod_set_band_3_frequency);
+static DEVICE_ATTR2(band_4_frequency__bass, band_4_frequency, S_IWUSR | S_IRUGO, pod_get_band_4_frequency__bass, pod_set_band_4_frequency__bass);
+static DEVICE_ATTR(mod_param_1_double_precision, S_IWUSR | S_IRUGO, pod_get_mod_param_1_double_precision, pod_set_mod_param_1_double_precision);
+static DEVICE_ATTR(delay_param_1_double_precision, S_IWUSR | S_IRUGO, pod_get_delay_param_1_double_precision, pod_set_delay_param_1_double_precision);
+static DEVICE_ATTR(eq_enable, S_IWUSR | S_IRUGO, pod_get_eq_enable, pod_set_eq_enable);
+static DEVICE_ATTR(tap, S_IWUSR | S_IRUGO, pod_get_tap, pod_set_tap);
+static DEVICE_ATTR(volume_tweak_pedal_assign, S_IWUSR | S_IRUGO, pod_get_volume_tweak_pedal_assign, pod_set_volume_tweak_pedal_assign);
+static DEVICE_ATTR(band_5_frequency, S_IWUSR | S_IRUGO, pod_get_band_5_frequency, pod_set_band_5_frequency);
+static DEVICE_ATTR(tuner, S_IWUSR | S_IRUGO, pod_get_tuner, pod_set_tuner);
+static DEVICE_ATTR(mic_selection, S_IWUSR | S_IRUGO, pod_get_mic_selection, pod_set_mic_selection);
+static DEVICE_ATTR(cabinet_model, S_IWUSR | S_IRUGO, pod_get_cabinet_model, pod_set_cabinet_model);
+static DEVICE_ATTR(stomp_model, S_IWUSR | S_IRUGO, pod_get_stomp_model, pod_set_stomp_model);
+static DEVICE_ATTR(roomlevel, S_IWUSR | S_IRUGO, pod_get_roomlevel, pod_set_roomlevel);
+static DEVICE_ATTR(band_4_frequency, S_IWUSR | S_IRUGO, pod_get_band_4_frequency, pod_set_band_4_frequency);
+static DEVICE_ATTR(band_6_frequency, S_IWUSR | S_IRUGO, pod_get_band_6_frequency, pod_set_band_6_frequency);
+static DEVICE_ATTR(stomp_param_1_note_value, S_IWUSR | S_IRUGO, pod_get_stomp_param_1_note_value, pod_set_stomp_param_1_note_value);
+static DEVICE_ATTR(stomp_param_2, S_IWUSR | S_IRUGO, pod_get_stomp_param_2, pod_set_stomp_param_2);
+static DEVICE_ATTR(stomp_param_3, S_IWUSR | S_IRUGO, pod_get_stomp_param_3, pod_set_stomp_param_3);
+static DEVICE_ATTR(stomp_param_4, S_IWUSR | S_IRUGO, pod_get_stomp_param_4, pod_set_stomp_param_4);
+static DEVICE_ATTR(stomp_param_5, S_IWUSR | S_IRUGO, pod_get_stomp_param_5, pod_set_stomp_param_5);
+static DEVICE_ATTR(stomp_param_6, S_IWUSR | S_IRUGO, pod_get_stomp_param_6, pod_set_stomp_param_6);
+static DEVICE_ATTR(amp_switch_select, S_IWUSR | S_IRUGO, pod_get_amp_switch_select, pod_set_amp_switch_select);
+static DEVICE_ATTR(delay_param_4, S_IWUSR | S_IRUGO, pod_get_delay_param_4, pod_set_delay_param_4);
+static DEVICE_ATTR(delay_param_5, S_IWUSR | S_IRUGO, pod_get_delay_param_5, pod_set_delay_param_5);
+static DEVICE_ATTR(delay_pre_post, S_IWUSR | S_IRUGO, pod_get_delay_pre_post, pod_set_delay_pre_post);
+static DEVICE_ATTR(delay_model, S_IWUSR | S_IRUGO, pod_get_delay_model, pod_set_delay_model);
+static DEVICE_ATTR(delay_verb_model, S_IWUSR | S_IRUGO, pod_get_delay_verb_model, pod_set_delay_verb_model);
+static DEVICE_ATTR(tempo_msb, S_IWUSR | S_IRUGO, pod_get_tempo_msb, pod_set_tempo_msb);
+static DEVICE_ATTR(tempo_lsb, S_IWUSR | S_IRUGO, pod_get_tempo_lsb, pod_set_tempo_lsb);
+static DEVICE_ATTR(wah_model, S_IWUSR | S_IRUGO, pod_get_wah_model, pod_set_wah_model);
+static DEVICE_ATTR(bypass_volume, S_IWUSR | S_IRUGO, pod_get_bypass_volume, pod_set_bypass_volume);
+static DEVICE_ATTR(fx_loop_on_off, S_IWUSR | S_IRUGO, pod_get_fx_loop_on_off, pod_set_fx_loop_on_off);
+static DEVICE_ATTR(tweak_param_select, S_IWUSR | S_IRUGO, pod_get_tweak_param_select, pod_set_tweak_param_select);
+static DEVICE_ATTR(amp1_engage, S_IWUSR | S_IRUGO, pod_get_amp1_engage, pod_set_amp1_engage);
+static DEVICE_ATTR(band_1_gain, S_IWUSR | S_IRUGO, pod_get_band_1_gain, pod_set_band_1_gain);
+static DEVICE_ATTR2(band_2_gain__bass, band_2_gain, S_IWUSR | S_IRUGO, pod_get_band_2_gain__bass, pod_set_band_2_gain__bass);
+static DEVICE_ATTR(band_2_gain, S_IWUSR | S_IRUGO, pod_get_band_2_gain, pod_set_band_2_gain);
+static DEVICE_ATTR2(band_3_gain__bass, band_3_gain, S_IWUSR | S_IRUGO, pod_get_band_3_gain__bass, pod_set_band_3_gain__bass);
+static DEVICE_ATTR(band_3_gain, S_IWUSR | S_IRUGO, pod_get_band_3_gain, pod_set_band_3_gain);
+static DEVICE_ATTR2(band_4_gain__bass, band_4_gain, S_IWUSR | S_IRUGO, pod_get_band_4_gain__bass, pod_set_band_4_gain__bass);
+static DEVICE_ATTR2(band_5_gain__bass, band_5_gain, S_IWUSR | S_IRUGO, pod_get_band_5_gain__bass, pod_set_band_5_gain__bass);
+static DEVICE_ATTR(band_4_gain, S_IWUSR | S_IRUGO, pod_get_band_4_gain, pod_set_band_4_gain);
+static DEVICE_ATTR2(band_6_gain__bass, band_6_gain, S_IWUSR | S_IRUGO, pod_get_band_6_gain__bass, pod_set_band_6_gain__bass);
static DEVICE_ATTR(body, S_IRUGO, variax_get_body, line6_nop_write);
static DEVICE_ATTR(pickup1_enable, S_IRUGO, variax_get_pickup1_enable, line6_nop_write);
static DEVICE_ATTR(pickup1_type, S_IRUGO, variax_get_pickup1_type, line6_nop_write);
file:89a2b17e9cafc78744752ae41edcc062184db98b -> file:a3b877edbaefb0df7c495e5df5b56d774c377949
--- a/drivers/staging/line6/midi.c
+++ b/drivers/staging/line6/midi.c
@@ -349,8 +349,8 @@ static ssize_t midi_set_midi_mask_receiv
return count;
}
-static DEVICE_ATTR(midi_mask_transmit, S_IWUGO | S_IRUGO, midi_get_midi_mask_transmit, midi_set_midi_mask_transmit);
-static DEVICE_ATTR(midi_mask_receive, S_IWUGO | S_IRUGO, midi_get_midi_mask_receive, midi_set_midi_mask_receive);
+static DEVICE_ATTR(midi_mask_transmit, S_IWUSR | S_IRUGO, midi_get_midi_mask_transmit, midi_set_midi_mask_transmit);
+static DEVICE_ATTR(midi_mask_receive, S_IWUSR | S_IRUGO, midi_get_midi_mask_receive, midi_set_midi_mask_receive);
/* MIDI device destructor */
static int snd_line6_midi_free(struct snd_device *device)
file:4c5b9d58400041f588241ee48fe250c192652753 -> file:875d75a74f4ba28f371b58d6341c3ac3672febd1
--- a/drivers/staging/line6/pod.c
+++ b/drivers/staging/line6/pod.c
@@ -912,33 +912,33 @@ POD_GET_SYSTEM_PARAM(tuner_pitch, 1, 1);
#undef GET_SYSTEM_PARAM
/* POD special files: */
-static DEVICE_ATTR(channel, S_IWUGO | S_IRUGO, pod_get_channel, pod_set_channel);
+static DEVICE_ATTR(channel, S_IWUSR | S_IRUGO, pod_get_channel, pod_set_channel);
static DEVICE_ATTR(clip, S_IRUGO, pod_wait_for_clip, line6_nop_write);
static DEVICE_ATTR(device_id, S_IRUGO, pod_get_device_id, line6_nop_write);
static DEVICE_ATTR(dirty, S_IRUGO, pod_get_dirty, line6_nop_write);
-static DEVICE_ATTR(dump, S_IWUGO | S_IRUGO, pod_get_dump, pod_set_dump);
-static DEVICE_ATTR(dump_buf, S_IWUGO | S_IRUGO, pod_get_dump_buf, pod_set_dump_buf);
-static DEVICE_ATTR(finish, S_IWUGO, line6_nop_read, pod_set_finish);
+static DEVICE_ATTR(dump, S_IWUSR | S_IRUGO, pod_get_dump, pod_set_dump);
+static DEVICE_ATTR(dump_buf, S_IWUSR | S_IRUGO, pod_get_dump_buf, pod_set_dump_buf);
+static DEVICE_ATTR(finish, S_IWUSR, line6_nop_read, pod_set_finish);
static DEVICE_ATTR(firmware_version, S_IRUGO, pod_get_firmware_version, line6_nop_write);
-static DEVICE_ATTR(midi_postprocess, S_IWUGO | S_IRUGO, pod_get_midi_postprocess, pod_set_midi_postprocess);
-static DEVICE_ATTR(monitor_level, S_IWUGO | S_IRUGO, pod_get_monitor_level, pod_set_monitor_level);
+static DEVICE_ATTR(midi_postprocess, S_IWUSR | S_IRUGO, pod_get_midi_postprocess, pod_set_midi_postprocess);
+static DEVICE_ATTR(monitor_level, S_IWUSR | S_IRUGO, pod_get_monitor_level, pod_set_monitor_level);
static DEVICE_ATTR(name, S_IRUGO, pod_get_name, line6_nop_write);
static DEVICE_ATTR(name_buf, S_IRUGO, pod_get_name_buf, line6_nop_write);
-static DEVICE_ATTR(retrieve_amp_setup, S_IWUGO, line6_nop_read, pod_set_retrieve_amp_setup);
-static DEVICE_ATTR(retrieve_channel, S_IWUGO, line6_nop_read, pod_set_retrieve_channel);
-static DEVICE_ATTR(retrieve_effects_setup, S_IWUGO, line6_nop_read, pod_set_retrieve_effects_setup);
-static DEVICE_ATTR(routing, S_IWUGO | S_IRUGO, pod_get_routing, pod_set_routing);
+static DEVICE_ATTR(retrieve_amp_setup, S_IWUSR, line6_nop_read, pod_set_retrieve_amp_setup);
+static DEVICE_ATTR(retrieve_channel, S_IWUSR, line6_nop_read, pod_set_retrieve_channel);
+static DEVICE_ATTR(retrieve_effects_setup, S_IWUSR, line6_nop_read, pod_set_retrieve_effects_setup);
+static DEVICE_ATTR(routing, S_IWUSR | S_IRUGO, pod_get_routing, pod_set_routing);
static DEVICE_ATTR(serial_number, S_IRUGO, pod_get_serial_number, line6_nop_write);
-static DEVICE_ATTR(store_amp_setup, S_IWUGO, line6_nop_read, pod_set_store_amp_setup);
-static DEVICE_ATTR(store_channel, S_IWUGO, line6_nop_read, pod_set_store_channel);
-static DEVICE_ATTR(store_effects_setup, S_IWUGO, line6_nop_read, pod_set_store_effects_setup);
-static DEVICE_ATTR(tuner_freq, S_IWUGO | S_IRUGO, pod_get_tuner_freq, pod_set_tuner_freq);
-static DEVICE_ATTR(tuner_mute, S_IWUGO | S_IRUGO, pod_get_tuner_mute, pod_set_tuner_mute);
+static DEVICE_ATTR(store_amp_setup, S_IWUSR, line6_nop_read, pod_set_store_amp_setup);
+static DEVICE_ATTR(store_channel, S_IWUSR, line6_nop_read, pod_set_store_channel);
+static DEVICE_ATTR(store_effects_setup, S_IWUSR, line6_nop_read, pod_set_store_effects_setup);
+static DEVICE_ATTR(tuner_freq, S_IWUSR | S_IRUGO, pod_get_tuner_freq, pod_set_tuner_freq);
+static DEVICE_ATTR(tuner_mute, S_IWUSR | S_IRUGO, pod_get_tuner_mute, pod_set_tuner_mute);
static DEVICE_ATTR(tuner_note, S_IRUGO, pod_get_tuner_note, line6_nop_write);
static DEVICE_ATTR(tuner_pitch, S_IRUGO, pod_get_tuner_pitch, line6_nop_write);
#if CREATE_RAW_FILE
-static DEVICE_ATTR(raw, S_IWUGO, line6_nop_read, line6_set_raw);
+static DEVICE_ATTR(raw, S_IWUSR, line6_nop_read, line6_set_raw);
#endif
/*
file:eaa1229002aac2d8e46c54589296ebf6f029ebff -> file:714687633dc8d8055048e58ebee20ed6808c9d47
--- a/drivers/staging/line6/toneport.c
+++ b/drivers/staging/line6/toneport.c
@@ -117,8 +117,8 @@ static ssize_t toneport_set_led_green(st
return count;
}
-static DEVICE_ATTR(led_red, S_IWUGO | S_IRUGO, line6_nop_read, toneport_set_led_red);
-static DEVICE_ATTR(led_green, S_IWUGO | S_IRUGO, line6_nop_read, toneport_set_led_green);
+static DEVICE_ATTR(led_red, S_IWUSR | S_IRUGO, line6_nop_read, toneport_set_led_red);
+static DEVICE_ATTR(led_green, S_IWUSR | S_IRUGO, line6_nop_read, toneport_set_led_green);
static int toneport_send_cmd(struct usb_device *usbdev, int cmd1, int cmd2)
file:f9d96984733a8b71aa116d0895037968e3d0c457 -> file:12af54da4636a7e62cab7464920e42e5ba7b4e41
--- a/drivers/staging/line6/variax.c
+++ b/drivers/staging/line6/variax.c
@@ -366,17 +366,17 @@ static ssize_t variax_set_raw2(struct de
#endif
/* Variax workbench special files: */
-static DEVICE_ATTR(model, S_IWUGO | S_IRUGO, variax_get_model, variax_set_model);
-static DEVICE_ATTR(volume, S_IWUGO | S_IRUGO, variax_get_volume, variax_set_volume);
-static DEVICE_ATTR(tone, S_IWUGO | S_IRUGO, variax_get_tone, variax_set_tone);
+static DEVICE_ATTR(model, S_IWUSR | S_IRUGO, variax_get_model, variax_set_model);
+static DEVICE_ATTR(volume, S_IWUSR | S_IRUGO, variax_get_volume, variax_set_volume);
+static DEVICE_ATTR(tone, S_IWUSR | S_IRUGO, variax_get_tone, variax_set_tone);
static DEVICE_ATTR(name, S_IRUGO, variax_get_name, line6_nop_write);
static DEVICE_ATTR(bank, S_IRUGO, variax_get_bank, line6_nop_write);
static DEVICE_ATTR(dump, S_IRUGO, variax_get_dump, line6_nop_write);
-static DEVICE_ATTR(active, S_IWUGO | S_IRUGO, variax_get_active, variax_set_active);
+static DEVICE_ATTR(active, S_IWUSR | S_IRUGO, variax_get_active, variax_set_active);
#if CREATE_RAW_FILE
-static DEVICE_ATTR(raw, S_IWUGO, line6_nop_read, line6_set_raw);
-static DEVICE_ATTR(raw2, S_IWUGO, line6_nop_read, variax_set_raw2);
+static DEVICE_ATTR(raw, S_IWUSR, line6_nop_read, line6_set_raw);
+static DEVICE_ATTR(raw2, S_IWUSR, line6_nop_read, variax_set_raw2);
#endif
file:4ce399b6d237546961b826734ca8c77003915eb4 -> file:c39a25f500ef9d011436e444cf7d397f8cb7d252
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -2181,6 +2181,7 @@ int panel_init(void)
if (pprt) {
parport_release(pprt);
parport_unregister_device(pprt);
+ pprt = NULL;
}
parport_unregister_driver(&panel_driver);
printk(KERN_ERR "Panel driver version " PANEL_VERSION
@@ -2230,6 +2231,7 @@ static void __exit panel_cleanup_module(
/* TODO: free all input signals */
parport_release(pprt);
parport_unregister_device(pprt);
+ pprt = NULL;
}
parport_unregister_driver(&panel_driver);
}
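
Note: the init error path and module cleanup can both run the release/unregister sequence; clearing pprt after unregistering keeps a second pass from operating on an already-freed parport device. A sketch of the stale-pointer hazard (names hypothetical):

#include <stdlib.h>

static char *res;

static void teardown(void)
{
	if (res) {
		free(res);
		res = NULL;	/* without this, a second teardown double-frees */
	}
}

int main(void)
{
	res = malloc(16);
	teardown();
	teardown();		/* safe only because res was cleared */
	return 0;
}
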
file:cd07059b25b51dc29da0651b769f5e57f743e803 -> file:c30773b5c59e715c9f4714a88339c0fe4db5bc47
--- a/drivers/staging/rtl8187se/r8185b_init.c
+++ b/drivers/staging/rtl8187se/r8185b_init.c
@@ -356,8 +356,12 @@ HwHSSIThreeWire(
}
udelay(10);
}
- if (TryCnt == TC_3W_POLL_MAX_TRY_CNT)
- panic("HwThreeWire(): CmdReg: %#X RE|WE bits are not clear!!\n", u1bTmp);
+ if (TryCnt == TC_3W_POLL_MAX_TRY_CNT) {
+ printk(KERN_ERR "rtl8187se: HwThreeWire(): CmdReg:"
+ " %#X RE|WE bits are not clear!!\n", u1bTmp);
+ dump_stack();
+ return 0;
+ }
// RTL8187S HSSI Read/Write Function
u1bTmp = read_nic_byte(dev, RF_SW_CONFIG);
@@ -397,13 +401,23 @@ HwHSSIThreeWire(
int idx;
int ByteCnt = nDataBufBitCnt / 8;
//printk("%d\n",nDataBufBitCnt);
- if ((nDataBufBitCnt % 8) != 0)
- panic("HwThreeWire(): nDataBufBitCnt(%d) should be multiple of 8!!!\n",
- nDataBufBitCnt);
-
- if (nDataBufBitCnt > 64)
- panic("HwThreeWire(): nDataBufBitCnt(%d) should <= 64!!!\n",
- nDataBufBitCnt);
+ if ((nDataBufBitCnt % 8) != 0) {
+ printk(KERN_ERR "rtl8187se: "
+ "HwThreeWire(): nDataBufBitCnt(%d)"
+ " should be multiple of 8!!!\n",
+ nDataBufBitCnt);
+ dump_stack();
+ nDataBufBitCnt += 8;
+ nDataBufBitCnt &= ~7;
+ }
+
+ if (nDataBufBitCnt > 64) {
+ printk(KERN_ERR "rtl8187se: HwThreeWire():"
+ " nDataBufBitCnt(%d) should <= 64!!!\n",
+ nDataBufBitCnt);
+ dump_stack();
+ nDataBufBitCnt = 64;
+ }
for(idx = 0; idx < ByteCnt; idx++)
{
file:a2566f1075d557014e4bcd443fb02ed36202dc72 -> file:af3832b03e4b95cb12394ea0ed6b5232d091c9ac
--- a/drivers/staging/usbip/usbip_event.c
+++ b/drivers/staging/usbip/usbip_event.c
@@ -38,21 +38,13 @@ static int event_handler(struct usbip_de
ud->eh_ops.shutdown(ud);
ud->event &= ~USBIP_EH_SHUTDOWN;
-
- break;
}
- /* Stop the error handler. */
- if (ud->event & USBIP_EH_BYE)
- return -1;
-
/* Reset the device. */
if (ud->event & USBIP_EH_RESET) {
ud->eh_ops.reset(ud);
ud->event &= ~USBIP_EH_RESET;
-
- break;
}
/* Mark the device as unusable. */
@@ -60,13 +52,11 @@ static int event_handler(struct usbip_de
ud->eh_ops.unusable(ud);
ud->event &= ~USBIP_EH_UNUSABLE;
-
- break;
}
- /* NOTREACHED */
- printk(KERN_ERR "%s: unknown event\n", __func__);
- return -1;
+ /* Stop the error handler. */
+ if (ud->event & USBIP_EH_BYE)
+ return -1;
}
return 0;
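
Note: the events are independent flag bits that may be raised together; the old break statements stopped processing after the first event handled, and the rework instead clears every pending bit in one pass, checking USBIP_EH_BYE last. A sketch of the drained-flags structure (bit values hypothetical):

#include <stdio.h>

#define EV_SHUTDOWN 0x01
#define EV_BYE      0x02
#define EV_RESET    0x04
#define EV_UNUSABLE 0x08

static int handle(unsigned *event)
{
	while (*event & (EV_SHUTDOWN | EV_RESET | EV_UNUSABLE | EV_BYE)) {
		if (*event & EV_SHUTDOWN) {
			/* ... shutdown work ... */
			*event &= ~EV_SHUTDOWN;
		}
		if (*event & EV_RESET) {
			/* ... reset work ... */
			*event &= ~EV_RESET;
		}
		if (*event & EV_UNUSABLE) {
			/* ... mark unusable ... */
			*event &= ~EV_UNUSABLE;
		}
		if (*event & EV_BYE)	/* checked last: stop the handler */
			return -1;
	}
	return 0;
}

int main(void)
{
	unsigned ev = EV_SHUTDOWN | EV_RESET;

	printf("%d\n", handle(&ev));	/* 0: both bits handled in one pass */
	return 0;
}
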
file:6e91fc2bd850a2bf9c3e2f11f40fb935b93df873 -> file:c2018029059c5ce35b17a1965b443f586c1ceec7
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -163,6 +163,8 @@ void rh_port_disconnect(int rhport)
* spin_unlock(&vdev->ud.lock); */
spin_unlock_irqrestore(&the_controller->lock, flags);
+
+ usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
}
file:574e0b0a9c28387e2b6e1181c8ef0b8ab8c0a1ca -> file:a078f6f50d7046ac1aa89f1026931055b1ec5a33
--- a/drivers/staging/vt6655/wpactl.c
+++ b/drivers/staging/vt6655/wpactl.c
@@ -767,9 +767,14 @@ static int wpa_set_associate(PSDevice pD
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
- if (param->u.wpa_associate.wpa_ie &&
- copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len))
- return -EINVAL;
+ if (param->u.wpa_associate.wpa_ie_len) {
+ if (!param->u.wpa_associate.wpa_ie)
+ return -EINVAL;
+ if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE))
+ return -EINVAL;
+ if (copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len))
+ return -EFAULT;
+ }
if (param->u.wpa_associate.mode == 1)
pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
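
Note: the old test copied a user-chosen length into the fixed abyWPAIE[] stack buffer with no upper bound (a kernel stack overwrite) and returned -EINVAL for a faulted copy; the rework rejects a length without a pointer, caps the length at sizeof(abyWPAIE), and reserves -EFAULT for the actual copy failure. A user-space sketch of the validation order (memcpy stands in for copy_from_user, buffer size hypothetical):

#include <stdio.h>
#include <string.h>
#include <errno.h>

static unsigned char buf[64];

static int set_blob(const void *uptr, size_t len)
{
	if (!len)
		return 0;		/* nothing to copy */
	if (!uptr)
		return -EINVAL;		/* length without data */
	if (len > sizeof(buf))
		return -EINVAL;		/* would overflow the buffer */
	memcpy(buf, uptr, len);		/* stands in for copy_from_user() */
	return 0;
}

int main(void)
{
	unsigned char blob[8] = { 1, 2, 3 };

	printf("%d\n", set_blob(blob, sizeof(blob)));	/* 0 */
	printf("%d\n", set_blob(blob, 4096));		/* -EINVAL: too long */
	return 0;
}
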
file:d171b563e94c7e2bb10c092834799a434b6b8956 -> file:ae2c0c06dd371bd9ebf6af87c2267c37a73c321b
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -2259,7 +2259,7 @@ out:
return ret;
}
-static DEVICE_ATTR(stat_status, S_IWUGO | S_IRUGO, read_status, reboot);
+static DEVICE_ATTR(stat_status, S_IWUSR | S_IRUGO, read_status, reboot);
static ssize_t read_human_status(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -2322,7 +2322,7 @@ out:
return ret;
}
-static DEVICE_ATTR(stat_human_status, S_IWUGO | S_IRUGO, read_human_status, NULL);
+static DEVICE_ATTR(stat_human_status, S_IRUGO, read_human_status, NULL);
static ssize_t read_delin(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -2354,7 +2354,7 @@ out:
return ret;
}
-static DEVICE_ATTR(stat_delin, S_IWUGO | S_IRUGO, read_delin, NULL);
+static DEVICE_ATTR(stat_delin, S_IRUGO, read_delin, NULL);
#define UEA_ATTR(name, reset) \
\
file:0e64037a8de14d59e2c3454adbb1988443b66c60 -> file:e3017c46d5ec1db2427d9525e7041eccffbbb90c
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -971,7 +971,8 @@ static int acm_probe(struct usb_interfac
}
if (!buflen) {
- if (intf->cur_altsetting->endpoint->extralen &&
+ if (intf->cur_altsetting->endpoint &&
+ intf->cur_altsetting->endpoint->extralen &&
intf->cur_altsetting->endpoint->extra) {
dev_dbg(&intf->dev,
"Seeking extra descriptors on endpoint\n");
@@ -1464,6 +1465,17 @@ err_out:
}
#endif /* CONFIG_PM */
+
+#define NOKIA_PCSUITE_ACM_INFO(x) \
+ USB_DEVICE_AND_INTERFACE_INFO(0x0421, x, \
+ USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
+ USB_CDC_ACM_PROTO_VENDOR)
+
+#define SAMSUNG_PCSUITE_ACM_INFO(x) \
+ USB_DEVICE_AND_INTERFACE_INFO(0x04e7, x, \
+ USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
+ USB_CDC_ACM_PROTO_VENDOR)
+
/*
* USB driver structure.
*/
@@ -1521,6 +1533,76 @@ static struct usb_device_id acm_ids[] =
{ USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
+ { USB_DEVICE(0x1576, 0x03b1), /* Maretron USB100 */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },
+
+ /* Nokia S60 phones expose two ACM channels. The first is
+ * a modem and is picked up by the standard AT-command
+ * information below. The second is 'vendor-specific' but
+ * is treated as a serial device at the S60 end, so we want
+ * to expose it on Linux too. */
+ { NOKIA_PCSUITE_ACM_INFO(0x042D), }, /* Nokia 3250 */
+ { NOKIA_PCSUITE_ACM_INFO(0x04D8), }, /* Nokia 5500 Sport */
+ { NOKIA_PCSUITE_ACM_INFO(0x04C9), }, /* Nokia E50 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0419), }, /* Nokia E60 */
+ { NOKIA_PCSUITE_ACM_INFO(0x044D), }, /* Nokia E61 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0001), }, /* Nokia E61i */
+ { NOKIA_PCSUITE_ACM_INFO(0x0475), }, /* Nokia E62 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0508), }, /* Nokia E65 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0418), }, /* Nokia E70 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0425), }, /* Nokia N71 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0486), }, /* Nokia N73 */
+ { NOKIA_PCSUITE_ACM_INFO(0x04DF), }, /* Nokia N75 */
+ { NOKIA_PCSUITE_ACM_INFO(0x000e), }, /* Nokia N77 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0445), }, /* Nokia N80 */
+ { NOKIA_PCSUITE_ACM_INFO(0x042F), }, /* Nokia N91 & N91 8GB */
+ { NOKIA_PCSUITE_ACM_INFO(0x048E), }, /* Nokia N92 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0420), }, /* Nokia N93 */
+ { NOKIA_PCSUITE_ACM_INFO(0x04E6), }, /* Nokia N93i */
+ { NOKIA_PCSUITE_ACM_INFO(0x04B2), }, /* Nokia 5700 XpressMusic */
+ { NOKIA_PCSUITE_ACM_INFO(0x0134), }, /* Nokia 6110 Navigator (China) */
+ { NOKIA_PCSUITE_ACM_INFO(0x046E), }, /* Nokia 6110 Navigator */
+ { NOKIA_PCSUITE_ACM_INFO(0x002f), }, /* Nokia 6120 classic & */
+ { NOKIA_PCSUITE_ACM_INFO(0x0088), }, /* Nokia 6121 classic */
+ { NOKIA_PCSUITE_ACM_INFO(0x00fc), }, /* Nokia 6124 classic */
+ { NOKIA_PCSUITE_ACM_INFO(0x0042), }, /* Nokia E51 */
+ { NOKIA_PCSUITE_ACM_INFO(0x00b0), }, /* Nokia E66 */
+ { NOKIA_PCSUITE_ACM_INFO(0x00ab), }, /* Nokia E71 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0481), }, /* Nokia N76 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0007), }, /* Nokia N81 & N81 8GB */
+ { NOKIA_PCSUITE_ACM_INFO(0x0071), }, /* Nokia N82 */
+ { NOKIA_PCSUITE_ACM_INFO(0x04F0), }, /* Nokia N95 & N95-3 NAM */
+ { NOKIA_PCSUITE_ACM_INFO(0x0070), }, /* Nokia N95 8GB */
+ { NOKIA_PCSUITE_ACM_INFO(0x00e9), }, /* Nokia 5320 XpressMusic */
+ { NOKIA_PCSUITE_ACM_INFO(0x0099), }, /* Nokia 6210 Navigator, RM-367 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0128), }, /* Nokia 6210 Navigator, RM-419 */
+ { NOKIA_PCSUITE_ACM_INFO(0x008f), }, /* Nokia 6220 Classic */
+ { NOKIA_PCSUITE_ACM_INFO(0x00a0), }, /* Nokia 6650 */
+ { NOKIA_PCSUITE_ACM_INFO(0x007b), }, /* Nokia N78 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0094), }, /* Nokia N85 */
+ { NOKIA_PCSUITE_ACM_INFO(0x003a), }, /* Nokia N96 & N96-3 */
+ { NOKIA_PCSUITE_ACM_INFO(0x00e9), }, /* Nokia 5320 XpressMusic */
+ { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */
+ { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */
+ { NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0178), }, /* Nokia E63 */
+ { NOKIA_PCSUITE_ACM_INFO(0x010e), }, /* Nokia E75 */
+ { NOKIA_PCSUITE_ACM_INFO(0x02d9), }, /* Nokia 6760 Slide */
+ { NOKIA_PCSUITE_ACM_INFO(0x01d0), }, /* Nokia E52 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0223), }, /* Nokia E72 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0275), }, /* Nokia X6 */
+ { NOKIA_PCSUITE_ACM_INFO(0x026c), }, /* Nokia N97 Mini */
+ { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
+ { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
+ { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
+ { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
+
+ /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
+
+ /* control interfaces without any protocol set */
+ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
+ USB_CDC_PROTO_NONE) },
/* control interfaces with various AT-command sets */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
@@ -1536,7 +1618,6 @@ static struct usb_device_id acm_ids[] =
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_ACM_PROTO_AT_CDMA) },
- /* NOTE: COMM/ACM/0xff is likely MSFT RNDIS ... NOT a modem!! */
{ }
};
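
Note: USB_DEVICE_AND_INTERFACE_INFO matches on vendor/product id plus interface class, subclass and protocol; the PC-Suite entries bind the vendor-specific ACM protocol (0xff) channel that the generic AT-command entries would otherwise skip. A user-space sketch of the predicate these macros encode (struct layout hypothetical, constants per the USB CDC spec):

#include <stdio.h>
#include <stdint.h>

struct intf { uint16_t vid, pid; uint8_t cls, sub, proto; };

static int nokia_pcsuite_match(const struct intf *i, uint16_t pid)
{
	return i->vid == 0x0421 && i->pid == pid &&
	       i->cls == 0x02 &&	/* USB_CLASS_COMM */
	       i->sub == 0x02 &&	/* USB_CDC_SUBCLASS_ACM */
	       i->proto == 0xff;	/* USB_CDC_ACM_PROTO_VENDOR */
}

int main(void)
{
	struct intf e61i = { 0x0421, 0x0001, 0x02, 0x02, 0xff };

	printf("%d\n", nokia_pcsuite_match(&e61i, 0x0001));	/* 1: matched */
	return 0;
}
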
file:2f12e2da0e980cc112bad471ce3d877a44a1d426 -> file:582aa87c4b0db444c16a376629c90adc6f77e704
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -946,10 +946,11 @@ static int proc_getdriver(struct dev_sta
static int proc_connectinfo(struct dev_state *ps, void __user *arg)
{
- struct usbdevfs_connectinfo ci;
+ struct usbdevfs_connectinfo ci = {
+ .devnum = ps->dev->devnum,
+ .slow = ps->dev->speed == USB_SPEED_LOW
+ };
- ci.devnum = ps->dev->devnum;
- ci.slow = ps->dev->speed == USB_SPEED_LOW;
if (copy_to_user(arg, &ci, sizeof(ci)))
return -EFAULT;
return 0;
file:222ee07ea680f303d9ce0d5b94daaeaa9aa22c1d -> file:8164ba53cab79f266462f8b8d3d655de2a31e470
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -159,9 +159,9 @@ void usb_major_cleanup(void)
int usb_register_dev(struct usb_interface *intf,
struct usb_class_driver *class_driver)
{
- int retval = -EINVAL;
+ int retval;
int minor_base = class_driver->minor_base;
- int minor = 0;
+ int minor;
char name[20];
char *temp;
@@ -173,12 +173,17 @@ int usb_register_dev(struct usb_interfac
*/
minor_base = 0;
#endif
- intf->minor = -1;
-
- dbg ("looking for a minor, starting at %d", minor_base);
if (class_driver->fops == NULL)
- goto exit;
+ return -EINVAL;
+ if (intf->minor >= 0)
+ return -EADDRINUSE;
+
+ retval = init_usb_class();
+ if (retval)
+ return retval;
+
+ dev_dbg(&intf->dev, "looking for a minor, starting at %d", minor_base);
down_write(&minor_rwsem);
for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) {
@@ -186,20 +191,12 @@ int usb_register_dev(struct usb_interfac
continue;
usb_minors[minor] = class_driver->fops;
-
- retval = 0;
+ intf->minor = minor;
break;
}
up_write(&minor_rwsem);
-
- if (retval)
- goto exit;
-
- retval = init_usb_class();
- if (retval)
- goto exit;
-
- intf->minor = minor;
+ if (intf->minor < 0)
+ return -EXFULL;
/* create a usb class device for this usb interface */
snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -213,11 +210,11 @@ int usb_register_dev(struct usb_interfac
"%s", temp);
if (IS_ERR(intf->usb_dev)) {
down_write(&minor_rwsem);
- usb_minors[intf->minor] = NULL;
+ usb_minors[minor] = NULL;
+ intf->minor = -1;
up_write(&minor_rwsem);
retval = PTR_ERR(intf->usb_dev);
}
-exit:
return retval;
}
EXPORT_SYMBOL_GPL(usb_register_dev);
file:ed3aa7a91afe3f6ac30a14404af7dea9a2ba595c -> file:12254e1f4659454bf6a0f3709cfa4da2ad91a833
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -22,6 +22,7 @@
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
+#include <linux/usb/quirks.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
@@ -1768,7 +1769,6 @@ int usb_new_device(struct usb_device *ud
if (udev->parent)
usb_autoresume_device(udev->parent);
- usb_detect_quirks(udev);
err = usb_enumerate_device(udev); /* Read descriptors */
if (err < 0)
goto fail;
@@ -2821,13 +2821,16 @@ hub_port_init (struct usb_hub *hub, stru
else
i = udev->descriptor.bMaxPacketSize0;
if (le16_to_cpu(udev->ep0.desc.wMaxPacketSize) != i) {
- if (udev->speed != USB_SPEED_FULL ||
+ if (udev->speed == USB_SPEED_LOW ||
!(i == 8 || i == 16 || i == 32 || i == 64)) {
- dev_err(&udev->dev, "ep0 maxpacket = %d\n", i);
+ dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", i);
retval = -EMSGSIZE;
goto fail;
}
- dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i);
+ if (udev->speed == USB_SPEED_FULL)
+ dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i);
+ else
+ dev_warn(&udev->dev, "Using ep0 maxpacket: %d\n", i);
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i);
usb_ep0_reinit(udev);
}
@@ -3063,6 +3066,10 @@ static void hub_port_connect_change(stru
if (status < 0)
goto loop;
+ usb_detect_quirks(udev);
+ if (udev->quirks & USB_QUIRK_DELAY_INIT)
+ msleep(1000);
+
/* consecutive bus-powered hubs aren't reliable; they can
* violate the voltage drop budget. if the new child has
* a "powered" LED, users should notice we didn't enable it
file:980a8d27fa50688c48585bfecb2cf849b21c0f34 -> file:409cc94a1331f0dfe0f42f420947b406bf65b1af
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1185,13 +1185,6 @@ void usb_disable_device(struct usb_devic
{
int i;
- dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
- skip_ep0 ? "non-ep0" : "all");
- for (i = skip_ep0; i < 16; ++i) {
- usb_disable_endpoint(dev, i, true);
- usb_disable_endpoint(dev, i + USB_DIR_IN, true);
- }
-
/* getting rid of interfaces will disconnect
* any drivers bound to them (a key side effect)
*/
@@ -1221,6 +1214,13 @@ void usb_disable_device(struct usb_devic
if (dev->state == USB_STATE_CONFIGURED)
usb_set_device_state(dev, USB_STATE_ADDRESS);
}
+
+ dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
+ skip_ep0 ? "non-ep0" : "all");
+ for (i = skip_ep0; i < 16; ++i) {
+ usb_disable_endpoint(dev, i, true);
+ usb_disable_endpoint(dev, i + USB_DIR_IN, true);
+ }
}
/**
@@ -1792,6 +1792,7 @@ free_interfaces:
intf->dev.groups = usb_interface_groups;
intf->dev.dma_mask = dev->dev.dma_mask;
INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
+ intf->minor = -1;
device_initialize(&intf->dev);
mark_quiesced(intf);
dev_set_name(&intf->dev, "%d-%s:%d.%d",
file:a61f1607f90e017d88bca76f9c27c9678f7689d5 -> file:80b062b28ce57720bcfd16ae73023332822b1e4c
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -38,6 +38,9 @@ static const struct usb_device_id usb_qu
/* Creative SB Audigy 2 NX */
{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Logitech Harmony 700-series */
+ { USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Philips PSC805 audio device */
{ USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
file:0885d4abdc6265d0b7775da9b9f5503fc2be0dff -> file:da9a2b83567092c52c1123a300e0b16a713e3929
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -137,6 +137,16 @@ void usb_anchor_urb(struct urb *urb, str
}
EXPORT_SYMBOL_GPL(usb_anchor_urb);
+/* Callers must hold anchor->lock */
+static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor)
+{
+ urb->anchor = NULL;
+ list_del(&urb->anchor_list);
+ usb_put_urb(urb);
+ if (list_empty(&anchor->urb_list))
+ wake_up(&anchor->wait);
+}
+
/**
* usb_unanchor_urb - unanchors an URB
* @urb: pointer to the urb to anchor
@@ -156,17 +166,14 @@ void usb_unanchor_urb(struct urb *urb)
return;
spin_lock_irqsave(&anchor->lock, flags);
- if (unlikely(anchor != urb->anchor)) {
- /* we've lost the race to another thread */
- spin_unlock_irqrestore(&anchor->lock, flags);
- return;
- }
- urb->anchor = NULL;
- list_del(&urb->anchor_list);
+ /*
+ * At this point, we could be competing with another thread which
+ * has the same intention. To protect the urb from being unanchored
+ * twice, only the winner of the race gets the job.
+ */
+ if (likely(anchor == urb->anchor))
+ __usb_unanchor_urb(urb, anchor);
spin_unlock_irqrestore(&anchor->lock, flags);
- usb_put_urb(urb);
- if (list_empty(&anchor->urb_list))
- wake_up(&anchor->wait);
}
EXPORT_SYMBOL_GPL(usb_unanchor_urb);
@@ -725,20 +732,11 @@ EXPORT_SYMBOL_GPL(usb_unpoison_anchored_
void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
struct urb *victim;
- unsigned long flags;
- spin_lock_irqsave(&anchor->lock, flags);
- while (!list_empty(&anchor->urb_list)) {
- victim = list_entry(anchor->urb_list.prev, struct urb,
- anchor_list);
- usb_get_urb(victim);
- spin_unlock_irqrestore(&anchor->lock, flags);
- /* this will unanchor the URB */
+ while ((victim = usb_get_from_anchor(anchor)) != NULL) {
usb_unlink_urb(victim);
usb_put_urb(victim);
- spin_lock_irqsave(&anchor->lock, flags);
}
- spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);
@@ -775,12 +773,11 @@ struct urb *usb_get_from_anchor(struct u
victim = list_entry(anchor->urb_list.next, struct urb,
anchor_list);
usb_get_urb(victim);
- spin_unlock_irqrestore(&anchor->lock, flags);
- usb_unanchor_urb(victim);
+ __usb_unanchor_urb(victim, anchor);
} else {
- spin_unlock_irqrestore(&anchor->lock, flags);
victim = NULL;
}
+ spin_unlock_irqrestore(&anchor->lock, flags);
return victim;
}
@@ -802,12 +799,7 @@ void usb_scuttle_anchored_urbs(struct us
while (!list_empty(&anchor->urb_list)) {
victim = list_entry(anchor->urb_list.prev, struct urb,
anchor_list);
- usb_get_urb(victim);
- spin_unlock_irqrestore(&anchor->lock, flags);
- /* this may free the URB */
- usb_unanchor_urb(victim);
- usb_put_urb(victim);
- spin_lock_irqsave(&anchor->lock, flags);
+ __usb_unanchor_urb(victim, anchor);
}
spin_unlock_irqrestore(&anchor->lock, flags);
}
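
The urb.c changes above replace three drop-the-lock-and-retake-it dances with __usb_unanchor_urb(), which is documented to require anchor->lock. The convention is a double-underscore helper that assumes the lock is held, wrapped by a public entry point that takes it; list walkers can then call the locked variant repeatedly without ever releasing the lock mid-iteration. In miniature, with hypothetical demo_* types rather than the kernel's own API:

struct demo_anchor {
	spinlock_t lock;
	struct list_head list;
	wait_queue_head_t wait;
};

struct demo_item {
	struct list_head node;
};

/* Caller must hold a->lock. */
static void __demo_remove(struct demo_item *item, struct demo_anchor *a)
{
	list_del(&item->node);
	if (list_empty(&a->list))
		wake_up(&a->wait);
}

static void demo_remove(struct demo_item *item, struct demo_anchor *a)
{
	unsigned long flags;

	spin_lock_irqsave(&a->lock, flags);
	__demo_remove(item, a);
	spin_unlock_irqrestore(&a->lock, flags);
}
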
file:4e970cf0e29ae364b34c5e7436906e95db313952 -> file:946cbcfbcfbf6173a124e4c1676c6b245edf55ad
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -2013,6 +2013,9 @@ static int __init usba_udc_probe(struct
} else {
disable_irq(gpio_to_irq(udc->vbus_pin));
}
+ } else {
+ /* gpio_request failed, so use -EINVAL so gpio_is_valid() fails */

+ udc->vbus_pin = -EINVAL;
}
}
file:48267bc0b2e00c151bf05e8ed6caa2662e1de9a5 -> file:33ac6acbdb780f0886a546fc3b31ed5dd9ab31ef
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -291,9 +291,13 @@ gen_ndis_query_resp (int configNr, u32 O
/* mandatory */
case OID_GEN_VENDOR_DESCRIPTION:
pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__);
- length = strlen (rndis_per_dev_params [configNr].vendorDescr);
- memcpy (outbuf,
- rndis_per_dev_params [configNr].vendorDescr, length);
+ if ( rndis_per_dev_params [configNr].vendorDescr ) {
+ length = strlen (rndis_per_dev_params [configNr].vendorDescr);
+ memcpy (outbuf,
+ rndis_per_dev_params [configNr].vendorDescr, length);
+ } else {
+ outbuf[0] = 0;
+ }
retval = 0;
break;
file:5aeabd8bda513a09388d96a3b2922d90d736903c -> file:8198fc0e4ac6b6592585bb17addd2698900d1535
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1008,10 +1008,11 @@ rescan:
tmp && tmp != qh;
tmp = tmp->qh_next.qh)
continue;
- /* periodic qh self-unlinks on empty */
- if (!tmp)
- goto nogood;
- unlink_async (ehci, qh);
+ /* periodic qh self-unlinks on empty, and a COMPLETING qh
+ * may already be unlinked.
+ */
+ if (tmp)
+ unlink_async(ehci, qh);
/* FALL THROUGH */
case QH_STATE_UNLINK: /* wait for hw to finish? */
case QH_STATE_UNLINK_WAIT:
@@ -1028,7 +1029,6 @@ idle_timeout:
}
/* else FALL THROUGH */
default:
-nogood:
/* caller was supposed to have unlinked any requests;
* that's not our job. just leak this memory.
*/
file:36f96da129f5c637208031ec04913be3f0cc6e67 -> file:ab26c2be366aa8836c7f7bee692df25cf995fd47
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -192,17 +192,19 @@ ehci_hcd_ppc_of_probe(struct of_device *
}
rv = usb_add_hcd(hcd, irq, 0);
- if (rv == 0)
- return 0;
+ if (rv)
+ goto err_ehci;
+ return 0;
+
+err_ehci:
+ if (ehci->has_amcc_usb23)
+ iounmap(ehci->ohci_hcctrl_reg);
iounmap(hcd->regs);
err_ioremap:
irq_dispose_mapping(irq);
err_irq:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-
- if (ehci->has_amcc_usb23)
- iounmap(ehci->ohci_hcctrl_reg);
err_rmr:
usb_put_hcd(hcd);
file:317e8dc7c4f30513d61e511e8e486fe239928e29 -> file:6416a0fca012b0ad80366559ce7a34028d4bdaf3
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -124,7 +124,7 @@ static void next_trb(struct xhci_hcd *xh
*seg = (*seg)->next;
*trb = ((*seg)->trbs);
} else {
- *trb = (*trb)++;
+ (*trb)++;
}
}
file:5720bfef6a389ada6709be2ffac3aca06a1d969a -> file:49deeb6c3b528135ed8fc207a101e12e857f26e7
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -195,11 +195,9 @@ static ssize_t get_port1_handler(struct
return read_port(dev, attr, buf, 1, CYPRESS_READ_PORT_ID1);
}
-static DEVICE_ATTR(port0, S_IWUGO | S_IRUGO,
- get_port0_handler, set_port0_handler);
+static DEVICE_ATTR(port0, S_IRUGO | S_IWUSR, get_port0_handler, set_port0_handler);
-static DEVICE_ATTR(port1, S_IWUGO | S_IRUGO,
- get_port1_handler, set_port1_handler);
+static DEVICE_ATTR(port1, S_IRUGO | S_IWUSR, get_port1_handler, set_port1_handler);
static int cypress_probe(struct usb_interface *interface,
file:e75bb87ee92b87f39d67b883bb22f4f77fce3a79 -> file:02ff0405d746b87aa8e3423d173bf0de1112d3d2
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -552,6 +552,7 @@ static long iowarrior_ioctl(struct file
/* needed for power consumption */
struct usb_config_descriptor *cfg_descriptor = &dev->udev->actconfig->desc;
+ memset(&info, 0, sizeof(info));
/* directly from the descriptor */
info.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
info.product = dev->product_id;
file:be41ec35c655fb2610499416bf4fa4d3ce223532 -> file:cafbd15142754977a377806864628650ca5421ac
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3008,6 +3008,7 @@ sisusb_ioctl(struct file *file, unsigned
#else
x.sisusb_conactive = 0;
#endif
+ memset(x.sisusb_reserved, 0, sizeof(x.sisusb_reserved));
if (copy_to_user((void __user *)arg, &x, sizeof(x)))
retval = -EFAULT;
file:2e14102955c5936def7394c9faea1306696f18bb -> file:d509dcb29b38a5e7579856ba9057dd974c02d717
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -85,7 +85,7 @@ static ssize_t set_speed(struct device *
return count;
}
-static DEVICE_ATTR(speed, S_IWUGO | S_IRUGO, show_speed, set_speed);
+static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR, show_speed, set_speed);
static int tv_probe(struct usb_interface *interface,
const struct usb_device_id *id)
file:06cb71942dc7ee87e52c56c1be0410d64a707be4 -> file:9650de93a3d40c6e67dce43f845bb393fab819be
--- a/drivers/usb/misc/usbled.c
+++ b/drivers/usb/misc/usbled.c
@@ -94,7 +94,7 @@ static ssize_t set_##value(struct device
change_color(led); \
return count; \
} \
-static DEVICE_ATTR(value, S_IWUGO | S_IRUGO, show_##value, set_##value);
+static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, show_##value, set_##value);
show_set(blue);
show_set(red);
show_set(green);
file:3db255537e7913d2de5d0b03781fc20768af2a91 -> file:cd8726c30444d7098230db70ef3cf0d9677c82a3
--- a/drivers/usb/misc/usbsevseg.c
+++ b/drivers/usb/misc/usbsevseg.c
@@ -185,7 +185,7 @@ static ssize_t set_attr_##name(struct de
\
return count; \
} \
-static DEVICE_ATTR(name, S_IWUGO | S_IRUGO, show_attr_##name, set_attr_##name);
+static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_attr_##name, set_attr_##name);
static ssize_t show_attr_text(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -216,7 +216,7 @@ static ssize_t set_attr_text(struct devi
return count;
}
-static DEVICE_ATTR(text, S_IWUGO | S_IRUGO, show_attr_text, set_attr_text);
+static DEVICE_ATTR(text, S_IRUGO | S_IWUSR, show_attr_text, set_attr_text);
static ssize_t show_attr_decimals(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -265,8 +265,7 @@ static ssize_t set_attr_decimals(struct
return count;
}
-static DEVICE_ATTR(decimals, S_IWUGO | S_IRUGO,
- show_attr_decimals, set_attr_decimals);
+static DEVICE_ATTR(decimals, S_IRUGO | S_IWUSR, show_attr_decimals, set_attr_decimals);
static ssize_t show_attr_textmode(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -312,8 +311,7 @@ static ssize_t set_attr_textmode(struct
return -EINVAL;
}
-static DEVICE_ATTR(textmode, S_IWUGO | S_IRUGO,
- show_attr_textmode, set_attr_textmode);
+static DEVICE_ATTR(textmode, S_IRUGO | S_IWUSR, show_attr_textmode, set_attr_textmode);
MYDEV_ATTR_SIMPLE_UNSIGNED(powered, update_display_powered);
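
The cypress_cy7c63, trancevibrator, usbled and usbsevseg hunks above, and the sierra_ms one below, all fix the same sysfs mistake: S_IWUGO expands to S_IWUSR | S_IWGRP | S_IWOTH, so the attribute was writable by every user on the system. The corrected mode in one line, with show_foo/set_foo as stand-in handlers:

/* World-readable, root-only writable: octal 0644 rather than 0666. */
static DEVICE_ATTR(foo, S_IRUGO | S_IWUSR, show_foo, set_foo);
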
file:a9f06d76960ffa936858adaf321d5c53d39605dd -> file:d6a2ef374d832447442db0207fd9a19967c94fd1
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1382,7 +1382,6 @@ static void iso_callback (struct urb *ur
break;
}
}
- simple_free_urb (urb);
ctx->pending--;
if (ctx->pending == 0) {
@@ -1499,6 +1498,7 @@ test_iso_queue (struct usbtest_dev *dev,
}
simple_free_urb (urbs [i]);
+ urbs[i] = NULL;
context.pending--;
context.submit_error = 1;
break;
@@ -1508,6 +1508,10 @@ test_iso_queue (struct usbtest_dev *dev,
wait_for_completion (&context.done);
+ for (i = 0; i < param->sglen; i++) {
+ if (urbs[i])
+ simple_free_urb(urbs[i]);
+ }
/*
* Isochronous transfers are expected to fail sometimes. As an
* arbitrary limit, we will report an error if any submissions
file:10f3205798e880ce4f1351735666bac903e34398 -> file:9231b25d725a0b18e0208d55d914e31ac850304d
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -971,7 +971,7 @@ static int mon_bin_ioctl(struct inode *i
mutex_lock(&rp->fetch_lock);
spin_lock_irqsave(&rp->b_lock, flags);
- mon_free_buff(rp->b_vec, size/CHUNK_SIZE);
+ mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
kfree(rp->b_vec);
rp->b_vec = vec;
rp->b_size = size;
file:fcec87ea709e6d682f4c045d79c19c2c4a1fbcda -> file:51e8f0f734a0028e0f82f78e6e29a3bde26c79d4
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -248,8 +248,10 @@ int __init musb_platform_init(struct mus
usb_nop_xceiv_register();
musb->xceiv = otg_get_transceiver();
- if (!musb->xceiv)
+ if (!musb->xceiv) {
+ gpio_free(musb->config->gpio_vrsel);
return -ENODEV;
+ }
if (ANOMALY_05000346) {
bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value);
file:74073f9a43f09cafc6afed82e5a9cadd17304d62 -> file:c6f5ee4575cf26ce8ab8d5fcdcee8aa04648c905
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -577,11 +577,19 @@ static void rxstate(struct musb *musb, s
{
const u8 epnum = req->epnum;
struct usb_request *request = &req->request;
- struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
+ struct musb_ep *musb_ep;
void __iomem *epio = musb->endpoints[epnum].regs;
unsigned fifo_count = 0;
- u16 len = musb_ep->packet_sz;
+ u16 len;
u16 csr = musb_readw(epio, MUSB_RXCSR);
+ struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
+
+ if (hw_ep->is_shared_fifo)
+ musb_ep = &hw_ep->ep_in;
+ else
+ musb_ep = &hw_ep->ep_out;
+
+ len = musb_ep->packet_sz;
/* We shouldn't get here while DMA is active, but we do... */
if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
@@ -749,9 +757,15 @@ void musb_g_rx(struct musb *musb, u8 epn
u16 csr;
struct usb_request *request;
void __iomem *mbase = musb->mregs;
- struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
+ struct musb_ep *musb_ep;
void __iomem *epio = musb->endpoints[epnum].regs;
struct dma_channel *dma;
+ struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
+
+ if (hw_ep->is_shared_fifo)
+ musb_ep = &hw_ep->ep_in;
+ else
+ musb_ep = &hw_ep->ep_out;
musb_ep_select(mbase, epnum);
@@ -1074,7 +1088,7 @@ struct free_record {
/*
* Context: controller locked, IRQs blocked.
*/
-static void musb_ep_restart(struct musb *musb, struct musb_request *req)
+void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
DBG(3, "<== %s request %p len %u on hw_ep%d\n",
req->tx ? "TX/IN" : "RX/OUT",
file:59502da9f739ce179bca5faa549147d6c34ad078 -> file:76711f2a451bce034013e1c4168031acef6004a9
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -105,4 +105,6 @@ extern void musb_g_giveback(struct musb_
extern int musb_gadget_set_halt(struct usb_ep *ep, int value);
+extern void musb_ep_restart(struct musb *, struct musb_request *);
+
#endif /* __MUSB_GADGET_H */
file:067e5a95b1491334db8fdbc76554f2f0aae634e3 -> file:53c04448a8b89dac89e4f4764ee63837a03fac13
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -369,6 +369,7 @@ stall:
ctrlrequest->wIndex & 0x0f;
struct musb_ep *musb_ep;
struct musb_hw_ep *ep;
+ struct musb_request *request;
void __iomem *regs;
int is_in;
u16 csr;
@@ -411,6 +412,14 @@ stall:
csr);
}
+ /* Maybe start the first request in the queue */
+ request = to_musb_request(
+ next_request(musb_ep));
+ if (!musb_ep->busy && request) {
+ DBG(3, "restarting the request\n");
+ musb_ep_restart(musb, request);
+ }
+
/* select ep0 again */
musb_ep_select(mbase, 0);
handled = 1;
file:e3e087efa944dcea2971e3bf1256cd7d3763862e -> file:9f8f0d0443c73001bf4397fcfb19046415e0c136
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -56,6 +56,7 @@ static int cp210x_carrier_raised(struct
static int debug;
static struct usb_device_id id_table [] = {
+ { USB_DEVICE(0x045B, 0x0053) }, /* Renesas RX610 RX-Stick */
{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
{ USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
@@ -90,6 +91,7 @@ static struct usb_device_id id_table []
{ USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
{ USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
+ { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
{ USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
@@ -111,6 +113,7 @@ static struct usb_device_id id_table []
{ USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
+ { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
@@ -124,9 +127,14 @@ static struct usb_device_id id_table []
{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
{ USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
+ { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
+ { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
+ { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */
+ { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
{ USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
{ } /* Terminating Entry */
};
@@ -219,8 +227,8 @@ static struct usb_serial_driver cp210x_d
#define BITS_STOP_2 0x0002
/* CP210X_SET_BREAK */
-#define BREAK_ON 0x0000
-#define BREAK_OFF 0x0001
+#define BREAK_ON 0x0001
+#define BREAK_OFF 0x0000
/* CP210X_(SET_MHS|GET_MDMSTS) */
#define CONTROL_DTR 0x0001
file:8de85729b3ab2fe0c4da90da7ac8ac3e8e36975b -> file:d5556349db3b46044cc53ef0810d3d78c6e7eeaa
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -162,6 +162,9 @@ static struct usb_device_id id_table_com
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_5_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_6_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_7_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_USINT_CAT_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_USINT_WKEY_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_USINT_RS232_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ACTZWAVE_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_IRTRANS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_IPLUS_PID) },
@@ -179,9 +182,11 @@ static struct usb_device_id id_table_com
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_SNIFFER_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
+ { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) },
@@ -201,6 +206,7 @@ static struct usb_device_id id_table_com
{ USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_R2000KU_TRUE_RNG) },
+ { USB_DEVICE(FTDI_VID, FTDI_VARDAAN_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0100_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0101_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0102_PID) },
@@ -676,7 +682,6 @@ static struct usb_device_id id_table_com
{ USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) },
{ USB_DEVICE(ICOM_ID1_VID, ICOM_ID1_PID) },
- { USB_DEVICE(PAPOUCH_VID, PAPOUCH_TMU_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
@@ -698,6 +703,7 @@ static struct usb_device_id id_table_com
.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
@@ -717,8 +723,37 @@ static struct usb_device_id id_table_com
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
{ USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
+
+ /* Papouch devices based on FTDI chip */
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AP485_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB422_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485_2_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AP485_2_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB422_2_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485S_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485C_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_LEC_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB232_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_TMU_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_IRAMP_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_DRAK5_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO8x8_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO2x2_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO10x1_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO30x3_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO60x3_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO2x16_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO3x32_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_DRAK6_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_UPSUSB_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_MU_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SIMUKEY_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_AD4USB_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_GMUX_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_GMSR_PID) },
+
{ USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) },
{ USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) },
@@ -752,6 +787,23 @@ static struct usb_device_id id_table_com
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) },
+ { USB_DEVICE(FTDI_VID, ACCESIO_COM4SM_PID) },
+ { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_24_MASTER_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_PC_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_USB_DMX_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MIDI_TIMECODE_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MINI_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MAXI_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MEDIA_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
+ { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
@@ -1406,7 +1458,7 @@ static void ftdi_set_max_packet_size(str
}
/* set max packet size based on descriptor */
- priv->max_packet_size = ep_desc->wMaxPacketSize;
+ priv->max_packet_size = le16_to_cpu(ep_desc->wMaxPacketSize);
dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
}
@@ -2315,8 +2367,6 @@ static void ftdi_set_termios(struct tty_
"urb failed to set to rts/cts flow control\n");
}
- /* raise DTR/RTS */
- set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
} else {
/*
* Xon/Xoff code
@@ -2364,8 +2414,6 @@ static void ftdi_set_termios(struct tty_
}
}
- /* lower DTR/RTS */
- clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
}
return;
}
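
The ftdi_set_max_packet_size() fix above is an endianness bug. USB descriptors are little-endian on the wire, and wMaxPacketSize is stored as __le16, so it must pass through le16_to_cpu() before use. On little-endian x86 the raw read happens to produce the right value, which is how such bugs survive testing; a big-endian host reads the bytes swapped:

/* A 64-byte endpoint carries wMaxPacketSize as the bytes 0x40 0x00.
 * Raw u16 read on a big-endian host: 0x4000 (16384), wrong.
 * le16_to_cpu() on any host: 0x0040 (64), right. */
u16 mps = le16_to_cpu(ep_desc->wMaxPacketSize);
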
file:af54af2a65f28e10d2956a21734586f7026fc29c -> file:a807d497799b25191dc3225abdf0540297c6cc16
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -40,6 +40,11 @@
#define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */
+/* US Interface Navigator (http://www.usinterface.com/) */
+#define FTDI_USINT_CAT_PID 0xb810 /* Navigator CAT and 2nd PTT lines */
+#define FTDI_USINT_WKEY_PID 0xb811 /* Navigator WKEY and FSK lines */
+#define FTDI_USINT_RS232_PID 0xb812 /* Navigator RS232 and CONFIG lines */
+
/* OOCDlink by Joern Kaipf <joernk@web.de>
* (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */
#define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */
@@ -56,6 +61,7 @@
#define FTDI_OPENDCC_SNIFFER_PID 0xBFD9
#define FTDI_OPENDCC_THROTTLE_PID 0xBFDA
#define FTDI_OPENDCC_GATEWAY_PID 0xBFDB
+#define FTDI_OPENDCC_GBM_PID 0xBFDC
/*
* RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com)
@@ -105,6 +111,12 @@
/* Propox devices */
#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
+/* Lenz LI-USB Computer Interface. */
+#define FTDI_LENZ_LIUSB_PID 0xD780
+
+/* Vardaan Enterprises Serial Interface VEUSB422R3 */
+#define FTDI_VARDAAN_PID 0xF070
+
/*
* Xsens Technologies BV products (http://www.xsens.com).
*/
@@ -127,6 +139,18 @@
#define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */
/*
+ * ChamSys Limited (www.chamsys.co.uk) USB wing/interface product IDs
+ */
+#define FTDI_CHAMSYS_24_MASTER_WING_PID 0xDAF8
+#define FTDI_CHAMSYS_PC_WING_PID 0xDAF9
+#define FTDI_CHAMSYS_USB_DMX_PID 0xDAFA
+#define FTDI_CHAMSYS_MIDI_TIMECODE_PID 0xDAFB
+#define FTDI_CHAMSYS_MINI_WING_PID 0xDAFC
+#define FTDI_CHAMSYS_MAXI_WING_PID 0xDAFD
+#define FTDI_CHAMSYS_MEDIA_WING_PID 0xDAFE
+#define FTDI_CHAMSYS_WING_PID 0xDAFF
+
+/*
* Westrex International devices submitted by Cory Lee
*/
#define FTDI_WESTREX_MODEL_777_PID 0xDC00 /* Model 777 */
@@ -707,6 +731,7 @@
*/
#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
#define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */
+#define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */
/*
* Bayer Ascensia Contour blood glucose meter USB-converter cable.
@@ -991,6 +1016,12 @@
#define ALTI2_N3_PID 0x6001 /* Neptune 3 */
/*
+ * Ionics PlugComputer
+ */
+#define IONICS_VID 0x1c0c
+#define IONICS_PLUGCOMPUTER_PID 0x0102
+
+/*
* Dresden Elektronic Sensor Terminal Board
*/
#define DE_VID 0x1cf1 /* Vendor ID */
@@ -1003,9 +1034,34 @@
*/
#define PAPOUCH_VID 0x5050 /* Vendor ID */
+#define PAPOUCH_SB485_PID 0x0100 /* Papouch SB485 USB-485/422 Converter */
+#define PAPOUCH_AP485_PID 0x0101 /* AP485 USB-RS485 Converter */
+#define PAPOUCH_SB422_PID 0x0102 /* Papouch SB422 USB-RS422 Converter */
+#define PAPOUCH_SB485_2_PID 0x0103 /* Papouch SB485 USB-485/422 Converter */
+#define PAPOUCH_AP485_2_PID 0x0104 /* AP485 USB-RS485 Converter */
+#define PAPOUCH_SB422_2_PID 0x0105 /* Papouch SB422 USB-RS422 Converter */
+#define PAPOUCH_SB485S_PID 0x0106 /* Papouch SB485S USB-485/422 Converter */
+#define PAPOUCH_SB485C_PID 0x0107 /* Papouch SB485C USB-485/422 Converter */
+#define PAPOUCH_LEC_PID 0x0300 /* LEC USB Converter */
+#define PAPOUCH_SB232_PID 0x0301 /* Papouch SB232 USB-RS232 Converter */
#define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */
-#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */
+#define PAPOUCH_IRAMP_PID 0x0500 /* Papouch IRAmp Duplex */
+#define PAPOUCH_DRAK5_PID 0x0700 /* Papouch DRAK5 */
+#define PAPOUCH_QUIDO8x8_PID 0x0800 /* Papouch Quido 8/8 Module */
+#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Papouch Quido 4/4 Module */
+#define PAPOUCH_QUIDO2x2_PID 0x0a00 /* Papouch Quido 2/2 Module */
+#define PAPOUCH_QUIDO10x1_PID 0x0b00 /* Papouch Quido 10/1 Module */
+#define PAPOUCH_QUIDO30x3_PID 0x0c00 /* Papouch Quido 30/3 Module */
+#define PAPOUCH_QUIDO60x3_PID 0x0d00 /* Papouch Quido 60(100)/3 Module */
+#define PAPOUCH_QUIDO2x16_PID 0x0e00 /* Papouch Quido 2/16 Module */
+#define PAPOUCH_QUIDO3x32_PID 0x0f00 /* Papouch Quido 3/32 Module */
+#define PAPOUCH_DRAK6_PID 0x1000 /* Papouch DRAK6 */
+#define PAPOUCH_UPSUSB_PID 0x8000 /* Papouch UPS-USB adapter */
+#define PAPOUCH_MU_PID 0x8001 /* MU controller */
+#define PAPOUCH_SIMUKEY_PID 0x8002 /* Papouch SimuKey */
#define PAPOUCH_AD4USB_PID 0x8003 /* AD4USB Measurement Module */
+#define PAPOUCH_GMUX_PID 0x8004 /* Papouch GOLIATH MUX */
+#define PAPOUCH_GMSR_PID 0x8005 /* Papouch GOLIATH MSR */
/*
* Marvell SheevaPlug
@@ -1039,3 +1095,26 @@
#define XVERVE_SIGNALYZER_SH2_PID 0xBCA2
#define XVERVE_SIGNALYZER_SH4_PID 0xBCA4
+/*
+ * Segway Robotic Mobility Platform USB interface (using VID 0x0403)
+ * Submitted by John G. Rogers
+ */
+#define SEGWAY_RMP200_PID 0xe729
+
+
+/*
+ * Accesio USB Data Acquisition products (http://www.accesio.com/)
+ */
+#define ACCESIO_COM4SM_PID 0xD578
+
+/* www.sciencescope.co.uk educational dataloggers */
+#define FTDI_SCIENCESCOPE_LOGBOOKML_PID 0xFF18
+#define FTDI_SCIENCESCOPE_LS_LOGBOOK_PID 0xFF1C
+#define FTDI_SCIENCESCOPE_HS_LOGBOOK_PID 0xFF1D
+
+/*
+ * Milkymist One JTAG/Serial
+ */
+#define QIHARDWARE_VID 0x20B7
+#define MILKYMISTONE_JTAGSERIAL_PID 0x0713
+
file:d4cc0f7af400d0133653e4e0515b25a131e2a37f -> file:fbdbac5ae410a5ebade345e42d7cd025bf54b4e8
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -1157,7 +1157,7 @@ static int download_fw(struct edgeport_s
/* Check if we have an old version in the I2C and
update if necessary */
- if (download_cur_ver != download_new_ver) {
+ if (download_cur_ver < download_new_ver) {
dbg("%s - Update I2C dld from %d.%d to %d.%d",
__func__,
firmware_version->Ver_Major,
file:763e32a44be02eb790c0c7f66ed25e0bae947f19 -> file:f3a73e7b948ce4c1809993a1fa32233b12c1228d
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1466,6 +1466,9 @@ static int mos7720_ioctl(struct tty_stru
case TIOCGICOUNT:
cnow = mos7720_port->icount;
+
+ memset(&icount, 0, sizeof(struct serial_icounter_struct));
+
icount.cts = cnow.cts;
icount.dsr = cnow.dsr;
icount.rng = cnow.rng;
file:a861cd2cb9318fd4692413686eb6117b50b7b588 -> file:9fdcee2eca99c3941f2bc92f83c7045d35e2a92c
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -120,15 +120,20 @@
* by making a change here, in moschip_port_id_table, and in
* moschip_id_table_combined
*/
-#define USB_VENDOR_ID_BANDB 0x0856
-#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22
-#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24
-#define BANDB_DEVICE_ID_US9ML2_2 0xAC29
-#define BANDB_DEVICE_ID_US9ML2_4 0xAC30
-#define BANDB_DEVICE_ID_USPTL4_2 0xAC31
-#define BANDB_DEVICE_ID_USPTL4_4 0xAC32
-#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
-#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
+#define USB_VENDOR_ID_BANDB 0x0856
+#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22
+#define BANDB_DEVICE_ID_USO9ML2_2P 0xBC00
+#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24
+#define BANDB_DEVICE_ID_USO9ML2_4P 0xBC01
+#define BANDB_DEVICE_ID_US9ML2_2 0xAC29
+#define BANDB_DEVICE_ID_US9ML2_4 0xAC30
+#define BANDB_DEVICE_ID_USPTL4_2 0xAC31
+#define BANDB_DEVICE_ID_USPTL4_4 0xAC32
+#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
+#define BANDB_DEVICE_ID_USOPTL4_2P 0xBC02
+#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
+#define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03
+#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24
/* This driver also supports
* ATEN UC2324 device using Moschip MCS7840
@@ -184,13 +189,18 @@ static struct usb_device_id moschip_port
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
{} /* terminating entry */
@@ -200,13 +210,18 @@ static __devinitdata struct usb_device_i
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
{} /* terminating entry */
@@ -280,12 +295,19 @@ static int mos7840_get_reg_sync(struct u
{
struct usb_device *dev = port->serial->dev;
int ret = 0;
+ u8 *buf;
+
+ buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
- MCS_RD_RTYPE, 0, reg, val, VENDOR_READ_LENGTH,
+ MCS_RD_RTYPE, 0, reg, buf, VENDOR_READ_LENGTH,
MOS_WDR_TIMEOUT);
+ *val = buf[0];
dbg("mos7840_get_reg_sync offset is %x, return val %x", reg, *val);
- *val = (*val) & 0x00ff;
+
+ kfree(buf);
return ret;
}
@@ -338,6 +360,11 @@ static int mos7840_get_uart_reg(struct u
struct usb_device *dev = port->serial->dev;
int ret = 0;
__u16 Wval;
+ u8 *buf;
+
+ buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
/* dbg("application number is %4x",
(((__u16)port->number - (__u16)(port->serial->minor))+1)<<8); */
@@ -361,9 +388,11 @@ static int mos7840_get_uart_reg(struct u
}
}
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
- MCS_RD_RTYPE, Wval, reg, val, VENDOR_READ_LENGTH,
+ MCS_RD_RTYPE, Wval, reg, buf, VENDOR_READ_LENGTH,
MOS_WDR_TIMEOUT);
- *val = (*val) & 0x00ff;
+ *val = buf[0];
+
+ kfree(buf);
return ret;
}
@@ -2258,6 +2287,9 @@ static int mos7840_ioctl(struct tty_stru
case TIOCGICOUNT:
cnow = mos7840_port->icount;
smp_rmb();
+
+ memset(&icount, 0, sizeof(struct serial_icounter_struct));
+
icount.cts = cnow.cts;
icount.dsr = cnow.dsr;
icount.rng = cnow.rng;
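
The mos7840_get_reg_sync() and mos7840_get_uart_reg() hunks above stop passing the caller's buffer, which may live on the stack, straight to usb_control_msg(): USB transfer buffers must be DMA-able, and stack memory is not on all architectures. The shape of the fix reduced to a sketch; the DEMO_* request constants and length are placeholders, not values from the patch:

static int demo_read_reg(struct usb_device *dev, __u16 reg, __u16 *val)
{
	u8 *buf;
	int ret;

	/* kmalloc() memory is DMA-safe; an on-stack buffer is not */
	buf = kmalloc(DEMO_READ_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), DEMO_RDREQ,
			      DEMO_RD_RTYPE, 0, reg, buf, DEMO_READ_LEN,
			      DEMO_TIMEOUT);
	*val = buf[0];	/* only the first byte carries the register */

	kfree(buf);
	return ret;
}
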
file:5ceaa4c6be0901de3cbe2305ae3775904fec701c -> file:061a083856738d711bf94902bc30c85e01e30bb8
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -24,6 +24,7 @@ static int debug;
static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */
+ { USB_DEVICE(0x0df7, 0x0900) }, /* Mobile Action i-gotU */
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
file:80f59b6350cbdcee5a9864ef51990c51e2309ad4 -> file:db7cf086604623c8d5b8b9aafea21d63e7e04170
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -99,8 +99,8 @@ static void opticon_bulk_callback(struct
available_room = tty_buffer_request_room(tty,
data_length);
if (available_room) {
- tty_insert_flip_string(tty, data,
- available_room);
+ tty_insert_flip_string(tty, data + 2,
+ data_length);
tty_flip_buffer_push(tty);
}
tty_kref_put(tty);
@@ -134,7 +134,7 @@ exit:
priv->bulk_address),
priv->bulk_in_buffer, priv->buffer_size,
opticon_bulk_callback, priv);
- result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
+ result = usb_submit_urb(priv->bulk_read_urb, GFP_ATOMIC);
if (result)
dev_err(&port->dev,
"%s - failed resubmitting read urb, error %d\n",
file:25860233012747c32b735998cef1c0de9638c51f -> file:c2e69839ec5f7713fc1681ada83ac93a196324cc
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -165,7 +165,10 @@ static int option_resume(struct usb_ser
#define HUAWEI_PRODUCT_E143D 0x143D
#define HUAWEI_PRODUCT_E143E 0x143E
#define HUAWEI_PRODUCT_E143F 0x143F
+#define HUAWEI_PRODUCT_K4505 0x1464
+#define HUAWEI_PRODUCT_K3765 0x1465
#define HUAWEI_PRODUCT_E14AC 0x14AC
+#define HUAWEI_PRODUCT_ETS1220 0x1803
#define QUANTA_VENDOR_ID 0x0408
#define QUANTA_PRODUCT_Q101 0xEA02
@@ -372,6 +375,10 @@ static int option_resume(struct usb_ser
#define OLIVETTI_VENDOR_ID 0x0b3c
#define OLIVETTI_PRODUCT_OLICARD100 0xc000
+/* Celot products */
+#define CELOT_VENDOR_ID 0x211f
+#define CELOT_PRODUCT_CT680M 0x6801
+
static struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -469,7 +476,10 @@ static struct usb_device_id option_ids[]
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
- { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_9508) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */
@@ -572,6 +582,7 @@ static struct usb_device_id option_ids[]
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff) },
@@ -583,38 +594,52 @@ static struct usb_device_id option_ids[]
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) },
+ /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0038, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0040, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0044, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0050, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff) },
+ /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0053, 0xff, 0xff, 0xff) }, */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0056, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0065, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0067, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0077, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0079, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0082, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0083, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0087, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff) },
@@ -830,6 +855,8 @@ static struct usb_device_id option_ids[]
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
@@ -868,10 +895,9 @@ static struct usb_device_id option_ids[]
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
-
{ USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
-
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
+ { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
@@ -1007,6 +1033,13 @@ static int option_probe(struct usb_seria
serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff)
return -ENODEV;
+ /* Don't bind network interfaces on Huawei K3765 & K4505 */
+ if (serial->dev->descriptor.idVendor == HUAWEI_VENDOR_ID &&
+ (serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K3765 ||
+ serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505) &&
+ serial->interface->cur_altsetting->desc.bInterfaceNumber == 1)
+ return -ENODEV;
+
data = serial->private = kzalloc(sizeof(struct option_intf_private), GFP_KERNEL);
if (!data)
return -ENOMEM;
file:9ec1a49e23622a6787d93ec3280f62ce14aac93e -> file:ecb17081e003e3bebab25e1eeb5bf9810b3f0964
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -95,6 +95,7 @@ static struct usb_device_id id_table []
{ USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
{ USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
+ { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
{ USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
{ } /* Terminating entry */
file:d640dc951568dc705c3d7c2ae22ea7321d9afe11 -> file:01bc64b3eef301978553e59d2099c3d14451b58c
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -127,6 +127,10 @@
#define CRESSI_VENDOR_ID 0x04b8
#define CRESSI_EDY_PRODUCT_ID 0x0521
+/* Zeagle dive computer interface */
+#define ZEAGLE_VENDOR_ID 0x04b8
+#define ZEAGLE_N2ITION3_PRODUCT_ID 0x0522
+
/* Sony, USB data cable for CMD-Jxx mobile phones */
#define SONY_VENDOR_ID 0x054c
#define SONY_QN3USB_PRODUCT_ID 0x0437
file:4395c4100ec2096313933d029e8b25191eefe6c9 -> file:38e3c3aa05a181a45101368bb681eb65b7df451d
--- a/drivers/usb/storage/sierra_ms.c
+++ b/drivers/usb/storage/sierra_ms.c
@@ -120,7 +120,7 @@ static ssize_t show_truinst(struct devic
}
return result;
}
-static DEVICE_ATTR(truinst, S_IWUGO | S_IRUGO, show_truinst, NULL);
+static DEVICE_ATTR(truinst, S_IRUGO, show_truinst, NULL);
int sierra_ms_init(struct us_data *us)
{
file:6615ac7fa60a6124e31257a2ce1b09cf871ed719 -> file:5e20e6ec17197649b54db2f3f5cda0557dfdfd52
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -196,12 +196,12 @@ static int backlight_suspend(struct devi
{
struct backlight_device *bd = to_backlight_device(dev);
- if (bd->ops->options & BL_CORE_SUSPENDRESUME) {
- mutex_lock(&bd->ops_lock);
+ mutex_lock(&bd->ops_lock);
+ if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) {
bd->props.state |= BL_CORE_SUSPENDED;
backlight_update_status(bd);
- mutex_unlock(&bd->ops_lock);
}
+ mutex_unlock(&bd->ops_lock);
return 0;
}
@@ -210,12 +210,12 @@ static int backlight_resume(struct devic
{
struct backlight_device *bd = to_backlight_device(dev);
- if (bd->ops->options & BL_CORE_SUSPENDRESUME) {
- mutex_lock(&bd->ops_lock);
+ mutex_lock(&bd->ops_lock);
+ if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) {
bd->props.state &= ~BL_CORE_SUSPENDED;
backlight_update_status(bd);
- mutex_unlock(&bd->ops_lock);
}
+ mutex_unlock(&bd->ops_lock);
return 0;
}
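
The backlight suspend/resume hunks above fix both a race and an ordering problem: bd->ops was dereferenced before ops_lock was taken, while the unregister path clears that pointer under the same mutex. Widening the critical section and adding the NULL check means the pointer cannot vanish between the test and the call. The corrected shape in miniature, using the field names from the hunk:

mutex_lock(&bd->ops_lock);
if (bd->ops && (bd->ops->options & BL_CORE_SUSPENDRESUME)) {
	bd->props.state |= BL_CORE_SUSPENDED;	/* resume clears the bit */
	backlight_update_status(bd);
}
mutex_unlock(&bd->ops_lock);
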
file:d25df51bb0d2b3160efe478fb4e64f968014ea04 -> file:c0a446560ab406f0184a8571204c9b5ba50e4194
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -13,7 +13,7 @@
#include <linux/platform_device.h>
#include <linux/screen_info.h>
#include <linux/dmi.h>
-
+#include <linux/pci.h>
#include <video/vga.h>
static struct fb_var_screeninfo efifb_defined __initdata = {
@@ -39,16 +39,31 @@ enum {
M_I20, /* 20-Inch iMac */
M_I20_SR, /* 20-Inch iMac (Santa Rosa) */
M_I24, /* 24-Inch iMac */
+ M_I24_8_1, /* 24-Inch iMac, model 8,1 */
+ M_I24_10_1, /* 24-Inch iMac, model 10,1 */
+ M_I27_11_1, /* 27-Inch iMac, model 11,1 */
M_MINI, /* Mac Mini */
+ M_MINI_3_1, /* Mac Mini, model 3,1 */
+ M_MINI_4_1, /* Mac Mini, model 4,1 */
M_MB, /* MacBook */
M_MB_2, /* MacBook, 2nd rev. */
M_MB_3, /* MacBook, 3rd rev. */
+ M_MB_5_1, /* MacBook, 5th rev. */
+ M_MB_6_1, /* MacBook, 6th rev. */
+ M_MB_7_1, /* MacBook, 7th rev. */
M_MB_SR, /* MacBook, 2nd gen, (Santa Rosa) */
M_MBA, /* MacBook Air */
M_MBP, /* MacBook Pro */
M_MBP_2, /* MacBook Pro 2nd gen */
+ M_MBP_2_2, /* MacBook Pro, model 2,2 */
M_MBP_SR, /* MacBook Pro (Santa Rosa) */
M_MBP_4, /* MacBook Pro, 4th gen */
+ M_MBP_5_1, /* MacBook Pro, model 5,1 */
+ M_MBP_5_2, /* MacBook Pro, model 5,2 */
+ M_MBP_5_3, /* MacBook Pro, model 5,3 */
+ M_MBP_6_1, /* MacBook Pro, model 6,1 */
+ M_MBP_6_2, /* MacBook Pro, model 6,2 */
+ M_MBP_7_1, /* MacBook Pro, model 7,1 */
M_UNKNOWN /* placeholder */
};
@@ -63,13 +78,28 @@ static struct efifb_dmi_info {
[M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050 }, /* guess */
[M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050 },
[M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200 }, /* guess */
+ [M_I24_8_1] = { "imac8", 0xc0060000, 2048 * 4, 1920, 1200 },
+ [M_I24_10_1] = { "imac10", 0xc0010000, 2048 * 4, 1920, 1080 },
+ [M_I27_11_1] = { "imac11", 0xc0010000, 2560 * 4, 2560, 1440 },
[M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768 },
+ [M_MINI_3_1] = { "mini31", 0x40010000, 1024 * 4, 1024, 768 },
+ [M_MINI_4_1] = { "mini41", 0xc0010000, 2048 * 4, 1920, 1200 },
[M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800 },
+ [M_MB_5_1] = { "macbook51", 0x80010000, 2048 * 4, 1280, 800 },
+ [M_MB_6_1] = { "macbook61", 0x80010000, 2048 * 4, 1280, 800 },
+ [M_MB_7_1] = { "macbook71", 0x80010000, 2048 * 4, 1280, 800 },
[M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800 },
[M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900 },
[M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */
+ [M_MBP_2_2] = { "mbp22", 0x80010000, 1472 * 4, 1440, 900 },
[M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 },
[M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 },
+ [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 },
+ [M_MBP_5_2] = { "mbp52", 0xc0010000, 2048 * 4, 1920, 1200 },
+ [M_MBP_5_3] = { "mbp53", 0xd0010000, 2048 * 4, 1440, 900 },
+ [M_MBP_6_1] = { "mbp61", 0x90030000, 2048 * 4, 1920, 1200 },
+ [M_MBP_6_2] = { "mbp62", 0x90030000, 2048 * 4, 1680, 1050 },
+ [M_MBP_7_1] = { "mbp71", 0xc0010000, 2048 * 4, 1280, 800 },
[M_UNKNOWN] = { NULL, 0, 0, 0, 0 }
};
@@ -90,7 +120,12 @@ static struct dmi_system_id __initdata d
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac8,1", M_I24_8_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac10,1", M_I24_10_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac11,1", M_I27_11_1),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini3,1", M_MINI_3_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini4,1", M_MINI_4_1),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB),
/* At least one of these two will be right; maybe both? */
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB),
@@ -99,13 +134,23 @@ static struct dmi_system_id __initdata d
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook5,1", M_MB_5_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook6,1", M_MB_6_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook7,1", M_MB_7_1),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2),
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,2", M_MBP_2_2),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,2", M_MBP_5_2),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,3", M_MBP_5_3),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,1", M_MBP_6_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,2", M_MBP_6_2),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro7,1", M_MBP_7_1),
{},
};
@@ -113,7 +158,7 @@ static int set_system(const struct dmi_s
{
struct efifb_dmi_info *info = id->driver_data;
if (info->base == 0)
- return -ENODEV;
+ return 0;
printk(KERN_INFO "efifb: dmi detected %s - framebuffer at %p "
"(%dx%d, stride %d)\n", id->ident,
@@ -121,18 +166,55 @@ static int set_system(const struct dmi_s
info->stride);
/* Trust the bootloader over the DMI tables */
- if (screen_info.lfb_base == 0)
+ if (screen_info.lfb_base == 0) {
+#if defined(CONFIG_PCI)
+ struct pci_dev *dev = NULL;
+ int found_bar = 0;
+#endif
screen_info.lfb_base = info->base;
- if (screen_info.lfb_linelength == 0)
- screen_info.lfb_linelength = info->stride;
- if (screen_info.lfb_width == 0)
- screen_info.lfb_width = info->width;
- if (screen_info.lfb_height == 0)
- screen_info.lfb_height = info->height;
- if (screen_info.orig_video_isVGA == 0)
- screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
- return 0;
+#if defined(CONFIG_PCI)
+ /* make sure that the address in the table is actually on a
+ * VGA device's PCI BAR */
+
+ for_each_pci_dev(dev) {
+ int i;
+ if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+ continue;
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ resource_size_t start, end;
+
+ start = pci_resource_start(dev, i);
+ if (start == 0)
+ break;
+ end = pci_resource_end(dev, i);
+ if (screen_info.lfb_base >= start &&
+ screen_info.lfb_base < end) {
+ found_bar = 1;
+ }
+ }
+ }
+ if (!found_bar)
+ screen_info.lfb_base = 0;
+#endif
+ }
+ if (screen_info.lfb_base) {
+ if (screen_info.lfb_linelength == 0)
+ screen_info.lfb_linelength = info->stride;
+ if (screen_info.lfb_width == 0)
+ screen_info.lfb_width = info->width;
+ if (screen_info.lfb_height == 0)
+ screen_info.lfb_height = info->height;
+ if (screen_info.orig_video_isVGA == 0)
+ screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
+ } else {
+ screen_info.lfb_linelength = 0;
+ screen_info.lfb_width = 0;
+ screen_info.lfb_height = 0;
+ screen_info.orig_video_isVGA = 0;
+ return 0;
+ }
+ return 1;
}
static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
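The efifb hunk above stops trusting a DMI-supplied framebuffer address outright: on CONFIG_PCI kernels the address is kept only if it falls inside a BAR of a VGA-class PCI device, and is zeroed (along with the derived geometry) otherwise. A minimal user-space sketch of that containment test follows; the resource values are made up for illustration, and the exclusive upper bound mirrors the "< end" comparison in the hunk.

#include <stdint.h>
#include <stdio.h>

struct bar_range {
	uint64_t start;
	uint64_t end;		/* treated as exclusive, like "< end" above */
};

static int base_in_any_bar(uint64_t base, const struct bar_range *bar, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (bar[i].start == 0)		/* unimplemented BAR: stop */
			break;
		if (base >= bar[i].start && base < bar[i].end)
			return 1;
	}
	return 0;
}

int main(void)
{
	/* hypothetical VGA aperture at 0xc0000000..0xc0800000 */
	struct bar_range bars[] = { { 0xc0000000, 0xc0800000 } };

	printf("%d\n", base_in_any_bar(0xc0010000, bars, 1));	/* 1: keep */
	printf("%d\n", base_in_any_bar(0x90000000, bars, 1));	/* 0: drop */
	return 0;
}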
file:a4e05e4d7501256e5311a1a2fc7e808c3d05f1f5 -> file:e1836d7140a8cd10938d36f32a50fd8fa5b2581e
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1701,6 +1701,9 @@ static int sisfb_ioctl(struct fb_info *i
break;
case FBIOGET_VBLANK:
+
+ memset(&sisvbblank, 0, sizeof(struct fb_vblank));
+
sisvbblank.count = 0;
sisvbblank.flags = sisfb_setupvbblankflags(ivideo, &sisvbblank.vcount, &sisvbblank.hcount);
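This sisfb hunk, like the viafb ioctl hunk further down, zeroes a struct before filling it and copying it to user space: any field or padding byte the handler never assigns would otherwise carry stale kernel stack data out through the ioctl. A stand-alone sketch of the pattern, with hypothetical field names:

#include <string.h>

struct reply {
	int count;
	int flags;
	char reserved[8];	/* padding / fields the handler may skip */
};

static void fill_reply(struct reply *r)
{
	memset(r, 0, sizeof(*r));	/* zero first, then assign */
	r->count = 0;
	r->flags = 1;
	/* r->reserved is now guaranteed zero, not stack garbage */
}

int main(void)
{
	struct reply r;

	fill_reply(&r);
	return r.reserved[0];	/* always 0 */
}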
file:4cd50497264d3c65d092e1ca9c4fd55d2a7d0e22 -> file:3803745d6eee75068f072929c13db42f10eaa1eb
--- a/drivers/video/sunxvr500.c
+++ b/drivers/video/sunxvr500.c
@@ -242,11 +242,27 @@ static int __devinit e3d_set_fbinfo(stru
static int __devinit e3d_pci_register(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
+ struct device_node *of_node;
+ const char *device_type;
struct fb_info *info;
struct e3d_info *ep;
unsigned int line_length;
int err;
+ of_node = pci_device_to_OF_node(pdev);
+ if (!of_node) {
+ printk(KERN_ERR "e3d: Cannot find OF node of %s\n",
+ pci_name(pdev));
+ return -ENODEV;
+ }
+
+ device_type = of_get_property(of_node, "device_type", NULL);
+ if (!device_type) {
+ printk(KERN_INFO "e3d: Ignoring secondary output device "
+ "at %s\n", pci_name(pdev));
+ return -ENODEV;
+ }
+
err = pci_enable_device(pdev);
if (err < 0) {
printk(KERN_ERR "e3d: Cannot enable PCI device %s\n",
@@ -265,13 +281,7 @@ static int __devinit e3d_pci_register(st
ep->info = info;
ep->pdev = pdev;
spin_lock_init(&ep->lock);
- ep->of_node = pci_device_to_OF_node(pdev);
- if (!ep->of_node) {
- printk(KERN_ERR "e3d: Cannot find OF node of %s\n",
- pci_name(pdev));
- err = -ENODEV;
- goto err_release_fb;
- }
+ ep->of_node = of_node;
/* Read the PCI base register of the frame buffer, which we
* need in order to interpret the RAMDAC_VID_*FB* values in
file:9d4f3a49ba4a5038a333c940351dd85cf5df0deb -> file:12b5990bf456fdf7e04b9311cc2b3552b1127e68
--- a/drivers/video/via/accel.c
+++ b/drivers/video/via/accel.c
@@ -277,11 +277,12 @@ static int hw_bitblt_2(void __iomem *eng
writel(tmp, engine + 0x1C);
}
- if (op != VIA_BITBLT_COLOR)
+ if (op == VIA_BITBLT_FILL) {
+ writel(fg_color, engine + 0x58);
+ } else if (op == VIA_BITBLT_MONO) {
writel(fg_color, engine + 0x4C);
-
- if (op == VIA_BITBLT_MONO)
writel(bg_color, engine + 0x50);
+ }
if (op == VIA_BITBLT_FILL)
ge_cmd |= fill_rop << 24 | 0x00002000 | 0x00000001;
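Before this hunk, hw_bitblt_2() wrote the foreground color to the same engine register for every non-color operation; the fix programs the fill color at offset 0x58 for solid fills and the mono foreground/background pair at 0x4C/0x50 for monochrome blits. A sketch of the corrected dispatch, with the register offsets taken from the hunk and everything else illustrative:

#include <stdint.h>

enum blit_op { BLIT_COLOR, BLIT_MONO, BLIT_FILL };	/* placeholder names */

static void program_colors(volatile uint32_t *engine, enum blit_op op,
			    uint32_t fg_color, uint32_t bg_color)
{
	if (op == BLIT_FILL) {
		engine[0x58 / 4] = fg_color;	/* fill color register */
	} else if (op == BLIT_MONO) {
		engine[0x4C / 4] = fg_color;	/* mono foreground */
		engine[0x50 / 4] = bg_color;	/* mono background */
	}
	/* color blits carry their pixels in the source data, so no
	 * color registers are written for them */
}

int main(void)
{
	uint32_t fake_engine[0x60 / 4] = { 0 };

	program_colors(fake_engine, BLIT_MONO, 0xffffff, 0x000000);
	return 0;
}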
file:da03c074e32aad8b909c4b11c57a4c5a9f9d9285 -> file:4d553d0b8d7a450b9337a9b4375856c5b304217c
--- a/drivers/video/via/ioctl.c
+++ b/drivers/video/via/ioctl.c
@@ -25,6 +25,8 @@ int viafb_ioctl_get_viafb_info(u_long ar
{
struct viafb_ioctl_info viainfo;
+ memset(&viainfo, 0, sizeof(struct viafb_ioctl_info));
+
viainfo.viafb_id = VIAID;
viainfo.vendor_id = PCI_VIA_VENDOR_ID;
file:2376f688ec8b69addc7c4f0439bd62e127d8482b -> file:5a1dad2964d635d85d17d5fa907d2dd512b1dbb4
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -857,9 +857,9 @@ unsigned long w100fb_gpio_read(int port)
void w100fb_gpio_write(int port, unsigned long value)
{
if (port==W100_GPIO_PORT_A)
- value = writel(value, remapped_regs + mmGPIO_DATA);
+ writel(value, remapped_regs + mmGPIO_DATA);
else
- value = writel(value, remapped_regs + mmGPIO_DATA2);
+ writel(value, remapped_regs + mmGPIO_DATA2);
}
EXPORT_SYMBOL(w100fb_gpio_read);
EXPORT_SYMBOL(w100fb_gpio_write);
file:ce602dd09bc18c60381c68f0f1310bb168817bed -> file:141701598b3ec8d5def730020d805e82f4e8ccf5
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -106,6 +106,7 @@ static inline unsigned long *cpu_evtchn_
#define VALID_EVTCHN(chn) ((chn) != 0)
static struct irq_chip xen_dynamic_chip;
+static struct irq_chip xen_percpu_chip;
/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
@@ -254,7 +255,7 @@ static void init_evtchn_cpu_bindings(voi
}
#endif
- memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
+ memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
}
static inline void clear_evtchn(int port)
@@ -362,7 +363,7 @@ int bind_evtchn_to_irq(unsigned int evtc
irq = find_unbound_irq();
set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
- handle_level_irq, "event");
+ handle_edge_irq, "event");
evtchn_to_irq[evtchn] = irq;
irq_info[irq] = mk_evtchn_info(evtchn);
@@ -388,8 +389,8 @@ static int bind_ipi_to_irq(unsigned int
if (irq < 0)
goto out;
- set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
- handle_level_irq, "ipi");
+ set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+ handle_percpu_irq, "ipi");
bind_ipi.vcpu = cpu;
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
@@ -429,8 +430,8 @@ static int bind_virq_to_irq(unsigned int
irq = find_unbound_irq();
- set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
- handle_level_irq, "virq");
+ set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+ handle_percpu_irq, "virq");
evtchn_to_irq[evtchn] = irq;
irq_info[irq] = mk_virq_info(evtchn, virq);
@@ -535,6 +536,7 @@ int bind_ipi_to_irqhandler(enum ipi_vect
if (irq < 0)
return irq;
+ irqflags |= IRQF_NO_SUSPEND;
retval = request_irq(irq, handler, irqflags, devname, dev_id);
if (retval != 0) {
unbind_from_irq(irq);
@@ -928,6 +930,16 @@ static struct irq_chip xen_dynamic_chip
.retrigger = retrigger_dynirq,
};
+static struct irq_chip xen_percpu_chip __read_mostly = {
+ .name = "xen-percpu",
+
+ .disable = disable_dynirq,
+ .mask = disable_dynirq,
+ .unmask = enable_dynirq,
+
+ .ack = ack_dynirq,
+};
+
void __init xen_init_IRQ(void)
{
int i;
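The memset change in this file is the classic sizeof-of-a-pointer bug: cpu_evtchn_mask(0) yields a pointer, so sizeof() measured the pointer (4 or 8 bytes) rather than the bitmap it points to, leaving most of the mask untouched. A stand-alone illustration, with an invented struct size:

#include <stdio.h>
#include <string.h>

struct evtchn_mask { unsigned long bits[64]; };	/* illustrative size */

int main(void)
{
	static struct evtchn_mask mask;
	struct evtchn_mask *p = &mask;

	printf("sizeof(p)  = %zu\n", sizeof(p));  /* 8 on LP64: wrong */
	printf("sizeof(*p) = %zu\n", sizeof(*p)); /* 512: what was meant */

	memset(p, ~0, sizeof(*p));	/* the corrected form */
	return 0;
}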
file:f4ca0c7eb51cb12899132cb755679d9b362a6b18 -> file:aeabd95f698fdda0db494fbd0d99248409c2912a
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -140,7 +140,7 @@ fw-shipped-$(CONFIG_YAM) += yam/1200.bin
fw-shipped-all := $(fw-shipped-y) $(fw-shipped-m) $(fw-shipped-)
# Directories which we _might_ need to create, so we have a rule for them.
-firmware-dirs := $(sort $(patsubst %,$(objtree)/$(obj)/%/,$(dir $(fw-external-y) $(fw-shipped-all))))
+firmware-dirs := $(sort $(addprefix $(objtree)/$(obj)/,$(dir $(fw-external-y) $(fw-shipped-all))))
quiet_cmd_mkdir = MKDIR $(patsubst $(objtree)/%,%,$@)
cmd_mkdir = mkdir -p $@
file:02a2c9340573cf5946e0083187353d4517319844 -> file:b84a7695358dd2a4cb8afcd03a8b07a07e407eac
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1639,6 +1639,9 @@ SYSCALL_DEFINE3(io_submit, aio_context_t
if (unlikely(nr < 0))
return -EINVAL;
+ if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
+ nr = LONG_MAX/sizeof(*iocbpp);
+
if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
return -EFAULT;
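The io_submit() guard above clamps the user-supplied iocb count before it is multiplied by the element size: without it, nr * sizeof(*iocbpp) can wrap and make the access_ok() range check pass for a buffer far smaller than what is later walked. The same clamp in a user-space sketch, assuming an 8-byte element:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	long nr = LONG_MAX / 2 + 1;	/* hostile count from user space */
	const long elem = 8;		/* stands in for sizeof(*iocbpp) */

	if (nr > LONG_MAX / elem)
		nr = LONG_MAX / elem;	/* clamp before multiplying */

	printf("bytes = %ld\n", nr * elem);	/* cannot overflow now */
	return 0;
}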
file:c4e83537ead77501f03d7fcb3a748727f2624d5e -> file:42b60b04ea061fc01419b2330899f307e4bed895
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -723,7 +723,7 @@ static int __init init_misc_binfmt(void)
{
int err = register_filesystem(&bm_fs_type);
if (!err) {
- err = register_binfmt(&misc_format);
+ err = insert_binfmt(&misc_format);
if (err)
unregister_filesystem(&bm_fs_type);
}
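register_binfmt() appends a handler to the tail of the global formats list, while insert_binfmt() adds it at the head; switching binfmt_misc to insert_binfmt() lets user-registered interpreters claim a binary before the built-in handlers see it. The difference, reduced to a minimal linked-list sketch:

#include <stdio.h>

struct fmt {
	const char *name;
	struct fmt *next;
};

static struct fmt *formats;	/* head of the handler list */

static void register_tail(struct fmt *f)	/* register_binfmt() role */
{
	struct fmt **p = &formats;

	while (*p)
		p = &(*p)->next;
	f->next = NULL;
	*p = f;
}

static void insert_head(struct fmt *f)		/* insert_binfmt() role */
{
	f->next = formats;
	formats = f;
}

int main(void)
{
	struct fmt elf = { "elf", NULL }, misc = { "misc", NULL };

	register_tail(&elf);
	insert_head(&misc);	/* misc is now consulted first */
	printf("first handler: %s\n", formats->name);	/* "misc" */
	return 0;
}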
file:e0c9e71cc40422db3051e09741e10d1a9e738661 -> file:e696713687c5a5d50790e306e6824fc392fa6f4a
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -371,6 +371,9 @@ struct bio *bio_kmalloc(gfp_t gfp_mask,
{
struct bio *bio;
+ if (nr_iovecs > UIO_MAXIOV)
+ return NULL;
+
bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
gfp_mask);
if (unlikely(!bio))
@@ -701,8 +704,12 @@ static void bio_free_map_data(struct bio
static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
gfp_t gfp_mask)
{
- struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask);
+ struct bio_map_data *bmd;
+ if (iov_count > UIO_MAXIOV)
+ return NULL;
+
+ bmd = kmalloc(sizeof(*bmd), gfp_mask);
if (!bmd)
return NULL;
@@ -831,6 +838,12 @@ struct bio *bio_copy_user_iov(struct req
end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
start = uaddr >> PAGE_SHIFT;
+ /*
+ * Overflow, abort
+ */
+ if (end < start)
+ return ERR_PTR(-EINVAL);
+
nr_pages += end - start;
len += iov[i].iov_len;
}
@@ -958,6 +971,12 @@ static struct bio *__bio_map_user_iov(st
unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
unsigned long start = uaddr >> PAGE_SHIFT;
+ /*
+ * Overflow, abort
+ */
+ if (end < start)
+ return ERR_PTR(-EINVAL);
+
nr_pages += end - start;
/*
* buffer must be aligned to at least hardsector size for now
@@ -985,7 +1004,7 @@ static struct bio *__bio_map_user_iov(st
unsigned long start = uaddr >> PAGE_SHIFT;
const int local_nr_pages = end - start;
const int page_limit = cur_page + local_nr_pages;
-
+
ret = get_user_pages_fast(uaddr, local_nr_pages,
write_to_vm, &pages[cur_page]);
if (ret < local_nr_pages) {
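Both new checks in fs/bio.c reject iovecs whose address plus length wraps: after rounding to page indexes, the last page would come out below the first. Stand-alone version of the test:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  ((uint64_t)1 << PAGE_SHIFT)

static int page_range_ok(uint64_t uaddr, uint64_t len)
{
	uint64_t end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	uint64_t start = uaddr >> PAGE_SHIFT;

	return end >= start;	/* end < start means the addition wrapped */
}

int main(void)
{
	printf("%d\n", page_range_ok(0x1000, 0x2000));		/* 1: sane */
	printf("%d\n", page_range_ok(UINT64_MAX - 8, 4096));	/* 0: wraps */
	return 0;
}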
file:9b9e3dc56926029ec85bc6218a9c921a6ffe32c8 -> file:e65efa2f267a89f04cd69262eb0cef53e4646253
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1175,10 +1175,12 @@ static int __blkdev_get(struct block_dev
/*
* hooks: /n/, see "layering violations".
*/
- ret = devcgroup_inode_permission(bdev->bd_inode, perm);
- if (ret != 0) {
- bdput(bdev);
- return ret;
+ if (!for_part) {
+ ret = devcgroup_inode_permission(bdev->bd_inode, perm);
+ if (ret != 0) {
+ bdput(bdev);
+ return ret;
+ }
}
lock_kernel();
file:38ebe789b5628e9ddb6a5076b523612b5b988476 -> file:12d7be8df561dfd93c0a3c86f7c6c7f44887dab8
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -94,7 +94,8 @@ static int btrfs_xattr_get_acl(struct in
/*
* Needs to be called with fs_mutex held
*/
-static int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+static int btrfs_set_acl(struct btrfs_trans_handle *trans,
+ struct inode *inode, struct posix_acl *acl, int type)
{
int ret, size = 0;
const char *name;
@@ -111,12 +112,14 @@ static int btrfs_set_acl(struct inode *i
switch (type) {
case ACL_TYPE_ACCESS:
mode = inode->i_mode;
- ret = posix_acl_equiv_mode(acl, &mode);
- if (ret < 0)
- return ret;
- ret = 0;
- inode->i_mode = mode;
name = POSIX_ACL_XATTR_ACCESS;
+ if (acl) {
+ ret = posix_acl_equiv_mode(acl, &mode);
+ if (ret < 0)
+ return ret;
+ inode->i_mode = mode;
+ }
+ ret = 0;
break;
case ACL_TYPE_DEFAULT:
if (!S_ISDIR(inode->i_mode))
@@ -140,8 +143,7 @@ static int btrfs_set_acl(struct inode *i
goto out;
}
- ret = __btrfs_setxattr(inode, name, value, size, 0);
-
+ ret = __btrfs_setxattr(trans, inode, name, value, size, 0);
out:
kfree(value);
@@ -154,7 +156,7 @@ out:
static int btrfs_xattr_set_acl(struct inode *inode, int type,
const void *value, size_t size)
{
- int ret = 0;
+ int ret;
struct posix_acl *acl = NULL;
if (!is_owner_or_cap(inode))
@@ -170,7 +172,7 @@ static int btrfs_xattr_set_acl(struct in
}
}
- ret = btrfs_set_acl(inode, acl, type);
+ ret = btrfs_set_acl(NULL, inode, acl, type);
posix_acl_release(acl);
@@ -224,7 +226,8 @@ int btrfs_check_acl(struct inode *inode,
* stuff has been fixed to work with that. If the locking stuff changes, we
* need to re-evaluate the acl locking stuff.
*/
-int btrfs_init_acl(struct inode *inode, struct inode *dir)
+int btrfs_init_acl(struct btrfs_trans_handle *trans,
+ struct inode *inode, struct inode *dir)
{
struct posix_acl *acl = NULL;
int ret = 0;
@@ -249,7 +252,8 @@ int btrfs_init_acl(struct inode *inode,
mode_t mode;
if (S_ISDIR(inode->i_mode)) {
- ret = btrfs_set_acl(inode, acl, ACL_TYPE_DEFAULT);
+ ret = btrfs_set_acl(trans, inode, acl,
+ ACL_TYPE_DEFAULT);
if (ret)
goto failed;
}
@@ -264,10 +268,11 @@ int btrfs_init_acl(struct inode *inode,
inode->i_mode = mode;
if (ret > 0) {
/* we need an acl */
- ret = btrfs_set_acl(inode, clone,
+ ret = btrfs_set_acl(trans, inode, clone,
ACL_TYPE_ACCESS);
}
}
+ posix_acl_release(clone);
}
failed:
posix_acl_release(acl);
@@ -297,7 +302,7 @@ int btrfs_acl_chmod(struct inode *inode)
ret = posix_acl_chmod_masq(clone, inode->i_mode);
if (!ret)
- ret = btrfs_set_acl(inode, clone, ACL_TYPE_ACCESS);
+ ret = btrfs_set_acl(NULL, inode, clone, ACL_TYPE_ACCESS);
posix_acl_release(clone);
@@ -323,7 +328,8 @@ int btrfs_acl_chmod(struct inode *inode)
return 0;
}
-int btrfs_init_acl(struct inode *inode, struct inode *dir)
+int btrfs_init_acl(struct btrfs_trans_handle *trans,
+ struct inode *inode, struct inode *dir)
{
return 0;
}
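Besides threading a transaction handle through, this acl.c hunk fixes two independent problems: a NULL acl in the ACL_TYPE_ACCESS case is now treated as "remove the xattr" instead of being dereferenced by posix_acl_equiv_mode(), and the cloned acl in btrfs_init_acl() is released on every path rather than leaked. The shape of the NULL-safe branch, with a stub standing in for the kernel helper:

#include <stddef.h>

struct posix_acl { int dummy; };	/* opaque stand-in */

/* stub for posix_acl_equiv_mode(): 0 = representable as a plain mode */
static int equiv_mode_stub(struct posix_acl *acl, unsigned int *mode)
{
	(void)acl;
	*mode &= ~022u;		/* arbitrary illustrative tweak */
	return 0;
}

static int access_branch(struct posix_acl *acl, unsigned int *i_mode)
{
	int ret = 0;

	if (acl) {			/* NULL means "delete the ACL" */
		unsigned int mode = *i_mode;

		ret = equiv_mode_stub(acl, &mode);
		if (ret < 0)
			return ret;
		*i_mode = mode;
	}
	return ret;
}

int main(void)
{
	unsigned int mode = 0644;

	return access_branch(NULL, &mode);	/* safe: 0, mode untouched */
}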
file:f6783a42f010965e344fddc68c2689a7d4a9fc14 -> file:3f1f50d9d916cf5e3c095101a3af0c220035f625
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -44,9 +44,6 @@ struct btrfs_inode {
*/
struct extent_io_tree io_failure_tree;
- /* held while inesrting or deleting extents from files */
- struct mutex extent_mutex;
-
/* held while logging the inode in tree-log.c */
struct mutex log_mutex;
@@ -166,7 +163,7 @@ static inline struct btrfs_inode *BTRFS_
static inline void btrfs_i_size_write(struct inode *inode, u64 size)
{
- inode->i_size = size;
+ i_size_write(inode, size);
BTRFS_I(inode)->disk_i_size = size;
}
file:ec96f3a6d536640919dd25a08c7ed22e4423ef15 -> file:c4bc570a396eebf5ede775dc39b004c9ff493227
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -37,6 +37,11 @@ static int balance_node_right(struct btr
struct extent_buffer *src_buf);
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_path *path, int level, int slot);
+static int setup_items_for_insert(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_path *path,
+ struct btrfs_key *cpu_key, u32 *data_size,
+ u32 total_data, u32 total_size, int nr);
+
struct btrfs_path *btrfs_alloc_path(void)
{
@@ -451,9 +456,8 @@ static noinline int __btrfs_cow_block(st
extent_buffer_get(cow);
spin_unlock(&root->node_lock);
- btrfs_free_extent(trans, root, buf->start, buf->len,
- parent_start, root->root_key.objectid,
- level, 0);
+ btrfs_free_tree_block(trans, root, buf->start, buf->len,
+ parent_start, root->root_key.objectid, level);
free_extent_buffer(buf);
add_root_to_dirty_list(root);
} else {
@@ -468,9 +472,8 @@ static noinline int __btrfs_cow_block(st
btrfs_set_node_ptr_generation(parent, parent_slot,
trans->transid);
btrfs_mark_buffer_dirty(parent);
- btrfs_free_extent(trans, root, buf->start, buf->len,
- parent_start, root->root_key.objectid,
- level, 0);
+ btrfs_free_tree_block(trans, root, buf->start, buf->len,
+ parent_start, root->root_key.objectid, level);
}
if (unlock_orig)
btrfs_tree_unlock(buf);
@@ -1030,8 +1033,8 @@ static noinline int balance_level(struct
btrfs_tree_unlock(mid);
/* once for the path */
free_extent_buffer(mid);
- ret = btrfs_free_extent(trans, root, mid->start, mid->len,
- 0, root->root_key.objectid, level, 1);
+ ret = btrfs_free_tree_block(trans, root, mid->start, mid->len,
+ 0, root->root_key.objectid, level);
/* once for the root ptr */
free_extent_buffer(mid);
return ret;
@@ -1095,10 +1098,10 @@ static noinline int balance_level(struct
1);
if (wret)
ret = wret;
- wret = btrfs_free_extent(trans, root, bytenr,
- blocksize, 0,
- root->root_key.objectid,
- level, 0);
+ wret = btrfs_free_tree_block(trans, root,
+ bytenr, blocksize, 0,
+ root->root_key.objectid,
+ level);
if (wret)
ret = wret;
} else {
@@ -1143,9 +1146,8 @@ static noinline int balance_level(struct
wret = del_ptr(trans, root, path, level + 1, pslot);
if (wret)
ret = wret;
- wret = btrfs_free_extent(trans, root, bytenr, blocksize,
- 0, root->root_key.objectid,
- level, 0);
+ wret = btrfs_free_tree_block(trans, root, bytenr, blocksize,
+ 0, root->root_key.objectid, level);
if (wret)
ret = wret;
} else {
@@ -2997,75 +2999,85 @@ again:
return ret;
}
-/*
- * This function splits a single item into two items,
- * giving 'new_key' to the new item and splitting the
- * old one at split_offset (from the start of the item).
- *
- * The path may be released by this operation. After
- * the split, the path is pointing to the old item. The
- * new item is going to be in the same node as the old one.
- *
- * Note, the item being split must be smaller enough to live alone on
- * a tree block with room for one extra struct btrfs_item
- *
- * This allows us to split the item in place, keeping a lock on the
- * leaf the entire time.
- */
-int btrfs_split_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_key *new_key,
- unsigned long split_offset)
+static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path, int ins_len)
{
- u32 item_size;
+ struct btrfs_key key;
struct extent_buffer *leaf;
- struct btrfs_key orig_key;
- struct btrfs_item *item;
- struct btrfs_item *new_item;
- int ret = 0;
- int slot;
- u32 nritems;
- u32 orig_offset;
- struct btrfs_disk_key disk_key;
- char *buf;
+ struct btrfs_file_extent_item *fi;
+ u64 extent_len = 0;
+ u32 item_size;
+ int ret;
leaf = path->nodes[0];
- btrfs_item_key_to_cpu(leaf, &orig_key, path->slots[0]);
- if (btrfs_leaf_free_space(root, leaf) >= sizeof(struct btrfs_item))
- goto split;
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+
+ BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
+ key.type != BTRFS_EXTENT_CSUM_KEY);
+
+ if (btrfs_leaf_free_space(root, leaf) >= ins_len)
+ return 0;
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ if (key.type == BTRFS_EXTENT_DATA_KEY) {
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ extent_len = btrfs_file_extent_num_bytes(leaf, fi);
+ }
btrfs_release_path(root, path);
- path->search_for_split = 1;
path->keep_locks = 1;
-
- ret = btrfs_search_slot(trans, root, &orig_key, path, 0, 1);
+ path->search_for_split = 1;
+ ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
path->search_for_split = 0;
+ if (ret < 0)
+ goto err;
+ ret = -EAGAIN;
+ leaf = path->nodes[0];
/* if our item isn't there or got smaller, return now */
- if (ret != 0 || item_size != btrfs_item_size_nr(path->nodes[0],
- path->slots[0])) {
- path->keep_locks = 0;
- return -EAGAIN;
+ if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
+ goto err;
+
+ if (key.type == BTRFS_EXTENT_DATA_KEY) {
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
+ goto err;
}
btrfs_set_path_blocking(path);
- ret = split_leaf(trans, root, &orig_key, path,
- sizeof(struct btrfs_item), 1);
- path->keep_locks = 0;
+ ret = split_leaf(trans, root, &key, path, ins_len, 1);
BUG_ON(ret);
+ path->keep_locks = 0;
btrfs_unlock_up_safe(path, 1);
+ return 0;
+err:
+ path->keep_locks = 0;
+ return ret;
+}
+
+static noinline int split_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_key *new_key,
+ unsigned long split_offset)
+{
+ struct extent_buffer *leaf;
+ struct btrfs_item *item;
+ struct btrfs_item *new_item;
+ int slot;
+ char *buf;
+ u32 nritems;
+ u32 item_size;
+ u32 orig_offset;
+ struct btrfs_disk_key disk_key;
+
leaf = path->nodes[0];
BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
-split:
- /*
- * make sure any changes to the path from split_leaf leave it
- * in a blocking state
- */
btrfs_set_path_blocking(path);
item = btrfs_item_nr(leaf, path->slots[0]);
@@ -3073,19 +3085,19 @@ split:
item_size = btrfs_item_size(leaf, item);
buf = kmalloc(item_size, GFP_NOFS);
+ if (!buf)
+ return -ENOMEM;
+
read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
path->slots[0]), item_size);
- slot = path->slots[0] + 1;
- leaf = path->nodes[0];
+ slot = path->slots[0] + 1;
nritems = btrfs_header_nritems(leaf);
-
if (slot != nritems) {
/* shift the items */
memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
- btrfs_item_nr_offset(slot),
- (nritems - slot) * sizeof(struct btrfs_item));
-
+ btrfs_item_nr_offset(slot),
+ (nritems - slot) * sizeof(struct btrfs_item));
}
btrfs_cpu_key_to_disk(&disk_key, new_key);
@@ -3113,16 +3125,81 @@ split:
item_size - split_offset);
btrfs_mark_buffer_dirty(leaf);
- ret = 0;
- if (btrfs_leaf_free_space(root, leaf) < 0) {
- btrfs_print_leaf(root, leaf);
- BUG();
- }
+ BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
kfree(buf);
+ return 0;
+}
+
+/*
+ * This function splits a single item into two items,
+ * giving 'new_key' to the new item and splitting the
+ * old one at split_offset (from the start of the item).
+ *
+ * The path may be released by this operation. After
+ * the split, the path is pointing to the old item. The
+ * new item is going to be in the same node as the old one.
+ *
+ * Note, the item being split must be small enough to live alone on
+ * a tree block with room for one extra struct btrfs_item
+ *
+ * This allows us to split the item in place, keeping a lock on the
+ * leaf the entire time.
+ */
+int btrfs_split_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_key *new_key,
+ unsigned long split_offset)
+{
+ int ret;
+ ret = setup_leaf_for_split(trans, root, path,
+ sizeof(struct btrfs_item));
+ if (ret)
+ return ret;
+
+ ret = split_item(trans, root, path, new_key, split_offset);
return ret;
}
/*
+ * This function duplicates an item, giving 'new_key' to the new item.
+ * It guarantees both items live in the same tree leaf and the new item
+ * is contiguous with the original item.
+ *
+ * This allows us to split a file extent in place, keeping a lock on the
+ * leaf the entire time.
+ */
+int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_key *new_key)
+{
+ struct extent_buffer *leaf;
+ int ret;
+ u32 item_size;
+
+ leaf = path->nodes[0];
+ item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ ret = setup_leaf_for_split(trans, root, path,
+ item_size + sizeof(struct btrfs_item));
+ if (ret)
+ return ret;
+
+ path->slots[0]++;
+ ret = setup_items_for_insert(trans, root, path, new_key, &item_size,
+ item_size, item_size +
+ sizeof(struct btrfs_item), 1);
+ BUG_ON(ret);
+
+ leaf = path->nodes[0];
+ memcpy_extent_buffer(leaf,
+ btrfs_item_ptr_offset(leaf, path->slots[0]),
+ btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
+ item_size);
+ return 0;
+}
+
+/*
* make the item pointed to by the path smaller. new_size indicates
* how small to make it, and from_end tells us if we just chop bytes
* off the end of the item or if we shift the item to chop bytes off
@@ -3714,8 +3791,8 @@ static noinline int btrfs_del_leaf(struc
*/
btrfs_unlock_up_safe(path, 0);
- ret = btrfs_free_extent(trans, root, leaf->start, leaf->len,
- 0, root->root_key.objectid, 0, 0);
+ ret = btrfs_free_tree_block(trans, root, leaf->start, leaf->len,
+ 0, root->root_key.objectid, 0);
return ret;
}
/*
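btrfs_duplicate_item() above is built from two existing moves: make room in the leaf (setup_leaf_for_split(), which may return -EAGAIN if the path had to be dropped), insert an empty item under the new key right after the original, then copy the old item's body into it, all without giving up the leaf lock. The array analogue of that insert-then-copy step, with the fixed-size array playing the role of the leaf's item area:

#include <stdio.h>
#include <string.h>

/* duplicate items[slot] into items[slot + 1], shifting the tail up */
static int duplicate_at(int *items, int *nritems, int cap, int slot)
{
	if (*nritems >= cap)
		return -1;	/* caller must split the leaf first */

	memmove(&items[slot + 2], &items[slot + 1],
		(*nritems - slot - 1) * sizeof(items[0]));
	items[slot + 1] = items[slot];	/* the copy of the item body */
	(*nritems)++;
	return 0;
}

int main(void)
{
	int items[8] = { 10, 20, 30 };
	int n = 3;

	duplicate_at(items, &n, 8, 1);
	printf("%d %d %d %d\n", items[0], items[1], items[2], items[3]);
	/* prints: 10 20 20 30 */
	return 0;
}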
file:444b3e9b92a4b88391fd610a76346530436baa01 -> file:9f806dd04c2704899bf28e5324eebaa14a923e27
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -310,6 +310,9 @@ struct btrfs_header {
#define BTRFS_MAX_INLINE_DATA_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \
sizeof(struct btrfs_item) - \
sizeof(struct btrfs_file_extent_item))
+#define BTRFS_MAX_XATTR_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \
+ sizeof(struct btrfs_item) -\
+ sizeof(struct btrfs_dir_item))
/*
@@ -859,8 +862,9 @@ struct btrfs_fs_info {
struct mutex ordered_operations_mutex;
struct rw_semaphore extent_commit_sem;
- struct rw_semaphore subvol_sem;
+ struct rw_semaphore cleanup_work_sem;
+ struct rw_semaphore subvol_sem;
struct srcu_struct subvol_srcu;
struct list_head trans_list;
@@ -868,6 +872,9 @@ struct btrfs_fs_info {
struct list_head dead_roots;
struct list_head caching_block_groups;
+ spinlock_t delayed_iput_lock;
+ struct list_head delayed_iputs;
+
atomic_t nr_async_submits;
atomic_t async_submit_draining;
atomic_t nr_async_bios;
@@ -1034,12 +1041,12 @@ struct btrfs_root {
int ref_cows;
int track_dirty;
int in_radix;
+ int clean_orphans;
u64 defrag_trans_start;
struct btrfs_key defrag_progress;
struct btrfs_key defrag_max;
int defrag_running;
- int defrag_level;
char *name;
int in_sysfs;
@@ -1975,6 +1982,10 @@ struct extent_buffer *btrfs_alloc_free_b
u64 parent, u64 root_objectid,
struct btrfs_disk_key *key, int level,
u64 hint, u64 empty_size);
+int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ u64 bytenr, u32 blocksize,
+ u64 parent, u64 root_objectid, int level);
struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u32 blocksize,
@@ -2089,6 +2100,10 @@ int btrfs_split_item(struct btrfs_trans_
struct btrfs_path *path,
struct btrfs_key *new_key,
unsigned long split_offset);
+int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_key *new_key);
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
*root, struct btrfs_key *key, struct btrfs_path *p, int
ins_len, int cow);
@@ -2196,9 +2211,10 @@ int btrfs_delete_one_dir_name(struct btr
struct btrfs_path *path,
struct btrfs_dir_item *di);
int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, const char *name,
- u16 name_len, const void *data, u16 data_len,
- u64 dir);
+ struct btrfs_root *root,
+ struct btrfs_path *path, u64 objectid,
+ const char *name, u16 name_len,
+ const void *data, u16 data_len);
struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, u64 dir,
@@ -2292,7 +2308,7 @@ int btrfs_truncate_inode_items(struct bt
struct inode *inode, u64 new_size,
u32 min_type);
-int btrfs_start_delalloc_inodes(struct btrfs_root *root);
+int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end);
int btrfs_writepages(struct address_space *mapping,
struct writeback_control *wbc);
@@ -2332,6 +2348,8 @@ int btrfs_orphan_del(struct btrfs_trans_
void btrfs_orphan_cleanup(struct btrfs_root *root);
int btrfs_cont_expand(struct inode *inode, loff_t size);
int btrfs_invalidate_inodes(struct btrfs_root *root);
+void btrfs_add_delayed_iput(struct inode *inode);
+void btrfs_run_delayed_iputs(struct btrfs_root *root);
extern const struct dentry_operations btrfs_dentry_operations;
/* ioctl.c */
@@ -2345,12 +2363,9 @@ int btrfs_drop_extent_cache(struct inode
int skip_pinned);
int btrfs_check_file(struct btrfs_root *root, struct inode *inode);
extern const struct file_operations btrfs_file_operations;
-int btrfs_drop_extents(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode,
- u64 start, u64 end, u64 locked_end,
- u64 inline_limit, u64 *hint_block, int drop_cache);
+int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
+ u64 start, u64 end, u64 *hint_byte, int drop_cache);
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
struct inode *inode, u64 start, u64 end);
int btrfs_release_file(struct inode *inode, struct file *file);
@@ -2380,7 +2395,8 @@ int btrfs_check_acl(struct inode *inode,
#else
#define btrfs_check_acl NULL
#endif
-int btrfs_init_acl(struct inode *inode, struct inode *dir);
+int btrfs_init_acl(struct btrfs_trans_handle *trans,
+ struct inode *inode, struct inode *dir);
int btrfs_acl_chmod(struct inode *inode);
/* relocation.c */
file:f3a6075519ccc1d96e42157f95769a5ad6641a12 -> file:e9103b3baa49f4309c94eca9e0d7b7069665359c
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -68,12 +68,12 @@ static struct btrfs_dir_item *insert_wit
* into the tree
*/
int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, const char *name,
- u16 name_len, const void *data, u16 data_len,
- u64 dir)
+ struct btrfs_root *root,
+ struct btrfs_path *path, u64 objectid,
+ const char *name, u16 name_len,
+ const void *data, u16 data_len)
{
int ret = 0;
- struct btrfs_path *path;
struct btrfs_dir_item *dir_item;
unsigned long name_ptr, data_ptr;
struct btrfs_key key, location;
@@ -81,15 +81,11 @@ int btrfs_insert_xattr_item(struct btrfs
struct extent_buffer *leaf;
u32 data_size;
- key.objectid = dir;
+ BUG_ON(name_len + data_len > BTRFS_MAX_XATTR_SIZE(root));
+
+ key.objectid = objectid;
btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
key.offset = btrfs_name_hash(name, name_len);
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
- if (name_len + data_len + sizeof(struct btrfs_dir_item) >
- BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item))
- return -ENOSPC;
data_size = sizeof(*dir_item) + name_len + data_len;
dir_item = insert_with_overflow(trans, root, path, &key, data_size,
@@ -117,7 +113,6 @@ int btrfs_insert_xattr_item(struct btrfs
write_extent_buffer(leaf, data, data_ptr, data_len);
btrfs_mark_buffer_dirty(path->nodes[0]);
- btrfs_free_path(path);
return ret;
}
file:02b6afbd745020fb51bf43d88c93025459687e12 -> file:2b59201b955ca533bcb10a251df07ff2d7c549d2
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -892,6 +892,8 @@ static int __setup_root(u32 nodesize, u3
root->stripesize = stripesize;
root->ref_cows = 0;
root->track_dirty = 0;
+ root->in_radix = 0;
+ root->clean_orphans = 0;
root->fs_info = fs_info;
root->objectid = objectid;
@@ -928,7 +930,6 @@ static int __setup_root(u32 nodesize, u3
root->defrag_trans_start = fs_info->generation;
init_completion(&root->kobj_unregister);
root->defrag_running = 0;
- root->defrag_level = 0;
root->root_key.objectid = objectid;
root->anon_super.s_root = NULL;
root->anon_super.s_dev = 0;
@@ -980,12 +981,12 @@ int btrfs_free_log_root_tree(struct btrf
while (1) {
ret = find_first_extent_bit(&log_root_tree->dirty_log_pages,
- 0, &start, &end, EXTENT_DIRTY);
+ 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW);
if (ret)
break;
- clear_extent_dirty(&log_root_tree->dirty_log_pages,
- start, end, GFP_NOFS);
+ clear_extent_bits(&log_root_tree->dirty_log_pages, start, end,
+ EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
}
eb = fs_info->log_root_tree->node;
@@ -1210,8 +1211,10 @@ again:
ret = radix_tree_insert(&fs_info->fs_roots_radix,
(unsigned long)root->root_key.objectid,
root);
- if (ret == 0)
+ if (ret == 0) {
root->in_radix = 1;
+ root->clean_orphans = 1;
+ }
spin_unlock(&fs_info->fs_roots_radix_lock);
radix_tree_preload_end();
if (ret) {
@@ -1225,10 +1228,6 @@ again:
ret = btrfs_find_dead_roots(fs_info->tree_root,
root->root_key.objectid);
WARN_ON(ret);
-
- if (!(fs_info->sb->s_flags & MS_RDONLY))
- btrfs_orphan_cleanup(root);
-
return root;
fail:
free_fs_root(root);
@@ -1477,6 +1476,7 @@ static int cleaner_kthread(void *arg)
if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
mutex_trylock(&root->fs_info->cleaner_mutex)) {
+ btrfs_run_delayed_iputs(root);
btrfs_clean_old_snapshots(root);
mutex_unlock(&root->fs_info->cleaner_mutex);
}
@@ -1606,6 +1606,7 @@ struct btrfs_root *open_ctree(struct sup
INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
INIT_LIST_HEAD(&fs_info->trans_list);
INIT_LIST_HEAD(&fs_info->dead_roots);
+ INIT_LIST_HEAD(&fs_info->delayed_iputs);
INIT_LIST_HEAD(&fs_info->hashers);
INIT_LIST_HEAD(&fs_info->delalloc_inodes);
INIT_LIST_HEAD(&fs_info->ordered_operations);
@@ -1614,6 +1615,7 @@ struct btrfs_root *open_ctree(struct sup
spin_lock_init(&fs_info->new_trans_lock);
spin_lock_init(&fs_info->ref_cache_lock);
spin_lock_init(&fs_info->fs_roots_radix_lock);
+ spin_lock_init(&fs_info->delayed_iput_lock);
init_completion(&fs_info->kobj_unregister);
fs_info->tree_root = tree_root;
@@ -1689,6 +1691,7 @@ struct btrfs_root *open_ctree(struct sup
mutex_init(&fs_info->cleaner_mutex);
mutex_init(&fs_info->volume_mutex);
init_rwsem(&fs_info->extent_commit_sem);
+ init_rwsem(&fs_info->cleanup_work_sem);
init_rwsem(&fs_info->subvol_sem);
btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
@@ -1979,7 +1982,12 @@ struct btrfs_root *open_ctree(struct sup
if (!(sb->s_flags & MS_RDONLY)) {
ret = btrfs_recover_relocation(tree_root);
- BUG_ON(ret);
+ if (ret < 0) {
+ printk(KERN_WARNING
+ "btrfs: failed to recover relocation\n");
+ err = -EINVAL;
+ goto fail_trans_kthread;
+ }
}
location.objectid = BTRFS_FS_TREE_OBJECTID;
@@ -1990,6 +1998,12 @@ struct btrfs_root *open_ctree(struct sup
if (!fs_info->fs_root)
goto fail_trans_kthread;
+ if (!(sb->s_flags & MS_RDONLY)) {
+ down_read(&fs_info->cleanup_work_sem);
+ btrfs_orphan_cleanup(fs_info->fs_root);
+ up_read(&fs_info->cleanup_work_sem);
+ }
+
return tree_root;
fail_trans_kthread:
@@ -2386,8 +2400,14 @@ int btrfs_commit_super(struct btrfs_root
int ret;
mutex_lock(&root->fs_info->cleaner_mutex);
+ btrfs_run_delayed_iputs(root);
btrfs_clean_old_snapshots(root);
mutex_unlock(&root->fs_info->cleaner_mutex);
+
+ /* wait until ongoing cleanup work is done */
+ down_write(&root->fs_info->cleanup_work_sem);
+ up_write(&root->fs_info->cleanup_work_sem);
+
trans = btrfs_start_transaction(root, 1);
ret = btrfs_commit_transaction(trans, root);
BUG_ON(ret);
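The disk-io.c changes wire up a delayed-iput list: contexts that cannot safely drop the last reference to an inode queue it under delayed_iput_lock, and the cleaner kthread (and the commit path above) drain the list later. A toy version of that queue-and-drain shape, using a pthread mutex in place of the spinlock and an integer standing in for the inode:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct delayed {
	struct delayed *next;
	int inode_nr;			/* stand-in for the inode */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct delayed *pending;

static void add_delayed(int inode_nr)	/* btrfs_add_delayed_iput() role */
{
	struct delayed *d = malloc(sizeof(*d));

	d->inode_nr = inode_nr;
	pthread_mutex_lock(&lock);
	d->next = pending;
	pending = d;
	pthread_mutex_unlock(&lock);
}

static void run_delayed(void)		/* btrfs_run_delayed_iputs() role */
{
	pthread_mutex_lock(&lock);
	while (pending) {
		struct delayed *d = pending;

		pending = d->next;
		pthread_mutex_unlock(&lock);
		printf("iput inode %d\n", d->inode_nr);	/* the real drop */
		free(d);
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	add_delayed(42);
	add_delayed(43);
	run_delayed();
	return 0;
}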
file:94627c4cc193343aacc82b3aca1a6e9eaaefd8dd -> file:559f72489b3bf02b4477369da854bf371cbbd0e4
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -83,6 +83,17 @@ static int block_group_bits(struct btrfs
return (cache->flags & bits) == bits;
}
+void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
+{
+ atomic_inc(&cache->count);
+}
+
+void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
+{
+ if (atomic_dec_and_test(&cache->count))
+ kfree(cache);
+}
+
/*
* this adds the block group to the fs_info rb tree for the block group
* cache
@@ -156,7 +167,7 @@ block_group_cache_tree_search(struct btr
}
}
if (ret)
- atomic_inc(&ret->count);
+ btrfs_get_block_group(ret);
spin_unlock(&info->block_group_cache_lock);
return ret;
@@ -195,6 +206,14 @@ static int exclude_super_stripes(struct
int stripe_len;
int i, nr, ret;
+ if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
+ stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
+ cache->bytes_super += stripe_len;
+ ret = add_excluded_extent(root, cache->key.objectid,
+ stripe_len);
+ BUG_ON(ret);
+ }
+
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
bytenr = btrfs_sb_offset(i);
ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
@@ -255,7 +274,7 @@ static u64 add_new_free_space(struct btr
if (ret)
break;
- if (extent_start == start) {
+ if (extent_start <= start) {
start = extent_end + 1;
} else if (extent_start > start && extent_start < end) {
size = extent_start - start;
@@ -399,6 +418,8 @@ err:
put_caching_control(caching_ctl);
atomic_dec(&block_group->space_info->caching_threads);
+ btrfs_put_block_group(block_group);
+
return 0;
}
@@ -439,6 +460,7 @@ static int cache_block_group(struct btrf
up_write(&fs_info->extent_commit_sem);
atomic_inc(&cache->space_info->caching_threads);
+ btrfs_get_block_group(cache);
tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
cache->key.objectid);
@@ -478,12 +500,6 @@ struct btrfs_block_group_cache *btrfs_lo
return cache;
}
-void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
-{
- if (atomic_dec_and_test(&cache->count))
- kfree(cache);
-}
-
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
u64 flags)
{
@@ -2574,7 +2590,7 @@ next_block_group(struct btrfs_root *root
if (node) {
cache = rb_entry(node, struct btrfs_block_group_cache,
cache_node);
- atomic_inc(&cache->count);
+ btrfs_get_block_group(cache);
} else
cache = NULL;
spin_unlock(&root->fs_info->block_group_cache_lock);
@@ -2880,9 +2896,9 @@ static noinline void flush_delalloc_asyn
root = async->root;
info = async->info;
- btrfs_start_delalloc_inodes(root);
+ btrfs_start_delalloc_inodes(root, 0);
wake_up(&info->flush_wait);
- btrfs_wait_ordered_extents(root, 0);
+ btrfs_wait_ordered_extents(root, 0, 0);
spin_lock(&info->lock);
info->flushing = 0;
@@ -2956,8 +2972,8 @@ static void flush_delalloc(struct btrfs_
return;
flush:
- btrfs_start_delalloc_inodes(root);
- btrfs_wait_ordered_extents(root, 0);
+ btrfs_start_delalloc_inodes(root, 0);
+ btrfs_wait_ordered_extents(root, 0, 0);
spin_lock(&info->lock);
info->flushing = 0;
@@ -3454,14 +3470,6 @@ static int update_block_group(struct btr
else
old_val -= num_bytes;
btrfs_set_super_bytes_used(&info->super_copy, old_val);
-
- /* block accounting for root item */
- old_val = btrfs_root_used(&root->root_item);
- if (alloc)
- old_val += num_bytes;
- else
- old_val -= num_bytes;
- btrfs_set_root_used(&root->root_item, old_val);
spin_unlock(&info->delalloc_lock);
while (total) {
@@ -4049,6 +4057,21 @@ int btrfs_free_extent(struct btrfs_trans
return ret;
}
+int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ u64 bytenr, u32 blocksize,
+ u64 parent, u64 root_objectid, int level)
+{
+ u64 used;
+ spin_lock(&root->node_lock);
+ used = btrfs_root_used(&root->root_item) - blocksize;
+ btrfs_set_root_used(&root->root_item, used);
+ spin_unlock(&root->node_lock);
+
+ return btrfs_free_extent(trans, root, bytenr, blocksize,
+ parent, root_objectid, level, 0);
+}
+
static u64 stripe_align(struct btrfs_root *root, u64 val)
{
u64 mask = ((u64)root->stripesize - 1);
@@ -4212,7 +4235,7 @@ search:
u64 offset;
int cached;
- atomic_inc(&block_group->count);
+ btrfs_get_block_group(block_group);
search_start = block_group->key.objectid;
have_block_group:
@@ -4300,7 +4323,7 @@ have_block_group:
btrfs_put_block_group(block_group);
block_group = last_ptr->block_group;
- atomic_inc(&block_group->count);
+ btrfs_get_block_group(block_group);
spin_unlock(&last_ptr->lock);
spin_unlock(&last_ptr->refill_lock);
@@ -4578,7 +4601,6 @@ int btrfs_reserve_extent(struct btrfs_tr
{
int ret;
u64 search_start = 0;
- struct btrfs_fs_info *info = root->fs_info;
data = btrfs_get_alloc_profile(root, data);
again:
@@ -4586,17 +4608,9 @@ again:
* the only place that sets empty_size is btrfs_realloc_node, which
* is not called recursively on allocations
*/
- if (empty_size || root->ref_cows) {
- if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
- ret = do_chunk_alloc(trans, root->fs_info->extent_root,
- 2 * 1024 * 1024,
- BTRFS_BLOCK_GROUP_METADATA |
- (info->metadata_alloc_profile &
- info->avail_metadata_alloc_bits), 0);
- }
+ if (empty_size || root->ref_cows)
ret = do_chunk_alloc(trans, root->fs_info->extent_root,
num_bytes + 2 * 1024 * 1024, data, 0);
- }
WARN_ON(num_bytes < root->sectorsize);
ret = find_free_extent(trans, root, num_bytes, empty_size,
@@ -4897,6 +4911,14 @@ static int alloc_tree_block(struct btrfs
extent_op);
BUG_ON(ret);
}
+
+ if (root_objectid == root->root_key.objectid) {
+ u64 used;
+ spin_lock(&root->node_lock);
+ used = btrfs_root_used(&root->root_item) + num_bytes;
+ btrfs_set_root_used(&root->root_item, used);
+ spin_unlock(&root->node_lock);
+ }
return ret;
}
@@ -4919,8 +4941,16 @@ struct extent_buffer *btrfs_init_new_buf
btrfs_set_buffer_uptodate(buf);
if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
- set_extent_dirty(&root->dirty_log_pages, buf->start,
- buf->start + buf->len - 1, GFP_NOFS);
+ /*
+ * we allow two log transactions at a time; use different
+ * EXTENT bits to differentiate dirty pages.
+ */
+ if (root->log_transid % 2 == 0)
+ set_extent_dirty(&root->dirty_log_pages, buf->start,
+ buf->start + buf->len - 1, GFP_NOFS);
+ else
+ set_extent_new(&root->dirty_log_pages, buf->start,
+ buf->start + buf->len - 1, GFP_NOFS);
} else {
set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
buf->start + buf->len - 1, GFP_NOFS);
@@ -5372,10 +5402,6 @@ static noinline int walk_down_tree(struc
int ret;
while (level >= 0) {
- if (path->slots[level] >=
- btrfs_header_nritems(path->nodes[level]))
- break;
-
ret = walk_down_proc(trans, root, path, wc, lookup_info);
if (ret > 0)
break;
@@ -5383,6 +5409,10 @@ static noinline int walk_down_tree(struc
if (level == 0)
break;
+ if (path->slots[level] >=
+ btrfs_header_nritems(path->nodes[level]))
+ break;
+
ret = do_walk_down(trans, root, path, wc, &lookup_info);
if (ret > 0) {
path->slots[level]++;
@@ -7373,9 +7403,7 @@ int btrfs_free_block_groups(struct btrfs
wait_block_group_cache_done(block_group);
btrfs_remove_free_space_cache(block_group);
-
- WARN_ON(atomic_read(&block_group->count) != 1);
- kfree(block_group);
+ btrfs_put_block_group(block_group);
spin_lock(&info->block_group_cache_lock);
}
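This extent-tree.c hunk replaces bare atomic_inc()/kfree() with matched btrfs_get_block_group()/btrfs_put_block_group() helpers and gives the caching kthread its own reference, so the block group can no longer be freed while the thread is still using it. The get/put discipline in a stand-alone sketch:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct group {
	atomic_int count;
	/* ... payload ... */
};

static void group_get(struct group *g)	/* btrfs_get_block_group() role */
{
	atomic_fetch_add(&g->count, 1);
}

static void group_put(struct group *g)	/* btrfs_put_block_group() role */
{
	if (atomic_fetch_sub(&g->count, 1) == 1) {	/* last reference */
		printf("freeing\n");
		free(g);
	}
}

int main(void)
{
	struct group *g = malloc(sizeof(*g));

	atomic_init(&g->count, 1);	/* creator's reference */
	group_get(g);			/* caching thread takes one */
	group_put(g);			/* caching thread drops it */
	group_put(g);			/* final put frees */
	return 0;
}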
file:96577e8bf9fdb62819ab2dbd5f9da91200624596 -> file:b177ed3196126d9fe8e5d0f1507075a421e535b9
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3165,10 +3165,9 @@ struct extent_buffer *alloc_extent_buffe
spin_unlock(&tree->buffer_lock);
goto free_eb;
}
- spin_unlock(&tree->buffer_lock);
-
/* add one reference for the tree */
atomic_inc(&eb->refs);
+ spin_unlock(&tree->buffer_lock);
return eb;
free_eb:
file:06550affbd27ea9181bd08db8d1ea18d9e8f42c0 -> file:a7fd9f3a750abe05ebd586e6db1dfda729a64aff
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -179,18 +179,14 @@ int btrfs_drop_extent_cache(struct inode
}
flags = em->flags;
if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
- if (em->start <= start &&
- (!testend || em->start + em->len >= start + len)) {
+ if (testend && em->start + em->len >= start + len) {
free_extent_map(em);
write_unlock(&em_tree->lock);
break;
}
- if (start < em->start) {
- len = em->start - start;
- } else {
+ start = em->start + em->len;
+ if (testend)
len = start + len - (em->start + em->len);
- start = em->start + em->len;
- }
free_extent_map(em);
write_unlock(&em_tree->lock);
continue;
@@ -265,324 +261,253 @@ int btrfs_drop_extent_cache(struct inode
* If an extent intersects the range but is not entirely inside the range
* it is either truncated or split. Anything entirely inside the range
* is deleted from the tree.
- *
- * inline_limit is used to tell this code which offsets in the file to keep
- * if they contain inline extents.
*/
-noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode,
- u64 start, u64 end, u64 locked_end,
- u64 inline_limit, u64 *hint_byte, int drop_cache)
+int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
+ u64 start, u64 end, u64 *hint_byte, int drop_cache)
{
- u64 extent_end = 0;
- u64 search_start = start;
- u64 ram_bytes = 0;
- u64 disk_bytenr = 0;
- u64 orig_locked_end = locked_end;
- u8 compression;
- u8 encryption;
- u16 other_encoding = 0;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_buffer *leaf;
- struct btrfs_file_extent_item *extent;
+ struct btrfs_file_extent_item *fi;
struct btrfs_path *path;
struct btrfs_key key;
- struct btrfs_file_extent_item old;
- int keep;
- int slot;
- int bookend;
- int found_type = 0;
- int found_extent;
- int found_inline;
+ struct btrfs_key new_key;
+ u64 search_start = start;
+ u64 disk_bytenr = 0;
+ u64 num_bytes = 0;
+ u64 extent_offset = 0;
+ u64 extent_end = 0;
+ int del_nr = 0;
+ int del_slot = 0;
+ int extent_type;
int recow;
int ret;
- inline_limit = 0;
if (drop_cache)
btrfs_drop_extent_cache(inode, start, end - 1, 0);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
+
while (1) {
recow = 0;
- btrfs_release_path(root, path);
ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
search_start, -1);
if (ret < 0)
- goto out;
- if (ret > 0) {
- if (path->slots[0] == 0) {
- ret = 0;
- goto out;
- }
- path->slots[0]--;
+ break;
+ if (ret > 0 && path->slots[0] > 0 && search_start == start) {
+ leaf = path->nodes[0];
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
+ if (key.objectid == inode->i_ino &&
+ key.type == BTRFS_EXTENT_DATA_KEY)
+ path->slots[0]--;
}
+ ret = 0;
next_slot:
- keep = 0;
- bookend = 0;
- found_extent = 0;
- found_inline = 0;
- compression = 0;
- encryption = 0;
- extent = NULL;
leaf = path->nodes[0];
- slot = path->slots[0];
- ret = 0;
- btrfs_item_key_to_cpu(leaf, &key, slot);
- if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY &&
- key.offset >= end) {
- goto out;
- }
- if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
- key.objectid != inode->i_ino) {
- goto out;
- }
- if (recow) {
- search_start = max(key.offset, start);
- continue;
- }
- if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
- extent = btrfs_item_ptr(leaf, slot,
- struct btrfs_file_extent_item);
- found_type = btrfs_file_extent_type(leaf, extent);
- compression = btrfs_file_extent_compression(leaf,
- extent);
- encryption = btrfs_file_extent_encryption(leaf,
- extent);
- other_encoding = btrfs_file_extent_other_encoding(leaf,
- extent);
- if (found_type == BTRFS_FILE_EXTENT_REG ||
- found_type == BTRFS_FILE_EXTENT_PREALLOC) {
- extent_end =
- btrfs_file_extent_disk_bytenr(leaf,
- extent);
- if (extent_end)
- *hint_byte = extent_end;
-
- extent_end = key.offset +
- btrfs_file_extent_num_bytes(leaf, extent);
- ram_bytes = btrfs_file_extent_ram_bytes(leaf,
- extent);
- found_extent = 1;
- } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
- found_inline = 1;
- extent_end = key.offset +
- btrfs_file_extent_inline_len(leaf, extent);
+ if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+ BUG_ON(del_nr > 0);
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0)
+ break;
+ if (ret > 0) {
+ ret = 0;
+ break;
}
+ leaf = path->nodes[0];
+ recow = 1;
+ }
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.objectid > inode->i_ino ||
+ key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
+ break;
+
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ extent_type = btrfs_file_extent_type(leaf, fi);
+
+ if (extent_type == BTRFS_FILE_EXTENT_REG ||
+ extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
+ disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
+ num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
+ extent_offset = btrfs_file_extent_offset(leaf, fi);
+ extent_end = key.offset +
+ btrfs_file_extent_num_bytes(leaf, fi);
+ } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
+ extent_end = key.offset +
+ btrfs_file_extent_inline_len(leaf, fi);
} else {
+ WARN_ON(1);
extent_end = search_start;
}
- /* we found nothing we can drop */
- if ((!found_extent && !found_inline) ||
- search_start >= extent_end) {
- int nextret;
- u32 nritems;
- nritems = btrfs_header_nritems(leaf);
- if (slot >= nritems - 1) {
- nextret = btrfs_next_leaf(root, path);
- if (nextret)
- goto out;
- recow = 1;
- } else {
- path->slots[0]++;
- }
+ if (extent_end <= search_start) {
+ path->slots[0]++;
goto next_slot;
}
- if (end <= extent_end && start >= key.offset && found_inline)
- *hint_byte = EXTENT_MAP_INLINE;
+ search_start = max(key.offset, start);
+ if (recow) {
+ btrfs_release_path(root, path);
+ continue;
+ }
- if (found_extent) {
- read_extent_buffer(leaf, &old, (unsigned long)extent,
- sizeof(old));
- }
-
- if (end < extent_end && end >= key.offset) {
- bookend = 1;
- if (found_inline && start <= key.offset)
- keep = 1;
- }
-
- if (bookend && found_extent) {
- if (locked_end < extent_end) {
- ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
- locked_end, extent_end - 1,
- GFP_NOFS);
- if (!ret) {
- btrfs_release_path(root, path);
- lock_extent(&BTRFS_I(inode)->io_tree,
- locked_end, extent_end - 1,
- GFP_NOFS);
- locked_end = extent_end;
- continue;
- }
- locked_end = extent_end;
+ /*
+ * | - range to drop - |
+ * | -------- extent -------- |
+ */
+ if (start > key.offset && end < extent_end) {
+ BUG_ON(del_nr > 0);
+ BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
+
+ memcpy(&new_key, &key, sizeof(new_key));
+ new_key.offset = start;
+ ret = btrfs_duplicate_item(trans, root, path,
+ &new_key);
+ if (ret == -EAGAIN) {
+ btrfs_release_path(root, path);
+ continue;
}
- disk_bytenr = le64_to_cpu(old.disk_bytenr);
- if (disk_bytenr != 0) {
+ if (ret < 0)
+ break;
+
+ leaf = path->nodes[0];
+ fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ start - key.offset);
+
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+
+ extent_offset += start - key.offset;
+ btrfs_set_file_extent_offset(leaf, fi, extent_offset);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - start);
+ btrfs_mark_buffer_dirty(leaf);
+
+ if (disk_bytenr > 0) {
ret = btrfs_inc_extent_ref(trans, root,
- disk_bytenr,
- le64_to_cpu(old.disk_num_bytes), 0,
- root->root_key.objectid,
- key.objectid, key.offset -
- le64_to_cpu(old.offset));
+ disk_bytenr, num_bytes, 0,
+ root->root_key.objectid,
+ new_key.objectid,
+ start - extent_offset);
BUG_ON(ret);
+ *hint_byte = disk_bytenr;
}
+ key.offset = start;
}
+ /*
+ * | ---- range to drop ----- |
+ * | -------- extent -------- |
+ */
+ if (start <= key.offset && end < extent_end) {
+ BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
+
+ memcpy(&new_key, &key, sizeof(new_key));
+ new_key.offset = end;
+ btrfs_set_item_key_safe(trans, root, path, &new_key);
- if (found_inline) {
- u64 mask = root->sectorsize - 1;
- search_start = (extent_end + mask) & ~mask;
- } else
- search_start = extent_end;
-
- /* truncate existing extent */
- if (start > key.offset) {
- u64 new_num;
- u64 old_num;
- keep = 1;
- WARN_ON(start & (root->sectorsize - 1));
- if (found_extent) {
- new_num = start - key.offset;
- old_num = btrfs_file_extent_num_bytes(leaf,
- extent);
- *hint_byte =
- btrfs_file_extent_disk_bytenr(leaf,
- extent);
- if (btrfs_file_extent_disk_bytenr(leaf,
- extent)) {
- inode_sub_bytes(inode, old_num -
- new_num);
- }
- btrfs_set_file_extent_num_bytes(leaf,
- extent, new_num);
- btrfs_mark_buffer_dirty(leaf);
- } else if (key.offset < inline_limit &&
- (end > extent_end) &&
- (inline_limit < extent_end)) {
- u32 new_size;
- new_size = btrfs_file_extent_calc_inline_size(
- inline_limit - key.offset);
- inode_sub_bytes(inode, extent_end -
- inline_limit);
- btrfs_set_file_extent_ram_bytes(leaf, extent,
- new_size);
- if (!compression && !encryption) {
- btrfs_truncate_item(trans, root, path,
- new_size, 1);
- }
- }
- }
- /* delete the entire extent */
- if (!keep) {
- if (found_inline)
- inode_sub_bytes(inode, extent_end -
- key.offset);
- ret = btrfs_del_item(trans, root, path);
- /* TODO update progress marker and return */
- BUG_ON(ret);
- extent = NULL;
- btrfs_release_path(root, path);
- /* the extent will be freed later */
- }
- if (bookend && found_inline && start <= key.offset) {
- u32 new_size;
- new_size = btrfs_file_extent_calc_inline_size(
- extent_end - end);
- inode_sub_bytes(inode, end - key.offset);
- btrfs_set_file_extent_ram_bytes(leaf, extent,
- new_size);
- if (!compression && !encryption)
- ret = btrfs_truncate_item(trans, root, path,
- new_size, 0);
- BUG_ON(ret);
- }
- /* create bookend, splitting the extent in two */
- if (bookend && found_extent) {
- struct btrfs_key ins;
- ins.objectid = inode->i_ino;
- ins.offset = end;
- btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);
-
- btrfs_release_path(root, path);
- path->leave_spinning = 1;
- ret = btrfs_insert_empty_item(trans, root, path, &ins,
- sizeof(*extent));
- BUG_ON(ret);
-
- leaf = path->nodes[0];
- extent = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- write_extent_buffer(leaf, &old,
- (unsigned long)extent, sizeof(old));
-
- btrfs_set_file_extent_compression(leaf, extent,
- compression);
- btrfs_set_file_extent_encryption(leaf, extent,
- encryption);
- btrfs_set_file_extent_other_encoding(leaf, extent,
- other_encoding);
- btrfs_set_file_extent_offset(leaf, extent,
- le64_to_cpu(old.offset) + end - key.offset);
- WARN_ON(le64_to_cpu(old.num_bytes) <
- (extent_end - end));
- btrfs_set_file_extent_num_bytes(leaf, extent,
+ extent_offset += end - key.offset;
+ btrfs_set_file_extent_offset(leaf, fi, extent_offset);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - end);
+ btrfs_mark_buffer_dirty(leaf);
+ if (disk_bytenr > 0) {
+ inode_sub_bytes(inode, end - key.offset);
+ *hint_byte = disk_bytenr;
+ }
+ break;
+ }
+
+ search_start = extent_end;
+ /*
+ * | ---- range to drop ----- |
+ * | -------- extent -------- |
+ */
+ if (start > key.offset && end >= extent_end) {
+ BUG_ON(del_nr > 0);
+ BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
- /*
- * set the ram bytes to the size of the full extent
- * before splitting. This is a worst case flag,
- * but its the best we can do because we don't know
- * how splitting affects compression
- */
- btrfs_set_file_extent_ram_bytes(leaf, extent,
- ram_bytes);
- btrfs_set_file_extent_type(leaf, extent, found_type);
-
- btrfs_unlock_up_safe(path, 1);
- btrfs_mark_buffer_dirty(path->nodes[0]);
- btrfs_set_lock_blocking(path->nodes[0]);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ start - key.offset);
+ btrfs_mark_buffer_dirty(leaf);
+ if (disk_bytenr > 0) {
+ inode_sub_bytes(inode, extent_end - start);
+ *hint_byte = disk_bytenr;
+ }
+ if (end == extent_end)
+ break;
- path->leave_spinning = 0;
- btrfs_release_path(root, path);
- if (disk_bytenr != 0)
- inode_add_bytes(inode, extent_end - end);
+ path->slots[0]++;
+ goto next_slot;
}
- if (found_extent && !keep) {
- u64 old_disk_bytenr = le64_to_cpu(old.disk_bytenr);
+ /*
+ * | ---- range to drop ----- |
+ * | ------ extent ------ |
+ */
+ if (start <= key.offset && end >= extent_end) {
+ if (del_nr == 0) {
+ del_slot = path->slots[0];
+ del_nr = 1;
+ } else {
+ BUG_ON(del_slot + del_nr != path->slots[0]);
+ del_nr++;
+ }
- if (old_disk_bytenr != 0) {
+ if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
inode_sub_bytes(inode,
- le64_to_cpu(old.num_bytes));
+ extent_end - key.offset);
+ extent_end = ALIGN(extent_end,
+ root->sectorsize);
+ } else if (disk_bytenr > 0) {
ret = btrfs_free_extent(trans, root,
- old_disk_bytenr,
- le64_to_cpu(old.disk_num_bytes),
- 0, root->root_key.objectid,
+ disk_bytenr, num_bytes, 0,
+ root->root_key.objectid,
key.objectid, key.offset -
- le64_to_cpu(old.offset));
+ extent_offset);
BUG_ON(ret);
- *hint_byte = old_disk_bytenr;
+ inode_sub_bytes(inode,
+ extent_end - key.offset);
+ *hint_byte = disk_bytenr;
}
- }
- if (search_start >= end) {
- ret = 0;
- goto out;
+ if (end == extent_end)
+ break;
+
+ if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
+ path->slots[0]++;
+ goto next_slot;
+ }
+
+ ret = btrfs_del_items(trans, root, path, del_slot,
+ del_nr);
+ BUG_ON(ret);
+
+ del_nr = 0;
+ del_slot = 0;
+
+ btrfs_release_path(root, path);
+ continue;
}
+
+ BUG_ON(1);
}
-out:
- btrfs_free_path(path);
- if (locked_end > orig_locked_end) {
- unlock_extent(&BTRFS_I(inode)->io_tree, orig_locked_end,
- locked_end - 1, GFP_NOFS);
+
+ if (del_nr > 0) {
+ ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
+ BUG_ON(ret);
}
+
+ btrfs_free_path(path);
return ret;
}
static int extent_mergeable(struct extent_buffer *leaf, int slot,
- u64 objectid, u64 bytenr, u64 *start, u64 *end)
+ u64 objectid, u64 bytenr, u64 orig_offset,
+ u64 *start, u64 *end)
{
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
@@ -598,6 +523,7 @@ static int extent_mergeable(struct exten
fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
+ btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
btrfs_file_extent_compression(leaf, fi) ||
btrfs_file_extent_encryption(leaf, fi) ||
btrfs_file_extent_other_encoding(leaf, fi))
@@ -620,23 +546,24 @@ static int extent_mergeable(struct exten
* two or three.
*/
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
struct inode *inode, u64 start, u64 end)
{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_buffer *leaf;
struct btrfs_path *path;
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
+ struct btrfs_key new_key;
u64 bytenr;
u64 num_bytes;
u64 extent_end;
u64 orig_offset;
u64 other_start;
u64 other_end;
- u64 split = start;
- u64 locked_end = end;
- int extent_type;
- int split_end = 1;
+ u64 split;
+ int del_nr = 0;
+ int del_slot = 0;
+ int recow;
int ret;
btrfs_drop_extent_cache(inode, start, end - 1, 0);
@@ -644,12 +571,11 @@ int btrfs_mark_extent_written(struct btr
path = btrfs_alloc_path();
BUG_ON(!path);
again:
+ recow = 0;
+ split = start;
key.objectid = inode->i_ino;
key.type = BTRFS_EXTENT_DATA_KEY;
- if (split == start)
- key.offset = split;
- else
- key.offset = split - 1;
+ key.offset = split;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0 && path->slots[0] > 0)
@@ -661,159 +587,158 @@ again:
key.type != BTRFS_EXTENT_DATA_KEY);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
- extent_type = btrfs_file_extent_type(leaf, fi);
- BUG_ON(extent_type != BTRFS_FILE_EXTENT_PREALLOC);
+ BUG_ON(btrfs_file_extent_type(leaf, fi) !=
+ BTRFS_FILE_EXTENT_PREALLOC);
extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
BUG_ON(key.offset > start || extent_end < end);
bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
+ memcpy(&new_key, &key, sizeof(new_key));
- if (key.offset == start)
- split = end;
+ if (start == key.offset && end < extent_end) {
+ other_start = 0;
+ other_end = start;
+ if (extent_mergeable(leaf, path->slots[0] - 1,
+ inode->i_ino, bytenr, orig_offset,
+ &other_start, &other_end)) {
+ new_key.offset = end;
+ btrfs_set_item_key_safe(trans, root, path, &new_key);
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - end);
+ btrfs_set_file_extent_offset(leaf, fi,
+ end - orig_offset);
+ fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ end - other_start);
+ btrfs_mark_buffer_dirty(leaf);
+ goto out;
+ }
+ }
- if (key.offset == start && extent_end == end) {
- int del_nr = 0;
- int del_slot = 0;
+ if (start > key.offset && end == extent_end) {
other_start = end;
other_end = 0;
- if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
- bytenr, &other_start, &other_end)) {
- extent_end = other_end;
- del_slot = path->slots[0] + 1;
- del_nr++;
- ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
- 0, root->root_key.objectid,
- inode->i_ino, orig_offset);
- BUG_ON(ret);
- }
- other_start = 0;
- other_end = start;
- if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
- bytenr, &other_start, &other_end)) {
- key.offset = other_start;
- del_slot = path->slots[0];
- del_nr++;
- ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
- 0, root->root_key.objectid,
- inode->i_ino, orig_offset);
- BUG_ON(ret);
+ if (extent_mergeable(leaf, path->slots[0] + 1,
+ inode->i_ino, bytenr, orig_offset,
+ &other_start, &other_end)) {
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ start - key.offset);
+ path->slots[0]++;
+ new_key.offset = start;
+ btrfs_set_item_key_safe(trans, root, path, &new_key);
+
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ other_end - start);
+ btrfs_set_file_extent_offset(leaf, fi,
+ start - orig_offset);
+ btrfs_mark_buffer_dirty(leaf);
+ goto out;
}
- split_end = 0;
- if (del_nr == 0) {
- btrfs_set_file_extent_type(leaf, fi,
- BTRFS_FILE_EXTENT_REG);
- goto done;
+ }
+
+ while (start > key.offset || end < extent_end) {
+ if (key.offset == start)
+ split = end;
+
+ new_key.offset = split;
+ ret = btrfs_duplicate_item(trans, root, path, &new_key);
+ if (ret == -EAGAIN) {
+ btrfs_release_path(root, path);
+ goto again;
}
+ BUG_ON(ret < 0);
- fi = btrfs_item_ptr(leaf, del_slot - 1,
+ leaf = path->nodes[0];
+ fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
struct btrfs_file_extent_item);
- btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
btrfs_set_file_extent_num_bytes(leaf, fi,
- extent_end - key.offset);
+ split - key.offset);
+
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+
+ btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - split);
btrfs_mark_buffer_dirty(leaf);
- ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
+ ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
+ root->root_key.objectid,
+ inode->i_ino, orig_offset);
BUG_ON(ret);
- goto release;
- } else if (split == start) {
- if (locked_end < extent_end) {
- ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
- locked_end, extent_end - 1, GFP_NOFS);
- if (!ret) {
- btrfs_release_path(root, path);
- lock_extent(&BTRFS_I(inode)->io_tree,
- locked_end, extent_end - 1, GFP_NOFS);
- locked_end = extent_end;
- goto again;
- }
- locked_end = extent_end;
+
+ if (split == start) {
+ key.offset = start;
+ } else {
+ BUG_ON(start != key.offset);
+ path->slots[0]--;
+ extent_end = end;
}
- btrfs_set_file_extent_num_bytes(leaf, fi, split - key.offset);
- } else {
- BUG_ON(key.offset != start);
- key.offset = split;
- btrfs_set_file_extent_offset(leaf, fi, key.offset -
- orig_offset);
- btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - split);
- btrfs_set_item_key_safe(trans, root, path, &key);
- extent_end = split;
- }
-
- if (extent_end == end) {
- split_end = 0;
- extent_type = BTRFS_FILE_EXTENT_REG;
+ recow = 1;
}
- if (extent_end == end && split == start) {
- other_start = end;
- other_end = 0;
- if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
- bytenr, &other_start, &other_end)) {
- path->slots[0]++;
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- key.offset = split;
- btrfs_set_item_key_safe(trans, root, path, &key);
- btrfs_set_file_extent_offset(leaf, fi, key.offset -
- orig_offset);
- btrfs_set_file_extent_num_bytes(leaf, fi,
- other_end - split);
- goto done;
+
+ other_start = end;
+ other_end = 0;
+ if (extent_mergeable(leaf, path->slots[0] + 1,
+ inode->i_ino, bytenr, orig_offset,
+ &other_start, &other_end)) {
+ if (recow) {
+ btrfs_release_path(root, path);
+ goto again;
}
+ extent_end = other_end;
+ del_slot = path->slots[0] + 1;
+ del_nr++;
+ ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+ 0, root->root_key.objectid,
+ inode->i_ino, orig_offset);
+ BUG_ON(ret);
}
- if (extent_end == end && split == end) {
- other_start = 0;
- other_end = start;
- if (extent_mergeable(leaf, path->slots[0] - 1 , inode->i_ino,
- bytenr, &other_start, &other_end)) {
- path->slots[0]--;
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- btrfs_set_file_extent_num_bytes(leaf, fi, extent_end -
- other_start);
- goto done;
+ other_start = 0;
+ other_end = start;
+ if (extent_mergeable(leaf, path->slots[0] - 1,
+ inode->i_ino, bytenr, orig_offset,
+ &other_start, &other_end)) {
+ if (recow) {
+ btrfs_release_path(root, path);
+ goto again;
}
+ key.offset = other_start;
+ del_slot = path->slots[0];
+ del_nr++;
+ ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+ 0, root->root_key.objectid,
+ inode->i_ino, orig_offset);
+ BUG_ON(ret);
}
+ if (del_nr == 0) {
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_type(leaf, fi,
+ BTRFS_FILE_EXTENT_REG);
+ btrfs_mark_buffer_dirty(leaf);
+ } else {
+ fi = btrfs_item_ptr(leaf, del_slot - 1,
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_type(leaf, fi,
+ BTRFS_FILE_EXTENT_REG);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - key.offset);
+ btrfs_mark_buffer_dirty(leaf);
- btrfs_mark_buffer_dirty(leaf);
-
- ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
- root->root_key.objectid,
- inode->i_ino, orig_offset);
- BUG_ON(ret);
- btrfs_release_path(root, path);
-
- key.offset = start;
- ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*fi));
- BUG_ON(ret);
-
- leaf = path->nodes[0];
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- btrfs_set_file_extent_generation(leaf, fi, trans->transid);
- btrfs_set_file_extent_type(leaf, fi, extent_type);
- btrfs_set_file_extent_disk_bytenr(leaf, fi, bytenr);
- btrfs_set_file_extent_disk_num_bytes(leaf, fi, num_bytes);
- btrfs_set_file_extent_offset(leaf, fi, key.offset - orig_offset);
- btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - key.offset);
- btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
- btrfs_set_file_extent_compression(leaf, fi, 0);
- btrfs_set_file_extent_encryption(leaf, fi, 0);
- btrfs_set_file_extent_other_encoding(leaf, fi, 0);
-done:
- btrfs_mark_buffer_dirty(leaf);
-
-release:
- btrfs_release_path(root, path);
- if (split_end && split == start) {
- split = end;
- goto again;
- }
- if (locked_end > end) {
- unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
- GFP_NOFS);
+ ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
+ BUG_ON(ret);
}
+out:
btrfs_free_path(path);
return 0;
}
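
The rewritten btrfs_mark_extent_written() above replaces the old multi-pass logic with btrfs_duplicate_item(): a preallocated extent covering [key.offset, extent_end) is cut at start and end, and only the middle piece is flipped to a regular (written) extent. A minimal userspace sketch of that interval split, with hypothetical names rather than btrfs APIs:

#include <stdio.h>
#include <stdint.h>

struct piece { uint64_t off, len; int written; };

/* Split [off, off + len) at start/end; assumes off <= start <= end <= off + len. */
static int split_extent(uint64_t off, uint64_t len,
                        uint64_t start, uint64_t end,
                        struct piece out[3])
{
    uint64_t ext_end = off + len;
    int n = 0;

    if (start > off)                 /* leading piece stays preallocated */
        out[n++] = (struct piece){ off, start - off, 0 };
    out[n++] = (struct piece){ start, end - start, 1 };   /* written */
    if (end < ext_end)               /* trailing piece stays preallocated */
        out[n++] = (struct piece){ end, ext_end - end, 0 };
    return n;
}

int main(void)
{
    struct piece p[3];
    int n = split_extent(0, 4096 * 4, 4096, 8192, p);
    for (int i = 0; i < n; i++)
        printf("[%llu, +%llu) written=%d\n",
               (unsigned long long)p[i].off,
               (unsigned long long)p[i].len, p[i].written);
    return 0;
}
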
@@ -1210,7 +1135,7 @@ int btrfs_sync_file(struct file *file, s
}
mutex_lock(&dentry->d_inode->i_mutex);
out:
- return ret > 0 ? EIO : ret;
+ return ret > 0 ? -EIO : ret;
}
static const struct vm_operations_struct btrfs_file_vm_ops = {
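
The one-character btrfs_sync_file() fix above (EIO to -EIO) matters because the kernel reports errors as negative errno values; a positive EIO would look like a successful non-zero return to callers. A tiny demonstration of the convention:

#include <errno.h>
#include <stdio.h>

static int flush(int fail) { return fail ? -EIO : 0; }

int main(void)
{
    int ret = flush(1);
    if (ret < 0)                     /* callers test for < 0, not != 0 */
        printf("error: %d (EIO=%d)\n", ret, EIO);
    return 0;
}
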
file:b3ad168a0bfc97906dd1fa556b61297250faae58 -> file:e03a836d50d08051ea99ae4869e1d8b765ff63f5
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -88,13 +88,14 @@ static noinline int cow_file_range(struc
u64 start, u64 end, int *page_started,
unsigned long *nr_written, int unlock);
-static int btrfs_init_inode_security(struct inode *inode, struct inode *dir)
+static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
+ struct inode *inode, struct inode *dir)
{
int err;
- err = btrfs_init_acl(inode, dir);
+ err = btrfs_init_acl(trans, inode, dir);
if (!err)
- err = btrfs_xattr_security_init(inode, dir);
+ err = btrfs_xattr_security_init(trans, inode, dir);
return err;
}
@@ -188,8 +189,18 @@ static noinline int insert_inline_extent
btrfs_mark_buffer_dirty(leaf);
btrfs_free_path(path);
+ /*
+ * we're an inline extent, so nobody can
+ * extend the file past i_size without locking
+ * a page we already have locked.
+ *
+ * We must do any isize and inode updates
+ * before we unlock the pages. Otherwise we
+ * could end up racing with unlink.
+ */
BTRFS_I(inode)->disk_i_size = inode->i_size;
btrfs_update_inode(trans, root, inode);
+
return 0;
fail:
btrfs_free_path(path);
@@ -230,8 +241,7 @@ static noinline int cow_file_range_inlin
return 1;
}
- ret = btrfs_drop_extents(trans, root, inode, start,
- aligned_end, aligned_end, start,
+ ret = btrfs_drop_extents(trans, inode, start, aligned_end,
&hint_byte, 1);
BUG_ON(ret);
@@ -416,7 +426,6 @@ again:
start, end,
total_compressed, pages);
}
- btrfs_end_transaction(trans, root);
if (ret == 0) {
/*
* inline extent creation worked, we don't need
@@ -430,9 +439,11 @@ again:
EXTENT_CLEAR_DELALLOC |
EXTENT_CLEAR_ACCOUNTING |
EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);
- ret = 0;
+
+ btrfs_end_transaction(trans, root);
goto free_pages_out;
}
+ btrfs_end_transaction(trans, root);
}
if (will_compress) {
@@ -543,7 +554,6 @@ static noinline int submit_compressed_ex
if (list_empty(&async_cow->extents))
return 0;
- trans = btrfs_join_transaction(root, 1);
while (!list_empty(&async_cow->extents)) {
async_extent = list_entry(async_cow->extents.next,
@@ -590,19 +600,15 @@ retry:
lock_extent(io_tree, async_extent->start,
async_extent->start + async_extent->ram_size - 1,
GFP_NOFS);
- /*
- * here we're doing allocation and writeback of the
- * compressed pages
- */
- btrfs_drop_extent_cache(inode, async_extent->start,
- async_extent->start +
- async_extent->ram_size - 1, 0);
+ trans = btrfs_join_transaction(root, 1);
ret = btrfs_reserve_extent(trans, root,
async_extent->compressed_size,
async_extent->compressed_size,
0, alloc_hint,
(u64)-1, &ins, 1);
+ btrfs_end_transaction(trans, root);
+
if (ret) {
int i;
for (i = 0; i < async_extent->nr_pages; i++) {
@@ -618,6 +624,14 @@ retry:
goto retry;
}
+ /*
+ * here we're doing allocation and writeback of the
+ * compressed pages
+ */
+ btrfs_drop_extent_cache(inode, async_extent->start,
+ async_extent->start +
+ async_extent->ram_size - 1, 0);
+
em = alloc_extent_map(GFP_NOFS);
em->start = async_extent->start;
em->len = async_extent->ram_size;
@@ -649,8 +663,6 @@ retry:
BTRFS_ORDERED_COMPRESSED);
BUG_ON(ret);
- btrfs_end_transaction(trans, root);
-
/*
* clear dirty, set writeback and unlock the pages.
*/
@@ -672,13 +684,11 @@ retry:
async_extent->nr_pages);
BUG_ON(ret);
- trans = btrfs_join_transaction(root, 1);
alloc_hint = ins.objectid + ins.offset;
kfree(async_extent);
cond_resched();
}
- btrfs_end_transaction(trans, root);
return 0;
}
@@ -742,6 +752,7 @@ static noinline int cow_file_range(struc
EXTENT_CLEAR_DIRTY |
EXTENT_SET_WRITEBACK |
EXTENT_END_WRITEBACK);
+
*nr_written = *nr_written +
(end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
*page_started = 1;
@@ -1596,7 +1607,6 @@ static int insert_reserved_file_extent(s
struct inode *inode, u64 file_pos,
u64 disk_bytenr, u64 disk_num_bytes,
u64 num_bytes, u64 ram_bytes,
- u64 locked_end,
u8 compression, u8 encryption,
u16 other_encoding, int extent_type)
{
@@ -1622,9 +1632,8 @@ static int insert_reserved_file_extent(s
* the caller is expected to unpin it and allow it to be merged
* with the others.
*/
- ret = btrfs_drop_extents(trans, root, inode, file_pos,
- file_pos + num_bytes, locked_end,
- file_pos, &hint, 0);
+ ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
+ &hint, 0);
BUG_ON(ret);
ins.objectid = inode->i_ino;
@@ -1671,24 +1680,6 @@ static int insert_reserved_file_extent(s
* before we start the transaction. It limits the amount of btree
* reads required while inside the transaction.
*/
-static noinline void reada_csum(struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_ordered_extent *ordered_extent)
-{
- struct btrfs_ordered_sum *sum;
- u64 bytenr;
-
- sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum,
- list);
- bytenr = sum->sums[0].bytenr;
-
- /*
- * we don't care about the results, the point of this search is
- * just to get the btree leaves into ram
- */
- btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0);
-}
-
/* as ordered data IO finishes, this gets called so we can finish
* an ordered extent if the range of bytes in the file it covers are
* fully written.
@@ -1699,7 +1690,6 @@ static int btrfs_finish_ordered_io(struc
struct btrfs_trans_handle *trans;
struct btrfs_ordered_extent *ordered_extent = NULL;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
- struct btrfs_path *path;
int compressed = 0;
int ret;
@@ -1707,46 +1697,32 @@ static int btrfs_finish_ordered_io(struc
if (!ret)
return 0;
- /*
- * before we join the transaction, try to do some of our IO.
- * This will limit the amount of IO that we have to do with
- * the transaction running. We're unlikely to need to do any
- * IO if the file extents are new, the disk_i_size checks
- * covers the most common case.
- */
- if (start < BTRFS_I(inode)->disk_i_size) {
- path = btrfs_alloc_path();
- if (path) {
- ret = btrfs_lookup_file_extent(NULL, root, path,
- inode->i_ino,
- start, 0);
- ordered_extent = btrfs_lookup_ordered_extent(inode,
- start);
- if (!list_empty(&ordered_extent->list)) {
- btrfs_release_path(root, path);
- reada_csum(root, path, ordered_extent);
- }
- btrfs_free_path(path);
+ ordered_extent = btrfs_lookup_ordered_extent(inode, start);
+ BUG_ON(!ordered_extent);
+
+ if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
+ BUG_ON(!list_empty(&ordered_extent->list));
+ ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
+ if (!ret) {
+ trans = btrfs_join_transaction(root, 1);
+ ret = btrfs_update_inode(trans, root, inode);
+ BUG_ON(ret);
+ btrfs_end_transaction(trans, root);
}
+ goto out;
}
- trans = btrfs_join_transaction(root, 1);
-
- if (!ordered_extent)
- ordered_extent = btrfs_lookup_ordered_extent(inode, start);
- BUG_ON(!ordered_extent);
- if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
- goto nocow;
-
lock_extent(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset + ordered_extent->len - 1,
GFP_NOFS);
+ trans = btrfs_join_transaction(root, 1);
+
if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
compressed = 1;
if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
BUG_ON(compressed);
- ret = btrfs_mark_extent_written(trans, root, inode,
+ ret = btrfs_mark_extent_written(trans, inode,
ordered_extent->file_offset,
ordered_extent->file_offset +
ordered_extent->len);
@@ -1758,8 +1734,6 @@ static int btrfs_finish_ordered_io(struc
ordered_extent->disk_len,
ordered_extent->len,
ordered_extent->len,
- ordered_extent->file_offset +
- ordered_extent->len,
compressed, 0, 0,
BTRFS_FILE_EXTENT_REG);
unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
@@ -1770,22 +1744,20 @@ static int btrfs_finish_ordered_io(struc
unlock_extent(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset + ordered_extent->len - 1,
GFP_NOFS);
-nocow:
add_pending_csums(trans, inode, ordered_extent->file_offset,
&ordered_extent->list);
- mutex_lock(&BTRFS_I(inode)->extent_mutex);
- btrfs_ordered_update_i_size(inode, ordered_extent);
- btrfs_update_inode(trans, root, inode);
- btrfs_remove_ordered_extent(inode, ordered_extent);
- mutex_unlock(&BTRFS_I(inode)->extent_mutex);
-
+ /* this also removes the ordered extent from the tree */
+ btrfs_ordered_update_i_size(inode, 0, ordered_extent);
+ ret = btrfs_update_inode(trans, root, inode);
+ BUG_ON(ret);
+ btrfs_end_transaction(trans, root);
+out:
/* once for us */
btrfs_put_ordered_extent(ordered_extent);
/* once for the tree */
btrfs_put_ordered_extent(ordered_extent);
- btrfs_end_transaction(trans, root);
return 0;
}
@@ -2008,6 +1980,54 @@ zeroit:
return -EIO;
}
+struct delayed_iput {
+ struct list_head list;
+ struct inode *inode;
+};
+
+void btrfs_add_delayed_iput(struct inode *inode)
+{
+ struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
+ struct delayed_iput *delayed;
+
+ if (atomic_add_unless(&inode->i_count, -1, 1))
+ return;
+
+ delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
+ delayed->inode = inode;
+
+ spin_lock(&fs_info->delayed_iput_lock);
+ list_add_tail(&delayed->list, &fs_info->delayed_iputs);
+ spin_unlock(&fs_info->delayed_iput_lock);
+}
+
+void btrfs_run_delayed_iputs(struct btrfs_root *root)
+{
+ LIST_HEAD(list);
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct delayed_iput *delayed;
+ int empty;
+
+ spin_lock(&fs_info->delayed_iput_lock);
+ empty = list_empty(&fs_info->delayed_iputs);
+ spin_unlock(&fs_info->delayed_iput_lock);
+ if (empty)
+ return;
+
+ down_read(&root->fs_info->cleanup_work_sem);
+ spin_lock(&fs_info->delayed_iput_lock);
+ list_splice_init(&fs_info->delayed_iputs, &list);
+ spin_unlock(&fs_info->delayed_iput_lock);
+
+ while (!list_empty(&list)) {
+ delayed = list_entry(list.next, struct delayed_iput, list);
+ list_del(&delayed->list);
+ iput(delayed->inode);
+ kfree(delayed);
+ }
+ up_read(&root->fs_info->cleanup_work_sem);
+}
+
/*
* This creates an orphan entry for the given inode in case something goes
* wrong in the middle of an unlink/truncate.
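
The btrfs_add_delayed_iput()/btrfs_run_delayed_iputs() pair added above avoids dropping the final inode reference in a context where eviction could deadlock: the last put is queued and replayed later from a safe context. A compilable userspace sketch of the same put-or-defer pattern, all names hypothetical:

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
    atomic_int refs;
    struct obj *next;                /* deferred-release list linkage */
};

static pthread_mutex_t deferred_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *deferred;

/* Like atomic_add_unless(&refs, -1, 1): decrement only if refs != 1. */
static int put_unless_last(struct obj *o)
{
    int v = atomic_load(&o->refs);
    while (v != 1)
        if (atomic_compare_exchange_weak(&o->refs, &v, v - 1))
            return 1;                /* dropped a reference, not the last */
    return 0;                        /* we hold the last reference */
}

void put_or_defer(struct obj *o)
{
    if (put_unless_last(o))
        return;
    pthread_mutex_lock(&deferred_lock);
    o->next = deferred;              /* defer the final release */
    deferred = o;
    pthread_mutex_unlock(&deferred_lock);
}

void run_deferred(void)              /* called where freeing is safe */
{
    pthread_mutex_lock(&deferred_lock);
    struct obj *list = deferred;
    deferred = NULL;
    pthread_mutex_unlock(&deferred_lock);
    while (list) {
        struct obj *next = list->next;
        free(list);
        list = next;
    }
}
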
@@ -2080,16 +2100,17 @@ void btrfs_orphan_cleanup(struct btrfs_r
struct inode *inode;
int ret = 0, nr_unlink = 0, nr_truncate = 0;
- path = btrfs_alloc_path();
- if (!path)
+ if (!xchg(&root->clean_orphans, 0))
return;
+
+ path = btrfs_alloc_path();
+ BUG_ON(!path);
path->reada = -1;
key.objectid = BTRFS_ORPHAN_OBJECTID;
btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
key.offset = (u64)-1;
-
while (1) {
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0) {
@@ -2834,37 +2855,40 @@ out:
* min_type is the minimum key type to truncate down to. If set to 0, this
* will kill all the items on this inode, including the INODE_ITEM_KEY.
*/
-noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct inode *inode,
- u64 new_size, u32 min_type)
+int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct inode *inode,
+ u64 new_size, u32 min_type)
{
- int ret;
struct btrfs_path *path;
- struct btrfs_key key;
- struct btrfs_key found_key;
- u32 found_type = (u8)-1;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
u64 extent_start = 0;
u64 extent_num_bytes = 0;
u64 extent_offset = 0;
u64 item_end = 0;
+ u64 mask = root->sectorsize - 1;
+ u32 found_type = (u8)-1;
int found_extent;
int del_item;
int pending_del_nr = 0;
int pending_del_slot = 0;
int extent_type = -1;
int encoding;
- u64 mask = root->sectorsize - 1;
+ int ret;
+ int err = 0;
+
+ BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
if (root->ref_cows)
btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
+
path = btrfs_alloc_path();
BUG_ON(!path);
path->reada = -1;
- /* FIXME, add redo link to tree so we don't leak on crash */
key.objectid = inode->i_ino;
key.offset = (u64)-1;
key.type = (u8)-1;
@@ -2872,17 +2896,17 @@ noinline int btrfs_truncate_inode_items(
search_again:
path->leave_spinning = 1;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
- if (ret < 0)
- goto error;
+ if (ret < 0) {
+ err = ret;
+ goto out;
+ }
if (ret > 0) {
/* there are no items in the tree for us to truncate, we're
* done
*/
- if (path->slots[0] == 0) {
- ret = 0;
- goto error;
- }
+ if (path->slots[0] == 0)
+ goto out;
path->slots[0]--;
}
@@ -2917,28 +2941,17 @@ search_again:
}
item_end--;
}
- if (item_end < new_size) {
- if (found_type == BTRFS_DIR_ITEM_KEY)
- found_type = BTRFS_INODE_ITEM_KEY;
- else if (found_type == BTRFS_EXTENT_ITEM_KEY)
- found_type = BTRFS_EXTENT_DATA_KEY;
- else if (found_type == BTRFS_EXTENT_DATA_KEY)
- found_type = BTRFS_XATTR_ITEM_KEY;
- else if (found_type == BTRFS_XATTR_ITEM_KEY)
- found_type = BTRFS_INODE_REF_KEY;
- else if (found_type)
- found_type--;
- else
+ if (found_type > min_type) {
+ del_item = 1;
+ } else {
+ if (item_end < new_size)
break;
- btrfs_set_key_type(&key, found_type);
- goto next;
+ if (found_key.offset >= new_size)
+ del_item = 1;
+ else
+ del_item = 0;
}
- if (found_key.offset >= new_size)
- del_item = 1;
- else
- del_item = 0;
found_extent = 0;
-
/* FIXME, shrink the extent if the ref count is only 1 */
if (found_type != BTRFS_EXTENT_DATA_KEY)
goto delete;
@@ -3025,42 +3038,36 @@ delete:
inode->i_ino, extent_offset);
BUG_ON(ret);
}
-next:
- if (path->slots[0] == 0) {
- if (pending_del_nr)
- goto del_pending;
- btrfs_release_path(root, path);
- if (found_type == BTRFS_INODE_ITEM_KEY)
- break;
- goto search_again;
- }
- path->slots[0]--;
- if (pending_del_nr &&
- path->slots[0] + 1 != pending_del_slot) {
- struct btrfs_key debug;
-del_pending:
- btrfs_item_key_to_cpu(path->nodes[0], &debug,
- pending_del_slot);
- ret = btrfs_del_items(trans, root, path,
- pending_del_slot,
- pending_del_nr);
- BUG_ON(ret);
- pending_del_nr = 0;
+ if (found_type == BTRFS_INODE_ITEM_KEY)
+ break;
+
+ if (path->slots[0] == 0 ||
+ path->slots[0] != pending_del_slot) {
+ if (root->ref_cows) {
+ err = -EAGAIN;
+ goto out;
+ }
+ if (pending_del_nr) {
+ ret = btrfs_del_items(trans, root, path,
+ pending_del_slot,
+ pending_del_nr);
+ BUG_ON(ret);
+ pending_del_nr = 0;
+ }
btrfs_release_path(root, path);
- if (found_type == BTRFS_INODE_ITEM_KEY)
- break;
goto search_again;
+ } else {
+ path->slots[0]--;
}
}
- ret = 0;
-error:
+out:
if (pending_del_nr) {
ret = btrfs_del_items(trans, root, path, pending_del_slot,
pending_del_nr);
}
btrfs_free_path(path);
- return ret;
+ return err;
}
/*
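
btrfs_truncate_inode_items() now returns -EAGAIN instead of doing unbounded work inside one transaction; callers such as btrfs_delete_inode() and btrfs_truncate() commit and retry. The control flow as a hedged sketch, with do_truncate_step() and commit() as stand-in helpers:

#include <errno.h>

extern int  do_truncate_step(void);  /* deletes a bounded batch of items */
extern void commit(void);            /* ends the current transaction */

int truncate_in_chunks(void)
{
    int ret;

    for (;;) {
        ret = do_truncate_step();
        if (ret != -EAGAIN)
            break;                   /* done (0) or a hard error (< 0) */
        commit();                    /* release the transaction, retry */
    }
    return ret;
}
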
@@ -3180,10 +3187,6 @@ int btrfs_cont_expand(struct inode *inod
if (size <= hole_start)
return 0;
- err = btrfs_truncate_page(inode->i_mapping, inode->i_size);
- if (err)
- return err;
-
while (1) {
struct btrfs_ordered_extent *ordered;
btrfs_wait_ordered_range(inode, hole_start,
@@ -3196,9 +3199,6 @@ int btrfs_cont_expand(struct inode *inod
btrfs_put_ordered_extent(ordered);
}
- trans = btrfs_start_transaction(root, 1);
- btrfs_set_trans_block_group(trans, inode);
-
cur_offset = hole_start;
while (1) {
em = btrfs_get_extent(inode, NULL, 0, cur_offset,
@@ -3206,40 +3206,120 @@ int btrfs_cont_expand(struct inode *inod
BUG_ON(IS_ERR(em) || !em);
last_byte = min(extent_map_end(em), block_end);
last_byte = (last_byte + mask) & ~mask;
- if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
+ if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
u64 hint_byte = 0;
hole_size = last_byte - cur_offset;
- err = btrfs_drop_extents(trans, root, inode,
- cur_offset,
- cur_offset + hole_size,
- block_end,
- cur_offset, &hint_byte, 1);
- if (err)
- break;
- err = btrfs_reserve_metadata_space(root, 1);
+ err = btrfs_reserve_metadata_space(root, 2);
if (err)
break;
+ trans = btrfs_start_transaction(root, 1);
+ btrfs_set_trans_block_group(trans, inode);
+
+ err = btrfs_drop_extents(trans, inode, cur_offset,
+ cur_offset + hole_size,
+ &hint_byte, 1);
+ BUG_ON(err);
+
err = btrfs_insert_file_extent(trans, root,
inode->i_ino, cur_offset, 0,
0, hole_size, 0, hole_size,
0, 0, 0);
+ BUG_ON(err);
+
btrfs_drop_extent_cache(inode, hole_start,
last_byte - 1, 0);
- btrfs_unreserve_metadata_space(root, 1);
+
+ btrfs_end_transaction(trans, root);
+ btrfs_unreserve_metadata_space(root, 2);
}
free_extent_map(em);
cur_offset = last_byte;
- if (err || cur_offset >= block_end)
+ if (cur_offset >= block_end)
break;
}
- btrfs_end_transaction(trans, root);
unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
return err;
}
+static int btrfs_setattr_size(struct inode *inode, struct iattr *attr)
+{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_trans_handle *trans;
+ unsigned long nr;
+ int ret;
+
+ if (attr->ia_size == inode->i_size)
+ return 0;
+
+ if (attr->ia_size > inode->i_size) {
+ unsigned long limit;
+ limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+ if (attr->ia_size > inode->i_sb->s_maxbytes)
+ return -EFBIG;
+ if (limit != RLIM_INFINITY && attr->ia_size > limit) {
+ send_sig(SIGXFSZ, current, 0);
+ return -EFBIG;
+ }
+ }
+
+ ret = btrfs_reserve_metadata_space(root, 1);
+ if (ret)
+ return ret;
+
+ trans = btrfs_start_transaction(root, 1);
+ btrfs_set_trans_block_group(trans, inode);
+
+ ret = btrfs_orphan_add(trans, inode);
+ BUG_ON(ret);
+
+ nr = trans->blocks_used;
+ btrfs_end_transaction(trans, root);
+ btrfs_unreserve_metadata_space(root, 1);
+ btrfs_btree_balance_dirty(root, nr);
+
+ if (attr->ia_size > inode->i_size) {
+ ret = btrfs_cont_expand(inode, attr->ia_size);
+ if (ret) {
+ btrfs_truncate(inode);
+ return ret;
+ }
+
+ i_size_write(inode, attr->ia_size);
+ btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
+
+ trans = btrfs_start_transaction(root, 1);
+ btrfs_set_trans_block_group(trans, inode);
+
+ ret = btrfs_update_inode(trans, root, inode);
+ BUG_ON(ret);
+ if (inode->i_nlink > 0) {
+ ret = btrfs_orphan_del(trans, inode);
+ BUG_ON(ret);
+ }
+ nr = trans->blocks_used;
+ btrfs_end_transaction(trans, root);
+ btrfs_btree_balance_dirty(root, nr);
+ return 0;
+ }
+
+ /*
+ * We're truncating a file that used to have good data down to
+ * zero. Make sure it gets into the ordered flush list so that
+ * any new writes get down to disk quickly.
+ */
+ if (attr->ia_size == 0)
+ BTRFS_I(inode)->ordered_data_close = 1;
+
+ /* we don't support swapfiles, so vmtruncate shouldn't fail */
+ ret = vmtruncate(inode, attr->ia_size);
+ BUG_ON(ret);
+
+ return 0;
+}
+
static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
@@ -3250,23 +3330,14 @@ static int btrfs_setattr(struct dentry *
return err;
if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
- if (attr->ia_size > inode->i_size) {
- err = btrfs_cont_expand(inode, attr->ia_size);
- if (err)
- return err;
- } else if (inode->i_size > 0 &&
- attr->ia_size == 0) {
-
- /* we're truncating a file that used to have good
- * data down to zero. Make sure it gets into
- * the ordered flush list so that any new writes
- * get down to disk quickly.
- */
- BTRFS_I(inode)->ordered_data_close = 1;
- }
+ err = btrfs_setattr_size(inode, attr);
+ if (err)
+ return err;
}
+ attr->ia_valid &= ~ATTR_SIZE;
- err = inode_setattr(inode, attr);
+ if (attr->ia_valid)
+ err = inode_setattr(inode, attr);
if (!err && ((attr->ia_valid & ATTR_MODE)))
err = btrfs_acl_chmod(inode);
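
btrfs_setattr_size() above open-codes the generic size checks before growing a file: past s_maxbytes is -EFBIG, and past RLIMIT_FSIZE it additionally raises SIGXFSZ. The same rule in userspace terms, as a sketch with stand-in names:

#include <signal.h>
#include <sys/resource.h>
#include <sys/types.h>

int check_new_size(off_t new_size, off_t s_maxbytes)
{
    struct rlimit rl;

    if (new_size > s_maxbytes)
        return -1;                   /* -EFBIG in the kernel */
    if (getrlimit(RLIMIT_FSIZE, &rl) == 0 &&
        rl.rlim_cur != RLIM_INFINITY &&
        (rlim_t)new_size > rl.rlim_cur) {
        raise(SIGXFSZ);              /* mirrors send_sig(SIGXFSZ, ...);
                                        default action terminates */
        return -1;
    }
    return 0;
}
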
@@ -3287,36 +3358,43 @@ void btrfs_delete_inode(struct inode *in
}
btrfs_wait_ordered_range(inode, 0, (u64)-1);
+ if (root->fs_info->log_root_recovering) {
+ BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
+ goto no_delete;
+ }
+
if (inode->i_nlink > 0) {
BUG_ON(btrfs_root_refs(&root->root_item) != 0);
goto no_delete;
}
btrfs_i_size_write(inode, 0);
- trans = btrfs_join_transaction(root, 1);
- btrfs_set_trans_block_group(trans, inode);
- ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
- if (ret) {
- btrfs_orphan_del(NULL, inode);
- goto no_delete_lock;
- }
+ while (1) {
+ trans = btrfs_start_transaction(root, 1);
+ btrfs_set_trans_block_group(trans, inode);
+ ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
- btrfs_orphan_del(trans, inode);
+ if (ret != -EAGAIN)
+ break;
- nr = trans->blocks_used;
- clear_inode(inode);
+ nr = trans->blocks_used;
+ btrfs_end_transaction(trans, root);
+ trans = NULL;
+ btrfs_btree_balance_dirty(root, nr);
+ }
- btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root, nr);
- return;
+ if (ret == 0) {
+ ret = btrfs_orphan_del(trans, inode);
+ BUG_ON(ret);
+ }
-no_delete_lock:
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root, nr);
no_delete:
clear_inode(inode);
+ return;
}
/*
@@ -3569,7 +3647,6 @@ static noinline void init_btrfs_i(struct
INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations);
RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
- mutex_init(&BTRFS_I(inode)->extent_mutex);
mutex_init(&BTRFS_I(inode)->log_mutex);
}
@@ -3695,6 +3772,13 @@ struct inode *btrfs_lookup_dentry(struct
}
srcu_read_unlock(&root->fs_info->subvol_srcu, index);
+ if (root != sub_root) {
+ down_read(&root->fs_info->cleanup_work_sem);
+ if (!(inode->i_sb->s_flags & MS_RDONLY))
+ btrfs_orphan_cleanup(sub_root);
+ up_read(&root->fs_info->cleanup_work_sem);
+ }
+
return inode;
}
@@ -3869,7 +3953,11 @@ skip:
/* Reached end of directory/root. Bump pos past the last item. */
if (key_type == BTRFS_DIR_INDEX_KEY)
- filp->f_pos = INT_LIMIT(off_t);
+ /*
+ * 32-bit glibc will use getdents64, but then strtol -
+ * so the last number we can serve is this.
+ */
+ filp->f_pos = 0x7fffffff;
else
filp->f_pos++;
nopos:
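
The readdir hunk above caps f_pos at 0x7fffffff because, as the new comment notes, 32-bit glibc feeds the 64-bit d_off through strtol(), so anything above LONG_MAX saturates. A quick demonstration (behavior shown is for a 32-bit long):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const char *d_off = "9223372036854775807";   /* old INT_LIMIT(off_t) */
    long v = strtol(d_off, NULL, 10);            /* 32-bit: clamps to LONG_MAX */

    printf("parsed %ld (LONG_MAX=%ld)\n", v, (long)LONG_MAX);
    printf("safe sentinel: %#x\n", 0x7fffffff);
    return 0;
}
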
@@ -4219,7 +4307,7 @@ static int btrfs_mknod(struct inode *dir
if (IS_ERR(inode))
goto out_unlock;
- err = btrfs_init_inode_security(inode, dir);
+ err = btrfs_init_inode_security(trans, inode, dir);
if (err) {
drop_inode = 1;
goto out_unlock;
@@ -4290,7 +4378,7 @@ static int btrfs_create(struct inode *di
if (IS_ERR(inode))
goto out_unlock;
- err = btrfs_init_inode_security(inode, dir);
+ err = btrfs_init_inode_security(trans, inode, dir);
if (err) {
drop_inode = 1;
goto out_unlock;
@@ -4336,6 +4424,10 @@ static int btrfs_link(struct dentry *old
if (inode->i_nlink == 0)
return -ENOENT;
+ /* do not allow sys_link's with other subvols of the same device */
+ if (root->objectid != BTRFS_I(inode)->root->objectid)
+ return -EPERM;
+
/*
* 1 item for inode ref
* 2 items for dir items
@@ -4423,7 +4515,7 @@ static int btrfs_mkdir(struct inode *dir
drop_on_err = 1;
- err = btrfs_init_inode_security(inode, dir);
+ err = btrfs_init_inode_security(trans, inode, dir);
if (err)
goto out_fail;
@@ -5074,17 +5166,20 @@ static void btrfs_truncate(struct inode
unsigned long nr;
u64 mask = root->sectorsize - 1;
- if (!S_ISREG(inode->i_mode))
- return;
- if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+ if (!S_ISREG(inode->i_mode)) {
+ WARN_ON(1);
return;
+ }
ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
if (ret)
return;
+
btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
+ btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
trans = btrfs_start_transaction(root, 1);
+ btrfs_set_trans_block_group(trans, inode);
/*
* setattr is responsible for setting the ordered_data_close flag,
@@ -5106,21 +5201,32 @@ static void btrfs_truncate(struct inode
if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
btrfs_add_ordered_operation(trans, root, inode);
- btrfs_set_trans_block_group(trans, inode);
- btrfs_i_size_write(inode, inode->i_size);
+ while (1) {
+ ret = btrfs_truncate_inode_items(trans, root, inode,
+ inode->i_size,
+ BTRFS_EXTENT_DATA_KEY);
+ if (ret != -EAGAIN)
+ break;
- ret = btrfs_orphan_add(trans, inode);
- if (ret)
- goto out;
- /* FIXME, add redo link to tree so we don't leak on crash */
- ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size,
- BTRFS_EXTENT_DATA_KEY);
- btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, inode);
+ BUG_ON(ret);
- ret = btrfs_orphan_del(trans, inode);
+ nr = trans->blocks_used;
+ btrfs_end_transaction(trans, root);
+ btrfs_btree_balance_dirty(root, nr);
+
+ trans = btrfs_start_transaction(root, 1);
+ btrfs_set_trans_block_group(trans, inode);
+ }
+
+ if (ret == 0 && inode->i_nlink > 0) {
+ ret = btrfs_orphan_del(trans, inode);
+ BUG_ON(ret);
+ }
+
+ ret = btrfs_update_inode(trans, root, inode);
BUG_ON(ret);
-out:
nr = trans->blocks_used;
ret = btrfs_end_transaction_throttle(trans, root);
BUG_ON(ret);
@@ -5217,9 +5323,9 @@ void btrfs_destroy_inode(struct inode *i
spin_lock(&root->list_lock);
if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
- printk(KERN_ERR "BTRFS: inode %lu: inode still on the orphan"
- " list\n", inode->i_ino);
- dump_stack();
+ printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n",
+ inode->i_ino);
+ list_del_init(&BTRFS_I(inode)->i_orphan);
}
spin_unlock(&root->list_lock);
@@ -5476,7 +5582,7 @@ out_fail:
* some fairly slow code that needs optimization. This walks the list
* of all the inodes with pending delalloc and forces them to disk.
*/
-int btrfs_start_delalloc_inodes(struct btrfs_root *root)
+int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
{
struct list_head *head = &root->fs_info->delalloc_inodes;
struct btrfs_inode *binode;
@@ -5495,7 +5601,10 @@ int btrfs_start_delalloc_inodes(struct b
spin_unlock(&root->fs_info->delalloc_lock);
if (inode) {
filemap_flush(inode->i_mapping);
- iput(inode);
+ if (delay_iput)
+ btrfs_add_delayed_iput(inode);
+ else
+ iput(inode);
}
cond_resched();
spin_lock(&root->fs_info->delalloc_lock);
@@ -5569,7 +5678,7 @@ static int btrfs_symlink(struct inode *d
if (IS_ERR(inode))
goto out_unlock;
- err = btrfs_init_inode_security(inode, dir);
+ err = btrfs_init_inode_security(trans, inode, dir);
if (err) {
drop_inode = 1;
goto out_unlock;
@@ -5641,57 +5750,77 @@ out_fail:
return err;
}
-static int prealloc_file_range(struct btrfs_trans_handle *trans,
- struct inode *inode, u64 start, u64 end,
- u64 locked_end, u64 alloc_hint, int mode)
+static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
+ u64 alloc_hint, int mode, loff_t actual_len)
{
+ struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_key ins;
u64 alloc_size;
u64 cur_offset = start;
u64 num_bytes = end - start;
int ret = 0;
+ u64 i_size;
while (num_bytes > 0) {
alloc_size = min(num_bytes, root->fs_info->max_extent);
- ret = btrfs_reserve_metadata_space(root, 1);
- if (ret)
- goto out;
+ trans = btrfs_start_transaction(root, 1);
ret = btrfs_reserve_extent(trans, root, alloc_size,
root->sectorsize, 0, alloc_hint,
(u64)-1, &ins, 1);
if (ret) {
WARN_ON(1);
- goto out;
+ goto stop_trans;
}
+
+ ret = btrfs_reserve_metadata_space(root, 3);
+ if (ret) {
+ btrfs_free_reserved_extent(root, ins.objectid,
+ ins.offset);
+ goto stop_trans;
+ }
+
ret = insert_reserved_file_extent(trans, inode,
cur_offset, ins.objectid,
ins.offset, ins.offset,
- ins.offset, locked_end,
- 0, 0, 0,
+ ins.offset, 0, 0, 0,
BTRFS_FILE_EXTENT_PREALLOC);
BUG_ON(ret);
btrfs_drop_extent_cache(inode, cur_offset,
cur_offset + ins.offset -1, 0);
+
num_bytes -= ins.offset;
cur_offset += ins.offset;
alloc_hint = ins.objectid + ins.offset;
- btrfs_unreserve_metadata_space(root, 1);
- }
-out:
- if (cur_offset > start) {
+
inode->i_ctime = CURRENT_TIME;
BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- cur_offset > i_size_read(inode))
- btrfs_i_size_write(inode, cur_offset);
+ (actual_len > inode->i_size) &&
+ (cur_offset > inode->i_size)) {
+
+ if (cur_offset > actual_len)
+ i_size = actual_len;
+ else
+ i_size = cur_offset;
+ i_size_write(inode, i_size);
+ btrfs_ordered_update_i_size(inode, i_size, NULL);
+ }
+
ret = btrfs_update_inode(trans, root, inode);
BUG_ON(ret);
+
+ btrfs_end_transaction(trans, root);
+ btrfs_unreserve_metadata_space(root, 3);
}
+ return ret;
+stop_trans:
+ btrfs_end_transaction(trans, root);
return ret;
+
}
static long btrfs_fallocate(struct inode *inode, int mode,
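
prealloc_file_range() above now opens and closes a transaction per allocated chunk instead of holding one across the whole range, so no single transaction pins the full preallocation. The loop shape, as a sketch with hypothetical reserve()/record() helpers:

#include <stdint.h>

#define MAX_EXTENT (128u << 20)      /* stand-in for fs_info->max_extent */

extern int  reserve(uint64_t off, uint64_t len);
extern void record(uint64_t off, uint64_t len);

int prealloc_range(uint64_t start, uint64_t end)
{
    uint64_t cur = start;

    while (cur < end) {
        uint64_t len = end - cur;
        if (len > MAX_EXTENT)
            len = MAX_EXTENT;
        /* begin_transaction(); */
        if (reserve(cur, len))
            return -1;               /* unwind, as the stop_trans label does */
        record(cur, len);            /* insert extent, bump i_size if needed */
        /* end_transaction(); */
        cur += len;
    }
    return 0;
}
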
@@ -5705,8 +5834,6 @@ static long btrfs_fallocate(struct inode
u64 locked_end;
u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
struct extent_map *em;
- struct btrfs_trans_handle *trans;
- struct btrfs_root *root;
int ret;
alloc_start = offset & ~mask;
@@ -5725,9 +5852,7 @@ static long btrfs_fallocate(struct inode
goto out;
}
- root = BTRFS_I(inode)->root;
-
- ret = btrfs_check_data_free_space(root, inode,
+ ret = btrfs_check_data_free_space(BTRFS_I(inode)->root, inode,
alloc_end - alloc_start);
if (ret)
goto out;
@@ -5736,12 +5861,6 @@ static long btrfs_fallocate(struct inode
while (1) {
struct btrfs_ordered_extent *ordered;
- trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
- if (!trans) {
- ret = -EIO;
- goto out_free;
- }
-
/* the extent lock is ordered inside the running
* transaction
*/
@@ -5755,8 +5874,6 @@ static long btrfs_fallocate(struct inode
btrfs_put_ordered_extent(ordered);
unlock_extent(&BTRFS_I(inode)->io_tree,
alloc_start, locked_end, GFP_NOFS);
- btrfs_end_transaction(trans, BTRFS_I(inode)->root);
-
/*
* we can't wait on the range with the transaction
* running or with the extent lock held
@@ -5777,10 +5894,12 @@ static long btrfs_fallocate(struct inode
BUG_ON(IS_ERR(em) || !em);
last_byte = min(extent_map_end(em), alloc_end);
last_byte = (last_byte + mask) & ~mask;
- if (em->block_start == EXTENT_MAP_HOLE) {
- ret = prealloc_file_range(trans, inode, cur_offset,
- last_byte, locked_end + 1,
- alloc_hint, mode);
+ if (em->block_start == EXTENT_MAP_HOLE ||
+ (cur_offset >= inode->i_size &&
+ !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
+ ret = prealloc_file_range(inode,
+ cur_offset, last_byte,
+ alloc_hint, mode, offset+len);
if (ret < 0) {
free_extent_map(em);
break;
@@ -5799,9 +5918,8 @@ static long btrfs_fallocate(struct inode
unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
GFP_NOFS);
- btrfs_end_transaction(trans, BTRFS_I(inode)->root);
-out_free:
- btrfs_free_reserved_data_space(root, inode, alloc_end - alloc_start);
+ btrfs_free_reserved_data_space(BTRFS_I(inode)->root, inode,
+ alloc_end - alloc_start);
out:
mutex_unlock(&inode->i_mutex);
return ret;
file:b9840fa0982ead5db3562b859aad727cb7d9953f -> file:0bc5776950612f7b4b8e6e0d7794d97b2c7f294b
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -237,7 +237,6 @@ static noinline int create_subvol(struct
u64 objectid;
u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
u64 index = 0;
- unsigned long nr = 1;
/*
* 1 - inode item
@@ -290,7 +289,7 @@ static noinline int create_subvol(struct
btrfs_set_root_generation(&root_item, trans->transid);
btrfs_set_root_level(&root_item, 0);
btrfs_set_root_refs(&root_item, 1);
- btrfs_set_root_used(&root_item, 0);
+ btrfs_set_root_used(&root_item, leaf->len);
btrfs_set_root_last_snapshot(&root_item, 0);
memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress));
@@ -342,24 +341,21 @@ static noinline int create_subvol(struct
d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry));
fail:
- nr = trans->blocks_used;
err = btrfs_commit_transaction(trans, root);
if (err && !ret)
ret = err;
btrfs_unreserve_metadata_space(root, 6);
- btrfs_btree_balance_dirty(root, nr);
return ret;
}
static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
char *name, int namelen)
{
+ struct inode *inode;
struct btrfs_pending_snapshot *pending_snapshot;
struct btrfs_trans_handle *trans;
- int ret = 0;
- int err;
- unsigned long nr = 0;
+ int ret;
if (!root->ref_cows)
return -EINVAL;
@@ -372,20 +368,20 @@ static int create_snapshot(struct btrfs_
*/
ret = btrfs_reserve_metadata_space(root, 6);
if (ret)
- goto fail_unlock;
+ goto fail;
pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
if (!pending_snapshot) {
ret = -ENOMEM;
btrfs_unreserve_metadata_space(root, 6);
- goto fail_unlock;
+ goto fail;
}
pending_snapshot->name = kmalloc(namelen + 1, GFP_NOFS);
if (!pending_snapshot->name) {
ret = -ENOMEM;
kfree(pending_snapshot);
btrfs_unreserve_metadata_space(root, 6);
- goto fail_unlock;
+ goto fail;
}
memcpy(pending_snapshot->name, name, namelen);
pending_snapshot->name[namelen] = '\0';
@@ -395,10 +391,19 @@ static int create_snapshot(struct btrfs_
pending_snapshot->root = root;
list_add(&pending_snapshot->list,
&trans->transaction->pending_snapshots);
- err = btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans, root);
+ BUG_ON(ret);
+ btrfs_unreserve_metadata_space(root, 6);
-fail_unlock:
- btrfs_btree_balance_dirty(root, nr);
+ inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ goto fail;
+ }
+ BUG_ON(!inode);
+ d_instantiate(dentry, inode);
+ ret = 0;
+fail:
return ret;
}
@@ -1032,8 +1037,7 @@ static noinline long btrfs_ioctl_clone(s
BUG_ON(!trans);
/* punch hole in destination first */
- btrfs_drop_extents(trans, root, inode, off, off + len,
- off + len, 0, &hint_byte, 1);
+ btrfs_drop_extents(trans, inode, off, off + len, &hint_byte, 1);
/* clone data */
key.objectid = src->i_ino;
file:5799bc46a30993a1cb477fb3a4dd9db23dcad6da -> file:5c2a9e78a949a03ff239db3b99843c258ce7cd84
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -291,16 +291,16 @@ int btrfs_put_ordered_extent(struct btrf
/*
* remove an ordered extent from the tree. No references are dropped
- * but, anyone waiting on this extent is woken up.
+ * and you must wake_up entry->wait. You must hold the tree mutex
+ * while you call this function.
*/
-int btrfs_remove_ordered_extent(struct inode *inode,
+static int __btrfs_remove_ordered_extent(struct inode *inode,
struct btrfs_ordered_extent *entry)
{
struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
tree = &BTRFS_I(inode)->ordered_tree;
- mutex_lock(&tree->mutex);
node = &entry->rb_node;
rb_erase(node, &tree->tree);
tree->last = NULL;
@@ -326,16 +326,34 @@ int btrfs_remove_ordered_extent(struct i
}
spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
+ return 0;
+}
+
+/*
+ * remove an ordered extent from the tree. No references are dropped
+ * but any waiters are woken.
+ */
+int btrfs_remove_ordered_extent(struct inode *inode,
+ struct btrfs_ordered_extent *entry)
+{
+ struct btrfs_ordered_inode_tree *tree;
+ int ret;
+
+ tree = &BTRFS_I(inode)->ordered_tree;
+ mutex_lock(&tree->mutex);
+ ret = __btrfs_remove_ordered_extent(inode, entry);
mutex_unlock(&tree->mutex);
wake_up(&entry->wait);
- return 0;
+
+ return ret;
}
/*
* wait for all the ordered extents in a root. This is done when balancing
* space between drives.
*/
-int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only)
+int btrfs_wait_ordered_extents(struct btrfs_root *root,
+ int nocow_only, int delay_iput)
{
struct list_head splice;
struct list_head *cur;
@@ -372,7 +390,10 @@ int btrfs_wait_ordered_extents(struct bt
if (inode) {
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
- iput(inode);
+ if (delay_iput)
+ btrfs_add_delayed_iput(inode);
+ else
+ iput(inode);
} else {
btrfs_put_ordered_extent(ordered);
}
@@ -430,7 +451,7 @@ again:
btrfs_wait_ordered_range(inode, 0, (u64)-1);
else
filemap_flush(inode->i_mapping);
- iput(inode);
+ btrfs_add_delayed_iput(inode);
}
cond_resched();
@@ -589,7 +610,7 @@ out:
* After an extent is done, call this to conditionally update the on disk
* i_size. i_size is updated to cover any fully written part of the file.
*/
-int btrfs_ordered_update_i_size(struct inode *inode,
+int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
struct btrfs_ordered_extent *ordered)
{
struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
@@ -597,18 +618,32 @@ int btrfs_ordered_update_i_size(struct i
u64 disk_i_size;
u64 new_i_size;
u64 i_size_test;
+ u64 i_size = i_size_read(inode);
struct rb_node *node;
+ struct rb_node *prev = NULL;
struct btrfs_ordered_extent *test;
+ int ret = 1;
+
+ if (ordered)
+ offset = entry_end(ordered);
+ else
+ offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
mutex_lock(&tree->mutex);
disk_i_size = BTRFS_I(inode)->disk_i_size;
+ /* truncate file */
+ if (disk_i_size > i_size) {
+ BTRFS_I(inode)->disk_i_size = i_size;
+ ret = 0;
+ goto out;
+ }
+
/*
* if the disk i_size is already at the inode->i_size, or
* this ordered extent is inside the disk i_size, we're done
*/
- if (disk_i_size >= inode->i_size ||
- ordered->file_offset + ordered->len <= disk_i_size) {
+ if (disk_i_size == i_size || offset <= disk_i_size) {
goto out;
}
@@ -616,8 +651,7 @@ int btrfs_ordered_update_i_size(struct i
* we can't update the disk_isize if there are delalloc bytes
* between disk_i_size and this ordered extent
*/
- if (test_range_bit(io_tree, disk_i_size,
- ordered->file_offset + ordered->len - 1,
+ if (test_range_bit(io_tree, disk_i_size, offset - 1,
EXTENT_DELALLOC, 0, NULL)) {
goto out;
}
@@ -626,20 +660,32 @@ int btrfs_ordered_update_i_size(struct i
* if we find an ordered extent then we can't update disk i_size
* yet
*/
- node = &ordered->rb_node;
- while (1) {
- node = rb_prev(node);
- if (!node)
- break;
+ if (ordered) {
+ node = rb_prev(&ordered->rb_node);
+ } else {
+ prev = tree_search(tree, offset);
+ /*
+ * we insert file extents without involving ordered struct,
+ * so there should be no ordered struct covering this offset
+ */
+ if (prev) {
+ test = rb_entry(prev, struct btrfs_ordered_extent,
+ rb_node);
+ BUG_ON(offset_in_entry(test, offset));
+ }
+ node = prev;
+ }
+ while (node) {
test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
if (test->file_offset + test->len <= disk_i_size)
break;
- if (test->file_offset >= inode->i_size)
+ if (test->file_offset >= i_size)
break;
if (test->file_offset >= disk_i_size)
goto out;
+ node = rb_prev(node);
}
- new_i_size = min_t(u64, entry_end(ordered), i_size_read(inode));
+ new_i_size = min_t(u64, offset, i_size);
/*
* at this point, we know we can safely update i_size to at least
@@ -647,7 +693,14 @@ int btrfs_ordered_update_i_size(struct i
* walk forward and see if ios from higher up in the file have
* finished.
*/
- node = rb_next(&ordered->rb_node);
+ if (ordered) {
+ node = rb_next(&ordered->rb_node);
+ } else {
+ if (prev)
+ node = rb_next(prev);
+ else
+ node = rb_first(&tree->tree);
+ }
i_size_test = 0;
if (node) {
/*
@@ -655,10 +708,10 @@ int btrfs_ordered_update_i_size(struct i
* between our ordered extent and the next one.
*/
test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
- if (test->file_offset > entry_end(ordered))
+ if (test->file_offset > offset)
i_size_test = test->file_offset;
} else {
- i_size_test = i_size_read(inode);
+ i_size_test = i_size;
}
/*
@@ -667,15 +720,25 @@ int btrfs_ordered_update_i_size(struct i
* are no delalloc bytes in this area, it is safe to update
* disk_i_size to the end of the region.
*/
- if (i_size_test > entry_end(ordered) &&
- !test_range_bit(io_tree, entry_end(ordered), i_size_test - 1,
- EXTENT_DELALLOC, 0, NULL)) {
- new_i_size = min_t(u64, i_size_test, i_size_read(inode));
+ if (i_size_test > offset &&
+ !test_range_bit(io_tree, offset, i_size_test - 1,
+ EXTENT_DELALLOC, 0, NULL)) {
+ new_i_size = min_t(u64, i_size_test, i_size);
}
BTRFS_I(inode)->disk_i_size = new_i_size;
+ ret = 0;
out:
+ /*
+ * we need to remove the ordered extent with the tree lock held
+ * so that other people calling this function don't find our fully
+ * processed ordered entry and skip updating the i_size
+ */
+ if (ordered)
+ __btrfs_remove_ordered_extent(inode, ordered);
mutex_unlock(&tree->mutex);
- return 0;
+ if (ordered)
+ wake_up(&ordered->wait);
+ return ret;
}
/*
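
btrfs_ordered_update_i_size() above only advances the on-disk i_size across bytes whose I/O has completed, stopping at the first gap (pending delalloc or an unfinished ordered extent below the candidate offset). A runnable model of that walk over a sorted list of completed ranges, illustrative only:

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };   /* [start, end), sorted by start */

uint64_t advance_disk_i_size(uint64_t disk_i_size, uint64_t i_size,
                             const struct range *done, int n)
{
    for (int i = 0; i < n && disk_i_size < i_size; i++) {
        if (done[i].end <= disk_i_size)
            continue;                    /* already covered */
        if (done[i].start > disk_i_size)
            break;                       /* gap: I/O still pending below */
        disk_i_size = done[i].end;       /* contiguous, keep walking */
    }
    return disk_i_size < i_size ? disk_i_size : i_size;
}

int main(void)
{
    struct range done[] = { {0, 4096}, {4096, 8192}, {16384, 20480} };
    printf("%llu\n", (unsigned long long)
           advance_disk_i_size(0, 1 << 20, done, 3));   /* prints 8192 */
    return 0;
}
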
file:f82e87488ca8f47d158c37281a3aafa746d5031d -> file:1fe1282ef47c998aa9cde1c0c43e4f088e7e767c
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -150,12 +150,13 @@ void btrfs_start_ordered_extent(struct i
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
-int btrfs_ordered_update_i_size(struct inode *inode,
+int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
struct btrfs_ordered_extent *ordered);
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
-int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only);
int btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode);
+int btrfs_wait_ordered_extents(struct btrfs_root *root,
+ int nocow_only, int delay_iput);
#endif
file:cfcc93c93a7b4db99b87ef3b91b30cc8d2f7e36b -> file:ab7ab53187452aa7794bfcdfb5596385d3cc77b2
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1561,6 +1561,20 @@ static int invalidate_extent_cache(struc
return 0;
}
+static void put_inodes(struct list_head *list)
+{
+ struct inodevec *ivec;
+ while (!list_empty(list)) {
+ ivec = list_entry(list->next, struct inodevec, list);
+ list_del(&ivec->list);
+ while (ivec->nr > 0) {
+ ivec->nr--;
+ iput(ivec->inode[ivec->nr]);
+ }
+ kfree(ivec);
+ }
+}
+
static int find_next_key(struct btrfs_path *path, int level,
struct btrfs_key *key)
@@ -1723,6 +1737,11 @@ static noinline_for_stack int merge_relo
btrfs_btree_balance_dirty(root, nr);
+ /*
+ * put inodes outside transaction, otherwise we may deadlock.
+ */
+ put_inodes(&inode_list);
+
if (replaced && rc->stage == UPDATE_DATA_PTRS)
invalidate_extent_cache(root, &key, &next_key);
}
@@ -1752,19 +1771,7 @@ out:
btrfs_btree_balance_dirty(root, nr);
- /*
- * put inodes while we aren't holding the tree locks
- */
- while (!list_empty(&inode_list)) {
- struct inodevec *ivec;
- ivec = list_entry(inode_list.next, struct inodevec, list);
- list_del(&ivec->list);
- while (ivec->nr > 0) {
- ivec->nr--;
- iput(ivec->inode[ivec->nr]);
- }
- kfree(ivec);
- }
+ put_inodes(&inode_list);
if (replaced && rc->stage == UPDATE_DATA_PTRS)
invalidate_extent_cache(root, &key, &next_key);
@@ -3274,8 +3281,10 @@ static noinline_for_stack int relocate_b
return -ENOMEM;
path = btrfs_alloc_path();
- if (!path)
+ if (!path) {
+ kfree(cluster);
return -ENOMEM;
+ }
rc->extents_found = 0;
rc->extents_skipped = 0;
@@ -3534,8 +3543,8 @@ int btrfs_relocate_block_group(struct bt
(unsigned long long)rc->block_group->key.objectid,
(unsigned long long)rc->block_group->flags);
- btrfs_start_delalloc_inodes(fs_info->tree_root);
- btrfs_wait_ordered_extents(fs_info->tree_root, 0);
+ btrfs_start_delalloc_inodes(fs_info->tree_root, 0);
+ btrfs_wait_ordered_extents(fs_info->tree_root, 0, 0);
while (1) {
rc->extents_found = 0;
@@ -3755,6 +3764,8 @@ out:
BTRFS_DATA_RELOC_TREE_OBJECTID);
if (IS_ERR(fs_root))
err = PTR_ERR(fs_root);
+ else
+ btrfs_orphan_cleanup(fs_root);
}
return err;
}
file:752a5463bf53aee147068c2a0a9f678e12250f38 -> file:a649305b205968e024a967555520581c00f3356e
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -126,8 +126,9 @@ int btrfs_parse_options(struct btrfs_roo
{
struct btrfs_fs_info *info = root->fs_info;
substring_t args[MAX_OPT_ARGS];
- char *p, *num;
+ char *p, *num, *orig;
int intarg;
+ int ret = 0;
if (!options)
return 0;
@@ -140,6 +141,7 @@ int btrfs_parse_options(struct btrfs_roo
if (!options)
return -ENOMEM;
+ orig = options;
while ((p = strsep(&options, ",")) != NULL) {
int token;
@@ -262,12 +264,18 @@ int btrfs_parse_options(struct btrfs_roo
case Opt_discard:
btrfs_set_opt(info->mount_opt, DISCARD);
break;
+ case Opt_err:
+ printk(KERN_INFO "btrfs: unrecognized mount option "
+ "'%s'\n", p);
+ ret = -EINVAL;
+ goto out;
default:
break;
}
}
- kfree(options);
- return 0;
+out:
+ kfree(orig);
+ return ret;
}
/*
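
The parse-options fix above exists because strsep() advances the pointer it is given; freeing the walked pointer instead of the original allocation corrupts the heap, which is why the hunk saves orig and frees that. A minimal demonstration using glibc's strsep():

#define _DEFAULT_SOURCE              /* glibc: expose strsep() */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    char *options = strdup("ssd,discard,noacl");
    char *orig = options;            /* keep the allocation for free() */
    char *p;

    while ((p = strsep(&options, ",")) != NULL)
        printf("token: %s\n", p);

    /* options is NULL here; free(orig), never free(options). */
    free(orig);
    return 0;
}
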
@@ -405,8 +413,8 @@ int btrfs_sync_fs(struct super_block *sb
return 0;
}
- btrfs_start_delalloc_inodes(root);
- btrfs_wait_ordered_extents(root, 0);
+ btrfs_start_delalloc_inodes(root, 0);
+ btrfs_wait_ordered_extents(root, 0, 0);
trans = btrfs_start_transaction(root, 1);
ret = btrfs_commit_transaction(trans, root);
@@ -450,6 +458,8 @@ static int btrfs_show_options(struct seq
seq_puts(seq, ",notreelog");
if (btrfs_test_opt(root, FLUSHONCOMMIT))
seq_puts(seq, ",flushoncommit");
+ if (btrfs_test_opt(root, DISCARD))
+ seq_puts(seq, ",discard");
if (!(root->fs_info->sb->s_flags & MS_POSIXACL))
seq_puts(seq, ",noacl");
return 0;
file:c207e8c32c9bfc5212e111edb440adf4b59f2059 -> file:b2acc79f1b342e651a4c31da168c2952efa57ff2
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -333,6 +333,9 @@ static int __btrfs_end_transaction(struc
memset(trans, 0, sizeof(*trans));
kmem_cache_free(btrfs_trans_handle_cachep, trans);
+ if (throttle)
+ btrfs_run_delayed_iputs(root);
+
return 0;
}
@@ -354,7 +357,7 @@ int btrfs_end_transaction_throttle(struc
* those extents are sent to disk but does not wait on them
*/
int btrfs_write_marked_extents(struct btrfs_root *root,
- struct extent_io_tree *dirty_pages)
+ struct extent_io_tree *dirty_pages, int mark)
{
int ret;
int err = 0;
@@ -367,7 +370,7 @@ int btrfs_write_marked_extents(struct bt
while (1) {
ret = find_first_extent_bit(dirty_pages, start, &start, &end,
- EXTENT_DIRTY);
+ mark);
if (ret)
break;
while (start <= end) {
@@ -413,7 +416,7 @@ int btrfs_write_marked_extents(struct bt
* on all the pages and clear them from the dirty pages state tree
*/
int btrfs_wait_marked_extents(struct btrfs_root *root,
- struct extent_io_tree *dirty_pages)
+ struct extent_io_tree *dirty_pages, int mark)
{
int ret;
int err = 0;
@@ -425,12 +428,12 @@ int btrfs_wait_marked_extents(struct btr
unsigned long index;
while (1) {
- ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
- EXTENT_DIRTY);
+ ret = find_first_extent_bit(dirty_pages, start, &start, &end,
+ mark);
if (ret)
break;
- clear_extent_dirty(dirty_pages, start, end, GFP_NOFS);
+ clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
while (start <= end) {
index = start >> PAGE_CACHE_SHIFT;
start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
@@ -460,13 +463,13 @@ int btrfs_wait_marked_extents(struct btr
* those extents are on disk for transaction or log commit
*/
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
- struct extent_io_tree *dirty_pages)
+ struct extent_io_tree *dirty_pages, int mark)
{
int ret;
int ret2;
- ret = btrfs_write_marked_extents(root, dirty_pages);
- ret2 = btrfs_wait_marked_extents(root, dirty_pages);
+ ret = btrfs_write_marked_extents(root, dirty_pages, mark);
+ ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
return ret || ret2;
}
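
The mark parameter threaded through the marked-extent helpers above replaces a hardcoded EXTENT_DIRTY with caller-chosen bits, so the transaction commit and the log commit can track their pages under different flags in the same tree. A sketch of the pattern; EXTENT_DIRTY matches the code, the second flag name is made up for the demo:

#include <stdio.h>

#define EXTENT_DIRTY 0x1
#define EXTENT_LOG   0x2             /* hypothetical second mark */

struct ext { unsigned flags; const char *name; };

static void for_each_marked(const struct ext *v, int n, unsigned mark)
{
    for (int i = 0; i < n; i++)
        if (v[i].flags & mark)       /* filter by the caller's mark */
            printf("flush %s\n", v[i].name);
}

int main(void)
{
    struct ext v[] = { {EXTENT_DIRTY, "data"}, {EXTENT_LOG, "log"} };
    for_each_marked(v, 2, EXTENT_DIRTY);   /* commit path */
    for_each_marked(v, 2, EXTENT_LOG);     /* log-commit path */
    return 0;
}
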
@@ -479,7 +482,8 @@ int btrfs_write_and_wait_transaction(str
return filemap_write_and_wait(btree_inode->i_mapping);
}
return btrfs_write_and_wait_marked_extents(root,
- &trans->transaction->dirty_pages);
+ &trans->transaction->dirty_pages,
+ EXTENT_DIRTY);
}
/*
@@ -497,13 +501,16 @@ static int update_cowonly_root(struct bt
{
int ret;
u64 old_root_bytenr;
+ u64 old_root_used;
struct btrfs_root *tree_root = root->fs_info->tree_root;
+ old_root_used = btrfs_root_used(&root->root_item);
btrfs_write_dirty_block_groups(trans, root);
while (1) {
old_root_bytenr = btrfs_root_bytenr(&root->root_item);
- if (old_root_bytenr == root->node->start)
+ if (old_root_bytenr == root->node->start &&
+ old_root_used == btrfs_root_used(&root->root_item))
break;
btrfs_set_root_node(&root->root_item, root->node);
@@ -512,6 +519,7 @@ static int update_cowonly_root(struct bt
&root->root_item);
BUG_ON(ret);
+ old_root_used = btrfs_root_used(&root->root_item);
ret = btrfs_write_dirty_block_groups(trans, root);
BUG_ON(ret);
}
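
The update_cowonly_root() change above turns the commit loop into a fixpoint iteration: writing dirty block groups can change the root's space accounting, which dirties the root item again, so it loops until both the root bytenr and the used-bytes counter stop moving. Generic sketch with stand-in helpers:

#include <stdint.h>

extern uint64_t write_back(void);    /* one pass; returns new "used" value */
extern uint64_t root_bytenr(void);

void commit_root(void)
{
    uint64_t old_used   = (uint64_t)-1;
    uint64_t old_bytenr = (uint64_t)-1;

    for (;;) {
        uint64_t used = write_back();
        if (root_bytenr() == old_bytenr && used == old_used)
            break;                   /* nothing changed: fixpoint reached */
        old_bytenr = root_bytenr();
        old_used   = used;
    }
}
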
@@ -795,7 +803,6 @@ static noinline int create_pending_snaps
memcpy(&pending->root_key, &key, sizeof(key));
fail:
kfree(new_root_item);
- btrfs_unreserve_metadata_space(root, 6);
return ret;
}
@@ -807,7 +814,6 @@ static noinline int finish_pending_snaps
u64 index = 0;
struct btrfs_trans_handle *trans;
struct inode *parent_inode;
- struct inode *inode;
struct btrfs_root *parent_root;
parent_inode = pending->dentry->d_parent->d_inode;
@@ -839,8 +845,6 @@ static noinline int finish_pending_snaps
BUG_ON(ret);
- inode = btrfs_lookup_dentry(parent_inode, pending->dentry);
- d_instantiate(pending->dentry, inode);
fail:
btrfs_end_transaction(trans, fs_info->fs_root);
return ret;
@@ -994,11 +998,11 @@ int btrfs_commit_transaction(struct btrf
mutex_unlock(&root->fs_info->trans_mutex);
if (flush_on_commit) {
- btrfs_start_delalloc_inodes(root);
- ret = btrfs_wait_ordered_extents(root, 0);
+ btrfs_start_delalloc_inodes(root, 1);
+ ret = btrfs_wait_ordered_extents(root, 0, 1);
BUG_ON(ret);
} else if (snap_pending) {
- ret = btrfs_wait_ordered_extents(root, 1);
+ ret = btrfs_wait_ordered_extents(root, 0, 1);
BUG_ON(ret);
}
@@ -1116,6 +1120,10 @@ int btrfs_commit_transaction(struct btrf
current->journal_info = NULL;
kmem_cache_free(btrfs_trans_handle_cachep, trans);
+
+ if (current != root->fs_info->transaction_kthread)
+ btrfs_run_delayed_iputs(root);
+
return ret;
}
file:d4e3e7a6938cddebb56e05ae4131b7055f08be05 -> file:93c7ccb33118f38d420c00c3b790fe0040b7f0ca
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -107,10 +107,10 @@ void btrfs_throttle(struct btrfs_root *r
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
- struct extent_io_tree *dirty_pages);
+ struct extent_io_tree *dirty_pages, int mark);
int btrfs_write_marked_extents(struct btrfs_root *root,
- struct extent_io_tree *dirty_pages);
+ struct extent_io_tree *dirty_pages, int mark);
int btrfs_wait_marked_extents(struct btrfs_root *root,
- struct extent_io_tree *dirty_pages);
+ struct extent_io_tree *dirty_pages, int mark);
int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
#endif
file:741666a7676a80a6553f5b22a37a41dd31971d3e -> file:4a9434b622ecfc0f571c32ea3abf54df3d878123
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -542,8 +542,8 @@ static noinline int replay_one_extent(st
saved_nbytes = inode_get_bytes(inode);
/* drop any overlapping extents */
- ret = btrfs_drop_extents(trans, root, inode,
- start, extent_end, extent_end, start, &alloc_hint, 1);
+ ret = btrfs_drop_extents(trans, inode, start, extent_end,
+ &alloc_hint, 1);
BUG_ON(ret);
if (found_type == BTRFS_FILE_EXTENT_REG ||
@@ -930,6 +930,17 @@ out_nowrite:
return 0;
}
+static int insert_orphan_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, u64 offset)
+{
+ int ret;
+ ret = btrfs_find_orphan_item(root, offset);
+ if (ret > 0)
+ ret = btrfs_insert_orphan_item(trans, root, offset);
+ return ret;
+}
+
+
/*
* There are a few corners where the link count of the file can't
* be properly maintained during replay. So, instead of adding
@@ -997,9 +1008,13 @@ static noinline int fixup_inode_link_cou
}
BTRFS_I(inode)->index_cnt = (u64)-1;
- if (inode->i_nlink == 0 && S_ISDIR(inode->i_mode)) {
- ret = replay_dir_deletes(trans, root, NULL, path,
- inode->i_ino, 1);
+ if (inode->i_nlink == 0) {
+ if (S_ISDIR(inode->i_mode)) {
+ ret = replay_dir_deletes(trans, root, NULL, path,
+ inode->i_ino, 1);
+ BUG_ON(ret);
+ }
+ ret = insert_orphan_item(trans, root, inode->i_ino);
BUG_ON(ret);
}
btrfs_free_path(path);
@@ -1587,7 +1602,6 @@ static int replay_one_buffer(struct btrf
/* inode keys are done during the first stage */
if (key.type == BTRFS_INODE_ITEM_KEY &&
wc->stage == LOG_WALK_REPLAY_INODES) {
- struct inode *inode;
struct btrfs_inode_item *inode_item;
u32 mode;
@@ -1603,31 +1617,16 @@ static int replay_one_buffer(struct btrf
eb, i, &key);
BUG_ON(ret);
- /* for regular files, truncate away
- * extents past the new EOF
+ /* for regular files, make sure corresponding
+ * orphan item exists. extents past the new EOF
+ * will be truncated later by orphan cleanup.
*/
if (S_ISREG(mode)) {
- inode = read_one_inode(root,
- key.objectid);
- BUG_ON(!inode);
-
- ret = btrfs_truncate_inode_items(wc->trans,
- root, inode, inode->i_size,
- BTRFS_EXTENT_DATA_KEY);
+ ret = insert_orphan_item(wc->trans, root,
+ key.objectid);
BUG_ON(ret);
-
- /* if the nlink count is zero here, the iput
- * will free the inode. We bump it to make
- * sure it doesn't get freed until the link
- * count fixup is done
- */
- if (inode->i_nlink == 0) {
- btrfs_inc_nlink(inode);
- btrfs_update_inode(wc->trans,
- root, inode);
- }
- iput(inode);
}
+
ret = link_to_fixup_dir(wc->trans, root,
path, key.objectid);
BUG_ON(ret);
@@ -1977,10 +1976,11 @@ int btrfs_sync_log(struct btrfs_trans_ha
{
int index1;
int index2;
+ int mark;
int ret;
struct btrfs_root *log = root->log_root;
struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
- u64 log_transid = 0;
+ unsigned long log_transid = 0;
mutex_lock(&root->log_mutex);
index1 = root->log_transid % 2;
@@ -2014,24 +2014,29 @@ int btrfs_sync_log(struct btrfs_trans_ha
goto out;
}
+ log_transid = root->log_transid;
+ if (log_transid % 2 == 0)
+ mark = EXTENT_DIRTY;
+ else
+ mark = EXTENT_NEW;
+
/* we start IO on all the marked extents here, but we don't actually
* wait for them until later.
*/
- ret = btrfs_write_marked_extents(log, &log->dirty_log_pages);
+ ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
BUG_ON(ret);
btrfs_set_root_node(&log->root_item, log->node);
root->log_batch = 0;
- log_transid = root->log_transid;
root->log_transid++;
log->log_transid = root->log_transid;
root->log_start_pid = 0;
smp_mb();
/*
- * log tree has been flushed to disk, new modifications of
- * the log will be written to new positions. so it's safe to
- * allow log writers to go in.
+ * IO has been started, blocks of the log tree have WRITTEN flag set
+ * in their headers. new modifications of the log will be written to
+ * new positions. so it's safe to allow log writers to go in.
*/
mutex_unlock(&root->log_mutex);
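
The hunk above picks the extent mark from the log transid's parity: even transids tag their pages EXTENT_DIRTY, odd ones EXTENT_NEW, so two back-to-back log commits can share one dirty_log_pages tree without clearing each other's marks. A sketch of the selection rule (the DEMO_* bit values are placeholders, not the kernel's extent-state flags):

#include <stdio.h>

#define DEMO_EXTENT_DIRTY (1u << 0)
#define DEMO_EXTENT_NEW   (1u << 4)

/* even log transids use DIRTY, odd ones NEW; successive commits
 * therefore never clear each other's marks in the shared tree */
static unsigned int pick_mark(unsigned long log_transid)
{
        return (log_transid % 2 == 0) ? DEMO_EXTENT_DIRTY : DEMO_EXTENT_NEW;
}

int main(void)
{
        unsigned long t;

        for (t = 0; t < 4; t++)
                printf("transid %lu -> mark 0x%x\n", t, pick_mark(t));
        return 0;
}
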
@@ -2052,7 +2057,7 @@ int btrfs_sync_log(struct btrfs_trans_ha
index2 = log_root_tree->log_transid % 2;
if (atomic_read(&log_root_tree->log_commit[index2])) {
- btrfs_wait_marked_extents(log, &log->dirty_log_pages);
+ btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
wait_log_commit(trans, log_root_tree,
log_root_tree->log_transid);
mutex_unlock(&log_root_tree->log_mutex);
@@ -2072,16 +2077,17 @@ int btrfs_sync_log(struct btrfs_trans_ha
* check the full commit flag again
*/
if (root->fs_info->last_trans_log_full_commit == trans->transid) {
- btrfs_wait_marked_extents(log, &log->dirty_log_pages);
+ btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
mutex_unlock(&log_root_tree->log_mutex);
ret = -EAGAIN;
goto out_wake_log_root;
}
ret = btrfs_write_and_wait_marked_extents(log_root_tree,
- &log_root_tree->dirty_log_pages);
+ &log_root_tree->dirty_log_pages,
+ EXTENT_DIRTY | EXTENT_NEW);
BUG_ON(ret);
- btrfs_wait_marked_extents(log, &log->dirty_log_pages);
+ btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
btrfs_set_super_log_root(&root->fs_info->super_for_commit,
log_root_tree->node->start);
@@ -2147,12 +2153,12 @@ int btrfs_free_log(struct btrfs_trans_ha
while (1) {
ret = find_first_extent_bit(&log->dirty_log_pages,
- 0, &start, &end, EXTENT_DIRTY);
+ 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW);
if (ret)
break;
- clear_extent_dirty(&log->dirty_log_pages,
- start, end, GFP_NOFS);
+ clear_extent_bits(&log->dirty_log_pages, start, end,
+ EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
}
if (log->log_transid > 0) {
file:7eda483d7b5aa0cd5e5bd96d149cca6fe2fc8f16 -> file:41ecbb2347f2d3171f7645782ab4e5a39621c84d
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1135,7 +1135,7 @@ int btrfs_rm_device(struct btrfs_root *r
root->fs_info->avail_metadata_alloc_bits;
if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
- root->fs_info->fs_devices->rw_devices <= 4) {
+ root->fs_info->fs_devices->num_devices <= 4) {
printk(KERN_ERR "btrfs: unable to go below four devices "
"on raid10\n");
ret = -EINVAL;
@@ -1143,7 +1143,7 @@ int btrfs_rm_device(struct btrfs_root *r
}
if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
- root->fs_info->fs_devices->rw_devices <= 2) {
+ root->fs_info->fs_devices->num_devices <= 2) {
printk(KERN_ERR "btrfs: unable to go below two "
"devices on raid1\n");
ret = -EINVAL;
@@ -1434,8 +1434,8 @@ int btrfs_init_new_device(struct btrfs_r
return -EINVAL;
bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
- if (!bdev)
- return -EIO;
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
if (root->fs_info->fs_devices->seeding) {
seeding_dev = 1;
@@ -2209,7 +2209,7 @@ static int __btrfs_alloc_chunk(struct bt
max_chunk_size = 10 * calc_size;
min_stripe_size = 64 * 1024 * 1024;
} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
- max_chunk_size = 4 * calc_size;
+ max_chunk_size = 256 * 1024 * 1024;
min_stripe_size = 32 * 1024 * 1024;
} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
calc_size = 8 * 1024 * 1024;
@@ -2538,6 +2538,11 @@ int btrfs_chunk_readonly(struct btrfs_ro
if (!em)
return 1;
+ if (btrfs_test_opt(root, DEGRADED)) {
+ free_extent_map(em);
+ return 0;
+ }
+
map = (struct map_lookup *)em->bdev;
for (i = 0; i < map->num_stripes; i++) {
if (!map->stripes[i].dev->writeable) {
@@ -2649,8 +2654,10 @@ again:
em = lookup_extent_mapping(em_tree, logical, *length);
read_unlock(&em_tree->lock);
- if (!em && unplug_page)
+ if (!em && unplug_page) {
+ kfree(multi);
return 0;
+ }
if (!em) {
printk(KERN_CRIT "unable to find logical %llu len %llu\n",
file:b6dd5967c48a2785074f75db2dacd6a7370a9ec2 -> file:193b58f7d3f3b36d027a85756c917aa7191f99b2
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -85,22 +85,23 @@ out:
return ret;
}
-int __btrfs_setxattr(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
+static int do_setxattr(struct btrfs_trans_handle *trans,
+ struct inode *inode, const char *name,
+ const void *value, size_t size, int flags)
{
struct btrfs_dir_item *di;
struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_trans_handle *trans;
struct btrfs_path *path;
- int ret = 0, mod = 0;
+ size_t name_len = strlen(name);
+ int ret = 0;
+
+ if (name_len + size > BTRFS_MAX_XATTR_SIZE(root))
+ return -ENOSPC;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- trans = btrfs_join_transaction(root, 1);
- btrfs_set_trans_block_group(trans, inode);
-
/* first lets see if we already have this xattr */
di = btrfs_lookup_xattr(trans, root, path, inode->i_ino, name,
strlen(name), -1);
@@ -118,15 +119,12 @@ int __btrfs_setxattr(struct inode *inode
}
ret = btrfs_delete_one_dir_name(trans, root, path, di);
- if (ret)
- goto out;
+ BUG_ON(ret);
btrfs_release_path(root, path);
/* if we don't have a value then we are removing the xattr */
- if (!value) {
- mod = 1;
+ if (!value)
goto out;
- }
} else {
btrfs_release_path(root, path);
@@ -138,20 +136,45 @@ int __btrfs_setxattr(struct inode *inode
}
/* ok we have to create a completely new xattr */
- ret = btrfs_insert_xattr_item(trans, root, name, strlen(name),
- value, size, inode->i_ino);
+ ret = btrfs_insert_xattr_item(trans, root, path, inode->i_ino,
+ name, name_len, value, size);
+ BUG_ON(ret);
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+int __btrfs_setxattr(struct btrfs_trans_handle *trans,
+ struct inode *inode, const char *name,
+ const void *value, size_t size, int flags)
+{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ int ret;
+
+ if (trans)
+ return do_setxattr(trans, inode, name, value, size, flags);
+
+ ret = btrfs_reserve_metadata_space(root, 2);
if (ret)
- goto out;
- mod = 1;
+ return ret;
-out:
- if (mod) {
- inode->i_ctime = CURRENT_TIME;
- ret = btrfs_update_inode(trans, root, inode);
+ trans = btrfs_start_transaction(root, 1);
+ if (!trans) {
+ ret = -ENOMEM;
+ goto out;
}
+ btrfs_set_trans_block_group(trans, inode);
- btrfs_end_transaction(trans, root);
- btrfs_free_path(path);
+ ret = do_setxattr(trans, inode, name, value, size, flags);
+ if (ret)
+ goto out;
+
+ inode->i_ctime = CURRENT_TIME;
+ ret = btrfs_update_inode(trans, root, inode);
+ BUG_ON(ret);
+out:
+ btrfs_end_transaction_throttle(trans, root);
+ btrfs_unreserve_metadata_space(root, 2);
return ret;
}
@@ -314,7 +337,9 @@ int btrfs_setxattr(struct dentry *dentry
if (size == 0)
value = ""; /* empty EA, do not remove */
- return __btrfs_setxattr(dentry->d_inode, name, value, size, flags);
+
+ return __btrfs_setxattr(NULL, dentry->d_inode, name, value, size,
+ flags);
}
int btrfs_removexattr(struct dentry *dentry, const char *name)
@@ -329,10 +354,13 @@ int btrfs_removexattr(struct dentry *den
if (!btrfs_is_valid_xattr(name))
return -EOPNOTSUPP;
- return __btrfs_setxattr(dentry->d_inode, name, NULL, 0, XATTR_REPLACE);
+
+ return __btrfs_setxattr(NULL, dentry->d_inode, name, NULL, 0,
+ XATTR_REPLACE);
}
-int btrfs_xattr_security_init(struct inode *inode, struct inode *dir)
+int btrfs_xattr_security_init(struct btrfs_trans_handle *trans,
+ struct inode *inode, struct inode *dir)
{
int err;
size_t len;
@@ -354,7 +382,7 @@ int btrfs_xattr_security_init(struct ino
} else {
strcpy(name, XATTR_SECURITY_PREFIX);
strcpy(name + XATTR_SECURITY_PREFIX_LEN, suffix);
- err = __btrfs_setxattr(inode, name, value, len, 0);
+ err = __btrfs_setxattr(trans, inode, name, value, len, 0);
kfree(name);
}
file:c71e9c3cf3f749e8981d19433bef8ada685383aa -> file:721efa0346e037c2f6f32c91d7f284fbc580c87f
--- a/fs/btrfs/xattr.h
+++ b/fs/btrfs/xattr.h
@@ -27,15 +27,16 @@ extern struct xattr_handler *btrfs_xattr
extern ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
void *buffer, size_t size);
-extern int __btrfs_setxattr(struct inode *inode, const char *name,
- const void *value, size_t size, int flags);
-
+extern int __btrfs_setxattr(struct btrfs_trans_handle *trans,
+ struct inode *inode, const char *name,
+ const void *value, size_t size, int flags);
extern ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
void *buffer, size_t size);
extern int btrfs_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
extern int btrfs_removexattr(struct dentry *dentry, const char *name);
-extern int btrfs_xattr_security_init(struct inode *inode, struct inode *dir);
+extern int btrfs_xattr_security_init(struct btrfs_trans_handle *trans,
+ struct inode *inode, struct inode *dir);
#endif /* __XATTR__ */
file:d6db933df2b27ce43c0fe31d6e35bb0968598891 -> file:be7613ebb6618fcb25bd2de01f44ffbac554f6cb
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -39,7 +39,9 @@ struct backing_dev_info directly_mappabl
#endif
/* permit direct mmap, for read, write or exec */
BDI_CAP_MAP_DIRECT |
- BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP),
+ BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP |
+ /* no writeback happens */
+ BDI_CAP_NO_ACCT_AND_WRITEBACK),
};
static struct kobj_map *cdev_map;
file:16f31c129497e2679d11547c17ccca1e4cfffb86 -> file:31da21f0654cf1aa0249e35942a700f94388b1f7
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -226,7 +226,7 @@ failed_put_cred:
return ret;
}
-void __exit cifs_exit_dns_resolver(void)
+void cifs_exit_dns_resolver(void)
{
key_revoke(dns_resolver_cache->thread_keyring);
unregister_key_type(&key_type_dns_resolver);
file:26b9eaa9f5ee5898ae4241412e4127512fc95ec4 -> file:763237aa2a23e0bcd3fc0ed65fcd6e0ee7c2ea8d
--- a/fs/cifs/dns_resolve.h
+++ b/fs/cifs/dns_resolve.h
@@ -24,8 +24,10 @@
#define _DNS_RESOLVE_H
#ifdef __KERNEL__
+#include <linux/module.h>
+
extern int __init cifs_init_dns_resolver(void);
-extern void __exit cifs_exit_dns_resolver(void);
+extern void cifs_exit_dns_resolver(void);
extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr);
#endif /* KERNEL */
file:dc2ad6008b2d08a354ce23507c44febb121b6466 -> file:4314f0d48d85668e0d0af943cdb2b7d8fde4863a
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -33,10 +33,10 @@ void dlm_del_ast(struct dlm_lkb *lkb)
spin_unlock(&ast_queue_lock);
}
-void dlm_add_ast(struct dlm_lkb *lkb, int type, int bastmode)
+void dlm_add_ast(struct dlm_lkb *lkb, int type, int mode)
{
if (lkb->lkb_flags & DLM_IFL_USER) {
- dlm_user_add_ast(lkb, type, bastmode);
+ dlm_user_add_ast(lkb, type, mode);
return;
}
@@ -44,10 +44,21 @@ void dlm_add_ast(struct dlm_lkb *lkb, in
if (!(lkb->lkb_ast_type & (AST_COMP | AST_BAST))) {
kref_get(&lkb->lkb_ref);
list_add_tail(&lkb->lkb_astqueue, &ast_queue);
+ lkb->lkb_ast_first = type;
}
+
+ /* sanity check, this should not happen */
+
+ if ((type == AST_COMP) && (lkb->lkb_ast_type & AST_COMP))
+ log_print("repeat cast %d castmode %d lock %x %s",
+ mode, lkb->lkb_castmode,
+ lkb->lkb_id, lkb->lkb_resource->res_name);
+
lkb->lkb_ast_type |= type;
- if (bastmode)
- lkb->lkb_bastmode = bastmode;
+ if (type == AST_BAST)
+ lkb->lkb_bastmode = mode;
+ else
+ lkb->lkb_castmode = mode;
spin_unlock(&ast_queue_lock);
set_bit(WAKE_ASTS, &astd_wakeflags);
@@ -59,9 +70,9 @@ static void process_asts(void)
struct dlm_ls *ls = NULL;
struct dlm_rsb *r = NULL;
struct dlm_lkb *lkb;
- void (*cast) (void *astparam);
- void (*bast) (void *astparam, int mode);
- int type = 0, bastmode;
+ void (*castfn) (void *astparam);
+ void (*bastfn) (void *astparam, int mode);
+ int type, first, bastmode, castmode, do_bast, do_cast, last_castmode;
repeat:
spin_lock(&ast_queue_lock);
@@ -75,17 +86,48 @@ repeat:
list_del(&lkb->lkb_astqueue);
type = lkb->lkb_ast_type;
lkb->lkb_ast_type = 0;
+ first = lkb->lkb_ast_first;
+ lkb->lkb_ast_first = 0;
bastmode = lkb->lkb_bastmode;
-
+ castmode = lkb->lkb_castmode;
+ castfn = lkb->lkb_astfn;
+ bastfn = lkb->lkb_bastfn;
spin_unlock(&ast_queue_lock);
- cast = lkb->lkb_astfn;
- bast = lkb->lkb_bastfn;
- if ((type & AST_COMP) && cast)
- cast(lkb->lkb_astparam);
+ do_cast = (type & AST_COMP) && castfn;
+ do_bast = (type & AST_BAST) && bastfn;
+
+ /* Skip a bast if its blocking mode is compatible with the
+ granted mode of the preceding cast. */
- if ((type & AST_BAST) && bast)
- bast(lkb->lkb_astparam, bastmode);
+ if (do_bast) {
+ if (first == AST_COMP)
+ last_castmode = castmode;
+ else
+ last_castmode = lkb->lkb_castmode_done;
+ if (dlm_modes_compat(bastmode, last_castmode))
+ do_bast = 0;
+ }
+
+ if (first == AST_COMP) {
+ if (do_cast)
+ castfn(lkb->lkb_astparam);
+ if (do_bast)
+ bastfn(lkb->lkb_astparam, bastmode);
+ } else if (first == AST_BAST) {
+ if (do_bast)
+ bastfn(lkb->lkb_astparam, bastmode);
+ if (do_cast)
+ castfn(lkb->lkb_astparam);
+ } else {
+ log_error(ls, "bad ast_first %d ast_type %d",
+ first, type);
+ }
+
+ if (do_cast)
+ lkb->lkb_castmode_done = castmode;
+ if (do_bast)
+ lkb->lkb_bastmode_done = bastmode;
/* this removes the reference added by dlm_add_ast
and may result in the lkb being freed */
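
Two behaviours change in process_asts() above: callbacks are now delivered in the order they were queued (recorded in lkb_ast_first), and a bast whose blocking mode is compatible with the last granted mode is dropped as stale. A userspace sketch of that decision logic, assuming a trivial stand-in for dlm_modes_compat():

#include <stdbool.h>
#include <stdio.h>

enum { AST_COMP = 1, AST_BAST = 2 };

/* placeholder for dlm_modes_compat(); the kernel consults a full
 * six-mode compatibility matrix, here only NL (0) is compatible */
static bool modes_compat(int bastmode, int castmode)
{
        return bastmode == 0 || castmode == 0;
}

static void deliver(int first, int type, int bastmode, int castmode,
                    int last_castmode)
{
        bool do_cast = type & AST_COMP;
        bool do_bast = type & AST_BAST;

        /* a queued bast is stale if its blocking mode is already
         * compatible with the most recently granted mode */
        if (do_bast) {
                int ref = (first == AST_COMP) ? castmode : last_castmode;
                if (modes_compat(bastmode, ref))
                        do_bast = false;
        }

        if (first == AST_COMP) {        /* deliver in queue order */
                if (do_cast)
                        printf("cast(mode=%d)\n", castmode);
                if (do_bast)
                        printf("bast(mode=%d)\n", bastmode);
        } else {
                if (do_bast)
                        printf("bast(mode=%d)\n", bastmode);
                if (do_cast)
                        printf("cast(mode=%d)\n", castmode);
        }
}

int main(void)
{
        deliver(AST_COMP, AST_COMP | AST_BAST, 5, 3, -1); /* both fire */
        deliver(AST_COMP, AST_COMP | AST_BAST, 5, 0, -1); /* bast skipped */
        return 0;
}
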
file:1b5fc5f428fdd2d0dc01050be27de84c65a2b659 -> file:bcb1aaba519d97c031b06b356e7aa9051a7ecb76
--- a/fs/dlm/ast.h
+++ b/fs/dlm/ast.h
@@ -1,7 +1,7 @@
/******************************************************************************
*******************************************************************************
**
-** Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2005-2010 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -13,7 +13,7 @@
#ifndef __ASTD_DOT_H__
#define __ASTD_DOT_H__
-void dlm_add_ast(struct dlm_lkb *lkb, int type, int bastmode);
+void dlm_add_ast(struct dlm_lkb *lkb, int type, int mode);
void dlm_del_ast(struct dlm_lkb *lkb);
void dlm_astd_wake(void);
file:fd9859f92fad3e05ed2ab372d226d6e61fdedfb8 -> file:0df243850818340c4423fa56ae9264151285c0a1
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -410,10 +410,10 @@ static struct config_group *make_cluster
struct dlm_comms *cms = NULL;
void *gps = NULL;
- cl = kzalloc(sizeof(struct dlm_cluster), GFP_KERNEL);
- gps = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
- sps = kzalloc(sizeof(struct dlm_spaces), GFP_KERNEL);
- cms = kzalloc(sizeof(struct dlm_comms), GFP_KERNEL);
+ cl = kzalloc(sizeof(struct dlm_cluster), GFP_NOFS);
+ gps = kcalloc(3, sizeof(struct config_group *), GFP_NOFS);
+ sps = kzalloc(sizeof(struct dlm_spaces), GFP_NOFS);
+ cms = kzalloc(sizeof(struct dlm_comms), GFP_NOFS);
if (!cl || !gps || !sps || !cms)
goto fail;
@@ -482,9 +482,9 @@ static struct config_group *make_space(s
struct dlm_nodes *nds = NULL;
void *gps = NULL;
- sp = kzalloc(sizeof(struct dlm_space), GFP_KERNEL);
- gps = kcalloc(2, sizeof(struct config_group *), GFP_KERNEL);
- nds = kzalloc(sizeof(struct dlm_nodes), GFP_KERNEL);
+ sp = kzalloc(sizeof(struct dlm_space), GFP_NOFS);
+ gps = kcalloc(2, sizeof(struct config_group *), GFP_NOFS);
+ nds = kzalloc(sizeof(struct dlm_nodes), GFP_NOFS);
if (!sp || !gps || !nds)
goto fail;
@@ -536,7 +536,7 @@ static struct config_item *make_comm(str
{
struct dlm_comm *cm;
- cm = kzalloc(sizeof(struct dlm_comm), GFP_KERNEL);
+ cm = kzalloc(sizeof(struct dlm_comm), GFP_NOFS);
if (!cm)
return ERR_PTR(-ENOMEM);
@@ -569,7 +569,7 @@ static struct config_item *make_node(str
struct dlm_space *sp = config_item_to_space(g->cg_item.ci_parent);
struct dlm_node *nd;
- nd = kzalloc(sizeof(struct dlm_node), GFP_KERNEL);
+ nd = kzalloc(sizeof(struct dlm_node), GFP_NOFS);
if (!nd)
return ERR_PTR(-ENOMEM);
@@ -705,7 +705,7 @@ static ssize_t comm_addr_write(struct dl
if (cm->addr_count >= DLM_MAX_ADDR_COUNT)
return -ENOSPC;
- addr = kzalloc(sizeof(*addr), GFP_KERNEL);
+ addr = kzalloc(sizeof(*addr), GFP_NOFS);
if (!addr)
return -ENOMEM;
@@ -868,7 +868,7 @@ int dlm_nodeid_list(char *lsname, int **
ids_count = sp->members_count;
- ids = kcalloc(ids_count, sizeof(int), GFP_KERNEL);
+ ids = kcalloc(ids_count, sizeof(int), GFP_NOFS);
if (!ids) {
rv = -ENOMEM;
goto out;
@@ -886,7 +886,7 @@ int dlm_nodeid_list(char *lsname, int **
if (!new_count)
goto out_ids;
- new = kcalloc(new_count, sizeof(int), GFP_KERNEL);
+ new = kcalloc(new_count, sizeof(int), GFP_NOFS);
if (!new) {
kfree(ids);
rv = -ENOMEM;
file:1c8bb8c3a82efd92519c16e0a01088e9bf9bf5bf -> file:375a2359b3bfa526fd6c4950a024dfd83ce98d56
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -404,7 +404,7 @@ static void *table_seq_start(struct seq_
if (bucket >= ls->ls_rsbtbl_size)
return NULL;
- ri = kzalloc(sizeof(struct rsbtbl_iter), GFP_KERNEL);
+ ri = kzalloc(sizeof(struct rsbtbl_iter), GFP_NOFS);
if (!ri)
return NULL;
if (n == 0)
file:c4dfa1dcc86f32d43469f5122c22ade5516544a5 -> file:7b84c1dbc82ebeaec0a42846537524c73d1db13f
--- a/fs/dlm/dir.c
+++ b/fs/dlm/dir.c
@@ -49,8 +49,7 @@ static struct dlm_direntry *get_free_de(
spin_unlock(&ls->ls_recover_list_lock);
if (!found)
- de = kzalloc(sizeof(struct dlm_direntry) + len,
- ls->ls_allocation);
+ de = kzalloc(sizeof(struct dlm_direntry) + len, GFP_NOFS);
return de;
}
@@ -212,7 +211,7 @@ int dlm_recover_directory(struct dlm_ls
dlm_dir_clear(ls);
- last_name = kmalloc(DLM_RESNAME_MAXLEN, ls->ls_allocation);
+ last_name = kmalloc(DLM_RESNAME_MAXLEN, GFP_NOFS);
if (!last_name)
goto out;
@@ -323,7 +322,7 @@ static int get_entry(struct dlm_ls *ls,
if (namelen > DLM_RESNAME_MAXLEN)
return -EINVAL;
- de = kzalloc(sizeof(struct dlm_direntry) + namelen, ls->ls_allocation);
+ de = kzalloc(sizeof(struct dlm_direntry) + namelen, GFP_NOFS);
if (!de)
return -ENOMEM;
file:d01ca0a711db15de2f1d57854b9acb875e9c63e0 -> file:f632b58cd2221c71fd7c6454cf70f9d17dfa7e1e
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -232,11 +232,17 @@ struct dlm_lkb {
int8_t lkb_status; /* granted, waiting, convert */
int8_t lkb_rqmode; /* requested lock mode */
int8_t lkb_grmode; /* granted lock mode */
- int8_t lkb_bastmode; /* requested mode */
int8_t lkb_highbast; /* highest mode bast sent for */
+
int8_t lkb_wait_type; /* type of reply waiting for */
int8_t lkb_wait_count;
int8_t lkb_ast_type; /* type of ast queued for */
+ int8_t lkb_ast_first; /* type of first ast queued */
+
+ int8_t lkb_bastmode; /* req mode of queued bast */
+ int8_t lkb_castmode; /* gr mode of queued cast */
+ int8_t lkb_bastmode_done; /* last delivered bastmode */
+ int8_t lkb_castmode_done; /* last delivered castmode */
struct list_head lkb_idtbl_list; /* lockspace lkbtbl */
struct list_head lkb_statequeue; /* rsb g/c/w list */
@@ -473,7 +479,6 @@ struct dlm_ls {
int ls_low_nodeid;
int ls_total_weight;
int *ls_node_array;
- gfp_t ls_allocation;
struct dlm_rsb ls_stub_rsb; /* for returning errors */
struct dlm_lkb ls_stub_lkb; /* for returning errors */
file:eb507c453c5ff7ca221f933619a085461bec3300 -> file:d0e43a3da887b8d9f291810c4b69820bb76b63b7
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1,7 +1,7 @@
/******************************************************************************
*******************************************************************************
**
-** Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2005-2010 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -307,7 +307,7 @@ static void queue_cast(struct dlm_rsb *r
lkb->lkb_lksb->sb_status = rv;
lkb->lkb_lksb->sb_flags = lkb->lkb_sbflags;
- dlm_add_ast(lkb, AST_COMP, 0);
+ dlm_add_ast(lkb, AST_COMP, lkb->lkb_grmode);
}
static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
@@ -2280,20 +2280,30 @@ static int do_request(struct dlm_rsb *r,
if (can_be_queued(lkb)) {
error = -EINPROGRESS;
add_lkb(r, lkb, DLM_LKSTS_WAITING);
- send_blocking_asts(r, lkb);
add_timeout(lkb);
goto out;
}
error = -EAGAIN;
- if (force_blocking_asts(lkb))
- send_blocking_asts_all(r, lkb);
queue_cast(r, lkb, -EAGAIN);
-
out:
return error;
}
+static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
+ int error)
+{
+ switch (error) {
+ case -EAGAIN:
+ if (force_blocking_asts(lkb))
+ send_blocking_asts_all(r, lkb);
+ break;
+ case -EINPROGRESS:
+ send_blocking_asts(r, lkb);
+ break;
+ }
+}
+
static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
int error = 0;
@@ -2304,7 +2314,6 @@ static int do_convert(struct dlm_rsb *r,
if (can_be_granted(r, lkb, 1, &deadlk)) {
grant_lock(r, lkb);
queue_cast(r, lkb, 0);
- grant_pending_locks(r);
goto out;
}
@@ -2334,7 +2343,6 @@ static int do_convert(struct dlm_rsb *r,
if (_can_be_granted(r, lkb, 1)) {
grant_lock(r, lkb);
queue_cast(r, lkb, 0);
- grant_pending_locks(r);
goto out;
}
/* else fall through and move to convert queue */
@@ -2344,28 +2352,47 @@ static int do_convert(struct dlm_rsb *r,
error = -EINPROGRESS;
del_lkb(r, lkb);
add_lkb(r, lkb, DLM_LKSTS_CONVERT);
- send_blocking_asts(r, lkb);
add_timeout(lkb);
goto out;
}
error = -EAGAIN;
- if (force_blocking_asts(lkb))
- send_blocking_asts_all(r, lkb);
queue_cast(r, lkb, -EAGAIN);
-
out:
return error;
}
+static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
+ int error)
+{
+ switch (error) {
+ case 0:
+ grant_pending_locks(r);
+ /* grant_pending_locks also sends basts */
+ break;
+ case -EAGAIN:
+ if (force_blocking_asts(lkb))
+ send_blocking_asts_all(r, lkb);
+ break;
+ case -EINPROGRESS:
+ send_blocking_asts(r, lkb);
+ break;
+ }
+}
+
static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
remove_lock(r, lkb);
queue_cast(r, lkb, -DLM_EUNLOCK);
- grant_pending_locks(r);
return -DLM_EUNLOCK;
}
+static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
+ int error)
+{
+ grant_pending_locks(r);
+}
+
/* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
@@ -2375,12 +2402,18 @@ static int do_cancel(struct dlm_rsb *r,
error = revert_lock(r, lkb);
if (error) {
queue_cast(r, lkb, -DLM_ECANCEL);
- grant_pending_locks(r);
return -DLM_ECANCEL;
}
return 0;
}
+static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
+ int error)
+{
+ if (error)
+ grant_pending_locks(r);
+}
+
/*
* Four stage 3 varieties:
* _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
@@ -2402,11 +2435,15 @@ static int _request_lock(struct dlm_rsb
goto out;
}
- if (is_remote(r))
+ if (is_remote(r)) {
/* receive_request() calls do_request() on remote node */
error = send_request(r, lkb);
- else
+ } else {
error = do_request(r, lkb);
+ /* for remote locks the request_reply is sent
+ between do_request and do_request_effects */
+ do_request_effects(r, lkb, error);
+ }
out:
return error;
}
@@ -2417,11 +2454,15 @@ static int _convert_lock(struct dlm_rsb
{
int error;
- if (is_remote(r))
+ if (is_remote(r)) {
/* receive_convert() calls do_convert() on remote node */
error = send_convert(r, lkb);
- else
+ } else {
error = do_convert(r, lkb);
+ /* for remote locks the convert_reply is sent
+ between do_convert and do_convert_effects */
+ do_convert_effects(r, lkb, error);
+ }
return error;
}
@@ -2432,11 +2473,15 @@ static int _unlock_lock(struct dlm_rsb *
{
int error;
- if (is_remote(r))
+ if (is_remote(r)) {
/* receive_unlock() calls do_unlock() on remote node */
error = send_unlock(r, lkb);
- else
+ } else {
error = do_unlock(r, lkb);
+ /* for remote locks the unlock_reply is sent
+ between do_unlock and do_unlock_effects */
+ do_unlock_effects(r, lkb, error);
+ }
return error;
}
@@ -2447,11 +2492,15 @@ static int _cancel_lock(struct dlm_rsb *
{
int error;
- if (is_remote(r))
+ if (is_remote(r)) {
/* receive_cancel() calls do_cancel() on remote node */
error = send_cancel(r, lkb);
- else
+ } else {
error = do_cancel(r, lkb);
+ /* for remote locks the cancel_reply is sent
+ between do_cancel and do_cancel_effects */
+ do_cancel_effects(r, lkb, error);
+ }
return error;
}
@@ -2689,7 +2738,7 @@ static int _create_message(struct dlm_ls
pass into lowcomms_commit and a message buffer (mb) that we
write our data into */
- mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, ls->ls_allocation, &mb);
+ mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_NOFS, &mb);
if (!mh)
return -ENOBUFS;
@@ -3191,6 +3240,7 @@ static void receive_request(struct dlm_l
attach_lkb(r, lkb);
error = do_request(r, lkb);
send_request_reply(r, lkb, error);
+ do_request_effects(r, lkb, error);
unlock_rsb(r);
put_rsb(r);
@@ -3226,15 +3276,19 @@ static void receive_convert(struct dlm_l
goto out;
receive_flags(lkb, ms);
+
error = receive_convert_args(ls, lkb, ms);
- if (error)
- goto out_reply;
+ if (error) {
+ send_convert_reply(r, lkb, error);
+ goto out;
+ }
+
reply = !down_conversion(lkb);
error = do_convert(r, lkb);
- out_reply:
if (reply)
send_convert_reply(r, lkb, error);
+ do_convert_effects(r, lkb, error);
out:
unlock_rsb(r);
put_rsb(r);
@@ -3266,13 +3320,16 @@ static void receive_unlock(struct dlm_ls
goto out;
receive_flags(lkb, ms);
+
error = receive_unlock_args(ls, lkb, ms);
- if (error)
- goto out_reply;
+ if (error) {
+ send_unlock_reply(r, lkb, error);
+ goto out;
+ }
error = do_unlock(r, lkb);
- out_reply:
send_unlock_reply(r, lkb, error);
+ do_unlock_effects(r, lkb, error);
out:
unlock_rsb(r);
put_rsb(r);
@@ -3307,6 +3364,7 @@ static void receive_cancel(struct dlm_ls
error = do_cancel(r, lkb);
send_cancel_reply(r, lkb, error);
+ do_cancel_effects(r, lkb, error);
out:
unlock_rsb(r);
put_rsb(r);
@@ -4512,7 +4570,7 @@ int dlm_user_request(struct dlm_ls *ls,
}
if (flags & DLM_LKF_VALBLK) {
- ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_KERNEL);
+ ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
if (!ua->lksb.sb_lvbptr) {
kfree(ua);
__put_lkb(ls, lkb);
@@ -4582,7 +4640,7 @@ int dlm_user_convert(struct dlm_ls *ls,
ua = lkb->lkb_ua;
if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
- ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_KERNEL);
+ ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
if (!ua->lksb.sb_lvbptr) {
error = -ENOMEM;
goto out_put;
file:d489fcc86713de1d0484902d6e7f911b57377cd0 -> file:c010ecfc0d295525de2455b02bf8a94dd8dbedea
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -430,7 +430,7 @@ static int new_lockspace(const char *nam
error = -ENOMEM;
- ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_KERNEL);
+ ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_NOFS);
if (!ls)
goto out;
memcpy(ls->ls_name, name, namelen);
@@ -443,11 +443,6 @@ static int new_lockspace(const char *nam
if (flags & DLM_LSFL_TIMEWARN)
set_bit(LSFL_TIMEWARN, &ls->ls_flags);
- if (flags & DLM_LSFL_FS)
- ls->ls_allocation = GFP_NOFS;
- else
- ls->ls_allocation = GFP_KERNEL;
-
/* ls_exflags are forced to match among nodes, and we don't
need to require all nodes to have some flags set */
ls->ls_exflags = (flags & ~(DLM_LSFL_TIMEWARN | DLM_LSFL_FS |
@@ -456,7 +451,7 @@ static int new_lockspace(const char *nam
size = dlm_config.ci_rsbtbl_size;
ls->ls_rsbtbl_size = size;
- ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_KERNEL);
+ ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_NOFS);
if (!ls->ls_rsbtbl)
goto out_lsfree;
for (i = 0; i < size; i++) {
@@ -468,7 +463,7 @@ static int new_lockspace(const char *nam
size = dlm_config.ci_lkbtbl_size;
ls->ls_lkbtbl_size = size;
- ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_KERNEL);
+ ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_NOFS);
if (!ls->ls_lkbtbl)
goto out_rsbfree;
for (i = 0; i < size; i++) {
@@ -480,7 +475,7 @@ static int new_lockspace(const char *nam
size = dlm_config.ci_dirtbl_size;
ls->ls_dirtbl_size = size;
- ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_KERNEL);
+ ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_NOFS);
if (!ls->ls_dirtbl)
goto out_lkbfree;
for (i = 0; i < size; i++) {
@@ -527,7 +522,7 @@ static int new_lockspace(const char *nam
mutex_init(&ls->ls_requestqueue_mutex);
mutex_init(&ls->ls_clear_proc_locks);
- ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_KERNEL);
+ ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_NOFS);
if (!ls->ls_recover_buf)
goto out_dirfree;
file:70736eb4b51652e9ad64078180ffc8c154265df4 -> file:52cab160893ce0d095e771dd1abe72a4b170162f
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -1060,7 +1060,7 @@ static void init_local(void)
if (dlm_our_addr(&sas, i))
break;
- addr = kmalloc(sizeof(*addr), GFP_KERNEL);
+ addr = kmalloc(sizeof(*addr), GFP_NOFS);
if (!addr)
break;
memcpy(addr, &sas, sizeof(*addr));
@@ -1099,7 +1099,7 @@ static int sctp_listen_for_all(void)
struct sockaddr_storage localaddr;
struct sctp_event_subscribe subscribe;
int result = -EINVAL, num = 1, i, addr_len;
- struct connection *con = nodeid2con(0, GFP_KERNEL);
+ struct connection *con = nodeid2con(0, GFP_NOFS);
int bufsize = NEEDED_RMEM;
if (!con)
@@ -1171,7 +1171,7 @@ out:
static int tcp_listen_for_all(void)
{
struct socket *sock = NULL;
- struct connection *con = nodeid2con(0, GFP_KERNEL);
+ struct connection *con = nodeid2con(0, GFP_NOFS);
int result = -EINVAL;
if (!con)
file:b128775913b29d6b43aeeec34910af6f9583ce9a -> file:84f70bfb0baf4dfe4e19fee23238783965a637cc
--- a/fs/dlm/member.c
+++ b/fs/dlm/member.c
@@ -48,7 +48,7 @@ static int dlm_add_member(struct dlm_ls
struct dlm_member *memb;
int w, error;
- memb = kzalloc(sizeof(struct dlm_member), ls->ls_allocation);
+ memb = kzalloc(sizeof(struct dlm_member), GFP_NOFS);
if (!memb)
return -ENOMEM;
@@ -143,7 +143,7 @@ static void make_member_array(struct dlm
ls->ls_total_weight = total;
- array = kmalloc(sizeof(int) * total, ls->ls_allocation);
+ array = kmalloc(sizeof(int) * total, GFP_NOFS);
if (!array)
return;
@@ -226,7 +226,7 @@ int dlm_recover_members(struct dlm_ls *l
continue;
log_debug(ls, "new nodeid %d is a re-added member", rv->new[i]);
- memb = kzalloc(sizeof(struct dlm_member), ls->ls_allocation);
+ memb = kzalloc(sizeof(struct dlm_member), GFP_NOFS);
if (!memb)
return -ENOMEM;
memb->nodeid = rv->new[i];
@@ -341,7 +341,7 @@ int dlm_ls_start(struct dlm_ls *ls)
int *ids = NULL, *new = NULL;
int error, ids_count = 0, new_count = 0;
- rv = kzalloc(sizeof(struct dlm_recover), ls->ls_allocation);
+ rv = kzalloc(sizeof(struct dlm_recover), GFP_NOFS);
if (!rv)
return -ENOMEM;
file:c1775b84ebab2e552e4a4a04e5c8491f1fe99e3a -> file:8e0d00db004f8b8fd71cc75b3958c215beb836bd
--- a/fs/dlm/memory.c
+++ b/fs/dlm/memory.c
@@ -39,7 +39,7 @@ char *dlm_allocate_lvb(struct dlm_ls *ls
{
char *p;
- p = kzalloc(ls->ls_lvblen, ls->ls_allocation);
+ p = kzalloc(ls->ls_lvblen, GFP_NOFS);
return p;
}
@@ -57,7 +57,7 @@ struct dlm_rsb *dlm_allocate_rsb(struct
DLM_ASSERT(namelen <= DLM_RESNAME_MAXLEN,);
- r = kzalloc(sizeof(*r) + namelen, ls->ls_allocation);
+ r = kzalloc(sizeof(*r) + namelen, GFP_NOFS);
return r;
}
@@ -72,7 +72,7 @@ struct dlm_lkb *dlm_allocate_lkb(struct
{
struct dlm_lkb *lkb;
- lkb = kmem_cache_zalloc(lkb_cache, ls->ls_allocation);
+ lkb = kmem_cache_zalloc(lkb_cache, GFP_NOFS);
return lkb;
}
file:55ea369f43a9f0d153ca769074b89e71d52082cf -> file:052095cd592f3787ed20f525e1c7c08e0e30378b
--- a/fs/dlm/netlink.c
+++ b/fs/dlm/netlink.c
@@ -26,7 +26,7 @@ static int prepare_data(u8 cmd, struct s
struct sk_buff *skb;
void *data;
- skb = genlmsg_new(size, GFP_KERNEL);
+ skb = genlmsg_new(size, GFP_NOFS);
if (!skb)
return -ENOMEM;
file:16f682e26c07e49493e8a667a6b36cf0cf9f6bb6 -> file:2863deb178e2668566607f1548fc6f454ba18d97
--- a/fs/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -82,7 +82,7 @@ int dlm_posix_lock(dlm_lockspace_t *lock
if (!ls)
return -EINVAL;
- xop = kzalloc(sizeof(*xop), GFP_KERNEL);
+ xop = kzalloc(sizeof(*xop), GFP_NOFS);
if (!xop) {
rv = -ENOMEM;
goto out;
@@ -211,7 +211,7 @@ int dlm_posix_unlock(dlm_lockspace_t *lo
if (!ls)
return -EINVAL;
- op = kzalloc(sizeof(*op), GFP_KERNEL);
+ op = kzalloc(sizeof(*op), GFP_NOFS);
if (!op) {
rv = -ENOMEM;
goto out;
@@ -266,7 +266,7 @@ int dlm_posix_get(dlm_lockspace_t *locks
if (!ls)
return -EINVAL;
- op = kzalloc(sizeof(*op), GFP_KERNEL);
+ op = kzalloc(sizeof(*op), GFP_NOFS);
if (!op) {
rv = -ENOMEM;
goto out;
file:67522c268c14972d20fb9825de128ff66aa2b2a9 -> file:3c83a49a48a3c6d464a779cf3929c6f670073d08
--- a/fs/dlm/rcom.c
+++ b/fs/dlm/rcom.c
@@ -38,7 +38,7 @@ static int create_rcom(struct dlm_ls *ls
char *mb;
int mb_len = sizeof(struct dlm_rcom) + len;
- mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, ls->ls_allocation, &mb);
+ mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_NOFS, &mb);
if (!mh) {
log_print("create_rcom to %d type %d len %d ENOBUFS",
to_nodeid, type, len);
file:7a2307c08911101e9f82b99e0c6bad22c03a6e24 -> file:a44fa22890e1dd06797ae5e43e77a86c0cf4ca63
--- a/fs/dlm/requestqueue.c
+++ b/fs/dlm/requestqueue.c
@@ -35,7 +35,7 @@ void dlm_add_requestqueue(struct dlm_ls
struct rq_entry *e;
int length = ms->m_header.h_length - sizeof(struct dlm_message);
- e = kmalloc(sizeof(struct rq_entry) + length, ls->ls_allocation);
+ e = kmalloc(sizeof(struct rq_entry) + length, GFP_NOFS);
if (!e) {
log_print("dlm_add_requestqueue: out of memory len %d", length);
return;
file:ebce994ab0b717253a0bfa98230c80cefde397d2 -> file:a4bfd31ac45bec4ad5e1010e0ad6a386a338f18b
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006-2010 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
@@ -173,7 +173,7 @@ static int lkb_is_endoflife(struct dlm_l
/* we could possibly check if the cancel of an orphan has resulted in the lkb
being removed and then remove that lkb from the orphans list and free it */
-void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int bastmode)
+void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int mode)
{
struct dlm_ls *ls;
struct dlm_user_args *ua;
@@ -206,8 +206,10 @@ void dlm_user_add_ast(struct dlm_lkb *lk
ast_type = lkb->lkb_ast_type;
lkb->lkb_ast_type |= type;
- if (bastmode)
- lkb->lkb_bastmode = bastmode;
+ if (type == AST_BAST)
+ lkb->lkb_bastmode = mode;
+ else
+ lkb->lkb_castmode = mode;
if (!ast_type) {
kref_get(&lkb->lkb_ref);
@@ -267,7 +269,7 @@ static int device_user_lock(struct dlm_u
goto out;
}
- ua = kzalloc(sizeof(struct dlm_user_args), GFP_KERNEL);
+ ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
if (!ua)
goto out;
ua->proc = proc;
@@ -307,7 +309,7 @@ static int device_user_unlock(struct dlm
if (!ls)
return -ENOENT;
- ua = kzalloc(sizeof(struct dlm_user_args), GFP_KERNEL);
+ ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
if (!ua)
goto out;
ua->proc = proc;
@@ -352,7 +354,7 @@ static int dlm_device_register(struct dl
error = -ENOMEM;
len = strlen(name) + strlen(name_prefix) + 2;
- ls->ls_device.name = kzalloc(len, GFP_KERNEL);
+ ls->ls_device.name = kzalloc(len, GFP_NOFS);
if (!ls->ls_device.name)
goto fail;
@@ -520,7 +522,7 @@ static ssize_t device_write(struct file
#endif
return -EINVAL;
- kbuf = kzalloc(count + 1, GFP_KERNEL);
+ kbuf = kzalloc(count + 1, GFP_NOFS);
if (!kbuf)
return -ENOMEM;
@@ -546,7 +548,7 @@ static ssize_t device_write(struct file
/* add 1 after namelen so that the name string is terminated */
kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1,
- GFP_KERNEL);
+ GFP_NOFS);
if (!kbuf) {
kfree(k32buf);
return -ENOMEM;
@@ -648,7 +650,7 @@ static int device_open(struct inode *ino
if (!ls)
return -ENOENT;
- proc = kzalloc(sizeof(struct dlm_user_proc), GFP_KERNEL);
+ proc = kzalloc(sizeof(struct dlm_user_proc), GFP_NOFS);
if (!proc) {
dlm_put_lockspace(ls);
return -ENOMEM;
file:1c96864922869b396bcbf2cab5a70c2eb32c064b -> file:f196091dd7ff8d31c687526d973c404d30098f00
--- a/fs/dlm/user.h
+++ b/fs/dlm/user.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006-2010 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
@@ -9,7 +9,7 @@
#ifndef __USER_DOT_H__
#define __USER_DOT_H__
-void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int bastmode);
+void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int mode);
int dlm_user_init(void);
void dlm_user_exit(void);
int dlm_device_deregister(struct dlm_ls *ls);
file:1744f17ce96ed6954955b1fdd1c0504ab04f7c45 -> file:4e25328986ac7b6718df18eace61eea75693a190
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -198,7 +198,7 @@ static int ecryptfs_open(struct inode *i
"the persistent file for the dentry with name "
"[%s]; rc = [%d]\n", __func__,
ecryptfs_dentry->d_name.name, rc);
- goto out;
+ goto out_free;
}
}
if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY)
@@ -206,7 +206,7 @@ static int ecryptfs_open(struct inode *i
rc = -EPERM;
printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
"file must hence be opened RO\n", __func__);
- goto out;
+ goto out_free;
}
ecryptfs_set_file_lower(
file, ecryptfs_inode_to_private(inode)->lower_file);
@@ -293,12 +293,40 @@ static int ecryptfs_fasync(int fd, struc
return rc;
}
-static int ecryptfs_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg);
+static long
+ecryptfs_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct file *lower_file = NULL;
+ long rc = -ENOTTY;
+
+ if (ecryptfs_file_to_private(file))
+ lower_file = ecryptfs_file_to_lower(file);
+ if (lower_file && lower_file->f_op && lower_file->f_op->unlocked_ioctl)
+ rc = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long
+ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct file *lower_file = NULL;
+ long rc = -ENOIOCTLCMD;
+
+ if (ecryptfs_file_to_private(file))
+ lower_file = ecryptfs_file_to_lower(file);
+ if (lower_file && lower_file->f_op && lower_file->f_op->compat_ioctl)
+ rc = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
+ return rc;
+}
+#endif
const struct file_operations ecryptfs_dir_fops = {
.readdir = ecryptfs_readdir,
- .ioctl = ecryptfs_ioctl,
+ .unlocked_ioctl = ecryptfs_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ecryptfs_compat_ioctl,
+#endif
.mmap = generic_file_mmap,
.open = ecryptfs_open,
.flush = ecryptfs_flush,
@@ -315,7 +343,10 @@ const struct file_operations ecryptfs_ma
.write = do_sync_write,
.aio_write = generic_file_aio_write,
.readdir = ecryptfs_readdir,
- .ioctl = ecryptfs_ioctl,
+ .unlocked_ioctl = ecryptfs_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ecryptfs_compat_ioctl,
+#endif
.mmap = generic_file_mmap,
.open = ecryptfs_open,
.flush = ecryptfs_flush,
@@ -324,20 +355,3 @@ const struct file_operations ecryptfs_ma
.fasync = ecryptfs_fasync,
.splice_read = generic_file_splice_read,
};
-
-static int
-ecryptfs_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- int rc = 0;
- struct file *lower_file = NULL;
-
- if (ecryptfs_file_to_private(file))
- lower_file = ecryptfs_file_to_lower(file);
- if (lower_file && lower_file->f_op && lower_file->f_op->ioctl)
- rc = lower_file->f_op->ioctl(ecryptfs_inode_to_lower(inode),
- lower_file, cmd, arg);
- else
- rc = -ENOTTY;
- return rc;
-}
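
The removed .ioctl handler above is replaced by .unlocked_ioctl/.compat_ioctl methods that forward to the lower file's handlers when they exist and otherwise return -ENOTTY (or -ENOIOCTLCMD for compat). A self-contained sketch of that forward-or-fail shape (struct lower_ops is a demo type, not the kernel's file_operations):

#include <errno.h>
#include <stdio.h>

struct lower_ops {
        long (*unlocked_ioctl)(unsigned int cmd, unsigned long arg);
};

static long passthrough_ioctl(const struct lower_ops *lower,
                              unsigned int cmd, unsigned long arg)
{
        long rc = -ENOTTY;      /* default when nothing below handles it */

        if (lower && lower->unlocked_ioctl)
                rc = lower->unlocked_ioctl(cmd, arg);
        return rc;
}

static long lower_handler(unsigned int cmd, unsigned long arg)
{
        return 0;               /* pretend the lower filesystem handled it */
}

int main(void)
{
        struct lower_ops ops = { .unlocked_ioctl = lower_handler };

        printf("with handler: %ld\n", passthrough_ioctl(&ops, 1, 0));
        printf("without:      %ld\n", passthrough_ioctl(NULL, 1, 0));
        return 0;
}
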
file:268b7d19affa931abc91bcd2aebf977adee9f54b -> file:b582f09a90122cd8f9c966703f33d63f2ccde3a4
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -69,15 +69,19 @@ ecryptfs_create_underlying_file(struct i
struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
struct dentry *dentry_save;
struct vfsmount *vfsmount_save;
+ unsigned int flags_save;
int rc;
dentry_save = nd->path.dentry;
vfsmount_save = nd->path.mnt;
+ flags_save = nd->flags;
nd->path.dentry = lower_dentry;
nd->path.mnt = lower_mnt;
+ nd->flags &= ~LOOKUP_OPEN;
rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd);
nd->path.dentry = dentry_save;
nd->path.mnt = vfsmount_save;
+ nd->flags = flags_save;
return rc;
}
@@ -272,7 +276,7 @@ int ecryptfs_lookup_and_interpose_lower(
printk(KERN_ERR "%s: Out of memory whilst attempting "
"to allocate ecryptfs_dentry_info struct\n",
__func__);
- goto out_dput;
+ goto out_put;
}
ecryptfs_set_dentry_lower(ecryptfs_dentry, lower_dentry);
ecryptfs_set_dentry_lower_mnt(ecryptfs_dentry, lower_mnt);
@@ -345,8 +349,9 @@ int ecryptfs_lookup_and_interpose_lower(
out_free_kmem:
kmem_cache_free(ecryptfs_header_cache_2, page_virt);
goto out;
-out_dput:
+out_put:
dput(lower_dentry);
+ mntput(lower_mnt);
d_drop(ecryptfs_dentry);
out:
return rc;
file:56da15f6d77100f5c604ba0eabb5df111e9379aa -> file:a0410eb44dfa8cf58f10617fa88859f04905f62d
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -376,6 +376,9 @@ static int count(char __user * __user *
argv++;
if (i++ >= max)
return -E2BIG;
+
+ if (fatal_signal_pending(current))
+ return -ERESTARTNOHAND;
cond_resched();
}
}
@@ -419,6 +422,12 @@ static int copy_strings(int argc, char _
while (len > 0) {
int offset, bytes_to_copy;
+ if (fatal_signal_pending(current)) {
+ ret = -ERESTARTNOHAND;
+ goto out;
+ }
+ cond_resched();
+
offset = pos % PAGE_SIZE;
if (offset == 0)
offset = PAGE_SIZE;
@@ -594,6 +603,11 @@ int setup_arg_pages(struct linux_binprm
#else
stack_top = arch_align_stack(stack_top);
stack_top = PAGE_ALIGN(stack_top);
+
+ if (unlikely(stack_top < mmap_min_addr) ||
+ unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
+ return -ENOMEM;
+
stack_shift = vma->vm_end - stack_top;
bprm->p -= stack_shift;
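
The added check guards the stack-shift arithmetic: comparing the VMA size against stack_top - mmap_min_addr (after first verifying stack_top >= mmap_min_addr) avoids the unsigned wraparound a direct subtraction could hit. The same overflow-safe comparison in isolation (stack_fits is a demo name):

#include <stdbool.h>
#include <stdio.h>

/* "does a stack of this size fit below stack_top without dipping
 * under min_addr?" -- compare sizes rather than subtracting first,
 * so a huge size can never wrap the unsigned arithmetic */
static bool stack_fits(unsigned long stack_top, unsigned long size,
                       unsigned long min_addr)
{
        if (stack_top < min_addr)
                return false;
        return size < stack_top - min_addr;
}

int main(void)
{
        printf("%d\n", stack_fits(0x800000, 0x1000, 0x10000)); /* 1 */
        printf("%d\n", stack_fits(0x8000, 0x1000, 0x10000));   /* 0 */
        printf("%d\n", stack_fits(0x20000, -1UL, 0x10000));    /* 0 */
        return 0;
}
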
file:f3032c919a22dbee67f4d4d2f1f17269e69bac30 -> file:e85b63c9fbcb6b49cba4c8c154a22f1f556ff128
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -189,9 +189,6 @@ unsigned ext4_init_block_bitmap(struct s
* when a file system is mounted (see ext4_fill_super).
*/
-
-#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
-
/**
* ext4_get_group_desc() -- load group descriptor from disk
* @sb: super block
file:fa6b79fb594dc2068fe920a3c3b8ab406b14ca9d -> file:0773352fd5125c8425c881ec7dd8af91367e84c7
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1888,6 +1888,8 @@ static inline void set_bitmap_uptodate(s
set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state);
}
+#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
+
#endif /* __KERNEL__ */
#endif /* _EXT4_H */
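
in_range() tests membership in the closed interval [first, first + len - 1]; hoisting it into ext4.h lets the extents.c hunks below replace their open-coded two-comparison checks. A quick standalone check of the boundary behaviour:

#include <assert.h>

#define in_range(b, first, len) \
        ((b) >= (first) && (b) <= (first) + (len) - 1)

int main(void)
{
        assert(in_range(10, 10, 1));    /* sole block of a 1-block extent */
        assert(in_range(14, 10, 5));    /* last block: 10 + 5 - 1 */
        assert(!in_range(15, 10, 5));   /* one past the end */
        return 0;
}
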
file:99482eae7b378292ee1d09d5dd947050f6b10a35 -> file:f37555909872f5b102f1fef31ed51d08923d7790
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1948,7 +1948,7 @@ ext4_ext_in_cache(struct inode *inode, e
BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
cex->ec_type != EXT4_EXT_CACHE_EXTENT);
- if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
+ if (in_range(block, cex->ec_block, cex->ec_len)) {
ex->ee_block = cpu_to_le32(cex->ec_block);
ext4_ext_store_pblock(ex, cex->ec_start);
ex->ee_len = cpu_to_le16(cex->ec_len);
@@ -3302,7 +3302,7 @@ int ext4_ext_get_blocks(handle_t *handle
*/
ee_len = ext4_ext_get_actual_len(ex);
/* if found extent covers block, simply return it */
- if (iblock >= ee_block && iblock < ee_block + ee_len) {
+ if (in_range(iblock, ee_block, ee_len)) {
newblock = iblock - ee_block + ee_start;
/* number of remaining blocks in the extent */
allocated = ee_len - (iblock - ee_block);
file:99596fc88821e8de7a48b1b8dbea17a55fb79dea -> file:1b23f9d2ef5668a5d992b42fd77d38fda3e197bc
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4815,20 +4815,26 @@ void ext4_set_inode_flags(struct inode *
/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
void ext4_get_inode_flags(struct ext4_inode_info *ei)
{
- unsigned int flags = ei->vfs_inode.i_flags;
+ unsigned int vfs_fl;
+ unsigned long old_fl, new_fl;
- ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
- EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
- if (flags & S_SYNC)
- ei->i_flags |= EXT4_SYNC_FL;
- if (flags & S_APPEND)
- ei->i_flags |= EXT4_APPEND_FL;
- if (flags & S_IMMUTABLE)
- ei->i_flags |= EXT4_IMMUTABLE_FL;
- if (flags & S_NOATIME)
- ei->i_flags |= EXT4_NOATIME_FL;
- if (flags & S_DIRSYNC)
- ei->i_flags |= EXT4_DIRSYNC_FL;
+ do {
+ vfs_fl = ei->vfs_inode.i_flags;
+ old_fl = ei->i_flags;
+ new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
+ EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
+ EXT4_DIRSYNC_FL);
+ if (vfs_fl & S_SYNC)
+ new_fl |= EXT4_SYNC_FL;
+ if (vfs_fl & S_APPEND)
+ new_fl |= EXT4_APPEND_FL;
+ if (vfs_fl & S_IMMUTABLE)
+ new_fl |= EXT4_IMMUTABLE_FL;
+ if (vfs_fl & S_NOATIME)
+ new_fl |= EXT4_NOATIME_FL;
+ if (vfs_fl & S_DIRSYNC)
+ new_fl |= EXT4_DIRSYNC_FL;
+ } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
}
static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
@@ -5067,7 +5073,7 @@ static int ext4_inode_blocks_set(handle_
*/
raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
raw_inode->i_blocks_high = 0;
- ei->i_flags &= ~EXT4_HUGE_FILE_FL;
+ ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
return 0;
}
if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
@@ -5080,9 +5086,9 @@ static int ext4_inode_blocks_set(handle_
*/
raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
- ei->i_flags &= ~EXT4_HUGE_FILE_FL;
+ ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
} else {
- ei->i_flags |= EXT4_HUGE_FILE_FL;
+ ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
/* i_block is stored in file system block size */
i_blocks = i_blocks >> (inode->i_blkbits - 9);
raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
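
The ext4_get_inode_flags() rework earlier in this file swaps a read-modify-write sequence for a cmpxchg retry loop: each iteration re-derives the new flag word from a fresh snapshot, so a racing updater can never be partially overwritten. The same pattern expressed with C11 atomics (all DEMO_* names and bit values are placeholders, not ext4's):

#include <stdatomic.h>
#include <stdio.h>

#define DEMO_SYNC_FL    0x1
#define DEMO_APPEND_FL  0x2
#define DEMO_S_SYNC     0x10
#define DEMO_S_APPEND   0x20

static _Atomic unsigned long i_flags;

static void propagate_flags(unsigned int vfs_fl)
{
        unsigned long old_fl, new_fl;

        old_fl = atomic_load(&i_flags);
        do {
                new_fl = old_fl & ~(DEMO_SYNC_FL | DEMO_APPEND_FL);
                if (vfs_fl & DEMO_S_SYNC)
                        new_fl |= DEMO_SYNC_FL;
                if (vfs_fl & DEMO_S_APPEND)
                        new_fl |= DEMO_APPEND_FL;
                /* on failure old_fl is refreshed and new_fl recomputed */
        } while (!atomic_compare_exchange_weak(&i_flags, &old_fl, new_fl));
}

int main(void)
{
        propagate_flags(DEMO_S_SYNC);
        printf("i_flags = 0x%lx\n", (unsigned long)atomic_load(&i_flags));
        return 0;
}
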
file:0ca811061bc72eca18b46ca025b660f32ce87138 -> file:ceb9d41cd7cf4c01a391f60038bd4fa9b9a0314a
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -221,8 +221,6 @@ struct ext4_buddy {
#define EXT4_MB_BITMAP(e4b) ((e4b)->bd_bitmap)
#define EXT4_MB_BUDDY(e4b) ((e4b)->bd_buddy)
-#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
-
static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
struct ext4_free_extent *fex)
{
file:5b14d11438b7a5747c626453df5519f3fd70a593 -> file:a73ed781752dd689d1312399f33a4f939fdd646e
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -958,6 +958,9 @@ mext_check_arguments(struct inode *orig_
return -EINVAL;
}
+ if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode))
+ return -EPERM;
+
/* Ext4 move extent does not support swapfile */
if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) {
ext4_debug("ext4 move extent: The argument files should "
file:54a05ccf52216abdaf70e06c0b6443b5c14fe005 -> file:f27e045df7a6e502c9d6d92389a8062eeb8a0e24
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -227,7 +227,7 @@ handle_t *ext4_journal_start_sb(struct s
if (sb->s_flags & MS_RDONLY)
return ERR_PTR(-EROFS);
- vfs_check_frozen(sb, SB_FREEZE_WRITE);
+ vfs_check_frozen(sb, SB_FREEZE_TRANS);
/* Special case here: if the journal has aborted behind our
* backs (eg. EIO in the commit thread), then we still need to
* take the FS itself readonly cleanly. */
@@ -1218,6 +1218,11 @@ static int parse_options(char *options,
if (!*p)
continue;
+ /*
+ * Initialize args struct so we know whether arg was
+ * found; some options take optional arguments.
+ */
+ args[0].to = args[0].from = 0;
token = match_token(p, tokens, args);
switch (token) {
case Opt_bsd_df:
@@ -1503,10 +1508,11 @@ set_qf_format:
clear_opt(sbi->s_mount_opt, BARRIER);
break;
case Opt_barrier:
- if (match_int(&args[0], &option)) {
- set_opt(sbi->s_mount_opt, BARRIER);
- break;
- }
+ if (args[0].from) {
+ if (match_int(&args[0], &option))
+ return 0;
+ } else
+ option = 1; /* No argument, default to 1 */
if (option)
set_opt(sbi->s_mount_opt, BARRIER);
else
@@ -1579,10 +1585,11 @@ set_qf_format:
set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
break;
case Opt_auto_da_alloc:
- if (match_int(&args[0], &option)) {
- clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
- break;
- }
+ if (args[0].from) {
+ if (match_int(&args[0], &option))
+ return 0;
+ } else
+ option = 1; /* No argument, default to 1 */
if (option)
clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
else
@@ -3390,7 +3397,7 @@ int ext4_force_commit(struct super_block
journal = EXT4_SB(sb)->s_journal;
if (journal) {
- vfs_check_frozen(sb, SB_FREEZE_WRITE);
+ vfs_check_frozen(sb, SB_FREEZE_TRANS);
ret = ext4_journal_force_commit(journal);
}
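
The parse_options() changes above handle options with an optional argument: args[] is zeroed before match_token() so the code can distinguish a bare "barrier" (default to 1) from "barrier=N", and a malformed N now fails the mount instead of being silently ignored. A userspace model of that rule (parse_barrier is a demo name):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_barrier(const char *opt, int *value)
{
        const char *eq = strchr(opt, '=');
        char *end;
        long v;

        if (!eq) {              /* no argument supplied: default to 1 */
                *value = 1;
                return 0;
        }
        v = strtol(eq + 1, &end, 10);
        if (end == eq + 1 || *end != '\0')
                return -1;      /* malformed value: fail the mount */
        *value = (int)v;
        return 0;
}

int main(void)
{
        int v, rc;

        rc = parse_barrier("barrier", &v);
        printf("barrier   -> rc=%d v=%d\n", rc, v);
        rc = parse_barrier("barrier=0", &v);
        printf("barrier=0 -> rc=%d v=%d\n", rc, v);
        rc = parse_barrier("barrier=x", &v);
        printf("barrier=x -> rc=%d\n", rc);
        return 0;
}
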
file:51d9e33d634f4fb40f28fc00276123e1dc8f13c0 -> file:650546f8612d44d1200e2530ec704f52ac3b592b
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1158,6 +1158,14 @@ __acquires(&fc->lock)
}
}
+static void end_queued_requests(struct fuse_conn *fc)
+{
+ fc->max_background = UINT_MAX;
+ flush_bg_queue(fc);
+ end_requests(fc, &fc->pending);
+ end_requests(fc, &fc->processing);
+}
+
/*
* Abort all requests.
*
@@ -1184,8 +1192,7 @@ void fuse_abort_conn(struct fuse_conn *f
fc->connected = 0;
fc->blocked = 0;
end_io_requests(fc);
- end_requests(fc, &fc->pending);
- end_requests(fc, &fc->processing);
+ end_queued_requests(fc);
wake_up_all(&fc->waitq);
wake_up_all(&fc->blocked_waitq);
kill_fasync(&fc->fasync, SIGIO, POLL_IN);
@@ -1200,8 +1207,9 @@ int fuse_dev_release(struct inode *inode
if (fc) {
spin_lock(&fc->lock);
fc->connected = 0;
- end_requests(fc, &fc->pending);
- end_requests(fc, &fc->processing);
+ fc->blocked = 0;
+ end_queued_requests(fc);
+ wake_up_all(&fc->blocked_waitq);
spin_unlock(&fc->lock);
fuse_conn_put(fc);
}
file:a9f5e137f1d31547f86bbfdb87df11c07daf963f -> file:cbd2214edc15b2c4cd65e40f7de6538d1169f982
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -134,6 +134,7 @@ EXPORT_SYMBOL_GPL(fuse_do_open);
void fuse_finish_open(struct inode *inode, struct file *file)
{
struct fuse_file *ff = file->private_data;
+ struct fuse_conn *fc = get_fuse_conn(inode);
if (ff->open_flags & FOPEN_DIRECT_IO)
file->f_op = &fuse_direct_io_file_operations;
@@ -141,6 +142,15 @@ void fuse_finish_open(struct inode *inod
invalidate_inode_pages2(inode->i_mapping);
if (ff->open_flags & FOPEN_NONSEEKABLE)
nonseekable_open(inode, file);
+ if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
+ spin_lock(&fc->lock);
+ fi->attr_version = ++fc->attr_version;
+ i_size_write(inode, 0);
+ spin_unlock(&fc->lock);
+ fuse_invalidate_attr(inode);
+ }
}
int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
file:297d7e5cebad8d283a9360ad86cf3a96a2028f02 -> file:0bb312961c92fb01d661ac273b0c0e4f32d140dc
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -392,7 +392,7 @@ static int gfs2_dirent_find_space(const
unsigned totlen = be16_to_cpu(dent->de_rec_len);
if (gfs2_dirent_sentinel(dent))
- actual = GFS2_DIRENT_SIZE(0);
+ actual = 0;
if (totlen - actual >= required)
return 1;
return 0;
file:fad364548bc9e3716b08dc0f6ca66a58d1f9b56c -> file:8b0da9b108021a280d895ec5b26c860dd9a52351
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -85,46 +85,25 @@ struct ea_buffer {
#define EA_MALLOC 0x0008
+static int is_known_namespace(const char *name)
+{
+ if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) &&
+ strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
+ strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
+ strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
+ return false;
+
+ return true;
+}
+
/*
* These three routines are used to recognize on-disk extended attributes
* that are in a recognized namespace. If the attribute is not recognized,
* "os2." is prepended to the name
*/
-static inline int is_os2_xattr(struct jfs_ea *ea)
+static int is_os2_xattr(struct jfs_ea *ea)
{
- /*
- * Check for "system."
- */
- if ((ea->namelen >= XATTR_SYSTEM_PREFIX_LEN) &&
- !strncmp(ea->name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
- return false;
- /*
- * Check for "user."
- */
- if ((ea->namelen >= XATTR_USER_PREFIX_LEN) &&
- !strncmp(ea->name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
- return false;
- /*
- * Check for "security."
- */
- if ((ea->namelen >= XATTR_SECURITY_PREFIX_LEN) &&
- !strncmp(ea->name, XATTR_SECURITY_PREFIX,
- XATTR_SECURITY_PREFIX_LEN))
- return false;
- /*
- * Check for "trusted."
- */
- if ((ea->namelen >= XATTR_TRUSTED_PREFIX_LEN) &&
- !strncmp(ea->name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
- return false;
- /*
- * Add any other valid namespace prefixes here
- */
-
- /*
- * We assume it's OS/2's flat namespace
- */
- return true;
+ return !is_known_namespace(ea->name);
}
static inline int name_size(struct jfs_ea *ea)
@@ -762,13 +741,23 @@ static int can_set_xattr(struct inode *i
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
return can_set_system_xattr(inode, name, value, value_len);
+ if (!strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN)) {
+ /*
+ * This makes sure that we aren't trying to set an
+ * attribute in a different namespace by prefixing it
+ * with "os2."
+ */
+ if (is_known_namespace(name + XATTR_OS2_PREFIX_LEN))
+ return -EOPNOTSUPP;
+ return 0;
+ }
+
/*
* Don't allow setting an attribute in an unknown namespace.
*/
if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) &&
strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
- strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
- strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN))
+ strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
return -EOPNOTSUPP;
return 0;
@@ -950,19 +939,8 @@ ssize_t __jfs_getxattr(struct inode *ino
int xattr_size;
ssize_t size;
int namelen = strlen(name);
- char *os2name = NULL;
char *value;
- if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
- os2name = kmalloc(namelen - XATTR_OS2_PREFIX_LEN + 1,
- GFP_KERNEL);
- if (!os2name)
- return -ENOMEM;
- strcpy(os2name, name + XATTR_OS2_PREFIX_LEN);
- name = os2name;
- namelen -= XATTR_OS2_PREFIX_LEN;
- }
-
down_read(&JFS_IP(inode)->xattr_sem);
xattr_size = ea_get(inode, &ea_buf, 0);
@@ -1000,8 +978,6 @@ ssize_t __jfs_getxattr(struct inode *ino
out:
up_read(&JFS_IP(inode)->xattr_sem);
- kfree(os2name);
-
return size;
}
@@ -1010,6 +986,19 @@ ssize_t jfs_getxattr(struct dentry *dent
{
int err;
+ if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
+ /*
+ * skip past "os2." prefix
+ */
+ name += XATTR_OS2_PREFIX_LEN;
+ /*
+ * Don't allow retrieving properly prefixed attributes
+ * by prepending them with "os2."
+ */
+ if (is_known_namespace(name))
+ return -EOPNOTSUPP;
+ }
+
err = __jfs_getxattr(dentry->d_inode, name, data, buf_size);
return err;
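The net effect of the two is_known_namespace() checks is that "os2." can no longer be used to alias a real namespace: "os2.user.x" is rejected instead of being silently mapped onto "user.x". A small userspace illustration (not part of the patch; the file path is hypothetical):

#include <sys/xattr.h>
#include <errno.h>
#include <stdio.h>

int main(void)
{
	/* "/mnt/jfs/f" is a placeholder file on a JFS mount */
	if (setxattr("/mnt/jfs/f", "os2.user.secret", "x", 1, 0) < 0 &&
	    errno == EOPNOTSUPP)
		printf("aliasing via the os2. prefix is rejected\n");
	if (setxattr("/mnt/jfs/f", "os2.foo", "x", 1, 0) == 0)
		printf("a plain OS/2 attribute is still accepted\n");
	return 0;
}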
file:127ed5cf28667ee069b8597943a5f000c450280b -> file:19cbbf763110a56aa14859b921492328ded25336
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -273,7 +273,7 @@ static int nfs_sockaddr_match_ipaddr6(co
sin1->sin6_scope_id != sin2->sin6_scope_id)
return 0;
- return ipv6_addr_equal(&sin1->sin6_addr, &sin1->sin6_addr);
+ return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr);
}
#else /* !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) */
static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1,
file:61b3bf503ee157d8243259d41e3d7e13765b27f8 -> file:9f83d9fe9a61af75574782c310d27ee78f03017a
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -27,6 +27,8 @@
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/aio.h>
+#include <linux/gfp.h>
+#include <linux/swap.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -484,11 +486,19 @@ static void nfs_invalidate_page(struct p
*/
static int nfs_release_page(struct page *page, gfp_t gfp)
{
+ struct address_space *mapping = page->mapping;
+
dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
/* Only do I/O if gfp is a superset of GFP_KERNEL */
- if ((gfp & GFP_KERNEL) == GFP_KERNEL)
- nfs_wb_page(page->mapping->host, page);
+ if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL) {
+ int how = FLUSH_SYNC;
+
+ /* Don't let kswapd deadlock waiting for OOM RPC calls */
+ if (current_is_kswapd())
+ how = 0;
+ nfs_commit_inode(mapping->host, how);
+ }
/* If PagePrivate() is set, then the page is not freeable */
if (PagePrivate(page))
return 0;
file:5ec74cd5402cea6ad9a7928d651d9efd94db21ac -> file:e81b2bf67c42ee980371dcd3b73916bbdef82312
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -2096,7 +2096,7 @@ nfs4_xdr_enc_getacl(struct rpc_rqst *req
encode_compound_hdr(&xdr, req, &hdr);
encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
- replen = hdr.replen + nfs4_fattr_bitmap_maxsz + 1;
+ replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
encode_getattr_two(&xdr, FATTR4_WORD0_ACL, 0, &hdr);
xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
file:a53b9e5068cdccc0a5850cbd7284e7d8d1fbc52b -> file:c0173a8531e212d8a473221c48f4ec363252c6d9
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -616,6 +616,13 @@ static void nfs_show_mount_options(struc
if (nfss->options & NFS_OPTION_FSCACHE)
seq_printf(m, ",fsc");
+
+ if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG) {
+ if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONE)
+ seq_printf(m, ",lookupcache=none");
+ else
+ seq_printf(m, ",lookupcache=pos");
+ }
}
/*
file:e27960cd76ab9046257826783d907bdb6f3ca2a8 -> file:5d3d2a782abcf00438f82e6c1cff75c44cc059ca
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -72,6 +72,9 @@ static int inotify_handle_event(struct f
ret = 0;
}
+ if (entry->mask & IN_ONESHOT)
+ fsnotify_destroy_mark_by_entry(entry);
+
/*
* If we hold the entry until after the event is on the queue
* IN_IGNORED won't be able to pass this event in the queue
file:22ef16a478a002e2c103e35d619cc1c42e076745 -> file:aef8f5d7f94f3a08643b6e9aac54930a3649a5bb
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -106,8 +106,11 @@ static inline __u32 inotify_arg_to_mask(
{
__u32 mask;
- /* everything should accept their own ignored and cares about children */
- mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD);
+ /*
+ * everything should accept its own ignored event, care about children,
+ * and receive events when the inode is unmounted
+ */
+ mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);
/* mask off the flags used to open the fd */
mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT));
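Together the two inotify changes mean a one-shot watch is torn down by the kernel after its first event, and every watch now also sees IN_UNMOUNT. A minimal sketch of the one-shot behaviour (not part of the patch; the watched path is hypothetical):

#include <sys/inotify.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	char buf[4096];
	int fd = inotify_init();
	int wd = inotify_add_watch(fd, "/tmp/watched",
				   IN_CREATE | IN_ONESHOT);

	read(fd, buf, sizeof(buf)); /* first event arrives... */
	/* ...and with the fix the mark is destroyed in the kernel, so
	 * the next event on this wd is IN_IGNORED, not another IN_CREATE */
	printf("wd %d is gone after one event\n", wd);
	return 0;
}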
file:d8fe53a6f8df53a1e495cf2aeb390ce46238ac79 -> file:c8288df5712010c8485d352b8881de68813b0c4f
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -293,12 +293,30 @@ static int ocfs2_set_acl(handle_t *handl
int ocfs2_check_acl(struct inode *inode, int mask)
{
- struct posix_acl *acl = ocfs2_get_acl(inode, ACL_TYPE_ACCESS);
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct buffer_head *di_bh = NULL;
+ struct posix_acl *acl;
+ int ret = -EAGAIN;
+
+ if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
+ return ret;
+
+ ret = ocfs2_read_inode_block(inode, &di_bh);
+ if (ret < 0) {
+ mlog_errno(ret);
+ return ret;
+ }
- if (IS_ERR(acl))
+ acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, di_bh);
+
+ brelse(di_bh);
+
+ if (IS_ERR(acl)) {
+ mlog_errno(PTR_ERR(acl));
return PTR_ERR(acl);
+ }
if (acl) {
- int ret = posix_acl_permission(inode, acl, mask);
+ ret = posix_acl_permission(inode, acl, mask);
posix_acl_release(acl);
return ret;
}
@@ -347,7 +365,7 @@ int ocfs2_init_acl(handle_t *handle,
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct posix_acl *acl = NULL;
- int ret = 0;
+ int ret = 0, ret2;
mode_t mode;
if (!S_ISLNK(inode->i_mode)) {
@@ -384,7 +402,12 @@ int ocfs2_init_acl(handle_t *handle,
mode = inode->i_mode;
ret = posix_acl_create_masq(clone, &mode);
if (ret >= 0) {
- ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
+ ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
+ if (ret2) {
+ mlog_errno(ret2);
+ ret = ret2;
+ goto cleanup;
+ }
if (ret > 0) {
ret = ocfs2_set_acl(handle, inode,
di_bh, ACL_TYPE_ACCESS,
file:38a42f5d59ff70345b7a8f5840000dc850ea6cff -> file:5661db139ca0501c12edd4396a00fb386641b31f
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1765,9 +1765,9 @@ set_and_inc:
*
* The array index of the subtree root is passed back.
*/
-static int ocfs2_find_subtree_root(struct ocfs2_extent_tree *et,
- struct ocfs2_path *left,
- struct ocfs2_path *right)
+int ocfs2_find_subtree_root(struct ocfs2_extent_tree *et,
+ struct ocfs2_path *left,
+ struct ocfs2_path *right)
{
int i = 0;
@@ -2872,8 +2872,8 @@ out:
* This looks similar, but is subtly different to
* ocfs2_find_cpos_for_left_leaf().
*/
-static int ocfs2_find_cpos_for_right_leaf(struct super_block *sb,
- struct ocfs2_path *path, u32 *cpos)
+int ocfs2_find_cpos_for_right_leaf(struct super_block *sb,
+ struct ocfs2_path *path, u32 *cpos)
{
int i, j, ret = 0;
u64 blkno;
file:9c122d574464284ef5c998e53f3685939c76213b -> file:1db4359ccb90b61521a0167be7581eb91e823d83
--- a/fs/ocfs2/alloc.h
+++ b/fs/ocfs2/alloc.h
@@ -317,4 +317,9 @@ int ocfs2_path_bh_journal_access(handle_
int ocfs2_journal_access_path(struct ocfs2_caching_info *ci,
handle_t *handle,
struct ocfs2_path *path);
+int ocfs2_find_cpos_for_right_leaf(struct super_block *sb,
+ struct ocfs2_path *path, u32 *cpos);
+int ocfs2_find_subtree_root(struct ocfs2_extent_tree *et,
+ struct ocfs2_path *left,
+ struct ocfs2_path *right);
#endif /* OCFS2_ALLOC_H */
file:83bcaf266b358d7922998b3ec1d46d203531d616 -> file:ef1ac9ab4ee1a8504da627d037b63a74c5d20302
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -511,8 +511,6 @@ static void dlm_lockres_release(struct k
atomic_dec(&dlm->res_cur_count);
- dlm_put(dlm);
-
if (!hlist_unhashed(&res->hash_node) ||
!list_empty(&res->granted) ||
!list_empty(&res->converting) ||
@@ -585,8 +583,6 @@ static void dlm_init_lockres(struct dlm_
res->migration_pending = 0;
res->inflight_locks = 0;
- /* put in dlm_lockres_release */
- dlm_grab(dlm);
res->dlm = dlm;
kref_init(&res->refs);
@@ -3046,8 +3042,6 @@ int dlm_migrate_request_handler(struct o
/* check for pre-existing lock */
spin_lock(&dlm->spinlock);
res = __dlm_lookup_lockres(dlm, name, namelen, hash);
- spin_lock(&dlm->master_lock);
-
if (res) {
spin_lock(&res->spinlock);
if (res->state & DLM_LOCK_RES_RECOVERING) {
@@ -3065,14 +3059,15 @@ int dlm_migrate_request_handler(struct o
spin_unlock(&res->spinlock);
}
+ spin_lock(&dlm->master_lock);
/* ignore status. only nonzero status would BUG. */
ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
name, namelen,
migrate->new_master,
migrate->master);
-unlock:
spin_unlock(&dlm->master_lock);
+unlock:
spin_unlock(&dlm->spinlock);
if (oldmle) {
file:d9fa3d22e17c6d27134327d3fd96199038913ed0 -> file:3492550b70c8d2d834ac38a7acd02d24a25a6f3a
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1941,6 +1941,8 @@ void dlm_move_lockres_to_recovery_list(s
struct list_head *queue;
struct dlm_lock *lock, *next;
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&res->spinlock);
res->state |= DLM_LOCK_RES_RECOVERING;
if (!list_empty(&res->recovering)) {
mlog(0,
@@ -2265,19 +2267,15 @@ static void dlm_do_local_recovery_cleanu
/* zero the lvb if necessary */
dlm_revalidate_lvb(dlm, res, dead_node);
if (res->owner == dead_node) {
- if (res->state & DLM_LOCK_RES_DROPPING_REF)
- mlog(0, "%s:%.*s: owned by "
- "dead node %u, this node was "
- "dropping its ref when it died. "
- "continue, dropping the flag.\n",
- dlm->name, res->lockname.len,
- res->lockname.name, dead_node);
-
- /* the wake_up for this will happen when the
- * RECOVERING flag is dropped later */
- res->state &= ~DLM_LOCK_RES_DROPPING_REF;
+ if (res->state & DLM_LOCK_RES_DROPPING_REF) {
+ mlog(ML_NOTICE, "Ignore %.*s for "
+ "recovery as it is being freed\n",
+ res->lockname.len,
+ res->lockname.name);
+ } else
+ dlm_move_lockres_to_recovery_list(dlm,
+ res);
- dlm_move_lockres_to_recovery_list(dlm, res);
} else if (res->owner == dlm->node_num) {
dlm_free_dead_locks(dlm, res, dead_node);
__dlm_lockres_calc_usage(dlm, res);
file:52ec020ea78b42f11ad83b3a2632dbcf08a20dde -> file:86491f583ea56f7dc6e62f3b580334601bbfc27a
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -93,19 +93,27 @@ int __dlm_lockres_has_locks(struct dlm_l
* truly ready to be freed. */
int __dlm_lockres_unused(struct dlm_lock_resource *res)
{
- if (!__dlm_lockres_has_locks(res) &&
- (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) {
- /* try not to scan the bitmap unless the first two
- * conditions are already true */
- int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
- if (bit >= O2NM_MAX_NODES) {
- /* since the bit for dlm->node_num is not
- * set, inflight_locks better be zero */
- BUG_ON(res->inflight_locks != 0);
- return 1;
- }
- }
- return 0;
+ int bit;
+
+ if (__dlm_lockres_has_locks(res))
+ return 0;
+
+ if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
+ return 0;
+
+ if (res->state & DLM_LOCK_RES_RECOVERING)
+ return 0;
+
+ bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
+ if (bit < O2NM_MAX_NODES)
+ return 0;
+
+ /*
+ * since the bit for dlm->node_num is not set, inflight_locks better
+ * be zero
+ */
+ BUG_ON(res->inflight_locks != 0);
+ return 1;
}
@@ -153,45 +161,25 @@ void dlm_lockres_calc_usage(struct dlm_c
spin_unlock(&dlm->spinlock);
}
-static int dlm_purge_lockres(struct dlm_ctxt *dlm,
+static void dlm_purge_lockres(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
int master;
int ret = 0;
- spin_lock(&res->spinlock);
- if (!__dlm_lockres_unused(res)) {
- mlog(0, "%s:%.*s: tried to purge but not unused\n",
- dlm->name, res->lockname.len, res->lockname.name);
- __dlm_print_one_lock_resource(res);
- spin_unlock(&res->spinlock);
- BUG();
- }
-
- if (res->state & DLM_LOCK_RES_MIGRATING) {
- mlog(0, "%s:%.*s: Delay dropref as this lockres is "
- "being remastered\n", dlm->name, res->lockname.len,
- res->lockname.name);
- /* Re-add the lockres to the end of the purge list */
- if (!list_empty(&res->purge)) {
- list_del_init(&res->purge);
- list_add_tail(&res->purge, &dlm->purge_list);
- }
- spin_unlock(&res->spinlock);
- return 0;
- }
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&res->spinlock);
master = (res->owner == dlm->node_num);
- if (!master)
- res->state |= DLM_LOCK_RES_DROPPING_REF;
- spin_unlock(&res->spinlock);
mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len,
res->lockname.name, master);
if (!master) {
+ res->state |= DLM_LOCK_RES_DROPPING_REF;
/* drop spinlock... retake below */
+ spin_unlock(&res->spinlock);
spin_unlock(&dlm->spinlock);
spin_lock(&res->spinlock);
@@ -209,31 +197,35 @@ static int dlm_purge_lockres(struct dlm_
mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
dlm->name, res->lockname.len, res->lockname.name, ret);
spin_lock(&dlm->spinlock);
+ spin_lock(&res->spinlock);
}
- spin_lock(&res->spinlock);
if (!list_empty(&res->purge)) {
mlog(0, "removing lockres %.*s:%p from purgelist, "
"master = %d\n", res->lockname.len, res->lockname.name,
res, master);
list_del_init(&res->purge);
- spin_unlock(&res->spinlock);
dlm_lockres_put(res);
dlm->purge_count--;
- } else
- spin_unlock(&res->spinlock);
+ }
+
+ if (!__dlm_lockres_unused(res)) {
+ mlog(ML_ERROR, "found lockres %s:%.*s: in use after deref\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ __dlm_print_one_lock_resource(res);
+ BUG();
+ }
__dlm_unhash_lockres(res);
/* lockres is not in the hash now. drop the flag and wake up
* any processes waiting in dlm_get_lock_resource. */
if (!master) {
- spin_lock(&res->spinlock);
res->state &= ~DLM_LOCK_RES_DROPPING_REF;
spin_unlock(&res->spinlock);
wake_up(&res->wq);
- }
- return 0;
+ } else
+ spin_unlock(&res->spinlock);
}
static void dlm_run_purge_list(struct dlm_ctxt *dlm,
@@ -252,17 +244,7 @@ static void dlm_run_purge_list(struct dl
lockres = list_entry(dlm->purge_list.next,
struct dlm_lock_resource, purge);
- /* Status of the lockres *might* change so double
- * check. If the lockres is unused, holding the dlm
- * spinlock will prevent people from getting and more
- * refs on it -- there's no need to keep the lockres
- * spinlock. */
spin_lock(&lockres->spinlock);
- unused = __dlm_lockres_unused(lockres);
- spin_unlock(&lockres->spinlock);
-
- if (!unused)
- continue;
purge_jiffies = lockres->last_used +
msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);
@@ -274,15 +256,29 @@ static void dlm_run_purge_list(struct dl
* in tail order, we can stop at the first
* unpurgable resource -- anyone added after
* him will have a greater last_used value */
+ spin_unlock(&lockres->spinlock);
break;
}
+ /* Status of the lockres *might* change so double
+ * check. If the lockres is unused, holding the dlm
+ * spinlock will prevent people from getting any more
+ * refs on it. */
+ unused = __dlm_lockres_unused(lockres);
+ if (!unused ||
+ (lockres->state & DLM_LOCK_RES_MIGRATING)) {
+ mlog(0, "lockres %s:%.*s: is in use or "
+ "being remastered, used %d, state %d\n",
+ dlm->name, lockres->lockname.len,
+ lockres->lockname.name, !unused, lockres->state);
+ list_move_tail(&lockres->purge, &dlm->purge_list);
+ spin_unlock(&lockres->spinlock);
+ continue;
+ }
+
dlm_lockres_get(lockres);
- /* This may drop and reacquire the dlm spinlock if it
- * has to do migration. */
- if (dlm_purge_lockres(dlm, lockres))
- BUG();
+ dlm_purge_lockres(dlm, lockres);
dlm_lockres_put(lockres);
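After this rework the purge path follows one locking order throughout; a summary sketch reconstructed from the hunks above (not literal code):

/*
 * dlm_run_purge_list():
 *   spin_lock(&dlm->spinlock);
 *   spin_lock(&lockres->spinlock);    <- recheck __dlm_lockres_unused()
 *   dlm_purge_lockres(dlm, res);      <- asserts both locks are held
 *     if (!master)
 *       set DLM_LOCK_RES_DROPPING_REF, drop both locks for the
 *       network deref, then retake dlm->spinlock and res->spinlock
 *   unhash, then drop res->spinlock (waking waiters if !master)
 */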
file:4c827d86547440bcbff79b54780cebc1cb1484a2 -> file:3fcb47959d886bf6acac096cc653194f16aba243
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -485,7 +485,11 @@ static int ocfs2_read_locked_inode(struc
OCFS2_BH_IGNORE_CACHE);
} else {
status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh);
- if (!status)
+ /*
+ * If buffer is in jbd, then its checksum may not have been
+ * computed as yet.
+ */
+ if (!status && !buffer_jbd(bh))
status = ocfs2_validate_inode_block(osb->sb, bh);
}
if (status < 0) {
file:544ac6245175f26c23249cf916c5001ae8f5d067 -> file:b5cb3ede9408944d99abc7c7c3b28c01b1ddadeb
--- a/fs/ocfs2/locks.c
+++ b/fs/ocfs2/locks.c
@@ -133,7 +133,7 @@ int ocfs2_lock(struct file *file, int cm
if (!(fl->fl_flags & FL_POSIX))
return -ENOLCK;
- if (__mandatory_lock(inode))
+ if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
return -ENOLCK;
return ocfs2_plock(osb->cconn, OCFS2_I(inode)->ip_blkno, file, cmd, fl);
file:03a1ab83a65b08544c0b9c7493431b0a122ab35a -> file:10e9527dda1f98cf0b0d5e0fa2c4610610990973
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -969,6 +969,103 @@ out:
}
/*
+ * Find the end range for a leaf refcount block indicated by
+ * el->l_recs[index].e_blkno.
+ */
+static int ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci,
+ struct buffer_head *ref_root_bh,
+ struct ocfs2_extent_block *eb,
+ struct ocfs2_extent_list *el,
+ int index, u32 *cpos_end)
+{
+ int ret, i, subtree_root;
+ u32 cpos;
+ u64 blkno;
+ struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
+ struct ocfs2_path *left_path = NULL, *right_path = NULL;
+ struct ocfs2_extent_tree et;
+ struct ocfs2_extent_list *tmp_el;
+
+ if (index < le16_to_cpu(el->l_next_free_rec) - 1) {
+ /*
+ * We have an extent rec after index, so just use the e_cpos
+ * of the next extent rec.
+ */
+ *cpos_end = le32_to_cpu(el->l_recs[index+1].e_cpos);
+ return 0;
+ }
+
+ if (!eb || !eb->h_next_leaf_blk) {
+ /*
+ * We are the last extent rec, so any high cpos should
+ * be stored in this leaf refcount block.
+ */
+ *cpos_end = UINT_MAX;
+ return 0;
+ }
+
+ /*
+ * If the extent block isn't the last one, we have to find
+ * the subtree root between this extent block and the next
+ * leaf extent block and get the corresponding e_cpos from
+ * the subroot. Otherwise we may corrupt the b-tree.
+ */
+ ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
+
+ left_path = ocfs2_new_path_from_et(&et);
+ if (!left_path) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ cpos = le32_to_cpu(eb->h_list.l_recs[index].e_cpos);
+ ret = ocfs2_find_path(ci, left_path, cpos);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ right_path = ocfs2_new_path_from_path(left_path);
+ if (!right_path) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_find_cpos_for_right_leaf(sb, left_path, &cpos);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_find_path(ci, right_path, cpos);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ subtree_root = ocfs2_find_subtree_root(&et, left_path,
+ right_path);
+
+ tmp_el = left_path->p_node[subtree_root].el;
+ blkno = left_path->p_node[subtree_root+1].bh->b_blocknr;
+ for (i = 0; i < le16_to_cpu(tmp_el->l_next_free_rec); i++) {
+ if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) {
+ *cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos);
+ break;
+ }
+ }
+
+ BUG_ON(i == le16_to_cpu(tmp_el->l_next_free_rec));
+
+out:
+ ocfs2_free_path(left_path);
+ ocfs2_free_path(right_path);
+ return ret;
+}
+
+/*
* Given a cpos and len, try to find the refcount record which contains cpos.
* 1. If cpos can be found in one refcount record, return the record.
* 2. If cpos can't be found, return a fake record which start from cpos
@@ -983,10 +1080,10 @@ static int ocfs2_get_refcount_rec(struct
struct buffer_head **ret_bh)
{
int ret = 0, i, found;
- u32 low_cpos;
+ u32 low_cpos, uninitialized_var(cpos_end);
struct ocfs2_extent_list *el;
- struct ocfs2_extent_rec *tmp, *rec = NULL;
- struct ocfs2_extent_block *eb;
+ struct ocfs2_extent_rec *rec = NULL;
+ struct ocfs2_extent_block *eb = NULL;
struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL;
struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
struct ocfs2_refcount_block *rb =
@@ -1034,12 +1131,16 @@ static int ocfs2_get_refcount_rec(struct
}
}
- /* adjust len when we have ocfs2_extent_rec after it. */
- if (found && i < le16_to_cpu(el->l_next_free_rec) - 1) {
- tmp = &el->l_recs[i+1];
+ if (found) {
+ ret = ocfs2_get_refcount_cpos_end(ci, ref_root_bh,
+ eb, el, i, &cpos_end);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
- if (le32_to_cpu(tmp->e_cpos) < cpos + len)
- len = le32_to_cpu(tmp->e_cpos) - cpos;
+ if (cpos_end < low_cpos + len)
+ len = cpos_end - low_cpos;
}
ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno),
@@ -2353,16 +2454,26 @@ static int ocfs2_calc_refcount_meta_cred
len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
le32_to_cpu(rec.r_clusters)) - cpos;
/*
- * If the refcount rec already exist, cool. We just need
- * to check whether there is a split. Otherwise we just need
- * to increase the refcount.
- * If we will insert one, increases recs_add.
- *
* We record all the records which will be inserted to the
* same refcount block, so that we can tell exactly whether
* we need a new refcount block or not.
+ *
+ * If we will insert a new one, this is easy and only happens
+ * while adding the refcounted flag to the extent, so we don't
+ * have a chance of splitting. We just need one record.
+ *
+ * If the refcount rec already exists, it is a little more
+ * complicated. We may have to:
+ * 1) split at the beginning if the start pos isn't aligned.
+ *    We need 1 more record in this case.
+ * 2) split at the end if the end pos isn't aligned.
+ *    We need 1 more record in this case.
+ * 3) split in the middle because of file system fragmentation.
+ *    We need 2 more records in this case (we can't detect this
+ *    beforehand, so always assume the worst case).
*/
if (rec.r_refcount) {
+ recs_add += 2;
/* Check whether we need a split at the beginning. */
if (cpos == start_cpos &&
cpos != le64_to_cpu(rec.r_cpos))
file:14f47d2bfe02eb6500666e8a63a227a2d4ee10f2 -> file:9f55be445bd647355e930b5636de531032dacd2f
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -701,6 +701,10 @@ unlock_osb:
if (!ocfs2_is_hard_readonly(osb))
ocfs2_set_journal_params(osb);
+
+ sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ?
+ MS_POSIXACL : 0);
}
out:
unlock_kernel();
file:e3421030a69f713971a78e7d5d525098a4e44416 -> file:91e656fd528b82e74bda7520ee8c113da52dda70
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -128,7 +128,7 @@ static void *ocfs2_fast_follow_link(stru
}
/* Fast symlinks can't be large */
- len = strlen(target);
+ len = strnlen(target, ocfs2_fast_symlink_chars(inode->i_sb));
link = kzalloc(len + 1, GFP_NOFS);
if (!link) {
status = -ENOMEM;
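strnlen() bounds the scan to the in-inode symlink area, so a corrupted fast symlink that lacks a NUL terminator can no longer make strlen() run off the end of the block. A tiny userspace illustration of the primitive:

#include <string.h>
#include <assert.h>

int main(void)
{
	char buf[4] = { 'a', 'b', 'c', 'd' }; /* deliberately unterminated */

	/* strlen(buf) would read past buf[3]; strnlen() stops at the bound */
	assert(strnlen(buf, sizeof(buf)) == 4);
	return 0;
}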
file:fc71aab084603749abc8138ee860c2f17faa05fa -> file:bae725b7febd3019772b21e97e16832c06875d80
--- a/fs/partitions/ibm.c
+++ b/fs/partitions/ibm.c
@@ -74,6 +74,7 @@ ibm_partition(struct parsed_partitions *
} *label;
unsigned char *data;
Sector sect;
+ sector_t labelsect;
res = 0;
blocksize = bdev_logical_block_size(bdev);
@@ -98,9 +99,19 @@ ibm_partition(struct parsed_partitions *
goto out_freeall;
/*
+ * Special case for FBA disks: label sector does not depend on
+ * blocksize.
+ */
+ if ((info->cu_type == 0x6310 && info->dev_type == 0x9336) ||
+ (info->cu_type == 0x3880 && info->dev_type == 0x3370))
+ labelsect = info->label_block;
+ else
+ labelsect = info->label_block * (blocksize >> 9);
+
+ /*
* Get volume label, extract name and type.
*/
- data = read_dev_sector(bdev, info->label_block*(blocksize/512), &sect);
+ data = read_dev_sector(bdev, labelsect, &sect);
if (data == NULL)
goto out_readerr;
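The label sector arithmetic: read_dev_sector() addresses 512-byte units, so on non-FBA DASD the label block must be scaled by blocksize/512, while the listed FBA devices already address 512-byte blocks. A worked example (values illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long label_block = 2, blocksize = 4096;

	/* non-FBA: scale to 512-byte sectors -> 2 * (4096 >> 9) = 16 */
	printf("labelsect = %lu\n", label_block * (blocksize >> 9));
	/* FBA (cu 0x6310/dev 0x9336 or cu 0x3880/dev 0x3370): no scaling */
	printf("labelsect = %lu\n", label_block);
	return 0;
}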
file:ae17d026aaa3f496fc0bf9d8522f52d027d8dadb -> file:d0cc080c732b7abba788ac24913c8175449876db
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -363,7 +363,7 @@ pipe_read(struct kiocb *iocb, const stru
error = ops->confirm(pipe, buf);
if (error) {
if (!ret)
- error = ret;
+ ret = error;
break;
}
file:77385c8449844c398b9e2f134780f5b32891d0a0 -> file:25fbc2019ec52e7a1177b158d6ddcae6adcaf4ac
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -405,7 +405,6 @@ static int do_task_stat(struct seq_file
/* add up live thread stats at the group level */
if (whole) {
- struct task_cputime cputime;
struct task_struct *t = task;
do {
min_flt += t->min_flt;
@@ -416,9 +415,7 @@ static int do_task_stat(struct seq_file
min_flt += sig->min_flt;
maj_flt += sig->maj_flt;
- thread_group_cputime(task, &cputime);
- utime = cputime.utime;
- stime = cputime.stime;
+ thread_group_times(task, &utime, &stime);
gtime = cputime_add(gtime, sig->gtime);
}
file:366b1017a4f13565287886cac1ed4077928319a5 -> file:e10ef045284782ba35fc000c4c5c63b4f149026b
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -206,6 +206,7 @@ static void show_map_vma(struct seq_file
int flags = vma->vm_flags;
unsigned long ino = 0;
unsigned long long pgoff = 0;
+ unsigned long start;
dev_t dev = 0;
int len;
@@ -216,8 +217,14 @@ static void show_map_vma(struct seq_file
pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
}
+ /* We don't show the stack guard page in /proc/maps */
+ start = vma->vm_start;
+ if (vma->vm_flags & VM_GROWSDOWN)
+ if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
+ start += PAGE_SIZE;
+
seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
- vma->vm_start,
+ start,
vma->vm_end,
flags & VM_READ ? 'r' : '-',
flags & VM_WRITE ? 'w' : '-',
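The adjustment only fires for the lowest VM_GROWSDOWN vma of a stack (when the vma below it is not a continuation), so exactly one guard page is hidden per stack. A standalone restatement of the predicate with mock types, for illustration only:

#include <stdio.h>

#define PAGE_SIZE    4096UL
#define VM_GROWSDOWN 0x100

struct vma { unsigned long start, end, flags; };

/* mirrors vma_stack_continue(): is "prev" the stack vma just below addr? */
static int stack_continues(const struct vma *prev, unsigned long addr)
{
	return prev && prev->end == addr && (prev->flags & VM_GROWSDOWN);
}

int main(void)
{
	struct vma stack = { 0xbffdf000, 0xc0000000, VM_GROWSDOWN };
	unsigned long shown = stack.start;

	if ((stack.flags & VM_GROWSDOWN) && !stack_continues(NULL, stack.start))
		shown += PAGE_SIZE; /* hide the guard page */
	printf("%08lx-%08lx\n", shown, stack.end);
	return 0;
}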
file:90622200b39c0622e0f159d423c929a036d76257 -> file:b5fe0aa033e7148a877d1bb7bf170f068c6f6f40
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -2184,6 +2184,15 @@ static int journal_read_transaction(stru
brelse(d_bh);
return 1;
}
+
+ if (bdev_read_only(sb->s_bdev)) {
+ reiserfs_warning(sb, "clm-2076",
+ "device is readonly, unable to replay log");
+ brelse(c_bh);
+ brelse(d_bh);
+ return -EROFS;
+ }
+
trans_id = get_desc_trans_id(desc);
/* now we know we've got a good transaction, and it was inside the valid time ranges */
log_blocks = kmalloc(get_desc_trans_len(desc) *
@@ -2422,12 +2431,6 @@ static int journal_read(struct super_blo
goto start_log_replay;
}
- if (continue_replay && bdev_read_only(sb->s_bdev)) {
- reiserfs_warning(sb, "clm-2076",
- "device is readonly, unable to replay log");
- return -1;
- }
-
/* ok, there are transactions that need to be replayed. start with the first log block, find
** all the valid transactions, and pick out the oldest.
*/
file:a92c8792c0f6d82d95fe00b8b3acbdc54802fd80 -> file:b37b13b93d89f719b43e26e9c9eb4b1f092b2e1a
--- a/fs/reiserfs/xattr_security.c
+++ b/fs/reiserfs/xattr_security.c
@@ -75,7 +75,7 @@ int reiserfs_security_init(struct inode
return error;
}
- if (sec->length) {
+ if (sec->length && reiserfs_xattrs_initialized(inode->i_sb)) {
blocks = reiserfs_xattr_jcreate_nblocks(inode) +
reiserfs_xattr_nblocks(inode, sec->length);
/* We don't want to count the directories twice if we have
file:b07565c9438672015aa041e3fb57e4b01ca6e27c -> file:d98bea8865c15b3c122c3e99d740c241330034a8
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -87,6 +87,7 @@ static int signalfd_copyinfo(struct sign
err |= __put_user(kinfo->si_tid, &uinfo->ssi_tid);
err |= __put_user(kinfo->si_overrun, &uinfo->ssi_overrun);
err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr);
+ err |= __put_user(kinfo->si_int, &uinfo->ssi_int);
break;
case __SI_POLL:
err |= __put_user(kinfo->si_band, &uinfo->ssi_band);
@@ -110,6 +111,7 @@ static int signalfd_copyinfo(struct sign
err |= __put_user(kinfo->si_pid, &uinfo->ssi_pid);
err |= __put_user(kinfo->si_uid, &uinfo->ssi_uid);
err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr);
+ err |= __put_user(kinfo->si_int, &uinfo->ssi_int);
break;
default:
/*
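With ssi_int copied, the sigval payload of a queued realtime signal now survives the trip through signalfd. A userspace check of the fixed behaviour (sketch, not part of the patch):

#include <sys/signalfd.h>
#include <signal.h>
#include <unistd.h>
#include <assert.h>

int main(void)
{
	sigset_t mask;
	struct signalfd_siginfo fdsi;
	union sigval v = { .sival_int = 42 };
	int fd;

	sigemptyset(&mask);
	sigaddset(&mask, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &mask, NULL);
	fd = signalfd(-1, &mask, 0);

	sigqueue(getpid(), SIGRTMIN, v);
	assert(read(fd, &fdsi, sizeof(fdsi)) == sizeof(fdsi));
	assert(fdsi.ssi_int == 42); /* was 0 before this fix */
	return 0;
}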
file:7394e9e17534ecb03573e28b2288d06cccf24a32 -> file:e5efbb96d2bdad48ebb4266198fd3b9f5fa7f629
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -365,17 +365,7 @@ __generic_file_splice_read(struct file *
* If the page isn't uptodate, we may need to start io on it
*/
if (!PageUptodate(page)) {
- /*
- * If in nonblock mode then dont block on waiting
- * for an in-flight io page
- */
- if (flags & SPLICE_F_NONBLOCK) {
- if (!trylock_page(page)) {
- error = -EAGAIN;
- break;
- }
- } else
- lock_page(page);
+ lock_page(page);
/*
* Page was truncated, or invalidated by the
file:f5ea4680f15fdca8a009f66a979bd6d3805244f6 -> file:7118a383a87a2623fe0250958c133d2ad385b801
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -340,7 +340,7 @@ static int sysfs_open_file(struct inode
char *p;
p = d_path(&file->f_path, last_sysfs_file, sizeof(last_sysfs_file));
- if (p)
+ if (!IS_ERR(p))
memmove(last_sysfs_file, p, strlen(p) + 1);
/* need attr_sd for attr and ops, its parent for kobj */
file:5bb523d7f37e9263399444379f9ac16451b4dd62 -> file:98fe0db674bcad47a7d40ab673a870f7a7ea9aba
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -789,6 +789,8 @@ xfs_ioc_fsgetxattr(
{
struct fsxattr fa;
+ memset(&fa, 0, sizeof(struct fsxattr));
+
xfs_ilock(ip, XFS_ILOCK_SHARED);
fa.fsx_xflags = xfs_ip2xflags(ip);
fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
file:73c22278fdaf82955c66f02cf2b4b626f0713dcd -> file:e4ccd9f24829aeef169c845c78cae2ce11b6f899
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -62,7 +62,9 @@ xfs_swapext(
goto out;
}
- if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND)) {
+ if (!(file->f_mode & FMODE_WRITE) ||
+ !(file->f_mode & FMODE_READ) ||
+ (file->f_flags & O_APPEND)) {
error = XFS_ERROR(EBADF);
goto out_put_file;
}
@@ -74,6 +76,7 @@ xfs_swapext(
}
if (!(target_file->f_mode & FMODE_WRITE) ||
+ !(target_file->f_mode & FMODE_READ) ||
(target_file->f_flags & O_APPEND)) {
error = XFS_ERROR(EBADF);
goto out_put_target_file;
file:0cbdccc4003d8dfc3a151841f3f63b049319d538 -> file:3d016e99ee5d0d027f460c61b9671e6649a364b7
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -85,7 +85,6 @@
{0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
- {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
@@ -103,6 +102,7 @@
{0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
{0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
{0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
file:912b8ff3f27290d333fa8f7d4f11626817a42ea2 -> file:05f6018b928eb1860761e047bdecdebfecb1bc51
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -932,7 +932,7 @@ extern void blk_queue_max_segment_size(s
extern void blk_queue_max_discard_sectors(struct request_queue *q,
unsigned int max_discard_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
-extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
@@ -1083,7 +1083,7 @@ static inline unsigned int queue_physica
return q->limits.physical_block_size;
}
-static inline int bdev_physical_block_size(struct block_device *bdev)
+static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
return queue_physical_block_size(bdev_get_queue(bdev));
}
file:64b1a4cc5a8b9757c78c56905176da9b9e1d4545 -> file:f73bc1b68c107d9d68531de46ad916d18de070c3
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -282,10 +282,12 @@ extern struct clocksource * __init __wea
extern void clocksource_mark_unstable(struct clocksource *cs);
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
-extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
+extern void
+update_vsyscall(struct timespec *ts, struct clocksource *c, u32 mult);
extern void update_vsyscall_tz(void);
#else
-static inline void update_vsyscall(struct timespec *ts, struct clocksource *c)
+static inline void
+update_vsyscall(struct timespec *ts, struct clocksource *c, u32 mult)
{
}
file:af931ee43dd8e43454b9cd9d389ec0bd5b1a7b21 -> file:cab23f2da4dc35111c2d90c71a5e8630266005a2
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -309,5 +309,7 @@ asmlinkage long compat_sys_newfstatat(un
asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename,
int flags, int mode);
+extern void __user *compat_alloc_user_space(unsigned long len);
+
#endif /* CONFIG_COMPAT */
#endif /* _LINUX_COMPAT_H */
file:a5740fc4d04b9415478f4180dcbdad289f5eb281 -> file:a73454aec33312359c4233fa4ec7e0598dbbe345
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -21,8 +21,7 @@ extern int number_of_cpusets; /* How man
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_locked(struct task_struct *p,
- struct cpumask *mask);
+extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
@@ -69,9 +68,6 @@ struct seq_file;
extern void cpuset_task_status_allowed(struct seq_file *m,
struct task_struct *task);
-extern void cpuset_lock(void);
-extern void cpuset_unlock(void);
-
extern int cpuset_mem_spread_node(void);
static inline int cpuset_do_page_mem_spread(void)
@@ -105,10 +101,11 @@ static inline void cpuset_cpus_allowed(s
{
cpumask_copy(mask, cpu_possible_mask);
}
-static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
- struct cpumask *mask)
+
+static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
{
- cpumask_copy(mask, cpu_possible_mask);
+ cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
+ return cpumask_any(cpu_active_mask);
}
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
@@ -157,9 +154,6 @@ static inline void cpuset_task_status_al
{
}
-static inline void cpuset_lock(void) {}
-static inline void cpuset_unlock(void) {}
-
static inline int cpuset_mem_spread_node(void)
{
return 0;
file:b6a562291a6d00657d572b690a18e71c1236f50b -> file:1ff096263b59f7f76bb9c6452f4c50209e9e3462
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -145,11 +145,11 @@ struct inodes_stat_t {
*
*/
#define RW_MASK 1
-#define RWA_MASK 2
+#define RWA_MASK 16
#define READ 0
#define WRITE 1
-#define READA 2 /* read-ahead - don't block if no resources */
-#define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */
+#define READA 16 /* readahead - don't block if no resources */
+#define SWRITE 17 /* for ll_rw_block(), wait for buffer lock */
#define READ_SYNC (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
#define READ_META (READ | (1 << BIO_RW_META))
#define WRITE_SYNC_PLUG (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
file:7ca72b74eec7e4142866f44debf96f594c70037f -> file:c49d6f542104a338a68d27fa8b3c2f50c58ea65e
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -52,16 +52,21 @@
* IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
* Used by threaded interrupts which need to keep the
* irq line disabled until the threaded handler has been run.
+ * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
+ *
*/
#define IRQF_DISABLED 0x00000020
#define IRQF_SAMPLE_RANDOM 0x00000040
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
-#define IRQF_TIMER 0x00000200
+#define __IRQF_TIMER 0x00000200
#define IRQF_PERCPU 0x00000400
#define IRQF_NOBALANCING 0x00000800
#define IRQF_IRQPOLL 0x00001000
#define IRQF_ONESHOT 0x00002000
+#define IRQF_NO_SUSPEND 0x00004000
+
+#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND)
/*
* Bits used by threaded handlers:
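A driver that must keep its interrupt live across suspend now passes the new flag explicitly; timer interrupts get it implicitly through the rewritten IRQF_TIMER. A sketch assuming <linux/interrupt.h>, with a hypothetical device name and handler:

/* sketch only: "mydev" and mydev_irq_handler() are placeholders */
static irqreturn_t mydev_irq_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int mydev_setup_irq(int irq, void *dev)
{
	/* stays enabled while suspend_device_irqs() disables the rest */
	return request_irq(irq, mydev_irq_handler,
			   IRQF_SHARED | IRQF_NO_SUSPEND, "mydev", dev);
}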
file:b0f6d97a06c2cff52f40eb1b1b0c846895b29038 -> file:a0699160d19b0e0d167e2ff8635571b9f3974b22
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -339,6 +339,7 @@ enum {
ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */
ATA_EHI_QUIET = (1 << 3), /* be quiet */
+ ATA_EHI_NO_RECOVERY = (1 << 4), /* no recovery */
ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */
ATA_EHI_DID_HARDRESET = (1 << 17), /* already hard-reset this port */
file:397a3774e6e964a06cb4dbd99738684034e22a8b -> file:1bb43c1db2187b6457b49df60d0e47d954088795
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -77,7 +77,11 @@ extern unsigned int kobjsize(const void
#define VM_MAYSHARE 0x00000080
#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
#define VM_GROWSUP 0x00000200
+#else
+#define VM_GROWSUP 0x00000000
+#endif
#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
@@ -838,6 +842,12 @@ int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+{
+ return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
unsigned long new_addr, unsigned long len);
@@ -1196,8 +1206,10 @@ unsigned long ra_submit(struct file_ra_s
/* Do stack extension */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
-#ifdef CONFIG_IA64
+#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
+#else
+ #define expand_upwards(vma, address) do { } while (0)
#endif
extern int expand_stack_downwards(struct vm_area_struct *vma,
unsigned long address);
file:84a524afb3dcdffdd60c7ef1eaf2672acd7731ca -> file:9d12ed56bfbc6116db691245a311ae313bce2e2e
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -138,7 +138,7 @@ struct vm_area_struct {
within vm_mm. */
/* linked list of VM areas per task, sorted by address */
- struct vm_area_struct *vm_next;
+ struct vm_area_struct *vm_next, *vm_prev;
pgprot_t vm_page_prot; /* Access permissions of this VMA. */
unsigned long vm_flags; /* Flags, see mm.h. */
file:6f7561730d88c3b8c816e34e76b140cb9612ce8c -> file:6c31a2a7c18d72194f7d197cb20263feb3f55936
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -290,6 +290,13 @@ struct zone {
unsigned long watermark[NR_WMARK];
/*
+ * When free pages are below this point, additional steps are taken
+ * when reading the number of free pages to avoid per-cpu counter
+ * drift allowing watermarks to be breached
+ */
+ unsigned long percpu_drift_mark;
+
+ /*
* We don't know if the memory that we're going to allocate will be freeable
* or/and it will be released eventually, so to avoid totally wasting several
* GB of ram we must reserve some of the lower zone memory (otherwise we risk
@@ -460,6 +467,12 @@ static inline int zone_is_oom_locked(con
return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}
+#ifdef CONFIG_SMP
+unsigned long zone_nr_free_pages(struct zone *zone);
+#else
+#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
+#endif /* CONFIG_SMP */
+
/*
* The "priority" of VM scanning is how much of the queues we will scan in one
* go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
file:6991ab5b24d1bbfd736df4d27315a83ee7271779 -> file:91b05c171854f12488e92c8fe2d23812bb1f03e5
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -14,8 +14,10 @@ struct irq_desc;
extern void mask_msi_irq(unsigned int irq);
extern void unmask_msi_irq(unsigned int irq);
extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
+extern void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
extern void read_msi_msg(unsigned int irq, struct msi_msg *msg);
+extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
struct msi_desc {
file:812a5f3c2abe90da914e7c998b7b7d8e509b0e38 -> file:ec12f8c247705225e3143397d37d4f0917ad8269
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1560,6 +1560,8 @@ extern void netif_carrier_on(struct net_
extern void netif_carrier_off(struct net_device *dev);
+extern void netif_notify_peers(struct net_device *dev);
+
/**
* netif_dormant_on - mark device as dormant.
* @dev: network device
file:44428d247dbe6e3b52e9b71798eff99735ae4319 -> file:5ecdb50bb3766b8cbd0c67f9eeee58e61245cd01
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -201,6 +201,7 @@ static inline int notifier_to_errno(int
#define NETDEV_PRE_UP 0x000D
#define NETDEV_BONDING_OLDTYPE 0x000E
#define NETDEV_BONDING_NEWTYPE 0x000F
+#define NETDEV_NOTIFY_PEERS 0x0013
#define SYS_DOWN 0x0001 /* Notify of system down */
#define SYS_RESTART SYS_DOWN
file:67325bfc803eeea2f55f4c83527c904960ed1bd3 -> file:fe2f4ee6ce411d56c0e3456468896e7c166ba280
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -393,6 +393,9 @@
#define PCI_DEVICE_ID_VLSI_82C147 0x0105
#define PCI_DEVICE_ID_VLSI_VAS96011 0x0702
+/* AMD RD890 Chipset */
+#define PCI_DEVICE_ID_RD890_IOMMU 0x5a23
+
#define PCI_VENDOR_ID_ADL 0x1005
#define PCI_DEVICE_ID_ADL_2301 0x2301
@@ -1262,6 +1265,7 @@
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS 0x07D8
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS 0x0AA2
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA 0x0D85
#define PCI_VENDOR_ID_IMS 0x10e0
#define PCI_DEVICE_ID_IMS_TT128 0x9128
@@ -2030,6 +2034,7 @@
#define PCI_DEVICE_ID_AFAVLAB_P030 0x2182
#define PCI_SUBDEVICE_ID_AFAVLAB_P061 0x2150
+#define PCI_VENDOR_ID_BCM_GVC 0x14a4
#define PCI_VENDOR_ID_BROADCOM 0x14e4
#define PCI_DEVICE_ID_TIGON3_5752 0x1600
#define PCI_DEVICE_ID_TIGON3_5752M 0x1601
file:99928dce37ea927bde2f515df93ad9a2bab2a239 -> file:7fa02b4af838513b9a609122db0772654cdc2f91
--- a/include/linux/reiserfs_xattr.h
+++ b/include/linux/reiserfs_xattr.h
@@ -70,6 +70,11 @@ int reiserfs_security_write(struct reise
void reiserfs_security_free(struct reiserfs_security_handle *sec);
#endif
+static inline int reiserfs_xattrs_initialized(struct super_block *sb)
+{
+ return REISERFS_SB(sb)->priv_root != NULL;
+}
+
#define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header))
static inline loff_t reiserfs_xattr_nblocks(struct inode *inode, loff_t size)
{
file:ce27be0f657deac029c890a3b4c6c9e673bebf93 -> file:f145ec3eedc2f0fc846b06cbe63f726fb46865c0
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -145,7 +145,6 @@ extern unsigned long this_cpu_load(void)
extern void calc_global_load(void);
-extern u64 cpu_nr_migrations(int cpu);
extern unsigned long get_parent_ip(unsigned long addr);
@@ -628,6 +627,9 @@ struct signal_struct {
cputime_t utime, stime, cutime, cstime;
cputime_t gtime;
cputime_t cgtime;
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+ cputime_t prev_utime, prev_stime;
+#endif
unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
unsigned long inblock, oublock, cinblock, coublock;
@@ -998,6 +1000,7 @@ struct sched_domain {
char *name;
#endif
+ unsigned int span_weight;
/*
* Span of all CPUs in this domain.
*
@@ -1069,7 +1072,8 @@ struct sched_domain;
struct sched_class {
const struct sched_class *next;
- void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
+ void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
+ bool head);
void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
void (*yield_task) (struct rq *rq);
@@ -1079,7 +1083,8 @@ struct sched_class {
void (*put_prev_task) (struct rq *rq, struct task_struct *p);
#ifdef CONFIG_SMP
- int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
+ int (*select_task_rq)(struct rq *rq, struct task_struct *p,
+ int sd_flag, int flags);
unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
struct rq *busiest, unsigned long max_load_move,
@@ -1091,7 +1096,8 @@ struct sched_class {
enum cpu_idle_type idle);
void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
void (*post_schedule) (struct rq *this_rq);
- void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
+ void (*task_waking) (struct rq *this_rq, struct task_struct *task);
+ void (*task_woken) (struct rq *this_rq, struct task_struct *task);
void (*set_cpus_allowed)(struct task_struct *p,
const struct cpumask *newmask);
@@ -1102,7 +1108,7 @@ struct sched_class {
void (*set_curr_task) (struct rq *rq);
void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
- void (*task_new) (struct rq *rq, struct task_struct *p);
+ void (*task_fork) (struct task_struct *p);
void (*switched_from) (struct rq *this_rq, struct task_struct *task,
int running);
@@ -1111,10 +1117,11 @@ struct sched_class {
void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
int oldprio, int running);
- unsigned int (*get_rr_interval) (struct task_struct *task);
+ unsigned int (*get_rr_interval) (struct rq *rq,
+ struct task_struct *task);
#ifdef CONFIG_FAIR_GROUP_SCHED
- void (*moved_group) (struct task_struct *p);
+ void (*moved_group) (struct task_struct *p, int on_rq);
#endif
};
@@ -1175,7 +1182,6 @@ struct sched_entity {
u64 nr_failed_migrations_running;
u64 nr_failed_migrations_hot;
u64 nr_forced_migrations;
- u64 nr_forced2_migrations;
u64 nr_wakeups;
u64 nr_wakeups_sync;
@@ -1726,6 +1732,7 @@ static inline void put_task_struct(struc
extern cputime_t task_utime(struct task_struct *p);
extern cputime_t task_stime(struct task_struct *p);
extern cputime_t task_gtime(struct task_struct *p);
+extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern int task_free_register(struct notifier_block *n);
extern int task_free_unregister(struct notifier_block *n);
@@ -1886,6 +1893,7 @@ extern void sched_clock_idle_sleep_event
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#ifdef CONFIG_HOTPLUG_CPU
+extern void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p);
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
file:a28c37d9dd9ec9185f65348da841ccf87a747f04 -> file:3cbf483919bc95475a8d3b4a63ce16e6349798b3
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -301,8 +301,8 @@ struct ssb_bus {
/* ID information about the Chip. */
u16 chip_id;
u16 chip_rev;
- u16 sprom_size; /* number of words in sprom */
u16 sprom_offset;
+ u16 sprom_size; /* number of words in sprom */
u8 chip_package;
/* List of devices (cores) on the backplane. */
@@ -391,6 +391,9 @@ extern int ssb_bus_sdiobus_register(stru
extern void ssb_bus_unregister(struct ssb_bus *bus);
+/* Does the device have an SPROM? */
+extern bool ssb_is_sprom_available(struct ssb_bus *bus);
+
/* Set a fallback SPROM.
* See kdoc at the function definition for complete documentation. */
extern int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom);
file:7600f38c5aa96b9d70bed32150bbc5c776ad8613 -> file:2cdf249b4e5f0627980e4140bf96170cb8546ad0
--- a/include/linux/ssb/ssb_driver_chipcommon.h
+++ b/include/linux/ssb/ssb_driver_chipcommon.h
@@ -46,7 +46,6 @@
#define SSB_PLLTYPE_7 0x00038000 /* 25Mhz, 4 dividers */
#define SSB_CHIPCO_CAP_PCTL 0x00040000 /* Power Control */
#define SSB_CHIPCO_CAP_OTPS 0x00380000 /* OTP size */
-#define SSB_CHIPCO_CAP_SPROM 0x40000000 /* SPROM present */
#define SSB_CHIPCO_CAP_OTPS_SHIFT 19
#define SSB_CHIPCO_CAP_OTPS_BASE 5
#define SSB_CHIPCO_CAP_JTAGM 0x00400000 /* JTAG master present */
@@ -54,6 +53,7 @@
#define SSB_CHIPCO_CAP_64BIT 0x08000000 /* 64-bit Backplane */
#define SSB_CHIPCO_CAP_PMU 0x10000000 /* PMU available (rev >= 20) */
#define SSB_CHIPCO_CAP_ECI 0x20000000 /* ECI available (rev >= 20) */
+#define SSB_CHIPCO_CAP_SPROM 0x40000000 /* SPROM present */
#define SSB_CHIPCO_CORECTL 0x0008
#define SSB_CHIPCO_CORECTL_UARTCLK0 0x00000001 /* Drive UART with internal clock */
#define SSB_CHIPCO_CORECTL_SE 0x00000002 /* sync clk out enable (corerev >= 3) */
@@ -386,6 +386,7 @@
/** Chip specific Chip-Status register contents. */
+#define SSB_CHIPCO_CHST_4322_SPROM_EXISTS 0x00000040 /* SPROM present */
#define SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL 0x00000003
#define SSB_CHIPCO_CHST_4325_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */
#define SSB_CHIPCO_CHST_4325_SPROM_SEL 1 /* OTP is powered up, SPROM is present */
@@ -399,6 +400,18 @@
#define SSB_CHIPCO_CHST_4325_RCAL_VALUE_SHIFT 4
#define SSB_CHIPCO_CHST_4325_PMUTOP_2B 0x00000200 /* 1 for 2b, 0 for to 2a */
+/** Macros to determine SPROM presence based on Chip-Status register. */
+#define SSB_CHIPCO_CHST_4312_SPROM_PRESENT(status) \
+ ((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \
+ SSB_CHIPCO_CHST_4325_OTP_SEL)
+#define SSB_CHIPCO_CHST_4322_SPROM_PRESENT(status) \
+ (status & SSB_CHIPCO_CHST_4322_SPROM_EXISTS)
+#define SSB_CHIPCO_CHST_4325_SPROM_PRESENT(status) \
+ (((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \
+ SSB_CHIPCO_CHST_4325_DEFCIS_SEL) && \
+ ((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \
+ SSB_CHIPCO_CHST_4325_OTP_SEL))
+
/** Clockcontrol masks and values **/
file:0482229c07db348636a9089722b69168a8b4dcc9 -> file:8dc082194b226a3485535fa09646e6f31ccc4558
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -98,6 +98,9 @@ extern int tick_check_oneshot_change(int
extern struct tick_sched *tick_get_tick_sched(int cpu);
extern void tick_check_idle(int cpu);
extern int tick_oneshot_mode_active(void);
+# ifndef arch_needs_cpu
+# define arch_needs_cpu(cpu) (0)
+# endif
# else
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
file:57e63579bfdd7f193511675fc9ed26141b586638 -> file:5b81156780b17f8bf52608ca7b8230dd638a530a
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -99,7 +99,7 @@ int arch_update_cpu_topology(void);
| 1*SD_WAKE_AFFINE \
| 1*SD_SHARE_CPUPOWER \
| 0*SD_POWERSAVINGS_BALANCE \
- | 0*SD_SHARE_PKG_RESOURCES \
+ | 1*SD_SHARE_PKG_RESOURCES \
| 0*SD_SERIALIZE \
| 0*SD_PREFER_SIBLING \
, \
file:2526f3bbd273e522e1ff77b2ba404c87d9a5a56a -> file:dd0bde120045aab26a2ce403af9d39d8b46b4171
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -19,4 +19,8 @@
/* device can't handle its Configuration or Interface strings */
#define USB_QUIRK_CONFIG_INTF_STRINGS 0x00000008
+/* device needs a pause during initialization, after we read the device
+ descriptor */
+#define USB_QUIRK_DELAY_INIT 0x00000040
+
#endif /* __LINUX_USB_QUIRKS_H */
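The flag is consumed from the quirk table in drivers/usb/core/quirks.c. A sketch of an entry; the VID/PID pair here is made up purely for illustration:

/* sketch: 0x1234/0x5678 is a hypothetical device needing the pause */
static const struct usb_device_id usb_quirk_list[] = {
	{ USB_DEVICE(0x1234, 0x5678), .driver_info = USB_QUIRK_DELAY_INIT },
	{ } /* terminating entry */
};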
file:2d0f222388a8e9f2dfb3f8bec716eff2999d78f2 -> file:13070d659129d44cee316c25f451258973ae2d2f
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -166,6 +166,28 @@ static inline unsigned long zone_page_st
return x;
}
+/*
+ * More accurate version that also considers the currently pending
+ * deltas. For that we need to loop over all cpus to find the current
+ * deltas. There is no synchronization so the result cannot be
+ * exactly accurate either.
+ */
+static inline unsigned long zone_page_state_snapshot(struct zone *zone,
+ enum zone_stat_item item)
+{
+ long x = atomic_long_read(&zone->vm_stat[item]);
+
+#ifdef CONFIG_SMP
+ int cpu;
+ for_each_online_cpu(cpu)
+ x += zone_pcp(zone, cpu)->vm_stat_diff[item];
+
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
+}
+
extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);
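Why the snapshot matters: each CPU may hold up to its stat_threshold worth of unflushed counts, so the plain counter can be off by nr_cpus * threshold pages, enough to make free pages appear above a low watermark that has in fact been breached. A worked example (threshold value illustrative):

#include <stdio.h>

int main(void)
{
	long cpus = 64, stat_threshold = 125; /* illustrative values */
	long drift = cpus * stat_threshold;   /* worst-case pending delta */

	printf("max drift: %ld pages (~%ld MiB with 4 KiB pages)\n",
	       drift, drift * 4096 / (1024 * 1024)); /* 8000 pages, ~31 MiB */
	return 0;
}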
file:b9648890b3eb76e461eb939be048571d38b797b3 -> file:c3eebd06bd87d15ced4521307bba2254633bd695
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -259,11 +259,21 @@ static inline int between(__u32 seq1, __
return seq3 - seq2 >= seq1 - seq2;
}
-static inline int tcp_too_many_orphans(struct sock *sk, int num)
+static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
- return (num > sysctl_tcp_max_orphans) ||
- (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
- atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]);
+ struct percpu_counter *ocp = sk->sk_prot->orphan_count;
+ int orphans = percpu_counter_read_positive(ocp);
+
+ if (orphans << shift > sysctl_tcp_max_orphans) {
+ orphans = percpu_counter_sum_positive(ocp);
+ if (orphans << shift > sysctl_tcp_max_orphans)
+ return true;
+ }
+
+ if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
+ atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
+ return true;
+ return false;
}
/* syncookies: remember time of last synqueue overflow */
@@ -501,8 +511,22 @@ extern unsigned int tcp_current_mss(stru
/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
- if (tp->max_window && pktsize > (tp->max_window >> 1))
- return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
+ int cutoff;
+
+ /* When peer uses tiny windows, there is no use in packetizing
+ * to sub-MSS pieces for the sake of SWS or making sure there
+ * are enough packets in the pipe for fast recovery.
+ *
+ * On the other hand, for extremely large MSS devices, handling
+ * smaller than MSS windows in this way does make sense.
+ */
+ if (tp->max_window >= 512)
+ cutoff = (tp->max_window >> 1);
+ else
+ cutoff = tp->max_window;
+
+ if (cutoff && pktsize > cutoff)
+ return max_t(int, cutoff, 68U - tp->tcp_header_len);
else
return pktsize;
}
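A worked pass through the new bounding logic, as a standalone restatement (values illustrative):

#include <stdio.h>

static int bound_to_half_wnd(int max_window, int pktsize, int hdrlen)
{
	int cutoff = (max_window >= 512) ? (max_window >> 1) : max_window;

	if (cutoff && pktsize > cutoff)
		return (cutoff > 68 - hdrlen) ? cutoff : 68 - hdrlen;
	return pktsize;
}

int main(void)
{
	/* normal window: still halved, 65535 -> 32767 */
	printf("%d\n", bound_to_half_wnd(65535, 40000, 20));
	/* tiny peer window: no sub-MSS packetizing, stays at 300 */
	printf("%d\n", bound_to_half_wnd(300, 1460, 20));
	return 0;
}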
file:2cda0401156874213972853e7c2a1cbb19f00a06 -> file:21926a0458fc91e4908f42c64022d1bfbd3cac11
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -182,6 +182,10 @@ extern int sysctl_x25_clear_request_tim
extern int sysctl_x25_ack_holdback_timeout;
extern int sysctl_x25_forward;
+extern int x25_parse_address_block(struct sk_buff *skb,
+ struct x25_address *called_addr,
+ struct x25_address *calling_addr);
+
extern int x25_addr_ntoa(unsigned char *, struct x25_address *,
struct x25_address *);
extern int x25_addr_aton(unsigned char *, struct x25_address *,
file:6a664c3f7c1e426dd0b74129ce9cf81cb07fefa0 -> file:7dc97d12253c1bdc494b939e5e856b1b1fa6fc2c
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -1707,6 +1707,7 @@ struct snd_emu10k1 {
unsigned int card_type; /* EMU10K1_CARD_* */
unsigned int ecard_ctrl; /* ecard control bits */
unsigned long dma_mask; /* PCI DMA mask */
+ unsigned int delay_pcm_irq; /* in samples */
int max_cache_pages; /* max memory size / PAGE_SIZE */
struct snd_dma_buffer silent_page; /* silent page */
struct snd_dma_buffer ptb_pages; /* page table pages */
file:ab76fb0ef8443e373c845b4a82510f09d64a1e79 -> file:5e3e3a15661bc5c97674081974d8ee51ef135445
--- a/ipc/compat.c
+++ b/ipc/compat.c
@@ -242,6 +242,8 @@ long compat_sys_semctl(int first, int se
struct semid64_ds __user *up64;
int version = compat_ipc_parse_version(&third);
+ memset(&s64, 0, sizeof(s64));
+
if (!uptr)
return -EINVAL;
if (get_user(pad, (u32 __user *) uptr))
@@ -422,6 +424,8 @@ long compat_sys_msgctl(int first, int se
int version = compat_ipc_parse_version(&second);
void __user *p;
+ memset(&m64, 0, sizeof(m64));
+
switch (second & (~IPC_64)) {
case IPC_INFO:
case IPC_RMID:
@@ -595,6 +599,8 @@ long compat_sys_shmctl(int first, int se
int err, err2;
int version = compat_ipc_parse_version(&second);
+ memset(&s64, 0, sizeof(s64));
+
switch (second & (~IPC_64)) {
case IPC_RMID:
case SHM_LOCK:
file:d8d1e9ff4e8869ba1c9ebe6300f37dec6c62e89b -> file:380ea4fe08e7151c71c64a39eac8a9e92a2ea7ee
--- a/ipc/compat_mq.c
+++ b/ipc/compat_mq.c
@@ -53,6 +53,9 @@ asmlinkage long compat_sys_mq_open(const
void __user *p = NULL;
if (u_attr && oflag & O_CREAT) {
struct mq_attr attr;
+
+ memset(&attr, 0, sizeof(attr));
+
p = compat_alloc_user_space(sizeof(attr));
if (get_compat_mq_attr(&attr, u_attr) ||
copy_to_user(p, &attr, sizeof(attr)))
@@ -127,6 +130,8 @@ asmlinkage long compat_sys_mq_getsetattr
struct mq_attr __user *p = compat_alloc_user_space(2 * sizeof(*p));
long ret;
+ memset(&mqstat, 0, sizeof(mqstat));
+
if (u_mqstat) {
if (get_compat_mq_attr(&mqstat, u_mqstat) ||
copy_to_user(p, &mqstat, sizeof(mqstat)))
file:2f2a47959576964c200e7493464818da982c3e7a -> file:b781007eea46aa02355303c48c52c071a9931b74
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -560,6 +560,8 @@ static unsigned long copy_semid_to_user(
{
struct semid_ds out;
+ memset(&out, 0, sizeof(out));
+
ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
out.sem_otime = in->sem_otime;
file:e9b039f74129e3e5e47d34c1e91a128b29fa45df -> file:d30732c97599d6e0a516ccfc305cef1d76bc2304
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -474,6 +474,7 @@ static inline unsigned long copy_shmid_t
{
struct shmid_ds out;
+ memset(&out, 0, sizeof(out));
ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
out.shm_segsz = in->shm_segsz;
out.shm_atime = in->shm_atime;
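
The memset() calls added throughout ipc/ above (compat.c, compat_mq.c, sem.c, shm.c) all close the same class of bug: a struct on the kernel stack is only partially initialized before being copied to user space, so struct padding and untouched fields leak stale stack bytes. A userspace sketch of the failure mode, using a hypothetical struct with implicit padding:

    #include <stdio.h>
    #include <string.h>

    struct reply {
            char tag;      /* 3 bytes of padding typically follow */
            int  value;
            long unused;   /* field the "syscall" never fills in */
    };

    static void fill_reply(struct reply *r, int leaky)
    {
            if (!leaky)
                    memset(r, 0, sizeof(*r));  /* the fix: zero everything first */
            r->tag = 'x';
            r->value = 42;
            /* r->unused and the padding keep whatever the stack held */
    }

    int main(void)
    {
            struct reply r;
            unsigned char *p = (unsigned char *)&r;

            fill_reply(&r, 1);
            /* The indeterminate bytes printed here are exactly the leak. */
            for (size_t i = 0; i < sizeof(r); i++)
                    printf("%02x ", p[i]);
            printf("\n");
            return 0;
    }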
file:180d1885f782e5d9e4e5872d166bf8df5738eae8 -> file:8bc557869d90a9b5ebc47f6de0b5abca05aaa203
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -25,6 +25,7 @@
#include <linux/posix-timers.h>
#include <linux/times.h>
#include <linux/ptrace.h>
+#include <linux/module.h>
#include <asm/uaccess.h>
@@ -1136,3 +1137,24 @@ compat_sys_sysinfo(struct compat_sysinfo
return 0;
}
+
+/*
+ * Allocate user-space memory for the duration of a single system call,
+ * in order to marshal parameters inside a compat thunk.
+ */
+void __user *compat_alloc_user_space(unsigned long len)
+{
+ void __user *ptr;
+
+ /* If len would occupy more than half of the entire compat space... */
+ if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
+ return NULL;
+
+ ptr = arch_compat_alloc_user_space(len);
+
+ if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
+ return NULL;
+
+ return ptr;
+}
+EXPORT_SYMBOL_GPL(compat_alloc_user_space);
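
compat_alloc_user_space() above carves a scratch area out of user-accessible memory so a compat entry point can rewrite a 32-bit structure into the native layout before calling the regular syscall path, with a length sanity check and access_ok() guarding the result. A runnable userspace simulation of that marshalling step (struct layouts, alloc_scratch() and compat_thunk() are all hypothetical stand-ins):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* 32-bit layout as user space handed it in. */
    struct attr32 { uint32_t flags; uint32_t maxmsg; };
    /* Native layout the real handler expects. */
    struct attr   { uint64_t flags; uint64_t maxmsg; };

    /* Stand-in for arch_compat_alloc_user_space(): scratch owned for
     * the duration of one "syscall". */
    static void *alloc_scratch(size_t len)
    {
            if (len > (UINT32_MAX >> 1))   /* same sanity check as the patch */
                    return NULL;
            return malloc(len);
    }

    static int compat_thunk(const struct attr32 *u_attr)
    {
            struct attr native, *p = alloc_scratch(sizeof(*p));

            if (!p)
                    return -1;
            native.flags  = u_attr->flags;   /* widen each field explicitly */
            native.maxmsg = u_attr->maxmsg;
            memcpy(p, &native, sizeof(native));
            printf("native flags=%llu\n", (unsigned long long)p->flags);
            free(p);
            return 0;
    }

    int main(void)
    {
            struct attr32 a = { .flags = 3, .maxmsg = 10 };
            return compat_thunk(&a);
    }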
file:291ac586f37ff93e047391be46b3db90d5f80445 -> file:7e8b6acd2e857f722048bb646334390d7892afc0
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -151,7 +151,7 @@ static inline void check_for_tasks(int c
write_lock_irq(&tasklist_lock);
for_each_process(p) {
- if (task_cpu(p) == cpu &&
+ if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
(!cputime_eq(p->utime, cputime_zero) ||
!cputime_eq(p->stime, cputime_zero)))
printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\
@@ -163,6 +163,7 @@ static inline void check_for_tasks(int c
}
struct take_cpu_down_param {
+ struct task_struct *caller;
unsigned long mod;
void *hcpu;
};
@@ -171,6 +172,7 @@ struct take_cpu_down_param {
static int __ref take_cpu_down(void *_param)
{
struct take_cpu_down_param *param = _param;
+ unsigned int cpu = (unsigned long)param->hcpu;
int err;
/* Ensure this CPU doesn't handle any more interrupts. */
@@ -181,6 +183,8 @@ static int __ref take_cpu_down(void *_pa
raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
param->hcpu);
+ if (task_cpu(param->caller) == cpu)
+ move_task_off_dead_cpu(cpu, param->caller);
/* Force idle task to run as soon as we yield: it should
immediately notice cpu is offline and die quickly. */
sched_idle_next();
@@ -191,10 +195,10 @@ static int __ref take_cpu_down(void *_pa
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
int err, nr_calls = 0;
- cpumask_var_t old_allowed;
void *hcpu = (void *)(long)cpu;
unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
struct take_cpu_down_param tcd_param = {
+ .caller = current,
.mod = mod,
.hcpu = hcpu,
};
@@ -205,10 +209,8 @@ static int __ref _cpu_down(unsigned int
if (!cpu_online(cpu))
return -EINVAL;
- if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
- return -ENOMEM;
-
cpu_hotplug_begin();
+ set_cpu_active(cpu, false);
err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
hcpu, -1, &nr_calls);
if (err == NOTIFY_BAD) {
@@ -223,10 +225,6 @@ static int __ref _cpu_down(unsigned int
goto out_release;
}
- /* Ensure that we are not runnable on dying cpu */
- cpumask_copy(old_allowed, &current->cpus_allowed);
- set_cpus_allowed_ptr(current, cpu_active_mask);
-
err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
if (err) {
set_cpu_active(cpu, true);
@@ -235,7 +233,7 @@ static int __ref _cpu_down(unsigned int
hcpu) == NOTIFY_BAD)
BUG();
- goto out_allowed;
+ goto out_release;
}
BUG_ON(cpu_online(cpu));
@@ -253,8 +251,6 @@ static int __ref _cpu_down(unsigned int
check_for_tasks(cpu);
-out_allowed:
- set_cpus_allowed_ptr(current, old_allowed);
out_release:
cpu_hotplug_done();
if (!err) {
@@ -262,7 +258,6 @@ out_release:
hcpu) == NOTIFY_BAD)
BUG();
}
- free_cpumask_var(old_allowed);
return err;
}
@@ -280,18 +275,6 @@ int __ref cpu_down(unsigned int cpu)
goto out;
}
- set_cpu_active(cpu, false);
-
- /*
- * Make sure the all cpus did the reschedule and are not
- * using stale version of the cpu_active_mask.
- * This is not strictly necessary becuase stop_machine()
- * that we run down the line already provides the required
- * synchronization. But it's really a side effect and we do not
- * want to depend on the innards of the stop_machine here.
- */
- synchronize_sched();
-
err = _cpu_down(cpu, 0);
out:
@@ -382,19 +365,12 @@ int disable_nonboot_cpus(void)
return error;
cpu_maps_update_begin();
first_cpu = cpumask_first(cpu_online_mask);
- /* We take down all of the non-boot CPUs in one shot to avoid races
+ /*
+ * We take down all of the non-boot CPUs in one shot to avoid races
* with the userspace trying to use the CPU hotplug at the same time
*/
cpumask_clear(frozen_cpus);
- for_each_online_cpu(cpu) {
- if (cpu == first_cpu)
- continue;
- set_cpu_active(cpu, false);
- }
-
- synchronize_sched();
-
printk("Disabling non-boot CPUs ...\n");
for_each_online_cpu(cpu) {
if (cpu == first_cpu)
file:5d03d305a2b1bbe98d0b3904109f1ae076282387 -> file:0ee292b51dd64d0080a2f9affe1cf57c923e0662
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2148,19 +2148,52 @@ void __init cpuset_init_smp(void)
void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
{
mutex_lock(&callback_mutex);
- cpuset_cpus_allowed_locked(tsk, pmask);
+ task_lock(tsk);
+ guarantee_online_cpus(task_cs(tsk), pmask);
+ task_unlock(tsk);
mutex_unlock(&callback_mutex);
}
-/**
- * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset.
- * Must be called with callback_mutex held.
- **/
-void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
+int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
{
- task_lock(tsk);
- guarantee_online_cpus(task_cs(tsk), pmask);
- task_unlock(tsk);
+ const struct cpuset *cs;
+ int cpu;
+
+ rcu_read_lock();
+ cs = task_cs(tsk);
+ if (cs)
+ cpumask_copy(&tsk->cpus_allowed, cs->cpus_allowed);
+ rcu_read_unlock();
+
+ /*
+ * We own tsk->cpus_allowed, nobody can change it under us.
+ *
+ * But we used cs && cs->cpus_allowed lockless and thus can
+ * race with cgroup_attach_task() or update_cpumask() and get
+ * the wrong tsk->cpus_allowed. However, both cases imply the
+ * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
+ * which takes task_rq_lock().
+ *
+ * If we are called after it dropped the lock we must see all
+ * changes in task_cs()->cpus_allowed. Otherwise we can temporarily
+ * set any mask even if it is not right from task_cs() pov,
+ * the pending set_cpus_allowed_ptr() will fix things.
+ */
+
+ cpu = cpumask_any_and(&tsk->cpus_allowed, cpu_active_mask);
+ if (cpu >= nr_cpu_ids) {
+ /*
+ * Either tsk->cpus_allowed is wrong (see above) or it
+ * is actually empty. The latter case is only possible
+ * if we are racing with remove_tasks_in_empty_cpuset().
+ * Like above we can temporarily set any mask and rely on
+ * set_cpus_allowed_ptr() as synchronization point.
+ */
+ cpumask_copy(&tsk->cpus_allowed, cpu_possible_mask);
+ cpu = cpumask_any(cpu_active_mask);
+ }
+
+ return cpu;
}
void cpuset_init_current_mems_allowed(void)
@@ -2349,22 +2382,6 @@ int __cpuset_node_allowed_hardwall(int n
}
/**
- * cpuset_lock - lock out any changes to cpuset structures
- *
- * The out of memory (oom) code needs to mutex_lock cpusets
- * from being changed while it scans the tasklist looking for a
- * task in an overlapping cpuset. Expose callback_mutex via this
- * cpuset_lock() routine, so the oom code can lock it, before
- * locking the task list. The tasklist_lock is a spinlock, so
- * must be taken inside callback_mutex.
- */
-
-void cpuset_lock(void)
-{
- mutex_lock(&callback_mutex);
-}
-
-/**
* cpuset_unlock - release lock on cpuset changes
*
* Undo the lock taken in a previous cpuset_lock() call.
file:f7864ac2ecc1ad54c0af6b06b6f9d2da4a93f1ac -> file:570255f541e507b416ecb51c89db8b0adf0cffe9
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -110,8 +110,8 @@ static void __exit_signal(struct task_st
* We won't ever get here for the group leader, since it
* will have been the last reference on the signal_struct.
*/
- sig->utime = cputime_add(sig->utime, task_utime(tsk));
- sig->stime = cputime_add(sig->stime, task_stime(tsk));
+ sig->utime = cputime_add(sig->utime, tsk->utime);
+ sig->stime = cputime_add(sig->stime, tsk->stime);
sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
sig->min_flt += tsk->min_flt;
sig->maj_flt += tsk->maj_flt;
@@ -899,6 +899,15 @@ NORET_TYPE void do_exit(long code)
if (unlikely(!tsk->pid))
panic("Attempted to kill the idle task!");
+ /*
+ * If do_exit is called because this processes oopsed, it's possible
+ * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
+ * continuing. Amongst other possible reasons, this is to prevent
+ * mm_release()->clear_child_tid() from writing to a user-controlled
+ * kernel address.
+ */
+ set_fs(USER_DS);
+
tracehook_report_exit(&code);
validate_creds_for_do_exit(tsk);
@@ -1205,6 +1214,7 @@ static int wait_task_zombie(struct wait_
struct signal_struct *psig;
struct signal_struct *sig;
unsigned long maxrss;
+ cputime_t tgutime, tgstime;
/*
* The resource counters for the group leader are in its
@@ -1220,20 +1230,23 @@ static int wait_task_zombie(struct wait_
* need to protect the access to parent->signal fields,
* as other threads in the parent group can be right
* here reaping other children at the same time.
+ *
+ * We use thread_group_times() to get times for the thread
+ * group, which consolidates times for all threads in the
+ * group including the group leader.
*/
+ thread_group_times(p, &tgutime, &tgstime);
spin_lock_irq(&p->real_parent->sighand->siglock);
psig = p->real_parent->signal;
sig = p->signal;
psig->cutime =
cputime_add(psig->cutime,
- cputime_add(p->utime,
- cputime_add(sig->utime,
- sig->cutime)));
+ cputime_add(tgutime,
+ sig->cutime));
psig->cstime =
cputime_add(psig->cstime,
- cputime_add(p->stime,
- cputime_add(sig->stime,
- sig->cstime)));
+ cputime_add(tgstime,
+ sig->cstime));
psig->cgtime =
cputime_add(psig->cgtime,
cputime_add(p->gtime,
@@ -1370,8 +1383,7 @@ static int wait_task_stopped(struct wait
if (!unlikely(wo->wo_flags & WNOWAIT))
*p_code = 0;
- /* don't need the RCU readlock here as we're holding a spinlock */
- uid = __task_cred(p)->uid;
+ uid = task_uid(p);
unlock_sig:
spin_unlock_irq(&p->sighand->siglock);
if (!exit_code)
@@ -1444,7 +1456,7 @@ static int wait_task_continued(struct wa
}
if (!unlikely(wo->wo_flags & WNOWAIT))
p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
- uid = __task_cred(p)->uid;
+ uid = task_uid(p);
spin_unlock_irq(&p->sighand->siglock);
pid = task_pid_vnr(p);
file:ff6d443f99e34e62b88701bdfddf54a9baba55c8 -> file:2ac4cbbc4f502a06e865b8f99462832f097e2a87
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -293,7 +293,7 @@ out:
#ifdef CONFIG_MMU
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
- struct vm_area_struct *mpnt, *tmp, **pprev;
+ struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
struct rb_node **rb_link, *rb_parent;
int retval;
unsigned long charge;
@@ -321,6 +321,7 @@ static int dup_mmap(struct mm_struct *mm
if (retval)
goto out;
+ prev = NULL;
for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
struct file *file;
@@ -349,7 +350,7 @@ static int dup_mmap(struct mm_struct *mm
vma_set_policy(tmp, pol);
tmp->vm_flags &= ~VM_LOCKED;
tmp->vm_mm = mm;
- tmp->vm_next = NULL;
+ tmp->vm_next = tmp->vm_prev = NULL;
anon_vma_link(tmp);
file = tmp->vm_file;
if (file) {
@@ -383,6 +384,8 @@ static int dup_mmap(struct mm_struct *mm
*/
*pprev = tmp;
pprev = &tmp->vm_next;
+ tmp->vm_prev = prev;
+ prev = tmp;
__vma_link_rb(mm, tmp, rb_link, rb_parent);
rb_link = &tmp->vm_rb.rb_right;
@@ -900,6 +903,9 @@ static int copy_signal(unsigned long clo
sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
sig->gtime = cputime_zero;
sig->cgtime = cputime_zero;
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+ sig->prev_utime = sig->prev_stime = cputime_zero;
+#endif
sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
@@ -1243,21 +1249,6 @@ static struct task_struct *copy_process(
/* Need tasklist lock for parent etc handling! */
write_lock_irq(&tasklist_lock);
- /*
- * The task hasn't been attached yet, so its cpus_allowed mask will
- * not be changed, nor will its assigned CPU.
- *
- * The cpus_allowed mask of the parent may have changed after it was
- * copied first time - so re-copy it here, then check the child's CPU
- * to ensure it is on a valid CPU (and if not, just force it back to
- * parent's CPU). This avoids alot of nasty races.
- */
- p->cpus_allowed = current->cpus_allowed;
- p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
- if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
- !cpu_online(task_cpu(p))))
- set_task_cpu(p, smp_processor_id());
-
/* CLONE_PARENT re-uses the old parent */
if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
p->real_parent = current->real_parent;
file:f8498c3b55b5b96333ef35e229aa847ab483371f -> file:06d08e5eb2fc96d5089220abd5021e31817417af
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1395,7 +1395,6 @@ static inline struct futex_hash_bucket *
{
struct futex_hash_bucket *hb;
- get_futex_key_refs(&q->key);
hb = hash_futex(&q->key);
q->lock_ptr = &hb->lock;
@@ -1407,7 +1406,6 @@ static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
spin_unlock(&hb->lock);
- drop_futex_key_refs(&q->key);
}
/**
@@ -1512,8 +1510,6 @@ static void unqueue_me_pi(struct futex_q
q->pi_state = NULL;
spin_unlock(q->lock_ptr);
-
- drop_futex_key_refs(&q->key);
}
/*
@@ -1844,7 +1840,10 @@ static int futex_wait(u32 __user *uaddr,
}
retry:
- /* Prepare to wait on uaddr. */
+ /*
+ * Prepare to wait on uaddr. On success, holds hb lock and increments
+ * q.key refs.
+ */
ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
if (ret)
goto out;
@@ -1854,24 +1853,23 @@ retry:
/* If we were woken (and unqueued), we succeeded, whatever. */
ret = 0;
+ /* unqueue_me() drops q.key ref */
if (!unqueue_me(&q))
- goto out_put_key;
+ goto out;
ret = -ETIMEDOUT;
if (to && !to->task)
- goto out_put_key;
+ goto out;
/*
* We expect signal_pending(current), but we might be the
* victim of a spurious wakeup as well.
*/
- if (!signal_pending(current)) {
- put_futex_key(fshared, &q.key);
+ if (!signal_pending(current))
goto retry;
- }
ret = -ERESTARTSYS;
if (!abs_time)
- goto out_put_key;
+ goto out;
restart = &current_thread_info()->restart_block;
restart->fn = futex_wait_restart;
@@ -1888,8 +1886,6 @@ retry:
ret = -ERESTART_RESTARTBLOCK;
-out_put_key:
- put_futex_key(fshared, &q.key);
out:
if (to) {
hrtimer_cancel(&to->timer);
@@ -2268,7 +2264,10 @@ static int futex_wait_requeue_pi(u32 __u
q.rt_waiter = &rt_waiter;
q.requeue_pi_key = &key2;
- /* Prepare to wait on uaddr. */
+ /*
+ * Prepare to wait on uaddr. On success, increments q.key (key1) ref
+ * count.
+ */
ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
if (ret)
goto out_key2;
@@ -2286,7 +2285,9 @@ static int futex_wait_requeue_pi(u32 __u
* In order for us to be here, we know our q.key == key2, and since
* we took the hb->lock above, we also know that futex_requeue() has
* completed and we no longer have to concern ourselves with a wakeup
- * race with the atomic proxy lock acquition by the requeue code.
+ * race with the atomic proxy lock acquisition by the requeue code. The
+ * futex_requeue dropped our key1 reference and incremented our key2
+ * reference count.
*/
/* Check if the requeue code acquired the second futex for us. */
file:ef3c3f88a7a35e36d8f1fb551c56e534cea52687 -> file:f83972b16564d00676154900f09d3c70affd773c
--- a/kernel/gcov/fs.c
+++ b/kernel/gcov/fs.c
@@ -33,10 +33,11 @@
* @children: child nodes
* @all: list head for list of all nodes
* @parent: parent node
- * @info: associated profiling data structure if not a directory
- * @ghost: when an object file containing profiling data is unloaded we keep a
- * copy of the profiling data here to allow collecting coverage data
- * for cleanup code. Such a node is called a "ghost".
+ * @loaded_info: array of pointers to profiling data sets for loaded object
+ * files.
+ * @num_loaded: number of profiling data sets for loaded object files.
+ * @unloaded_info: accumulated copy of profiling data sets for unloaded
+ * object files. Used only when gcov_persist=1.
* @dentry: main debugfs entry, either a directory or data file
* @links: associated symbolic links
* @name: data file basename
@@ -51,10 +52,11 @@ struct gcov_node {
struct list_head children;
struct list_head all;
struct gcov_node *parent;
- struct gcov_info *info;
- struct gcov_info *ghost;
+ struct gcov_info **loaded_info;
+ struct gcov_info *unloaded_info;
struct dentry *dentry;
struct dentry **links;
+ int num_loaded;
char name[0];
};
@@ -136,16 +138,37 @@ static const struct seq_operations gcov_
};
/*
- * Return the profiling data set for a given node. This can either be the
- * original profiling data structure or a duplicate (also called "ghost")
- * in case the associated object file has been unloaded.
+ * Return a profiling data set associated with the given node. This is
+ * either a data set for a loaded object file or a data set copy in case
+ * all associated object files have been unloaded.
*/
static struct gcov_info *get_node_info(struct gcov_node *node)
{
- if (node->info)
- return node->info;
+ if (node->num_loaded > 0)
+ return node->loaded_info[0];
- return node->ghost;
+ return node->unloaded_info;
+}
+
+/*
+ * Return a newly allocated profiling data set which contains the sum of
+ * all profiling data associated with the given node.
+ */
+static struct gcov_info *get_accumulated_info(struct gcov_node *node)
+{
+ struct gcov_info *info;
+ int i = 0;
+
+ if (node->unloaded_info)
+ info = gcov_info_dup(node->unloaded_info);
+ else
+ info = gcov_info_dup(node->loaded_info[i++]);
+ if (!info)
+ return NULL;
+ for (; i < node->num_loaded; i++)
+ gcov_info_add(info, node->loaded_info[i]);
+
+ return info;
}
/*
@@ -163,9 +186,10 @@ static int gcov_seq_open(struct inode *i
mutex_lock(&node_lock);
/*
* Read from a profiling data copy to minimize reference tracking
- * complexity and concurrent access.
+ * complexity and concurrent access and to keep accumulating multiple
+ * profiling data sets associated with one node simple.
*/
- info = gcov_info_dup(get_node_info(node));
+ info = get_accumulated_info(node);
if (!info)
goto out_unlock;
iter = gcov_iter_new(info);
@@ -225,12 +249,25 @@ static struct gcov_node *get_node_by_nam
return NULL;
}
+/*
+ * Reset all profiling data associated with the specified node.
+ */
+static void reset_node(struct gcov_node *node)
+{
+ int i;
+
+ if (node->unloaded_info)
+ gcov_info_reset(node->unloaded_info);
+ for (i = 0; i < node->num_loaded; i++)
+ gcov_info_reset(node->loaded_info[i]);
+}
+
static void remove_node(struct gcov_node *node);
/*
* write() implementation for gcov data files. Reset profiling data for the
- * associated file. If the object file has been unloaded (i.e. this is
- * a "ghost" node), remove the debug fs node as well.
+ * corresponding file. If all associated object files have been unloaded,
+ * remove the debug fs node as well.
*/
static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
size_t len, loff_t *pos)
@@ -245,10 +282,10 @@ static ssize_t gcov_seq_write(struct fil
node = get_node_by_name(info->filename);
if (node) {
/* Reset counts or remove node for unloaded modules. */
- if (node->ghost)
+ if (node->num_loaded == 0)
remove_node(node);
else
- gcov_info_reset(node->info);
+ reset_node(node);
}
/* Reset counts for open file. */
gcov_info_reset(info);
@@ -378,7 +415,10 @@ static void init_node(struct gcov_node *
INIT_LIST_HEAD(&node->list);
INIT_LIST_HEAD(&node->children);
INIT_LIST_HEAD(&node->all);
- node->info = info;
+ if (node->loaded_info) {
+ node->loaded_info[0] = info;
+ node->num_loaded = 1;
+ }
node->parent = parent;
if (name)
strcpy(node->name, name);
@@ -394,9 +434,13 @@ static struct gcov_node *new_node(struct
struct gcov_node *node;
node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL);
- if (!node) {
- pr_warning("out of memory\n");
- return NULL;
+ if (!node)
+ goto err_nomem;
+ if (info) {
+ node->loaded_info = kcalloc(1, sizeof(struct gcov_info *),
+ GFP_KERNEL);
+ if (!node->loaded_info)
+ goto err_nomem;
}
init_node(node, info, name, parent);
/* Differentiate between gcov data file nodes and directory nodes. */
@@ -416,6 +460,11 @@ static struct gcov_node *new_node(struct
list_add(&node->all, &all_head);
return node;
+
+err_nomem:
+ kfree(node);
+ pr_warning("out of memory\n");
+ return NULL;
}
/* Remove symbolic links associated with node. */
@@ -441,8 +490,9 @@ static void release_node(struct gcov_nod
list_del(&node->all);
debugfs_remove(node->dentry);
remove_links(node);
- if (node->ghost)
- gcov_info_free(node->ghost);
+ kfree(node->loaded_info);
+ if (node->unloaded_info)
+ gcov_info_free(node->unloaded_info);
kfree(node);
}
@@ -477,7 +527,7 @@ static struct gcov_node *get_child_by_na
/*
* write() implementation for reset file. Reset all profiling data to zero
- * and remove ghost nodes.
+ * and remove nodes for which all associated object files are unloaded.
*/
static ssize_t reset_write(struct file *file, const char __user *addr,
size_t len, loff_t *pos)
@@ -487,8 +537,8 @@ static ssize_t reset_write(struct file *
mutex_lock(&node_lock);
restart:
list_for_each_entry(node, &all_head, all) {
- if (node->info)
- gcov_info_reset(node->info);
+ if (node->num_loaded > 0)
+ reset_node(node);
else if (list_empty(&node->children)) {
remove_node(node);
/* Several nodes may have gone - restart loop. */
@@ -564,37 +614,115 @@ err_remove:
}
/*
- * The profiling data set associated with this node is being unloaded. Store a
- * copy of the profiling data and turn this node into a "ghost".
+ * Associate a profiling data set with an existing node. Needs to be called
+ * with node_lock held.
*/
-static int ghost_node(struct gcov_node *node)
+static void add_info(struct gcov_node *node, struct gcov_info *info)
{
- node->ghost = gcov_info_dup(node->info);
- if (!node->ghost) {
- pr_warning("could not save data for '%s' (out of memory)\n",
- node->info->filename);
- return -ENOMEM;
+ struct gcov_info **loaded_info;
+ int num = node->num_loaded;
+
+ /*
+ * Prepare new array. This is done first to simplify cleanup in
+ * case the new data set is incompatible, the node only contains
+ * unloaded data sets and there's not enough memory for the array.
+ */
+ loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL);
+ if (!loaded_info) {
+ pr_warning("could not add '%s' (out of memory)\n",
+ info->filename);
+ return;
+ }
+ memcpy(loaded_info, node->loaded_info,
+ num * sizeof(struct gcov_info *));
+ loaded_info[num] = info;
+ /* Check if the new data set is compatible. */
+ if (num == 0) {
+ /*
+ * A module was unloaded, modified and reloaded. The new
+ * data set replaces the copy of the last one.
+ */
+ if (!gcov_info_is_compatible(node->unloaded_info, info)) {
+ pr_warning("discarding saved data for %s "
+ "(incompatible version)\n", info->filename);
+ gcov_info_free(node->unloaded_info);
+ node->unloaded_info = NULL;
+ }
+ } else {
+ /*
+ * Two different versions of the same object file are loaded.
+ * The initial one takes precedence.
+ */
+ if (!gcov_info_is_compatible(node->loaded_info[0], info)) {
+ pr_warning("could not add '%s' (incompatible "
+ "version)\n", info->filename);
+ kfree(loaded_info);
+ return;
+ }
}
- node->info = NULL;
+ /* Overwrite previous array. */
+ kfree(node->loaded_info);
+ node->loaded_info = loaded_info;
+ node->num_loaded = num + 1;
+}
- return 0;
+/*
+ * Return the index of a profiling data set associated with a node.
+ */
+static int get_info_index(struct gcov_node *node, struct gcov_info *info)
+{
+ int i;
+
+ for (i = 0; i < node->num_loaded; i++) {
+ if (node->loaded_info[i] == info)
+ return i;
+ }
+ return -ENOENT;
}
/*
- * Profiling data for this node has been loaded again. Add profiling data
- * from previous instantiation and turn this node into a regular node.
+ * Save the data of a profiling data set which is being unloaded.
*/
-static void revive_node(struct gcov_node *node, struct gcov_info *info)
+static void save_info(struct gcov_node *node, struct gcov_info *info)
{
- if (gcov_info_is_compatible(node->ghost, info))
- gcov_info_add(info, node->ghost);
+ if (node->unloaded_info)
+ gcov_info_add(node->unloaded_info, info);
else {
- pr_warning("discarding saved data for '%s' (version changed)\n",
+ node->unloaded_info = gcov_info_dup(info);
+ if (!node->unloaded_info) {
+ pr_warning("could not save data for '%s' "
+ "(out of memory)\n", info->filename);
+ }
+ }
+}
+
+/*
+ * Disassociate a profiling data set from a node. Needs to be called with
+ * node_lock held.
+ */
+static void remove_info(struct gcov_node *node, struct gcov_info *info)
+{
+ int i;
+
+ i = get_info_index(node, info);
+ if (i < 0) {
+ pr_warning("could not remove '%s' (not found)\n",
info->filename);
+ return;
}
- gcov_info_free(node->ghost);
- node->ghost = NULL;
- node->info = info;
+ if (gcov_persist)
+ save_info(node, info);
+ /* Shrink array. */
+ node->loaded_info[i] = node->loaded_info[node->num_loaded - 1];
+ node->num_loaded--;
+ if (node->num_loaded > 0)
+ return;
+ /* Last loaded data set was removed. */
+ kfree(node->loaded_info);
+ node->loaded_info = NULL;
+ node->num_loaded = 0;
+ if (!node->unloaded_info)
+ remove_node(node);
}
/*
@@ -609,30 +737,18 @@ void gcov_event(enum gcov_action action,
node = get_node_by_name(info->filename);
switch (action) {
case GCOV_ADD:
- /* Add new node or revive ghost. */
- if (!node) {
+ if (node)
+ add_info(node, info);
+ else
add_node(info);
- break;
- }
- if (gcov_persist)
- revive_node(node, info);
- else {
- pr_warning("could not add '%s' (already exists)\n",
- info->filename);
- }
break;
case GCOV_REMOVE:
- /* Remove node or turn into ghost. */
- if (!node) {
+ if (node)
+ remove_info(node, info);
+ else {
pr_warning("could not remove '%s' (not found)\n",
info->filename);
- break;
}
- if (gcov_persist) {
- if (!ghost_node(node))
- break;
- }
- remove_node(node);
break;
}
mutex_unlock(&node_lock);
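
The gcov rework above replaces the single info/ghost pair with an array of loaded data sets plus one accumulated copy for unloaded ones; removal shrinks the array with the classic swap-with-last trick since ordering does not matter. A compact sketch of that bookkeeping (gcov_info structures are reduced to plain counters; the names are illustrative):

    #include <stdio.h>

    #define MAX_LOADED 8

    static long *loaded[MAX_LOADED];
    static int  num_loaded;
    static long unloaded_sum;        /* stands in for node->unloaded_info */

    static void remove_info(long *info)
    {
            int i;

            for (i = 0; i < num_loaded; i++)
                    if (loaded[i] == info)
                            break;
            if (i == num_loaded)
                    return;                      /* not found */
            unloaded_sum += *info;               /* the save_info() step */
            loaded[i] = loaded[--num_loaded];    /* shrink: swap with last */
    }

    static long accumulated(void)
    {
            long sum = unloaded_sum;             /* get_accumulated_info() */
            for (int i = 0; i < num_loaded; i++)
                    sum += *loaded[i];
            return sum;
    }

    int main(void)
    {
            long a = 5, b = 7;
            loaded[num_loaded++] = &a;
            loaded[num_loaded++] = &b;
            remove_info(&a);
            printf("total coverage = %ld\n", accumulated());  /* 12 */
            return 0;
    }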
file:2b45b2ee3964f47b6470a791d366842e8437048e -> file:f0c2528f56fe4770f316a6450da05237c2915601
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -143,10 +143,9 @@ int groups_search(const struct group_inf
right = group_info->ngroups;
while (left < right) {
unsigned int mid = (left+right)/2;
- int cmp = grp - GROUP_AT(group_info, mid);
- if (cmp > 0)
+ if (grp > GROUP_AT(group_info, mid))
left = mid + 1;
- else if (cmp < 0)
+ else if (grp < GROUP_AT(group_info, mid))
right = mid;
else
return 1;
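
The groups_search() change above swaps a subtraction-based comparison for explicit ordering tests: with wide or unsigned gid types, `grp - GROUP_AT(group_info, mid)` can wrap around and report the wrong sign. The corrected shape, as a standalone binary search over a sorted array:

    #include <stdio.h>

    /* Returns 1 if grp is present in the sorted array, else 0. */
    static int groups_search(const unsigned int *groups, unsigned int ngroups,
                             unsigned int grp)
    {
            unsigned int left = 0, right = ngroups;

            while (left < right) {
                    unsigned int mid = (left + right) / 2;

                    if (grp > groups[mid])       /* compare, never subtract: */
                            left = mid + 1;      /* an unsigned difference   */
                    else if (grp < groups[mid])  /* cannot go negative, so a */
                            right = mid;         /* `cmp < 0` test misfires  */
                    else
                            return 1;
            }
            return 0;
    }

    int main(void)
    {
            unsigned int g[] = { 4, 20, 24, 27, 100 };
            printf("%d %d\n", groups_search(g, 5, 24), groups_search(g, 5, 25));
            return 0;
    }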
file:931a4d99bc559c9b1497de94eb7270406fc211ab -> file:a6e9d00a8323ffc22c0a1b1397cfb0e02c48907a
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -920,6 +920,7 @@ static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
if (hrtimer_is_queued(timer)) {
+ unsigned long state;
int reprogram;
/*
@@ -933,8 +934,13 @@ remove_hrtimer(struct hrtimer *timer, st
debug_deactivate(timer);
timer_stats_hrtimer_clear_start_info(timer);
reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
- __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
- reprogram);
+ /*
+ * We must preserve the CALLBACK state flag here,
+ * otherwise we could move the timer base in
+ * switch_hrtimer_base.
+ */
+ state = timer->state & HRTIMER_STATE_CALLBACK;
+ __remove_hrtimer(timer, base, state, reprogram);
return 1;
}
return 0;
@@ -1221,6 +1227,9 @@ static void __run_hrtimer(struct hrtimer
BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
enqueue_hrtimer(timer, base);
}
+
+ WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK));
+
timer->state &= ~HRTIMER_STATE_CALLBACK;
}
file:fa4bdd4c5fbc2c6ec985d64c54d89ed02b285008 -> file:f34e23178f54ce4d6db2e5c47d4376867ec4eaef
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -200,7 +200,7 @@ static inline int setup_affinity(unsigne
void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
if (suspend) {
- if (!desc->action || (desc->action->flags & IRQF_TIMER))
+ if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
return;
desc->status |= IRQ_SUSPENDED;
}
file:ca07c5c0c914186de89fb26e7323193375051178 -> file:e99e7cd9cf4784ee8289c69584e17fcc287159b5
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -195,14 +195,7 @@ __account_scheduler_latency(struct task_
account_global_scheduler_latency(tsk, &lat);
- /*
- * short term hack; if we're > 32 we stop; future we recycle:
- */
- tsk->latency_record_count++;
- if (tsk->latency_record_count >= LT_SAVECOUNT)
- goto out_unlock;
-
- for (i = 0; i < LT_SAVECOUNT; i++) {
+ for (i = 0; i < tsk->latency_record_count; i++) {
struct latency_record *mylat;
int same = 1;
@@ -228,8 +221,14 @@ __account_scheduler_latency(struct task_
}
}
+ /*
+ * short term hack; if we're > 32 we stop; future we recycle:
+ */
+ if (tsk->latency_record_count >= LT_SAVECOUNT)
+ goto out_unlock;
+
/* Allocated a new one: */
- i = tsk->latency_record_count;
+ i = tsk->latency_record_count++;
memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));
out_unlock:
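
The latencytop fix above reorders the bookkeeping: first search the records the task already has and merge on a match, and only when nothing matches check the LT_SAVECOUNT cap and claim a fresh slot (the old code bumped the counter before searching, so it crept up even on matches and eventually blocked all updates). The find-or-append pattern in isolation, as a sketch:

    #include <stdio.h>
    #include <string.h>

    #define LT_SAVECOUNT 4

    struct record { char name[16]; int count; };

    static struct record records[LT_SAVECOUNT];
    static int record_count;

    static void account(const char *name)
    {
            int i;

            for (i = 0; i < record_count; i++) {
                    if (strcmp(records[i].name, name) == 0) {
                            records[i].count++;   /* merge into existing */
                            return;
                    }
            }
            if (record_count >= LT_SAVECOUNT)     /* cap only new slots */
                    return;
            i = record_count++;                   /* claim a fresh slot */
            strcpy(records[i].name, name);
            records[i].count = 1;
    }

    int main(void)
    {
            account("fsync"); account("fsync"); account("poll");
            for (int i = 0; i < record_count; i++)
                    printf("%s: %d\n", records[i].name, records[i].count);
            return 0;
    }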
file:0c91bf6544bf5d79b3d67ceea1f6a5cdf7c71104 -> file:12390f287ac72d6bd67d43c6ea1dea4820d4dd89
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -542,7 +542,6 @@ struct rq {
struct load_weight load;
unsigned long nr_load_updates;
u64 nr_switches;
- u64 nr_migrations_in;
struct cfs_rq cfs;
struct rt_rq rt;
@@ -742,7 +741,7 @@ sched_feat_write(struct file *filp, cons
size_t cnt, loff_t *ppos)
{
char buf[64];
- char *cmp = buf;
+ char *cmp;
int neg = 0;
int i;
@@ -753,6 +752,7 @@ sched_feat_write(struct file *filp, cons
return -EFAULT;
buf[cnt] = 0;
+ cmp = strstrip(buf);
if (strncmp(buf, "NO_", 3) == 0) {
neg = 1;
@@ -760,9 +760,7 @@ sched_feat_write(struct file *filp, cons
}
for (i = 0; sched_feat_names[i]; i++) {
- int len = strlen(sched_feat_names[i]);
-
- if (strncmp(cmp, sched_feat_names[i], len) == 0) {
+ if (strcmp(cmp, sched_feat_names[i]) == 0) {
if (neg)
sysctl_sched_features &= ~(1UL << i);
else
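
The sched_feat_write() change above matters because strncmp() against each table entry's own length is a prefix test: an input that merely starts with a feature name would toggle it, and the trailing newline from echo(1) was silently tolerated. strstrip() plus strcmp() gives exact matching. A sketch of the fixed parser (the feature table and strip() helper are illustrative):

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    static const char *feat_names[] = { "GENTLE_FAIR_SLEEPERS", "START_DEBIT", NULL };
    static unsigned long features;

    /* Minimal stand-in for the kernel's strstrip(). */
    static char *strip(char *s)
    {
            char *end = s + strlen(s);
            while (end > s && isspace((unsigned char)end[-1]))
                    *--end = '\0';
            while (isspace((unsigned char)*s))
                    s++;
            return s;
    }

    static int set_feature(char *buf)
    {
            char *cmp = strip(buf);
            int neg = 0;

            if (strncmp(cmp, "NO_", 3) == 0) {
                    neg = 1;
                    cmp += 3;
            }
            for (int i = 0; feat_names[i]; i++) {
                    if (strcmp(cmp, feat_names[i]) == 0) {  /* exact, not prefix */
                            if (neg)
                                    features &= ~(1UL << i);
                            else
                                    features |= 1UL << i;
                            return 0;
                    }
            }
            return -1;
    }

    int main(void)
    {
            char buf[] = "NO_START_DEBIT\n";      /* echo adds the newline */
            printf("%d features=%lx\n", set_feature(buf), features);
            return 0;
    }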
@@ -943,14 +941,25 @@ static inline void finish_lock_switch(st
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
/*
+ * Check whether the task is waking, we use this to synchronize ->cpus_allowed
+ * against ttwu().
+ */
+static inline int task_is_waking(struct task_struct *p)
+{
+ return unlikely(p->state == TASK_WAKING);
+}
+
+/*
* __task_rq_lock - lock the runqueue a given task resides on.
* Must be called interrupts disabled.
*/
static inline struct rq *__task_rq_lock(struct task_struct *p)
__acquires(rq->lock)
{
+ struct rq *rq;
+
for (;;) {
- struct rq *rq = task_rq(p);
+ rq = task_rq(p);
spin_lock(&rq->lock);
if (likely(rq == task_rq(p)))
return rq;
@@ -1623,7 +1632,7 @@ static void update_group_shares_cpu(stru
*/
static int tg_shares_up(struct task_group *tg, void *data)
{
- unsigned long weight, rq_weight = 0, shares = 0;
+ unsigned long weight, rq_weight = 0, sum_weight = 0, shares = 0;
unsigned long *usd_rq_weight;
struct sched_domain *sd = data;
unsigned long flags;
@@ -1639,6 +1648,7 @@ static int tg_shares_up(struct task_grou
weight = tg->cfs_rq[i]->load.weight;
usd_rq_weight[i] = weight;
+ rq_weight += weight;
/*
* If there are currently no tasks on the cpu pretend there
* is one of average load so that when a new task gets to
@@ -1647,10 +1657,13 @@ static int tg_shares_up(struct task_grou
if (!weight)
weight = NICE_0_LOAD;
- rq_weight += weight;
+ sum_weight += weight;
shares += tg->cfs_rq[i]->shares;
}
+ if (!rq_weight)
+ rq_weight = sum_weight;
+
if ((!shares && rq_weight) || shares > tg->shares)
shares = tg->shares;
@@ -1818,6 +1831,20 @@ static void cfs_rq_set_shares(struct cfs
static void calc_load_account_active(struct rq *this_rq);
static void update_sysctl(void);
+static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+ set_task_rq(p, cpu);
+#ifdef CONFIG_SMP
+ /*
+ * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
+ * successfully executed on another CPU. We must ensure that updates of
+ * per-task data have been completed by this moment.
+ */
+ smp_wmb();
+ task_thread_info(p)->cpu = cpu;
+#endif
+}
+
#include "sched_stats.h"
#include "sched_idletask.c"
#include "sched_fair.c"
@@ -1867,13 +1894,14 @@ static void update_avg(u64 *avg, u64 sam
*avg += diff >> 3;
}
-static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
+static void
+enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
{
if (wakeup)
p->se.start_runtime = p->se.sum_exec_runtime;
sched_info_queued(p);
- p->sched_class->enqueue_task(rq, p, wakeup);
+ p->sched_class->enqueue_task(rq, p, wakeup, head);
p->se.on_rq = 1;
}
@@ -1949,7 +1977,7 @@ static void activate_task(struct rq *rq,
if (task_contributes_to_load(p))
rq->nr_uninterruptible--;
- enqueue_task(rq, p, wakeup);
+ enqueue_task(rq, p, wakeup, false);
inc_nr_running(rq);
}
@@ -1974,20 +2002,6 @@ inline int task_curr(const struct task_s
return cpu_curr(task_cpu(p)) == p;
}
-static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
-{
- set_task_rq(p, cpu);
-#ifdef CONFIG_SMP
- /*
- * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
- * successfuly executed on another CPU. We must ensure that updates of
- * per-task data have been completed by this moment.
- */
- smp_wmb();
- task_thread_info(p)->cpu = cpu;
-#endif
-}
-
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
const struct sched_class *prev_class,
int oldprio, int running)
@@ -2014,21 +2028,15 @@ static inline void check_class_changed(s
*/
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
- struct rq *rq = cpu_rq(cpu);
- unsigned long flags;
-
/* Must have done schedule() in kthread() before we set_task_cpu */
if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
WARN_ON(1);
return;
}
- spin_lock_irqsave(&rq->lock, flags);
- set_task_cpu(p, cpu);
p->cpus_allowed = cpumask_of_cpu(cpu);
p->rt.nr_cpus_allowed = 1;
p->flags |= PF_THREAD_BOUND;
- spin_unlock_irqrestore(&rq->lock, flags);
}
EXPORT_SYMBOL(kthread_bind);
@@ -2066,35 +2074,23 @@ task_hot(struct task_struct *p, u64 now,
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
int old_cpu = task_cpu(p);
- struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
- struct cfs_rq *old_cfsrq = task_cfs_rq(p),
- *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
- u64 clock_offset;
- clock_offset = old_rq->clock - new_rq->clock;
+#ifdef CONFIG_SCHED_DEBUG
+ /*
+ * We should never call set_task_cpu() on a blocked task,
+ * ttwu() will sort out the placement.
+ */
+ WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
+ !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
+#endif
trace_sched_migrate_task(p, new_cpu);
-#ifdef CONFIG_SCHEDSTATS
- if (p->se.wait_start)
- p->se.wait_start -= clock_offset;
- if (p->se.sleep_start)
- p->se.sleep_start -= clock_offset;
- if (p->se.block_start)
- p->se.block_start -= clock_offset;
-#endif
if (old_cpu != new_cpu) {
p->se.nr_migrations++;
- new_rq->nr_migrations_in++;
-#ifdef CONFIG_SCHEDSTATS
- if (task_hot(p, old_rq->clock, NULL))
- schedstat_inc(p, se.nr_forced2_migrations);
-#endif
perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
1, 1, NULL, 0);
}
- p->se.vruntime -= old_cfsrq->min_vruntime -
- new_cfsrq->min_vruntime;
__set_task_cpu(p, new_cpu);
}
@@ -2327,6 +2323,69 @@ void task_oncpu_function_call(struct tas
preempt_enable();
}
+#ifdef CONFIG_SMP
+/*
+ * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
+ */
+static int select_fallback_rq(int cpu, struct task_struct *p)
+{
+ int dest_cpu;
+ const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
+
+ /* Look for allowed, online CPU in same node. */
+ for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+ if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+ return dest_cpu;
+
+ /* Any allowed, online CPU? */
+ dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
+ if (dest_cpu < nr_cpu_ids)
+ return dest_cpu;
+
+ /* No more Mr. Nice Guy. */
+ if (unlikely(dest_cpu >= nr_cpu_ids)) {
+ dest_cpu = cpuset_cpus_allowed_fallback(p);
+ /*
+ * Don't tell them about moving exiting tasks or
+ * kernel threads (both mm NULL), since they never
+ * leave kernel.
+ */
+ if (p->mm && printk_ratelimit()) {
+ printk(KERN_INFO "process %d (%s) no "
+ "longer affine to cpu%d\n",
+ task_pid_nr(p), p->comm, cpu);
+ }
+ }
+
+ return dest_cpu;
+}
+
+/*
+ * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable.
+ */
+static inline
+int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
+{
+ int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
+
+ /*
+ * In order not to call set_task_cpu() on a blocking task we need
+ * to rely on ttwu() to place the task on a valid ->cpus_allowed
+ * cpu.
+ *
+ * Since this is common to all placement strategies, this lives here.
+ *
+ * [ this allows ->select_task() to simply return task_cpu(p) and
+ * not worry about this generic constraint ]
+ */
+ if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
+ !cpu_online(cpu)))
+ cpu = select_fallback_rq(task_cpu(p), p);
+
+ return cpu;
+}
+#endif
+
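+
The new select_fallback_rq() above walks a widening set of candidates: allowed CPUs in the dead CPU's node, then any allowed active CPU, then whatever the cpuset fallback grants. With cpumasks reduced to single bit words, the cascade looks like this (a sketch; the masks and the cpuset step are simulated):

    #include <stdio.h>

    #define NCPU 8

    static int first_set(unsigned mask)
    {
            for (int i = 0; i < NCPU; i++)
                    if (mask & (1u << i))
                            return i;
            return -1;              /* mirrors dest_cpu >= nr_cpu_ids */
    }

    static int select_fallback(unsigned allowed, unsigned active, unsigned node)
    {
            int cpu;

            /* 1) allowed, active CPU in the same node */
            cpu = first_set(allowed & active & node);
            if (cpu >= 0)
                    return cpu;
            /* 2) any allowed, active CPU */
            cpu = first_set(allowed & active);
            if (cpu >= 0)
                    return cpu;
            /* 3) no more Mr. Nice Guy: widen the mask (cpuset fallback) */
            return first_set(active);
    }

    int main(void)
    {
            /* allowed={2,3}, active={4,5}, node={0,1}: falls to step 3 */
            printf("fallback cpu = %d\n", select_fallback(0x0c, 0x30, 0x03));
            return 0;
    }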
/***
* try_to_wake_up - wake up a thread
* @p: the to-be-woken-up thread
@@ -2375,22 +2434,34 @@ static int try_to_wake_up(struct task_st
*
* First fix up the nr_uninterruptible count:
*/
- if (task_contributes_to_load(p))
- rq->nr_uninterruptible--;
+ if (task_contributes_to_load(p)) {
+ if (likely(cpu_online(orig_cpu)))
+ rq->nr_uninterruptible--;
+ else
+ this_rq()->nr_uninterruptible--;
+ }
p->state = TASK_WAKING;
- task_rq_unlock(rq, &flags);
- cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
+ if (p->sched_class->task_waking)
+ p->sched_class->task_waking(rq, p);
+
+ cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
if (cpu != orig_cpu)
set_task_cpu(p, cpu);
+ __task_rq_unlock(rq);
- rq = task_rq_lock(p, &flags);
-
- if (rq != orig_rq)
- update_rq_clock(rq);
+ rq = cpu_rq(cpu);
+ spin_lock(&rq->lock);
+ update_rq_clock(rq);
+ /*
+ * We migrated the task without holding either rq->lock, however
+ * since the task is not on the task list itself, nobody else
+ * will try and migrate the task, hence the rq should match the
+ * cpu we just moved it to.
+ */
+ WARN_ON(task_cpu(p) != cpu);
WARN_ON(p->state != TASK_WAKING);
- cpu = task_cpu(p);
#ifdef CONFIG_SCHEDSTATS
schedstat_inc(rq, ttwu_count);
@@ -2443,8 +2514,8 @@ out_running:
p->state = TASK_RUNNING;
#ifdef CONFIG_SMP
- if (p->sched_class->task_wake_up)
- p->sched_class->task_wake_up(rq, p);
+ if (p->sched_class->task_woken)
+ p->sched_class->task_woken(rq, p);
if (unlikely(rq->idle_stamp)) {
u64 delta = rq->clock - rq->idle_stamp;
@@ -2524,7 +2595,6 @@ static void __sched_fork(struct task_str
p->se.nr_failed_migrations_running = 0;
p->se.nr_failed_migrations_hot = 0;
p->se.nr_forced_migrations = 0;
- p->se.nr_forced2_migrations = 0;
p->se.nr_wakeups = 0;
p->se.nr_wakeups_sync = 0;
@@ -2545,14 +2615,6 @@ static void __sched_fork(struct task_str
#ifdef CONFIG_PREEMPT_NOTIFIERS
INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif
-
- /*
- * We mark the process as running here, but have not actually
- * inserted it onto the runqueue yet. This guarantees that
- * nobody will actually run it, and a signal or other external
- * event cannot wake it up and insert it on the runqueue either.
- */
- p->state = TASK_RUNNING;
}
/*
@@ -2563,6 +2625,12 @@ void sched_fork(struct task_struct *p, i
int cpu = get_cpu();
__sched_fork(p);
+ /*
+ * We mark the process as running here. This guarantees that
+ * nobody will actually run it, and a signal or other external
+ * event cannot wake it up and insert it on the runqueue either.
+ */
+ p->state = TASK_RUNNING;
/*
* Revert to default priority/policy on fork if requested.
@@ -2594,9 +2662,9 @@ void sched_fork(struct task_struct *p, i
if (!rt_prio(p->prio))
p->sched_class = &fair_sched_class;
-#ifdef CONFIG_SMP
- cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
-#endif
+ if (p->sched_class->task_fork)
+ p->sched_class->task_fork(p);
+
set_task_cpu(p, cpu);
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
@@ -2626,28 +2694,38 @@ void wake_up_new_task(struct task_struct
{
unsigned long flags;
struct rq *rq;
+ int cpu = get_cpu();
+#ifdef CONFIG_SMP
rq = task_rq_lock(p, &flags);
- BUG_ON(p->state != TASK_RUNNING);
- update_rq_clock(rq);
+ p->state = TASK_WAKING;
- if (!p->sched_class->task_new || !current->se.on_rq) {
- activate_task(rq, p, 0);
- } else {
- /*
- * Let the scheduling class do new task startup
- * management (if any):
- */
- p->sched_class->task_new(rq, p);
- inc_nr_running(rq);
- }
+ /*
+ * Fork balancing, do it here and not earlier because:
+ * - cpus_allowed can change in the fork path
+ * - any previously selected cpu might disappear through hotplug
+ *
+ * We set TASK_WAKING so that select_task_rq() can drop rq->lock
+ * without people poking at ->cpus_allowed.
+ */
+ cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
+ set_task_cpu(p, cpu);
+
+ p->state = TASK_RUNNING;
+ task_rq_unlock(rq, &flags);
+#endif
+
+ rq = task_rq_lock(p, &flags);
+ update_rq_clock(rq);
+ activate_task(rq, p, 0);
trace_sched_wakeup_new(rq, p, 1);
check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
- if (p->sched_class->task_wake_up)
- p->sched_class->task_wake_up(rq, p);
+ if (p->sched_class->task_woken)
+ p->sched_class->task_woken(rq, p);
#endif
task_rq_unlock(rq, &flags);
+ put_cpu();
}
#ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -3034,15 +3112,6 @@ static void calc_load_account_active(str
}
/*
- * Externally visible per-cpu scheduler statistics:
- * cpu_nr_migrations(cpu) - number of migrations into that cpu
- */
-u64 cpu_nr_migrations(int cpu)
-{
- return cpu_rq(cpu)->nr_migrations_in;
-}
-
-/*
* Update rq->cpu_load[] statistics. This function is usually called every
* scheduler tick (TICK_NSEC).
*/
@@ -3124,24 +3193,28 @@ static void double_rq_unlock(struct rq *
}
/*
- * If dest_cpu is allowed for this process, migrate the task to it.
- * This is accomplished by forcing the cpu_allowed mask to only
- * allow dest_cpu, which will force the cpu onto dest_cpu. Then
- * the cpu_allowed mask is restored.
+ * sched_exec - execve() is a valuable balancing opportunity, because at
+ * this point the task has the smallest effective memory and cache footprint.
*/
-static void sched_migrate_task(struct task_struct *p, int dest_cpu)
+void sched_exec(void)
{
+ struct task_struct *p = current;
struct migration_req req;
unsigned long flags;
struct rq *rq;
+ int dest_cpu;
rq = task_rq_lock(p, &flags);
- if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
- || unlikely(!cpu_active(dest_cpu)))
- goto out;
+ dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
+ if (dest_cpu == smp_processor_id())
+ goto unlock;
- /* force the process onto the specified CPU */
- if (migrate_task(p, dest_cpu, &req)) {
+ /*
+ * select_task_rq() can race against ->cpus_allowed
+ */
+ if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
+ likely(cpu_active(dest_cpu)) &&
+ migrate_task(p, dest_cpu, &req)) {
/* Need to wait for migration thread (might exit: take ref). */
struct task_struct *mt = rq->migration_thread;
@@ -3153,24 +3226,11 @@ static void sched_migrate_task(struct ta
return;
}
-out:
+unlock:
task_rq_unlock(rq, &flags);
}
/*
- * sched_exec - execve() is a valuable balancing opportunity, because at
- * this point the task has the smallest effective memory and cache footprint.
- */
-void sched_exec(void)
-{
- int new_cpu, this_cpu = get_cpu();
- new_cpu = current->sched_class->select_task_rq(current, SD_BALANCE_EXEC, 0);
- put_cpu();
- if (new_cpu != this_cpu)
- sched_migrate_task(current, new_cpu);
-}
-
-/*
* pull_task - move a task from a remote runqueue to the local runqueue.
* Both runqueues must be locked.
*/
@@ -3617,7 +3677,7 @@ unsigned long __weak arch_scale_freq_pow
unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
{
- unsigned long weight = cpumask_weight(sched_domain_span(sd));
+ unsigned long weight = sd->span_weight;
unsigned long smt_gain = sd->smt_gain;
smt_gain /= weight;
@@ -3650,7 +3710,7 @@ unsigned long scale_rt_power(int cpu)
static void update_cpu_power(struct sched_domain *sd, int cpu)
{
- unsigned long weight = cpumask_weight(sched_domain_span(sd));
+ unsigned long weight = sd->span_weight;
unsigned long power = SCHED_LOAD_SCALE;
struct sched_group *sdg = sd->groups;
@@ -5223,45 +5283,90 @@ cputime_t task_stime(struct task_struct
{
return p->stime;
}
+
+void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+{
+ struct task_cputime cputime;
+
+ thread_group_cputime(p, &cputime);
+
+ *ut = cputime.utime;
+ *st = cputime.stime;
+}
#else
+
+#ifndef nsecs_to_cputime
+# define nsecs_to_cputime(__nsecs) \
+ msecs_to_cputime(div_u64((__nsecs), NSEC_PER_MSEC))
+#endif
+
cputime_t task_utime(struct task_struct *p)
{
- clock_t utime = cputime_to_clock_t(p->utime),
- total = utime + cputime_to_clock_t(p->stime);
+ cputime_t utime = p->utime, total = utime + p->stime;
u64 temp;
/*
* Use CFS's precise accounting:
*/
- temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
+ temp = (u64)nsecs_to_cputime(p->se.sum_exec_runtime);
if (total) {
temp *= utime;
do_div(temp, total);
}
- utime = (clock_t)temp;
+ utime = (cputime_t)temp;
- p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
+ p->prev_utime = max(p->prev_utime, utime);
return p->prev_utime;
}
cputime_t task_stime(struct task_struct *p)
{
- clock_t stime;
+ cputime_t stime;
/*
* Use CFS's precise accounting. (we subtract utime from
* the total, to make sure the total observed by userspace
* grows monotonically - apps rely on that):
*/
- stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
- cputime_to_clock_t(task_utime(p));
+ stime = nsecs_to_cputime(p->se.sum_exec_runtime) - task_utime(p);
if (stime >= 0)
- p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
+ p->prev_stime = max(p->prev_stime, stime);
return p->prev_stime;
}
+
+/*
+ * Must be called with siglock held.
+ */
+void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+{
+ struct signal_struct *sig = p->signal;
+ struct task_cputime cputime;
+ cputime_t rtime, utime, total;
+
+ thread_group_cputime(p, &cputime);
+
+ total = cputime_add(cputime.utime, cputime.stime);
+ rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
+
+ if (total) {
+ u64 temp = rtime;
+
+ temp *= cputime.utime;
+ do_div(temp, total);
+ utime = (cputime_t)temp;
+ } else
+ utime = rtime;
+
+ sig->prev_utime = max(sig->prev_utime, utime);
+ sig->prev_stime = max(sig->prev_stime,
+ cputime_sub(rtime, sig->prev_utime));
+
+ *ut = sig->prev_utime;
+ *st = sig->prev_stime;
+}
#endif
inline cputime_t task_gtime(struct task_struct *p)
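
The new thread_group_times() splits the precisely measured runtime (rtime) between user and system time in proportion to the sampled utime/stime ratio, then clamps against the stored previous values so both components stay monotonic as seen by user space. The arithmetic in isolation, with plain 64-bit ticks standing in for cputime_t:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t prev_utime, prev_stime;

    static void group_times(uint64_t sampled_utime, uint64_t sampled_stime,
                            uint64_t rtime, uint64_t *ut, uint64_t *st)
    {
            uint64_t total = sampled_utime + sampled_stime;
            uint64_t utime;

            if (total)
                    utime = rtime * sampled_utime / total;  /* proportional split */
            else
                    utime = rtime;

            /* Never let either component go backwards between reads. */
            if (utime > prev_utime)
                    prev_utime = utime;
            if (rtime - prev_utime > prev_stime)
                    prev_stime = rtime - prev_utime;

            *ut = prev_utime;
            *st = prev_stime;
    }

    int main(void)
    {
            uint64_t ut, st;

            group_times(30, 10, 100, &ut, &st);   /* 75/25 split of rtime */
            printf("ut=%llu st=%llu\n", (unsigned long long)ut,
                   (unsigned long long)st);
            return 0;
    }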
@@ -5553,7 +5658,7 @@ int mutex_spin_on_owner(struct mutex *lo
* the mutex owner just released it and exited.
*/
if (probe_kernel_address(&owner->cpu, cpu))
- goto out;
+ return 0;
#else
cpu = owner->cpu;
#endif
@@ -5563,14 +5668,14 @@ int mutex_spin_on_owner(struct mutex *lo
* the cpu field may no longer be valid.
*/
if (cpu >= nr_cpumask_bits)
- goto out;
+ return 0;
/*
* We need to validate that we can do a
* get_cpu() and that we have the percpu area.
*/
if (!cpu_online(cpu))
- goto out;
+ return 0;
rq = cpu_rq(cpu);
@@ -5589,7 +5694,7 @@ int mutex_spin_on_owner(struct mutex *lo
cpu_relax();
}
-out:
+
return 1;
}
#endif
@@ -5937,14 +6042,15 @@ EXPORT_SYMBOL(wait_for_completion_killab
*/
bool try_wait_for_completion(struct completion *x)
{
+ unsigned long flags;
int ret = 1;
- spin_lock_irq(&x->wait.lock);
+ spin_lock_irqsave(&x->wait.lock, flags);
if (!x->done)
ret = 0;
else
x->done--;
- spin_unlock_irq(&x->wait.lock);
+ spin_unlock_irqrestore(&x->wait.lock, flags);
return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
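
The switch from spin_lock_irq() to spin_lock_irqsave() above matters when the caller already runs with interrupts disabled: the plain _irq variant unconditionally re-enables interrupts on unlock, corrupting the caller's state, while _irqsave restores whatever was in effect. The save/restore discipline as a toy model (a boolean stands in for the CPU's interrupt-enable flag):

    #include <stdbool.h>
    #include <stdio.h>

    static bool irqs_enabled = true;   /* toy model of the CPU IF flag */

    static void lock_irq(void)            { irqs_enabled = false; }
    static void unlock_irq(void)          { irqs_enabled = true; } /* blindly on */

    static void lock_irqsave(bool *flags) { *flags = irqs_enabled;
                                            irqs_enabled = false; }
    static void unlock_irqrestore(bool f) { irqs_enabled = f; }    /* prior state */

    int main(void)
    {
            bool flags;

            irqs_enabled = false;          /* caller is in an IRQ-off section */
            lock_irq();
            unlock_irq();
            printf("after _irq pair:     irqs %s (wrong!)\n",
                   irqs_enabled ? "on" : "off");

            irqs_enabled = false;
            lock_irqsave(&flags);
            unlock_irqrestore(flags);
            printf("after _irqsave pair: irqs %s\n",
                   irqs_enabled ? "on" : "off");
            return 0;
    }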
@@ -5959,12 +6065,13 @@ EXPORT_SYMBOL(try_wait_for_completion);
*/
bool completion_done(struct completion *x)
{
+ unsigned long flags;
int ret = 1;
- spin_lock_irq(&x->wait.lock);
+ spin_lock_irqsave(&x->wait.lock, flags);
if (!x->done)
ret = 0;
- spin_unlock_irq(&x->wait.lock);
+ spin_unlock_irqrestore(&x->wait.lock, flags);
return ret;
}
EXPORT_SYMBOL(completion_done);
@@ -6058,7 +6165,7 @@ void rt_mutex_setprio(struct task_struct
if (running)
p->sched_class->set_curr_task(rq);
if (on_rq) {
- enqueue_task(rq, p, 0);
+ enqueue_task(rq, p, 0, oldprio < prio);
check_class_changed(rq, p, prev_class, oldprio, running);
}
@@ -6102,7 +6209,7 @@ void set_user_nice(struct task_struct *p
delta = p->prio - old_prio;
if (on_rq) {
- enqueue_task(rq, p, 0);
+ enqueue_task(rq, p, 0, false);
/*
* If the task increased its priority or is running and
* lowered its priority, then reschedule its CPU:
@@ -6493,7 +6600,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_
return -EINVAL;
retval = -ESRCH;
- read_lock(&tasklist_lock);
+ rcu_read_lock();
p = find_process_by_pid(pid);
if (p) {
retval = security_task_getscheduler(p);
@@ -6501,7 +6608,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_
retval = p->policy
| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
}
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
return retval;
}
@@ -6519,7 +6626,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, p
if (!param || pid < 0)
return -EINVAL;
- read_lock(&tasklist_lock);
+ rcu_read_lock();
p = find_process_by_pid(pid);
retval = -ESRCH;
if (!p)
@@ -6530,7 +6637,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, p
goto out_unlock;
lp.sched_priority = p->rt_priority;
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
/*
* This one might sleep, we cannot do it with a spinlock held ...
@@ -6540,7 +6647,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, p
return retval;
out_unlock:
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
return retval;
}
@@ -6551,22 +6658,18 @@ long sched_setaffinity(pid_t pid, const
int retval;
get_online_cpus();
- read_lock(&tasklist_lock);
+ rcu_read_lock();
p = find_process_by_pid(pid);
if (!p) {
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
put_online_cpus();
return -ESRCH;
}
- /*
- * It is not safe to call set_cpus_allowed with the
- * tasklist_lock held. We will bump the task_struct's
- * usage count and then drop tasklist_lock.
- */
+ /* Prevent p going away */
get_task_struct(p);
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
retval = -ENOMEM;
@@ -6647,10 +6750,12 @@ SYSCALL_DEFINE3(sched_setaffinity, pid_t
long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
struct task_struct *p;
+ unsigned long flags;
+ struct rq *rq;
int retval;
get_online_cpus();
- read_lock(&tasklist_lock);
+ rcu_read_lock();
retval = -ESRCH;
p = find_process_by_pid(pid);
@@ -6661,10 +6766,12 @@ long sched_getaffinity(pid_t pid, struct
if (retval)
goto out_unlock;
+ rq = task_rq_lock(p, &flags);
cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
+ task_rq_unlock(rq, &flags);
out_unlock:
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
put_online_cpus();
return retval;
@@ -6903,6 +7010,8 @@ SYSCALL_DEFINE2(sched_rr_get_interval, p
{
struct task_struct *p;
unsigned int time_slice;
+ unsigned long flags;
+ struct rq *rq;
int retval;
struct timespec t;
@@ -6910,7 +7019,7 @@ SYSCALL_DEFINE2(sched_rr_get_interval, p
return -EINVAL;
retval = -ESRCH;
- read_lock(&tasklist_lock);
+ rcu_read_lock();
p = find_process_by_pid(pid);
if (!p)
goto out_unlock;
@@ -6919,15 +7028,17 @@ SYSCALL_DEFINE2(sched_rr_get_interval, p
if (retval)
goto out_unlock;
- time_slice = p->sched_class->get_rr_interval(p);
+ rq = task_rq_lock(p, &flags);
+ time_slice = p->sched_class->get_rr_interval(rq, p);
+ task_rq_unlock(rq, &flags);
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
jiffies_to_timespec(time_slice, &t);
retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
return retval;
out_unlock:
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
return retval;
}
@@ -7018,6 +7129,7 @@ void __cpuinit init_idle(struct task_str
spin_lock_irqsave(&rq->lock, flags);
__sched_fork(idle);
+ idle->state = TASK_RUNNING;
idle->se.exec_start = sched_clock();
cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
@@ -7112,7 +7224,19 @@ int set_cpus_allowed_ptr(struct task_str
struct rq *rq;
int ret = 0;
+ /*
+ * Serialize against TASK_WAKING so that ttwu() and wake_up_new_task() can
+ * drop the rq->lock and still rely on ->cpus_allowed.
+ */
+again:
+ while (task_is_waking(p))
+ cpu_relax();
rq = task_rq_lock(p, &flags);
+ if (task_is_waking(p)) {
+ task_rq_unlock(rq, &flags);
+ goto again;
+ }
+
if (!cpumask_intersects(new_mask, cpu_active_mask)) {
ret = -EINVAL;
goto out;
@@ -7141,7 +7265,7 @@ int set_cpus_allowed_ptr(struct task_str
get_task_struct(mt);
task_rq_unlock(rq, &flags);
- wake_up_process(rq->migration_thread);
+ wake_up_process(mt);
put_task_struct(mt);
wait_for_completion(&req.done);
tlb_migrate_finish(p->mm);
@@ -7168,7 +7292,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
{
struct rq *rq_dest, *rq_src;
- int ret = 0, on_rq;
+ int ret = 0;
if (unlikely(!cpu_active(dest_cpu)))
return ret;
@@ -7180,19 +7304,17 @@ static int __migrate_task(struct task_st
/* Already moved. */
if (task_cpu(p) != src_cpu)
goto done;
- /* Waking up, don't get in the way of try_to_wake_up(). */
- if (p->state == TASK_WAKING)
- goto fail;
/* Affinity changed (again). */
if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
goto fail;
- on_rq = p->se.on_rq;
- if (on_rq)
+ /*
+ * If we're not on a rq, the next wake-up will ensure we're
+ * placed properly.
+ */
+ if (p->se.on_rq) {
deactivate_task(rq_src, p, 0);
-
- set_task_cpu(p, dest_cpu);
- if (on_rq) {
+ set_task_cpu(p, dest_cpu);
activate_task(rq_dest, p, 0);
check_preempt_curr(rq_dest, p, 0);
}
@@ -7271,57 +7393,29 @@ static int migration_thread(void *data)
}
#ifdef CONFIG_HOTPLUG_CPU
-
-static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
-{
- int ret;
-
- local_irq_disable();
- ret = __migrate_task(p, src_cpu, dest_cpu);
- local_irq_enable();
- return ret;
-}
-
/*
* Figure out where task on dead CPU should go, use force if necessary.
*/
-static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
+void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
{
- int dest_cpu;
- const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
-
-again:
- /* Look for allowed, online CPU in same node. */
- for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
- if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
- goto move;
-
- /* Any allowed, online CPU? */
- dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
- if (dest_cpu < nr_cpu_ids)
- goto move;
-
- /* No more Mr. Nice Guy. */
- if (dest_cpu >= nr_cpu_ids) {
- cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
- dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+ struct rq *rq = cpu_rq(dead_cpu);
+ int needs_cpu, uninitialized_var(dest_cpu);
+ unsigned long flags;
- /*
- * Don't tell them about moving exiting tasks or
- * kernel threads (both mm NULL), since they never
- * leave kernel.
- */
- if (p->mm && printk_ratelimit()) {
- printk(KERN_INFO "process %d (%s) no "
- "longer affine to cpu%d\n",
- task_pid_nr(p), p->comm, dead_cpu);
- }
- }
+ local_irq_save(flags);
-move:
- /* It can have affinity changed while we were choosing. */
- if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
- goto again;
+ spin_lock(&rq->lock);
+ needs_cpu = (task_cpu(p) == dead_cpu) && (p->state != TASK_WAKING);
+ if (needs_cpu)
+ dest_cpu = select_fallback_rq(dead_cpu, p);
+ spin_unlock(&rq->lock);
+ /*
+ * It can only fail if we race with set_cpus_allowed(),
+ * in which case the racer should migrate the task anyway.
+ */
+ if (needs_cpu)
+ __migrate_task(p, dead_cpu, dest_cpu);
+ local_irq_restore(flags);
}
/*
@@ -7669,10 +7763,9 @@ migration_call(struct notifier_block *nf
unsigned long flags;
struct rq *rq;
- switch (action) {
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
if (IS_ERR(p))
return NOTIFY_BAD;
@@ -7687,7 +7780,6 @@ migration_call(struct notifier_block *nf
break;
case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
/* Strictly unnecessary, as first user will wake it. */
wake_up_process(cpu_rq(cpu)->migration_thread);
@@ -7704,7 +7796,6 @@ migration_call(struct notifier_block *nf
#ifdef CONFIG_HOTPLUG_CPU
case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
if (!cpu_rq(cpu)->migration_thread)
break;
/* Unbind it from offline cpu so it can run. Fall thru. */
@@ -7715,14 +7806,22 @@ migration_call(struct notifier_block *nf
cpu_rq(cpu)->migration_thread = NULL;
break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
- migrate_live_tasks(cpu);
+ case CPU_POST_DEAD:
+ /*
+ * Bring the migration thread down in the CPU_POST_DEAD event,
+ * since the timers should have been migrated by now and thus
+ * we should not see a deadlock between trying to kill the
+ * migration thread and the sched_rt_period_timer.
+ */
rq = cpu_rq(cpu);
kthread_stop(rq->migration_thread);
put_task_struct(rq->migration_thread);
rq->migration_thread = NULL;
+ break;
+
+ case CPU_DEAD:
+ migrate_live_tasks(cpu);
+ rq = cpu_rq(cpu);
/* Idle task back to normal (off runqueue, low prio) */
spin_lock_irq(&rq->lock);
update_rq_clock(rq);
@@ -7731,7 +7830,6 @@ migration_call(struct notifier_block *nf
rq->idle->sched_class = &idle_sched_class;
migrate_dead_tasks(cpu);
spin_unlock_irq(&rq->lock);
- cpuset_unlock();
migrate_nr_uninterruptible(rq);
BUG_ON(rq->nr_running != 0);
calc_global_load_remove(rq);
@@ -7755,7 +7853,6 @@ migration_call(struct notifier_block *nf
break;
case CPU_DYING:
- case CPU_DYING_FROZEN:
/* Update our root-domain */
rq = cpu_rq(cpu);
spin_lock_irqsave(&rq->lock, flags);
@@ -8075,6 +8172,9 @@ cpu_attach_domain(struct sched_domain *s
struct rq *rq = cpu_rq(cpu);
struct sched_domain *tmp;
+ for (tmp = sd; tmp; tmp = tmp->parent)
+ tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
+
/* Remove the sched domains which do not contribute to scheduling. */
for (tmp = sd; tmp; ) {
struct sched_domain *parent = tmp->parent;
@@ -10073,13 +10173,13 @@ void sched_move_task(struct task_struct
#ifdef CONFIG_FAIR_GROUP_SCHED
if (tsk->sched_class->moved_group)
- tsk->sched_class->moved_group(tsk);
+ tsk->sched_class->moved_group(tsk, on_rq);
#endif
if (unlikely(running))
tsk->sched_class->set_curr_task(rq);
if (on_rq)
- enqueue_task(rq, tsk, 0);
+ enqueue_task(rq, tsk, 0, false);
task_rq_unlock(rq, &flags);
}
@@ -10860,12 +10960,30 @@ static void cpuacct_charge(struct task_s
}
/*
+ * When CONFIG_VIRT_CPU_ACCOUNTING is enabled, one jiffy can be very large
+ * in cputime_t units. As a result, cpuacct_update_stats calls
+ * percpu_counter_add with values large enough to always overflow the
+ * per-cpu batch limit, causing bad SMP scalability.
+ *
+ * To fix this we scale percpu_counter_batch by cputime_one_jiffy, so we
+ * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled
+ * and enabled. We cap it at INT_MAX, which is the largest allowed batch value.
+ */
+#ifdef CONFIG_SMP
+#define CPUACCT_BATCH \
+ min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
+#else
+#define CPUACCT_BATCH 0
+#endif
+
+/*
* Charge the system/user time to the task's accounting group.
*/
static void cpuacct_update_stats(struct task_struct *tsk,
enum cpuacct_stat_index idx, cputime_t val)
{
struct cpuacct *ca;
+ int batch = CPUACCT_BATCH;
if (unlikely(!cpuacct_subsys.active))
return;
@@ -10874,7 +10992,7 @@ static void cpuacct_update_stats(struct
ca = task_ca(tsk);
do {
- percpu_counter_add(&ca->cpustat[idx], val);
+ __percpu_counter_add(&ca->cpustat[idx], val, batch);
ca = ca->parent;
} while (ca);
rcu_read_unlock();
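
The CPUACCT_BATCH clamp above is just min_t(long, batch * one_jiffy, INT_MAX). A standalone sketch of the arithmetic with made-up values (these are not the kernel's actual numbers):

#include <limits.h>
#include <stdio.h>

/* Illustrative stand-ins; real values depend on HZ and the config. */
static long percpu_counter_batch = 32;
static long cputime_one_jiffy = 10000000; /* huge with VIRT_CPU_ACCOUNTING=y */

/* Same shape as min_t(long, percpu_counter_batch * cputime_one_jiffy,
 * INT_MAX): keep the scaled batch within the allowed int range. */
static int cpuacct_batch(void)
{
	long scaled = percpu_counter_batch * cputime_one_jiffy;

	return (int)(scaled < INT_MAX ? scaled : INT_MAX);
}

int main(void)
{
	printf("batch = %d\n", cpuacct_batch());
	return 0;
}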
file:6988cf08f705d3bbd7b8cd16a43fe70cd32b795e -> file:6f836a89375ba91f0789ba7063339909428852a7
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -423,7 +423,6 @@ void proc_sched_show_task(struct task_st
P(se.nr_failed_migrations_running);
P(se.nr_failed_migrations_hot);
P(se.nr_forced_migrations);
- P(se.nr_forced2_migrations);
P(se.nr_wakeups);
P(se.nr_wakeups_sync);
P(se.nr_wakeups_migrate);
@@ -499,7 +498,6 @@ void proc_sched_set_task(struct task_str
p->se.nr_failed_migrations_running = 0;
p->se.nr_failed_migrations_hot = 0;
p->se.nr_forced_migrations = 0;
- p->se.nr_forced2_migrations = 0;
p->se.nr_wakeups = 0;
p->se.nr_wakeups_sync = 0;
p->se.nr_wakeups_migrate = 0;
file:d80812d39d6238378ccac64538056d762128f084 -> file:01e311e6b47fd6b9368e49327f2467d8d148bc60
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -488,6 +488,7 @@ __update_curr(struct cfs_rq *cfs_rq, str
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq, exec_clock, delta_exec);
delta_exec_weighted = calc_delta_fair(delta_exec, curr);
+
curr->vruntime += delta_exec_weighted;
update_min_vruntime(cfs_rq);
}
@@ -743,16 +744,26 @@ place_entity(struct cfs_rq *cfs_rq, stru
se->vruntime = vruntime;
}
+#define ENQUEUE_WAKEUP 1
+#define ENQUEUE_MIGRATE 2
+
static void
-enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
+enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
/*
+ * Update the normalized vruntime before updating min_vruntime
+ * through calling update_curr().
+ */
+ if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATE))
+ se->vruntime += cfs_rq->min_vruntime;
+
+ /*
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
account_entity_enqueue(cfs_rq, se);
- if (wakeup) {
+ if (flags & ENQUEUE_WAKEUP) {
place_entity(cfs_rq, se, 0);
enqueue_sleeper(cfs_rq, se);
}
@@ -806,6 +817,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, st
__dequeue_entity(cfs_rq, se);
account_entity_dequeue(cfs_rq, se);
update_min_vruntime(cfs_rq);
+
+ /*
+ * Normalize the entity after updating the min_vruntime because the
+ * update can refer to the ->curr item and we need to reflect this
+ * movement in our normalized position.
+ */
+ if (!sleep)
+ se->vruntime -= cfs_rq->min_vruntime;
}
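
The enqueue/dequeue pairing above keeps an off-queue entity's vruntime relative: dequeue subtracts the source queue's min_vruntime, enqueue adds the destination's. A toy arithmetic sketch of that re-basing (illustrative numbers only):

#include <stdio.h>

struct toy_rq { unsigned long long min_vruntime; };

int main(void)
{
	struct toy_rq src = { .min_vruntime = 1000 };
	struct toy_rq dst = { .min_vruntime = 5000 };
	unsigned long long vruntime = 1200;	/* absolute on src */

	vruntime -= src.min_vruntime;	/* dequeue: relative value 200 */
	vruntime += dst.min_vruntime;	/* enqueue: absolute on dst, 5200 */
	printf("re-based vruntime = %llu\n", vruntime);
	return 0;
}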
/*
@@ -1012,17 +1031,24 @@ static inline void hrtick_update(struct
* increased. Here we update the fair scheduling stats and
* then put the task into the rbtree:
*/
-static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
+static void
+enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
+ int flags = 0;
+
+ if (wakeup)
+ flags |= ENQUEUE_WAKEUP;
+ if (p->state == TASK_WAKING)
+ flags |= ENQUEUE_MIGRATE;
for_each_sched_entity(se) {
if (se->on_rq)
break;
cfs_rq = cfs_rq_of(se);
- enqueue_entity(cfs_rq, se, wakeup);
- wakeup = 1;
+ enqueue_entity(cfs_rq, se, flags);
+ flags = ENQUEUE_WAKEUP;
}
hrtick_update(rq);
@@ -1098,6 +1124,14 @@ static void yield_task_fair(struct rq *r
#ifdef CONFIG_SMP
+static void task_waking_fair(struct rq *rq, struct task_struct *p)
+{
+ struct sched_entity *se = &p->se;
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+ se->vruntime -= cfs_rq->min_vruntime;
+}
+
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
* effective_load() calculates the load change as seen from the root_task_group
@@ -1216,6 +1250,7 @@ static int wake_affine(struct sched_doma
* effect of the currently running task from the load
* of the current CPU:
*/
+ rcu_read_lock();
if (sync) {
tg = task_group(current);
weight = current->se.load.weight;
@@ -1241,6 +1276,7 @@ static int wake_affine(struct sched_doma
balanced = !this_load ||
100*(this_load + effective_load(tg, this_cpu, weight, weight)) <=
imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
+ rcu_read_unlock();
/*
* If the currently running task will sleep within
@@ -1348,6 +1384,56 @@ find_idlest_cpu(struct sched_group *grou
}
/*
+ * Try and locate an idle CPU in the sched_domain.
+ */
+static int select_idle_sibling(struct task_struct *p, int target)
+{
+ int cpu = smp_processor_id();
+ int prev_cpu = task_cpu(p);
+ struct sched_domain *sd;
+ int i;
+
+ /*
+ * If the task is going to be woken up on this cpu and if it is
+ * already idle, then it is the right target.
+ */
+ if (target == cpu && idle_cpu(cpu))
+ return cpu;
+
+ /*
+ * If the task is going to be woken up on the cpu where it previously
+ * ran and if it is currently idle, then it is the right target.
+ */
+ if (target == prev_cpu && idle_cpu(prev_cpu))
+ return prev_cpu;
+
+ /*
+ * Otherwise, iterate the domains and find an eligible idle cpu.
+ */
+ for_each_domain(target, sd) {
+ if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
+ break;
+
+ for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
+ if (idle_cpu(i)) {
+ target = i;
+ break;
+ }
+ }
+
+ /*
+ * Let's stop looking for an idle sibling when we have reached
+ * the domain that spans the current cpu and prev_cpu.
+ */
+ if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
+ cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
+ break;
+ }
+
+ return target;
+}
+
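
The selection order in select_idle_sibling() above condenses to: the wake-up target if idle, else the previous cpu if idle, else the first idle cpu found in the shared-cache domain. A toy userspace model of that order (illustrative only; it ignores domains and affinity masks):

#include <stdbool.h>
#include <stdio.h>

static int pick_cpu(const bool idle[], int ncpus, int target, int prev)
{
	int i;

	if (idle[target])
		return target;		/* wake-up target already idle */
	if (idle[prev])
		return prev;		/* previous cpu idle: cache-warm */
	for (i = 0; i < ncpus; i++)	/* scan for any idle sibling */
		if (idle[i])
			return i;
	return target;			/* nothing idle: keep the target */
}

int main(void)
{
	bool idle[4] = { false, false, true, false };

	printf("picked cpu%d\n", pick_cpu(idle, 4, 0, 1));	/* cpu2 */
	return 0;
}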
+/*
* sched_balance_self: balance the current task (running on cpu) in domains
* that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
* SD_BALANCE_EXEC.
@@ -1358,7 +1444,8 @@ find_idlest_cpu(struct sched_group *grou
*
* preempt must be disabled.
*/
-static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
+static int
+select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags)
{
struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
int cpu = smp_processor_id();
@@ -1375,7 +1462,6 @@ static int select_task_rq_fair(struct ta
new_cpu = prev_cpu;
}
- rcu_read_lock();
for_each_domain(cpu, tmp) {
if (!(tmp->flags & SD_LOAD_BALANCE))
continue;
@@ -1404,38 +1490,14 @@ static int select_task_rq_fair(struct ta
want_sd = 0;
}
- if (want_affine && (tmp->flags & SD_WAKE_AFFINE)) {
- int candidate = -1, i;
-
- if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp)))
- candidate = cpu;
-
- /*
- * Check for an idle shared cache.
- */
- if (tmp->flags & SD_PREFER_SIBLING) {
- if (candidate == cpu) {
- if (!cpu_rq(prev_cpu)->cfs.nr_running)
- candidate = prev_cpu;
- }
-
- if (candidate == -1 || candidate == cpu) {
- for_each_cpu(i, sched_domain_span(tmp)) {
- if (!cpumask_test_cpu(i, &p->cpus_allowed))
- continue;
- if (!cpu_rq(i)->cfs.nr_running) {
- candidate = i;
- break;
- }
- }
- }
- }
-
- if (candidate >= 0) {
- affine_sd = tmp;
- want_affine = 0;
- cpu = candidate;
- }
+ /*
+ * If both cpu and prev_cpu are part of this domain,
+ * cpu is a valid SD_WAKE_AFFINE target.
+ */
+ if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
+ cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
+ affine_sd = tmp;
+ want_affine = 0;
}
if (!want_sd && !want_affine)
@@ -1448,23 +1510,28 @@ static int select_task_rq_fair(struct ta
sd = tmp;
}
+#ifdef CONFIG_FAIR_GROUP_SCHED
if (sched_feat(LB_SHARES_UPDATE)) {
/*
* Pick the largest domain to update shares over
*/
tmp = sd;
- if (affine_sd && (!tmp ||
- cpumask_weight(sched_domain_span(affine_sd)) >
- cpumask_weight(sched_domain_span(sd))))
+ if (affine_sd && (!tmp || affine_sd->span_weight > sd->span_weight))
tmp = affine_sd;
- if (tmp)
+ if (tmp) {
+ spin_unlock(&rq->lock);
update_shares(tmp);
+ spin_lock(&rq->lock);
+ }
}
+#endif
- if (affine_sd && wake_affine(affine_sd, p, sync)) {
- new_cpu = cpu;
- goto out;
+ if (affine_sd) {
+ if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
+ return select_idle_sibling(p, cpu);
+ else
+ return select_idle_sibling(p, prev_cpu);
}
while (sd) {
@@ -1495,10 +1562,10 @@ static int select_task_rq_fair(struct ta
/* Now try balancing at a lower domain level of new_cpu */
cpu = new_cpu;
- weight = cpumask_weight(sched_domain_span(sd));
+ weight = sd->span_weight;
sd = NULL;
for_each_domain(cpu, tmp) {
- if (weight <= cpumask_weight(sched_domain_span(tmp)))
+ if (weight <= tmp->span_weight)
break;
if (tmp->flags & sd_flag)
sd = tmp;
@@ -1506,8 +1573,6 @@ static int select_task_rq_fair(struct ta
/* while loop will break here if sd == NULL */
}
-out:
- rcu_read_unlock();
return new_cpu;
}
#endif /* CONFIG_SMP */
@@ -1911,28 +1976,32 @@ static void task_tick_fair(struct rq *rq
}
/*
- * Share the fairness runtime between parent and child, thus the
- * total amount of pressure for CPU stays equal - new tasks
- * get a chance to run but frequent forkers are not allowed to
- * monopolize the CPU. Note: the parent runqueue is locked,
- * the child is not running yet.
+ * called on fork with the child task as argument from the parent's context
+ * - child not yet on the tasklist
+ * - preemption disabled
*/
-static void task_new_fair(struct rq *rq, struct task_struct *p)
+static void task_fork_fair(struct task_struct *p)
{
- struct cfs_rq *cfs_rq = task_cfs_rq(p);
+ struct cfs_rq *cfs_rq = task_cfs_rq(current);
struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
int this_cpu = smp_processor_id();
+ struct rq *rq = this_rq();
+ unsigned long flags;
+
+ spin_lock_irqsave(&rq->lock, flags);
+
+ update_rq_clock(rq);
- sched_info_queued(p);
+ if (unlikely(task_cpu(p) != this_cpu))
+ __set_task_cpu(p, this_cpu);
update_curr(cfs_rq);
+
if (curr)
se->vruntime = curr->vruntime;
place_entity(cfs_rq, se, 1);
- /* 'curr' will be NULL if the child belongs to a different group */
- if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
- curr && entity_before(curr, se)) {
+ if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
/*
* Upon rescheduling, sched_class::put_prev_task() will place
* 'current' within the tree based on its new key value.
@@ -1941,7 +2010,9 @@ static void task_new_fair(struct rq *rq,
resched_task(rq->curr);
}
- enqueue_task_fair(rq, p, 0);
+ se->vruntime -= cfs_rq->min_vruntime;
+
+ spin_unlock_irqrestore(&rq->lock, flags);
}
/*
@@ -1994,30 +2065,27 @@ static void set_curr_task_fair(struct rq
}
#ifdef CONFIG_FAIR_GROUP_SCHED
-static void moved_group_fair(struct task_struct *p)
+static void moved_group_fair(struct task_struct *p, int on_rq)
{
struct cfs_rq *cfs_rq = task_cfs_rq(p);
update_curr(cfs_rq);
- place_entity(cfs_rq, &p->se, 1);
+ if (!on_rq)
+ place_entity(cfs_rq, &p->se, 1);
}
#endif
-unsigned int get_rr_interval_fair(struct task_struct *task)
+unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
struct sched_entity *se = &task->se;
- unsigned long flags;
- struct rq *rq;
unsigned int rr_interval = 0;
/*
* Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
* idle runqueue:
*/
- rq = task_rq_lock(task, &flags);
if (rq->cfs.load.weight)
rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
- task_rq_unlock(rq, &flags);
return rr_interval;
}
@@ -2043,11 +2111,13 @@ static const struct sched_class fair_sch
.move_one_task = move_one_task_fair,
.rq_online = rq_online_fair,
.rq_offline = rq_offline_fair,
+
+ .task_waking = task_waking_fair,
#endif
.set_curr_task = set_curr_task_fair,
.task_tick = task_tick_fair,
- .task_new = task_new_fair,
+ .task_fork = task_fork_fair,
.prio_changed = prio_changed_fair,
.switched_to = switched_to_fair,
file:b133a28fcde32cbf20421f077686c14319871f74 -> file:93ad2e7953cf354b32eea2244b9a352393c432a7
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -6,7 +6,8 @@
*/
#ifdef CONFIG_SMP
-static int select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
+static int
+select_task_rq_idle(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
{
return task_cpu(p); /* IDLE tasks are never migrated */
}
@@ -97,7 +98,7 @@ static void prio_changed_idle(struct rq
check_preempt_curr(rq, p, 0);
}
-unsigned int get_rr_interval_idle(struct task_struct *task)
+unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
return 0;
}
file:a4d790cddb1983196ba9881936d8abf7a8870dbe -> file:af24fab76a9e2b51b3fff6e64da1bfd6b299a72a
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -194,7 +194,7 @@ static inline struct rt_rq *group_rt_rq(
return rt_se->my_q;
}
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
@@ -204,7 +204,7 @@ static void sched_rt_rq_enqueue(struct r
if (rt_rq->rt_nr_running) {
if (rt_se && !on_rt_rq(rt_se))
- enqueue_rt_entity(rt_se);
+ enqueue_rt_entity(rt_se, false);
if (rt_rq->highest_prio.curr < curr->prio)
resched_task(curr);
}
@@ -803,7 +803,7 @@ void dec_rt_tasks(struct sched_rt_entity
dec_rt_group(rt_se, rt_rq);
}
-static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
struct rt_prio_array *array = &rt_rq->active;
@@ -819,7 +819,10 @@ static void __enqueue_rt_entity(struct s
if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
return;
- list_add_tail(&rt_se->run_list, queue);
+ if (head)
+ list_add(&rt_se->run_list, queue);
+ else
+ list_add_tail(&rt_se->run_list, queue);
__set_bit(rt_se_prio(rt_se), array->bitmap);
inc_rt_tasks(rt_se, rt_rq);
@@ -856,11 +859,11 @@ static void dequeue_rt_stack(struct sche
}
}
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
dequeue_rt_stack(rt_se);
for_each_sched_rt_entity(rt_se)
- __enqueue_rt_entity(rt_se);
+ __enqueue_rt_entity(rt_se, head);
}
static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
@@ -871,21 +874,22 @@ static void dequeue_rt_entity(struct sch
struct rt_rq *rt_rq = group_rt_rq(rt_se);
if (rt_rq && rt_rq->rt_nr_running)
- __enqueue_rt_entity(rt_se);
+ __enqueue_rt_entity(rt_se, false);
}
}
/*
* Adding/removing a task to/from a priority array:
*/
-static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
+static void
+enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, bool head)
{
struct sched_rt_entity *rt_se = &p->rt;
if (wakeup)
rt_se->timeout = 0;
- enqueue_rt_entity(rt_se);
+ enqueue_rt_entity(rt_se, head);
if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);
@@ -938,10 +942,9 @@ static void yield_task_rt(struct rq *rq)
#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);
-static int select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
+static int
+select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
{
- struct rq *rq = task_rq(p);
-
if (sd_flag != SD_BALANCE_WAKE)
return smp_processor_id();
@@ -1485,7 +1488,7 @@ static void post_schedule_rt(struct rq *
* If we are not running and we are not going to reschedule soon, we should
* try to push tasks away now
*/
-static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
+static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
if (!task_running(rq, p) &&
!test_tsk_need_resched(rq->curr) &&
@@ -1734,7 +1737,7 @@ static void set_curr_task_rt(struct rq *
dequeue_pushable_task(rq, p);
}
-unsigned int get_rr_interval_rt(struct task_struct *task)
+unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
/*
* Time slice is 0 for SCHED_FIFO tasks
@@ -1766,7 +1769,7 @@ static const struct sched_class rt_sched
.rq_offline = rq_offline_rt,
.pre_schedule = pre_schedule_rt,
.post_schedule = post_schedule_rt,
- .task_wake_up = task_wake_up_rt,
+ .task_woken = task_woken_rt,
.switched_from = switched_from_rt,
#endif
file:00889bd3c5903812ed89d722135b15330b513dbb -> file:3514c4449604bda6333ff158d52652a6086c7025
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -640,7 +640,7 @@ int delayed_slow_work_enqueue(struct del
goto cancelled;
/* the timer holds a reference whilst it is pending */
- ret = work->ops->get_ref(work);
+ ret = slow_work_get_ref(work);
if (ret < 0)
goto cant_get_ref;
file:ce17760d9c516720af8dcf1522b47da937ba0023 -> file:440ca69d202df389a2d7debd3954c87165326d09
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -911,16 +911,15 @@ change_okay:
void do_sys_times(struct tms *tms)
{
- struct task_cputime cputime;
- cputime_t cutime, cstime;
+ cputime_t tgutime, tgstime, cutime, cstime;
- thread_group_cputime(current, &cputime);
spin_lock_irq(&current->sighand->siglock);
+ thread_group_times(current, &tgutime, &tgstime);
cutime = current->signal->cutime;
cstime = current->signal->cstime;
spin_unlock_irq(&current->sighand->siglock);
- tms->tms_utime = cputime_to_clock_t(cputime.utime);
- tms->tms_stime = cputime_to_clock_t(cputime.stime);
+ tms->tms_utime = cputime_to_clock_t(tgutime);
+ tms->tms_stime = cputime_to_clock_t(tgstime);
tms->tms_cutime = cputime_to_clock_t(cutime);
tms->tms_cstime = cputime_to_clock_t(cstime);
}
@@ -963,6 +962,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid
pgid = pid;
if (pgid < 0)
return -EINVAL;
+ rcu_read_lock();
/* From this point forward we keep holding onto the tasklist lock
* so that our parent does not change from under us. -DaveM
@@ -1016,6 +1016,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid
out:
/* All paths lead to here, thus we are safe. -DaveM */
write_unlock_irq(&tasklist_lock);
+ rcu_read_unlock();
return err;
}
@@ -1338,8 +1339,7 @@ static void k_getrusage(struct task_stru
{
struct task_struct *t;
unsigned long flags;
- cputime_t utime, stime;
- struct task_cputime cputime;
+ cputime_t tgutime, tgstime, utime, stime;
unsigned long maxrss = 0;
memset((char *) r, 0, sizeof *r);
@@ -1373,9 +1373,9 @@ static void k_getrusage(struct task_stru
break;
case RUSAGE_SELF:
- thread_group_cputime(p, &cputime);
- utime = cputime_add(utime, cputime.utime);
- stime = cputime_add(stime, cputime.stime);
+ thread_group_times(p, &tgutime, &tgstime);
+ utime = cputime_add(utime, tgutime);
+ stime = cputime_add(stime, tgstime);
r->ru_nvcsw += p->signal->nvcsw;
r->ru_nivcsw += p->signal->nivcsw;
r->ru_minflt += p->signal->min_flt;
file:44320b1e2a6c6704ca4cde51a5882cdfdb1d23e1 -> file:b63cfebc680b384b69a0263b3afd4c65dcd20f1e
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -134,18 +134,13 @@ __setup("nohz=", setup_tick_nohz);
* value. We do this unconditionally on any cpu, as we don't know whether the
* cpu which has the update task assigned is in a long sleep.
*/
-static void tick_nohz_update_jiffies(void)
+static void tick_nohz_update_jiffies(ktime_t now)
{
int cpu = smp_processor_id();
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
unsigned long flags;
- ktime_t now;
-
- if (!ts->tick_stopped)
- return;
cpumask_clear_cpu(cpu, nohz_cpu_mask);
- now = ktime_get();
ts->idle_waketime = now;
local_irq_save(flags);
@@ -155,20 +150,17 @@ static void tick_nohz_update_jiffies(voi
touch_softlockup_watchdog();
}
-static void tick_nohz_stop_idle(int cpu)
+static void tick_nohz_stop_idle(int cpu, ktime_t now)
{
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+ ktime_t delta;
- if (ts->idle_active) {
- ktime_t now, delta;
- now = ktime_get();
- delta = ktime_sub(now, ts->idle_entrytime);
- ts->idle_lastupdate = now;
- ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
- ts->idle_active = 0;
+ delta = ktime_sub(now, ts->idle_entrytime);
+ ts->idle_lastupdate = now;
+ ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
+ ts->idle_active = 0;
- sched_clock_idle_wakeup_event(0);
- }
+ sched_clock_idle_wakeup_event(0);
}
static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
@@ -289,12 +281,15 @@ void tick_nohz_stop_sched_tick(int inidl
time_delta = KTIME_MAX;
} while (read_seqretry(&xtime_lock, seq));
- /* Get the next timer wheel timer */
- next_jiffies = get_next_timer_interrupt(last_jiffies);
- delta_jiffies = next_jiffies - last_jiffies;
-
- if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu))
+ if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
+ arch_needs_cpu(cpu)) {
+ next_jiffies = last_jiffies + 1;
delta_jiffies = 1;
+ } else {
+ /* Get the next timer wheel timer */
+ next_jiffies = get_next_timer_interrupt(last_jiffies);
+ delta_jiffies = next_jiffies - last_jiffies;
+ }
/*
* Do not stop the tick if we are only one off
* or if the cpu is required for rcu
@@ -460,7 +455,11 @@ void tick_nohz_restart_sched_tick(void)
ktime_t now;
local_irq_disable();
- tick_nohz_stop_idle(cpu);
+ if (ts->idle_active || (ts->inidle && ts->tick_stopped))
+ now = ktime_get();
+
+ if (ts->idle_active)
+ tick_nohz_stop_idle(cpu, now);
if (!ts->inidle || !ts->tick_stopped) {
ts->inidle = 0;
@@ -474,7 +473,6 @@ void tick_nohz_restart_sched_tick(void)
/* Update jiffies first */
select_nohz_load_balancer(0);
- now = ktime_get();
tick_do_update_jiffies64(now);
cpumask_clear_cpu(cpu, nohz_cpu_mask);
@@ -608,22 +606,18 @@ static void tick_nohz_switch_to_nohz(voi
* timer and do not touch the other magic bits which need to be done
* when idle is left.
*/
-static void tick_nohz_kick_tick(int cpu)
+static void tick_nohz_kick_tick(int cpu, ktime_t now)
{
#if 0
/* Switch back to 2.6.27 behaviour */
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
- ktime_t delta, now;
-
- if (!ts->tick_stopped)
- return;
+ ktime_t delta;
/*
* Do not touch the tick device when the next expiry is either
* already reached or less/equal than the tick period.
*/
- now = ktime_get();
delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
if (delta.tv64 <= tick_period.tv64)
return;
@@ -632,9 +626,26 @@ static void tick_nohz_kick_tick(int cpu)
#endif
}
+static inline void tick_check_nohz(int cpu)
+{
+ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+ ktime_t now;
+
+ if (!ts->idle_active && !ts->tick_stopped)
+ return;
+ now = ktime_get();
+ if (ts->idle_active)
+ tick_nohz_stop_idle(cpu, now);
+ if (ts->tick_stopped) {
+ tick_nohz_update_jiffies(now);
+ tick_nohz_kick_tick(cpu, now);
+ }
+}
+
#else
static inline void tick_nohz_switch_to_nohz(void) { }
+static inline void tick_check_nohz(int cpu) { }
#endif /* NO_HZ */
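
The tick_check_nohz() refactor above reads the clock once and hands the value to both helpers, instead of each helper calling ktime_get() itself. A minimal sketch of that hoisting, assuming POSIX clock_gettime (names illustrative):

#include <stdio.h>
#include <time.h>

static void stop_idle(struct timespec now)      { (void)now; }
static void update_jiffies(struct timespec now) { (void)now; }

static void check_idle(int idle_active, int tick_stopped)
{
	struct timespec now;

	if (!idle_active && !tick_stopped)
		return;				/* nothing to do: no clock read */
	clock_gettime(CLOCK_MONOTONIC, &now);	/* single read, shared below */
	if (idle_active)
		stop_idle(now);
	if (tick_stopped)
		update_jiffies(now);
}

int main(void)
{
	check_idle(1, 1);
	return 0;
}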
@@ -644,11 +655,7 @@ static inline void tick_nohz_switch_to_n
void tick_check_idle(int cpu)
{
tick_check_oneshot_broadcast(cpu);
-#ifdef CONFIG_NO_HZ
- tick_nohz_stop_idle(cpu);
- tick_nohz_update_jiffies();
- tick_nohz_kick_tick(cpu);
-#endif
+ tick_check_nohz(cpu);
}
/*
file:8b709dee8e3415ca03fd46316dcb300e58abeaa4 -> file:26e2f3705cc159e75324c28e3caacfdfea379484
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -177,7 +177,7 @@ void timekeeping_leap_insert(int leapsec
{
xtime.tv_sec += leapsecond;
wall_to_monotonic.tv_sec -= leapsecond;
- update_vsyscall(&xtime, timekeeper.clock);
+ update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
}
#ifdef CONFIG_GENERIC_TIME
@@ -337,7 +337,7 @@ int do_settimeofday(struct timespec *tv)
timekeeper.ntp_error = 0;
ntp_clear();
- update_vsyscall(&xtime, timekeeper.clock);
+ update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -822,7 +822,7 @@ void update_wall_time(void)
update_xtime_cache(nsecs);
/* check to see if there is a new clocksource to use */
- update_vsyscall(&xtime, timekeeper.clock);
+ update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
}
/**
file:0cccb6c460da74d4f8e03214002baf41b8577ca9 -> file:22cf21e9e792a2c70713427f2a224301e2313c10
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -369,11 +369,18 @@ static int function_stat_show(struct seq
{
struct ftrace_profile *rec = v;
char str[KSYM_SYMBOL_LEN];
+ int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- static DEFINE_MUTEX(mutex);
static struct trace_seq s;
unsigned long long avg;
#endif
+ mutex_lock(&ftrace_profile_lock);
+
+ /* we raced with function_profile_reset() */
+ if (unlikely(rec->counter == 0)) {
+ ret = -EBUSY;
+ goto out;
+ }
kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
seq_printf(m, " %-30.30s %10lu", str, rec->counter);
@@ -383,17 +390,17 @@ static int function_stat_show(struct seq
avg = rec->time;
do_div(avg, rec->counter);
- mutex_lock(&mutex);
trace_seq_init(&s);
trace_print_graph_duration(rec->time, &s);
trace_seq_puts(&s, " ");
trace_print_graph_duration(avg, &s);
trace_print_seq(m, &s);
- mutex_unlock(&mutex);
#endif
seq_putc(m, '\n');
+out:
+ mutex_unlock(&ftrace_profile_lock);
- return 0;
+ return ret;
}
static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
@@ -1473,6 +1480,8 @@ static void *t_start(struct seq_file *m,
if (*pos > 0)
return t_hash_start(m, pos);
iter->flags |= FTRACE_ITER_PRINTALL;
+ /* reset in case of seek/pread */
+ iter->flags &= ~FTRACE_ITER_HASH;
return iter;
}
@@ -2393,7 +2402,7 @@ static const struct file_operations ftra
.open = ftrace_filter_open,
.read = seq_read,
.write = ftrace_filter_write,
- .llseek = ftrace_regex_lseek,
+ .llseek = no_llseek,
.release = ftrace_filter_release,
};
file:c88b21c9fb64b9a99b2147ac4e1fa6bd290464fa -> file:e749a054915ce14d14afae6b7f04136dfeea01cb
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -389,7 +389,7 @@ static inline int test_time_stamp(u64 de
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
/* Max number of timestamps that can fit on a page */
-#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
+#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_EXTEND)
int ring_buffer_print_page_header(struct trace_seq *s)
{
file:aeaa6d7344475518a3b73b3a7062106e77cadcea -> file:9d942128c152d035bfc62d9bda6632bb254be0ed
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -76,6 +76,7 @@ int __percpu_counter_init(struct percpu_
if (!fbc->counters)
return -ENOMEM;
#ifdef CONFIG_HOTPLUG_CPU
+ INIT_LIST_HEAD(&fbc->list);
mutex_lock(&percpu_counters_lock);
list_add(&fbc->list, &percpu_counters);
mutex_unlock(&percpu_counters_lock);
file:a2b76a588e348e0d46f2bfed4131af6ac3eea86b -> file:1d5fa0870ca91d2692894506549e0dec22275f7a
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -115,8 +115,8 @@ static void copy_to_high_bio_irq(struct
*/
vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
- flush_dcache_page(tovec->bv_page);
bounce_copy_vec(tovec, vfrom);
+ flush_dcache_page(tovec->bv_page);
}
}
file:46e3f8a11b2a3435ff319619334bfcde692ac8b0 -> file:9e0826ea7bbe759dfb2356d13bdfb038cc3e0869
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1030,6 +1030,9 @@ find_page:
goto page_not_up_to_date;
if (!trylock_page(page))
goto page_not_up_to_date;
+ /* Did it get truncated before we got the lock? */
+ if (!page->mapping)
+ goto page_not_up_to_date_locked;
if (!mapping->a_ops->is_partially_uptodate(page,
desc, offset))
goto page_not_up_to_date_locked;
file:17bc0df273bb5c84937f8973c0489aedbf0279a5 -> file:f03e8e2c2270685936ab1c72ddd269a66fe68c65
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -59,7 +59,7 @@ extern void prep_compound_page(struct pa
*/
static inline unsigned long page_order(struct page *page)
{
- VM_BUG_ON(!PageBuddy(page));
+ /* PageBuddy() must be checked by the caller */
return page_private(page);
}
file:dacc64183874c739e6a501c780d28ee220c4ac2b -> file:8aeba5341ccdc833d80547901ada58891453bfa9
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -589,7 +589,6 @@ static struct page_state {
{ lru|dirty, lru|dirty, "LRU", me_pagecache_dirty },
{ lru|dirty, lru, "clean LRU", me_pagecache_clean },
- { swapbacked, swapbacked, "anonymous", me_pagecache_clean },
/*
* Catchall entry: must be at end.
@@ -638,7 +637,7 @@ static int page_action(struct page_state
* Do all that is necessary to remove user space mappings. Unmap
* the pages and send SIGBUS to the processes if the data was dirty.
*/
-static void hwpoison_user_mappings(struct page *p, unsigned long pfn,
+static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
int trapno)
{
enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
@@ -648,15 +647,18 @@ static void hwpoison_user_mappings(struc
int i;
int kill = 1;
- if (PageReserved(p) || PageCompound(p) || PageSlab(p) || PageKsm(p))
- return;
+ if (PageReserved(p) || PageSlab(p))
+ return SWAP_SUCCESS;
/*
* This check implies we don't kill processes if their pages
* are in the swap cache early. Those are always late kills.
*/
if (!page_mapped(p))
- return;
+ return SWAP_SUCCESS;
+
+ if (PageCompound(p) || PageKsm(p))
+ return SWAP_FAIL;
if (PageSwapCache(p)) {
printk(KERN_ERR
@@ -718,6 +720,8 @@ static void hwpoison_user_mappings(struc
*/
kill_procs_ao(&tokill, !!PageDirty(p), trapno,
ret != SWAP_SUCCESS, pfn);
+
+ return ret;
}
int __memory_failure(unsigned long pfn, int trapno, int ref)
@@ -787,8 +791,13 @@ int __memory_failure(unsigned long pfn,
/*
* Now take care of user space mappings.
+ * Abort on fail: __remove_from_page_cache() assumes unmapped page.
*/
- hwpoison_user_mappings(p, pfn, trapno);
+ if (hwpoison_user_mappings(p, pfn, trapno) != SWAP_SUCCESS) {
+ printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
+ res = -EBUSY;
+ goto out;
+ }
/*
* Torn down by someone else?
file:4e5945588c73f298961256c9361869d1cf9df9e2 -> file:53c1da0d04a68d02bd1fd9eba8c925ceda59fec8
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1282,10 +1282,20 @@ int __get_user_pages(struct task_struct
return i ? : -EFAULT;
}
if (pages) {
- struct page *page = vm_normal_page(gate_vma, start, *pte);
+ struct page *page;
+
+ page = vm_normal_page(gate_vma, start, *pte);
+ if (!page) {
+ if (!(gup_flags & FOLL_DUMP) &&
+ is_zero_pfn(pte_pfn(*pte)))
+ page = pte_page(*pte);
+ else {
+ pte_unmap(pte);
+ return i ? : -EFAULT;
+ }
+ }
pages[i] = page;
- if (page)
- get_page(page);
+ get_page(page);
}
pte_unmap(pte);
if (vmas)
@@ -2620,6 +2630,40 @@ out_release:
}
/*
+ * This is like a special single-page "expand_{down|up}wards()",
+ * except we must first make sure that 'address{-|+}PAGE_SIZE'
+ * doesn't hit another vma.
+ */
+static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+{
+ address &= PAGE_MASK;
+ if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+ struct vm_area_struct *prev = vma->vm_prev;
+
+ /*
+ * Is there a mapping abutting this one below?
+ *
+ * That's only ok if it's the same stack mapping
+ * that has gotten split..
+ */
+ if (prev && prev->vm_end == address)
+ return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+
+ expand_stack(vma, address - PAGE_SIZE);
+ }
+ if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
+ struct vm_area_struct *next = vma->vm_next;
+
+ /* As VM_GROWSDOWN but s/below/above/ */
+ if (next && next->vm_start == address + PAGE_SIZE)
+ return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
+
+ expand_upwards(vma, address + PAGE_SIZE);
+ }
+ return 0;
+}
+
+/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
@@ -2632,19 +2676,23 @@ static int do_anonymous_page(struct mm_s
spinlock_t *ptl;
pte_t entry;
+ pte_unmap(page_table);
+
+ /* Check if we need to add a guard page to the stack */
+ if (check_stack_guard_page(vma, address) < 0)
+ return VM_FAULT_SIGBUS;
+
+ /* Use the zero-page for reads */
if (!(flags & FAULT_FLAG_WRITE)) {
entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
vma->vm_page_prot));
- ptl = pte_lockptr(mm, pmd);
- spin_lock(ptl);
+ page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (!pte_none(*page_table))
goto unlock;
goto setpte;
}
/* Allocate our own private page. */
- pte_unmap(page_table);
-
if (unlikely(anon_vma_prepare(vma)))
goto oom;
page = alloc_zeroed_user_highpage_movable(vma, address);
file:2047465cd27cf5b1829979057459857184a42b2e -> file:f4be4649d5954140a2f41f191cf5974820897871
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -551,19 +551,19 @@ static inline int pageblock_free(struct
/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
- int pageblocks_stride;
-
/* Ensure the starting page is pageblock-aligned */
BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
- /* Move forward by at least 1 * pageblock_nr_pages */
- pageblocks_stride = 1;
-
/* If the entire pageblock is free, move to the end of free page */
- if (pageblock_free(page))
- pageblocks_stride += page_order(page) - pageblock_order;
+ if (pageblock_free(page)) {
+ int order;
+ /* be careful: we don't have locks, so page_order can be changed. */
+ order = page_order(page);
+ if ((order < MAX_ORDER) && (order >= pageblock_order))
+ return page + (1 << order);
+ }
- return page + (pageblocks_stride * pageblock_nr_pages);
+ return page + pageblock_nr_pages;
}
/* Checks if this range of memory is likely to be hot-removable. */
@@ -626,7 +626,7 @@ static int test_pages_in_a_zone(unsigned
* Scanning pfn is much easier than scanning lru list.
* Scan pfn from start to end and Find LRU page.
*/
-int scan_lru_pages(unsigned long start, unsigned long end)
+unsigned long scan_lru_pages(unsigned long start, unsigned long end)
{
unsigned long pfn;
struct page *page;
file:f29d8d70884d7fb2e170983bf0e551b02740f617 -> file:3c6e3e29255caa2e89d446f21892625e0324212e
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1482,7 +1482,7 @@ unsigned slab_node(struct mempolicy *pol
(void)first_zones_zonelist(zonelist, highest_zoneidx,
&policy->v.nodes,
&zone);
- return zone->node;
+ return zone ? zone->node : numa_node_id();
}
default:
file:2e05c97b04f5bb7ec799a170b76bab6d4dc48918 -> file:2d846cfe3990daff87cba2361e05debcd9daba06
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -138,6 +138,13 @@ void munlock_vma_page(struct page *page)
}
}
+static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ return (vma->vm_flags & VM_GROWSDOWN) &&
+ (vma->vm_start == addr) &&
+ !vma_stack_continue(vma->vm_prev, addr);
+}
+
/**
* __mlock_vma_pages_range() - mlock a range of pages in the vma.
* @vma: target vma
@@ -170,6 +177,12 @@ static long __mlock_vma_pages_range(stru
if (vma->vm_flags & VM_WRITE)
gup_flags |= FOLL_WRITE;
+ /* We don't try to access the guard page of a stack vma */
+ if (stack_guard_page(vma, start)) {
+ addr += PAGE_SIZE;
+ nr_pages--;
+ }
+
while (nr_pages > 0) {
int i;
file:ae197468b352bbdcecfaeb04cd0ebce73382aaef -> file:866a666690372a38032e3e49263f834a95867b69
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -389,17 +389,23 @@ static inline void
__vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
struct vm_area_struct *prev, struct rb_node *rb_parent)
{
+ struct vm_area_struct *next;
+
+ vma->vm_prev = prev;
if (prev) {
- vma->vm_next = prev->vm_next;
+ next = prev->vm_next;
prev->vm_next = vma;
} else {
mm->mmap = vma;
if (rb_parent)
- vma->vm_next = rb_entry(rb_parent,
+ next = rb_entry(rb_parent,
struct vm_area_struct, vm_rb);
else
- vma->vm_next = NULL;
+ next = NULL;
}
+ vma->vm_next = next;
+ if (next)
+ next->vm_prev = vma;
}
void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -487,7 +493,11 @@ static inline void
__vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
struct vm_area_struct *prev)
{
- prev->vm_next = vma->vm_next;
+ struct vm_area_struct *next = vma->vm_next;
+
+ prev->vm_next = next;
+ if (next)
+ next->vm_prev = prev;
rb_erase(&vma->vm_rb, &mm->mm_rb);
if (mm->mmap_cache == vma)
mm->mmap_cache = prev;
@@ -1590,9 +1600,6 @@ static int acct_stack_growth(struct vm_a
* PA-RISC uses this for its stack; IA64 for its Register Backing Store.
* vma is the last one with address > vma->vm_end. Have to extend vma.
*/
-#ifndef CONFIG_IA64
-static
-#endif
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
int error;
@@ -1798,6 +1805,7 @@ detach_vmas_to_be_unmapped(struct mm_str
unsigned long addr;
insertion_point = (prev ? &prev->vm_next : &mm->mmap);
+ vma->vm_prev = NULL;
do {
rb_erase(&vma->vm_rb, &mm->mm_rb);
mm->map_count--;
@@ -1805,6 +1813,8 @@ detach_vmas_to_be_unmapped(struct mm_str
vma = vma->vm_next;
} while (vma && vma->vm_start < end);
*insertion_point = vma;
+ if (vma)
+ vma->vm_prev = prev;
tail_vma->vm_next = NULL;
if (mm->unmap_area == arch_unmap_area)
addr = prev ? prev->vm_end : mm->mmap_base;
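
The mmap.c hunks above turn the vma list into a doubly-linked one: every place that sets vm_next must now also repair the successor's vm_prev. A toy insertion showing the invariant (illustrative struct, not the real vm_area_struct):

#include <stdio.h>

struct toy_vma {
	struct toy_vma *vm_next, *vm_prev;
	int id;
};

static void link_after(struct toy_vma *prev, struct toy_vma *vma)
{
	struct toy_vma *next = prev->vm_next;

	vma->vm_prev = prev;
	vma->vm_next = next;
	prev->vm_next = vma;
	if (next)
		next->vm_prev = vma;	/* keep the back pointer consistent */
}

int main(void)
{
	struct toy_vma a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

	a.vm_next = &c;
	c.vm_prev = &a;
	link_after(&a, &b);
	printf("c.vm_prev->id = %d\n", c.vm_prev->id);	/* prints 2 */
	return 0;
}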
file:f5b7d1760213e53db3c46e84dde56daf219ea0cd -> file:e35bfb82c8555b7377334dbea42bfcf588b0bab8
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -87,3 +87,24 @@ int memmap_valid_within(unsigned long pf
return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
+
+#ifdef CONFIG_SMP
+/* Called when a more accurate view of NR_FREE_PAGES is needed */
+unsigned long zone_nr_free_pages(struct zone *zone)
+{
+ unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES);
+
+ /*
+ * While kswapd is awake, the zone is considered to be under some
+ * memory pressure. Under pressure, there is a risk that per-cpu
+ * counter drift will allow the min watermark to be breached,
+ * potentially causing a live-lock. While kswapd is awake and
+ * free pages are low, get a better estimate for free pages.
+ */
+ if (nr_free_pages < zone->percpu_drift_mark &&
+ !waitqueue_active(&zone->zone_pgdat->kswapd_wait))
+ return zone_page_state_snapshot(zone, NR_FREE_PAGES);
+
+ return nr_free_pages;
+}
+#endif /* CONFIG_SMP */
file:8bc969d8112d28adb32b3754d339820739e2ba8c -> file:1737c7e7af536c587c23c0f00f13b602203ac6c8
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -212,6 +212,7 @@ success:
mmu_notifier_invalidate_range_end(mm, start, end);
vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
vm_stat_account(mm, newflags, vma->vm_file, nrpages);
+ perf_event_mmap(vma);
return 0;
fail:
@@ -300,7 +301,6 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
if (error)
goto out;
- perf_event_mmap(vma);
nstart = tmp;
if (nstart < prev->vm_end)
file:9876fa0c3ad30e75d842f965ac4417e66be6f07a -> file:406e8d47ce49f76180667647cffc717b029ff950
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -608,7 +608,7 @@ static void protect_vma(struct vm_area_s
*/
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
- struct vm_area_struct *pvma, **pp;
+ struct vm_area_struct *pvma, **pp, *next;
struct address_space *mapping;
struct rb_node **p, *parent;
@@ -668,8 +668,11 @@ static void add_vma_to_mm(struct mm_stru
break;
}
- vma->vm_next = *pp;
+ next = *pp;
*pp = vma;
+ vma->vm_next = next;
+ if (next)
+ next->vm_prev = vma;
}
/*
@@ -1612,6 +1615,7 @@ void exit_mmap(struct mm_struct *mm)
mm->mmap = vma->vm_next;
delete_vma_from_mm(vma);
delete_vma(mm, vma);
+ cond_resched();
}
kleave("");
file:a4fd7cda2378663ed4b7bb2afe5b01d6e617b868 -> file:79521e4206aedfd7df02009f69bd5b04231650ad
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -552,13 +552,13 @@ static void free_pcppages_bulk(struct zo
{
int migratetype = 0;
int batch_free = 0;
+ int to_free = count;
spin_lock(&zone->lock);
zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
zone->pages_scanned = 0;
- __mod_zone_page_state(zone, NR_FREE_PAGES, count);
- while (count) {
+ while (to_free) {
struct page *page;
struct list_head *list;
@@ -583,8 +583,9 @@ static void free_pcppages_bulk(struct zo
/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
__free_one_page(page, zone, 0, page_private(page));
trace_mm_page_pcpu_drain(page, 0, page_private(page));
- } while (--count && --batch_free && !list_empty(list));
+ } while (--to_free && --batch_free && !list_empty(list));
}
+ __mod_zone_page_state(zone, NR_FREE_PAGES, count);
spin_unlock(&zone->lock);
}
@@ -595,8 +596,8 @@ static void free_one_page(struct zone *z
zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
zone->pages_scanned = 0;
- __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
__free_one_page(page, zone, order, migratetype);
+ __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
spin_unlock(&zone->lock);
}
@@ -1386,7 +1387,7 @@ int zone_watermark_ok(struct zone *z, in
{
/* free_pages may go negative - that's OK */
long min = mark;
- long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
+ long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
int o;
if (alloc_flags & ALLOC_HIGH)
@@ -1702,6 +1703,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_m
struct page *page = NULL;
struct reclaim_state reclaim_state;
struct task_struct *p = current;
+ bool drained = false;
cond_resched();
@@ -1720,14 +1722,25 @@ __alloc_pages_direct_reclaim(gfp_t gfp_m
cond_resched();
- if (order != 0)
- drain_all_pages();
+ if (unlikely(!(*did_some_progress)))
+ return NULL;
- if (likely(*did_some_progress))
- page = get_page_from_freelist(gfp_mask, nodemask, order,
+retry:
+ page = get_page_from_freelist(gfp_mask, nodemask, order,
zonelist, high_zoneidx,
alloc_flags, preferred_zone,
migratetype);
+
+ /*
+ * If an allocation failed after direct reclaim, it could be because
+ * pages are pinned on the per-cpu lists. Drain them and try again
+ */
+ if (!page && !drained) {
+ drain_all_pages();
+ drained = true;
+ goto retry;
+ }
+
return page;
}
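
The hunk above replaces the unconditional order>0 drain with drain-and-retry: pay for the expensive global drain only after a failed attempt, and retry exactly once. A toy model of that control flow (illustrative helpers, not the page allocator):

#include <stdbool.h>
#include <stdio.h>

static int attempts;

static void *try_alloc(void)
{
	/* Pretend the first attempt fails and the retry succeeds. */
	return ++attempts < 2 ? NULL : (void *)&attempts;
}

static void drain_all(void)
{
	puts("draining per-cpu lists");
}

static void *alloc_with_retry(void)
{
	bool drained = false;
	void *page;
retry:
	page = try_alloc();
	if (!page && !drained) {
		drain_all();		/* only after a failure */
		drained = true;
		goto retry;		/* one retry, then give up */
	}
	return page;
}

int main(void)
{
	printf("page %s\n", alloc_with_retry() ? "allocated" : "failed");
	return 0;
}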
@@ -2259,7 +2272,7 @@ void show_free_areas(void)
" all_unreclaimable? %s"
"\n",
zone->name,
- K(zone_page_state(zone, NR_FREE_PAGES)),
+ K(zone_nr_free_pages(zone)),
K(min_wmark_pages(zone)),
K(low_wmark_pages(zone)),
K(high_wmark_pages(zone)),
file:5adfc268b408936a3d18ae9014ce70e49590738d -> file:3bfd6e251f1948b6b748a14a5d0917476c76fc68
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1702,9 +1702,9 @@ int __init pcpu_setup_first_chunk(const
if (pcpu_first_unit_cpu == NR_CPUS)
pcpu_first_unit_cpu = cpu;
+ pcpu_last_unit_cpu = cpu;
}
}
- pcpu_last_unit_cpu = cpu;
pcpu_nr_units = unit;
for_each_possible_cpu(cpu)
file:5d1a782289f2af6532b7baf58ea3c7198a27830c -> file:c8d466a4e9b7119fab913fecb3819e94d2784b00
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2249,8 +2249,8 @@ kmem_cache_create (const char *name, siz
}
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
- && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
- cachep->obj_offset += PAGE_SIZE - size;
+ && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
+ cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
size = PAGE_SIZE;
}
#endif
file:9c590eef79122dc0597dac27912f60e6b7dc6a72 -> file:270e136349a0efc72359ad37e9956e65ba4a4eef
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -330,8 +330,10 @@ checks:
if (offset > si->highest_bit)
scan_base = offset = si->lowest_bit;
- /* reuse swap entry of cache-only swap if not busy. */
- if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+ /* reuse swap entry of cache-only swap unless allocating for hibernation. */
+ if (vm_swap_full()
+ && cache == SWAP_CACHE
+ && si->swap_map[offset] == SWAP_HAS_CACHE) {
int swap_was_freed;
spin_unlock(&swap_lock);
swap_was_freed = __try_to_reclaim_swap(si, offset);
file:c2287313b98598917bc6f7e194c25a2a6b0b6cb3 -> file:680dcbb2d91ee75ddaaa1d0236962974a96d4696
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -513,6 +513,15 @@ static atomic_t vmap_lazy_nr = ATOMIC_IN
static void purge_fragmented_blocks_allcpus(void);
/*
+ * Called before a call to iounmap() if the caller wants the
+ * vm_area_structs to be freed immediately.
+ */
+void set_iounmap_nonlazy(void)
+{
+ atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
+}
+
+/*
* Purges all lazily-freed vmap areas.
*
* If sync is 0 then don't purge if there is already a purge in progress.
file:692807f2228b25125e25963ab35e99777facdf8e -> file:4649929401f888e87814ae35d87a106a9db410ae
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1083,6 +1083,48 @@ static int too_many_isolated(struct zone
}
/*
+ * Returns true if the caller should wait to clean dirty/writeback pages.
+ *
+ * If we are direct reclaiming for contiguous pages and we do not reclaim
+ * everything in the list, try again and wait for writeback IO to complete.
+ * This will stall high-order allocations noticeably. Only do that
+ * when we really need to free the pages under high memory pressure.
+ */
+static inline bool should_reclaim_stall(unsigned long nr_taken,
+ unsigned long nr_freed,
+ int priority,
+ int lumpy_reclaim,
+ struct scan_control *sc)
+{
+ int lumpy_stall_priority;
+
+ /* kswapd should not stall on sync IO */
+ if (current_is_kswapd())
+ return false;
+
+ /* Only stall on lumpy reclaim */
+ if (!lumpy_reclaim)
+ return false;
+
+ /* If we have reclaimed everything on the isolated list, no stall */
+ if (nr_freed == nr_taken)
+ return false;
+
+ /*
+ * For high-order allocations, there are two stall thresholds.
+ * High-cost allocations stall immediately, whereas lower-order
+ * allocations such as stacks require the scanning
+ * priority to be much higher before stalling.
+ */
+ if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+ lumpy_stall_priority = DEF_PRIORITY;
+ else
+ lumpy_stall_priority = DEF_PRIORITY / 3;
+
+ return priority <= lumpy_stall_priority;
+}
+
+/*
* shrink_inactive_list() is a helper for shrink_zone(). It returns the number
* of reclaimed pages
*/
@@ -1176,14 +1218,9 @@ static unsigned long shrink_inactive_lis
nr_scanned += nr_scan;
nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
- /*
- * If we are direct reclaiming for contiguous pages and we do
- * not reclaim everything in the list, try again and wait
- * for IO to complete. This will stall high-order allocations
- * but that should be acceptable to the caller
- */
- if (nr_freed < nr_taken && !current_is_kswapd() &&
- lumpy_reclaim) {
+ /* Check if we should synchronously wait for writeback */
+ if (should_reclaim_stall(nr_taken, nr_freed, priority,
+ lumpy_reclaim, sc)) {
congestion_wait(BLK_RW_ASYNC, HZ/10);
/*
file:c81321f9feec1cb28eee1b7e07237017336b89d0 -> file:42d76c65e9f29719ea206887cf1c597df99ae17d
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -136,10 +136,23 @@ static void refresh_zone_stat_thresholds
int threshold;
for_each_populated_zone(zone) {
+ unsigned long max_drift, tolerate_drift;
+
threshold = calculate_threshold(zone);
for_each_online_cpu(cpu)
zone_pcp(zone, cpu)->stat_threshold = threshold;
+
+ /*
+ * Only set percpu_drift_mark if there is a danger that
+ * NR_FREE_PAGES reports that the low watermark is OK when in fact
+ * the min watermark could be breached by an allocation.
+ */
+ tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
+ max_drift = num_online_cpus() * threshold;
+ if (max_drift > tolerate_drift)
+ zone->percpu_drift_mark = high_wmark_pages(zone) +
+ max_drift;
}
}
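
The threshold logic above arms percpu_drift_mark only when the worst-case per-cpu counter error (cpus * threshold) exceeds the gap between the low and min watermarks. A worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long min_wmark = 800, low_wmark = 1000, high_wmark = 1200;
	unsigned long threshold = 64;		/* per-cpu stat threshold */
	unsigned long online_cpus = 8;

	unsigned long tolerate = low_wmark - min_wmark;	   /* 200 */
	unsigned long max_drift = online_cpus * threshold; /* 512 */

	if (max_drift > tolerate)	/* 512 > 200: drift is dangerous */
		printf("drift mark = %lu\n", high_wmark + max_drift);
	else
		printf("no drift mark needed\n");
	return 0;
}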
@@ -715,7 +728,7 @@ static void zoneinfo_show_print(struct s
"\n scanned %lu"
"\n spanned %lu"
"\n present %lu",
- zone_page_state(zone, NR_FREE_PAGES),
+ zone_nr_free_pages(zone),
min_wmark_pages(zone),
low_wmark_pages(zone),
high_wmark_pages(zone),
file:8d934dd7fd5424049a811906e0467da91da208c3 -> file:a2d298472254f41c3b14e493626b70ba8cc84d04
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -948,7 +948,7 @@ p9_fd_create_unix(struct p9_client *clie
csocket = NULL;
- if (strlen(addr) > UNIX_PATH_MAX) {
+ if (strlen(addr) >= UNIX_PATH_MAX) {
P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
addr);
err = -ENAMETOOLONG;
file:a16a2342f6bf2ee0b6a30329736c91acaff19bb9 -> file:3072272eca0e922a99996d0250121e19e9798a96
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -600,6 +600,9 @@ static unsigned int br_nf_pre_routing(un
pskb_trim_rcsum(skb, len);
+ /* BUG: Should really parse the IP options here. */
+ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+
nf_bridge_put(skb->nf_bridge);
if (!nf_bridge_alloc(skb))
return NF_DROP;
@@ -797,9 +800,11 @@ static int br_nf_dev_queue_xmit(struct s
if (skb->nfct != NULL &&
(skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb)) &&
skb->len > skb->dev->mtu &&
- !skb_is_gso(skb))
+ !skb_is_gso(skb)) {
+ /* BUG: Should really parse the IP options here. */
+ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
return ip_fragment(skb, br_dev_queue_push_xmit);
- else
+ } else
return br_dev_queue_push_xmit(skb);
}
#else
file:e8d58f33fe097186d596778c7756a867752b1eac -> file:4a192f7d93f8b3b53cfc74b4757909631597799f
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -59,6 +59,13 @@
#include <net/sock.h>
#include <net/net_namespace.h>
+/*
+ * To send the content of multiple CAN frames within TX_SETUP, or to
+ * filter CAN messages with a multiplex index within RX_SETUP, the number
+ * of different filters is limited to 256 due to the one-byte index value.
+ */
+#define MAX_NFRAMES 256
+
/* use of last_frames[index].can_dlc */
#define RX_RECV 0x40 /* received data for this element */
#define RX_THR 0x80 /* element not been sent due to throttle feature */
@@ -88,16 +95,16 @@ struct bcm_op {
struct list_head list;
int ifindex;
canid_t can_id;
- int flags;
+ u32 flags;
unsigned long frames_abs, frames_filtered;
struct timeval ival1, ival2;
struct hrtimer timer, thrtimer;
struct tasklet_struct tsklet, thrtsklet;
ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
int rx_ifindex;
- int count;
- int nframes;
- int currframe;
+ u32 count;
+ u32 nframes;
+ u32 currframe;
struct can_frame *frames;
struct can_frame *last_frames;
struct can_frame sframe;
@@ -117,7 +124,7 @@ struct bcm_sock {
struct list_head tx_ops;
unsigned long dropped_usr_msgs;
struct proc_dir_entry *bcm_proc_read;
- char procname [9]; /* pointer printed in ASCII with \0 */
+ char procname [20]; /* pointer printed in ASCII with \0 */
};
static inline struct bcm_sock *bcm_sk(const struct sock *sk)
@@ -174,7 +181,7 @@ static int bcm_proc_show(struct seq_file
seq_printf(m, "rx_op: %03X %-5s ",
op->can_id, bcm_proc_getifname(ifname, op->ifindex));
- seq_printf(m, "[%d]%c ", op->nframes,
+ seq_printf(m, "[%u]%c ", op->nframes,
(op->flags & RX_CHECK_DLC)?'d':' ');
if (op->kt_ival1.tv64)
seq_printf(m, "timeo=%lld ",
@@ -197,7 +204,7 @@ static int bcm_proc_show(struct seq_file
list_for_each_entry(op, &bo->tx_ops, list) {
- seq_printf(m, "tx_op: %03X %s [%d] ",
+ seq_printf(m, "tx_op: %03X %s [%u] ",
op->can_id,
bcm_proc_getifname(ifname, op->ifindex),
op->nframes);
@@ -282,7 +289,7 @@ static void bcm_send_to_user(struct bcm_
struct can_frame *firstframe;
struct sockaddr_can *addr;
struct sock *sk = op->sk;
- int datalen = head->nframes * CFSIZ;
+ unsigned int datalen = head->nframes * CFSIZ;
int err;
skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
@@ -467,7 +474,7 @@ rx_changed_settime:
* bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
* received data stored in op->last_frames[]
*/
-static void bcm_rx_cmp_to_index(struct bcm_op *op, int index,
+static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
const struct can_frame *rxdata)
{
/*
@@ -553,7 +560,8 @@ static enum hrtimer_restart bcm_rx_timeo
/*
* bcm_rx_do_flush - helper for bcm_rx_thr_flush
*/
-static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index)
+static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
+ unsigned int index)
{
if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
if (update)
@@ -574,7 +582,7 @@ static int bcm_rx_thr_flush(struct bcm_o
int updated = 0;
if (op->nframes > 1) {
- int i;
+ unsigned int i;
/* for MUX filter we start at index 1 */
for (i = 1; i < op->nframes; i++)
@@ -623,7 +631,7 @@ static void bcm_rx_handler(struct sk_buf
{
struct bcm_op *op = (struct bcm_op *)data;
const struct can_frame *rxframe = (struct can_frame *)skb->data;
- int i;
+ unsigned int i;
/* disable timeout */
hrtimer_cancel(&op->timer);
@@ -823,14 +831,15 @@ static int bcm_tx_setup(struct bcm_msg_h
{
struct bcm_sock *bo = bcm_sk(sk);
struct bcm_op *op;
- int i, err;
+ unsigned int i;
+ int err;
/* we need a real device to send frames */
if (!ifindex)
return -ENODEV;
- /* we need at least one can_frame */
- if (msg_head->nframes < 1)
+ /* check nframes boundaries - we need at least one can_frame */
+ if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
return -EINVAL;
/* check the given can_id */
@@ -994,6 +1003,10 @@ static int bcm_rx_setup(struct bcm_msg_h
msg_head->nframes = 0;
}
+ /* the first element contains the mux-mask => MAX_NFRAMES + 1 */
+ if (msg_head->nframes > MAX_NFRAMES + 1)
+ return -EINVAL;
+
if ((msg_head->flags & RX_RTR_FRAME) &&
((msg_head->nframes != 1) ||
(!(msg_head->can_id & CAN_RTR_FLAG))))
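
The bcm.c changes above pair the switch to unsigned nframes with explicit bounds checks, since a one-byte multiplex index can address at most 256 frames. A toy version of the TX_SETUP check (illustrative, with -1 standing in for -EINVAL):

#include <stdint.h>
#include <stdio.h>

#define MAX_NFRAMES 256U

static int check_tx_nframes(uint32_t nframes)
{
	/* at least one frame, and no more than the index byte can address */
	if (nframes < 1 || nframes > MAX_NFRAMES)
		return -1;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       check_tx_nframes(0),		/* -1: no frames */
	       check_tx_nframes(256),		/* 0: at the limit */
	       check_tx_nframes(1U << 31));	/* -1: huge value rejected */
	return 0;
}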
file:a407c3addbae432913a20dce2560d2bab1d248f2 -> file:9559afc639c138a52d0d9ce8d49b83c7ce8abfaa
--- a/net/compat.c
+++ b/net/compat.c
@@ -40,10 +40,12 @@ static inline int iov_from_user_compat_t
compat_size_t len;
if (get_user(len, &uiov32->iov_len) ||
- get_user(buf, &uiov32->iov_base)) {
- tot_len = -EFAULT;
- break;
- }
+ get_user(buf, &uiov32->iov_base))
+ return -EFAULT;
+
+ if (len > INT_MAX - tot_len)
+ len = INT_MAX - tot_len;
+
tot_len += len;
kiov->iov_base = compat_ptr(buf);
kiov->iov_len = (__kernel_size_t) len;
file:74d0ccef22df55672142cac7ed5a868366e784f8 -> file:d04cd93f22b5cf262f60fc23ec6400c2f58282b9
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1484,10 +1484,10 @@ EXPORT_SYMBOL(netif_device_attach);
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
- return ((features & NETIF_F_GEN_CSUM) ||
- ((features & NETIF_F_IP_CSUM) &&
+ return ((features & NETIF_F_NO_CSUM) ||
+ ((features & NETIF_F_V4_CSUM) &&
protocol == htons(ETH_P_IP)) ||
- ((features & NETIF_F_IPV6_CSUM) &&
+ ((features & NETIF_F_V6_CSUM) &&
protocol == htons(ETH_P_IPV6)) ||
((features & NETIF_F_FCOE_CRC) &&
protocol == htons(ETH_P_FCOE)));
@@ -2519,7 +2519,7 @@ pull:
put_page(skb_shinfo(skb)->frags[0].page);
memmove(skb_shinfo(skb)->frags,
skb_shinfo(skb)->frags + 1,
- --skb_shinfo(skb)->nr_frags);
+ --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
}
}
file:5aef51eb3d1f5fd01c2d6766e3062476f43440da -> file:abbe8fa66a51baff4bb51c35966491351837639f
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -265,7 +265,7 @@ static int ethtool_get_rxnfc(struct net_
if (info.cmd == ETHTOOL_GRXCLSRLALL) {
if (info.rule_cnt > 0) {
if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
- rule_buf = kmalloc(info.rule_cnt * sizeof(u32),
+ rule_buf = kzalloc(info.rule_cnt * sizeof(u32),
GFP_USER);
if (!rule_buf)
return -ENOMEM;
@@ -311,7 +311,7 @@ static int ethtool_get_regs(struct net_d
if (regs.len > reglen)
regs.len = reglen;
- regbuf = kmalloc(reglen, GFP_USER);
+ regbuf = kzalloc(reglen, GFP_USER);
if (!regbuf)
return -ENOMEM;
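
Both hunks switch kmalloc() to kzalloc() because these buffers are copied back to user space and the driver may fill them only partially, so untouched bytes would leak stale kernel heap contents; the memset() added to __dn_getsockopt() and the zeroed initializer in rds_notify_queue_get() further down address the same leak class. A user-space sketch of the rule; make_reply() and the 0xab fill pattern are invented for illustration:

#include <stdlib.h>
#include <string.h>

static unsigned char *make_reply(size_t len, size_t filled)
{
        unsigned char *buf = calloc(1, len);   /* like kzalloc() */

        if (!buf)
                return NULL;
        memset(buf, 0xab, filled);   /* producer fills only a prefix */
        return buf;                  /* bytes [filled, len) are zero, not stale */
}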
file:16ad45d4882b56a2c531a0a944bc31c690ff5a4b -> file:f911e665a7dbb2eb43981603439381782d98df8c
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -38,7 +38,7 @@
int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
{
- int size, err, ct;
+ int size, ct, err;
if (m->msg_namelen) {
if (mode == VERIFY_READ) {
@@ -60,14 +60,13 @@ int verify_iovec(struct msghdr *m, struc
err = 0;
for (ct = 0; ct < m->msg_iovlen; ct++) {
- err += iov[ct].iov_len;
- /*
- * Goal is not to verify user data, but to prevent returning
- * negative value, which is interpreted as errno.
- * Overflow is still possible, but it is harmless.
- */
- if (err < 0)
- return -EMSGSIZE;
+ size_t len = iov[ct].iov_len;
+
+ if (len > INT_MAX - err) {
+ len = INT_MAX - err;
+ iov[ct].iov_len = len;
+ }
+ err += len;
}
return err;
file:ec85681a7dd83765878cd6c791538a7941732178 -> file:283f4412847996317dc24f0c1f0e7aa7b617bcd4
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2575,6 +2575,10 @@ struct sk_buff *skb_segment(struct sk_bu
__copy_skb_header(nskb, skb);
nskb->mac_len = skb->mac_len;
+ /* nskb and skb might have different headroom */
+ if (nskb->ip_summed == CHECKSUM_PARTIAL)
+ nskb->csum_start += skb_headroom(nskb) - headroom;
+
skb_reset_mac_header(nskb);
skb_set_network_header(nskb, skb->mac_len);
nskb->transport_header = (nskb->network_header +
@@ -2705,7 +2709,7 @@ int skb_gro_receive(struct sk_buff **hea
return -E2BIG;
headroom = skb_headroom(p);
- nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
+ nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
if (unlikely(!nskb))
return -ENOMEM;
@@ -2726,6 +2730,7 @@ int skb_gro_receive(struct sk_buff **hea
*NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
skb_shinfo(nskb)->frag_list = p;
skb_shinfo(nskb)->gso_size = pinfo->gso_size;
+ pinfo->gso_size = 0;
skb_header_release(p);
nskb->prev = p;
file:a37debfeb1b2bcb3309d93fc5d45b5f82e202d6e -> file:e48c85f3b95d2335b60a9320cdabc948680de175
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -140,10 +140,10 @@ int sk_stream_wait_memory(struct sock *s
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk->sk_write_pending++;
- sk_wait_event(sk, &current_timeo, !sk->sk_err &&
- !(sk->sk_shutdown & SEND_SHUTDOWN) &&
- sk_stream_memory_free(sk) &&
- vm_wait);
+ sk_wait_event(sk, &current_timeo, sk->sk_err ||
+ (sk->sk_shutdown & SEND_SHUTDOWN) ||
+ (sk_stream_memory_free(sk) &&
+ !vm_wait));
sk->sk_write_pending--;
if (vm_wait) {
file:7a58c87baf172f9384fab475f8243eab58716e58 -> file:5df7b5453556bb489bb7ee8fb17ca7e4a3e03cc7
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1555,6 +1555,8 @@ static int __dn_getsockopt(struct socket
if (r_len > sizeof(struct linkinfo_dn))
r_len = sizeof(struct linkinfo_dn);
+ memset(&link, 0, sizeof(link));
+
switch(sock->state) {
case SS_CONNECTING:
link.idn_linkstate = LL_CONNECTING;
file:0e0254fd767de359769b080ff4bacdcb12c52eb4 -> file:85672e7cb69cbbc9a9b071d02501ca5eda27d261
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -296,23 +296,14 @@ static int econet_sendmsg(struct kiocb *
mutex_lock(&econet_mutex);
- if (saddr == NULL) {
- struct econet_sock *eo = ec_sk(sk);
-
- addr.station = eo->station;
- addr.net = eo->net;
- port = eo->port;
- cb = eo->cb;
- } else {
- if (msg->msg_namelen < sizeof(struct sockaddr_ec)) {
- mutex_unlock(&econet_mutex);
- return -EINVAL;
- }
- addr.station = saddr->addr.station;
- addr.net = saddr->addr.net;
- port = saddr->port;
- cb = saddr->cb;
- }
+ if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec)) {
+ mutex_unlock(&econet_mutex);
+ return -EINVAL;
+ }
+ addr.station = saddr->addr.station;
+ addr.net = saddr->addr.net;
+ port = saddr->port;
+ cb = saddr->cb;
/* Look for a device with the right network number. */
dev = net2dev_map[addr.net];
@@ -350,7 +341,6 @@ static int econet_sendmsg(struct kiocb *
eb = (struct ec_cb *)&skb->cb;
- /* BUG: saddr may be NULL */
eb->cookie = saddr->cookie;
eb->sec = *saddr;
eb->sent = ec_tx_done;
@@ -669,6 +659,9 @@ static int ec_dev_ioctl(struct socket *s
err = 0;
switch (cmd) {
case SIOCSIFADDR:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
edev = dev->ec_ptr;
if (edev == NULL) {
/* Magic up a new one. */
file:efff6a2830f8fcb895ba3a5fcf4fdd4108653783 -> file:fc28ac2463cf7027cb73fbe8ae3508ba3308f89b
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1083,6 +1083,7 @@ static int inetdev_event(struct notifier
}
ip_mc_up(in_dev);
/* fall through */
+ case NETDEV_NOTIFY_PEERS:
case NETDEV_CHANGEADDR:
/* Send gratuitous ARP to notify of link change */
if (IN_DEV_ARP_NOTIFY(in_dev)) {
file:4d50daab0c3e7824dcf2fc35637ee36d1c031c7c -> file:2ef9026526b84d3ea5a7081d734462bb78efaf0e
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -476,9 +476,8 @@ int ip_fragment(struct sk_buff *skb, int
* we can switch to copy when see the first bad fragment.
*/
if (skb_has_frags(skb)) {
- struct sk_buff *frag;
+ struct sk_buff *frag, *frag2;
int first_len = skb_pagelen(skb);
- int truesizes = 0;
if (first_len - hlen > mtu ||
((first_len - hlen) & 7) ||
@@ -491,18 +490,18 @@ int ip_fragment(struct sk_buff *skb, int
if (frag->len > mtu ||
((frag->len & 7) && frag->next) ||
skb_headroom(frag) < hlen)
- goto slow_path;
+ goto slow_path_clean;
/* Partially cloned skb? */
if (skb_shared(frag))
- goto slow_path;
+ goto slow_path_clean;
BUG_ON(frag->sk);
if (skb->sk) {
frag->sk = skb->sk;
frag->destructor = sock_wfree;
}
- truesizes += frag->truesize;
+ skb->truesize -= frag->truesize;
}
/* Everything is OK. Generate! */
@@ -512,7 +511,6 @@ int ip_fragment(struct sk_buff *skb, int
frag = skb_shinfo(skb)->frag_list;
skb_frag_list_init(skb);
skb->data_len = first_len - skb_headlen(skb);
- skb->truesize -= truesizes;
skb->len = first_len;
iph->tot_len = htons(first_len);
iph->frag_off = htons(IP_MF);
@@ -564,6 +562,15 @@ int ip_fragment(struct sk_buff *skb, int
}
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
return err;
+
+slow_path_clean:
+ skb_walk_frags(skb, frag2) {
+ if (frag2 == frag)
+ break;
+ frag2->sk = NULL;
+ frag2->destructor = NULL;
+ skb->truesize += frag2->truesize;
+ }
}
slow_path:
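
Because the fast path now debits skb->truesize fragment by fragment as ownership moves, bailing out to the slow path must undo exactly what was already applied, which is what the new slow_path_clean label does by re-walking the list up to the failing fragment. The ip6_fragment() hunk below has the same structure. A reduced sketch of the idiom; struct frag and claim_all() are invented for illustration:

#include <stddef.h>

struct frag {                 /* stands in for the skb frag list */
        struct frag *next;
        int claimed;
};

static int claim_all(struct frag *head)
{
        struct frag *f, *f2;

        for (f = head; f != NULL; f = f->next) {
                if (f->claimed)       /* cannot take this one: unwind */
                        goto clean;
                f->claimed = 1;       /* the per-fragment side effect */
        }
        return 0;

clean:
        for (f2 = head; f2 != f; f2 = f2->next)
                f2->claimed = 0;      /* undo only what was applied */
        return -1;
}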
file:5b1050a5d874e35ffd608f8e6d0bbb2f1a5204f4 -> file:6c8f6c9e5cdb7de6eb0cf82d1f5588d5ef4bd5c7
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2712,6 +2712,11 @@ slow_output:
EXPORT_SYMBOL_GPL(__ip_route_output_key);
+static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
+{
+ return NULL;
+}
+
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}
@@ -2720,7 +2725,7 @@ static struct dst_ops ipv4_dst_blackhole
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
.destroy = ipv4_dst_destroy,
- .check = ipv4_dst_check,
+ .check = ipv4_blackhole_dst_check,
.update_pmtu = ipv4_rt_blackhole_update_pmtu,
.entries = ATOMIC_INIT(0),
};
file:f1813bc7108811a50ff8284775ad053623903a2e -> file:734fe94827dc7716e05581ff3013a2c41644fcda
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -386,8 +386,6 @@ unsigned int tcp_poll(struct file *file,
*/
mask = 0;
- if (sk->sk_err)
- mask = POLLERR;
/*
* POLLHUP is certainly not done right. But poll() doesn't
@@ -451,11 +449,17 @@ unsigned int tcp_poll(struct file *file,
if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
mask |= POLLOUT | POLLWRNORM;
}
- }
+ } else
+ mask |= POLLOUT | POLLWRNORM;
if (tp->urg_data & TCP_URG_VALID)
mask |= POLLPRI;
}
+ /* This barrier is coupled with smp_wmb() in tcp_reset() */
+ smp_rmb();
+ if (sk->sk_err)
+ mask |= POLLERR;
+
return mask;
}
@@ -934,7 +938,7 @@ int tcp_sendmsg(struct kiocb *iocb, stru
goto out_err;
while (--iovlen >= 0) {
- int seglen = iov->iov_len;
+ size_t seglen = iov->iov_len;
unsigned char __user *from = iov->iov_base;
iov++;
@@ -1334,6 +1338,7 @@ int tcp_read_sock(struct sock *sk, read_
sk_eat_skb(sk, skb, 0);
if (!desc->count)
break;
+ tp->copied_seq = seq;
}
tp->copied_seq = seq;
@@ -1975,11 +1980,8 @@ adjudge_to_death:
}
}
if (sk->sk_state != TCP_CLOSE) {
- int orphan_count = percpu_counter_read_positive(
- sk->sk_prot->orphan_count);
-
sk_mem_reclaim(sk);
- if (tcp_too_many_orphans(sk, orphan_count)) {
+ if (tcp_too_many_orphans(sk, 0)) {
if (net_ratelimit())
printk(KERN_INFO "TCP: too many of orphaned "
"sockets\n");
@@ -2880,7 +2882,7 @@ void __init tcp_init(void)
{
struct sk_buff *skb = NULL;
unsigned long nr_pages, limit;
- int order, i, max_share;
+ int i, max_share, cnt;
BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
@@ -2929,31 +2931,23 @@ void __init tcp_init(void)
INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
}
- /* Try to be a bit smarter and adjust defaults depending
- * on available memory.
- */
- for (order = 0; ((1 << order) << PAGE_SHIFT) <
- (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
- order++)
- ;
- if (order >= 4) {
- tcp_death_row.sysctl_max_tw_buckets = 180000;
- sysctl_tcp_max_orphans = 4096 << (order - 4);
- sysctl_max_syn_backlog = 1024;
- } else if (order < 3) {
- tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
- sysctl_tcp_max_orphans >>= (3 - order);
- sysctl_max_syn_backlog = 128;
- }
+
+ cnt = tcp_hashinfo.ehash_size;
+
+ tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
+ sysctl_tcp_max_orphans = cnt / 2;
+ sysctl_max_syn_backlog = max(128, cnt / 256);
/* Set the pressure threshold to be a fraction of global memory that
* is up to 1/2 at 256 MB, decreasing toward zero with the amount of
- * memory, with a floor of 128 pages.
+ * memory, with a floor of 128 pages, and a ceiling that prevents an
+ * integer overflow.
*/
nr_pages = totalram_pages - totalhigh_pages;
limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
limit = max(limit, 128UL);
+ limit = min(limit, INT_MAX * 4UL / 3 / 2);
sysctl_tcp_mem[0] = limit / 4 * 3;
sysctl_tcp_mem[1] = limit;
sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
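
The new ceiling exists because sysctl_tcp_mem[2] ends up as (limit / 4 * 3) * 2, roughly 1.5 * limit, and the sysctl slots are ints; clamping limit to INT_MAX * 4 / 3 / 2 keeps the largest derived value at or below INT_MAX. The identical clamp is applied to UDP and SCTP below. A worked check of that arithmetic, runnable as ordinary C:

#include <assert.h>
#include <limits.h>

int main(void)
{
        unsigned long limit = ~0UL;   /* pretend an absurdly large machine */

        /* the clamp added above */
        if (limit > INT_MAX * 4UL / 3 / 2)
                limit = INT_MAX * 4UL / 3 / 2;

        unsigned long mem0 = limit / 4 * 3;   /* sysctl_tcp_mem[0] */
        unsigned long mem2 = mem0 * 2;        /* sysctl_tcp_mem[2] */

        assert(mem2 <= (unsigned long)INT_MAX);   /* still fits in an int */
        return 0;
}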
file:2433bcde524cee3826fe14b5d02e240860224359 -> file:ce1ce82e46b1462299c40879a40c3320d6f545fb
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3969,6 +3969,8 @@ static void tcp_reset(struct sock *sk)
default:
sk->sk_err = ECONNRESET;
}
+ /* This barrier is coupled with smp_rmb() in tcp_poll() */
+ smp_wmb();
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_error_report(sk);
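
The smp_wmb() here pairs with the smp_rmb() added to tcp_poll() above: the reset path must publish sk_err before the wakeup becomes visible, and the poller must order its reads the same way or it can observe the wakeup yet still read a stale sk_err of zero. A stand-alone sketch of the pairing, using C11 fences in place of the kernel barriers; sk_err, wakeup, and the POLLERR stand-in are invented names:

#include <stdatomic.h>

static atomic_int sk_err;    /* plays sk->sk_err */
static atomic_int wakeup;    /* plays the poll wait condition */

static void reset_side(int err)             /* tcp_reset() analogue */
{
        atomic_store_explicit(&sk_err, err, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);   /* smp_wmb() */
        atomic_store_explicit(&wakeup, 1, memory_order_relaxed);
}

static int poll_side(void)                  /* tcp_poll() analogue */
{
        int mask = 0;

        if (atomic_load_explicit(&wakeup, memory_order_relaxed)) {
                atomic_thread_fence(memory_order_acquire);   /* smp_rmb() */
                if (atomic_load_explicit(&sk_err, memory_order_relaxed))
                        mask |= 0x0008;     /* POLLERR stand-in */
        }
        return mask;
}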
file:cdb2ca7684d4ab0821ca5c1bcc2ad72fc3aa9651 -> file:57d550193df3e9d0433b1ca1e37a6f8716790a0b
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -65,18 +65,18 @@ static void tcp_write_err(struct sock *s
static int tcp_out_of_resources(struct sock *sk, int do_reset)
{
struct tcp_sock *tp = tcp_sk(sk);
- int orphans = percpu_counter_read_positive(&tcp_orphan_count);
+ int shift = 0;
/* If peer does not open window for long time, or did not transmit
* anything for long time, penalize it. */
if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
- orphans <<= 1;
+ shift++;
/* If some dubious ICMP arrived, penalize even more. */
if (sk->sk_err_soft)
- orphans <<= 1;
+ shift++;
- if (tcp_too_many_orphans(sk, orphans)) {
+ if (tcp_too_many_orphans(sk, shift)) {
if (net_ratelimit())
printk(KERN_INFO "Out of socket memory\n");
file:c322f4429fc91576ac954114658152f9f667630d -> file:31db78ca746985165ac38118aa9ebc7e80850526
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1832,12 +1832,14 @@ void __init udp_init(void)
udp_table_init(&udp_table);
/* Set the pressure threshold up by the same strategy of TCP. It is a
* fraction of global memory that is up to 1/2 at 256 MB, decreasing
- * toward zero with the amount of memory, with a floor of 128 pages.
+ * toward zero with the amount of memory, with a floor of 128 pages,
+ * and a ceiling that prevents an integer overflow.
*/
nr_pages = totalram_pages - totalhigh_pages;
limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
limit = max(limit, 128UL);
+ limit = min(limit, INT_MAX * 4UL / 3 / 2);
sysctl_udp_mem[0] = limit / 4 * 3;
sysctl_udp_mem[1] = limit;
sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;
file:74fb2eb833ec16719f5bbdf8ae379007e653abba -> file:e3a9a655e6adc4202c870a9bf7a89e38be31cc6d
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -71,7 +71,7 @@ __xfrm4_find_bundle(struct flowi *fl, st
if (xdst->u.rt.fl.oif == fl->oif && /*XXX*/
xdst->u.rt.fl.fl4_dst == fl->fl4_dst &&
xdst->u.rt.fl.fl4_src == fl->fl4_src &&
- xdst->u.rt.fl.fl4_tos == fl->fl4_tos &&
+ !((xdst->u.rt.fl.fl4_tos ^ fl->fl4_tos) & IPTOS_RT_MASK) &&
xfrm_bundle_ok(policy, xdst, fl, AF_INET, 0)) {
dst_clone(dst);
break;
@@ -83,7 +83,7 @@ __xfrm4_find_bundle(struct flowi *fl, st
static int xfrm4_get_tos(struct flowi *fl)
{
- return fl->fl4_tos;
+ return IPTOS_RT_MASK & fl->fl4_tos; /* Strip ECN bits */
}
static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
file:cd48801a8d6f465d546b61a2e9629bf6e2091714 -> file:eca3ef77f22760fa4bb0a0fbb05ec293b0409005
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -643,7 +643,7 @@ static int ip6_fragment(struct sk_buff *
if (skb_has_frags(skb)) {
int first_len = skb_pagelen(skb);
- int truesizes = 0;
+ struct sk_buff *frag2;
if (first_len - hlen > mtu ||
((first_len - hlen) & 7) ||
@@ -655,18 +655,18 @@ static int ip6_fragment(struct sk_buff *
if (frag->len > mtu ||
((frag->len & 7) && frag->next) ||
skb_headroom(frag) < hlen)
- goto slow_path;
+ goto slow_path_clean;
/* Partially cloned skb? */
if (skb_shared(frag))
- goto slow_path;
+ goto slow_path_clean;
BUG_ON(frag->sk);
if (skb->sk) {
frag->sk = skb->sk;
frag->destructor = sock_wfree;
- truesizes += frag->truesize;
}
+ skb->truesize -= frag->truesize;
}
err = 0;
@@ -697,7 +697,6 @@ static int ip6_fragment(struct sk_buff *
first_len = skb_pagelen(skb);
skb->data_len = first_len - skb_headlen(skb);
- skb->truesize -= truesizes;
skb->len = first_len;
ipv6_hdr(skb)->payload_len = htons(first_len -
sizeof(struct ipv6hdr));
@@ -760,6 +759,15 @@ static int ip6_fragment(struct sk_buff *
IPSTATS_MIB_FRAGFAILS);
dst_release(&rt->u.dst);
return err;
+
+slow_path_clean:
+ skb_walk_frags(skb, frag2) {
+ if (frag2 == frag)
+ break;
+ frag2->sk = NULL;
+ frag2->destructor = NULL;
+ skb->truesize += frag2->truesize;
+ }
}
slow_path:
file:d6fe7646a8ff7d8599c3565e6e6c9deb68732cef -> file:e307517cbddcfa3767f7777b0e69aa55973119e8
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1561,14 +1561,13 @@ out:
* i.e. Path MTU discovery
*/
-void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
- struct net_device *dev, u32 pmtu)
+static void rt6_do_pmtu_disc(struct in6_addr *daddr, struct in6_addr *saddr,
+ struct net *net, u32 pmtu, int ifindex)
{
struct rt6_info *rt, *nrt;
- struct net *net = dev_net(dev);
int allfrag = 0;
- rt = rt6_lookup(net, daddr, saddr, dev->ifindex, 0);
+ rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
if (rt == NULL)
return;
@@ -1636,6 +1635,27 @@ out:
dst_release(&rt->u.dst);
}
+void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
+ struct net_device *dev, u32 pmtu)
+{
+ struct net *net = dev_net(dev);
+
+ /*
+ * RFC 1981 states that a node "MUST reduce the size of the packets it
+ * is sending along the path" that caused the Packet Too Big message.
+ * Since it's not possible in the general case to determine which
+ * interface was used to send the original packet, we update the MTU
+ * on the interface that will be used to send future packets. We also
+ * update the MTU on the interface that received the Packet Too Big in
+ * case the original packet was forced out that interface with
+ * SO_BINDTODEVICE or similar. This is the next best thing to the
+ * correct behaviour, which would be to update the MTU on all
+ * interfaces.
+ */
+ rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0);
+ rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex);
+}
+
/*
* Misc support functions
*/
file:dd35641835f408d161a7d1c70550a15f1a0309ac -> file:b6cef980095fe745340dc78ce550ed20985c91e9
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -810,8 +810,8 @@ static int irda_bind(struct socket *sock
err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name);
if (err < 0) {
- kfree(self->ias_obj->name);
- kfree(self->ias_obj);
+ irias_delete_object(self->ias_obj);
+ self->ias_obj = NULL;
return err;
}
file:294e34d3517cb87cc191c1cfec9d7b39de30245d -> file:f7d6150e0903cb7ad0d873ed22958f0c5f97f9df
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -501,7 +501,8 @@ static void iriap_getvaluebyclass_confir
IRDA_DEBUG(4, "%s(), strlen=%d\n", __func__, value_len);
/* Make sure the string is null-terminated */
- fp[n+value_len] = 0x00;
+ if (n + value_len < skb->len)
+ fp[n + value_len] = 0x00;
IRDA_DEBUG(4, "Got string %s\n", fp+n);
/* Will truncate to IAS_MAX_STRING bytes */
file:315ead3cb926beb93550b9a89f54b8e7497eb504 -> file:cfef331a41d89c2dc986fc8f627966e9cc61f41f
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -1101,7 +1101,7 @@ int irlan_extract_param(__u8 *buf, char
memcpy(&val_len, buf+n, 2); /* To avoid alignment problems */
le16_to_cpus(&val_len); n+=2;
- if (val_len > 1016) {
+ if (val_len >= 1016) {
IRDA_DEBUG(2, "%s(), parameter length to long\n", __func__ );
return -RSP_INVALID_COMMAND_FORMAT;
}
file:fc1a20565e2d7ab3cbef897ac4ed2d5793189a62 -> file:71cd38c1a67fcfe241215effff78efba06be966c
--- a/net/irda/parameters.c
+++ b/net/irda/parameters.c
@@ -298,6 +298,8 @@ static int irda_extract_string(void *sel
p.pi = pi; /* In case handler needs to know */
p.pl = buf[1]; /* Extract length of value */
+ if (p.pl > 32)
+ p.pl = 32;
IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d\n", __func__,
p.pi, p.pl);
@@ -318,7 +320,7 @@ static int irda_extract_string(void *sel
(__u8) str[0], (__u8) str[1]);
/* Null terminate string */
- str[p.pl+1] = '\0';
+ str[p.pl] = '\0';
p.pv.c = str; /* Handler will need to take a copy */
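
Taken together, the two hunks clamp the claimed length to what the on-stack buffer can carry (32 payload bytes plus the terminator) and then write the terminator at str[p.pl] rather than one byte past it. The same clamp-and-terminate pattern as a self-contained sketch; extract_string() is invented for illustration:

#include <stddef.h>
#include <string.h>

static void extract_string(char *dst, size_t dst_size,
                           const unsigned char *src, size_t claimed)
{
        size_t len = claimed;

        if (dst_size == 0)
                return;
        if (len > dst_size - 1)
                len = dst_size - 1;   /* like clamping p.pl to 32 */
        memcpy(dst, src, len);
        dst[len] = '\0';              /* terminator stays inside dst */
}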
file:7aa4fd170104452be2d9daa1194bc313dfd759eb -> file:2da8d14c28ce93eec8f3bbbe16e1f01c3571358e
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -977,7 +977,8 @@ static int llc_ui_setsockopt(struct sock
{
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
- int rc = -EINVAL, opt;
+ unsigned int opt;
+ int rc = -EINVAL;
lock_sock(sk);
if (unlikely(level != SOL_LLC || optlen != sizeof(int)))
file:1374179bc503395533e77c7344ce3a53ab80ac5b -> file:8df3477450c2f78d113ffad5f8e20955191bb104
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1173,7 +1173,8 @@ void *nf_ct_alloc_hashtable(unsigned int
if (!hash) {
*vmalloced = 1;
printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
- hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+ hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+ PAGE_KERNEL);
}
if (hash && nulls)
file:19e98007691cce17b68e3fc29896b7dc3626a526 -> file:5a7dcdf2b1dec4570ade1b2919b65589a5288db5
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1363,7 +1363,7 @@ static int netlink_recvmsg(struct kiocb
struct netlink_sock *nlk = nlk_sk(sk);
int noblock = flags&MSG_DONTWAIT;
size_t copied;
- struct sk_buff *skb, *frag __maybe_unused = NULL;
+ struct sk_buff *skb, *data_skb;
int err;
if (flags&MSG_OOB)
@@ -1375,45 +1375,35 @@ static int netlink_recvmsg(struct kiocb
if (skb == NULL)
goto out;
+ data_skb = skb;
+
#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
if (unlikely(skb_shinfo(skb)->frag_list)) {
- bool need_compat = !!(flags & MSG_CMSG_COMPAT);
-
/*
- * If this skb has a frag_list, then here that means that
- * we will have to use the frag_list skb for compat tasks
- * and the regular skb for non-compat tasks.
+ * If this skb has a frag_list, then here that means that we
+ * will have to use the frag_list skb's data for compat tasks
+ * and the regular skb's data for normal (non-compat) tasks.
*
- * The skb might (and likely will) be cloned, so we can't
- * just reset frag_list and go on with things -- we need to
- * keep that. For the compat case that's easy -- simply get
- * a reference to the compat skb and free the regular one
- * including the frag. For the non-compat case, we need to
- * avoid sending the frag to the user -- so assign NULL but
- * restore it below before freeing the skb.
+ * If we need to send the compat skb, assign it to the
+ * 'data_skb' variable so that it will be used below for data
+ * copying. We keep 'skb' for everything else, including
+ * freeing both later.
*/
- if (need_compat) {
- struct sk_buff *compskb = skb_shinfo(skb)->frag_list;
- skb_get(compskb);
- kfree_skb(skb);
- skb = compskb;
- } else {
- frag = skb_shinfo(skb)->frag_list;
- skb_shinfo(skb)->frag_list = NULL;
- }
+ if (flags & MSG_CMSG_COMPAT)
+ data_skb = skb_shinfo(skb)->frag_list;
}
#endif
msg->msg_namelen = 0;
- copied = skb->len;
+ copied = data_skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
- skb_reset_transport_header(skb);
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ skb_reset_transport_header(data_skb);
+ err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
if (msg->msg_name) {
struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
@@ -1433,11 +1423,7 @@ static int netlink_recvmsg(struct kiocb
}
siocb->scm->creds = *NETLINK_CREDS(skb);
if (flags & MSG_TRUNC)
- copied = skb->len;
-
-#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
- skb_shinfo(skb)->frag_list = frag;
-#endif
+ copied = data_skb->len;
skb_free_datagram(sk, skb);
file:f60c0c2aacba5f1a99fe4a2fe159f43a09b78f07 -> file:519ff9d46e40fb973a192a9cec779f29ae75a286
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -67,6 +67,8 @@ static int pn_socket_create(struct net *
struct phonet_protocol *pnp;
int err;
+ if (!net_eq(net, &init_net))
+ return -EAFNOSUPPORT;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -353,6 +355,8 @@ static int phonet_rcv(struct sk_buff *sk
struct sockaddr_pn sa;
u16 len;
+ if (!net_eq(net, &init_net))
+ goto out;
/* check we have at least a full Phonet header */
if (!pskb_pull(skb, sizeof(struct phonethdr)))
goto out;
file:5f32d217535b4695667dc3e75ea3e0bcaab9a15e -> file:9cdd35e787594c2fb5fe04548a2f471d2f046cce
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -224,12 +224,13 @@ static void pipe_grant_credits(struct so
static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
{
struct pep_sock *pn = pep_sk(sk);
- struct pnpipehdr *hdr = pnp_hdr(skb);
+ struct pnpipehdr *hdr;
int wake = 0;
if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
return -EINVAL;
+ hdr = pnp_hdr(skb);
if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n",
(unsigned)hdr->data[0]);
file:5f42f30dd1682bdc2427e7b387390fe231ab305e -> file:5a2275c4ee79d8ad6b4432916af37e73982c92ea
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -246,7 +246,11 @@ static struct notifier_block phonet_devi
/* Per-namespace Phonet devices handling */
static int phonet_init_net(struct net *net)
{
- struct phonet_net *pnn = kmalloc(sizeof(*pnn), GFP_KERNEL);
+ struct phonet_net *pnn;
+
+ if (!net_eq(net, &init_net))
+ return 0;
+ pnn = kmalloc(sizeof(*pnn), GFP_KERNEL);
if (!pnn)
return -ENOMEM;
@@ -263,9 +267,13 @@ static int phonet_init_net(struct net *n
static void phonet_exit_net(struct net *net)
{
- struct phonet_net *pnn = net_generic(net, phonet_net_id);
+ struct phonet_net *pnn;
struct net_device *dev;
+ if (!net_eq(net, &init_net))
+ return;
+ pnn = net_generic(net, phonet_net_id);
+
rtnl_lock();
for_each_netdev(net, dev)
phonet_device_destroy(dev);
file:d21fd357661022d7b451d26ad93f43301f12b2ac -> file:7acab1ea4fc53ded176566ca91660151a8faddc8
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -68,6 +68,8 @@ static int addr_doit(struct sk_buff *skb
int err;
u8 pnaddr;
+ if (!net_eq(net, &init_net))
+ return -EOPNOTSUPP;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -124,12 +126,16 @@ nla_put_failure:
static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
+ struct net *net = sock_net(skb->sk);
struct phonet_device_list *pndevs;
struct phonet_device *pnd;
int dev_idx = 0, dev_start_idx = cb->args[0];
int addr_idx = 0, addr_start_idx = cb->args[1];
- pndevs = phonet_device_list(sock_net(skb->sk));
+ if (!net_eq(net, &init_net))
+ goto skip;
+
+ pndevs = phonet_device_list(net);
spin_lock_bh(&pndevs->lock);
list_for_each_entry(pnd, &pndevs->list, list) {
u8 addr;
@@ -154,6 +160,7 @@ static int getaddr_dumpit(struct sk_buff
out:
spin_unlock_bh(&pndevs->lock);
+skip:
cb->args[0] = dev_idx;
cb->args[1] = addr_idx;
file:36790122dfd4c3cb869960ddb2141cd235973e16 -> file:b442a481a12e65b3c284bf0f1883ce5dd75246e1
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -56,30 +56,17 @@ int rds_page_copy_user(struct page *page
unsigned long ret;
void *addr;
- if (to_user)
+ addr = kmap(page);
+ if (to_user) {
rds_stats_add(s_copy_to_user, bytes);
- else
+ ret = copy_to_user(ptr, addr + offset, bytes);
+ } else {
rds_stats_add(s_copy_from_user, bytes);
-
- addr = kmap_atomic(page, KM_USER0);
- if (to_user)
- ret = __copy_to_user_inatomic(ptr, addr + offset, bytes);
- else
- ret = __copy_from_user_inatomic(addr + offset, ptr, bytes);
- kunmap_atomic(addr, KM_USER0);
-
- if (ret) {
- addr = kmap(page);
- if (to_user)
- ret = copy_to_user(ptr, addr + offset, bytes);
- else
- ret = copy_from_user(addr + offset, ptr, bytes);
- kunmap(page);
- if (ret)
- return -EFAULT;
+ ret = copy_from_user(addr + offset, ptr, bytes);
}
+ kunmap(page);
- return 0;
+ return ret ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(rds_page_copy_user);
file:8dc83d2caa58d02d7036ea697a5fac65f470b564 -> file:6b09b941d3a77b220d88f288844aa15b7efa3362
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -447,7 +447,7 @@ static struct rds_rdma_op *rds_rdma_prep
goto out;
}
- if (args->nr_local > (u64)UINT_MAX) {
+ if (args->nr_local > UIO_MAXIOV) {
ret = -EMSGSIZE;
goto out;
}
file:fdff33c7b432448b43d31ce76fc3fd224156f9ab -> file:6a2654ad3628cfa103f80c40735abd8cb0782f6c
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -296,7 +296,7 @@ static int rds_still_queued(struct rds_s
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
{
struct rds_notifier *notifier;
- struct rds_rdma_notify cmsg;
+ struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
unsigned int count = 0, max_messages = ~0U;
unsigned long flags;
LIST_HEAD(copy);
file:502cce76621d07abdd624798c5c76e3802db63aa -> file:7d188bca2eec2f0533d67d8b6b1880fde0d91d7b
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -677,7 +677,7 @@ static int rose_bind(struct socket *sock
if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
return -EINVAL;
- if (addr->srose_ndigis > ROSE_MAX_DIGIS)
+ if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
return -EINVAL;
if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) {
@@ -737,7 +737,7 @@ static int rose_connect(struct socket *s
if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
return -EINVAL;
- if (addr->srose_ndigis > ROSE_MAX_DIGIS)
+ if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
return -EINVAL;
/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
file:e7f796aec657f90089a4872ab79c831b62bde467 -> file:f9fc6ec1fef66e4c9a445da367b7a3f42a826ca7
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -152,21 +152,24 @@ static int tcf_gact(struct sk_buff *skb,
static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
unsigned char *b = skb_tail_pointer(skb);
- struct tc_gact opt;
struct tcf_gact *gact = a->priv;
+ struct tc_gact opt = {
+ .index = gact->tcf_index,
+ .refcnt = gact->tcf_refcnt - ref,
+ .bindcnt = gact->tcf_bindcnt - bind,
+ .action = gact->tcf_action,
+ };
struct tcf_t t;
- opt.index = gact->tcf_index;
- opt.refcnt = gact->tcf_refcnt - ref;
- opt.bindcnt = gact->tcf_bindcnt - bind;
- opt.action = gact->tcf_action;
NLA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt);
#ifdef CONFIG_GACT_PROB
if (gact->tcfg_ptype) {
- struct tc_gact_p p_opt;
- p_opt.paction = gact->tcfg_paction;
- p_opt.pval = gact->tcfg_pval;
- p_opt.ptype = gact->tcfg_ptype;
+ struct tc_gact_p p_opt = {
+ .paction = gact->tcfg_paction,
+ .pval = gact->tcfg_pval,
+ .ptype = gact->tcfg_ptype,
+ };
+
NLA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt);
}
#endif
file:b9aaab4e03548a848c4f9bba9b527803fa602157 -> file:5e49286548383d41609ff92b51e0cc1ed2b396c4
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -205,15 +205,16 @@ static int tcf_mirred_dump(struct sk_buf
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_mirred *m = a->priv;
- struct tc_mirred opt;
+ struct tc_mirred opt = {
+ .index = m->tcf_index,
+ .action = m->tcf_action,
+ .refcnt = m->tcf_refcnt - ref,
+ .bindcnt = m->tcf_bindcnt - bind,
+ .eaction = m->tcfm_eaction,
+ .ifindex = m->tcfm_ifindex,
+ };
struct tcf_t t;
- opt.index = m->tcf_index;
- opt.action = m->tcf_action;
- opt.refcnt = m->tcf_refcnt - ref;
- opt.bindcnt = m->tcf_bindcnt - bind;
- opt.eaction = m->tcfm_eaction;
- opt.ifindex = m->tcfm_ifindex;
NLA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt);
t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install);
t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse);
file:d885ba311564d9a766fce6eac37edbaf0ed82b71 -> file:047c234bfabf83f2f1713e465f03b61296e6b125
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -240,7 +240,7 @@ static int tcf_nat(struct sk_buff *skb,
iph->saddr = new_addr;
inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
- 1);
+ 0);
break;
}
default:
@@ -261,40 +261,29 @@ static int tcf_nat_dump(struct sk_buff *
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_nat *p = a->priv;
- struct tc_nat *opt;
+ struct tc_nat opt = {
+ .old_addr = p->old_addr,
+ .new_addr = p->new_addr,
+ .mask = p->mask,
+ .flags = p->flags,
+
+ .index = p->tcf_index,
+ .action = p->tcf_action,
+ .refcnt = p->tcf_refcnt - ref,
+ .bindcnt = p->tcf_bindcnt - bind,
+ };
struct tcf_t t;
- int s;
- s = sizeof(*opt);
-
- /* netlink spinlocks held above us - must use ATOMIC */
- opt = kzalloc(s, GFP_ATOMIC);
- if (unlikely(!opt))
- return -ENOBUFS;
-
- opt->old_addr = p->old_addr;
- opt->new_addr = p->new_addr;
- opt->mask = p->mask;
- opt->flags = p->flags;
-
- opt->index = p->tcf_index;
- opt->action = p->tcf_action;
- opt->refcnt = p->tcf_refcnt - ref;
- opt->bindcnt = p->tcf_bindcnt - bind;
-
- NLA_PUT(skb, TCA_NAT_PARMS, s, opt);
+ NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt);
t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
NLA_PUT(skb, TCA_NAT_TM, sizeof(t), &t);
- kfree(opt);
-
return skb->len;
nla_put_failure:
nlmsg_trim(skb, b);
- kfree(opt);
return -1;
}
file:723964c3ee4ff78c4e442db790d960b261fd04df -> file:e02a4d04d16a61a7232075d9d9582ea285180f22
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -340,22 +340,19 @@ tcf_act_police_dump(struct sk_buff *skb,
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_police *police = a->priv;
- struct tc_police opt;
+ struct tc_police opt = {
+ .index = police->tcf_index,
+ .action = police->tcf_action,
+ .mtu = police->tcfp_mtu,
+ .burst = police->tcfp_burst,
+ .refcnt = police->tcf_refcnt - ref,
+ .bindcnt = police->tcf_bindcnt - bind,
+ };
- opt.index = police->tcf_index;
- opt.action = police->tcf_action;
- opt.mtu = police->tcfp_mtu;
- opt.burst = police->tcfp_burst;
- opt.refcnt = police->tcf_refcnt - ref;
- opt.bindcnt = police->tcf_bindcnt - bind;
if (police->tcfp_R_tab)
opt.rate = police->tcfp_R_tab->rate;
- else
- memset(&opt.rate, 0, sizeof(opt.rate));
if (police->tcfp_P_tab)
opt.peakrate = police->tcfp_P_tab->rate;
- else
- memset(&opt.peakrate, 0, sizeof(opt.peakrate));
NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
if (police->tcfp_result)
NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result);
file:8daa1ebc7413b971dafde75606af039d52a7e83e -> file:41c8a445f1f23f616676466246f1b4a2141c5f37
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -163,13 +163,14 @@ static inline int tcf_simp_dump(struct s
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_defact *d = a->priv;
- struct tc_defact opt;
+ struct tc_defact opt = {
+ .index = d->tcf_index,
+ .refcnt = d->tcf_refcnt - ref,
+ .bindcnt = d->tcf_bindcnt - bind,
+ .action = d->tcf_action,
+ };
struct tcf_t t;
- opt.index = d->tcf_index;
- opt.refcnt = d->tcf_refcnt - ref;
- opt.bindcnt = d->tcf_bindcnt - bind;
- opt.action = d->tcf_action;
NLA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt);
NLA_PUT_STRING(skb, TCA_DEF_DATA, d->tcfd_defdata);
t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
file:4ab916b8074be28daea9fe9f329b4b3b460132c7 -> file:1df16d8155ef3c431ba6aa4cefe15cf0c0f15fb7
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -147,13 +147,14 @@ static inline int tcf_skbedit_dump(struc
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_skbedit *d = a->priv;
- struct tc_skbedit opt;
+ struct tc_skbedit opt = {
+ .index = d->tcf_index,
+ .refcnt = d->tcf_refcnt - ref,
+ .bindcnt = d->tcf_bindcnt - bind,
+ .action = d->tcf_action,
+ };
struct tcf_t t;
- opt.index = d->tcf_index;
- opt.refcnt = d->tcf_refcnt - ref;
- opt.bindcnt = d->tcf_bindcnt - bind;
- opt.action = d->tcf_action;
NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt);
if (d->flags & SKBEDIT_F_PRIORITY)
NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
file:4ae6aa562f2bbfd6b90e7d40a43eba1ced694ad3 -> file:30280017b05505fe3630b4451cca547309c49fae
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -330,6 +330,24 @@ void netif_carrier_off(struct net_device
}
EXPORT_SYMBOL(netif_carrier_off);
+/**
+ * netif_notify_peers - notify network peers about existence of @dev
+ * @dev: network device
+ *
+ * Generate traffic such that interested network peers are aware of
+ * @dev, such as by generating a gratuitous ARP. This may be used when
+ * a device wants to inform the rest of the network about some sort of
+ * reconfiguration such as a failover event or virtual machine
+ * migration.
+ */
+void netif_notify_peers(struct net_device *dev)
+{
+ rtnl_lock();
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
+ rtnl_unlock();
+}
+EXPORT_SYMBOL(netif_notify_peers);
+
/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
under all circumstances. It is difficult to invent anything faster or
cheaper.
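
A hypothetical call site, to show how the new helper is meant to be used by a driver after a failover or migration event, which the NETDEV_NOTIFY_PEERS case added to inetdev_event() above then turns into a gratuitous ARP; struct my_priv and my_vnic_resume() are invented names, not from the patch:

#include <linux/netdevice.h>

struct my_priv {                      /* invented for illustration */
        struct net_device *netdev;
};

static int my_vnic_resume(struct my_priv *priv)
{
        /* ...reprogram hardware, repost rx buffers... */

        /* Ask peers to relearn our location; netif_notify_peers()
         * takes rtnl_lock itself, so it must not already be held. */
        netif_notify_peers(priv->netdev);
        return 0;
}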
file:5cbda8f1ddfd2f4025a66826cb2aa143cb9e6b06 -> file:d494100956af8af2e63a029e39ecd1c638bb9619
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -91,7 +91,6 @@ struct sctp_packet *sctp_packet_config(s
SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__,
packet, vtag);
- sctp_packet_reset(packet);
packet->vtag = vtag;
if (ecn_capable && sctp_packet_empty(packet)) {
file:612dc878e05c9aaab9970d322e5ba2798f91bd6a -> file:619f96568e8e88bea2ce22186841afe8c8986311
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1157,7 +1157,8 @@ SCTP_STATIC __init int sctp_init(void)
/* Set the pressure threshold to be a fraction of global memory that
* is up to 1/2 at 256 MB, decreasing toward zero with the amount of
- * memory, with a floor of 128 pages.
+ * memory, with a floor of 128 pages, and a ceiling that prevents an
+ * integer overflow.
* Note this initializes the data in sctpv6_prot too
* Unabashedly stolen from tcp_init
*/
@@ -1165,6 +1166,7 @@ SCTP_STATIC __init int sctp_init(void)
limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
limit = max(limit, 128UL);
+ limit = min(limit, INT_MAX * 4UL / 3 / 2);
sysctl_sctp_mem[0] = limit / 4 * 3;
sysctl_sctp_mem[1] = limit;
sysctl_sctp_mem[2] = sysctl_sctp_mem[0] * 2;
file:ed84099da26d0e03f1b249082ad38dd9ee9b86c0 -> file:caff26a3653861f3a4bdd28a5a47656e83db34eb
--- a/net/socket.c
+++ b/net/socket.c
@@ -1687,6 +1687,8 @@ SYSCALL_DEFINE6(sendto, int, fd, void __
struct iovec iov;
int fput_needed;
+ if (len > INT_MAX)
+ len = INT_MAX;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
@@ -1744,6 +1746,8 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void
int err, err2;
int fput_needed;
+ if (size > INT_MAX)
+ size = INT_MAX;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
file:2370ab49dd4b16eed44550cba208885c08b846e3 -> file:4c327008d0d11a454e73c07d8fd4584d5ed84e04
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -717,17 +717,18 @@ gss_pipe_release(struct inode *inode)
struct rpc_inode *rpci = RPC_I(inode);
struct gss_upcall_msg *gss_msg;
+restart:
spin_lock(&inode->i_lock);
- while (!list_empty(&rpci->in_downcall)) {
+ list_for_each_entry(gss_msg, &rpci->in_downcall, list) {
- gss_msg = list_entry(rpci->in_downcall.next,
- struct gss_upcall_msg, list);
+ if (!list_empty(&gss_msg->msg.list))
+ continue;
gss_msg->msg.errno = -EPIPE;
atomic_inc(&gss_msg->count);
__gss_unhash_msg(gss_msg);
spin_unlock(&inode->i_lock);
gss_release_msg(gss_msg);
- spin_lock(&inode->i_lock);
+ goto restart;
}
spin_unlock(&inode->i_lock);
file:27a23785a50d3aea8e4e1e8aa78e8949d9c46c72 -> file:ea1e6de3d6d9a54e68cffc431d0c3e8828d94964
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -47,7 +47,7 @@ static void rpc_purge_list(struct rpc_in
return;
do {
msg = list_entry(head->next, struct rpc_pipe_msg, list);
- list_del(&msg->list);
+ list_del_init(&msg->list);
msg->errno = err;
destroy_msg(msg);
} while (!list_empty(head));
@@ -207,7 +207,7 @@ rpc_pipe_release(struct inode *inode, st
if (msg != NULL) {
spin_lock(&inode->i_lock);
msg->errno = -EAGAIN;
- list_del(&msg->list);
+ list_del_init(&msg->list);
spin_unlock(&inode->i_lock);
rpci->ops->destroy_msg(msg);
}
@@ -267,7 +267,7 @@ rpc_pipe_read(struct file *filp, char __
if (res < 0 || msg->len == msg->copied) {
filp->private_data = NULL;
spin_lock(&inode->i_lock);
- list_del(&msg->list);
+ list_del_init(&msg->list);
spin_unlock(&inode->i_lock);
rpci->ops->destroy_msg(msg);
}
file:fc820cd7545309645c018da6a29d170bc67ad0f2 -> file:065dc6677e90e606205112d0aa846b549c4e2284
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -671,6 +671,7 @@ static int unix_autobind(struct socket *
static u32 ordernum = 1;
struct unix_address *addr;
int err;
+ unsigned int retries = 0;
mutex_lock(&u->readlock);
@@ -696,9 +697,17 @@ retry:
if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
addr->hash)) {
spin_unlock(&unix_table_lock);
- /* Sanity yield. It is unusual case, but yet... */
- if (!(ordernum&0xFF))
- yield();
+ /*
+ * __unix_find_socket_byname() may take long time if many names
+ * are already in use.
+ */
+ cond_resched();
+ /* Give up if all names seem to be in use. */
+ if (retries++ == 0xFFFFF) {
+ err = -ENOSPC;
+ kfree(addr);
+ goto out;
+ }
goto retry;
}
addr->hash ^= sk->sk_type;
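
The retry counter turns an unbounded scan over the 5-hex-digit autobind namespace into a bounded one: after 0xFFFFF failed probes every name has effectively been tried, so the bind fails with -ENOSPC instead of spinning (the cond_resched() covers the case where each probe itself is slow). A reduced sketch of the loop shape; pick_name() and in_use() are invented, with in_use() playing the part of __unix_find_socket_byname():

#include <errno.h>

static int pick_name(int (*in_use)(unsigned int))
{
        static unsigned int ordernum = 1;
        unsigned int retries = 0;

        while (in_use(ordernum & 0xFFFFF)) {
                ordernum++;
                /* give up once the whole namespace has been probed */
                if (retries++ == 0xFFFFF)
                        return -ENOSPC;
        }
        return (int)(ordernum & 0xFFFFF);
}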
file:0d862482def8e1685549d083263a8fab6187950a -> file:ec9a9d40a6c667ce3b8f5f4ed168508c98d457eb
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -43,10 +43,10 @@ void cfg80211_send_rx_auth(struct net_de
}
}
- WARN_ON(!done);
-
- nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL);
- cfg80211_sme_rx_auth(dev, buf, len);
+ if (done) {
+ nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL);
+ cfg80211_sme_rx_auth(dev, buf, len);
+ }
wdev_unlock(wdev);
}
file:e5f92ee758f44226999112603a4f3d59a23f222f -> file:91ec925ebdbfb48f6cadb6043e3616e2477a0362
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -270,6 +270,7 @@ struct cfg80211_bss *cfg80211_get_bss(st
{
struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
struct cfg80211_internal_bss *bss, *res = NULL;
+ unsigned long now = jiffies;
spin_lock_bh(&dev->bss_lock);
@@ -278,6 +279,10 @@ struct cfg80211_bss *cfg80211_get_bss(st
continue;
if (channel && bss->pub.channel != channel)
continue;
+ /* Don't get expired BSS structs */
+ if (time_after(now, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE) &&
+ !atomic_read(&bss->hold))
+ continue;
if (is_bss(&bss->pub, bssid, ssid, ssid_len)) {
res = bss;
kref_get(&res->ref);
file:6a60c5a2eac83401f197aa892a116473e9874046 -> file:62cfc0cf55e2fee476b8cae97ddd2f0bf19171a0
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -1358,6 +1358,9 @@ int cfg80211_wext_giwessid(struct net_de
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
+ data->flags = 0;
+ data->length = 0;
+
switch (wdev->iftype) {
case NL80211_IFTYPE_ADHOC:
return cfg80211_ibss_wext_giwessid(dev, info, data, ssid);
file:60fe57761ca94bb5d9faf0638c00d373618301e2 -> file:a2e4c60ac23904da29d40fa05f074dcd490e5618
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -854,6 +854,22 @@ static int ioctl_standard_iw_point(struc
}
}
+ if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
+ /*
+ * If this is a GET, but not NOMAX, it means that the extra
+ * data is not bounded by userspace, but by max_tokens. Thus
+ * set the length to max_tokens. This matches the extra data
+ * allocation.
+ * The driver should fill it with the number of tokens it
+ * provided, and it may check iwp->length rather than having
+ * knowledge of max_tokens. If the driver doesn't change the
+ * iwp->length, this ioctl just copies back max_token tokens
+ * filled with zeroes. Hopefully the driver isn't claiming
+ * them to be valid data.
+ */
+ iwp->length = descr->max_tokens;
+ }
+
err = handler(dev, info, (union iwreq_data *) iwp, extra);
iwp->length += essid_compat;
@@ -1013,7 +1029,7 @@ static int ioctl_private_iw_point(struct
} else if (!iwp->pointer)
return -EFAULT;
- extra = kmalloc(extra_size, GFP_KERNEL);
+ extra = kzalloc(extra_size, GFP_KERNEL);
if (!extra)
return -ENOMEM;
file:7fa9c7ad3d3b5b7efbe4c612511425628900a138 -> file:d00681646d1de54b31101ae8bbab2a0a209cc4fa
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -81,6 +81,41 @@ struct compat_x25_subscrip_struct {
};
#endif
+
+int x25_parse_address_block(struct sk_buff *skb,
+ struct x25_address *called_addr,
+ struct x25_address *calling_addr)
+{
+ unsigned char len;
+ int needed;
+ int rc;
+
+ if (skb->len < 1) {
+ /* packet has no address block */
+ rc = 0;
+ goto empty;
+ }
+
+ len = *skb->data;
+ needed = 1 + (len >> 4) + (len & 0x0f);
+
+ if (skb->len < needed) {
+ /* packet is too short to hold the addresses it claims
+ to hold */
+ rc = -1;
+ goto empty;
+ }
+
+ return x25_addr_ntoa(skb->data, called_addr, calling_addr);
+
+empty:
+ *called_addr->x25_addr = 0;
+ *calling_addr->x25_addr = 0;
+
+ return rc;
+}
+
+
int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr,
struct x25_address *calling_addr)
{
@@ -871,16 +906,26 @@ int x25_rx_call_request(struct sk_buff *
/*
* Extract the X.25 addresses and convert them to ASCII strings,
* and remove them.
+ *
+ * Address block is mandatory in call request packets
*/
- addr_len = x25_addr_ntoa(skb->data, &source_addr, &dest_addr);
+ addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr);
+ if (addr_len <= 0)
+ goto out_clear_request;
skb_pull(skb, addr_len);
/*
* Get the length of the facilities, skip past them for the moment
* get the call user data because this is needed to determine
* the correct listener
+ *
+ * Facilities length is mandatory in call request packets
*/
+ if (skb->len < 1)
+ goto out_clear_request;
len = skb->data[0] + 1;
+ if (skb->len < len)
+ goto out_clear_request;
skb_pull(skb,len);
/*
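
x25_parse_address_block() exists so callers validate the length byte's claim against the actual packet before x25_addr_ntoa() walks the address digits; the rx_call_request hunk applies the same "length field must fit inside skb->len" rule to the facilities block. A user-space rendering of the check, assuming only the layout visible in the function above (one header byte whose two nibbles give the byte budget the addresses claim):

#include <stddef.h>

static int parse_addr_block(const unsigned char *buf, size_t len)
{
        size_t needed;

        if (len < 1)
                return 0;    /* no address block at all */

        /* the bytes the header claims, as computed in the patch */
        needed = 1 + (buf[0] >> 4) + (buf[0] & 0x0f);
        if (len < needed)
                return -1;   /* claim exceeds the packet */

        return (int)needed;  /* bytes consumed */
}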
file:a21f6646eb3a85cd1275708ba53960ed085402da -> file:804afd3a1f7a3c1833c914f6c9c6addde04dbd57
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -35,7 +35,7 @@ int x25_parse_facilities(struct sk_buff
struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
{
unsigned char *p = skb->data;
- unsigned int len = *p++;
+ unsigned int len;
*vc_fac_mask = 0;
@@ -50,9 +50,19 @@ int x25_parse_facilities(struct sk_buff
memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae));
memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae));
+ if (skb->len < 1)
+ return 0;
+
+ len = *p++;
+
+ if (len >= skb->len)
+ return -1;
+
while (len > 0) {
switch (*p & X25_FAC_CLASS_MASK) {
case X25_FAC_CLASS_A:
+ if (len < 2)
+ return 0;
switch (*p) {
case X25_FAC_REVERSE:
if((p[1] & 0x81) == 0x81) {
@@ -96,6 +106,8 @@ int x25_parse_facilities(struct sk_buff
len -= 2;
break;
case X25_FAC_CLASS_B:
+ if (len < 3)
+ return 0;
switch (*p) {
case X25_FAC_PACKET_SIZE:
facilities->pacsize_in = p[1];
@@ -117,6 +129,8 @@ int x25_parse_facilities(struct sk_buff
len -= 3;
break;
case X25_FAC_CLASS_C:
+ if (len < 4)
+ return 0;
printk(KERN_DEBUG "X.25: unknown facility %02X, "
"values %02X, %02X, %02X\n",
p[0], p[1], p[2], p[3]);
@@ -124,26 +138,26 @@ int x25_parse_facilities(struct sk_buff
len -= 4;
break;
case X25_FAC_CLASS_D:
+ if (len < p[1] + 2)
+ return 0;
switch (*p) {
case X25_FAC_CALLING_AE:
- if (p[1] > X25_MAX_DTE_FACIL_LEN)
- break;
+ if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
+ return 0;
dte_facs->calling_len = p[2];
memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
*vc_fac_mask |= X25_MASK_CALLING_AE;
break;
case X25_FAC_CALLED_AE:
- if (p[1] > X25_MAX_DTE_FACIL_LEN)
- break;
+ if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
+ return 0;
dte_facs->called_len = p[2];
memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
*vc_fac_mask |= X25_MASK_CALLED_AE;
break;
default:
printk(KERN_DEBUG "X.25: unknown facility %02X,"
- "length %d, values %02X, %02X, "
- "%02X, %02X\n",
- p[0], p[1], p[2], p[3], p[4], p[5]);
+ "length %d\n", p[0], p[1]);
break;
}
len -= p[1] + 2;
@@ -247,6 +261,8 @@ int x25_negotiate_facilities(struct sk_b
memcpy(new, ours, sizeof(*new));
len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
+ if (len < 0)
+ return len;
/*
* They want reverse charging, we won't accept it.
file:7d7c3abf38b573fa64f1bb35026dc5bf80cdcd3c -> file:88d7652cf166be30c583e40ff3c81a923478447b
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -89,6 +89,7 @@ static int x25_queue_rx_frame(struct soc
static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
struct x25_address source_addr, dest_addr;
+ int len;
switch (frametype) {
case X25_CALL_ACCEPTED: {
@@ -106,11 +107,19 @@ static int x25_state1_machine(struct soc
* Parse the data in the frame.
*/
skb_pull(skb, X25_STD_MIN_LEN);
- skb_pull(skb, x25_addr_ntoa(skb->data, &source_addr, &dest_addr));
- skb_pull(skb,
- x25_parse_facilities(skb, &x25->facilities,
+
+ len = x25_parse_address_block(skb, &source_addr,
+ &dest_addr);
+ if (len > 0)
+ skb_pull(skb, len);
+
+ len = x25_parse_facilities(skb, &x25->facilities,
&x25->dte_facilities,
- &x25->vc_facil_mask));
+ &x25->vc_facil_mask);
+ if (len > 0)
+ skb_pull(skb, len);
+ else
+ return -1;
/*
* Copy any Call User Data.
*/
file:67d59c7a18dc57b1580a833ed37e832eff227c85 -> file:5325423ceab483833c8f09efb37a336aaa8f9588
--- a/scripts/mkmakefile
+++ b/scripts/mkmakefile
@@ -44,7 +44,9 @@ all:
Makefile:;
-\$(all) %/: all
+\$(all): all
@:
+%/: all
+ @:
EOF
file:1cad4c7b1c337ba9e3a4db0066a7776937f12c44 -> file:b0bd910b1de7e0a4dc4e4db20dd51fec7118310e
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1259,6 +1259,7 @@ long keyctl_session_to_parent(void)
keyring_r = NULL;
me = current;
+ rcu_read_lock();
write_lock_irq(&tasklist_lock);
parent = me->real_parent;
@@ -1291,7 +1292,8 @@ long keyctl_session_to_parent(void)
goto not_permitted;
/* the keyrings must have the same UID */
- if (pcred ->tgcred->session_keyring->uid != mycred->euid ||
+ if ((pcred->tgcred->session_keyring &&
+ pcred->tgcred->session_keyring->uid != mycred->euid) ||
mycred->tgcred->session_keyring->uid != mycred->euid)
goto not_permitted;
@@ -1313,6 +1315,7 @@ long keyctl_session_to_parent(void)
set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME);
write_unlock_irq(&tasklist_lock);
+ rcu_read_unlock();
if (oldcred)
put_cred(oldcred);
return 0;
@@ -1321,6 +1324,7 @@ already_same:
ret = 0;
not_permitted:
write_unlock_irq(&tasklist_lock);
+ rcu_read_unlock();
put_cred(cred);
return ret;
file:a8b7fabe645ef1d0bf7fc82f4d15e4cd7024b1ed -> file:7834a5438f756b8aab0efd6fe77e93bb46820a1f
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -31,6 +31,7 @@
/* max number of user-defined controls */
#define MAX_USER_CONTROLS 32
+#define MAX_CONTROL_COUNT 1028
struct snd_kctl_ioctl {
struct list_head list; /* list of all ioctls */
@@ -190,6 +191,10 @@ static struct snd_kcontrol *snd_ctl_new(
if (snd_BUG_ON(!control || !control->count))
return NULL;
+
+ if (control->count > MAX_CONTROL_COUNT)
+ return NULL;
+
kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL);
if (kctl == NULL) {
snd_printk(KERN_ERR "Cannot allocate control instance\n");
file:7ba779d9e2e797c55e40d629c23e7fc67c2779f9 -> file:e6d2d974d23143e42b24fbd1fff56f6cc2e23eb6
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -972,6 +972,10 @@ static int snd_pcm_do_pause(struct snd_p
{
if (substream->runtime->trigger_master != substream)
return 0;
+ /* some drivers might use hw_ptr to recover from the pause -
+ update the hw_ptr now */
+ if (push)
+ snd_pcm_update_hw_ptr(substream);
/* The jiffies check in snd_pcm_update_hw_ptr*() is done by
* a delta between the current jiffies, this gives a large enough
* delta, effectively to skip the check once.
file:70d6f25ba526147d50148cd04fff2f9bf612d0dd -> file:e4c12a1ee30448b78df95e17aedda87832b52314
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -530,13 +530,15 @@ static int snd_rawmidi_release(struct in
{
struct snd_rawmidi_file *rfile;
struct snd_rawmidi *rmidi;
+ struct module *module;
rfile = file->private_data;
rmidi = rfile->rmidi;
rawmidi_release_priv(rfile);
kfree(rfile);
+ module = rmidi->card->module;
snd_card_file_remove(rmidi->card, file);
- module_put(rmidi->card->module);
+ module_put(module);
return 0;
}
file:d0d721c22eacca6d4379dab12f9fc4dd57238eaa -> file:1f133fe4228bba74ecaf4fccef895b470650549d
--- a/sound/core/seq/oss/seq_oss_init.c
+++ b/sound/core/seq/oss/seq_oss_init.c
@@ -280,13 +280,10 @@ snd_seq_oss_open(struct file *file, int
return 0;
_error:
- snd_seq_oss_writeq_delete(dp->writeq);
- snd_seq_oss_readq_delete(dp->readq);
snd_seq_oss_synth_cleanup(dp);
snd_seq_oss_midi_cleanup(dp);
- delete_port(dp);
delete_seq_queue(dp->queue);
- kfree(dp);
+ delete_port(dp);
return rc;
}
@@ -349,8 +346,10 @@ create_port(struct seq_oss_devinfo *dp)
static int
delete_port(struct seq_oss_devinfo *dp)
{
- if (dp->port < 0)
+ if (dp->port < 0) {
+ kfree(dp);
return 0;
+ }
debug_printk(("delete_port %i\n", dp->port));
return snd_seq_event_port_detach(dp->cseq, dp->port);
file:168af67d938e1415993b29eb166763327ec0048b -> file:92626f3aab2dbc9f4c0c7b16f098056f25854ba1
--- a/sound/pci/emu10k1/emu10k1.c
+++ b/sound/pci/emu10k1/emu10k1.c
@@ -52,6 +52,7 @@ static int max_synth_voices[SNDRV_CARDS]
static int max_buffer_size[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 128};
static int enable_ir[SNDRV_CARDS];
static uint subsystem[SNDRV_CARDS]; /* Force card subsystem model */
+static uint delay_pcm_irq[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 2};
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for the EMU10K1 soundcard.");
@@ -73,6 +74,8 @@ module_param_array(enable_ir, bool, NULL
MODULE_PARM_DESC(enable_ir, "Enable IR.");
module_param_array(subsystem, uint, NULL, 0444);
MODULE_PARM_DESC(subsystem, "Force card subsystem model.");
+module_param_array(delay_pcm_irq, uint, NULL, 0444);
+MODULE_PARM_DESC(delay_pcm_irq, "Delay PCM interrupt by specified number of samples (default 0).");
/*
* Class 0401: 1102:0008 (rev 00) Subsystem: 1102:1001 -> Audigy2 Value Model:SB0400
*/
@@ -127,6 +130,7 @@ static int __devinit snd_card_emu10k1_pr
&emu)) < 0)
goto error;
card->private_data = emu;
+ emu->delay_pcm_irq = delay_pcm_irq[dev] & 0x1f;
if ((err = snd_emu10k1_pcm(emu, 0, NULL)) < 0)
goto error;
if ((err = snd_emu10k1_pcm_mic(emu, 1, NULL)) < 0)
file:55b83ef73c630e83b2d098a45cce1f570923df7f -> file:622bace148e3c4e5e4de3efa1936b56245004c69
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -332,7 +332,7 @@ static void snd_emu10k1_pcm_init_voice(s
evoice->epcm->ccca_start_addr = start_addr + ccis;
if (extra) {
start_addr += ccis;
- end_addr += ccis;
+ end_addr += ccis + emu->delay_pcm_irq;
}
if (stereo && !extra) {
snd_emu10k1_ptr_write(emu, CPF, voice, CPF_STEREO_MASK);
@@ -360,7 +360,9 @@ static void snd_emu10k1_pcm_init_voice(s
/* Assumption that PT is already 0 so no harm overwriting */
snd_emu10k1_ptr_write(emu, PTRX, voice, (send_amount[0] << 8) | send_amount[1]);
snd_emu10k1_ptr_write(emu, DSL, voice, end_addr | (send_amount[3] << 24));
- snd_emu10k1_ptr_write(emu, PSST, voice, start_addr | (send_amount[2] << 24));
+ snd_emu10k1_ptr_write(emu, PSST, voice,
+ (start_addr + (extra ? emu->delay_pcm_irq : 0)) |
+ (send_amount[2] << 24));
if (emu->card_capabilities->emu_model)
pitch_target = PITCH_48000; /* Disable interpolators on emu1010 card */
else
@@ -732,6 +734,23 @@ static void snd_emu10k1_playback_stop_vo
snd_emu10k1_ptr_write(emu, IP, voice, 0);
}
+static inline void snd_emu10k1_playback_mangle_extra(struct snd_emu10k1 *emu,
+ struct snd_emu10k1_pcm *epcm,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_runtime *runtime)
+{
+ unsigned int ptr, period_pos;
+
+ /* try to synchronize the current position for the interrupt
+ source voice */
+ period_pos = runtime->status->hw_ptr - runtime->hw_ptr_interrupt;
+ period_pos %= runtime->period_size;
+ ptr = snd_emu10k1_ptr_read(emu, CCCA, epcm->extra->number);
+ ptr &= ~0x00ffffff;
+ ptr |= epcm->ccca_start_addr + period_pos;
+ snd_emu10k1_ptr_write(emu, CCCA, epcm->extra->number, ptr);
+}
+
static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream,
int cmd)
{
@@ -753,6 +772,8 @@ static int snd_emu10k1_playback_trigger(
/* follow thru */
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
case SNDRV_PCM_TRIGGER_RESUME:
+ if (cmd == SNDRV_PCM_TRIGGER_PAUSE_RELEASE)
+ snd_emu10k1_playback_mangle_extra(emu, epcm, substream, runtime);
mix = &emu->pcm_mixer[substream->number];
snd_emu10k1_playback_prepare_voice(emu, epcm->voices[0], 1, 0, mix);
snd_emu10k1_playback_prepare_voice(emu, epcm->voices[1], 0, 0, mix);
@@ -869,8 +890,9 @@ static snd_pcm_uframes_t snd_emu10k1_pla
#endif
/*
printk(KERN_DEBUG
- "ptr = 0x%x, buffer_size = 0x%x, period_size = 0x%x\n",
- ptr, runtime->buffer_size, runtime->period_size);
+ "ptr = 0x%lx, buffer_size = 0x%lx, period_size = 0x%lx\n",
+ (long)ptr, (long)runtime->buffer_size,
+ (long)runtime->period_size);
*/
return ptr;
}
file:6a47672f930aedfe67163ce56a0e5a726470e734 -> file:7c3ce5f379f2ba5b5023170748119a6b0033e763
--- a/sound/pci/emu10k1/memory.c
+++ b/sound/pci/emu10k1/memory.c
@@ -309,8 +309,10 @@ snd_emu10k1_alloc_pages(struct snd_emu10
if (snd_BUG_ON(!hdr))
return NULL;
+ idx = runtime->period_size >= runtime->buffer_size ?
+ (emu->delay_pcm_irq * 2) : 0;
mutex_lock(&hdr->block_mutex);
- blk = search_empty(emu, runtime->dma_bytes);
+ blk = search_empty(emu, runtime->dma_bytes + idx);
if (blk == NULL) {
mutex_unlock(&hdr->block_mutex);
return NULL;
file:bd0794e09a8947d6bfc6f298501eba5ee35040e9 -> file:3736bc4fda599bd352a68846203c3a5bf8f520f7
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -3510,6 +3510,7 @@ static struct snd_pci_quirk ad1984_cfg_t
/* Lenovo Thinkpad T61/X61 */
SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1984_THINKPAD),
SND_PCI_QUIRK(0x1028, 0x0214, "Dell T3400", AD1984_DELL_DESKTOP),
+ SND_PCI_QUIRK(0x1028, 0x0233, "Dell Latitude E6400", AD1984_DELL_DESKTOP),
{}
};
file:aa2ec237c197aee11e1401a9bf45265415199ffd -> file:bc91a80d817b5259f95d0630dbee1543fdbb5a28
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6455,6 +6455,7 @@ static int patch_alc260(struct hda_codec
spec->stream_analog_playback = &alc260_pcm_analog_playback;
spec->stream_analog_capture = &alc260_pcm_analog_capture;
+ spec->stream_analog_alt_capture = &alc260_pcm_analog_capture;
spec->stream_digital_playback = &alc260_pcm_digital_playback;
spec->stream_digital_capture = &alc260_pcm_digital_capture;
@@ -6588,7 +6589,7 @@ static struct hda_input_mux alc883_lenov
.num_items = 4,
.items = {
{ "Mic", 0x0 },
- { "iMic", 0x1 },
+ { "Int Mic", 0x1 },
{ "Line", 0x2 },
{ "CD", 0x4 },
},
@@ -8037,8 +8038,8 @@ static struct snd_kcontrol_new alc883_le
HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("iMic Playback Volume", 0x0b, 0x1, HDA_INPUT),
- HDA_CODEC_MUTE("iMic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+ HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+ HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
{ } /* end */
};
@@ -12388,6 +12389,9 @@ static int alc268_new_analog_output(stru
dac = 0x02;
break;
case 0x15:
+ case 0x1a: /* ALC259/269 only */
+ case 0x1b: /* ALC259/269 only */
+ case 0x21: /* ALC269vb has this pin, too */
dac = 0x03;
break;
default:
@@ -17256,6 +17260,8 @@ static inline hda_nid_t alc662_mix_to_da
return 0x02;
else if (nid >= 0x0c && nid <= 0x0e)
return nid - 0x0c + 0x02;
+ else if (nid == 0x26) /* ALC887-VD has this DAC too */
+ return 0x25;
else
return 0;
}
@@ -17264,7 +17270,7 @@ static inline hda_nid_t alc662_mix_to_da
static hda_nid_t alc662_dac_to_mix(struct hda_codec *codec, hda_nid_t pin,
hda_nid_t dac)
{
- hda_nid_t mix[4];
+ hda_nid_t mix[5];
int i, num;
num = snd_hda_get_connections(codec, pin, mix, ARRAY_SIZE(mix));
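
The last realtek hunk grows the connection-list buffer from four to five entries: the ALC887-VD reports a fifth route (the 0x26 mixer handled just above), and since snd_hda_get_connections() is handed ARRAY_SIZE(mix), an undersized array lets the codec's reply overrun the stack. The invariant in a generic sketch, where enumerate() is a hypothetical stand-in for the HDA call:

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical stand-in for snd_hda_get_connections(): fills at most
 * 'len' entries and returns how many routes the codec reports. */
static int enumerate(int pin, unsigned short *buf, int len)
{
	int i, reported = 5;	/* pretend the codec lists five routes */

	for (i = 0; i < reported && i < len; i++)
		buf[i] = (unsigned short)(0x20 + i);
	return reported;
}

static int count_routes(int pin)
{
	unsigned short mix[5];	/* was [4]; five entries overflowed it */

	/* The length passed down must be the real size of 'mix'. */
	return enumerate(pin, mix, ARRAY_SIZE(mix));
}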
file:fee411b43ef6849d8673544360bb5af8240e3ed4 -> file:a280a68293d377d86e913cdec87b6d134d07e463
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -1602,6 +1602,8 @@ static struct snd_pci_quirk stac92hd73xx
static struct snd_pci_quirk stac92hd73xx_codec_id_cfg_tbl[] = {
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02a1,
"Alienware M17x", STAC_ALIENWARE_M17X),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x043a,
+ "Alienware M17x", STAC_ALIENWARE_M17X),
{} /* terminator */
};
file:aac20fb4aad23e0592cc658e45ea3cc6ced4c607 -> file:1a76e63a1310d1983da66fe3dc3461adcf3385d5
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -1776,6 +1776,12 @@ static struct ac97_quirk ac97_quirks[] _
},
{
.subvendor = 0x1014,
+ .subdevice = 0x0534,
+ .name = "ThinkPad X31",
+ .type = AC97_TUNE_INV_EAPD
+ },
+ {
+ .subvendor = 0x1014,
.subdevice = 0x1f00,
.name = "MS-9128",
.type = AC97_TUNE_ALC_JACK
@@ -1859,6 +1865,12 @@ static struct ac97_quirk ac97_quirks[] _
.type = AC97_TUNE_HP_ONLY
},
{
+ .subvendor = 0x1028,
+ .subdevice = 0x0182,
+ .name = "Dell Latitude D610", /* STAC9750/51 */
+ .type = AC97_TUNE_HP_ONLY
+ },
+ {
.subvendor = 0x1028,
.subdevice = 0x0186,
.name = "Dell Latitude D810", /* cf. Malone #41015 */
file:72db4c39007fbc8bd8739819a580c0b2b2da48a8 -> file:6811433a5f96ebcee3861d6effcc1d05ef695b78
--- a/sound/pci/oxygen/oxygen.c
+++ b/sound/pci/oxygen/oxygen.c
@@ -393,6 +393,10 @@ static int __devinit get_oxygen_model(st
chip->model.suspend = claro_suspend;
chip->model.resume = claro_resume;
chip->model.set_adc_params = set_ak5385_params;
+ chip->model.device_config = PLAYBACK_0_TO_I2S |
+ PLAYBACK_1_TO_SPDIF |
+ CAPTURE_0_FROM_I2S_2 |
+ CAPTURE_1_FROM_SPDIF;
break;
}
if (id->driver_data == MODEL_MERIDIAN ||
file:b5ca02e2038c9bb42c35c8a194c57974e15b6686 -> file:246b7c66eb0df2db2c3011a635c0cb950e2cd5f1
--- a/sound/pci/riptide/riptide.c
+++ b/sound/pci/riptide/riptide.c
@@ -1224,15 +1224,14 @@ static int try_to_load_firmware(struct c
firmware.firmware.ASIC, firmware.firmware.CODEC,
firmware.firmware.AUXDSP, firmware.firmware.PROG);
+ if (!chip)
+ return 1;
+
for (i = 0; i < FIRMWARE_VERSIONS; i++) {
if (!memcmp(&firmware_versions[i], &firmware, sizeof(firmware)))
- break;
- }
- if (i >= FIRMWARE_VERSIONS)
- return 0; /* no match */
+ return 1; /* OK */
- if (!chip)
- return 1; /* OK */
+ }
snd_printdd("Writing Firmware\n");
if (!chip->fw_entry) {
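
The riptide rework straightens out the firmware check: previously the version-match loop could return "no match" before the chip == NULL early exit was ever reached, so a probe-only call could fail even though firmware was loadable. Now a probe-only call succeeds immediately, a recognized firmware version is accepted as-is, and only unrecognized firmware falls through to the write path below. The corrected flow, condensed into a sketch with illustrative names:

#include <string.h>

struct fw_id { unsigned char asic, codec, auxdsp, prog; };

static int firmware_ok(const struct fw_id *known, int nknown,
		       const struct fw_id *found, int probe_only)
{
	int i;

	if (probe_only)		/* detection pass: nothing to verify yet */
		return 1;
	for (i = 0; i < nknown; i++)
		if (!memcmp(&known[i], found, sizeof(*found)))
			return 1;	/* recognized firmware, already good */
	return 0;	/* unknown: the caller goes on to write firmware */
}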
file:7bb827c7d8061bfea72e0c2579015f8668bbebc9 -> file:401518c8c86dcd17c27ac660954524e008ed37fd
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -4610,6 +4610,7 @@ static int snd_hdsp_hwdep_ioctl(struct s
if (err < 0)
return err;
+ memset(&info, 0, sizeof(info));
spin_lock_irqsave(&hdsp->lock, flags);
info.pref_sync_ref = (unsigned char)hdsp_pref_sync_ref(hdsp);
info.wordclock_sync_check = (unsigned char)hdsp_wc_sync_check(hdsp);
file:0dce331a2a3b105e565e7deaa315fdb39c575d4d -> file:ec2125caa640151c9652e0aea0c4a32425b26e4a
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -4127,6 +4127,7 @@ static int snd_hdspm_hwdep_ioctl(struct
case SNDRV_HDSPM_IOCTL_GET_CONFIG_INFO:
+ memset(&info, 0, sizeof(info));
spin_lock_irq(&hdspm->lock);
info.pref_sync_ref = hdspm_pref_sync_ref(hdspm);
info.wordclock_sync_check = hdspm_wc_sync_check(hdspm);
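
Both RME hunks apply the same hardening: the hwdep ioctl reply is zeroed before its fields are filled in. Without the memset, compiler padding and any members left unset would be copied to userspace still holding stale kernel stack bytes. The pattern in a minimal sketch; the struct types and the read_*() helpers are hypothetical, copy_to_user() is the real kernel primitive:

struct status_info {
	unsigned char pref_sync_ref;
	/* the compiler may insert padding here */
	unsigned int sample_rate;
};

static long status_ioctl(struct card_state *card, void __user *argp)
{
	struct status_info info;

	memset(&info, 0, sizeof(info));	/* the fix: no uninitialized bytes */
	info.pref_sync_ref = read_sync_ref(card);
	info.sample_rate = read_rate(card);
	if (copy_to_user(argp, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}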
file:6bded8c78150d7f64c52cf7cc4eca8fd89df0866 -> file:c1e767d2e2204ca393cd60223c2013a1123d6d84
--- a/sound/soc/codecs/wm8580.c
+++ b/sound/soc/codecs/wm8580.c
@@ -268,9 +268,9 @@ SOC_DOUBLE("DAC2 Invert Switch", WM8580_
SOC_DOUBLE("DAC3 Invert Switch", WM8580_DAC_CONTROL4, 4, 5, 1, 0),
SOC_SINGLE("DAC ZC Switch", WM8580_DAC_CONTROL5, 5, 1, 0),
-SOC_SINGLE("DAC1 Switch", WM8580_DAC_CONTROL5, 0, 1, 0),
-SOC_SINGLE("DAC2 Switch", WM8580_DAC_CONTROL5, 1, 1, 0),
-SOC_SINGLE("DAC3 Switch", WM8580_DAC_CONTROL5, 2, 1, 0),
+SOC_SINGLE("DAC1 Switch", WM8580_DAC_CONTROL5, 0, 1, 1),
+SOC_SINGLE("DAC2 Switch", WM8580_DAC_CONTROL5, 1, 1, 1),
+SOC_SINGLE("DAC3 Switch", WM8580_DAC_CONTROL5, 2, 1, 1),
SOC_DOUBLE("ADC Mute Switch", WM8580_ADC_CONTROL1, 0, 1, 1, 0),
SOC_SINGLE("ADC High-Pass Filter Switch", WM8580_ADC_CONTROL1, 4, 1, 0),
file:8286c62e4df3051fe06d00073f5f861765e42f92 -> file:38a53a689acaa757600e4ffea134a41596c16d75
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -177,13 +177,6 @@ static int wm8776_set_fmt(struct snd_soc
case SND_SOC_DAIFMT_LEFT_J:
iface |= 0x0001;
break;
- /* FIXME: CHECK A/B */
- case SND_SOC_DAIFMT_DSP_A:
- iface |= 0x0003;
- break;
- case SND_SOC_DAIFMT_DSP_B:
- iface |= 0x0007;
- break;
default:
return -EINVAL;
}
file:a31a8cd67570384be6b622c8e643c5f46a79d566 -> file:3c6d14132ff091a19fc3706402972eae6cc62075
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -49,6 +49,7 @@ static inline void callchain_init(struct
INIT_LIST_HEAD(&node->children);
INIT_LIST_HEAD(&node->val);
+ node->children_hit = 0;
node->parent = NULL;
node->hit = 0;
}
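
The perf fix is a classic missing-field initialization: callchain_init() set up the list heads, parent and hit, but never children_hit, so cumulative child counts started from whatever happened to be in the allocation. An init helper has to clear every accumulator that later code adds into; distilled:

struct chain_node {
	struct chain_node *parent;
	unsigned long long hit;			/* samples ending here */
	unsigned long long children_hit;	/* samples in the subtree */
};

static void chain_node_init(struct chain_node *node)
{
	node->parent = NULL;
	node->hit = 0;
	node->children_hit = 0;	/* the field the fix starts clearing */
}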