Move source tree closer to the 2.6.32.17 mainline

file:2c48f945546b4dc3f6ebfa0463687445429ad5cb -> file:5c86fe95555ab468d044259d41b8cbd0a1d687ab
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -176,7 +176,6 @@ read the file /proc/PID/status:
CapBnd: ffffffffffffffff
voluntary_ctxt_switches: 0
nonvoluntary_ctxt_switches: 1
- Stack usage: 12 kB
This shows you nearly the same information you would get if you viewed it with
the ps command. In fact, ps uses the proc file system to obtain its
@@ -230,7 +229,6 @@ Table 1-2: Contents of the statm files (
Mems_allowed_list Same as previous, but in "list format"
voluntary_ctxt_switches number of voluntary context switches
nonvoluntary_ctxt_switches number of non voluntary context switches
- Stack usage: stack usage high water mark (round up to page size)
..............................................................................
Table 1-3: Contents of the statm files (as of 2.6.8-rc3)
@@ -309,7 +307,7 @@ address perms offset dev in
08049000-0804a000 rw-p 00001000 03:00 8312 /opt/test
0804a000-0806b000 rw-p 00000000 00:00 0 [heap]
a7cb1000-a7cb2000 ---p 00000000 00:00 0
-a7cb2000-a7eb2000 rw-p 00000000 00:00 0 [threadstack:001ff4b4]
+a7cb2000-a7eb2000 rw-p 00000000 00:00 0
a7eb2000-a7eb3000 ---p 00000000 00:00 0
a7eb3000-a7ed5000 rw-p 00000000 00:00 0
a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6
@@ -345,7 +343,6 @@ is not associated with a file:
[stack] = the stack of the main process
[vdso] = the "virtual dynamic shared object",
the kernel system call handler
- [threadstack:xxxxxxxx] = the stack of the thread, xxxxxxxx is the stack size
or if empty, the mapping is anonymous.
file:3015da0c6b2a253c4a1b65559a8a8ec82b24e3e9 -> file:fe09a2cb1858de038b39746bdc862af3f6dc21bc
--- a/Documentation/filesystems/tmpfs.txt
+++ b/Documentation/filesystems/tmpfs.txt
@@ -82,11 +82,13 @@ tmpfs has a mount option to set the NUMA
all files in that instance (if CONFIG_NUMA is enabled) - which can be
adjusted on the fly via 'mount -o remount ...'
-mpol=default prefers to allocate memory from the local node
+mpol=default uses the process allocation policy
+ (see set_mempolicy(2))
mpol=prefer:Node prefers to allocate memory from the given Node
mpol=bind:NodeList allocates memory only from nodes in NodeList
mpol=interleave prefers to allocate from each node in turn
mpol=interleave:NodeList allocates from each node of NodeList in turn
+mpol=local prefers to allocate memory from the local node
NodeList format is a comma-separated list of decimal numbers and ranges,
a range being two hyphen-separated decimal numbers, the smallest and
@@ -134,3 +136,5 @@ Author:
Christoph Rohland <cr@sap.com>, 1.12.01
Updated:
Hugh Dickins, 4 June 2007
+Updated:
+ KOSAKI Motohiro, 16 Mar 2010
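As a usage sketch of the documentation change above (mount point, size and node numbers are illustrative assumptions, not taken from the patch), the new mpol=local option and the NodeList syntax could be exercised with:

	mount -t tmpfs -o size=512m,mpol=bind:0-3,5 tmpfs /mnt/numa
	mount -o remount,mpol=local /mnt/numa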
file:02838a47d86210fa8bee469f595c94851594bd44 -> file:86b5880d8502b123e508c94330e61ea255db1292
--- a/Documentation/hwmon/ltc4245
+++ b/Documentation/hwmon/ltc4245
@@ -72,9 +72,7 @@ in6_min_alarm 5v output undervoltage a
in7_min_alarm 3v output undervoltage alarm
in8_min_alarm Vee (-12v) output undervoltage alarm
-in9_input GPIO #1 voltage data
-in10_input GPIO #2 voltage data
-in11_input GPIO #3 voltage data
+in9_input GPIO voltage data
power1_input 12v power usage (mW)
power2_input 5v power usage (mW)
file:81c0c59a60eae20da2bd33a8037fdceb002f7fa7 -> file:e1bb5b261693ead30b736a214c969b20e833f2bc
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -15,7 +15,8 @@ Supported adapters:
* Intel 82801I (ICH9)
* Intel EP80579 (Tolapai)
* Intel 82801JI (ICH10)
- * Intel PCH
+ * Intel 3400/5 Series (PCH)
+ * Intel Cougar Point (PCH)
Datasheets: Publicly available at the Intel website
Authors:
file:5bc4eaaa5b7fb302d51548c385b25ccd5569c4b3 -> file:5f6aa11fb457b9e36610a1e4a5fd2cd2cd1d6768
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -241,7 +241,7 @@ and is between 256 and 4096 characters.
acpi_sleep= [HW,ACPI] Sleep options
Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig,
- old_ordering, s4_nonvs }
+ old_ordering, s4_nonvs, sci_force_enable }
See Documentation/power/video.txt for information on
s3_bios and s3_mode.
s3_beep is for debugging; it makes the PC's speaker beep
@@ -254,6 +254,9 @@ and is between 256 and 4096 characters.
of _PTS is used by default).
s4_nonvs prevents the kernel from saving/restoring the
ACPI NVS memory during hibernation.
+ sci_force_enable causes the kernel to set SCI_EN directly
+ on resume from S1/S3 (which is against the ACPI spec,
+ but some broken systems don't work without it).
acpi_use_timer_override [HW,ACPI]
Use timer override. For some broken Nvidia NF5 boards
@@ -2668,6 +2671,13 @@ and is between 256 and 4096 characters.
medium is write-protected).
Example: quirks=0419:aaf5:rl,0421:0433:rc
+ userpte=
+ [X86] Flags controlling user PTE allocations.
+
+ nohigh = do not allocate PTE pages in
+ HIGHMEM regardless of setting
+ of CONFIG_HIGHPTE.
+
vdso= [X86,SH]
vdso=2: enable compat VDSO (default with COMPAT_VDSO)
vdso=1: enable VDSO (default)
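As a usage sketch of the new parameters documented above (boot loader syntax and the root device are illustrative assumptions), both options are passed on the kernel command line, e.g.:

	linux /vmlinuz root=/dev/sda1 ro acpi_sleep=sci_force_enable userpte=nohigh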
file:aafcaa6341915bb001c27d1bd05006add6606224 -> file:387eb9c6bf5d26979181b633b34b64bc8265bd7a
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -460,6 +460,8 @@ event code Key Notes
For Lenovo ThinkPads with a new
BIOS, it has to be handled either
by the ACPI OSI, or by userspace.
+ The driver does the right thing;
+ never mess with this.
0x1011 0x10 FN+END Brightness down. See brightness
up for details.
@@ -582,46 +584,15 @@ with hotkey_report_mode.
Brightness hotkey notes:
-These are the current sane choices for brightness key mapping in
-thinkpad-acpi:
+Don't mess with the brightness hotkeys in a Thinkpad. If you want
+notifications for OSD, use the sysfs backlight class event support.
-For IBM and Lenovo models *without* ACPI backlight control (the ones on
-which thinkpad-acpi will autoload its backlight interface by default,
-and on which ACPI video does not export a backlight interface):
-
-1. Don't enable or map the brightness hotkeys in thinkpad-acpi, as
- these older firmware versions unfortunately won't respect the hotkey
- mask for brightness keys anyway, and always reacts to them. This
- usually work fine, unless X.org drivers are doing something to block
- the BIOS. In that case, use (3) below. This is the default mode of
- operation.
-
-2. Enable the hotkeys, but map them to something else that is NOT
- KEY_BRIGHTNESS_UP/DOWN or any other keycode that would cause
- userspace to try to change the backlight level, and use that as an
- on-screen-display hint.
-
-3. IF AND ONLY IF X.org drivers find a way to block the firmware from
- automatically changing the brightness, enable the hotkeys and map
- them to KEY_BRIGHTNESS_UP and KEY_BRIGHTNESS_DOWN, and feed that to
- something that calls xbacklight. thinkpad-acpi will not be able to
- change brightness in that case either, so you should disable its
- backlight interface.
-
-For Lenovo models *with* ACPI backlight control:
-
-1. Load up ACPI video and use that. ACPI video will report ACPI
- events for brightness change keys. Do not mess with thinkpad-acpi
- defaults in this case. thinkpad-acpi should not have anything to do
- with backlight events in a scenario where ACPI video is loaded:
- brightness hotkeys must be disabled, and the backlight interface is
- to be kept disabled as well. This is the default mode of operation.
-
-2. Do *NOT* load up ACPI video, enable the hotkeys in thinkpad-acpi,
- and map them to KEY_BRIGHTNESS_UP and KEY_BRIGHTNESS_DOWN. Process
- these keys on userspace somehow (e.g. by calling xbacklight).
- The driver will do this automatically if it detects that ACPI video
- has been disabled.
+The driver will issue KEY_BRIGHTNESS_UP and KEY_BRIGHTNESS_DOWN events
+automatically for the cases where userspace has to do something to
+implement brightness changes. When you override these events, you will
+either fail to properly handle the ThinkPads that require explicit
+action to change backlight brightness, or the ThinkPads that require
+that no action be taken to work properly.
Bluetooth
@@ -679,6 +650,10 @@ LCD, CRT or DVI (if available). The foll
echo expand_toggle > /proc/acpi/ibm/video
echo video_switch > /proc/acpi/ibm/video
+NOTE: Access to this feature is restricted to processes owning the
+CAP_SYS_ADMIN capability for safety reasons, as it can interact badly
+enough with some versions of X.org to crash it.
+
Each video output device can be enabled or disabled individually.
Reading /proc/acpi/ibm/video shows the status of each device.
@@ -1465,3 +1440,5 @@ Sysfs interface changelog:
and it is always able to disable hot keys. Very old
thinkpads are properly supported. hotkey_bios_mask
is deprecated and marked for removal.
+
+0x020600: Marker for backlight change event support.
file:0643e3b7168cccb44acf65735c71beb5e273845c -> file:3c45d5dcd63b692c9719d0d37a37b1c809bfa7d4
--- a/Documentation/networking/3c509.txt
+++ b/Documentation/networking/3c509.txt
@@ -48,11 +48,11 @@ for LILO parameters for doing this:
This configures the first found 3c509 card for IRQ 10, base I/O 0x310, and
transceiver type 3 (10base2). The flag "0x3c509" must be set to avoid conflicts
with other card types when overriding the I/O address. When the driver is
-loaded as a module, only the IRQ and transceiver setting may be overridden.
-For example, setting two cards to 10base2/IRQ10 and AUI/IRQ11 is done by using
-the xcvr and irq module options:
+loaded as a module, only the IRQ may be overridden. For example,
+setting two cards to IRQ10 and IRQ11 is done by using the irq module
+option:
- options 3c509 xcvr=3,1 irq=10,11
+ options 3c509 irq=10,11
(2) Full-duplex mode
@@ -77,6 +77,8 @@ operation.
itself full-duplex capable. This is almost certainly one of two things: a full-
duplex-capable Ethernet switch (*not* a hub), or a full-duplex-capable NIC on
another system that's connected directly to the 3c509B via a crossover cable.
+
+Full-duplex mode can be enabled using 'ethtool'.
/////Extremely important caution concerning full-duplex mode/////
Understand that the 3c509B's hardware's full-duplex support is much more
@@ -113,6 +115,8 @@ This insured that merely upgrading the d
never automatically enable full-duplex mode in an existing installation;
it must always be explicitly enabled via one of these code in order to be
activated.
+
+The transceiver type can be changed using 'ethtool'.
(4a) Interpretation of error messages and common problems
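As a usage sketch of the run-time configuration described above (the interface name eth0 is an illustrative assumption), the duplex setting and transceiver type are now changed with ethtool instead of module options:

	ethtool -s eth0 speed 10 duplex full autoneg off
	ethtool -s eth0 port bnc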
file:25b1da9a5035469fcf4355e80082f3dd7af586c8 -> file:42ae29b288a1a93155da39ec4e823fca734fba2e
--- a/arch/arm/mach-iop13xx/include/mach/memory.h
+++ b/arch/arm/mach-iop13xx/include/mach/memory.h
@@ -64,8 +64,6 @@ static inline unsigned long __lbus_to_vi
(dma_addr_t)page_to_phys(page); \
})
-#define __arch_dma_to_page(dev, addr) phys_to_page(addr)
-
#endif /* CONFIG_ARCH_IOP13XX */
#endif /* !ASSEMBLY */
file:ffa19aae6e054676f039b4691b946966205597b7 -> file:76e5308685a40e81e45dcc42c66312ef3e0f4d1f
--- a/arch/arm/mach-ks8695/include/mach/memory.h
+++ b/arch/arm/mach-ks8695/include/mach/memory.h
@@ -41,13 +41,6 @@ extern struct bus_type platform_bus_type
__dma = __dma - PHYS_OFFSET + KS8695_PCIMEM_PA; \
__dma; })
-#define __arch_dma_to_page(dev, x) \
- ({ dma_addr_t __dma = x; \
- if (!is_lbus_device(dev)) \
- __dma += PHYS_OFFSET - KS8695_PCIMEM_PA; \
- phys_to_page(__dma); \
- })
-
#endif
#endif
file:e34d96a825e3a60259a48cbd6dd92a3f79499881 -> file:6879cfec2c7b8cd28fe8d825d87aab3112cd1ad9
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -37,6 +37,10 @@
#define SYSTEM_REV_S_USES_VAUX3 0x8
static int board_keymap[] = {
+ /*
+ * Note that KEY(x, 8, KEY_XXX) entries represent "entrire row
+ * connected to the ground" matrix state.
+ */
KEY(0, 0, KEY_Q),
KEY(0, 1, KEY_O),
KEY(0, 2, KEY_P),
@@ -44,6 +48,7 @@ static int board_keymap[] = {
KEY(0, 4, KEY_BACKSPACE),
KEY(0, 6, KEY_A),
KEY(0, 7, KEY_S),
+
KEY(1, 0, KEY_W),
KEY(1, 1, KEY_D),
KEY(1, 2, KEY_F),
@@ -52,6 +57,7 @@ static int board_keymap[] = {
KEY(1, 5, KEY_J),
KEY(1, 6, KEY_K),
KEY(1, 7, KEY_L),
+
KEY(2, 0, KEY_E),
KEY(2, 1, KEY_DOT),
KEY(2, 2, KEY_UP),
@@ -59,6 +65,8 @@ static int board_keymap[] = {
KEY(2, 5, KEY_Z),
KEY(2, 6, KEY_X),
KEY(2, 7, KEY_C),
+ KEY(2, 8, KEY_F9),
+
KEY(3, 0, KEY_R),
KEY(3, 1, KEY_V),
KEY(3, 2, KEY_B),
@@ -67,20 +75,23 @@ static int board_keymap[] = {
KEY(3, 5, KEY_SPACE),
KEY(3, 6, KEY_SPACE),
KEY(3, 7, KEY_LEFT),
+
KEY(4, 0, KEY_T),
KEY(4, 1, KEY_DOWN),
KEY(4, 2, KEY_RIGHT),
KEY(4, 4, KEY_LEFTCTRL),
KEY(4, 5, KEY_RIGHTALT),
KEY(4, 6, KEY_LEFTSHIFT),
+ KEY(4, 8, KEY_F10),
+
KEY(5, 0, KEY_Y),
+ KEY(5, 8, KEY_F11),
+
KEY(6, 0, KEY_U),
+
KEY(7, 0, KEY_I),
KEY(7, 1, KEY_F7),
KEY(7, 2, KEY_F8),
- KEY(0xff, 2, KEY_F9),
- KEY(0xff, 4, KEY_F10),
- KEY(0xff, 5, KEY_F11),
};
static struct matrix_keymap_data board_map_data = {
file:811743c5614769d5fb283201fe77e5c606633d98 -> file:5f2ba8d9015cfce31a02f901e7f41742dd1e1fa9
--- a/arch/arm/mach-pxa/include/mach/colibri.h
+++ b/arch/arm/mach-pxa/include/mach/colibri.h
@@ -2,6 +2,7 @@
#define _COLIBRI_H_
#include <net/ax88796.h>
+#include <mach/mfp.h>
/*
* common settings for all modules
file:c48e1f2c3349492472d02316fb77f0cb7f29c8ed -> file:6727c783e0d6f70ab1265eb7674bf3a11309a806
--- a/arch/arm/mach-realview/Kconfig
+++ b/arch/arm/mach-realview/Kconfig
@@ -18,6 +18,7 @@ config REALVIEW_EB_ARM11MP
bool "Support ARM11MPCore tile"
depends on MACH_REALVIEW_EB
select CPU_V6
+ select ARCH_HAS_BARRIERS if SMP
help
Enable support for the ARM11MPCore tile on the Realview platform.
@@ -35,6 +36,7 @@ config MACH_REALVIEW_PB11MP
select CPU_V6
select ARM_GIC
select HAVE_PATA_PLATFORM
+ select ARCH_HAS_BARRIERS if SMP
help
Include support for the ARM(R) RealView MPCore Platform Baseboard.
PB11MPCore is a platform with an on-board ARM11MPCore and has
file:0c5d749d7b5f17f8ad7241506a90dc8dc2d63a89(new)
--- /dev/null
+++ b/arch/arm/mach-realview/include/mach/barriers.h
@@ -0,0 +1,8 @@
+/*
+ * Barriers redefined for RealView ARM11MPCore platforms with L220 cache
+ * controller to work around hardware errata causing the outer_sync()
+ * operation to deadlock the system.
+ */
+#define mb() dsb()
+#define rmb() dmb()
+#define wmb() mb()
file:3325f7b49eaa23766d3ea9208230b50972af7226 -> file:9ad41dc484c17ff2da3449d71df2ad7a925b4701
--- a/arch/arm/plat-omap/include/mach/memory.h
+++ b/arch/arm/plat-omap/include/mach/memory.h
@@ -68,13 +68,6 @@
__dma = __dma - PHYS_OFFSET + OMAP1510_LB_OFFSET; \
__dma; })
-#define __arch_dma_to_page(dev, addr) \
- ({ dma_addr_t __dma = addr; \
- if (is_lbus_device(dev)) \
- __dma += PHYS_OFFSET - OMAP1510_LB_OFFSET; \
- phys_to_page(__dma); \
- })
-
#define __arch_dma_to_virt(dev, addr) ({ (void *) (is_lbus_device(dev) ? \
lbus_to_virt(addr) : \
__phys_to_virt(addr)); })
file:8542bc31f63cae7445482a4a46b41d797f06acfd -> file:93f6c634fdf4cbfdb5399e6327baef82096cf390
--- a/arch/blackfin/include/asm/cache.h
+++ b/arch/blackfin/include/asm/cache.h
@@ -15,6 +15,8 @@
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define SMP_CACHE_BYTES L1_CACHE_BYTES
+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+
#ifdef CONFIG_SMP
#define __cacheline_aligned
#else
file:2797163b8f4f12cb543495664610de65376b1319 -> file:7dc0f0f85b7c56f7dc9622242d9e92fd39930415
--- a/arch/frv/include/asm/cache.h
+++ b/arch/frv/include/asm/cache.h
@@ -17,6 +17,8 @@
#define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+
#define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
#define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
file:91df9686a0da88ac8b299ddf81c0ca697d3f075d -> file:8a20b58ba34f9f7f5e47d88a1d9049ef5a637a15
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -94,6 +94,7 @@ ia64_acpi_release_global_lock (unsigned
#define acpi_noirq 0 /* ACPI always enabled on IA64 */
#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
#define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
+#define acpi_ht 0 /* no HT-only mode on IA64 */
#endif
#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
static inline void disable_acpi(void) { }
file:0ad09f05efa97698e28b4ed2eeeccbbc7ac9f878 -> file:2eb636506496dd98fa61406c1ece4e3e33b306f5
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1797,7 +1797,8 @@ static int kvm_ia64_sync_dirty_log(struc
{
struct kvm_memory_slot *memslot;
int r, i;
- long n, base;
+ long base;
+ unsigned long n;
unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
@@ -1810,7 +1811,7 @@ static int kvm_ia64_sync_dirty_log(struc
if (!memslot->dirty_bitmap)
goto out;
- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ n = kvm_dirty_bitmap_bytes(memslot);
base = memslot->base_gfn / BITS_PER_LONG;
for (i = 0; i < n/sizeof(long); ++i) {
@@ -1826,7 +1827,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kv
struct kvm_dirty_log *log)
{
int r;
- int n;
+ unsigned long n;
struct kvm_memory_slot *memslot;
int is_dirty = 0;
@@ -1844,7 +1845,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kv
if (is_dirty) {
kvm_flush_remote_tlbs(kvm);
memslot = &kvm->memslots[log->slot];
- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ n = kvm_dirty_bitmap_bytes(memslot);
memset(memslot->dirty_bitmap, 0, n);
}
r = 0;
file:ee09d261f2e6c0be4071e1de30aee0339d6b9706 -> file:e2cde52116f142d7729a24b3d16e74e3ca9662d4
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -120,7 +120,7 @@ static inline void down_spin(struct spin
ia64_invala();
for (;;) {
- asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
+ asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
if (time_before(t, serve))
return;
cpu_relax();
file:fed3fd30de7e468797a85ff5945119c436f92fce -> file:ecafbe1718c3cccee4a0441efaca9670b0548c59
--- a/arch/m68k/include/asm/cache.h
+++ b/arch/m68k/include/asm/cache.h
@@ -8,4 +8,6 @@
#define L1_CACHE_SHIFT 4
#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+
#endif
file:7950ef4f032c218a2382be9560c2138b2c05d8a9 -> file:743385d7b5f22b36bcea1f0aa740ad923af41ca4
--- a/arch/mips/include/asm/mach-sibyte/war.h
+++ b/arch/mips/include/asm/mach-sibyte/war.h
@@ -16,7 +16,11 @@
#if defined(CONFIG_SB1_PASS_1_WORKAROUNDS) || \
defined(CONFIG_SB1_PASS_2_WORKAROUNDS)
-#define BCM1250_M3_WAR 1
+#ifndef __ASSEMBLY__
+extern int sb1250_m3_workaround_needed(void);
+#endif
+
+#define BCM1250_M3_WAR sb1250_m3_workaround_needed()
#define SIBYTE_1956_WAR 1
#else
file:a581d60cbcc21e395a1e7a9ba772c4b0be5dcb4b -> file:608dc976455b19ab76c10bc393873eea56c2fb8d
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -135,6 +135,12 @@
#define FPU_CSR_COND7 0x80000000 /* $fcc7 */
/*
+ * Bits 18 - 20 of the FPU Status Register will be read as 0,
+ * and should be written as zero.
+ */
+#define FPU_CSR_RSVD 0x001c0000
+
+/*
* X the exception cause indicator
* E the exception enable
* S the sticky/flag bit
@@ -161,7 +167,8 @@
#define FPU_CSR_UDF_S 0x00000008
#define FPU_CSR_INE_S 0x00000004
-/* rounding mode */
+/* Bits 0 and 1 of FPU Status Register specify the rounding mode */
+#define FPU_CSR_RM 0x00000003
#define FPU_CSR_RN 0x0 /* nearest */
#define FPU_CSR_RZ 0x1 /* towards zero */
#define FPU_CSR_RU 0x2 /* towards +Infinity */
file:454b53924490c58ed383f047f84f68e8be117afe -> file:c15d94b2f3a08f928b3639cbe496c366786accdc
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -75,6 +75,9 @@ struct mips_fpu_emulator_stats fpuemusta
#define FPCREG_RID 0 /* $0 = revision id */
#define FPCREG_CSR 31 /* $31 = csr */
+/* Determine rounding mode from the RM bits of the FCSR */
+#define modeindex(v) ((v) & FPU_CSR_RM)
+
/* Convert Mips rounding mode (0..3) to IEEE library modes. */
static const unsigned char ieee_rm[4] = {
[FPU_CSR_RN] = IEEE754_RN,
@@ -381,10 +384,14 @@ static int cop1Emulate(struct pt_regs *x
(void *) (xcp->cp0_epc),
MIPSInst_RT(ir), value);
#endif
- value &= (FPU_CSR_FLUSH | FPU_CSR_ALL_E | FPU_CSR_ALL_S | 0x03);
- ctx->fcr31 &= ~(FPU_CSR_FLUSH | FPU_CSR_ALL_E | FPU_CSR_ALL_S | 0x03);
- /* convert to ieee library modes */
- ctx->fcr31 |= (value & ~0x3) | ieee_rm[value & 0x3];
+
+ /*
+ * Don't write reserved bits,
+ * and convert to ieee library modes
+ */
+ ctx->fcr31 = (value &
+ ~(FPU_CSR_RSVD | FPU_CSR_RM)) |
+ ieee_rm[modeindex(value)];
}
if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
return SIGFPE;
file:bb1719a55d227dc595467aafd88173dc9507b937 -> file:5adba4fc7c1015c6cf4e0638be471ac78e797ddd
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -73,9 +73,6 @@ static int __cpuinit m4kc_tlbp_war(void)
enum label_id {
label_second_part = 1,
label_leave,
-#ifdef MODULE_START
- label_module_alloc,
-#endif
label_vmalloc,
label_vmalloc_done,
label_tlbw_hazard,
@@ -92,9 +89,6 @@ enum label_id {
UASM_L_LA(_second_part)
UASM_L_LA(_leave)
-#ifdef MODULE_START
-UASM_L_LA(_module_alloc)
-#endif
UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
UASM_L_LA(_tlbw_hazard)
@@ -802,8 +796,6 @@ static void __cpuinit build_r4000_tlb_re
} else {
#if defined(CONFIG_HUGETLB_PAGE)
const enum label_id ls = label_tlb_huge_update;
-#elif defined(MODULE_START)
- const enum label_id ls = label_module_alloc;
#else
const enum label_id ls = label_vmalloc;
#endif
file:0444da1e23c24e2dc675a40034a2e7adc087ba41 -> file:92da3155ce074ac16840c715fde220946a13b4fd
--- a/arch/mips/sibyte/sb1250/setup.c
+++ b/arch/mips/sibyte/sb1250/setup.c
@@ -87,6 +87,21 @@ static int __init setup_bcm1250(void)
return ret;
}
+int sb1250_m3_workaround_needed(void)
+{
+ switch (soc_type) {
+ case K_SYS_SOC_TYPE_BCM1250:
+ case K_SYS_SOC_TYPE_BCM1250_ALT:
+ case K_SYS_SOC_TYPE_BCM1250_ALT2:
+ case K_SYS_SOC_TYPE_BCM1125:
+ case K_SYS_SOC_TYPE_BCM1125H:
+ return soc_pass < K_SYS_REVISION_BCM1250_C0;
+
+ default:
+ return 0;
+ }
+}
+
static int __init setup_bcm112x(void)
{
int ret = 0;
file:e03cfa2e997e7299e1215a3a8c25a516d19d6751 -> file:6e2fe28dde4e7d68f260871c9377b96f30e8e644
--- a/arch/mn10300/include/asm/cache.h
+++ b/arch/mn10300/include/asm/cache.h
@@ -21,6 +21,8 @@
#define L1_CACHE_DISPARITY L1_CACHE_NENTRIES * L1_CACHE_BYTES
#endif
+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+
/* data cache purge registers
* - read from the register to unconditionally purge that cache line
* - write address & 0xffffff00 to conditionally purge that cache line
file:3ca1c61492182d3321c2fe7d2cf00d003ff5a8a5 -> file:27a7492ddb0d66f7762e94dea40ff3f3ac1fcd85
--- a/arch/parisc/math-emu/decode_exc.c
+++ b/arch/parisc/math-emu/decode_exc.c
@@ -342,6 +342,7 @@ decode_fpu(unsigned int Fpu_register[],
return SIGNALCODE(SIGFPE, FPE_FLTINV);
case DIVISIONBYZEROEXCEPTION:
update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
+ Clear_excp_register(exception_index);
return SIGNALCODE(SIGFPE, FPE_FLTDIV);
case INEXACTEXCEPTION:
update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
file:abbc2aaaced5bbc68d5be5699e9123a35cd99fdd -> file:c1de3c9a1d65ecb6df4f6185bf9d5f93c4eb6b45
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -135,43 +135,5 @@ static inline int irqs_disabled_flags(un
*/
struct irq_chip;
-#ifdef CONFIG_PERF_EVENTS
-
-#ifdef CONFIG_PPC64
-static inline unsigned long test_perf_event_pending(void)
-{
- unsigned long x;
-
- asm volatile("lbz %0,%1(13)"
- : "=r" (x)
- : "i" (offsetof(struct paca_struct, perf_event_pending)));
- return x;
-}
-
-static inline void set_perf_event_pending(void)
-{
- asm volatile("stb %0,%1(13)" : :
- "r" (1),
- "i" (offsetof(struct paca_struct, perf_event_pending)));
-}
-
-static inline void clear_perf_event_pending(void)
-{
- asm volatile("stb %0,%1(13)" : :
- "r" (0),
- "i" (offsetof(struct paca_struct, perf_event_pending)));
-}
-#endif /* CONFIG_PPC64 */
-
-#else /* CONFIG_PERF_EVENTS */
-
-static inline unsigned long test_perf_event_pending(void)
-{
- return 0;
-}
-
-static inline void clear_perf_event_pending(void) {}
-#endif /* CONFIG_PERF_EVENTS */
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */
file:0812b0f414bbe486f38db00ab1a171c4810410ef -> file:692c056566c7d9702cfa24ce026e6f4e612128c1
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -133,7 +133,6 @@ int main(void)
DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
- DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_event_pending));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
#ifdef CONFIG_PPC_MM_SLICES
DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
file:9763267e38b46cbcdb2d6c9bfffd83d61b1eeb08 -> file:917cebc2f262b2cd2b3d1f125759611cefd5d7ae
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -556,15 +556,6 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_
2:
TRACE_AND_RESTORE_IRQ(r5);
-#ifdef CONFIG_PERF_EVENTS
- /* check paca->perf_event_pending if we're enabling ints */
- lbz r3,PACAPERFPEND(r13)
- and. r3,r3,r5
- beq 27f
- bl .perf_event_do_pending
-27:
-#endif /* CONFIG_PERF_EVENTS */
-
/* extract EE bit and use it to restore paca->hard_enabled */
ld r3,_MSR(r1)
rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */
file:e5d1211779840f029841e1591952c3b5ec9b4fae -> file:8564a412e7a66f28b0403007f7a6615d23167816
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -53,7 +53,6 @@
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
-#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -138,11 +137,6 @@ notrace void raw_local_irq_restore(unsig
}
#endif /* CONFIG_PPC_STD_MMU_64 */
- if (test_perf_event_pending()) {
- clear_perf_event_pending();
- perf_event_do_pending();
- }
-
/*
* if (get_paca()->hard_enabled) return;
* But again we need to take care that gcc gets hard_enabled directly
file:a136a11c490d0f36a9b32812132e682c38262f8f -> file:02aab7f90295b40ba03cdfc8469aeef00f6846cb
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -530,25 +530,60 @@ void __init iSeries_time_init_early(void
}
#endif /* CONFIG_PPC_ISERIES */
-#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32)
-DEFINE_PER_CPU(u8, perf_event_pending);
+#ifdef CONFIG_PERF_EVENTS
-void set_perf_event_pending(void)
+/*
+ * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
+ */
+#ifdef CONFIG_PPC64
+static inline unsigned long test_perf_event_pending(void)
{
- get_cpu_var(perf_event_pending) = 1;
- set_dec(1);
- put_cpu_var(perf_event_pending);
+ unsigned long x;
+
+ asm volatile("lbz %0,%1(13)"
+ : "=r" (x)
+ : "i" (offsetof(struct paca_struct, perf_event_pending)));
+ return x;
}
+static inline void set_perf_event_pending_flag(void)
+{
+ asm volatile("stb %0,%1(13)" : :
+ "r" (1),
+ "i" (offsetof(struct paca_struct, perf_event_pending)));
+}
+
+static inline void clear_perf_event_pending(void)
+{
+ asm volatile("stb %0,%1(13)" : :
+ "r" (0),
+ "i" (offsetof(struct paca_struct, perf_event_pending)));
+}
+
+#else /* 32-bit */
+
+DEFINE_PER_CPU(u8, perf_event_pending);
+
+#define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1
#define test_perf_event_pending() __get_cpu_var(perf_event_pending)
#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0
-#else /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
+#endif /* 32 vs 64 bit */
+
+void set_perf_event_pending(void)
+{
+ preempt_disable();
+ set_perf_event_pending_flag();
+ set_dec(1);
+ preempt_enable();
+}
+
+#else /* CONFIG_PERF_EVENTS */
#define test_perf_event_pending() 0
#define clear_perf_event_pending()
-#endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
+#endif /* CONFIG_PERF_EVENTS */
/*
* For iSeries shared processors, we have to let the hypervisor
@@ -576,10 +611,6 @@ void timer_interrupt(struct pt_regs * re
set_dec(DECREMENTER_MAX);
#ifdef CONFIG_PPC32
- if (test_perf_event_pending()) {
- clear_perf_event_pending();
- perf_event_do_pending();
- }
if (atomic_read(&ppc_n_lost_interrupts) != 0)
do_IRQ(regs);
#endif
@@ -597,6 +628,11 @@ void timer_interrupt(struct pt_regs * re
calculate_steal_time();
+ if (test_perf_event_pending()) {
+ clear_perf_event_pending();
+ perf_event_do_pending();
+ }
+
#ifdef CONFIG_PPC_ISERIES
if (firmware_has_feature(FW_FEATURE_ISERIES))
get_lppaca()->int_dword.fields.decr_int = 0;
file:2a4551f78f60c242d3477b6742efaf8aa322aaab -> file:ff184f4771e93274953dab31d875f2b381318248
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -176,7 +176,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(st
{
struct kvm_vcpu *vcpu;
vcpu = kvmppc_core_vcpu_create(kvm, id);
- kvmppc_create_vcpu_debugfs(vcpu, id);
+ if (!IS_ERR(vcpu))
+ kvmppc_create_vcpu_debugfs(vcpu, id);
return vcpu;
}
file:64e2e499e32a6fecacf81cb6eba2d03a32c8e50d -> file:3ac0cd3a53738e28cd7a0cb0649d94b27c5d2d3b
--- a/arch/powerpc/lib/string.S
+++ b/arch/powerpc/lib/string.S
@@ -71,7 +71,7 @@ _GLOBAL(strcmp)
_GLOBAL(strncmp)
PPC_LCMPI r5,0
- beqlr
+ ble- 2f
mtctr r5
addi r5,r3,-1
addi r4,r4,-1
@@ -82,6 +82,8 @@ _GLOBAL(strncmp)
beqlr 1
bdnzt eq,1b
blr
+2: li r3,0
+ blr
_GLOBAL(strlen)
addi r4,r3,-1
file:dc93e95b256eae04e248900e82858d94c8fe822a -> file:45f4e61b263287992f4a83b72a875d9a5b74e9c2
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -131,15 +131,10 @@ void settlbcam(int index, unsigned long
TLBCAM[index].MAS3 = (phys & PAGE_MASK) | MAS3_SX | MAS3_SR;
TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_SW : 0);
-#ifndef CONFIG_KGDB /* want user access for breakpoints */
if (flags & _PAGE_USER) {
TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
}
-#else
- TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
- TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
-#endif
tlbcam_addrs[index].start = virt;
tlbcam_addrs[index].limit = virt + size - 1;
file:ae06c6236d9c396402040be44d25257283141abd -> file:c8fc4dc8f572b6aa4490aecaf0d606c6032259dd
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -1077,7 +1077,7 @@ static int calculate_lfsr(int n)
index = ENTRIES-1;
/* make sure index is valid */
- if ((index > ENTRIES) || (index < 0))
+ if ((index >= ENTRIES) || (index < 0))
index = ENTRIES-1;
return initial_lfsr[index];
file:ebff6d9a4e395ae0088da5a26e99459cedc2f0f5 -> file:c2c172042db26329e61625dfa37baca80e131a34
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -66,30 +66,6 @@ static void pseries_mach_cpu_die(void)
for(;;);
}
-static int qcss_tok; /* query-cpu-stopped-state token */
-
-/* Get state of physical CPU.
- * Return codes:
- * 0 - The processor is in the RTAS stopped state
- * 1 - stop-self is in progress
- * 2 - The processor is not in the RTAS stopped state
- * -1 - Hardware Error
- * -2 - Hardware Busy, Try again later.
- */
-static int query_cpu_stopped(unsigned int pcpu)
-{
- int cpu_status, status;
-
- status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
- if (status != 0) {
- printk(KERN_ERR
- "RTAS query-cpu-stopped-state failed: %i\n", status);
- return status;
- }
-
- return cpu_status;
-}
-
static int pseries_cpu_disable(void)
{
int cpu = smp_processor_id();
@@ -113,8 +89,9 @@ static void pseries_cpu_die(unsigned int
unsigned int pcpu = get_hard_smp_processor_id(cpu);
for (tries = 0; tries < 25; tries++) {
- cpu_status = query_cpu_stopped(pcpu);
- if (cpu_status == 0 || cpu_status == -1)
+ cpu_status = smp_query_cpu_stopped(pcpu);
+ if (cpu_status == QCSS_STOPPED ||
+ cpu_status == QCSS_HARDWARE_ERROR)
break;
cpu_relax();
}
@@ -256,6 +233,7 @@ static int __init pseries_cpu_hotplug_in
{
struct device_node *np;
const char *typep;
+ int qcss_tok;
for_each_node_by_name(np, "interrupt-controller") {
typep = of_get_property(np, "compatible", NULL);
file:a24a6b2333b2388521989ac5ac2b38365a61e6ab -> file:45f634c022081425d72ac024ae689efe91646858
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -4,6 +4,14 @@
#include <asm/hvcall.h>
#include <asm/page.h>
+/* Get state of physical CPU from query_cpu_stopped */
+int smp_query_cpu_stopped(unsigned int pcpu);
+#define QCSS_STOPPED 0
+#define QCSS_STOPPING 1
+#define QCSS_NOT_STOPPED 2
+#define QCSS_HARDWARE_ERROR -1
+#define QCSS_HARDWARE_BUSY -2
+
static inline long poll_pending(void)
{
return plpar_hcall_norets(H_POLL_PENDING);
file:440000cc71307ac83df8ac251af13085c910aec6 -> file:3afa079d178b4805c9a43137bb3b13b05a3a5bbd
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -56,6 +56,28 @@
*/
static cpumask_t of_spin_map;
+/* Query where a cpu is now. Return codes #defined in plpar_wrappers.h */
+int smp_query_cpu_stopped(unsigned int pcpu)
+{
+ int cpu_status, status;
+ int qcss_tok = rtas_token("query-cpu-stopped-state");
+
+ if (qcss_tok == RTAS_UNKNOWN_SERVICE) {
+ printk(KERN_INFO "Firmware doesn't support "
+ "query-cpu-stopped-state\n");
+ return QCSS_HARDWARE_ERROR;
+ }
+
+ status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
+ if (status != 0) {
+ printk(KERN_ERR
+ "RTAS query-cpu-stopped-state failed: %i\n", status);
+ return status;
+ }
+
+ return cpu_status;
+}
+
/**
* smp_startup_cpu() - start the given cpu
*
@@ -81,6 +103,12 @@ static inline int __devinit smp_startup_
pcpu = get_hard_smp_processor_id(lcpu);
+ /* Check to see if the CPU is out of FW already for kexec */
+ if (smp_query_cpu_stopped(pcpu) == QCSS_NOT_STOPPED) {
+ cpu_set(lcpu, of_spin_map);
+ return 1;
+ }
+
/* Fixup atomic count: it exited inside IRQ handler. */
task_thread_info(paca[lcpu].__current)->preempt_count = 0;
file:653c6a1787404480f532d7f033223540e2fcc616 -> file:08f883842e21bac1d673e82fe7560602501e0c16
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -632,7 +632,7 @@ long compat_arch_ptrace(struct task_stru
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
- long ret;
+ long ret = 0;
/* Do the secure computing check first. */
secure_computing(regs->gprs[2]);
@@ -641,7 +641,6 @@ asmlinkage long do_syscall_trace_enter(s
* The sysc_tracesys code in entry.S stored the system
* call number to gprs[2].
*/
- ret = regs->gprs[2];
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
(tracehook_report_syscall_entry(regs) ||
regs->gprs[2] >= NR_syscalls)) {
@@ -663,7 +662,7 @@ asmlinkage long do_syscall_trace_enter(s
regs->gprs[2], regs->orig_gpr2,
regs->gprs[3], regs->gprs[4],
regs->gprs[5]);
- return ret;
+ return ret ?: regs->gprs[2];
}
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
file:ca2d31277b3b46eee836d1c34403521377732692 -> file:75fbf199d71ec79cc2d9b5e61a66232b305bd3d9
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -338,11 +338,13 @@ struct kvm_vcpu *kvm_arch_vcpu_create(st
rc = kvm_vcpu_init(vcpu, kvm, id);
if (rc)
- goto out_free_cpu;
+ goto out_free_sie_block;
VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
vcpu->arch.sie_block);
return vcpu;
+out_free_sie_block:
+ free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
kfree(vcpu);
out_nomem:
file:fd56a71ca9d9bbf47c3080d2473f2d4ad2ac0478 -> file:974ba71df4a86095cd1985e6b5fdeed1fd4fc673
--- a/arch/sh/boot/compressed/misc.c
+++ b/arch/sh/boot/compressed/misc.c
@@ -132,7 +132,7 @@ void decompress_kernel(void)
output_addr = (CONFIG_MEMORY_START + 0x2000);
#else
output_addr = PHYSADDR((unsigned long)&_text+PAGE_SIZE);
-#ifdef CONFIG_29BIT
+#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED)
output_addr |= P2SEG;
#endif
#endif
file:ccb1d93bb04361678b967fd80151c43427263b27 -> file:bf6939cc4744836a5a42671810bdff2681919bbf
--- a/arch/sh/include/asm/elf.h
+++ b/arch/sh/include/asm/elf.h
@@ -212,7 +212,9 @@ extern void __kernel_vsyscall;
#define VSYSCALL_AUX_ENT \
if (vdso_enabled) \
- NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
+ else \
+ NEW_AUX_ENT(AT_IGNORE, 0);
#else
#define VSYSCALL_AUX_ENT
#endif /* CONFIG_VSYSCALL */
@@ -220,7 +222,7 @@ extern void __kernel_vsyscall;
#ifdef CONFIG_SH_FPU
#define FPU_AUX_ENT NEW_AUX_ENT(AT_FPUCW, FPSCR_INIT)
#else
-#define FPU_AUX_ENT
+#define FPU_AUX_ENT NEW_AUX_ENT(AT_IGNORE, 0)
#endif
extern int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
file:160db1003cfb121dde1b41b1463de400805495eb -> file:71a9c3c47e6fb732fb3e9fc3cd489a4cb163b4d1
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -69,6 +69,7 @@ asmlinkage void __cpuinit start_secondar
unsigned int cpu;
struct mm_struct *mm = &init_mm;
+ enable_mmu();
atomic_inc(&mm->mm_count);
atomic_inc(&mm->mm_users);
current->active_mm = mm;
file:93fe21e02c86b0c7607f06c813532223220a68cf -> file:679c7504625acc86deab57d1e1feada4ef408317
--- a/arch/sparc/include/asm/io_32.h
+++ b/arch/sparc/include/asm/io_32.h
@@ -8,7 +8,7 @@
#include <asm/page.h> /* IO address mapping routines need this */
#include <asm/system.h>
-#define page_to_phys(page) (((page) - mem_map) << PAGE_SHIFT)
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
static inline u32 flip_dword (u32 l)
{
file:f72080bdda947ec81edce5206842d836755f4521 -> file:156707b0f18d1ba4a5fbd01fc972fbec8c529c0d
--- a/arch/sparc/include/asm/page_32.h
+++ b/arch/sparc/include/asm/page_32.h
@@ -143,7 +143,7 @@ extern unsigned long pfn_base;
#define phys_to_virt __va
#define ARCH_PFN_OFFSET (pfn_base)
-#define virt_to_page(kaddr) (mem_map + ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT)))
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_valid(pfn) (((pfn) >= (pfn_base)) && (((pfn)-(pfn_base)) < max_mapnr))
#define virt_addr_valid(kaddr) ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT) < max_mapnr)
file:55db5eca08e25da163b4c2156c51783b5b036f74 -> file:a232e9e1f4e515d19e8f089872a797c746241777
--- a/arch/sparc/include/asm/stat.h
+++ b/arch/sparc/include/asm/stat.h
@@ -53,8 +53,8 @@ struct stat {
ino_t st_ino;
mode_t st_mode;
short st_nlink;
- uid_t st_uid;
- gid_t st_gid;
+ unsigned short st_uid;
+ unsigned short st_gid;
unsigned short st_rdev;
off_t st_size;
time_t st_atime;
file:f3b5466c389cc5fe1a4e44decf12b3e1a65bae9c -> file:4589ca33220ff6463dbd7191259d01debb5fb13f
--- a/arch/sparc/kernel/central.c
+++ b/arch/sparc/kernel/central.c
@@ -99,7 +99,7 @@ static int __devinit clock_board_probe(s
p->leds_resource.start = (unsigned long)
(p->clock_regs + CLOCK_CTRL);
- p->leds_resource.end = p->leds_resource.end;
+ p->leds_resource.end = p->leds_resource.start;
p->leds_resource.name = "leds";
p->leds_pdev.name = "sunfire-clockboard-leds";
@@ -194,7 +194,7 @@ static int __devinit fhc_probe(struct of
if (!p->central) {
p->leds_resource.start = (unsigned long)
(p->pregs + FHC_PREGS_CTRL);
- p->leds_resource.end = p->leds_resource.end;
+ p->leds_resource.end = p->leds_resource.start;
p->leds_resource.name = "leds";
p->leds_pdev.name = "sunfire-fhc-leds";
file:2830b415e2147ecfda36acc99d620408472128bd -> file:c49865b30719d2e8113313591834f34d4d5fa063
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -526,7 +526,7 @@ int copy_thread(unsigned long clone_flag
* Set some valid stack frames to give to the child.
*/
childstack = (struct sparc_stackf __user *)
- (sp & ~0x7UL);
+ (sp & ~0xfUL);
parentstack = (struct sparc_stackf __user *)
regs->u_regs[UREG_FP];
file:c3f1cce0e95e78ec40d466fb2a82f747e1590c48 -> file:cb70476bd8f5ccd72a6f72900195f8a74f6abfcb
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -398,11 +398,11 @@ static unsigned long clone_stackframe(un
} else
__get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));
- /* Now 8-byte align the stack as this is mandatory in the
- * Sparc ABI due to how register windows work. This hides
- * the restriction from thread libraries etc. -DaveM
+ /* Now align the stack as this is mandatory in the Sparc ABI
+ * due to how register windows work. This hides the
+ * restriction from thread libraries etc.
*/
- csp &= ~7UL;
+ csp &= ~15UL;
distance = fp - psp;
rval = (csp - distance);
file:ba5b09ad6666397a953af76945205961868edb29 -> file:ea22cd373c64f4bc371478d2b7be14f71cd32560
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -120,8 +120,8 @@ struct rt_signal_frame32 {
};
/* Align macros */
-#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 7) & (~7)))
-#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 7) & (~7)))
+#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 15) & (~15)))
+#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 15) & (~15)))
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
@@ -420,15 +420,17 @@ static void __user *get_sigframe(struct
sp = current->sas_ss_sp + current->sas_ss_size;
}
+ sp -= framesize;
+
/* Always align the stack frame. This handles two cases. First,
* sigaltstack need not be mindful of platform specific stack
* alignment. Second, if we took this signal because the stack
* is not aligned properly, we'd like to take the signal cleanly
* and report that.
*/
- sp &= ~7UL;
+ sp &= ~15UL;
- return (void __user *)(sp - framesize);
+ return (void __user *) sp;
}
static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
file:7ce1a1005b1da4c3b13f8b87927d060376279f2a -> file:9882df92ba0a2c8b8da4639f7e181214930c8ed6
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -267,15 +267,17 @@ static inline void __user *get_sigframe(
sp = current->sas_ss_sp + current->sas_ss_size;
}
+ sp -= framesize;
+
/* Always align the stack frame. This handles two cases. First,
* sigaltstack need not be mindful of platform specific stack
* alignment. Second, if we took this signal because the stack
* is not aligned properly, we'd like to take the signal cleanly
* and report that.
*/
- sp &= ~7UL;
+ sp &= ~15UL;
- return (void __user *)(sp - framesize);
+ return (void __user *) sp;
}
static inline int
file:647afbda7ae1f170896675dcc4603dfe5c58ec0b -> file:9fa48c30037e5356c2f686be695ea8bcfb3613f3
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -353,7 +353,7 @@ segv:
/* Checks if the fp is valid */
static int invalid_frame_pointer(void __user *fp, int fplen)
{
- if (((unsigned long) fp) & 7)
+ if (((unsigned long) fp) & 15)
return 1;
return 0;
}
@@ -396,15 +396,17 @@ static inline void __user *get_sigframe(
sp = current->sas_ss_sp + current->sas_ss_size;
}
+ sp -= framesize;
+
/* Always align the stack frame. This handles two cases. First,
* sigaltstack need not be mindful of platform specific stack
* alignment. Second, if we took this signal because the stack
* is not aligned properly, we'd like to take the signal cleanly
* and report that.
*/
- sp &= ~7UL;
+ sp &= ~15UL;
- return (void __user *)(sp - framesize);
+ return (void __user *) sp;
}
static inline void
file:8c91d9b29a2f24023849498a41f440b5ec1e6938 -> file:db15d123f05447f9eac74c5fd168c14084eb25a3
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -191,10 +191,12 @@ tsb_dtlb_load:
tsb_itlb_load:
/* Executable bit must be set. */
-661: andcc %g5, _PAGE_EXEC_4U, %g0
- .section .sun4v_1insn_patch, "ax"
+661: sethi %hi(_PAGE_EXEC_4U), %g4
+ andcc %g5, %g4, %g0
+ .section .sun4v_2insn_patch, "ax"
.word 661b
andcc %g5, _PAGE_EXEC_4V, %g0
+ nop
.previous
be,pn %xcc, tsb_do_fault
file:4b7c937bba61704cf386e76c7907bbdcf1bcb759 -> file:815cab6be5a39aeb47dc4734f44de977b95cde74
--- a/arch/sparc/prom/p1275.c
+++ b/arch/sparc/prom/p1275.c
@@ -32,8 +32,7 @@ extern void prom_cif_interface(void);
extern void prom_cif_callback(void);
/*
- * This provides SMP safety on the p1275buf. prom_callback() drops this lock
- * to allow recursuve acquisition.
+ * This provides SMP safety on the p1275buf.
*/
DEFINE_SPINLOCK(prom_entry_lock);
@@ -47,7 +46,9 @@ long p1275_cmd(const char *service, long
p = p1275buf.prom_buffer;
- spin_lock_irqsave(&prom_entry_lock, flags);
+ raw_local_save_flags(flags);
+ raw_local_irq_restore(PIL_NMI);
+ spin_lock(&prom_entry_lock);
p1275buf.prom_args[0] = (unsigned long)p; /* service */
strcpy (p, service);
@@ -139,7 +140,8 @@ long p1275_cmd(const char *service, long
va_end(list);
x = p1275buf.prom_args [nargs + 3];
- spin_unlock_irqrestore(&prom_entry_lock, flags);
+ spin_unlock(&prom_entry_lock);
+ raw_local_irq_restore(flags);
return x;
}
file:2201e9c20e4a85ec4673939f27e15a3ef3431f94 -> file:c1ea9eb04466b00a35c8a895d09887546e78abfc
--- a/arch/um/sys-x86_64/Makefile
+++ b/arch/um/sys-x86_64/Makefile
@@ -8,7 +8,8 @@ obj-y = bug.o bugs.o delay.o fault.o ldt
setjmp.o signal.o stub.o stub_segv.o syscalls.o syscall_table.o \
sysrq.o ksyms.o tls.o
-subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o
+subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o \
+ lib/rwsem_64.o
subarch-obj-$(CONFIG_MODULES) += kernel/module.o
ldt-y = ../sys-i386/ldt.o
file:4fdb669e84692a53dc07c2b21494ec18bd444e8d -> file:fbc161d05fed2c365bd3e1ae15021bf177002217
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -622,7 +622,7 @@ config GART_IOMMU
bool "GART IOMMU support" if EMBEDDED
default y
select SWIOTLB
- depends on X86_64 && PCI
+ depends on X86_64 && PCI && K8_NB
---help---
Support for full DMA access of devices with 32bit memory access only
on systems with more than 3GB. This is usually needed for USB,
@@ -1236,6 +1236,11 @@ config ARCH_MEMORY_PROBE
def_bool X86_64
depends on MEMORY_HOTPLUG
+config ILLEGAL_POINTER_VALUE
+ hex
+ default 0 if X86_32
+ default 0xdead000000000000 if X86_64
+
source "mm/Kconfig"
config HIGHPTE
@@ -2022,7 +2027,7 @@ endif # X86_32
config K8_NB
def_bool y
- depends on AGP_AMD64 || (X86_64 && (GART_IOMMU || (PCI && NUMA)))
+ depends on CPU_SUP_AMD && PCI
source "drivers/pcmcia/Kconfig"
file:f2824fb8c79cad23ad747977e1e32ab4d6b555a1 -> file:0e56610335655d8d379105bb3f6d50060ffd2efa
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -323,7 +323,7 @@ config X86_L1_CACHE_SHIFT
config X86_XADD
def_bool y
- depends on X86_32 && !M386
+ depends on X86_64 || !M386
config X86_PPRO_FENCE
bool "PentiumPro memory ordering errata workaround"
file:f9f472462753c1dd41f1a4a3087769dfdc0af6ec -> file:14531abdd0ced75cc2f5a60e9ff29fe0308a91b8
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -327,7 +327,6 @@ static int load_aout_binary(struct linux
current->mm->free_area_cache = TASK_UNMAPPED_BASE;
current->mm->cached_hole_size = 0;
- current->mm->mmap = NULL;
install_exec_creds(bprm);
current->flags &= ~PF_FORKNOEXEC;
file:9cfc88b97742c45492d814b1d30ae3b60139da90 -> file:2cbf0a279ab9de1a796cfa9adea053eff165b740
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -153,6 +153,7 @@
#define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */
#define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */
#define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */
+#define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */
/*
* Auxiliary flags: Linux defined - For features scattered in various
file:14f9890eb495a31a203fa780b3caf16b018d68ef -> file:c22a1648113dc5b4a255677a125c90a2c8d9a927
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -82,6 +82,9 @@ enum fixed_addresses {
#endif
FIX_DBGP_BASE,
FIX_EARLYCON_MEM_BASE,
+#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
+ FIX_OHCI1394_BASE,
+#endif
#ifdef CONFIG_X86_LOCAL_APIC
FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
#endif
@@ -126,9 +129,6 @@ enum fixed_addresses {
FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
(__end_of_permanent_fixed_addresses & 255),
FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
-#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
- FIX_OHCI1394_BASE,
-#endif
#ifdef CONFIG_X86_32
FIX_WP_TEST,
#endif
file:7c7c16cde1f8f3b5ddcf65f6a2f40d537368ad86 -> file:5f61f6e0ffdd73039bff49159ff41034a6efdde4
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -160,6 +160,7 @@ extern int io_apic_get_redir_entries(int
struct io_apic_irq_attr;
extern int io_apic_set_pci_routing(struct device *dev, int irq,
struct io_apic_irq_attr *irq_attr);
+void setup_IO_APIC_irq_extra(u32 gsi);
extern int (*ioapic_renumber_irq)(int ioapic, int irq);
extern void ioapic_init_mappings(void);
extern void ioapic_insert_resources(void);
file:c2d1f3b58e5f1342607280be6a71d434796482dd -> file:f0746f46990dd17f2cac51493086f53de40ea682
--- a/arch/x86/include/asm/k8.h
+++ b/arch/x86/include/asm/k8.h
@@ -13,11 +13,16 @@ extern void k8_flush_garts(void);
extern int k8_scan_nodes(unsigned long start, unsigned long end);
#ifdef CONFIG_K8_NB
+extern int num_k8_northbridges;
+
static inline struct pci_dev *node_to_k8_nb_misc(int node)
{
return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL;
}
+
#else
+#define num_k8_northbridges 0
+
static inline struct pci_dev *node_to_k8_nb_misc(int node)
{
return NULL;
file:7c18e1230f5490f1f7a58bd103302ffcb8bf20b1 -> file:5ed59ec92534124348301c54403277f02c8459e2
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -54,13 +54,23 @@ struct x86_emulate_ctxt;
struct x86_emulate_ops {
/*
* read_std: Read bytes of standard (non-emulated/special) memory.
- * Used for instruction fetch, stack operations, and others.
+ * Used for descriptor reading.
* @addr: [IN ] Linear address from which to read.
* @val: [OUT] Value read from memory, zero-extended to 'u_long'.
* @bytes: [IN ] Number of bytes to read from memory.
*/
int (*read_std)(unsigned long addr, void *val,
- unsigned int bytes, struct kvm_vcpu *vcpu);
+ unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+
+ /*
+ * fetch: Read bytes of standard (non-emulated/special) memory.
+ * Used for instruction fetch.
+ * @addr: [IN ] Linear address from which to read.
+ * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
+ * @bytes: [IN ] Number of bytes to read from memory.
+ */
+ int (*fetch)(unsigned long addr, void *val,
+ unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
/*
* read_emulated: Read bytes from emulated/special memory area.
@@ -168,6 +178,7 @@ struct x86_emulate_ctxt {
/* Execution mode, passed to the emulator. */
#define X86EMUL_MODE_REAL 0 /* Real mode. */
+#define X86EMUL_MODE_VM86 1 /* Virtual 8086 mode. */
#define X86EMUL_MODE_PROT16 2 /* 16-bit protected mode. */
#define X86EMUL_MODE_PROT32 4 /* 32-bit protected mode. */
#define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */
file:d759a1f55084168d2b5006e74b466e4eb24d7533 -> file:91139545b122a23b8f235be0160894bc057cab59
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -193,6 +193,7 @@ union kvm_mmu_page_role {
unsigned invalid:1;
unsigned cr4_pge:1;
unsigned nxe:1;
+ unsigned cr0_wp:1;
};
};
@@ -256,7 +257,8 @@ struct kvm_mmu {
void (*new_cr3)(struct kvm_vcpu *vcpu);
int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
void (*free)(struct kvm_vcpu *vcpu);
- gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
+ gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
+ u32 *error);
void (*prefetch_page)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *page);
int (*sync_page)(struct kvm_vcpu *vcpu,
@@ -601,8 +603,7 @@ int emulator_set_dr(struct x86_emulate_c
unsigned long value);
void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
-int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
- int type_bits, int seg);
+int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
@@ -645,6 +646,10 @@ void __kvm_mmu_free_some_pages(struct kv
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
+gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
@@ -658,6 +663,7 @@ void kvm_disable_tdp(void);
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
int complete_pio(struct kvm_vcpu *vcpu);
+bool kvm_check_iopl(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn);
file:4ffe09b2ad7511c3e50a1f4465923c2f522dd3d6 -> file:a7e502fdb16cf44ebea3cc7d7fb9ff8cedc38b38
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -104,6 +104,9 @@
#define MSR_AMD64_PATCH_LEVEL 0x0000008b
#define MSR_AMD64_NB_CFG 0xc001001f
#define MSR_AMD64_PATCH_LOADER 0xc0010020
+#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
+#define MSR_AMD64_OSVW_STATUS 0xc0010141
+#define MSR_AMD64_DC_CFG 0xc0011022
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
@@ -123,6 +126,7 @@
#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2
#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffff
#define FAM10H_MMIO_CONF_BASE_SHIFT 20
+#define MSR_FAM10H_NODE_ID 0xc001100c
/* K8 MSRs */
#define MSR_K8_TOP_MEM1 0xc001001a
@@ -195,8 +199,9 @@
#define MSR_IA32_EBL_CR_POWERON 0x0000002a
#define MSR_IA32_FEATURE_CONTROL 0x0000003a
-#define FEATURE_CONTROL_LOCKED (1<<0)
-#define FEATURE_CONTROL_VMXON_ENABLED (1<<2)
+#define FEATURE_CONTROL_LOCKED (1<<0)
+#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1)
+#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
#define MSR_IA32_APICBASE 0x0000001b
#define MSR_IA32_APICBASE_BSP (1<<8)
file:0e8c2a0fd9222d4b75793fb664591de22461e7d7 -> file:271de94c3810d9bbdb2c01bbfbb43f221b46ee24
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -23,6 +23,11 @@ static inline void paravirt_release_pud(
#endif
/*
+ * Flags to use when allocating a user page table page.
+ */
+extern gfp_t __userpte_alloc_gfp;
+
+/*
* Allocate and free page tables.
*/
extern pgd_t *pgd_alloc(struct mm_struct *);
file:ca7517d3377634b6251ce727326d06dd30627fc5 -> file:606ede126972e568b992268da3df6f64b7e98480
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -41,6 +41,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/lockdep.h>
+#include <asm/asm.h>
struct rwsem_waiter;
@@ -55,17 +56,28 @@ extern asmregparm struct rw_semaphore *
/*
* the semaphore definition
+ *
+ * The bias values and the counter type limit the number of
+ * potential readers/writers to 32767 for 32 bits and 2147483647
+ * for 64 bits.
*/
-#define RWSEM_UNLOCKED_VALUE 0x00000000
-#define RWSEM_ACTIVE_BIAS 0x00000001
-#define RWSEM_ACTIVE_MASK 0x0000ffff
-#define RWSEM_WAITING_BIAS (-0x00010000)
+#ifdef CONFIG_X86_64
+# define RWSEM_ACTIVE_MASK 0xffffffffL
+#else
+# define RWSEM_ACTIVE_MASK 0x0000ffffL
+#endif
+
+#define RWSEM_UNLOCKED_VALUE 0x00000000L
+#define RWSEM_ACTIVE_BIAS 0x00000001L
+#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+typedef signed long rwsem_count_t;
+
struct rw_semaphore {
- signed long count;
+ rwsem_count_t count;
spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -105,7 +117,7 @@ do { \
static inline void __down_read(struct rw_semaphore *sem)
{
asm volatile("# beginning down_read\n\t"
- LOCK_PREFIX " incl (%%eax)\n\t"
+ LOCK_PREFIX _ASM_INC "(%1)\n\t"
/* adds 0x00000001, returns the old value */
" jns 1f\n"
" call call_rwsem_down_read_failed\n"
@@ -121,14 +133,14 @@ static inline void __down_read(struct rw
*/
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
- __s32 result, tmp;
+ rwsem_count_t result, tmp;
asm volatile("# beginning __down_read_trylock\n\t"
- " movl %0,%1\n\t"
+ " mov %0,%1\n\t"
"1:\n\t"
- " movl %1,%2\n\t"
- " addl %3,%2\n\t"
+ " mov %1,%2\n\t"
+ " add %3,%2\n\t"
" jle 2f\n\t"
- LOCK_PREFIX " cmpxchgl %2,%0\n\t"
+ LOCK_PREFIX " cmpxchg %2,%0\n\t"
" jnz 1b\n\t"
"2:\n\t"
"# ending __down_read_trylock\n\t"
@@ -143,13 +155,13 @@ static inline int __down_read_trylock(st
*/
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
- int tmp;
+ rwsem_count_t tmp;
tmp = RWSEM_ACTIVE_WRITE_BIAS;
asm volatile("# beginning down_write\n\t"
- LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
/* subtract 0x0000ffff, returns the old value */
- " testl %%edx,%%edx\n\t"
+ " test %1,%1\n\t"
/* was the count 0 before? */
" jz 1f\n"
" call call_rwsem_down_write_failed\n"
@@ -170,9 +182,9 @@ static inline void __down_write(struct r
*/
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
- signed long ret = cmpxchg(&sem->count,
- RWSEM_UNLOCKED_VALUE,
- RWSEM_ACTIVE_WRITE_BIAS);
+ rwsem_count_t ret = cmpxchg(&sem->count,
+ RWSEM_UNLOCKED_VALUE,
+ RWSEM_ACTIVE_WRITE_BIAS);
if (ret == RWSEM_UNLOCKED_VALUE)
return 1;
return 0;
@@ -183,9 +195,9 @@ static inline int __down_write_trylock(s
*/
static inline void __up_read(struct rw_semaphore *sem)
{
- __s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
+ rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
asm volatile("# beginning __up_read\n\t"
- LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
/* subtracts 1, returns the old value */
" jns 1f\n\t"
" call call_rwsem_wake\n"
@@ -201,18 +213,18 @@ static inline void __up_read(struct rw_s
*/
static inline void __up_write(struct rw_semaphore *sem)
{
+ rwsem_count_t tmp;
asm volatile("# beginning __up_write\n\t"
- " movl %2,%%edx\n\t"
- LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
/* tries to transition
0xffff0001 -> 0x00000000 */
" jz 1f\n"
" call call_rwsem_wake\n"
"1:\n\t"
"# ending __up_write\n"
- : "+m" (sem->count)
- : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
- : "memory", "cc", "edx");
+ : "+m" (sem->count), "=d" (tmp)
+ : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS)
+ : "memory", "cc");
}
/*
@@ -221,33 +233,38 @@ static inline void __up_write(struct rw_
static inline void __downgrade_write(struct rw_semaphore *sem)
{
asm volatile("# beginning __downgrade_write\n\t"
- LOCK_PREFIX " addl %2,(%%eax)\n\t"
- /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
+ LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
+ /*
+ * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
+ * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
+ */
" jns 1f\n\t"
" call call_rwsem_downgrade_wake\n"
"1:\n\t"
"# ending __downgrade_write\n"
: "+m" (sem->count)
- : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
+ : "a" (sem), "er" (-RWSEM_WAITING_BIAS)
: "memory", "cc");
}
/*
* implement atomic add functionality
*/
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(rwsem_count_t delta,
+ struct rw_semaphore *sem)
{
- asm volatile(LOCK_PREFIX "addl %1,%0"
+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
: "+m" (sem->count)
- : "ir" (delta));
+ : "er" (delta));
}
/*
* implement exchange and add functionality
*/
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
+ struct rw_semaphore *sem)
{
- int tmp = delta;
+ rwsem_count_t tmp = delta;
asm volatile(LOCK_PREFIX "xadd %0,%1"
: "+r" (tmp), "+m" (sem->count)
file:1e796782cd7b9e606e59f2f5c39b4819fb9131c7 -> file:4cfc908240684c561b4554281c36850643fc139f
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -135,6 +135,8 @@ int native_cpu_disable(void);
void native_cpu_die(unsigned int cpu);
void native_play_dead(void);
void play_dead_common(void);
+void wbinvd_on_cpu(int cpu);
+int wbinvd_on_all_cpus(void);
void native_send_call_func_ipi(const struct cpumask *mask);
void native_send_call_func_single_ipi(int cpu);
@@ -147,6 +149,13 @@ static inline int num_booting_cpus(void)
{
return cpumask_weight(cpu_callout_mask);
}
+#else /* !CONFIG_SMP */
+#define wbinvd_on_cpu(cpu) wbinvd()
+static inline int wbinvd_on_all_cpus(void)
+{
+ wbinvd();
+ return 0;
+}
#endif /* CONFIG_SMP */
extern unsigned disabled_cpus __cpuinitdata;
file:48dcfa62ea07eb5985ef61c526ed767da3795283 -> file:fd921c3a68414e341fe8fffa8b1df1326eba2ba7
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -15,6 +15,8 @@ static inline int arch_prepare_suspend(v
struct saved_context {
u16 es, fs, gs, ss;
unsigned long cr0, cr2, cr3, cr4;
+ u64 misc_enable;
+ bool misc_enable_saved;
struct desc_ptr gdt;
struct desc_ptr idt;
u16 ldt;
file:06284f42b7599a8f46669fc3e4f6c9d8bbea7eb7 -> file:8d942afae681bec8fe2c47a9bf57955dbc4317b0
--- a/arch/x86/include/asm/suspend_64.h
+++ b/arch/x86/include/asm/suspend_64.h
@@ -27,6 +27,8 @@ struct saved_context {
u16 ds, es, fs, gs, ss;
unsigned long gs_base, gs_kernel_base, fs_base;
unsigned long cr0, cr2, cr3, cr4, cr8;
+ u64 misc_enable;
+ bool misc_enable_saved;
unsigned long efer;
u16 gdt_pad;
u16 gdt_limit;
file:f08f973748922b26a2417ed70db47fec2b54b009 -> file:e0fbf294536c6e96cb7b0169cd263cfe1bd8133d
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -449,7 +449,7 @@ void stop_this_cpu(void *dummy);
*
* (Could use an alternative three way for this if there was one.)
*/
-static inline void rdtsc_barrier(void)
+static __always_inline void rdtsc_barrier(void)
{
alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
file:67e929b89875bea8c06c5b8c454a43332862eb46 -> file:23c2da87df19c382c476fe798b5cdb073065b96c
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -446,6 +446,12 @@ void __init acpi_pic_sci_set_trigger(uns
int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
{
*irq = gsi;
+
+#ifdef CONFIG_X86_IO_APIC
+ if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC)
+ setup_IO_APIC_irq_extra(gsi);
+#endif
+
return 0;
}
@@ -473,7 +479,8 @@ int acpi_register_gsi(struct device *dev
plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity);
}
#endif
- acpi_gsi_to_irq(plat_gsi, &irq);
+ irq = plat_gsi;
+
return irq;
}
@@ -1184,9 +1191,6 @@ static void __init acpi_process_madt(voi
if (!error) {
acpi_lapic = 1;
-#ifdef CONFIG_X86_BIGSMP
- generic_bigsmp_probe();
-#endif
/*
* Parse MADT IO-APIC entries
*/
@@ -1196,8 +1200,6 @@ static void __init acpi_process_madt(voi
acpi_ioapic = 1;
smp_found_config = 1;
- if (apic->setup_apic_routing)
- apic->setup_apic_routing();
}
}
if (error == -EINVAL) {
@@ -1347,14 +1349,6 @@ static struct dmi_system_id __initdata a
},
},
{
- .callback = force_acpi_ht,
- .ident = "ASUS P2B-DS",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
- DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
- },
- },
- {
.callback = force_acpi_ht,
.ident = "ASUS CUR-DLS",
.matches = {
file:2e837f5080fe56d3ad0c87d308564cf157147624 -> file:fb7a5f052e2b8766d11115e3f7fc174fadf6ac2f
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -145,6 +145,15 @@ int acpi_processor_ffh_cstate_probe(unsi
percpu_entry->states[cx->index].eax = cx->address;
percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
}
+
+ /*
+ * For _CST FFH on Intel, if GAS.access_size bit 1 is cleared,
+ * then we should skip checking BM_STS for this C-state.
+ * ref: "Intel Processor Vendor-Specific ACPI Interface Specification"
+ */
+ if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2))
+ cx->bm_sts_skip = 1;
+
return retval;
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
file:23fc9fe1625f58494f2ff16780df93074b86916c -> file:f0fa7a1782d59771207a795a274456653735768b
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -544,7 +544,7 @@ static void flush_devices_by_domain(stru
for (i = 0; i <= amd_iommu_last_bdf; ++i) {
if ((domain == NULL && amd_iommu_pd_table[i] == NULL) ||
- (amd_iommu_pd_table[i] != domain))
+ (domain != NULL && amd_iommu_pd_table[i] != domain))
continue;
iommu = amd_iommu_rlookup_table[i];
@@ -2239,9 +2239,7 @@ static void amd_iommu_domain_destroy(str
free_pagetable(domain);
- domain_id_free(domain->id);
-
- kfree(domain);
+ protection_domain_free(domain);
dom->priv = NULL;
}
file:362ab88c73ac0bedd5f4ce4765ebe596d8c0924b -> file:3925adfba7654c1c40da35e5b30a96141d67ea5d
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -1284,6 +1284,8 @@ int __init amd_iommu_init(void)
if (ret)
goto free;
+ enable_iommus();
+
if (iommu_pass_through)
ret = amd_iommu_init_passthrough();
else
@@ -1294,8 +1296,6 @@ int __init amd_iommu_init(void)
amd_iommu_init_api();
- enable_iommus();
-
if (iommu_pass_through)
goto out;
@@ -1314,6 +1314,8 @@ out:
return ret;
free:
+ disable_iommus();
+
free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
get_order(MAX_DOMAIN_ID/8));
file:128111d8ffe0de7ffcdaf0891221ff37d0ae61f8 -> file:082089ec5594f365d3a41f272d7d86d1735c0b72
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -389,6 +389,7 @@ void __init gart_iommu_hole_init(void)
for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
int bus;
int dev_base, dev_limit;
+ u32 ctl;
bus = bus_dev_ranges[i].bus;
dev_base = bus_dev_ranges[i].dev_base;
@@ -401,7 +402,19 @@ void __init gart_iommu_hole_init(void)
iommu_detected = 1;
gart_iommu_aperture = 1;
- aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7;
+ ctl = read_pci_config(bus, slot, 3,
+ AMD64_GARTAPERTURECTL);
+
+ /*
+ * Before we do anything else disable the GART. It may
+ * still be enabled if we boot into a crash-kernel here.
+ * Reconfiguring the GART while it is enabled could have
+ * unknown side-effects.
+ */
+ ctl &= ~GARTEN;
+ write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
+
+ aper_order = (ctl >> 1) & 7;
aper_size = (32 * 1024 * 1024) << aper_order;
aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
aper_base <<= 25;
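
gart_iommu_hole_init() now clears GARTEN before decoding the aperture control word; the decode itself is simple bit arithmetic. A small stand-alone illustration follows (the register value is made up; GARTEN mirrors the kernel's bit-0 definition):

	#include <stdio.h>
	#include <stdint.h>

	#define GARTEN	(1u << 0)		/* GART enable bit, as masked off above */

	int main(void)
	{
		uint32_t ctl = 0x0b;		/* hypothetical AMD64_GARTAPERTURECTL value */
		uint32_t order, size_mb;

		ctl &= ~GARTEN;			/* disable before touching anything else */
		order = (ctl >> 1) & 7;		/* bits 3:1 encode the aperture order */
		size_mb = 32u << order;		/* 32 MB << order, as in the code above */

		printf("aper_order=%u, aperture size=%u MB\n", order, size_mb);
		return 0;
	}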
file:c86dbcf39e8d65142974b10a56ff752cc782ef5f -> file:bc9cd5aea79bd74e1ad230c003174a0a70e06627
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -941,7 +941,7 @@ void disable_local_APIC(void)
unsigned int value;
/* APIC hasn't been mapped yet */
- if (!apic_phys)
+ if (!x2apic_mode && !apic_phys)
return;
clear_local_APIC();
@@ -1664,8 +1664,8 @@ int __init APIC_init_uniprocessor(void)
}
#endif
+#ifndef CONFIG_SMP
enable_IR_x2apic();
-#ifdef CONFIG_X86_64
default_setup_apic_routing();
#endif
@@ -1915,18 +1915,6 @@ void __cpuinit generic_processor_info(in
if (apicid > max_physical_apicid)
max_physical_apicid = apicid;
-#ifdef CONFIG_X86_32
- switch (boot_cpu_data.x86_vendor) {
- case X86_VENDOR_INTEL:
- if (num_processors > 8)
- def_to_bigsmp = 1;
- break;
- case X86_VENDOR_AMD:
- if (max_physical_apicid >= 8)
- def_to_bigsmp = 1;
- }
-#endif
-
#if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
file:c107e837ed7d6a8ff5b66a749fd0beb1e241772f -> file:dc4f486fc39e4e481b6942cb8bf94520311bf340
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1550,6 +1550,56 @@ static void __init setup_IO_APIC_irqs(vo
}
/*
+ * for a gsi that is not in the first ioapic
+ * but cannot use acpi_register_gsi(),
+ * like some special sci on the IBM x3330
+ */
+void setup_IO_APIC_irq_extra(u32 gsi)
+{
+ int apic_id = 0, pin, idx, irq;
+ int node = cpu_to_node(boot_cpu_id);
+ struct irq_desc *desc;
+ struct irq_cfg *cfg;
+
+ /*
+ * Convert 'gsi' to 'ioapic.pin'.
+ */
+ apic_id = mp_find_ioapic(gsi);
+ if (apic_id < 0)
+ return;
+
+ pin = mp_find_ioapic_pin(apic_id, gsi);
+ idx = find_irq_entry(apic_id, pin, mp_INT);
+ if (idx == -1)
+ return;
+
+ irq = pin_2_irq(idx, apic_id, pin);
+#ifdef CONFIG_SPARSE_IRQ
+ desc = irq_to_desc(irq);
+ if (desc)
+ return;
+#endif
+ desc = irq_to_desc_alloc_node(irq, node);
+ if (!desc) {
+ printk(KERN_INFO "can not get irq_desc for %d\n", irq);
+ return;
+ }
+
+ cfg = desc->chip_data;
+ add_pin_to_irq_node(cfg, node, apic_id, pin);
+
+ if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) {
+ pr_debug("Pin %d-%d already programmed\n",
+ mp_ioapics[apic_id].apicid, pin);
+ return;
+ }
+ set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed);
+
+ setup_IO_APIC_irq(apic_id, pin, irq, desc,
+ irq_trigger(idx), irq_polarity(idx));
+}
+
+/*
* Set up the timer pin, possibly with the 8259A-master behind.
*/
static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
@@ -3165,12 +3215,9 @@ unsigned int create_irq_nr(unsigned int
}
spin_unlock_irqrestore(&vector_lock, flags);
- if (irq > 0) {
- dynamic_irq_init(irq);
- /* restore it, in case dynamic_irq_init clear it */
- if (desc_new)
- desc_new->chip_data = cfg_new;
- }
+ if (irq > 0)
+ dynamic_irq_init_keep_chip_data(irq);
+
return irq;
}
@@ -3193,17 +3240,12 @@ void destroy_irq(unsigned int irq)
{
unsigned long flags;
struct irq_cfg *cfg;
- struct irq_desc *desc;
- /* store it, in case dynamic_irq_cleanup clear it */
- desc = irq_to_desc(irq);
- cfg = desc->chip_data;
- dynamic_irq_cleanup(irq);
- /* connect back irq_cfg */
- desc->chip_data = cfg;
+ dynamic_irq_cleanup_keep_chip_data(irq);
free_irte(irq);
spin_lock_irqsave(&vector_lock, flags);
+ cfg = irq_to_desc(irq)->chip_data;
__clear_irq_vector(irq, cfg);
spin_unlock_irqrestore(&vector_lock, flags);
}
file:0c0182cc947d0ac95faf54b76e83b1472c67af15 -> file:88b9d22c84328746210fae7c27fde54f2a699160
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -54,6 +54,31 @@ late_initcall(print_ipi_mode);
void default_setup_apic_routing(void)
{
+ int version = apic_version[boot_cpu_physical_apicid];
+
+ if (num_possible_cpus() > 8) {
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_INTEL:
+ if (!APIC_XAPIC(version)) {
+ def_to_bigsmp = 0;
+ break;
+ }
+ /* If P4 and above fall through */
+ case X86_VENDOR_AMD:
+ def_to_bigsmp = 1;
+ }
+ }
+
+#ifdef CONFIG_X86_BIGSMP
+ generic_bigsmp_probe();
+#endif
+
+ if (apic->setup_apic_routing)
+ apic->setup_apic_routing();
+}
+
+void setup_apic_flat_routing(void)
+{
#ifdef CONFIG_X86_IO_APIC
printk(KERN_INFO
"Enabling APIC mode: Flat. Using %d I/O APICs\n",
@@ -103,7 +128,7 @@ struct apic apic_default = {
.init_apic_ldr = default_init_apic_ldr,
.ioapic_phys_id_map = default_ioapic_phys_id_map,
- .setup_apic_routing = default_setup_apic_routing,
+ .setup_apic_routing = setup_apic_flat_routing,
.multi_timer_check = NULL,
.apicid_to_node = default_apicid_to_node,
.cpu_to_logical_apicid = default_cpu_to_logical_apicid,
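
The new default_setup_apic_routing() above decides whether to switch to bigsmp; the Intel branch intentionally falls through to the AMD one for xAPIC-capable parts. A flattened sketch of that decision (the vendor constants and the xAPIC test are stand-ins, not the kernel's):

	#include <stdio.h>
	#include <stdbool.h>

	enum vendor { VENDOR_INTEL, VENDOR_AMD, VENDOR_OTHER };

	static bool want_bigsmp(unsigned int ncpus, enum vendor v, bool xapic)
	{
		if (ncpus <= 8)
			return false;		/* default routing is fine */
		if (v == VENDOR_INTEL && !xapic)
			return false;		/* old local APIC: stay on default */
		if (v == VENDOR_INTEL || v == VENDOR_AMD)
			return true;		/* Intel xAPIC (P4+) or AMD: go bigsmp */
		return false;
	}

	int main(void)
	{
		printf("16 cpus, Intel xAPIC : %d\n", want_bigsmp(16, VENDOR_INTEL, true));
		printf("16 cpus, Intel APIC  : %d\n", want_bigsmp(16, VENDOR_INTEL, false));
		printf(" 8 cpus, AMD         : %d\n", want_bigsmp(8, VENDOR_AMD, true));
		return 0;
	}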
file:c4cbd3080c1c36e0c798d10a4652427fa4e09603 -> file:4c56f544f1676a06ce9bca808d1c6fae943840d2
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -67,17 +67,8 @@ void __init default_setup_apic_routing(v
}
#endif
- if (apic == &apic_flat) {
- switch (boot_cpu_data.x86_vendor) {
- case X86_VENDOR_INTEL:
- if (num_processors > 8)
- apic = &apic_physflat;
- break;
- case X86_VENDOR_AMD:
- if (max_physical_apicid >= 8)
- apic = &apic_physflat;
- }
- }
+ if (apic == &apic_flat && num_possible_cpus() > 8)
+ apic = &apic_physflat;
printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
file:c910a716a71ce103b878154b24b2813917624716 -> file:3940fee7ea9fb3fa4f90eaee674c6ce32fa2ba16
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -254,59 +254,36 @@ static int __cpuinit nearby_node(int api
/*
* Fixup core topology information for AMD multi-node processors.
- * Assumption 1: Number of cores in each internal node is the same.
- * Assumption 2: Mixed systems with both single-node and dual-node
- * processors are not supported.
+ * Assumption: Number of cores in each internal node is the same.
*/
#ifdef CONFIG_X86_HT
static void __cpuinit amd_fixup_dcm(struct cpuinfo_x86 *c)
{
-#ifdef CONFIG_PCI
- u32 t, cpn;
- u8 n, n_id;
+ unsigned long long value;
+ u32 nodes, cores_per_node;
int cpu = smp_processor_id();
+ if (!cpu_has(c, X86_FEATURE_NODEID_MSR))
+ return;
+
/* fixup topology information only once for a core */
if (cpu_has(c, X86_FEATURE_AMD_DCM))
return;
- /* check for multi-node processor on boot cpu */
- t = read_pci_config(0, 24, 3, 0xe8);
- if (!(t & (1 << 29)))
+ rdmsrl(MSR_FAM10H_NODE_ID, value);
+
+ nodes = ((value >> 3) & 7) + 1;
+ if (nodes == 1)
return;
set_cpu_cap(c, X86_FEATURE_AMD_DCM);
+ cores_per_node = c->x86_max_cores / nodes;
- /* cores per node: each internal node has half the number of cores */
- cpn = c->x86_max_cores >> 1;
-
- /* even-numbered NB_id of this dual-node processor */
- n = c->phys_proc_id << 1;
+ /* store NodeID, use llc_shared_map to store sibling info */
+ per_cpu(cpu_llc_id, cpu) = value & 7;
- /*
- * determine internal node id and assign cores fifty-fifty to
- * each node of the dual-node processor
- */
- t = read_pci_config(0, 24 + n, 3, 0xe8);
- n = (t>>30) & 0x3;
- if (n == 0) {
- if (c->cpu_core_id < cpn)
- n_id = 0;
- else
- n_id = 1;
- } else {
- if (c->cpu_core_id < cpn)
- n_id = 1;
- else
- n_id = 0;
- }
-
- /* compute entire NodeID, use llc_shared_map to store sibling info */
- per_cpu(cpu_llc_id, cpu) = (c->phys_proc_id << 1) + n_id;
-
- /* fixup core id to be in range from 0 to cpn */
- c->cpu_core_id = c->cpu_core_id % cpn;
-#endif
+ /* fixup core id to be in range from 0 to (cores_per_node - 1) */
+ c->cpu_core_id = c->cpu_core_id % cores_per_node;
}
#endif
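
amd_fixup_dcm() now derives the multi-node topology from MSR_FAM10H_NODE_ID instead of probing PCI. As a worked example of the same bit decode (the MSR value and core counts below are invented for illustration):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t value = 0x9;			/* hypothetical MSR_FAM10H_NODE_ID contents */
		unsigned int x86_max_cores = 12;	/* cores reported for the whole package */
		unsigned int cpu_core_id = 7;		/* package-wide core id of this CPU */

		unsigned int nodes = ((value >> 3) & 7) + 1;	/* bits 5:3, as decoded above */
		unsigned int node_id = value & 7;		/* bits 2:0, stored in cpu_llc_id */
		unsigned int cores_per_node = x86_max_cores / nodes;

		printf("nodes=%u node_id=%u cores_per_node=%u fixed core_id=%u\n",
		       nodes, node_id, cores_per_node, cpu_core_id % cores_per_node);
		return 0;
	}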
file:ab1cd30340f52d87806a0e3c0cecf8bccbf92bae -> file:5e92606c4cf8ecc58a8396549b5f6c3198bdd69d
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -929,7 +929,8 @@ static int fill_powernow_table_pstate(st
powernow_table[i].index = index;
/* Frequency may be rounded for these */
- if (boot_cpu_data.x86 == 0x10 || boot_cpu_data.x86 == 0x11) {
+ if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
+ || boot_cpu_data.x86 == 0x11) {
powernow_table[i].frequency =
freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
} else
file:a2a03cf4a489966e793c9b14058f462d60a7e2eb -> file:2f12d6dc012e5ab8d6251a2485f6b3afb79643c3
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -47,6 +47,27 @@ static void __cpuinit early_init_intel(s
(c->x86 == 0x6 && c->x86_model >= 0x0e))
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+ /*
+ * Atom erratum AAE44/AAF40/AAG38/AAH41:
+ *
+ * A race condition between speculative fetches and invalidating
+ * a large page. This is worked around in microcode, but we
+ * need the microcode to have already been loaded... so if it is
+ * not, recommend a BIOS update and disable large pages.
+ */
+ if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2) {
+ u32 ucode, junk;
+
+ wrmsr(MSR_IA32_UCODE_REV, 0, 0);
+ sync_core();
+ rdmsr(MSR_IA32_UCODE_REV, junk, ucode);
+
+ if (ucode < 0x20e) {
+ printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
+ clear_cpu_cap(c, X86_FEATURE_PSE);
+ }
+ }
+
#ifdef CONFIG_X86_64
set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
@@ -70,7 +91,8 @@ static void __cpuinit early_init_intel(s
if (c->x86_power & (1 << 8)) {
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
- sched_clock_stable = 1;
+ if (!check_tsc_unstable())
+ sched_clock_stable = 1;
}
/*
file:8178d03529354951e9fc74c6214e1b7c1f619053 -> file:417990f04b5d2e66cd5d06efd35b8fc5d4a3ffd1
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -18,6 +18,7 @@
#include <asm/processor.h>
#include <linux/smp.h>
#include <asm/k8.h>
+#include <asm/smp.h>
#define LVL_1_INST 1
#define LVL_1_DATA 2
@@ -150,7 +151,8 @@ struct _cpuid4_info {
union _cpuid4_leaf_ebx ebx;
union _cpuid4_leaf_ecx ecx;
unsigned long size;
- unsigned long can_disable;
+ bool can_disable;
+ unsigned int l3_indices;
DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};
@@ -160,7 +162,8 @@ struct _cpuid4_info_regs {
union _cpuid4_leaf_ebx ebx;
union _cpuid4_leaf_ecx ecx;
unsigned long size;
- unsigned long can_disable;
+ bool can_disable;
+ unsigned int l3_indices;
};
unsigned short num_cache_leaves;
@@ -290,6 +293,36 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_
(ebx->split.ways_of_associativity + 1) - 1;
}
+struct _cache_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct _cpuid4_info *, char *);
+ ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
+};
+
+#ifdef CONFIG_CPU_SUP_AMD
+static unsigned int __cpuinit amd_calc_l3_indices(void)
+{
+ /*
+ * We're called over smp_call_function_single() and therefore
+ * are on the correct cpu.
+ */
+ int cpu = smp_processor_id();
+ int node = cpu_to_node(cpu);
+ struct pci_dev *dev = node_to_k8_nb_misc(node);
+ unsigned int sc0, sc1, sc2, sc3;
+ u32 val = 0;
+
+ pci_read_config_dword(dev, 0x1C4, &val);
+
+ /* calculate subcache sizes */
+ sc0 = !(val & BIT(0));
+ sc1 = !(val & BIT(4));
+ sc2 = !(val & BIT(8)) + !(val & BIT(9));
+ sc3 = !(val & BIT(12)) + !(val & BIT(13));
+
+ return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
+}
+
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
@@ -299,13 +332,108 @@ amd_check_l3_disable(int index, struct _
if (boot_cpu_data.x86 == 0x11)
return;
- /* see erratum #382 */
- if ((boot_cpu_data.x86 == 0x10) && (boot_cpu_data.x86_model < 0x8))
+ /* see errata #382 and #388 */
+ if ((boot_cpu_data.x86 == 0x10) &&
+ ((boot_cpu_data.x86_model < 0x8) ||
+ (boot_cpu_data.x86_mask < 0x1)))
return;
- this_leaf->can_disable = 1;
+ /* not in virtualized environments */
+ if (num_k8_northbridges == 0)
+ return;
+
+ this_leaf->can_disable = true;
+ this_leaf->l3_indices = amd_calc_l3_indices();
+}
+
+static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
+ unsigned int index)
+{
+ int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+ int node = amd_get_nb_id(cpu);
+ struct pci_dev *dev = node_to_k8_nb_misc(node);
+ unsigned int reg = 0;
+
+ if (!this_leaf->can_disable)
+ return -EINVAL;
+
+ if (!dev)
+ return -EINVAL;
+
+ pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
+ return sprintf(buf, "0x%08x\n", reg);
+}
+
+#define SHOW_CACHE_DISABLE(index) \
+static ssize_t \
+show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
+{ \
+ return show_cache_disable(this_leaf, buf, index); \
+}
+SHOW_CACHE_DISABLE(0)
+SHOW_CACHE_DISABLE(1)
+
+static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
+ const char *buf, size_t count, unsigned int index)
+{
+ int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+ int node = amd_get_nb_id(cpu);
+ struct pci_dev *dev = node_to_k8_nb_misc(node);
+ unsigned long val = 0;
+
+#define SUBCACHE_MASK (3UL << 20)
+#define SUBCACHE_INDEX 0xfff
+
+ if (!this_leaf->can_disable)
+ return -EINVAL;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!dev)
+ return -EINVAL;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ /* do not allow writes outside of allowed bits */
+ if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
+ ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
+ return -EINVAL;
+
+ val |= BIT(30);
+ pci_write_config_dword(dev, 0x1BC + index * 4, val);
+ /*
+ * We need to WBINVD on a core on the node containing the L3 cache whose
+ * indices we disable; a simple wbinvd() is therefore not sufficient.
+ */
+ wbinvd_on_cpu(cpu);
+ pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
+ return count;
}
+#define STORE_CACHE_DISABLE(index) \
+static ssize_t \
+store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
+ const char *buf, size_t count) \
+{ \
+ return store_cache_disable(this_leaf, buf, count, index); \
+}
+STORE_CACHE_DISABLE(0)
+STORE_CACHE_DISABLE(1)
+
+static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
+ show_cache_disable_0, store_cache_disable_0);
+static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
+ show_cache_disable_1, store_cache_disable_1);
+
+#else /* CONFIG_CPU_SUP_AMD */
+static void __cpuinit
+amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
+{
+};
+#endif /* CONFIG_CPU_SUP_AMD */
+
static int
__cpuinit cpuid4_cache_lookup_regs(int index,
struct _cpuid4_info_regs *this_leaf)
@@ -523,18 +651,19 @@ static void __cpuinit cache_shared_cpu_m
{
struct _cpuid4_info *this_leaf, *sibling_leaf;
unsigned long num_threads_sharing;
- int index_msb, i;
+ int index_msb, i, sibling;
struct cpuinfo_x86 *c = &cpu_data(cpu);
if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
- struct cpuinfo_x86 *d;
- for_each_online_cpu(i) {
+ for_each_cpu(i, c->llc_shared_map) {
if (!per_cpu(cpuid4_info, i))
continue;
- d = &cpu_data(i);
this_leaf = CPUID4_INFO_IDX(i, index);
- cpumask_copy(to_cpumask(this_leaf->shared_cpu_map),
- d->llc_shared_map);
+ for_each_cpu(sibling, c->llc_shared_map) {
+ if (!cpu_online(sibling))
+ continue;
+ set_bit(sibling, this_leaf->shared_cpu_map);
+ }
}
return;
}
@@ -726,82 +855,6 @@ static ssize_t show_type(struct _cpuid4_
#define to_object(k) container_of(k, struct _index_kobject, kobj)
#define to_attr(a) container_of(a, struct _cache_attr, attr)
-static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
- unsigned int index)
-{
- int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
- int node = cpu_to_node(cpu);
- struct pci_dev *dev = node_to_k8_nb_misc(node);
- unsigned int reg = 0;
-
- if (!this_leaf->can_disable)
- return -EINVAL;
-
- if (!dev)
- return -EINVAL;
-
- pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
- return sprintf(buf, "%x\n", reg);
-}
-
-#define SHOW_CACHE_DISABLE(index) \
-static ssize_t \
-show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
-{ \
- return show_cache_disable(this_leaf, buf, index); \
-}
-SHOW_CACHE_DISABLE(0)
-SHOW_CACHE_DISABLE(1)
-
-static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
- const char *buf, size_t count, unsigned int index)
-{
- int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
- int node = cpu_to_node(cpu);
- struct pci_dev *dev = node_to_k8_nb_misc(node);
- unsigned long val = 0;
- unsigned int scrubber = 0;
-
- if (!this_leaf->can_disable)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (!dev)
- return -EINVAL;
-
- if (strict_strtoul(buf, 10, &val) < 0)
- return -EINVAL;
-
- val |= 0xc0000000;
-
- pci_read_config_dword(dev, 0x58, &scrubber);
- scrubber &= ~0x1f000000;
- pci_write_config_dword(dev, 0x58, scrubber);
-
- pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
- wbinvd();
- pci_write_config_dword(dev, 0x1BC + index * 4, val);
- return count;
-}
-
-#define STORE_CACHE_DISABLE(index) \
-static ssize_t \
-store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
- const char *buf, size_t count) \
-{ \
- return store_cache_disable(this_leaf, buf, count, index); \
-}
-STORE_CACHE_DISABLE(0)
-STORE_CACHE_DISABLE(1)
-
-struct _cache_attr {
- struct attribute attr;
- ssize_t (*show)(struct _cpuid4_info *, char *);
- ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
-};
-
#define define_one_ro(_name) \
static struct _cache_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)
@@ -816,23 +869,28 @@ define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);
-static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
- show_cache_disable_0, store_cache_disable_0);
-static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
- show_cache_disable_1, store_cache_disable_1);
+#define DEFAULT_SYSFS_CACHE_ATTRS \
+ &type.attr, \
+ &level.attr, \
+ &coherency_line_size.attr, \
+ &physical_line_partition.attr, \
+ &ways_of_associativity.attr, \
+ &number_of_sets.attr, \
+ &size.attr, \
+ &shared_cpu_map.attr, \
+ &shared_cpu_list.attr
static struct attribute *default_attrs[] = {
- &type.attr,
- &level.attr,
- &coherency_line_size.attr,
- &physical_line_partition.attr,
- &ways_of_associativity.attr,
- &number_of_sets.attr,
- &size.attr,
- &shared_cpu_map.attr,
- &shared_cpu_list.attr,
+ DEFAULT_SYSFS_CACHE_ATTRS,
+ NULL
+};
+
+static struct attribute *default_l3_attrs[] = {
+ DEFAULT_SYSFS_CACHE_ATTRS,
+#ifdef CONFIG_CPU_SUP_AMD
&cache_disable_0.attr,
&cache_disable_1.attr,
+#endif
NULL
};
@@ -923,6 +981,7 @@ static int __cpuinit cache_add_dev(struc
unsigned int cpu = sys_dev->id;
unsigned long i, j;
struct _index_kobject *this_object;
+ struct _cpuid4_info *this_leaf;
int retval;
retval = cpuid4_cache_sysfs_init(cpu);
@@ -941,6 +1000,14 @@ static int __cpuinit cache_add_dev(struc
this_object = INDEX_KOBJECT_PTR(cpu, i);
this_object->cpu = cpu;
this_object->index = i;
+
+ this_leaf = CPUID4_INFO_IDX(cpu, i);
+
+ if (this_leaf->can_disable)
+ ktype_cache.default_attrs = default_l3_attrs;
+ else
+ ktype_cache.default_attrs = default_attrs;
+
retval = kobject_init_and_add(&(this_object->kobj),
&ktype_cache,
per_cpu(cache_kobject, cpu),
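
The relocated store_cache_disable() above now bounds-checks the user value against the two writable fields and the probed number of L3 indices. A user-space sketch of just that check (SUBCACHE_MASK/SUBCACHE_INDEX copied from above; the l3_indices value is hypothetical):

	#include <stdio.h>
	#include <stdbool.h>

	#define SUBCACHE_MASK	(3UL << 20)
	#define SUBCACHE_INDEX	0xfff

	static bool value_allowed(unsigned long val, unsigned int l3_indices)
	{
		if (val & ~(SUBCACHE_MASK | SUBCACHE_INDEX))
			return false;		/* stray bits outside the two fields */
		if ((val & SUBCACHE_INDEX) > l3_indices)
			return false;		/* index beyond what this L3 provides */
		return true;
	}

	int main(void)
	{
		unsigned int l3_indices = 0x7ff;	/* hypothetical value from amd_calc_l3_indices() */

		printf("0x123    -> %d\n", value_allowed(0x123, l3_indices));    /* ok: index only */
		printf("0x100400 -> %d\n", value_allowed(0x100400, l3_indices)); /* ok: subcache select + index */
		printf("0x1000   -> %d\n", value_allowed(0x1000, l3_indices));   /* rejected: bit 12 outside both fields */
		return 0;
	}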
file:b5801c311846304f2fc2263732e93a6a83c07217 -> file:0ff02cae710b55bc190f3f921972bad704478aed
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -190,6 +190,97 @@ static u64 __read_mostly hw_cache_event_
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
+static const u64 westmere_hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
+ [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
+ [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
+ [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
+ },
+ },
+ [ C(L1I ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
+ [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
+ [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
+ [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
+ [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
+ },
+ },
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
+ [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
+ [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
+ [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(BPU ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
+ [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
static const u64 nehalem_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -914,8 +1005,11 @@ static int __hw_perf_event_init(struct p
if (atomic_read(&active_events) == 0) {
if (!reserve_pmc_hardware())
err = -EBUSY;
- else
+ else {
err = reserve_bts_hardware();
+ if (err)
+ release_pmc_hardware();
+ }
}
if (!err)
atomic_inc(&active_events);
@@ -1999,6 +2093,7 @@ static int intel_pmu_init(void)
* Install the hw-cache-events table:
*/
switch (boot_cpu_data.x86_model) {
+
case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
@@ -2009,7 +2104,9 @@ static int intel_pmu_init(void)
pr_cont("Core2 events, ");
break;
default:
- case 26:
+ case 26: /* 45 nm nehalem, "Bloomfield" */
+ case 30: /* 45 nm nehalem, "Lynnfield" */
+ case 46: /* 45 nm nehalem-ex, "Beckton" */
memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -2021,6 +2118,14 @@ static int intel_pmu_init(void)
pr_cont("Atom events, ");
break;
+
+ case 37: /* 32 nm nehalem, "Clarkdale" */
+ case 44: /* 32 nm nehalem, "Gulftown" */
+ memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+
+ pr_cont("Westmere events, ");
+ break;
}
return 0;
}
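
The Westmere table added above uses the same packed event encoding as the other Intel tables: the low byte is the event select and the next byte the unit mask (e.g. 0x0151 is event 0x51, umask 0x01). A small decoder over a few of the entries, as an illustration of that reading (the packing is inferred from the listed values, not stated as a documented constant here):

	#include <stdio.h>

	struct cache_ev { unsigned int id; const char *name; };

	int main(void)
	{
		static const struct cache_ev evs[] = {
			{ 0x010b, "MEM_INST_RETIRED.LOADS" },
			{ 0x0151, "L1D.REPL" },
			{ 0x4f2e, "LLC Reference" },
			{ 0x412e, "LLC Misses" },
		};
		unsigned int i;

		for (i = 0; i < sizeof(evs) / sizeof(evs[0]); i++)
			printf("%-24s event=0x%02x umask=0x%02x\n", evs[i].name,
			       evs[i].id & 0xff, (evs[i].id >> 8) & 0xff);
		return 0;
	}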
file:5e409dc298a479d3f72e9a65990edb5fedc514da -> file:ff958248e61d7d48965c7b61c3cfdf32bc551f23
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -27,7 +27,6 @@
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
-#include <asm/iommu.h>
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
@@ -104,10 +103,5 @@ void native_machine_crash_shutdown(struc
#ifdef CONFIG_HPET_TIMER
hpet_disable();
#endif
-
-#ifdef CONFIG_X86_64
- pci_iommu_shutdown();
-#endif
-
crash_save_cpu(regs, safe_smp_processor_id());
}
file:58778736496a37170a63e56019a040267098d183 -> file:19528ef10d311f6bbd31402edfa39a4ae253eaeb
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -385,11 +385,28 @@ static int hpet_next_event(unsigned long
hpet_writel(cnt, HPET_Tn_CMP(timer));
/*
- * We need to read back the CMP register to make sure that
- * what we wrote hit the chip before we compare it to the
- * counter.
+ * We need to read back the CMP register on certain HPET
+ * implementations (ATI chipsets) which seem to delay the
+ * transfer of the compare register into the internal compare
+ * logic. With small deltas this might actually be too late as
+ * the counter could already be higher than the compare value
+ * at that point and we would wait for the next hpet interrupt
+ * forever. We found out that reading the CMP register back
+ * forces the transfer so we can rely on the comparison with
+ * the counter register below. If the read back from the
+ * compare register does not match the value we programmed
+ * then we might have a real hardware problem. We cannot do
+ * much about it here, but at least alert the user/admin with
+ * a prominent warning.
+ * An erratum on some chipsets (ICH9,..) results in the comparator read
+ * immediately following a write returning the old value. The workaround
+ * is to read the value a second time when the first read returns the
+ * old value.
*/
- WARN_ON_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt);
+ if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) {
+ WARN_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt,
+ KERN_WARNING "hpet: compare register read back failed.\n");
+ }
return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
}
@@ -932,7 +949,7 @@ fs_initcall(hpet_late_init);
void hpet_disable(void)
{
- if (is_hpet_capable()) {
+ if (is_hpet_capable() && hpet_virt_address) {
unsigned long cfg = hpet_readl(HPET_CFG);
if (hpet_legacy_int_enabled) {
file:cbc4332a77b25927947d18c69f47188465a29d3e -> file:9b895464dd0311f9c0c4619b5cf7e3f5e94dd2de
--- a/arch/x86/kernel/k8.c
+++ b/arch/x86/kernel/k8.c
@@ -121,3 +121,17 @@ void k8_flush_garts(void)
}
EXPORT_SYMBOL_GPL(k8_flush_garts);
+static __init int init_k8_nbs(void)
+{
+ int err = 0;
+
+ err = cache_k8_northbridges();
+
+ if (err < 0)
+ printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n");
+
+ return err;
+}
+
+/* This has to go after the PCI subsystem */
+fs_initcall(init_k8_nbs);
file:5be95ef4ffec6baa9bd390119d389dceb94793ad -> file:e07bc4ef282912f3747d94a9402f911dd80198b2
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -359,13 +359,6 @@ static int __init smp_read_mpc(struct mp
x86_init.mpparse.mpc_record(1);
}
-#ifdef CONFIG_X86_BIGSMP
- generic_bigsmp_probe();
-#endif
-
- if (apic->setup_apic_routing)
- apic->setup_apic_routing();
-
if (!num_processors)
printk(KERN_ERR "MPTABLE: no processors registered!\n");
return num_processors;
file:e6ec8a2df1c368fa3a0cfd5b8db62076b1157c44 -> file:1a2d4b19eb23380babc97f401355a39d43dd79cf
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -102,11 +102,16 @@ int use_calgary __read_mostly = 0;
#define PMR_SOFTSTOPFAULT 0x40000000
#define PMR_HARDSTOP 0x20000000
-#define MAX_NUM_OF_PHBS 8 /* how many PHBs in total? */
-#define MAX_NUM_CHASSIS 8 /* max number of chassis */
-/* MAX_PHB_BUS_NUM is the maximal possible dev->bus->number */
-#define MAX_PHB_BUS_NUM (MAX_NUM_OF_PHBS * MAX_NUM_CHASSIS * 2)
-#define PHBS_PER_CALGARY 4
+/*
+ * The maximum PHB bus number.
+ * x3950M2 (rare): 8 chassis, 48 PHBs per chassis = 384
+ * x3950M2: 4 chassis, 48 PHBs per chassis = 192
+ * x3950 (PCIE): 8 chassis, 32 PHBs per chassis = 256
+ * x3950 (PCIX): 8 chassis, 16 PHBs per chassis = 128
+ */
+#define MAX_PHB_BUS_NUM 256
+
+#define PHBS_PER_CALGARY 4
/* register offsets in Calgary's internal register space */
static const unsigned long tar_offsets[] = {
@@ -1053,8 +1058,6 @@ static int __init calgary_init_one(struc
struct iommu_table *tbl;
int ret;
- BUG_ON(dev->bus->number >= MAX_PHB_BUS_NUM);
-
bbar = busno_to_bbar(dev->bus->number);
ret = calgary_setup_tar(dev, bbar);
if (ret)
file:fcc0b5c022c10cbe545c7f781cf25bcdd805e7f8 -> file:1c766915094e9babe9ef610388ef2b4775c1ca12
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -553,6 +553,9 @@ static void enable_gart_translations(voi
enable_gart_translation(dev, __pa(agp_gatt_table));
}
+
+ /* Flush the GART-TLB to remove stale entries */
+ k8_flush_garts();
}
/*
@@ -717,7 +720,7 @@ void __init gart_iommu_init(void)
unsigned long scratch;
long i;
- if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
+ if (num_k8_northbridges == 0)
return;
#ifndef CONFIG_AGP_AMD64
file:f010ab424f1f9ccc03b3f8ed7b4e759609ef733a -> file:5fd5b07bf3a5774cd996e312d9fe55533a37b984
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -439,21 +439,39 @@ static int __cpuinit mwait_usable(const
}
/*
- * Check for AMD CPUs, which have potentially C1E support
+ * Check for AMD CPUs, where APIC timer interrupt does not wake up CPU from C1e.
+ * For more information see
+ * - Erratum #400 for NPT family 0xf and family 0x10 CPUs
+ * - Erratum #365 for family 0x11 (not affected because C1e not in use)
*/
static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
{
+ u64 val;
if (c->x86_vendor != X86_VENDOR_AMD)
- return 0;
-
- if (c->x86 < 0x0F)
- return 0;
+ goto no_c1e_idle;
/* Family 0x0f models < rev F do not have C1E */
- if (c->x86 == 0x0f && c->x86_model < 0x40)
- return 0;
+ if (c->x86 == 0x0F && c->x86_model >= 0x40)
+ return 1;
+
+ if (c->x86 == 0x10) {
+ /*
+ * check OSVW bit for CPUs that are not affected
+ * by erratum #400
+ */
+ if (cpu_has(c, X86_FEATURE_OSVW)) {
+ rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
+ if (val >= 2) {
+ rdmsrl(MSR_AMD64_OSVW_STATUS, val);
+ if (!(val & BIT(1)))
+ goto no_c1e_idle;
+ }
+ }
+ return 1;
+ }
- return 1;
+no_c1e_idle:
+ return 0;
}
static cpumask_var_t c1e_mask;
file:f9ce04f610038ca14d91efbedb3a1055b0ac3129 -> file:868fdb407bb90c912c8cb91f8d2df5ad09a90885
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -295,11 +295,10 @@ int copy_thread(unsigned long clone_flag
set_tsk_thread_flag(p, TIF_FORK);
- p->thread.fs = me->thread.fs;
- p->thread.gs = me->thread.gs;
-
savesegment(gs, p->thread.gsindex);
+ p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
savesegment(fs, p->thread.fsindex);
+ p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
savesegment(es, p->thread.es);
savesegment(ds, p->thread.ds);
@@ -546,6 +545,7 @@ void set_personality_ia32(void)
/* Make sure to be in 32bit mode */
set_thread_flag(TIF_IA32);
+ current->personality |= force_personality32;
/* Prepare the first "return" to user space */
current_thread_info()->status |= TS_COMPAT;
file:03801f2f761fc312ba80353425c1cc7243a0105f -> file:dfdfe4662e0508073fbc72f946d006c1ac299497
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -109,11 +109,14 @@ unsigned long pvclock_tsc_khz(struct pvc
return pv_tsc_khz;
}
+static atomic64_t last_value = ATOMIC64_INIT(0);
+
cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
{
struct pvclock_shadow_time shadow;
unsigned version;
cycle_t ret, offset;
+ u64 last;
do {
version = pvclock_get_time_values(&shadow, src);
@@ -123,6 +126,27 @@ cycle_t pvclock_clocksource_read(struct
barrier();
} while (version != src->version);
+ /*
+ * Assumption here is that last_value, a global accumulator, always goes
+ * forward. If we are less than that, we should not be much smaller.
+ * We assume there is an error margin we're inside, and then the correction
+ * does not sacrifice accuracy.
+ *
+ * For reads: global may have changed between test and return,
+ * but this means someone else updated the clock at a later time.
+ * We just need to make sure we are not seeing a backwards event.
+ *
+ * For updates: last_value = ret is not enough, since two vcpus could be
+ * updating at the same time, and one of them could be slightly behind,
+ * making the assumption that last_value always goes forward fail to hold.
+ */
+ last = atomic64_read(&last_value);
+ do {
+ if (ret < last)
+ return last;
+ last = atomic64_cmpxchg(&last_value, last, ret);
+ } while (unlikely(last != ret));
+
return ret;
}
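
The new last_value clamp guarantees that pvclock_clocksource_read() never goes backwards across vcpus. Below is a user-space rendition of the same monotonic clamp using C11 atomics rather than the kernel's atomic64_t (atomic_compare_exchange_weak reloads 'last' on failure, playing the role of atomic64_cmpxchg returning the observed value):

	#include <stdatomic.h>
	#include <stdint.h>

	static _Atomic uint64_t last_value;

	/* Never return a value smaller than what any CPU has already returned. */
	static uint64_t clamp_monotonic(uint64_t ret)
	{
		uint64_t last = atomic_load(&last_value);

		do {
			if (ret < last)
				return last;	/* someone already returned a later time */
			/* try to publish ret; on failure 'last' is reloaded for us */
		} while (!atomic_compare_exchange_weak(&last_value, &last, ret));

		return ret;
	}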
file:bff34d68d9d1df826310d4296828e96b8000868d -> file:269c2a3cb43da0e0e41d2a578a6974ddcc7dc706
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -461,6 +461,14 @@ static struct dmi_system_id __initdata p
DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"),
},
},
+ { /* Handle problems with rebooting on the iMac9,1. */
+ .callback = set_pci_reboot,
+ .ident = "Apple iMac9,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
+ },
+ },
{ }
};
file:8425f7ebe89e699a15316070f4068052977f25d6 -> file:d7a08884993893d4010311b0228956d21baa1089
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -688,6 +688,17 @@ static struct dmi_system_id __initdata b
DMI_MATCH(DMI_BOARD_NAME, "DG45FC"),
},
},
+ /*
+ * The Dell Inspiron Mini 1012 has DMI_BIOS_VENDOR = "Dell Inc.", so
+ * match on the product name.
+ */
+ {
+ .callback = dmi_low_memory_corruption,
+ .ident = "Phoenix BIOS",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
+ },
+ },
#endif
{}
};
file:565ebc65920e3e685161758acb03c4f8106c6b40 -> file:28e963d2bf2ce33ad83500d289a54d6b1d3c1521
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1066,9 +1066,7 @@ void __init native_smp_prepare_cpus(unsi
set_cpu_sibling_map(0);
enable_IR_x2apic();
-#ifdef CONFIG_X86_64
default_setup_apic_routing();
-#endif
if (smp_sanity_check(max_cpus) < 0) {
printk(KERN_INFO "SMP disabled\n");
file:86c9f91b48aea8ef47f0e77372fcf2d5f822d77e -> file:46b827778d16601dab7a0ae3306a25d3c4812fbc
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -46,6 +46,7 @@
/* Global pointer to shared data; NULL means no measured launch. */
struct tboot *tboot __read_mostly;
+EXPORT_SYMBOL(tboot);
/* timeout for APs (in secs) to enter wait-for-SIPI state during shutdown */
#define AP_WAIT_TIMEOUT 1
file:e02dbb670d5b56b826cf9394b25300b2dc49c54e -> file:1350e43cc02fa8fe298b3c607a9cdf68bf67f9b7
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -75,6 +75,7 @@
#define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual (1<<15) /* Alternate decoding of mod == 3 */
#define GroupMask 0xff /* Group number stored in bits 0:7 */
+#define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
/* Source 2 operand type */
#define Src2None (0<<29)
#define Src2CL (1<<29)
@@ -86,6 +87,7 @@
enum {
Group1_80, Group1_81, Group1_82, Group1_83,
Group1A, Group3_Byte, Group3, Group4, Group5, Group7,
+ Group8, Group9,
};
static u32 opcode_table[256] = {
@@ -203,7 +205,7 @@ static u32 opcode_table[256] = {
SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
/* 0xF0 - 0xF7 */
0, 0, 0, 0,
- ImplicitOps, ImplicitOps, Group | Group3_Byte, Group | Group3,
+ ImplicitOps | Priv, ImplicitOps, Group | Group3_Byte, Group | Group3,
/* 0xF8 - 0xFF */
ImplicitOps, 0, ImplicitOps, ImplicitOps,
ImplicitOps, ImplicitOps, Group | Group4, Group | Group5,
@@ -211,16 +213,20 @@ static u32 opcode_table[256] = {
static u32 twobyte_table[256] = {
/* 0x00 - 0x0F */
- 0, Group | GroupDual | Group7, 0, 0, 0, ImplicitOps, ImplicitOps, 0,
- ImplicitOps, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
+ 0, Group | GroupDual | Group7, 0, 0,
+ 0, ImplicitOps, ImplicitOps | Priv, 0,
+ ImplicitOps | Priv, ImplicitOps | Priv, 0, 0,
+ 0, ImplicitOps | ModRM, 0, 0,
/* 0x10 - 0x1F */
0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
/* 0x20 - 0x2F */
- ModRM | ImplicitOps, ModRM, ModRM | ImplicitOps, ModRM, 0, 0, 0, 0,
+ ModRM | ImplicitOps | Priv, ModRM | Priv,
+ ModRM | ImplicitOps | Priv, ModRM | Priv,
+ 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
/* 0x30 - 0x3F */
- ImplicitOps, 0, ImplicitOps, 0,
- ImplicitOps, ImplicitOps, 0, 0,
+ ImplicitOps | Priv, 0, ImplicitOps | Priv, 0,
+ ImplicitOps, ImplicitOps | Priv, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
/* 0x40 - 0x47 */
DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
@@ -258,11 +264,12 @@ static u32 twobyte_table[256] = {
0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
DstReg | SrcMem16 | ModRM | Mov,
/* 0xB8 - 0xBF */
- 0, 0, DstMem | SrcImmByte | ModRM, DstMem | SrcReg | ModRM | BitOp,
+ 0, 0, Group | Group8, DstMem | SrcReg | ModRM | BitOp,
0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
DstReg | SrcMem16 | ModRM | Mov,
/* 0xC0 - 0xCF */
- 0, 0, 0, DstMem | SrcReg | ModRM | Mov, 0, 0, 0, ImplicitOps | ModRM,
+ 0, 0, 0, DstMem | SrcReg | ModRM | Mov,
+ 0, 0, 0, Group | GroupDual | Group9,
0, 0, 0, 0, 0, 0, 0, 0,
/* 0xD0 - 0xDF */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -311,24 +318,39 @@ static u32 group_table[] = {
SrcMem | ModRM | Stack, 0,
SrcMem | ModRM | Stack, 0, SrcMem | ModRM | Stack, 0,
[Group7*8] =
- 0, 0, ModRM | SrcMem, ModRM | SrcMem,
+ 0, 0, ModRM | SrcMem | Priv, ModRM | SrcMem | Priv,
SrcNone | ModRM | DstMem | Mov, 0,
- SrcMem16 | ModRM | Mov, SrcMem | ModRM | ByteOp,
+ SrcMem16 | ModRM | Mov | Priv, SrcMem | ModRM | ByteOp | Priv,
+ [Group8*8] =
+ 0, 0, 0, 0,
+ DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
+ DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
+ [Group9*8] =
+ 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0,
};
static u32 group2_table[] = {
[Group7*8] =
- SrcNone | ModRM, 0, 0, SrcNone | ModRM,
+ SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM,
SrcNone | ModRM | DstMem | Mov, 0,
SrcMem16 | ModRM | Mov, 0,
+ [Group9*8] =
+ 0, 0, 0, 0, 0, 0, 0, 0,
};
/* EFLAGS bit definitions. */
+#define EFLG_ID (1<<21)
+#define EFLG_VIP (1<<20)
+#define EFLG_VIF (1<<19)
+#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
+#define EFLG_IOPL (3<<12)
+#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
+#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
@@ -597,7 +619,7 @@ static int do_fetch_insn_byte(struct x86
if (linear < fc->start || linear >= fc->end) {
size = min(15UL, PAGE_SIZE - offset_in_page(linear));
- rc = ops->read_std(linear, fc->data, size, ctxt->vcpu);
+ rc = ops->fetch(linear, fc->data, size, ctxt->vcpu, NULL);
if (rc)
return rc;
fc->start = linear;
@@ -652,11 +674,11 @@ static int read_descriptor(struct x86_em
op_bytes = 3;
*address = 0;
rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
- ctxt->vcpu);
+ ctxt->vcpu, NULL);
if (rc)
return rc;
rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
- ctxt->vcpu);
+ ctxt->vcpu, NULL);
return rc;
}
@@ -880,6 +902,7 @@ x86_decode_insn(struct x86_emulate_ctxt
switch (mode) {
case X86EMUL_MODE_REAL:
+ case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
def_op_bytes = def_ad_bytes = 2;
break;
@@ -1189,6 +1212,49 @@ static int emulate_pop(struct x86_emulat
return rc;
}
+static int emulate_popf(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ void *dest, int len)
+{
+ int rc;
+ unsigned long val, change_mask;
+ int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+ int cpl = kvm_x86_ops->get_cpl(ctxt->vcpu);
+
+ rc = emulate_pop(ctxt, ops, &val, len);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+ change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
+ | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
+
+ switch(ctxt->mode) {
+ case X86EMUL_MODE_PROT64:
+ case X86EMUL_MODE_PROT32:
+ case X86EMUL_MODE_PROT16:
+ if (cpl == 0)
+ change_mask |= EFLG_IOPL;
+ if (cpl <= iopl)
+ change_mask |= EFLG_IF;
+ break;
+ case X86EMUL_MODE_VM86:
+ if (iopl < 3) {
+ kvm_inject_gp(ctxt->vcpu, 0);
+ return X86EMUL_PROPAGATE_FAULT;
+ }
+ change_mask |= EFLG_IF;
+ break;
+ default: /* real mode */
+ change_mask |= (EFLG_IOPL | EFLG_IF);
+ break;
+ }
+
+ *(unsigned long *)dest =
+ (ctxt->eflags & ~change_mask) | (val & change_mask);
+
+ return rc;
+}
+
static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
{
@@ -1330,7 +1396,7 @@ static int emulate_ret_far(struct x86_em
rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
if (rc)
return rc;
- rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, 1, VCPU_SREG_CS);
+ rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, VCPU_SREG_CS);
return rc;
}
@@ -1438,7 +1504,7 @@ emulate_syscall(struct x86_emulate_ctxt
/* syscall is not available in real mode */
if (c->lock_prefix || ctxt->mode == X86EMUL_MODE_REAL
- || !(ctxt->vcpu->arch.cr0 & X86_CR0_PE))
+ || ctxt->mode == X86EMUL_MODE_VM86)
return -1;
setup_syscalls_segments(ctxt, &cs, &ss);
@@ -1490,9 +1556,8 @@ emulate_sysenter(struct x86_emulate_ctxt
if (c->lock_prefix)
return -1;
- /* inject #GP if in real mode or paging is disabled */
- if (ctxt->mode == X86EMUL_MODE_REAL ||
- !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) {
+ /* inject #GP if in real mode */
+ if (ctxt->mode == X86EMUL_MODE_REAL) {
kvm_inject_gp(ctxt->vcpu, 0);
return -1;
}
@@ -1556,15 +1621,9 @@ emulate_sysexit(struct x86_emulate_ctxt
if (c->lock_prefix)
return -1;
- /* inject #GP if in real mode or paging is disabled */
- if (ctxt->mode == X86EMUL_MODE_REAL
- || !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) {
- kvm_inject_gp(ctxt->vcpu, 0);
- return -1;
- }
-
- /* sysexit must be called from CPL 0 */
- if (kvm_x86_ops->get_cpl(ctxt->vcpu) != 0) {
+ /* inject #GP if in real mode or Virtual 8086 mode */
+ if (ctxt->mode == X86EMUL_MODE_REAL ||
+ ctxt->mode == X86EMUL_MODE_VM86) {
kvm_inject_gp(ctxt->vcpu, 0);
return -1;
}
@@ -1611,6 +1670,57 @@ emulate_sysexit(struct x86_emulate_ctxt
return 0;
}
+static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
+{
+ int iopl;
+ if (ctxt->mode == X86EMUL_MODE_REAL)
+ return false;
+ if (ctxt->mode == X86EMUL_MODE_VM86)
+ return true;
+ iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+ return kvm_x86_ops->get_cpl(ctxt->vcpu) > iopl;
+}
+
+static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 port, u16 len)
+{
+ struct kvm_segment tr_seg;
+ int r;
+ u16 io_bitmap_ptr;
+ u8 perm, bit_idx = port & 0x7;
+ unsigned mask = (1 << len) - 1;
+
+ kvm_get_segment(ctxt->vcpu, &tr_seg, VCPU_SREG_TR);
+ if (tr_seg.unusable)
+ return false;
+ if (tr_seg.limit < 103)
+ return false;
+ r = ops->read_std(tr_seg.base + 102, &io_bitmap_ptr, 2, ctxt->vcpu,
+ NULL);
+ if (r != X86EMUL_CONTINUE)
+ return false;
+ if (io_bitmap_ptr + port/8 > tr_seg.limit)
+ return false;
+ r = ops->read_std(tr_seg.base + io_bitmap_ptr + port/8, &perm, 1,
+ ctxt->vcpu, NULL);
+ if (r != X86EMUL_CONTINUE)
+ return false;
+ if ((perm >> bit_idx) & mask)
+ return false;
+ return true;
+}
+
+static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 port, u16 len)
+{
+ if (emulator_bad_iopl(ctxt))
+ if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
+ return false;
+ return true;
+}
+
int
x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
@@ -1632,6 +1742,12 @@ x86_emulate_insn(struct x86_emulate_ctxt
memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
saved_eip = c->eip;
+ /* Privileged instruction can be executed only in CPL=0 */
+ if ((c->d & Priv) && kvm_x86_ops->get_cpl(ctxt->vcpu)) {
+ kvm_inject_gp(ctxt->vcpu, 0);
+ goto done;
+ }
+
if (((c->d & ModRM) && (c->modrm_mod != 3)) || (c->d & MemAbs))
memop = c->modrm_ea;
@@ -1764,7 +1880,12 @@ special_insn:
break;
case 0x6c: /* insb */
case 0x6d: /* insw/insd */
- if (kvm_emulate_pio_string(ctxt->vcpu, NULL,
+ if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
+ (c->d & ByteOp) ? 1 : c->op_bytes)) {
+ kvm_inject_gp(ctxt->vcpu, 0);
+ goto done;
+ }
+ if (kvm_emulate_pio_string(ctxt->vcpu, NULL,
1,
(c->d & ByteOp) ? 1 : c->op_bytes,
c->rep_prefix ?
@@ -1780,6 +1901,11 @@ special_insn:
return 0;
case 0x6e: /* outsb */
case 0x6f: /* outsw/outsd */
+ if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
+ (c->d & ByteOp) ? 1 : c->op_bytes)) {
+ kvm_inject_gp(ctxt->vcpu, 0);
+ goto done;
+ }
if (kvm_emulate_pio_string(ctxt->vcpu, NULL,
0,
(c->d & ByteOp) ? 1 : c->op_bytes,
@@ -1866,25 +1992,19 @@ special_insn:
break;
case 0x8e: { /* mov seg, r/m16 */
uint16_t sel;
- int type_bits;
- int err;
sel = c->src.val;
- if (c->modrm_reg == VCPU_SREG_SS)
- toggle_interruptibility(ctxt, X86_SHADOW_INT_MOV_SS);
- if (c->modrm_reg <= 5) {
- type_bits = (c->modrm_reg == 1) ? 9 : 1;
- err = kvm_load_segment_descriptor(ctxt->vcpu, sel,
- type_bits, c->modrm_reg);
- } else {
- printk(KERN_INFO "Invalid segreg in modrm byte 0x%02x\n",
- c->modrm);
- goto cannot_emulate;
+ if (c->modrm_reg == VCPU_SREG_CS ||
+ c->modrm_reg > VCPU_SREG_GS) {
+ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+ goto done;
}
- if (err < 0)
- goto cannot_emulate;
+ if (c->modrm_reg == VCPU_SREG_SS)
+ toggle_interruptibility(ctxt, X86_SHADOW_INT_MOV_SS);
+
+ rc = kvm_load_segment_descriptor(ctxt->vcpu, sel, c->modrm_reg);
c->dst.type = OP_NONE; /* Disable writeback. */
break;
@@ -1913,7 +2033,10 @@ special_insn:
c->dst.type = OP_REG;
c->dst.ptr = (unsigned long *) &ctxt->eflags;
c->dst.bytes = c->op_bytes;
- goto pop_instruction;
+ rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
+ if (rc != X86EMUL_CONTINUE)
+ goto done;
+ break;
case 0xa0 ... 0xa1: /* mov */
c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
c->dst.val = c->src.val;
@@ -2051,11 +2174,9 @@ special_insn:
case 0xe9: /* jmp rel */
goto jmp;
case 0xea: /* jmp far */
- if (kvm_load_segment_descriptor(ctxt->vcpu, c->src2.val, 9,
- VCPU_SREG_CS) < 0) {
- DPRINTF("jmp far: Failed to load CS descriptor\n");
- goto cannot_emulate;
- }
+ if (kvm_load_segment_descriptor(ctxt->vcpu, c->src2.val,
+ VCPU_SREG_CS))
+ goto done;
c->eip = c->src.val;
break;
@@ -2073,7 +2194,13 @@ special_insn:
case 0xef: /* out (e/r)ax,dx */
port = c->regs[VCPU_REGS_RDX];
io_dir_in = 0;
- do_io: if (kvm_emulate_pio(ctxt->vcpu, NULL, io_dir_in,
+ do_io:
+ if (!emulator_io_permited(ctxt, ops, port,
+ (c->d & ByteOp) ? 1 : c->op_bytes)) {
+ kvm_inject_gp(ctxt->vcpu, 0);
+ goto done;
+ }
+ if (kvm_emulate_pio(ctxt->vcpu, NULL, io_dir_in,
(c->d & ByteOp) ? 1 : c->op_bytes,
port) != 0) {
c->eip = saved_eip;
@@ -2098,13 +2225,21 @@ special_insn:
c->dst.type = OP_NONE; /* Disable writeback. */
break;
case 0xfa: /* cli */
- ctxt->eflags &= ~X86_EFLAGS_IF;
- c->dst.type = OP_NONE; /* Disable writeback. */
+ if (emulator_bad_iopl(ctxt))
+ kvm_inject_gp(ctxt->vcpu, 0);
+ else {
+ ctxt->eflags &= ~X86_EFLAGS_IF;
+ c->dst.type = OP_NONE; /* Disable writeback. */
+ }
break;
case 0xfb: /* sti */
- toggle_interruptibility(ctxt, X86_SHADOW_INT_STI);
- ctxt->eflags |= X86_EFLAGS_IF;
- c->dst.type = OP_NONE; /* Disable writeback. */
+ if (emulator_bad_iopl(ctxt))
+ kvm_inject_gp(ctxt->vcpu, 0);
+ else {
+ toggle_interruptibility(ctxt, X86_SHADOW_INT_STI);
+ ctxt->eflags |= X86_EFLAGS_IF;
+ c->dst.type = OP_NONE; /* Disable writeback. */
+ }
break;
case 0xfc: /* cld */
ctxt->eflags &= ~EFLG_DF;
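
Several of the emulator changes above hinge on one privilege rule: CLI/STI and port I/O are allowed outright when CPL <= IOPL; otherwise CLI/STI fault and port I/O falls back to the TSS I/O permission bitmap, with vm86 always taking the checked path. A reduced plain-C version of that predicate (the EFLAGS constants are the standard x86 values; the kvm helpers are not used here):

	#include <stdio.h>
	#include <stdbool.h>

	#define X86_EFLAGS_IOPL	0x3000
	#define IOPL_SHIFT	12

	static bool bad_iopl(unsigned long eflags, int cpl, bool vm86, bool real_mode)
	{
		int iopl = (eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;

		if (real_mode)
			return false;		/* no protection checks in real mode */
		if (vm86)
			return true;		/* vm86 always goes through the bitmap/#GP path */
		return cpl > iopl;
	}

	int main(void)
	{
		/* CPL 3 guest with IOPL 0: sensitive instructions must be checked */
		printf("cpl3/iopl0 -> bad_iopl=%d\n", bad_iopl(0x0002, 3, false, false));
		/* CPL 3 with IOPL 3 (both EFLAGS IOPL bits set): allowed directly */
		printf("cpl3/iopl3 -> bad_iopl=%d\n", bad_iopl(0x3002, 3, false, false));
		return 0;
	}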
file:3a01519a49f2033074d6b8e1fb7c7cc415e34936 -> file:fdf2e28f3bc6080b0a219039bb6472c8c8ee2d70
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -136,12 +136,6 @@ module_param(oos_shadow, bool, 0644);
#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
| PT64_NX_MASK)
-#define PFERR_PRESENT_MASK (1U << 0)
-#define PFERR_WRITE_MASK (1U << 1)
-#define PFERR_USER_MASK (1U << 2)
-#define PFERR_RSVD_MASK (1U << 3)
-#define PFERR_FETCH_MASK (1U << 4)
-
#define PT_PDPE_LEVEL 3
#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1
@@ -227,7 +221,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
-static int is_write_protection(struct kvm_vcpu *vcpu)
+static bool is_write_protection(struct kvm_vcpu *vcpu)
{
return vcpu->arch.cr0 & X86_CR0_WP;
}
@@ -1502,8 +1496,8 @@ static int mmu_zap_unsync_children(struc
for_each_sp(pages, sp, parents, i) {
kvm_mmu_zap_page(kvm, sp);
mmu_pages_clear_parents(&parents);
+ zapped++;
}
- zapped += pages.nr;
kvm_mmu_pages_init(parent, &parents, &pages);
}
@@ -1554,14 +1548,16 @@ void kvm_mmu_change_mmu_pages(struct kvm
*/
if (used_pages > kvm_nr_mmu_pages) {
- while (used_pages > kvm_nr_mmu_pages) {
+ while (used_pages > kvm_nr_mmu_pages &&
+ !list_empty(&kvm->arch.active_mmu_pages)) {
struct kvm_mmu_page *page;
page = container_of(kvm->arch.active_mmu_pages.prev,
struct kvm_mmu_page, link);
- kvm_mmu_zap_page(kvm, page);
+ used_pages -= kvm_mmu_zap_page(kvm, page);
used_pages--;
}
+ kvm_nr_mmu_pages = used_pages;
kvm->arch.n_free_mmu_pages = 0;
}
else
@@ -1608,7 +1604,8 @@ static void mmu_unshadow(struct kvm *kvm
&& !sp->role.invalid) {
pgprintk("%s: zap %lx %x\n",
__func__, gfn, sp->role.word);
- kvm_mmu_zap_page(kvm, sp);
+ if (kvm_mmu_zap_page(kvm, sp))
+ nn = bucket->first;
}
}
}
@@ -1639,7 +1636,7 @@ struct page *gva_to_page(struct kvm_vcpu
{
struct page *page;
- gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
+ gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
if (gpa == UNMAPPED_GVA)
return NULL;
@@ -1846,6 +1843,9 @@ static int set_spte(struct kvm_vcpu *vcp
spte |= PT_WRITABLE_MASK;
+ if (!tdp_enabled && !(pte_access & ACC_WRITE_MASK))
+ spte &= ~PT_USER_MASK;
+
/*
* Optimization: for pte sync, if spte was writable the hash
* lookup is unnecessary (and expensive). Write protection
@@ -1901,6 +1901,8 @@ static void mmu_set_spte(struct kvm_vcpu
child = page_header(pte & PT64_BASE_ADDR_MASK);
mmu_page_remove_parent_pte(child, sptep);
+ __set_spte(sptep, shadow_trap_nonpresent_pte);
+ kvm_flush_remote_tlbs(vcpu->kvm);
} else if (pfn != spte_to_pfn(*sptep)) {
pgprintk("hfn old %lx new %lx\n",
spte_to_pfn(*sptep), pfn);
@@ -2094,11 +2096,13 @@ static int mmu_alloc_roots(struct kvm_vc
direct = 1;
if (mmu_check_root(vcpu, root_gfn))
return 1;
+ spin_lock(&vcpu->kvm->mmu_lock);
sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
PT64_ROOT_LEVEL, direct,
ACC_ALL, NULL);
root = __pa(sp->spt);
++sp->root_count;
+ spin_unlock(&vcpu->kvm->mmu_lock);
vcpu->arch.mmu.root_hpa = root;
return 0;
}
@@ -2120,11 +2124,14 @@ static int mmu_alloc_roots(struct kvm_vc
root_gfn = 0;
if (mmu_check_root(vcpu, root_gfn))
return 1;
+ spin_lock(&vcpu->kvm->mmu_lock);
sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
PT32_ROOT_LEVEL, direct,
ACC_ALL, NULL);
root = __pa(sp->spt);
++sp->root_count;
+ spin_unlock(&vcpu->kvm->mmu_lock);
+
vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
}
vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
@@ -2162,8 +2169,11 @@ void kvm_mmu_sync_roots(struct kvm_vcpu
spin_unlock(&vcpu->kvm->mmu_lock);
}
-static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
+static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
+ u32 access, u32 *error)
{
+ if (error)
+ *error = 0;
return vaddr;
}
@@ -2445,6 +2455,7 @@ static int init_kvm_softmmu(struct kvm_v
r = paging32_init_context(vcpu);
vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
+ vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
return r;
}
@@ -2484,7 +2495,9 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
goto out;
spin_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_free_some_pages(vcpu);
+ spin_unlock(&vcpu->kvm->mmu_lock);
r = mmu_alloc_roots(vcpu);
+ spin_lock(&vcpu->kvm->mmu_lock);
mmu_sync_roots(vcpu);
spin_unlock(&vcpu->kvm->mmu_lock);
if (r)
@@ -2747,7 +2760,7 @@ int kvm_mmu_unprotect_page_virt(struct k
if (tdp_enabled)
return 0;
- gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
+ gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
spin_lock(&vcpu->kvm->mmu_lock);
r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
@@ -3245,7 +3258,7 @@ static void audit_mappings_page(struct k
if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
audit_mappings_page(vcpu, ent, va, level - 1);
else {
- gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
+ gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, va, NULL);
gfn_t gfn = gpa >> PAGE_SHIFT;
pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
file:61a1b3884b4954b1173cc5c21aced330acb2c938 -> file:bac752946368401322556902000ac6717622bc5c
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -37,6 +37,12 @@
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3
+#define PFERR_PRESENT_MASK (1U << 0)
+#define PFERR_WRITE_MASK (1U << 1)
+#define PFERR_USER_MASK (1U << 2)
+#define PFERR_RSVD_MASK (1U << 3)
+#define PFERR_FETCH_MASK (1U << 4)
+
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
file:5fa33255348c9fb010191f41cf987e1c4406f917 -> file:8faa821d1257522d9fe05f4dce5ab737c21cfbbd
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -491,18 +491,23 @@ static void FNAME(invlpg)(struct kvm_vcp
spin_unlock(&vcpu->kvm->mmu_lock);
}
-static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
+static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
+ u32 *error)
{
struct guest_walker walker;
gpa_t gpa = UNMAPPED_GVA;
int r;
- r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
+ r = FNAME(walk_addr)(&walker, vcpu, vaddr,
+ !!(access & PFERR_WRITE_MASK),
+ !!(access & PFERR_USER_MASK),
+ !!(access & PFERR_FETCH_MASK));
if (r) {
gpa = gfn_to_gpa(walker.gfn);
gpa |= vaddr & ~PAGE_MASK;
- }
+ } else if (error)
+ *error = walker.error_code;
return gpa;
}
file:c17404add91febfd66d12570057a12695e972d9b -> file:61ba66988aca6fcd0ec2226d51fb704c6c1703bb
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -27,6 +27,7 @@
#include <linux/sched.h>
#include <linux/ftrace_event.h>
+#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/virtext.h>
@@ -62,6 +63,8 @@ MODULE_LICENSE("GPL");
#define nsvm_printk(fmt, args...) do {} while(0)
#endif
+static bool erratum_383_found __read_mostly;
+
static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
@@ -299,6 +302,31 @@ static void skip_emulated_instruction(st
svm_set_interrupt_shadow(vcpu, 0);
}
+static void svm_init_erratum_383(void)
+{
+ u32 low, high;
+ int err;
+ u64 val;
+
+ /* Only Fam10h is affected */
+ if (boot_cpu_data.x86 != 0x10)
+ return;
+
+ /* Use _safe variants to not break nested virtualization */
+ val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
+ if (err)
+ return;
+
+ val |= (1ULL << 47);
+
+ low = lower_32_bits(val);
+ high = upper_32_bits(val);
+
+ native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);
+
+ erratum_383_found = true;
+}
+
static int has_svm(void)
{
const char *msg;
@@ -318,7 +346,6 @@ static void svm_hardware_disable(void *g
static void svm_hardware_enable(void *garbage)
{
-
struct svm_cpu_data *svm_data;
uint64_t efer;
struct descriptor_table gdt_descr;
@@ -350,6 +377,10 @@ static void svm_hardware_enable(void *ga
wrmsrl(MSR_VM_HSAVE_PA,
page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
+
+ svm_init_erratum_383();
+
+ return;
}
static void svm_cpu_uninit(int cpu)
@@ -625,11 +656,12 @@ static void init_vmcb(struct vcpu_svm *s
save->rip = 0x0000fff0;
svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
- /*
- * cr0 val on cpu init should be 0x60000010, we enable cpu
- * cache by default. the orderly way is to enable cache in bios.
+ /* This is the guest-visible cr0 value.
+ * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
*/
- save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
+ svm->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
+ kvm_set_cr0(&svm->vcpu, svm->vcpu.arch.cr0);
+
save->cr4 = X86_CR4_PAE;
/* rdx = ?? */
@@ -693,29 +725,28 @@ static struct kvm_vcpu *svm_create_vcpu(
if (err)
goto free_svm;
+ err = -ENOMEM;
page = alloc_page(GFP_KERNEL);
- if (!page) {
- err = -ENOMEM;
+ if (!page)
goto uninit;
- }
- err = -ENOMEM;
msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
if (!msrpm_pages)
- goto uninit;
+ goto free_page1;
nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
if (!nested_msrpm_pages)
- goto uninit;
-
- svm->msrpm = page_address(msrpm_pages);
- svm_vcpu_init_msrpm(svm->msrpm);
+ goto free_page2;
hsave_page = alloc_page(GFP_KERNEL);
if (!hsave_page)
- goto uninit;
+ goto free_page3;
+
svm->nested.hsave = page_address(hsave_page);
+ svm->msrpm = page_address(msrpm_pages);
+ svm_vcpu_init_msrpm(svm->msrpm);
+
svm->nested.msrpm = page_address(nested_msrpm_pages);
svm->vmcb = page_address(page);
@@ -732,6 +763,12 @@ static struct kvm_vcpu *svm_create_vcpu(
return &svm->vcpu;
+free_page3:
+ __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page2:
+ __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page1:
+ __free_page(page);
uninit:
kvm_vcpu_uninit(&svm->vcpu);
free_svm:
@@ -1251,8 +1288,59 @@ static int nm_interception(struct vcpu_s
return 1;
}
-static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static bool is_erratum_383(void)
{
+ int err, i;
+ u64 value;
+
+ if (!erratum_383_found)
+ return false;
+
+ value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
+ if (err)
+ return false;
+
+ /* Bit 62 may or may not be set for this mce */
+ value &= ~(1ULL << 62);
+
+ if (value != 0xb600000000010015ULL)
+ return false;
+
+ /* Clear MCi_STATUS registers */
+ for (i = 0; i < 6; ++i)
+ native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
+
+ value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
+ if (!err) {
+ u32 low, high;
+
+ value &= ~(1ULL << 2);
+ low = lower_32_bits(value);
+ high = upper_32_bits(value);
+
+ native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
+ }
+
+ /* Flush tlb to evict multi-match entries */
+ __flush_tlb_all();
+
+ return true;
+}
+
+static void svm_handle_mce(struct vcpu_svm *svm)
+{
+ if (is_erratum_383()) {
+ /*
+ * Erratum 383 triggered. Guest state is corrupt so kill the
+ * guest.
+ */
+ pr_err("KVM: Guest triggered AMD Erratum 383\n");
+
+ set_bit(KVM_REQ_TRIPLE_FAULT, &svm->vcpu.requests);
+
+ return;
+ }
+
/*
* On an #MC intercept the MCE handler is not called automatically in
* the host. So do it by hand here.
@@ -1261,6 +1349,11 @@ static int mc_interception(struct vcpu_s
"int $0x12\n");
/* not sure if we ever come back to this point */
+ return;
+}
+
+static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
return 1;
}
@@ -2711,6 +2804,14 @@ static void svm_vcpu_run(struct kvm_vcpu
vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
}
+
+ /*
+ * We need to handle MC intercepts here before the vcpu has a chance to
+ * change the physical cpu
+ */
+ if (unlikely(svm->vmcb->control.exit_code ==
+ SVM_EXIT_EXCP_BASE + MC_VECTOR))
+ svm_handle_mce(svm);
}
#undef R
file:ed53b42caba119bb7b488efdf79170b44ba922e4 -> file:6a28d5d95e8fe88f7a5ee7f90ac0b2ae950ec55a
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -26,6 +26,7 @@
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <linux/ftrace_event.h>
+#include <linux/tboot.h>
#include "kvm_cache_regs.h"
#include "x86.h"
@@ -61,6 +62,8 @@ module_param_named(unrestricted_guest,
static int __read_mostly emulate_invalid_guest_state = 0;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);
+#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
+
struct vmcs {
u32 revision_id;
u32 abort;
@@ -92,7 +95,7 @@ struct vcpu_vmx {
} host_state;
struct {
int vm86_active;
- u8 save_iopl;
+ ulong save_rflags;
struct kvm_save_segment {
u16 selector;
unsigned long base;
@@ -783,18 +786,23 @@ static void vmx_fpu_deactivate(struct kv
static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
- unsigned long rflags;
+ unsigned long rflags, save_rflags;
rflags = vmcs_readl(GUEST_RFLAGS);
- if (to_vmx(vcpu)->rmode.vm86_active)
- rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
+ if (to_vmx(vcpu)->rmode.vm86_active) {
+ rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+ save_rflags = to_vmx(vcpu)->rmode.save_rflags;
+ rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
+ }
return rflags;
}
static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
- if (to_vmx(vcpu)->rmode.vm86_active)
+ if (to_vmx(vcpu)->rmode.vm86_active) {
+ to_vmx(vcpu)->rmode.save_rflags = rflags;
rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
+ }
vmcs_writel(GUEST_RFLAGS, rflags);
}
@@ -1133,9 +1141,16 @@ static __init int vmx_disabled_by_bios(v
u64 msr;
rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
- return (msr & (FEATURE_CONTROL_LOCKED |
- FEATURE_CONTROL_VMXON_ENABLED))
- == FEATURE_CONTROL_LOCKED;
+ if (msr & FEATURE_CONTROL_LOCKED) {
+ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
+ && tboot_enabled())
+ return 1;
+ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
+ && !tboot_enabled())
+ return 1;
+ }
+
+ return 0;
/* locked but not enabled */
}
@@ -1143,18 +1158,20 @@ static void hardware_enable(void *garbag
{
int cpu = raw_smp_processor_id();
u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
- u64 old;
+ u64 old, test_bits;
INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
- if ((old & (FEATURE_CONTROL_LOCKED |
- FEATURE_CONTROL_VMXON_ENABLED))
- != (FEATURE_CONTROL_LOCKED |
- FEATURE_CONTROL_VMXON_ENABLED))
+
+ test_bits = FEATURE_CONTROL_LOCKED;
+ test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
+ if (tboot_enabled())
+ test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
+
+ if ((old & test_bits) != test_bits) {
/* enable and lock */
- wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
- FEATURE_CONTROL_LOCKED |
- FEATURE_CONTROL_VMXON_ENABLED);
+ wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
+ }
write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
asm volatile (ASM_VMX_VMXON_RAX
: : "a"(&phys_addr), "m"(phys_addr)
@@ -1431,8 +1448,8 @@ static void enter_pmode(struct kvm_vcpu
vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
flags = vmcs_readl(GUEST_RFLAGS);
- flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
- flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
+ flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+ flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
vmcs_writel(GUEST_RFLAGS, flags);
vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
@@ -1501,8 +1518,7 @@ static void enter_rmode(struct kvm_vcpu
vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
flags = vmcs_readl(GUEST_RFLAGS);
- vmx->rmode.save_iopl
- = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+ vmx->rmode.save_rflags = flags;
flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
@@ -2302,8 +2318,10 @@ static int vmx_vcpu_setup(struct vcpu_vm
~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
if (vmx->vpid == 0)
exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
- if (!enable_ept)
+ if (!enable_ept) {
exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
+ enable_unrestricted_guest = 0;
+ }
if (!enable_unrestricted_guest)
exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
@@ -2510,7 +2528,7 @@ static int vmx_vcpu_reset(struct kvm_vcp
if (vmx->vpid != 0)
vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
- vmx->vcpu.arch.cr0 = 0x60000010;
+ vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
vmx_set_cr4(&vmx->vcpu, 0);
vmx_set_efer(&vmx->vcpu, 0);
@@ -2674,6 +2692,12 @@ static int handle_rmode_exception(struct
kvm_queue_exception(vcpu, vec);
return 1;
case BP_VECTOR:
+ /*
+ * Update instruction length as we may reinject the exception
+ * from user space while in guest debugging mode.
+ */
+ to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
+ vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
return 0;
/* fall through */
@@ -2790,6 +2814,13 @@ static int handle_exception(struct kvm_v
kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
/* fall through */
case BP_VECTOR:
+ /*
+ * Update instruction length as we may reinject #BP from
+ * user space while in guest debugging mode. Reading it for
+ * #DB as well causes no harm, it is not used in that case.
+ */
+ vmx->vcpu.arch.event_exit_inst_len =
+ vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
kvm_run->exit_reason = KVM_EXIT_DEBUG;
kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
kvm_run->debug.arch.exception = ex_no;
file:e78d9907e0eef7719b4b52be2bb37217e2ab8f45 -> file:281ac630d246f82d6b70583d31baa846c8128a50
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -297,21 +297,16 @@ out:
void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
if (cr0 & CR0_RESERVED_BITS) {
- printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
- cr0, vcpu->arch.cr0);
kvm_inject_gp(vcpu, 0);
return;
}
if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
- printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
kvm_inject_gp(vcpu, 0);
return;
}
if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
- printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
- "and a clear PE flag\n");
kvm_inject_gp(vcpu, 0);
return;
}
@@ -322,15 +317,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu,
int cs_db, cs_l;
if (!is_pae(vcpu)) {
- printk(KERN_DEBUG "set_cr0: #GP, start paging "
- "in long mode while PAE is disabled\n");
kvm_inject_gp(vcpu, 0);
return;
}
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
if (cs_l) {
- printk(KERN_DEBUG "set_cr0: #GP, start paging "
- "in long mode while CS.L == 1\n");
kvm_inject_gp(vcpu, 0);
return;
@@ -338,8 +329,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu,
} else
#endif
if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
- printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
- "reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
@@ -356,7 +345,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
- kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
+ kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);
@@ -366,28 +355,23 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu,
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
if (cr4 & CR4_RESERVED_BITS) {
- printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
if (is_long_mode(vcpu)) {
if (!(cr4 & X86_CR4_PAE)) {
- printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
- "in long mode\n");
kvm_inject_gp(vcpu, 0);
return;
}
} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
&& ((cr4 ^ old_cr4) & pdptr_bits)
&& !load_pdptrs(vcpu, vcpu->arch.cr3)) {
- printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
if (cr4 & X86_CR4_VMXE) {
- printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
kvm_inject_gp(vcpu, 0);
return;
}
@@ -408,21 +392,16 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu,
if (is_long_mode(vcpu)) {
if (cr3 & CR3_L_MODE_RESERVED_BITS) {
- printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
} else {
if (is_pae(vcpu)) {
if (cr3 & CR3_PAE_RESERVED_BITS) {
- printk(KERN_DEBUG
- "set_cr3: #GP, reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
- printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
- "reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
@@ -454,7 +433,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr3);
void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
if (cr8 & CR8_RESERVED_BITS) {
- printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
kvm_inject_gp(vcpu, 0);
return;
}
@@ -505,53 +483,42 @@ static u32 emulated_msrs[] = {
MSR_IA32_MISC_ENABLE,
};
-static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
+static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
- if (efer & efer_reserved_bits) {
- printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
- efer);
- kvm_inject_gp(vcpu, 0);
- return;
- }
+ if (efer & efer_reserved_bits)
+ return 1;
if (is_paging(vcpu)
- && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
- printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
- kvm_inject_gp(vcpu, 0);
- return;
- }
+ && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME))
+ return 1;
if (efer & EFER_FFXSR) {
struct kvm_cpuid_entry2 *feat;
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
- if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
- printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
- kvm_inject_gp(vcpu, 0);
- return;
- }
+ if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
+ return 1;
}
if (efer & EFER_SVME) {
struct kvm_cpuid_entry2 *feat;
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
- if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
- printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
- kvm_inject_gp(vcpu, 0);
- return;
- }
+ if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
+ return 1;
}
- kvm_x86_ops->set_efer(vcpu, efer);
-
efer &= ~EFER_LMA;
efer |= vcpu->arch.shadow_efer & EFER_LMA;
+ kvm_x86_ops->set_efer(vcpu, efer);
+
vcpu->arch.shadow_efer = efer;
vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
kvm_mmu_reset_context(vcpu);
+
+ return 0;
}
void kvm_enable_efer_bits(u64 mask)
@@ -581,14 +548,22 @@ static int do_set_msr(struct kvm_vcpu *v
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
- static int version;
+ int version;
+ int r;
struct pvclock_wall_clock wc;
struct timespec boot;
if (!wall_clock)
return;
- version++;
+ r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
+ if (r)
+ return;
+
+ if (version & 1)
+ ++version; /* first time write, random junk */
+
+ ++version;
kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
@@ -826,9 +801,13 @@ static int set_msr_mce(struct kvm_vcpu *
if (msr >= MSR_IA32_MC0_CTL &&
msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
u32 offset = msr - MSR_IA32_MC0_CTL;
- /* only 0 or all 1s can be written to IA32_MCi_CTL */
+ /* only 0 or all 1s can be written to IA32_MCi_CTL
+ * some Linux kernels though clear bit 10 in bank 4 to
+ * work around a BIOS/GART TBL issue on AMD K8s; ignore
+ * this to avoid an uncaught #GP in the guest
+ */
if ((offset & 0x3) == 0 &&
- data != 0 && data != ~(u64)0)
+ data != 0 && (data | (1 << 10)) != ~(u64)0)
return -1;
vcpu->arch.mce_banks[offset] = data;
break;
@@ -842,8 +821,7 @@ int kvm_set_msr_common(struct kvm_vcpu *
{
switch (msr) {
case MSR_EFER:
- set_efer(vcpu, data);
- break;
+ return set_efer(vcpu, data);
case MSR_K7_HWCR:
data &= ~(u64)0x40; /* ignore flush filter disable */
if (data != 0) {
@@ -1242,8 +1220,8 @@ int kvm_dev_ioctl_check_extension(long e
case KVM_CAP_NR_MEMSLOTS:
r = KVM_MEMORY_SLOTS;
break;
- case KVM_CAP_PV_MMU:
- r = !tdp_enabled;
+ case KVM_CAP_PV_MMU: /* obsolete */
+ r = 0;
break;
case KVM_CAP_IOMMU:
r = iommu_found();
@@ -1435,6 +1413,7 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
{
int r;
+ vcpu_load(vcpu);
r = -E2BIG;
if (cpuid->nent < vcpu->arch.cpuid_nent)
goto out;
@@ -1446,6 +1425,7 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
out:
cpuid->nent = vcpu->arch.cpuid_nent;
+ vcpu_put(vcpu);
return r;
}
@@ -1695,6 +1675,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(
int r;
unsigned bank_num = mcg_cap & 0xff, bank;
+ vcpu_load(vcpu);
r = -EINVAL;
if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
goto out;
@@ -1709,6 +1690,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(
for (bank = 0; bank < bank_num; bank++)
vcpu->arch.mce_banks[bank*4] = ~(u64)0;
out:
+ vcpu_put(vcpu);
return r;
}
@@ -1911,7 +1893,9 @@ long kvm_arch_vcpu_ioctl(struct file *fi
r = -EFAULT;
if (copy_from_user(&mce, argp, sizeof mce))
goto out;
+ vcpu_load(vcpu);
r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
+ vcpu_put(vcpu);
break;
}
default:
@@ -2156,7 +2140,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kv
struct kvm_dirty_log *log)
{
int r;
- int n;
+ unsigned long n;
struct kvm_memory_slot *memslot;
int is_dirty = 0;
@@ -2172,7 +2156,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kv
kvm_mmu_slot_remove_write_access(kvm, log->slot);
spin_unlock(&kvm->mmu_lock);
memslot = &kvm->memslots[log->slot];
- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ n = kvm_dirty_bitmap_bytes(memslot);
memset(memslot->dirty_bitmap, 0, n);
}
r = 0;
@@ -2505,14 +2489,41 @@ static int vcpu_mmio_read(struct kvm_vcp
return kvm_io_bus_read(&vcpu->kvm->mmio_bus, addr, len, v);
}
-static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
- struct kvm_vcpu *vcpu)
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+{
+ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+}
+
+ gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+{
+ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ access |= PFERR_FETCH_MASK;
+ return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+}
+
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+{
+ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ access |= PFERR_WRITE_MASK;
+ return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+}
+
+/* used to access any guest's mapped memory without checking CPL */
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+{
+ return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error);
+}
+
+static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
+ struct kvm_vcpu *vcpu, u32 access,
+ u32 *error)
{
void *data = val;
int r = X86EMUL_CONTINUE;
while (bytes) {
- gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+ gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
unsigned offset = addr & (PAGE_SIZE-1);
unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
int ret;
@@ -2535,14 +2546,37 @@ out:
return r;
}
+/* used for instruction fetching */
+static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
+ struct kvm_vcpu *vcpu, u32 *error)
+{
+ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
+ access | PFERR_FETCH_MASK, error);
+}
+
+static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
+ struct kvm_vcpu *vcpu, u32 *error)
+{
+ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
+ error);
+}
+
+static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
+ struct kvm_vcpu *vcpu, u32 *error)
+{
+ return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
+}
+
static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
- struct kvm_vcpu *vcpu)
+ struct kvm_vcpu *vcpu, u32 *error)
{
void *data = val;
int r = X86EMUL_CONTINUE;
while (bytes) {
- gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+ gpa_t gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error);
unsigned offset = addr & (PAGE_SIZE-1);
unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
int ret;
@@ -2572,6 +2606,7 @@ static int emulator_read_emulated(unsign
struct kvm_vcpu *vcpu)
{
gpa_t gpa;
+ u32 error_code;
if (vcpu->mmio_read_completed) {
memcpy(val, vcpu->mmio_data, bytes);
@@ -2581,17 +2616,20 @@ static int emulator_read_emulated(unsign
return X86EMUL_CONTINUE;
}
- gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+ gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, &error_code);
+
+ if (gpa == UNMAPPED_GVA) {
+ kvm_inject_page_fault(vcpu, addr, error_code);
+ return X86EMUL_PROPAGATE_FAULT;
+ }
/* For APIC access vmexit */
if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
goto mmio;
- if (kvm_read_guest_virt(addr, val, bytes, vcpu)
+ if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
== X86EMUL_CONTINUE)
return X86EMUL_CONTINUE;
- if (gpa == UNMAPPED_GVA)
- return X86EMUL_PROPAGATE_FAULT;
mmio:
/*
@@ -2630,11 +2668,12 @@ static int emulator_write_emulated_onepa
struct kvm_vcpu *vcpu)
{
gpa_t gpa;
+ u32 error_code;
- gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+ gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, &error_code);
if (gpa == UNMAPPED_GVA) {
- kvm_inject_page_fault(vcpu, addr, 2);
+ kvm_inject_page_fault(vcpu, addr, error_code);
return X86EMUL_PROPAGATE_FAULT;
}
@@ -2698,7 +2737,7 @@ static int emulator_cmpxchg_emulated(uns
char *kaddr;
u64 val;
- gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+ gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
if (gpa == UNMAPPED_GVA ||
(gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -2777,7 +2816,7 @@ void kvm_report_emulation_failure(struct
rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
- kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
+ kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu, NULL);
printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
@@ -2785,7 +2824,8 @@ void kvm_report_emulation_failure(struct
EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
static struct x86_emulate_ops emulate_ops = {
- .read_std = kvm_read_guest_virt,
+ .read_std = kvm_read_guest_virt_system,
+ .fetch = kvm_fetch_guest_virt,
.read_emulated = emulator_read_emulated,
.write_emulated = emulator_write_emulated,
.cmpxchg_emulated = emulator_cmpxchg_emulated,
@@ -2828,8 +2868,9 @@ int emulate_instruction(struct kvm_vcpu
vcpu->arch.emulate_ctxt.vcpu = vcpu;
vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
vcpu->arch.emulate_ctxt.mode =
+ (!(vcpu->arch.cr0 & X86_CR0_PE)) ? X86EMUL_MODE_REAL :
(vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
- ? X86EMUL_MODE_REAL : cs_l
+ ? X86EMUL_MODE_VM86 : cs_l
? X86EMUL_MODE_PROT64 : cs_db
? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
@@ -2921,12 +2962,17 @@ static int pio_copy_data(struct kvm_vcpu
gva_t q = vcpu->arch.pio.guest_gva;
unsigned bytes;
int ret;
+ u32 error_code;
bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
if (vcpu->arch.pio.in)
- ret = kvm_write_guest_virt(q, p, bytes, vcpu);
+ ret = kvm_write_guest_virt(q, p, bytes, vcpu, &error_code);
else
- ret = kvm_read_guest_virt(q, p, bytes, vcpu);
+ ret = kvm_read_guest_virt(q, p, bytes, vcpu, &error_code);
+
+ if (ret == X86EMUL_PROPAGATE_FAULT)
+ kvm_inject_page_fault(vcpu, q, error_code);
+
return ret;
}
@@ -2947,7 +2993,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
if (io->in) {
r = pio_copy_data(vcpu);
if (r)
- return r;
+ goto out;
}
delta = 1;
@@ -2974,7 +3020,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
kvm_register_write(vcpu, VCPU_REGS_RSI, val);
}
}
-
+out:
io->count -= io->cur_count;
io->cur_count = 0;
@@ -3017,6 +3063,8 @@ int kvm_emulate_pio(struct kvm_vcpu *vcp
{
unsigned long val;
+ trace_kvm_pio(!in, port, size, 1);
+
vcpu->run->exit_reason = KVM_EXIT_IO;
vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
vcpu->run->io.size = vcpu->arch.pio.size = size;
@@ -3028,9 +3076,6 @@ int kvm_emulate_pio(struct kvm_vcpu *vcp
vcpu->arch.pio.down = 0;
vcpu->arch.pio.rep = 0;
- trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
- size, 1);
-
val = kvm_register_read(vcpu, VCPU_REGS_RAX);
memcpy(vcpu->arch.pio_data, &val, 4);
@@ -3049,6 +3094,8 @@ int kvm_emulate_pio_string(struct kvm_vc
unsigned now, in_page;
int ret = 0;
+ trace_kvm_pio(!in, port, size, count);
+
vcpu->run->exit_reason = KVM_EXIT_IO;
vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
vcpu->run->io.size = vcpu->arch.pio.size = size;
@@ -3060,9 +3107,6 @@ int kvm_emulate_pio_string(struct kvm_vc
vcpu->arch.pio.down = down;
vcpu->arch.pio.rep = rep;
- trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
- size, count);
-
if (!count) {
kvm_x86_ops->skip_emulated_instruction(vcpu);
return 1;
@@ -3094,10 +3138,8 @@ int kvm_emulate_pio_string(struct kvm_vc
if (!vcpu->arch.pio.in) {
/* string PIO write */
ret = pio_copy_data(vcpu);
- if (ret == X86EMUL_PROPAGATE_FAULT) {
- kvm_inject_gp(vcpu, 0);
+ if (ret == X86EMUL_PROPAGATE_FAULT)
return 1;
- }
if (ret == 0 && !pio_string_write(vcpu)) {
complete_pio(vcpu);
if (vcpu->arch.pio.count == 0)
@@ -4077,7 +4119,9 @@ static int load_guest_segment_descriptor
kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
return 1;
}
- return kvm_read_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
+ return kvm_read_guest_virt_system(dtable.base + index*8,
+ seg_desc, sizeof(*seg_desc),
+ vcpu, NULL);
}
/* allowed just for 8 bytes segments */
@@ -4091,15 +4135,23 @@ static int save_guest_segment_descriptor
if (dtable.limit < index * 8 + 7)
return 1;
- return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
+ return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
}
-static gpa_t get_tss_base_addr(struct kvm_vcpu *vcpu,
+static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu,
+ struct desc_struct *seg_desc)
+{
+ u32 base_addr = get_desc_base(seg_desc);
+
+ return kvm_mmu_gva_to_gpa_write(vcpu, base_addr, NULL);
+}
+
+static gpa_t get_tss_base_addr_read(struct kvm_vcpu *vcpu,
struct desc_struct *seg_desc)
{
u32 base_addr = get_desc_base(seg_desc);
- return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
+ return kvm_mmu_gva_to_gpa_read(vcpu, base_addr, NULL);
}
static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
@@ -4110,18 +4162,6 @@ static u16 get_segment_selector(struct k
return kvm_seg.selector;
}
-static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
- u16 selector,
- struct kvm_segment *kvm_seg)
-{
- struct desc_struct seg_desc;
-
- if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
- return 1;
- seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
- return 0;
-}
-
static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
{
struct kvm_segment segvar = {
@@ -4139,7 +4179,7 @@ static int kvm_load_realmode_segment(str
.unusable = 0,
};
kvm_x86_ops->set_segment(vcpu, &segvar, seg);
- return 0;
+ return X86EMUL_CONTINUE;
}
static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
@@ -4149,24 +4189,113 @@ static int is_vm86_segment(struct kvm_vc
(kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_VM);
}
-int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
- int type_bits, int seg)
+int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg)
{
struct kvm_segment kvm_seg;
+ struct desc_struct seg_desc;
+ u8 dpl, rpl, cpl;
+ unsigned err_vec = GP_VECTOR;
+ u32 err_code = 0;
+ bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
+ int ret;
if (is_vm86_segment(vcpu, seg) || !(vcpu->arch.cr0 & X86_CR0_PE))
return kvm_load_realmode_segment(vcpu, selector, seg);
- if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
- return 1;
- kvm_seg.type |= type_bits;
- if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
- seg != VCPU_SREG_LDTR)
- if (!kvm_seg.s)
- kvm_seg.unusable = 1;
+ /* NULL selector is not valid for TR, CS and SS */
+ if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
+ && null_selector)
+ goto exception;
+
+ /* TR should be in GDT only */
+ if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
+ goto exception;
+
+ ret = load_guest_segment_descriptor(vcpu, selector, &seg_desc);
+ if (ret)
+ return ret;
+
+ seg_desct_to_kvm_desct(&seg_desc, selector, &kvm_seg);
+
+ if (null_selector) { /* for NULL selector skip all following checks */
+ kvm_seg.unusable = 1;
+ goto load;
+ }
+
+ err_code = selector & 0xfffc;
+ err_vec = GP_VECTOR;
+
+ /* can't load system descriptor into segment selector */
+ if (seg <= VCPU_SREG_GS && !kvm_seg.s)
+ goto exception;
+
+ if (!kvm_seg.present) {
+ err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
+ goto exception;
+ }
+
+ rpl = selector & 3;
+ dpl = kvm_seg.dpl;
+ cpl = kvm_x86_ops->get_cpl(vcpu);
+
+ switch (seg) {
+ case VCPU_SREG_SS:
+ /*
+ * segment is not a writable data segment or segment
+ * selector's RPL != CPL or segment descriptor's DPL != CPL
+ */
+ if (rpl != cpl || (kvm_seg.type & 0xa) != 0x2 || dpl != cpl)
+ goto exception;
+ break;
+ case VCPU_SREG_CS:
+ if (!(kvm_seg.type & 8))
+ goto exception;
+
+ if (kvm_seg.type & 4) {
+ /* conforming */
+ if (dpl > cpl)
+ goto exception;
+ } else {
+ /* nonconforming */
+ if (rpl > cpl || dpl != cpl)
+ goto exception;
+ }
+ /* CS(RPL) <- CPL */
+ selector = (selector & 0xfffc) | cpl;
+ break;
+ case VCPU_SREG_TR:
+ if (kvm_seg.s || (kvm_seg.type != 1 && kvm_seg.type != 9))
+ goto exception;
+ break;
+ case VCPU_SREG_LDTR:
+ if (kvm_seg.s || kvm_seg.type != 2)
+ goto exception;
+ break;
+ default: /* DS, ES, FS, or GS */
+ /*
+ * segment is not a data or readable code segment or
+ * ((segment is a data or nonconforming code segment)
+ * and (both RPL and CPL > DPL))
+ */
+ if ((kvm_seg.type & 0xa) == 0x8 ||
+ (((kvm_seg.type & 0xc) != 0xc) && (rpl > dpl && cpl > dpl)))
+ goto exception;
+ break;
+ }
+
+ if (!kvm_seg.unusable && kvm_seg.s) {
+ /* mark segment as accessed */
+ kvm_seg.type |= 1;
+ seg_desc.type |= 1;
+ save_guest_segment_descriptor(vcpu, selector, &seg_desc);
+ }
+load:
kvm_set_segment(vcpu, &kvm_seg, seg);
- return 0;
+ return X86EMUL_CONTINUE;
+exception:
+ kvm_queue_exception_e(vcpu, err_vec, err_code);
+ return X86EMUL_PROPAGATE_FAULT;
}
static void save_state_to_tss32(struct kvm_vcpu *vcpu,
@@ -4192,6 +4321,14 @@ static void save_state_to_tss32(struct k
tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
}
+static void kvm_load_segment_selector(struct kvm_vcpu *vcpu, u16 sel, int seg)
+{
+ struct kvm_segment kvm_seg;
+ kvm_get_segment(vcpu, &kvm_seg, seg);
+ kvm_seg.selector = sel;
+ kvm_set_segment(vcpu, &kvm_seg, seg);
+}
+
static int load_state_from_tss32(struct kvm_vcpu *vcpu,
struct tss_segment_32 *tss)
{
@@ -4209,25 +4346,41 @@ static int load_state_from_tss32(struct
kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
- if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
+ /*
+ * SDM says that segment selectors are loaded before segment
+ * descriptors
+ */
+ kvm_load_segment_selector(vcpu, tss->ldt_selector, VCPU_SREG_LDTR);
+ kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
+ kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
+ kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
+ kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
+ kvm_load_segment_selector(vcpu, tss->fs, VCPU_SREG_FS);
+ kvm_load_segment_selector(vcpu, tss->gs, VCPU_SREG_GS);
+
+ /*
+ * Now load segment descriptors. If a fault happens at this stage
+ * it is handled in the context of the new task
+ */
+ if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, VCPU_SREG_LDTR))
return 1;
- if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
+ if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
return 1;
- if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
+ if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
return 1;
- if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
+ if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
return 1;
- if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
+ if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
return 1;
- if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
+ if (kvm_load_segment_descriptor(vcpu, tss->fs, VCPU_SREG_FS))
return 1;
- if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
+ if (kvm_load_segment_descriptor(vcpu, tss->gs, VCPU_SREG_GS))
return 1;
return 0;
}
@@ -4268,19 +4421,33 @@ static int load_state_from_tss16(struct
kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
- if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
+ /*
+ * SDM says that segment selectors are loaded before segment
+ * descriptors
+ */
+ kvm_load_segment_selector(vcpu, tss->ldt, VCPU_SREG_LDTR);
+ kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
+ kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
+ kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
+ kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
+
+ /*
+ * Now load segment descriptors. If a fault happens at this stage
+ * it is handled in the context of the new task
+ */
+ if (kvm_load_segment_descriptor(vcpu, tss->ldt, VCPU_SREG_LDTR))
return 1;
- if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
+ if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
return 1;
- if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
+ if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
return 1;
- if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
+ if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
return 1;
- if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
+ if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
return 1;
return 0;
}
@@ -4302,7 +4469,7 @@ static int kvm_task_switch_16(struct kvm
sizeof tss_segment_16))
goto out;
- if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+ if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
&tss_segment_16, sizeof tss_segment_16))
goto out;
@@ -4310,7 +4477,7 @@ static int kvm_task_switch_16(struct kvm
tss_segment_16.prev_task_link = old_tss_sel;
if (kvm_write_guest(vcpu->kvm,
- get_tss_base_addr(vcpu, nseg_desc),
+ get_tss_base_addr_write(vcpu, nseg_desc),
&tss_segment_16.prev_task_link,
sizeof tss_segment_16.prev_task_link))
goto out;
@@ -4341,7 +4508,7 @@ static int kvm_task_switch_32(struct kvm
sizeof tss_segment_32))
goto out;
- if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+ if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
&tss_segment_32, sizeof tss_segment_32))
goto out;
@@ -4349,7 +4516,7 @@ static int kvm_task_switch_32(struct kvm
tss_segment_32.prev_task_link = old_tss_sel;
if (kvm_write_guest(vcpu->kvm,
- get_tss_base_addr(vcpu, nseg_desc),
+ get_tss_base_addr_write(vcpu, nseg_desc),
&tss_segment_32.prev_task_link,
sizeof tss_segment_32.prev_task_link))
goto out;
@@ -4371,8 +4538,9 @@ int kvm_task_switch(struct kvm_vcpu *vcp
int ret = 0;
u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
+ u32 desc_limit;
- old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
+ old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
/* FIXME: Handle errors. Failure to read either TSS or their
* descriptors should generate a pagefault.
@@ -4393,7 +4561,10 @@ int kvm_task_switch(struct kvm_vcpu *vcp
}
}
- if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
+ desc_limit = get_desc_limit(&nseg_desc);
+ if (!nseg_desc.p ||
+ ((desc_limit < 0x67 && (nseg_desc.type & 8)) ||
+ desc_limit < 0x2b)) {
kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
return 1;
}
@@ -4581,7 +4752,7 @@ int kvm_arch_vcpu_ioctl_translate(struct
vcpu_load(vcpu);
down_read(&vcpu->kvm->slots_lock);
- gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
+ gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
up_read(&vcpu->kvm->slots_lock);
tr->physical_address = gpa;
tr->valid = gpa != UNMAPPED_GVA;
file:c2b6f395a022bfd881e37c2523925df7dd0731d8 -> file:ac2d426ea35fc867f680e27e6e389c1799bed26d
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -2,7 +2,7 @@
# Makefile for x86 specific library files.
#
-obj-$(CONFIG_SMP) += msr-smp.o
+obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
lib-y := delay.o
lib-y += thunk_$(BITS).o
@@ -26,4 +26,5 @@ else
lib-y += thunk_64.o clear_page_64.o copy_page_64.o
lib-y += memmove_64.o memset_64.o
lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
+ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem_64.o
endif
file:a3c66887503848cb6a9c5ac628e4bd6d1408eb0b(new)
--- /dev/null
+++ b/arch/x86/lib/cache-smp.c
@@ -0,0 +1,19 @@
+#include <linux/smp.h>
+#include <linux/module.h>
+
+static void __wbinvd(void *dummy)
+{
+ wbinvd();
+}
+
+void wbinvd_on_cpu(int cpu)
+{
+ smp_call_function_single(cpu, __wbinvd, NULL, 1);
+}
+EXPORT_SYMBOL(wbinvd_on_cpu);
+
+int wbinvd_on_all_cpus(void)
+{
+ return on_each_cpu(__wbinvd, NULL, 1);
+}
+EXPORT_SYMBOL(wbinvd_on_all_cpus);
file:15acecf0d7aa0c4768493d79dedfdf518a647707(new)
--- /dev/null
+++ b/arch/x86/lib/rwsem_64.S
@@ -0,0 +1,81 @@
+/*
+ * x86-64 rwsem wrappers
+ *
+ * This interfaces the inline asm code to the slow-path
+ * C routines. We need to save the call-clobbered regs
+ * that the asm does not mark as clobbered, and move the
+ * argument from %rax to %rdi.
+ *
+ * NOTE! We don't need to save %rax, because the functions
+ * will always return the semaphore pointer in %rax (which
+ * is also the input argument to these helpers)
+ *
+ * The following can clobber %rdx because the asm clobbers it:
+ * call_rwsem_down_write_failed
+ * call_rwsem_wake
+ * but %rdi, %rsi, %rcx, %r8-r11 always need saving.
+ */
+
+#include <linux/linkage.h>
+#include <asm/rwlock.h>
+#include <asm/alternative-asm.h>
+#include <asm/frame.h>
+#include <asm/dwarf2.h>
+
+#define save_common_regs \
+ pushq %rdi; \
+ pushq %rsi; \
+ pushq %rcx; \
+ pushq %r8; \
+ pushq %r9; \
+ pushq %r10; \
+ pushq %r11
+
+#define restore_common_regs \
+ popq %r11; \
+ popq %r10; \
+ popq %r9; \
+ popq %r8; \
+ popq %rcx; \
+ popq %rsi; \
+ popq %rdi
+
+/* Fix up special calling conventions */
+ENTRY(call_rwsem_down_read_failed)
+ save_common_regs
+ pushq %rdx
+ movq %rax,%rdi
+ call rwsem_down_read_failed
+ popq %rdx
+ restore_common_regs
+ ret
+ ENDPROC(call_rwsem_down_read_failed)
+
+ENTRY(call_rwsem_down_write_failed)
+ save_common_regs
+ movq %rax,%rdi
+ call rwsem_down_write_failed
+ restore_common_regs
+ ret
+ ENDPROC(call_rwsem_down_write_failed)
+
+ENTRY(call_rwsem_wake)
+ decw %dx /* do nothing if still outstanding active readers */
+ jnz 1f
+ save_common_regs
+ movq %rax,%rdi
+ call rwsem_wake
+ restore_common_regs
+1: ret
+ ENDPROC(call_rwsem_wake)
+
+/* Fix up special calling conventions */
+ENTRY(call_rwsem_downgrade_wake)
+ save_common_regs
+ pushq %rdx
+ movq %rax,%rdi
+ call rwsem_downgrade_wake
+ popq %rdx
+ restore_common_regs
+ ret
+ ENDPROC(call_rwsem_downgrade_wake)
file:ed34f5e35999449a488be43ced0e580319b373f1 -> file:c9ba9deafe83f30daaae9b0e998c3de715671d43
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -6,6 +6,14 @@
#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+#ifdef CONFIG_HIGHPTE
+#define PGALLOC_USER_GFP __GFP_HIGHMEM
+#else
+#define PGALLOC_USER_GFP 0
+#endif
+
+gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;
+
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
return (pte_t *)__get_free_page(PGALLOC_GFP);
@@ -15,16 +23,29 @@ pgtable_t pte_alloc_one(struct mm_struct
{
struct page *pte;
-#ifdef CONFIG_HIGHPTE
- pte = alloc_pages(PGALLOC_GFP | __GFP_HIGHMEM, 0);
-#else
- pte = alloc_pages(PGALLOC_GFP, 0);
-#endif
+ pte = alloc_pages(__userpte_alloc_gfp, 0);
if (pte)
pgtable_page_ctor(pte);
return pte;
}
+static int __init setup_userpte(char *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ /*
+ * "userpte=nohigh" disables allocation of user pagetables in
+ * high memory.
+ */
+ if (strcmp(arg, "nohigh") == 0)
+ __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
+ else
+ return -EINVAL;
+ return 0;
+}
+early_param("userpte", setup_userpte);
+
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
pgtable_page_dtor(pte);
file:3347f696edc77d4692f7b0b0fdb774677b573cfa -> file:f1fb411d9e4fa68948c2e5997759e3e47f69a06c
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -95,7 +95,10 @@ static void nmi_cpu_save_registers(struc
static void nmi_cpu_start(void *dummy)
{
struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
- model->start(msrs);
+ if (!msrs->controls)
+ WARN_ON_ONCE(1);
+ else
+ model->start(msrs);
}
static int nmi_start(void)
@@ -107,7 +110,10 @@ static int nmi_start(void)
static void nmi_cpu_stop(void *dummy)
{
struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
- model->stop(msrs);
+ if (!msrs->controls)
+ WARN_ON_ONCE(1);
+ else
+ model->stop(msrs);
}
static void nmi_stop(void)
@@ -159,7 +165,7 @@ static int nmi_setup_mux(void)
for_each_possible_cpu(i) {
per_cpu(cpu_msrs, i).multiplex =
- kmalloc(multiplex_size, GFP_KERNEL);
+ kzalloc(multiplex_size, GFP_KERNEL);
if (!per_cpu(cpu_msrs, i).multiplex)
return 0;
}
@@ -179,7 +185,6 @@ static void nmi_cpu_setup_mux(int cpu, s
if (counter_config[i].enabled) {
multiplex[i].saved = -(u64)counter_config[i].count;
} else {
- multiplex[i].addr = 0;
multiplex[i].saved = 0;
}
}
@@ -189,25 +194,27 @@ static void nmi_cpu_setup_mux(int cpu, s
static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
{
+ struct op_msr *counters = msrs->counters;
struct op_msr *multiplex = msrs->multiplex;
int i;
for (i = 0; i < model->num_counters; ++i) {
int virt = op_x86_phys_to_virt(i);
- if (multiplex[virt].addr)
- rdmsrl(multiplex[virt].addr, multiplex[virt].saved);
+ if (counters[i].addr)
+ rdmsrl(counters[i].addr, multiplex[virt].saved);
}
}
static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
{
+ struct op_msr *counters = msrs->counters;
struct op_msr *multiplex = msrs->multiplex;
int i;
for (i = 0; i < model->num_counters; ++i) {
int virt = op_x86_phys_to_virt(i);
- if (multiplex[virt].addr)
- wrmsrl(multiplex[virt].addr, multiplex[virt].saved);
+ if (counters[i].addr)
+ wrmsrl(counters[i].addr, multiplex[virt].saved);
}
}
@@ -303,11 +310,11 @@ static int allocate_msrs(void)
int i;
for_each_possible_cpu(i) {
- per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
+ per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
GFP_KERNEL);
if (!per_cpu(cpu_msrs, i).counters)
return 0;
- per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
+ per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
GFP_KERNEL);
if (!per_cpu(cpu_msrs, i).controls)
return 0;
file:39686c29f03a2d52774dc47cc616544b47e1e426 -> file:1ed963d2e9b6730c99517cbc1a44a0eaf3e47e8c
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -76,19 +76,6 @@ static struct op_ibs_config ibs_config;
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-static void op_mux_fill_in_addresses(struct op_msrs * const msrs)
-{
- int i;
-
- for (i = 0; i < NUM_VIRT_COUNTERS; i++) {
- int hw_counter = op_x86_virt_to_phys(i);
- if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
- msrs->multiplex[i].addr = MSR_K7_PERFCTR0 + hw_counter;
- else
- msrs->multiplex[i].addr = 0;
- }
-}
-
static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
struct op_msrs const * const msrs)
{
@@ -98,7 +85,7 @@ static void op_mux_switch_ctrl(struct op
/* enable active counters */
for (i = 0; i < NUM_COUNTERS; ++i) {
int virt = op_x86_phys_to_virt(i);
- if (!counter_config[virt].enabled)
+ if (!reset_value[virt])
continue;
rdmsrl(msrs->controls[i].addr, val);
val &= model->reserved;
@@ -107,10 +94,6 @@ static void op_mux_switch_ctrl(struct op
}
}
-#else
-
-static inline void op_mux_fill_in_addresses(struct op_msrs * const msrs) { }
-
#endif
/* functions for op_amd_spec */
@@ -122,18 +105,12 @@ static void op_amd_fill_in_addresses(str
for (i = 0; i < NUM_COUNTERS; i++) {
if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
- else
- msrs->counters[i].addr = 0;
}
for (i = 0; i < NUM_CONTROLS; i++) {
if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
- else
- msrs->controls[i].addr = 0;
}
-
- op_mux_fill_in_addresses(msrs);
}
static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
@@ -144,7 +121,8 @@ static void op_amd_setup_ctrs(struct op_
/* setup reset_value */
for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
- if (counter_config[i].enabled)
+ if (counter_config[i].enabled
+ && msrs->counters[op_x86_virt_to_phys(i)].addr)
reset_value[i] = counter_config[i].count;
else
reset_value[i] = 0;
@@ -169,9 +147,7 @@ static void op_amd_setup_ctrs(struct op_
/* enable active counters */
for (i = 0; i < NUM_COUNTERS; ++i) {
int virt = op_x86_phys_to_virt(i);
- if (!counter_config[virt].enabled)
- continue;
- if (!msrs->counters[i].addr)
+ if (!reset_value[virt])
continue;
/* setup counter registers */
@@ -405,16 +381,6 @@ static int init_ibs_nmi(void)
return 1;
}
-#ifdef CONFIG_NUMA
- /* Sanity check */
- /* Works only for 64bit with proper numa implementation. */
- if (nodes != num_possible_nodes()) {
- printk(KERN_DEBUG "Failed to setup CPU node(s) for IBS, "
- "found: %d, expected %d",
- nodes, num_possible_nodes());
- return 1;
- }
-#endif
return 0;
}
file:ac6b354becdfe9f5859aa7930cc6b2aedb9f17e2 -> file:e6a160a4684a4a9d96914acc161122d6c7f8c208
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -394,12 +394,6 @@ static void p4_fill_in_addresses(struct
setup_num_counters();
stag = get_stagger();
- /* initialize some registers */
- for (i = 0; i < num_counters; ++i)
- msrs->counters[i].addr = 0;
- for (i = 0; i < num_controls; ++i)
- msrs->controls[i].addr = 0;
-
/* the counter & cccr registers we pay attention to */
for (i = 0; i < num_counters; ++i) {
addr = p4_counters[VIRT_CTR(stag, i)].counter_address;
file:8eb05878554cf382a0cf482947a8ebf7a93fdeeb -> file:2873c0087836e647ecc68bf9a785ce02e295f344
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -37,15 +37,11 @@ static void ppro_fill_in_addresses(struc
for (i = 0; i < num_counters; i++) {
if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
- else
- msrs->counters[i].addr = 0;
}
for (i = 0; i < num_counters; i++) {
if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i))
msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
- else
- msrs->controls[i].addr = 0;
}
}
@@ -57,7 +53,7 @@ static void ppro_setup_ctrs(struct op_x8
int i;
if (!reset_value) {
- reset_value = kmalloc(sizeof(reset_value[0]) * num_counters,
+ reset_value = kzalloc(sizeof(reset_value[0]) * num_counters,
GFP_ATOMIC);
if (!reset_value)
return;
file:0696d506c4ade99b0d43232128a77cea99d65ec8 -> file:b02f6d8ac922f171b5b31be1923aa9e358e73bca
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -590,6 +590,8 @@ static __init int intel_router_probe(str
case PCI_DEVICE_ID_INTEL_ICH10_1:
case PCI_DEVICE_ID_INTEL_ICH10_2:
case PCI_DEVICE_ID_INTEL_ICH10_3:
+ case PCI_DEVICE_ID_INTEL_CPT_LPC1:
+ case PCI_DEVICE_ID_INTEL_CPT_LPC2:
r->name = "PIIX/ICH";
r->get = pirq_piix_get;
r->set = pirq_piix_set;
file:8aa85f17667e5034cfd2fae3eecd217c878d0529 -> file:eeeb52276271e1e3d8759e0d59100ae9eff91405
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -104,6 +104,8 @@ static void __save_processor_state(struc
ctxt->cr4 = read_cr4();
ctxt->cr8 = read_cr8();
#endif
+ ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
+ &ctxt->misc_enable);
}
/* Needed by apm.c */
@@ -176,6 +178,8 @@ static void fix_processor_context(void)
*/
static void __restore_processor_state(struct saved_context *ctxt)
{
+ if (ctxt->misc_enable_saved)
+ wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
/*
* control registers
*/
file:b641388d8286acfcf73bc6f7ca850609270739b3 -> file:ad47daeafa4eace0e505c51aae54d43317d62233
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -27,10 +27,17 @@ ENTRY(swsusp_arch_suspend)
ret
ENTRY(restore_image)
+ movl mmu_cr4_features, %ecx
movl resume_pg_dir, %eax
subl $__PAGE_OFFSET, %eax
movl %eax, %cr3
+ jecxz 1f # cr4 Pentium and higher, skip if zero
+ andl $~(X86_CR4_PGE), %ecx
+ movl %ecx, %cr4; # turn off PGE
+ movl %cr3, %eax; # flush TLB
+ movl %eax, %cr3
+1:
movl restore_pblist, %edx
.p2align 4,,7
@@ -54,16 +61,8 @@ done:
movl $swapper_pg_dir, %eax
subl $__PAGE_OFFSET, %eax
movl %eax, %cr3
- /* Flush TLB, including "global" things (vmalloc) */
movl mmu_cr4_features, %ecx
jecxz 1f # cr4 Pentium and higher, skip if zero
- movl %ecx, %edx
- andl $~(X86_CR4_PGE), %edx
- movl %edx, %cr4; # turn off PGE
-1:
- movl %cr3, %eax; # flush TLB
- movl %eax, %cr3
- jecxz 1f # cr4 Pentium and higher, skip if zero
movl %ecx, %cr4; # turn PGE back on
1:
file:79f97383cde3be99da33f092c7604876be896dab -> file:3578688ab670e4298e569021ed8a4495edf74568
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -48,6 +48,7 @@
#include <asm/traps.h>
#include <asm/setup.h>
#include <asm/desc.h>
+#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
@@ -1092,6 +1093,12 @@ asmlinkage void __init xen_start_kernel(
__supported_pte_mask |= _PAGE_IOMAP;
+ /*
+ * Prevent page tables from being allocated in highmem, even
+ * if CONFIG_HIGHPTE is enabled.
+ */
+ __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
+
#ifdef CONFIG_X86_64
/* Work out if we support NX */
check_efer();
file:bf4cd6bfe959f1037431668c71a263050098f35a -> file:350a3deedf25496dc12571dd950c6aca0f740ff6
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1432,14 +1432,15 @@ static void *xen_kmap_atomic_pte(struct
{
pgprot_t prot = PAGE_KERNEL;
+ /*
+ * We disable highmem allocations for page tables so we should never
+ * see any calls to kmap_atomic_pte on a highmem page.
+ */
+ BUG_ON(PageHighMem(page));
+
if (PagePinned(page))
prot = PAGE_KERNEL_RO;
- if (0 && PageHighMem(page))
- printk("mapping highpte %lx type %d prot %s\n",
- page_to_pfn(page), type,
- (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");
-
return kmap_atomic_prot(page, type, prot);
}
#endif
file:987267f79bf5154648cb729cedf29e712fa58ab3 -> file:a9c6611080347bd4368beff2ab582d07347fa256
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -60,6 +60,6 @@ static void xen_vcpu_notify_restore(void
void xen_arch_resume(void)
{
- smp_call_function(xen_vcpu_notify_restore,
- (void *)CLOCK_EVT_NOTIFY_RESUME, 1);
+ on_each_cpu(xen_vcpu_notify_restore,
+ (void *)CLOCK_EVT_NOTIFY_RESUME, 1);
}
file:f04c9891142fa7a5090d966ac4779b0af419ebca -> file:ed8cd3cbd4993de9619f1e6db12af3c26b166c9b
--- a/arch/xtensa/include/asm/cache.h
+++ b/arch/xtensa/include/asm/cache.h
@@ -29,5 +29,6 @@
# define CACHE_WAY_SIZE ICACHE_WAY_SIZE
#endif
+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
#endif /* _XTENSA_CACHE_H */
file:943f2abac9b44fa6cab26fc763c1939702e7a6e8 -> file:ce038d861eb9d2706e4474e725b5c6f4eadc4a00
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -324,6 +324,7 @@ struct dma_async_tx_descriptor *
async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
struct page **blocks, struct async_submit_ctl *submit)
{
+ void *scribble = submit->scribble;
int non_zero_srcs, i;
BUG_ON(faila == failb);
@@ -332,11 +333,13 @@ async_raid6_2data_recov(int disks, size_
pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
- /* we need to preserve the contents of 'blocks' for the async
- * case, so punt to synchronous if a scribble buffer is not available
+ /* if a dma resource is not available or a scribble buffer is not
+ * available punt to the synchronous path. In the 'dma not
+ * available' case be sure to use the scribble buffer to
+ * preserve the content of 'blocks' as the caller intended.
*/
- if (!submit->scribble) {
- void **ptrs = (void **) blocks;
+ if (!async_dma_find_channel(DMA_PQ) || !scribble) {
+ void **ptrs = scribble ? scribble : (void **) blocks;
async_tx_quiesce(&submit->depend_tx);
for (i = 0; i < disks; i++)
@@ -406,11 +409,13 @@ async_raid6_datap_recov(int disks, size_
pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
- /* we need to preserve the contents of 'blocks' for the async
- * case, so punt to synchronous if a scribble buffer is not available
+ /* if a dma resource is not available or a scribble buffer is not
+ * available punt to the synchronous path. In the 'dma not
+ * available' case be sure to use the scribble buffer to
+ * preserve the content of 'blocks' as the caller intended.
*/
- if (!scribble) {
- void **ptrs = (void **) blocks;
+ if (!async_dma_find_channel(DMA_PQ) || !scribble) {
+ void **ptrs = scribble ? scribble : (void **) blocks;
async_tx_quiesce(&submit->depend_tx);
for (i = 0; i < disks; i++)
file:52fec07064f0be7581aa72160d829b521959bfbe -> file:83b62521d8d300fff08b105177b1f0bd2be5dd3e
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -468,6 +468,23 @@ acpi_status acpi_ex_prep_field_value(str
acpi_ut_add_reference(obj_desc->field.region_obj);
+ /* allow full data read from EC address space */
+ if (obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_EC) {
+ if (obj_desc->common_field.bit_length > 8) {
+ unsigned width =
+ ACPI_ROUND_BITS_UP_TO_BYTES(
+ obj_desc->common_field.bit_length);
+ // access_bit_width is u8, don't overflow it
+ if (width > 8)
+ width = 8;
+ obj_desc->common_field.access_byte_width =
+ width;
+ obj_desc->common_field.access_bit_width =
+ 8 * width;
+ }
+ }
+
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
obj_desc->field.start_field_bit_offset,
file:f1670e0ef9bb7fb7836ee30af5769bb9430f6f50 -> file:45d2aa93258ed3881a33c13fa58d83fb49821e33
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -588,12 +588,12 @@ static u32 acpi_ec_gpe_handler(void *dat
static acpi_status
acpi_ec_space_handler(u32 function, acpi_physical_address address,
- u32 bits, acpi_integer *value,
+ u32 bits, acpi_integer *value64,
void *handler_context, void *region_context)
{
struct acpi_ec *ec = handler_context;
- int result = 0, i;
- u8 temp = 0;
+ int result = 0, i, bytes = bits / 8;
+ u8 *value = (u8 *)value64;
if ((address > 0xFF) || !value || !handler_context)
return AE_BAD_PARAMETER;
@@ -601,32 +601,15 @@ acpi_ec_space_handler(u32 function, acpi
if (function != ACPI_READ && function != ACPI_WRITE)
return AE_BAD_PARAMETER;
- if (bits != 8 && acpi_strict)
- return AE_BAD_PARAMETER;
-
- if (EC_FLAGS_MSI)
+ if (EC_FLAGS_MSI || bits > 8)
acpi_ec_burst_enable(ec);
- if (function == ACPI_READ) {
- result = acpi_ec_read(ec, address, &temp);
- *value = temp;
- } else {
- temp = 0xff & (*value);
- result = acpi_ec_write(ec, address, temp);
- }
-
- for (i = 8; unlikely(bits - i > 0); i += 8) {
- ++address;
- if (function == ACPI_READ) {
- result = acpi_ec_read(ec, address, &temp);
- (*value) |= ((acpi_integer)temp) << i;
- } else {
- temp = 0xff & ((*value) >> i);
- result = acpi_ec_write(ec, address, temp);
- }
- }
+ for (i = 0; i < bytes; ++i, ++address, ++value)
+ result = (function == ACPI_READ) ?
+ acpi_ec_read(ec, address, value) :
+ acpi_ec_write(ec, address, *value);
- if (EC_FLAGS_MSI)
+ if (EC_FLAGS_MSI || bits > 8)
acpi_ec_burst_disable(ec);
switch (result) {
file:2ef7030a0c28b7d428de422d6cda082a36ec8723 -> file:c2160626131833ea46f88957065e3b93b6c0621e
--- a/drivers/acpi/power_meter.c
+++ b/drivers/acpi/power_meter.c
@@ -34,7 +34,7 @@
#define ACPI_POWER_METER_NAME "power_meter"
ACPI_MODULE_NAME(ACPI_POWER_METER_NAME);
#define ACPI_POWER_METER_DEVICE_NAME "Power Meter"
-#define ACPI_POWER_METER_CLASS "power_meter_resource"
+#define ACPI_POWER_METER_CLASS "pwr_meter_resource"
#define NUM_SENSORS 17
file:d9f78f6cbda31f1002f2aa5390605f29c7c57bd3 -> file:a6ad608c96a2f050ccaa5adbe19921d48621fc8e
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -888,12 +888,14 @@ static int acpi_idle_enter_simple(struct
return(acpi_idle_enter_c1(dev, state));
local_irq_disable();
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we test
- * NEED_RESCHED:
- */
- smp_mb();
+ if (cx->entry_method != ACPI_CSTATE_FFH) {
+ current_thread_info()->status &= ~TS_POLLING;
+ /*
+ * TS_POLLING-cleared state must be visible before we test
+ * NEED_RESCHED:
+ */
+ smp_mb();
+ }
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
@@ -960,7 +962,7 @@ static int acpi_idle_enter_bm(struct cpu
if (acpi_idle_suspend)
return(acpi_idle_enter_c1(dev, state));
- if (acpi_idle_bm_check()) {
+ if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
if (dev->safe_state) {
dev->last_state = dev->safe_state;
return dev->safe_state->enter(dev, dev->safe_state);
@@ -973,12 +975,14 @@ static int acpi_idle_enter_bm(struct cpu
}
local_irq_disable();
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we test
- * NEED_RESCHED:
- */
- smp_mb();
+ if (cx->entry_method != ACPI_CSTATE_FFH) {
+ current_thread_info()->status &= ~TS_POLLING;
+ /*
+ * TS_POLLING-cleared state must be visible before we test
+ * NEED_RESCHED:
+ */
+ smp_mb();
+ }
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
file:5f2c379ab7bfba1903158f5c3f94cf37631a6c44 -> file:045809465347afeffce70a7e60075b7e30235cc9
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -80,6 +80,7 @@ static int acpi_sleep_prepare(u32 acpi_s
#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
+
/*
* ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
* user to request that behavior by using the 'acpi_old_suspend_ordering'
@@ -170,18 +171,6 @@ static void acpi_pm_end(void)
#endif /* CONFIG_ACPI_SLEEP */
#ifdef CONFIG_SUSPEND
-/*
- * According to the ACPI specification the BIOS should make sure that ACPI is
- * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
- * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
- * on such systems during resume. Unfortunately that doesn't help in
- * particularly pathological cases in which SCI_EN has to be set directly on
- * resume, although the specification states very clearly that this flag is
- * owned by the hardware. The set_sci_en_on_resume variable will be set in such
- * cases.
- */
-static bool set_sci_en_on_resume;
-
extern void do_suspend_lowlevel(void);
static u32 acpi_suspend_states[] = {
@@ -248,11 +237,8 @@ static int acpi_suspend_enter(suspend_st
break;
}
- /* If ACPI is not enabled by the BIOS, we need to enable it here. */
- if (set_sci_en_on_resume)
- acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
- else
- acpi_enable();
+ /* This violates the spec but is required for bug compatibility. */
+ acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
/* Reprogram control registers and execute _BFS */
acpi_leave_sleep_state_prep(acpi_state);
@@ -341,12 +327,6 @@ static int __init init_old_suspend_order
return 0;
}
-static int __init init_set_sci_en_on_resume(const struct dmi_system_id *d)
-{
- set_sci_en_on_resume = true;
- return 0;
-}
-
static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
{
.callback = init_old_suspend_ordering,
@@ -365,22 +345,6 @@ static struct dmi_system_id __initdata a
},
},
{
- .callback = init_set_sci_en_on_resume,
- .ident = "Apple MacBook 1,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Apple MacMini 1,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
- },
- },
- {
.callback = init_old_suspend_ordering,
.ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
.matches = {
@@ -389,62 +353,6 @@ static struct dmi_system_id __initdata a
},
},
{
- .callback = init_set_sci_en_on_resume,
- .ident = "Toshiba Satellite L300",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard HP G7000 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP G7000 Notebook PC"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard HP Pavilion dv3 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv3 Notebook PC"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Pavilion dv4",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Pavilion dv7",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv7"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Compaq Presario C700 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario C700 Notebook PC"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Compaq Presario CQ40 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario CQ40 Notebook PC"),
- },
- },
- {
.callback = init_old_suspend_ordering,
.ident = "Panasonic CF51-2L",
.matches = {
file:f336bca7c4503ec1d03d7741ee8185afae942381 -> file:8a0ed2800e6359a49f781b41d2afc44cd183225b
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id,
unsigned long table_end;
acpi_size tbl_size;
- if (acpi_disabled)
+ if (acpi_disabled && !acpi_ht)
return -ENODEV;
if (!handler)
@@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, ac
struct acpi_table_header *table = NULL;
acpi_size tbl_size;
- if (acpi_disabled)
+ if (acpi_disabled && !acpi_ht)
return -ENODEV;
if (!handler)
file:575593a8b4e66b2f6132f6541178c1d68498bbcc -> file:e7b960602b663806d01e888c4d41cdeff9c3e760
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -250,7 +250,7 @@ static int __init acpi_backlight(char *s
ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR;
if (!strcmp("video", str))
acpi_video_support |=
- ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO;
+ ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO;
}
return 1;
}
file:9b375028318a6806f220a8e35ff646adb038cae6 -> file:cb05205da586192689b0dcce1ed3ea7aa04035d4
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -570,6 +570,12 @@ static const struct pci_device_id ahci_p
{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
+ { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
+ { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
+ { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -2831,6 +2837,14 @@ static bool ahci_broken_suspend(struct p
* On HP dv[4-6] and HDX18 with earlier BIOSen, link
* to the harddisk doesn't become online after
* resuming from STR. Warn and fail suspend.
+ *
+ * http://bugzilla.kernel.org/show_bug.cgi?id=12276
+ *
+ * Use dates instead of versions to match as HP is
+ * apparently recycling both product and version
+ * strings.
+ *
+ * http://bugzilla.kernel.org/show_bug.cgi?id=15462
*/
{
.ident = "dv4",
@@ -2839,7 +2853,7 @@ static bool ahci_broken_suspend(struct p
DMI_MATCH(DMI_PRODUCT_NAME,
"HP Pavilion dv4 Notebook PC"),
},
- .driver_data = "F.30", /* cutoff BIOS version */
+ .driver_data = "20090105", /* F.30 */
},
{
.ident = "dv5",
@@ -2848,7 +2862,7 @@ static bool ahci_broken_suspend(struct p
DMI_MATCH(DMI_PRODUCT_NAME,
"HP Pavilion dv5 Notebook PC"),
},
- .driver_data = "F.16", /* cutoff BIOS version */
+ .driver_data = "20090506", /* F.16 */
},
{
.ident = "dv6",
@@ -2857,7 +2871,7 @@ static bool ahci_broken_suspend(struct p
DMI_MATCH(DMI_PRODUCT_NAME,
"HP Pavilion dv6 Notebook PC"),
},
- .driver_data = "F.21", /* cutoff BIOS version */
+ .driver_data = "20090423", /* F.21 */
},
{
.ident = "HDX18",
@@ -2866,7 +2880,7 @@ static bool ahci_broken_suspend(struct p
DMI_MATCH(DMI_PRODUCT_NAME,
"HP HDX18 Notebook PC"),
},
- .driver_data = "F.23", /* cutoff BIOS version */
+ .driver_data = "20090430", /* F.23 */
},
/*
* Acer eMachines G725 has the same problem. BIOS
@@ -2874,6 +2888,8 @@ static bool ahci_broken_suspend(struct p
* work. Inbetween, there are V1.06, V2.06 and V3.03
* that we don't have much idea about. For now,
* blacklist anything older than V3.04.
+ *
+ * http://bugzilla.kernel.org/show_bug.cgi?id=15104
*/
{
.ident = "G725",
@@ -2881,19 +2897,21 @@ static bool ahci_broken_suspend(struct p
DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
},
- .driver_data = "V3.04", /* cutoff BIOS version */
+ .driver_data = "20091216", /* V3.04 */
},
{ } /* terminate list */
};
const struct dmi_system_id *dmi = dmi_first_match(sysids);
- const char *ver;
+ int year, month, date;
+ char buf[9];
if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
return false;
- ver = dmi_get_system_info(DMI_BIOS_VERSION);
+ dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
+ snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
- return !ver || strcmp(ver, dmi->driver_data) < 0;
+ return strcmp(buf, dmi->driver_data) < 0;
}
static bool ahci_broken_online(struct pci_dev *pdev)
@@ -3074,8 +3092,16 @@ static int ahci_init_one(struct pci_dev
ahci_save_initial_config(pdev, hpriv);
/* prepare host */
- if (hpriv->cap & HOST_CAP_NCQ)
- pi.flags |= ATA_FLAG_NCQ | ATA_FLAG_FPDMA_AA;
+ if (hpriv->cap & HOST_CAP_NCQ) {
+ pi.flags |= ATA_FLAG_NCQ;
+ /* Auto-activate optimization is supposed to be supported on
+ all AHCI controllers indicating NCQ support, but it seems
+ to be broken at least on some NVIDIA MCP79 chipsets.
+ Until we get info on which NVIDIA chipsets don't have this
+ issue, if any, disable AA on all NVIDIA AHCIs. */
+ if (pdev->vendor != PCI_VENDOR_ID_NVIDIA)
+ pi.flags |= ATA_FLAG_FPDMA_AA;
+ }
if (hpriv->cap & HOST_CAP_PMP)
pi.flags |= ATA_FLAG_PMP;
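
(Note, not part of the patch: the BIOS-date matching added to ahci_broken_suspend() works because a zero-padded "YYYYMMDD" string sorts lexicographically in date order, so a plain strcmp() against the cutoff string stored in driver_data is sufficient. A small sketch of the comparison, assuming dmi_get_date() fills in year/month/day as in mainline; "cutoff" here stands in for dmi->driver_data:

	int year, month, date;
	char buf[9];
	bool older;

	dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
	snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
	/* true when the BIOS predates the known-good cutoff, e.g. "20090105" */
	older = strcmp(buf, cutoff) < 0;
)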
file:0c6155f51173484a66bf80b5e5efe6f1f1fc464c -> file:4f94e2275038d497b2c8acfb8931f516f849d826
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -291,6 +291,14 @@ static const struct pci_device_id piix_p
{ 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (PCH) */
{ 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ /* SATA Controller IDE (CPT) */
+ { 0x8086, 0x1c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ /* SATA Controller IDE (CPT) */
+ { 0x8086, 0x1c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ /* SATA Controller IDE (CPT) */
+ { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (CPT) */
+ { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
{ } /* terminate list */
};
file:91fed3c93d660a8e7d2934e21142c3d1a8dfa6a2 -> file:6a96da69c482abef92f6c0cc7ce0d2113706bd30
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -159,6 +159,10 @@ int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
+static int atapi_an;
+module_param(atapi_an, int, 0444);
+MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");

+
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
@@ -2570,7 +2574,8 @@ int ata_dev_configure(struct ata_device
* to enable ATAPI AN to discern between PHY status
* changed notifications and ATAPI ANs.
*/
- if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
+ if (atapi_an &&
+ (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
(!sata_pmp_attached(ap) ||
sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
unsigned int err_mask;
@@ -4348,6 +4353,9 @@ static const struct ata_blacklist_entry
{ "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
{ "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
+ { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
+
/* devices which puke on READ_NATIVE_MAX */
{ "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
file:7d8d3c3b4c80b09dbf5f9d7742094cffd2f212c6 -> file:e30b9e74794b2966571b0fb660544bd329304c65
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -870,6 +870,8 @@ static void ata_eh_set_pending(struct at
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
+ struct request_queue *q = qc->scsicmd->device->request_queue;
+ unsigned long flags;
WARN_ON(!ap->ops->error_handler);
@@ -881,7 +883,9 @@ void ata_qc_schedule_eh(struct ata_queue
* Note that ATA_QCFLAG_FAILED is unconditionally set after
* this function completes.
*/
+ spin_lock_irqsave(q->queue_lock, flags);
blk_abort_request(qc->scsicmd->request);
+ spin_unlock_irqrestore(q->queue_lock, flags);
}
/**
@@ -1615,6 +1619,7 @@ void ata_eh_analyze_ncq_error(struct ata
}
/* okay, this error is ours */
+ memset(&tf, 0, sizeof(tf));
rc = ata_eh_read_log_10h(dev, &tag, &tf);
if (rc) {
ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
file:2ae15c3b22a7356ba89f08005b761d6e74f5b6f5 -> file:776a89599448331bae591b3986a429608d7ad4e0
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -893,7 +893,7 @@ static void ata_pio_sector(struct ata_qu
do_write);
}
- if (!do_write)
+ if (!do_write && !PageSlab(page))
flush_dcache_page(page);
qc->curbytes += qc->sect_size;
file:1432dc9d0ab819d899ac3a506606a6778b146f30 -> file:9434114b2ca8cb6f42e40d4ed2091360cd2add9e
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -453,7 +453,9 @@ static void ali_init_chipset(struct pci_
/* Clear CD-ROM DMA write bit */
tmp &= 0x7F;
/* Cable and UDMA */
- pci_write_config_byte(pdev, 0x4B, tmp | 0x09);
+ if (pdev->revision >= 0xc2)
+ tmp |= 0x01;
+ pci_write_config_byte(pdev, 0x4B, tmp | 0x08);
/*
* CD_ROM DMA on (0x53 bit 0). Enable this even if we want
* to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control
file:d16e87e29189a64ae2e88aaec8b887f37d174183 -> file:d9f2913f159b3a086b6d1ce6c3d4d59bd6292bab
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -25,7 +25,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_hpt3x2n"
-#define DRV_VERSION "0.3.8"
+#define DRV_VERSION "0.3.9"
enum {
HPT_PCI_FAST = (1 << 31),
@@ -547,16 +547,16 @@ static int hpt3x2n_init_one(struct pci_d
pci_mhz);
/* Set our private data up. We only need a few flags so we use
it directly */
- if (pci_mhz > 60) {
+ if (pci_mhz > 60)
hpriv = (void *)(PCI66 | USE_DPLL);
- /*
- * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in
- * the MISC. register to stretch the UltraDMA Tss timing.
- * NOTE: This register is only writeable via I/O space.
- */
- if (dev->device == PCI_DEVICE_ID_TTI_HPT371)
- outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c);
- }
+
+ /*
+ * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in
+ * the MISC. register to stretch the UltraDMA Tss timing.
+ * NOTE: This register is only writeable via I/O space.
+ */
+ if (dev->device == PCI_DEVICE_ID_TTI_HPT371)
+ outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c);
/* Now kick off ATA set up */
return ata_pci_sff_init_one(dev, ppi, &hpt3x2n_sht, hpriv);
file:88984b803d6dab2c2137c8203ad1669f8719ceff -> file:1d73b8d236ed5401024e755477313685a04d9297
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -661,6 +661,7 @@ static const struct pci_device_id via[]
{ PCI_VDEVICE(VIA, 0x3164), },
{ PCI_VDEVICE(VIA, 0x5324), },
{ PCI_VDEVICE(VIA, 0xC409), VIA_IDFLAG_SINGLE },
+ { PCI_VDEVICE(VIA, 0x9001), VIA_IDFLAG_SINGLE },
{ },
};
file:1eb4e020eb5ce3be2ab78cf2f3760f40a41f3b20 -> file:ae2297cd29d886e87d784193765e6cbc6c4abaee
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -1673,7 +1673,6 @@ static void nv_mcp55_freeze(struct ata_p
mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
mask &= ~(NV_INT_ALL_MCP55 << shift);
writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
- ata_sff_freeze(ap);
}
static void nv_mcp55_thaw(struct ata_port *ap)
@@ -1687,7 +1686,6 @@ static void nv_mcp55_thaw(struct ata_por
mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
mask |= (NV_INT_MASK_MCP55 << shift);
writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
- ata_sff_thaw(ap);
}
static void nv_adma_error_handler(struct ata_port *ap)
@@ -2478,8 +2476,7 @@ static int nv_init_one(struct pci_dev *p
}
pci_set_master(pdev);
- return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
- IRQF_SHARED, ipriv->sht);
+ return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
}
#ifdef CONFIG_PM
file:02efd9a83d26a06df084d5fffeaf1860d7aa859f -> file:e35596b97853bd624c95c2d9ff8f399def6fb533
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -558,6 +558,19 @@ static void svia_configure(struct pci_de
tmp8 |= NATIVE_MODE_ALL;
pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
}
+
+ /*
+ * vt6421 has problems talking to some drives. The following
+ * is the magic fix from Joseph Chan <JosephChan@via.com.tw>.
+ * Please add proper documentation if possible.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=15173
+ */
+ if (pdev->device == 0x3249) {
+ pci_read_config_byte(pdev, 0x52, &tmp8);
+ tmp8 |= 1 << 2;
+ pci_write_config_byte(pdev, 0x52, tmp8);
+ }
}
static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
file:33faaa22a19dae234e7bfe97b5d25f1eabb8e645 -> file:4d809667815e5d64ba72ef6b57146c8174d87170
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -295,6 +295,19 @@ int devtmpfs_delete_node(struct device *
if (dentry->d_inode) {
err = vfs_getattr(nd.path.mnt, dentry, &stat);
if (!err && dev_mynode(dev, dentry->d_inode, &stat)) {
+ struct iattr newattrs;
+ /*
+ * before unlinking this node, reset permissions
+ * of possible references like hardlinks
+ */
+ newattrs.ia_uid = 0;
+ newattrs.ia_gid = 0;
+ newattrs.ia_mode = stat.mode & ~0777;
+ newattrs.ia_valid =
+ ATTR_UID|ATTR_GID|ATTR_MODE;
+ mutex_lock(&dentry->d_inode->i_mutex);
+ notify_change(dentry, &newattrs);
+ mutex_unlock(&dentry->d_inode->i_mutex);
err = vfs_unlink(nd.path.dentry->d_inode,
dentry);
if (!err || err == -ENOENT)
file:2fb2e6cc322aab1a36164de3fc99674559dea9ed -> file:c496c8a1a885d07396547e27b1eeb0107e0cc95e
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -499,6 +499,10 @@ static int __devinit agp_amd64_probe(str
u8 cap_ptr;
int err;
+ /* The Highlander principle */
+ if (agp_bridges_found)
+ return -ENODEV;
+
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
return -ENODEV;
@@ -562,6 +566,8 @@ static void __devexit agp_amd64_remove(s
amd64_aperture_sizes[bridge->aperture_size_idx].size);
agp_remove_bridge(bridge);
agp_put_bridge(bridge);
+
+ agp_bridges_found--;
}
#ifdef CONFIG_PM
@@ -709,6 +715,11 @@ static struct pci_device_id agp_amd64_pc
MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
+static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = {
+ { PCI_DEVICE_CLASS(0, 0) },
+ { }
+};
+
static struct pci_driver agp_amd64_pci_driver = {
.name = "agpgart-amd64",
.id_table = agp_amd64_pci_table,
@@ -733,7 +744,6 @@ int __init agp_amd64_init(void)
return err;
if (agp_bridges_found == 0) {
- struct pci_dev *dev;
if (!agp_try_unsupported && !agp_try_unsupported_boot) {
printk(KERN_INFO PFX "No supported AGP bridge found.\n");
#ifdef MODULE
@@ -749,17 +759,10 @@ int __init agp_amd64_init(void)
return -ENODEV;
/* Look for any AGP bridge */
- dev = NULL;
- err = -ENODEV;
- for_each_pci_dev(dev) {
- if (!pci_find_capability(dev, PCI_CAP_ID_AGP))
- continue;
- /* Only one bridge supported right now */
- if (agp_amd64_probe(dev, NULL) == 0) {
- err = 0;
- break;
- }
- }
+ agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
+ err = driver_attach(&agp_amd64_pci_driver.driver);
+ if (err == 0 && agp_bridges_found == 0)
+ err = -ENODEV;
}
return err;
}
file:9047b2714653c54ef3c0130f30e09cd98e0bc534 -> file:dc8a6f70483b51a8f1c1945959fcf4cbc552bfaa
--- a/drivers/char/agp/hp-agp.c
+++ b/drivers/char/agp/hp-agp.c
@@ -488,9 +488,8 @@ zx1_gart_probe (acpi_handle obj, u32 dep
handle = obj;
do {
status = acpi_get_object_info(handle, &info);
- if (ACPI_SUCCESS(status)) {
+ if (ACPI_SUCCESS(status) && (info->valid & ACPI_VALID_HID)) {
/* TBD check _CID also */
- info->hardware_id.string[sizeof(info->hardware_id.length)-1] = '\0';
match = (strcmp(info->hardware_id.string, "HWP0001") == 0);
kfree(info);
if (match) {
file:4dcfef05045a0822ab5a8054ac6300fe086860f8 -> file:b8e02190bef4302f2fc3d490b7cdb1bc54d5e2e9
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
+#include <asm/smp.h>
#include "agp.h"
/*
@@ -815,12 +816,6 @@ static void intel_i830_setup_flush(void)
intel_i830_fini_flush();
}
-static void
-do_wbinvd(void *null)
-{
- wbinvd();
-}
-
/* The chipset_flush interface needs to get data that has already been
* flushed out of the CPU all the way out to main memory, because the GPU
* doesn't snoop those buffers.
@@ -837,12 +832,10 @@ static void intel_i830_chipset_flush(str
memset(pg, 0, 1024);
- if (cpu_has_clflush) {
+ if (cpu_has_clflush)
clflush_cache_range(pg, 1024);
- } else {
- if (on_each_cpu(do_wbinvd, NULL, 1) != 0)
- printk(KERN_ERR "Timed out waiting for cache flush.\n");
- }
+ else if (wbinvd_on_all_cpus() != 0)
+ printk(KERN_ERR "Timed out waiting for cache flush.\n");
}
/* The intel i830 automatically initializes the agp aperture during POST.
file:d2e698096ace182698152e2366d7c2dc2cf91342 -> file:abae8c99259411c6454261b9d01d02b0f9d3ef92
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -310,9 +310,14 @@ static void deliver_recv_msg(struct smi_
{
/* Deliver the message to the upper layer with the lock
released. */
- spin_unlock(&(smi_info->si_lock));
- ipmi_smi_msg_received(smi_info->intf, msg);
- spin_lock(&(smi_info->si_lock));
+
+ if (smi_info->run_to_completion) {
+ ipmi_smi_msg_received(smi_info->intf, msg);
+ } else {
+ spin_unlock(&(smi_info->si_lock));
+ ipmi_smi_msg_received(smi_info->intf, msg);
+ spin_lock(&(smi_info->si_lock));
+ }
}
static void return_hosed_msg(struct smi_info *smi_info, int cCode)
file:64acd05f71c8743330e47a1d9883fc59acc22081 -> file:9abc3a19d53a033e7ff64dd5e68306fc97a2ddb4
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -247,6 +247,7 @@ static const struct file_operations raw_
.aio_read = generic_file_aio_read,
.write = do_sync_write,
.aio_write = blkdev_aio_write,
+ .fsync = block_fsync,
.open = raw_open,
.release= raw_release,
.ioctl = raw_ioctl,
file:8e00b4ddd0830699bca8bab65c3e855657b21978 -> file:792868d24f2a0f2ad6bc21a4967ace01a2f84d92
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -224,6 +224,7 @@ struct tpm_readpubek_params_out {
u8 algorithm[4];
u8 encscheme[2];
u8 sigscheme[2];
+ __be32 paramsize;
u8 parameters[12]; /*assuming RSA*/
__be32 keysize;
u8 modulus[256];
file:0b73e4ec1addafff42a79752a666fecd4b25d665 -> file:ca15c04e5a8d9601cfbce2bc224487b10847ed66
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -257,6 +257,10 @@ out:
return size;
}
+static int itpm;
+module_param(itpm, bool, 0444);
+MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
+
/*
* If interrupts are used (signaled by an irq set in the vendor structure)
* tpm.c can skip polling for the data to be available as the interrupt is
@@ -293,7 +297,7 @@ static int tpm_tis_send(struct tpm_chip
wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
&chip->vendor.int_queue);
status = tpm_tis_status(chip);
- if ((status & TPM_STS_DATA_EXPECT) == 0) {
+ if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
rc = -EIO;
goto out_err;
}
@@ -467,6 +471,10 @@ static int tpm_tis_init(struct device *d
"1.2 TPM (device-id 0x%X, rev-id %d)\n",
vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
+ if (itpm)
+ dev_info(dev, "Intel iTPM workaround enabled\n");
+
+
/* Figure out the capabilities */
intfcaps =
ioread32(chip->vendor.iobase +
@@ -614,7 +622,14 @@ static int tpm_tis_pnp_suspend(struct pn
static int tpm_tis_pnp_resume(struct pnp_dev *dev)
{
- return tpm_pm_resume(&dev->dev);
+ struct tpm_chip *chip = pnp_get_drvdata(dev);
+ int ret;
+
+ ret = tpm_pm_resume(&dev->dev);
+ if (!ret)
+ tpm_continue_selftest(chip);
+
+ return ret;
}
static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
@@ -629,6 +644,7 @@ static struct pnp_device_id tpm_pnp_tbl[
{"", 0}, /* User Specified */
{"", 0} /* Terminator */
};
+MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev)
{
file:6b3e0c2f33e2b193838fbecc0dda72aa9f0db5ef -> file:234d9f6bc69692b7d987500a9fd5de4d99772912
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -413,18 +413,10 @@ static cycle_t sh_cmt_clocksource_read(s
static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
- int ret;
p->total_cycles = 0;
- ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
- if (ret)
- return ret;
-
- /* TODO: calculate good shift from rate and counter bit width */
- cs->shift = 0;
- cs->mult = clocksource_hz2mult(p->rate, cs->shift);
- return 0;
+ return sh_cmt_start(p, FLAG_CLOCKSOURCE);
}
static void sh_cmt_clocksource_disable(struct clocksource *cs)
@@ -444,7 +436,18 @@ static int sh_cmt_register_clocksource(s
cs->disable = sh_cmt_clocksource_disable;
cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ /* clk_get_rate() needs an enabled clock */
+ clk_enable(p->clk);
+ p->rate = clk_get_rate(p->clk) / ((p->width == 16) ? 512 : 8);
+ clk_disable(p->clk);
+
+ /* TODO: calculate good shift from rate and counter bit width */
+ cs->shift = 10;
+ cs->mult = clocksource_hz2mult(p->rate, cs->shift);
+
pr_info("sh_cmt: %s used as clock source\n", cs->name);
+
clocksource_register(cs);
return 0;
}
@@ -603,18 +606,13 @@ static int sh_cmt_setup(struct sh_cmt_pr
p->irqaction.handler = sh_cmt_interrupt;
p->irqaction.dev_id = p;
p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
- ret = setup_irq(irq, &p->irqaction);
- if (ret) {
- pr_err("sh_cmt: failed to request irq %d\n", irq);
- goto err1;
- }
/* get hold of clock */
p->clk = clk_get(&p->pdev->dev, cfg->clk);
if (IS_ERR(p->clk)) {
pr_err("sh_cmt: cannot get clock \"%s\"\n", cfg->clk);
ret = PTR_ERR(p->clk);
- goto err2;
+ goto err1;
}
if (resource_size(res) == 6) {
@@ -627,14 +625,25 @@ static int sh_cmt_setup(struct sh_cmt_pr
p->clear_bits = ~0xc000;
}
- return sh_cmt_register(p, cfg->name,
- cfg->clockevent_rating,
- cfg->clocksource_rating);
- err2:
- remove_irq(irq, &p->irqaction);
- err1:
+ ret = sh_cmt_register(p, cfg->name,
+ cfg->clockevent_rating,
+ cfg->clocksource_rating);
+ if (ret) {
+ pr_err("sh_cmt: registration failed\n");
+ goto err1;
+ }
+
+ ret = setup_irq(irq, &p->irqaction);
+ if (ret) {
+ pr_err("sh_cmt: failed to request irq %d\n", irq);
+ goto err1;
+ }
+
+ return 0;
+
+err1:
iounmap(p->mapbase);
- err0:
+err0:
return ret;
}
file:973e714d605147d7e2110eaae48a25b780d32bc7 -> file:4c8a759e60cdb56c14cf57002a9672f81100822e
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -221,15 +221,15 @@ static void sh_mtu2_register_clockevent(
ced->cpumask = cpumask_of(0);
ced->set_mode = sh_mtu2_clock_event_mode;
+ pr_info("sh_mtu2: %s used for clock events\n", ced->name);
+ clockevents_register_device(ced);
+
ret = setup_irq(p->irqaction.irq, &p->irqaction);
if (ret) {
pr_err("sh_mtu2: failed to request irq %d\n",
p->irqaction.irq);
return;
}
-
- pr_info("sh_mtu2: %s used for clock events\n", ced->name);
- clockevents_register_device(ced);
}
static int sh_mtu2_register(struct sh_mtu2_priv *p, char *name,
file:93c2322feab79e8e119d0b51949ac0b1bc42135d -> file:961f5b5ef6a3584c558c45dbe20a05074fe325b1
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -323,15 +323,15 @@ static void sh_tmu_register_clockevent(s
ced->set_next_event = sh_tmu_clock_event_next;
ced->set_mode = sh_tmu_clock_event_mode;
+ pr_info("sh_tmu: %s used for clock events\n", ced->name);
+ clockevents_register_device(ced);
+
ret = setup_irq(p->irqaction.irq, &p->irqaction);
if (ret) {
pr_err("sh_tmu: failed to request irq %d\n",
p->irqaction.irq);
return;
}
-
- pr_info("sh_tmu: %s used for clock events\n", ced->name);
- clockevents_register_device(ced);
}
static int sh_tmu_register(struct sh_tmu_priv *p, char *name,
file:26e4759aaa6bf3a54588f3607192d033ca03d60f -> file:4b34ade2332baaa50bb1ca1af9c45e1a9893d321
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -22,8 +22,6 @@
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>
-#include <linux/input.h>
-#include <linux/workqueue.h>
/*
* dbs is used in this file as a shortform for demandbased switching
@@ -578,9 +576,7 @@ static void do_dbs_timer(struct work_str
/* We want all CPUs to do sampling nearly on same jiffy */
int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
- if (num_online_cpus() > 1)
- delay -= jiffies % delay;
-
+ delay -= jiffies % delay;
mutex_lock(&dbs_info->timer_mutex);
/* Common NORMAL_SAMPLE setup */
@@ -618,84 +614,6 @@ static inline void dbs_timer_exit(struct
cancel_delayed_work_sync(&dbs_info->work);
}
-static void dbs_refresh_callback(struct work_struct *unused)
-{
- struct cpufreq_policy *policy;
- struct cpu_dbs_info_s *this_dbs_info;
-
- if (lock_policy_rwsem_write(0) < 0)
- return;
-
- this_dbs_info = &per_cpu(od_cpu_dbs_info, 0);
- policy = this_dbs_info->cur_policy;
-
- if (policy->cur < policy->max) {
- __cpufreq_driver_target(policy, policy->max,
- CPUFREQ_RELATION_L);
- this_dbs_info->prev_cpu_idle = get_cpu_idle_time(0,
- &this_dbs_info->prev_cpu_wall);
- }
- unlock_policy_rwsem_write(0);
-}
-
-static DECLARE_WORK(dbs_refresh_work, dbs_refresh_callback);
-
-static void dbs_input_event(struct input_handle *handle, unsigned int type,
- unsigned int code, int value)
-{
- schedule_work(&dbs_refresh_work);
-}
-
-static int dbs_input_connect(struct input_handler *handler,
- struct input_dev *dev, const struct input_device_id *id)
-{
- struct input_handle *handle;
- int error;
-
- handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
- if (!handle)
- return -ENOMEM;
-
- handle->dev = dev;
- handle->handler = handler;
- handle->name = "cpufreq";
-
- error = input_register_handle(handle);
- if (error)
- goto err2;
-
- error = input_open_device(handle);
- if (error)
- goto err1;
-
- return 0;
-err1:
- input_unregister_handle(handle);
-err2:
- kfree(handle);
- return error;
-}
-
-static void dbs_input_disconnect(struct input_handle *handle)
-{
- input_close_device(handle);
- input_unregister_handle(handle);
- kfree(handle);
-}
-
-static const struct input_device_id dbs_ids[] = {
- { .driver_info = 1 },
- { },
-};
-
-static struct input_handler dbs_input_handler = {
- .event = dbs_input_event,
- .connect = dbs_input_connect,
- .disconnect = dbs_input_disconnect,
- .name = "cpufreq_ond",
- .id_table = dbs_ids,
-};
-
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
unsigned int event)
{
@@ -759,7 +677,6 @@ static int cpufreq_governor_dbs(struct c
max(min_sampling_rate,
latency * LATENCY_MULTIPLIER);
}
- rc = input_register_handler(&dbs_input_handler);
mutex_unlock(&dbs_mutex);
mutex_init(&this_dbs_info->timer_mutex);
@@ -773,7 +690,6 @@ static int cpufreq_governor_dbs(struct c
sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
mutex_destroy(&this_dbs_info->timer_mutex);
dbs_enable--;
- input_unregister_handler(&dbs_input_handler);
mutex_unlock(&dbs_mutex);
if (!dbs_enable)
sysfs_remove_group(cpufreq_global_kobject,
file:73655aeb3a60993e28fbbf9ea7930128e5a23407 -> file:f8e57c6303f2c17860d7f7700975ed5502743cb7
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -101,7 +101,6 @@ struct menu_device {
unsigned int expected_us;
u64 predicted_us;
- unsigned int measured_us;
unsigned int exit_us;
unsigned int bucket;
u64 correction_factor[BUCKETS];
@@ -187,14 +186,14 @@ static int menu_select(struct cpuidle_de
int i;
int multiplier;
- data->last_state_idx = 0;
- data->exit_us = 0;
-
if (data->needs_update) {
menu_update(dev);
data->needs_update = 0;
}
+ data->last_state_idx = 0;
+ data->exit_us = 0;
+
/* Special case when user has set very strict latency requirement */
if (unlikely(latency_req == 0))
return 0;
@@ -294,7 +293,7 @@ static void menu_update(struct cpuidle_d
new_factor = data->correction_factor[data->bucket]
* (DECAY - 1) / DECAY;
- if (data->expected_us > 0 && data->measured_us < MAX_INTERESTING)
+ if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
new_factor += RESOLUTION * measured_us / data->expected_us;
else
/*
file:713ed7d372475dc325ac7cfca8cc5e22181af29b -> file:1999807f078ff51b8157d65a258f267c6626b93e
--- a/drivers/edac/edac_mce_amd.c
+++ b/drivers/edac/edac_mce_amd.c
@@ -295,7 +295,6 @@ wrong_ls_mce:
void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
{
u32 ec = ERROR_CODE(regs->nbsl);
- u32 xec = EXT_ERROR_CODE(regs->nbsl);
if (!handle_errors)
return;
@@ -311,11 +310,15 @@ void amd_decode_nb_mce(int node_id, stru
if (regs->nbsh & K8_NBSH_ERR_CPU_VAL)
pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf));
} else {
- pr_cont(", core: %d\n", ilog2((regs->nbsh & 0xf)));
- }
+ u8 assoc_cpus = regs->nbsh & 0xf;
+
+ if (assoc_cpus > 0)
+ pr_cont(", core: %d", fls(assoc_cpus) - 1);
+ pr_cont("\n");
+ }
- pr_emerg("%s.\n", EXT_ERR_MSG(xec));
+ pr_emerg("%s.\n", EXT_ERR_MSG(regs->nbsl));
if (BUS_ERROR(ec) && nb_bus_decoder)
nb_bus_decoder(node_id, regs);
@@ -378,7 +381,7 @@ static void amd_decode_mce(struct mce *m
((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
/* do the two bits[14:13] together */
- ecc = m->status & (3ULL << 45);
+ ecc = (m->status >> 45) & 0x3;
if (ecc)
pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U"));
file:ed635ae2b5d3d785e03118d2d48835c7448aff5b -> file:3fc2ceb6d715c8aa224f3455a3e4c27a27c934ab
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -239,7 +239,7 @@ void fw_schedule_bm_work(struct fw_card
static void fw_card_bm_work(struct work_struct *work)
{
struct fw_card *card = container_of(work, struct fw_card, work.work);
- struct fw_device *root_device;
+ struct fw_device *root_device, *irm_device;
struct fw_node *root_node;
unsigned long flags;
int root_id, new_root_id, irm_id, local_id;
@@ -247,6 +247,7 @@ static void fw_card_bm_work(struct work_
bool do_reset = false;
bool root_device_is_running;
bool root_device_is_cmc;
+ bool irm_is_1394_1995_only;
spin_lock_irqsave(&card->lock, flags);
@@ -256,12 +257,18 @@ static void fw_card_bm_work(struct work_
}
generation = card->generation;
+
root_node = card->root_node;
fw_node_get(root_node);
root_device = root_node->data;
root_device_is_running = root_device &&
atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
root_device_is_cmc = root_device && root_device->cmc;
+
+ irm_device = card->irm_node->data;
+ irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
+ (irm_device->config_rom[2] & 0x000000f0) == 0;
+
root_id = root_node->node_id;
irm_id = card->irm_node->node_id;
local_id = card->local_node->node_id;
@@ -284,8 +291,15 @@ static void fw_card_bm_work(struct work_
if (!card->irm_node->link_on) {
new_root_id = local_id;
- fw_notify("IRM has link off, making local node (%02x) root.\n",
- new_root_id);
+ fw_notify("%s, making local node (%02x) root.\n",
+ "IRM has link off", new_root_id);
+ goto pick_me;
+ }
+
+ if (irm_is_1394_1995_only) {
+ new_root_id = local_id;
+ fw_notify("%s, making local node (%02x) root.\n",
+ "IRM is not 1394a compliant", new_root_id);
goto pick_me;
}
@@ -324,8 +338,8 @@ static void fw_card_bm_work(struct work_
* root, and thus, IRM.
*/
new_root_id = local_id;
- fw_notify("BM lock failed, making local node (%02x) root.\n",
- new_root_id);
+ fw_notify("%s, making local node (%02x) root.\n",
+ "BM lock failed", new_root_id);
goto pick_me;
}
} else if (card->bm_generation != generation) {
file:f9c09a54ec7fb66047bed59fcb4fc5548cee9b2c -> file:9a270478acacb352e7c19dd458638a0cce2ac93f
--- a/drivers/gpio/wm831x-gpio.c
+++ b/drivers/gpio/wm831x-gpio.c
@@ -61,23 +61,31 @@ static int wm831x_gpio_get(struct gpio_c
return 0;
}
-static int wm831x_gpio_direction_out(struct gpio_chip *chip,
- unsigned offset, int value)
+static void wm831x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
- return wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + offset,
- WM831X_GPN_DIR | WM831X_GPN_TRI, 0);
+ wm831x_set_bits(wm831x, WM831X_GPIO_LEVEL, 1 << offset,
+ value << offset);
}
-static void wm831x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int wm831x_gpio_direction_out(struct gpio_chip *chip,
+ unsigned offset, int value)
{
struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
+ int ret;
- wm831x_set_bits(wm831x, WM831X_GPIO_LEVEL, 1 << offset,
- value << offset);
+ ret = wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + offset,
+ WM831X_GPN_DIR | WM831X_GPN_TRI, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Can only set GPIO state once it's in output mode */
+ wm831x_gpio_set(chip, offset, value);
+
+ return 0;
}
#ifdef CONFIG_DEBUG_FS
file:afed886cb0fef0dcd544c2b0d9bad72a3a52593c -> file:08173fcdda4d511bb7bdfad6ac512d845f7d832d
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -104,6 +104,7 @@ int drm_helper_probe_single_connector_mo
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("%s is disconnected\n",
drm_get_connector_name(connector));
+ drm_mode_connector_update_edid_property(connector, NULL);
goto prune;
}
file:b54ba63d506e0350abbf53acd50420cbeb79f7c3 -> file:259ec21a93ea5f8bd00d99fa5159668053b334ea
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -85,6 +85,8 @@ static struct edid_quirk {
/* Envision Peripherals, Inc. EN-7100e */
{ "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
+ /* Envision EN2028 */
+ { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
/* Funai Electronics PM36B */
{ "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
@@ -322,7 +324,7 @@ static struct drm_display_mode drm_dmt_m
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@85Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
- 1072, 1376, 0, 768, 769, 772, 808, 0,
+ 1168, 1376, 0, 768, 769, 772, 808, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1152x864@75Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
@@ -653,15 +655,6 @@ static struct drm_display_mode *drm_mode
mode->vsync_end = mode->vsync_start + vsync_pulse_width;
mode->vtotal = mode->vdisplay + vblank;
- /* perform the basic check for the detailed timing */
- if (mode->hsync_end > mode->htotal ||
- mode->vsync_end > mode->vtotal) {
- drm_mode_destroy(dev, mode);
- DRM_DEBUG_KMS("Incorrect detailed timing. "
- "Sync is beyond the blank.\n");
- return NULL;
- }
-
/* Some EDIDs have bogus h/vtotal values */
if (mode->hsync_end > mode->htotal)
mode->htotal = mode->hsync_end + 1;
@@ -834,8 +827,57 @@ static int add_standard_modes(struct drm
return modes;
}
+static int add_detailed_modes(struct drm_connector *connector,
+ struct detailed_timing *timing,
+ struct edid *edid, u32 quirks, int preferred)
+{
+ int i, modes = 0;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+ int timing_level = standard_timing_level(edid);
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+
+ if (timing->pixel_clock) {
+ newmode = drm_mode_detailed(dev, edid, timing, quirks);
+ if (!newmode)
+ return 0;
+
+ if (preferred)
+ newmode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, newmode);
+ return 1;
+ }
+
+ /* other timing types */
+ switch (data->type) {
+ case EDID_DETAIL_MONITOR_RANGE:
+ /* Get monitor range data */
+ break;
+ case EDID_DETAIL_STD_MODES:
+ /* Six modes per detailed section */
+ for (i = 0; i < 6; i++) {
+ struct std_timing *std;
+ struct drm_display_mode *newmode;
+
+ std = &data->data.timings[i];
+ newmode = drm_mode_std(dev, std, edid->revision,
+ timing_level);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return modes;
+}
+
/**
- * add_detailed_modes - get detailed mode info from EDID data
+ * add_detailed_info - get detailed mode info from EDID data
* @connector: attached connector
* @edid: EDID block to scan
* @quirks: quirks to apply
@@ -846,67 +888,24 @@ static int add_standard_modes(struct drm
static int add_detailed_info(struct drm_connector *connector,
struct edid *edid, u32 quirks)
{
- struct drm_device *dev = connector->dev;
- int i, j, modes = 0;
- int timing_level;
-
- timing_level = standard_timing_level(edid);
+ int i, modes = 0;
for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
struct detailed_timing *timing = &edid->detailed_timings[i];
- struct detailed_non_pixel *data = &timing->data.other_data;
- struct drm_display_mode *newmode;
+ int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
- /* X server check is version 1.1 or higher */
- if (edid->version == 1 && edid->revision >= 1 &&
- !timing->pixel_clock) {
- /* Other timing or info */
- switch (data->type) {
- case EDID_DETAIL_MONITOR_SERIAL:
- break;
- case EDID_DETAIL_MONITOR_STRING:
- break;
- case EDID_DETAIL_MONITOR_RANGE:
- /* Get monitor range data */
- break;
- case EDID_DETAIL_MONITOR_NAME:
- break;
- case EDID_DETAIL_MONITOR_CPDATA:
- break;
- case EDID_DETAIL_STD_MODES:
- for (j = 0; j < 6; i++) {
- struct std_timing *std;
- struct drm_display_mode *newmode;
-
- std = &data->data.timings[j];
- newmode = drm_mode_std(dev, std,
- edid->revision,
- timing_level);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
- break;
- default:
- break;
- }
- } else {
- newmode = drm_mode_detailed(dev, edid, timing, quirks);
- if (!newmode)
- continue;
-
- /* First detailed mode is preferred */
- if (i == 0 && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING))
- newmode->type |= DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, newmode);
+ /* In 1.0, only timings are allowed */
+ if (!timing->pixel_clock && edid->version == 1 &&
+ edid->revision == 0)
+ continue;
- modes++;
- }
+ modes += add_detailed_modes(connector, timing, edid, quirks,
+ preferred);
}
return modes;
}
+
/**
* add_detailed_mode_eedid - get detailed mode info from addtional timing
* EDID block
@@ -920,12 +919,9 @@ static int add_detailed_info(struct drm_
static int add_detailed_info_eedid(struct drm_connector *connector,
struct edid *edid, u32 quirks)
{
- struct drm_device *dev = connector->dev;
- int i, j, modes = 0;
+ int i, modes = 0;
char *edid_ext = NULL;
struct detailed_timing *timing;
- struct detailed_non_pixel *data;
- struct drm_display_mode *newmode;
int edid_ext_num;
int start_offset, end_offset;
int timing_level;
@@ -976,51 +972,7 @@ static int add_detailed_info_eedid(struc
for (i = start_offset; i < end_offset;
i += sizeof(struct detailed_timing)) {
timing = (struct detailed_timing *)(edid_ext + i);
- data = &timing->data.other_data;
- /* Detailed mode timing */
- if (timing->pixel_clock) {
- newmode = drm_mode_detailed(dev, edid, timing, quirks);
- if (!newmode)
- continue;
-
- drm_mode_probed_add(connector, newmode);
-
- modes++;
- continue;
- }
-
- /* Other timing or info */
- switch (data->type) {
- case EDID_DETAIL_MONITOR_SERIAL:
- break;
- case EDID_DETAIL_MONITOR_STRING:
- break;
- case EDID_DETAIL_MONITOR_RANGE:
- /* Get monitor range data */
- break;
- case EDID_DETAIL_MONITOR_NAME:
- break;
- case EDID_DETAIL_MONITOR_CPDATA:
- break;
- case EDID_DETAIL_STD_MODES:
- /* Five modes per detailed section */
- for (j = 0; j < 5; i++) {
- struct std_timing *std;
- struct drm_display_mode *newmode;
-
- std = &data->data.timings[j];
- newmode = drm_mode_std(dev, std,
- edid->revision,
- timing_level);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
- break;
- default:
- break;
- }
+ modes += add_detailed_modes(connector, timing, edid, quirks, 0);
}
return modes;
file:251bc0e3b5ecd92acbb0244634ca46787bb0284d -> file:ba145531ca031499fb730f0089eb9603f7fd6201
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -140,14 +140,16 @@ int drm_open(struct inode *inode, struct
spin_unlock(&dev->count_lock);
}
out:
- mutex_lock(&dev->struct_mutex);
- if (minor->type == DRM_MINOR_LEGACY) {
- BUG_ON((dev->dev_mapping != NULL) &&
- (dev->dev_mapping != inode->i_mapping));
- if (dev->dev_mapping == NULL)
- dev->dev_mapping = inode->i_mapping;
+ if (!retcode) {
+ mutex_lock(&dev->struct_mutex);
+ if (minor->type == DRM_MINOR_LEGACY) {
+ if (dev->dev_mapping == NULL)
+ dev->dev_mapping = inode->i_mapping;
+ else if (dev->dev_mapping != inode->i_mapping)
+ retcode = -ENODEV;
+ }
+ mutex_unlock(&dev->struct_mutex);
}
- mutex_unlock(&dev->struct_mutex);
return retcode;
}
file:7f436ec075f6221df8ee6beafebae603054be003 -> file:544923988d138e1d55dfa25be708b85ae3fdf4be
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -192,6 +192,7 @@ int i965_reset(struct drm_device *dev, u
}
} else {
DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
+ mutex_unlock(&dev->struct_mutex);
return -ENODEV;
}
file:f5d49a7ac745e576dbf049409ec3f8d9a0c5ccb5 -> file:aafbef70de506b9bc9f177c57a81c3a316e44629
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -258,7 +258,7 @@ typedef struct drm_i915_private {
struct notifier_block lid_notifier;
- int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */
+ int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
file:04da731a70abf78b4366c13d851ff79e6b908245 -> file:952c844713d50f6220cab92cc73d252d8bbea4e4
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1470,9 +1470,6 @@ i915_gem_object_put_pages(struct drm_gem
obj_priv->dirty = 0;
for (i = 0; i < page_count; i++) {
- if (obj_priv->pages[i] == NULL)
- break;
-
if (obj_priv->dirty)
set_page_dirty(obj_priv->pages[i]);
@@ -2246,7 +2243,6 @@ i915_gem_object_get_pages(struct drm_gem
struct address_space *mapping;
struct inode *inode;
struct page *page;
- int ret;
if (obj_priv->pages_refcount++ != 0)
return 0;
@@ -2269,11 +2265,9 @@ i915_gem_object_get_pages(struct drm_gem
mapping_gfp_mask (mapping) |
__GFP_COLD |
gfpmask);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- i915_gem_object_put_pages(obj);
- return ret;
- }
+ if (IS_ERR(page))
+ goto err_pages;
+
obj_priv->pages[i] = page;
}
@@ -2281,6 +2275,15 @@ i915_gem_object_get_pages(struct drm_gem
i915_gem_object_do_bit_17_swizzle(obj);
return 0;
+
+err_pages:
+ while (i--)
+ page_cache_release(obj_priv->pages[i]);
+
+ drm_free_large(obj_priv->pages);
+ obj_priv->pages = NULL;
+ obj_priv->pages_refcount--;
+ return PTR_ERR(page);
}
static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
@@ -2331,6 +2334,12 @@ static void i915_write_fence_reg(struct
pitch_val = obj_priv->stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
+ if (obj_priv->tiling_mode == I915_TILING_Y &&
+ HAS_128_BYTE_Y_TILING(dev))
+ WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
+ else
+ WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
+
val = obj_priv->gtt_offset;
if (obj_priv->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
@@ -2606,6 +2615,14 @@ i915_gem_object_bind_to_gtt(struct drm_g
return -EINVAL;
}
+ /* If the object is bigger than the entire aperture, reject it early
+ * before evicting everything in a vain attempt to find space.
+ */
+ if (obj->size > dev->gtt_total) {
+ DRM_ERROR("Attempting to bind an object larger than the aperture\n");
+ return -E2BIG;
+ }
+
search_free:
free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
obj->size, alignment, 0);
@@ -3930,6 +3947,17 @@ i915_gem_object_pin(struct drm_gem_objec
int ret;
i915_verify_inactive(dev, __FILE__, __LINE__);
+
+ if (obj_priv->gtt_space != NULL) {
+ if (alignment == 0)
+ alignment = i915_gem_get_gtt_alignment(obj);
+ if (obj_priv->gtt_offset & (alignment - 1)) {
+ ret = i915_gem_object_unbind(obj);
+ if (ret)
+ return ret;
+ }
+ }
+
if (obj_priv->gtt_space == NULL) {
ret = i915_gem_object_bind_to_gtt(obj, alignment);
if (ret)
@@ -4669,6 +4697,16 @@ i915_gem_load(struct drm_device *dev)
list_add(&dev_priv->mm.shrink_list, &shrink_list);
spin_unlock(&shrink_list_lock);
+ /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
+ if (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+ u32 tmp = I915_READ(MI_ARB_STATE);
+ if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
+ /* arb state is a masked write, so set bit + bit in mask */
+ tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
+ I915_WRITE(MI_ARB_STATE, tmp);
+ }
+ }
+
/* Old X drivers will take 0-2 for front, back, depth buffers */
dev_priv->fence_reg_start = 3;
file:200e398453ca5305987a2d851a5361ba29d7bba2 -> file:fb2811c58affbdbb38421ee2c872ce02d78e545f
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -353,21 +353,17 @@ i915_tiling_ok(struct drm_device *dev, i
* reg, so dont bother to check the size */
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
- } else if (IS_I9XX(dev)) {
- uint32_t pitch_val = ffs(stride / tile_width) - 1;
-
- /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
- * instead of 4 (2KB) on 945s.
- */
- if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
- size > (I830_FENCE_MAX_SIZE_VAL << 20))
+ } else if (IS_I9XX(dev) || IS_I8XX(dev)) {
+ if (stride > 8192)
return false;
- } else {
- uint32_t pitch_val = ffs(stride / tile_width) - 1;
- if (pitch_val > I830_FENCE_MAX_PITCH_VAL ||
- size > (I830_FENCE_MAX_SIZE_VAL << 19))
- return false;
+ if (IS_I9XX(dev)) {
+ if (size > I830_FENCE_MAX_SIZE_VAL << 20)
+ return false;
+ } else {
+ if (size > I830_FENCE_MAX_SIZE_VAL << 19)
+ return false;
+ }
}
/* 965+ just needs multiples of tile width */
file:cc9b49ab1fd03f43400c4d9bf920bb4bf8aa8dd3 -> file:7214c852df99a188692a585a83753aea98774070
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -214,7 +214,7 @@
#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
#define I830_FENCE_PITCH_SHIFT 4
#define I830_FENCE_REG_VALID (1<<0)
-#define I915_FENCE_MAX_PITCH_VAL 0x10
+#define I915_FENCE_MAX_PITCH_VAL 4
#define I830_FENCE_MAX_PITCH_VAL 6
#define I830_FENCE_MAX_SIZE_VAL (1<<8)
@@ -307,6 +307,70 @@
#define LM_BURST_LENGTH 0x00000700
#define LM_FIFO_WATERMARK 0x0000001F
#define MI_ARB_STATE 0x020e4 /* 915+ only */
+#define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */
+
+/* Make render/texture TLB fetches lower priority than associated data
+ * fetches. This is not turned on by default
+ */
+#define MI_ARB_RENDER_TLB_LOW_PRIORITY (1 << 15)
+
+/* Isoch request wait on GTT enable (Display A/B/C streams).
+ * Make isoch requests stall on the TLB update. May cause
+ * display underruns (test mode only)
+ */
+#define MI_ARB_ISOCH_WAIT_GTT (1 << 14)
+
+/* Block grant count for isoch requests when block count is
+ * set to a finite value.
+ */
+#define MI_ARB_BLOCK_GRANT_MASK (3 << 12)
+#define MI_ARB_BLOCK_GRANT_8 (0 << 12) /* for 3 display planes */
+#define MI_ARB_BLOCK_GRANT_4 (1 << 12) /* for 2 display planes */
+#define MI_ARB_BLOCK_GRANT_2 (2 << 12) /* for 1 display plane */
+#define MI_ARB_BLOCK_GRANT_0 (3 << 12) /* don't use */
+
+/* Enable render writes to complete in C2/C3/C4 power states.
+ * If this isn't enabled, render writes are prevented in low
+ * power states. That seems bad to me.
+ */
+#define MI_ARB_C3_LP_WRITE_ENABLE (1 << 11)
+
+/* This acknowledges an async flip immediately instead
+ * of waiting for 2TLB fetches.
+ */
+#define MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE (1 << 10)
+
+/* Enables non-sequential data reads through arbiter
+ */
+#define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9)
+
+/* Disable FSB snooping of cacheable write cycles from binner/render
+ * command stream
+ */
+#define MI_ARB_CACHE_SNOOP_DISABLE (1 << 8)
+
+/* Arbiter time slice for non-isoch streams */
+#define MI_ARB_TIME_SLICE_MASK (7 << 5)
+#define MI_ARB_TIME_SLICE_1 (0 << 5)
+#define MI_ARB_TIME_SLICE_2 (1 << 5)
+#define MI_ARB_TIME_SLICE_4 (2 << 5)
+#define MI_ARB_TIME_SLICE_6 (3 << 5)
+#define MI_ARB_TIME_SLICE_8 (4 << 5)
+#define MI_ARB_TIME_SLICE_10 (5 << 5)
+#define MI_ARB_TIME_SLICE_14 (6 << 5)
+#define MI_ARB_TIME_SLICE_16 (7 << 5)
+
+/* Low priority grace period page size */
+#define MI_ARB_LOW_PRIORITY_GRACE_4KB (0 << 4) /* default */
+#define MI_ARB_LOW_PRIORITY_GRACE_8KB (1 << 4)
+
+/* Disable display A/B trickle feed */
+#define MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2)
+
+/* Set display plane priority */
+#define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */
+#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
+
#define CACHE_MODE_0 0x02120 /* 915+ only */
#define CM0_MASK_SHIFT 16
#define CM0_IZ_OPT_DISABLE (1<<6)
file:96cd256e60e6fd117ed00cf0f54503cc5312fadf -> file:97169ea7432317d6eada3f6373da6c97ff7ab002
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -241,10 +241,6 @@ parse_general_definitions(struct drm_i91
GPIOF,
};
- /* Set sensible defaults in case we can't find the general block
- or it is the wrong chipset */
- dev_priv->crt_ddc_bus = -1;
-
general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (general) {
u16 block_size = get_blocksize(general);
file:5e730e6f8a74942bd5afb20dd24affac9fdd7359 -> file:166a24e76b21e388ffaeca2fd2febac41a5ecd15
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -557,7 +557,7 @@ void intel_crt_init(struct drm_device *d
else {
i2c_reg = GPIOA;
/* Use VBT information for CRT DDC if available */
- if (dev_priv->crt_ddc_bus != -1)
+ if (dev_priv->crt_ddc_bus != 0)
i2c_reg = dev_priv->crt_ddc_bus;
}
intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
file:b00a1aaf0d71fc19e00a70887d37422647d90070 -> file:88d5e3ad48adfdef9bfa8a6ed0b9a5068e7d9e48
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -785,8 +785,8 @@ intel_g4x_find_best_PLL(const intel_limi
intel_clock_t clock;
int max_n;
bool found;
- /* approximately equals target * 0.00488 */
- int err_most = (target >> 8) + (target >> 10);
+ /* approximately equals target * 0.00585 */
+ int err_most = (target >> 8) + (target >> 9);
found = false;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -4322,7 +4322,7 @@ static void intel_init_display(struct dr
}
/* Returns the core display clock speed */
- if (IS_I945G(dev))
+ if (IS_I945G(dev) || (IS_G33(dev) && ! IS_IGDGM(dev)))
dev_priv->display.get_display_clock_speed =
i945_get_display_clock_speed;
else if (IS_I915G(dev))
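
The err_most change is plain shift arithmetic: (target >> 8) + (target >> 10) is target * (1/256 + 1/1024) ≈ target * 0.00488, whereas (target >> 8) + (target >> 9) is target * (1/256 + 1/512) = target * 3/512 ≈ target * 0.00586, which the updated comment rounds to 0.00585.
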
file:952bb4e2484d7acb100d6a4e8bc27a356f142931 -> file:bf0576298948311c48a0e7e37b1bbd256f022158
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -629,6 +629,13 @@ static const struct dmi_system_id bad_li
DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
},
},
+ {
+ .ident = "Clevo M5x0N",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
+ DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
+ },
+ },
{ }
};
@@ -641,8 +648,12 @@ static const struct dmi_system_id bad_li
*/
static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
enum drm_connector_status status = connector_status_connected;
+ if (IS_I8XX(dev))
+ return connector_status_connected;
+
if (!acpi_lid_open() && !dmi_check_system(bad_lid_status))
status = connector_status_disconnected;
@@ -878,6 +889,14 @@ static const struct dmi_system_id intel_
DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Clientron U800",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
+ },
+ },
{ } /* terminating entry */
};
file:3f5aaf14e6a3b5393c2334b0ce89e99e90a45119 -> file:5d9c6a7a6ed55574e5c60ee1c7ad7e8150a0846a
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -35,6 +35,7 @@
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_sdvo_regs.h"
+#include <linux/dmi.h>
#undef SDVO_DEBUG
@@ -2289,6 +2290,25 @@ intel_sdvo_get_slave_addr(struct drm_dev
return 0x72;
}
+static int intel_sdvo_bad_tv_callback(const struct dmi_system_id *id)
+{
+ DRM_DEBUG_KMS("Ignoring bad SDVO TV connector for %s\n", id->ident);
+ return 1;
+}
+
+static struct dmi_system_id intel_sdvo_bad_tv[] = {
+ {
+ .callback = intel_sdvo_bad_tv_callback,
+ .ident = "IntelG45/ICH10R/DME1737",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "IBM CORPORATION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "4800784"),
+ },
+ },
+
+ { } /* terminating entry */
+};
+
static bool
intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
{
@@ -2329,7 +2349,8 @@ intel_sdvo_output_setup(struct intel_out
(1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT);
}
- } else if (flags & SDVO_OUTPUT_SVID0) {
+ } else if ((flags & SDVO_OUTPUT_SVID0) &&
+ !dmi_check_system(intel_sdvo_bad_tv)) {
sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
file:ce026f002ea5f0674db27cba57570bee2d9f98b3 -> file:5b28b4e7ebe79ebfc7cc47608bd9501d9c0d0baf
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1801,8 +1801,6 @@ intel_tv_init(struct drm_device *dev)
drm_connector_attach_property(connector,
dev->mode_config.tv_bottom_margin_property,
tv_priv->margin[TV_MARGIN_BOTTOM]);
-
- dev_priv->hotplug_supported_mask |= TV_HOTPLUG_INT_STATUS;
out:
drm_sysfs_connector_add(connector);
}
file:eb740fc3549f82c5530c5450f4862ca8cf764fa3 -> file:ccf42c3dd1be66a246c4cef9fe0670e4239fb647
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -368,6 +368,8 @@ int r200_packet0_check(struct radeon_cs_
/* 2D, 3D, CUBE */
switch (tmp) {
case 0:
+ case 3:
+ case 4:
case 5:
case 6:
case 7:
file:2f43ee8e40480cc2895c663fec62751570a1332f -> file:d8c4f72eef8ed6eaaf9e3a53335162af0d8491da
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -346,11 +346,12 @@ void r300_gpu_init(struct radeon_device
r100_hdp_reset(rdev);
/* FIXME: rv380 one pipes ? */
- if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
+ if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
+ (rdev->family == CHIP_R350)) {
/* r300,r350 */
rdev->num_gb_pipes = 2;
} else {
- /* rv350,rv370,rv380 */
+ /* rv350,rv370,rv380,r300 AD */
rdev->num_gb_pipes = 1;
}
rdev->num_z_pipes = 1;
file:278f646bc18ef7dd0d027499f11b49d898b247fe -> file:731047301de4189541a1efb1396498199a8281a8
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1686,13 +1686,14 @@ int r600_init(struct radeon_device *rdev
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
- DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
- rdev->accel_working = false;
- }
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
+ } else {
+ r = r600_ib_test(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB test failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
}
}
return 0;
file:0d820764f34074dc47b8164895a446b1510649af -> file:838b88c14f574e20adc0a18c5b73662c955461e5
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -36,6 +36,10 @@ static int r600_cs_packet_next_reloc_nom
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
+struct r600_cs_track {
+ u32 cb_color0_base_last;
+};
+
/**
* r600_cs_packet_parse() - parse cp packet and point ib index to next packet
* @parser: parser structure holding parsing context.
@@ -177,6 +181,28 @@ static int r600_cs_packet_next_reloc_nom
}
/**
+ * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
+ * @parser: parser structure holding parsing context.
+ *
+ * Check whether the next packet is a relocation packet3 (PACKET3 NOP);
+ * return 1 if it is, 0 otherwise.
+ **/
+static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet p3reloc;
+ int r;
+
+ r = r600_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r) {
+ return 0;
+ }
+ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+ return 0;
+ }
+ return 1;
+}
+
+/**
* r600_cs_packet_next_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
*
@@ -337,6 +363,7 @@ static int r600_packet3_check(struct rad
struct radeon_cs_packet *pkt)
{
struct radeon_cs_reloc *reloc;
+ struct r600_cs_track *track;
volatile u32 *ib;
unsigned idx;
unsigned i;
@@ -344,6 +371,7 @@ static int r600_packet3_check(struct rad
int r;
u32 idx_value;
+ track = (struct r600_cs_track *)p->track;
ib = p->ib->ptr;
idx = pkt->idx + 1;
idx_value = radeon_get_ib_value(p, idx);
@@ -503,9 +531,60 @@ static int r600_packet3_check(struct rad
for (i = 0; i < pkt->count; i++) {
reg = start_reg + (4 * i);
switch (reg) {
+ /* These registers were added late; there is userspace
+ * which does provide relocations for them but sets a
+ * 0 offset. In order to avoid breaking old userspace
+ * we detect this and set the address to point to the last
+ * CB_COLOR0_BASE; note that if userspace doesn't set
+ * CB_COLOR0_BASE before these registers we will report
+ * an error. Old userspace always sets CB_COLOR0_BASE
+ * before any of these.
+ */
+ case R_0280E0_CB_COLOR0_FRAG:
+ case R_0280E4_CB_COLOR1_FRAG:
+ case R_0280E8_CB_COLOR2_FRAG:
+ case R_0280EC_CB_COLOR3_FRAG:
+ case R_0280F0_CB_COLOR4_FRAG:
+ case R_0280F4_CB_COLOR5_FRAG:
+ case R_0280F8_CB_COLOR6_FRAG:
+ case R_0280FC_CB_COLOR7_FRAG:
+ case R_0280C0_CB_COLOR0_TILE:
+ case R_0280C4_CB_COLOR1_TILE:
+ case R_0280C8_CB_COLOR2_TILE:
+ case R_0280CC_CB_COLOR3_TILE:
+ case R_0280D0_CB_COLOR4_TILE:
+ case R_0280D4_CB_COLOR5_TILE:
+ case R_0280D8_CB_COLOR6_TILE:
+ case R_0280DC_CB_COLOR7_TILE:
+ if (!r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!track->cb_color0_base_last) {
+ dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx+1+i] = track->cb_color0_base_last;
+ printk_once(KERN_WARNING "You have old & broken userspace "
+ "please consider updating mesa & xf86-video-ati\n");
+ } else {
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ }
+ break;
case DB_DEPTH_BASE:
case DB_HTILE_DATA_BASE:
case CB_COLOR0_BASE:
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color0_base_last = ib[idx+1+i];
+ break;
case CB_COLOR1_BASE:
case CB_COLOR2_BASE:
case CB_COLOR3_BASE:
@@ -678,8 +757,11 @@ static int r600_packet3_check(struct rad
int r600_cs_parse(struct radeon_cs_parser *p)
{
struct radeon_cs_packet pkt;
+ struct r600_cs_track *track;
int r;
+ track = kzalloc(sizeof(*track), GFP_KERNEL);
+ p->track = track;
do {
r = r600_cs_packet_parse(p, &pkt, p->idx);
if (r) {
@@ -757,6 +839,7 @@ int r600_cs_legacy(struct drm_device *de
/* initialize parser */
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
+ parser.dev = &dev->pdev->dev;
parser.rdev = NULL;
parser.family = family;
parser.ib = &fake_ib;
file:27ab428b149bbccc6c077e6a648c991ebaddcd2a -> file:56fc6589132e9707e6bb5137c1184c8f6319a847
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -674,4 +674,30 @@
#define S_000E60_SOFT_RESET_TSC(x) (((x) & 1) << 16)
#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17)
+#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
+
+#define R_0280E0_CB_COLOR0_FRAG 0x0280E0
+#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_0280E0_BASE_256B 0x00000000
+#define R_0280E4_CB_COLOR1_FRAG 0x0280E4
+#define R_0280E8_CB_COLOR2_FRAG 0x0280E8
+#define R_0280EC_CB_COLOR3_FRAG 0x0280EC
+#define R_0280F0_CB_COLOR4_FRAG 0x0280F0
+#define R_0280F4_CB_COLOR5_FRAG 0x0280F4
+#define R_0280F8_CB_COLOR6_FRAG 0x0280F8
+#define R_0280FC_CB_COLOR7_FRAG 0x0280FC
+#define R_0280C0_CB_COLOR0_TILE 0x0280C0
+#define S_0280C0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_0280C0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_0280C0_BASE_256B 0x00000000
+#define R_0280C4_CB_COLOR1_TILE 0x0280C4
+#define R_0280C8_CB_COLOR2_TILE 0x0280C8
+#define R_0280CC_CB_COLOR3_TILE 0x0280CC
+#define R_0280D0_CB_COLOR4_TILE 0x0280D0
+#define R_0280D4_CB_COLOR5_TILE 0x0280D4
+#define R_0280D8_CB_COLOR6_TILE 0x0280D8
+#define R_0280DC_CB_COLOR7_TILE 0x0280DC
+
+
#endif
file:224506a2f7b1ae6d53d5b68347a4a7889a262a2c -> file:6735213892d564ad0e4182920f58c8f7496e7859
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -448,6 +448,7 @@ struct radeon_cs_chunk {
};
struct radeon_cs_parser {
+ struct device *dev;
struct radeon_device *rdev;
struct drm_file *filp;
/* chunks */
file:969502acc29cbeb33d164da0b854e2b94e3ef9c8 -> file:e5e22b1cf502dab69b5cd4ea480b0b52db9987e9
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -161,6 +161,15 @@ static bool radeon_atom_apply_quirks(str
}
}
+ /* ASUS HD 3600 board lists the DVI port as HDMI */
+ if ((dev->pdev->device == 0x9598) &&
+ (dev->pdev->subsystem_vendor == 0x1043) &&
+ (dev->pdev->subsystem_device == 0x01e4)) {
+ if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ *connector_type = DRM_MODE_CONNECTOR_DVII;
+ }
+ }
+
/* ASUS HD 3450 board lists the DVI port as HDMI */
if ((dev->pdev->device == 0x95C5) &&
(dev->pdev->subsystem_vendor == 0x1043) &&
@@ -942,7 +951,7 @@ struct radeon_encoder_atom_dig *radeon_a
lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
- le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
+ le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
lvds->panel_pwr_delay =
file:29763ceae3af9de8dc2067ff38e0ac63d59fe75e -> file:b1dc1a112aef618da04053ca56cfe81c8e1fef27
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -140,12 +140,14 @@ radeon_connector_analog_encoder_conflict
{
struct drm_device *dev = connector->dev;
struct drm_connector *conflict;
+ struct radeon_connector *radeon_conflict;
int i;
list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
if (conflict == connector)
continue;
+ radeon_conflict = to_radeon_connector(conflict);
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (conflict->encoder_ids[i] == 0)
break;
@@ -155,6 +157,9 @@ radeon_connector_analog_encoder_conflict
if (conflict->status != connector_status_connected)
continue;
+ if (radeon_conflict->use_digital)
+ continue;
+
if (priority == true) {
DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
@@ -281,7 +286,7 @@ int radeon_connector_set_property(struct
radeon_encoder = to_radeon_encoder(encoder);
if (!radeon_encoder->enc_priv)
return 0;
- if (rdev->is_atom_bios) {
+ if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) {
struct radeon_encoder_atom_dac *dac_int;
dac_int = radeon_encoder->enc_priv;
dac_int->tv_std = val;
file:4f7afc79dd82c6db776c55a656c38cd450bb575a -> file:c7236f4c6cdd23c7cf6f67fe68f25af477637fe6
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -417,8 +417,9 @@ static int radeon_do_wait_for_idle(drm_r
return -EBUSY;
}
-static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
+static void radeon_init_pipes(struct drm_device *dev)
{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
uint32_t gb_tile_config, gb_pipe_sel = 0;
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) {
@@ -436,11 +437,12 @@ static void radeon_init_pipes(drm_radeon
dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
} else {
/* R3xx */
- if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
+ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 &&
+ dev->pdev->device != 0x4144) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) {
dev_priv->num_gb_pipes = 2;
} else {
- /* R3Vxx */
+ /* RV3xx/R300 AD */
dev_priv->num_gb_pipes = 1;
}
}
@@ -736,7 +738,7 @@ static int radeon_do_engine_reset(struct
/* setup the raster pipes */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
- radeon_init_pipes(dev_priv);
+ radeon_init_pipes(dev);
/* Reset the CP ring */
radeon_do_cp_reset(dev_priv);
@@ -1644,6 +1646,7 @@ static int radeon_do_resume_cp(struct dr
radeon_cp_load_microcode(dev_priv);
radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);
+ dev_priv->have_z_offset = 0;
radeon_do_engine_reset(dev);
radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
file:5ab2cf96a26498a3d29aba7fe18ed6c46716b161 -> file:20c52da0f62f9e46d19fda9fe2170e2b1b304981
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -230,6 +230,7 @@ int radeon_cs_ioctl(struct drm_device *d
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
parser.rdev = rdev;
+ parser.dev = rdev->dev;
r = radeon_cs_parser_init(&parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
@@ -246,7 +247,8 @@ int radeon_cs_ioctl(struct drm_device *d
}
r = radeon_cs_parser_relocs(&parser);
if (r) {
- DRM_ERROR("Failed to parse relocation !\n");
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to parse relocation %d!\n", r);
radeon_cs_parser_fini(&parser, r);
mutex_unlock(&rdev->cs_mutex);
return r;
file:c85df4afcb7ac09e2f89ec21f116a7ee4bdfaa39 -> file:6f683159fc64b537e2cd681ad9599f3972255801
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -599,7 +599,11 @@ radeon_user_framebuffer_create(struct dr
struct drm_gem_object *obj;
obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
-
+ if (obj == NULL) {
+ dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
+ "can't create framebuffer\n", mode_cmd->handle);
+ return NULL;
+ }
return radeon_framebuffer_create(dev, mode_cmd, obj);
}
file:350962e0f346f483b02d04d37f48efb3219c2ed0 -> file:76e4070388c62383553ec818a5e2902c95b4b000
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -267,6 +267,8 @@ typedef struct drm_radeon_private {
u32 scratch_ages[5];
+ int have_z_offset;
+
/* starting from here on, data is preserved accross an open */
uint32_t flags; /* see radeon_chip_flags */
resource_size_t fb_aper_offset;
file:d42bc512d75a8cd1c6a8ae3e91443a67d8121f74 -> file:4478b994b50013467c1d39e930934fe0c2665331
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1155,8 +1155,12 @@ radeon_atom_encoder_mode_set(struct drm_
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
atombios_dac_setup(encoder, ATOM_ENABLE);
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
- atombios_tv_setup(encoder, ATOM_ENABLE);
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+ atombios_tv_setup(encoder, ATOM_ENABLE);
+ else
+ atombios_tv_setup(encoder, ATOM_DISABLE);
+ }
break;
}
atombios_apply_encoder_quirks(encoder, adjusted_mode);
file:00382122869b9c94dfeeff68c1f6a4d5ec2ad821 -> file:183bef831afb5752b111b846baf89adf3edb0f4a
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -89,6 +89,7 @@ static void radeon_legacy_lvds_dpms(stru
udelay(panel_pwr_delay * 1000);
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+ udelay(panel_pwr_delay * 1000);
break;
}
file:3a12bb0c0563daa412aa07ea61bf6ab0368fc4d5 -> file:fc64a20b85830306b62a5c62521b2f8e87d67f89
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -57,6 +57,10 @@
#define NTSC_TV_PLL_N_14 693
#define NTSC_TV_PLL_P_14 7
+#define PAL_TV_PLL_M_14 19
+#define PAL_TV_PLL_N_14 353
+#define PAL_TV_PLL_P_14 5
+
#define VERT_LEAD_IN_LINES 2
#define FRAC_BITS 0xe
#define FRAC_MASK 0x3fff
@@ -205,9 +209,24 @@ static const struct radeon_tv_mode_const
630627, /* defRestart */
347, /* crtcPLL_N */
14, /* crtcPLL_M */
- 8, /* crtcPLL_postDiv */
+ 8, /* crtcPLL_postDiv */
1022, /* pixToTV */
},
+ { /* PAL timing for 14 Mhz ref clk */
+ 800, /* horResolution */
+ 600, /* verResolution */
+ TV_STD_PAL, /* standard */
+ 1131, /* horTotal */
+ 742, /* verTotal */
+ 813, /* horStart */
+ 840, /* horSyncStart */
+ 633, /* verSyncStart */
+ 708369, /* defRestart */
+ 211, /* crtcPLL_N */
+ 9, /* crtcPLL_M */
+ 8, /* crtcPLL_postDiv */
+ 759, /* pixToTV */
+ },
};
#define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes)
@@ -242,7 +261,7 @@ static const struct radeon_tv_mode_const
if (pll->reference_freq == 2700)
const_ptr = &available_tv_modes[1];
else
- const_ptr = &available_tv_modes[1]; /* FIX ME */
+ const_ptr = &available_tv_modes[3];
}
return const_ptr;
}
@@ -685,9 +704,9 @@ void radeon_legacy_tv_mode_set(struct dr
n = PAL_TV_PLL_N_27;
p = PAL_TV_PLL_P_27;
} else {
- m = PAL_TV_PLL_M_27;
- n = PAL_TV_PLL_N_27;
- p = PAL_TV_PLL_P_27;
+ m = PAL_TV_PLL_M_14;
+ n = PAL_TV_PLL_N_14;
+ p = PAL_TV_PLL_P_14;
}
}
file:38537d971a3e3b5a666fe24b8f63ee901b1c121e -> file:474791076cf974c51c3c07939190e4ad08b0f87f
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -101,6 +101,7 @@ static __inline__ int radeon_check_and_f
DRM_ERROR("Invalid depth buffer offset\n");
return -EINVAL;
}
+ dev_priv->have_z_offset = 1;
break;
case RADEON_EMIT_PP_CNTL:
@@ -876,6 +877,12 @@ static void radeon_cp_dispatch_clear(str
if (tmp & RADEON_BACK)
flags |= RADEON_FRONT;
}
+ if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
+ if (!dev_priv->have_z_offset) {
+ printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
+ flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
+ }
+ }
if (flags & (RADEON_FRONT | RADEON_BACK)) {
file:4444f48c496eb0d4f6bf88405a3700470874e527 -> file:170029747dafebeff8698f978bf064b9d274c6fc
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -57,7 +57,7 @@ void rs600_gart_tlb_flush(struct radeon_
WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
- tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1);
+ tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
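
The rs600 fix is a plain bitwise error: S_000100_INVALIDATE_ALL_L1_TLBS(1) and S_000100_INVALIDATE_L2_CACHE(1) set different single bits, so ANDing them yields 0 and the old code requested neither invalidation; ORing them sets both bits as intended.
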
file:b0efd0ddae7a223d9ac2c7b323e9080a8f098b57 -> file:c42403b0827cd3222f56fe0d1c4868d55454a90c
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1034,13 +1034,14 @@ int rv770_init(struct radeon_device *rde
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
- DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
- rdev->accel_working = false;
- }
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
+ } else {
+ r = r600_ib_test(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB test failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
}
}
return 0;
file:7bcb89f39ce8912b79d565e8b60228e542a08301 -> file:3d5b8b0705defb8a340649bdc80a28cca37f583f
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -466,7 +466,7 @@ static int ttm_tt_swapin(struct ttm_tt *
void *from_virtual;
void *to_virtual;
int i;
- int ret;
+ int ret = -ENOMEM;
if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
@@ -485,8 +485,10 @@ static int ttm_tt_swapin(struct ttm_tt *
for (i = 0; i < ttm->num_pages; ++i) {
from_page = read_mapping_page(swap_space, i, NULL);
- if (IS_ERR(from_page))
+ if (IS_ERR(from_page)) {
+ ret = PTR_ERR(from_page);
goto out_err;
+ }
to_page = __ttm_tt_get_page(ttm, i);
if (unlikely(to_page == NULL))
goto out_err;
@@ -509,7 +511,7 @@ static int ttm_tt_swapin(struct ttm_tt *
return 0;
out_err:
ttm_tt_free_alloced_pages(ttm);
- return -ENOMEM;
+ return ret;
}
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
@@ -521,6 +523,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, s
void *from_virtual;
void *to_virtual;
int i;
+ int ret = -ENOMEM;
BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
BUG_ON(ttm->caching_state != tt_cached);
@@ -543,7 +546,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, s
0);
if (unlikely(IS_ERR(swap_storage))) {
printk(KERN_ERR "Failed allocating swap storage.\n");
- return -ENOMEM;
+ return PTR_ERR(swap_storage);
}
} else
swap_storage = persistant_swap_storage;
@@ -555,9 +558,10 @@ int ttm_tt_swapout(struct ttm_tt *ttm, s
if (unlikely(from_page == NULL))
continue;
to_page = read_mapping_page(swap_space, i, NULL);
- if (unlikely(to_page == NULL))
+ if (unlikely(IS_ERR(to_page))) {
+ ret = PTR_ERR(to_page);
goto out_err;
-
+ }
preempt_disable();
from_virtual = kmap_atomic(from_page, KM_USER0);
to_virtual = kmap_atomic(to_page, KM_USER1);
@@ -581,5 +585,5 @@ out_err:
if (!persistant_swap_storage)
fput(swap_storage);
- return -ENOMEM;
+ return ret;
}
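
The ttm_tt changes above stop collapsing every failure to -ENOMEM and instead propagate the error encoded in the returned pointer, following the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention. A minimal user-space sketch of that convention, with simplified stand-ins for the kernel macros and a hypothetical fake_read_page() in place of read_mapping_page():

    #include <stdio.h>
    #include <errno.h>

    /* Simplified stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
    static void *err_ptr(long error)      { return (void *)error; }
    static int   is_err(const void *ptr)  { return (unsigned long)ptr >= (unsigned long)-4095; }
    static long  ptr_err(const void *ptr) { return (long)ptr; }

    /* Hypothetical stand-in for read_mapping_page(): fails with -EIO on demand. */
    static void *fake_read_page(int fail)
    {
        static char page[4096];
        return fail ? err_ptr(-EIO) : (void *)page;
    }

    int main(void)
    {
        void *page = fake_read_page(1);

        if (is_err(page)) {
            printf("read failed: %ld\n", ptr_err(page));  /* the real error, not a blanket -ENOMEM */
            return 1;
        }
        return 0;
    }
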
file:1ac0c93603c903a1d7a39f1efb10530936a2f4e1 -> file:aa8688d25e848457d81797d11d07700abc5bf0b0
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -954,6 +954,7 @@ static ssize_t vga_arb_write(struct file
}
} else if (strncmp(curr_pos, "target ", 7) == 0) {
+ struct pci_bus *pbus;
unsigned int domain, bus, devfn;
struct vga_device *vgadev;
@@ -961,7 +962,7 @@ static ssize_t vga_arb_write(struct file
remaining -= 7;
pr_devel("client 0x%p called 'target'\n", priv);
/* if target is default */
- if (!strncmp(buf, "default", 7))
+ if (!strncmp(curr_pos, "default", 7))
pdev = pci_dev_get(vga_default_device());
else {
if (!vga_pci_str_to_vars(curr_pos, remaining,
@@ -969,18 +970,31 @@ static ssize_t vga_arb_write(struct file
ret_val = -EPROTO;
goto done;
}
+ pr_devel("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos,
+ domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
- pdev = pci_get_bus_and_slot(bus, devfn);
+ pbus = pci_find_bus(domain, bus);
+ pr_devel("vgaarb: pbus %p\n", pbus);
+ if (pbus == NULL) {
+ pr_err("vgaarb: invalid PCI domain and/or bus address %x:%x\n",
+ domain, bus);
+ ret_val = -ENODEV;
+ goto done;
+ }
+ pdev = pci_get_slot(pbus, devfn);
+ pr_devel("vgaarb: pdev %p\n", pdev);
if (!pdev) {
- pr_info("vgaarb: invalid PCI address!\n");
+ pr_err("vgaarb: invalid PCI address %x:%x\n",
+ bus, devfn);
ret_val = -ENODEV;
goto done;
}
}
vgadev = vgadev_find(pdev);
+ pr_devel("vgaarb: vgadev %p\n", vgadev);
if (vgadev == NULL) {
- pr_info("vgaarb: this pci device is not a vga device\n");
+ pr_err("vgaarb: this pci device is not a vga device\n");
pci_dev_put(pdev);
ret_val = -ENODEV;
goto done;
@@ -998,7 +1012,8 @@ static ssize_t vga_arb_write(struct file
}
}
if (i == MAX_USER_CARDS) {
- pr_err("vgaarb: maximum user cards number reached!\n");
+ pr_err("vgaarb: maximum user cards (%d) number reached!\n",
+ MAX_USER_CARDS);
pci_dev_put(pdev);
/* XXX: which value to return? */
ret_val = -ENOMEM;
file:668604be62060e84836749ce899b1168c5ba78ba -> file:15978635d34fadd7a23c7c4d3c08f3f191bddd78
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1306,6 +1306,7 @@ static const struct hid_device_id hid_bl
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
file:cab13e8c7d292c7ed93a06b8a58acc6072d50fd3 -> file:3975e039c3ddbedfb4e7246b3b00004ba8c70ddd
--- a/drivers/hid/hid-gyration.c
+++ b/drivers/hid/hid-gyration.c
@@ -53,10 +53,13 @@ static int gyration_input_mapping(struct
static int gyration_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
- struct input_dev *input = field->hidinput->input;
+
+ if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
+ return 0;
if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK &&
(usage->hid & 0xff) == 0x82) {
+ struct input_dev *input = field->hidinput->input;
input_event(input, usage->type, usage->code, 1);
input_sync(input);
input_event(input, usage->type, usage->code, 0);
@@ -70,6 +73,7 @@ static int gyration_event(struct hid_dev
static const struct hid_device_id gyration_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ }
};
MODULE_DEVICE_TABLE(hid, gyration_devices);
file:9831c3a9c2965ea3220c908b7210a82ff49fa5e3 -> file:252853d4dbe3f84313112113f318df7c96578654
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -251,6 +251,7 @@
#define USB_VENDOR_ID_GYRATION 0x0c16
#define USB_DEVICE_ID_GYRATION_REMOTE 0x0002
#define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003
+#define USB_DEVICE_ID_GYRATION_REMOTE_3 0x0008
#define USB_VENDOR_ID_HAPP 0x078b
#define USB_DEVICE_ID_UGCI_DRIVING 0x0010
file:6c9ace1b76f6db6ca810c5cf2c7cb89889da7984 -> file:2ad62c339cd2c9cf849dc3a551a8d47548ef2190
--- a/drivers/hwmon/ams/ams-core.c
+++ b/drivers/hwmon/ams/ams-core.c
@@ -213,7 +213,7 @@ int __init ams_init(void)
return -ENODEV;
}
-void ams_exit(void)
+void ams_sensor_detach(void)
{
/* Remove input device */
ams_input_exit();
@@ -221,9 +221,6 @@ void ams_exit(void)
/* Remove attributes */
device_remove_file(&ams_info.of_dev->dev, &dev_attr_current);
- /* Shut down implementation */
- ams_info.exit();
-
/* Flush interrupt worker
*
* We do this after ams_info.exit(), because an interrupt might
@@ -239,6 +236,12 @@ void ams_exit(void)
pmf_unregister_irq_client(&ams_freefall_client);
}
+static void __exit ams_exit(void)
+{
+ /* Shut down implementation */
+ ams_info.exit();
+}
+
MODULE_AUTHOR("Stelian Pop, Michael Hanselmann");
MODULE_DESCRIPTION("Apple Motion Sensor driver");
MODULE_LICENSE("GPL");
file:2cbf8a6506c7e8fad72736773efa238367bf03f5 -> file:abeecd27b484fe2345f3c01dda91b7f74f079cd6
--- a/drivers/hwmon/ams/ams-i2c.c
+++ b/drivers/hwmon/ams/ams-i2c.c
@@ -238,6 +238,8 @@ static int ams_i2c_probe(struct i2c_clie
static int ams_i2c_remove(struct i2c_client *client)
{
if (ams_info.has_device) {
+ ams_sensor_detach();
+
/* Disable interrupts */
ams_i2c_set_irq(AMS_IRQ_ALL, 0);
file:fb18b3d3162bcebfe908fa5c678ccf6893fdb25d -> file:4f61b3ee1b08b4ca0ee65f018becb0ef7128892e
--- a/drivers/hwmon/ams/ams-pmu.c
+++ b/drivers/hwmon/ams/ams-pmu.c
@@ -133,6 +133,8 @@ static void ams_pmu_get_xyz(s8 *x, s8 *y
static void ams_pmu_exit(void)
{
+ ams_sensor_detach();
+
/* Disable interrupts */
ams_pmu_set_irq(AMS_IRQ_ALL, 0);
file:5ed387b0bd9aa08185e05bd790ac06976a8a7bed -> file:b28d7e27a031727bc7cc83c18d28b9fc15b2160a
--- a/drivers/hwmon/ams/ams.h
+++ b/drivers/hwmon/ams/ams.h
@@ -61,6 +61,7 @@ extern struct ams ams_info;
extern void ams_sensors(s8 *x, s8 *y, s8 *z);
extern int ams_sensor_attach(void);
+extern void ams_sensor_detach(void);
extern int ams_pmu_init(struct device_node *np);
extern int ams_i2c_init(struct device_node *np);
file:2d7bceeed0bc3daf10a8ae2d58d106d575107eb2 -> file:585219167fa754ac72f9e0a96c15d278cc36fc67
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -53,6 +53,7 @@ struct coretemp_data {
struct mutex update_lock;
const char *name;
u32 id;
+ u16 core_id;
char valid; /* zero until following fields are valid */
unsigned long last_updated; /* in jiffies */
int temp;
@@ -75,7 +76,7 @@ static ssize_t show_name(struct device *
if (attr->index == SHOW_NAME)
ret = sprintf(buf, "%s\n", data->name);
else /* show label */
- ret = sprintf(buf, "Core %d\n", data->id);
+ ret = sprintf(buf, "Core %d\n", data->core_id);
return ret;
}
@@ -228,7 +229,7 @@ static int __devinit adjust_tjmax(struct
if (err) {
dev_warn(dev,
"Unable to access MSR 0xEE, for Tjmax, left"
- " at default");
+ " at default\n");
} else if (eax & 0x40000000) {
tjmax = tjmax_ee;
}
@@ -255,6 +256,9 @@ static int __devinit coretemp_probe(stru
}
data->id = pdev->id;
+#ifdef CONFIG_SMP
+ data->core_id = c->cpu_core_id;
+#endif
data->name = "coretemp";
mutex_init(&data->update_lock);
@@ -352,6 +356,10 @@ struct pdev_entry {
struct list_head list;
struct platform_device *pdev;
unsigned int cpu;
+#ifdef CONFIG_SMP
+ u16 phys_proc_id;
+ u16 cpu_core_id;
+#endif
};
static LIST_HEAD(pdev_list);
@@ -362,6 +370,22 @@ static int __cpuinit coretemp_device_add
int err;
struct platform_device *pdev;
struct pdev_entry *pdev_entry;
+#ifdef CONFIG_SMP
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+#endif
+
+ mutex_lock(&pdev_list_mutex);
+
+#ifdef CONFIG_SMP
+ /* Skip second HT entry of each core */
+ list_for_each_entry(pdev_entry, &pdev_list, list) {
+ if (c->phys_proc_id == pdev_entry->phys_proc_id &&
+ c->cpu_core_id == pdev_entry->cpu_core_id) {
+ err = 0; /* Not an error */
+ goto exit;
+ }
+ }
+#endif
pdev = platform_device_alloc(DRVNAME, cpu);
if (!pdev) {
@@ -385,7 +409,10 @@ static int __cpuinit coretemp_device_add
pdev_entry->pdev = pdev;
pdev_entry->cpu = cpu;
- mutex_lock(&pdev_list_mutex);
+#ifdef CONFIG_SMP
+ pdev_entry->phys_proc_id = c->phys_proc_id;
+ pdev_entry->cpu_core_id = c->cpu_core_id;
+#endif
list_add_tail(&pdev_entry->list, &pdev_list);
mutex_unlock(&pdev_list_mutex);
@@ -396,6 +423,7 @@ exit_device_free:
exit_device_put:
platform_device_put(pdev);
exit:
+ mutex_unlock(&pdev_list_mutex);
return err;
}
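
The coretemp hunk avoids creating a second hwmon device for the sibling hyper-thread of a core by comparing each new CPU's (phys_proc_id, cpu_core_id) pair against the entries already on pdev_list, under pdev_list_mutex. A small stand-alone sketch of that de-duplication, with made-up topology data in place of cpu_data():

    #include <stdio.h>

    /* Made-up stand-in for the relevant fields of cpuinfo_x86. */
    struct cpu_info { unsigned int cpu; unsigned short phys_proc_id, cpu_core_id; };

    int main(void)
    {
        /* Hypothetical topology: cpus 0/2 and 1/3 are HT siblings of two cores. */
        struct cpu_info cpus[] = { {0, 0, 0}, {1, 0, 1}, {2, 0, 0}, {3, 0, 1} };
        struct cpu_info registered[4];
        int n = 0, i, j;

        for (i = 0; i < 4; i++) {
            int dup = 0;
            for (j = 0; j < n; j++)
                if (registered[j].phys_proc_id == cpus[i].phys_proc_id &&
                    registered[j].cpu_core_id == cpus[i].cpu_core_id)
                    dup = 1;                /* second HT thread of an already-known core */
            if (!dup)
                registered[n++] = cpus[i];
        }
        printf("%d sensor devices for 4 logical cpus\n", n);   /* prints 2 */
        return 0;
    }
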
file:be475e844c2a73b146314bf3209f487c02110f59 -> file:f16d60f0bf47581ae2380bcb986f7b37aa7dea1b
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/hwmon/hp_accel.c
@@ -324,8 +324,8 @@ static int lis3lv02d_remove(struct acpi_
lis3lv02d_joystick_disable();
lis3lv02d_poweroff(&lis3_dev);
- flush_work(&hpled_led.work);
led_classdev_unregister(&hpled_led.led_classdev);
+ flush_work(&hpled_led.work);
return lis3lv02d_remove_fs(&lis3_dev);
}
file:a3749cb0f181ec623a4d628ea615c66a154a6c5e -> file:497476f637a98b3ef8bb9d3107edf6b0b1e0c240
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -80,6 +80,13 @@ superio_inb(int reg)
return inb(VAL);
}
+static inline void
+superio_outb(int reg, int val)
+{
+ outb(reg, REG);
+ outb(val, VAL);
+}
+
static int superio_inw(int reg)
{
int val;
@@ -1036,6 +1043,21 @@ static int __init it87_find(unsigned sho
sio_data->vid_value = superio_inb(IT87_SIO_VID_REG);
reg = superio_inb(IT87_SIO_PINX2_REG);
+ /*
+ * The IT8720F has no VIN7 pin, so VCCH should always be
+ * routed internally to VIN7 with an internal divider.
+ * Curiously, there still is a configuration bit to control
+ * this, which means it can be set incorrectly. And even
+ * more curiously, many boards out there are improperly
+ * configured, even though the IT8720F datasheet claims
+ * that the internal routing of VCCH to VIN7 is the default
+ * setting. So we force the internal routing in this case.
+ */
+ if (sio_data->type == it8720 && !(reg & (1 << 1))) {
+ reg |= (1 << 1);
+ superio_outb(IT87_SIO_PINX2_REG, reg);
+ pr_notice("it87: Routing internal VCCH to in7\n");
+ }
if (reg & (1 << 0))
pr_info("it87: in3 is VCC (+5V)\n");
if (reg & (1 << 1))
file:1fe99511184111a68a38e151e96e03c9491f59ad -> file:f808d188afa3bfdbe4ae394ee72e8a6d14c5570c
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -120,7 +120,7 @@ static ssize_t show_temp(struct device *
int temp;
struct k8temp_data *data = k8temp_update_device(dev);
- if (data->swap_core_select)
+ if (data->swap_core_select && (data->sensorsp & SEL_CORE))
core = core ? 0 : 1;
temp = TEMP_FROM_REG(data->temp[core][place]) + data->temp_offset;
@@ -180,11 +180,13 @@ static int __devinit k8temp_probe(struct
}
if ((model >= 0x69) &&
- !(model == 0xc1 || model == 0x6c || model == 0x7c)) {
+ !(model == 0xc1 || model == 0x6c || model == 0x7c ||
+ model == 0x6b || model == 0x6f || model == 0x7f)) {
/*
- * RevG desktop CPUs (i.e. no socket S1G1 parts)
- * need additional offset, otherwise reported
- * temperature is below ambient temperature
+ * RevG desktop CPUs (i.e. no socket S1G1 or
+ * ASB1 parts) need additional offset,
+ * otherwise reported temperature is below
+ * ambient temperature
*/
data->temp_offset = 21000;
}
file:cf5afb9a10abbd9af3432dce36249a23edfcab0d -> file:5d5ed69851dbe9c8cc495ce198a1bef23ab741a0
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -127,12 +127,14 @@ void lis3lv02d_poweron(struct lis3lv02d
/*
* Common configuration
- * BDU: LSB and MSB values are not updated until both have been read.
- * So the value read will always be correct.
+ * BDU: (12-bit sensors only) LSB and MSB values are not updated until
+ * both have been read. So the value read will always be correct.
*/
- lis3->read(lis3, CTRL_REG2, &reg);
- reg |= CTRL2_BDU;
- lis3->write(lis3, CTRL_REG2, reg);
+ if (lis3->whoami == LIS_DOUBLE_ID) {
+ lis3->read(lis3, CTRL_REG2, &reg);
+ reg |= CTRL2_BDU;
+ lis3->write(lis3, CTRL_REG2, reg);
+ }
}
EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
@@ -361,7 +363,8 @@ static ssize_t lis3lv02d_calibrate_store
}
/* conversion btw sampling rate and the register values */
-static int lis3lv02dl_df_val[4] = {40, 160, 640, 2560};
+static int lis3_12_rates[4] = {40, 160, 640, 2560};
+static int lis3_8_rates[2] = {100, 400};
static ssize_t lis3lv02d_rate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -369,8 +372,13 @@ static ssize_t lis3lv02d_rate_show(struc
int val;
lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl);
- val = (ctrl & (CTRL1_DF0 | CTRL1_DF1)) >> 4;
- return sprintf(buf, "%d\n", lis3lv02dl_df_val[val]);
+
+ if (lis3_dev.whoami == LIS_DOUBLE_ID)
+ val = lis3_12_rates[(ctrl & (CTRL1_DF0 | CTRL1_DF1)) >> 4];
+ else
+ val = lis3_8_rates[(ctrl & CTRL1_DR) >> 7];
+
+ return sprintf(buf, "%d\n", val);
}
static DEVICE_ATTR(position, S_IRUGO, lis3lv02d_position_show, NULL);
file:3e1ff46f72d3ac2cdfde3f3bf1b83637513c42b8 -> file:7cdd76f31ce32ba00f10314bb4016bed6bdaf26c
--- a/drivers/hwmon/lis3lv02d.h
+++ b/drivers/hwmon/lis3lv02d.h
@@ -103,6 +103,7 @@ enum lis3lv02d_ctrl1 {
CTRL1_DF1 = 0x20,
CTRL1_PD0 = 0x40,
CTRL1_PD1 = 0x80,
+ CTRL1_DR = 0x80, /* Data rate on 8 bits */
};
enum lis3lv02d_ctrl2 {
CTRL2_DAS = 0x01,
file:65c232a9d0c5a917b533e2aeac34c2c13aa4fba5 -> file:21d201befc2cdb7aeebb11c495f5ba3c10e74215
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -45,9 +45,7 @@ enum ltc4245_cmd {
LTC4245_VEEIN = 0x19,
LTC4245_VEESENSE = 0x1a,
LTC4245_VEEOUT = 0x1b,
- LTC4245_GPIOADC1 = 0x1c,
- LTC4245_GPIOADC2 = 0x1d,
- LTC4245_GPIOADC3 = 0x1e,
+ LTC4245_GPIOADC = 0x1c,
};
struct ltc4245_data {
@@ -61,7 +59,7 @@ struct ltc4245_data {
u8 cregs[0x08];
/* Voltage registers */
- u8 vregs[0x0f];
+ u8 vregs[0x0d];
};
static struct ltc4245_data *ltc4245_update_device(struct device *dev)
@@ -86,7 +84,7 @@ static struct ltc4245_data *ltc4245_upda
data->cregs[i] = val;
}
- /* Read voltage registers -- 0x10 to 0x1f */
+ /* Read voltage registers -- 0x10 to 0x1c */
for (i = 0; i < ARRAY_SIZE(data->vregs); i++) {
val = i2c_smbus_read_byte_data(client, i+0x10);
if (unlikely(val < 0))
@@ -128,9 +126,7 @@ static int ltc4245_get_voltage(struct de
case LTC4245_VEEOUT:
voltage = regval * -55;
break;
- case LTC4245_GPIOADC1:
- case LTC4245_GPIOADC2:
- case LTC4245_GPIOADC3:
+ case LTC4245_GPIOADC:
voltage = regval * 10;
break;
default:
@@ -297,9 +293,7 @@ LTC4245_ALARM(in7_min_alarm, (1 << 2), L
LTC4245_ALARM(in8_min_alarm, (1 << 3), LTC4245_FAULT2);
/* GPIO voltages */
-LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC1);
-LTC4245_VOLTAGE(in10_input, LTC4245_GPIOADC2);
-LTC4245_VOLTAGE(in11_input, LTC4245_GPIOADC3);
+LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC);
/* Power Consumption (virtual) */
LTC4245_POWER(power1_input, LTC4245_12VSENSE);
@@ -342,8 +336,6 @@ static struct attribute *ltc4245_attribu
&sensor_dev_attr_in8_min_alarm.dev_attr.attr,
&sensor_dev_attr_in9_input.dev_attr.attr,
- &sensor_dev_attr_in10_input.dev_attr.attr,
- &sensor_dev_attr_in11_input.dev_attr.attr,
&sensor_dev_attr_power1_input.dev_attr.attr,
&sensor_dev_attr_power2_input.dev_attr.attr,
file:864a371f6eb98443f0565b1d1263cb66a370e152 -> file:fbc997ee67d973283c37181cd38dc499159639b0
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -302,13 +302,13 @@ error_ret:
**/
static inline int sht15_calc_temp(struct sht15_data *data)
{
- int d1 = 0;
+ int d1 = temppoints[0].d1;
int i;
- for (i = 1; i < ARRAY_SIZE(temppoints); i++)
+ for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--)
/* Find pointer to interpolate */
if (data->supply_uV > temppoints[i - 1].vdd) {
- d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd)
+ d1 = (data->supply_uV - temppoints[i - 1].vdd)
* (temppoints[i].d1 - temppoints[i - 1].d1)
/ (temppoints[i].vdd - temppoints[i - 1].vdd)
+ temppoints[i - 1].d1;
@@ -541,7 +541,12 @@ static int __devinit sht15_probe(struct
/* If a regulator is available, query what the supply voltage actually is!*/
data->reg = regulator_get(data->dev, "vcc");
if (!IS_ERR(data->reg)) {
- data->supply_uV = regulator_get_voltage(data->reg);
+ int voltage;
+
+ voltage = regulator_get_voltage(data->reg);
+ if (voltage)
+ data->supply_uV = voltage;
+
regulator_enable(data->reg);
/* setup a notifier block to update this if another device
* causes the voltage to change */
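
The sht15 change keeps the supply voltage and the calibration table in the same unit on both sides of the interpolation (the comparison above already uses supply_uV directly against temppoints[].vdd, so the old /1000 in the numerator was inconsistent with it), and d1 now defaults to the first table entry instead of 0. The computation itself is ordinary linear interpolation between two bracketing table points; a stand-alone sketch with made-up calibration values, not the driver's table:

    #include <stdio.h>

    /* Hypothetical calibration points (vdd in the same unit as the supply). */
    struct point { int vdd; int d1; };

    /* Linear interpolation of d1 for a supply voltage between two table points. */
    static int interp_d1(struct point lo, struct point hi, int supply_uV)
    {
        return lo.d1 + (supply_uV - lo.vdd) * (hi.d1 - lo.d1) / (hi.vdd - lo.vdd);
    }

    int main(void)
    {
        struct point lo = { 2400000, -39600 };
        struct point hi = { 3000000, -39700 };

        printf("d1 = %d\n", interp_d1(lo, hi, 2700000));   /* halfway: -39650 */
        return 0;
    }
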
file:20924343431b65f61048ca2fc05c5b1d93afbcd7 -> file:d3a786b36d6ac961e1464a95c7e26289fdf7a540
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -62,9 +62,9 @@ static const u8 TMP421_TEMP_LSB[4] = {
#define TMP423_DEVICE_ID 0x23
static const struct i2c_device_id tmp421_id[] = {
- { "tmp421", tmp421 },
- { "tmp422", tmp422 },
- { "tmp423", tmp423 },
+ { "tmp421", 2 },
+ { "tmp422", 3 },
+ { "tmp423", 4 },
{ }
};
MODULE_DEVICE_TABLE(i2c, tmp421_id);
@@ -74,21 +74,23 @@ struct tmp421_data {
struct mutex update_lock;
char valid;
unsigned long last_updated;
- int kind;
+ int channels;
u8 config;
s16 temp[4];
};
static int temp_from_s16(s16 reg)
{
- int temp = reg;
+ /* Mask out status bits */
+ int temp = reg & ~0xf;
return (temp * 1000 + 128) / 256;
}
static int temp_from_u16(u16 reg)
{
- int temp = reg;
+ /* Mask out status bits */
+ int temp = reg & ~0xf;
/* Add offset for extended temperature range. */
temp -= 64 * 256;
@@ -108,7 +110,7 @@ static struct tmp421_data *tmp421_update
data->config = i2c_smbus_read_byte_data(client,
TMP421_CONFIG_REG_1);
- for (i = 0; i <= data->kind; i++) {
+ for (i = 0; i < data->channels; i++) {
data->temp[i] = i2c_smbus_read_byte_data(client,
TMP421_TEMP_MSB[i]) << 8;
data->temp[i] |= i2c_smbus_read_byte_data(client,
@@ -167,7 +169,7 @@ static mode_t tmp421_is_visible(struct k
devattr = container_of(a, struct device_attribute, attr);
index = to_sensor_dev_attr(devattr)->index;
- if (data->kind > index)
+ if (index < data->channels)
return a->mode;
return 0;
@@ -275,7 +277,7 @@ static int tmp421_probe(struct i2c_clien
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
- data->kind = id->driver_data;
+ data->channels = id->driver_data;
err = tmp421_init_client(client);
if (err)
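
The tmp421 conversion masks the low nibble because those register bits carry status flags, not temperature; the remaining value is in 1/256 degC steps, which (value * 1000 + 128) / 256 rounds to millidegrees. A quick stand-alone check of that arithmetic (register value chosen for illustration):

    #include <stdio.h>

    int main(void)
    {
        /* 25.5 degC with a status bit set in the low nibble (illustrative value). */
        int reg = 0x1981;
        int temp = reg & ~0xf;                      /* drop the status bits */

        printf("%d millidegrees\n", (temp * 1000 + 128) / 256);   /* prints 25500 */
        return 0;
    }
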
file:55edcfe5b851abe5676d1d3c857eed93c233e458 -> file:806f033695b5da1eb1e2a584bd1a950a425d3f5b
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -41,7 +41,8 @@
Tolapai 0x5032 32 hard yes yes yes
ICH10 0x3a30 32 hard yes yes yes
ICH10 0x3a60 32 hard yes yes yes
- PCH 0x3b30 32 hard yes yes yes
+ 3400/5 Series (PCH) 0x3b30 32 hard yes yes yes
+ Cougar Point (PCH) 0x1c22 32 hard yes yes yes
Features supported by this driver:
Software PEC no
@@ -415,9 +416,11 @@ static int i801_block_transaction(union
data->block[0] = 32; /* max for SMBus block reads */
}
+ /* Experience has shown that the block buffer can only be used for
+ SMBus (not I2C) block transactions, even though the datasheet
+ doesn't mention this limitation. */
if ((i801_features & FEATURE_BLOCK_BUFFER)
- && !(command == I2C_SMBUS_I2C_BLOCK_DATA
- && read_write == I2C_SMBUS_READ)
+ && command != I2C_SMBUS_I2C_BLOCK_DATA
&& i801_set_block_buffer_mode() == 0)
result = i801_block_transaction_by_block(data, read_write,
hwpec);
@@ -578,6 +581,7 @@ static struct pci_device_id i801_ids[] =
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) },
{ 0, }
};
@@ -707,6 +711,7 @@ static int __devinit i801_probe(struct p
case PCI_DEVICE_ID_INTEL_ICH10_4:
case PCI_DEVICE_ID_INTEL_ICH10_5:
case PCI_DEVICE_ID_INTEL_PCH_SMBUS:
+ case PCI_DEVICE_ID_INTEL_CPT_SMBUS:
i801_features |= FEATURE_I2C_BLOCK_READ;
/* fall through */
case PCI_DEVICE_ID_INTEL_82801DB_3:
file:1a32d62ed86b6bb611b8cd625a790587ef6b5735 -> file:a9c331301237734a3c927f9c1a80e739b78f44ea
--- a/drivers/ide/cmd640.c
+++ b/drivers/ide/cmd640.c
@@ -632,12 +632,10 @@ static void cmd640_init_dev(ide_drive_t
static int cmd640_test_irq(ide_hwif_t *hwif)
{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
int irq_reg = hwif->channel ? ARTTIM23 : CFR;
- u8 irq_stat, irq_mask = hwif->channel ? ARTTIM23_IDE23INTR :
+ u8 irq_mask = hwif->channel ? ARTTIM23_IDE23INTR :
CFR_IDE01INTR;
-
- pci_read_config_byte(dev, irq_reg, &irq_stat);
+ u8 irq_stat = get_cmd640_reg(irq_reg);
return (irq_stat & irq_mask) ? 1 : 0;
}
file:cc8633cbe133db4d9b7f81330ef66a547a6176ac -> file:67fb73559fd582e0bbd477500636c315834b7764
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -428,13 +428,11 @@ int ide_raw_taskfile(ide_drive_t *drive,
{
struct request *rq;
int error;
+ int rw = !(cmd->tf_flags & IDE_TFLAG_WRITE) ? READ : WRITE;
- rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+ rq = blk_get_request(drive->queue, rw, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
- if (cmd->tf_flags & IDE_TFLAG_WRITE)
- rq->cmd_flags |= REQ_RW;
-
/*
* (ks) We transfer currently only whole sectors.
* This is suffient for now. But, it would be great,
file:30bdf427ee6d099b907f13ffe0d05dfa9d53c39a -> file:f8302c26774387179da91ca9790bbeaba1e43882
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -752,6 +752,8 @@ void ipoib_cm_send(struct net_device *de
if (++priv->tx_outstanding == ipoib_sendq_size) {
ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
tx->qp->qp_num);
+ if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
+ ipoib_warn(priv, "request notify on send CQ failed\n");
netif_stop_queue(dev);
}
}
file:df3eb8c9fd96adf7e0b9cc8496bfce760ad6952d -> file:b4b22576f12a0a0aba0c99f421fcdf7cd2699016
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1163,7 +1163,7 @@ static ssize_t create_child(struct devic
return ret ? ret : count;
}
-static DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);
+static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);
static ssize_t delete_child(struct device *dev,
struct device_attribute *attr,
@@ -1183,7 +1183,7 @@ static ssize_t delete_child(struct devic
return ret ? ret : count;
}
-static DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);
+static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);
int ipoib_add_pkey_attr(struct net_device *dev)
{
file:b9453d068e9d81922568daa82f733649c5b623f2 -> file:274c883ef3eaf30afd478408bd1f10c001baf0f8
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -209,6 +209,8 @@ void iser_finalize_rdma_unaligned_sg(str
mem_copy->copy_buf = NULL;
}
+#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
+
/**
* iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
* and returns the length of resulting physical address array (may be less than
@@ -221,62 +223,52 @@ void iser_finalize_rdma_unaligned_sg(str
* where --few fragments of the same page-- are present in the SG as
* consecutive elements. Also, it handles one entry SG.
*/
+
static int iser_sg_to_page_vec(struct iser_data_buf *data,
struct iser_page_vec *page_vec,
struct ib_device *ibdev)
{
- struct scatterlist *sgl = (struct scatterlist *)data->buf;
- struct scatterlist *sg;
- u64 first_addr, last_addr, page;
- int end_aligned;
- unsigned int cur_page = 0;
+ struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
+ u64 start_addr, end_addr, page, chunk_start = 0;
unsigned long total_sz = 0;
- int i;
+ unsigned int dma_len;
+ int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;
/* compute the offset of first element */
page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;
+ new_chunk = 1;
+ cur_page = 0;
for_each_sg(sgl, sg, data->dma_nents, i) {
- unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
-
+ start_addr = ib_sg_dma_address(ibdev, sg);
+ if (new_chunk)
+ chunk_start = start_addr;
+ dma_len = ib_sg_dma_len(ibdev, sg);
+ end_addr = start_addr + dma_len;
total_sz += dma_len;
- first_addr = ib_sg_dma_address(ibdev, sg);
- last_addr = first_addr + dma_len;
-
- end_aligned = !(last_addr & ~MASK_4K);
-
- /* continue to collect page fragments till aligned or SG ends */
- while (!end_aligned && (i + 1 < data->dma_nents)) {
- sg = sg_next(sg);
- i++;
- dma_len = ib_sg_dma_len(ibdev, sg);
- total_sz += dma_len;
- last_addr = ib_sg_dma_address(ibdev, sg) + dma_len;
- end_aligned = !(last_addr & ~MASK_4K);
+ /* collect page fragments until aligned or end of SG list */
+ if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
+ new_chunk = 0;
+ continue;
}
+ new_chunk = 1;
- /* handle the 1st page in the 1st DMA element */
- if (cur_page == 0) {
- page = first_addr & MASK_4K;
- page_vec->pages[cur_page] = page;
- cur_page++;
+ /* address of the first page in the contiguous chunk;
+ masking relevant for the very first SG entry,
+ which might be unaligned */
+ page = chunk_start & MASK_4K;
+ do {
+ page_vec->pages[cur_page++] = page;
page += SIZE_4K;
- } else
- page = first_addr;
-
- for (; page < last_addr; page += SIZE_4K) {
- page_vec->pages[cur_page] = page;
- cur_page++;
- }
-
+ } while (page < end_addr);
}
+
page_vec->data_size = total_sz;
iser_dbg("page_vec->data_size:%d cur_page %d\n", page_vec->data_size,cur_page);
return cur_page;
}
-#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
/**
* iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned
@@ -284,42 +276,40 @@ static int iser_sg_to_page_vec(struct is
* the number of entries which are aligned correctly. Supports the case where
* consecutive SG elements are actually fragments of the same physcial page.
*/
-static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
- struct ib_device *ibdev)
+static int iser_data_buf_aligned_len(struct iser_data_buf *data,
+ struct ib_device *ibdev)
{
- struct scatterlist *sgl, *sg;
- u64 end_addr, next_addr;
- int i, cnt;
- unsigned int ret_len = 0;
+ struct scatterlist *sgl, *sg, *next_sg = NULL;
+ u64 start_addr, end_addr;
+ int i, ret_len, start_check = 0;
+
+ if (data->dma_nents == 1)
+ return 1;
sgl = (struct scatterlist *)data->buf;
+ start_addr = ib_sg_dma_address(ibdev, sgl);
- cnt = 0;
for_each_sg(sgl, sg, data->dma_nents, i) {
- /* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
- "offset: %ld sz: %ld\n", i,
- (unsigned long)sg_phys(sg),
- (unsigned long)sg->offset,
- (unsigned long)sg->length); */
- end_addr = ib_sg_dma_address(ibdev, sg) +
- ib_sg_dma_len(ibdev, sg);
- /* iser_dbg("Checking sg iobuf end address "
- "0x%08lX\n", end_addr); */
- if (i + 1 < data->dma_nents) {
- next_addr = ib_sg_dma_address(ibdev, sg_next(sg));
- /* are i, i+1 fragments of the same page? */
- if (end_addr == next_addr) {
- cnt++;
- continue;
- } else if (!IS_4K_ALIGNED(end_addr)) {
- ret_len = cnt + 1;
- break;
- }
- }
- cnt++;
+ if (start_check && !IS_4K_ALIGNED(start_addr))
+ break;
+
+ next_sg = sg_next(sg);
+ if (!next_sg)
+ break;
+
+ end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
+ start_addr = ib_sg_dma_address(ibdev, next_sg);
+
+ if (end_addr == start_addr) {
+ start_check = 0;
+ continue;
+ } else
+ start_check = 1;
+
+ if (!IS_4K_ALIGNED(end_addr))
+ break;
}
- if (i == data->dma_nents)
- ret_len = cnt; /* loop ended */
+ ret_len = (next_sg) ? i : i+1;
iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
ret_len, data->dma_nents, data);
return ret_len;
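
The rewritten iser_sg_to_page_vec() accumulates SG entries into a chunk until the chunk ends on a 4K boundary (or the list ends), then emits one page address per 4K of the chunk, starting from the 4K-aligned chunk base. A minimal user-space sketch of that expansion, with plain arrays standing in for the SG list and the driver's MASK_4K/SIZE_4K definitions assumed:

    #include <stdio.h>
    #include <stdint.h>

    #define SIZE_4K 4096UL
    #define MASK_4K (~(SIZE_4K - 1))
    #define IS_4K_ALIGNED(addr) (((addr) & ~MASK_4K) == 0)

    int main(void)
    {
        /* Two hypothetical DMA segments forming one contiguous 8K chunk. */
        uint64_t addr[] = { 0x100000, 0x100800 };
        uint64_t len[]  = { 0x800,    0x1800   };
        int nents = 2;
        uint64_t chunk_start = 0, page, pages[16];
        int i, new_chunk = 1, cur_page = 0;

        for (i = 0; i < nents; i++) {
            uint64_t end = addr[i] + len[i];

            if (new_chunk)
                chunk_start = addr[i];
            if (!IS_4K_ALIGNED(end) && i < nents - 1) {
                new_chunk = 0;              /* keep collecting fragments */
                continue;
            }
            new_chunk = 1;
            for (page = chunk_start & MASK_4K; page < end; page += SIZE_4K)
                pages[cur_page++] = page;   /* one entry per 4K page */
        }
        printf("%d pages\n", cur_page);     /* prints 2 for the 8K chunk */
        return 0;
    }
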
file:e11a1c3071c49f72bc625b04507c8bd37e9b454d -> file:ee98b1bc5d890c28de0f50aea1db487535fedee8
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -423,15 +423,4 @@ config KEYBOARD_W90P910
To compile this driver as a module, choose M here: the
module will be called w90p910_keypad.
-config KEYBOARD_PMIC8058
- tristate "Qualcomm PMIC8058 keypad"
- depends on PMIC8058 && ARCH_MSM7X30
- default y
- help
- Say Y here if you want to enable the driver for the PMIC8058
- keypad provided as a reference design from Qualcomm. This is intended
- to support upto 18x8 matrix based keypad design.
-
- To compile this driver as a module, choose M here: the module will
- be called pmic8058-keypad.
endif
file:6424785e020caa3caeb0ed2e248d077a7ee4f4e5 -> file:babad5e58b77d323f357618da87accd4f97dcce9
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -36,8 +36,4 @@ obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd
obj-$(CONFIG_KEYBOARD_TOSA) += tosakbd.o
obj-$(CONFIG_KEYBOARD_TWL4030) += twl4030_keypad.o
obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o
-ifeq ($(CONFIG_MSM_SSBI),y)
-else
-obj-$(CONFIG_KEYBOARD_PMIC8058) += pmic8058-keypad.o
-endif
obj-$(CONFIG_KEYBOARD_W90P910) += w90p910_keypad.o
file:9a2977c216967d3c7a23daca744e8f136663c213 -> file:2cfbc1752605c05241326df2c553fd745d34ab8a
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -50,8 +50,12 @@
*/
#define TWL4030_MAX_ROWS 8 /* TWL4030 hard limit */
#define TWL4030_MAX_COLS 8
-#define TWL4030_ROW_SHIFT 3
-#define TWL4030_KEYMAP_SIZE (TWL4030_MAX_ROWS * TWL4030_MAX_COLS)
+/*
+ * Note that we add space for an extra column so that we can handle
+ * row lines connected to the gnd (see twl4030_col_xlate()).
+ */
+#define TWL4030_ROW_SHIFT 4
+#define TWL4030_KEYMAP_SIZE (TWL4030_MAX_ROWS << TWL4030_ROW_SHIFT)
struct twl4030_keypad {
unsigned short keymap[TWL4030_KEYMAP_SIZE];
@@ -181,7 +185,7 @@ static int twl4030_read_kp_matrix_state(
return ret;
}
-static int twl4030_is_in_ghost_state(struct twl4030_keypad *kp, u16 *key_state)
+static bool twl4030_is_in_ghost_state(struct twl4030_keypad *kp, u16 *key_state)
{
int i;
u16 check = 0;
@@ -190,12 +194,12 @@ static int twl4030_is_in_ghost_state(str
u16 col = key_state[i];
if ((col & check) && hweight16(col) > 1)
- return 1;
+ return true;
check |= col;
}
- return 0;
+ return false;
}
static void twl4030_kp_scan(struct twl4030_keypad *kp, bool release_all)
@@ -224,7 +228,8 @@ static void twl4030_kp_scan(struct twl40
if (!changed)
continue;
- for (col = 0; col < kp->n_cols; col++) {
+ /* Extra column handles "all gnd" rows */
+ for (col = 0; col < kp->n_cols + 1; col++) {
int code;
if (!(changed & (1 << col)))
file:fc8823bcf20c162f29c993851d2f8c1705bb8d9e -> file:0c99db075e8f140d7670e6ad1d09c20e556a723c
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -62,6 +62,8 @@ static const struct alps_model_info alps
{ { 0x62, 0x02, 0x14 }, 0xcf, 0xcf,
ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
{ { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FW_BK_1 }, /* Dell Vostro 1400 */
+ { { 0x52, 0x01, 0x14 }, 0xff, 0xff,
+ ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
};
/*
file:0876d82cecfc2a99737d24e13cf40945b795bbeb -> file:9451e28701f8d414721d881d91d83c2941a24cea
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -1349,6 +1349,7 @@ static int psmouse_reconnect(struct seri
struct psmouse *psmouse = serio_get_drvdata(serio);
struct psmouse *parent = NULL;
struct serio_driver *drv = serio->drv;
+ unsigned char type;
int rc = -1;
if (!drv || !psmouse) {
@@ -1368,10 +1369,15 @@ static int psmouse_reconnect(struct seri
if (psmouse->reconnect) {
if (psmouse->reconnect(psmouse))
goto out;
- } else if (psmouse_probe(psmouse) < 0 ||
- psmouse->type != psmouse_extensions(psmouse,
- psmouse_max_proto, false)) {
- goto out;
+ } else {
+ psmouse_reset(psmouse);
+
+ if (psmouse_probe(psmouse) < 0)
+ goto out;
+
+ type = psmouse_extensions(psmouse, psmouse_max_proto, false);
+ if (psmouse->type != type)
+ goto out;
}
/* ok, the device type (and capabilities) match the old one,
file:2a5982e532f81aea8ec28fca40403c2f68150723 -> file:ba09e4dd84928d37223e5b1ae66bc13a4a60a9c5
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -166,6 +166,13 @@ static const struct dmi_system_id __init
},
},
{
+ /* Gigabyte Spring Peak - defines wrong chassis type */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"),
+ },
+ },
+ {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
@@ -442,6 +449,13 @@ static const struct dmi_system_id __init
},
},
{
+ /* Medion Akoya E1222 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E122X"),
+ },
+ },
+ {
/* Mivvy M310 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"),
file:9114ae1c7488a477aaaad4b7e983890d5b3ef2a0 -> file:e6307ba452ea5439f08b963db4e4363159dae1ee
--- a/drivers/input/tablet/wacom.h
+++ b/drivers/input/tablet/wacom.h
@@ -1,7 +1,7 @@
/*
* drivers/input/tablet/wacom.h
*
- * USB Wacom Graphire and Wacom Intuos tablet support
+ * USB Wacom tablet support
*
* Copyright (c) 2000-2004 Vojtech Pavlik <vojtech@ucw.cz>
* Copyright (c) 2000 Andreas Bach Aaen <abach@stofanet.dk>
@@ -69,6 +69,7 @@
* v1.49 (pc) - Added support for USB Tablet PC (0x90, 0x93, and 0x9A)
* v1.50 (pc) - Fixed a TabletPC touch bug in 2.6.28
* v1.51 (pc) - Added support for Intuos4
+ * v1.52 (pc) - Query Wacom data upon system resume
*/
/*
@@ -89,9 +90,9 @@
/*
* Version Information
*/
-#define DRIVER_VERSION "v1.51"
+#define DRIVER_VERSION "v1.52"
#define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>"
-#define DRIVER_DESC "USB Wacom Graphire and Wacom Intuos tablet driver"
+#define DRIVER_DESC "USB Wacom tablet driver"
#define DRIVER_LICENSE "GPL"
MODULE_AUTHOR(DRIVER_AUTHOR);
file:ea30c983a33efdbd6469cbd2fbc07fe308ade8fe -> file:69fc4b8d8738e60f68634aad297247060d791e03
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -1,7 +1,7 @@
/*
* drivers/input/tablet/wacom_sys.c
*
- * USB Wacom Graphire and Wacom Intuos tablet support - system specific code
+ * USB Wacom tablet support - system specific code
*/
/*
@@ -562,10 +562,15 @@ static int wacom_resume(struct usb_inter
int rv;
mutex_lock(&wacom->lock);
+
+ /* switch to wacom mode first */
+ wacom_query_tablet_data(intf);
+
if (wacom->open)
rv = usb_submit_urb(wacom->irq, GFP_NOIO);
else
rv = 0;
+
mutex_unlock(&wacom->lock);
return rv;
file:cc768caa38f54b86f48ec92310ed93e3b1fcf998 -> file:a0f7b99aee8be4b8d3aab15c03f7af6ecfb5005e
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -1243,14 +1243,10 @@ static void do_action(int action, struct
* note that bcs may be NULL if no B channel is free
*/
at_state2->ConState = 700;
- kfree(at_state2->str_var[STR_NMBR]);
- at_state2->str_var[STR_NMBR] = NULL;
- kfree(at_state2->str_var[STR_ZCPN]);
- at_state2->str_var[STR_ZCPN] = NULL;
- kfree(at_state2->str_var[STR_ZBC]);
- at_state2->str_var[STR_ZBC] = NULL;
- kfree(at_state2->str_var[STR_ZHLC]);
- at_state2->str_var[STR_ZHLC] = NULL;
+ for (i = 0; i < STR_NUM; ++i) {
+ kfree(at_state2->str_var[i]);
+ at_state2->str_var[i] = NULL;
+ }
at_state2->int_var[VAR_ZCTP] = -1;
spin_lock_irqsave(&cs->lock, flags);
file:6a8e1384e7bd09b6441ef6a27eaed5c497f31061 -> file:b3065b8b24568674ae7b5c64504f6d9a5e3ca360
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -635,7 +635,6 @@ void gigaset_if_receive(struct cardstate
if ((tty = cs->tty) == NULL)
gig_dbg(DEBUG_ANY, "receive on closed device");
else {
- tty_buffer_request_room(tty, len);
tty_insert_flip_string(tty, buffer, len);
tty_flip_buffer_push(tty);
}
file:386a7972111d9f88b4e13285d1c810ef29647114 -> file:a564fe2eff14146c66aed69761df9ce75a714b7b
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -90,6 +90,8 @@ static struct task_struct *thread_therm
static void write_both_fan_speed(struct thermostat *th, int speed);
static void write_fan_speed(struct thermostat *th, int speed, int fan);
+static void thermostat_create_files(void);
+static void thermostat_remove_files(void);
static int
write_reg(struct thermostat* th, int reg, u8 data)
@@ -161,6 +163,8 @@ remove_thermostat(struct i2c_client *cli
struct thermostat *th = i2c_get_clientdata(client);
int i;
+ thermostat_remove_files();
+
if (thread_therm != NULL) {
kthread_stop(thread_therm);
}
@@ -449,6 +453,8 @@ static int probe_thermostat(struct i2c_c
return -ENOMEM;
}
+ thermostat_create_files();
+
return 0;
}
@@ -566,7 +572,6 @@ thermostat_init(void)
struct device_node* np;
const u32 *prop;
int i = 0, offset = 0;
- int err;
np = of_find_node_by_name(NULL, "fan");
if (!np)
@@ -633,6 +638,17 @@ thermostat_init(void)
return -ENODEV;
}
+#ifndef CONFIG_I2C_POWERMAC
+ request_module("i2c-powermac");
+#endif
+
+ return i2c_add_driver(&thermostat_driver);
+}
+
+static void thermostat_create_files(void)
+{
+ int err;
+
err = device_create_file(&of_dev->dev, &dev_attr_sensor1_temperature);
err |= device_create_file(&of_dev->dev, &dev_attr_sensor2_temperature);
err |= device_create_file(&of_dev->dev, &dev_attr_sensor1_limit);
@@ -647,16 +663,9 @@ thermostat_init(void)
if (err)
printk(KERN_WARNING
"Failed to create tempertaure attribute file(s).\n");
-
-#ifndef CONFIG_I2C_POWERMAC
- request_module("i2c-powermac");
-#endif
-
- return i2c_add_driver(&thermostat_driver);
}
-static void __exit
-thermostat_exit(void)
+static void thermostat_remove_files(void)
{
if (of_dev) {
device_remove_file(&of_dev->dev, &dev_attr_sensor1_temperature);
@@ -673,9 +682,14 @@ thermostat_exit(void)
device_remove_file(&of_dev->dev,
&dev_attr_sensor2_fan_speed);
- of_device_unregister(of_dev);
}
+}
+
+static void __exit
+thermostat_exit(void)
+{
i2c_del_driver(&thermostat_driver);
+ of_device_unregister(of_dev);
}
module_init(thermostat_init);
file:8a223a48802c0c867f79818045399937e650bf5e -> file:5f9315b32a42aed67da57a7de2caf97bfdcb8fd1
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -162,7 +162,7 @@ static inline sector_t get_dev_size(stru
static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
sector_t sector)
{
- return (sector & ~store->chunk_mask) >> store->chunk_shift;
+ return sector >> store->chunk_shift;
}
int dm_exception_store_type_register(struct dm_exception_store_type *type);
file:1ceceb334d5ebe8f5ce637d29898fc984bfcc797 -> file:dff9d2f449c380363287b567c9b5e1ad79ee7ee1
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -172,12 +172,14 @@ static linear_conf_t *linear_conf(mddev_
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
- * violating it, so limit ->max_sector to one PAGE, as
- * a one page request is never in violation.
+ * violating it, so limit max_phys_segments to 1 lying within
+ * a single page.
*/
- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+ blk_queue_max_phys_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
+ }
conf->array_sectors += rdev->sectors;
cnt++;
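Throughout the md personalities (linear here, and multipath/raid0/raid10 below) the old one-page cap on max_sectors is replaced by blk_queue_max_phys_segments(q, 1) plus a PAGE_CACHE_SIZE - 1 segment boundary, so every request stays within a single page-sized segment and can never violate a member device's merge_bvec_fn. A rough userspace sketch of what the boundary mask enforces (PAGE_SIZE and within_boundary are illustrative stand-ins):

  #include <stdio.h>
  #include <stdbool.h>
  #include <stdint.h>

  #define PAGE_SIZE 4096ULL

  /* True if [start, start + len) stays inside one boundary-mask window. */
  static bool within_boundary(uint64_t start, uint64_t len, uint64_t mask)
  {
      return (start & ~mask) == ((start + len - 1) & ~mask);
  }

  int main(void)
  {
      uint64_t mask = PAGE_SIZE - 1;

      printf("%d\n", within_boundary(0x1000, 4096, mask));  /* 1: one page  */
      printf("%d\n", within_boundary(0x1800, 4096, mask));  /* 0: crosses   */
      return 0;
  }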
file:08f7471d015039dc125fac9c7f0bc79a0594eb02 -> file:01470794d9c31dc2368b3304560580904756b02c
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2011,12 +2011,18 @@ repeat:
if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
/* .. if the array isn't clean, an 'even' event must also go
* to spares. */
- if ((mddev->events&1)==0)
+ if ((mddev->events&1)==0) {
nospares = 0;
+ sync_req = 2; /* force a second update to get the
+ * even/odd in sync */
+ }
} else {
/* otherwise an 'odd' event must go to spares */
- if ((mddev->events&1))
+ if ((mddev->events&1)) {
nospares = 0;
+ sync_req = 2; /* force a second update to get the
+ * even/odd in sync */
+ }
}
}
@@ -5328,6 +5334,7 @@ static int md_ioctl(struct block_device
int err = 0;
void __user *argp = (void __user *)arg;
mddev_t *mddev = NULL;
+ int ro;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -5463,6 +5470,34 @@ static int md_ioctl(struct block_device
err = do_md_stop(mddev, 1, 1);
goto done_unlock;
+ case BLKROSET:
+ if (get_user(ro, (int __user *)(arg))) {
+ err = -EFAULT;
+ goto done_unlock;
+ }
+ err = -EINVAL;
+
+ /* if the bdev is going readonly the value of mddev->ro
+ * does not matter, no writes are coming
+ */
+ if (ro)
+ goto done_unlock;
+
+ /* are we already prepared for writes? */
+ if (mddev->ro != 1)
+ goto done_unlock;
+
+ /* transitioning to readauto need only happen for
+ * arrays that call md_write_start
+ */
+ if (mddev->pers) {
+ err = restart_array(mddev);
+ if (err == 0) {
+ mddev->ro = 2;
+ set_disk_ro(mddev->gendisk, 0);
+ }
+ }
+ goto done_unlock;
}
/*
file:ee7646f974a07165bd368deb3a18f6390e5553dc -> file:e4b11f18adc7d978947d4da9bb992b8cec334559
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -301,14 +301,16 @@ static int multipath_add_disk(mddev_t *m
rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
- * violating it, so limit ->max_sector to one PAGE, as
- * a one page request is never in violation.
+ * violating it, so limit ->max_phys_segments to one, lying
+ * within a single page.
* (Note: it is very unlikely that a device with
* merge_bvec_fn will be involved in multipath.)
*/
- if (q->merge_bvec_fn &&
- queue_max_sectors(q) > (PAGE_SIZE>>9))
- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+ if (q->merge_bvec_fn) {
+ blk_queue_max_phys_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
+ }
conf->working_disks++;
mddev->degraded--;
@@ -476,9 +478,11 @@ static int multipath_run (mddev_t *mddev
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, not that we ever expect a device with
* a merge_bvec_fn to be involved in multipath */
- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+ blk_queue_max_phys_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
+ }
if (!test_bit(Faulty, &rdev->flags))
conf->working_disks++;
file:d3a4ce06015a300e9d4df5e95ee63669613db17e -> file:3db857cdd33e14bc0b6430df75b1da8783bad435
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -176,14 +176,15 @@ static int create_strip_zones(mddev_t *m
disk_stack_limits(mddev->gendisk, rdev1->bdev,
rdev1->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
- * violating it, so limit ->max_sector to one PAGE, as
- * a one page request is never in violation.
+ * violating it, so limit ->max_phys_segments to 1, lying within
+ * a single page.
*/
- if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
-
+ if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) {
+ blk_queue_max_phys_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
+ }
if (!smallest || (rdev1->sectors < smallest->sectors))
smallest = rdev1;
cnt++;
file:e07ce2e033a95c48ba2c9a78c519640743220e2d -> file:5ccad2822b115361c7c875430fb747755e4658ce
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -417,7 +417,7 @@ static void raid1_end_write_request(stru
*/
static int read_balance(conf_t *conf, r1bio_t *r1_bio)
{
- const unsigned long this_sector = r1_bio->sector;
+ const sector_t this_sector = r1_bio->sector;
int new_disk = conf->last_used, disk = new_disk;
int wonly_disk = -1;
const int sectors = r1_bio->sectors;
@@ -433,7 +433,7 @@ static int read_balance(conf_t *conf, r1
retry:
if (conf->mddev->recovery_cp < MaxSector &&
(this_sector + sectors >= conf->next_resync)) {
- /* Choose the first operation device, for consistancy */
+ /* Choose the first operational device, for consistency */
new_disk = 0;
for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
@@ -891,9 +891,10 @@ static int make_request(struct request_q
if (test_bit(Faulty, &rdev->flags)) {
rdev_dec_pending(rdev, mddev);
r1_bio->bios[i] = NULL;
- } else
+ } else {
r1_bio->bios[i] = bio;
- targets++;
+ targets++;
+ }
} else
r1_bio->bios[i] = NULL;
}
file:c2cb7b87b440dce777d92bb6c215d1c82efddbe1 -> file:6c2a35b42d42279b894358874bfd08a4558695f0
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -493,7 +493,7 @@ static int raid10_mergeable_bvec(struct
*/
static int read_balance(conf_t *conf, r10bio_t *r10_bio)
{
- const unsigned long this_sector = r10_bio->sector;
+ const sector_t this_sector = r10_bio->sector;
int disk, slot, nslot;
const int sectors = r10_bio->sectors;
sector_t new_distance, current_distance;
@@ -1155,13 +1155,17 @@ static int raid10_add_disk(mddev_t *mdde
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
- /* as we don't honour merge_bvec_fn, we must never risk
- * violating it, so limit ->max_sector to one PAGE, as
- * a one page request is never in violation.
+ /* as we don't honour merge_bvec_fn, we must
+ * never risk violating it, so limit
+ * ->max_phys_segments to one lying within a single
+ * page, as a one page request is never in
+ * violation.
*/
- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+ blk_queue_max_phys_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
+ }
p->head_position = 0;
rdev->raid_disk = mirror;
@@ -2155,12 +2159,14 @@ static int run(mddev_t *mddev)
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
- * violating it, so limit ->max_sector to one PAGE, as
- * a one page request is never in violation.
+ * violating it, so limit max_phys_segments to 1 lying
+ * within a single page.
*/
- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+ blk_queue_max_phys_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
+ }
disk->head_position = 0;
}
file:431b9b26ca5deda17ed117f634e1975556e009e0 -> file:23949739fc257d7b270fc8688158dd746f9de68f
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1526,7 +1526,7 @@ static void raid5_end_read_request(struc
clear_bit(R5_UPTODATE, &sh->dev[i].flags);
atomic_inc(&rdev->read_errors);
- if (conf->mddev->degraded)
+ if (conf->mddev->degraded >= conf->max_degraded)
printk_rl(KERN_WARNING
"raid5:%s: read error not correctable "
"(sector %llu on %s).\n",
@@ -1649,8 +1649,8 @@ static sector_t raid5_compute_sector(rai
int previous, int *dd_idx,
struct stripe_head *sh)
{
- long stripe;
- unsigned long chunk_number;
+ sector_t stripe, stripe2;
+ sector_t chunk_number;
unsigned int chunk_offset;
int pd_idx, qd_idx;
int ddf_layout = 0;
@@ -1670,18 +1670,13 @@ static sector_t raid5_compute_sector(rai
*/
chunk_offset = sector_div(r_sector, sectors_per_chunk);
chunk_number = r_sector;
- BUG_ON(r_sector != chunk_number);
/*
* Compute the stripe number
*/
- stripe = chunk_number / data_disks;
-
- /*
- * Compute the data disk and parity disk indexes inside the stripe
- */
- *dd_idx = chunk_number % data_disks;
-
+ stripe = chunk_number;
+ *dd_idx = sector_div(stripe, data_disks);
+ stripe2 = stripe;
/*
* Select the parity disk based on the user selected algorithm.
*/
@@ -1693,21 +1688,21 @@ static sector_t raid5_compute_sector(rai
case 5:
switch (algorithm) {
case ALGORITHM_LEFT_ASYMMETRIC:
- pd_idx = data_disks - stripe % raid_disks;
+ pd_idx = data_disks - sector_div(stripe2, raid_disks);
if (*dd_idx >= pd_idx)
(*dd_idx)++;
break;
case ALGORITHM_RIGHT_ASYMMETRIC:
- pd_idx = stripe % raid_disks;
+ pd_idx = sector_div(stripe2, raid_disks);
if (*dd_idx >= pd_idx)
(*dd_idx)++;
break;
case ALGORITHM_LEFT_SYMMETRIC:
- pd_idx = data_disks - stripe % raid_disks;
+ pd_idx = data_disks - sector_div(stripe2, raid_disks);
*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
break;
case ALGORITHM_RIGHT_SYMMETRIC:
- pd_idx = stripe % raid_disks;
+ pd_idx = sector_div(stripe2, raid_disks);
*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
break;
case ALGORITHM_PARITY_0:
@@ -1727,7 +1722,7 @@ static sector_t raid5_compute_sector(rai
switch (algorithm) {
case ALGORITHM_LEFT_ASYMMETRIC:
- pd_idx = raid_disks - 1 - (stripe % raid_disks);
+ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
qd_idx = pd_idx + 1;
if (pd_idx == raid_disks-1) {
(*dd_idx)++; /* Q D D D P */
@@ -1736,7 +1731,7 @@ static sector_t raid5_compute_sector(rai
(*dd_idx) += 2; /* D D P Q D */
break;
case ALGORITHM_RIGHT_ASYMMETRIC:
- pd_idx = stripe % raid_disks;
+ pd_idx = sector_div(stripe2, raid_disks);
qd_idx = pd_idx + 1;
if (pd_idx == raid_disks-1) {
(*dd_idx)++; /* Q D D D P */
@@ -1745,12 +1740,12 @@ static sector_t raid5_compute_sector(rai
(*dd_idx) += 2; /* D D P Q D */
break;
case ALGORITHM_LEFT_SYMMETRIC:
- pd_idx = raid_disks - 1 - (stripe % raid_disks);
+ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
qd_idx = (pd_idx + 1) % raid_disks;
*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
break;
case ALGORITHM_RIGHT_SYMMETRIC:
- pd_idx = stripe % raid_disks;
+ pd_idx = sector_div(stripe2, raid_disks);
qd_idx = (pd_idx + 1) % raid_disks;
*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
break;
@@ -1769,7 +1764,7 @@ static sector_t raid5_compute_sector(rai
/* Exactly the same as RIGHT_ASYMMETRIC, but or
* of blocks for computing Q is different.
*/
- pd_idx = stripe % raid_disks;
+ pd_idx = sector_div(stripe2, raid_disks);
qd_idx = pd_idx + 1;
if (pd_idx == raid_disks-1) {
(*dd_idx)++; /* Q D D D P */
@@ -1784,7 +1779,8 @@ static sector_t raid5_compute_sector(rai
* D D D P Q rather than
* Q D D D P
*/
- pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks);
+ stripe2 += 1;
+ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
qd_idx = pd_idx + 1;
if (pd_idx == raid_disks-1) {
(*dd_idx)++; /* Q D D D P */
@@ -1796,7 +1792,7 @@ static sector_t raid5_compute_sector(rai
case ALGORITHM_ROTATING_N_CONTINUE:
/* Same as left_symmetric but Q is before P */
- pd_idx = raid_disks - 1 - (stripe % raid_disks);
+ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
ddf_layout = 1;
@@ -1804,27 +1800,27 @@ static sector_t raid5_compute_sector(rai
case ALGORITHM_LEFT_ASYMMETRIC_6:
/* RAID5 left_asymmetric, with Q on last device */
- pd_idx = data_disks - stripe % (raid_disks-1);
+ pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
if (*dd_idx >= pd_idx)
(*dd_idx)++;
qd_idx = raid_disks - 1;
break;
case ALGORITHM_RIGHT_ASYMMETRIC_6:
- pd_idx = stripe % (raid_disks-1);
+ pd_idx = sector_div(stripe2, raid_disks-1);
if (*dd_idx >= pd_idx)
(*dd_idx)++;
qd_idx = raid_disks - 1;
break;
case ALGORITHM_LEFT_SYMMETRIC_6:
- pd_idx = data_disks - stripe % (raid_disks-1);
+ pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
qd_idx = raid_disks - 1;
break;
case ALGORITHM_RIGHT_SYMMETRIC_6:
- pd_idx = stripe % (raid_disks-1);
+ pd_idx = sector_div(stripe2, raid_disks-1);
*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
qd_idx = raid_disks - 1;
break;
@@ -1869,14 +1865,14 @@ static sector_t compute_blocknr(struct s
: conf->algorithm;
sector_t stripe;
int chunk_offset;
- int chunk_number, dummy1, dd_idx = i;
+ sector_t chunk_number;
+ int dummy1, dd_idx = i;
sector_t r_sector;
struct stripe_head sh2;
chunk_offset = sector_div(new_sector, sectors_per_chunk);
stripe = new_sector;
- BUG_ON(new_sector != stripe);
if (i == sh->pd_idx)
return 0;
@@ -1969,7 +1965,7 @@ static sector_t compute_blocknr(struct s
}
chunk_number = stripe * data_disks + i;
- r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
+ r_sector = chunk_number * sectors_per_chunk + chunk_offset;
check = raid5_compute_sector(conf, r_sector,
previous, &dummy1, &sh2);
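The raid5 layout math above switches from '%' and '/' on unsigned long to sector_div(), so large 64-bit stripe and chunk numbers are reduced correctly (and without relying on native 64-bit division on 32-bit hosts). sector_div() turns its dividend into the quotient in place and returns the remainder, which is why the code keeps a scratch copy (stripe2) for every parity-disk calculation. A hedged userspace model of that contract; the real kernel helper is a macro acting on an lvalue, not a pointer:

  #include <stdio.h>
  #include <stdint.h>

  typedef uint64_t sector_t;

  /* Model of the kernel contract: *n becomes the quotient, remainder returned. */
  static uint32_t sector_div(sector_t *n, uint32_t base)
  {
      uint32_t rem = (uint32_t)(*n % base);

      *n /= base;
      return rem;
  }

  int main(void)
  {
      sector_t stripe = 1000003, stripe2;
      int raid_disks = 6, data_disks = raid_disks - 1;
      int dd_idx, pd_idx;

      dd_idx = sector_div(&stripe, data_disks);   /* stripe is now the stripe number */
      stripe2 = stripe;                           /* scratch copy, as in the patch   */
      pd_idx = data_disks - sector_div(&stripe2, raid_disks); /* left-asymmetric layout */

      printf("dd_idx=%d pd_idx=%d\n", dd_idx, pd_idx);
      return 0;
  }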
file:0241a7c5c34a65ae53f28698936d06ae41df8c43 -> file:55e591daf717f4fc26efecf98923aca309cad449
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -350,6 +350,7 @@ static void dvb_net_ule( struct net_devi
const u8 *ts, *ts_end, *from_where = NULL;
u8 ts_remain = 0, how_much = 0, new_ts = 1;
struct ethhdr *ethh = NULL;
+ bool error = false;
#ifdef ULE_DEBUG
/* The code inside ULE_DEBUG keeps a history of the last 100 TS cells processed. */
@@ -459,10 +460,16 @@ static void dvb_net_ule( struct net_devi
/* Drop partly decoded SNDU, reset state, resync on PUSI. */
if (priv->ule_skb) {
- dev_kfree_skb( priv->ule_skb );
+ error = true;
+ dev_kfree_skb(priv->ule_skb);
+ }
+
+ if (error || priv->ule_sndu_remain) {
dev->stats.rx_errors++;
dev->stats.rx_frame_errors++;
+ error = false;
}
+
reset_ule(priv);
priv->need_pusi = 1;
continue;
@@ -504,6 +511,7 @@ static void dvb_net_ule( struct net_devi
"bytes left in TS. Resyncing.\n", ts_remain);
priv->ule_sndu_len = 0;
priv->need_pusi = 1;
+ ts += TS_SZ;
continue;
}
@@ -533,6 +541,7 @@ static void dvb_net_ule( struct net_devi
from_where += 2;
}
+ priv->ule_sndu_remain = priv->ule_sndu_len + 2;
/*
* State of current TS:
* ts_remain (remaining bytes in the current TS cell)
@@ -542,6 +551,7 @@ static void dvb_net_ule( struct net_devi
*/
switch (ts_remain) {
case 1:
+ priv->ule_sndu_remain--;
priv->ule_sndu_type = from_where[0] << 8;
priv->ule_sndu_type_1 = 1; /* first byte of ule_type is set. */
ts_remain -= 1; from_where += 1;
@@ -555,6 +565,7 @@ static void dvb_net_ule( struct net_devi
default: /* complete ULE header is present in current TS. */
/* Extract ULE type field. */
if (priv->ule_sndu_type_1) {
+ priv->ule_sndu_type_1 = 0;
priv->ule_sndu_type |= from_where[0];
from_where += 1; /* points to payload start. */
ts_remain -= 1;
file:3051b64aa17c6bd3c8d7b3fbaad1727908d92b26 -> file:445fa10680644c9d546d5aa8644947e6b5587573
--- a/drivers/media/dvb/frontends/l64781.c
+++ b/drivers/media/dvb/frontends/l64781.c
@@ -192,8 +192,8 @@ static int apply_frontend_param (struct
spi_bias *= qam_tab[p->constellation];
spi_bias /= p->code_rate_HP + 1;
spi_bias /= (guard_tab[p->guard_interval] + 32);
- spi_bias *= 1000ULL;
- spi_bias /= 1000ULL + ppm/1000;
+ spi_bias *= 1000;
+ spi_bias /= 1000 + ppm/1000;
spi_bias *= p->code_rate_HP;
val0x04 = (p->transmission_mode << 2) | p->guard_interval;
file:d8d4214fd65f2c7af9d3af1a263a30f9dc24e3a9 -> file:32a7ec65ec42f1c40fbb4e4576b56cded5045902
--- a/drivers/media/dvb/ttpci/Kconfig
+++ b/drivers/media/dvb/ttpci/Kconfig
@@ -68,13 +68,14 @@ config DVB_BUDGET
select DVB_VES1820 if !DVB_FE_CUSTOMISE
select DVB_L64781 if !DVB_FE_CUSTOMISE
select DVB_TDA8083 if !DVB_FE_CUSTOMISE
- select DVB_TDA10021 if !DVB_FE_CUSTOMISE
- select DVB_TDA10023 if !DVB_FE_CUSTOMISE
select DVB_S5H1420 if !DVB_FE_CUSTOMISE
select DVB_TDA10086 if !DVB_FE_CUSTOMISE
select DVB_TDA826X if !DVB_FE_CUSTOMISE
select DVB_LNBP21 if !DVB_FE_CUSTOMISE
select DVB_TDA1004X if !DVB_FE_CUSTOMISE
+ select DVB_ISL6423 if !DVB_FE_CUSTOMISE
+ select DVB_STV090x if !DVB_FE_CUSTOMISE
+ select DVB_STV6110x if !DVB_FE_CUSTOMISE
help
Support for simple SAA7146 based DVB cards (so called Budget-
or Nova-PCI cards) without onboard MPEG2 decoder, and without
file:e48380c48990165955a9dadeb80d90a758aa6e46 -> file:95a463c1ef85f20b2efce59f1ff4cb8558406c75
--- a/drivers/media/dvb/ttpci/budget.c
+++ b/drivers/media/dvb/ttpci/budget.c
@@ -643,9 +643,6 @@ static void frontend_init(struct budget
&budget->i2c_adap,
&tt1600_isl6423_config);
- } else {
- dvb_frontend_detach(budget->dvb_frontend);
- budget->dvb_frontend = NULL;
}
}
break;
file:a6724019c66f5d9d121edd285a89e73772e7e350 -> file:d258ed719b7d75e63895eb475d99082a9b9eebf6
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -4468,6 +4468,7 @@ static int __devinit bttv_probe(struct p
request_modules(btv);
}
+ init_bttv_i2c_ir(btv);
bttv_input_init(btv);
/* everything is fine */
file:beda363418b0e6f28c5bc1ed1342ef16a9676817 -> file:3eb7c2952c8bd993349ab1cc4bd944ea8791b49a
--- a/drivers/media/video/bt8xx/bttv-i2c.c
+++ b/drivers/media/video/bt8xx/bttv-i2c.c
@@ -388,7 +388,12 @@ int __devinit init_bttv_i2c(struct bttv
if (0 == btv->i2c_rc && i2c_scan)
do_i2c_scan(btv->c.v4l2_dev.name, &btv->i2c_client);
- /* Instantiate the IR receiver device, if present */
+ return btv->i2c_rc;
+}
+
+/* Instantiate the I2C IR receiver device, if present */
+void __devinit init_bttv_i2c_ir(struct bttv *btv)
+{
if (0 == btv->i2c_rc) {
struct i2c_board_info info;
/* The external IR receiver is at i2c address 0x34 (0x35 for
@@ -408,7 +413,6 @@ int __devinit init_bttv_i2c(struct bttv
strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list);
}
- return btv->i2c_rc;
}
int __devexit fini_bttv_i2c(struct bttv *btv)
file:a1d0e9c9f2866ea3a11b24f4357ac31547899b4b -> file:6cccc2a17eee074f121fee7aabbb581223df1905
--- a/drivers/media/video/bt8xx/bttvp.h
+++ b/drivers/media/video/bt8xx/bttvp.h
@@ -279,6 +279,7 @@ extern unsigned int bttv_debug;
extern unsigned int bttv_gpio;
extern void bttv_gpio_tracking(struct bttv *btv, char *comment);
extern int init_bttv_i2c(struct bttv *btv);
+extern void init_bttv_i2c_ir(struct bttv *btv);
extern int fini_bttv_i2c(struct bttv *btv);
#define bttv_printk if (bttv_verbose) printk
file:4172cb3874207428556d5d75e99d46e1d5f6a38f -> file:d4746e06451692f8c853bf8c73924c87f6ad70b3
--- a/drivers/media/video/cx23885/cx23885-i2c.c
+++ b/drivers/media/video/cx23885/cx23885-i2c.c
@@ -365,7 +365,17 @@ int cx23885_i2c_register(struct cx23885_
memset(&info, 0, sizeof(struct i2c_board_info));
strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
- i2c_new_probed_device(&bus->i2c_adap, &info, addr_list);
+ /*
+ * We can't call i2c_new_probed_device() because it uses
+ * quick writes for probing and the IR receiver device only
+ * replies to reads.
+ */
+ if (i2c_smbus_xfer(&bus->i2c_adap, addr_list[0], 0,
+ I2C_SMBUS_READ, 0, I2C_SMBUS_QUICK,
+ NULL) >= 0) {
+ info.addr = addr_list[0];
+ i2c_new_device(&bus->i2c_adap, &info);
+ }
}
return bus->i2c_rc;
file:ee1ca39db06ad684758dc4da0bfa8b35aa8e1f56 -> file:fb39f11845583a86dcb463745d66d498bc873efe
--- a/drivers/media/video/cx88/cx88-i2c.c
+++ b/drivers/media/video/cx88/cx88-i2c.c
@@ -188,10 +188,24 @@ int cx88_i2c_init(struct cx88_core *core
0x18, 0x6b, 0x71,
I2C_CLIENT_END
};
+ const unsigned short *addrp;
memset(&info, 0, sizeof(struct i2c_board_info));
strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
- i2c_new_probed_device(&core->i2c_adap, &info, addr_list);
+ /*
+ * We can't call i2c_new_probed_device() because it uses
+ * quick writes for probing and at least some IR receiver
+ * devices only reply to reads.
+ */
+ for (addrp = addr_list; *addrp != I2C_CLIENT_END; addrp++) {
+ if (i2c_smbus_xfer(&core->i2c_adap, *addrp, 0,
+ I2C_SMBUS_READ, 0,
+ I2C_SMBUS_QUICK, NULL) >= 0) {
+ info.addr = *addrp;
+ i2c_new_device(&core->i2c_adap, &info);
+ break;
+ }
+ }
}
return core->i2c_rc;
}
file:db749461e5c61126a73bbabd7710d66b8fea5455 -> file:efddf15d498cac67ecbfdd4c557dbd19e2fb1e68
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -610,6 +610,7 @@ static int dvb_fini(struct em28xx *dev)
if (dev->dvb) {
unregister_dvb(dev->dvb);
+ kfree(dev->dvb);
dev->dvb = NULL;
}
file:f8328b9efae53506ed4ff7822c1e999e46058105 -> file:d61767cd7a519b1698c0e9b541e4eaea44843040
--- a/drivers/media/video/gspca/mr97310a.c
+++ b/drivers/media/video/gspca/mr97310a.c
@@ -530,6 +530,12 @@ static int start_cif_cam(struct gspca_de
{0x13, 0x00, {0x01}, 1},
{0, 0, {0}, 0}
};
+ /* Without this command the cam won't work with USB-UHCI */
+ gspca_dev->usb_buf[0] = 0x0a;
+ gspca_dev->usb_buf[1] = 0x00;
+ err_code = mr_write(gspca_dev, 2);
+ if (err_code < 0)
+ return err_code;
err_code = sensor_write_regs(gspca_dev, cif_sensor1_init_data,
ARRAY_SIZE(cif_sensor1_init_data));
}
file:bfae63f5584c408f555f63b0a5ef8124fa566f5f -> file:7878182a67b2c7ee9d82cf6b7e15446c363b3a4e
--- a/drivers/media/video/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -497,8 +497,6 @@ static const __devinitdata struct usb_de
{USB_DEVICE(0x046D, 0x08F5), .driver_info = BRIDGE_ST6422 },
/* QuickCam Messenger (new) */
{USB_DEVICE(0x046D, 0x08F6), .driver_info = BRIDGE_ST6422 },
- /* QuickCam Messenger (new) */
- {USB_DEVICE(0x046D, 0x08DA), .driver_info = BRIDGE_ST6422 },
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
file:50b415e07edaba9280516850d8856a487cc29d44 -> file:f7f7e04cf4853e02a5c0cd11d051433e6387f1a5
--- a/drivers/media/video/pwc/pwc-ctrl.c
+++ b/drivers/media/video/pwc/pwc-ctrl.c
@@ -753,7 +753,7 @@ int pwc_set_shutter_speed(struct pwc_dev
buf[0] = 0xff; /* fixed */
ret = send_control_msg(pdev,
- SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, sizeof(buf));
+ SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, 1);
if (!mode && ret >= 0) {
if (value < 0)
file:4a293b4444593be0e5c54505e434e183adca0f20 -> file:0ca39ec4ba8e7a640c2a19aeedbd562119df9f99
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -826,6 +826,13 @@ int uvc_query_v4l2_ctrl(struct uvc_video
ret = 0;
goto out;
+ case V4L2_CTRL_TYPE_BUTTON:
+ v4l2_ctrl->minimum = 0;
+ v4l2_ctrl->maximum = 0;
+ v4l2_ctrl->step = 0;
+ ret = 0;
+ goto out;
+
default:
break;
}
file:8756be5691544d6c496dcf2803bc24adeed8c3d5 -> file:450ac705c05735fda966fd0c8ed068104b2142fb
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -58,6 +58,11 @@ static struct uvc_format_desc uvc_fmts[]
.fcc = V4L2_PIX_FMT_YUYV,
},
{
+ .name = "YUV 4:2:2 (YUYV)",
+ .guid = UVC_GUID_FORMAT_YUY2_ISIGHT,
+ .fcc = V4L2_PIX_FMT_YUYV,
+ },
+ {
.name = "YUV 4:2:0 (NV12)",
.guid = UVC_GUID_FORMAT_NV12,
.fcc = V4L2_PIX_FMT_NV12,
@@ -83,11 +88,16 @@ static struct uvc_format_desc uvc_fmts[]
.fcc = V4L2_PIX_FMT_UYVY,
},
{
- .name = "Greyscale",
+ .name = "Greyscale (8-bit)",
.guid = UVC_GUID_FORMAT_Y800,
.fcc = V4L2_PIX_FMT_GREY,
},
{
+ .name = "Greyscale (16-bit)",
+ .guid = UVC_GUID_FORMAT_Y16,
+ .fcc = V4L2_PIX_FMT_Y16,
+ },
+ {
.name = "RGB Bayer",
.guid = UVC_GUID_FORMAT_BY8,
.fcc = V4L2_PIX_FMT_SBGGR8,
@@ -2048,6 +2058,15 @@ static struct usb_device_id uvc_ids[] =
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_STREAM_NO_FID },
+ /* Syntek (Packard Bell EasyNote MX52) */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x174f,
+ .idProduct = 0x8a12,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_STREAM_NO_FID },
/* Syntek (Asus F9SG) */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2112,6 +2131,15 @@ static struct usb_device_id uvc_ids[] =
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_PROBE_MINMAX },
+ /* Arkmicro unbranded */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x18ec,
+ .idProduct = 0x3290,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_PROBE_DEF },
/* Bodelin ProScopeHR */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_DEV_HI
file:e7958aa454cefa28603536e40a7ac65089bf309d -> file:64007b90bd733b281ee30b68cd7362607fcad412
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -112,6 +112,9 @@ struct uvc_xu_control {
#define UVC_GUID_FORMAT_YUY2 \
{ 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_YUY2_ISIGHT \
+ { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x38, 0x9b, 0x71}
#define UVC_GUID_FORMAT_NV12 \
{ 'N', 'V', '1', '2', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
@@ -127,11 +130,13 @@ struct uvc_xu_control {
#define UVC_GUID_FORMAT_Y800 \
{ 'Y', '8', '0', '0', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y16 \
+ { 'Y', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
#define UVC_GUID_FORMAT_BY8 \
{ 'B', 'Y', '8', ' ', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-
/* ------------------------------------------------------------------------
* Driver specific constants.
*/
file:9b2e2198aee9dd07c12328c026fd7712bac95c03 -> file:352acd05c46b93c417596025ebf12be3b423dc01
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -621,11 +621,8 @@ __mptctl_ioctl(struct file *file, unsign
*/
iocnumX = khdr.iocnum & 0xFF;
if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) ||
- (iocp == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_ioctl() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnumX);
+ (iocp == NULL))
return -ENODEV;
- }
if (!iocp->active) {
printk(KERN_DEBUG MYNAM "%s::mptctl_ioctl() @%d - Controller disabled.\n",
file:6cea7181ed73b28aad0e3b62f1f4916e832e6623 -> file:f6227340a5d0791324b2e26495e11e512bca5b5c
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -792,11 +792,36 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_F
* precedence!
*/
sc->result = (DID_OK << 16) | scsi_status;
- if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) {
- /* Have already saved the status and sense data
+ if (!(scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID)) {
+
+ /*
+ * For an errata on LSI53C1030:
+ * when the length of the requested data
+ * and the transferred data differ
+ * for a READ or VERIFY command,
+ * DID_SOFT_ERROR is set.
*/
- ;
- } else {
+ if (ioc->bus_type == SPI) {
+ if (pScsiReq->CDB[0] == READ_6 ||
+ pScsiReq->CDB[0] == READ_10 ||
+ pScsiReq->CDB[0] == READ_12 ||
+ pScsiReq->CDB[0] == READ_16 ||
+ pScsiReq->CDB[0] == VERIFY ||
+ pScsiReq->CDB[0] == VERIFY_16) {
+ if (scsi_bufflen(sc) !=
+ xfer_cnt) {
+ sc->result =
+ DID_SOFT_ERROR << 16;
+ printk(KERN_WARNING "Errata"
+ "on LSI53C1030 occurred."
+ "sc->req_bufflen=0x%02x,"
+ "xfer_cnt=0x%02x\n",
+ scsi_bufflen(sc),
+ xfer_cnt);
+ }
+ }
+ }
+
if (xfer_cnt < sc->underflow) {
if (scsi_status == SAM_STAT_BUSY)
sc->result = SAM_STAT_BUSY;
@@ -835,7 +860,58 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_F
sc->result = (DID_OK << 16) | scsi_status;
if (scsi_state == 0) {
;
- } else if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) {
+ } else if (scsi_state &
+ MPI_SCSI_STATE_AUTOSENSE_VALID) {
+
+ /*
+ * For potential trouble on LSI53C1030
+ * (date: 2007.xx):
+ * check whether the length of the
+ * request data equals the length of
+ * the transfer plus the residual.
+ * MEDIUM_ERROR is set for incorrect data.
+ */
+ if ((ioc->bus_type == SPI) &&
+ (sc->sense_buffer[2] & 0x20)) {
+ u32 difftransfer;
+ difftransfer =
+ sc->sense_buffer[3] << 24 |
+ sc->sense_buffer[4] << 16 |
+ sc->sense_buffer[5] << 8 |
+ sc->sense_buffer[6];
+ if (((sc->sense_buffer[3] & 0x80) ==
+ 0x80) && (scsi_bufflen(sc)
+ != xfer_cnt)) {
+ sc->sense_buffer[2] =
+ MEDIUM_ERROR;
+ sc->sense_buffer[12] = 0xff;
+ sc->sense_buffer[13] = 0xff;
+ printk(KERN_WARNING"Errata"
+ "on LSI53C1030 occurred."
+ "sc->req_bufflen=0x%02x,"
+ "xfer_cnt=0x%02x\n" ,
+ scsi_bufflen(sc),
+ xfer_cnt);
+ }
+ if (((sc->sense_buffer[3] & 0x80)
+ != 0x80) &&
+ (scsi_bufflen(sc) !=
+ xfer_cnt + difftransfer)) {
+ sc->sense_buffer[2] =
+ MEDIUM_ERROR;
+ sc->sense_buffer[12] = 0xff;
+ sc->sense_buffer[13] = 0xff;
+ printk(KERN_WARNING
+ "Errata on LSI53C1030 occurred"
+ "sc->req_bufflen=0x%02x,"
+ " xfer_cnt=0x%02x,"
+ "difftransfer=0x%02x\n",
+ scsi_bufflen(sc),
+ xfer_cnt,
+ difftransfer);
+ }
+ }
+
/*
* If running against circa 200003dd 909 MPT f/w,
* may get this (AUTOSENSE_VALID) for actual TASK_SET_FULL
file:dbf4de39754d1f4009edfdb02f4bfa1e80e56baa -> file:69698e504f3d019f0b2cd3ac66887118af040b63
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -165,8 +165,8 @@ static struct pci_device_id com20020pci_
{ 0x1571, 0xa204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{ 0x1571, 0xa205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{ 0x1571, 0xa206, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
- { 0x10B5, 0x9030, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
- { 0x10B5, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
+ { 0x10B5, 0x9030, 0x10B5, 0x2978, 0, 0, ARC_CAN_10MBIT },
+ { 0x10B5, 0x9050, 0x10B5, 0x2273, 0, 0, ARC_CAN_10MBIT },
{ 0x14BA, 0x6000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{ 0x10B5, 0x2200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{0,}
file:60edb9f232bb8a6d8ce0623f7346ddab5fe3ef52 -> file:b0fb7254e2cb75b7839ee334361576356d441ac9
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -394,11 +394,13 @@ static const struct ethtool_ops atl1e_et
.get_eeprom = atl1e_get_eeprom,
.set_eeprom = atl1e_set_eeprom,
.get_tx_csum = atl1e_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_hw_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
#ifdef NETIF_F_TSO
.get_tso = ethtool_op_get_tso,
#endif
+ .set_tso = ethtool_op_set_tso,
};
void atl1e_set_ethtool_ops(struct net_device *netdev)
file:08cddb6ff740f4dfac35677e91424378bcf3b2e9 -> file:a9aa957f3a7e6d347426e041ea7ca6bdeeedb764
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -4752,8 +4752,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 res
rc = bnx2_alloc_bad_rbuf(bp);
}
- if (bp->flags & BNX2_FLAG_USING_MSIX)
+ if (bp->flags & BNX2_FLAG_USING_MSIX) {
bnx2_setup_msix_tbl(bp);
+ /* Prevent MSIX table reads and write from timing out */
+ REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
+ BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
+ }
return rc;
}
file:16d2ecd2a3b7b4fa08d7ce95c8e342cdc3788856 -> file:9463e5db95666eb7b6838e4da045a7f3c2026557
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -84,6 +84,20 @@ static struct can_bittiming_const sja100
.brp_inc = 1,
};
+static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
+{
+ unsigned long flags;
+
+ /*
+ * The command register needs some locking and time to settle
+ * the write_reg() operation - especially on SMP systems.
+ */
+ spin_lock_irqsave(&priv->cmdreg_lock, flags);
+ priv->write_reg(priv, REG_CMR, val);
+ priv->read_reg(priv, REG_SR);
+ spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
+}
+
static int sja1000_probe_chip(struct net_device *dev)
{
struct sja1000_priv *priv = netdev_priv(dev);
@@ -279,7 +293,7 @@ static netdev_tx_t sja1000_start_xmit(st
can_put_echo_skb(skb, dev, 0);
- priv->write_reg(priv, REG_CMR, CMD_TR);
+ sja1000_write_cmdreg(priv, CMD_TR);
return NETDEV_TX_OK;
}
@@ -334,7 +348,7 @@ static void sja1000_rx(struct net_device
cf->data[i++] = 0;
/* release receive buffer */
- priv->write_reg(priv, REG_CMR, CMD_RRB);
+ sja1000_write_cmdreg(priv, CMD_RRB);
netif_rx(skb);
@@ -368,7 +382,7 @@ static int sja1000_err(struct net_device
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
stats->rx_over_errors++;
stats->rx_errors++;
- priv->write_reg(priv, REG_CMR, CMD_CDO); /* clear bit */
+ sja1000_write_cmdreg(priv, CMD_CDO); /* clear bit */
}
if (isrc & IRQ_EI) {
file:302d2c763ad7cfb3c2a18da3a2911e2cf786b876 -> file:cfd3f57e4ab53076b5c97e32a4f1bf5cd8298aeb
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -165,6 +165,7 @@ struct sja1000_priv {
void __iomem *reg_base; /* ioremap'ed address to registers */
unsigned long irq_flags; /* for request_irq() */
+ spinlock_t cmdreg_lock; /* lock for concurrent cmd register writes */
u16 flags; /* custom mode flags */
u8 ocr; /* output control register */
file:61f9da2b49431d88056daa1bbcb4b6bceaee86b7 -> file:1cace005bff202ae4ff0c1d3dfb7dc81d4f0cb3f
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -1176,7 +1176,8 @@ static int __devinit cpmac_probe(struct
if (netif_msg_drv(priv))
printk(KERN_ERR "%s: Could not attach to PHY\n",
dev->name);
- return PTR_ERR(priv->phy);
+ rc = PTR_ERR(priv->phy);
+ goto fail;
}
if ((rc = register_netdev(dev))) {
file:5248f9e0b2f4c4a934a78cf8209381da57077978 -> file:35cd36729155ee18a7dcd44d883890a980c0b87d
--- a/drivers/net/cxgb3/ael1002.c
+++ b/drivers/net/cxgb3/ael1002.c
@@ -934,7 +934,7 @@ static struct cphy_ops xaui_direct_ops =
int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops)
{
- cphy_init(phy, adapter, MDIO_PRTAD_NONE, &xaui_direct_ops, mdio_ops,
+ cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops,
SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
"10GBASE-CX4");
return 0;
file:31b8bef49d2e1a15323634f29b72b8453815b0e8 -> file:3f5eb81cc2aa34718e0646299ada34e61683ac45
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -471,17 +471,13 @@ static uint32_t dm9000_get_rx_csum(struc
return dm->rx_csum;
}
-static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
+static int dm9000_set_rx_csum_unlocked(struct net_device *dev, uint32_t data)
{
board_info_t *dm = to_dm9000_board(dev);
- unsigned long flags;
if (dm->can_csum) {
dm->rx_csum = data;
-
- spin_lock_irqsave(&dm->lock, flags);
iow(dm, DM9000_RCSR, dm->rx_csum ? RCSR_CSUM : 0);
- spin_unlock_irqrestore(&dm->lock, flags);
return 0;
}
@@ -489,6 +485,19 @@ static int dm9000_set_rx_csum(struct net
return -EOPNOTSUPP;
}
+static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
+{
+ board_info_t *dm = to_dm9000_board(dev);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dm->lock, flags);
+ ret = dm9000_set_rx_csum_unlocked(dev, data);
+ spin_unlock_irqrestore(&dm->lock, flags);
+
+ return ret;
+}
+
static int dm9000_set_tx_csum(struct net_device *dev, uint32_t data)
{
board_info_t *dm = to_dm9000_board(dev);
@@ -667,7 +676,7 @@ static unsigned char dm9000_type_to_char
* Set DM9000 multicast address
*/
static void
-dm9000_hash_table(struct net_device *dev)
+dm9000_hash_table_unlocked(struct net_device *dev)
{
board_info_t *db = netdev_priv(dev);
struct dev_mc_list *mcptr = dev->mc_list;
@@ -676,12 +685,9 @@ dm9000_hash_table(struct net_device *dev
u32 hash_val;
u16 hash_table[4];
u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
- unsigned long flags;
dm9000_dbg(db, 1, "entering %s\n", __func__);
- spin_lock_irqsave(&db->lock, flags);
-
for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
iow(db, oft, dev->dev_addr[i]);
@@ -711,6 +717,16 @@ dm9000_hash_table(struct net_device *dev
}
iow(db, DM9000_RCR, rcr);
+}
+
+static void
+dm9000_hash_table(struct net_device *dev)
+{
+ board_info_t *db = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&db->lock, flags);
+ dm9000_hash_table_unlocked(dev);
spin_unlock_irqrestore(&db->lock, flags);
}
@@ -729,7 +745,7 @@ dm9000_init_dm9000(struct net_device *de
db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
/* Checksum mode */
- dm9000_set_rx_csum(dev, db->rx_csum);
+ dm9000_set_rx_csum_unlocked(dev, db->rx_csum);
/* GPIO0 on pre-activate PHY */
iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
@@ -749,7 +765,7 @@ dm9000_init_dm9000(struct net_device *de
iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */
/* Set address filter table */
- dm9000_hash_table(dev);
+ dm9000_hash_table_unlocked(dev);
imr = IMR_PAR | IMR_PTM | IMR_PRM;
if (db->type != TYPE_DM9000E)
file:aaea41ef794dc1ee1fe23d124a4192b058e146e5 -> file:e8e87a723fada3a24da353d1938b7c974b203aae
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -356,6 +356,7 @@ enum e1e_registers {
#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
+#define E1000_DEV_ID_ICH8_82567V_3 0x1501
#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
#define E1000_DEV_ID_ICH8_IGP_C 0x104B
file:eff3f478365556bf00a5ed996a398bf051190105 -> file:c688b55c1b7564adc002f18a5da93bf5170f9a70
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -3209,6 +3209,7 @@ void e1000e_disable_gig_wol_ich8lan(stru
u32 phy_ctrl;
switch (hw->mac.type) {
+ case e1000_ich8lan:
case e1000_ich9lan:
case e1000_ich10lan:
case e1000_pchlan:
file:21545306bc1d433d6b0decf2325f0a1481a4d3b1 -> file:433f4dde9ea86d2db5c69e2ece5f802901ca1f7f
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -665,6 +665,8 @@ static bool e1000_clean_tx_irq(struct e1
i = 0;
}
+ if (i == tx_ring->next_to_use)
+ break;
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
}
@@ -5360,6 +5362,7 @@ static struct pci_device_id e1000_pci_tb
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
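The added check in e1000_clean_tx_irq() stops the cleanup loop once the clean index reaches next_to_use, so the driver never walks past the descriptors it actually handed to hardware (for instance if a corrupted ring keeps reporting descriptors as done). A toy ring walk illustrating the guard; the ring size, indices and done[] stand-in are invented for the sketch:

  #include <stdio.h>
  #include <stdbool.h>

  #define RING_SIZE 8

  int main(void)
  {
      /* Pretend every descriptor reports "done" (e.g. after corruption). */
      bool done[RING_SIZE] = { true, true, true, true, true, true, true, true };
      unsigned int next_to_clean = 2, next_to_use = 5;
      unsigned int i = next_to_clean;

      while (done[i]) {
          printf("cleaning descriptor %u\n", i);
          i = (i + 1) % RING_SIZE;
          if (i == next_to_use)   /* the new guard: stop at the owned range */
              break;
      }
      return 0;
  }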
file:3116601dbfea859984fa480edd0ecd4db5893e4e -> file:7cd446d0f51a96793c463dfc2ff1923407b22145
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5900,7 +5900,7 @@ static int __devinit nv_probe(struct pci
/* Limit the number of tx's outstanding for hw bug */
if (id->driver_data & DEV_NEED_TX_LIMIT) {
np->tx_limit = 1;
- if ((id->driver_data & DEV_NEED_TX_LIMIT2) &&
+ if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
pci_dev->revision >= 0xA2)
np->tx_limit = 0;
}
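The reworked test only treats DEV_NEED_TX_LIMIT2 as set when every bit of the mask is present: a bare '&' is already non-zero for a device that sets just one of the bits a multi-bit flag shares with a weaker flag. A small sketch of the difference, with invented flag values:

  #include <stdio.h>

  /* Invented values: LIMIT2 deliberately includes the LIMIT bit. */
  #define DEV_NEED_TX_LIMIT   0x0100
  #define DEV_NEED_TX_LIMIT2  (DEV_NEED_TX_LIMIT | 0x0200)

  int main(void)
  {
      unsigned long driver_data = DEV_NEED_TX_LIMIT;  /* only the weak flag */

      /* Bare '&' matches because the flags share a bit... */
      printf("bare &       : %d\n", (driver_data & DEV_NEED_TX_LIMIT2) != 0);
      /* ...comparing against the full mask matches only genuine LIMIT2 devices. */
      printf("== full mask : %d\n",
             (driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2);
      return 0;
  }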
file:f8f5772557cefa661d864d0e0dfa1fde6c467ac0 -> file:33352ffa9669cb33bc25b16542bb4589f4d526a4
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -81,6 +81,7 @@ static s32 igb_get_invariants_82575(stru
break;
case E1000_DEV_ID_82576:
case E1000_DEV_ID_82576_NS:
+ case E1000_DEV_ID_82576_NS_SERDES:
case E1000_DEV_ID_82576_FIBER:
case E1000_DEV_ID_82576_SERDES:
case E1000_DEV_ID_82576_QUAD_COPPER:
@@ -1167,9 +1168,18 @@ static s32 igb_read_mac_addr_82575(struc
{
s32 ret_val = 0;
- if (igb_check_alt_mac_addr(hw))
- ret_val = igb_read_mac_addr(hw);
+ /*
+ * If there's an alternate MAC address place it in RAR0
+ * so that it will override the Si installed default perm
+ * address.
+ */
+ ret_val = igb_check_alt_mac_addr(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = igb_read_mac_addr(hw);
+out:
return ret_val;
}
file:119869b1124dd0c7495e16da522fc8fee2748702 -> file:72081df3a39714b7e99a37b8e91ef32e808347c4
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -42,6 +42,7 @@ struct e1000_hw;
#define E1000_DEV_ID_82576_SERDES 0x10E7
#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
#define E1000_DEV_ID_82576_NS 0x150A
+#define E1000_DEV_ID_82576_NS_SERDES 0x1518
#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
#define E1000_DEV_ID_82575EB_COPPER 0x10A7
#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
@@ -52,6 +53,8 @@ struct e1000_hw;
#define E1000_FUNC_1 1
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
+
enum e1000_mac_type {
e1000_undefined = 0,
e1000_82575,
file:7d76bb085e105923080bb8304209f0874c5eeff6 -> file:d4fa82c45fb943a05848a9690a5cd2c7a286e203
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -185,13 +185,12 @@ s32 igb_check_alt_mac_addr(struct e1000_
}
if (nvm_alt_mac_addr_offset == 0xFFFF) {
- ret_val = -(E1000_NOT_IMPLEMENTED);
+ /* There is no Alternate MAC Address */
goto out;
}
if (hw->bus.func == E1000_FUNC_1)
- nvm_alt_mac_addr_offset += ETH_ALEN/sizeof(u16);
-
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
for (i = 0; i < ETH_ALEN; i += 2) {
offset = nvm_alt_mac_addr_offset + (i >> 1);
ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
@@ -206,14 +205,16 @@ s32 igb_check_alt_mac_addr(struct e1000_
/* if multicast bit is set, the alternate address will not be used */
if (alt_mac_addr[0] & 0x01) {
- ret_val = -(E1000_NOT_IMPLEMENTED);
+ hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
goto out;
}
- for (i = 0; i < ETH_ALEN; i++)
- hw->mac.addr[i] = hw->mac.perm_addr[i] = alt_mac_addr[i];
-
- hw->mac.ops.rar_set(hw, hw->mac.perm_addr, 0);
+ /*
+ * We have a valid alternate MAC address, and we want to treat it the
+ * same as the normal permanent MAC address stored by the HW into the
+ * RAR. Do this by mapping this address into RAR0.
+ */
+ hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
out:
return ret_val;
file:714c3a4a44eff5a1d43b538a6c6cf380bfea445c -> file:8111776927ae30ffcbc60ffb56daebbe80f744a0
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -63,6 +63,7 @@ static const struct e1000_info *igb_info
static struct pci_device_id igb_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
file:34b04924c8a1f6412f7231306833c3ceebafbe63 -> file:9c4214993b8232b5e230f41ca52dd54900121edd
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -332,6 +332,7 @@ static enum ixgbe_media_type ixgbe_get_m
case IXGBE_DEV_ID_82599_KX4:
case IXGBE_DEV_ID_82599_KX4_MEZZ:
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ case IXGBE_DEV_ID_82599_KR:
case IXGBE_DEV_ID_82599_XAUI_LOM:
/* Default device ID is mezzanine card KX/KX4 */
media_type = ixgbe_media_type_backplane;
file:a456578b85786da64b32c75d98fafbe169b12893 -> file:a873c5d7931a5e784b09a8786a169ad42ddd8eef
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -96,6 +96,8 @@ static struct pci_device_id ixgbe_pci_tb
board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
+ board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
@@ -5239,9 +5241,13 @@ static int ixgbe_maybe_stop_tx(struct ne
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
+ int txq = smp_processor_id();
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
- return smp_processor_id();
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ while (unlikely(txq >= dev->real_num_tx_queues))
+ txq -= dev->real_num_tx_queues;
+ return txq;
+ }
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
return (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13;
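ixgbe_select_queue() now folds the raw processor id back into the valid queue range instead of returning it directly, since smp_processor_id() can exceed real_num_tx_queues. A tiny sketch of the folding loop with illustrative numbers:

  #include <stdio.h>

  int main(void)
  {
      unsigned int real_num_tx_queues = 8;
      unsigned int txq = 21;              /* pretend smp_processor_id() gave 21 */

      /* Same reduction as the patch: subtract until the index is in range.
       * When the CPU count is only slightly above the queue count this
       * loop runs at most a couple of times. */
      while (txq >= real_num_tx_queues)
          txq -= real_num_tx_queues;

      printf("selected tx queue %u\n", txq);    /* 5 */
      return 0;
  }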
file:ef4bdd58e0162e96c1bfad79980d2fd3dc3a73bb -> file:7d66f5bbbec9c6d0ca4662f1ff624aa9b09881bb
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -50,6 +50,7 @@
#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
#define IXGBE_DEV_ID_82599_KX4 0x10F7
#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
+#define IXGBE_DEV_ID_82599_KR 0x1517
#define IXGBE_DEV_ID_82599_CX4 0x10F9
#define IXGBE_DEV_ID_82599_SFP 0x10FB
#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
file:1d2a32544ed2d50189d53c7197f976834a5983e3 -> file:3bb3a6da36c26f57240cc2fbae42bce68b27315b
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -946,6 +946,8 @@ jme_alloc_and_feed_skb(struct jme_adapte
jme->jme_vlan_rx(skb, jme->vlgrp,
le16_to_cpu(rxdesc->descwb.vlan));
NET_STAT(jme).rx_bytes += 4;
+ } else {
+ dev_kfree_skb(skb);
}
} else {
jme->jme_rx(skb);
@@ -2085,12 +2087,45 @@ jme_tx_timeout(struct net_device *netdev
jme_reset_link(jme);
}
+static inline void jme_pause_rx(struct jme_adapter *jme)
+{
+ atomic_dec(&jme->link_changing);
+
+ jme_set_rx_pcc(jme, PCC_OFF);
+ if (test_bit(JME_FLAG_POLL, &jme->flags)) {
+ JME_NAPI_DISABLE(jme);
+ } else {
+ tasklet_disable(&jme->rxclean_task);
+ tasklet_disable(&jme->rxempty_task);
+ }
+}
+
+static inline void jme_resume_rx(struct jme_adapter *jme)
+{
+ struct dynpcc_info *dpi = &(jme->dpi);
+
+ if (test_bit(JME_FLAG_POLL, &jme->flags)) {
+ JME_NAPI_ENABLE(jme);
+ } else {
+ tasklet_hi_enable(&jme->rxclean_task);
+ tasklet_hi_enable(&jme->rxempty_task);
+ }
+ dpi->cur = PCC_P1;
+ dpi->attempt = PCC_P1;
+ dpi->cnt = 0;
+ jme_set_rx_pcc(jme, PCC_P1);
+
+ atomic_inc(&jme->link_changing);
+}
+
static void
jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
struct jme_adapter *jme = netdev_priv(netdev);
+ jme_pause_rx(jme);
jme->vlgrp = grp;
+ jme_resume_rx(jme);
}
static void
file:c146304d8d6ca6398b22a3bef00e10416719fe79 -> file:c0ceebccaa49ce38b75df1c9c30a67ec56c4b7b0
--- a/drivers/net/ks8851_mll.c
+++ b/drivers/net/ks8851_mll.c
@@ -854,8 +854,8 @@ static void ks_update_link_status(struct
static irqreturn_t ks_irq(int irq, void *pw)
{
- struct ks_net *ks = pw;
- struct net_device *netdev = ks->netdev;
+ struct net_device *netdev = pw;
+ struct ks_net *ks = netdev_priv(netdev);
u16 status;
/*this should be the first in IRQ handler */
file:04b382fcb8c881c2ab76fea05636deb63c64effd -> file:83eef8e35b769cf0ea144326b776f135cb5baec0
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -174,9 +174,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct m
if (chunk->nsg <= 0)
goto fail;
+ }
+ if (chunk->npages == MLX4_ICM_CHUNK_LEN)
chunk = NULL;
- }
npages -= 1 << cur_order;
} else {
file:5910df60c93eee6a16705ef350546aef6593107a -> file:b724d7faa3c8bad73241c5c6d76f6509f5bd289b
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -1178,7 +1178,8 @@ static int pppol2tp_xmit(struct ppp_chan
/* Calculate UDP checksum if configured to do so */
if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
skb->ip_summed = CHECKSUM_NONE;
- else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
+ else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
+ (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
skb->ip_summed = CHECKSUM_COMPLETE;
csum = skb_checksum(skb, 0, udp_len, 0);
uh->check = csum_tcpudp_magic(inet->saddr, inet->daddr,
file:0fe2fc90f207ebdf74d01620b50781fb2d556882 -> file:211b587195cc845ea89eefce9a044ccab4708081
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -186,7 +186,12 @@ static struct pci_device_id rtl8169_pci_
MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
-static int rx_copybreak = 200;
+/*
+ * we set our copybreak very high so that we don't have
+ * to allocate 16k frames all the time (see the note in
+ * rtl8169_open())
+ */
+static int rx_copybreak = 16383;
static int use_dac;
static struct {
u32 msg_enable;
@@ -2827,8 +2832,13 @@ static void rtl_rar_set(struct rtl8169_p
spin_lock_irq(&tp->lock);
RTL_W8(Cfg9346, Cfg9346_Unlock);
- RTL_W32(MAC0, low);
+
RTL_W32(MAC4, high);
+ RTL_R32(MAC4);
+
+ RTL_W32(MAC0, low);
+ RTL_R32(MAC0);
+
RTL_W8(Cfg9346, Cfg9346_Lock);
spin_unlock_irq(&tp->lock);
@@ -3245,9 +3255,13 @@ static void __devexit rtl8169_remove_one
}
static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
- struct net_device *dev)
+ unsigned int mtu)
{
- unsigned int max_frame = dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+ unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ if (max_frame != 16383)
+ printk(KERN_WARNING PFX "WARNING! Changing of MTU on this "
+ "NIC may lead to frame reception errors!\n");
tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
}
@@ -3259,7 +3273,17 @@ static int rtl8169_open(struct net_devic
int retval = -ENOMEM;
- rtl8169_set_rxbufsize(tp, dev);
+ /*
+ * Note that we use a magic value here; it's weird, I know.
+ * It's done because some subset of rtl8169 hardware suffers from
+ * a problem in which frames received that are longer than
+ * the size set in the RxMaxSize register return garbage sizes
+ * when received. To avoid this we need to turn off filtering,
+ * which is done by setting a value of 16383 in the RxMaxSize register
+ * and allocating 16k frames to handle the largest possible rx value;
+ * that's what the magic math below does.
+ */
+ rtl8169_set_rxbufsize(tp, 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN);
/*
* Rx and Tx desscriptors needs 256 bytes alignment.
@@ -3912,7 +3936,7 @@ static int rtl8169_change_mtu(struct net
rtl8169_down(dev);
- rtl8169_set_rxbufsize(tp, dev);
+ rtl8169_set_rxbufsize(tp, dev->mtu);
ret = rtl8169_init_ring(dev);
if (ret < 0)
@@ -4297,7 +4321,7 @@ static netdev_tx_t rtl8169_start_xmit(st
tp->cur_tx += frags + 1;
- smp_wmb();
+ wmb();
RTL_W8(TxPoll, NPQ); /* set polling bit */
@@ -4657,7 +4681,7 @@ static int rtl8169_poll(struct napi_stru
* until it does.
*/
tp->intr_mask = 0xffff;
- smp_wmb();
+ wmb();
RTL_W16(IntrMask, tp->intr_event);
}
@@ -4795,8 +4819,8 @@ static void rtl_set_rx_mode(struct net_d
mc_filter[1] = swab32(data);
}
- RTL_W32(MAR0 + 0, mc_filter[0]);
RTL_W32(MAR0 + 4, mc_filter[1]);
+ RTL_W32(MAR0 + 0, mc_filter[0]);
RTL_W32(RxConfig, tmp);
file:f3600b3eb8c5e68edc9be162ee7b28d66abeda36 -> file:a17aaeed096c86233471b3f52d8a3b2df8df4499
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -704,11 +704,24 @@ static void sky2_phy_power_down(struct s
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
+/* Enable Rx/Tx */
+static void sky2_enable_rx_tx(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ u16 reg;
+
+ reg = gma_read16(hw, port, GM_GP_CTRL);
+ reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
+ gma_write16(hw, port, GM_GP_CTRL, reg);
+}
+
/* Force a renegotiation */
static void sky2_phy_reinit(struct sky2_port *sky2)
{
spin_lock_bh(&sky2->phy_lock);
sky2_phy_init(sky2->hw, sky2->port);
+ sky2_enable_rx_tx(sky2);
spin_unlock_bh(&sky2->phy_lock);
}
@@ -1008,11 +1021,8 @@ static void sky2_prefetch_init(struct sk
static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot)
{
struct sky2_tx_le *le = sky2->tx_le + *slot;
- struct tx_ring_info *re = sky2->tx_ring + *slot;
*slot = RING_NEXT(*slot, sky2->tx_ring_size);
- re->flags = 0;
- re->skb = NULL;
le->ctrl = 0;
return le;
}
@@ -1580,8 +1590,7 @@ static unsigned tx_le_req(const struct s
return count;
}
-static void sky2_tx_unmap(struct pci_dev *pdev,
- const struct tx_ring_info *re)
+static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
{
if (re->flags & TX_MAP_SINGLE)
pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
@@ -1591,6 +1600,7 @@ static void sky2_tx_unmap(struct pci_dev
pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
pci_unmap_len(re, maplen),
PCI_DMA_TODEVICE);
+ re->flags = 0;
}
/*
@@ -1797,6 +1807,7 @@ static void sky2_tx_complete(struct sky2
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
+ re->skb = NULL;
dev_kfree_skb_any(skb);
sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
@@ -1931,7 +1942,6 @@ static void sky2_link_up(struct sky2_por
{
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
- u16 reg;
static const char *fc_name[] = {
[FC_NONE] = "none",
[FC_TX] = "tx",
@@ -1939,10 +1949,7 @@ static void sky2_link_up(struct sky2_por
[FC_BOTH] = "both",
};
- /* enable Rx/Tx */
- reg = gma_read16(hw, port, GM_GP_CTRL);
- reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
- gma_write16(hw, port, GM_GP_CTRL, reg);
+ sky2_enable_rx_tx(sky2);
gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
file:ba5d3fe753b694d58b93bedf2fc050f001995fef -> file:fd6622ca4cd5918ae5d5a596efe3f166dd16b429
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4995,7 +4995,7 @@ static void tg3_poll_controller(struct n
struct tg3 *tp = netdev_priv(dev);
for (i = 0; i < tp->irq_cnt; i++)
- tg3_interrupt(tp->napi[i].irq_vec, dev);
+ tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
@@ -5392,7 +5392,7 @@ static netdev_tx_t tg3_start_xmit_dma_bu
mss = 0;
if ((mss = skb_shinfo(skb)->gso_size) != 0) {
struct iphdr *iph;
- int tcp_opt_len, ip_tcp_len, hdr_len;
+ u32 tcp_opt_len, ip_tcp_len, hdr_len;
if (skb_header_cloned(skb) &&
pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
@@ -5423,8 +5423,10 @@ static netdev_tx_t tg3_start_xmit_dma_bu
IPPROTO_TCP,
0);
- if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
+ if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
+ mss |= hdr_len << 9;
+ else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
if (tcp_opt_len || iph->ihl > 5) {
int tsflags;
@@ -5459,6 +5461,9 @@ static netdev_tx_t tg3_start_xmit_dma_bu
would_hit_hwbug = 0;
+ if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
+ would_hit_hwbug = 1;
+
if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
would_hit_hwbug = 1;
else if (tg3_4g_overflow_test(mapping, len))
@@ -5482,6 +5487,10 @@ static netdev_tx_t tg3_start_xmit_dma_bu
tnapi->tx_buffers[entry].skb = NULL;
+ if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
+ len <= 8)
+ would_hit_hwbug = 1;
+
if (tg3_4g_overflow_test(mapping, len))
would_hit_hwbug = 1;
@@ -8159,6 +8168,7 @@ static int tg3_test_msi(struct tg3 *tp)
pci_disable_msi(tp->pdev);
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
+ tp->napi[0].irq_vec = tp->pdev->irq;
err = tg3_request_irq(tp, 0);
if (err)
@@ -12608,6 +12618,9 @@ static int __devinit tg3_get_invariants(
}
}
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
+
tp->irq_max = 1;
#ifdef TG3_NAPI
@@ -13975,8 +13988,7 @@ static int __devinit tg3_init_one(struct
goto err_out_iounmap;
}
- if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
dev->netdev_ops = &tg3_netdev_ops;
else
dev->netdev_ops = &tg3_netdev_ops_dma_bug;
file:bab7940158e65d208ca734f22d12e432620f69bb -> file:529f55ad16dbb8de49d1559f80f9b9ea23eb843e
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2759,6 +2759,9 @@ struct tg3 {
#define TG3_FLG3_TOGGLE_10_100_L1PLLPD 0x00008000
#define TG3_FLG3_PHY_IS_FET 0x00010000
#define TG3_FLG3_ENABLE_RSS 0x00020000
+#define TG3_FLG3_4G_DMA_BNDRY_BUG 0x00080000
+#define TG3_FLG3_40BIT_DMA_LIMIT_BUG 0x00100000
+#define TG3_FLG3_SHORT_DMA_BUG 0x00200000
struct timer_list timer;
u16 timer_counter;
file:1cc8cf4425d18a576e5c8829a1bbfb0461bb0f18 -> file:516713fa0a0570a4d3faf871fa4cf1089297981b
--- a/drivers/net/tulip/Kconfig
+++ b/drivers/net/tulip/Kconfig
@@ -101,6 +101,10 @@ config TULIP_NAPI_HW_MITIGATION
If in doubt, say Y.
+config TULIP_DM910X
+ def_bool y
+ depends on TULIP && SPARC
+
config DE4X5
tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA"
depends on PCI || EISA
file:a45ded0538b84db192a1d2ff6bbc3ce1eb1ad0f3 -> file:b94370f7eb5afd8e32e01b446cfa5ee54e7d52d1
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -92,6 +92,10 @@
#include <asm/uaccess.h>
#include <asm/irq.h>
+#ifdef CONFIG_TULIP_DM910X
+#include <linux/of.h>
+#endif
+
/* Board/System/Debug information/definition ---------------- */
#define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */
@@ -377,6 +381,23 @@ static int __devinit dmfe_init_one (stru
if (!printed_version++)
printk(version);
+ /*
+ * SPARC on-board DM910x chips should be handled by the main
+ * tulip driver, except for early DM9100s.
+ */
+#ifdef CONFIG_TULIP_DM910X
+ if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
+ ent->driver_data == PCI_DM9102_ID) {
+ struct device_node *dp = pci_device_to_OF_node(pdev);
+
+ if (dp && of_get_property(dp, "local-mac-address", NULL)) {
+ printk(KERN_INFO DRV_NAME
+ ": skipping on-board DM910x (use tulip)\n");
+ return -ENODEV;
+ }
+ }
+#endif
+
/* Init network device */
dev = alloc_etherdev(sizeof(*db));
if (dev == NULL)
file:6b2330e4206e12467c7e28c08e155a4c5bccf668 -> file:88bf54f8562c5d2de4be39f2c681c419394cce83
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -196,9 +196,13 @@ struct tulip_chip_table tulip_tbl[] = {
| HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
/* DM910X */
+#ifdef CONFIG_TULIP_DM910X
{ "Davicom DM9102/DM9102A", 128, 0x0001ebef,
HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
tulip_timer, tulip_media_task },
+#else
+ { NULL },
+#endif
/* RS7112 */
{ "Conexant LANfinity", 256, 0x0001ebef,
@@ -228,8 +232,10 @@ static struct pci_device_id tulip_pci_tb
{ 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
{ 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
+#ifdef CONFIG_TULIP_DM910X
{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
+#endif
{ 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
{ 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
@@ -1299,18 +1305,30 @@ static int __devinit tulip_init_one (str
}
/*
- * Early DM9100's need software CRC and the DMFE driver
+ * DM910x chips should be handled by the dmfe driver, except
+ * on-board chips on SPARC systems. Also, early DM9100s need
+ * software CRC which only the dmfe driver supports.
*/
- if (pdev->vendor == 0x1282 && pdev->device == 0x9100)
- {
- /* Read Chip revision */
- if (pdev->revision < 0x30)
- {
- printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
+#ifdef CONFIG_TULIP_DM910X
+ if (chip_idx == DM910X) {
+ struct device_node *dp;
+
+ if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
+ pdev->revision < 0x30) {
+ printk(KERN_INFO PFX
+ "skipping early DM9100 with Crc bug (use dmfe)\n");
+ return -ENODEV;
+ }
+
+ dp = pci_device_to_OF_node(pdev);
+ if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
+ printk(KERN_INFO PFX
+ "skipping DM910x expansion card (use dmfe)\n");
return -ENODEV;
}
}
+#endif
/*
* Looks for early PCI chipsets where people report hangs
file:4469f2451a6f660a20fc37d0ca9e47ec4114b787 -> file:b4b25ffa3ab7c6a199852604997d00869eda1f1d
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -1563,7 +1563,10 @@ static int ugeth_disable(struct ucc_geth
static void ugeth_quiesce(struct ucc_geth_private *ugeth)
{
- /* Wait for and prevent any further xmits. */
+ /* Prevent any further xmits, plus detach the device. */
+ netif_device_detach(ugeth->ndev);
+
+ /* Wait for any current xmits to finish. */
netif_tx_disable(ugeth->ndev);
/* Disable the interrupt to avoid NAPI rescheduling. */
@@ -1577,7 +1580,7 @@ static void ugeth_activate(struct ucc_ge
{
napi_enable(&ugeth->napi);
enable_irq(ugeth->ug_info->uf_info.irq);
- netif_tx_wake_all_queues(ugeth->ndev);
+ netif_device_attach(ugeth->ndev);
}
/* Called every time the controller might need to be made
@@ -3273,13 +3276,12 @@ static int ucc_geth_tx(struct net_device
/* Handle the transmitted buffer and release */
/* the BD to be used with the current frame */
- if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
+ skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
+ if (!skb)
break;
dev->stats.tx_packets++;
- skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
-
if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
skb_recycle_check(skb,
ugeth->ug_info->uf_info.max_rx_buf_length +
file:a2b30a10064f131e996657fb5f03dd600c528be8 -> file:9a6eedef4afc9f8d8fc312ed4222f35cb33dc06b
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -238,7 +238,7 @@ static int dm_write_shared_word(struct u
goto out;
dm_write_reg(dev, DM_SHARED_ADDR, phy ? (reg | 0x40) : reg);
- dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1c : 0x14);
+ dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1a : 0x12);
for (i = 0; i < DM_TIMEOUT; i++) {
u8 tmp;
file:1fd70583be444089b3c6f402477f204bb3128335 -> file:31a5d3c15ae96f290bc9c81611d876c38f512abf
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -102,6 +102,7 @@ static const int multicast_filter_limit
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
+#include <linux/workqueue.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
@@ -389,6 +390,7 @@ struct rhine_private {
struct net_device *dev;
struct napi_struct napi;
spinlock_t lock;
+ struct work_struct reset_task;
/* Frequently used values: keep some adjacent for cache effect. */
u32 quirks;
@@ -407,6 +409,7 @@ struct rhine_private {
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
+static void rhine_reset_task(struct work_struct *work);
static void rhine_tx_timeout(struct net_device *dev);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev);
@@ -775,6 +778,8 @@ static int __devinit rhine_init_one(stru
dev->irq = pdev->irq;
spin_lock_init(&rp->lock);
+ INIT_WORK(&rp->reset_task, rhine_reset_task);
+
rp->mii_if.dev = dev;
rp->mii_if.mdio_read = mdio_read;
rp->mii_if.mdio_write = mdio_write;
@@ -1179,22 +1184,18 @@ static int rhine_open(struct net_device
return 0;
}
-static void rhine_tx_timeout(struct net_device *dev)
+static void rhine_reset_task(struct work_struct *work)
{
- struct rhine_private *rp = netdev_priv(dev);
- void __iomem *ioaddr = rp->base;
-
- printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
- "%4.4x, resetting...\n",
- dev->name, ioread16(ioaddr + IntrStatus),
- mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
+ struct rhine_private *rp = container_of(work, struct rhine_private,
+ reset_task);
+ struct net_device *dev = rp->dev;
/* protect against concurrent rx interrupts */
disable_irq(rp->pdev->irq);
napi_disable(&rp->napi);
- spin_lock(&rp->lock);
+ spin_lock_bh(&rp->lock);
/* clear all descriptors */
free_tbufs(dev);
@@ -1206,7 +1207,7 @@ static void rhine_tx_timeout(struct net_
rhine_chip_reset(dev);
init_registers(dev);
- spin_unlock(&rp->lock);
+ spin_unlock_bh(&rp->lock);
enable_irq(rp->pdev->irq);
dev->trans_start = jiffies;
@@ -1214,6 +1215,19 @@ static void rhine_tx_timeout(struct net_
netif_wake_queue(dev);
}
+static void rhine_tx_timeout(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
+ "%4.4x, resetting...\n",
+ dev->name, ioread16(ioaddr + IntrStatus),
+ mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
+
+ schedule_work(&rp->reset_task);
+}
+
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev)
{
@@ -1830,10 +1844,11 @@ static int rhine_close(struct net_device
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
- spin_lock_irq(&rp->lock);
-
- netif_stop_queue(dev);
napi_disable(&rp->napi);
+ cancel_work_sync(&rp->reset_task);
+ netif_stop_queue(dev);
+
+ spin_lock_irq(&rp->lock);
if (debug > 1)
printk(KERN_DEBUG "%s: Shutting down ethercard, "
file:e04e5bee005ca36eaff7ebe8c9ccd4ae8c34e3ef -> file:74b9d7d4a3efae426b56d02ae70f5eec8d5d993c
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2186,8 +2186,6 @@ static int velocity_open(struct net_devi
/* Ensure chip is running */
pci_set_power_state(vptr->pdev, PCI_D0);
- velocity_give_many_rx_descs(vptr);
-
velocity_init_registers(vptr, VELOCITY_INIT_COLD);
ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED,
@@ -2199,6 +2197,8 @@ static int velocity_open(struct net_devi
goto out;
}
+ velocity_give_many_rx_descs(vptr);
+
mac_enable_int(vptr->mac_regs);
netif_start_queue(dev);
vptr->flags |= VELOCITY_FLAGS_OPENED;
@@ -2287,10 +2287,10 @@ static int velocity_change_mtu(struct ne
dev->mtu = new_mtu;
- velocity_give_many_rx_descs(vptr);
-
velocity_init_registers(vptr, VELOCITY_INIT_COLD);
+ velocity_give_many_rx_descs(vptr);
+
mac_enable_int(vptr->mac_regs);
netif_start_queue(dev);
file:b9e002fccbca22e20541136a3779c9f56bc2220c -> file:7e3788d543109d11f0bf711ffff203498655aebf
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -398,8 +398,7 @@ static void refill_work(struct work_stru
vi = container_of(work, struct virtnet_info, refill.work);
napi_disable(&vi->napi);
- try_fill_recv(vi, GFP_KERNEL);
- still_empty = (vi->num == 0);
+ still_empty = !try_fill_recv(vi, GFP_KERNEL);
napi_enable(&vi->napi);
/* In theory, this can happen: if we don't get any buffers in
file:abf896a7390e5172f9fdf00339102d96f7d1e17c -> file:6c26840bfe0bc67e490c1ee07fef4a7954ff442b
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -5254,11 +5254,7 @@ static int set_wep_key(struct airo_info
WepKeyRid wkr;
int rc;
- if (keylen == 0) {
- airo_print_err(ai->dev->name, "%s: key length to set was zero",
- __func__);
- return -1;
- }
+ WARN_ON(keylen == 0);
memset(&wkr, 0, sizeof(wkr));
wkr.len = cpu_to_le16(sizeof(wkr));
@@ -6404,11 +6400,7 @@ static int airo_set_encode(struct net_de
if (dwrq->length > MIN_KEY_SIZE)
key.len = MAX_KEY_SIZE;
else
- if (dwrq->length > 0)
- key.len = MIN_KEY_SIZE;
- else
- /* Disable the key */
- key.len = 0;
+ key.len = MIN_KEY_SIZE;
/* Check if the key is not marked as invalid */
if(!(dwrq->flags & IW_ENCODE_NOKEY)) {
/* Cleanup */
@@ -6589,12 +6581,22 @@ static int airo_set_encodeext(struct net
default:
return -EINVAL;
}
- /* Send the key to the card */
- rc = set_wep_key(local, idx, key.key, key.len, perm, 1);
- if (rc < 0) {
- airo_print_err(local->dev->name, "failed to set WEP key"
- " at index %d: %d.", idx, rc);
- return rc;
+ if (key.len == 0) {
+ rc = set_wep_tx_idx(local, idx, perm, 1);
+ if (rc < 0) {
+ airo_print_err(local->dev->name,
+ "failed to set WEP transmit index to %d: %d.",
+ idx, rc);
+ return rc;
+ }
+ } else {
+ rc = set_wep_key(local, idx, key.key, key.len, perm, 1);
+ if (rc < 0) {
+ airo_print_err(local->dev->name,
+ "failed to set WEP key at index %d: %d.",
+ idx, rc);
+ return rc;
+ }
}
}
file:6cbfb2f8339189c3eb13164c3eeab807131f935c -> file:3931181f4d930d29041e4e8b3556c026fee3b342
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ b/drivers/net/wireless/ath/ar9170/hw.h
@@ -422,5 +422,6 @@ enum ar9170_txq {
#define AR9170_TXQ_DEPTH 32
#define AR9170_TX_MAX_PENDING 128
+#define AR9170_RX_STREAM_MAX_SIZE 65535
#endif /* __AR9170_HW_H */
file:c1f8c69db165d01fa752ccf3188f35d020307b76 -> file:3e547b59d585d093abf121ba64e844865920894b
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -2526,7 +2526,7 @@ void *ar9170_alloc(size_t priv_size)
* tends to split the streams into seperate rx descriptors.
*/
- skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL);
+ skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
if (!skb)
goto err_nomem;
file:f141a4f711a198484ec8aac0123fa93fc736b733 -> file:dbcb72930fa32780f7a3fc8bcd0c907eb2a980ff
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -66,18 +66,28 @@ static struct usb_device_id ar9170_usb_i
{ USB_DEVICE(0x0cf3, 0x1001) },
/* TP-Link TL-WN821N v2 */
{ USB_DEVICE(0x0cf3, 0x1002) },
+ /* 3Com Dual Band 802.11n USB Adapter */
+ { USB_DEVICE(0x0cf3, 0x1010) },
+ /* H3C Dual Band 802.11n USB Adapter */
+ { USB_DEVICE(0x0cf3, 0x1011) },
/* Cace Airpcap NX */
{ USB_DEVICE(0xcace, 0x0300) },
/* D-Link DWA 160 A1 */
{ USB_DEVICE(0x07d1, 0x3c10) },
/* D-Link DWA 160 A2 */
{ USB_DEVICE(0x07d1, 0x3a09) },
+ /* Netgear WNA1000 */
+ { USB_DEVICE(0x0846, 0x9040) },
/* Netgear WNDA3100 */
{ USB_DEVICE(0x0846, 0x9010) },
/* Netgear WN111 v2 */
{ USB_DEVICE(0x0846, 0x9001) },
/* Zydas ZD1221 */
{ USB_DEVICE(0x0ace, 0x1221) },
+ /* Proxim ORiNOCO 802.11n USB */
+ { USB_DEVICE(0x1435, 0x0804) },
+ /* WNC Generic 11n USB Dongle */
+ { USB_DEVICE(0x1435, 0x0326) },
/* ZyXEL NWD271N */
{ USB_DEVICE(0x0586, 0x3417) },
/* Z-Com UB81 BG */
@@ -414,7 +424,7 @@ static int ar9170_usb_exec_cmd(struct ar
spin_unlock_irqrestore(&aru->common.cmdlock, flags);
usb_fill_int_urb(urb, aru->udev,
- usb_sndbulkpipe(aru->udev, AR9170_EP_CMD),
+ usb_sndintpipe(aru->udev, AR9170_EP_CMD),
aru->common.cmdbuf, plen + 4,
ar9170_usb_tx_urb_complete, NULL, 1);
file:6cd5efcec41719de9535558fdbb244ffbc1838fa -> file:2c79c78ef2d700cbd61610e0a1e4bfa0d26d2967
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -540,13 +540,12 @@ struct ath5k_txq_info {
u32 tqi_cbr_period; /* Constant bit rate period */
u32 tqi_cbr_overflow_limit;
u32 tqi_burst_time;
- u32 tqi_ready_time; /* Not used */
+ u32 tqi_ready_time; /* Time queue waits after an event */
};
/*
* Transmit packet types.
* used on tx control descriptor
- * TODO: Use them inside base.c corectly
*/
enum ath5k_pkt_type {
AR5K_PKT_TYPE_NORMAL = 0,
file:71a1bd254517c610257e384b0bc65e01800a2782 -> file:88663dfe3b3d4f15528f3d0f8e48757aa7d73bf3
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -133,6 +133,7 @@ struct ath5k_hw *ath5k_hw_attach(struct
ah->ah_cw_min = AR5K_TUNE_CWMIN;
ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
ah->ah_software_retry = false;
+ ah->ah_current_channel = &sc->channels[0];
/*
* Find the mac version
file:8a82c757ea63e09152ea157bc97e0bf592008441 -> file:7ee61d426dbb5dedb31c9e3effa6057c898490e9
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1220,6 +1220,29 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc
return 0;
}
+static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ enum ath5k_pkt_type htype;
+ __le16 fc;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
+
+ if (ieee80211_is_beacon(fc))
+ htype = AR5K_PKT_TYPE_BEACON;
+ else if (ieee80211_is_probe_resp(fc))
+ htype = AR5K_PKT_TYPE_PROBE_RESP;
+ else if (ieee80211_is_atim(fc))
+ htype = AR5K_PKT_TYPE_ATIM;
+ else if (ieee80211_is_pspoll(fc))
+ htype = AR5K_PKT_TYPE_PSPOLL;
+ else
+ htype = AR5K_PKT_TYPE_NORMAL;
+
+ return htype;
+}
+
static int
ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
struct ath5k_txq *txq)
@@ -1274,7 +1297,8 @@ ath5k_txbuf_setup(struct ath5k_softc *sc
sc->vif, pktlen, info));
}
ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
- ieee80211_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_NORMAL,
+ ieee80211_get_hdrlen_from_skb(skb),
+ get_hw_packet_type(skb),
(sc->power_level * 2),
hw_rate,
info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
@@ -1487,7 +1511,8 @@ ath5k_beaconq_config(struct ath5k_softc
ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi);
if (ret)
- return ret;
+ goto err;
+
if (sc->opmode == NL80211_IFTYPE_AP ||
sc->opmode == NL80211_IFTYPE_MESH_POINT) {
/*
@@ -1514,10 +1539,25 @@ ath5k_beaconq_config(struct ath5k_softc
if (ret) {
ATH5K_ERR(sc, "%s: unable to update parameters for beacon "
"hardware queue!\n", __func__);
- return ret;
+ goto err;
}
+ ret = ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */
+ if (ret)
+ goto err;
- return ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */;
+ /* reconfigure cabq with ready time to 80% of beacon_interval */
+ ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
+ if (ret)
+ goto err;
+
+ qi.tqi_ready_time = (sc->bintval * 80) / 100;
+ ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
+ if (ret)
+ goto err;
+
+ ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
+err:
+ return ret;
}
static void
@@ -1778,11 +1818,6 @@ ath5k_tasklet_rx(unsigned long data)
return;
}
- if (unlikely(rs.rs_more)) {
- ATH5K_WARN(sc, "unsupported jumbo\n");
- goto next;
- }
-
if (unlikely(rs.rs_status)) {
if (rs.rs_status & AR5K_RXERR_PHY)
goto next;
@@ -1812,6 +1847,8 @@ ath5k_tasklet_rx(unsigned long data)
sc->opmode != NL80211_IFTYPE_MONITOR)
goto next;
}
+ if (unlikely(rs.rs_more))
+ goto next;
accept:
next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
@@ -2935,13 +2972,15 @@ static void ath5k_configure_filter(struc
if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
if (*new_flags & FIF_PROMISC_IN_BSS) {
- rfilt |= AR5K_RX_FILTER_PROM;
__set_bit(ATH_STAT_PROMISC, sc->status);
} else {
__clear_bit(ATH_STAT_PROMISC, sc->status);
}
}
+ if (test_bit(ATH_STAT_PROMISC, sc->status))
+ rfilt |= AR5K_RX_FILTER_PROM;
+
/* Note, AR5K_RX_FILTER_MCAST is already enabled */
if (*new_flags & FIF_ALLMULTI) {
mfilt[0] = ~0;
file:eeebb9aef2060eb90bb10f487c875f2d0982057a -> file:b7c57259f1340b138c31342c68ddccd9f7ca5fee
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -408,12 +408,13 @@ int ath5k_hw_reset_tx_queue(struct ath5k
break;
case AR5K_TX_QUEUE_CAB:
+ /* XXX: use BCN_SENT_GT, if we can figure out how */
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
- AR5K_QCU_MISC_FRSHED_BCN_SENT_GT |
+ AR5K_QCU_MISC_FRSHED_DBA_GT |
AR5K_QCU_MISC_CBREXP_DIS |
AR5K_QCU_MISC_CBREXP_BCN_DIS);
- ath5k_hw_reg_write(ah, ((AR5K_TUNE_BEACON_INTERVAL -
+ ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
(AR5K_TUNE_SW_BEACON_RESP -
AR5K_TUNE_DMA_BEACON_RESP) -
AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
file:34e13c7008497c5a270832874b096fe118988ad7 -> file:257ea18c849f3c5376a53163d025ccf1fdc6ee3f
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -1382,8 +1382,9 @@ int ath5k_hw_reset(struct ath5k_hw *ah,
* Set clocks to 32KHz operation and use an
* external 32KHz crystal when sleeping if one
* exists */
- if (ah->ah_version == AR5K_AR5212)
- ath5k_hw_set_sleep_clock(ah, true);
+ if (ah->ah_version == AR5K_AR5212 &&
+ ah->ah_op_mode != NL80211_IFTYPE_AP)
+ ath5k_hw_set_sleep_clock(ah, true);
/*
* Disable beacons and reset the register
file:cdb90c5bf39b182f196eed5ef575063fec10b699 -> file:ad1196946c43f149ab13dc26cd26c1908dadb97c
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -368,6 +368,7 @@ void ath_tx_aggr_start(struct ath_softc
u16 tid, u16 *ssn);
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
+void ath9k_enable_ps(struct ath_softc *sc);
/********/
/* VIFs */
file:45c4ea57616bd6afdbfe3a0e45dd46dc0bfb9be3 -> file:72e24559ec4b280a4997e4a8c256490a4f8ac679
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -512,16 +512,13 @@ static void ath_beacon_config_ap(struct
{
u32 nexttbtt, intval;
- /* Configure the timers only when the TSF has to be reset */
-
- if (!(sc->sc_flags & SC_OP_TSF_RESET))
- return;
-
/* NB: the beacon interval is kept internally in TU's */
intval = conf->beacon_interval & ATH9K_BEACON_PERIOD;
intval /= ATH_BCBUF; /* for staggered beacons */
nexttbtt = intval;
- intval |= ATH9K_BEACON_RESET_TSF;
+
+ if (sc->sc_flags & SC_OP_TSF_RESET)
+ intval |= ATH9K_BEACON_RESET_TSF;
/*
* In AP mode we enable the beacon timers and SWBA interrupts to
file:0905b38f7b6234fc34fd1cb859591ac215679856 -> file:2185f9771f3835626aa68671be2989f4d9539da4
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -398,7 +398,6 @@ static void ath9k_hw_init_config(struct
ah->config.pcie_clock_req = 0;
ah->config.pcie_waen = 0;
ah->config.analog_shiftreg = 1;
- ah->config.ht_enable = 1;
ah->config.ofdm_trig_low = 200;
ah->config.ofdm_trig_high = 500;
ah->config.cck_trig_high = 200;
@@ -412,6 +411,11 @@ static void ath9k_hw_init_config(struct
ah->config.spurchans[i][1] = AR_NO_SPUR;
}
+ if (ah->hw_version.devid != AR2427_DEVID_PCIE)
+ ah->config.ht_enable = 1;
+ else
+ ah->config.ht_enable = 0;
+
ah->config.intr_mitigation = true;
/*
@@ -617,6 +621,7 @@ static bool ath9k_hw_devid_supported(u16
case AR9285_DEVID_PCIE:
case AR5416_DEVID_AR9287_PCI:
case AR5416_DEVID_AR9287_PCIE:
+ case AR2427_DEVID_PCIE:
return true;
default:
break;
@@ -1295,6 +1300,16 @@ static void ath9k_hw_override_ini(struct
* Necessary to avoid issues on AR5416 2.0
*/
REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
+
+ /*
+ * Disable RIFS search on some chips to avoid baseband
+ * hang issues.
+ */
+ if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) {
+ val = REG_READ(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS);
+ val &= ~AR_PHY_RIFS_INIT_DELAY;
+ REG_WRITE(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS, val);
+ }
}
static u32 ath9k_hw_def_ini_fixup(struct ath_hw *ah,
file:ff4383bb3ac3da0c5c1af7a5163c07acffa9aaad -> file:262556ad62f2f36e9a809ce8c66ceff1ca3a356b
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -37,6 +37,7 @@
#define AR9280_DEVID_PCI 0x0029
#define AR9280_DEVID_PCIE 0x002a
#define AR9285_DEVID_PCIE 0x002b
+#define AR2427_DEVID_PCIE 0x002c
#define AR5416_AR9100_DEVID 0x000b
#define AR_SUBVENDOR_ID_NOG 0x0e11
#define AR_SUBVENDOR_ID_NEW_A 0x7065
file:8622265a030afc23919ec5ccbd9f7ce7e658061d -> file:a21c214a3e4d3dbe2bee63cebe26f13e15073084
--- a/drivers/net/wireless/ath/ath9k/initvals.h
+++ b/drivers/net/wireless/ath/ath9k/initvals.h
@@ -2762,7 +2762,7 @@ static const u32 ar9280Common_9280_2[][2
{ 0x00008258, 0x00000000 },
{ 0x0000825c, 0x400000ff },
{ 0x00008260, 0x00080922 },
- { 0x00008264, 0xa8a00010 },
+ { 0x00008264, 0x88a00010 },
{ 0x00008270, 0x00000000 },
{ 0x00008274, 0x40000000 },
{ 0x00008278, 0x003e4180 },
@@ -3935,7 +3935,7 @@ static const u_int32_t ar9285Common_9285
{ 0x00008258, 0x00000000 },
{ 0x0000825c, 0x400000ff },
{ 0x00008260, 0x00080922 },
- { 0x00008264, 0xa8a00010 },
+ { 0x00008264, 0x88a00010 },
{ 0x00008270, 0x00000000 },
{ 0x00008274, 0x40000000 },
{ 0x00008278, 0x003e4180 },
@@ -5072,7 +5072,7 @@ static const u_int32_t ar9287Common_9287
{ 0x00008258, 0x00000000 },
{ 0x0000825c, 0x400000ff },
{ 0x00008260, 0x00080922 },
- { 0x00008264, 0xa8a00010 },
+ { 0x00008264, 0x88a00010 },
{ 0x00008270, 0x00000000 },
{ 0x00008274, 0x40000000 },
{ 0x00008278, 0x003e4180 },
@@ -6864,7 +6864,7 @@ static const u_int32_t ar9271Common_9271
{ 0x00008258, 0x00000000 },
{ 0x0000825c, 0x400000ff },
{ 0x00008260, 0x00080922 },
- { 0x00008264, 0xa8a00010 },
+ { 0x00008264, 0x88a00010 },
{ 0x00008270, 0x00000000 },
{ 0x00008274, 0x40000000 },
{ 0x00008278, 0x003e4180 },
file:5864eaa53cfca3224f3a0619610aba89892b3edc -> file:0c349cece9514cba2e73f420f824f04875a5b6b9
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1538,14 +1538,19 @@ bad_no_ah:
void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
+ struct ath_hw *ah = sc->sc_ah;
+
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_SPECTRUM_MGMT;
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
+ hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+
if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
@@ -1555,7 +1560,10 @@ void ath_set_hw_capab(struct ath_softc *
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT);
- hw->wiphy->ps_default = false;
+ if (AR_SREV_5416(ah))
+ hw->wiphy->ps_default = false;
+ else
+ hw->wiphy->ps_default = true;
hw->queues = 4;
hw->max_rates = 4;
@@ -2305,6 +2313,19 @@ static void ath9k_remove_interface(struc
mutex_unlock(&sc->mutex);
}
+void ath9k_enable_ps(struct ath_softc *sc)
+{
+ sc->ps_enabled = true;
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
+ sc->imask |= ATH9K_INT_TIM_TIMER;
+ ath9k_hw_set_interrupts(sc->sc_ah,
+ sc->imask);
+ }
+ }
+ ath9k_hw_setrxabort(sc->sc_ah, 1);
+}
+
static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath_wiphy *aphy = hw->priv;
@@ -2336,19 +2357,9 @@ static int ath9k_config(struct ieee80211
if (changed & IEEE80211_CONF_CHANGE_PS) {
if (conf->flags & IEEE80211_CONF_PS) {
sc->sc_flags |= SC_OP_PS_ENABLED;
- if (!(ah->caps.hw_caps &
- ATH9K_HW_CAP_AUTOSLEEP)) {
- if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
- sc->imask |= ATH9K_INT_TIM_TIMER;
- ath9k_hw_set_interrupts(sc->sc_ah,
- sc->imask);
- }
- }
- sc->ps_enabled = true;
if ((sc->sc_flags & SC_OP_NULLFUNC_COMPLETED)) {
sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED;
- sc->ps_enabled = true;
- ath9k_hw_setrxabort(sc->sc_ah, 1);
+ ath9k_enable_ps(sc);
}
} else {
sc->ps_enabled = false;
file:903dd8ad9d430820a67668754d20c4abd48bbf41 -> file:14cf3fe95001184927800a88f426e7f975fbfca1
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -25,6 +25,7 @@ static struct pci_device_id ath_pci_id_t
{ PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
{ PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */
+ { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
{ PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
{ 0 }
file:dfda6f444648b9e145aae00134ad9a46e9ea0fb9 -> file:b36ec9454460365e151191a0e773799219a604d8
--- a/drivers/net/wireless/ath/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -368,6 +368,9 @@ bool ath9k_hw_init_rf(struct ath_hw *ah,
#define AR_PHY_HEAVY_CLIP_ENABLE 0x99E0
+#define AR_PHY_HEAVY_CLIP_FACTOR_RIFS 0x99EC
+#define AR_PHY_RIFS_INIT_DELAY 0x03ff0000
+
#define AR_PHY_M_SLEEP 0x99f0
#define AR_PHY_REFCLKDLY 0x99f4
#define AR_PHY_REFCLKPD 0x99f8
file:1895d63aad0a8a8e65c3599918ab6bd159bef88e -> file:fd397aa439c930b54e80beb1c43545aba84a4308
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -757,7 +757,7 @@ static void ath_get_rate(void *priv, str
struct ieee80211_tx_rate *rates = tx_info->control.rates;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
__le16 fc = hdr->frame_control;
- u8 try_per_rate, i = 0, rix, nrix;
+ u8 try_per_rate, i = 0, rix;
int is_probe = 0;
if (rate_control_send_low(sta, priv_sta, txrc))
@@ -777,26 +777,25 @@ static void ath_get_rate(void *priv, str
rate_table = sc->cur_rate_table;
rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, &is_probe);
- nrix = rix;
if (is_probe) {
/* set one try for probe rates. For the
* probes don't enable rts */
ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
- 1, nrix, 0);
+ 1, rix, 0);
/* Get the next tried/allowed rate. No RTS for the next series
* after the probe rate
*/
- ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &nrix);
+ ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
- try_per_rate, nrix, 0);
+ try_per_rate, rix, 0);
tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
} else {
/* Set the choosen rate. No RTS for first series entry. */
ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
- try_per_rate, nrix, 0);
+ try_per_rate, rix, 0);
}
/* Fill in the other rates for multirate retry */
@@ -805,10 +804,10 @@ static void ath_get_rate(void *priv, str
if (i + 1 == 4)
try_per_rate = 4;
- ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &nrix);
+ ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
/* All other rates in the series have RTS enabled */
ath_rc_rate_set_series(rate_table, &rates[i], txrc,
- try_per_rate, nrix, 1);
+ try_per_rate, rix, 1);
}
/*
file:9009bac9207d1ec484972958a7fb1f4c8b16b8e1 -> file:e499fb6e00c40b3e447287c978ac615aff418021
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1320,25 +1320,6 @@ static enum ath9k_pkt_type get_hw_packet
return htype;
}
-static bool is_pae(struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr;
- __le16 fc;
-
- hdr = (struct ieee80211_hdr *)skb->data;
- fc = hdr->frame_control;
-
- if (ieee80211_is_data(fc)) {
- if (ieee80211_is_nullfunc(fc) ||
- /* Port Access Entity (IEEE 802.1X) */
- (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
- return true;
- }
- }
-
- return false;
-}
-
static int get_hw_crypto_keytype(struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -1648,7 +1629,7 @@ static void ath_tx_start_dma(struct ath_
goto tx_done;
}
- if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) {
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
/*
* Try aggregation if it's a unicast data frame
* and the destination is HT capable.
@@ -1998,10 +1979,9 @@ static void ath_tx_processq(struct ath_s
if (bf->bf_isnullfunc &&
(ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) {
- if ((sc->sc_flags & SC_OP_PS_ENABLED)) {
- sc->ps_enabled = true;
- ath9k_hw_setrxabort(sc->sc_ah, 1);
- } else
+ if ((sc->sc_flags & SC_OP_PS_ENABLED))
+ ath9k_enable_ps(sc);
+ else
sc->sc_flags |= SC_OP_NULLFUNC_COMPLETED;
}
file:0e6b15454d39da61dd7638b036d3ae3f615ec42f -> file:805d28a06ff50e0448368fa5586bcedf2edccb95
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -696,6 +696,7 @@ struct b43_wldev {
bool radio_hw_enable; /* saved state of radio hardware enabled state */
bool qos_enabled; /* TRUE, if QoS is used. */
bool hwcrypto_enabled; /* TRUE, if HW crypto acceleration is enabled. */
+ bool use_pio; /* TRUE if next init should use PIO */
/* PHY/Radio device. */
struct b43_phy phy;
@@ -750,12 +751,6 @@ struct b43_wldev {
#endif
};
-/*
- * Include goes here to avoid a dependency problem.
- * A better fix would be to integrate xmit.h into b43.h.
- */
-#include "xmit.h"
-
/* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */
struct b43_wl {
/* Pointer to the active wireless device on this chip */
@@ -830,15 +825,9 @@ struct b43_wl {
/* The device LEDs. */
struct b43_leds leds;
-#ifdef CONFIG_B43_PIO
- /*
- * RX/TX header/tail buffers used by the frame transmit functions.
- */
- struct b43_rxhdr_fw4 rxhdr;
- struct b43_txhdr txhdr;
- u8 rx_tail[4];
- u8 tx_tail[4];
-#endif /* CONFIG_B43_PIO */
+ /* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */
+ u8 pio_scratchspace[110] __attribute__((__aligned__(8)));
+ u8 pio_tailspace[4] __attribute__((__aligned__(8)));
};
static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw)
@@ -889,20 +878,15 @@ static inline void b43_write32(struct b4
static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
{
-#ifdef CONFIG_B43_PIO
return dev->__using_pio_transfers;
-#else
- return 0;
-#endif
}
#ifdef CONFIG_B43_FORCE_PIO
-# define B43_FORCE_PIO 1
+# define B43_PIO_DEFAULT 1
#else
-# define B43_FORCE_PIO 0
+# define B43_PIO_DEFAULT 0
#endif
-
/* Message printing */
void b43info(struct b43_wl *wl, const char *fmt, ...)
__attribute__ ((format(printf, 2, 3)));
file:de4e804bedf05541c3b9c943bfec268836b24222 -> file:571d475ca321684af1bbd6084ea264dc3233b824
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1620,7 +1620,6 @@ void b43_dma_tx_resume(struct b43_wldev
b43_power_saving_ctl_bits(dev, 0);
}
-#ifdef CONFIG_B43_PIO
static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
u16 mmio_base, bool enable)
{
@@ -1654,4 +1653,3 @@ void b43_dma_direct_fifo_rx(struct b43_w
mmio_base = b43_dmacontroller_base(type, engine_index);
direct_fifo_rx(dev, type, mmio_base, enable);
}
-#endif /* CONFIG_B43_PIO */
file:9ca253e40bd5e8cceb7fc1dfbad4837bf368a11e -> file:d6056347b6bdbe146f612b88a7a0f87816c4e79d
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -102,6 +102,9 @@ int b43_modparam_verbose = B43_VERBOSITY
module_param_named(verbose, b43_modparam_verbose, int, 0644);
MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug");
+int b43_modparam_pio = B43_PIO_DEFAULT;
+module_param_named(pio, b43_modparam_pio, int, 0644);
+MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
static const struct ssb_device_id b43_ssb_tbl[] = {
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5),
@@ -852,19 +855,16 @@ static void b43_op_update_tkip_key(struc
if (B43_WARN_ON(!modparam_hwtkip))
return;
- mutex_lock(&wl->mutex);
-
+ /* This is only called from the RX path through mac80211, where
+ * our mutex is already locked. */
+ B43_WARN_ON(!mutex_is_locked(&wl->mutex));
dev = wl->current_dev;
- if (!dev || b43_status(dev) < B43_STAT_INITIALIZED)
- goto out_unlock;
+ B43_WARN_ON(!dev || b43_status(dev) < B43_STAT_INITIALIZED);
keymac_write(dev, index, NULL); /* First zero out mac to avoid race */
rx_tkip_phase1_write(dev, index, iv32, phase1key);
keymac_write(dev, index, addr);
-
-out_unlock:
- mutex_unlock(&wl->mutex);
}
static void do_key_write(struct b43_wldev *dev,
@@ -1791,6 +1791,10 @@ static void b43_do_interrupt_thread(stru
dma_reason[0], dma_reason[1],
dma_reason[2], dma_reason[3],
dma_reason[4], dma_reason[5]);
+ b43err(dev->wl, "This device does not support DMA "
+ "on your system. Please use PIO instead.\n");
+ /* Fall back to PIO transfers if we get fatal DMA errors! */
+ dev->use_pio = 1;
b43_controller_restart(dev, "DMA error");
return;
}
@@ -3967,6 +3971,7 @@ static int b43_wireless_core_start(struc
}
/* We are ready to run. */
+ ieee80211_wake_queues(dev->wl->hw);
b43_set_status(dev, B43_STAT_STARTED);
/* Start data flow (TX/RX). */
@@ -4357,7 +4362,7 @@ static int b43_wireless_core_init(struct
if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) ||
(dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) ||
- B43_FORCE_PIO) {
+ dev->use_pio) {
dev->__using_pio_transfers = 1;
err = b43_pio_init(dev);
} else {
@@ -4376,8 +4381,6 @@ static int b43_wireless_core_init(struct
ieee80211_wake_queues(dev->wl->hw);
- ieee80211_wake_queues(dev->wl->hw);
-
b43_set_status(dev, B43_STAT_INITIALIZED);
out:
@@ -4827,6 +4830,7 @@ static int b43_one_core_attach(struct ss
if (!wldev)
goto out;
+ wldev->use_pio = b43_modparam_pio;
wldev->dev = dev;
wldev->wl = wl;
b43_set_status(wldev, B43_STAT_UNINIT);
file:9b9044400218e4e59a085272001037993ccda90a -> file:c5cd3bc861befda98e4afbe7b9008aca5aac7645
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -342,12 +342,15 @@ static u16 tx_write_2byte_queue(struct b
q->mmio_base + B43_PIO_TXDATA,
sizeof(u16));
if (data_len & 1) {
+ u8 *tail = wl->pio_tailspace;
+ BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);
+
/* Write the last byte. */
ctl &= ~B43_PIO_TXCTL_WRITEHI;
b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
- wl->tx_tail[0] = data[data_len - 1];
- wl->tx_tail[1] = 0;
- ssb_block_write(dev->dev, wl->tx_tail, 2,
+ tail[0] = data[data_len - 1];
+ tail[1] = 0;
+ ssb_block_write(dev->dev, tail, 2,
q->mmio_base + B43_PIO_TXDATA,
sizeof(u16));
}
@@ -393,31 +396,31 @@ static u32 tx_write_4byte_queue(struct b
q->mmio_base + B43_PIO8_TXDATA,
sizeof(u32));
if (data_len & 3) {
- wl->tx_tail[3] = 0;
+ u8 *tail = wl->pio_tailspace;
+ BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);
+
+ memset(tail, 0, 4);
/* Write the last few bytes. */
ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
B43_PIO8_TXCTL_24_31);
switch (data_len & 3) {
case 3:
ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15;
- wl->tx_tail[0] = data[data_len - 3];
- wl->tx_tail[1] = data[data_len - 2];
- wl->tx_tail[2] = data[data_len - 1];
+ tail[0] = data[data_len - 3];
+ tail[1] = data[data_len - 2];
+ tail[2] = data[data_len - 1];
break;
case 2:
ctl |= B43_PIO8_TXCTL_8_15;
- wl->tx_tail[0] = data[data_len - 2];
- wl->tx_tail[1] = data[data_len - 1];
- wl->tx_tail[2] = 0;
+ tail[0] = data[data_len - 2];
+ tail[1] = data[data_len - 1];
break;
case 1:
- wl->tx_tail[0] = data[data_len - 1];
- wl->tx_tail[1] = 0;
- wl->tx_tail[2] = 0;
+ tail[0] = data[data_len - 1];
break;
}
b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
- ssb_block_write(dev->dev, wl->tx_tail, 4,
+ ssb_block_write(dev->dev, tail, 4,
q->mmio_base + B43_PIO8_TXDATA,
sizeof(u32));
}
@@ -456,6 +459,7 @@ static int pio_tx_frame(struct b43_pio_t
int err;
unsigned int hdrlen;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct b43_txhdr *txhdr = (struct b43_txhdr *)wl->pio_scratchspace;
B43_WARN_ON(list_empty(&q->packets_list));
pack = list_entry(q->packets_list.next,
@@ -463,7 +467,9 @@ static int pio_tx_frame(struct b43_pio_t
cookie = generate_cookie(q, pack);
hdrlen = b43_txhdr_size(dev);
- err = b43_generate_txhdr(dev, (u8 *)&wl->txhdr, skb,
+ BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(struct b43_txhdr));
+ B43_WARN_ON(sizeof(wl->pio_scratchspace) < hdrlen);
+ err = b43_generate_txhdr(dev, (u8 *)txhdr, skb,
info, cookie);
if (err)
return err;
@@ -477,9 +483,9 @@ static int pio_tx_frame(struct b43_pio_t
pack->skb = skb;
if (q->rev >= 8)
- pio_tx_frame_4byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen);
+ pio_tx_frame_4byte_queue(pack, (const u8 *)txhdr, hdrlen);
else
- pio_tx_frame_2byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen);
+ pio_tx_frame_2byte_queue(pack, (const u8 *)txhdr, hdrlen);
/* Remove it from the list of available packet slots.
* It will be put back when we receive the status report. */
@@ -625,8 +631,11 @@ static bool pio_rx_frame(struct b43_pio_
unsigned int i, padding;
struct sk_buff *skb;
const char *err_msg = NULL;
+ struct b43_rxhdr_fw4 *rxhdr =
+ (struct b43_rxhdr_fw4 *)wl->pio_scratchspace;
- memset(&wl->rxhdr, 0, sizeof(wl->rxhdr));
+ BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
+ memset(rxhdr, 0, sizeof(*rxhdr));
/* Check if we have data and wait for it to get ready. */
if (q->rev >= 8) {
@@ -664,16 +673,16 @@ data_ready:
/* Get the preamble (RX header) */
if (q->rev >= 8) {
- ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr),
+ ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr),
q->mmio_base + B43_PIO8_RXDATA,
sizeof(u32));
} else {
- ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr),
+ ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr),
q->mmio_base + B43_PIO_RXDATA,
sizeof(u16));
}
/* Sanity checks. */
- len = le16_to_cpu(wl->rxhdr.frame_len);
+ len = le16_to_cpu(rxhdr->frame_len);
if (unlikely(len > 0x700)) {
err_msg = "len > 0x700";
goto rx_error;
@@ -683,7 +692,7 @@ data_ready:
goto rx_error;
}
- macstat = le32_to_cpu(wl->rxhdr.mac_status);
+ macstat = le32_to_cpu(rxhdr->mac_status);
if (macstat & B43_RX_MAC_FCSERR) {
if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
/* Drop frames with failed FCS. */
@@ -708,22 +717,25 @@ data_ready:
q->mmio_base + B43_PIO8_RXDATA,
sizeof(u32));
if (len & 3) {
+ u8 *tail = wl->pio_tailspace;
+ BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);
+
/* Read the last few bytes. */
- ssb_block_read(dev->dev, wl->rx_tail, 4,
+ ssb_block_read(dev->dev, tail, 4,
q->mmio_base + B43_PIO8_RXDATA,
sizeof(u32));
switch (len & 3) {
case 3:
- skb->data[len + padding - 3] = wl->rx_tail[0];
- skb->data[len + padding - 2] = wl->rx_tail[1];
- skb->data[len + padding - 1] = wl->rx_tail[2];
+ skb->data[len + padding - 3] = tail[0];
+ skb->data[len + padding - 2] = tail[1];
+ skb->data[len + padding - 1] = tail[2];
break;
case 2:
- skb->data[len + padding - 2] = wl->rx_tail[0];
- skb->data[len + padding - 1] = wl->rx_tail[1];
+ skb->data[len + padding - 2] = tail[0];
+ skb->data[len + padding - 1] = tail[1];
break;
case 1:
- skb->data[len + padding - 1] = wl->rx_tail[0];
+ skb->data[len + padding - 1] = tail[0];
break;
}
}
@@ -732,15 +744,18 @@ data_ready:
q->mmio_base + B43_PIO_RXDATA,
sizeof(u16));
if (len & 1) {
+ u8 *tail = wl->pio_tailspace;
+ BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);
+
/* Read the last byte. */
- ssb_block_read(dev->dev, wl->rx_tail, 2,
+ ssb_block_read(dev->dev, tail, 2,
q->mmio_base + B43_PIO_RXDATA,
sizeof(u16));
- skb->data[len + padding - 1] = wl->rx_tail[0];
+ skb->data[len + padding - 1] = tail[0];
}
}
- b43_rx(q->dev, skb, &wl->rxhdr);
+ b43_rx(q->dev, skb, rxhdr);
return 1;
file:7dd649c9ddadcd2a91533395da4f06e9d7fb9c03 -> file:7b3c42f93a16217e0c36a1c2c20e40a9f2f93ab8
--- a/drivers/net/wireless/b43/pio.h
+++ b/drivers/net/wireless/b43/pio.h
@@ -55,8 +55,6 @@
#define B43_PIO_MAX_NR_TXPACKETS 32
-#ifdef CONFIG_B43_PIO
-
struct b43_pio_txpacket {
/* Pointer to the TX queue we belong to. */
struct b43_pio_txqueue *queue;
@@ -169,42 +167,4 @@ void b43_pio_rx(struct b43_pio_rxqueue *
void b43_pio_tx_suspend(struct b43_wldev *dev);
void b43_pio_tx_resume(struct b43_wldev *dev);
-
-#else /* CONFIG_B43_PIO */
-
-
-static inline int b43_pio_init(struct b43_wldev *dev)
-{
- return 0;
-}
-static inline void b43_pio_free(struct b43_wldev *dev)
-{
-}
-static inline void b43_pio_stop(struct b43_wldev *dev)
-{
-}
-static inline int b43_pio_tx(struct b43_wldev *dev,
- struct sk_buff *skb)
-{
- return 0;
-}
-static inline void b43_pio_handle_txstatus(struct b43_wldev *dev,
- const struct b43_txstatus *status)
-{
-}
-static inline void b43_pio_get_tx_stats(struct b43_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
-}
-static inline void b43_pio_rx(struct b43_pio_rxqueue *q)
-{
-}
-static inline void b43_pio_tx_suspend(struct b43_wldev *dev)
-{
-}
-static inline void b43_pio_tx_resume(struct b43_wldev *dev)
-{
-}
-
-#endif /* CONFIG_B43_PIO */
#endif /* B43_PIO_H_ */
file:f4e9695ec18611a2c33439184fa501fc9dc9a8c7 -> file:51d68971f6faa096a904ba928d147876405eeca6
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -27,7 +27,7 @@
*/
-#include "b43.h"
+#include "xmit.h"
#include "phy_common.h"
#include "dma.h"
#include "pio.h"
file:4b60148a5e61c9cc9b2f0eea64fe128d72d1ac55 -> file:c3968fad55e1092a5ce0132678ab9e0e431bd477
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2921,6 +2921,7 @@ static int b43legacy_wireless_core_start
goto out;
}
/* We are ready to run. */
+ ieee80211_wake_queues(dev->wl->hw);
b43legacy_set_status(dev, B43legacy_STAT_STARTED);
/* Start data flow (TX/RX) */
@@ -3341,6 +3342,7 @@ static int b43legacy_wireless_core_init(
b43legacy_security_init(dev);
b43legacy_rng_init(wl);
+ ieee80211_wake_queues(dev->wl->hw);
b43legacy_set_status(dev, B43legacy_STAT_INITIALIZED);
b43legacy_leds_init(dev);
file:ad8eab4a639b05fe82f41d7977f06448f2119760 -> file:b4ff1dc1d72d8cef60854d1bd3b9081f891b41e0
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -626,6 +626,7 @@ static int prism2_config(struct pcmcia_d
int ret = 1;
int last_fn, last_ret;
struct hostap_cs_priv *hw_priv;
+ unsigned long flags;
PDEBUG(DEBUG_FLOW, "prism2_config()\n");
@@ -661,6 +662,12 @@ static int prism2_config(struct pcmcia_d
link->dev_node = &hw_priv->node;
/*
+ * Make sure the IRQ handler cannot proceed until at least
+ * dev->base_addr is initialized.
+ */
+ spin_lock_irqsave(&local->irq_init_lock, flags);
+
+ /*
* Allocate an interrupt line. Note that this does not assign a
* handler to the interrupt, unless the 'Handler' member of the
* irq structure is initialized.
@@ -686,6 +693,8 @@ static int prism2_config(struct pcmcia_d
dev->irq = link->irq.AssignedIRQ;
dev->base_addr = link->io.BasePort1;
+ spin_unlock_irqrestore(&local->irq_init_lock, flags);
+
/* Finally, report what we've done */
printk(KERN_INFO "%s: index 0x%02x: ",
dev_info, link->conf.ConfigIndex);
@@ -715,6 +724,7 @@ static int prism2_config(struct pcmcia_d
return ret;
cs_failed:
+ spin_unlock_irqrestore(&local->irq_init_lock, flags);
cs_error(link, last_fn, last_ret);
failed:
file:ff9b5c882184a00e58316166a62fd5514ead0ae6 -> file:2f999fc94f60ca9b36bb11d70fd29096c85eaf7e
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -2621,6 +2621,18 @@ static irqreturn_t prism2_interrupt(int
iface = netdev_priv(dev);
local = iface->local;
+ /* Detect early interrupt before the driver is fully configured */
+ spin_lock(&local->irq_init_lock);
+ if (!dev->base_addr) {
+ if (net_ratelimit()) {
+ printk(KERN_DEBUG "%s: Interrupt, but dev not configured\n",
+ dev->name);
+ }
+ spin_unlock(&local->irq_init_lock);
+ return IRQ_HANDLED;
+ }
+ spin_unlock(&local->irq_init_lock);
+
prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INTERRUPT, 0, 0);
if (local->func->card_present && !local->func->card_present(local)) {
@@ -3138,6 +3150,7 @@ prism2_init_local_data(struct prism2_hel
spin_lock_init(&local->cmdlock);
spin_lock_init(&local->baplock);
spin_lock_init(&local->lock);
+ spin_lock_init(&local->irq_init_lock);
mutex_init(&local->rid_bap_mtx);
if (card_idx < 0 || card_idx >= MAX_PARM_DEVICES)
file:3d238917af07f32bcd16ffcf8eb75f90f824b120 -> file:1ba33be98b254ce9ef8a56fcb6c55501d14a9839
--- a/drivers/net/wireless/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/hostap/hostap_wlan.h
@@ -654,7 +654,7 @@ struct local_info {
rwlock_t iface_lock; /* hostap_interfaces read lock; use write lock
* when removing entries from the list.
* TX and RX paths can use read lock. */
- spinlock_t cmdlock, baplock, lock;
+ spinlock_t cmdlock, baplock, lock, irq_init_lock;
struct mutex rid_bap_mtx;
u16 infofid; /* MAC buffer id for info frame */
/* txfid, intransmitfid, next_txtid, and next_alloc are protected by
file:9d60f6cfa1d5c5a2a130cf0c89f2e19a3973198b -> file:56bfcc35c9f0d050c12b4dffd5eddaf32564280e
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -2545,11 +2545,9 @@ int iwl3945_hw_set_hw_params(struct iwl_
memset((void *)&priv->hw_params, 0,
sizeof(struct iwl_hw_params));
- priv->shared_virt =
- pci_alloc_consistent(priv->pci_dev,
- sizeof(struct iwl3945_shared),
- &priv->shared_phys);
-
+ priv->shared_virt = dma_alloc_coherent(&priv->pci_dev->dev,
+ sizeof(struct iwl3945_shared),
+ &priv->shared_phys, GFP_KERNEL);
if (!priv->shared_virt) {
IWL_ERR(priv, "failed to allocate pci memory\n");
mutex_unlock(&priv->mutex);
file:99331edd9d7523033811210586a513e999956cb7 -> file:585b8d49f35b872de70555cc4bbb4c6e45e70114
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -715,6 +715,13 @@ static int iwl4965_alive_notify(struct i
iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
+ /* make sure all queues are not stopped */
+ memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
+ for (i = 0; i < 4; i++)
+ atomic_set(&priv->queue_stop_count[i], 0);
+
+ /* reset to 0 to enable all the queues first */
+ priv->txq_ctx_active_msk = 0;
/* Map each Tx/cmd queue to its corresponding fifo */
for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
int ac = default_queue_to_tx_fifo[i];
@@ -2134,7 +2141,9 @@ static void iwl4965_rx_reply_tx(struct i
IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
"%d index %d\n", scd_ssn , index);
freed = iwl_tx_queue_reclaim(priv, txq_id, index);
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ if (qc)
+ iwl_free_tfds_in_queue(priv, sta_id,
+ tid, freed);
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark) &&
@@ -2162,13 +2171,14 @@ static void iwl4965_rx_reply_tx(struct i
freed = iwl_tx_queue_reclaim(priv, txq_id, index);
if (qc && likely(sta_id != IWL_INVALID_STATION))
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+ else if (sta_id == IWL_INVALID_STATION)
+ IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark))
iwl_wake_queue(priv, txq_id);
}
-
if (qc && likely(sta_id != IWL_INVALID_STATION))
iwl_txq_check_empty(priv, sta_id, tid, txq_id);
file:133df70bc609d9888c633fdd67e4478ed2c8a785 -> file:1f423f2f6a2510b3e0eec8036972322aa9ec674e
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -793,6 +793,13 @@ int iwl5000_alive_notify(struct iwl_priv
iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
+ /* make sure all queues are not stopped */
+ memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
+ for (i = 0; i < 4; i++)
+ atomic_set(&priv->queue_stop_count[i], 0);
+
+ /* reset to 0 to enable all the queues first */
+ priv->txq_ctx_active_msk = 0;
/* map qos queues to fifos one-to-one */
for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) {
int ac = iwl5000_default_queue_to_tx_fifo[i];
@@ -1264,7 +1271,7 @@ static void iwl5000_rx_reply_tx(struct i
scd_ssn , index, txq_id, txq->swq_id);
freed = iwl_tx_queue_reclaim(priv, txq_id, index);
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark) &&
@@ -1293,16 +1300,14 @@ static void iwl5000_rx_reply_tx(struct i
tx_resp->failure_frame);
freed = iwl_tx_queue_reclaim(priv, txq_id, index);
- if (ieee80211_is_data_qos(tx_resp->frame_ctrl))
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark))
iwl_wake_queue(priv, txq_id);
}
- if (ieee80211_is_data_qos(tx_resp->frame_ctrl))
- iwl_txq_check_empty(priv, sta_id, tid, txq_id);
+ iwl_txq_check_empty(priv, sta_id, tid, txq_id);
if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
@@ -1669,12 +1674,12 @@ struct iwl_cfg iwl5300_agn_cfg = {
.use_rts_for_ht = true, /* use rts/cts protection */
};
-struct iwl_cfg iwl5100_bg_cfg = {
- .name = "5100BG",
+struct iwl_cfg iwl5100_bgn_cfg = {
+ .name = "5100BGN",
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
.ucode_api_min = IWL5000_UCODE_API_MIN,
- .sku = IWL_SKU_G,
+ .sku = IWL_SKU_G|IWL_SKU_N,
.ops = &iwl5000_ops,
.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
@@ -1700,7 +1705,6 @@ struct iwl_cfg iwl5100_abg_cfg = {
.valid_tx_ant = ANT_B,
.valid_rx_ant = ANT_AB,
.need_pll_cfg = true,
- .ht_greenfield_support = true,
};
struct iwl_cfg iwl5100_agn_cfg = {
@@ -1757,6 +1761,22 @@ struct iwl_cfg iwl5150_agn_cfg = {
.use_rts_for_ht = true, /* use rts/cts protection */
};
+struct iwl_cfg iwl5150_abg_cfg = {
+ .name = "5150ABG",
+ .fw_name_pre = IWL5150_FW_PRE,
+ .ucode_api_max = IWL5150_UCODE_API_MAX,
+ .ucode_api_min = IWL5150_UCODE_API_MIN,
+ .sku = IWL_SKU_A|IWL_SKU_G,
+ .ops = &iwl5150_ops,
+ .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
+ .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
+ .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
+ .mod_params = &iwl50_mod_params,
+ .valid_tx_ant = ANT_A,
+ .valid_rx_ant = ANT_AB,
+ .need_pll_cfg = true,
+};
+
MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
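Both alive_notify hunks above clear the software queue-stop bookkeeping when the uCode reports alive. The apparent rationale: a queue stopped before a firmware restart would otherwise stay stopped, because the completions that would have produced the matching wake calls are gone. A rough illustration of the counting scheme being reset (these helpers are illustrative, not the driver's actual stop/wake paths):

/* illustrative only: one stop counter per access category,
 * a queue may transmit only while its counter is zero */
static atomic_t example_stop_count[4];

static void example_stop_ac(int ac)
{
	atomic_inc(&example_stop_count[ac]);
}

static void example_wake_ac(int ac)
{
	if (atomic_dec_return(&example_stop_count[ac]) < 0)
		atomic_set(&example_stop_count[ac], 0);
}

After a uCode reload the pending completions are lost, so the counters, the queue_stopped bitmap, and txq_ctx_active_msk are simply zeroed before the queues are remapped to their FIFOs.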
file:0eb25916f81d4f46be4910fe6e522b1047673b33 -> file:1a3dfa2b1ef020f684dbd80c3c4529bf1116f009
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -402,10 +402,23 @@ static void rs_tl_turn_on_agg_for_tid(st
struct iwl_lq_sta *lq_data, u8 tid,
struct ieee80211_sta *sta)
{
+ int ret;
+
if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
sta->addr, tid);
- ieee80211_start_tx_ba_session(priv->hw, sta->addr, tid);
+ ret = ieee80211_start_tx_ba_session(priv->hw, sta->addr, tid);
+ if (ret == -EAGAIN) {
+ /*
+ * driver and mac80211 are out of sync;
+ * this might be caused by reloading firmware,
+ * so stop the Tx BA session here
+ */
+ IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n",
+ tid);
+ ret = ieee80211_stop_tx_ba_session(priv->hw, sta->addr, tid,
+ WLAN_BACK_INITIATOR);
+ }
}
}
@@ -2180,9 +2193,12 @@ static void rs_rate_scale_perform(struct
/* Else we have enough samples; calculate estimate of
* actual average throughput */
-
- BUG_ON(window->average_tpt != ((window->success_ratio *
- tbl->expected_tpt[index] + 64) / 128));
+ if (window->average_tpt != ((window->success_ratio *
+ tbl->expected_tpt[index] + 64) / 128)) {
+ IWL_ERR(priv, "expected_tpt should have been calculated by now\n");
+ window->average_tpt = ((window->success_ratio *
+ tbl->expected_tpt[index] + 64) / 128);
+ }
/* If we are searching for better modulation mode, check success. */
if (lq_sta->search_better_tbl &&
file:921dc4a26fe2eef00455865f996cf91d67d17bd3 -> file:166bedd3c615b497fc9b69580a2089e77dcd24a6
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -3203,23 +3203,63 @@ static struct pci_device_id iwl_hw_card_
{IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
#endif /* CONFIG_IWL4965 */
#ifdef CONFIG_IWL5000
- {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bg_cfg)},
- {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bg_cfg)},
- {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)},
- {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)},
- {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)},
- {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)},
- {IWL_PCI_DEVICE(0x4232, PCI_ANY_ID, iwl5100_agn_cfg)},
- {IWL_PCI_DEVICE(0x4235, PCI_ANY_ID, iwl5300_agn_cfg)},
- {IWL_PCI_DEVICE(0x4236, PCI_ANY_ID, iwl5300_agn_cfg)},
- {IWL_PCI_DEVICE(0x4237, PCI_ANY_ID, iwl5100_agn_cfg)},
-/* 5350 WiFi/WiMax */
- {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)},
- {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)},
- {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)},
-/* 5150 Wifi/WiMax */
- {IWL_PCI_DEVICE(0x423C, PCI_ANY_ID, iwl5150_agn_cfg)},
- {IWL_PCI_DEVICE(0x423D, PCI_ANY_ID, iwl5150_agn_cfg)},
+/* 5100 Series WiFi */
+ {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */
+
+/* 5300 Series WiFi */
+ {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */
+
+/* 5350 Series WiFi/WiMax */
+ {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */
+
+/* 5150 Series Wifi/WiMax */
+ {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
+
+ {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */
/* 6000/6050 Series */
{IWL_PCI_DEVICE(0x008D, PCI_ANY_ID, iwl6000h_2agn_cfg)},
{IWL_PCI_DEVICE(0x008E, PCI_ANY_ID, iwl6000h_2agn_cfg)},
file:0cd4ec40a7d8603f7666d748363981327247d8b6 -> file:4a4f7e49a8835ef0a91c53fedb53556602e1580d
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -1598,9 +1598,9 @@ EXPORT_SYMBOL(iwl_uninit_drv);
void iwl_free_isr_ict(struct iwl_priv *priv)
{
if (priv->ict_tbl_vir) {
- pci_free_consistent(priv->pci_dev, (sizeof(u32) * ICT_COUNT) +
- PAGE_SIZE, priv->ict_tbl_vir,
- priv->ict_tbl_dma);
+ dma_free_coherent(&priv->pci_dev->dev,
+ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
+ priv->ict_tbl_vir, priv->ict_tbl_dma);
priv->ict_tbl_vir = NULL;
}
}
@@ -1616,9 +1616,9 @@ int iwl_alloc_isr_ict(struct iwl_priv *p
if (priv->cfg->use_isr_legacy)
return 0;
/* allocate shared data table */
- priv->ict_tbl_vir = pci_alloc_consistent(priv->pci_dev, (sizeof(u32) *
- ICT_COUNT) + PAGE_SIZE,
- &priv->ict_tbl_dma);
+ priv->ict_tbl_vir = dma_alloc_coherent(&priv->pci_dev->dev,
+ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
+ &priv->ict_tbl_dma, GFP_KERNEL);
if (!priv->ict_tbl_vir)
return -ENOMEM;
@@ -2645,8 +2645,8 @@ int iwl_mac_config(struct ieee80211_hw *
if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
priv->staging_rxon.flags = 0;
- iwl_set_rxon_ht(priv, ht_conf);
iwl_set_rxon_channel(priv, conf->channel);
+ iwl_set_rxon_ht(priv, ht_conf);
iwl_set_flags_for_band(priv, conf->channel->band);
spin_unlock_irqrestore(&priv->lock, flags);
file:7754538c2194f3068019295270f1c6e4527a1b64 -> file:40ec0c148d1150a68b85c83c1c4ec28f1f0181ba
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -410,6 +410,8 @@ void iwl_hw_txq_ctx_free(struct iwl_priv
int iwl_hw_tx_queue_init(struct iwl_priv *priv,
struct iwl_tx_queue *txq);
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
+void iwl_free_tfds_in_queue(struct iwl_priv *priv,
+ int sta_id, int tid, int freed);
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
int slots_num, u32 txq_id);
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
file:cea2ee25722c400bb8ecf4ddb9617cee3a23afb3 -> file:3539ea4c9bd4d78e1cba4891f3600a3eefe548bb
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -53,9 +53,10 @@ extern struct iwl_cfg iwl4965_agn_cfg;
extern struct iwl_cfg iwl5300_agn_cfg;
extern struct iwl_cfg iwl5100_agn_cfg;
extern struct iwl_cfg iwl5350_agn_cfg;
-extern struct iwl_cfg iwl5100_bg_cfg;
+extern struct iwl_cfg iwl5100_bgn_cfg;
extern struct iwl_cfg iwl5100_abg_cfg;
extern struct iwl_cfg iwl5150_agn_cfg;
+extern struct iwl_cfg iwl5150_abg_cfg;
extern struct iwl_cfg iwl6000h_2agn_cfg;
extern struct iwl_cfg iwl6000i_2agn_cfg;
extern struct iwl_cfg iwl6000_3agn_cfg;
file:bd0b12efb5c7fdaa5aecd4de3271d6bdb48c47ac -> file:f8481e8bf04a75220609d1c9d824f8dc808a4d28
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -80,8 +80,8 @@ static inline void iwl_free_fw_desc(stru
struct fw_desc *desc)
{
if (desc->v_addr)
- pci_free_consistent(pci_dev, desc->len,
- desc->v_addr, desc->p_addr);
+ dma_free_coherent(&pci_dev->dev, desc->len,
+ desc->v_addr, desc->p_addr);
desc->v_addr = NULL;
desc->len = 0;
}
@@ -89,7 +89,8 @@ static inline void iwl_free_fw_desc(stru
static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev,
struct fw_desc *desc)
{
- desc->v_addr = pci_alloc_consistent(pci_dev, desc->len, &desc->p_addr);
+ desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
+ &desc->p_addr, GFP_KERNEL);
return (desc->v_addr != NULL) ? 0 : -ENOMEM;
}
file:493626bcd3ec53102f7d5d00c15cc5a7045588dd -> file:3198a8a7633e2e02f5b0881fe1ff29900a303441
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -345,10 +345,10 @@ void iwl_rx_queue_free(struct iwl_priv *
}
}
- pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->dma_addr);
- pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
+ dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->dma_addr);
+ dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
rxq->bd = NULL;
rxq->rb_stts = NULL;
}
@@ -357,7 +357,7 @@ EXPORT_SYMBOL(iwl_rx_queue_free);
int iwl_rx_queue_alloc(struct iwl_priv *priv)
{
struct iwl_rx_queue *rxq = &priv->rxq;
- struct pci_dev *dev = priv->pci_dev;
+ struct device *dev = &priv->pci_dev->dev;
int i;
spin_lock_init(&rxq->lock);
@@ -365,12 +365,13 @@ int iwl_rx_queue_alloc(struct iwl_priv *
INIT_LIST_HEAD(&rxq->rx_used);
/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
- rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
+ rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr,
+ GFP_KERNEL);
if (!rxq->bd)
goto err_bd;
- rxq->rb_stts = pci_alloc_consistent(dev, sizeof(struct iwl_rb_status),
- &rxq->rb_stts_dma);
+ rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
+ &rxq->rb_stts_dma, GFP_KERNEL);
if (!rxq->rb_stts)
goto err_rb;
@@ -387,8 +388,8 @@ int iwl_rx_queue_alloc(struct iwl_priv *
return 0;
err_rb:
- pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->dma_addr);
+ dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->dma_addr);
err_bd:
return -ENOMEM;
}
file:4f3a108fa9901e1c9ee314ea59020838dfb99120 -> file:db2946e0df5435d6675c4eb7edefea24993af644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -405,21 +405,6 @@ void iwl_init_scan_params(struct iwl_pri
static int iwl_scan_initiate(struct iwl_priv *priv)
{
- if (!iwl_is_ready_rf(priv)) {
- IWL_DEBUG_SCAN(priv, "Aborting scan due to not ready.\n");
- return -EIO;
- }
-
- if (test_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
- return -EAGAIN;
- }
-
- if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
- return -EAGAIN;
- }
-
IWL_DEBUG_INFO(priv, "Starting scan...\n");
set_bit(STATUS_SCANNING, &priv->status);
priv->scan_start = jiffies;
@@ -450,6 +435,18 @@ int iwl_mac_hw_scan(struct ieee80211_hw
goto out_unlock;
}
+ if (test_bit(STATUS_SCANNING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
/* We don't schedule scan within next_scan_jiffies period.
* Avoid scanning during possible EAPOL exchange, return
* success immediately.
@@ -802,6 +799,7 @@ void iwl_bg_abort_scan(struct work_struc
mutex_lock(&priv->mutex);
+ cancel_delayed_work_sync(&priv->scan_check);
set_bit(STATUS_SCAN_ABORTING, &priv->status);
iwl_send_scan_abort(priv);
file:b7e196e3c8d37e1671171e2cacecf86a99306dcc -> file:d21c06ea0ec3a85a00b800a7acb34f9b38a82dc9
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -60,7 +60,8 @@ static const u16 default_tid_to_tx_fifo[
static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
struct iwl_dma_ptr *ptr, size_t size)
{
- ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
+ ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
+ GFP_KERNEL);
if (!ptr->addr)
return -ENOMEM;
ptr->size = size;
@@ -73,7 +74,7 @@ static inline void iwl_free_dma_ptr(stru
if (unlikely(!ptr->addr))
return;
- pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
+ dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
memset(ptr, 0, sizeof(*ptr));
}
@@ -119,6 +120,20 @@ int iwl_txq_update_write_ptr(struct iwl_
EXPORT_SYMBOL(iwl_txq_update_write_ptr);
+void iwl_free_tfds_in_queue(struct iwl_priv *priv,
+ int sta_id, int tid, int freed)
+{
+ if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
+ priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ else {
+ IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
+ priv->stations[sta_id].tid[tid].tfds_in_queue,
+ freed);
+ priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
+ }
+}
+EXPORT_SYMBOL(iwl_free_tfds_in_queue);
+
/**
* iwl_tx_queue_free - Deallocate DMA queue.
* @txq: Transmit queue to deallocate.
@@ -131,7 +146,7 @@ void iwl_tx_queue_free(struct iwl_priv *
{
struct iwl_tx_queue *txq = &priv->txq[txq_id];
struct iwl_queue *q = &txq->q;
- struct pci_dev *dev = priv->pci_dev;
+ struct device *dev = &priv->pci_dev->dev;
int i, len;
if (q->n_bd == 0)
@@ -150,8 +165,8 @@ void iwl_tx_queue_free(struct iwl_priv *
/* De-alloc circular buffer of TFDs */
if (txq->q.n_bd)
- pci_free_consistent(dev, priv->hw_params.tfd_size *
- txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+ dma_free_coherent(dev, priv->hw_params.tfd_size *
+ txq->q.n_bd, txq->tfds, txq->q.dma_addr);
/* De-alloc array of per-TFD driver data */
kfree(txq->txb);
@@ -180,7 +195,7 @@ void iwl_cmd_queue_free(struct iwl_priv
{
struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
struct iwl_queue *q = &txq->q;
- struct pci_dev *dev = priv->pci_dev;
+ struct device *dev = &priv->pci_dev->dev;
int i, len;
if (q->n_bd == 0)
@@ -195,8 +210,8 @@ void iwl_cmd_queue_free(struct iwl_priv
/* De-alloc circular buffer of TFDs */
if (txq->q.n_bd)
- pci_free_consistent(dev, priv->hw_params.tfd_size *
- txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+ dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
+ txq->tfds, txq->q.dma_addr);
/* deallocate arrays */
kfree(txq->cmd);
@@ -287,7 +302,7 @@ static int iwl_queue_init(struct iwl_pri
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
struct iwl_tx_queue *txq, u32 id)
{
- struct pci_dev *dev = priv->pci_dev;
+ struct device *dev = &priv->pci_dev->dev;
size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
/* Driver private data, only for Tx (not command) queues,
@@ -306,8 +321,8 @@ static int iwl_tx_queue_alloc(struct iwl
/* Circular buffer of transmit frame descriptors (TFDs),
* shared with device */
- txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr);
-
+ txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
+ GFP_KERNEL);
if (!txq->tfds) {
IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
goto error;
@@ -776,8 +791,10 @@ int iwl_tx_skb(struct iwl_priv *priv, st
hdr->seq_ctrl |= cpu_to_le16(seq_number);
seq_number += 0x10;
/* aggregation is on for this <sta,tid> */
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+ priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
+ }
}
txq = &priv->txq[txq_id];
@@ -1057,6 +1074,7 @@ int iwl_tx_queue_reclaim(struct iwl_priv
struct iwl_queue *q = &txq->q;
struct iwl_tx_info *tx_info;
int nfreed = 0;
+ struct ieee80211_hdr *hdr;
if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
@@ -1071,13 +1089,16 @@ int iwl_tx_queue_reclaim(struct iwl_priv
tx_info = &txq->txb[txq->q.read_ptr];
ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
+
+ hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
+ if (hdr && ieee80211_is_data_qos(hdr->frame_control))
+ nfreed++;
tx_info->skb[0] = NULL;
if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
priv->cfg->ops->lib->txq_free_tfd(priv, txq);
- nfreed++;
}
return nfreed;
}
@@ -1254,7 +1275,7 @@ int iwl_tx_agg_stop(struct iwl_priv *pri
{
int tx_fifo_id, txq_id, sta_id, ssn = -1;
struct iwl_tid_data *tid_data;
- int ret, write_ptr, read_ptr;
+ int write_ptr, read_ptr;
unsigned long flags;
if (!ra) {
@@ -1306,13 +1327,17 @@ int iwl_tx_agg_stop(struct iwl_priv *pri
priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
spin_lock_irqsave(&priv->lock, flags);
- ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
+ /*
+ * the only reason this call can fail is a queue number out of range,
+ * which can happen if uCode is reloaded and all the station
+ * information is lost. If it is outside the range, there is no need
+ * to deactivate the uCode queue; just return "success" to allow
+ * mac80211 to clean up its own data.
+ */
+ priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
tx_fifo_id);
spin_unlock_irqrestore(&priv->lock, flags);
- if (ret)
- return ret;
-
ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);
return 0;
@@ -1454,6 +1479,11 @@ void iwl_rx_reply_compressed_ba(struct i
sta_id = ba_resp->sta_id;
tid = ba_resp->tid;
agg = &priv->stations[sta_id].tid[tid].agg;
+ if (unlikely(agg->txq_id != scd_flow)) {
+ IWL_ERR(priv, "BA scd_flow %d does not match txq_id %d\n",
+ scd_flow, agg->txq_id);
+ return;
+ }
/* Find index just before block-ack window */
index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
@@ -1485,7 +1515,7 @@ void iwl_rx_reply_compressed_ba(struct i
if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
/* calculate mac80211 ampdu sw queue to wake */
int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
priv->mac80211_registered &&
file:5f26c93d35167ab33d57b1275065dde5d65d4320 -> file:619590ddb0960db46c765797f4c84d212c40b726
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -356,10 +356,10 @@ static int iwl3945_send_beacon_cmd(struc
static void iwl3945_unset_hw_params(struct iwl_priv *priv)
{
if (priv->shared_virt)
- pci_free_consistent(priv->pci_dev,
- sizeof(struct iwl3945_shared),
- priv->shared_virt,
- priv->shared_phys);
+ dma_free_coherent(&priv->pci_dev->dev,
+ sizeof(struct iwl3945_shared),
+ priv->shared_virt,
+ priv->shared_phys);
}
static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
@@ -1272,10 +1272,10 @@ static void iwl3945_rx_queue_free(struct
}
}
- pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->dma_addr);
- pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
+ dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->dma_addr);
+ dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
rxq->bd = NULL;
rxq->rb_stts = NULL;
}
@@ -1904,7 +1904,7 @@ static void iwl3945_init_hw_rates(struct
{
int i;
- for (i = 0; i < IWL_RATE_COUNT; i++) {
+ for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
rates[i].bitrate = iwl3945_rates[i].ieee * 5;
rates[i].hw_value = i; /* Rate scaling will work on indexes */
rates[i].hw_value_short = i;
file:485a8d406525ccbea266cfe155cca8150ccdf967 -> file:f876d0251c92e4cd22c3e73f18d1f6b2456b1f19
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -34,6 +34,8 @@
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/host.h>
#include "host.h"
#include "decl.h"
@@ -883,6 +885,7 @@ static int if_sdio_probe(struct sdio_fun
int ret, i;
unsigned int model;
struct if_sdio_packet *packet;
+ struct mmc_host *host = func->card->host;
lbs_deb_enter(LBS_DEB_SDIO);
@@ -963,6 +966,25 @@ static int if_sdio_probe(struct sdio_fun
if (ret)
goto disable;
+ /* For 1-bit transfers to the 8686 model, we need to enable the
+ * interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0
+ * bit to allow access to non-vendor registers. */
+ if ((card->model == IF_SDIO_MODEL_8686) &&
+ (host->caps & MMC_CAP_SDIO_IRQ) &&
+ (host->ios.bus_width == MMC_BUS_WIDTH_1)) {
+ u8 reg;
+
+ func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
+ reg = sdio_f0_readb(func, SDIO_CCCR_IF, &ret);
+ if (ret)
+ goto release_int;
+
+ reg |= SDIO_BUS_ECSI;
+ sdio_f0_writeb(func, reg, SDIO_CCCR_IF, &ret);
+ if (ret)
+ goto release_int;
+ }
+
card->ioport = sdio_readb(func, IF_SDIO_IOPORT, &ret);
if (ret)
goto release_int;
file:0efe67deedee2429fadd3fbeb6f5da0bb17734c0 -> file:8e3818f6832e12987b6294eb4fdb304e0454ede1
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -126,7 +126,7 @@ static int p54_generate_band(struct ieee
int ret = -ENOMEM;
if ((!list->entries) || (!list->band_channel_num[band]))
- return 0;
+ return -EINVAL;
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
@@ -158,6 +158,7 @@ static int p54_generate_band(struct ieee
(list->channels[i].data & CHAN_HAS_CURVE ? "" :
" [curve data]"),
list->channels[i].index, list->channels[i].freq);
+ continue;
}
tmp->channels[j].band = list->channels[i].band;
@@ -165,7 +166,16 @@ static int p54_generate_band(struct ieee
j++;
}
- tmp->n_channels = list->band_channel_num[band];
+ if (j == 0) {
+ printk(KERN_ERR "%s: Disabling totally damaged %s band.\n",
+ wiphy_name(dev->wiphy), (band == IEEE80211_BAND_2GHZ) ?
+ "2 GHz" : "5 GHz");
+
+ ret = -ENODATA;
+ goto err_out;
+ }
+
+ tmp->n_channels = j;
old = priv->band_table[band];
priv->band_table[band] = tmp;
if (old) {
@@ -228,13 +238,13 @@ static int p54_generate_channel_lists(st
struct p54_common *priv = dev->priv;
struct p54_channel_list *list;
unsigned int i, j, max_channel_num;
- int ret = -ENOMEM;
+ int ret = 0;
u16 freq;
if ((priv->iq_autocal_len != priv->curve_data->entries) ||
(priv->iq_autocal_len != priv->output_limit->entries))
- printk(KERN_ERR "%s: EEPROM is damaged... you may not be able"
- "to use all channels with this device.\n",
+ printk(KERN_ERR "%s: Unsupported or damaged EEPROM detected. "
+ "You may not be able to use all channels.\n",
wiphy_name(dev->wiphy));
max_channel_num = max_t(unsigned int, priv->output_limit->entries,
@@ -243,8 +253,10 @@ static int p54_generate_channel_lists(st
priv->curve_data->entries);
list = kzalloc(sizeof(*list), GFP_KERNEL);
- if (!list)
+ if (!list) {
+ ret = -ENOMEM;
goto free;
+ }
list->max_entries = max_channel_num;
list->channels = kzalloc(sizeof(struct p54_channel_entry) *
@@ -282,13 +294,8 @@ static int p54_generate_channel_lists(st
p54_compare_channels, NULL);
for (i = 0, j = 0; i < IEEE80211_NUM_BANDS; i++) {
- if (list->band_channel_num[i]) {
- ret = p54_generate_band(dev, list, i);
- if (ret)
- goto free;
-
+ if (p54_generate_band(dev, list, i) == 0)
j++;
- }
}
if (j == 0) {
/* no useable band available. */
file:d348c265e867c9384812b0a803e5aaae740732ef -> file:3df13f590182a748aed9d8da8e1af810406d7844
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -40,6 +40,8 @@ static struct pci_device_id p54p_table[]
{ PCI_DEVICE(0x1260, 0x3877) },
/* Intersil PRISM Javelin/Xbow Wireless LAN adapter */
{ PCI_DEVICE(0x1260, 0x3886) },
+ /* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */
+ { PCI_DEVICE(0x1260, 0xffff) },
{ },
};
@@ -157,6 +159,14 @@ static void p54p_refill_rx_ring(struct i
skb_tail_pointer(skb),
priv->common.rx_mtu + 32,
PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ dev_kfree_skb_any(skb);
+ dev_err(&priv->pdev->dev,
+ "RX DMA Mapping error\n");
+ break;
+ }
+
desc->host_addr = cpu_to_le32(mapping);
desc->device_addr = 0; // FIXME: necessary?
desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
@@ -197,6 +207,14 @@ static void p54p_check_rx_ring(struct ie
i %= ring_limit;
continue;
}
+
+ if (unlikely(len > priv->common.rx_mtu)) {
+ if (net_ratelimit())
+ dev_err(&priv->pdev->dev, "rx'd frame size "
+ "exceeds length threshold.\n");
+
+ len = priv->common.rx_mtu;
+ }
skb_put(skb, len);
if (p54_rx(dev, skb)) {
@@ -229,7 +247,7 @@ static void p54p_check_tx_ring(struct ie
u32 idx, i;
i = (*index) % ring_limit;
- (*index) = idx = le32_to_cpu(ring_control->device_idx[1]);
+ (*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
idx %= ring_limit;
while (i != idx) {
@@ -317,14 +335,20 @@ static void p54p_tx(struct ieee80211_hw
u32 device_idx, idx, i;
spin_lock_irqsave(&priv->lock, flags);
-
device_idx = le32_to_cpu(ring_control->device_idx[1]);
idx = le32_to_cpu(ring_control->host_idx[1]);
i = idx % ARRAY_SIZE(ring_control->tx_data);
- priv->tx_buf_data[i] = skb;
mapping = pci_map_single(priv->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ p54_free_skb(dev, skb);
+ dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
+ return ;
+ }
+ priv->tx_buf_data[i] = skb;
+
desc = &ring_control->tx_data[i];
desc->host_addr = cpu_to_le32(mapping);
desc->device_addr = ((struct p54_hdr *)skb->data)->req_id;
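The p54pci hunks add pci_dma_mapping_error() checks after each pci_map_single(), so a failed streaming mapping is never handed to the device. The general shape of the pattern, sketched with illustrative names:

#include <linux/pci.h>

/* illustrative sketch: map, verify, use, unmap */
static int example_map_tx(struct pci_dev *pdev, void *data, size_t len)
{
	dma_addr_t mapping;

	mapping = pci_map_single(pdev, data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, mapping))
		return -EIO;	/* drop the frame instead of programming a bad address */

	/* ... write 'mapping' into the Tx descriptor and ring the doorbell;
	 * a real driver defers the matching unmap to the Tx-completion path ... */

	pci_unmap_single(pdev, mapping, len, PCI_DMA_TODEVICE);
	return 0;
}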
file:92af9b96bb7a2bb51f3adba3316db4ca86bf1beb -> file:805284df3896be8fd0272646964fa52315c442d9
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -36,6 +36,7 @@ static struct usb_device_id p54u_table[]
/* Version 1 devices (pci chip + net2280) */
{USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */
{USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */
+ {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */
{USB_DEVICE(0x083a, 0x4501)}, /* Accton 802.11g WN4501 USB */
{USB_DEVICE(0x083a, 0x4502)}, /* Siemens Gigaset USB Adapter */
{USB_DEVICE(0x083a, 0x5501)}, /* Phillips CPWUA054 */
@@ -60,6 +61,7 @@ static struct usb_device_id p54u_table[]
{USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
{USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
{USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
+ {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */
{USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */
{USB_DEVICE(0x0915, 0x2000)}, /* Cohiba Proto board */
{USB_DEVICE(0x0915, 0x2002)}, /* Cohiba Proto board */
@@ -76,6 +78,7 @@ static struct usb_device_id p54u_table[]
{USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */
{USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
{USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
+ {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */
{USB_DEVICE(0x413c, 0x8102)}, /* Spinnaker DUT */
{USB_DEVICE(0x413c, 0x8104)}, /* Cohiba Proto board */
{}
file:b6dda2b27fb526cda78fe1f07d59808c8a84bbbf -> file:9d147ded8741556e81124497c196d480c8b17dc5
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -186,7 +186,7 @@ static int p54_tx_qos_accounting_alloc(s
struct ieee80211_tx_queue_stats *queue;
unsigned long flags;
- if (WARN_ON(p54_queue > P54_QUEUE_NUM))
+ if (WARN_ON(p54_queue >= P54_QUEUE_NUM))
return -EINVAL;
queue = &priv->tx_stats[p54_queue];
file:54175b6fa86c2937342ee52c52bd3c47c99a1dcd -> file:2ecbedb26e152c28e697ac8927be151934ee4144
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -83,11 +83,11 @@ MODULE_PARM_DESC(roamdelta,
"set roaming tendency: 0=aggressive, 1=moderate, "
"2=conservative (default: moderate)");
-static int modparam_workaround_interval = 500;
+static int modparam_workaround_interval;
module_param_named(workaround_interval, modparam_workaround_interval,
int, 0444);
MODULE_PARM_DESC(workaround_interval,
- "set stall workaround interval in msecs (default: 500)");
+ "set stall workaround interval in msecs (0=disabled) (default: 0)");
/* various RNDIS OID defs */
@@ -733,12 +733,13 @@ static int rndis_query_oid(struct usbnet
le32_to_cpu(u.get_c->status));
if (ret == 0) {
+ memcpy(data, u.buf + le32_to_cpu(u.get_c->offset) + 8, *len);
+
ret = le32_to_cpu(u.get_c->len);
if (ret > *len)
*len = ret;
- memcpy(data, u.buf + le32_to_cpu(u.get_c->offset) + 8, *len);
- ret = rndis_error_status(u.get_c->status);
+ ret = rndis_error_status(u.get_c->status);
if (ret < 0)
devdbg(dev, "rndis_query_oid(%s): device returned "
"error, 0x%08x (%d)", oid_to_string(oid),
@@ -1072,6 +1073,8 @@ static int set_auth_mode(struct usbnet *
auth_mode = NDIS_80211_AUTH_SHARED;
else if (auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM)
auth_mode = NDIS_80211_AUTH_OPEN;
+ else if (auth_type == NL80211_AUTHTYPE_AUTOMATIC)
+ auth_mode = NDIS_80211_AUTH_AUTO_SWITCH;
else
return -ENOTSUPP;
@@ -2547,7 +2550,7 @@ static void rndis_device_poller(struct w
/* Workaround transfer stalls on poor quality links.
* TODO: find right way to fix these stalls (as stalls do not happen
* with ndiswrapper/windows driver). */
- if (priv->last_qual <= 25) {
+ if (priv->param_workaround_interval > 0 && priv->last_qual <= 25) {
/* Decrease stats worker interval to catch stalls.
* faster. Faster than 400-500ms causes packet loss,
* Slower doesn't catch stalls fast enough.
file:16429c49139c2da3953a6d5fef836f2f093b1443 -> file:c6ca7ee16e8425d7ce32878cdd77e8791194127b
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -190,6 +190,7 @@ static void rtl8180_handle_tx(struct iee
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.rates[0].count = (flags & 0xFF) + 1;
+ info->status.rates[1].idx = -1;
ieee80211_tx_status_irqsafe(dev, skb);
if (ring->entries - skb_queue_len(&ring->queue) == 2)
file:a00723059f83f3ef497d1649e082f36103a6d93e -> file:1685c09c85892844134b1e93d86e5cfded29a89d
--- a/drivers/net/wireless/wl12xx/wl1251_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
@@ -443,7 +443,8 @@ out:
void wl1251_debugfs_reset(struct wl1251 *wl)
{
- memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
+ if (wl->stats.fw_stats != NULL)
+ memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
wl->stats.retry_count = 0;
wl->stats.excessive_retries = 0;
}
file:9423f22bdcedcda262bde981d9b8da6698412179 -> file:d74b89bbda83390d05b5529ab451b34ed05f30cc
--- a/drivers/net/wireless/wl12xx/wl1251_sdio.c
+++ b/drivers/net/wireless/wl12xx/wl1251_sdio.c
@@ -160,6 +160,7 @@ disable:
sdio_disable_func(func);
release:
sdio_release_host(func);
+ wl1251_free_hw(wl);
return ret;
}
file:a7aae24f2889a4578ebd90dad52ae72aab916116 -> file:1f1f5a8016a237b57e1d4aa01acf742718ba2659
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -30,23 +30,7 @@
#define OP_BUFFER_FLAGS 0
-/*
- * Read and write access is using spin locking. Thus, writing to the
- * buffer by NMI handler (x86) could occur also during critical
- * sections when reading the buffer. To avoid this, there are 2
- * buffers for independent read and write access. Read access is in
- * process context only, write access only in the NMI handler. If the
- * read buffer runs empty, both buffers are swapped atomically. There
- * is potentially a small window during swapping where the buffers are
- * disabled and samples could be lost.
- *
- * Using 2 buffers is a little bit overhead, but the solution is clear
- * and does not require changes in the ring buffer implementation. It
- * can be changed to a single buffer solution when the ring buffer
- * access is implemented as non-locking atomic code.
- */
-static struct ring_buffer *op_ring_buffer_read;
-static struct ring_buffer *op_ring_buffer_write;
+static struct ring_buffer *op_ring_buffer;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
static void wq_sync_buffer(struct work_struct *work);
@@ -69,12 +53,9 @@ void oprofile_cpu_buffer_inc_smpl_lost(v
void free_cpu_buffers(void)
{
- if (op_ring_buffer_read)
- ring_buffer_free(op_ring_buffer_read);
- op_ring_buffer_read = NULL;
- if (op_ring_buffer_write)
- ring_buffer_free(op_ring_buffer_write);
- op_ring_buffer_write = NULL;
+ if (op_ring_buffer)
+ ring_buffer_free(op_ring_buffer);
+ op_ring_buffer = NULL;
}
#define RB_EVENT_HDR_SIZE 4
@@ -87,11 +68,8 @@ int alloc_cpu_buffers(void)
unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
RB_EVENT_HDR_SIZE);
- op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
- if (!op_ring_buffer_read)
- goto fail;
- op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
- if (!op_ring_buffer_write)
+ op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
+ if (!op_ring_buffer)
goto fail;
for_each_possible_cpu(i) {
@@ -163,16 +141,11 @@ struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
{
entry->event = ring_buffer_lock_reserve
- (op_ring_buffer_write, sizeof(struct op_sample) +
+ (op_ring_buffer, sizeof(struct op_sample) +
size * sizeof(entry->sample->data[0]));
- if (entry->event)
- entry->sample = ring_buffer_event_data(entry->event);
- else
- entry->sample = NULL;
-
- if (!entry->sample)
+ if (!entry->event)
return NULL;
-
+ entry->sample = ring_buffer_event_data(entry->event);
entry->size = size;
entry->data = entry->sample->data;
@@ -181,25 +154,16 @@ struct op_sample
int op_cpu_buffer_write_commit(struct op_entry *entry)
{
- return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event);
+ return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
}
struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
struct ring_buffer_event *e;
- e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
- if (e)
- goto event;
- if (ring_buffer_swap_cpu(op_ring_buffer_read,
- op_ring_buffer_write,
- cpu))
+ e = ring_buffer_consume(op_ring_buffer, cpu, NULL);
+ if (!e)
return NULL;
- e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
- if (e)
- goto event;
- return NULL;
-event:
entry->event = e;
entry->sample = ring_buffer_event_data(e);
entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
@@ -210,8 +174,7 @@ event:
unsigned long op_cpu_buffer_entries(int cpu)
{
- return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
- + ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
+ return ring_buffer_entries_cpu(op_ring_buffer, cpu);
}
static int
file:c1abac8ab5c326847ca8cca0c2eb33b1fcae4e1e -> file:5becbdee4027019a5c584df5ff5fb3cc99b04cc3
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -245,7 +245,7 @@ static void __init print_ebda_hpc (void)
int __init ibmphp_access_ebda (void)
{
- u8 format, num_ctlrs, rio_complete, hs_complete;
+ u8 format, num_ctlrs, rio_complete, hs_complete, ebda_sz;
u16 ebda_seg, num_entries, next_offset, offset, blk_id, sub_addr, re, rc_id, re_id, base;
int rc = 0;
@@ -260,7 +260,16 @@ int __init ibmphp_access_ebda (void)
iounmap (io_mem);
debug ("returned ebda segment: %x\n", ebda_seg);
- io_mem = ioremap(ebda_seg<<4, 1024);
+ io_mem = ioremap(ebda_seg<<4, 1);
+ if (!io_mem)
+ return -ENOMEM;
+ ebda_sz = readb(io_mem);
+ iounmap(io_mem);
+ debug("ebda size: %d(KiB)\n", ebda_sz);
+ if (ebda_sz == 0)
+ return -ENOMEM;
+
+ io_mem = ioremap(ebda_seg<<4, (ebda_sz * 1024));
if (!io_mem )
return -ENOMEM;
next_offset = 0x180;
file:64777220a719877c0d9aec17a133a050948cdf26 -> file:812d4ac6bd2fbe4ed04e28f010ebaea8508a7c65
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -601,7 +601,7 @@ static void __pci_start_power_transition
*/
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
- return state > PCI_D0 ?
+ return state >= PCI_D0 ?
pci_platform_power_transition(dev, state) : -EINVAL;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
@@ -638,10 +638,6 @@ int pci_set_power_state(struct pci_dev *
*/
return 0;
- /* Check if we're already there */
- if (dev->current_state == state)
- return 0;
-
__pci_start_power_transition(dev, state);
/* This device is quirked not to be put into D3, so
@@ -2050,6 +2046,7 @@ void pci_msi_off(struct pci_dev *dev)
pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}
}
+EXPORT_SYMBOL_GPL(pci_msi_off);
#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
/*
@@ -2350,18 +2347,17 @@ EXPORT_SYMBOL_GPL(pci_reset_function);
*/
int pcix_get_max_mmrbc(struct pci_dev *dev)
{
- int err, cap;
+ int cap;
u32 stat;
cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
if (!cap)
return -EINVAL;
- err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
- if (err)
+ if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
return -EINVAL;
- return (stat & PCI_X_STATUS_MAX_READ) >> 12;
+ return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
}
EXPORT_SYMBOL(pcix_get_max_mmrbc);
@@ -2374,18 +2370,17 @@ EXPORT_SYMBOL(pcix_get_max_mmrbc);
*/
int pcix_get_mmrbc(struct pci_dev *dev)
{
- int ret, cap;
- u32 cmd;
+ int cap;
+ u16 cmd;
cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
if (!cap)
return -EINVAL;
- ret = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
- if (!ret)
- ret = 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
+ if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
+ return -EINVAL;
- return ret;
+ return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
}
EXPORT_SYMBOL(pcix_get_mmrbc);
@@ -2400,28 +2395,27 @@ EXPORT_SYMBOL(pcix_get_mmrbc);
*/
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
{
- int cap, err = -EINVAL;
- u32 stat, cmd, v, o;
+ int cap;
+ u32 stat, v, o;
+ u16 cmd;
if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
- goto out;
+ return -EINVAL;
v = ffs(mmrbc) - 10;
cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
if (!cap)
- goto out;
+ return -EINVAL;
- err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
- if (err)
- goto out;
+ if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
+ return -EINVAL;
if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
return -E2BIG;
- err = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
- if (err)
- goto out;
+ if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
+ return -EINVAL;
o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
if (o != v) {
@@ -2431,10 +2425,10 @@ int pcix_set_mmrbc(struct pci_dev *dev,
cmd &= ~PCI_X_CMD_MAX_READ;
cmd |= v << 2;
- err = pci_write_config_dword(dev, cap + PCI_X_CMD, cmd);
+ if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
+ return -EIO;
}
-out:
- return err;
+ return 0;
}
EXPORT_SYMBOL(pcix_set_mmrbc);
@@ -2544,6 +2538,23 @@ int pci_resource_bar(struct pci_dev *dev
return 0;
}
+/* Some architectures require additional programming to enable VGA */
+static arch_set_vga_state_t arch_set_vga_state;
+
+void __init pci_register_set_vga_state(arch_set_vga_state_t func)
+{
+ arch_set_vga_state = func; /* NULL disables */
+}
+
+static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
+ unsigned int command_bits, bool change_bridge)
+{
+ if (arch_set_vga_state)
+ return arch_set_vga_state(dev, decode, command_bits,
+ change_bridge);
+ return 0;
+}
+
/**
* pci_set_vga_state - set VGA decode state on device and parents if requested
* @dev: the PCI device
@@ -2557,9 +2568,15 @@ int pci_set_vga_state(struct pci_dev *de
struct pci_bus *bus;
struct pci_dev *bridge;
u16 cmd;
+ int rc;
WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
+ /* ARCH specific VGA enables */
+ rc = pci_set_vga_state_arch(dev, decode, command_bits, change_bridge);
+ if (rc)
+ return rc;
+
pci_read_config_word(dev, PCI_COMMAND, &cmd);
if (decode == true)
cmd |= command_bits;
@@ -2806,4 +2823,3 @@ EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
-
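The PCI-X read-byte-count fixes above hinge on decoding the fields correctly: the maximum memory read byte count is stored as a 2-bit exponent meaning 512 << field bytes, at bits 21-22 of the 32-bit PCIX status register and bits 2-3 of the 16-bit PCIX command register (hence the switch from pci_read_config_dword to pci_read_config_word for the command). A worked decode/encode with illustrative helper names:

/* field value 0..3 corresponds to 512, 1024, 2048 or 4096 bytes */
static unsigned int example_mmrbc_decode(unsigned int field)
{
	return 512U << field;
}

static int example_mmrbc_encode(unsigned int bytes)
{
	/* must be a power of two between 512 and 4096 */
	if (bytes < 512 || bytes > 4096 || (bytes & (bytes - 1)))
		return -1;
	/* ffs(512) == 10, so ffs(bytes) - 10 yields the 2-bit field value */
	return ffs(bytes) - 10;
}

So 4096 encodes to 3 and decodes back via 512 << 3, which matches the v = ffs(mmrbc) - 10 and 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21) expressions in the patch.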
file:0d91a8a4d278a9d38b72c74a87c148b269f0dbea -> file:b8fb987e7600e753e3e4a71ebecbb4508404d7dd
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -302,7 +302,7 @@ static int aer_inject(struct aer_error_i
unsigned long flags;
unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
int pos_cap_err, rp_pos_cap_err;
- u32 sever;
+ u32 sever, cor_mask, uncor_mask;
int ret = 0;
dev = pci_get_bus_and_slot(einj->bus, devfn);
@@ -320,6 +320,9 @@ static int aer_inject(struct aer_error_i
goto out_put;
}
pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
+ pci_read_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, &cor_mask);
+ pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK,
+ &uncor_mask);
rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
if (!rp_pos_cap_err) {
@@ -354,6 +357,21 @@ static int aer_inject(struct aer_error_i
err->header_log2 = einj->header_log2;
err->header_log3 = einj->header_log3;
+ if (einj->cor_status && !(einj->cor_status & ~cor_mask)) {
+ ret = -EINVAL;
+ printk(KERN_WARNING "The correctable error(s) is masked "
+ "by device\n");
+ spin_unlock_irqrestore(&inject_lock, flags);
+ goto out_put;
+ }
+ if (einj->uncor_status && !(einj->uncor_status & ~uncor_mask)) {
+ ret = -EINVAL;
+ printk(KERN_WARNING "The uncorrectable error(s) is masked "
+ "by device\n");
+ spin_unlock_irqrestore(&inject_lock, flags);
+ goto out_put;
+ }
+
rperr = __find_aer_error_by_dev(rpdev);
if (!rperr) {
rperr = rperr_alloc;
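The aer_inject change refuses to inject an error when every requested status bit is masked by the device, since such an error could never be reported upstream. The double negative in the patch ("status set, but nothing survives the mask") is easier to read inverted; a small illustrative helper:

#include <linux/types.h>

/* illustrative: true when at least one requested error bit is not masked off */
static bool example_error_visible(u32 requested, u32 mask)
{
	return requested && (requested & ~mask);
}

aer_inject() above returns -EINVAL when einj->cor_status is non-zero yet example_error_visible(einj->cor_status, cor_mask) is false, and likewise for the uncorrectable status/mask pair.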
file:9f5ccbeb4fa5fcef281eb2ed3d3a079ae885909e -> file:72fa87c095d8c6752232e070c70dd96f74ed9111
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -78,19 +78,15 @@ EXPORT_SYMBOL_GPL(pci_disable_pcie_error
int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
int pos;
- u32 status, mask;
+ u32 status;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
if (!pos)
return -EIO;
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
- if (dev->error_state == pci_channel_io_normal)
- status &= ~mask; /* Clear corresponding nonfatal bits */
- else
- status &= mask; /* Clear corresponding fatal bits */
- pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+ if (status)
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
return 0;
}
file:245d2cdb47651d4d095edd847417b5251565fc69 -> file:c0de0b999c9128001489f2d4818709983bbe3bc9
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1444,7 +1444,8 @@ static void quirk_jmicron_ata(struct pci
conf5 &= ~(1 << 24); /* Clear bit 24 */
switch (pdev->device) {
- case PCI_DEVICE_ID_JMICRON_JMB360:
+ case PCI_DEVICE_ID_JMICRON_JMB360: /* SATA single port */
+ case PCI_DEVICE_ID_JMICRON_JMB362: /* SATA dual ports */
/* The controller should be in single function ahci mode */
conf1 |= 0x0002A100; /* Set 8, 13, 15, 17 */
break;
@@ -1480,12 +1481,14 @@ static void quirk_jmicron_ata(struct pci
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
@@ -2092,6 +2095,8 @@ static void __devinit quirk_disable_msi(
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
/* Go through the list of Hypertransport capabilities and
* return 1 if a HT MSI capability is found and enabled */
@@ -2183,15 +2188,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
ht_enable_msi_mapping);
-/* The P5N32-SLI Premium motherboard from Asus has a problem with msi
+/* The P5N32-SLI motherboards from Asus have a problem with msi
* for the MCP55 NIC. It is not yet determined whether the msi problem
* also affects other devices. As for now, turn off msi for this device.
*/
static void __devinit nvenet_msi_disable(struct pci_dev *dev)
{
- if (dmi_name_in_vendors("P5N32-SLI PREMIUM")) {
+ if (dmi_name_in_vendors("P5N32-SLI PREMIUM") ||
+ dmi_name_in_vendors("P5N32-E SLI")) {
dev_info(&dev->dev,
- "Disabling msi for MCP55 NIC on P5N32-SLI Premium\n");
+ "Disabling msi for MCP55 NIC on P5N32-SLI\n");
dev->no_msi = 1;
}
}
@@ -2513,6 +2519,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_I
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1518, quirk_i82576_sriov);
#endif /* CONFIG_PCI_IOV */
file:4226e535273874fb06aea0344c4c8ce00a8b957c -> file:c533b1c6556c9565fa50ca84e7942af6439ac794
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -34,6 +34,7 @@
#include <linux/rfkill.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
+#include <linux/dmi.h>
#define EEEPC_LAPTOP_VERSION "0.1"
@@ -135,6 +136,8 @@ struct eeepc_hotk {
acpi_handle handle; /* the handle of the hotk device */
u32 cm_supported; /* the control methods supported
by this BIOS */
+ bool cpufv_disabled;
+ bool hotplug_disabled;
uint init_flag; /* Init flags */
u16 event_count[128]; /* count for each event */
struct input_dev *inputdev;
@@ -251,6 +254,14 @@ MODULE_AUTHOR("Corentin Chary, Eric Coop
MODULE_DESCRIPTION(EEEPC_HOTK_NAME);
MODULE_LICENSE("GPL");
+static bool hotplug_disabled;
+
+module_param(hotplug_disabled, bool, 0644);
+MODULE_PARM_DESC(hotplug_disabled,
+ "Disable hotplug for wireless device. "
+ "If your laptop need that, please report to "
+ "acpi4asus-user@lists.sourceforge.net.");
+
/*
* ACPI Helpers
*/
@@ -467,6 +478,8 @@ static ssize_t store_cpufv(struct device
struct eeepc_cpufv c;
int rv, value;
+ if (ehotk->cpufv_disabled)
+ return -EPERM;
if (get_cpufv(&c))
return -ENODEV;
rv = parse_arg(buf, count, &value);
@@ -478,6 +491,38 @@ static ssize_t store_cpufv(struct device
return rv;
}
+static ssize_t show_cpufv_disabled(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", ehotk->cpufv_disabled);
+}
+
+static ssize_t store_cpufv_disabled(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rv, value;
+
+ rv = parse_arg(buf, count, &value);
+ if (rv < 0)
+ return rv;
+
+ switch (value) {
+ case 0:
+ if (ehotk->cpufv_disabled)
+ pr_warning("cpufv enabled (not officially supported "
+ "on this model)\n");
+ ehotk->cpufv_disabled = false;
+ return rv;
+ case 1:
+ return -EPERM;
+ default:
+ return -EINVAL;
+ }
+}
+
+
static struct device_attribute dev_attr_cpufv = {
.attr = {
.name = "cpufv",
@@ -493,12 +538,22 @@ static struct device_attribute dev_attr_
.show = show_available_cpufv
};
+static struct device_attribute dev_attr_cpufv_disabled = {
+ .attr = {
+ .name = "cpufv_disabled",
+ .mode = 0644 },
+ .show = show_cpufv_disabled,
+ .store = store_cpufv_disabled
+};
+
+
static struct attribute *platform_attributes[] = {
&dev_attr_camera.attr,
&dev_attr_cardr.attr,
&dev_attr_disp.attr,
&dev_attr_cpufv.attr,
&dev_attr_available_cpufv.attr,
+ &dev_attr_cpufv_disabled.attr,
NULL
};
@@ -564,6 +619,54 @@ static int eeepc_setkeycode(struct input
return -EINVAL;
}
+static void eeepc_dmi_check(void)
+{
+ const char *model;
+
+ model = dmi_get_system_info(DMI_PRODUCT_NAME);
+ if (!model)
+ return;
+
+ /*
+ * Blacklist for setting cpufv (cpu speed).
+ *
+ * EeePC 4G ("701") implements CFVS, but it is not supported
+ * by the pre-installed OS, and the original option to change it
+ * in the BIOS setup screen was removed in later versions.
+ *
+ * Judging by the lack of "Super Hybrid Engine" on Asus product pages,
+ * this applies to all "701" models (4G/4G Surf/2G Surf).
+ *
+ * So Asus made a deliberate decision not to support it on this model.
+ * We have several reports that using it can cause the system to hang
+ *
+ * The hang has also been reported on a "702" (Model name "8G"?).
+ *
+ * We avoid dmi_check_system() / dmi_match(), because they use
+ * substring matching. We don't want to affect the "701SD"
+ * and "701SDX" models, because they do support S.H.E.
+ */
+ if (strcmp(model, "701") == 0 || strcmp(model, "702") == 0) {
+ ehotk->cpufv_disabled = true;
+ pr_info("model %s does not officially support setting cpu "
+ "speed\n", model);
+ pr_info("cpufv disabled to avoid instability\n");
+ }
+
+ /*
+ * Blacklist for wlan hotplug
+ *
+ * The Eeepc 1005HA doesn't work like other models and doesn't need the
+ * hotplug code. In fact, the current hotplug code seems to unplug another
+ * device...
+ */
+ if (strcmp(model, "1005HA") == 0 || strcmp(model, "1201N") == 0 ||
+ strcmp(model, "1005PE") == 0) {
+ ehotk->hotplug_disabled = true;
+ pr_info("wlan hotplug disabled\n");
+ }
+}
+
static void cmsg_quirk(int cm, const char *name)
{
int dummy;
@@ -649,6 +752,8 @@ static void eeepc_rfkill_hotplug(void)
struct pci_dev *dev;
struct pci_bus *bus;
bool blocked = eeepc_wlan_rfkill_blocked();
+ bool absent;
+ u32 l;
if (ehotk->wlan_rfkill)
rfkill_set_sw_state(ehotk->wlan_rfkill, blocked);
@@ -662,6 +767,22 @@ static void eeepc_rfkill_hotplug(void)
goto out_unlock;
}
+ if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) {
+ pr_err("Unable to read PCI config space?\n");
+ goto out_unlock;
+ }
+ absent = (l == 0xffffffff);
+
+ if (blocked != absent) {
+ pr_warning("BIOS says wireless lan is %s, "
+ "but the pci device is %s\n",
+ blocked ? "blocked" : "unblocked",
+ absent ? "absent" : "present");
+ pr_warning("skipped wireless hotplug as probably "
+ "inappropriate for this model\n");
+ goto out_unlock;
+ }
+
if (!blocked) {
dev = pci_get_slot(bus, 0);
if (dev) {
@@ -1095,6 +1216,9 @@ static int eeepc_rfkill_init(struct devi
if (result && result != -ENODEV)
goto exit;
+ if (ehotk->hotplug_disabled)
+ return 0;
+
result = eeepc_setup_pci_hotplug();
/*
* If we get -EBUSY then something else is handling the PCI hotplug -
@@ -1208,6 +1332,10 @@ static int __devinit eeepc_hotk_add(stru
device->driver_data = ehotk;
ehotk->device = device;
+ ehotk->hotplug_disabled = hotplug_disabled;
+
+ eeepc_dmi_check();
+
result = eeepc_hotk_check();
if (result)
goto fail_platform_driver;
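The rfkill hotplug hunk cross-checks what the BIOS reports against whether anything actually answers on the PCI bus: reading the vendor/device ID dword of an absent function returns all ones, because nothing drives the bus. A minimal sketch of that presence test (bus and devfn are whatever slot the platform wires the wlan card to; the helper name is illustrative):

#include <linux/pci.h>

static bool example_slot_present(struct pci_bus *bus, unsigned int devfn)
{
	u32 l;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
		return false;		/* the config access itself failed */

	/* an absent device reads back as 0xffffffff */
	return l != 0xffffffff;
}

When the BIOS rfkill state and this presence test disagree, the patch skips the hotplug handling rather than unplugging the wrong device.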
file:1ee734c14cc11cc406209dde4fd3350765ed48f2 -> file:7e51d5be8cc07fd65c7e5df66992b156c5b5d6f7
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -22,7 +22,7 @@
*/
#define TPACPI_VERSION "0.23"
-#define TPACPI_SYSFS_VERSION 0x020500
+#define TPACPI_SYSFS_VERSION 0x020600
/*
* Changelog:
@@ -61,6 +61,7 @@
#include <linux/nvram.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/sysfs.h>
#include <linux/backlight.h>
#include <linux/fb.h>
@@ -256,7 +257,7 @@ struct tp_acpi_drv_struct {
struct ibm_struct {
char *name;
- int (*read) (char *);
+ int (*read) (struct seq_file *);
int (*write) (char *);
void (*exit) (void);
void (*resume) (void);
@@ -280,6 +281,7 @@ struct ibm_init_struct {
char param[32];
int (*init) (struct ibm_init_struct *);
+ mode_t base_procfs_mode;
struct ibm_struct *data;
};
@@ -776,36 +778,25 @@ static int __init register_tpacpi_subdri
****************************************************************************
****************************************************************************/
-static int dispatch_procfs_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int dispatch_proc_show(struct seq_file *m, void *v)
{
- struct ibm_struct *ibm = data;
- int len;
+ struct ibm_struct *ibm = m->private;
if (!ibm || !ibm->read)
return -EINVAL;
+ return ibm->read(m);
+}
- len = ibm->read(page);
- if (len < 0)
- return len;
-
- if (len <= off + count)
- *eof = 1;
- *start = page + off;
- len -= off;
- if (len > count)
- len = count;
- if (len < 0)
- len = 0;
-
- return len;
+static int dispatch_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dispatch_proc_show, PDE(inode)->data);
}
-static int dispatch_procfs_write(struct file *file,
+static ssize_t dispatch_proc_write(struct file *file,
const char __user *userbuf,
- unsigned long count, void *data)
+ size_t count, loff_t *pos)
{
- struct ibm_struct *ibm = data;
+ struct ibm_struct *ibm = PDE(file->f_path.dentry->d_inode)->data;
char *kernbuf;
int ret;
@@ -834,6 +825,15 @@ static int dispatch_procfs_write(struct
return ret;
}
+static const struct file_operations dispatch_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = dispatch_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = dispatch_proc_write,
+};
+
static char *next_cmd(char **cmds)
{
char *start = *cmds;
@@ -1264,6 +1264,7 @@ static int __init tpacpi_new_rfkill(cons
struct tpacpi_rfk *atp_rfk;
int res;
bool sw_state = false;
+ bool hw_state;
int sw_status;
BUG_ON(id >= TPACPI_RFK_SW_MAX || tpacpi_rfkill_switches[id]);
@@ -1298,7 +1299,8 @@ static int __init tpacpi_new_rfkill(cons
rfkill_init_sw_state(atp_rfk->rfkill, sw_state);
}
}
- rfkill_set_hw_state(atp_rfk->rfkill, tpacpi_rfk_check_hwblock_state());
+ hw_state = tpacpi_rfk_check_hwblock_state();
+ rfkill_set_hw_state(atp_rfk->rfkill, hw_state);
res = rfkill_register(atp_rfk->rfkill);
if (res < 0) {
@@ -1311,6 +1313,9 @@ static int __init tpacpi_new_rfkill(cons
}
tpacpi_rfkill_switches[id] = atp_rfk;
+
+ printk(TPACPI_INFO "rfkill switch %s: radio is %sblocked\n",
+ name, (sw_state || hw_state) ? "" : "un");
return 0;
}
@@ -1383,12 +1388,11 @@ static ssize_t tpacpi_rfk_sysfs_enable_s
}
/* procfs -------------------------------------------------------------- */
-static int tpacpi_rfk_procfs_read(const enum tpacpi_rfk_id id, char *p)
+static int tpacpi_rfk_procfs_read(const enum tpacpi_rfk_id id,
+ struct seq_file *m)
{
- int len = 0;
-
if (id >= TPACPI_RFK_SW_MAX)
- len += sprintf(p + len, "status:\t\tnot supported\n");
+ seq_printf(m, "status:\t\tnot supported\n");
else {
int status;
@@ -1402,13 +1406,13 @@ static int tpacpi_rfk_procfs_read(const
return status;
}
- len += sprintf(p + len, "status:\t\t%s\n",
+ seq_printf(m, "status:\t\t%s\n",
(status == TPACPI_RFK_RADIO_ON) ?
"enabled" : "disabled");
- len += sprintf(p + len, "commands:\tenable, disable\n");
+ seq_printf(m, "commands:\tenable, disable\n");
}
- return len;
+ return 0;
}
static int tpacpi_rfk_procfs_write(const enum tpacpi_rfk_id id, char *buf)
@@ -1779,7 +1783,7 @@ static const struct tpacpi_quirk tpacpi_
TPV_QL1('7', '9', 'E', '3', '5', '0'), /* T60/p */
TPV_QL1('7', 'C', 'D', '2', '2', '2'), /* R60, R60i */
- TPV_QL0('7', 'E', 'D', '0'), /* R60e, R60i */
+ TPV_QL1('7', 'E', 'D', '0', '1', '5'), /* R60e, R60i */
/* BIOS FW BIOS VERS EC FW EC VERS */
TPV_QI2('1', 'W', '9', '0', '1', 'V', '2', '8'), /* R50e (1) */
@@ -1795,8 +1799,8 @@ static const struct tpacpi_quirk tpacpi_
TPV_QI1('7', '4', '6', '4', '2', '7'), /* X41 (0) */
TPV_QI1('7', '5', '6', '0', '2', '0'), /* X41t (0) */
- TPV_QL0('7', 'B', 'D', '7'), /* X60/s */
- TPV_QL0('7', 'J', '3', '0'), /* X60t */
+ TPV_QL1('7', 'B', 'D', '7', '4', '0'), /* X60/s */
+ TPV_QL1('7', 'J', '3', '0', '1', '3'), /* X60t */
/* (0) - older versions lack DMI EC fw string and functionality */
/* (1) - older versions known to lack functionality */
@@ -1886,14 +1890,11 @@ static int __init thinkpad_acpi_driver_i
return 0;
}
-static int thinkpad_acpi_driver_read(char *p)
+static int thinkpad_acpi_driver_read(struct seq_file *m)
{
- int len = 0;
-
- len += sprintf(p + len, "driver:\t\t%s\n", TPACPI_DESC);
- len += sprintf(p + len, "version:\t%s\n", TPACPI_VERSION);
-
- return len;
+ seq_printf(m, "driver:\t\t%s\n", TPACPI_DESC);
+ seq_printf(m, "version:\t%s\n", TPACPI_VERSION);
+ return 0;
}
static struct ibm_struct thinkpad_acpi_driver_data = {
@@ -2073,6 +2074,7 @@ static struct attribute_set *hotkey_dev_
static void tpacpi_driver_event(const unsigned int hkey_event);
static void hotkey_driver_event(const unsigned int scancode);
+static void hotkey_poll_setup(const bool may_warn);
/* HKEY.MHKG() return bits */
#define TP_HOTKEY_TABLET_MASK (1 << 3)
@@ -2189,7 +2191,8 @@ static int hotkey_mask_set(u32 mask)
fwmask, hotkey_acpi_mask);
}
- hotkey_mask_warn_incomplete_mask();
+ if (tpacpi_lifecycle != TPACPI_LIFE_EXITING)
+ hotkey_mask_warn_incomplete_mask();
return rc;
}
@@ -2254,6 +2257,8 @@ static int tpacpi_hotkey_driver_mask_set
rc = hotkey_mask_set((hotkey_acpi_mask | hotkey_driver_mask) &
~hotkey_source_mask);
+ hotkey_poll_setup(true);
+
mutex_unlock(&hotkey_mutex);
return rc;
@@ -2538,7 +2543,7 @@ static void hotkey_poll_stop_sync(void)
}
/* call with hotkey_mutex held */
-static void hotkey_poll_setup(bool may_warn)
+static void hotkey_poll_setup(const bool may_warn)
{
const u32 poll_driver_mask = hotkey_driver_mask & hotkey_source_mask;
const u32 poll_user_mask = hotkey_user_mask & hotkey_source_mask;
@@ -2569,7 +2574,7 @@ static void hotkey_poll_setup(bool may_w
}
}
-static void hotkey_poll_setup_safe(bool may_warn)
+static void hotkey_poll_setup_safe(const bool may_warn)
{
mutex_lock(&hotkey_mutex);
hotkey_poll_setup(may_warn);
@@ -2587,7 +2592,11 @@ static void hotkey_poll_set_freq(unsigne
#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
-static void hotkey_poll_setup_safe(bool __unused)
+static void hotkey_poll_setup(const bool __unused)
+{
+}
+
+static void hotkey_poll_setup_safe(const bool __unused)
{
}
@@ -2597,16 +2606,11 @@ static int hotkey_inputdev_open(struct i
{
switch (tpacpi_lifecycle) {
case TPACPI_LIFE_INIT:
- /*
- * hotkey_init will call hotkey_poll_setup_safe
- * at the appropriate moment
- */
- return 0;
- case TPACPI_LIFE_EXITING:
- return -EBUSY;
case TPACPI_LIFE_RUNNING:
hotkey_poll_setup_safe(false);
return 0;
+ case TPACPI_LIFE_EXITING:
+ return -EBUSY;
}
/* Should only happen if tpacpi_lifecycle is corrupt */
@@ -2617,7 +2621,7 @@ static int hotkey_inputdev_open(struct i
static void hotkey_inputdev_close(struct input_dev *dev)
{
/* disable hotkey polling when possible */
- if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING &&
+ if (tpacpi_lifecycle != TPACPI_LIFE_EXITING &&
!(hotkey_source_mask & hotkey_driver_mask))
hotkey_poll_setup_safe(false);
}
@@ -3185,6 +3189,8 @@ static int __init hotkey_init(struct ibm
int res, i;
int status;
int hkeyv;
+ bool radiosw_state = false;
+ bool tabletsw_state = false;
unsigned long quirks;
@@ -3290,6 +3296,7 @@ static int __init hotkey_init(struct ibm
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_wlswemul) {
tp_features.hotkey_wlsw = 1;
+ radiosw_state = !!tpacpi_wlsw_emulstate;
printk(TPACPI_INFO
"radio switch emulation enabled\n");
} else
@@ -3297,6 +3304,7 @@ static int __init hotkey_init(struct ibm
/* Not all thinkpads have a hardware radio switch */
if (acpi_evalf(hkey_handle, &status, "WLSW", "qd")) {
tp_features.hotkey_wlsw = 1;
+ radiosw_state = !!status;
printk(TPACPI_INFO
"radio switch found; radios are %s\n",
enabled(status, 0));
@@ -3308,11 +3316,11 @@ static int __init hotkey_init(struct ibm
/* For X41t, X60t, X61t Tablets... */
if (!res && acpi_evalf(hkey_handle, &status, "MHKG", "qd")) {
tp_features.hotkey_tablet = 1;
+ tabletsw_state = !!(status & TP_HOTKEY_TABLET_MASK);
printk(TPACPI_INFO
"possible tablet mode switch found; "
"ThinkPad in %s mode\n",
- (status & TP_HOTKEY_TABLET_MASK)?
- "tablet" : "laptop");
+ (tabletsw_state) ? "tablet" : "laptop");
res = add_to_attr_set(hotkey_dev_attributes,
&dev_attr_hotkey_tablet_mode.attr);
}
@@ -3347,16 +3355,14 @@ static int __init hotkey_init(struct ibm
TPACPI_HOTKEY_MAP_SIZE);
}
- set_bit(EV_KEY, tpacpi_inputdev->evbit);
- set_bit(EV_MSC, tpacpi_inputdev->evbit);
- set_bit(MSC_SCAN, tpacpi_inputdev->mscbit);
+ input_set_capability(tpacpi_inputdev, EV_MSC, MSC_SCAN);
tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE;
tpacpi_inputdev->keycodemax = TPACPI_HOTKEY_MAP_LEN;
tpacpi_inputdev->keycode = hotkey_keycode_map;
for (i = 0; i < TPACPI_HOTKEY_MAP_LEN; i++) {
if (hotkey_keycode_map[i] != KEY_RESERVED) {
- set_bit(hotkey_keycode_map[i],
- tpacpi_inputdev->keybit);
+ input_set_capability(tpacpi_inputdev, EV_KEY,
+ hotkey_keycode_map[i]);
} else {
if (i < sizeof(hotkey_reserved_mask)*8)
hotkey_reserved_mask |= 1 << i;
@@ -3364,12 +3370,14 @@ static int __init hotkey_init(struct ibm
}
if (tp_features.hotkey_wlsw) {
- set_bit(EV_SW, tpacpi_inputdev->evbit);
- set_bit(SW_RFKILL_ALL, tpacpi_inputdev->swbit);
+ input_set_capability(tpacpi_inputdev, EV_SW, SW_RFKILL_ALL);
+ input_report_switch(tpacpi_inputdev,
+ SW_RFKILL_ALL, radiosw_state);
}
if (tp_features.hotkey_tablet) {
- set_bit(EV_SW, tpacpi_inputdev->evbit);
- set_bit(SW_TABLET_MODE, tpacpi_inputdev->swbit);
+ input_set_capability(tpacpi_inputdev, EV_SW, SW_TABLET_MODE);
+ input_report_switch(tpacpi_inputdev,
+ SW_TABLET_MODE, tabletsw_state);
}
/* Do not issue duplicate brightness change events to
@@ -3436,8 +3444,6 @@ static int __init hotkey_init(struct ibm
tpacpi_inputdev->close = &hotkey_inputdev_close;
hotkey_poll_setup_safe(true);
- tpacpi_send_radiosw_update();
- tpacpi_input_send_tabletsw();
return 0;
@@ -3545,49 +3551,57 @@ static bool hotkey_notify_usrevent(const
}
}
+static void thermal_dump_all_sensors(void);
+
static bool hotkey_notify_thermal(const u32 hkey,
bool *send_acpi_ev,
bool *ignore_acpi_ev)
{
+ bool known = true;
+
/* 0x6000-0x6FFF: thermal alarms */
*send_acpi_ev = true;
*ignore_acpi_ev = false;
switch (hkey) {
+ case TP_HKEY_EV_THM_TABLE_CHANGED:
+ printk(TPACPI_INFO
+ "EC reports that Thermal Table has changed\n");
+ /* recommended action: do nothing, we don't have
+ * Lenovo ATM information */
+ return true;
case TP_HKEY_EV_ALARM_BAT_HOT:
printk(TPACPI_CRIT
"THERMAL ALARM: battery is too hot!\n");
/* recommended action: warn user through gui */
- return true;
+ break;
case TP_HKEY_EV_ALARM_BAT_XHOT:
printk(TPACPI_ALERT
"THERMAL EMERGENCY: battery is extremely hot!\n");
/* recommended action: immediate sleep/hibernate */
- return true;
+ break;
case TP_HKEY_EV_ALARM_SENSOR_HOT:
printk(TPACPI_CRIT
"THERMAL ALARM: "
"a sensor reports something is too hot!\n");
/* recommended action: warn user through gui, that */
/* some internal component is too hot */
- return true;
+ break;
case TP_HKEY_EV_ALARM_SENSOR_XHOT:
printk(TPACPI_ALERT
"THERMAL EMERGENCY: "
"a sensor reports something is extremely hot!\n");
/* recommended action: immediate sleep/hibernate */
- return true;
- case TP_HKEY_EV_THM_TABLE_CHANGED:
- printk(TPACPI_INFO
- "EC reports that Thermal Table has changed\n");
- /* recommended action: do nothing, we don't have
- * Lenovo ATM information */
- return true;
+ break;
default:
printk(TPACPI_ALERT
"THERMAL ALERT: unknown thermal alarm received\n");
- return false;
+ known = false;
}
+
+ thermal_dump_all_sensors();
+
+ return known;
}
static void hotkey_notify(struct ibm_struct *ibm, u32 event)
@@ -3635,13 +3649,19 @@ static void hotkey_notify(struct ibm_str
break;
case 3:
/* 0x3000-0x3FFF: bay-related wakeups */
- if (hkey == TP_HKEY_EV_BAYEJ_ACK) {
+ switch (hkey) {
+ case TP_HKEY_EV_BAYEJ_ACK:
hotkey_autosleep_ack = 1;
printk(TPACPI_INFO
"bay ejected\n");
hotkey_wakeup_hotunplug_complete_notify_change();
known_ev = true;
- } else {
+ break;
+ case TP_HKEY_EV_OPTDRV_EJ:
+ /* FIXME: kick libata if SATA link offline */
+ known_ev = true;
+ break;
+ default:
known_ev = false;
}
break;
@@ -3730,14 +3750,13 @@ static void hotkey_resume(void)
}
/* procfs -------------------------------------------------------------- */
-static int hotkey_read(char *p)
+static int hotkey_read(struct seq_file *m)
{
int res, status;
- int len = 0;
if (!tp_features.hotkey) {
- len += sprintf(p + len, "status:\t\tnot supported\n");
- return len;
+ seq_printf(m, "status:\t\tnot supported\n");
+ return 0;
}
if (mutex_lock_killable(&hotkey_mutex))
@@ -3749,17 +3768,16 @@ static int hotkey_read(char *p)
if (res)
return res;
- len += sprintf(p + len, "status:\t\t%s\n", enabled(status, 0));
+ seq_printf(m, "status:\t\t%s\n", enabled(status, 0));
if (hotkey_all_mask) {
- len += sprintf(p + len, "mask:\t\t0x%08x\n", hotkey_user_mask);
- len += sprintf(p + len,
- "commands:\tenable, disable, reset, <mask>\n");
+ seq_printf(m, "mask:\t\t0x%08x\n", hotkey_user_mask);
+ seq_printf(m, "commands:\tenable, disable, reset, <mask>\n");
} else {
- len += sprintf(p + len, "mask:\t\tnot supported\n");
- len += sprintf(p + len, "commands:\tenable, disable, reset\n");
+ seq_printf(m, "mask:\t\tnot supported\n");
+ seq_printf(m, "commands:\tenable, disable, reset\n");
}
- return len;
+ return 0;
}
static void hotkey_enabledisable_warn(bool enable)
@@ -3852,7 +3870,7 @@ enum {
TP_ACPI_BLUETOOTH_HWPRESENT = 0x01, /* Bluetooth hw available */
TP_ACPI_BLUETOOTH_RADIOSSW = 0x02, /* Bluetooth radio enabled */
TP_ACPI_BLUETOOTH_RESUMECTRL = 0x04, /* Bluetooth state at resume:
- off / last state */
+ 0 = disable, 1 = enable */
};
enum {
@@ -3898,10 +3916,11 @@ static int bluetooth_set_status(enum tpa
}
#endif
- /* We make sure to keep TP_ACPI_BLUETOOTH_RESUMECTRL off */
- status = TP_ACPI_BLUETOOTH_RESUMECTRL;
if (state == TPACPI_RFK_RADIO_ON)
- status |= TP_ACPI_BLUETOOTH_RADIOSSW;
+ status = TP_ACPI_BLUETOOTH_RADIOSSW
+ | TP_ACPI_BLUETOOTH_RESUMECTRL;
+ else
+ status = 0;
if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status))
return -EIO;
@@ -4025,9 +4044,9 @@ static int __init bluetooth_init(struct
}
/* procfs -------------------------------------------------------------- */
-static int bluetooth_read(char *p)
+static int bluetooth_read(struct seq_file *m)
{
- return tpacpi_rfk_procfs_read(TPACPI_RFK_BLUETOOTH_SW_ID, p);
+ return tpacpi_rfk_procfs_read(TPACPI_RFK_BLUETOOTH_SW_ID, m);
}
static int bluetooth_write(char *buf)
@@ -4052,7 +4071,7 @@ enum {
TP_ACPI_WANCARD_HWPRESENT = 0x01, /* Wan hw available */
TP_ACPI_WANCARD_RADIOSSW = 0x02, /* Wan radio enabled */
TP_ACPI_WANCARD_RESUMECTRL = 0x04, /* Wan state at resume:
- off / last state */
+ 0 = disable, 1 = enable */
};
#define TPACPI_RFK_WWAN_SW_NAME "tpacpi_wwan_sw"
@@ -4089,10 +4108,11 @@ static int wan_set_status(enum tpacpi_rf
}
#endif
- /* We make sure to set TP_ACPI_WANCARD_RESUMECTRL */
- status = TP_ACPI_WANCARD_RESUMECTRL;
if (state == TPACPI_RFK_RADIO_ON)
- status |= TP_ACPI_WANCARD_RADIOSSW;
+ status = TP_ACPI_WANCARD_RADIOSSW
+ | TP_ACPI_WANCARD_RESUMECTRL;
+ else
+ status = 0;
if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status))
return -EIO;
@@ -4215,9 +4235,9 @@ static int __init wan_init(struct ibm_in
}
/* procfs -------------------------------------------------------------- */
-static int wan_read(char *p)
+static int wan_read(struct seq_file *m)
{
- return tpacpi_rfk_procfs_read(TPACPI_RFK_WWAN_SW_ID, p);
+ return tpacpi_rfk_procfs_read(TPACPI_RFK_WWAN_SW_ID, m);
}
static int wan_write(char *buf)
@@ -4592,16 +4612,19 @@ static int video_expand_toggle(void)
/* not reached */
}
-static int video_read(char *p)
+static int video_read(struct seq_file *m)
{
int status, autosw;
- int len = 0;
if (video_supported == TPACPI_VIDEO_NONE) {
- len += sprintf(p + len, "status:\t\tnot supported\n");
- return len;
+ seq_printf(m, "status:\t\tnot supported\n");
+ return 0;
}
+ /* Even reads can crash X.org, so... */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
status = video_outputsw_get();
if (status < 0)
return status;
@@ -4610,20 +4633,20 @@ static int video_read(char *p)
if (autosw < 0)
return autosw;
- len += sprintf(p + len, "status:\t\tsupported\n");
- len += sprintf(p + len, "lcd:\t\t%s\n", enabled(status, 0));
- len += sprintf(p + len, "crt:\t\t%s\n", enabled(status, 1));
+ seq_printf(m, "status:\t\tsupported\n");
+ seq_printf(m, "lcd:\t\t%s\n", enabled(status, 0));
+ seq_printf(m, "crt:\t\t%s\n", enabled(status, 1));
if (video_supported == TPACPI_VIDEO_NEW)
- len += sprintf(p + len, "dvi:\t\t%s\n", enabled(status, 3));
- len += sprintf(p + len, "auto:\t\t%s\n", enabled(autosw, 0));
- len += sprintf(p + len, "commands:\tlcd_enable, lcd_disable\n");
- len += sprintf(p + len, "commands:\tcrt_enable, crt_disable\n");
+ seq_printf(m, "dvi:\t\t%s\n", enabled(status, 3));
+ seq_printf(m, "auto:\t\t%s\n", enabled(autosw, 0));
+ seq_printf(m, "commands:\tlcd_enable, lcd_disable\n");
+ seq_printf(m, "commands:\tcrt_enable, crt_disable\n");
if (video_supported == TPACPI_VIDEO_NEW)
- len += sprintf(p + len, "commands:\tdvi_enable, dvi_disable\n");
- len += sprintf(p + len, "commands:\tauto_enable, auto_disable\n");
- len += sprintf(p + len, "commands:\tvideo_switch, expand_toggle\n");
+ seq_printf(m, "commands:\tdvi_enable, dvi_disable\n");
+ seq_printf(m, "commands:\tauto_enable, auto_disable\n");
+ seq_printf(m, "commands:\tvideo_switch, expand_toggle\n");
- return len;
+ return 0;
}
static int video_write(char *buf)
@@ -4635,6 +4658,10 @@ static int video_write(char *buf)
if (video_supported == TPACPI_VIDEO_NONE)
return -ENODEV;
+ /* Even reads can crash X.org, let alone writes... */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
enable = 0;
disable = 0;
@@ -4815,25 +4842,24 @@ static void light_exit(void)
flush_workqueue(tpacpi_wq);
}
-static int light_read(char *p)
+static int light_read(struct seq_file *m)
{
- int len = 0;
int status;
if (!tp_features.light) {
- len += sprintf(p + len, "status:\t\tnot supported\n");
+ seq_printf(m, "status:\t\tnot supported\n");
} else if (!tp_features.light_status) {
- len += sprintf(p + len, "status:\t\tunknown\n");
- len += sprintf(p + len, "commands:\ton, off\n");
+ seq_printf(m, "status:\t\tunknown\n");
+ seq_printf(m, "commands:\ton, off\n");
} else {
status = light_get_status();
if (status < 0)
return status;
- len += sprintf(p + len, "status:\t\t%s\n", onoff(status, 0));
- len += sprintf(p + len, "commands:\ton, off\n");
+ seq_printf(m, "status:\t\t%s\n", onoff(status, 0));
+ seq_printf(m, "commands:\ton, off\n");
}
- return len;
+ return 0;
}
static int light_write(char *buf)
@@ -4911,20 +4937,18 @@ static void cmos_exit(void)
device_remove_file(&tpacpi_pdev->dev, &dev_attr_cmos_command);
}
-static int cmos_read(char *p)
+static int cmos_read(struct seq_file *m)
{
- int len = 0;
-
/* cmos not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p,
R30, R31, T20-22, X20-21 */
if (!cmos_handle)
- len += sprintf(p + len, "status:\t\tnot supported\n");
+ seq_printf(m, "status:\t\tnot supported\n");
else {
- len += sprintf(p + len, "status:\t\tsupported\n");
- len += sprintf(p + len, "commands:\t<cmd> (<cmd> is 0-21)\n");
+ seq_printf(m, "status:\t\tsupported\n");
+ seq_printf(m, "commands:\t<cmd> (<cmd> is 0-21)\n");
}
- return len;
+ return 0;
}
static int cmos_write(char *buf)
@@ -5299,15 +5323,13 @@ static int __init led_init(struct ibm_in
((s) == TPACPI_LED_OFF ? "off" : \
((s) == TPACPI_LED_ON ? "on" : "blinking"))
-static int led_read(char *p)
+static int led_read(struct seq_file *m)
{
- int len = 0;
-
if (!led_supported) {
- len += sprintf(p + len, "status:\t\tnot supported\n");
- return len;
+ seq_printf(m, "status:\t\tnot supported\n");
+ return 0;
}
- len += sprintf(p + len, "status:\t\tsupported\n");
+ seq_printf(m, "status:\t\tsupported\n");
if (led_supported == TPACPI_LED_570) {
/* 570 */
@@ -5316,15 +5338,15 @@ static int led_read(char *p)
status = led_get_status(i);
if (status < 0)
return -EIO;
- len += sprintf(p + len, "%d:\t\t%s\n",
+ seq_printf(m, "%d:\t\t%s\n",
i, str_led_status(status));
}
}
- len += sprintf(p + len, "commands:\t"
+ seq_printf(m, "commands:\t"
"<led> on, <led> off, <led> blink (<led> is 0-15)\n");
- return len;
+ return 0;
}
static int led_write(char *buf)
@@ -5397,18 +5419,16 @@ static int __init beep_init(struct ibm_i
return (beep_handle)? 0 : 1;
}
-static int beep_read(char *p)
+static int beep_read(struct seq_file *m)
{
- int len = 0;
-
if (!beep_handle)
- len += sprintf(p + len, "status:\t\tnot supported\n");
+ seq_printf(m, "status:\t\tnot supported\n");
else {
- len += sprintf(p + len, "status:\t\tsupported\n");
- len += sprintf(p + len, "commands:\t<cmd> (<cmd> is 0-17)\n");
+ seq_printf(m, "status:\t\tsupported\n");
+ seq_printf(m, "commands:\t<cmd> (<cmd> is 0-17)\n");
}
- return len;
+ return 0;
}
static int beep_write(char *buf)
@@ -5461,8 +5481,11 @@ enum { /* TPACPI_THERMAL_TPEC_* */
TP_EC_THERMAL_TMP0 = 0x78, /* ACPI EC regs TMP 0..7 */
TP_EC_THERMAL_TMP8 = 0xC0, /* ACPI EC regs TMP 8..15 */
TP_EC_THERMAL_TMP_NA = -128, /* ACPI EC sensor not available */
+
+ TPACPI_THERMAL_SENSOR_NA = -128000, /* Sensor not available */
};
+
#define TPACPI_MAX_THERMAL_SENSORS 16 /* Max thermal sensors supported */
struct ibm_thermal_sensors_struct {
s32 temp[TPACPI_MAX_THERMAL_SENSORS];
@@ -5552,6 +5575,28 @@ static int thermal_get_sensors(struct ib
return n;
}
+static void thermal_dump_all_sensors(void)
+{
+ int n, i;
+ struct ibm_thermal_sensors_struct t;
+
+ n = thermal_get_sensors(&t);
+ if (n <= 0)
+ return;
+
+ printk(TPACPI_NOTICE
+ "temperatures (Celsius):");
+
+ for (i = 0; i < n; i++) {
+ if (t.temp[i] != TPACPI_THERMAL_SENSOR_NA)
+ printk(KERN_CONT " %d", (int)(t.temp[i] / 1000));
+ else
+ printk(KERN_CONT " N/A");
+ }
+
+ printk(KERN_CONT "\n");
+}
+
/* sysfs temp##_input -------------------------------------------------- */
static ssize_t thermal_temp_input_show(struct device *dev,
@@ -5567,7 +5612,7 @@ static ssize_t thermal_temp_input_show(s
res = thermal_get_sensor(idx, &value);
if (res)
return res;
- if (value == TP_EC_THERMAL_TMP_NA * 1000)
+ if (value == TPACPI_THERMAL_SENSOR_NA)
return -ENXIO;
return snprintf(buf, PAGE_SIZE, "%d\n", value);
@@ -5736,7 +5781,7 @@ static void thermal_exit(void)
case TPACPI_THERMAL_ACPI_TMP07:
case TPACPI_THERMAL_ACPI_UPDT:
sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj,
- &thermal_temp_input16_group);
+ &thermal_temp_input8_group);
break;
case TPACPI_THERMAL_NONE:
default:
@@ -5744,9 +5789,8 @@ static void thermal_exit(void)
}
}
-static int thermal_read(char *p)
+static int thermal_read(struct seq_file *m)
{
- int len = 0;
int n, i;
struct ibm_thermal_sensors_struct t;
@@ -5754,16 +5798,16 @@ static int thermal_read(char *p)
if (unlikely(n < 0))
return n;
- len += sprintf(p + len, "temperatures:\t");
+ seq_printf(m, "temperatures:\t");
if (n > 0) {
for (i = 0; i < (n - 1); i++)
- len += sprintf(p + len, "%d ", t.temp[i] / 1000);
- len += sprintf(p + len, "%d\n", t.temp[i] / 1000);
+ seq_printf(m, "%d ", t.temp[i] / 1000);
+ seq_printf(m, "%d\n", t.temp[i] / 1000);
} else
- len += sprintf(p + len, "not supported\n");
+ seq_printf(m, "not supported\n");
- return len;
+ return 0;
}
static struct ibm_struct thermal_driver_data = {
@@ -5778,39 +5822,38 @@ static struct ibm_struct thermal_driver_
static u8 ecdump_regs[256];
-static int ecdump_read(char *p)
+static int ecdump_read(struct seq_file *m)
{
- int len = 0;
int i, j;
u8 v;
- len += sprintf(p + len, "EC "
+ seq_printf(m, "EC "
" +00 +01 +02 +03 +04 +05 +06 +07"
" +08 +09 +0a +0b +0c +0d +0e +0f\n");
for (i = 0; i < 256; i += 16) {
- len += sprintf(p + len, "EC 0x%02x:", i);
+ seq_printf(m, "EC 0x%02x:", i);
for (j = 0; j < 16; j++) {
if (!acpi_ec_read(i + j, &v))
break;
if (v != ecdump_regs[i + j])
- len += sprintf(p + len, " *%02x", v);
+ seq_printf(m, " *%02x", v);
else
- len += sprintf(p + len, " %02x", v);
+ seq_printf(m, " %02x", v);
ecdump_regs[i + j] = v;
}
- len += sprintf(p + len, "\n");
+ seq_putc(m, '\n');
if (j != 16)
break;
}
/* These are way too dangerous to advertise openly... */
#if 0
- len += sprintf(p + len, "commands:\t0x<offset> 0x<value>"
+ seq_printf(m, "commands:\t0x<offset> 0x<value>"
" (<offset> is 00-ff, <value> is 00-ff)\n");
- len += sprintf(p + len, "commands:\t0x<offset> <value> "
+ seq_printf(m, "commands:\t0x<offset> <value> "
" (<offset> is 00-ff, <value> is 0-255)\n");
#endif
- return len;
+ return 0;
}
static int ecdump_write(char *buf)
@@ -6073,6 +6116,12 @@ static int brightness_get(struct backlig
return status & TP_EC_BACKLIGHT_LVLMSK;
}
+static void tpacpi_brightness_notify_change(void)
+{
+ backlight_force_update(ibm_backlight_device,
+ BACKLIGHT_UPDATE_HOTKEY);
+}
+
static struct backlight_ops ibm_backlight_data = {
.get_brightness = brightness_get,
.update_status = brightness_update_status,
@@ -6094,13 +6143,13 @@ static const struct tpacpi_quirk brightn
TPACPI_Q_IBM('1', 'Y', TPACPI_BRGHT_Q_EC), /* T43/p ATI */
/* Models with ATI GPUs that can use ECNVRAM */
- TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_EC),
+ TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_EC), /* R50,51 T40-42 */
TPACPI_Q_IBM('1', 'Q', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
- TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
+ TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_EC), /* R52 */
TPACPI_Q_IBM('7', '8', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
/* Models with Intel Extreme Graphics 2 */
- TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC),
+ TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC), /* X40 */
TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
@@ -6227,6 +6276,12 @@ static int __init brightness_init(struct
ibm_backlight_device->props.brightness = b & TP_EC_BACKLIGHT_LVLMSK;
backlight_update_status(ibm_backlight_device);
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
+ "brightness: registering brightness hotkeys "
+ "as change notification\n");
+ tpacpi_hotkey_driver_mask_set(hotkey_driver_mask
+ | TP_ACPI_HKEY_BRGHTUP_MASK
+			| TP_ACPI_HKEY_BRGHTDWN_MASK);
return 0;
}
@@ -6251,23 +6306,22 @@ static void brightness_exit(void)
tpacpi_brightness_checkpoint_nvram();
}
-static int brightness_read(char *p)
+static int brightness_read(struct seq_file *m)
{
- int len = 0;
int level;
level = brightness_get(NULL);
if (level < 0) {
- len += sprintf(p + len, "level:\t\tunreadable\n");
+ seq_printf(m, "level:\t\tunreadable\n");
} else {
- len += sprintf(p + len, "level:\t\t%d\n", level);
- len += sprintf(p + len, "commands:\tup, down\n");
- len += sprintf(p + len, "commands:\tlevel <level>"
+ seq_printf(m, "level:\t\t%d\n", level);
+ seq_printf(m, "commands:\tup, down\n");
+ seq_printf(m, "commands:\tlevel <level>"
" (<level> is 0-%d)\n",
(tp_features.bright_16levels) ? 15 : 7);
}
- return len;
+ return 0;
}
static int brightness_write(char *buf)
@@ -6303,6 +6357,9 @@ static int brightness_write(char *buf)
* Doing it this way makes the syscall restartable in case of EINTR
*/
rc = brightness_set(level);
+ if (!rc && ibm_backlight_device)
+ backlight_force_update(ibm_backlight_device,
+ BACKLIGHT_UPDATE_SYSFS);
return (rc == -EINTR)? -ERESTARTSYS : rc;
}
@@ -6321,22 +6378,21 @@ static struct ibm_struct brightness_driv
static int volume_offset = 0x30;
-static int volume_read(char *p)
+static int volume_read(struct seq_file *m)
{
- int len = 0;
u8 level;
if (!acpi_ec_read(volume_offset, &level)) {
- len += sprintf(p + len, "level:\t\tunreadable\n");
+ seq_printf(m, "level:\t\tunreadable\n");
} else {
- len += sprintf(p + len, "level:\t\t%d\n", level & 0xf);
- len += sprintf(p + len, "mute:\t\t%s\n", onoff(level, 6));
- len += sprintf(p + len, "commands:\tup, down, mute\n");
- len += sprintf(p + len, "commands:\tlevel <level>"
+ seq_printf(m, "level:\t\t%d\n", level & 0xf);
+ seq_printf(m, "mute:\t\t%s\n", onoff(level, 6));
+ seq_printf(m, "commands:\tup, down, mute\n");
+ seq_printf(m, "commands:\tlevel <level>"
" (<level> is 0-15)\n");
}
- return len;
+ return 0;
}
static int volume_write(char *buf)
@@ -7488,9 +7544,8 @@ static void fan_resume(void)
}
}
-static int fan_read(char *p)
+static int fan_read(struct seq_file *m)
{
- int len = 0;
int rc;
u8 status;
unsigned int speed = 0;
@@ -7502,7 +7557,7 @@ static int fan_read(char *p)
if (rc < 0)
return rc;
- len += sprintf(p + len, "status:\t\t%s\n"
+ seq_printf(m, "status:\t\t%s\n"
"level:\t\t%d\n",
(status != 0) ? "enabled" : "disabled", status);
break;
@@ -7513,54 +7568,54 @@ static int fan_read(char *p)
if (rc < 0)
return rc;
- len += sprintf(p + len, "status:\t\t%s\n",
+ seq_printf(m, "status:\t\t%s\n",
(status != 0) ? "enabled" : "disabled");
rc = fan_get_speed(&speed);
if (rc < 0)
return rc;
- len += sprintf(p + len, "speed:\t\t%d\n", speed);
+ seq_printf(m, "speed:\t\t%d\n", speed);
if (status & TP_EC_FAN_FULLSPEED)
/* Disengaged mode takes precedence */
- len += sprintf(p + len, "level:\t\tdisengaged\n");
+ seq_printf(m, "level:\t\tdisengaged\n");
else if (status & TP_EC_FAN_AUTO)
- len += sprintf(p + len, "level:\t\tauto\n");
+ seq_printf(m, "level:\t\tauto\n");
else
- len += sprintf(p + len, "level:\t\t%d\n", status);
+ seq_printf(m, "level:\t\t%d\n", status);
break;
case TPACPI_FAN_NONE:
default:
- len += sprintf(p + len, "status:\t\tnot supported\n");
+ seq_printf(m, "status:\t\tnot supported\n");
}
if (fan_control_commands & TPACPI_FAN_CMD_LEVEL) {
- len += sprintf(p + len, "commands:\tlevel <level>");
+ seq_printf(m, "commands:\tlevel <level>");
switch (fan_control_access_mode) {
case TPACPI_FAN_WR_ACPI_SFAN:
- len += sprintf(p + len, " (<level> is 0-7)\n");
+ seq_printf(m, " (<level> is 0-7)\n");
break;
default:
- len += sprintf(p + len, " (<level> is 0-7, "
+ seq_printf(m, " (<level> is 0-7, "
"auto, disengaged, full-speed)\n");
break;
}
}
if (fan_control_commands & TPACPI_FAN_CMD_ENABLE)
- len += sprintf(p + len, "commands:\tenable, disable\n"
+ seq_printf(m, "commands:\tenable, disable\n"
"commands:\twatchdog <timeout> (<timeout> "
"is 0 (off), 1-120 (seconds))\n");
if (fan_control_commands & TPACPI_FAN_CMD_SPEED)
- len += sprintf(p + len, "commands:\tspeed <speed>"
+ seq_printf(m, "commands:\tspeed <speed>"
" (<speed> is 0-65535)\n");
- return len;
+ return 0;
}
static int fan_write_cmd_level(const char *cmd, int *rc)
@@ -7702,6 +7757,13 @@ static struct ibm_struct fan_driver_data
*/
static void tpacpi_driver_event(const unsigned int hkey_event)
{
+ if (ibm_backlight_device) {
+ switch (hkey_event) {
+ case TP_HKEY_EV_BRGHT_UP:
+ case TP_HKEY_EV_BRGHT_DOWN:
+ tpacpi_brightness_notify_change();
+ }
+ }
}
@@ -7834,19 +7896,20 @@ static int __init ibm_init(struct ibm_in
"%s installed\n", ibm->name);
if (ibm->read) {
- entry = create_proc_entry(ibm->name,
- S_IFREG | S_IRUGO | S_IWUSR,
- proc_dir);
+ mode_t mode = iibm->base_procfs_mode;
+
+ if (!mode)
+ mode = S_IRUGO;
+ if (ibm->write)
+ mode |= S_IWUSR;
+ entry = proc_create_data(ibm->name, mode, proc_dir,
+ &dispatch_proc_fops, ibm);
if (!entry) {
printk(TPACPI_ERR "unable to create proc entry %s\n",
ibm->name);
ret = -ENODEV;
goto err_out;
}
- entry->data = ibm;
- entry->read_proc = &dispatch_procfs_read;
- if (ibm->write)
- entry->write_proc = &dispatch_procfs_write;
ibm->flags.proc_created = 1;
}
@@ -8027,6 +8090,7 @@ static struct ibm_init_struct ibms_init[
#ifdef CONFIG_THINKPAD_ACPI_VIDEO
{
.init = video_init,
+ .base_procfs_mode = S_IRUSR,
.data = &video_driver_data,
},
#endif
@@ -8093,32 +8157,32 @@ static int __init set_ibm_param(const ch
return -EINVAL;
}
-module_param(experimental, int, 0);
+module_param(experimental, int, 0444);
MODULE_PARM_DESC(experimental,
"Enables experimental features when non-zero");
module_param_named(debug, dbg_level, uint, 0);
MODULE_PARM_DESC(debug, "Sets debug level bit-mask");
-module_param(force_load, bool, 0);
+module_param(force_load, bool, 0444);
MODULE_PARM_DESC(force_load,
"Attempts to load the driver even on a "
"mis-identified ThinkPad when true");
-module_param_named(fan_control, fan_control_allowed, bool, 0);
+module_param_named(fan_control, fan_control_allowed, bool, 0444);
MODULE_PARM_DESC(fan_control,
"Enables setting fan parameters features when true");
-module_param_named(brightness_mode, brightness_mode, uint, 0);
+module_param_named(brightness_mode, brightness_mode, uint, 0444);
MODULE_PARM_DESC(brightness_mode,
"Selects brightness control strategy: "
"0=auto, 1=EC, 2=UCMS, 3=EC+NVRAM");
-module_param(brightness_enable, uint, 0);
+module_param(brightness_enable, uint, 0444);
MODULE_PARM_DESC(brightness_enable,
"Enables backlight control when 1, disables when 0");
-module_param(hotkey_report_mode, uint, 0);
+module_param(hotkey_report_mode, uint, 0444);
MODULE_PARM_DESC(hotkey_report_mode,
"used for backwards compatibility with userspace, "
"see documentation");
@@ -8141,25 +8205,25 @@ TPACPI_PARAM(volume);
TPACPI_PARAM(fan);
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
-module_param(dbg_wlswemul, uint, 0);
+module_param(dbg_wlswemul, uint, 0444);
MODULE_PARM_DESC(dbg_wlswemul, "Enables WLSW emulation");
module_param_named(wlsw_state, tpacpi_wlsw_emulstate, bool, 0);
MODULE_PARM_DESC(wlsw_state,
"Initial state of the emulated WLSW switch");
-module_param(dbg_bluetoothemul, uint, 0);
+module_param(dbg_bluetoothemul, uint, 0444);
MODULE_PARM_DESC(dbg_bluetoothemul, "Enables bluetooth switch emulation");
module_param_named(bluetooth_state, tpacpi_bluetooth_emulstate, bool, 0);
MODULE_PARM_DESC(bluetooth_state,
"Initial state of the emulated bluetooth switch");
-module_param(dbg_wwanemul, uint, 0);
+module_param(dbg_wwanemul, uint, 0444);
MODULE_PARM_DESC(dbg_wwanemul, "Enables WWAN switch emulation");
module_param_named(wwan_state, tpacpi_wwan_emulstate, bool, 0);
MODULE_PARM_DESC(wwan_state,
"Initial state of the emulated WWAN switch");
-module_param(dbg_uwbemul, uint, 0);
+module_param(dbg_uwbemul, uint, 0444);
MODULE_PARM_DESC(dbg_uwbemul, "Enables UWB switch emulation");
module_param_named(uwb_state, tpacpi_uwb_emulstate, bool, 0);
MODULE_PARM_DESC(uwb_state,
@@ -8352,6 +8416,7 @@ static int __init thinkpad_acpi_module_i
PCI_VENDOR_ID_IBM;
tpacpi_inputdev->id.product = TPACPI_HKEY_INPUT_PRODUCT;
tpacpi_inputdev->id.version = TPACPI_HKEY_INPUT_VERSION;
+ tpacpi_inputdev->dev.parent = &tpacpi_pdev->dev;
}
for (i = 0; i < ARRAY_SIZE(ibms_init); i++) {
ret = ibm_init(&ibms_init[i]);
@@ -8362,6 +8427,9 @@ static int __init thinkpad_acpi_module_i
return ret;
}
}
+
+ tpacpi_lifecycle = TPACPI_LIFE_RUNNING;
+
ret = input_register_device(tpacpi_inputdev);
if (ret < 0) {
printk(TPACPI_ERR "unable to register input device\n");
@@ -8371,7 +8439,6 @@ static int __init thinkpad_acpi_module_i
tp_features.input_device_registered = 1;
}
- tpacpi_lifecycle = TPACPI_LIFE_RUNNING;
return 0;
}
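Most of the thinkpad_acpi.c changes above are a mechanical conversion of the
procfs handlers from the old read_proc scheme ("len += sprintf(p + len, ...)")
to seq_file, with the entries now registered through proc_create_data() and a
shared dispatch_proc_fops. A minimal, self-contained sketch of that pattern is
shown below for reference; the demo_* names and the "demo" proc entry are
illustrative only.

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_proc_show(struct seq_file *m, void *v)
{
	/* seq_printf() replaces the manual length bookkeeping of a
	 * read_proc handler. */
	seq_printf(m, "status:\t\tsupported\n");
	return 0;
}

static int demo_proc_open(struct inode *inode, struct file *file)
{
	/* single_open() wires a one-shot show routine to the seq_file core;
	 * PDE(inode)->data carries the per-entry private pointer, exactly
	 * as dispatch_proc_open() does above. */
	return single_open(file, demo_proc_show, PDE(inode)->data);
}

static const struct file_operations demo_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= demo_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init demo_init(void)
{
	/* proc_create_data() replaces create_proc_entry() plus the manual
	 * assignment of entry->data and entry->read_proc. */
	if (!proc_create_data("demo", S_IRUGO, NULL, &demo_proc_fops, NULL))
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");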
file:473e5f25e3dd6b4cc3e665978ba05a0014e4476e -> file:66c2d6a6d3600c9beec6306f4e91d45720ae9645
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -723,6 +723,9 @@ cmos_do_probe(struct device *dev, struct
}
}
+ cmos_rtc.dev = dev;
+ dev_set_drvdata(dev, &cmos_rtc);
+
cmos_rtc.rtc = rtc_device_register(driver_name, dev,
&cmos_rtc_ops, THIS_MODULE);
if (IS_ERR(cmos_rtc.rtc)) {
@@ -730,8 +733,6 @@ cmos_do_probe(struct device *dev, struct
goto cleanup0;
}
- cmos_rtc.dev = dev;
- dev_set_drvdata(dev, &cmos_rtc);
rename_region(ports, dev_name(&cmos_rtc.rtc->dev));
spin_lock_irq(&rtc_lock);
file:03ea530981d1e66328625fb26567cc40e35f9f23 -> file:44c4399ee7144fc2e8cffca50d8a96426b05a8fb
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -271,12 +271,13 @@ static int coh901331_resume(struct platf
{
struct coh901331_port *rtap = dev_get_drvdata(&pdev->dev);
- if (device_may_wakeup(&pdev->dev))
+ if (device_may_wakeup(&pdev->dev)) {
disable_irq_wake(rtap->irq);
- else
+ } else {
clk_enable(rtap->clk);
writel(rtap->irqmaskstore, rtap->virtbase + COH901331_IRQ_MASK);
clk_disable(rtap->clk);
+ }
return 0;
}
#else
file:eb99ee4fa0f5778a39e4faddd892766b06d0f078 -> file:861d91d012e053a4c4907015a93e67ea436318b3
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -775,7 +775,7 @@ static int __devinit ds1307_probe(struct
read_rtc:
/* read RTC registers */
- tmp = ds1307->read_block_data(ds1307->client, 0, 8, buf);
+ tmp = ds1307->read_block_data(ds1307->client, ds1307->offset, 8, buf);
if (tmp != 8) {
pr_debug("read error %d\n", tmp);
err = -EIO;
@@ -860,7 +860,7 @@ read_rtc:
if (ds1307->regs[DS1307_REG_HOUR] & DS1307_BIT_PM)
tmp += 12;
i2c_smbus_write_byte_data(client,
- DS1307_REG_HOUR,
+ ds1307->offset + DS1307_REG_HOUR,
bin2bcd(tmp));
}
file:e0d7b9991505564c412e4771f8061154c5adc449 -> file:43bfffe1ec2b381fe8703bac1247f31c43759991
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -456,8 +456,6 @@ static int __devinit s3c_rtc_probe(struc
pr_debug("s3c2410_rtc: RTCCON=%02x\n",
readb(s3c_rtc_base + S3C2410_RTCCON));
- s3c_rtc_setfreq(&pdev->dev, 1);
-
device_init_wakeup(&pdev->dev, 1);
/* register RTC and exit */
@@ -474,6 +472,9 @@ static int __devinit s3c_rtc_probe(struc
rtc->max_user_freq = 128;
platform_set_drvdata(pdev, rtc);
+
+ s3c_rtc_setfreq(&pdev->dev, 1);
+
return 0;
err_nortc:
file:0391d759dfdbd5fb1676f68920f67cec6d8ef37a -> file:a5b8e7b927019e66bdcad4cb4335a18fd3cb31a3
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -655,9 +655,9 @@ static int aac_send_raw_srb(struct aac_d
/* Does this really need to be GFP_DMA? */
p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
if(!p) {
- kfree (usg);
- dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+ dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
usg->sg[i].count,i,usg->count));
+ kfree(usg);
rcode = -ENOMEM;
goto cleanup;
}
file:477542602284661dbcb7d0d65d65223d612d3ec4 -> file:9e71ac611146ab86432664af1c2a40c7d2070ea0
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2516,7 +2516,7 @@ int fas216_eh_device_reset(struct scsi_c
if (info->scsi.phase == PHASE_IDLE)
fas216_kick(info);
- mod_timer(&info->eh_timer, 30 * HZ);
+ mod_timer(&info->eh_timer, jiffies + 30 * HZ);
spin_unlock_irqrestore(&info->host_lock, flags);
/*
file:f1a4246f890c044a9cb548366cd7a79865a42f4a -> file:aab4a39cf84df83112d767d5784f72b79f67734b
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -384,12 +384,12 @@ static int iscsi_prep_scsi_cmd_pdu(struc
WARN_ON(hdrlength >= 256);
hdr->hlength = hdrlength & 0xFF;
+ hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
if (session->tt->init_task && session->tt->init_task(task))
return -EIO;
task->state = ISCSI_TASK_RUNNING;
- hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
session->cmdsn++;
conn->scsicmd_pdus_cnt++;
@@ -2823,14 +2823,15 @@ static void iscsi_start_session_recovery
session->state = ISCSI_STATE_TERMINATE;
else if (conn->stop_stage != STOP_CONN_RECOVER)
session->state = ISCSI_STATE_IN_RECOVERY;
+
+ old_stop_stage = conn->stop_stage;
+ conn->stop_stage = flag;
spin_unlock_bh(&session->lock);
del_timer_sync(&conn->transport_timer);
iscsi_suspend_tx(conn);
spin_lock_bh(&session->lock);
- old_stop_stage = conn->stop_stage;
- conn->stop_stage = flag;
conn->c_stage = ISCSI_CONN_STOPPED;
spin_unlock_bh(&session->lock);
file:e15501170698a19957d22f92c6fbc2e3e89ac0c6 -> file:816ab97eb16da1c3269999e72912303dbe2f2c99
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -394,11 +394,15 @@ int sas_ata_init_host_and_port(struct do
void sas_ata_task_abort(struct sas_task *task)
{
struct ata_queued_cmd *qc = task->uldd_task;
+ struct request_queue *q = qc->scsicmd->device->request_queue;
struct completion *waiting;
+ unsigned long flags;
/* Bounce SCSI-initiated commands to the SCSI EH */
if (qc->scsicmd) {
+ spin_lock_irqsave(q->queue_lock, flags);
blk_abort_request(qc->scsicmd->request);
+ spin_unlock_irqrestore(q->queue_lock, flags);
scsi_schedule_eh(qc->scsicmd->device->host);
return;
}
file:1c558d3bce18c4817623078944e86557eb3adb9f -> file:39fb9aa93fe51175e4a18ae3c4b47cc4e3d2d3ce
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -1025,6 +1025,8 @@ int __sas_task_abort(struct sas_task *ta
void sas_task_abort(struct sas_task *task)
{
struct scsi_cmnd *sc = task->uldd_task;
+ struct request_queue *q = sc->device->request_queue;
+ unsigned long flags;
/* Escape for libsas internal commands */
if (!sc) {
@@ -1039,7 +1041,9 @@ void sas_task_abort(struct sas_task *tas
return;
}
+ spin_lock_irqsave(q->queue_lock, flags);
blk_abort_request(sc->request);
+ spin_unlock_irqrestore(q->queue_lock, flags);
scsi_schedule_eh(sc->device->host);
}
file:518712cc7253c2d46ad6a9450e0c4774d0d52e79 -> file:202fa0f4b805c2987c76d0df2b72387bfc71097f
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -3282,6 +3282,7 @@ static int megasas_mgmt_compat_ioctl_fw(
compat_alloc_user_space(sizeof(struct megasas_iocpacket));
int i;
int error = 0;
+ compat_uptr_t ptr;
if (clear_user(ioc, sizeof(*ioc)))
return -EFAULT;
@@ -3294,9 +3295,22 @@ static int megasas_mgmt_compat_ioctl_fw(
copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
return -EFAULT;
- for (i = 0; i < MAX_IOCTL_SGE; i++) {
- compat_uptr_t ptr;
+ /*
+ * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
+ * sense_len is not null, so prepare the 64bit value under
+ * the same condition.
+ */
+ if (ioc->sense_len) {
+ void __user **sense_ioc_ptr =
+ (void __user **)(ioc->frame.raw + ioc->sense_off);
+ compat_uptr_t *sense_cioc_ptr =
+ (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
+ if (get_user(ptr, sense_cioc_ptr) ||
+ put_user(compat_ptr(ptr), sense_ioc_ptr))
+ return -EFAULT;
+ }
+ for (i = 0; i < MAX_IOCTL_SGE; i++) {
if (get_user(ptr, &cioc->sgl[i].iov_base) ||
put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
copy_in_user(&ioc->sgl[i].iov_len,
file:1743640070ca402979021a2ec0029bcffae21f62 -> file:f10bf70c58df6eed8f30c9adb8f1a2d790a54a7e
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -5721,6 +5721,8 @@ _scsih_remove(struct pci_dev *pdev)
struct _sas_port *mpt2sas_port;
struct _sas_device *sas_device;
struct _sas_node *expander_sibling;
+ struct _raid_device *raid_device, *next;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
struct workqueue_struct *wq;
unsigned long flags;
@@ -5734,6 +5736,21 @@ _scsih_remove(struct pci_dev *pdev)
if (wq)
destroy_workqueue(wq);
+ /* release all the volumes */
+ list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
+ list) {
+ if (raid_device->starget) {
+ sas_target_priv_data =
+ raid_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ scsi_remove_target(&raid_device->starget->dev);
+ }
+ printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), wwid"
+ "(0x%016llx)\n", ioc->name, raid_device->handle,
+ (unsigned long long) raid_device->wwid);
+ _scsih_raid_device_remove(ioc, raid_device);
+ }
+
/* free ports attached to the sas_host */
retry_again:
list_for_each_entry(mpt2sas_port,
file:c790d45876c47591d04f8906b79aaa134cbccb2d -> file:cae6b2cf492fb099cd2b7b6a2257dd8a768c2105
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -657,6 +657,7 @@ static struct pci_device_id __devinitdat
{ PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
+ { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
{ } /* terminate list */
};
file:8371d917a9a2408242a2439c9c69f298cc012964 -> file:49ac4148493b9746a31cfdc50dcbe8a6d705f618
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1640,8 +1640,10 @@ qla1280_load_firmware_pio(struct scsi_ql
uint16_t mb[MAILBOX_REGISTER_COUNT], i;
int err;
+ spin_unlock_irq(ha->host->host_lock);
err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname,
&ha->pdev->dev);
+ spin_lock_irq(ha->host->host_lock);
if (err) {
printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
ql1280_board_tbl[ha->devnum].fwname, err);
@@ -1699,8 +1701,10 @@ qla1280_load_firmware_dma(struct scsi_ql
return -ENOMEM;
#endif
+ spin_unlock_irq(ha->host->host_lock);
err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname,
&ha->pdev->dev);
+ spin_lock_irq(ha->host->host_lock);
if (err) {
printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
ql1280_board_tbl[ha->devnum].fwname, err);
file:f3d1d1afa95b268a6ac47626e004f522b7223918 -> file:65ef03ca56873ac001f22a2fb4c5b1296c65a04b
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -453,6 +453,5 @@ extern void qla24xx_wrt_req_reg(struct q
extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
-extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *);
#endif /* _QLA_GBL_H */
file:b20a7169aac28990a2debb7670716668b9815d37 -> file:f3e5e30dd5ffde3b12735fed344a6a77209e548a
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1347,16 +1347,22 @@ qla2x00_status_entry(scsi_qla_host_t *vh
sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
if (IS_FWI2_CAPABLE(ha)) {
- sense_len = le32_to_cpu(sts24->sense_len);
- rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
- resid_len = le32_to_cpu(sts24->rsp_residual_count);
- fw_resid_len = le32_to_cpu(sts24->residual_len);
+ if (scsi_status & SS_SENSE_LEN_VALID)
+ sense_len = le32_to_cpu(sts24->sense_len);
+ if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
+ rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
+ if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
+ resid_len = le32_to_cpu(sts24->rsp_residual_count);
+ if (comp_status == CS_DATA_UNDERRUN)
+ fw_resid_len = le32_to_cpu(sts24->residual_len);
rsp_info = sts24->data;
sense_data = sts24->data;
host_to_fcp_swap(sts24->data, sizeof(sts24->data));
} else {
- sense_len = le16_to_cpu(sts->req_sense_length);
- rsp_info_len = le16_to_cpu(sts->rsp_info_len);
+ if (scsi_status & SS_SENSE_LEN_VALID)
+ sense_len = le16_to_cpu(sts->req_sense_length);
+ if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
+ rsp_info_len = le16_to_cpu(sts->rsp_info_len);
resid_len = le32_to_cpu(sts->residual_length);
rsp_info = sts->rsp_info;
sense_data = sts->req_sense_data;
@@ -1443,38 +1449,62 @@ qla2x00_status_entry(scsi_qla_host_t *vh
break;
case CS_DATA_UNDERRUN:
- resid = resid_len;
+ DEBUG2(printk(KERN_INFO
+ "scsi(%ld:%d:%d) UNDERRUN status detected 0x%x-0x%x. "
+ "resid=0x%x fw_resid=0x%x cdb=0x%x os_underflow=0x%x\n",
+ vha->host_no, cp->device->id, cp->device->lun, comp_status,
+ scsi_status, resid_len, fw_resid_len, cp->cmnd[0],
+ cp->underflow));
+
/* Use F/W calculated residual length. */
- if (IS_FWI2_CAPABLE(ha)) {
- if (!(scsi_status & SS_RESIDUAL_UNDER)) {
- lscsi_status = 0;
- } else if (resid != fw_resid_len) {
- scsi_status &= ~SS_RESIDUAL_UNDER;
- lscsi_status = 0;
+ resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
+ scsi_set_resid(cp, resid);
+ if (scsi_status & SS_RESIDUAL_UNDER) {
+ if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
+ DEBUG2(printk(
+ "scsi(%ld:%d:%d:%d) Dropped frame(s) "
+ "detected (%x of %x bytes)...residual "
+ "length mismatch...retrying command.\n",
+ vha->host_no, cp->device->channel,
+ cp->device->id, cp->device->lun, resid,
+ scsi_bufflen(cp)));
+
+ cp->result = DID_ERROR << 16 | lscsi_status;
+ break;
}
- resid = fw_resid_len;
- }
- if (scsi_status & SS_RESIDUAL_UNDER) {
- scsi_set_resid(cp, resid);
- } else {
- DEBUG2(printk(KERN_INFO
- "scsi(%ld:%d:%d) UNDERRUN status detected "
- "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
- "os_underflow=0x%x\n", vha->host_no,
- cp->device->id, cp->device->lun, comp_status,
- scsi_status, resid_len, resid, cp->cmnd[0],
- cp->underflow));
+ if (!lscsi_status &&
+ ((unsigned)(scsi_bufflen(cp) - resid) <
+ cp->underflow)) {
+ qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%d:%d:%d): Mid-layer underflow "
+ "detected (%x of %x bytes)...returning "
+ "error status.\n", vha->host_no,
+ cp->device->channel, cp->device->id,
+ cp->device->lun, resid, scsi_bufflen(cp));
+
+ cp->result = DID_ERROR << 16;
+ break;
+ }
+ } else if (!lscsi_status) {
+ DEBUG2(printk(
+ "scsi(%ld:%d:%d:%d) Dropped frame(s) detected "
+ "(%x of %x bytes)...firmware reported underrun..."
+ "retrying command.\n", vha->host_no,
+ cp->device->channel, cp->device->id,
+ cp->device->lun, resid, scsi_bufflen(cp)));
+ cp->result = DID_ERROR << 16;
+ break;
}
+ cp->result = DID_OK << 16 | lscsi_status;
+
/*
* Check to see if SCSI Status is non zero. If so report SCSI
* Status.
*/
if (lscsi_status != 0) {
- cp->result = DID_OK << 16 | lscsi_status;
-
if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
DEBUG2(printk(KERN_INFO
"scsi(%ld): QUEUE FULL status detected "
@@ -1501,42 +1531,6 @@ qla2x00_status_entry(scsi_qla_host_t *vh
break;
qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
- } else {
- /*
- * If RISC reports underrun and target does not report
- * it then we must have a lost frame, so tell upper
- * layer to retry it by reporting an error.
- */
- if (!(scsi_status & SS_RESIDUAL_UNDER)) {
- DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
- "frame(s) detected (%x of %x bytes)..."
- "retrying command.\n",
- vha->host_no, cp->device->channel,
- cp->device->id, cp->device->lun, resid,
- scsi_bufflen(cp)));
-
- scsi_set_resid(cp, resid);
- cp->result = DID_ERROR << 16;
- break;
- }
-
- /* Handle mid-layer underflow */
- if ((unsigned)(scsi_bufflen(cp) - resid) <
- cp->underflow) {
- qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d:%d): Mid-layer underflow "
- "detected (%x of %x bytes)...returning "
- "error status.\n", vha->host_no,
- cp->device->channel, cp->device->id,
- cp->device->lun, resid,
- scsi_bufflen(cp));
-
- cp->result = DID_ERROR << 16;
- break;
- }
-
- /* Everybody online, looking good... */
- cp->result = DID_OK << 16;
}
break;
@@ -2018,7 +2012,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id
spin_lock_irq(&ha->hardware_lock);
- vha = qla25xx_get_host(rsp);
+ vha = pci_get_drvdata(ha->pdev);
qla24xx_process_response_queue(vha, rsp);
if (!ha->mqenable) {
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -2246,30 +2240,28 @@ qla2x00_request_irqs(struct qla_hw_data
/* If possible, enable MSI-X. */
if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
- !IS_QLA8432(ha) && !IS_QLA8001(ha))
- goto skip_msix;
+ !IS_QLA8432(ha) && !IS_QLA8001(ha))
+ goto skip_msi;
+
+ if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
+ (ha->pdev->subsystem_device == 0x7040 ||
+ ha->pdev->subsystem_device == 0x7041 ||
+ ha->pdev->subsystem_device == 0x1705)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n",
+ ha->pdev->subsystem_vendor,
+ ha->pdev->subsystem_device));
+ goto skip_msi;
+ }
if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
!QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
ha->pdev->revision, ha->fw_attributes));
-
goto skip_msix;
}
- if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
- (ha->pdev->subsystem_device == 0x7040 ||
- ha->pdev->subsystem_device == 0x7041 ||
- ha->pdev->subsystem_device == 0x1705)) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
- ha->pdev->subsystem_vendor,
- ha->pdev->subsystem_device));
-
- goto skip_msi;
- }
-
ret = qla24xx_enable_msix(ha, rsp);
if (!ret) {
DEBUG2(qla_printk(KERN_INFO, ha,
@@ -2357,30 +2349,3 @@ int qla25xx_request_irq(struct rsp_que *
msix->rsp = rsp;
return ret;
}
-
-struct scsi_qla_host *
-qla25xx_get_host(struct rsp_que *rsp)
-{
- srb_t *sp;
- struct qla_hw_data *ha = rsp->hw;
- struct scsi_qla_host *vha = NULL;
- struct sts_entry_24xx *pkt;
- struct req_que *req;
- uint16_t que;
- uint32_t handle;
-
- pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
- que = MSW(pkt->handle);
- handle = (uint32_t) LSW(pkt->handle);
- req = ha->req_q_map[que];
- if (handle < MAX_OUTSTANDING_COMMANDS) {
- sp = req->outstanding_cmds[handle];
- if (sp)
- return sp->fcport->vha;
- else
- goto base_que;
- }
-base_que:
- vha = pci_get_drvdata(ha->pdev);
- return vha;
-}
file:e07b3617f019397188cd24674fdcfabe09e49eb5 -> file:4a69cc8c05e919b9b9780b214ad3148a51c14b30
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -638,11 +638,15 @@ failed:
static void qla_do_work(struct work_struct *work)
{
+ unsigned long flags;
struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
struct scsi_qla_host *vha;
+ struct qla_hw_data *ha = rsp->hw;
- vha = qla25xx_get_host(rsp);
+ spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
qla24xx_process_response_queue(vha, rsp);
+ spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
}
/* create response queue */
file:c4103bef41b59cc8988be725fa74822bba9e5d5d -> file:bc3e3636a3b8180cf1c04bd5ea44e37de1a92543
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -914,7 +914,8 @@ static int resp_start_stop(struct scsi_c
static sector_t get_sdebug_capacity(void)
{
if (scsi_debug_virtual_gb > 0)
- return 2048 * 1024 * (sector_t)scsi_debug_virtual_gb;
+ return (sector_t)scsi_debug_virtual_gb *
+ (1073741824 / scsi_debug_sector_size);
else
return sdebug_store_sectors;
}
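The scsi_debug change above makes the simulated capacity honour the configured
sector size: one gigabyte is 1073741824 bytes, so with the default 512-byte
sectors 1073741824 / 512 = 2097152 sectors per GB (exactly what the old
2048 * 1024 constant hard-coded), while with scsi_debug_sector_size=4096 the
new expression yields 262144 sectors per GB instead of over-reporting the
capacity eight-fold.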
file:1b0060b791e8dc6fe0b1dd4ace0a1e3f3ea96c7e -> file:573921d00070d79f070bc9f3866f300839e4af98
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -301,7 +301,20 @@ static int scsi_check_sense(struct scsi_
if (scmd->device->allow_restart &&
(sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
return FAILED;
- return SUCCESS;
+
+ if (blk_barrier_rq(scmd->request))
+ /*
+ * barrier requests should always retry on UA
+ * otherwise block will get a spurious error
+ */
+ return NEEDS_RETRY;
+ else
+ /*
+ * for normal (non barrier) commands, pass the
+ * UA upwards for a determination in the
+ * completion functions
+ */
+ return SUCCESS;
/* these three are not supported */
case COPY_ABORTED:
file:b98f763931c5fadb9c0b438c7099786b6ac8d41b -> file:d9564fb04f62cd5c390aed97547adf4f49b6fc24
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -308,6 +308,9 @@ int scsi_nonblockable_ioctl(struct scsi_
case SG_SCSI_RESET_DEVICE:
val = SCSI_TRY_RESET_DEVICE;
break;
+ case SG_SCSI_RESET_TARGET:
+ val = SCSI_TRY_RESET_TARGET;
+ break;
case SG_SCSI_RESET_BUS:
val = SCSI_TRY_RESET_BUS;
break;
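With the scsi_ioctl.c hunk above, the non-blocking ioctl path
(scsi_nonblockable_ioctl) now also maps SG_SCSI_RESET_TARGET to
SCSI_TRY_RESET_TARGET. A hypothetical user-space snippet exercising it is
sketched below; it assumes the installed <scsi/sg.h> defines
SG_SCSI_RESET_TARGET.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

/* Hypothetical example: ask the SCSI midlayer to attempt a target reset
 * through the sg ioctl path extended by the hunk above. */
static int request_target_reset(const char *dev)
{
	int op = SG_SCSI_RESET_TARGET;
	int fd = open(dev, O_RDWR | O_NONBLOCK);

	if (fd < 0)
		return -1;
	if (ioctl(fd, SG_SCSI_RESET, &op) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}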
file:bc9a88145a71e9fe1813161cf6a9895bf99c8a61 -> file:41d712e828ebe5b5fc199224477ddf335374d176
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -773,8 +773,14 @@ void scsi_io_completion(struct scsi_cmnd
* we already took a copy of the original into rq->errors which
* is what gets returned to the user
*/
- if (sense_valid && sshdr.sense_key == RECOVERED_ERROR) {
- if (!(req->cmd_flags & REQ_QUIET))
+ if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
+ /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
+ * print since caller wants ATA registers. Only occurs on
+ * SCSI ATA PASS_THROUGH commands when CK_COND=1
+ */
+ if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
+ ;
+ else if (!(req->cmd_flags & REQ_QUIET))
scsi_print_sense("", cmd);
result = 0;
/* BLOCK_PC may have set error */
file:bf52decfdef48d41e0366ddcd892d4f0f9e05168 -> file:db02e31bae391cb173f779f72055b368403ed25f
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1215,6 +1215,15 @@ store_fc_vport_delete(struct device *dev
{
struct fc_vport *vport = transport_class_to_vport(dev);
struct Scsi_Host *shost = vport_to_shost(vport);
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return -EBUSY;
+ }
+ vport->flags |= FC_VPORT_DELETING;
+ spin_unlock_irqrestore(shost->host_lock, flags);
fc_queue_work(shost, &vport->vport_delete_work);
return count;
@@ -1804,6 +1813,9 @@ store_fc_host_vport_delete(struct device
list_for_each_entry(vport, &fc_host->vports, peers) {
if ((vport->channel == 0) &&
(vport->port_name == wwpn) && (vport->node_name == wwnn)) {
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
+ break;
+ vport->flags |= FC_VPORT_DELETING;
match = 1;
break;
}
@@ -3328,18 +3340,6 @@ fc_vport_terminate(struct fc_vport *vpor
unsigned long flags;
int stat;
- spin_lock_irqsave(shost->host_lock, flags);
- if (vport->flags & FC_VPORT_CREATING) {
- spin_unlock_irqrestore(shost->host_lock, flags);
- return -EBUSY;
- }
- if (vport->flags & (FC_VPORT_DEL)) {
- spin_unlock_irqrestore(shost->host_lock, flags);
- return -EALREADY;
- }
- vport->flags |= FC_VPORT_DELETING;
- spin_unlock_irqrestore(shost->host_lock, flags);
-
if (i->f->vport_delete)
stat = i->f->vport_delete(vport);
else
@@ -3796,8 +3796,9 @@ fc_bsg_request_handler(struct request_qu
return;
while (!blk_queue_plugged(q)) {
- if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED))
- break;
+ if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) &&
+ !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
+ break;
req = blk_fetch_request(q);
if (!req)
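Taken together, the scsi_transport_fc.c hunks above move the FC_VPORT_DELETING
bookkeeping out of fc_vport_terminate() and into the two sysfs delete paths:
the flag is now set under host_lock before the delete work is queued, so a
second delete request for the same vport fails early with -EBUSY (or is simply
skipped in the host-level delete scan) instead of racing with the teardown
already in flight. The final hunk also lets fc_bsg_request_handler() keep
fetching requests for a blocked rport once its fast-fail timer has fired.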
file:9093c7261f330085bf4ba843b18b8e53ad4270cd -> file:7694a95bdb5361868a112ee6804bb2824f9f92c1
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -971,6 +971,7 @@ static void sd_prepare_flush(struct requ
{
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->timeout = SD_TIMEOUT;
+ rq->retries = SD_MAX_RETRIES;
rq->cmd[0] = SYNCHRONIZE_CACHE;
rq->cmd_len = 10;
}
file:55b034b72708e5f8b5cc873082e3a44003325ba7 -> file:3c8a0248ea45f69af399b56f5a91c78d115e6c7c
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -591,8 +591,6 @@ static int ses_intf_add(struct device *c
ses_dev->page10_len = len;
buf = NULL;
}
- kfree(hdr_buf);
-
scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
if (!scomp)
goto err_free;
@@ -604,6 +602,8 @@ static int ses_intf_add(struct device *c
goto err_free;
}
+ kfree(hdr_buf);
+
edev->scratch = ses_dev;
for (i = 0; i < components; i++)
edev->component[i].scratch = scomp + i;
file:deac67e35ce92e950c18061b279219c25454df25 -> file:48ead154bd02778f46345be4d54159ab32a693ff
--- a/drivers/serial/8250_pnp.c
+++ b/drivers/serial/8250_pnp.c
@@ -348,6 +348,8 @@ static const struct pnp_device_id pnp_de
{ "FUJ02E6", 0 },
/* Fujitsu Wacom 2FGT Tablet PC device */
{ "FUJ02E7", 0 },
+ /* Fujitsu Wacom 1FGT Tablet PC device */
+ { "FUJ02E9", 0 },
/*
* LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in
* disguise)
file:300cea768d746939762afee7f42a8ad2cf5d06a1 -> file:7feb902c32cb99b0570e76e9d8b8a9e99f52d162
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/serial/cpm_uart/cpm_uart_core.c
@@ -930,6 +930,83 @@ static void cpm_uart_config_port(struct
}
}
+#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_CPM_CONSOLE)
+/*
+ * Write a string to the serial port
+ * Note that this is called with interrupts already disabled
+ */
+static void cpm_uart_early_write(struct uart_cpm_port *pinfo,
+ const char *string, u_int count)
+{
+ unsigned int i;
+ cbd_t __iomem *bdp, *bdbase;
+ unsigned char *cpm_outp_addr;
+
+ /* Get the address of the host memory buffer.
+ */
+ bdp = pinfo->tx_cur;
+ bdbase = pinfo->tx_bd_base;
+
+ /*
+ * Now, do each character. This is not as bad as it looks
+ * since this is a holding FIFO and not a transmitting FIFO.
+ * We could add the complexity of filling the entire transmit
+ * buffer, but we would just wait longer between accesses......
+ */
+ for (i = 0; i < count; i++, string++) {
+ /* Wait for transmitter fifo to empty.
+ * Ready indicates output is ready, and xmt is doing
+ * that, not that it is ready for us to send.
+ */
+ while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
+ ;
+
+ /* Send the character out.
+ * If the buffer address is in the CPM DPRAM, don't
+ * convert it.
+ */
+ cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr),
+ pinfo);
+ *cpm_outp_addr = *string;
+
+ out_be16(&bdp->cbd_datlen, 1);
+ setbits16(&bdp->cbd_sc, BD_SC_READY);
+
+ if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
+ bdp = bdbase;
+ else
+ bdp++;
+
+ /* if a LF, also do CR... */
+ if (*string == 10) {
+ while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
+ ;
+
+ cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr),
+ pinfo);
+ *cpm_outp_addr = 13;
+
+ out_be16(&bdp->cbd_datlen, 1);
+ setbits16(&bdp->cbd_sc, BD_SC_READY);
+
+ if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
+ bdp = bdbase;
+ else
+ bdp++;
+ }
+ }
+
+ /*
+ * Finally, Wait for transmitter & holding register to empty
+ * and restore the IER
+ */
+ while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
+ ;
+
+ pinfo->tx_cur = bdp;
+}
+#endif
+
#ifdef CONFIG_CONSOLE_POLL
/* Serial polling routines for writing and reading from the uart while
* in an interrupt or debug context.
@@ -999,7 +1076,7 @@ static void cpm_put_poll_char(struct uar
static char ch[2];
ch[0] = (char)c;
- cpm_uart_early_write(pinfo->port.line, ch, 1);
+ cpm_uart_early_write(pinfo, ch, 1);
}
#endif /* CONFIG_CONSOLE_POLL */
@@ -1130,9 +1207,6 @@ static void cpm_uart_console_write(struc
u_int count)
{
struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index];
- unsigned int i;
- cbd_t __iomem *bdp, *bdbase;
- unsigned char *cp;
unsigned long flags;
int nolock = oops_in_progress;
@@ -1142,66 +1216,7 @@ static void cpm_uart_console_write(struc
spin_lock_irqsave(&pinfo->port.lock, flags);
}
- /* Get the address of the host memory buffer.
- */
- bdp = pinfo->tx_cur;
- bdbase = pinfo->tx_bd_base;
-
- /*
- * Now, do each character. This is not as bad as it looks
- * since this is a holding FIFO and not a transmitting FIFO.
- * We could add the complexity of filling the entire transmit
- * buffer, but we would just wait longer between accesses......
- */
- for (i = 0; i < count; i++, s++) {
- /* Wait for transmitter fifo to empty.
- * Ready indicates output is ready, and xmt is doing
- * that, not that it is ready for us to send.
- */
- while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
- ;
-
- /* Send the character out.
- * If the buffer address is in the CPM DPRAM, don't
- * convert it.
- */
- cp = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo);
- *cp = *s;
-
- out_be16(&bdp->cbd_datlen, 1);
- setbits16(&bdp->cbd_sc, BD_SC_READY);
-
- if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
- bdp = bdbase;
- else
- bdp++;
-
- /* if a LF, also do CR... */
- if (*s == 10) {
- while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
- ;
-
- cp = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo);
- *cp = 13;
-
- out_be16(&bdp->cbd_datlen, 1);
- setbits16(&bdp->cbd_sc, BD_SC_READY);
-
- if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
- bdp = bdbase;
- else
- bdp++;
- }
- }
-
- /*
- * Finally, Wait for transmitter & holding register to empty
- * and restore the IER
- */
- while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
- ;
-
- pinfo->tx_cur = bdp;
+ cpm_uart_early_write(pinfo, s, count);
if (unlikely(nolock)) {
local_irq_restore(flags);
file:18130f11238e6025b16682c9a00cca218727e370 -> file:2b550182a879db10044bc177649140bd52441033
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -119,7 +119,8 @@
#define MX2_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select, on mx2/mx3 */
#define UCR3_INVT (1<<1) /* Inverted Infrared transmission */
#define UCR3_BPEN (1<<0) /* Preset registers enable */
-#define UCR4_CTSTL_32 (32<<10) /* CTS trigger level (32 chars) */
+#define UCR4_CTSTL_SHF 10 /* CTS trigger level shift */
+#define UCR4_CTSTL_MASK 0x3F /* CTS trigger is 6 bits wide */
#define UCR4_INVR (1<<9) /* Inverted infrared reception */
#define UCR4_ENIRI (1<<8) /* Serial infrared interrupt enable */
#define UCR4_WKEN (1<<7) /* Wake interrupt enable */
@@ -590,6 +591,9 @@ static int imx_setup_ufcr(struct imx_por
return 0;
}
+/* half the RX buffer size */
+#define CTSTL 16
+
static int imx_startup(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
@@ -606,6 +610,10 @@ static int imx_startup(struct uart_port
if (USE_IRDA(sport))
temp |= UCR4_IRSC;
+ /* set the trigger level for CTS */
+ temp &= ~(UCR4_CTSTL_MASK<< UCR4_CTSTL_SHF);
+ temp |= CTSTL<< UCR4_CTSTL_SHF;
+
writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
if (USE_IRDA(sport)) {
@@ -1279,7 +1287,7 @@ static int serial_imx_probe(struct platf
sport->use_irda = 1;
#endif
- if (pdata->init) {
+ if (pdata && pdata->init) {
ret = pdata->init(pdev);
if (ret)
goto clkput;
@@ -1292,7 +1300,7 @@ static int serial_imx_probe(struct platf
return 0;
deinit:
- if (pdata->exit)
+ if (pdata && pdata->exit)
pdata->exit(pdev);
clkput:
clk_put(sport->clk);
@@ -1321,7 +1329,7 @@ static int serial_imx_remove(struct plat
clk_disable(sport->clk);
- if (pdata->exit)
+ if (pdata && pdata->exit)
pdata->exit(pdev);
iounmap(sport->port.membase);
file:9681536163caa5bdf7a36fc1dcfff9f8da3a66ec -> file:bbf1cb21a7d3509d00d1461a29759927ee157e1a
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -233,6 +233,9 @@ void ssb_chipcommon_init(struct ssb_chip
{
if (!cc->dev)
return; /* We don't have a ChipCommon */
+ if (cc->dev->id.revision >= 11)
+ cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
+ ssb_dprintk(KERN_INFO PFX "chipcommon status is 0x%x\n", cc->status);
ssb_pmu_init(cc);
chipco_powercontrol_init(cc);
ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST);
file:64abd11f6fbbef64184011c9b4b7d0e0fb182ea8 -> file:8e194d557cace47e5efa2e12c1411544d410ba69
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -495,9 +495,9 @@ static void ssb_pmu_resources_init(struc
chipco_write32(cc, SSB_CHIPCO_PMU_MAXRES_MSK, max_msk);
}
+/* http://bcm-v4.sipsolutions.net/802.11/SSB/PmuInit */
void ssb_pmu_init(struct ssb_chipcommon *cc)
{
- struct ssb_bus *bus = cc->dev->bus;
u32 pmucap;
if (!(cc->capabilities & SSB_CHIPCO_CAP_PMU))
@@ -509,15 +509,12 @@ void ssb_pmu_init(struct ssb_chipcommon
ssb_dprintk(KERN_DEBUG PFX "Found rev %u PMU (capabilities 0x%08X)\n",
cc->pmu.rev, pmucap);
- if (cc->pmu.rev >= 1) {
- if ((bus->chip_id == 0x4325) && (bus->chip_rev < 2)) {
- chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
- ~SSB_CHIPCO_PMU_CTL_NOILPONW);
- } else {
- chipco_set32(cc, SSB_CHIPCO_PMU_CTL,
- SSB_CHIPCO_PMU_CTL_NOILPONW);
- }
- }
+ if (cc->pmu.rev == 1)
+ chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
+ ~SSB_CHIPCO_PMU_CTL_NOILPONW);
+ else
+ chipco_set32(cc, SSB_CHIPCO_PMU_CTL,
+ SSB_CHIPCO_PMU_CTL_NOILPONW);
ssb_pmu_pll_init(cc);
ssb_pmu_resources_init(cc);
}
file:9e50896233aa969e5fa145bcf630dd0ba9055363 -> file:17a178139548c781b4c78e55641b6e0455165b45
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -22,6 +22,7 @@
#include "ssb_private.h"
+bool ssb_is_sprom_available(struct ssb_bus *bus);
/* Define the following to 1 to enable a printk on each coreswitch. */
#define SSB_VERBOSE_PCICORESWITCH_DEBUG 0
@@ -167,7 +168,7 @@ err_pci:
}
/* Get the word-offset for a SSB_SPROM_XXX define. */
-#define SPOFF(offset) (((offset) - SSB_SPROM_BASE) / sizeof(u16))
+#define SPOFF(offset) ((offset) / sizeof(u16))
/* Helper to extract some _offset, which is one of the SSB_SPROM_XXX defines. */
#define SPEX16(_outvar, _offset, _mask, _shift) \
out->_outvar = ((in[SPOFF(_offset)] & (_mask)) >> (_shift))
@@ -252,8 +253,13 @@ static int sprom_do_read(struct ssb_bus
{
int i;
+ /* Check if SPROM can be read */
+ if (ioread16(bus->mmio + bus->sprom_offset) == 0xFFFF) {
+ ssb_printk(KERN_ERR PFX "Unable to read SPROM\n");
+ return -ENODEV;
+ }
for (i = 0; i < bus->sprom_size; i++)
- sprom[i] = ioread16(bus->mmio + SSB_SPROM_BASE + (i * 2));
+ sprom[i] = ioread16(bus->mmio + bus->sprom_offset + (i * 2));
return 0;
}
@@ -284,7 +290,7 @@ static int sprom_do_write(struct ssb_bus
ssb_printk("75%%");
else if (i % 2)
ssb_printk(".");
- writew(sprom[i], bus->mmio + SSB_SPROM_BASE + (i * 2));
+ writew(sprom[i], bus->mmio + bus->sprom_offset + (i * 2));
mmiowb();
msleep(20);
}
@@ -620,21 +626,49 @@ static int ssb_pci_sprom_get(struct ssb_
int err = -ENOMEM;
u16 *buf;
+ if (!ssb_is_sprom_available(bus)) {
+ ssb_printk(KERN_ERR PFX "No SPROM available!\n");
+ return -ENODEV;
+ }
+ if (bus->chipco.dev) { /* can be unavailable! */
+ /*
+ * Get the SPROM offset: SSB_SPROM_BASE1, except on
+ * chipcommon rev >= 31, or on chip ID 0x4312 when
+ * (chipcommon status & 3) == 2, which use SSB_SPROM_BASE31
+ */
+ if (bus->chipco.dev->id.revision >= 31)
+ bus->sprom_offset = SSB_SPROM_BASE31;
+ else if (bus->chip_id == 0x4312 &&
+ (bus->chipco.status & 0x03) == 2)
+ bus->sprom_offset = SSB_SPROM_BASE31;
+ else
+ bus->sprom_offset = SSB_SPROM_BASE1;
+ } else {
+ bus->sprom_offset = SSB_SPROM_BASE1;
+ }
+ ssb_dprintk(KERN_INFO PFX "SPROM offset is 0x%x\n", bus->sprom_offset);
+
buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL);
if (!buf)
goto out;
bus->sprom_size = SSB_SPROMSIZE_WORDS_R123;
- sprom_do_read(bus, buf);
+ err = sprom_do_read(bus, buf);
+ if (err)
+ goto out_free;
err = sprom_check_crc(buf, bus->sprom_size);
if (err) {
/* try for a 440 byte SPROM - revision 4 and higher */
kfree(buf);
buf = kcalloc(SSB_SPROMSIZE_WORDS_R4, sizeof(u16),
GFP_KERNEL);
- if (!buf)
+ if (!buf) {
+ err = -ENOMEM;
goto out;
+ }
bus->sprom_size = SSB_SPROMSIZE_WORDS_R4;
- sprom_do_read(bus, buf);
+ err = sprom_do_read(bus, buf);
+ if (err)
+ goto out_free;
err = sprom_check_crc(buf, bus->sprom_size);
if (err) {
/* All CRC attempts failed.
file:eb708431cb964d36e57ab80b9a9d18b1e5f7f7cb -> file:5f7154d9d04e09fc0ef2c892bfac73cac03b0f53
--- a/drivers/ssb/sprom.c
+++ b/drivers/ssb/sprom.c
@@ -179,3 +179,18 @@ const struct ssb_sprom *ssb_get_fallback
{
return fallback_sprom;
}
+
+/* http://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */
+bool ssb_is_sprom_available(struct ssb_bus *bus)
+{
+ /* the status register only exists on chipcommon rev >= 11 and we only
+ need to check for rev >= 31 */
+ /* this routine differs from the specs as we do not access the SPROM
+ directly on PCMCIA */
+ if (bus->bustype == SSB_BUSTYPE_PCI &&
+ bus->chipco.dev && /* can be unavailable! */
+ bus->chipco.dev->id.revision >= 31)
+ return bus->chipco.capabilities & SSB_CHIPCO_CAP_SPROM;
+
+ return true;
+}
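
Read together with the pci.c hunks earlier, this adds a small decision table: rev >= 31 ChipCommon cores report SPROM presence through a capability bit, and the SPROM window moves from SSB_SPROM_BASE1 to SSB_SPROM_BASE31 on rev >= 31, or on a 0x4312 chip whose status bits read 2. A stand-alone restatement of that logic; the numeric offsets and the cc_info type are illustrative assumptions, not the ssb structures:

	#include <stdbool.h>
	#include <stdint.h>

	/* Illustrative stand-ins for the SSB constants used above. */
	#define SPROM_BASE1  0x1000
	#define SPROM_BASE31 0x0800
	#define CAP_SPROM    0x40000000u

	struct cc_info {
		bool present;           /* ChipCommon core exists */
		uint8_t revision;
		uint32_t capabilities;
		uint32_t status;        /* chip status register */
	};

	static bool sprom_available(const struct cc_info *cc)
	{
		/* Only rev >= 31 cores advertise SPROM presence explicitly;
		 * the kernel version additionally restricts this to PCI buses. */
		if (cc->present && cc->revision >= 31)
			return cc->capabilities & CAP_SPROM;
		return true;
	}

	static unsigned int sprom_offset(const struct cc_info *cc, uint16_t chip_id)
	{
		if (!cc->present)
			return SPROM_BASE1;
		if (cc->revision >= 31)
			return SPROM_BASE31;
		if (chip_id == 0x4312 && (cc->status & 0x3) == 2)
			return SPROM_BASE31;
		return SPROM_BASE1;
	}
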
file:9aef87fc81dcd3560cda172fef89471b87b14bec -> file:27829e77ca08f110ae6597d5d0871b233ce5eff6
--- a/drivers/staging/comedi/drivers/ni_mio_cs.c
+++ b/drivers/staging/comedi/drivers/ni_mio_cs.c
@@ -123,7 +123,7 @@ static const struct ni_board_struct ni_b
.adbits = 12,
.ai_fifo_depth = 1024,
.alwaysdither = 0,
- .gainlkup = ai_gain_16,
+ .gainlkup = ai_gain_4,
.ai_speed = 5000,
.n_aochan = 2,
.aobits = 12,
file:cca4e869f0ecf10868a17d7d7cf060f6b94b87df -> file:5c9c1bc3eb61260be5a24ea969be3a041de802fc
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -1,4 +1,4 @@
-#define DRIVER_VERSION "v2.2"
+#define DRIVER_VERSION "v2.4"
#define DRIVER_AUTHOR "Bernd Porr, BerndPorr@f2s.com"
#define DRIVER_DESC "Stirling/ITL USB-DUX -- Bernd.Porr@f2s.com"
/*
@@ -80,6 +80,9 @@ sampling rate. If you sample two channel
* 2.0: PWM seems to be stable and is not interfering with the other functions
* 2.1: changed PWM API
* 2.2: added firmware kernel request to fix an udev problem
+ * 2.3: corrected a bug in bulk timeouts which were far too short
+ * 2.4: fixed a bug which caused the driver to hang when it ran out of data.
+ * Thanks to Jan-Matthias Braun and Ian for spotting and fixing the bug.
*
*/
@@ -101,8 +104,8 @@ sampling rate. If you sample two channel
#define BOARDNAME "usbdux"
-/* timeout for the USB-transfer */
-#define EZTIMEOUT 30
+/* timeout for the USB-transfer in ms */
+#define BULK_TIMEOUT 1000
/* constants for "firmware" upload and download */
#define USBDUXSUB_FIRMWARE 0xA0
@@ -531,6 +534,7 @@ static void usbduxsub_ai_IsocIrq(struct
}
}
/* tell comedi that data is there */
+ s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
comedi_event(this_usbduxsub->comedidev, s);
}
@@ -750,7 +754,7 @@ static int usbduxsub_start(struct usbdux
/* Length */
1,
/* Timeout */
- EZTIMEOUT);
+ BULK_TIMEOUT);
if (errcode < 0) {
dev_err(&usbduxsub->interface->dev,
"comedi_: control msg failed (start)\n");
@@ -780,7 +784,7 @@ static int usbduxsub_stop(struct usbduxs
/* Length */
1,
/* Timeout */
- EZTIMEOUT);
+ BULK_TIMEOUT);
if (errcode < 0) {
dev_err(&usbduxsub->interface->dev,
"comedi_: control msg failed (stop)\n");
@@ -810,7 +814,7 @@ static int usbduxsub_upload(struct usbdu
/* length */
len,
/* timeout */
- EZTIMEOUT);
+ BULK_TIMEOUT);
dev_dbg(&usbduxsub->interface->dev, "comedi_: result=%d\n", errcode);
if (errcode < 0) {
dev_err(&usbduxsub->interface->dev, "comedi_: upload failed\n");
@@ -1110,7 +1114,7 @@ static int send_dux_commands(struct usbd
usb_sndbulkpipe(this_usbduxsub->usbdev,
COMMAND_OUT_EP),
this_usbduxsub->dux_commands, SIZEOFDUXBUFFER,
- &nsent, 10);
+ &nsent, BULK_TIMEOUT);
if (result < 0)
dev_err(&this_usbduxsub->interface->dev, "comedi%d: "
"could not transmit dux_command to the usb-device, "
@@ -1130,7 +1134,7 @@ static int receive_dux_commands(struct u
usb_rcvbulkpipe(this_usbduxsub->usbdev,
COMMAND_IN_EP),
this_usbduxsub->insnBuffer, SIZEINSNBUF,
- &nrec, 1);
+ &nrec, BULK_TIMEOUT);
if (result < 0) {
dev_err(&this_usbduxsub->interface->dev, "comedi%d: "
"insn: USB error %d while receiving DUX command"
file:c2809f2a2ce0e0e72b1b930f39929a50179d3cd5 -> file:b12237f90db24e1e63a2013af0b57a1d130b7b96
--- a/drivers/staging/hv/Hv.c
+++ b/drivers/staging/hv/Hv.c
@@ -306,9 +306,9 @@ void HvCleanup(void)
DPRINT_ENTER(VMBUS);
if (gHvContext.SignalEventBuffer) {
+ kfree(gHvContext.SignalEventBuffer);
gHvContext.SignalEventBuffer = NULL;
gHvContext.SignalEventParam = NULL;
- kfree(gHvContext.SignalEventBuffer);
}
if (gHvContext.GuestId == HV_LINUX_GUEST_ID) {
file:26d79975387cc0b09e3335b8aeffc1b28af457ab -> file:f05f4e125c48893a2b249fd0b2eb1b6452570bdd
--- a/drivers/staging/hv/RndisFilter.c
+++ b/drivers/staging/hv/RndisFilter.c
@@ -756,6 +756,7 @@ static int RndisFilterOpenDevice(struct
ret = RndisFilterSetPacketFilter(Device,
NDIS_PACKET_TYPE_BROADCAST |
+ NDIS_PACKET_TYPE_ALL_MULTICAST |
NDIS_PACKET_TYPE_DIRECTED);
if (ret == 0)
Device->State = RNDIS_DEV_DATAINITIALIZED;
file:0d7459e2d0360b8996b104e1385df56bd9fd8150 -> file:4c3c8bc4bc38984a47c791a88a21f6cb9edfc79d
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -413,8 +413,7 @@ static int netvsc_probe(struct device *d
if (!net_drv_obj->Base.OnDeviceAdd)
return -1;
- net = alloc_netdev(sizeof(struct net_device_context), "seth%d",
- ether_setup);
+ net = alloc_etherdev(sizeof(struct net_device_context));
if (!net)
return -1;
file:894eecfc63ca5bd3e8e84fb2b5bf11dc4680df05 -> file:6acc49a55a5746150e9d392f8d8191939be3ead5
--- a/drivers/staging/hv/vmbus_drv.c
+++ b/drivers/staging/hv/vmbus_drv.c
@@ -24,6 +24,8 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
+#include <linux/pci.h>
+#include <linux/dmi.h>
#include "osd.h"
#include "logging.h"
#include "vmbus.h"
@@ -946,6 +948,19 @@ static irqreturn_t vmbus_isr(int irq, vo
}
}
+static struct dmi_system_id __initdata microsoft_hv_dmi_table[] = {
+ {
+ .ident = "Hyper-V",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
+ DMI_MATCH(DMI_BOARD_NAME, "Virtual Machine"),
+ },
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(dmi, microsoft_hv_dmi_table);
+
static int __init vmbus_init(void)
{
int ret = 0;
@@ -957,6 +972,9 @@ static int __init vmbus_init(void)
vmbus_loglevel, HIWORD(vmbus_loglevel), LOWORD(vmbus_loglevel));
/* Todo: it is used for loglevel, to be ported to new kernel. */
+ if (!dmi_check_system(microsoft_hv_dmi_table))
+ return -ENODEV;
+
ret = vmbus_bus_init(VmbusInitialize);
DPRINT_EXIT(VMBUS_DRV);
@@ -973,6 +991,18 @@ static void __exit vmbus_exit(void)
return;
}
+/*
+ * We use a PCI table to determine if we should autoload this driver. This is
+ * needed by distro tools to determine if the hyperv drivers should be
+ * installed and/or configured. We don't do anything else with the table, but
+ * it needs to be present.
+ */
+static const struct pci_device_id microsoft_hv_pci_table[] = {
+ { PCI_DEVICE(0x1414, 0x5353) }, /* VGA compatible controller */
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, microsoft_hv_pci_table);
+
MODULE_LICENSE("GPL");
module_param(vmbus_irq, int, S_IRUGO);
module_param(vmbus_loglevel, int, S_IRUGO);
file:0bc0fb99d2e40c0692768a3213e2f781405c9e84 -> file:98b0f8e726fa07fbefa6d42db58d4a4b0355c00c
--- a/drivers/staging/rt2860/common/2860_rtmp_init.c
+++ b/drivers/staging/rt2860/common/2860_rtmp_init.c
@@ -716,7 +716,7 @@ VOID RTMPFreeTxRxRingMemory(
{
if ((pAd->RxRing.Cell[index].DmaBuf.AllocVa) && (pAd->RxRing.Cell[index].pNdisPacket))
{
- PCI_UNMAP_SINGLE(pObj->pci_dev, pAd->RxRing.Cell[index].DmaBuf.AllocPa, pAd->RxRing.Cell[index].DmaBuf.AllocSize, PCI_DMA_FROMDEVICE);
+ PCI_UNMAP_SINGLE(pAd, pAd->RxRing.Cell[index].DmaBuf.AllocPa, pAd->RxRing.Cell[index].DmaBuf.AllocSize, PCI_DMA_FROMDEVICE);
RELEASE_NDIS_PACKET(pAd, pAd->RxRing.Cell[index].pNdisPacket, NDIS_STATUS_SUCCESS);
}
}
file:66274d7666ff04de3b7e6215196d377e740f1b9d -> file:6d52d6adbb41f4878a9a6501dc04753eda145b8f
--- a/drivers/staging/rtl8192su/r8192U_core.c
+++ b/drivers/staging/rtl8192su/r8192U_core.c
@@ -112,20 +112,25 @@ u32 rt_global_debug_component = \
static struct usb_device_id rtl8192_usb_id_tbl[] = {
/* Realtek */
+ {USB_DEVICE(0x0bda, 0x8171)},
{USB_DEVICE(0x0bda, 0x8192)},
{USB_DEVICE(0x0bda, 0x8709)},
/* Corega */
{USB_DEVICE(0x07aa, 0x0043)},
/* Belkin */
{USB_DEVICE(0x050d, 0x805E)},
+ {USB_DEVICE(0x050d, 0x815F)}, /* Belkin F5D8053 v6 */
/* Sitecom */
{USB_DEVICE(0x0df6, 0x0031)},
+ {USB_DEVICE(0x0df6, 0x004b)}, /* WL-349 */
/* EnGenius */
{USB_DEVICE(0x1740, 0x9201)},
/* Dlink */
{USB_DEVICE(0x2001, 0x3301)},
/* Zinwell */
{USB_DEVICE(0x5a57, 0x0290)},
+ /* Guillemot */
+ {USB_DEVICE(0x06f8, 0xe031)},
//92SU
{USB_DEVICE(0x0bda, 0x8172)},
{}
file:6da1021e8a65edd1d9baad721ae3d50bc58f78da -> file:a2566f1075d557014e4bcd443fb02ed36202dc72
--- a/drivers/staging/usbip/usbip_event.c
+++ b/drivers/staging/usbip/usbip_event.c
@@ -117,6 +117,9 @@ void usbip_stop_eh(struct usbip_device *
{
struct usbip_task *eh = &ud->eh;
+ if (eh->thread == current)
+ return; /* do not wait for myself */
+
wait_for_completion(&eh->thread_done);
usbip_dbg_eh("usbip_eh has finished\n");
}
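
The guard keeps the event-handler thread from blocking on its own completion. The same self-deadlock check in a stand-alone pthread form, purely illustrative and not the usbip API:

	#include <pthread.h>
	#include <stddef.h>

	struct worker {
		pthread_t thread;
	};

	/* Block until the worker exits, unless the caller *is* the worker. */
	static void worker_stop(struct worker *w)
	{
		if (pthread_equal(pthread_self(), w->thread))
			return;                 /* do not wait for myself */
		pthread_join(w->thread, NULL);
	}
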
file:53450b48eaa6e615ae4626f7aa4192003bbc3447 -> file:269d1e2382b75d050e5bb17c89d09af97ac9a7fd
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1089,11 +1089,13 @@ device_found1(struct pci_dev *pcid, cons
}
//2008-07-21-01<Add>by MikeLiu
//register wpadev
+#if 0
if(wpa_set_wpadev(pDevice, 1)!=0) {
printk("Fail to Register WPADEV?\n");
unregister_netdev(pDevice->dev);
free_netdev(dev);
}
+#endif
device_print_info(pDevice);
pci_set_drvdata(pcid, pDevice);
return 0;
file:e4eca7810bcf1bed6821086e4bab6130525e3c17 -> file:0e64037a8de14d59e2c3454adbb1988443b66c60
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -170,6 +170,7 @@ static void acm_write_done(struct acm *a
{
wb->use = 0;
acm->transmitting--;
+ usb_autopm_put_interface_async(acm->control);
}
/*
@@ -211,9 +212,12 @@ static int acm_write_start(struct acm *a
}
dbg("%s susp_count: %d", __func__, acm->susp_count);
+ usb_autopm_get_interface_async(acm->control);
if (acm->susp_count) {
- acm->delayed_wb = wb;
- schedule_work(&acm->waker);
+ if (!acm->delayed_wb)
+ acm->delayed_wb = wb;
+ else
+ usb_autopm_put_interface_async(acm->control);
spin_unlock_irqrestore(&acm->write_lock, flags);
return 0; /* A white lie */
}
@@ -534,23 +538,6 @@ static void acm_softint(struct work_stru
tty_kref_put(tty);
}
-static void acm_waker(struct work_struct *waker)
-{
- struct acm *acm = container_of(waker, struct acm, waker);
- int rv;
-
- rv = usb_autopm_get_interface(acm->control);
- if (rv < 0) {
- dev_err(&acm->dev->dev, "Autopm failure in %s\n", __func__);
- return;
- }
- if (acm->delayed_wb) {
- acm_start_wb(acm, acm->delayed_wb);
- acm->delayed_wb = NULL;
- }
- usb_autopm_put_interface(acm->control);
-}
-
/*
* TTY handlers
*/
@@ -1178,7 +1165,6 @@ made_compressed_probe:
acm->urb_task.func = acm_rx_tasklet;
acm->urb_task.data = (unsigned long) acm;
INIT_WORK(&acm->work, acm_softint);
- INIT_WORK(&acm->waker, acm_waker);
init_waitqueue_head(&acm->drain_wait);
spin_lock_init(&acm->throttle_lock);
spin_lock_init(&acm->write_lock);
@@ -1215,7 +1201,7 @@ made_compressed_probe:
if (rcv->urb == NULL) {
dev_dbg(&intf->dev,
"out of memory (read urbs usb_alloc_urb)\n");
- goto alloc_fail7;
+ goto alloc_fail6;
}
rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
@@ -1239,7 +1225,7 @@ made_compressed_probe:
if (snd->urb == NULL) {
dev_dbg(&intf->dev,
"out of memory (write urbs usb_alloc_urb)");
- goto alloc_fail7;
+ goto alloc_fail8;
}
if (usb_endpoint_xfer_int(epwrite))
@@ -1278,6 +1264,7 @@ made_compressed_probe:
i = device_create_file(&intf->dev,
&dev_attr_iCountryCodeRelDate);
if (i < 0) {
+ device_remove_file(&intf->dev, &dev_attr_wCountryCodes);
kfree(acm->country_codes);
goto skip_countries;
}
@@ -1314,6 +1301,7 @@ alloc_fail8:
usb_free_urb(acm->wb[i].urb);
alloc_fail7:
acm_read_buffers_free(acm);
+alloc_fail6:
for (i = 0; i < num_rx_buf; i++)
usb_free_urb(acm->ru[i].urb);
usb_free_urb(acm->ctrlurb);
@@ -1343,7 +1331,6 @@ static void stop_data_traffic(struct acm
tasklet_enable(&acm->urb_task);
cancel_work_sync(&acm->work);
- cancel_work_sync(&acm->waker);
}
static void acm_disconnect(struct usb_interface *intf)
@@ -1435,6 +1422,7 @@ static int acm_suspend(struct usb_interf
static int acm_resume(struct usb_interface *intf)
{
struct acm *acm = usb_get_intfdata(intf);
+ struct acm_wb *wb;
int rv = 0;
int cnt;
@@ -1449,6 +1437,21 @@ static int acm_resume(struct usb_interfa
mutex_lock(&acm->mutex);
if (acm->port.count) {
rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
+
+ spin_lock_irq(&acm->write_lock);
+ if (acm->delayed_wb) {
+ wb = acm->delayed_wb;
+ acm->delayed_wb = NULL;
+ spin_unlock_irq(&acm->write_lock);
+ acm_start_wb(acm, wb);
+ } else {
+ spin_unlock_irq(&acm->write_lock);
+ }
+
+ /*
+ * error checking is delayed because we must
+ * complete the write path at all costs
+ */
if (rv < 0)
goto err_out;
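
The acm_waker work item is replaced by stashing at most one delayed write buffer under write_lock while the device is suspended and flushing it from the resume path, with the async autopm get/put calls keeping the PM reference balanced. A user-space sketch of that stash-and-flush pattern; all names here are illustrative, not the cdc-acm structures:

	#include <pthread.h>
	#include <stddef.h>

	struct wb {                     /* one pending write buffer */
		char data[64];
		size_t len;
	};

	struct dev {
		pthread_mutex_t lock;   /* stands in for acm->write_lock */
		int suspended;          /* stands in for acm->susp_count */
		struct wb *delayed_wb;  /* at most one write stashed while asleep */
	};

	static void submit_wb(struct dev *d, struct wb *w) { (void)d; (void)w; /* start the transfer */ }
	static void drop_wb(struct wb *w) { (void)w; /* release the buffer / PM reference */ }

	/* Write path: while suspended, stash at most one buffer instead of queuing work. */
	static void write_start(struct dev *d, struct wb *w)
	{
		pthread_mutex_lock(&d->lock);
		if (d->suspended) {
			if (!d->delayed_wb)
				d->delayed_wb = w;
			else
				drop_wb(w);     /* a write is already waiting */
			pthread_mutex_unlock(&d->lock);
			return;
		}
		pthread_mutex_unlock(&d->lock);
		submit_wb(d, w);
	}

	/* Resume path: take the stashed buffer under the lock, submit it outside. */
	static void resume_flush(struct dev *d)
	{
		struct wb *w;

		pthread_mutex_lock(&d->lock);
		w = d->delayed_wb;
		d->delayed_wb = NULL;
		pthread_mutex_unlock(&d->lock);
		if (w)
			submit_wb(d, w);
	}
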
file:c4a0ee8ffccfb3ae4203c8cac4d091d88177b478 -> file:519eb638b6e960a8325d65deff2f7829932050f6
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -112,7 +112,6 @@ struct acm {
struct mutex mutex;
struct usb_cdc_line_coding line; /* bits, stop, parity */
struct work_struct work; /* work queue entry for line discipline waking up */
- struct work_struct waker;
wait_queue_head_t drain_wait; /* close processing */
struct tasklet_struct urb_task; /* rx processing */
spinlock_t throttle_lock; /* synchronize throtteling and read callback */
file:24120db005a2c41503d8ae8175560c38d57a92fb -> file:2f12e2da0e980cc112bad471ce3d877a44a1d426
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1176,6 +1176,13 @@ static int proc_do_submiturb(struct dev_
free_async(as);
return -ENOMEM;
}
+ /* Isochronous input data may end up being discontiguous
+ * if some of the packets are short. Clear the buffer so
+ * that the gaps don't leak kernel data to userspace.
+ */
+ if (is_in && uurb->type == USBDEVFS_URB_TYPE_ISO)
+ memset(as->urb->transfer_buffer, 0,
+ uurb->buffer_length);
}
as->urb->dev = ps->dev;
as->urb->pipe = (uurb->type << 30) |
@@ -1312,10 +1319,14 @@ static int processcompl(struct async *as
void __user *addr = as->userurb;
unsigned int i;
- if (as->userbuffer && urb->actual_length)
- if (copy_to_user(as->userbuffer, urb->transfer_buffer,
- urb->actual_length))
+ if (as->userbuffer && urb->actual_length) {
+ if (urb->number_of_packets > 0) /* Isochronous */
+ i = urb->transfer_buffer_length;
+ else /* Non-Isoc */
+ i = urb->actual_length;
+ if (copy_to_user(as->userbuffer, urb->transfer_buffer, i))
goto err_out;
+ }
if (put_user(as->status, &userurb->status))
goto err_out;
if (put_user(urb->actual_length, &userurb->actual_length))
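
Both devio.c hunks deal with the same property of isochronous input: short packets leave gaps, so the full transfer buffer is pre-zeroed on submit and the full buffer length (not actual_length) is copied back on completion. A small stand-alone sketch of those two rules; the helper names are made up for illustration:

	#include <stdlib.h>

	/* Zero-fill the iso buffer so gaps after short packets hold no stale data. */
	static void *alloc_iso_buffer(size_t len)
	{
		return calloc(1, len);
	}

	/* Isochronous URBs copy the whole buffer back; everything else copies
	 * only the bytes actually transferred. */
	static size_t copyback_len(int number_of_packets,
				   size_t transfer_buffer_length,
				   size_t actual_length)
	{
		return number_of_packets > 0 ? transfer_buffer_length : actual_length;
	}
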
file:4f864472c5c4db4eea4004cd49e902cb4acd75c9 -> file:d784a8b3a6bdb9d869ff2f20d63555bbfcb4f106
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -625,9 +625,6 @@ static int usb_uevent(struct device *dev
{
struct usb_device *usb_dev;
- /* driver is often null here; dev_dbg() would oops */
- pr_debug("usb %s: uevent\n", dev_name(dev));
-
if (is_usb_device(dev)) {
usb_dev = to_usb_device(dev);
} else if (is_usb_interface(dev)) {
@@ -639,6 +636,7 @@ static int usb_uevent(struct device *dev
}
if (usb_dev->devnum < 0) {
+ /* driver is often null here; dev_dbg() would oops */
pr_debug("usb %s: already deleted?\n", dev_name(dev));
return -ENODEV;
}
@@ -1177,9 +1175,8 @@ static int usb_suspend_both(struct usb_d
udev->state == USB_STATE_SUSPENDED)
goto done;
- udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
-
if (msg.event & PM_EVENT_AUTO) {
+ udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
status = autosuspend_check(udev, 0);
if (status < 0)
goto done;
@@ -1744,6 +1741,23 @@ int usb_external_resume_device(struct us
return status;
}
+static void choose_wakeup(struct usb_device *udev, pm_message_t msg)
+{
+ /* Remote wakeup is needed only when we actually go to sleep.
+ * For things like FREEZE and QUIESCE, if the device is already
+ * autosuspended then its current wakeup setting is okay.
+ */
+ if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_QUIESCE) {
+ udev->do_remote_wakeup = 0;
+ return;
+ }
+
+ /* Allow remote wakeup if it is enabled, even if no interface drivers
+ * actually want it.
+ */
+ udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
+}
+
int usb_suspend(struct device *dev, pm_message_t msg)
{
struct usb_device *udev;
@@ -1763,6 +1777,7 @@ int usb_suspend(struct device *dev, pm_m
}
udev->skip_sys_resume = 0;
+ choose_wakeup(udev, msg);
return usb_external_suspend_device(udev, msg);
}
file:05e6d313961e3e3ce9ed3a09126f80ba65f7daff -> file:1a78cd135aba22be68c6694a0d0ad2f1d655abdc
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -120,7 +120,7 @@ int usb_choose_configuration(struct usb_
* than a vendor-specific driver. */
else if (udev->descriptor.bDeviceClass !=
USB_CLASS_VENDOR_SPEC &&
- (!desc || desc->bInterfaceClass !=
+ (desc && desc->bInterfaceClass !=
USB_CLASS_VENDOR_SPEC)) {
best = c;
break;
file:34de475f016e790efbba02ada9067131870c294d -> file:24e620587b7634822e315d50c4c8275c63e73606
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -140,7 +140,7 @@ static const u8 usb3_rh_dev_descriptor[1
0x09, /* __u8 bMaxPacketSize0; 2^9 = 512 Bytes */
0x6b, 0x1d, /* __le16 idVendor; Linux Foundation */
- 0x02, 0x00, /* __le16 idProduct; device 0x0002 */
+ 0x03, 0x00, /* __le16 idProduct; device 0x0003 */
KERNEL_VER, KERNEL_REL, /* __le16 bcdDevice */
0x03, /* __u8 iManufacturer; */
file:79782a1c43f6098d98ac008bb5591b8607911ca1 -> file:bcbe10476197b7e31bcdbe577518d58b4988bcea
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -234,7 +234,7 @@ struct hc_driver {
/* xHCI specific functions */
/* Called by usb_alloc_dev to alloc HC device structures */
int (*alloc_dev)(struct usb_hcd *, struct usb_device *);
- /* Called by usb_release_dev to free HC device structures */
+ /* Called by usb_disconnect to free HC device structures */
void (*free_dev)(struct usb_hcd *, struct usb_device *);
/* Bandwidth computation functions */
file:1a7d54b7745e2c41feb57d3ba7b2329b9ba664f4 -> file:ed3aa7a91afe3f6ac30a14404af7dea9a2ba595c
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1508,6 +1508,15 @@ static inline void usb_stop_pm(struct us
#endif
+static void hub_free_dev(struct usb_device *udev)
+{
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+
+ /* Root hubs aren't real devices, so don't free HCD resources */
+ if (hcd->driver->free_dev && udev->parent)
+ hcd->driver->free_dev(hcd, udev);
+}
+
/**
* usb_disconnect - disconnect a device (usbcore-internal)
* @pdev: pointer to device being disconnected
@@ -1578,6 +1587,8 @@ void usb_disconnect(struct usb_device **
usb_stop_pm(udev);
+ hub_free_dev(udev);
+
put_device(&udev->dev);
}
@@ -3130,6 +3141,7 @@ loop_disable:
loop:
usb_ep0_reinit(udev);
release_address(udev);
+ hub_free_dev(udev);
usb_put_dev(udev);
if ((status == -ENOTCONN) || (status == -ENOTSUPP))
break;
file:97b40ce133f0a8b7165a742d885dd91656843bc1 -> file:4a6366a42129196a112216771f727eca8b643da4
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -515,13 +515,13 @@ static int fs_create_by_name (const char
*dentry = NULL;
mutex_lock(&parent->d_inode->i_mutex);
*dentry = lookup_one_len(name, parent, strlen(name));
- if (!IS_ERR(dentry)) {
+ if (!IS_ERR(*dentry)) {
if ((mode & S_IFMT) == S_IFDIR)
error = usbfs_mkdir (parent->d_inode, *dentry, mode);
else
error = usbfs_create (parent->d_inode, *dentry, mode);
} else
- error = PTR_ERR(dentry);
+ error = PTR_ERR(*dentry);
mutex_unlock(&parent->d_inode->i_mutex);
return error;
file:ab93918d92076dedec3575b49e5eaacfb2c32e71 -> file:a61f1607f90e017d88bca76f9c27c9678f7689d5
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -41,6 +41,10 @@ static const struct usb_device_id usb_qu
/* Philips PSC805 audio device */
{ USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Artisman Watchdog Dongle */
+ { USB_DEVICE(0x04b4, 0x0526), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* Roland SC-8820 */
{ USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -64,6 +68,9 @@ static const struct usb_device_id usb_qu
/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
+ /* Broadcom BCM92035DGROM BT dongle */
+ { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Action Semiconductor flash disk */
{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
file:52e5e31cdea76b224c14c72923c201a23d858c89 -> file:ab2d3e7c3f2ec4d2841c3715df559856de43e656
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -191,9 +191,6 @@ static void usb_release_dev(struct devic
hcd = bus_to_hcd(udev->bus);
usb_destroy_configuration(udev);
- /* Root hubs aren't real devices, so don't free HCD resources */
- if (hcd->driver->free_dev && udev->parent)
- hcd->driver->free_dev(hcd, udev);
usb_put_hcd(hcd);
kfree(udev->product);
kfree(udev->manufacturer);
file:e18c6773809f47f740988f96d66634a586d24cdb -> file:5aeabd8bda513a09388d96a3b2922d90d736903c
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -543,6 +543,7 @@ static int ehci_init(struct usb_hcd *hcd
*/
ehci->periodic_size = DEFAULT_I_TDPS;
INIT_LIST_HEAD(&ehci->cached_itd_list);
+ INIT_LIST_HEAD(&ehci->cached_sitd_list);
if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
return retval;
@@ -993,7 +994,7 @@ rescan:
/* endpoints can be iso streams. for now, we don't
* accelerate iso completions ... so spin a while.
*/
- if (qh->hw->hw_info1 == 0) {
+ if (qh->hw == NULL) {
ehci_vdbg (ehci, "iso delay\n");
goto idle_timeout;
}
file:698f46135d5e8cd690eeff5176a70bf8554e09e2 -> file:6ac3976ef3b2a2be6b0d4d44f50cd5ecec052ff8
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -292,6 +292,16 @@ static int ehci_bus_resume (struct usb_h
/* manually resume the ports we suspended during bus_suspend() */
i = HCS_N_PORTS (ehci->hcs_params);
while (i--) {
+ /* clear phy low power mode before resume */
+ if (ehci->has_hostpc) {
+ u32 __iomem *hostpc_reg =
+ (u32 __iomem *)((u8 *)ehci->regs
+ + HOSTPC0 + 4 * (i & 0xff));
+ temp = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, temp & ~HOSTPC_PHCD,
+ hostpc_reg);
+ mdelay(5);
+ }
temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
if (test_bit(i, &ehci->bus_suspended) &&
@@ -676,6 +686,13 @@ static int ehci_hub_control (
if (temp & PORT_SUSPEND) {
if ((temp & PORT_PE) == 0)
goto error;
+ /* clear phy low power mode before resume */
+ if (hostpc_reg) {
+ temp1 = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, temp1 & ~HOSTPC_PHCD,
+ hostpc_reg);
+ mdelay(5);
+ }
/* resume signaling for 20 msec */
temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
ehci_writel(ehci, temp | PORT_RESUME,
file:aeda96e0af67bd6eb43b3b607e2c7a3817da8c5d -> file:1f3f01eacaf09cecb88af40187910465cb49b0f3
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -136,7 +136,7 @@ static inline void qh_put (struct ehci_q
static void ehci_mem_cleanup (struct ehci_hcd *ehci)
{
- free_cached_itd_list(ehci);
+ free_cached_lists(ehci);
if (ehci->async)
qh_put (ehci->async);
ehci->async = NULL;
file:a5535b5e3fe27860caebb3d50f228ea6d85fd1b8 -> file:6746a8a794d40a2d3030ecd3469a9081ad586e18
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1121,8 +1121,8 @@ iso_stream_find (struct ehci_hcd *ehci,
urb->interval);
}
- /* if dev->ep [epnum] is a QH, info1.maxpacket is nonzero */
- } else if (unlikely (stream->hw_info1 != 0)) {
+ /* if dev->ep [epnum] is a QH, hw is set */
+ } else if (unlikely (stream->hw != NULL)) {
ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
urb->dev->devpath, epnum,
usb_pipein(urb->pipe) ? "in" : "out");
@@ -1553,13 +1553,27 @@ itd_patch(
static inline void
itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
{
- /* always prepend ITD/SITD ... only QH tree is order-sensitive */
- itd->itd_next = ehci->pshadow [frame];
- itd->hw_next = ehci->periodic [frame];
- ehci->pshadow [frame].itd = itd;
+ union ehci_shadow *prev = &ehci->pshadow[frame];
+ __hc32 *hw_p = &ehci->periodic[frame];
+ union ehci_shadow here = *prev;
+ __hc32 type = 0;
+
+ /* skip any iso nodes which might belong to previous microframes */
+ while (here.ptr) {
+ type = Q_NEXT_TYPE(ehci, *hw_p);
+ if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
+ break;
+ prev = periodic_next_shadow(ehci, prev, type);
+ hw_p = shadow_next_periodic(ehci, &here, type);
+ here = *prev;
+ }
+
+ itd->itd_next = here;
+ itd->hw_next = *hw_p;
+ prev->itd = itd;
itd->frame = frame;
wmb ();
- ehci->periodic[frame] = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
+ *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
}
/* fit urb's itds into the selected schedule slot; activate as needed */
@@ -2113,13 +2127,27 @@ sitd_complete (
(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
}
iso_stream_put (ehci, stream);
- /* OK to recycle this SITD now that its completion callback ran. */
+
done:
sitd->urb = NULL;
- sitd->stream = NULL;
- list_move(&sitd->sitd_list, &stream->free_list);
- iso_stream_put(ehci, stream);
-
+ if (ehci->clock_frame != sitd->frame) {
+ /* OK to recycle this SITD now. */
+ sitd->stream = NULL;
+ list_move(&sitd->sitd_list, &stream->free_list);
+ iso_stream_put(ehci, stream);
+ } else {
+ /* HW might remember this SITD, so we can't recycle it yet.
+ * Move it to a safe place until a new frame starts.
+ */
+ list_move(&sitd->sitd_list, &ehci->cached_sitd_list);
+ if (stream->refcount == 2) {
+ /* If iso_stream_put() were called here, stream
+ * would be freed. Instead, just prevent reuse.
+ */
+ stream->ep->hcpriv = NULL;
+ stream->ep = NULL;
+ }
+ }
return retval;
}
@@ -2185,9 +2213,10 @@ done:
/*-------------------------------------------------------------------------*/
-static void free_cached_itd_list(struct ehci_hcd *ehci)
+static void free_cached_lists(struct ehci_hcd *ehci)
{
struct ehci_itd *itd, *n;
+ struct ehci_sitd *sitd, *sn;
list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
struct ehci_iso_stream *stream = itd->stream;
@@ -2195,6 +2224,13 @@ static void free_cached_itd_list(struct
list_move(&itd->itd_list, &stream->free_list);
iso_stream_put(ehci, stream);
}
+
+ list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
+ struct ehci_iso_stream *stream = sitd->stream;
+ sitd->stream = NULL;
+ list_move(&sitd->sitd_list, &stream->free_list);
+ iso_stream_put(ehci, stream);
+ }
}
/*-------------------------------------------------------------------------*/
@@ -2221,7 +2257,7 @@ scan_periodic (struct ehci_hcd *ehci)
clock_frame = -1;
}
if (ehci->clock_frame != clock_frame) {
- free_cached_itd_list(ehci);
+ free_cached_lists(ehci);
ehci->clock_frame = clock_frame;
}
clock %= mod;
@@ -2384,7 +2420,7 @@ restart:
clock = now;
clock_frame = clock >> 3;
if (ehci->clock_frame != clock_frame) {
- free_cached_itd_list(ehci);
+ free_cached_lists(ehci);
ehci->clock_frame = clock_frame;
}
} else {
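
The itd_link() change replaces the old unconditional prepend with an insert that walks past any iso nodes already queued for the frame and stops at the first interrupt QH, keeping the QH tree order intact. A generic restatement of that insertion rule on a singly linked list; the node type and is_qh flag are illustrative assumptions:

	#include <stdbool.h>
	#include <stddef.h>

	struct node {
		struct node *next;
		bool is_qh;             /* interrupt QH vs. iso ITD/SITD */
	};

	/* Insert an iso node after existing iso nodes but ahead of any QH. */
	static void iso_insert(struct node **head, struct node *itd)
	{
		struct node **prev = head;

		while (*prev && !(*prev)->is_qh)
			prev = &(*prev)->next;  /* skip earlier-microframe iso nodes */

		itd->next = *prev;
		*prev = itd;
	}
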
file:2d85e21ff282e9199be521fe630588cf8b3e01fb -> file:556c0b48f3abd73278206e5c1581eb3d32b73850
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -87,8 +87,9 @@ struct ehci_hcd { /* one per controlle
int next_uframe; /* scan periodic, start here */
unsigned periodic_sched; /* periodic activity count */
- /* list of itds completed while clock_frame was still active */
+ /* list of itds & sitds completed while clock_frame was still active */
struct list_head cached_itd_list;
+ struct list_head cached_sitd_list;
unsigned clock_frame;
/* per root hub port */
@@ -195,7 +196,7 @@ timer_action_done (struct ehci_hcd *ehci
clear_bit (action, &ehci->actions);
}
-static void free_cached_itd_list(struct ehci_hcd *ehci);
+static void free_cached_lists(struct ehci_hcd *ehci);
/*-------------------------------------------------------------------------*/
@@ -394,9 +395,8 @@ struct ehci_iso_sched {
* acts like a qh would, if EHCI had them for ISO.
*/
struct ehci_iso_stream {
- /* first two fields match QH, but info1 == 0 */
- __hc32 hw_next;
- __hc32 hw_info1;
+ /* first field matches ehci_qh, but is NULL */
+ struct ehci_qh_hw *hw;
u32 refcount;
u8 bEndpointAddress;
file:32bbce9718f0626b98cc37e09b14faa51a5cc16f -> file:65cac8cc8921254718d835860a9ad2871d13b823
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -697,7 +697,7 @@ static int ohci_hub_control (
u16 wLength
) {
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
- int ports = hcd_to_bus (hcd)->root_hub->maxchild;
+ int ports = ohci->num_ports;
u32 temp;
int retval = 0;
file:100bf3d8437c12e7e1d68e090759c79c355aa5cb -> file:1f1d4fa6a778dca7f892da60b16b15b8db1a6395
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -327,7 +327,7 @@ static int __devinit usb_hcd_pnx4008_pro
}
i2c_adap = i2c_get_adapter(2);
memset(&i2c_info, 0, sizeof(struct i2c_board_info));
- strlcpy(i2c_info.name, "isp1301_pnx", I2C_NAME_SIZE);
+ strlcpy(i2c_info.type, "isp1301_pnx", I2C_NAME_SIZE);
isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info,
normal_i2c);
i2c_put_adapter(i2c_adap);
@@ -411,7 +411,7 @@ out3:
out2:
clk_put(usb_clk);
out1:
- i2c_unregister_client(isp1301_i2c_client);
+ i2c_unregister_device(isp1301_i2c_client);
isp1301_i2c_client = NULL;
out_i2c_driver:
i2c_del_driver(&isp1301_driver);
@@ -430,7 +430,7 @@ static int usb_hcd_pnx4008_remove(struct
pnx4008_unset_usb_bits();
clk_disable(usb_clk);
clk_put(usb_clk);
- i2c_unregister_client(isp1301_i2c_client);
+ i2c_unregister_device(isp1301_i2c_client);
isp1301_i2c_client = NULL;
i2c_del_driver(&isp1301_driver);
file:9260c743baa6d66e8515f29621ec68c01ddb67ea -> file:e3548ee6681cb8a2196724c50cbc99e5ab104f3f
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -418,7 +418,7 @@ static u8 alloc_usb_address(struct r8a66
/* this function must be called with interrupt disabled */
static void free_usb_address(struct r8a66597 *r8a66597,
- struct r8a66597_device *dev)
+ struct r8a66597_device *dev, int reset)
{
int port;
@@ -430,7 +430,13 @@ static void free_usb_address(struct r8a6
dev->state = USB_STATE_DEFAULT;
r8a66597->address_map &= ~(1 << dev->address);
dev->address = 0;
- dev_set_drvdata(&dev->udev->dev, NULL);
+ /*
+ * It is only necessary to erase drvdata when resetting USB. When
+ * a USB device behind a hub is disconnected, "dev->udev" has already
+ * been freed in usb_disconnect(), so we cannot access the data.
+ */
+ if (reset)
+ dev_set_drvdata(&dev->udev->dev, NULL);
list_del(&dev->device_list);
kfree(dev);
@@ -1067,7 +1073,7 @@ static void r8a66597_usb_disconnect(stru
struct r8a66597_device *dev = r8a66597->root_hub[port].dev;
disable_r8a66597_pipe_all(r8a66597, dev);
- free_usb_address(r8a66597, dev);
+ free_usb_address(r8a66597, dev, 0);
start_root_hub_sampling(r8a66597, port, 0);
}
@@ -2085,7 +2091,7 @@ static void update_usb_address_map(struc
spin_lock_irqsave(&r8a66597->lock, flags);
dev = get_r8a66597_device(r8a66597, addr);
disable_r8a66597_pipe_all(r8a66597, dev);
- free_usb_address(r8a66597, dev);
+ free_usb_address(r8a66597, dev, 0);
put_child_connect_map(r8a66597, addr);
spin_unlock_irqrestore(&r8a66597->lock, flags);
}
@@ -2228,7 +2234,7 @@ static int r8a66597_hub_control(struct u
rh->port |= (1 << USB_PORT_FEAT_RESET);
disable_r8a66597_pipe_all(r8a66597, dev);
- free_usb_address(r8a66597, dev);
+ free_usb_address(r8a66597, dev, 1);
r8a66597_mdfy(r8a66597, USBRST, USBRST | UACT,
get_dvstctr_reg(port));
file:99cd00fd3514c7f25abb0b42977e14c61edbb6ca -> file:09197067fe6bc1032c4859fa557b5b15bb8be92c
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -735,6 +735,7 @@ static void uhci_stop(struct usb_hcd *hc
uhci_hc_died(uhci);
uhci_scan_schedule(uhci);
spin_unlock_irq(&uhci->lock);
+ synchronize_irq(hcd->irq);
del_timer_sync(&uhci->fsbr_timer);
release_uhci(uhci);
file:ecc131c3fe337a68788054dfc2dc695503b30a77 -> file:78c4edac1db14cf324cd90a039467d22f90ae53f
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -101,12 +101,15 @@ static inline int xhci_find_next_cap_off
next = readl(base + ext_offset);
- if (ext_offset == XHCI_HCC_PARAMS_OFFSET)
+ if (ext_offset == XHCI_HCC_PARAMS_OFFSET) {
/* Find the first extended capability */
next = XHCI_HCC_EXT_CAPS(next);
- else
+ ext_offset = 0;
+ } else {
/* Find the next extended capability */
next = XHCI_EXT_CAPS_NEXT(next);
+ }
+
if (!next)
return 0;
/*
file:932f9993848175e476244fd23fa90303cc6b7d61 -> file:a24a92f7dbc0de64dc3e6384e014c762e4c96ca8
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -97,6 +97,33 @@ int xhci_halt(struct xhci_hcd *xhci)
}
/*
+ * Set the run bit and wait for the host to be running.
+ */
+int xhci_start(struct xhci_hcd *xhci)
+{
+ u32 temp;
+ int ret;
+
+ temp = xhci_readl(xhci, &xhci->op_regs->command);
+ temp |= (CMD_RUN);
+ xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
+ temp);
+ xhci_writel(xhci, temp, &xhci->op_regs->command);
+
+ /*
+ * Wait for the HCHalted Status bit to be 0 to indicate the host is
+ * running.
+ */
+ ret = handshake(xhci, &xhci->op_regs->status,
+ STS_HALT, 0, XHCI_MAX_HALT_USEC);
+ if (ret == -ETIMEDOUT)
+ xhci_err(xhci, "Host took too long to start, "
+ "waited %u microseconds.\n",
+ XHCI_MAX_HALT_USEC);
+ return ret;
+}
+
+/*
* Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
*
* This resets pipelines, timers, counters, state machines, etc.
@@ -107,6 +134,7 @@ int xhci_reset(struct xhci_hcd *xhci)
{
u32 command;
u32 state;
+ int ret;
state = xhci_readl(xhci, &xhci->op_regs->status);
if ((state & STS_HALT) == 0) {
@@ -121,7 +149,17 @@ int xhci_reset(struct xhci_hcd *xhci)
/* XXX: Why does EHCI set this here? Shouldn't other code do this? */
xhci_to_hcd(xhci)->state = HC_STATE_HALT;
- return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000);
+ ret = handshake(xhci, &xhci->op_regs->command,
+ CMD_RESET, 0, 250 * 1000);
+ if (ret)
+ return ret;
+
+ xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
+ /*
+ * xHCI cannot write to any doorbells or operational registers other
+ * than status until the "Controller Not Ready" flag is cleared.
+ */
+ return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
}
/*
@@ -460,13 +498,11 @@ int xhci_run(struct usb_hcd *hcd)
if (NUM_TEST_NOOPS > 0)
doorbell = xhci_setup_one_noop(xhci);
- temp = xhci_readl(xhci, &xhci->op_regs->command);
- temp |= (CMD_RUN);
- xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
- temp);
- xhci_writel(xhci, temp, &xhci->op_regs->command);
- /* Flush PCI posted writes */
- temp = xhci_readl(xhci, &xhci->op_regs->command);
+ if (xhci_start(xhci)) {
+ xhci_halt(xhci);
+ return -ENODEV;
+ }
+
xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
if (doorbell)
(*doorbell)(xhci);
@@ -1157,6 +1193,7 @@ static int xhci_configure_endpoint(struc
cmd_completion = &virt_dev->cmd_completion;
cmd_status = &virt_dev->cmd_status;
}
+ init_completion(cmd_completion);
if (!ctx_change)
ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
@@ -1413,6 +1450,8 @@ void xhci_endpoint_reset(struct usb_hcd
kfree(virt_ep->stopped_td);
xhci_ring_cmd_db(xhci);
}
+ virt_ep->stopped_td = NULL;
+ virt_ep->stopped_trb = NULL;
spin_unlock_irqrestore(&xhci->lock, flags);
if (ret)
file:b8fd270a8b0d5f2630ed87dbcf6d24f8a566c86e -> file:dd71f02d5ff4e9b3868e9fb0decf7ed662bce169
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -496,6 +496,19 @@ static inline unsigned int xhci_get_endp
return EP_INTERVAL(interval);
}
+/* The "Mult" field in the endpoint context is only set for SuperSpeed devices.
+ * High speed endpoint descriptors can define "the number of additional
+ * transaction opportunities per microframe", but that goes in the Max Burst
+ * endpoint context field.
+ */
+static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ if (udev->speed != USB_SPEED_SUPER || !ep->ss_ep_comp)
+ return 0;
+ return ep->ss_ep_comp->desc.bmAttributes;
+}
+
static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
@@ -526,6 +539,36 @@ static inline u32 xhci_get_endpoint_type
return type;
}
+/* Return the maximum endpoint service interval time (ESIT) payload.
+ * Basically, this is the maxpacket size, multiplied by the burst size
+ * and mult size.
+ */
+static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
+ struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ int max_burst;
+ int max_packet;
+
+ /* Only applies for interrupt or isochronous endpoints */
+ if (usb_endpoint_xfer_control(&ep->desc) ||
+ usb_endpoint_xfer_bulk(&ep->desc))
+ return 0;
+
+ if (udev->speed == USB_SPEED_SUPER) {
+ if (ep->ss_ep_comp)
+ return ep->ss_ep_comp->desc.wBytesPerInterval;
+ xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
+ /* Assume no bursts, no multiple opportunities to send. */
+ return ep->desc.wMaxPacketSize;
+ }
+
+ max_packet = ep->desc.wMaxPacketSize & 0x3ff;
+ max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
+ /* A 0 in max burst means 1 transfer per ESIT */
+ return max_packet * (max_burst + 1);
+}
+
int xhci_endpoint_init(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
struct usb_device *udev,
@@ -537,6 +580,7 @@ int xhci_endpoint_init(struct xhci_hcd *
struct xhci_ring *ep_ring;
unsigned int max_packet;
unsigned int max_burst;
+ u32 max_esit_payload;
ep_index = xhci_get_endpoint_index(&ep->desc);
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
@@ -550,6 +594,7 @@ int xhci_endpoint_init(struct xhci_hcd *
ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
+ ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep));
/* FIXME dig Mult and streams info out of ep companion desc */
@@ -595,6 +640,26 @@ int xhci_endpoint_init(struct xhci_hcd *
default:
BUG();
}
+ max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
+ ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload);
+
+ /*
+ * XXX no idea how to calculate the average TRB buffer length for bulk
+ * endpoints, as the driver gives us no clue how big each scatter gather
+ * list entry (or buffer) is going to be.
+ *
+ * For isochronous and interrupt endpoints, we set it to the max
+ * available, until we have new API in the USB core to allow drivers to
+ * declare how much bandwidth they actually need.
+ *
+ * Normally, it would be calculated by taking the total of the buffer
+ * lengths in the TD and then dividing by the number of TRBs in a TD,
+ * including link TRBs, No-op TRBs, and Event data TRBs. Since we don't
+ * use Event Data TRBs, and we don't chain in a link TRB on short
+ * transfers, we're basically dividing by 1.
+ */
+ ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload);
+
/* FIXME Debug endpoint context */
return 0;
}
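
For a high-speed periodic endpoint the ESIT payload above comes straight out of wMaxPacketSize: the hunk masks bits 9:0 for the packet size and bits 12:11 for the number of additional transactions per microframe, then multiplies. A worked example with a made-up descriptor value:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t wMaxPacketSize = 0x0B00;                  /* example value */
		unsigned int max_packet = wMaxPacketSize & 0x3ff;          /* 768  */
		unsigned int max_burst = (wMaxPacketSize & 0x1800) >> 11;  /* 1    */

		/* 768 * (1 + 1) = 1536 bytes per endpoint service interval */
		printf("max ESIT payload = %u\n", max_packet * (max_burst + 1));
		return 0;
	}
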
file:821b7b4709de6531b28afb78c428ad4f5379ff81 -> file:317e8dc7c4f30513d61e511e8e486fe239928e29
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -241,10 +241,27 @@ static int room_on_ring(struct xhci_hcd
int i;
union xhci_trb *enq = ring->enqueue;
struct xhci_segment *enq_seg = ring->enq_seg;
+ struct xhci_segment *cur_seg;
+ unsigned int left_on_ring;
/* Check if ring is empty */
- if (enq == ring->dequeue)
+ if (enq == ring->dequeue) {
+ /* Can't use link trbs */
+ left_on_ring = TRBS_PER_SEGMENT - 1;
+ for (cur_seg = enq_seg->next; cur_seg != enq_seg;
+ cur_seg = cur_seg->next)
+ left_on_ring += TRBS_PER_SEGMENT - 1;
+
+ /* Always need one TRB free in the ring. */
+ left_on_ring -= 1;
+ if (num_trbs > left_on_ring) {
+ xhci_warn(xhci, "Not enough room on ring; "
+ "need %u TRBs, %u TRBs left\n",
+ num_trbs, left_on_ring);
+ return 0;
+ }
return 1;
+ }
/* Make sure there's an extra empty TRB available */
for (i = 0; i <= num_trbs; ++i) {
if (enq == ring->dequeue)
@@ -333,7 +350,8 @@ static struct xhci_segment *find_trb_seg
while (cur_seg->trbs > trb ||
&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
- if (TRB_TYPE(generic_trb->field[3]) == TRB_LINK &&
+ if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
+ TRB_TYPE(TRB_LINK) &&
(generic_trb->field[3] & LINK_TOGGLE))
*cycle_state = ~(*cycle_state) & 0x1;
cur_seg = cur_seg->next;
@@ -389,7 +407,7 @@ void xhci_find_new_dequeue_state(struct
BUG();
trb = &state->new_deq_ptr->generic;
- if (TRB_TYPE(trb->field[3]) == TRB_LINK &&
+ if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
(trb->field[3] & LINK_TOGGLE))
state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
@@ -548,6 +566,8 @@ static void handle_stopped_endpoint(stru
/* Otherwise just ring the doorbell to restart the ring */
ring_ep_doorbell(xhci, slot_id, ep_index);
}
+ ep->stopped_td = NULL;
+ ep->stopped_trb = NULL;
/*
* Drop the lock and complete the URBs in the cancelled TD list.
@@ -1065,8 +1085,13 @@ static int handle_tx_event(struct xhci_h
ep->stopped_td = td;
ep->stopped_trb = event_trb;
+
xhci_queue_reset_ep(xhci, slot_id, ep_index);
xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
+
+ ep->stopped_td = NULL;
+ ep->stopped_trb = NULL;
+
xhci_ring_cmd_db(xhci);
goto td_cleanup;
default:
@@ -1186,8 +1211,10 @@ static int handle_tx_event(struct xhci_h
for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
cur_trb != event_trb;
next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
- if (TRB_TYPE(cur_trb->generic.field[3]) != TRB_TR_NOOP &&
- TRB_TYPE(cur_trb->generic.field[3]) != TRB_LINK)
+ if ((cur_trb->generic.field[3] &
+ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
+ (cur_trb->generic.field[3] &
+ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
td->urb->actual_length +=
TRB_LEN(cur_trb->generic.field[2]);
}
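
The empty-ring branch counts usable TRBs as TRBS_PER_SEGMENT - 1 per segment (the last entry of each segment is a link TRB) minus one extra TRB that is always kept free so the enqueue pointer never catches the dequeue pointer. Worked numbers for a hypothetical two-segment ring:

	#include <stdio.h>

	int main(void)
	{
		unsigned int trbs_per_segment = 64;     /* hypothetical segment size */
		unsigned int segments = 2;

		/* one link TRB per segment, plus one TRB kept free on the ring */
		unsigned int usable = segments * (trbs_per_segment - 1) - 1;

		printf("usable TRBs = %u\n", usable);   /* 2 * 63 - 1 = 125 */
		return 0;
	}
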
file:4b254b6fa2456468731925199a69de85afd84602 -> file:db821e98210096b13f1728f8581ce414bba1c246
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -609,6 +609,10 @@ struct xhci_ep_ctx {
#define MAX_PACKET_MASK (0xffff << 16)
#define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff)
+/* tx_info bitmasks */
+#define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff)
+#define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16)
+
/**
* struct xhci_input_control_context
file:0025847743f30fc12500045e7980bd5568e4e83c -> file:be41ec35c655fb2610499416bf4fa4d3ce223532
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -2435,7 +2435,8 @@ sisusb_open(struct inode *inode, struct
}
if (!sisusb->devinit) {
- if (sisusb->sisusb_dev->speed == USB_SPEED_HIGH) {
+ if (sisusb->sisusb_dev->speed == USB_SPEED_HIGH ||
+ sisusb->sisusb_dev->speed == USB_SPEED_SUPER) {
if (sisusb_init_gfxdevice(sisusb, 0)) {
mutex_unlock(&sisusb->lock);
dev_err(&sisusb->sisusb_dev->dev, "Failed to initialize device\n");
@@ -3167,7 +3168,7 @@ static int sisusb_probe(struct usb_inter
sisusb->present = 1;
- if (dev->speed == USB_SPEED_HIGH) {
+ if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER) {
int initscreen = 1;
#ifdef INCL_SISUSB_CON
if (sisusb_first_vc > 0 &&
@@ -3245,6 +3246,7 @@ static struct usb_device_id sisusb_table
{ USB_DEVICE(0x0711, 0x0902) },
{ USB_DEVICE(0x0711, 0x0903) },
{ USB_DEVICE(0x0711, 0x0918) },
+ { USB_DEVICE(0x0711, 0x0920) },
{ USB_DEVICE(0x182d, 0x021c) },
{ USB_DEVICE(0x182d, 0x0269) },
{ }
file:bd254ec97d14be6d9b9e4db7e64bfbbca1f7d14c -> file:e3e087efa944dcea2971e3bf1256cd7d3763862e
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -61,6 +61,8 @@ static struct usb_device_id id_table []
{ USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
+ { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
+ { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
{ USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */
{ USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */
@@ -72,9 +74,12 @@ static struct usb_device_id id_table []
{ USB_DEVICE(0x10C4, 0x1601) }, /* Arkham Technology DS101 Adapter */
{ USB_DEVICE(0x10C4, 0x800A) }, /* SPORTident BSM7-D-USB main station */
{ USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */
+ { USB_DEVICE(0x10C4, 0x8044) }, /* Cygnal Debug Adapter */
+ { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME built-in converter */
{ USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
{ USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
{ USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
+ { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
{ USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
{ USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */
{ USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */
@@ -82,20 +87,24 @@ static struct usb_device_id id_table []
{ USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
{ USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
{ USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
+ { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
{ USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
+ { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
{ USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
{ USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
+ { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
{ USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
{ USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
{ USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
+ { USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */
{ USB_DEVICE(0x10C4, 0x81F2) }, /* C1007 HF band RFID controller */
{ USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */
{ USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Comander 2 */
{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demostration module */
- { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */
+ { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesys ETRX2USB */
{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
@@ -104,6 +113,7 @@ static struct usb_device_id id_table []
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+ { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
{ USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
{ USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
{ USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
@@ -114,6 +124,8 @@ static struct usb_device_id id_table []
{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
{ USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
+ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
+ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
{ } /* Terminating Entry */
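
Every cp210x addition above follows the same convention: a { USB_DEVICE(vid, pid) } initializer inserted in roughly numerical VID/PID order ahead of the empty terminating entry, which tells the USB core where the table ends. A minimal sketch of such a table is shown below; the vendor/product IDs and names are hypothetical and are not taken from this patch.

#include <linux/usb.h>

/* Hypothetical IDs for illustration only; not part of the patch above. */
#define EXAMPLE_VID	0x1234
#define EXAMPLE_PID	0xabcd

static struct usb_device_id example_id_table[] = {
	{ USB_DEVICE(EXAMPLE_VID, EXAMPLE_PID) },	/* Example USB-serial adapter */
	{ }						/* Terminating Entry */
};
MODULE_DEVICE_TABLE(usb, example_id_table);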
file:13a1b39f1590fdc96a16be67ca8cd5299797c0da -> file:8de85729b3ab2fe0c4da90da7ac8ac3e8e36975b
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -44,12 +44,13 @@
#include <linux/serial.h>
#include <linux/usb/serial.h>
#include "ftdi_sio.h"
+#include "ftdi_sio_ids.h"
/*
* Version Information
*/
#define DRIVER_VERSION "v1.5.0"
-#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>"
+#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr"
#define DRIVER_DESC "USB FTDI Serial Converters Driver"
static int debug;
@@ -144,10 +145,15 @@ static struct ftdi_sio_quirk ftdi_HE_TIR
+/*
+ * Device ID not listed? Test via module params product/vendor or
+ * /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
+ */
static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
@@ -551,9 +557,16 @@ static struct usb_device_id id_table_com
{ USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) },
/*
- * Due to many user requests for multiple ELV devices we enable
- * them by default.
+ * ELV devices:
*/
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_WS550_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_EC3000_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_WS888_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_TWS550_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_FEM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_CLI7000_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_PPS7330_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_TFM100_PID) },
@@ -570,11 +583,17 @@ static struct usb_device_id id_table_com
{ USB_DEVICE(FTDI_VID, FTDI_ELV_PCK100_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_RFP500_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_UTP8_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_WS444PC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_UMS100_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) },
{ USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
{ USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
{ USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
@@ -595,6 +614,7 @@ static struct usb_device_id id_table_com
{ USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) },
{ USB_DEVICE(TTI_VID, TTI_QL355P_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
+ { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
@@ -638,6 +658,7 @@ static struct usb_device_id id_table_com
{ USB_DEVICE(EVOLUTION_VID, EVOLUTION_ER1_PID) },
{ USB_DEVICE(EVOLUTION_VID, EVO_HYBRID_PID) },
{ USB_DEVICE(EVOLUTION_VID, EVO_RCM4_PID) },
+ { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ARTEMIS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16C_PID) },
@@ -676,6 +697,7 @@ static struct usb_device_id id_table_com
{ USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
@@ -696,6 +718,7 @@ static struct usb_device_id id_table_com
{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
{ USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AD4USB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) },
{ USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) },
@@ -717,6 +740,18 @@ static struct usb_device_id id_table_com
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) },
{ USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) },
+ { USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) },
+ { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) },
+ { USB_DEVICE(FTDI_VID, MJSG_HD_RADIO_PID) },
+ { USB_DEVICE(FTDI_VID, MJSG_XM_RADIO_PID) },
+ { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_ST_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SLITE_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH2_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
@@ -2280,6 +2315,8 @@ static void ftdi_set_termios(struct tty_
"urb failed to set to rts/cts flow control\n");
}
+ /* raise DTR/RTS */
+ set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
} else {
/*
* Xon/Xoff code
@@ -2327,6 +2364,8 @@ static void ftdi_set_termios(struct tty_
}
}
+ /* lower DTR/RTS */
+ clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
}
return;
}
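
The Signalyzer entries added above rely on ftdi_sio's quirk mechanism: an id-table entry may point .driver_info at a struct ftdi_sio_quirk whose probe hook runs when the device is bound (ftdi_jtag_quirk, reused here, refuses to bind the JTAG channel of dual-channel parts). The sketch below illustrates the pattern with a hypothetical device and quirk; it assumes the ftdi_sio_quirk layout used by this driver and is not part of the patch.

/* Hypothetical quirk and device ID, for illustration only. */
static int example_probe_quirk(struct usb_serial *serial)
{
	/* e.g. reject one interface of a multi-channel part */
	return 0;
}

static struct ftdi_sio_quirk example_quirk = {
	.probe = example_probe_quirk,
};

/*
 * Entry as it would appear in id_table_combined[], ahead of the optional
 * parameter entry and the terminating entry:
 *
 *	{ USB_DEVICE(FTDI_VID, 0xFFFF),
 *		.driver_info = (kernel_ulong_t)&example_quirk },
 */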
file:4586a24fafb020c5f128c6b385cc10d1611b887c -> file:b0e0d64f822e27b9bcf0c076d34e67ddecdebea5
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -1,7 +1,10 @@
/*
- * Definitions for the FTDI USB Single Port Serial Converter -
+ * Driver definitions for the FTDI USB Single Port Serial Converter -
* known as FTDI_SIO (Serial Input/Output application of the chipset)
*
+ * For USB vendor/product IDs (VID/PID), please see ftdi_sio_ids.h
+ *
+ *
* The example I have is known as the USC-1000 which is available from
* http://www.dse.co.nz - cat no XH4214 It looks similar to this:
* http://www.dansdata.com/usbser.htm but I can't be sure There are other
@@ -17,880 +20,7 @@
* Bill Ryder - bryder@sgi.com formerly of Silicon Graphics, Inc.- wrote the
* FTDI_SIO implementation.
*
- * Philipp Gühring - pg@futureware.at - added the Device ID of the USB relais
- * from Rudolf Gugler
- *
- */
-
-#define FTDI_VID 0x0403 /* Vendor Id */
-#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */
-#define FTDI_8U232AM_PID 0x6001 /* Similar device to SIO above */
-#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */
-#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
-#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */
-#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
-#define FTDI_RELAIS_PID 0xFA10 /* Relais device from Rudolf Gugler */
-#define FTDI_NF_RIC_VID 0x0DCD /* Vendor Id */
-#define FTDI_NF_RIC_PID 0x0001 /* Product Id */
-#define FTDI_USBX_707_PID 0xF857 /* ADSTech IR Blaster USBX-707 */
-
-/* Larsen and Brusgaard AltiTrack/USBtrack */
-#define LARSENBRUSGAARD_VID 0x0FD8
-#define LB_ALTITRACK_PID 0x0001
-
-/* www.canusb.com Lawicel CANUSB device */
-#define FTDI_CANUSB_PID 0xFFA8 /* Product Id */
-
-/* AlphaMicro Components AMC-232USB01 device */
-#define FTDI_AMC232_PID 0xFF00 /* Product Id */
-
-/* www.candapter.com Ewert Energy Systems CANdapter device */
-#define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
-
-/* SCS HF Radio Modems PID's (http://www.scs-ptc.com) */
-/* the VID is the standard ftdi vid (FTDI_VID) */
-#define FTDI_SCS_DEVICE_0_PID 0xD010 /* SCS PTC-IIusb */
-#define FTDI_SCS_DEVICE_1_PID 0xD011 /* SCS Tracker / DSP TNC */
-#define FTDI_SCS_DEVICE_2_PID 0xD012
-#define FTDI_SCS_DEVICE_3_PID 0xD013
-#define FTDI_SCS_DEVICE_4_PID 0xD014
-#define FTDI_SCS_DEVICE_5_PID 0xD015
-#define FTDI_SCS_DEVICE_6_PID 0xD016
-#define FTDI_SCS_DEVICE_7_PID 0xD017
-
-/* ACT Solutions HomePro ZWave interface (http://www.act-solutions.com/HomePro.htm) */
-#define FTDI_ACTZWAVE_PID 0xF2D0
-
-
-/* www.starting-point-systems.com µChameleon device */
-#define FTDI_MICRO_CHAMELEON_PID 0xCAA0 /* Product Id */
-
-/* www.irtrans.de device */
-#define FTDI_IRTRANS_PID 0xFC60 /* Product Id */
-
-
-/* www.thoughttechnology.com/ TT-USB provide with procomp use ftdi_sio */
-#define FTDI_TTUSB_PID 0xFF20 /* Product Id */
-
-/* iPlus device */
-#define FTDI_IPLUS_PID 0xD070 /* Product Id */
-#define FTDI_IPLUS2_PID 0xD071 /* Product Id */
-
-/* DMX4ALL DMX Interfaces */
-#define FTDI_DMX4ALL 0xC850
-
-/* OpenDCC (www.opendcc.de) product id */
-#define FTDI_OPENDCC_PID 0xBFD8
-#define FTDI_OPENDCC_SNIFFER_PID 0xBFD9
-#define FTDI_OPENDCC_THROTTLE_PID 0xBFDA
-#define FTDI_OPENDCC_GATEWAY_PID 0xBFDB
-
-/* Sprog II (Andrew Crosland's SprogII DCC interface) */
-#define FTDI_SPROG_II 0xF0C8
-
-/* www.crystalfontz.com devices - thanx for providing free devices for evaluation ! */
-/* they use the ftdi chipset for the USB interface and the vendor id is the same */
-#define FTDI_XF_632_PID 0xFC08 /* 632: 16x2 Character Display */
-#define FTDI_XF_634_PID 0xFC09 /* 634: 20x4 Character Display */
-#define FTDI_XF_547_PID 0xFC0A /* 547: Two line Display */
-#define FTDI_XF_633_PID 0xFC0B /* 633: 16x2 Character Display with Keys */
-#define FTDI_XF_631_PID 0xFC0C /* 631: 20x2 Character Display */
-#define FTDI_XF_635_PID 0xFC0D /* 635: 20x4 Character Display */
-#define FTDI_XF_640_PID 0xFC0E /* 640: Two line Display */
-#define FTDI_XF_642_PID 0xFC0F /* 642: Two line Display */
-
-/* Video Networks Limited / Homechoice in the UK use an ftdi-based device for their 1Mb */
-/* broadband internet service. The following PID is exhibited by the usb device supplied */
-/* (the VID is the standard ftdi vid (FTDI_VID) */
-#define FTDI_VNHCPCUSB_D_PID 0xfe38 /* Product Id */
-
-/*
- * PCDJ use ftdi based dj-controllers. The following PID is for their DAC-2 device
- * http://www.pcdjhardware.com/DAC2.asp (PID sent by Wouter Paesen)
- * (the VID is the standard ftdi vid (FTDI_VID) */
-#define FTDI_PCDJ_DAC2_PID 0xFA88
-
-/*
- * The following are the values for the Matrix Orbital LCD displays,
- * which are the FT232BM ( similar to the 8U232AM )
- */
-#define FTDI_MTXORB_0_PID 0xFA00 /* Matrix Orbital Product Id */
-#define FTDI_MTXORB_1_PID 0xFA01 /* Matrix Orbital Product Id */
-#define FTDI_MTXORB_2_PID 0xFA02 /* Matrix Orbital Product Id */
-#define FTDI_MTXORB_3_PID 0xFA03 /* Matrix Orbital Product Id */
-#define FTDI_MTXORB_4_PID 0xFA04 /* Matrix Orbital Product Id */
-#define FTDI_MTXORB_5_PID 0xFA05 /* Matrix Orbital Product Id */
-#define FTDI_MTXORB_6_PID 0xFA06 /* Matrix Orbital Product Id */
-
-/* OOCDlink by Joern Kaipf <joernk@web.de>
- * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */
-#define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */
-
-/*
- * The following are the values for the Matrix Orbital FTDI Range
- * Anything in this range will use an FT232RL.
- */
-#define MTXORB_VID 0x1B3D
-#define MTXORB_FTDI_RANGE_0100_PID 0x0100
-#define MTXORB_FTDI_RANGE_0101_PID 0x0101
-#define MTXORB_FTDI_RANGE_0102_PID 0x0102
-#define MTXORB_FTDI_RANGE_0103_PID 0x0103
-#define MTXORB_FTDI_RANGE_0104_PID 0x0104
-#define MTXORB_FTDI_RANGE_0105_PID 0x0105
-#define MTXORB_FTDI_RANGE_0106_PID 0x0106
-#define MTXORB_FTDI_RANGE_0107_PID 0x0107
-#define MTXORB_FTDI_RANGE_0108_PID 0x0108
-#define MTXORB_FTDI_RANGE_0109_PID 0x0109
-#define MTXORB_FTDI_RANGE_010A_PID 0x010A
-#define MTXORB_FTDI_RANGE_010B_PID 0x010B
-#define MTXORB_FTDI_RANGE_010C_PID 0x010C
-#define MTXORB_FTDI_RANGE_010D_PID 0x010D
-#define MTXORB_FTDI_RANGE_010E_PID 0x010E
-#define MTXORB_FTDI_RANGE_010F_PID 0x010F
-#define MTXORB_FTDI_RANGE_0110_PID 0x0110
-#define MTXORB_FTDI_RANGE_0111_PID 0x0111
-#define MTXORB_FTDI_RANGE_0112_PID 0x0112
-#define MTXORB_FTDI_RANGE_0113_PID 0x0113
-#define MTXORB_FTDI_RANGE_0114_PID 0x0114
-#define MTXORB_FTDI_RANGE_0115_PID 0x0115
-#define MTXORB_FTDI_RANGE_0116_PID 0x0116
-#define MTXORB_FTDI_RANGE_0117_PID 0x0117
-#define MTXORB_FTDI_RANGE_0118_PID 0x0118
-#define MTXORB_FTDI_RANGE_0119_PID 0x0119
-#define MTXORB_FTDI_RANGE_011A_PID 0x011A
-#define MTXORB_FTDI_RANGE_011B_PID 0x011B
-#define MTXORB_FTDI_RANGE_011C_PID 0x011C
-#define MTXORB_FTDI_RANGE_011D_PID 0x011D
-#define MTXORB_FTDI_RANGE_011E_PID 0x011E
-#define MTXORB_FTDI_RANGE_011F_PID 0x011F
-#define MTXORB_FTDI_RANGE_0120_PID 0x0120
-#define MTXORB_FTDI_RANGE_0121_PID 0x0121
-#define MTXORB_FTDI_RANGE_0122_PID 0x0122
-#define MTXORB_FTDI_RANGE_0123_PID 0x0123
-#define MTXORB_FTDI_RANGE_0124_PID 0x0124
-#define MTXORB_FTDI_RANGE_0125_PID 0x0125
-#define MTXORB_FTDI_RANGE_0126_PID 0x0126
-#define MTXORB_FTDI_RANGE_0127_PID 0x0127
-#define MTXORB_FTDI_RANGE_0128_PID 0x0128
-#define MTXORB_FTDI_RANGE_0129_PID 0x0129
-#define MTXORB_FTDI_RANGE_012A_PID 0x012A
-#define MTXORB_FTDI_RANGE_012B_PID 0x012B
-#define MTXORB_FTDI_RANGE_012C_PID 0x012C
-#define MTXORB_FTDI_RANGE_012D_PID 0x012D
-#define MTXORB_FTDI_RANGE_012E_PID 0x012E
-#define MTXORB_FTDI_RANGE_012F_PID 0x012F
-#define MTXORB_FTDI_RANGE_0130_PID 0x0130
-#define MTXORB_FTDI_RANGE_0131_PID 0x0131
-#define MTXORB_FTDI_RANGE_0132_PID 0x0132
-#define MTXORB_FTDI_RANGE_0133_PID 0x0133
-#define MTXORB_FTDI_RANGE_0134_PID 0x0134
-#define MTXORB_FTDI_RANGE_0135_PID 0x0135
-#define MTXORB_FTDI_RANGE_0136_PID 0x0136
-#define MTXORB_FTDI_RANGE_0137_PID 0x0137
-#define MTXORB_FTDI_RANGE_0138_PID 0x0138
-#define MTXORB_FTDI_RANGE_0139_PID 0x0139
-#define MTXORB_FTDI_RANGE_013A_PID 0x013A
-#define MTXORB_FTDI_RANGE_013B_PID 0x013B
-#define MTXORB_FTDI_RANGE_013C_PID 0x013C
-#define MTXORB_FTDI_RANGE_013D_PID 0x013D
-#define MTXORB_FTDI_RANGE_013E_PID 0x013E
-#define MTXORB_FTDI_RANGE_013F_PID 0x013F
-#define MTXORB_FTDI_RANGE_0140_PID 0x0140
-#define MTXORB_FTDI_RANGE_0141_PID 0x0141
-#define MTXORB_FTDI_RANGE_0142_PID 0x0142
-#define MTXORB_FTDI_RANGE_0143_PID 0x0143
-#define MTXORB_FTDI_RANGE_0144_PID 0x0144
-#define MTXORB_FTDI_RANGE_0145_PID 0x0145
-#define MTXORB_FTDI_RANGE_0146_PID 0x0146
-#define MTXORB_FTDI_RANGE_0147_PID 0x0147
-#define MTXORB_FTDI_RANGE_0148_PID 0x0148
-#define MTXORB_FTDI_RANGE_0149_PID 0x0149
-#define MTXORB_FTDI_RANGE_014A_PID 0x014A
-#define MTXORB_FTDI_RANGE_014B_PID 0x014B
-#define MTXORB_FTDI_RANGE_014C_PID 0x014C
-#define MTXORB_FTDI_RANGE_014D_PID 0x014D
-#define MTXORB_FTDI_RANGE_014E_PID 0x014E
-#define MTXORB_FTDI_RANGE_014F_PID 0x014F
-#define MTXORB_FTDI_RANGE_0150_PID 0x0150
-#define MTXORB_FTDI_RANGE_0151_PID 0x0151
-#define MTXORB_FTDI_RANGE_0152_PID 0x0152
-#define MTXORB_FTDI_RANGE_0153_PID 0x0153
-#define MTXORB_FTDI_RANGE_0154_PID 0x0154
-#define MTXORB_FTDI_RANGE_0155_PID 0x0155
-#define MTXORB_FTDI_RANGE_0156_PID 0x0156
-#define MTXORB_FTDI_RANGE_0157_PID 0x0157
-#define MTXORB_FTDI_RANGE_0158_PID 0x0158
-#define MTXORB_FTDI_RANGE_0159_PID 0x0159
-#define MTXORB_FTDI_RANGE_015A_PID 0x015A
-#define MTXORB_FTDI_RANGE_015B_PID 0x015B
-#define MTXORB_FTDI_RANGE_015C_PID 0x015C
-#define MTXORB_FTDI_RANGE_015D_PID 0x015D
-#define MTXORB_FTDI_RANGE_015E_PID 0x015E
-#define MTXORB_FTDI_RANGE_015F_PID 0x015F
-#define MTXORB_FTDI_RANGE_0160_PID 0x0160
-#define MTXORB_FTDI_RANGE_0161_PID 0x0161
-#define MTXORB_FTDI_RANGE_0162_PID 0x0162
-#define MTXORB_FTDI_RANGE_0163_PID 0x0163
-#define MTXORB_FTDI_RANGE_0164_PID 0x0164
-#define MTXORB_FTDI_RANGE_0165_PID 0x0165
-#define MTXORB_FTDI_RANGE_0166_PID 0x0166
-#define MTXORB_FTDI_RANGE_0167_PID 0x0167
-#define MTXORB_FTDI_RANGE_0168_PID 0x0168
-#define MTXORB_FTDI_RANGE_0169_PID 0x0169
-#define MTXORB_FTDI_RANGE_016A_PID 0x016A
-#define MTXORB_FTDI_RANGE_016B_PID 0x016B
-#define MTXORB_FTDI_RANGE_016C_PID 0x016C
-#define MTXORB_FTDI_RANGE_016D_PID 0x016D
-#define MTXORB_FTDI_RANGE_016E_PID 0x016E
-#define MTXORB_FTDI_RANGE_016F_PID 0x016F
-#define MTXORB_FTDI_RANGE_0170_PID 0x0170
-#define MTXORB_FTDI_RANGE_0171_PID 0x0171
-#define MTXORB_FTDI_RANGE_0172_PID 0x0172
-#define MTXORB_FTDI_RANGE_0173_PID 0x0173
-#define MTXORB_FTDI_RANGE_0174_PID 0x0174
-#define MTXORB_FTDI_RANGE_0175_PID 0x0175
-#define MTXORB_FTDI_RANGE_0176_PID 0x0176
-#define MTXORB_FTDI_RANGE_0177_PID 0x0177
-#define MTXORB_FTDI_RANGE_0178_PID 0x0178
-#define MTXORB_FTDI_RANGE_0179_PID 0x0179
-#define MTXORB_FTDI_RANGE_017A_PID 0x017A
-#define MTXORB_FTDI_RANGE_017B_PID 0x017B
-#define MTXORB_FTDI_RANGE_017C_PID 0x017C
-#define MTXORB_FTDI_RANGE_017D_PID 0x017D
-#define MTXORB_FTDI_RANGE_017E_PID 0x017E
-#define MTXORB_FTDI_RANGE_017F_PID 0x017F
-#define MTXORB_FTDI_RANGE_0180_PID 0x0180
-#define MTXORB_FTDI_RANGE_0181_PID 0x0181
-#define MTXORB_FTDI_RANGE_0182_PID 0x0182
-#define MTXORB_FTDI_RANGE_0183_PID 0x0183
-#define MTXORB_FTDI_RANGE_0184_PID 0x0184
-#define MTXORB_FTDI_RANGE_0185_PID 0x0185
-#define MTXORB_FTDI_RANGE_0186_PID 0x0186
-#define MTXORB_FTDI_RANGE_0187_PID 0x0187
-#define MTXORB_FTDI_RANGE_0188_PID 0x0188
-#define MTXORB_FTDI_RANGE_0189_PID 0x0189
-#define MTXORB_FTDI_RANGE_018A_PID 0x018A
-#define MTXORB_FTDI_RANGE_018B_PID 0x018B
-#define MTXORB_FTDI_RANGE_018C_PID 0x018C
-#define MTXORB_FTDI_RANGE_018D_PID 0x018D
-#define MTXORB_FTDI_RANGE_018E_PID 0x018E
-#define MTXORB_FTDI_RANGE_018F_PID 0x018F
-#define MTXORB_FTDI_RANGE_0190_PID 0x0190
-#define MTXORB_FTDI_RANGE_0191_PID 0x0191
-#define MTXORB_FTDI_RANGE_0192_PID 0x0192
-#define MTXORB_FTDI_RANGE_0193_PID 0x0193
-#define MTXORB_FTDI_RANGE_0194_PID 0x0194
-#define MTXORB_FTDI_RANGE_0195_PID 0x0195
-#define MTXORB_FTDI_RANGE_0196_PID 0x0196
-#define MTXORB_FTDI_RANGE_0197_PID 0x0197
-#define MTXORB_FTDI_RANGE_0198_PID 0x0198
-#define MTXORB_FTDI_RANGE_0199_PID 0x0199
-#define MTXORB_FTDI_RANGE_019A_PID 0x019A
-#define MTXORB_FTDI_RANGE_019B_PID 0x019B
-#define MTXORB_FTDI_RANGE_019C_PID 0x019C
-#define MTXORB_FTDI_RANGE_019D_PID 0x019D
-#define MTXORB_FTDI_RANGE_019E_PID 0x019E
-#define MTXORB_FTDI_RANGE_019F_PID 0x019F
-#define MTXORB_FTDI_RANGE_01A0_PID 0x01A0
-#define MTXORB_FTDI_RANGE_01A1_PID 0x01A1
-#define MTXORB_FTDI_RANGE_01A2_PID 0x01A2
-#define MTXORB_FTDI_RANGE_01A3_PID 0x01A3
-#define MTXORB_FTDI_RANGE_01A4_PID 0x01A4
-#define MTXORB_FTDI_RANGE_01A5_PID 0x01A5
-#define MTXORB_FTDI_RANGE_01A6_PID 0x01A6
-#define MTXORB_FTDI_RANGE_01A7_PID 0x01A7
-#define MTXORB_FTDI_RANGE_01A8_PID 0x01A8
-#define MTXORB_FTDI_RANGE_01A9_PID 0x01A9
-#define MTXORB_FTDI_RANGE_01AA_PID 0x01AA
-#define MTXORB_FTDI_RANGE_01AB_PID 0x01AB
-#define MTXORB_FTDI_RANGE_01AC_PID 0x01AC
-#define MTXORB_FTDI_RANGE_01AD_PID 0x01AD
-#define MTXORB_FTDI_RANGE_01AE_PID 0x01AE
-#define MTXORB_FTDI_RANGE_01AF_PID 0x01AF
-#define MTXORB_FTDI_RANGE_01B0_PID 0x01B0
-#define MTXORB_FTDI_RANGE_01B1_PID 0x01B1
-#define MTXORB_FTDI_RANGE_01B2_PID 0x01B2
-#define MTXORB_FTDI_RANGE_01B3_PID 0x01B3
-#define MTXORB_FTDI_RANGE_01B4_PID 0x01B4
-#define MTXORB_FTDI_RANGE_01B5_PID 0x01B5
-#define MTXORB_FTDI_RANGE_01B6_PID 0x01B6
-#define MTXORB_FTDI_RANGE_01B7_PID 0x01B7
-#define MTXORB_FTDI_RANGE_01B8_PID 0x01B8
-#define MTXORB_FTDI_RANGE_01B9_PID 0x01B9
-#define MTXORB_FTDI_RANGE_01BA_PID 0x01BA
-#define MTXORB_FTDI_RANGE_01BB_PID 0x01BB
-#define MTXORB_FTDI_RANGE_01BC_PID 0x01BC
-#define MTXORB_FTDI_RANGE_01BD_PID 0x01BD
-#define MTXORB_FTDI_RANGE_01BE_PID 0x01BE
-#define MTXORB_FTDI_RANGE_01BF_PID 0x01BF
-#define MTXORB_FTDI_RANGE_01C0_PID 0x01C0
-#define MTXORB_FTDI_RANGE_01C1_PID 0x01C1
-#define MTXORB_FTDI_RANGE_01C2_PID 0x01C2
-#define MTXORB_FTDI_RANGE_01C3_PID 0x01C3
-#define MTXORB_FTDI_RANGE_01C4_PID 0x01C4
-#define MTXORB_FTDI_RANGE_01C5_PID 0x01C5
-#define MTXORB_FTDI_RANGE_01C6_PID 0x01C6
-#define MTXORB_FTDI_RANGE_01C7_PID 0x01C7
-#define MTXORB_FTDI_RANGE_01C8_PID 0x01C8
-#define MTXORB_FTDI_RANGE_01C9_PID 0x01C9
-#define MTXORB_FTDI_RANGE_01CA_PID 0x01CA
-#define MTXORB_FTDI_RANGE_01CB_PID 0x01CB
-#define MTXORB_FTDI_RANGE_01CC_PID 0x01CC
-#define MTXORB_FTDI_RANGE_01CD_PID 0x01CD
-#define MTXORB_FTDI_RANGE_01CE_PID 0x01CE
-#define MTXORB_FTDI_RANGE_01CF_PID 0x01CF
-#define MTXORB_FTDI_RANGE_01D0_PID 0x01D0
-#define MTXORB_FTDI_RANGE_01D1_PID 0x01D1
-#define MTXORB_FTDI_RANGE_01D2_PID 0x01D2
-#define MTXORB_FTDI_RANGE_01D3_PID 0x01D3
-#define MTXORB_FTDI_RANGE_01D4_PID 0x01D4
-#define MTXORB_FTDI_RANGE_01D5_PID 0x01D5
-#define MTXORB_FTDI_RANGE_01D6_PID 0x01D6
-#define MTXORB_FTDI_RANGE_01D7_PID 0x01D7
-#define MTXORB_FTDI_RANGE_01D8_PID 0x01D8
-#define MTXORB_FTDI_RANGE_01D9_PID 0x01D9
-#define MTXORB_FTDI_RANGE_01DA_PID 0x01DA
-#define MTXORB_FTDI_RANGE_01DB_PID 0x01DB
-#define MTXORB_FTDI_RANGE_01DC_PID 0x01DC
-#define MTXORB_FTDI_RANGE_01DD_PID 0x01DD
-#define MTXORB_FTDI_RANGE_01DE_PID 0x01DE
-#define MTXORB_FTDI_RANGE_01DF_PID 0x01DF
-#define MTXORB_FTDI_RANGE_01E0_PID 0x01E0
-#define MTXORB_FTDI_RANGE_01E1_PID 0x01E1
-#define MTXORB_FTDI_RANGE_01E2_PID 0x01E2
-#define MTXORB_FTDI_RANGE_01E3_PID 0x01E3
-#define MTXORB_FTDI_RANGE_01E4_PID 0x01E4
-#define MTXORB_FTDI_RANGE_01E5_PID 0x01E5
-#define MTXORB_FTDI_RANGE_01E6_PID 0x01E6
-#define MTXORB_FTDI_RANGE_01E7_PID 0x01E7
-#define MTXORB_FTDI_RANGE_01E8_PID 0x01E8
-#define MTXORB_FTDI_RANGE_01E9_PID 0x01E9
-#define MTXORB_FTDI_RANGE_01EA_PID 0x01EA
-#define MTXORB_FTDI_RANGE_01EB_PID 0x01EB
-#define MTXORB_FTDI_RANGE_01EC_PID 0x01EC
-#define MTXORB_FTDI_RANGE_01ED_PID 0x01ED
-#define MTXORB_FTDI_RANGE_01EE_PID 0x01EE
-#define MTXORB_FTDI_RANGE_01EF_PID 0x01EF
-#define MTXORB_FTDI_RANGE_01F0_PID 0x01F0
-#define MTXORB_FTDI_RANGE_01F1_PID 0x01F1
-#define MTXORB_FTDI_RANGE_01F2_PID 0x01F2
-#define MTXORB_FTDI_RANGE_01F3_PID 0x01F3
-#define MTXORB_FTDI_RANGE_01F4_PID 0x01F4
-#define MTXORB_FTDI_RANGE_01F5_PID 0x01F5
-#define MTXORB_FTDI_RANGE_01F6_PID 0x01F6
-#define MTXORB_FTDI_RANGE_01F7_PID 0x01F7
-#define MTXORB_FTDI_RANGE_01F8_PID 0x01F8
-#define MTXORB_FTDI_RANGE_01F9_PID 0x01F9
-#define MTXORB_FTDI_RANGE_01FA_PID 0x01FA
-#define MTXORB_FTDI_RANGE_01FB_PID 0x01FB
-#define MTXORB_FTDI_RANGE_01FC_PID 0x01FC
-#define MTXORB_FTDI_RANGE_01FD_PID 0x01FD
-#define MTXORB_FTDI_RANGE_01FE_PID 0x01FE
-#define MTXORB_FTDI_RANGE_01FF_PID 0x01FF
-
-
-
-/* Interbiometrics USB I/O Board */
-/* Developed for Interbiometrics by Rudolf Gugler */
-#define INTERBIOMETRICS_VID 0x1209
-#define INTERBIOMETRICS_IOBOARD_PID 0x1002
-#define INTERBIOMETRICS_MINI_IOBOARD_PID 0x1006
-
-/*
- * The following are the values for the Perle Systems
- * UltraPort USB serial converters
- */
-#define FTDI_PERLE_ULTRAPORT_PID 0xF0C0 /* Perle UltraPort Product Id */
-
-/*
- * The following are the values for the Sealevel SeaLINK+ adapters.
- * (Original list sent by Tuan Hoang. Ian Abbott renamed the macros and
- * removed some PIDs that don't seem to match any existing products.)
- */
-#define SEALEVEL_VID 0x0c52 /* Sealevel Vendor ID */
-#define SEALEVEL_2101_PID 0x2101 /* SeaLINK+232 (2101/2105) */
-#define SEALEVEL_2102_PID 0x2102 /* SeaLINK+485 (2102) */
-#define SEALEVEL_2103_PID 0x2103 /* SeaLINK+232I (2103) */
-#define SEALEVEL_2104_PID 0x2104 /* SeaLINK+485I (2104) */
-#define SEALEVEL_2106_PID 0x9020 /* SeaLINK+422 (2106) */
-#define SEALEVEL_2201_1_PID 0x2211 /* SeaPORT+2/232 (2201) Port 1 */
-#define SEALEVEL_2201_2_PID 0x2221 /* SeaPORT+2/232 (2201) Port 2 */
-#define SEALEVEL_2202_1_PID 0x2212 /* SeaPORT+2/485 (2202) Port 1 */
-#define SEALEVEL_2202_2_PID 0x2222 /* SeaPORT+2/485 (2202) Port 2 */
-#define SEALEVEL_2203_1_PID 0x2213 /* SeaPORT+2 (2203) Port 1 */
-#define SEALEVEL_2203_2_PID 0x2223 /* SeaPORT+2 (2203) Port 2 */
-#define SEALEVEL_2401_1_PID 0x2411 /* SeaPORT+4/232 (2401) Port 1 */
-#define SEALEVEL_2401_2_PID 0x2421 /* SeaPORT+4/232 (2401) Port 2 */
-#define SEALEVEL_2401_3_PID 0x2431 /* SeaPORT+4/232 (2401) Port 3 */
-#define SEALEVEL_2401_4_PID 0x2441 /* SeaPORT+4/232 (2401) Port 4 */
-#define SEALEVEL_2402_1_PID 0x2412 /* SeaPORT+4/485 (2402) Port 1 */
-#define SEALEVEL_2402_2_PID 0x2422 /* SeaPORT+4/485 (2402) Port 2 */
-#define SEALEVEL_2402_3_PID 0x2432 /* SeaPORT+4/485 (2402) Port 3 */
-#define SEALEVEL_2402_4_PID 0x2442 /* SeaPORT+4/485 (2402) Port 4 */
-#define SEALEVEL_2403_1_PID 0x2413 /* SeaPORT+4 (2403) Port 1 */
-#define SEALEVEL_2403_2_PID 0x2423 /* SeaPORT+4 (2403) Port 2 */
-#define SEALEVEL_2403_3_PID 0x2433 /* SeaPORT+4 (2403) Port 3 */
-#define SEALEVEL_2403_4_PID 0x2443 /* SeaPORT+4 (2403) Port 4 */
-#define SEALEVEL_2801_1_PID 0X2811 /* SeaLINK+8/232 (2801) Port 1 */
-#define SEALEVEL_2801_2_PID 0X2821 /* SeaLINK+8/232 (2801) Port 2 */
-#define SEALEVEL_2801_3_PID 0X2831 /* SeaLINK+8/232 (2801) Port 3 */
-#define SEALEVEL_2801_4_PID 0X2841 /* SeaLINK+8/232 (2801) Port 4 */
-#define SEALEVEL_2801_5_PID 0X2851 /* SeaLINK+8/232 (2801) Port 5 */
-#define SEALEVEL_2801_6_PID 0X2861 /* SeaLINK+8/232 (2801) Port 6 */
-#define SEALEVEL_2801_7_PID 0X2871 /* SeaLINK+8/232 (2801) Port 7 */
-#define SEALEVEL_2801_8_PID 0X2881 /* SeaLINK+8/232 (2801) Port 8 */
-#define SEALEVEL_2802_1_PID 0X2812 /* SeaLINK+8/485 (2802) Port 1 */
-#define SEALEVEL_2802_2_PID 0X2822 /* SeaLINK+8/485 (2802) Port 2 */
-#define SEALEVEL_2802_3_PID 0X2832 /* SeaLINK+8/485 (2802) Port 3 */
-#define SEALEVEL_2802_4_PID 0X2842 /* SeaLINK+8/485 (2802) Port 4 */
-#define SEALEVEL_2802_5_PID 0X2852 /* SeaLINK+8/485 (2802) Port 5 */
-#define SEALEVEL_2802_6_PID 0X2862 /* SeaLINK+8/485 (2802) Port 6 */
-#define SEALEVEL_2802_7_PID 0X2872 /* SeaLINK+8/485 (2802) Port 7 */
-#define SEALEVEL_2802_8_PID 0X2882 /* SeaLINK+8/485 (2802) Port 8 */
-#define SEALEVEL_2803_1_PID 0X2813 /* SeaLINK+8 (2803) Port 1 */
-#define SEALEVEL_2803_2_PID 0X2823 /* SeaLINK+8 (2803) Port 2 */
-#define SEALEVEL_2803_3_PID 0X2833 /* SeaLINK+8 (2803) Port 3 */
-#define SEALEVEL_2803_4_PID 0X2843 /* SeaLINK+8 (2803) Port 4 */
-#define SEALEVEL_2803_5_PID 0X2853 /* SeaLINK+8 (2803) Port 5 */
-#define SEALEVEL_2803_6_PID 0X2863 /* SeaLINK+8 (2803) Port 6 */
-#define SEALEVEL_2803_7_PID 0X2873 /* SeaLINK+8 (2803) Port 7 */
-#define SEALEVEL_2803_8_PID 0X2883 /* SeaLINK+8 (2803) Port 8 */
-
-/*
- * The following are the values for two KOBIL chipcard terminals.
- */
-#define KOBIL_VID 0x0d46 /* KOBIL Vendor ID */
-#define KOBIL_CONV_B1_PID 0x2020 /* KOBIL Konverter for B1 */
-#define KOBIL_CONV_KAAN_PID 0x2021 /* KOBIL_Konverter for KAAN */
-
-/*
- * Icom ID-1 digital transceiver
- */
-
-#define ICOM_ID1_VID 0x0C26
-#define ICOM_ID1_PID 0x0004
-
-/*
- * ASK.fr devices
- */
-#define FTDI_ASK_RDR400_PID 0xC991 /* ASK RDR 400 series card reader */
-
-/*
- * FTDI USB UART chips used in construction projects from the
- * Elektor Electronics magazine (http://elektor-electronics.co.uk)
- */
-#define ELEKTOR_VID 0x0C7D
-#define ELEKTOR_FT323R_PID 0x0005 /* RFID-Reader, issue 09-2006 */
-
-/*
- * DSS-20 Sync Station for Sony Ericsson P800
- */
-#define FTDI_DSS20_PID 0xFC82
-
-/*
- * Home Electronics (www.home-electro.com) USB gadgets
- */
-#define FTDI_HE_TIRA1_PID 0xFA78 /* Tira-1 IR transceiver */
-
-/* USB-UIRT - An infrared receiver and transmitter using the 8U232AM chip */
-/* http://home.earthlink.net/~jrhees/USBUIRT/index.htm */
-#define FTDI_USB_UIRT_PID 0xF850 /* Product Id */
-
-/* TNC-X USB-to-packet-radio adapter, versions prior to 3.0 (DLP module) */
-
-#define FTDI_TNC_X_PID 0xEBE0
-
-/*
- * ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
- * All of these devices use FTDI's vendor ID (0x0403).
- *
- * The previously included PID for the UO 100 module was incorrect.
- * In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58).
- *
- * Armin Laeuger originally sent the PID for the UM 100 module.
- */
-#define FTDI_R2000KU_TRUE_RNG 0xFB80 /* R2000KU TRUE RNG */
-#define FTDI_ELV_UR100_PID 0xFB58 /* USB-RS232-Umsetzer (UR 100) */
-#define FTDI_ELV_UM100_PID 0xFB5A /* USB-Modul UM 100 */
-#define FTDI_ELV_UO100_PID 0xFB5B /* USB-Modul UO 100 */
-#define FTDI_ELV_ALC8500_PID 0xF06E /* ALC 8500 Expert */
-/* Additional ELV PIDs that default to using the FTDI D2XX drivers on
- * MS Windows, rather than the FTDI Virtual Com Port drivers.
- * Maybe these will be easier to use with the libftdi/libusb user-space
- * drivers, or possibly the Comedi drivers in some cases. */
-#define FTDI_ELV_CLI7000_PID 0xFB59 /* Computer-Light-Interface (CLI 7000) */
-#define FTDI_ELV_PPS7330_PID 0xFB5C /* Processor-Power-Supply (PPS 7330) */
-#define FTDI_ELV_TFM100_PID 0xFB5D /* Temperartur-Feuchte Messgeraet (TFM 100) */
-#define FTDI_ELV_UDF77_PID 0xFB5E /* USB DCF Funkurh (UDF 77) */
-#define FTDI_ELV_UIO88_PID 0xFB5F /* USB-I/O Interface (UIO 88) */
-#define FTDI_ELV_UAD8_PID 0xF068 /* USB-AD-Wandler (UAD 8) */
-#define FTDI_ELV_UDA7_PID 0xF069 /* USB-DA-Wandler (UDA 7) */
-#define FTDI_ELV_USI2_PID 0xF06A /* USB-Schrittmotoren-Interface (USI 2) */
-#define FTDI_ELV_T1100_PID 0xF06B /* Thermometer (T 1100) */
-#define FTDI_ELV_PCD200_PID 0xF06C /* PC-Datenlogger (PCD 200) */
-#define FTDI_ELV_ULA200_PID 0xF06D /* USB-LCD-Ansteuerung (ULA 200) */
-#define FTDI_ELV_FHZ1000PC_PID 0xF06F /* FHZ 1000 PC */
-#define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */
-#define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */
-#define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */
-#define FTDI_ELV_RFP500_PID 0xE0F3 /* HF-Leistungsmesser (RFP 500) */
-#define FTDI_ELV_FS20SIG_PID 0xE0F4 /* Signalgeber (FS 20 SIG) */
-#define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */
-#define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */
-#define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */
-#define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */
-#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */
-#define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */
-
-/*
- * Definitions for ID TECH (www.idt-net.com) devices
- */
-#define IDTECH_VID 0x0ACD /* ID TECH Vendor ID */
-#define IDTECH_IDT1221U_PID 0x0300 /* IDT1221U USB to RS-232 adapter */
-
-/*
- * Definitions for Omnidirectional Control Technology, Inc. devices
- */
-#define OCT_VID 0x0B39 /* OCT vendor ID */
-/* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */
-/* Also rebadged as Dick Smith Electronics (Aus) XH6451 */
-/* Also rebadged as SIIG Inc. model US2308 hardware version 1 */
-#define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */
-
-/* an infrared receiver for user access control with IR tags */
-#define FTDI_PIEGROUP_PID 0xF208 /* Product Id */
-
-/*
- * Definitions for Artemis astronomical USB based cameras
- * Check it at http://www.artemisccd.co.uk/
- */
-#define FTDI_ARTEMIS_PID 0xDF28 /* All Artemis Cameras */
-
-/*
- * Definitions for ATIK Instruments astronomical USB based cameras
- * Check it at http://www.atik-instruments.com/
- */
-#define FTDI_ATIK_ATK16_PID 0xDF30 /* ATIK ATK-16 Grayscale Camera */
-#define FTDI_ATIK_ATK16C_PID 0xDF32 /* ATIK ATK-16C Colour Camera */
-#define FTDI_ATIK_ATK16HR_PID 0xDF31 /* ATIK ATK-16HR Grayscale Camera */
-#define FTDI_ATIK_ATK16HRC_PID 0xDF33 /* ATIK ATK-16HRC Colour Camera */
-#define FTDI_ATIK_ATK16IC_PID 0xDF35 /* ATIK ATK-16IC Grayscale Camera */
-
-/*
- * Protego product ids
- */
-#define PROTEGO_SPECIAL_1 0xFC70 /* special/unknown device */
-#define PROTEGO_R2X0 0xFC71 /* R200-USB TRNG unit (R210, R220, and R230) */
-#define PROTEGO_SPECIAL_3 0xFC72 /* special/unknown device */
-#define PROTEGO_SPECIAL_4 0xFC73 /* special/unknown device */
-
-/*
- * Gude Analog- und Digitalsysteme GmbH
- */
-#define FTDI_GUDEADS_E808_PID 0xE808
-#define FTDI_GUDEADS_E809_PID 0xE809
-#define FTDI_GUDEADS_E80A_PID 0xE80A
-#define FTDI_GUDEADS_E80B_PID 0xE80B
-#define FTDI_GUDEADS_E80C_PID 0xE80C
-#define FTDI_GUDEADS_E80D_PID 0xE80D
-#define FTDI_GUDEADS_E80E_PID 0xE80E
-#define FTDI_GUDEADS_E80F_PID 0xE80F
-#define FTDI_GUDEADS_E888_PID 0xE888 /* Expert ISDN Control USB */
-#define FTDI_GUDEADS_E889_PID 0xE889 /* USB RS-232 OptoBridge */
-#define FTDI_GUDEADS_E88A_PID 0xE88A
-#define FTDI_GUDEADS_E88B_PID 0xE88B
-#define FTDI_GUDEADS_E88C_PID 0xE88C
-#define FTDI_GUDEADS_E88D_PID 0xE88D
-#define FTDI_GUDEADS_E88E_PID 0xE88E
-#define FTDI_GUDEADS_E88F_PID 0xE88F
-
-/*
- * Linx Technologies product ids
- */
-#define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */
-#define LINX_MASTERDEVEL2_PID 0xF449 /* Linx Master Development 2.0 */
-#define LINX_FUTURE_0_PID 0xF44A /* Linx future device */
-#define LINX_FUTURE_1_PID 0xF44B /* Linx future device */
-#define LINX_FUTURE_2_PID 0xF44C /* Linx future device */
-
-/* CCS Inc. ICDU/ICDU40 product ID - the FT232BM is used in an in-circuit-debugger */
-/* unit for PIC16's/PIC18's */
-#define FTDI_CCSICDU20_0_PID 0xF9D0
-#define FTDI_CCSICDU40_1_PID 0xF9D1
-#define FTDI_CCSMACHX_2_PID 0xF9D2
-#define FTDI_CCSLOAD_N_GO_3_PID 0xF9D3
-#define FTDI_CCSICDU64_4_PID 0xF9D4
-#define FTDI_CCSPRIME8_5_PID 0xF9D5
-
-/* Inside Accesso contactless reader (http://www.insidefr.com) */
-#define INSIDE_ACCESSO 0xFAD0
-
-/*
- * Intrepid Control Systems (http://www.intrepidcs.com/) ValueCAN and NeoVI
- */
-#define INTREPID_VID 0x093C
-#define INTREPID_VALUECAN_PID 0x0601
-#define INTREPID_NEOVI_PID 0x0701
-
-/*
- * Falcom Wireless Communications GmbH
- */
-#define FALCOM_VID 0x0F94 /* Vendor Id */
-#define FALCOM_TWIST_PID 0x0001 /* Falcom Twist USB GPRS modem */
-#define FALCOM_SAMBA_PID 0x0005 /* Falcom Samba USB GPRS modem */
-
-/*
- * SUUNTO product ids
- */
-#define FTDI_SUUNTO_SPORTS_PID 0xF680 /* Suunto Sports instrument */
-
-/*
- * Oceanic product ids
- */
-#define FTDI_OCEANIC_PID 0xF460 /* Oceanic dive instrument */
-
-/*
- * TTi (Thurlby Thandar Instruments)
- */
-#define TTI_VID 0x103E /* Vendor Id */
-#define TTI_QL355P_PID 0x03E8 /* TTi QL355P power supply */
-
-/*
- * Definitions for B&B Electronics products.
- */
-#define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */
-#define BANDB_USOTL4_PID 0xAC01 /* USOTL4 Isolated RS-485 Converter */
-#define BANDB_USTL4_PID 0xAC02 /* USTL4 RS-485 Converter */
-#define BANDB_USO9ML2_PID 0xAC03 /* USO9ML2 Isolated RS-232 Converter */
-#define BANDB_USOPTL4_PID 0xAC11
-#define BANDB_USPTL4_PID 0xAC12
-#define BANDB_USO9ML2DR_2_PID 0xAC16
-#define BANDB_USO9ML2DR_PID 0xAC17
-#define BANDB_USOPTL4DR2_PID 0xAC18 /* USOPTL4R-2 2-port Isolated RS-232 Converter */
-#define BANDB_USOPTL4DR_PID 0xAC19
-#define BANDB_485USB9F_2W_PID 0xAC25
-#define BANDB_485USB9F_4W_PID 0xAC26
-#define BANDB_232USB9M_PID 0xAC27
-#define BANDB_485USBTB_2W_PID 0xAC33
-#define BANDB_485USBTB_4W_PID 0xAC34
-#define BANDB_TTL5USB9M_PID 0xAC49
-#define BANDB_TTL3USB9M_PID 0xAC50
-#define BANDB_ZZ_PROG1_USB_PID 0xBA02
-
-/*
- * RM Michaelides CANview USB (http://www.rmcan.com)
- * CAN fieldbus interface adapter, added by port GmbH www.port.de)
- * Ian Abbott changed the macro names for consistency.
- */
-#define FTDI_RM_CANVIEW_PID 0xfd60 /* Product Id */
-
-/*
- * EVER Eco Pro UPS (http://www.ever.com.pl/)
- */
-
-#define EVER_ECO_PRO_CDS 0xe520 /* RS-232 converter */
-
-/*
- * 4N-GALAXY.DE PIDs for CAN-USB, USB-RS232, USB-RS422, USB-RS485,
- * USB-TTY activ, USB-TTY passiv. Some PIDs are used by several devices
- * and I'm not entirely sure which are used by which.
- */
-#define FTDI_4N_GALAXY_DE_1_PID 0xF3C0
-#define FTDI_4N_GALAXY_DE_2_PID 0xF3C1
-
-/*
- * Mobility Electronics products.
- */
-#define MOBILITY_VID 0x1342
-#define MOBILITY_USB_SERIAL_PID 0x0202 /* EasiDock USB 200 serial */
-
-/*
- * microHAM product IDs (http://www.microham.com).
- * Submitted by Justin Burket (KL1RL) <zorton@jtan.com>
- * and Mike Studer (K6EEP) <k6eep@hamsoftware.org>.
- * Ian Abbott <abbotti@mev.co.uk> added a few more from the driver INF file.
- */
-#define FTDI_MHAM_KW_PID 0xEEE8 /* USB-KW interface */
-#define FTDI_MHAM_YS_PID 0xEEE9 /* USB-YS interface */
-#define FTDI_MHAM_Y6_PID 0xEEEA /* USB-Y6 interface */
-#define FTDI_MHAM_Y8_PID 0xEEEB /* USB-Y8 interface */
-#define FTDI_MHAM_IC_PID 0xEEEC /* USB-IC interface */
-#define FTDI_MHAM_DB9_PID 0xEEED /* USB-DB9 interface */
-#define FTDI_MHAM_RS232_PID 0xEEEE /* USB-RS232 interface */
-#define FTDI_MHAM_Y9_PID 0xEEEF /* USB-Y9 interface */
-
-/*
- * Active Robots product ids.
- */
-#define FTDI_ACTIVE_ROBOTS_PID 0xE548 /* USB comms board */
-
-/*
- * Xsens Technologies BV products (http://www.xsens.com).
- */
-#define XSENS_CONVERTER_0_PID 0xD388
-#define XSENS_CONVERTER_1_PID 0xD389
-#define XSENS_CONVERTER_2_PID 0xD38A
-#define XSENS_CONVERTER_3_PID 0xD38B
-#define XSENS_CONVERTER_4_PID 0xD38C
-#define XSENS_CONVERTER_5_PID 0xD38D
-#define XSENS_CONVERTER_6_PID 0xD38E
-#define XSENS_CONVERTER_7_PID 0xD38F
-
-/*
- * Teratronik product ids.
- * Submitted by O. Wölfelschneider.
- */
-#define FTDI_TERATRONIK_VCP_PID 0xEC88 /* Teratronik device (preferring VCP driver on windows) */
-#define FTDI_TERATRONIK_D2XX_PID 0xEC89 /* Teratronik device (preferring D2XX driver on windows) */
-
-/*
- * Evolution Robotics products (http://www.evolution.com/).
- * Submitted by Shawn M. Lavelle.
- */
-#define EVOLUTION_VID 0xDEEE /* Vendor ID */
-#define EVOLUTION_ER1_PID 0x0300 /* ER1 Control Module */
-#define EVO_8U232AM_PID 0x02FF /* Evolution robotics RCM2 (FT232AM)*/
-#define EVO_HYBRID_PID 0x0302 /* Evolution robotics RCM4 PID (FT232BM)*/
-#define EVO_RCM4_PID 0x0303 /* Evolution robotics RCM4 PID */
-
-/* Pyramid Computer GmbH */
-#define FTDI_PYRAMID_PID 0xE6C8 /* Pyramid Appliance Display */
-
-/*
- * NDI (www.ndigital.com) product ids
- */
-#define FTDI_NDI_HUC_PID 0xDA70 /* NDI Host USB Converter */
-#define FTDI_NDI_SPECTRA_SCU_PID 0xDA71 /* NDI Spectra SCU */
-#define FTDI_NDI_FUTURE_2_PID 0xDA72 /* NDI future device #2 */
-#define FTDI_NDI_FUTURE_3_PID 0xDA73 /* NDI future device #3 */
-#define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */
-
-/*
- * Posiflex inc retail equipment (http://www.posiflex.com.tw)
- */
-#define POSIFLEX_VID 0x0d3a /* Vendor ID */
-#define POSIFLEX_PP7000_PID 0x0300 /* PP-7000II thermal printer */
-
-/*
- * Westrex International devices submitted by Cory Lee
- */
-#define FTDI_WESTREX_MODEL_777_PID 0xDC00 /* Model 777 */
-#define FTDI_WESTREX_MODEL_8900F_PID 0xDC01 /* Model 8900F */
-
-/*
- * RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com)
- */
-#define FTDI_RRCIRKITS_LOCOBUFFER_PID 0xc7d0 /* LocoBuffer USB */
-
-/*
- * Eclo (http://www.eclo.pt/) product IDs.
- * PID 0xEA90 submitted by Martin Grill.
- */
-#define FTDI_ECLO_COM_1WIRE_PID 0xEA90 /* COM to 1-Wire USB adaptor */
-
-/*
- * Papouch products (http://www.papouch.com/)
- * Submitted by Folkert van Heusden
- */
-
-#define PAPOUCH_VID 0x5050 /* Vendor ID */
-#define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */
-#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */
-
-/*
- * ACG Identification Technologies GmbH products (http://www.acg.de/).
- * Submitted by anton -at- goto10 -dot- org.
- */
-#define FTDI_ACG_HFDUAL_PID 0xDD20 /* HF Dual ISO Reader (RFID) */
-
-/*
- * Yost Engineering, Inc. products (www.yostengineering.com).
- * PID 0xE050 submitted by Aaron Prose.
- */
-#define FTDI_YEI_SERVOCENTER31_PID 0xE050 /* YEI ServoCenter3.1 USB */
-
-/*
- * ThorLabs USB motor drivers
- */
-#define FTDI_THORLABS_PID 0xfaf0 /* ThorLabs USB motor drivers */
-
-/*
- * Testo products (http://www.testo.com/)
- * Submitted by Colin Leroy
- */
-#define TESTO_VID 0x128D
-#define TESTO_USB_INTERFACE_PID 0x0001
-
-/*
- * Gamma Scout (http://gamma-scout.com/). Submitted by rsc@runtux.com.
- */
-#define FTDI_GAMMA_SCOUT_PID 0xD678 /* Gamma Scout online */
-
-/*
- * Tactrix OpenPort (ECU) devices.
- * OpenPort 1.3M submitted by Donour Sizemore.
- * OpenPort 1.3S and 1.3U submitted by Ian Abbott.
- */
-#define FTDI_TACTRIX_OPENPORT_13M_PID 0xCC48 /* OpenPort 1.3 Mitsubishi */
-#define FTDI_TACTRIX_OPENPORT_13S_PID 0xCC49 /* OpenPort 1.3 Subaru */
-#define FTDI_TACTRIX_OPENPORT_13U_PID 0xCC4A /* OpenPort 1.3 Universal */
-
-/*
- * Telldus Technologies
*/
-#define TELLDUS_VID 0x1781 /* Vendor ID */
-#define TELLDUS_TELLSTICK_PID 0x0C30 /* RF control dongle 433 MHz using FT232RL */
-
-/*
- * IBS elektronik product ids
- * Submitted by Thomas Schleusener
- */
-#define FTDI_IBS_US485_PID 0xff38 /* IBS US485 (USB<-->RS422/485 interface) */
-#define FTDI_IBS_PICPRO_PID 0xff39 /* IBS PIC-Programmer */
-#define FTDI_IBS_PCMCIA_PID 0xff3a /* IBS Card reader for PCMCIA SRAM-cards */
-#define FTDI_IBS_PK1_PID 0xff3b /* IBS PK1 - Particel counter */
-#define FTDI_IBS_RS232MON_PID 0xff3c /* IBS RS232 - Monitor */
-#define FTDI_IBS_APP70_PID 0xff3d /* APP 70 (dust monitoring system) */
-#define FTDI_IBS_PEDO_PID 0xff3e /* IBS PEDO-Modem (RF modem 868.35 MHz) */
-#define FTDI_IBS_PROD_PID 0xff3f /* future device */
-
-/*
- * MaxStream devices www.maxstream.net
- */
-#define FTDI_MAXSTREAM_PID 0xEE18 /* Xbee PKG-U Module */
-
-/* Olimex */
-#define OLIMEX_VID 0x15BA
-#define OLIMEX_ARM_USB_OCD_PID 0x0003
-
-/* Luminary Micro Stellaris Boards, VID = FTDI_VID */
-/* FTDI 2332C Dual channel device, side A=245 FIFO (JTAG), Side B=RS232 UART */
-#define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8
-#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9
-
-/* www.elsterelectricity.com Elster Unicom III Optical Probe */
-#define FTDI_ELSTER_UNICOM_PID 0xE700 /* Product Id */
-
-/*
- * The Mobility Lab (TML)
- * Submitted by Pierre Castella
- */
-#define TML_VID 0x1B91 /* Vendor ID */
-#define TML_USB_SERIAL_PID 0x0064 /* USB - Serial Converter */
-
-/* Propox devices */
-#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
-
-/* Rig Expert Ukraine devices */
-#define FTDI_REU_TINY_PID 0xED22 /* RigExpert Tiny */
-
-/* Domintell products http://www.domintell.com */
-#define FTDI_DOMINTELL_DGQG_PID 0xEF50 /* Master */
-#define FTDI_DOMINTELL_DUSB_PID 0xEF51 /* DUSB01 module */
-
-/* Alti-2 products http://www.alti-2.com */
-#define ALTI2_VID 0x1BC9
-#define ALTI2_N3_PID 0x6001 /* Neptune 3 */
/* Commands */
#define FTDI_SIO_RESET 0 /* Reset the port */
@@ -910,86 +40,6 @@
#define INTERFACE_C 3
#define INTERFACE_D 4
-/*
- * FIC / OpenMoko, Inc. http://wiki.openmoko.org/wiki/Neo1973_Debug_Board_v3
- * Submitted by Harald Welte <laforge@openmoko.org>
- */
-#define FIC_VID 0x1457
-#define FIC_NEO1973_DEBUG_PID 0x5118
-
-/*
- * RATOC REX-USB60F
- */
-#define RATOC_VENDOR_ID 0x0584
-#define RATOC_PRODUCT_ID_USB60F 0xb020
-
-/*
- * DIEBOLD BCS SE923
- */
-#define DIEBOLD_BCS_SE923_PID 0xfb99
-
-/*
- * Atmel STK541
- */
-#define ATMEL_VID 0x03eb /* Vendor ID */
-#define STK541_PID 0x2109 /* Zigbee Controller */
-
-/*
- * Dresden Elektronic Sensor Terminal Board
- */
-#define DE_VID 0x1cf1 /* Vendor ID */
-#define STB_PID 0x0001 /* Sensor Terminal Board */
-#define WHT_PID 0x0004 /* Wireless Handheld Terminal */
-
-/*
- * Blackfin gnICE JTAG
- * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
- */
-#define ADI_VID 0x0456
-#define ADI_GNICE_PID 0xF000
-#define ADI_GNICEPLUS_PID 0xF001
-
-/*
- * JETI SPECTROMETER SPECBOS 1201
- * http://www.jeti.com/products/sys/scb/scb1201.php
- */
-#define JETI_VID 0x0c6c
-#define JETI_SPC1201_PID 0x04b2
-
-/*
- * Marvell SheevaPlug
- */
-#define MARVELL_VID 0x9e88
-#define MARVELL_SHEEVAPLUG_PID 0x9e8f
-
-#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */
-
-/*
- * GN Otometrics (http://www.otometrics.com)
- * Submitted by Ville Sundberg.
- */
-#define GN_OTOMETRICS_VID 0x0c33 /* Vendor ID */
-#define AURICAL_USB_PID 0x0010 /* Aurical USB Audiometer */
-
-/*
- * Bayer Ascensia Contour blood glucose meter USB-converter cable.
- * http://winglucofacts.com/cables/
- */
-#define BAYER_VID 0x1A79
-#define BAYER_CONTOUR_CABLE_PID 0x6001
-
-/*
- * Marvell OpenRD Base, Client
- * http://www.open-rd.org
- * OpenRD Base, Client use VID 0x0403
- */
-#define MARVELL_OPENRD_PID 0x9e90
-
-/*
- * Hameg HO820 and HO870 interface (using VID 0x0403)
- */
-#define HAMEG_HO820_PID 0xed74
-#define HAMEG_HO870_PID 0xed71
/*
* BmRequestType: 1100 0000b
@@ -1504,4 +554,3 @@ typedef enum {
* B2..7 Length of message - (not including Byte 0)
*
*/
-
file:af54af2a65f28e10d2956a21734586f7026fc29c(new)
--- /dev/null
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -0,0 +1,1041 @@
+/*
+ * vendor/product IDs (VID/PID) of devices using FTDI USB serial converters.
+ * Please keep numerically sorted within individual areas, thanks!
+ *
+ * Philipp Gühring - pg@futureware.at - added the Device ID of the USB relais
+ * from Rudolf Gugler
+ *
+ */
+
+
+/**********************************/
+/***** devices using FTDI VID *****/
+/**********************************/
+
+
+#define FTDI_VID 0x0403 /* Vendor Id */
+
+
+/*** "original" FTDI device PIDs ***/
+
+#define FTDI_8U232AM_PID 0x6001 /* Similar device to SIO above */
+#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */
+#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
+#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
+#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */
+#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */
+
+
+/*** third-party PIDs (using FTDI_VID) ***/
+
+/*
+ * Marvell OpenRD Base, Client
+ * http://www.open-rd.org
+ * OpenRD Base, Client use VID 0x0403
+ */
+#define MARVELL_OPENRD_PID 0x9e90
+
+/* www.candapter.com Ewert Energy Systems CANdapter device */
+#define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
+
+#define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */
+
+/* OOCDlink by Joern Kaipf <joernk@web.de>
+ * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */
+#define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */
+
+/* Luminary Micro Stellaris Boards, VID = FTDI_VID */
+/* FTDI 2332C Dual channel device, side A=245 FIFO (JTAG), Side B=RS232 UART */
+#define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8
+#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9
+
+#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */
+
+/* OpenDCC (www.opendcc.de) product id */
+#define FTDI_OPENDCC_PID 0xBFD8
+#define FTDI_OPENDCC_SNIFFER_PID 0xBFD9
+#define FTDI_OPENDCC_THROTTLE_PID 0xBFDA
+#define FTDI_OPENDCC_GATEWAY_PID 0xBFDB
+
+/*
+ * RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com)
+ */
+#define FTDI_RRCIRKITS_LOCOBUFFER_PID 0xc7d0 /* LocoBuffer USB */
+
+/* DMX4ALL DMX Interfaces */
+#define FTDI_DMX4ALL 0xC850
+
+/*
+ * ASK.fr devices
+ */
+#define FTDI_ASK_RDR400_PID 0xC991 /* ASK RDR 400 series card reader */
+
+/* www.starting-point-systems.com µChameleon device */
+#define FTDI_MICRO_CHAMELEON_PID 0xCAA0 /* Product Id */
+
+/*
+ * Tactrix OpenPort (ECU) devices.
+ * OpenPort 1.3M submitted by Donour Sizemore.
+ * OpenPort 1.3S and 1.3U submitted by Ian Abbott.
+ */
+#define FTDI_TACTRIX_OPENPORT_13M_PID 0xCC48 /* OpenPort 1.3 Mitsubishi */
+#define FTDI_TACTRIX_OPENPORT_13S_PID 0xCC49 /* OpenPort 1.3 Subaru */
+#define FTDI_TACTRIX_OPENPORT_13U_PID 0xCC4A /* OpenPort 1.3 Universal */
+
+/* SCS HF Radio Modems PID's (http://www.scs-ptc.com) */
+/* the VID is the standard ftdi vid (FTDI_VID) */
+#define FTDI_SCS_DEVICE_0_PID 0xD010 /* SCS PTC-IIusb */
+#define FTDI_SCS_DEVICE_1_PID 0xD011 /* SCS Tracker / DSP TNC */
+#define FTDI_SCS_DEVICE_2_PID 0xD012
+#define FTDI_SCS_DEVICE_3_PID 0xD013
+#define FTDI_SCS_DEVICE_4_PID 0xD014
+#define FTDI_SCS_DEVICE_5_PID 0xD015
+#define FTDI_SCS_DEVICE_6_PID 0xD016
+#define FTDI_SCS_DEVICE_7_PID 0xD017
+
+/* iPlus device */
+#define FTDI_IPLUS_PID 0xD070 /* Product Id */
+#define FTDI_IPLUS2_PID 0xD071 /* Product Id */
+
+/*
+ * Gamma Scout (http://gamma-scout.com/). Submitted by rsc@runtux.com.
+ */
+#define FTDI_GAMMA_SCOUT_PID 0xD678 /* Gamma Scout online */
+
+/* Propox devices */
+#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
+
+/*
+ * Xsens Technologies BV products (http://www.xsens.com).
+ */
+#define XSENS_CONVERTER_0_PID 0xD388
+#define XSENS_CONVERTER_1_PID 0xD389
+#define XSENS_CONVERTER_2_PID 0xD38A
+#define XSENS_CONVERTER_3_PID 0xD38B
+#define XSENS_CONVERTER_4_PID 0xD38C
+#define XSENS_CONVERTER_5_PID 0xD38D
+#define XSENS_CONVERTER_6_PID 0xD38E
+#define XSENS_CONVERTER_7_PID 0xD38F
+
+/*
+ * NDI (www.ndigital.com) product ids
+ */
+#define FTDI_NDI_HUC_PID 0xDA70 /* NDI Host USB Converter */
+#define FTDI_NDI_SPECTRA_SCU_PID 0xDA71 /* NDI Spectra SCU */
+#define FTDI_NDI_FUTURE_2_PID 0xDA72 /* NDI future device #2 */
+#define FTDI_NDI_FUTURE_3_PID 0xDA73 /* NDI future device #3 */
+#define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */
+
+/*
+ * Westrex International devices submitted by Cory Lee
+ */
+#define FTDI_WESTREX_MODEL_777_PID 0xDC00 /* Model 777 */
+#define FTDI_WESTREX_MODEL_8900F_PID 0xDC01 /* Model 8900F */
+
+/*
+ * ACG Identification Technologies GmbH products (http://www.acg.de/).
+ * Submitted by anton -at- goto10 -dot- org.
+ */
+#define FTDI_ACG_HFDUAL_PID 0xDD20 /* HF Dual ISO Reader (RFID) */
+
+/*
+ * Definitions for Artemis astronomical USB based cameras
+ * Check it at http://www.artemisccd.co.uk/
+ */
+#define FTDI_ARTEMIS_PID 0xDF28 /* All Artemis Cameras */
+
+/*
+ * Definitions for ATIK Instruments astronomical USB based cameras
+ * Check it at http://www.atik-instruments.com/
+ */
+#define FTDI_ATIK_ATK16_PID 0xDF30 /* ATIK ATK-16 Grayscale Camera */
+#define FTDI_ATIK_ATK16C_PID 0xDF32 /* ATIK ATK-16C Colour Camera */
+#define FTDI_ATIK_ATK16HR_PID 0xDF31 /* ATIK ATK-16HR Grayscale Camera */
+#define FTDI_ATIK_ATK16HRC_PID 0xDF33 /* ATIK ATK-16HRC Colour Camera */
+#define FTDI_ATIK_ATK16IC_PID 0xDF35 /* ATIK ATK-16IC Grayscale Camera */
+
+/*
+ * Yost Engineering, Inc. products (www.yostengineering.com).
+ * PID 0xE050 submitted by Aaron Prose.
+ */
+#define FTDI_YEI_SERVOCENTER31_PID 0xE050 /* YEI ServoCenter3.1 USB */
+
+/*
+ * ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
+ * All of these devices use FTDI's vendor ID (0x0403).
+ * Further IDs taken from ELV Windows .inf file.
+ *
+ * The previously included PID for the UO 100 module was incorrect.
+ * In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58).
+ *
+ * Armin Laeuger originally sent the PID for the UM 100 module.
+ */
+#define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */
+#define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */
+#define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */
+#define FTDI_ELV_WS550_PID 0xE004 /* WS 550 */
+#define FTDI_ELV_EC3000_PID 0xE006 /* ENERGY CONTROL 3000 USB */
+#define FTDI_ELV_WS888_PID 0xE008 /* WS 888 */
+#define FTDI_ELV_TWS550_PID 0xE009 /* Technoline WS 550 */
+#define FTDI_ELV_FEM_PID 0xE00A /* Funk Energie Monitor */
+#define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */
+#define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */
+#define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */
+#define FTDI_ELV_UMS100_PID 0xE0EB /* ELV USB Master-Slave Schaltsteckdose UMS 100 */
+#define FTDI_ELV_TFD128_PID 0xE0EC /* ELV Temperatur-Feuchte-Datenlogger TFD 128 */
+#define FTDI_ELV_FM3RX_PID 0xE0ED /* ELV Messwertuebertragung FM3 RX */
+#define FTDI_ELV_WS777_PID 0xE0EE /* Conrad WS 777 */
+#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */
+#define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */
+#define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */
+#define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */
+#define FTDI_ELV_RFP500_PID 0xE0F3 /* HF-Leistungsmesser (RFP 500) */
+#define FTDI_ELV_FS20SIG_PID 0xE0F4 /* Signalgeber (FS 20 SIG) */
+#define FTDI_ELV_UTP8_PID 0xE0F5 /* ELV UTP 8 */
+#define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */
+#define FTDI_ELV_WS444PC_PID 0xE0F7 /* Conrad WS 444 PC */
+#define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */
+#define FTDI_ELV_UAD8_PID 0xF068 /* USB-AD-Wandler (UAD 8) */
+#define FTDI_ELV_UDA7_PID 0xF069 /* USB-DA-Wandler (UDA 7) */
+#define FTDI_ELV_USI2_PID 0xF06A /* USB-Schrittmotoren-Interface (USI 2) */
+#define FTDI_ELV_T1100_PID 0xF06B /* Thermometer (T 1100) */
+#define FTDI_ELV_PCD200_PID 0xF06C /* PC-Datenlogger (PCD 200) */
+#define FTDI_ELV_ULA200_PID 0xF06D /* USB-LCD-Ansteuerung (ULA 200) */
+#define FTDI_ELV_ALC8500_PID 0xF06E /* ALC 8500 Expert */
+#define FTDI_ELV_FHZ1000PC_PID 0xF06F /* FHZ 1000 PC */
+#define FTDI_ELV_UR100_PID 0xFB58 /* USB-RS232-Umsetzer (UR 100) */
+#define FTDI_ELV_UM100_PID 0xFB5A /* USB-Modul UM 100 */
+#define FTDI_ELV_UO100_PID 0xFB5B /* USB-Modul UO 100 */
+/* Additional ELV PIDs that default to using the FTDI D2XX drivers on
+ * MS Windows, rather than the FTDI Virtual Com Port drivers.
+ * Maybe these will be easier to use with the libftdi/libusb user-space
+ * drivers, or possibly the Comedi drivers in some cases. */
+#define FTDI_ELV_CLI7000_PID 0xFB59 /* Computer-Light-Interface (CLI 7000) */
+#define FTDI_ELV_PPS7330_PID 0xFB5C /* Processor-Power-Supply (PPS 7330) */
+#define FTDI_ELV_TFM100_PID 0xFB5D /* Temperartur-Feuchte Messgeraet (TFM 100) */
+#define FTDI_ELV_UDF77_PID 0xFB5E /* USB DCF Funkurh (UDF 77) */
+#define FTDI_ELV_UIO88_PID 0xFB5F /* USB-I/O Interface (UIO 88) */
+
+/*
+ * EVER Eco Pro UPS (http://www.ever.com.pl/)
+ */
+
+#define EVER_ECO_PRO_CDS 0xe520 /* RS-232 converter */
+
+/*
+ * Active Robots product ids.
+ */
+#define FTDI_ACTIVE_ROBOTS_PID 0xE548 /* USB comms board */
+
+/* Pyramid Computer GmbH */
+#define FTDI_PYRAMID_PID 0xE6C8 /* Pyramid Appliance Display */
+
+/* www.elsterelectricity.com Elster Unicom III Optical Probe */
+#define FTDI_ELSTER_UNICOM_PID 0xE700 /* Product Id */
+
+/*
+ * Gude Analog- und Digitalsysteme GmbH
+ */
+#define FTDI_GUDEADS_E808_PID 0xE808
+#define FTDI_GUDEADS_E809_PID 0xE809
+#define FTDI_GUDEADS_E80A_PID 0xE80A
+#define FTDI_GUDEADS_E80B_PID 0xE80B
+#define FTDI_GUDEADS_E80C_PID 0xE80C
+#define FTDI_GUDEADS_E80D_PID 0xE80D
+#define FTDI_GUDEADS_E80E_PID 0xE80E
+#define FTDI_GUDEADS_E80F_PID 0xE80F
+#define FTDI_GUDEADS_E888_PID 0xE888 /* Expert ISDN Control USB */
+#define FTDI_GUDEADS_E889_PID 0xE889 /* USB RS-232 OptoBridge */
+#define FTDI_GUDEADS_E88A_PID 0xE88A
+#define FTDI_GUDEADS_E88B_PID 0xE88B
+#define FTDI_GUDEADS_E88C_PID 0xE88C
+#define FTDI_GUDEADS_E88D_PID 0xE88D
+#define FTDI_GUDEADS_E88E_PID 0xE88E
+#define FTDI_GUDEADS_E88F_PID 0xE88F
+
+/*
+ * Eclo (http://www.eclo.pt/) product IDs.
+ * PID 0xEA90 submitted by Martin Grill.
+ */
+#define FTDI_ECLO_COM_1WIRE_PID 0xEA90 /* COM to 1-Wire USB adaptor */
+
+/* TNC-X USB-to-packet-radio adapter, versions prior to 3.0 (DLP module) */
+#define FTDI_TNC_X_PID 0xEBE0
+
+/*
+ * Teratronik product ids.
+ * Submitted by O. Wölfelschneider.
+ */
+#define FTDI_TERATRONIK_VCP_PID 0xEC88 /* Teratronik device (preferring VCP driver on windows) */
+#define FTDI_TERATRONIK_D2XX_PID 0xEC89 /* Teratronik device (preferring D2XX driver on windows) */
+
+/* Rig Expert Ukraine devices */
+#define FTDI_REU_TINY_PID 0xED22 /* RigExpert Tiny */
+
+/*
+ * Hameg HO820 and HO870 interface (using VID 0x0403)
+ */
+#define HAMEG_HO820_PID 0xed74
+#define HAMEG_HO870_PID 0xed71
+
+/*
+ * MaxStream devices www.maxstream.net
+ */
+#define FTDI_MAXSTREAM_PID 0xEE18 /* Xbee PKG-U Module */
+
+/*
+ * microHAM product IDs (http://www.microham.com).
+ * Submitted by Justin Burket (KL1RL) <zorton@jtan.com>
+ * and Mike Studer (K6EEP) <k6eep@hamsoftware.org>.
+ * Ian Abbott <abbotti@mev.co.uk> added a few more from the driver INF file.
+ */
+#define FTDI_MHAM_KW_PID 0xEEE8 /* USB-KW interface */
+#define FTDI_MHAM_YS_PID 0xEEE9 /* USB-YS interface */
+#define FTDI_MHAM_Y6_PID 0xEEEA /* USB-Y6 interface */
+#define FTDI_MHAM_Y8_PID 0xEEEB /* USB-Y8 interface */
+#define FTDI_MHAM_IC_PID 0xEEEC /* USB-IC interface */
+#define FTDI_MHAM_DB9_PID 0xEEED /* USB-DB9 interface */
+#define FTDI_MHAM_RS232_PID 0xEEEE /* USB-RS232 interface */
+#define FTDI_MHAM_Y9_PID 0xEEEF /* USB-Y9 interface */
+
+/* Domintell products http://www.domintell.com */
+#define FTDI_DOMINTELL_DGQG_PID 0xEF50 /* Master */
+#define FTDI_DOMINTELL_DUSB_PID 0xEF51 /* DUSB01 module */
+
+/*
+ * The following are the values for the Perle Systems
+ * UltraPort USB serial converters
+ */
+#define FTDI_PERLE_ULTRAPORT_PID 0xF0C0 /* Perle UltraPort Product Id */
+
+/* Sprog II (Andrew Crosland's SprogII DCC interface) */
+#define FTDI_SPROG_II 0xF0C8
+
+/* an infrared receiver for user access control with IR tags */
+#define FTDI_PIEGROUP_PID 0xF208 /* Product Id */
+
+/* ACT Solutions HomePro ZWave interface
+ (http://www.act-solutions.com/HomePro.htm) */
+#define FTDI_ACTZWAVE_PID 0xF2D0
+
+/*
+ * 4N-GALAXY.DE PIDs for CAN-USB, USB-RS232, USB-RS422, USB-RS485,
+ * USB-TTY activ, USB-TTY passiv. Some PIDs are used by several devices
+ * and I'm not entirely sure which are used by which.
+ */
+#define FTDI_4N_GALAXY_DE_1_PID 0xF3C0
+#define FTDI_4N_GALAXY_DE_2_PID 0xF3C1
+
+/*
+ * Linx Technologies product ids
+ */
+#define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */
+#define LINX_MASTERDEVEL2_PID 0xF449 /* Linx Master Development 2.0 */
+#define LINX_FUTURE_0_PID 0xF44A /* Linx future device */
+#define LINX_FUTURE_1_PID 0xF44B /* Linx future device */
+#define LINX_FUTURE_2_PID 0xF44C /* Linx future device */
+
+/*
+ * Oceanic product ids
+ */
+#define FTDI_OCEANIC_PID 0xF460 /* Oceanic dive instrument */
+
+/*
+ * SUUNTO product ids
+ */
+#define FTDI_SUUNTO_SPORTS_PID 0xF680 /* Suunto Sports instrument */
+
+/* USB-UIRT - An infrared receiver and transmitter using the 8U232AM chip */
+/* http://home.earthlink.net/~jrhees/USBUIRT/index.htm */
+#define FTDI_USB_UIRT_PID 0xF850 /* Product Id */
+
+/* CCS Inc. ICDU/ICDU40 product ID -
+ * the FT232BM is used in an in-circuit-debugger unit for PIC16's/PIC18's */
+#define FTDI_CCSICDU20_0_PID 0xF9D0
+#define FTDI_CCSICDU40_1_PID 0xF9D1
+#define FTDI_CCSMACHX_2_PID 0xF9D2
+#define FTDI_CCSLOAD_N_GO_3_PID 0xF9D3
+#define FTDI_CCSICDU64_4_PID 0xF9D4
+#define FTDI_CCSPRIME8_5_PID 0xF9D5
+
+/*
+ * The following are the values for the Matrix Orbital LCD displays,
+ * which use the FT232BM (similar to the 8U232AM)
+ */
+#define FTDI_MTXORB_0_PID 0xFA00 /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_1_PID 0xFA01 /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_2_PID 0xFA02 /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_3_PID 0xFA03 /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_4_PID 0xFA04 /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_5_PID 0xFA05 /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_6_PID 0xFA06 /* Matrix Orbital Product Id */
+
+/*
+ * Home Electronics (www.home-electro.com) USB gadgets
+ */
+#define FTDI_HE_TIRA1_PID 0xFA78 /* Tira-1 IR transceiver */
+
+/* Inside Accesso contactless reader (http://www.insidefr.com) */
+#define INSIDE_ACCESSO 0xFAD0
+
+/*
+ * ThorLabs USB motor drivers
+ */
+#define FTDI_THORLABS_PID 0xfaf0 /* ThorLabs USB motor drivers */
+
+/*
+ * Protego product ids
+ */
+#define PROTEGO_SPECIAL_1 0xFC70 /* special/unknown device */
+#define PROTEGO_R2X0 0xFC71 /* R200-USB TRNG unit (R210, R220, and R230) */
+#define PROTEGO_SPECIAL_3 0xFC72 /* special/unknown device */
+#define PROTEGO_SPECIAL_4 0xFC73 /* special/unknown device */
+
+/*
+ * DSS-20 Sync Station for Sony Ericsson P800
+ */
+#define FTDI_DSS20_PID 0xFC82
+
+/* www.irtrans.de device */
+#define FTDI_IRTRANS_PID 0xFC60 /* Product Id */
+
+/*
+ * RM Michaelides CANview USB (http://www.rmcan.com) (FTDI_VID)
+ * CAN fieldbus interface adapter, added by port GmbH (www.port.de)
+ * Ian Abbott changed the macro names for consistency.
+ */
+#define FTDI_RM_CANVIEW_PID 0xfd60 /* Product Id */
+/* www.thoughttechnology.com/ TT-USB interfaces provided with ProComp use ftdi_sio */
+#define FTDI_TTUSB_PID 0xFF20 /* Product Id */
+
+#define FTDI_USBX_707_PID 0xF857 /* ADSTech IR Blaster USBX-707 (FTDI_VID) */
+
+#define FTDI_RELAIS_PID 0xFA10 /* Relais device from Rudolf Gugler */
+
+/*
+ * PCDJ uses FTDI-based DJ controllers. The following PID is
+ * for their DAC-2 device http://www.pcdjhardware.com/DAC2.asp
+ * (the VID is the standard ftdi vid (FTDI_VID), PID sent by Wouter Paesen)
+ */
+#define FTDI_PCDJ_DAC2_PID 0xFA88
+
+#define FTDI_R2000KU_TRUE_RNG 0xFB80 /* R2000KU TRUE RNG (FTDI_VID) */
+
+/*
+ * DIEBOLD BCS SE923 (FTDI_VID)
+ */
+#define DIEBOLD_BCS_SE923_PID 0xfb99
+
+/* www.crystalfontz.com devices
+ * - thanks for providing free devices for evaluation!
+ * they use the ftdi chipset for the USB interface
+ * and the vendor id is the same
+ */
+#define FTDI_XF_632_PID 0xFC08 /* 632: 16x2 Character Display */
+#define FTDI_XF_634_PID 0xFC09 /* 634: 20x4 Character Display */
+#define FTDI_XF_547_PID 0xFC0A /* 547: Two line Display */
+#define FTDI_XF_633_PID 0xFC0B /* 633: 16x2 Character Display with Keys */
+#define FTDI_XF_631_PID 0xFC0C /* 631: 20x2 Character Display */
+#define FTDI_XF_635_PID 0xFC0D /* 635: 20x4 Character Display */
+#define FTDI_XF_640_PID 0xFC0E /* 640: Two line Display */
+#define FTDI_XF_642_PID 0xFC0F /* 642: Two line Display */
+
+/*
+ * Video Networks Limited / Homechoice in the UK use an ftdi-based device
+ * for their 1Mb broadband internet service. The following PID is exhibited
+ * by the USB device supplied (the VID is the standard FTDI VID (FTDI_VID))
+ */
+#define FTDI_VNHCPCUSB_D_PID 0xfe38 /* Product Id */
+
+/* AlphaMicro Components AMC-232USB01 device (FTDI_VID) */
+#define FTDI_AMC232_PID 0xFF00 /* Product Id */
+
+/*
+ * IBS elektronik product ids (FTDI_VID)
+ * Submitted by Thomas Schleusener
+ */
+#define FTDI_IBS_US485_PID 0xff38 /* IBS US485 (USB<-->RS422/485 interface) */
+#define FTDI_IBS_PICPRO_PID 0xff39 /* IBS PIC-Programmer */
+#define FTDI_IBS_PCMCIA_PID 0xff3a /* IBS Card reader for PCMCIA SRAM-cards */
+#define FTDI_IBS_PK1_PID 0xff3b /* IBS PK1 - Particle counter */
+#define FTDI_IBS_RS232MON_PID 0xff3c /* IBS RS232 - Monitor */
+#define FTDI_IBS_APP70_PID 0xff3d /* APP 70 (dust monitoring system) */
+#define FTDI_IBS_PEDO_PID 0xff3e /* IBS PEDO-Modem (RF modem 868.35 MHz) */
+#define FTDI_IBS_PROD_PID 0xff3f /* future device */
+/* www.canusb.com Lawicel CANUSB device (FTDI_VID) */
+#define FTDI_CANUSB_PID 0xFFA8 /* Product Id */
+
+
+
+/********************************/
+/** third-party VID/PID combos **/
+/********************************/
+
+
+
+/*
+ * Atmel STK541
+ */
+#define ATMEL_VID 0x03eb /* Vendor ID */
+#define STK541_PID 0x2109 /* Zigbee Controller */
+
+/*
+ * Blackfin gnICE JTAG
+ * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
+ */
+#define ADI_VID 0x0456
+#define ADI_GNICE_PID 0xF000
+#define ADI_GNICEPLUS_PID 0xF001
+
+/*
+ * RATOC REX-USB60F
+ */
+#define RATOC_VENDOR_ID 0x0584
+#define RATOC_PRODUCT_ID_USB60F 0xb020
+
+/*
+ * Contec products (http://www.contec.com)
+ * Submitted by Daniel Sangorrin
+ */
+#define CONTEC_VID 0x06CE /* Vendor ID */
+#define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */
+
+/*
+ * Definitions for B&B Electronics products.
+ */
+#define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */
+#define BANDB_USOTL4_PID 0xAC01 /* USOTL4 Isolated RS-485 Converter */
+#define BANDB_USTL4_PID 0xAC02 /* USTL4 RS-485 Converter */
+#define BANDB_USO9ML2_PID 0xAC03 /* USO9ML2 Isolated RS-232 Converter */
+#define BANDB_USOPTL4_PID 0xAC11
+#define BANDB_USPTL4_PID 0xAC12
+#define BANDB_USO9ML2DR_2_PID 0xAC16
+#define BANDB_USO9ML2DR_PID 0xAC17
+#define BANDB_USOPTL4DR2_PID 0xAC18 /* USOPTL4R-2 2-port Isolated RS-232 Converter */
+#define BANDB_USOPTL4DR_PID 0xAC19
+#define BANDB_485USB9F_2W_PID 0xAC25
+#define BANDB_485USB9F_4W_PID 0xAC26
+#define BANDB_232USB9M_PID 0xAC27
+#define BANDB_485USBTB_2W_PID 0xAC33
+#define BANDB_485USBTB_4W_PID 0xAC34
+#define BANDB_TTL5USB9M_PID 0xAC49
+#define BANDB_TTL3USB9M_PID 0xAC50
+#define BANDB_ZZ_PROG1_USB_PID 0xBA02
+
+/*
+ * Intrepid Control Systems (http://www.intrepidcs.com/) ValueCAN and NeoVI
+ */
+#define INTREPID_VID 0x093C
+#define INTREPID_VALUECAN_PID 0x0601
+#define INTREPID_NEOVI_PID 0x0701
+
+/*
+ * Definitions for ID TECH (www.idt-net.com) devices
+ */
+#define IDTECH_VID 0x0ACD /* ID TECH Vendor ID */
+#define IDTECH_IDT1221U_PID 0x0300 /* IDT1221U USB to RS-232 adapter */
+
+/*
+ * Definitions for Omnidirectional Control Technology, Inc. devices
+ */
+#define OCT_VID 0x0B39 /* OCT vendor ID */
+/* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */
+/* Also rebadged as Dick Smith Electronics (Aus) XH6451 */
+/* Also rebadged as SIIG Inc. model US2308 hardware version 1 */
+#define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */
+
+/*
+ * Icom ID-1 digital transceiver
+ */
+
+#define ICOM_ID1_VID 0x0C26
+#define ICOM_ID1_PID 0x0004
+
+/*
+ * GN Otometrics (http://www.otometrics.com)
+ * Submitted by Ville Sundberg.
+ */
+#define GN_OTOMETRICS_VID 0x0c33 /* Vendor ID */
+#define AURICAL_USB_PID 0x0010 /* Aurical USB Audiometer */
+
+/*
+ * The following are the values for the Sealevel SeaLINK+ adapters.
+ * (Original list sent by Tuan Hoang. Ian Abbott renamed the macros and
+ * removed some PIDs that don't seem to match any existing products.)
+ */
+#define SEALEVEL_VID 0x0c52 /* Sealevel Vendor ID */
+#define SEALEVEL_2101_PID 0x2101 /* SeaLINK+232 (2101/2105) */
+#define SEALEVEL_2102_PID 0x2102 /* SeaLINK+485 (2102) */
+#define SEALEVEL_2103_PID 0x2103 /* SeaLINK+232I (2103) */
+#define SEALEVEL_2104_PID 0x2104 /* SeaLINK+485I (2104) */
+#define SEALEVEL_2106_PID 0x9020 /* SeaLINK+422 (2106) */
+#define SEALEVEL_2201_1_PID 0x2211 /* SeaPORT+2/232 (2201) Port 1 */
+#define SEALEVEL_2201_2_PID 0x2221 /* SeaPORT+2/232 (2201) Port 2 */
+#define SEALEVEL_2202_1_PID 0x2212 /* SeaPORT+2/485 (2202) Port 1 */
+#define SEALEVEL_2202_2_PID 0x2222 /* SeaPORT+2/485 (2202) Port 2 */
+#define SEALEVEL_2203_1_PID 0x2213 /* SeaPORT+2 (2203) Port 1 */
+#define SEALEVEL_2203_2_PID 0x2223 /* SeaPORT+2 (2203) Port 2 */
+#define SEALEVEL_2401_1_PID 0x2411 /* SeaPORT+4/232 (2401) Port 1 */
+#define SEALEVEL_2401_2_PID 0x2421 /* SeaPORT+4/232 (2401) Port 2 */
+#define SEALEVEL_2401_3_PID 0x2431 /* SeaPORT+4/232 (2401) Port 3 */
+#define SEALEVEL_2401_4_PID 0x2441 /* SeaPORT+4/232 (2401) Port 4 */
+#define SEALEVEL_2402_1_PID 0x2412 /* SeaPORT+4/485 (2402) Port 1 */
+#define SEALEVEL_2402_2_PID 0x2422 /* SeaPORT+4/485 (2402) Port 2 */
+#define SEALEVEL_2402_3_PID 0x2432 /* SeaPORT+4/485 (2402) Port 3 */
+#define SEALEVEL_2402_4_PID 0x2442 /* SeaPORT+4/485 (2402) Port 4 */
+#define SEALEVEL_2403_1_PID 0x2413 /* SeaPORT+4 (2403) Port 1 */
+#define SEALEVEL_2403_2_PID 0x2423 /* SeaPORT+4 (2403) Port 2 */
+#define SEALEVEL_2403_3_PID 0x2433 /* SeaPORT+4 (2403) Port 3 */
+#define SEALEVEL_2403_4_PID 0x2443 /* SeaPORT+4 (2403) Port 4 */
+#define SEALEVEL_2801_1_PID 0X2811 /* SeaLINK+8/232 (2801) Port 1 */
+#define SEALEVEL_2801_2_PID 0X2821 /* SeaLINK+8/232 (2801) Port 2 */
+#define SEALEVEL_2801_3_PID 0X2831 /* SeaLINK+8/232 (2801) Port 3 */
+#define SEALEVEL_2801_4_PID 0X2841 /* SeaLINK+8/232 (2801) Port 4 */
+#define SEALEVEL_2801_5_PID 0X2851 /* SeaLINK+8/232 (2801) Port 5 */
+#define SEALEVEL_2801_6_PID 0X2861 /* SeaLINK+8/232 (2801) Port 6 */
+#define SEALEVEL_2801_7_PID 0X2871 /* SeaLINK+8/232 (2801) Port 7 */
+#define SEALEVEL_2801_8_PID 0X2881 /* SeaLINK+8/232 (2801) Port 8 */
+#define SEALEVEL_2802_1_PID 0X2812 /* SeaLINK+8/485 (2802) Port 1 */
+#define SEALEVEL_2802_2_PID 0X2822 /* SeaLINK+8/485 (2802) Port 2 */
+#define SEALEVEL_2802_3_PID 0X2832 /* SeaLINK+8/485 (2802) Port 3 */
+#define SEALEVEL_2802_4_PID 0X2842 /* SeaLINK+8/485 (2802) Port 4 */
+#define SEALEVEL_2802_5_PID 0X2852 /* SeaLINK+8/485 (2802) Port 5 */
+#define SEALEVEL_2802_6_PID 0X2862 /* SeaLINK+8/485 (2802) Port 6 */
+#define SEALEVEL_2802_7_PID 0X2872 /* SeaLINK+8/485 (2802) Port 7 */
+#define SEALEVEL_2802_8_PID 0X2882 /* SeaLINK+8/485 (2802) Port 8 */
+#define SEALEVEL_2803_1_PID 0X2813 /* SeaLINK+8 (2803) Port 1 */
+#define SEALEVEL_2803_2_PID 0X2823 /* SeaLINK+8 (2803) Port 2 */
+#define SEALEVEL_2803_3_PID 0X2833 /* SeaLINK+8 (2803) Port 3 */
+#define SEALEVEL_2803_4_PID 0X2843 /* SeaLINK+8 (2803) Port 4 */
+#define SEALEVEL_2803_5_PID 0X2853 /* SeaLINK+8 (2803) Port 5 */
+#define SEALEVEL_2803_6_PID 0X2863 /* SeaLINK+8 (2803) Port 6 */
+#define SEALEVEL_2803_7_PID 0X2873 /* SeaLINK+8 (2803) Port 7 */
+#define SEALEVEL_2803_8_PID 0X2883 /* SeaLINK+8 (2803) Port 8 */
+
+/*
+ * JETI SPECTROMETER SPECBOS 1201
+ * http://www.jeti.com/products/sys/scb/scb1201.php
+ */
+#define JETI_VID 0x0c6c
+#define JETI_SPC1201_PID 0x04b2
+
+/*
+ * FTDI USB UART chips used in construction projects from the
+ * Elektor Electronics magazine (http://elektor-electronics.co.uk)
+ */
+#define ELEKTOR_VID 0x0C7D
+#define ELEKTOR_FT323R_PID 0x0005 /* RFID-Reader, issue 09-2006 */
+
+/*
+ * Posiflex inc retail equipment (http://www.posiflex.com.tw)
+ */
+#define POSIFLEX_VID 0x0d3a /* Vendor ID */
+#define POSIFLEX_PP7000_PID 0x0300 /* PP-7000II thermal printer */
+
+/*
+ * The following are the values for two KOBIL chipcard terminals.
+ */
+#define KOBIL_VID 0x0d46 /* KOBIL Vendor ID */
+#define KOBIL_CONV_B1_PID 0x2020 /* KOBIL Konverter for B1 */
+#define KOBIL_CONV_KAAN_PID 0x2021 /* KOBIL_Konverter for KAAN */
+
+#define FTDI_NF_RIC_VID 0x0DCD /* Vendor Id */
+#define FTDI_NF_RIC_PID 0x0001 /* Product Id */
+
+/*
+ * Falcom Wireless Communications GmbH
+ */
+#define FALCOM_VID 0x0F94 /* Vendor Id */
+#define FALCOM_TWIST_PID 0x0001 /* Falcom Twist USB GPRS modem */
+#define FALCOM_SAMBA_PID 0x0005 /* Falcom Samba USB GPRS modem */
+
+/* Larsen and Brusgaard AltiTrack/USBtrack */
+#define LARSENBRUSGAARD_VID 0x0FD8
+#define LB_ALTITRACK_PID 0x0001
+
+/*
+ * TTi (Thurlby Thandar Instruments)
+ */
+#define TTI_VID 0x103E /* Vendor Id */
+#define TTI_QL355P_PID 0x03E8 /* TTi QL355P power supply */
+
+/* Interbiometrics USB I/O Board */
+/* Developed for Interbiometrics by Rudolf Gugler */
+#define INTERBIOMETRICS_VID 0x1209
+#define INTERBIOMETRICS_IOBOARD_PID 0x1002
+#define INTERBIOMETRICS_MINI_IOBOARD_PID 0x1006
+
+/*
+ * Testo products (http://www.testo.com/)
+ * Submitted by Colin Leroy
+ */
+#define TESTO_VID 0x128D
+#define TESTO_USB_INTERFACE_PID 0x0001
+
+/*
+ * Mobility Electronics products.
+ */
+#define MOBILITY_VID 0x1342
+#define MOBILITY_USB_SERIAL_PID 0x0202 /* EasiDock USB 200 serial */
+
+/*
+ * FIC / OpenMoko, Inc. http://wiki.openmoko.org/wiki/Neo1973_Debug_Board_v3
+ * Submitted by Harald Welte <laforge@openmoko.org>
+ */
+#define FIC_VID 0x1457
+#define FIC_NEO1973_DEBUG_PID 0x5118
+
+/* Olimex */
+#define OLIMEX_VID 0x15BA
+#define OLIMEX_ARM_USB_OCD_PID 0x0003
+
+/*
+ * Telldus Technologies
+ */
+#define TELLDUS_VID 0x1781 /* Vendor ID */
+#define TELLDUS_TELLSTICK_PID 0x0C30 /* RF control dongle 433 MHz using FT232RL */
+
+/*
+ * RT Systems programming cables for various ham radios
+ */
+#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
+#define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */
+
+/*
+ * Bayer Ascensia Contour blood glucose meter USB-converter cable.
+ * http://winglucofacts.com/cables/
+ */
+#define BAYER_VID 0x1A79
+#define BAYER_CONTOUR_CABLE_PID 0x6001
+
+/*
+ * The following are the values for the Matrix Orbital FTDI Range
+ * Anything in this range will use an FT232RL.
+ */
+#define MTXORB_VID 0x1B3D
+#define MTXORB_FTDI_RANGE_0100_PID 0x0100
+#define MTXORB_FTDI_RANGE_0101_PID 0x0101
+#define MTXORB_FTDI_RANGE_0102_PID 0x0102
+#define MTXORB_FTDI_RANGE_0103_PID 0x0103
+#define MTXORB_FTDI_RANGE_0104_PID 0x0104
+#define MTXORB_FTDI_RANGE_0105_PID 0x0105
+#define MTXORB_FTDI_RANGE_0106_PID 0x0106
+#define MTXORB_FTDI_RANGE_0107_PID 0x0107
+#define MTXORB_FTDI_RANGE_0108_PID 0x0108
+#define MTXORB_FTDI_RANGE_0109_PID 0x0109
+#define MTXORB_FTDI_RANGE_010A_PID 0x010A
+#define MTXORB_FTDI_RANGE_010B_PID 0x010B
+#define MTXORB_FTDI_RANGE_010C_PID 0x010C
+#define MTXORB_FTDI_RANGE_010D_PID 0x010D
+#define MTXORB_FTDI_RANGE_010E_PID 0x010E
+#define MTXORB_FTDI_RANGE_010F_PID 0x010F
+#define MTXORB_FTDI_RANGE_0110_PID 0x0110
+#define MTXORB_FTDI_RANGE_0111_PID 0x0111
+#define MTXORB_FTDI_RANGE_0112_PID 0x0112
+#define MTXORB_FTDI_RANGE_0113_PID 0x0113
+#define MTXORB_FTDI_RANGE_0114_PID 0x0114
+#define MTXORB_FTDI_RANGE_0115_PID 0x0115
+#define MTXORB_FTDI_RANGE_0116_PID 0x0116
+#define MTXORB_FTDI_RANGE_0117_PID 0x0117
+#define MTXORB_FTDI_RANGE_0118_PID 0x0118
+#define MTXORB_FTDI_RANGE_0119_PID 0x0119
+#define MTXORB_FTDI_RANGE_011A_PID 0x011A
+#define MTXORB_FTDI_RANGE_011B_PID 0x011B
+#define MTXORB_FTDI_RANGE_011C_PID 0x011C
+#define MTXORB_FTDI_RANGE_011D_PID 0x011D
+#define MTXORB_FTDI_RANGE_011E_PID 0x011E
+#define MTXORB_FTDI_RANGE_011F_PID 0x011F
+#define MTXORB_FTDI_RANGE_0120_PID 0x0120
+#define MTXORB_FTDI_RANGE_0121_PID 0x0121
+#define MTXORB_FTDI_RANGE_0122_PID 0x0122
+#define MTXORB_FTDI_RANGE_0123_PID 0x0123
+#define MTXORB_FTDI_RANGE_0124_PID 0x0124
+#define MTXORB_FTDI_RANGE_0125_PID 0x0125
+#define MTXORB_FTDI_RANGE_0126_PID 0x0126
+#define MTXORB_FTDI_RANGE_0127_PID 0x0127
+#define MTXORB_FTDI_RANGE_0128_PID 0x0128
+#define MTXORB_FTDI_RANGE_0129_PID 0x0129
+#define MTXORB_FTDI_RANGE_012A_PID 0x012A
+#define MTXORB_FTDI_RANGE_012B_PID 0x012B
+#define MTXORB_FTDI_RANGE_012C_PID 0x012C
+#define MTXORB_FTDI_RANGE_012D_PID 0x012D
+#define MTXORB_FTDI_RANGE_012E_PID 0x012E
+#define MTXORB_FTDI_RANGE_012F_PID 0x012F
+#define MTXORB_FTDI_RANGE_0130_PID 0x0130
+#define MTXORB_FTDI_RANGE_0131_PID 0x0131
+#define MTXORB_FTDI_RANGE_0132_PID 0x0132
+#define MTXORB_FTDI_RANGE_0133_PID 0x0133
+#define MTXORB_FTDI_RANGE_0134_PID 0x0134
+#define MTXORB_FTDI_RANGE_0135_PID 0x0135
+#define MTXORB_FTDI_RANGE_0136_PID 0x0136
+#define MTXORB_FTDI_RANGE_0137_PID 0x0137
+#define MTXORB_FTDI_RANGE_0138_PID 0x0138
+#define MTXORB_FTDI_RANGE_0139_PID 0x0139
+#define MTXORB_FTDI_RANGE_013A_PID 0x013A
+#define MTXORB_FTDI_RANGE_013B_PID 0x013B
+#define MTXORB_FTDI_RANGE_013C_PID 0x013C
+#define MTXORB_FTDI_RANGE_013D_PID 0x013D
+#define MTXORB_FTDI_RANGE_013E_PID 0x013E
+#define MTXORB_FTDI_RANGE_013F_PID 0x013F
+#define MTXORB_FTDI_RANGE_0140_PID 0x0140
+#define MTXORB_FTDI_RANGE_0141_PID 0x0141
+#define MTXORB_FTDI_RANGE_0142_PID 0x0142
+#define MTXORB_FTDI_RANGE_0143_PID 0x0143
+#define MTXORB_FTDI_RANGE_0144_PID 0x0144
+#define MTXORB_FTDI_RANGE_0145_PID 0x0145
+#define MTXORB_FTDI_RANGE_0146_PID 0x0146
+#define MTXORB_FTDI_RANGE_0147_PID 0x0147
+#define MTXORB_FTDI_RANGE_0148_PID 0x0148
+#define MTXORB_FTDI_RANGE_0149_PID 0x0149
+#define MTXORB_FTDI_RANGE_014A_PID 0x014A
+#define MTXORB_FTDI_RANGE_014B_PID 0x014B
+#define MTXORB_FTDI_RANGE_014C_PID 0x014C
+#define MTXORB_FTDI_RANGE_014D_PID 0x014D
+#define MTXORB_FTDI_RANGE_014E_PID 0x014E
+#define MTXORB_FTDI_RANGE_014F_PID 0x014F
+#define MTXORB_FTDI_RANGE_0150_PID 0x0150
+#define MTXORB_FTDI_RANGE_0151_PID 0x0151
+#define MTXORB_FTDI_RANGE_0152_PID 0x0152
+#define MTXORB_FTDI_RANGE_0153_PID 0x0153
+#define MTXORB_FTDI_RANGE_0154_PID 0x0154
+#define MTXORB_FTDI_RANGE_0155_PID 0x0155
+#define MTXORB_FTDI_RANGE_0156_PID 0x0156
+#define MTXORB_FTDI_RANGE_0157_PID 0x0157
+#define MTXORB_FTDI_RANGE_0158_PID 0x0158
+#define MTXORB_FTDI_RANGE_0159_PID 0x0159
+#define MTXORB_FTDI_RANGE_015A_PID 0x015A
+#define MTXORB_FTDI_RANGE_015B_PID 0x015B
+#define MTXORB_FTDI_RANGE_015C_PID 0x015C
+#define MTXORB_FTDI_RANGE_015D_PID 0x015D
+#define MTXORB_FTDI_RANGE_015E_PID 0x015E
+#define MTXORB_FTDI_RANGE_015F_PID 0x015F
+#define MTXORB_FTDI_RANGE_0160_PID 0x0160
+#define MTXORB_FTDI_RANGE_0161_PID 0x0161
+#define MTXORB_FTDI_RANGE_0162_PID 0x0162
+#define MTXORB_FTDI_RANGE_0163_PID 0x0163
+#define MTXORB_FTDI_RANGE_0164_PID 0x0164
+#define MTXORB_FTDI_RANGE_0165_PID 0x0165
+#define MTXORB_FTDI_RANGE_0166_PID 0x0166
+#define MTXORB_FTDI_RANGE_0167_PID 0x0167
+#define MTXORB_FTDI_RANGE_0168_PID 0x0168
+#define MTXORB_FTDI_RANGE_0169_PID 0x0169
+#define MTXORB_FTDI_RANGE_016A_PID 0x016A
+#define MTXORB_FTDI_RANGE_016B_PID 0x016B
+#define MTXORB_FTDI_RANGE_016C_PID 0x016C
+#define MTXORB_FTDI_RANGE_016D_PID 0x016D
+#define MTXORB_FTDI_RANGE_016E_PID 0x016E
+#define MTXORB_FTDI_RANGE_016F_PID 0x016F
+#define MTXORB_FTDI_RANGE_0170_PID 0x0170
+#define MTXORB_FTDI_RANGE_0171_PID 0x0171
+#define MTXORB_FTDI_RANGE_0172_PID 0x0172
+#define MTXORB_FTDI_RANGE_0173_PID 0x0173
+#define MTXORB_FTDI_RANGE_0174_PID 0x0174
+#define MTXORB_FTDI_RANGE_0175_PID 0x0175
+#define MTXORB_FTDI_RANGE_0176_PID 0x0176
+#define MTXORB_FTDI_RANGE_0177_PID 0x0177
+#define MTXORB_FTDI_RANGE_0178_PID 0x0178
+#define MTXORB_FTDI_RANGE_0179_PID 0x0179
+#define MTXORB_FTDI_RANGE_017A_PID 0x017A
+#define MTXORB_FTDI_RANGE_017B_PID 0x017B
+#define MTXORB_FTDI_RANGE_017C_PID 0x017C
+#define MTXORB_FTDI_RANGE_017D_PID 0x017D
+#define MTXORB_FTDI_RANGE_017E_PID 0x017E
+#define MTXORB_FTDI_RANGE_017F_PID 0x017F
+#define MTXORB_FTDI_RANGE_0180_PID 0x0180
+#define MTXORB_FTDI_RANGE_0181_PID 0x0181
+#define MTXORB_FTDI_RANGE_0182_PID 0x0182
+#define MTXORB_FTDI_RANGE_0183_PID 0x0183
+#define MTXORB_FTDI_RANGE_0184_PID 0x0184
+#define MTXORB_FTDI_RANGE_0185_PID 0x0185
+#define MTXORB_FTDI_RANGE_0186_PID 0x0186
+#define MTXORB_FTDI_RANGE_0187_PID 0x0187
+#define MTXORB_FTDI_RANGE_0188_PID 0x0188
+#define MTXORB_FTDI_RANGE_0189_PID 0x0189
+#define MTXORB_FTDI_RANGE_018A_PID 0x018A
+#define MTXORB_FTDI_RANGE_018B_PID 0x018B
+#define MTXORB_FTDI_RANGE_018C_PID 0x018C
+#define MTXORB_FTDI_RANGE_018D_PID 0x018D
+#define MTXORB_FTDI_RANGE_018E_PID 0x018E
+#define MTXORB_FTDI_RANGE_018F_PID 0x018F
+#define MTXORB_FTDI_RANGE_0190_PID 0x0190
+#define MTXORB_FTDI_RANGE_0191_PID 0x0191
+#define MTXORB_FTDI_RANGE_0192_PID 0x0192
+#define MTXORB_FTDI_RANGE_0193_PID 0x0193
+#define MTXORB_FTDI_RANGE_0194_PID 0x0194
+#define MTXORB_FTDI_RANGE_0195_PID 0x0195
+#define MTXORB_FTDI_RANGE_0196_PID 0x0196
+#define MTXORB_FTDI_RANGE_0197_PID 0x0197
+#define MTXORB_FTDI_RANGE_0198_PID 0x0198
+#define MTXORB_FTDI_RANGE_0199_PID 0x0199
+#define MTXORB_FTDI_RANGE_019A_PID 0x019A
+#define MTXORB_FTDI_RANGE_019B_PID 0x019B
+#define MTXORB_FTDI_RANGE_019C_PID 0x019C
+#define MTXORB_FTDI_RANGE_019D_PID 0x019D
+#define MTXORB_FTDI_RANGE_019E_PID 0x019E
+#define MTXORB_FTDI_RANGE_019F_PID 0x019F
+#define MTXORB_FTDI_RANGE_01A0_PID 0x01A0
+#define MTXORB_FTDI_RANGE_01A1_PID 0x01A1
+#define MTXORB_FTDI_RANGE_01A2_PID 0x01A2
+#define MTXORB_FTDI_RANGE_01A3_PID 0x01A3
+#define MTXORB_FTDI_RANGE_01A4_PID 0x01A4
+#define MTXORB_FTDI_RANGE_01A5_PID 0x01A5
+#define MTXORB_FTDI_RANGE_01A6_PID 0x01A6
+#define MTXORB_FTDI_RANGE_01A7_PID 0x01A7
+#define MTXORB_FTDI_RANGE_01A8_PID 0x01A8
+#define MTXORB_FTDI_RANGE_01A9_PID 0x01A9
+#define MTXORB_FTDI_RANGE_01AA_PID 0x01AA
+#define MTXORB_FTDI_RANGE_01AB_PID 0x01AB
+#define MTXORB_FTDI_RANGE_01AC_PID 0x01AC
+#define MTXORB_FTDI_RANGE_01AD_PID 0x01AD
+#define MTXORB_FTDI_RANGE_01AE_PID 0x01AE
+#define MTXORB_FTDI_RANGE_01AF_PID 0x01AF
+#define MTXORB_FTDI_RANGE_01B0_PID 0x01B0
+#define MTXORB_FTDI_RANGE_01B1_PID 0x01B1
+#define MTXORB_FTDI_RANGE_01B2_PID 0x01B2
+#define MTXORB_FTDI_RANGE_01B3_PID 0x01B3
+#define MTXORB_FTDI_RANGE_01B4_PID 0x01B4
+#define MTXORB_FTDI_RANGE_01B5_PID 0x01B5
+#define MTXORB_FTDI_RANGE_01B6_PID 0x01B6
+#define MTXORB_FTDI_RANGE_01B7_PID 0x01B7
+#define MTXORB_FTDI_RANGE_01B8_PID 0x01B8
+#define MTXORB_FTDI_RANGE_01B9_PID 0x01B9
+#define MTXORB_FTDI_RANGE_01BA_PID 0x01BA
+#define MTXORB_FTDI_RANGE_01BB_PID 0x01BB
+#define MTXORB_FTDI_RANGE_01BC_PID 0x01BC
+#define MTXORB_FTDI_RANGE_01BD_PID 0x01BD
+#define MTXORB_FTDI_RANGE_01BE_PID 0x01BE
+#define MTXORB_FTDI_RANGE_01BF_PID 0x01BF
+#define MTXORB_FTDI_RANGE_01C0_PID 0x01C0
+#define MTXORB_FTDI_RANGE_01C1_PID 0x01C1
+#define MTXORB_FTDI_RANGE_01C2_PID 0x01C2
+#define MTXORB_FTDI_RANGE_01C3_PID 0x01C3
+#define MTXORB_FTDI_RANGE_01C4_PID 0x01C4
+#define MTXORB_FTDI_RANGE_01C5_PID 0x01C5
+#define MTXORB_FTDI_RANGE_01C6_PID 0x01C6
+#define MTXORB_FTDI_RANGE_01C7_PID 0x01C7
+#define MTXORB_FTDI_RANGE_01C8_PID 0x01C8
+#define MTXORB_FTDI_RANGE_01C9_PID 0x01C9
+#define MTXORB_FTDI_RANGE_01CA_PID 0x01CA
+#define MTXORB_FTDI_RANGE_01CB_PID 0x01CB
+#define MTXORB_FTDI_RANGE_01CC_PID 0x01CC
+#define MTXORB_FTDI_RANGE_01CD_PID 0x01CD
+#define MTXORB_FTDI_RANGE_01CE_PID 0x01CE
+#define MTXORB_FTDI_RANGE_01CF_PID 0x01CF
+#define MTXORB_FTDI_RANGE_01D0_PID 0x01D0
+#define MTXORB_FTDI_RANGE_01D1_PID 0x01D1
+#define MTXORB_FTDI_RANGE_01D2_PID 0x01D2
+#define MTXORB_FTDI_RANGE_01D3_PID 0x01D3
+#define MTXORB_FTDI_RANGE_01D4_PID 0x01D4
+#define MTXORB_FTDI_RANGE_01D5_PID 0x01D5
+#define MTXORB_FTDI_RANGE_01D6_PID 0x01D6
+#define MTXORB_FTDI_RANGE_01D7_PID 0x01D7
+#define MTXORB_FTDI_RANGE_01D8_PID 0x01D8
+#define MTXORB_FTDI_RANGE_01D9_PID 0x01D9
+#define MTXORB_FTDI_RANGE_01DA_PID 0x01DA
+#define MTXORB_FTDI_RANGE_01DB_PID 0x01DB
+#define MTXORB_FTDI_RANGE_01DC_PID 0x01DC
+#define MTXORB_FTDI_RANGE_01DD_PID 0x01DD
+#define MTXORB_FTDI_RANGE_01DE_PID 0x01DE
+#define MTXORB_FTDI_RANGE_01DF_PID 0x01DF
+#define MTXORB_FTDI_RANGE_01E0_PID 0x01E0
+#define MTXORB_FTDI_RANGE_01E1_PID 0x01E1
+#define MTXORB_FTDI_RANGE_01E2_PID 0x01E2
+#define MTXORB_FTDI_RANGE_01E3_PID 0x01E3
+#define MTXORB_FTDI_RANGE_01E4_PID 0x01E4
+#define MTXORB_FTDI_RANGE_01E5_PID 0x01E5
+#define MTXORB_FTDI_RANGE_01E6_PID 0x01E6
+#define MTXORB_FTDI_RANGE_01E7_PID 0x01E7
+#define MTXORB_FTDI_RANGE_01E8_PID 0x01E8
+#define MTXORB_FTDI_RANGE_01E9_PID 0x01E9
+#define MTXORB_FTDI_RANGE_01EA_PID 0x01EA
+#define MTXORB_FTDI_RANGE_01EB_PID 0x01EB
+#define MTXORB_FTDI_RANGE_01EC_PID 0x01EC
+#define MTXORB_FTDI_RANGE_01ED_PID 0x01ED
+#define MTXORB_FTDI_RANGE_01EE_PID 0x01EE
+#define MTXORB_FTDI_RANGE_01EF_PID 0x01EF
+#define MTXORB_FTDI_RANGE_01F0_PID 0x01F0
+#define MTXORB_FTDI_RANGE_01F1_PID 0x01F1
+#define MTXORB_FTDI_RANGE_01F2_PID 0x01F2
+#define MTXORB_FTDI_RANGE_01F3_PID 0x01F3
+#define MTXORB_FTDI_RANGE_01F4_PID 0x01F4
+#define MTXORB_FTDI_RANGE_01F5_PID 0x01F5
+#define MTXORB_FTDI_RANGE_01F6_PID 0x01F6
+#define MTXORB_FTDI_RANGE_01F7_PID 0x01F7
+#define MTXORB_FTDI_RANGE_01F8_PID 0x01F8
+#define MTXORB_FTDI_RANGE_01F9_PID 0x01F9
+#define MTXORB_FTDI_RANGE_01FA_PID 0x01FA
+#define MTXORB_FTDI_RANGE_01FB_PID 0x01FB
+#define MTXORB_FTDI_RANGE_01FC_PID 0x01FC
+#define MTXORB_FTDI_RANGE_01FD_PID 0x01FD
+#define MTXORB_FTDI_RANGE_01FE_PID 0x01FE
+#define MTXORB_FTDI_RANGE_01FF_PID 0x01FF
+
+
+
+/*
+ * The Mobility Lab (TML)
+ * Submitted by Pierre Castella
+ */
+#define TML_VID 0x1B91 /* Vendor ID */
+#define TML_USB_SERIAL_PID 0x0064 /* USB - Serial Converter */
+
+/* Alti-2 products http://www.alti-2.com */
+#define ALTI2_VID 0x1BC9
+#define ALTI2_N3_PID 0x6001 /* Neptune 3 */
+
+/*
+ * Dresden Elektronic Sensor Terminal Board
+ */
+#define DE_VID 0x1cf1 /* Vendor ID */
+#define STB_PID 0x0001 /* Sensor Terminal Board */
+#define WHT_PID 0x0004 /* Wireless Handheld Terminal */
+
+/*
+ * Papouch products (http://www.papouch.com/)
+ * Submitted by Folkert van Heusden
+ */
+
+#define PAPOUCH_VID 0x5050 /* Vendor ID */
+#define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */
+#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */
+#define PAPOUCH_AD4USB_PID 0x8003 /* AD4USB Measurement Module */
+
+/*
+ * Marvell SheevaPlug
+ */
+#define MARVELL_VID 0x9e88
+#define MARVELL_SHEEVAPLUG_PID 0x9e8f
+
+/*
+ * Evolution Robotics products (http://www.evolution.com/).
+ * Submitted by Shawn M. Lavelle.
+ */
+#define EVOLUTION_VID 0xDEEE /* Vendor ID */
+#define EVOLUTION_ER1_PID 0x0300 /* ER1 Control Module */
+#define EVO_8U232AM_PID 0x02FF /* Evolution robotics RCM2 (FT232AM)*/
+#define EVO_HYBRID_PID 0x0302 /* Evolution robotics RCM4 PID (FT232BM)*/
+#define EVO_RCM4_PID 0x0303 /* Evolution robotics RCM4 PID */
+
+/*
+ * MJS Gadgets HD Radio / XM Radio / Sirius Radio interfaces (using VID 0x0403)
+ */
+#define MJSG_GENERIC_PID 0x9378
+#define MJSG_SR_RADIO_PID 0x9379
+#define MJSG_XM_RADIO_PID 0x937A
+#define MJSG_HD_RADIO_PID 0x937C
+
+/*
+ * Xverve Signalyzer tools (http://www.signalyzer.com/)
+ */
+#define XVERVE_SIGNALYZER_ST_PID 0xBCA0
+#define XVERVE_SIGNALYZER_SLITE_PID 0xBCA1
+#define XVERVE_SIGNALYZER_SH2_PID 0xBCA2
+#define XVERVE_SIGNALYZER_SH4_PID 0xBCA4
+
file:95d8d26b9a44de53501d02ddd02f4d7e8c07fa1d -> file:2e0497b02260c3123ba130bd0fb37643c8b88f8d
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -312,6 +312,7 @@ static int ir_open(struct tty_struct *tt
kfree(port->read_urb->transfer_buffer);
port->read_urb->transfer_buffer = buffer;
port->read_urb->transfer_buffer_length = buffer_size;
+ port->bulk_in_buffer = buffer;
buffer = kmalloc(buffer_size, GFP_KERNEL);
if (!buffer) {
@@ -321,6 +322,7 @@ static int ir_open(struct tty_struct *tt
kfree(port->write_urb->transfer_buffer);
port->write_urb->transfer_buffer = buffer;
port->write_urb->transfer_buffer_length = buffer_size;
+ port->bulk_out_buffer = buffer;
port->bulk_out_size = buffer_size;
}
file:3a7873806f465aaf02a41213df7c2d41e5f866f1 -> file:682508289b44d3a0017f24e40df88adec331c7b9
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -310,6 +310,7 @@ err_cleanup:
usb_free_urb(priv->write_urb_pool[j]);
}
}
+ kfree(priv);
usb_set_serial_port_data(serial->port[i], NULL);
}
return -ENOMEM;
file:45ea694b3ae6a4900d0a04810ef9a8b149ad7c32 -> file:9d99e682f96d0794ed9fd7600d1d1f6e1a60a144
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -345,7 +345,8 @@ static void kobil_close(struct usb_seria
/* FIXME: Add rts/dtr methods */
if (port->write_urb) {
- usb_kill_urb(port->write_urb);
+ usb_poison_urb(port->write_urb);
+ kfree(port->write_urb->transfer_buffer);
usb_free_urb(port->write_urb);
port->write_urb = NULL;
}
file:485fa9c5b10766698caabf03c580f92715c86bba -> file:a861cd2cb9318fd4692413686eb6117b50b7b588
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -714,7 +714,6 @@ static void mos7840_bulk_in_callback(str
mos7840_port = urb->context;
if (!mos7840_port) {
dbg("%s", "NULL mos7840_port pointer");
- mos7840_port->read_urb_busy = false;
return;
}
file:be3dff16d7c4d284fc67d84af47d33eb462d3a72 -> file:25860233012747c32b735998cef1c0de9638c51f
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -226,6 +226,7 @@ static int option_resume(struct usb_ser
#define AMOI_PRODUCT_H01 0x0800
#define AMOI_PRODUCT_H01A 0x7002
#define AMOI_PRODUCT_H02 0x0802
+#define AMOI_PRODUCT_SKYPEPHONE_S2 0x0407
#define DELL_VENDOR_ID 0x413C
@@ -288,7 +289,9 @@ static int option_resume(struct usb_ser
#define QUALCOMM_VENDOR_ID 0x05C6
-#define MAXON_VENDOR_ID 0x16d8
+#define CMOTECH_VENDOR_ID 0x16d8
+#define CMOTECH_PRODUCT_6008 0x6008
+#define CMOTECH_PRODUCT_6280 0x6280
#define TELIT_VENDOR_ID 0x1bc7
#define TELIT_PRODUCT_UC864E 0x1003
@@ -314,6 +317,7 @@ static int option_resume(struct usb_ser
#define QISDA_PRODUCT_H21_4512 0x4512
#define QISDA_PRODUCT_H21_4523 0x4523
#define QISDA_PRODUCT_H20_4515 0x4515
+#define QISDA_PRODUCT_H20_4518 0x4518
#define QISDA_PRODUCT_H20_4519 0x4519
/* TLAYTECH PRODUCTS */
@@ -332,6 +336,24 @@ static int option_resume(struct usb_ser
#define ALCATEL_VENDOR_ID 0x1bbb
#define ALCATEL_PRODUCT_X060S 0x0000
+#define PIRELLI_VENDOR_ID 0x1266
+#define PIRELLI_PRODUCT_C100_1 0x1002
+#define PIRELLI_PRODUCT_C100_2 0x1003
+#define PIRELLI_PRODUCT_1004 0x1004
+#define PIRELLI_PRODUCT_1005 0x1005
+#define PIRELLI_PRODUCT_1006 0x1006
+#define PIRELLI_PRODUCT_1007 0x1007
+#define PIRELLI_PRODUCT_1008 0x1008
+#define PIRELLI_PRODUCT_1009 0x1009
+#define PIRELLI_PRODUCT_100A 0x100a
+#define PIRELLI_PRODUCT_100B 0x100b
+#define PIRELLI_PRODUCT_100C 0x100c
+#define PIRELLI_PRODUCT_100D 0x100d
+#define PIRELLI_PRODUCT_100E 0x100e
+#define PIRELLI_PRODUCT_100F 0x100f
+#define PIRELLI_PRODUCT_1011 0x1011
+#define PIRELLI_PRODUCT_1012 0x1012
+
/* Airplus products */
#define AIRPLUS_VENDOR_ID 0x1011
#define AIRPLUS_PRODUCT_MCD650 0x3198
@@ -344,6 +366,12 @@ static int option_resume(struct usb_ser
#define HAIER_VENDOR_ID 0x201e
#define HAIER_PRODUCT_CE100 0x2009
+#define CINTERION_VENDOR_ID 0x0681
+
+/* Olivetti products */
+#define OLIVETTI_VENDOR_ID 0x0b3c
+#define OLIVETTI_PRODUCT_OLICARD100 0xc000
+
static struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -477,6 +505,7 @@ static struct usb_device_id option_ids[]
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H02) },
+ { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_SKYPEPHONE_S2) },
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5500_MINICARD) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
@@ -520,7 +549,8 @@ static struct usb_device_id option_ids[]
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
- { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
@@ -618,6 +648,180 @@ static struct usb_device_id option_ids[]
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1060, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1061, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1062, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1063, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1064, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1065, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1066, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1067, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1068, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1069, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1070, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1071, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1072, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1073, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1074, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1075, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1076, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1077, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1078, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1079, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1080, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1081, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1082, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1083, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1084, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1085, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1086, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1087, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1088, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1089, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1090, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1091, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1092, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1093, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1094, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1095, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1096, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1097, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1098, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1099, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1100, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1101, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1102, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1103, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1104, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1105, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1106, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1107, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1108, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1109, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1110, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1111, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1112, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1113, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1114, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1115, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1116, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1117, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1118, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1119, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1120, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1121, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1122, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1123, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1124, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1125, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1126, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1127, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1128, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1129, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1130, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1131, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1132, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1133, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1134, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1135, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1136, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1137, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1138, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1139, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1140, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1141, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1142, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1143, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1144, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1145, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1146, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1147, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1148, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1149, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1150, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1151, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1152, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1153, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1154, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1155, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1156, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1157, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1158, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1159, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1160, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1161, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1162, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1163, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1164, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1165, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1166, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1167, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1168, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1260, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1261, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1262, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1263, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1264, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1265, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1266, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1279, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1280, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1281, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1282, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1283, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1284, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1285, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1286, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1287, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1288, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1289, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1290, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1291, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1292, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1293, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1294, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1295, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1296, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1297, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
@@ -635,6 +839,7 @@ static struct usb_device_id option_ids[]
{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) },
{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4523) },
{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4515) },
+ { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4518) },
{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4519) },
{ USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) },
{ USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
@@ -646,6 +851,27 @@ static struct usb_device_id option_ids[]
{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
{ USB_DEVICE(FOUR_G_SYSTEMS_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14) },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
+ /* Pirelli */
+ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1)},
+ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2)},
+ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1004)},
+ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1005)},
+ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1006)},
+ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1007)},
+ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1008)},
+ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1009)},
+ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100A)},
+ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100B) },
+ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100C) },
+ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100D) },
+ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100E) },
+ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
+ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
+ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
+
+ { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
+
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
@@ -768,12 +994,19 @@ static int option_probe(struct usb_seria
const struct usb_device_id *id)
{
struct option_intf_private *data;
+
/* D-Link DWM 652 still exposes CD-Rom emulation interface in modem mode */
if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID &&
serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 &&
serial->interface->cur_altsetting->desc.bInterfaceClass == 0x8)
return -ENODEV;
+ /* Bandrich modem and AT command interface is 0xff */
+ if ((serial->dev->descriptor.idVendor == BANDRICH_VENDOR_ID ||
+ serial->dev->descriptor.idVendor == PIRELLI_VENDOR_ID) &&
+ serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff)
+ return -ENODEV;
+
data = serial->private = kzalloc(sizeof(struct option_intf_private), GFP_KERNEL);
if (!data)
return -ENOMEM;
file:7528b8d57f1cc9048001244f780524b8cca17690 -> file:8ab4ab2231ddfb4da64b0b7f5e48f4d6dc6df9f5
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -47,6 +47,35 @@ static struct usb_device_id id_table[] =
{USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
{USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
{USB_DEVICE(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */
+ {USB_DEVICE(0x413c, 0x8185)}, /* Dell Gobi 2000 QDL device (N0218, VU936) */
+ {USB_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
+ {USB_DEVICE(0x05c6, 0x9224)}, /* Sony Gobi 2000 QDL device (N0279, VU730) */
+ {USB_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
+ {USB_DEVICE(0x05c6, 0x9244)}, /* Samsung Gobi 2000 QDL device (VL176) */
+ {USB_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */
+ {USB_DEVICE(0x03f0, 0x241d)}, /* HP Gobi 2000 QDL device (VP412) */
+ {USB_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */
+ {USB_DEVICE(0x05c6, 0x9214)}, /* Acer Gobi 2000 QDL device (VP413) */
+ {USB_DEVICE(0x05c6, 0x9215)}, /* Acer Gobi 2000 Modem device (VP413) */
+ {USB_DEVICE(0x05c6, 0x9264)}, /* Asus Gobi 2000 QDL device (VR305) */
+ {USB_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */
+ {USB_DEVICE(0x05c6, 0x9234)}, /* Top Global Gobi 2000 QDL device (VR306) */
+ {USB_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */
+ {USB_DEVICE(0x05c6, 0x9274)}, /* iRex Technologies Gobi 2000 QDL device (VR307) */
+ {USB_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */
+ {USB_DEVICE(0x1199, 0x9000)}, /* Sierra Wireless Gobi 2000 QDL device (VT773) */
+ {USB_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
+ {USB_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
+ {USB_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
+ {USB_DEVICE(0x1199, 0x9004)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
+ {USB_DEVICE(0x1199, 0x9005)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
+ {USB_DEVICE(0x1199, 0x9006)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
+ {USB_DEVICE(0x1199, 0x9007)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
+ {USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
+ {USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
+ {USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
+ {USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */
+ {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
file:5019325ba25dda93486a08757c68cba44cf0b4a0 -> file:328578bdb455161bdcb9008317c2e8d8111e5bc1
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -195,6 +195,7 @@ static const struct sierra_iface_info di
static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
{ USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */
+ { USB_DEVICE(0x03F0, 0x211D) }, /* HP ev2210 a.k.a MC5725 */
{ USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */
{ USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
@@ -209,6 +210,7 @@ static struct usb_device_id id_table []
{ USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */
{ USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */
{ USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */
+ { USB_DEVICE(0x1199, 0x0301) }, /* Sierra Wireless USB Dongle 250U */
/* Sierra Wireless C597 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) },
/* Sierra Wireless T598 */
@@ -567,14 +569,17 @@ static void sierra_indat_callback(struct
} else {
if (urb->actual_length) {
tty = tty_port_tty_get(&port->port);
-
- tty_buffer_request_room(tty, urb->actual_length);
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
-
- tty_kref_put(tty);
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, data);
+ if (tty) {
+ tty_buffer_request_room(tty,
+ urb->actual_length);
+ tty_insert_flip_string(tty, data,
+ urb->actual_length);
+ tty_flip_buffer_push(tty);
+
+ tty_kref_put(tty);
+ usb_serial_debug_data(debug, &port->dev,
+ __func__, urb->actual_length, data);
+ }
} else {
dev_dbg(&port->dev, "%s: empty read urb"
" received\n", __func__);
file:ad1f9232292d264506b16cb5b5bd8a625dc2e88d -> file:c14087018887e62213cb75965c4e6fa9c519e403
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -249,6 +249,7 @@ static struct usb_serial_driver clie_3_5
.throttle = visor_throttle,
.unthrottle = visor_unthrottle,
.attach = clie_3_5_startup,
+ .release = visor_release,
.write = visor_write,
.write_room = visor_write_room,
.write_bulk_callback = visor_write_bulk_callback,
file:c932f9053188b32857a6b331ad188b3d88b97706 -> file:72150021cc79e3b663406b4c5b8b49dd2299da6c
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1147,8 +1147,8 @@ UNUSUAL_DEV( 0x0af0, 0x7401, 0x0000, 0x0
0 ),
/* Reported by Jan Dumon <j.dumon@option.com>
- * This device (wrongly) has a vendor-specific device descriptor.
- * The entry is needed so usb-storage can bind to it's mass-storage
+ * These devices (wrongly) have a vendor-specific device descriptor.
+ * These entries are needed so usb-storage can bind to their mass-storage
* interface as an interface driver */
UNUSUAL_DEV( 0x0af0, 0x7501, 0x0000, 0x0000,
"Option",
@@ -1156,6 +1156,90 @@ UNUSUAL_DEV( 0x0af0, 0x7501, 0x0000, 0x0
US_SC_DEVICE, US_PR_DEVICE, NULL,
0 ),
+UNUSUAL_DEV( 0x0af0, 0x7701, 0x0000, 0x0000,
+ "Option",
+ "GI 0451 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x7706, 0x0000, 0x0000,
+ "Option",
+ "GI 0451 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x7901, 0x0000, 0x0000,
+ "Option",
+ "GI 0452 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x7A01, 0x0000, 0x0000,
+ "Option",
+ "GI 0461 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x7A05, 0x0000, 0x0000,
+ "Option",
+ "GI 0461 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x8300, 0x0000, 0x0000,
+ "Option",
+ "GI 033x SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x8302, 0x0000, 0x0000,
+ "Option",
+ "GI 033x SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x8304, 0x0000, 0x0000,
+ "Option",
+ "GI 033x SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xc100, 0x0000, 0x0000,
+ "Option",
+ "GI 070x SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd057, 0x0000, 0x0000,
+ "Option",
+ "GI 1505 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd058, 0x0000, 0x0000,
+ "Option",
+ "GI 1509 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd157, 0x0000, 0x0000,
+ "Option",
+ "GI 1515 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd257, 0x0000, 0x0000,
+ "Option",
+ "GI 1215 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd357, 0x0000, 0x0000,
+ "Option",
+ "GI 1505 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
/* Reported by Ben Efros <ben@pc-doctor.com> */
UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000,
"Seagate",
@@ -1774,6 +1858,21 @@ UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
+/* Reported by Hans de Goede <hdegoede@redhat.com>
+ * These Appotech controllers are found in Picture Frames; they provide a
+ * (buggy) emulation of a cdrom drive which contains the windows software.
+ * Uploading of pictures happens over the corresponding /dev/sg device. */
+UNUSUAL_DEV( 0x1908, 0x1315, 0x0000, 0x0000,
+ "BUILDWIN",
+ "Photo Frame",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_BAD_SENSE ),
+UNUSUAL_DEV( 0x1908, 0x1320, 0x0000, 0x0000,
+ "BUILDWIN",
+ "Photo Frame",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_BAD_SENSE ),
+
UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
"ST",
"2A",
file:9edb8d7c295f38ed02a31642b3eba3be1c5681c9 -> file:73ab600efbf8b161a16f74e968a402414c24e324
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ b/drivers/video/backlight/mbp_nvidia_bl.c
@@ -139,6 +139,51 @@ static int mbp_dmi_match(const struct dm
static const struct dmi_system_id __initdata mbp_device_table[] = {
{
.callback = mbp_dmi_match,
+ .ident = "MacBook 1,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
+ },
+ .driver_data = (void *)&intel_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
+ .ident = "MacBook 2,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook2,1"),
+ },
+ .driver_data = (void *)&intel_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
+ .ident = "MacBook 3,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook3,1"),
+ },
+ .driver_data = (void *)&intel_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
+ .ident = "MacBook 4,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,1"),
+ },
+ .driver_data = (void *)&intel_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
+ .ident = "MacBook 4,2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,2"),
+ },
+ .driver_data = (void *)&intel_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
.ident = "MacBookPro 3,1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
file:5cc36cfbf07be275ce2b8ea92104c41e4bda1996 -> file:2c72a7ca736607d5bd565b6bc287020616f1773d
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -515,9 +515,9 @@ static int __devinit bfin_t350mcqb_probe
fbinfo->fbops = &bfin_t350mcqb_fb_ops;
fbinfo->flags = FBINFO_FLAG_DEFAULT;
- info->fb_buffer =
- dma_alloc_coherent(NULL, fbinfo->fix.smem_len, &info->dma_handle,
- GFP_KERNEL);
+ info->fb_buffer = dma_alloc_coherent(NULL, fbinfo->fix.smem_len +
+ ACTIVE_VIDEO_MEM_OFFSET,
+ &info->dma_handle, GFP_KERNEL);
if (NULL == info->fb_buffer) {
printk(KERN_ERR DRIVER_NAME
@@ -587,8 +587,8 @@ out7:
out6:
fb_dealloc_cmap(&fbinfo->cmap);
out4:
- dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
- info->dma_handle);
+ dma_free_coherent(NULL, fbinfo->fix.smem_len + ACTIVE_VIDEO_MEM_OFFSET,
+ info->fb_buffer, info->dma_handle);
out3:
framebuffer_release(fbinfo);
out2:
@@ -611,8 +611,9 @@ static int __devexit bfin_t350mcqb_remov
free_irq(info->irq, info);
if (info->fb_buffer != NULL)
- dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
- info->dma_handle);
+ dma_free_coherent(NULL, fbinfo->fix.smem_len +
+ ACTIVE_VIDEO_MEM_OFFSET, info->fb_buffer,
+ info->dma_handle);
fb_dealloc_cmap(&fbinfo->cmap);
file:eb12182b20598861937b46496b8716efcae1e366 -> file:d25df51bb0d2b3160efe478fb4e64f968014ea04
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -161,8 +161,17 @@ static int efifb_setcolreg(unsigned regn
return 0;
}
+static void efifb_destroy(struct fb_info *info)
+{
+ if (info->screen_base)
+ iounmap(info->screen_base);
+ release_mem_region(info->aperture_base, info->aperture_size);
+ framebuffer_release(info);
+}
+
static struct fb_ops efifb_ops = {
.owner = THIS_MODULE,
+ .fb_destroy = efifb_destroy,
.fb_setcolreg = efifb_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
@@ -281,7 +290,7 @@ static int __init efifb_probe(struct pla
info->par = NULL;
info->aperture_base = efifb_fix.smem_start;
- info->aperture_size = size_total;
+ info->aperture_size = size_remap;
info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
if (!info->screen_base) {
file:4d8c54c23dd7e320289fadec313d52ec5f333392 -> file:b043ac83c41265b884bbe07e2510519be4ba91dd
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -282,8 +282,17 @@ static int offb_set_par(struct fb_info *
return 0;
}
+static void offb_destroy(struct fb_info *info)
+{
+ if (info->screen_base)
+ iounmap(info->screen_base);
+ release_mem_region(info->aperture_base, info->aperture_size);
+ framebuffer_release(info);
+}
+
static struct fb_ops offb_ops = {
.owner = THIS_MODULE,
+ .fb_destroy = offb_destroy,
.fb_setcolreg = offb_setcolreg,
.fb_set_par = offb_set_par,
.fb_blank = offb_blank,
@@ -482,10 +491,14 @@ static void __init offb_init_fb(const ch
var->sync = 0;
var->vmode = FB_VMODE_NONINTERLACED;
+ /* set offb aperture size for generic probing */
+ info->aperture_base = address;
+ info->aperture_size = fix->smem_len;
+
info->fbops = &offb_ops;
info->screen_base = ioremap(address, fix->smem_len);
info->pseudo_palette = (void *) (info + 1);
- info->flags = FBINFO_DEFAULT | foreign_endian;
+ info->flags = FBINFO_DEFAULT | FBINFO_MISC_FIRMWARE | foreign_endian;
fb_alloc_cmap(&info->cmap, 256, 0);
file:18b950706cad47f70ee83282a7cb7d905d902ef6 -> file:4cd50497264d3c65d092e1ca9c4fd55d2a7d0e22
--- a/drivers/video/sunxvr500.c
+++ b/drivers/video/sunxvr500.c
@@ -400,6 +400,7 @@ static void __devexit e3d_pci_unregister
static struct pci_device_id e3d_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x7a0), },
+ { PCI_DEVICE(0x1091, 0x7a0), },
{ PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x7a2), },
{ .vendor = PCI_VENDOR_ID_3DLABS,
.device = PCI_ANY_ID,
file:28d9cf7cf72f9fd1ee8aeea96085da394d48702e -> file:d43859f76db248a39a23d39cdb47d73aca8e5393
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -473,7 +473,8 @@ static void vp_del_vqs(struct virtio_dev
list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
info = vq->priv;
- if (vp_dev->per_vq_vectors)
+ if (vp_dev->per_vq_vectors &&
+ info->msix_vector != VIRTIO_MSI_NO_VECTOR)
free_irq(vp_dev->msix_entries[info->msix_vector].vector,
vq);
vp_del_vq(vq);
@@ -634,6 +635,9 @@ static int __devinit virtio_pci_probe(st
INIT_LIST_HEAD(&vp_dev->virtqueues);
spin_lock_init(&vp_dev->lock);
+ /* Disable MSI/MSIX to bring device to a known good state. */
+ pci_msi_off(pci_dev);
+
/* enable the device */
err = pci_enable_device(pci_dev);
if (err)
file:c7b3f9df2317387401961450f9746ed093a97e27 -> file:2159e668751cd7db5e9ddfb39c96984573801539
--- a/drivers/watchdog/bfin_wdt.c
+++ b/drivers/watchdog/bfin_wdt.c
@@ -1,9 +1,8 @@
/*
* Blackfin On-Chip Watchdog Driver
- * Supports BF53[123]/BF53[467]/BF54[2489]/BF561
*
* Originally based on softdog.c
- * Copyright 2006-2007 Analog Devices Inc.
+ * Copyright 2006-2010 Analog Devices Inc.
* Copyright 2006-2007 Michele d'Amico
* Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>
*
@@ -137,13 +136,15 @@ static int bfin_wdt_running(void)
*/
static int bfin_wdt_set_timeout(unsigned long t)
{
- u32 cnt;
+ u32 cnt, max_t, sclk;
unsigned long flags;
- stampit();
+ sclk = get_sclk();
+ max_t = -1 / sclk;
+ cnt = t * sclk;
+ stamp("maxtimeout=%us newtimeout=%lus (cnt=%#x)", max_t, t, cnt);
- cnt = t * get_sclk();
- if (cnt < get_sclk()) {
+ if (t > max_t) {
printk(KERN_WARNING PFX "timeout value is too large\n");
return -EINVAL;
}
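The new timeout check avoids a 32-bit overflow: it computes the largest representable timeout first (UINT_MAX / sclk, written as -1 / sclk on an unsigned type) and rejects anything larger, instead of testing the already-overflowed product t * sclk. A self-contained sketch of the same arithmetic, assuming a 133 MHz system clock:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative userspace stand-in for the driver's check, not kernel code. */
	static int set_timeout(uint32_t t_seconds, uint32_t sclk)
	{
		uint32_t max_t = UINT32_MAX / sclk;	/* largest timeout whose cycle count fits */

		if (t_seconds > max_t) {
			fprintf(stderr, "timeout value is too large\n");
			return -1;
		}
		printf("cnt = %u cycles\n", t_seconds * sclk);	/* safe: cannot wrap */
		return 0;
	}

	int main(void)
	{
		set_timeout(30, 133000000);	/* accepted: 30 * 133 MHz fits in 32 bits */
		set_timeout(40, 133000000);	/* rejected: 40 > UINT32_MAX / 133000000 (= 32) */
		return 0;
	}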
file:a6c5674c78e689414d1967d8b07fb90a28e12d3f -> file:0b9190754e243760ca1e8c1c08c859fae3a35ad9
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -443,7 +443,7 @@ static void hpwdt_ping(void)
static int hpwdt_change_timer(int new_margin)
{
/* Arbitrary, can't find the card's limits */
- if (new_margin < 30 || new_margin > 600) {
+ if (new_margin < 5 || new_margin > 600) {
printk(KERN_WARNING
"hpwdt: New value passed in is invalid: %d seconds.\n",
new_margin);
file:4bdb7f1a90772d780037bf2456e3656c1ad6a1ae -> file:e2ebe084986b4e953bf9ff385c837ad0a7a5201c
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -115,8 +115,37 @@ enum iTCO_chipsets {
TCO_3420, /* 3420 */
TCO_3450, /* 3450 */
TCO_EP80579, /* EP80579 */
- TCO_CPTD, /* CPT Desktop */
- TCO_CPTM, /* CPT Mobile */
+ TCO_CPT1, /* Cougar Point */
+ TCO_CPT2, /* Cougar Point Desktop */
+ TCO_CPT3, /* Cougar Point Mobile */
+ TCO_CPT4, /* Cougar Point */
+ TCO_CPT5, /* Cougar Point */
+ TCO_CPT6, /* Cougar Point */
+ TCO_CPT7, /* Cougar Point */
+ TCO_CPT8, /* Cougar Point */
+ TCO_CPT9, /* Cougar Point */
+ TCO_CPT10, /* Cougar Point */
+ TCO_CPT11, /* Cougar Point */
+ TCO_CPT12, /* Cougar Point */
+ TCO_CPT13, /* Cougar Point */
+ TCO_CPT14, /* Cougar Point */
+ TCO_CPT15, /* Cougar Point */
+ TCO_CPT16, /* Cougar Point */
+ TCO_CPT17, /* Cougar Point */
+ TCO_CPT18, /* Cougar Point */
+ TCO_CPT19, /* Cougar Point */
+ TCO_CPT20, /* Cougar Point */
+ TCO_CPT21, /* Cougar Point */
+ TCO_CPT22, /* Cougar Point */
+ TCO_CPT23, /* Cougar Point */
+ TCO_CPT24, /* Cougar Point */
+ TCO_CPT25, /* Cougar Point */
+ TCO_CPT26, /* Cougar Point */
+ TCO_CPT27, /* Cougar Point */
+ TCO_CPT28, /* Cougar Point */
+ TCO_CPT29, /* Cougar Point */
+ TCO_CPT30, /* Cougar Point */
+ TCO_CPT31, /* Cougar Point */
};
static struct {
@@ -173,8 +202,37 @@ static struct {
{"3420", 2},
{"3450", 2},
{"EP80579", 2},
- {"CPT Desktop", 2},
- {"CPT Mobile", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
{NULL, 0}
};
@@ -259,8 +317,37 @@ static struct pci_device_id iTCO_wdt_pci
{ ITCO_PCI_DEVICE(0x3b14, TCO_3420)},
{ ITCO_PCI_DEVICE(0x3b16, TCO_3450)},
{ ITCO_PCI_DEVICE(0x5031, TCO_EP80579)},
- { ITCO_PCI_DEVICE(0x1c42, TCO_CPTD)},
- { ITCO_PCI_DEVICE(0x1c43, TCO_CPTM)},
+ { ITCO_PCI_DEVICE(0x1c41, TCO_CPT1)},
+ { ITCO_PCI_DEVICE(0x1c42, TCO_CPT2)},
+ { ITCO_PCI_DEVICE(0x1c43, TCO_CPT3)},
+ { ITCO_PCI_DEVICE(0x1c44, TCO_CPT4)},
+ { ITCO_PCI_DEVICE(0x1c45, TCO_CPT5)},
+ { ITCO_PCI_DEVICE(0x1c46, TCO_CPT6)},
+ { ITCO_PCI_DEVICE(0x1c47, TCO_CPT7)},
+ { ITCO_PCI_DEVICE(0x1c48, TCO_CPT8)},
+ { ITCO_PCI_DEVICE(0x1c49, TCO_CPT9)},
+ { ITCO_PCI_DEVICE(0x1c4a, TCO_CPT10)},
+ { ITCO_PCI_DEVICE(0x1c4b, TCO_CPT11)},
+ { ITCO_PCI_DEVICE(0x1c4c, TCO_CPT12)},
+ { ITCO_PCI_DEVICE(0x1c4d, TCO_CPT13)},
+ { ITCO_PCI_DEVICE(0x1c4e, TCO_CPT14)},
+ { ITCO_PCI_DEVICE(0x1c4f, TCO_CPT15)},
+ { ITCO_PCI_DEVICE(0x1c50, TCO_CPT16)},
+ { ITCO_PCI_DEVICE(0x1c51, TCO_CPT17)},
+ { ITCO_PCI_DEVICE(0x1c52, TCO_CPT18)},
+ { ITCO_PCI_DEVICE(0x1c53, TCO_CPT19)},
+ { ITCO_PCI_DEVICE(0x1c54, TCO_CPT20)},
+ { ITCO_PCI_DEVICE(0x1c55, TCO_CPT21)},
+ { ITCO_PCI_DEVICE(0x1c56, TCO_CPT22)},
+ { ITCO_PCI_DEVICE(0x1c57, TCO_CPT23)},
+ { ITCO_PCI_DEVICE(0x1c58, TCO_CPT24)},
+ { ITCO_PCI_DEVICE(0x1c59, TCO_CPT25)},
+ { ITCO_PCI_DEVICE(0x1c5a, TCO_CPT26)},
+ { ITCO_PCI_DEVICE(0x1c5b, TCO_CPT27)},
+ { ITCO_PCI_DEVICE(0x1c5c, TCO_CPT28)},
+ { ITCO_PCI_DEVICE(0x1c5d, TCO_CPT29)},
+ { ITCO_PCI_DEVICE(0x1c5e, TCO_CPT30)},
+ { ITCO_PCI_DEVICE(0x1c5f, TCO_CPT31)},
{ 0, }, /* End of list */
};
MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
file:eab33f1dbdf7013f451af491b882973ea017f718 -> file:7b547f53f65ee9267f19dd493a6ab7321310ed4c
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -499,7 +499,7 @@ int xenbus_printf(struct xenbus_transact
#define PRINTF_BUFFER_SIZE 4096
char *printf_buffer;
- printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
+ printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH);
if (printf_buffer == NULL)
return -ENOMEM;
file:3902bf43a0883bfe4b92d384170362e53c910e0d -> file:5fb43bd19688a11e5c55a6ca1b00713344a93daa
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -114,7 +114,7 @@ static int v9fs_file_lock(struct file *f
P9_DPRINTK(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
/* No mandatory locks */
- if (__mandatory_lock(inode))
+ if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
return -ENOLCK;
if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
file:36160424427124947179e7270bfb77e03dbc1686 -> file:38ebe789b5628e9ddb6a5076b523612b5b988476
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -157,6 +157,9 @@ static int btrfs_xattr_set_acl(struct in
int ret = 0;
struct posix_acl *acl = NULL;
+ if (!is_owner_or_cap(inode))
+ return -EPERM;
+
if (value) {
acl = posix_acl_from_xattr(value, size);
if (acl == NULL) {
file:cdbb054102b9f860ee4d119dfff39e891ac29438 -> file:b9840fa0982ead5db3562b859aad727cb7d9953f
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -947,7 +947,7 @@ static noinline long btrfs_ioctl_clone(s
*/
/* the destination must be opened for writing */
- if (!(file->f_mode & FMODE_WRITE))
+ if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND))
return -EINVAL;
ret = mnt_want_write(file->f_path.mnt);
@@ -959,12 +959,17 @@ static noinline long btrfs_ioctl_clone(s
ret = -EBADF;
goto out_drop_write;
}
+
src = src_file->f_dentry->d_inode;
ret = -EINVAL;
if (src == inode)
goto out_fput;
+ /* the src must be open for reading */
+ if (!(src_file->f_mode & FMODE_READ))
+ goto out_fput;
+
ret = -EISDIR;
if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
goto out_fput;
@@ -995,7 +1000,7 @@ static noinline long btrfs_ioctl_clone(s
/* determine range to clone */
ret = -EINVAL;
- if (off >= src->i_size || off + len > src->i_size)
+ if (off + len > src->i_size || off + len < off)
goto out_unlock;
if (len == 0)
olen = len = src->i_size - off;
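The rewritten range check also catches unsigned wraparound: if off + len overflows it becomes smaller than off, which the old off >= src->i_size test could never notice. A small self-contained sketch with made-up values:

	#include <stdint.h>
	#include <stdio.h>

	/* Sketch only; off/len/i_size are byte quantities as in the ioctl above. */
	static int clone_range_ok(uint64_t off, uint64_t len, uint64_t i_size)
	{
		if (off + len > i_size || off + len < off)	/* second test = overflow */
			return 0;
		return 1;
	}

	int main(void)
	{
		printf("%d\n", clone_range_ok(0, 4096, 8192));		/* 1: range fits */
		printf("%d\n", clone_range_ok(4096, UINT64_MAX, 8192));	/* 0: off + len wraps */
		return 0;
	}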
file:b5808cdb22325f0f21a1442910690772ad60431c -> file:039b5011d83b0f1481ec200f1229db568f733a02
--- a/fs/cachefiles/security.c
+++ b/fs/cachefiles/security.c
@@ -77,6 +77,8 @@ static int cachefiles_check_cache_dir(st
/*
* check the security details of the on-disk cache
* - must be called with security override in force
+ * - must return with a security override in force - even in the case of an
+ * error
*/
int cachefiles_determine_cache_security(struct cachefiles_cache *cache,
struct dentry *root,
@@ -99,6 +101,8 @@ int cachefiles_determine_cache_security(
* which create files */
ret = set_create_files_as(new, root->d_inode);
if (ret < 0) {
+ abort_creds(new);
+ cachefiles_begin_secure(cache, _saved_cred);
_leave(" = %d [cfa]", ret);
return ret;
}
file:29f1da761bbf10fc948f16e0c77a0d75dea22125 -> file:144540764dec7624943d5e4c3677a4299f7e0219
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1033,7 +1033,7 @@ init_cifs(void)
goto out_unregister_filesystem;
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
- rc = register_key_type(&key_type_dns_resolver);
+ rc = cifs_init_dns_resolver();
if (rc)
goto out_unregister_key_type;
#endif
@@ -1045,7 +1045,7 @@ init_cifs(void)
out_unregister_resolver_key:
#ifdef CONFIG_CIFS_DFS_UPCALL
- unregister_key_type(&key_type_dns_resolver);
+ cifs_exit_dns_resolver();
out_unregister_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
@@ -1071,7 +1071,7 @@ exit_cifs(void)
cifs_proc_clean();
#ifdef CONFIG_CIFS_DFS_UPCALL
cifs_dfs_release_automount_timer();
- unregister_key_type(&key_type_dns_resolver);
+ cifs_exit_dns_resolver();
#endif
#ifdef CONFIG_CIFS_UPCALL
unregister_key_type(&cifs_spnego_key_type);
file:5d0fde18039c6780ed8e0dcad3ea3fe7ca07a094 -> file:c4dbc633361f35a2df0f8cffe18870d7ef4f159b
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -499,6 +499,7 @@ struct dfs_info3_param {
#define CIFS_FATTR_DFS_REFERRAL 0x1
#define CIFS_FATTR_DELETE_PENDING 0x2
#define CIFS_FATTR_NEED_REVAL 0x4
+#define CIFS_FATTR_INO_COLLISION 0x8
struct cifs_fattr {
u32 cf_flags;
file:5646727e33f543f70ec449b390d8955b66852dc9 -> file:05a9b776e1a8132a3853a09f20ae13d8f79abd8e
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -95,8 +95,10 @@ extern struct cifsFileInfo *cifs_new_fil
__u16 fileHandle, struct file *file,
struct vfsmount *mnt, unsigned int oflags);
extern int cifs_posix_open(char *full_path, struct inode **pinode,
- struct vfsmount *mnt, int mode, int oflags,
- __u32 *poplock, __u16 *pnetfid, int xid);
+ struct vfsmount *mnt,
+ struct super_block *sb,
+ int mode, int oflags,
+ __u32 *poplock, __u16 *pnetfid, int xid);
extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
FILE_UNIX_BASIC_INFO *info,
struct cifs_sb_info *cifs_sb);
file:941441d3e3860163a51dd5f737cfe9771f283fa9 -> file:4e6dbab84435b6b47bbcbd6b1e04b1a8729b1026
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1430,6 +1430,8 @@ CIFSSMBWrite(const int xid, struct cifsT
__u32 bytes_sent;
__u16 byte_count;
+ *nbytes = 0;
+
/* cFYI(1, ("write at %lld %d bytes", offset, count));*/
if (tcon->ses == NULL)
return -ECONNABORTED;
@@ -1512,11 +1514,18 @@ CIFSSMBWrite(const int xid, struct cifsT
cifs_stats_inc(&tcon->num_writes);
if (rc) {
cFYI(1, ("Send error in write = %d", rc));
- *nbytes = 0;
} else {
*nbytes = le16_to_cpu(pSMBr->CountHigh);
*nbytes = (*nbytes) << 16;
*nbytes += le16_to_cpu(pSMBr->Count);
+
+ /*
+ * Mask off the high 16 bits when the count of bytes written, as returned
+ * by the server, exceeds the count requested by the client. Some
+ * OS/2 servers are known to set incorrect CountHigh values.
+ */
+ if (*nbytes > count)
+ *nbytes &= 0xFFFF;
}
cifs_buf_release(pSMB);
@@ -1605,6 +1614,14 @@ CIFSSMBWrite2(const int xid, struct cifs
*nbytes = le16_to_cpu(pSMBr->CountHigh);
*nbytes = (*nbytes) << 16;
*nbytes += le16_to_cpu(pSMBr->Count);
+
+ /*
+ * Mask off the high 16 bits when the count of bytes written, as returned
+ * by the server, exceeds the count requested by the client. OS/2
+ * servers are known to set incorrect CountHigh values.
+ */
+ if (*nbytes > count)
+ *nbytes &= 0xFFFF;
}
/* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
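A stand-alone illustration of the CountHigh workaround above (a sketch, not the cifs code itself): the write count is assembled from two 16-bit halves, and when a buggy server reports more bytes than were requested the high half is discarded.

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t bytes_written(uint16_t count_high, uint16_t count_low,
				      uint32_t requested)
	{
		uint32_t nbytes = ((uint32_t)count_high << 16) + count_low;

		if (nbytes > requested)		/* bogus CountHigh from the server */
			nbytes &= 0xFFFF;
		return nbytes;
	}

	int main(void)
	{
		/* well-behaved server: 0x0001:0x2345 = 74565 bytes of an 80000-byte write */
		printf("%u\n", bytes_written(0x0001, 0x2345, 80000));
		/* OS/2-style server: garbage CountHigh on a 512-byte write, masked back to 512 */
		printf("%u\n", bytes_written(0x00AB, 0x0200, 512));
		return 0;
	}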
file:1f42f772865a54f1f45b516b090533a4a4715bf4 -> file:c3d6182d0ebe4a6cb926482a66694791eced5db4
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -183,13 +183,14 @@ cifs_new_fileinfo(struct inode *newinode
}
int cifs_posix_open(char *full_path, struct inode **pinode,
- struct vfsmount *mnt, int mode, int oflags,
- __u32 *poplock, __u16 *pnetfid, int xid)
+ struct vfsmount *mnt, struct super_block *sb,
+ int mode, int oflags,
+ __u32 *poplock, __u16 *pnetfid, int xid)
{
int rc;
FILE_UNIX_BASIC_INFO *presp_data;
__u32 posix_flags = 0;
- struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifs_fattr fattr;
cFYI(1, ("posix open %s", full_path));
@@ -241,7 +242,7 @@ int cifs_posix_open(char *full_path, str
/* get new inode and set it up */
if (*pinode == NULL) {
- *pinode = cifs_iget(mnt->mnt_sb, &fattr);
+ *pinode = cifs_iget(sb, &fattr);
if (!*pinode) {
rc = -ENOMEM;
goto posix_open_ret;
@@ -250,7 +251,8 @@ int cifs_posix_open(char *full_path, str
cifs_fattr_to_inode(*pinode, &fattr);
}
- cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags);
+ if (mnt)
+ cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags);
posix_open_ret:
kfree(presp_data);
@@ -314,13 +316,14 @@ cifs_create(struct inode *inode, struct
if (nd && (nd->flags & LOOKUP_OPEN))
oflags = nd->intent.open.flags;
else
- oflags = FMODE_READ;
+ oflags = FMODE_READ | SMB_O_CREAT;
if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
- rc = cifs_posix_open(full_path, &newinode, nd->path.mnt,
- mode, oflags, &oplock, &fileHandle, xid);
+ rc = cifs_posix_open(full_path, &newinode,
+ nd ? nd->path.mnt : NULL,
+ inode->i_sb, mode, oflags, &oplock, &fileHandle, xid);
/* EIO could indicate that (posix open) operation is not
supported, despite what server claimed in capability
negotation. EREMOTE indicates DFS junction, which is not
@@ -677,6 +680,7 @@ cifs_lookup(struct inode *parent_dir_ino
(nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
(nd->intent.open.flags & O_CREAT)) {
rc = cifs_posix_open(full_path, &newInode, nd->path.mnt,
+ parent_dir_inode->i_sb,
nd->intent.open.create_mode,
nd->intent.open.flags, &oplock,
&fileHandle, xid);
file:87948147d7ece68d64e7218c8331624410ac2196 -> file:16f31c129497e2679d11547c17ccca1e4cfffb86
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -23,12 +23,16 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/keyctl.h>
+#include <linux/key-type.h>
#include <keys/user-type.h>
#include "dns_resolve.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
+static const struct cred *dns_resolver_cache;
+
/* Checks if supplied name is IP address
* returns:
* 1 - name is IP
@@ -93,6 +97,7 @@ struct key_type key_type_dns_resolver =
int
dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
{
+ const struct cred *saved_cred;
int rc = -EAGAIN;
struct key *rkey = ERR_PTR(-EAGAIN);
char *name;
@@ -132,8 +137,15 @@ dns_resolve_server_name_to_ip(const char
goto skip_upcall;
}
+ saved_cred = override_creds(dns_resolver_cache);
rkey = request_key(&key_type_dns_resolver, name, "");
+ revert_creds(saved_cred);
if (!IS_ERR(rkey)) {
+ if (!(rkey->perm & KEY_USR_VIEW)) {
+ down_read(&rkey->sem);
+ rkey->perm |= KEY_USR_VIEW;
+ up_read(&rkey->sem);
+ }
len = rkey->type_data.x[0];
data = rkey->payload.data;
} else {
@@ -164,4 +176,61 @@ out:
return rc;
}
+int __init cifs_init_dns_resolver(void)
+{
+ struct cred *cred;
+ struct key *keyring;
+ int ret;
+
+ printk(KERN_NOTICE "Registering the %s key type\n",
+ key_type_dns_resolver.name);
+
+ /* create an override credential set with a special thread keyring in
+ * which DNS requests are cached
+ *
+ * this is used to prevent malicious redirections from being installed
+ * with add_key().
+ */
+ cred = prepare_kernel_cred(NULL);
+ if (!cred)
+ return -ENOMEM;
+
+ keyring = key_alloc(&key_type_keyring, ".dns_resolver", 0, 0, cred,
+ (KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ,
+ KEY_ALLOC_NOT_IN_QUOTA);
+ if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+ goto failed_put_cred;
+ }
+
+ ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
+ if (ret < 0)
+ goto failed_put_key;
+
+ ret = register_key_type(&key_type_dns_resolver);
+ if (ret < 0)
+ goto failed_put_key;
+
+ /* instruct request_key() to use this special keyring as a cache for
+ * the results it looks up */
+ cred->thread_keyring = keyring;
+ cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
+ dns_resolver_cache = cred;
+ return 0;
+
+failed_put_key:
+ key_put(keyring);
+failed_put_cred:
+ put_cred(cred);
+ return ret;
+}
+void __exit cifs_exit_dns_resolver(void)
+{
+ key_revoke(dns_resolver_cache->thread_keyring);
+ unregister_key_type(&key_type_dns_resolver);
+ put_cred(dns_resolver_cache);
+ printk(KERN_NOTICE "Unregistered %s key type\n",
+ key_type_dns_resolver.name);
+}
file:966e9288930be75bc9d952cde2960ef3191db212 -> file:26b9eaa9f5ee5898ae4241412e4127512fc95ec4
--- a/fs/cifs/dns_resolve.h
+++ b/fs/cifs/dns_resolve.h
@@ -24,8 +24,8 @@
#define _DNS_RESOLVE_H
#ifdef __KERNEL__
-#include <linux/key-type.h>
-extern struct key_type key_type_dns_resolver;
+extern int __init cifs_init_dns_resolver(void);
+extern void __exit cifs_exit_dns_resolver(void);
extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr);
#endif /* KERNEL */
file:429337eb7afec9f0933c72927eedcc7412207c88 -> file:5d1099a20cceda7f33a85869bf810a0b4f51af55
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -295,10 +295,12 @@ int cifs_open(struct inode *inode, struc
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
int oflags = (int) cifs_posix_convert_flags(file->f_flags);
+ oflags |= SMB_O_CREAT;
/* can not refresh inode info since size could be stale */
rc = cifs_posix_open(full_path, &inode, file->f_path.mnt,
- cifs_sb->mnt_file_mode /* ignored */,
- oflags, &oplock, &netfid, xid);
+ inode->i_sb,
+ cifs_sb->mnt_file_mode /* ignored */,
+ oflags, &oplock, &netfid, xid);
if (rc == 0) {
cFYI(1, ("posix open succeeded"));
/* no need for special case handling of setting mode
@@ -510,8 +512,9 @@ reopen_error_exit:
int oflags = (int) cifs_posix_convert_flags(file->f_flags);
/* can not refresh inode info since size could be stale */
rc = cifs_posix_open(full_path, NULL, file->f_path.mnt,
- cifs_sb->mnt_file_mode /* ignored */,
- oflags, &oplock, &netfid, xid);
+ inode->i_sb,
+ cifs_sb->mnt_file_mode /* ignored */,
+ oflags, &oplock, &netfid, xid);
if (rc == 0) {
cFYI(1, ("posix reopen succeeded"));
goto reopen_success;
file:cababd8a52df32097f3219327ef491254ced34f0 -> file:303fd7f4dfe1a37a02dac79d38aae5bd7ea2e4c6
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -610,6 +610,16 @@ cifs_find_inode(struct inode *inode, voi
if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid)
return 0;
+ /*
+ * uh oh -- it's a directory. We can't use it since hardlinked dirs are
+ * verboten. Disable serverino and return it as if it were found; the
+ * caller can discard it, generate a uniqueid, and retry the find.
+ */
+ if (S_ISDIR(inode->i_mode) && !list_empty(&inode->i_dentry)) {
+ fattr->cf_flags |= CIFS_FATTR_INO_COLLISION;
+ cifs_autodisable_serverino(CIFS_SB(inode->i_sb));
+ }
+
return 1;
}
@@ -629,15 +639,22 @@ cifs_iget(struct super_block *sb, struct
unsigned long hash;
struct inode *inode;
+retry_iget5_locked:
cFYI(1, ("looking for uniqueid=%llu", fattr->cf_uniqueid));
/* hash down to 32-bits on 32-bit arch */
hash = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
inode = iget5_locked(sb, hash, cifs_find_inode, cifs_init_inode, fattr);
-
- /* we have fattrs in hand, update the inode */
if (inode) {
+ /* was there a problematic inode number collision? */
+ if (fattr->cf_flags & CIFS_FATTR_INO_COLLISION) {
+ iput(inode);
+ fattr->cf_uniqueid = iunique(sb, ROOT_I);
+ fattr->cf_flags &= ~CIFS_FATTR_INO_COLLISION;
+ goto retry_iget5_locked;
+ }
+
cifs_fattr_to_inode(inode, fattr);
if (sb->s_flags & MS_NOATIME)
inode->i_flags |= S_NOATIME | S_NOCMTIME;
@@ -1267,6 +1284,10 @@ cifs_do_rename(int xid, struct dentry *f
if (rc == 0 || rc != -ETXTBSY)
return rc;
+ /* open-file renames don't work across directories */
+ if (to_dentry->d_parent != from_dentry->d_parent)
+ return rc;
+
/* open the file to be renamed -- we need DELETE perms */
rc = CIFSSMBOpen(xid, pTcon, fromPath, FILE_OPEN, DELETE,
CREATE_NOT_DIR, &srcfid, &oplock, NULL,
file:7085a6275c4c9f0635383033dde2ee572bb046b0 -> file:6d6ff4fe60ea4fdfdfffb1c6718746266028d92b
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -723,15 +723,7 @@ ssetup_ntlmssp_authenticate:
/* calculate session key */
setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
- if (first_time) /* should this be moved into common code
- with similar ntlmv2 path? */
- /* cifs_calculate_ntlmv2_mac_key(ses->server->mac_signing_key,
- response BB FIXME, v2_sess_key); */
-
- /* copy session key */
-
- /* memcpy(bcc_ptr, (char *)ntlm_session_key,LM2_SESS_KEY_SIZE);
- bcc_ptr += LM2_SESS_KEY_SIZE; */
+ /* FIXME: calculate MAC key */
memcpy(bcc_ptr, (char *)v2_sess_key,
sizeof(struct ntlmv2_resp));
bcc_ptr += sizeof(struct ntlmv2_resp);
file:728f07ebc593db3d6287339cdfa77d5d3261bf8e -> file:268b7d19affa931abc91bcd2aebf977adee9f54b
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -638,38 +638,17 @@ out_lock:
return rc;
}
-static int
-ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
+static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
+ size_t *bufsiz)
{
+ struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
char *lower_buf;
- size_t lower_bufsiz;
- struct dentry *lower_dentry;
- struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
- char *plaintext_name;
- size_t plaintext_name_size;
+ size_t lower_bufsiz = PATH_MAX;
mm_segment_t old_fs;
int rc;
- lower_dentry = ecryptfs_dentry_to_lower(dentry);
- if (!lower_dentry->d_inode->i_op->readlink) {
- rc = -EINVAL;
- goto out;
- }
- mount_crypt_stat = &ecryptfs_superblock_to_private(
- dentry->d_sb)->mount_crypt_stat;
- /*
- * If the lower filename is encrypted, it will result in a significantly
- * longer name. If needed, truncate the name after decode and decrypt.
- */
- if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)
- lower_bufsiz = PATH_MAX;
- else
- lower_bufsiz = bufsiz;
- /* Released in this function */
lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL);
- if (lower_buf == NULL) {
- printk(KERN_ERR "%s: Out of memory whilst attempting to "
- "kmalloc [%zd] bytes\n", __func__, lower_bufsiz);
+ if (!lower_buf) {
rc = -ENOMEM;
goto out;
}
@@ -679,29 +658,31 @@ ecryptfs_readlink(struct dentry *dentry,
(char __user *)lower_buf,
lower_bufsiz);
set_fs(old_fs);
- if (rc >= 0) {
- rc = ecryptfs_decode_and_decrypt_filename(&plaintext_name,
- &plaintext_name_size,
- dentry, lower_buf,
- rc);
- if (rc) {
- printk(KERN_ERR "%s: Error attempting to decode and "
- "decrypt filename; rc = [%d]\n", __func__,
- rc);
- goto out_free_lower_buf;
- }
- /* Check for bufsiz <= 0 done in sys_readlinkat() */
- rc = copy_to_user(buf, plaintext_name,
- min((size_t) bufsiz, plaintext_name_size));
- if (rc)
- rc = -EFAULT;
- else
- rc = plaintext_name_size;
- kfree(plaintext_name);
- fsstack_copy_attr_atime(dentry->d_inode, lower_dentry->d_inode);
- }
-out_free_lower_buf:
+ if (rc < 0)
+ goto out;
+ lower_bufsiz = rc;
+ rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry,
+ lower_buf, lower_bufsiz);
+out:
kfree(lower_buf);
+ return rc;
+}
+
+static int
+ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
+{
+ char *kbuf;
+ size_t kbufsiz, copied;
+ int rc;
+
+ rc = ecryptfs_readlink_lower(dentry, &kbuf, &kbufsiz);
+ if (rc)
+ goto out;
+ copied = min_t(size_t, bufsiz, kbufsiz);
+ rc = copy_to_user(buf, kbuf, copied) ? -EFAULT : copied;
+ kfree(kbuf);
+ fsstack_copy_attr_atime(dentry->d_inode,
+ ecryptfs_dentry_to_lower(dentry)->d_inode);
out:
return rc;
}
@@ -971,6 +952,28 @@ out:
return rc;
}
+int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat)
+{
+ struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
+ int rc = 0;
+
+ mount_crypt_stat = &ecryptfs_superblock_to_private(
+ dentry->d_sb)->mount_crypt_stat;
+ generic_fillattr(dentry->d_inode, stat);
+ if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) {
+ char *target;
+ size_t targetsiz;
+
+ rc = ecryptfs_readlink_lower(dentry, &target, &targetsiz);
+ if (!rc) {
+ kfree(target);
+ stat->size = targetsiz;
+ }
+ }
+ return rc;
+}
+
int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
@@ -995,7 +998,7 @@ ecryptfs_setxattr(struct dentry *dentry,
lower_dentry = ecryptfs_dentry_to_lower(dentry);
if (!lower_dentry->d_inode->i_op->setxattr) {
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
goto out;
}
mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1013,7 +1016,7 @@ ecryptfs_getxattr_lower(struct dentry *l
int rc = 0;
if (!lower_dentry->d_inode->i_op->getxattr) {
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
goto out;
}
mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1040,7 +1043,7 @@ ecryptfs_listxattr(struct dentry *dentry
lower_dentry = ecryptfs_dentry_to_lower(dentry);
if (!lower_dentry->d_inode->i_op->listxattr) {
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
goto out;
}
mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1057,7 +1060,7 @@ static int ecryptfs_removexattr(struct d
lower_dentry = ecryptfs_dentry_to_lower(dentry);
if (!lower_dentry->d_inode->i_op->removexattr) {
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
goto out;
}
mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1088,6 +1091,7 @@ const struct inode_operations ecryptfs_s
.put_link = ecryptfs_put_link,
.permission = ecryptfs_permission,
.setattr = ecryptfs_setattr,
+ .getattr = ecryptfs_getattr_link,
.setxattr = ecryptfs_setxattr,
.getxattr = ecryptfs_getxattr,
.listxattr = ecryptfs_listxattr,
file:f1c17e87c5fbc852443f01da834f256f3c33262b -> file:3dfe7ce86b1b4b61f3dcf161ea8dd1d7fb96f307
--- a/fs/ecryptfs/messaging.c
+++ b/fs/ecryptfs/messaging.c
@@ -30,9 +30,9 @@ static struct mutex ecryptfs_msg_ctx_lis
static struct hlist_head *ecryptfs_daemon_hash;
struct mutex ecryptfs_daemon_hash_mux;
-static int ecryptfs_hash_buckets;
+static int ecryptfs_hash_bits;
#define ecryptfs_uid_hash(uid) \
- hash_long((unsigned long)uid, ecryptfs_hash_buckets)
+ hash_long((unsigned long)uid, ecryptfs_hash_bits)
static u32 ecryptfs_msg_counter;
static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr;
@@ -485,18 +485,19 @@ int ecryptfs_init_messaging(void)
}
mutex_init(&ecryptfs_daemon_hash_mux);
mutex_lock(&ecryptfs_daemon_hash_mux);
- ecryptfs_hash_buckets = 1;
- while (ecryptfs_number_of_users >> ecryptfs_hash_buckets)
- ecryptfs_hash_buckets++;
+ ecryptfs_hash_bits = 1;
+ while (ecryptfs_number_of_users >> ecryptfs_hash_bits)
+ ecryptfs_hash_bits++;
ecryptfs_daemon_hash = kmalloc((sizeof(struct hlist_head)
- * ecryptfs_hash_buckets), GFP_KERNEL);
+ * (1 << ecryptfs_hash_bits)),
+ GFP_KERNEL);
if (!ecryptfs_daemon_hash) {
rc = -ENOMEM;
printk(KERN_ERR "%s: Failed to allocate memory\n", __func__);
mutex_unlock(&ecryptfs_daemon_hash_mux);
goto out;
}
- for (i = 0; i < ecryptfs_hash_buckets; i++)
+ for (i = 0; i < (1 << ecryptfs_hash_bits); i++)
INIT_HLIST_HEAD(&ecryptfs_daemon_hash[i]);
mutex_unlock(&ecryptfs_daemon_hash_mux);
ecryptfs_msg_ctx_arr = kmalloc((sizeof(struct ecryptfs_msg_ctx)
@@ -553,7 +554,7 @@ void ecryptfs_release_messaging(void)
int i;
mutex_lock(&ecryptfs_daemon_hash_mux);
- for (i = 0; i < ecryptfs_hash_buckets; i++) {
+ for (i = 0; i < (1 << ecryptfs_hash_bits); i++) {
int rc;
hlist_for_each_entry(daemon, elem,
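The rename above fixes a units bug: hash_long() takes a number of bits, so the table needs 1 << bits list heads, while the old code allocated only "bits" heads and then hashed into a much larger range. A self-contained sketch of the sizing, with a made-up user count:

	#include <stdio.h>

	int main(void)
	{
		int number_of_users = 100;
		int hash_bits = 1;

		while (number_of_users >> hash_bits)
			hash_bits++;		/* smallest bits with 2^bits > users */

		printf("hash_bits = %d, buckets = %d\n", hash_bits, 1 << hash_bits);
		/* prints hash_bits = 7, buckets = 128; the old code would have
		   allocated only 7 hlist heads while hashing into 128 slots */
		return 0;
	}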
file:b15a43a80ab78cc7dbb9e05dedef0dbb0dd0e6ee -> file:1a037f77aa52c1a0abcccfcfd24dcb9cceb63bff
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -85,7 +85,6 @@ static void ecryptfs_destroy_inode(struc
if (lower_dentry->d_inode) {
fput(inode_info->lower_file);
inode_info->lower_file = NULL;
- d_drop(lower_dentry);
}
}
ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat);
file:4cfab1cc75c03a0bb7654e5c8150c2009fe4aa11 -> file:d91e9d829bc1bcc52cce4c4965d375a3eb7d356f
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -608,7 +608,7 @@ int exofs_make_empty(struct inode *inode
de->inode_no = cpu_to_le64(parent->i_ino);
memcpy(de->name, PARENT_DIR, sizeof(PARENT_DIR));
exofs_set_de_type(de, inode);
- kunmap_atomic(page, KM_USER0);
+ kunmap_atomic(kaddr, KM_USER0);
err = exofs_commit_chunk(page, 0, chunk_size);
fail:
page_cache_release(page);
file:9dc93168e2623ae09d26b1c7287f4fc30975e077 -> file:aa6fb6b9d3ad777624b5b918467ea58939d50c72
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -84,9 +84,11 @@ int ext4_check_dir_entry(const char *fun
if (error_msg != NULL)
ext4_error(dir->i_sb, function,
- "bad entry in directory #%lu: %s - "
- "offset=%u, inode=%u, rec_len=%d, name_len=%d",
- dir->i_ino, error_msg, offset,
+ "bad entry in directory #%lu: %s - block=%llu"
+ "offset=%u(%u), inode=%u, rec_len=%d, name_len=%d",
+ dir->i_ino, error_msg,
+ (unsigned long long) bh->b_blocknr,
+ (unsigned) (offset%bh->b_size), offset,
le32_to_cpu(de->inode),
rlen, de->name_len);
return error_msg == NULL ? 1 : 0;
@@ -109,7 +111,7 @@ static int ext4_readdir(struct file *fil
if (EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_COMPAT_DIR_INDEX) &&
- ((EXT4_I(inode)->i_flags & EXT4_INDEX_FL) ||
+ ((ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) ||
((inode->i_size >> sb->s_blocksize_bits) == 1))) {
err = ext4_dx_readdir(filp, dirent, filldir);
if (err != ERR_BAD_DX_DIR) {
@@ -120,7 +122,7 @@ static int ext4_readdir(struct file *fil
* We don't set the inode dirty flag since it's not
* critical that it get flushed back to the disk.
*/
- EXT4_I(filp->f_path.dentry->d_inode)->i_flags &= ~EXT4_INDEX_FL;
+ ext4_clear_inode_flag(filp->f_path.dentry->d_inode, EXT4_INODE_INDEX);
}
stored = 0;
offset = filp->f_pos & (sb->s_blocksize - 1);
file:d0a2afbab009de427b26b64176679ba58d12782a -> file:fa6b79fb594dc2068fe920a3c3b8ab406b14ca9d
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -29,6 +29,9 @@
#include <linux/wait.h>
#include <linux/blockgroup_lock.h>
#include <linux/percpu_counter.h>
+#ifdef __KERNEL__
+#include <linux/compat.h>
+#endif
/*
* The fourth extended filesystem constants/structures
@@ -139,8 +142,8 @@ typedef struct ext4_io_end {
struct inode *inode; /* file being written to */
unsigned int flag; /* unwritten or not */
int error; /* I/O error code */
- ext4_lblk_t offset; /* offset in the file */
- size_t size; /* size of the extent */
+ loff_t offset; /* offset in the file */
+ ssize_t size; /* size of the extent */
struct work_struct work; /* data work queue */
} ext4_io_end_t;
@@ -284,10 +287,12 @@ struct flex_groups {
#define EXT4_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
#define EXT4_HUGE_FILE_FL 0x00040000 /* Set to each huge file */
#define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */
+#define EXT4_EA_INODE_FL 0x00200000 /* Inode used for large EA */
+#define EXT4_EOFBLOCKS_FL 0x00400000 /* Blocks allocated beyond EOF */
#define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */
-#define EXT4_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */
-#define EXT4_FL_USER_MODIFIABLE 0x000B80FF /* User modifiable flags */
+#define EXT4_FL_USER_VISIBLE 0x004BDFFF /* User visible flags */
+#define EXT4_FL_USER_MODIFIABLE 0x004B80FF /* User modifiable flags */
/* Flags that should be inherited by new inodes from their parent. */
#define EXT4_FL_INHERITED (EXT4_SECRM_FL | EXT4_UNRM_FL | EXT4_COMPR_FL |\
@@ -314,15 +319,81 @@ static inline __u32 ext4_mask_flags(umod
}
/*
- * Inode dynamic state flags
+ * Inode flags used for atomic set/get
*/
-#define EXT4_STATE_JDATA 0x00000001 /* journaled data exists */
-#define EXT4_STATE_NEW 0x00000002 /* inode is newly created */
-#define EXT4_STATE_XATTR 0x00000004 /* has in-inode xattrs */
-#define EXT4_STATE_NO_EXPAND 0x00000008 /* No space for expansion */
-#define EXT4_STATE_DA_ALLOC_CLOSE 0x00000010 /* Alloc DA blks on close */
-#define EXT4_STATE_EXT_MIGRATE 0x00000020 /* Inode is migrating */
-#define EXT4_STATE_DIO_UNWRITTEN 0x00000040 /* need convert on dio done*/
+enum {
+ EXT4_INODE_SECRM = 0, /* Secure deletion */
+ EXT4_INODE_UNRM = 1, /* Undelete */
+ EXT4_INODE_COMPR = 2, /* Compress file */
+ EXT4_INODE_SYNC = 3, /* Synchronous updates */
+ EXT4_INODE_IMMUTABLE = 4, /* Immutable file */
+ EXT4_INODE_APPEND = 5, /* writes to file may only append */
+ EXT4_INODE_NODUMP = 6, /* do not dump file */
+ EXT4_INODE_NOATIME = 7, /* do not update atime */
+/* Reserved for compression usage... */
+ EXT4_INODE_DIRTY = 8,
+ EXT4_INODE_COMPRBLK = 9, /* One or more compressed clusters */
+ EXT4_INODE_NOCOMPR = 10, /* Don't compress */
+ EXT4_INODE_ECOMPR = 11, /* Compression error */
+/* End compression flags --- maybe not all used */
+ EXT4_INODE_INDEX = 12, /* hash-indexed directory */
+ EXT4_INODE_IMAGIC = 13, /* AFS directory */
+ EXT4_INODE_JOURNAL_DATA = 14, /* file data should be journaled */
+ EXT4_INODE_NOTAIL = 15, /* file tail should not be merged */
+ EXT4_INODE_DIRSYNC = 16, /* dirsync behaviour (directories only) */
+ EXT4_INODE_TOPDIR = 17, /* Top of directory hierarchies*/
+ EXT4_INODE_HUGE_FILE = 18, /* Set to each huge file */
+ EXT4_INODE_EXTENTS = 19, /* Inode uses extents */
+ EXT4_INODE_EA_INODE = 21, /* Inode used for large EA */
+ EXT4_INODE_EOFBLOCKS = 22, /* Blocks allocated beyond EOF */
+ EXT4_INODE_RESERVED = 31, /* reserved for ext4 lib */
+};
+
+#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1 << EXT4_INODE_##FLAG))
+#define CHECK_FLAG_VALUE(FLAG) if (!TEST_FLAG_VALUE(FLAG)) { \
+ printk(KERN_EMERG "EXT4 flag fail: " #FLAG ": %d %d\n", \
+ EXT4_##FLAG##_FL, EXT4_INODE_##FLAG); BUG_ON(1); }
+
+/*
+ * Since it's pretty easy to mix up bit numbers and hex values, and we
+ * can't do a compile-time test for ENUM values, we use a run-time
+ * test to make sure that EXT4_XXX_FL is consistent with respect to
+ * EXT4_INODE_XXX. If all is well the printk and BUG_ON will all drop
+ * out so it won't cost any extra space in the compiled kernel image.
+ * But it's important that these values are the same, since we are
+ * using EXT4_INODE_XXX to test for the flag values, but EXT4_XX_FL
+ * must be consistent with the values of FS_XXX_FL defined in
+ * include/linux/fs.h and the on-disk values found in ext2, ext3, and
+ * ext4 filesystems, and of course the values defined in e2fsprogs.
+ *
+ * It's not paranoia if the Murphy's Law really *is* out to get you. :-)
+ */
+static inline void ext4_check_flag_values(void)
+{
+ CHECK_FLAG_VALUE(SECRM);
+ CHECK_FLAG_VALUE(UNRM);
+ CHECK_FLAG_VALUE(COMPR);
+ CHECK_FLAG_VALUE(SYNC);
+ CHECK_FLAG_VALUE(IMMUTABLE);
+ CHECK_FLAG_VALUE(APPEND);
+ CHECK_FLAG_VALUE(NODUMP);
+ CHECK_FLAG_VALUE(NOATIME);
+ CHECK_FLAG_VALUE(DIRTY);
+ CHECK_FLAG_VALUE(COMPRBLK);
+ CHECK_FLAG_VALUE(NOCOMPR);
+ CHECK_FLAG_VALUE(ECOMPR);
+ CHECK_FLAG_VALUE(INDEX);
+ CHECK_FLAG_VALUE(IMAGIC);
+ CHECK_FLAG_VALUE(JOURNAL_DATA);
+ CHECK_FLAG_VALUE(NOTAIL);
+ CHECK_FLAG_VALUE(DIRSYNC);
+ CHECK_FLAG_VALUE(TOPDIR);
+ CHECK_FLAG_VALUE(HUGE_FILE);
+ CHECK_FLAG_VALUE(EXTENTS);
+ CHECK_FLAG_VALUE(EA_INODE);
+ CHECK_FLAG_VALUE(EOFBLOCKS);
+ CHECK_FLAG_VALUE(RESERVED);
+}
/* Used to pass group descriptor data when online resize is done */
struct ext4_new_group_input {
@@ -335,6 +406,18 @@ struct ext4_new_group_input {
__u16 unused;
};
+#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
+struct compat_ext4_new_group_input {
+ u32 group;
+ compat_u64 block_bitmap;
+ compat_u64 inode_bitmap;
+ compat_u64 inode_table;
+ u32 blocks_count;
+ u16 reserved_blocks;
+ u16 unused;
+};
+#endif
+
/* The struct ext4_new_group_input in kernel space, with free_blocks_count */
struct ext4_new_group_data {
__u32 group;
@@ -361,14 +444,11 @@ struct ext4_new_group_data {
so set the magic i_delalloc_reserve_flag after taking the
inode allocation semaphore for */
#define EXT4_GET_BLOCKS_DELALLOC_RESERVE 0x0004
- /* Call ext4_da_update_reserve_space() after successfully
- allocating the blocks */
-#define EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE 0x0008
/* caller is from the direct IO path, request to creation of an
unitialized extents if not allocated, split the uninitialized
extent if blocks has been preallocated already*/
-#define EXT4_GET_BLOCKS_DIO 0x0010
-#define EXT4_GET_BLOCKS_CONVERT 0x0020
+#define EXT4_GET_BLOCKS_DIO 0x0008
+#define EXT4_GET_BLOCKS_CONVERT 0x0010
#define EXT4_GET_BLOCKS_DIO_CREATE_EXT (EXT4_GET_BLOCKS_DIO|\
EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
/* Convert extent to initialized after direct IO complete */
@@ -397,6 +477,7 @@ struct ext4_new_group_data {
#define EXT4_IOC_ALLOC_DA_BLKS _IO('f', 12)
#define EXT4_IOC_MOVE_EXT _IOWR('f', 15, struct move_extent)
+#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
* ioctl commands in 32 bit emulation
*/
@@ -407,11 +488,13 @@ struct ext4_new_group_data {
#define EXT4_IOC32_GETRSVSZ _IOR('f', 5, int)
#define EXT4_IOC32_SETRSVSZ _IOW('f', 6, int)
#define EXT4_IOC32_GROUP_EXTEND _IOW('f', 7, unsigned int)
+#define EXT4_IOC32_GROUP_ADD _IOW('f', 8, struct compat_ext4_new_group_input)
#ifdef CONFIG_JBD2_DEBUG
#define EXT4_IOC32_WAIT_FOR_READONLY _IOR('f', 99, int)
#endif
#define EXT4_IOC32_GETVERSION_OLD FS_IOC32_GETVERSION
#define EXT4_IOC32_SETVERSION_OLD FS_IOC32_SETVERSION
+#endif
/*
@@ -615,9 +698,8 @@ struct ext4_ext_cache {
*/
struct ext4_inode_info {
__le32 i_data[15]; /* unconverted */
- __u32 i_flags;
- ext4_fsblk_t i_file_acl;
__u32 i_dtime;
+ ext4_fsblk_t i_file_acl;
/*
* i_block_group is the number of the block group which contains
@@ -627,7 +709,8 @@ struct ext4_inode_info {
* near to their parent directory's inode.
*/
ext4_group_t i_block_group;
- __u32 i_state; /* Dynamic state flags for ext4 */
+ unsigned long i_state_flags; /* Dynamic state flags */
+ unsigned long i_flags;
ext4_lblk_t i_dir_start_lookup;
#ifdef CONFIG_EXT4_FS_XATTR
@@ -693,6 +776,8 @@ struct ext4_inode_info {
unsigned int i_reserved_meta_blocks;
unsigned int i_allocated_meta_blocks;
unsigned short i_delalloc_reserved_flag;
+ sector_t i_da_metadata_calc_last_lblock;
+ int i_da_metadata_calc_len;
/* on-disk additional length */
__u16 i_extra_isize;
@@ -1045,6 +1130,37 @@ static inline int ext4_valid_inum(struct
(ino >= EXT4_FIRST_INO(sb) &&
ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
}
+
+/*
+ * Inode dynamic state flags
+ */
+enum {
+ EXT4_STATE_JDATA, /* journaled data exists */
+ EXT4_STATE_NEW, /* inode is newly created */
+ EXT4_STATE_XATTR, /* has in-inode xattrs */
+ EXT4_STATE_NO_EXPAND, /* No space for expansion */
+ EXT4_STATE_DA_ALLOC_CLOSE, /* Alloc DA blks on close */
+ EXT4_STATE_EXT_MIGRATE, /* Inode is migrating */
+ EXT4_STATE_DIO_UNWRITTEN, /* need convert on dio done*/
+ EXT4_STATE_NEWENTRY, /* File just added to dir */
+};
+
+#define EXT4_INODE_BIT_FNS(name, field) \
+static inline int ext4_test_inode_##name(struct inode *inode, int bit) \
+{ \
+ return test_bit(bit, &EXT4_I(inode)->i_##field); \
+} \
+static inline void ext4_set_inode_##name(struct inode *inode, int bit) \
+{ \
+ set_bit(bit, &EXT4_I(inode)->i_##field); \
+} \
+static inline void ext4_clear_inode_##name(struct inode *inode, int bit) \
+{ \
+ clear_bit(bit, &EXT4_I(inode)->i_##field); \
+}
+
+EXT4_INODE_BIT_FNS(flag, flags)
+EXT4_INODE_BIT_FNS(state, state_flags)
#else
/* Assume that user mode programs are passing in an ext4fs superblock, not
* a kernel struct super_block. This will allow us to call the feature-test
@@ -1229,7 +1345,7 @@ struct ext4_dir_entry_2 {
#define is_dx(dir) (EXT4_HAS_COMPAT_FEATURE(dir->i_sb, \
EXT4_FEATURE_COMPAT_DIR_INDEX) && \
- (EXT4_I(dir)->i_flags & EXT4_INDEX_FL))
+ ext4_test_inode_flag((dir), EXT4_INODE_INDEX))
#define EXT4_DIR_LINK_MAX(dir) (!is_dx(dir) && (dir)->i_nlink >= EXT4_LINK_MAX)
#define EXT4_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2 || (dir)->i_nlink == 1)
@@ -1438,6 +1554,8 @@ extern int ext4_block_truncate_page(hand
extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
extern qsize_t *ext4_get_reserved_space(struct inode *inode);
extern int flush_aio_dio_completed_IO(struct inode *inode);
+extern void ext4_da_update_reserve_space(struct inode *inode,
+ int used, int quota_claim);
/* ioctl.c */
extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long);
@@ -1637,6 +1755,7 @@ struct ext4_group_info {
ext4_grpblk_t bb_first_free; /* first free block */
ext4_grpblk_t bb_free; /* total free blocks */
ext4_grpblk_t bb_fragments; /* nr of freespace fragments */
+ ext4_grpblk_t bb_largest_free_order;/* order of largest frag in BG */
struct list_head bb_prealloc_list;
#ifdef DOUBLE_CHECK
void *bb_bitmap;
@@ -1740,7 +1859,7 @@ extern void ext4_ext_release(struct supe
extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset,
loff_t len);
extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
- loff_t len);
+ ssize_t len);
extern int ext4_get_blocks(handle_t *handle, struct inode *inode,
sector_t block, unsigned int max_blocks,
struct buffer_head *bh, int flags);
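The CHECK_FLAG_VALUE machinery added above simply verifies that each new bit number matches its legacy hex mask, i.e. EXT4_XXX_FL == 1 << EXT4_INODE_XXX. A minimal sketch of the same idea with only two flags reproduced (values copied from the definitions above):

	#include <assert.h>
	#include <stdio.h>

	#define EXAMPLE_SYNC_FL		0x00000008
	#define EXAMPLE_EXTENTS_FL	0x00080000

	enum { EXAMPLE_INODE_SYNC = 3, EXAMPLE_INODE_EXTENTS = 19 };

	#define TEST_FLAG_VALUE(FLAG) \
		(EXAMPLE_##FLAG##_FL == (1U << EXAMPLE_INODE_##FLAG))

	int main(void)
	{
		assert(TEST_FLAG_VALUE(SYNC));		/* 1 << 3  == 0x8 */
		assert(TEST_FLAG_VALUE(EXTENTS));	/* 1 << 19 == 0x80000 */
		printf("flag values consistent\n");
		return 0;
	}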
file:2ca686454e875c976ff969d5ecf7e437d0dadd40 -> file:bdb6ce7e2eb48d08c1bf006e28fdc7b27a8651dd
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -225,7 +225,8 @@ static inline void ext4_ext_mark_initial
ext->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ext));
}
-extern int ext4_ext_calc_metadata_amount(struct inode *inode, int blocks);
+extern int ext4_ext_calc_metadata_amount(struct inode *inode,
+ sector_t lblocks);
extern ext4_fsblk_t ext_pblock(struct ext4_extent *ex);
extern ext4_fsblk_t idx_pblock(struct ext4_extent_idx *);
extern void ext4_ext_store_pblock(struct ext4_extent *, ext4_fsblk_t);
file:6a9409920deef2f6040b8d7279114cfc0612221d -> file:496249aeec965805afcd01082f821a13e181f17f
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -89,7 +89,7 @@ int __ext4_handle_dirty_metadata(const c
ext4_journal_abort_handle(where, __func__, bh,
handle, err);
} else {
- if (inode && bh)
+ if (inode)
mark_buffer_dirty_inode(bh, inode);
else
mark_buffer_dirty(bh);
file:1892a7763426e8f6021990256cf3056eb9a86e16 -> file:386095d5b7606394a89df86bd47a6cdd65ac95ae
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -282,7 +282,7 @@ static inline int ext4_should_journal_da
return 1;
if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
return 1;
- if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
+ if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
return 1;
return 0;
}
@@ -293,7 +293,7 @@ static inline int ext4_should_order_data
return 0;
if (!S_ISREG(inode->i_mode))
return 0;
- if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
+ if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
return 0;
if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
return 1;
@@ -306,7 +306,7 @@ static inline int ext4_should_writeback_
return 0;
if (EXT4_JOURNAL(inode) == NULL)
return 1;
- if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
+ if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
return 0;
if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
return 1;
file:8b8bae4c0cf4a0c8c2d043569858106708902b26 -> file:99482eae7b378292ee1d09d5dd947050f6b10a35
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -107,11 +107,8 @@ static int ext4_ext_truncate_extend_rest
if (err <= 0)
return err;
err = ext4_truncate_restart_trans(handle, inode, needed);
- /*
- * We have dropped i_data_sem so someone might have cached again
- * an extent we are going to truncate.
- */
- ext4_ext_invalidate_cache(inode);
+ if (err == 0)
+ err = -EAGAIN;
return err;
}
@@ -296,29 +293,44 @@ static inline int ext4_ext_space_root_id
* to allocate @blocks
* Worse case is one block per extent
*/
-int ext4_ext_calc_metadata_amount(struct inode *inode, int blocks)
+int ext4_ext_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
- int lcap, icap, rcap, leafs, idxs, num;
- int newextents = blocks;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ int idxs, num = 0;
- rcap = ext4_ext_space_root_idx(inode, 0);
- lcap = ext4_ext_space_block(inode, 0);
- icap = ext4_ext_space_block_idx(inode, 0);
+ idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
+ / sizeof(struct ext4_extent_idx));
- /* number of new leaf blocks needed */
- num = leafs = (newextents + lcap - 1) / lcap;
+ /*
+ * If the new delayed allocation block is contiguous with the
+ * previous da block, it can share index blocks with the
+ * previous block, so we only need to allocate a new index
+ * block every idxs leaf blocks. At idxs**2 blocks, we need
+ * an additional index block, and at idxs**3 blocks, yet
+ * another index block.
+ */
+ if (ei->i_da_metadata_calc_len &&
+ ei->i_da_metadata_calc_last_lblock+1 == lblock) {
+ if ((ei->i_da_metadata_calc_len % idxs) == 0)
+ num++;
+ if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
+ num++;
+ if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
+ num++;
+ ei->i_da_metadata_calc_len = 0;
+ } else
+ ei->i_da_metadata_calc_len++;
+ ei->i_da_metadata_calc_last_lblock++;
+ return num;
+ }
/*
- * Worse case, we need separate index block(s)
- * to link all new leaf blocks
+ * In the worst case we need a new set of index blocks at
+ * every level of the inode's extent tree.
*/
- idxs = (leafs + icap - 1) / icap;
- do {
- num += idxs;
- idxs = (idxs + icap - 1) / icap;
- } while (idxs > rcap);
-
- return num;
+ ei->i_da_metadata_calc_len = 1;
+ ei->i_da_metadata_calc_last_lblock = lblock;
+ return ext_depth(inode) + 1;
}
static int
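To make the new estimate concrete: with 4 KiB blocks and the 12-byte on-disk sizes of ext4_extent_header and ext4_extent_idx (both figures are assumptions stated in the code below), idxs = (4096 - 12) / 12 = 340, so one extra leaf index block is charged every 340 contiguous delayed-allocation blocks, another every 340^2, and so on. A userspace sketch of the running count:

	#include <stdio.h>

	int main(void)
	{
		int idxs = (4096 - 12) / 12;	/* index entries per block: 340 */
		long calc_len = 0;		/* contiguous da blocks seen so far */
		long extra = 0;			/* index blocks reserved on top */
		long blk;

		for (blk = 0; blk < 400000; blk++) {	/* one contiguous da run */
			if (calc_len && calc_len % idxs == 0)
				extra++;		/* leaf-level index block */
			if (calc_len && calc_len % (idxs * idxs) == 0)
				extra++;		/* second-level index block */
			if (calc_len && calc_len % (idxs * idxs * idxs) == 0)
				extra++;		/* third level (not reached here) */
			calc_len++;
		}
		printf("%ld extra index blocks for %ld data blocks\n", extra, blk);
		/* prints 1179 for 400000, far below the old worst-case estimate */
		return 0;
	}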
@@ -2248,7 +2260,7 @@ static int ext4_ext_remove_space(struct
int depth = ext_depth(inode);
struct ext4_ext_path *path;
handle_t *handle;
- int i = 0, err = 0;
+ int i, err;
ext_debug("truncate since %u\n", start);
@@ -2257,23 +2269,26 @@ static int ext4_ext_remove_space(struct
if (IS_ERR(handle))
return PTR_ERR(handle);
+again:
ext4_ext_invalidate_cache(inode);
/*
* We start scanning from right side, freeing all the blocks
* after i_size and walking into the tree depth-wise.
*/
+ depth = ext_depth(inode);
path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
if (path == NULL) {
ext4_journal_stop(handle);
return -ENOMEM;
}
+ path[0].p_depth = depth;
path[0].p_hdr = ext_inode_hdr(inode);
if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
err = -EIO;
goto out;
}
- path[0].p_depth = depth;
+ i = err = 0;
while (i >= 0 && err == 0) {
if (i == depth) {
@@ -2367,6 +2382,8 @@ static int ext4_ext_remove_space(struct
out:
ext4_ext_drop_refs(path);
kfree(path);
+ if (err == -EAGAIN)
+ goto again;
ext4_journal_stop(handle);
return err;
@@ -2431,7 +2448,7 @@ static void bi_complete(struct bio *bio,
/* FIXME!! we need to try to merge to left or right after zero-out */
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
- int ret = -EIO;
+ int ret;
struct bio *bio;
int blkbits, blocksize;
sector_t ee_pblock;
@@ -2455,6 +2472,9 @@ static int ext4_ext_zeroout(struct inode
len = ee_len;
bio = bio_alloc(GFP_NOIO, len);
+ if (!bio)
+ return -ENOMEM;
+
bio->bi_sector = ee_pblock;
bio->bi_bdev = inode->i_sb->s_bdev;
@@ -2482,17 +2502,15 @@ static int ext4_ext_zeroout(struct inode
submit_bio(WRITE, bio);
wait_for_completion(&event);
- if (test_bit(BIO_UPTODATE, &bio->bi_flags))
- ret = 0;
- else {
- ret = -EIO;
- break;
+ if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+ bio_put(bio);
+ return -EIO;
}
bio_put(bio);
ee_len -= done;
ee_pblock += done << (blkbits - 9);
}
- return ret;
+ return 0;
}
#define EXT4_EXT_ZERO_LEN 7
@@ -2517,11 +2535,21 @@ static int ext4_ext_convert_to_initializ
struct ext4_extent *ex2 = NULL;
struct ext4_extent *ex3 = NULL;
struct ext4_extent_header *eh;
- ext4_lblk_t ee_block;
+ ext4_lblk_t ee_block, eof_block;
unsigned int allocated, ee_len, depth;
ext4_fsblk_t newblock;
int err = 0;
int ret = 0;
+ int may_zeroout;
+
+ ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
+ "block %llu, max_blocks %u\n", inode->i_ino,
+ (unsigned long long)iblock, max_blocks);
+
+ eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
+ inode->i_sb->s_blocksize_bits;
+ if (eof_block < iblock + max_blocks)
+ eof_block = iblock + max_blocks;
depth = ext_depth(inode);
eh = path[depth].p_hdr;
@@ -2530,16 +2558,23 @@ static int ext4_ext_convert_to_initializ
ee_len = ext4_ext_get_actual_len(ex);
allocated = ee_len - (iblock - ee_block);
newblock = iblock - ee_block + ext_pblock(ex);
+
ex2 = ex;
orig_ex.ee_block = ex->ee_block;
orig_ex.ee_len = cpu_to_le16(ee_len);
ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
+ /*
+ * It is safe to convert an extent to initialized via explicit
+ * zeroout only if the extent is fully inside i_size or new_size.
+ */
+ may_zeroout = ee_block + ee_len <= eof_block;
+
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
goto out;
/* If extent has less than 2*EXT4_EXT_ZERO_LEN zerout directly */
- if (ee_len <= 2*EXT4_EXT_ZERO_LEN) {
+ if (ee_len <= 2*EXT4_EXT_ZERO_LEN && may_zeroout) {
err = ext4_ext_zeroout(inode, &orig_ex);
if (err)
goto fix_extent_len;
@@ -2570,7 +2605,7 @@ static int ext4_ext_convert_to_initializ
if (allocated > max_blocks) {
unsigned int newdepth;
/* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */
- if (allocated <= EXT4_EXT_ZERO_LEN) {
+ if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) {
/*
* iblock == ee_block is handled by the zerouout
* at the beginning.
@@ -2646,7 +2681,7 @@ static int ext4_ext_convert_to_initializ
ex3->ee_len = cpu_to_le16(allocated - max_blocks);
ext4_ext_mark_uninitialized(ex3);
err = ext4_ext_insert_extent(handle, inode, path, ex3, 0);
- if (err == -ENOSPC) {
+ if (err == -ENOSPC && may_zeroout) {
err = ext4_ext_zeroout(inode, &orig_ex);
if (err)
goto fix_extent_len;
@@ -2670,8 +2705,10 @@ static int ext4_ext_convert_to_initializ
* update the extent length after successful insert of the
* split extent
*/
- orig_ex.ee_len = cpu_to_le16(ee_len -
- ext4_ext_get_actual_len(ex3));
+ ee_len -= ext4_ext_get_actual_len(ex3);
+ orig_ex.ee_len = cpu_to_le16(ee_len);
+ may_zeroout = ee_block + ee_len <= eof_block;
+
depth = newdepth;
ext4_ext_drop_refs(path);
path = ext4_ext_find_extent(inode, iblock, path);
@@ -2695,7 +2732,7 @@ static int ext4_ext_convert_to_initializ
* otherwise give the extent a chance to merge to left
*/
if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
- iblock != ee_block) {
+ iblock != ee_block && may_zeroout) {
err = ext4_ext_zeroout(inode, &orig_ex);
if (err)
goto fix_extent_len;
@@ -2764,7 +2801,7 @@ static int ext4_ext_convert_to_initializ
goto out;
insert:
err = ext4_ext_insert_extent(handle, inode, path, &newex, 0);
- if (err == -ENOSPC) {
+ if (err == -ENOSPC && may_zeroout) {
err = ext4_ext_zeroout(inode, &orig_ex);
if (err)
goto fix_extent_len;
@@ -2824,14 +2861,21 @@ static int ext4_split_unwritten_extents(
struct ext4_extent *ex2 = NULL;
struct ext4_extent *ex3 = NULL;
struct ext4_extent_header *eh;
- ext4_lblk_t ee_block;
+ ext4_lblk_t ee_block, eof_block;
unsigned int allocated, ee_len, depth;
ext4_fsblk_t newblock;
int err = 0;
+ int may_zeroout;
+
+ ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
+ "block %llu, max_blocks %u\n", inode->i_ino,
+ (unsigned long long)iblock, max_blocks);
+
+ eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
+ inode->i_sb->s_blocksize_bits;
+ if (eof_block < iblock + max_blocks)
+ eof_block = iblock + max_blocks;
- ext_debug("ext4_split_unwritten_extents: inode %lu,"
- "iblock %llu, max_blocks %u\n", inode->i_ino,
- (unsigned long long)iblock, max_blocks);
depth = ext_depth(inode);
eh = path[depth].p_hdr;
ex = path[depth].p_ext;
@@ -2839,12 +2883,19 @@ static int ext4_split_unwritten_extents(
ee_len = ext4_ext_get_actual_len(ex);
allocated = ee_len - (iblock - ee_block);
newblock = iblock - ee_block + ext_pblock(ex);
+
ex2 = ex;
orig_ex.ee_block = ex->ee_block;
orig_ex.ee_len = cpu_to_le16(ee_len);
ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
/*
+ * It is safe to convert an extent to initialized via explicit
+ * zeroout only if the extent is fully inside i_size or new_size.
+ */
+ may_zeroout = ee_block + ee_len <= eof_block;
+
+ /*
* If the uninitialized extent begins at the same logical
* block where the write begins, and the write completely
* covers the extent, then we don't need to split it.
@@ -2878,7 +2929,7 @@ static int ext4_split_unwritten_extents(
ex3->ee_len = cpu_to_le16(allocated - max_blocks);
ext4_ext_mark_uninitialized(ex3);
err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
- if (err == -ENOSPC) {
+ if (err == -ENOSPC && may_zeroout) {
err = ext4_ext_zeroout(inode, &orig_ex);
if (err)
goto fix_extent_len;
@@ -2902,8 +2953,10 @@ static int ext4_split_unwritten_extents(
* update the extent length after successful insert of the
* split extent
*/
- orig_ex.ee_len = cpu_to_le16(ee_len -
- ext4_ext_get_actual_len(ex3));
+ ee_len -= ext4_ext_get_actual_len(ex3);
+ orig_ex.ee_len = cpu_to_le16(ee_len);
+ may_zeroout = ee_block + ee_len <= eof_block;
+
depth = newdepth;
ext4_ext_drop_refs(path);
path = ext4_ext_find_extent(inode, iblock, path);
@@ -2949,7 +3002,7 @@ static int ext4_split_unwritten_extents(
goto out;
insert:
err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
- if (err == -ENOSPC) {
+ if (err == -ENOSPC && may_zeroout) {
err = ext4_ext_zeroout(inode, &orig_ex);
if (err)
goto fix_extent_len;
@@ -3029,6 +3082,14 @@ out:
return err;
}
+static void unmap_underlying_metadata_blocks(struct block_device *bdev,
+ sector_t block, int count)
+{
+ int i;
+ for (i = 0; i < count; i++)
+ unmap_underlying_metadata(bdev, block + i);
+}
+
static int
ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
ext4_lblk_t iblock, unsigned int max_blocks,
@@ -3059,7 +3120,7 @@ ext4_ext_handle_uninitialized_extents(ha
if (io)
io->flag = DIO_AIO_UNWRITTEN;
else
- EXT4_I(inode)->i_state |= EXT4_STATE_DIO_UNWRITTEN;
+ ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
goto out;
}
/* async DIO end_io complete, convert the filled extent to written */
@@ -3104,6 +3165,30 @@ out:
} else
allocated = ret;
set_buffer_new(bh_result);
+ /*
+ * If we allocated more blocks than requested,
+ * we need to make sure we unmap the extra blocks
+ * allocated. The blocks actually needed will get
+ * unmapped later when we find the buffer_head marked
+ * new.
+ */
+ if (allocated > max_blocks) {
+ unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
+ newblock + max_blocks,
+ allocated - max_blocks);
+ allocated = max_blocks;
+ }
+
+ /*
+ * If we have done fallocate at an offset that is already
+ * delayed allocated, we would have block and quota
+ * reservations from the delayed write path. But fallocate
+ * has already updated the quota and block count for this
+ * offset, so cancel those reservations.
+ */
+ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
+ ext4_da_update_reserve_space(inode, allocated, 0);
+
map_out:
set_buffer_mapped(bh_result);
out1:
@@ -3144,9 +3229,9 @@ int ext4_ext_get_blocks(handle_t *handle
{
struct ext4_ext_path *path = NULL;
struct ext4_extent_header *eh;
- struct ext4_extent newex, *ex;
+ struct ext4_extent newex, *ex, *last_ex;
ext4_fsblk_t newblock;
- int err = 0, depth, ret, cache_type;
+ int i, err = 0, depth, ret, cache_type;
unsigned int allocated = 0;
struct ext4_allocation_request ar;
ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
@@ -3196,7 +3281,13 @@ int ext4_ext_get_blocks(handle_t *handle
* this situation is possible, though, _during_ tree modification;
* this is why assert can't be put in ext4_ext_find_extent()
*/
- BUG_ON(path[depth].p_ext == NULL && depth != 0);
+ if (path[depth].p_ext == NULL && depth != 0) {
+ ext4_error(inode->i_sb, __func__, "bad extent address "
+ "inode: %lu, iblock: %lu, depth: %d",
+ inode->i_ino, (unsigned long) iblock, depth);
+ err = -EIO;
+ goto out2;
+ }
eh = path[depth].p_hdr;
ex = path[depth].p_ext;
@@ -3315,9 +3406,35 @@ int ext4_ext_get_blocks(handle_t *handle
if (io)
io->flag = DIO_AIO_UNWRITTEN;
else
- EXT4_I(inode)->i_state |=
- EXT4_STATE_DIO_UNWRITTEN;;
+ ext4_set_inode_state(inode,
+ EXT4_STATE_DIO_UNWRITTEN);
+ }
+ }
+
+ if (unlikely(ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))) {
+ if (unlikely(!eh->eh_entries)) {
+ ext4_error(inode->i_sb, __func__,
+ "inode#%lu, eh->eh_entries = 0 and "
+ "EOFBLOCKS_FL set", inode->i_ino);
+ err = -EIO;
+ goto out2;
}
+ last_ex = EXT_LAST_EXTENT(eh);
+ /*
+ * If the current leaf block was reached by looking at
+ * the last index block all the way down the tree, and
+ * we are extending the inode beyond the last extent
+ * in the current leaf block, then clear the
+ * EOFBLOCKS_FL flag.
+ */
+ for (i = depth-1; i >= 0; i--) {
+ if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
+ break;
+ }
+ if ((i < 0) &&
+ (iblock + ar.len > le32_to_cpu(last_ex->ee_block) +
+ ext4_ext_get_actual_len(last_ex)))
+ ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
}
err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
if (err) {
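The hunk above only clears the EOFBLOCKS hint when the lookup followed the last index at every level (i.e. the allocation sits in the rightmost leaf) and the new blocks end beyond the current last extent. A standalone sketch of that test, not part of the patch, with a simplified stand-in for the extent path:

#include <stdbool.h>

struct demo_level {
        int idx;                /* index followed at this level */
        int last_idx;           /* last valid index at this level */
};

static bool demo_should_clear_eofblocks(const struct demo_level *path, int depth,
                                        unsigned long new_end,
                                        unsigned long last_extent_end)
{
        int i;

        /* Must have followed the last index block all the way down. */
        for (i = depth - 1; i >= 0; i--)
                if (path[i].idx != path[i].last_idx)
                        return false;

        /* Clear the hint only if we extend past the last extent in the leaf. */
        return new_end > last_extent_end;
}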
@@ -3333,9 +3450,18 @@ int ext4_ext_get_blocks(handle_t *handle
/* previous routine could use block we allocated */
newblock = ext_pblock(&newex);
allocated = ext4_ext_get_actual_len(&newex);
+ if (allocated > max_blocks)
+ allocated = max_blocks;
set_buffer_new(bh_result);
/*
+ * Update reserved blocks/metadata blocks after successful
+ * block allocation which had been deferred till now.
+ */
+ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
+ ext4_da_update_reserve_space(inode, allocated, 1);
+
+ /*
* Cache the extent and update transaction to commit on fdatasync only
* when it is _not_ an uninitialized extent.
*/
@@ -3443,6 +3569,13 @@ static void ext4_falloc_update_inode(str
i_size_write(inode, new_size);
if (new_size > EXT4_I(inode)->i_disksize)
ext4_update_i_disksize(inode, new_size);
+ } else {
+ /*
+ * Mark that we allocated beyond EOF so a subsequent truncate
+ * can proceed even if the new size is the same as i_size.
+ */
+ if (new_size > i_size_read(inode))
+ ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
}
}
@@ -3470,7 +3603,7 @@ long ext4_fallocate(struct inode *inode,
* currently supporting (pre)allocate mode for extent-based
* files _only_
*/
- if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
return -EOPNOTSUPP;
/* preallocation to directories is currently not supported */
@@ -3489,6 +3622,11 @@ long ext4_fallocate(struct inode *inode,
*/
credits = ext4_chunk_trans_blocks(inode, max_blocks);
mutex_lock(&inode->i_mutex);
+ ret = inode_newsize_ok(inode, (len + offset));
+ if (ret) {
+ mutex_unlock(&inode->i_mutex);
+ return ret;
+ }
retry:
while (ret >= 0 && ret < max_blocks) {
block = block + ret;
@@ -3547,7 +3685,7 @@ retry:
* Returns 0 on success.
*/
int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
- loff_t len)
+ ssize_t len)
{
handle_t *handle;
ext4_lblk_t block;
@@ -3683,7 +3821,7 @@ static int ext4_xattr_fiemap(struct inod
int error = 0;
/* in-inode? */
- if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) {
+ if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
struct ext4_iloc iloc;
int offset; /* offset of xattr in inode */
@@ -3696,6 +3834,7 @@ static int ext4_xattr_fiemap(struct inod
physical += offset;
length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
flags |= FIEMAP_EXTENT_DATA_INLINE;
+ brelse(iloc.bh);
} else { /* external block */
physical = EXT4_I(inode)->i_file_acl << blockbits;
length = inode->i_sb->s_blocksize;
@@ -3711,11 +3850,10 @@ int ext4_fiemap(struct inode *inode, str
__u64 start, __u64 len)
{
ext4_lblk_t start_blk;
- ext4_lblk_t len_blks;
int error = 0;
/* fallback to generic here if not in extents fmt */
- if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
return generic_block_fiemap(inode, fieinfo, start, len,
ext4_get_block);
@@ -3725,8 +3863,14 @@ int ext4_fiemap(struct inode *inode, str
if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
error = ext4_xattr_fiemap(inode, fieinfo);
} else {
+ ext4_lblk_t len_blks;
+ __u64 last_blk;
+
start_blk = start >> inode->i_sb->s_blocksize_bits;
- len_blks = len >> inode->i_sb->s_blocksize_bits;
+ last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
+ if (last_blk >= EXT_MAX_BLOCK)
+ last_blk = EXT_MAX_BLOCK-1;
+ len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
/*
* Walk the extent tree gathering extent information.
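Several hunks above gate the zeroout fallback on a new may_zeroout test: an uninitialized extent may only be converted to initialized by zeroing it when it lies entirely below the larger of i_size (rounded up to a block) and the end of the range being written. A standalone restatement of that computation, not part of the patch (parameter names mirror the hunks, the function itself is illustrative):

#include <stdint.h>

static int demo_may_zeroout(uint64_t i_size, unsigned int blkbits,
                            uint64_t iblock, unsigned int max_blocks,
                            uint64_t ee_block, unsigned int ee_len)
{
        /* eof_block: i_size rounded up to a block boundary, in blocks... */
        uint64_t eof_block = (i_size + (1ULL << blkbits) - 1) >> blkbits;

        /* ...but never smaller than the end of the range being written. */
        if (eof_block < iblock + max_blocks)
                eof_block = iblock + max_blocks;

        /* Zeroout is safe only if the whole extent sits below eof_block. */
        return ee_block + ee_len <= eof_block;
}

When the test fails, the hunks simply skip the zeroout path and fall back to the normal split or error handling instead.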
file:9630583cef280d541e87505eba2cfa37589266e3 -> file:2a6054129bbc7acb401064b541d4cc0f11f8cea3
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -35,9 +35,9 @@
*/
static int ext4_release_file(struct inode *inode, struct file *filp)
{
- if (EXT4_I(inode)->i_state & EXT4_STATE_DA_ALLOC_CLOSE) {
+ if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
ext4_alloc_da_blocks(inode);
- EXT4_I(inode)->i_state &= ~EXT4_STATE_DA_ALLOC_CLOSE;
+ ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
}
/* if we are the last writer on the inode, drop the block reservation */
if ((filp->f_mode & FMODE_WRITE) &&
@@ -65,7 +65,7 @@ ext4_file_write(struct kiocb *iocb, cons
* is smaller than s_maxbytes, which is for extent-mapped files.
*/
- if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
size_t length = iov_length(iov, nr_segs);
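Like most files in this series, file.c switches from open-coded i_state bit twiddling to the ext4_test_inode_state()/ext4_set_inode_state()/ext4_clear_inode_state() helpers. A rough illustration of the accessor pattern, with illustrative names rather than the kernel's definitions:

#include <stdbool.h>

enum demo_state { DEMO_STATE_NEW, DEMO_STATE_JDATA, DEMO_STATE_XATTR };

struct demo_inode {
        unsigned long i_state_flags;    /* one bit per dynamic state flag */
};

static inline void demo_set_state(struct demo_inode *i, enum demo_state bit)
{
        i->i_state_flags |= 1UL << bit;
}

static inline void demo_clear_state(struct demo_inode *i, enum demo_state bit)
{
        i->i_state_flags &= ~(1UL << bit);
}

static inline bool demo_test_state(const struct demo_inode *i, enum demo_state bit)
{
        return (i->i_state_flags >> bit) & 1;
}

Funnelling every access through helpers presumably leaves room to change the backing storage (for example to atomic bit operations) without touching the call sites.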
file:d6049e40b161722a26c64b0027bccd8c20c12bf5 -> file:c3660a60c30051534d0ed891ee07eb79574ccd31
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -35,6 +35,29 @@
#include <trace/events/ext4.h>
/*
+ * If we're not journaling and this is a just-created file, we have to
+ * sync our parent directory (if it was freshly created) since
+ * otherwise it will only be written by writeback, leaving a huge
+ * window during which a crash may lose the file. This may apply to
+ * the parent directory's parent as well, and so on recursively, if
+ * they are also freshly created.
+ */
+static void ext4_sync_parent(struct inode *inode)
+{
+ struct dentry *dentry = NULL;
+
+ while (inode && ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
+ ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
+ dentry = list_entry(inode->i_dentry.next,
+ struct dentry, d_alias);
+ if (!dentry || !dentry->d_parent || !dentry->d_parent->d_inode)
+ break;
+ inode = dentry->d_parent->d_inode;
+ sync_mapping_buffers(inode->i_mapping);
+ }
+}
+
+/*
* akpm: A new design for ext4_sync_file().
*
* This is only called from sys_fsync(), sys_fdatasync() and sys_msync().
@@ -67,8 +90,12 @@ int ext4_sync_file(struct file *file, st
if (ret < 0)
return ret;
- if (!journal)
- return simple_fsync(file, dentry, datasync);
+ if (!journal) {
+ ret = simple_fsync(file, dentry, datasync);
+ if (!ret && !list_empty(&inode->i_dentry))
+ ext4_sync_parent(inode);
+ return ret;
+ }
/*
* data=writeback,ordered:
@@ -88,9 +115,21 @@ int ext4_sync_file(struct file *file, st
return ext4_force_commit(inode->i_sb);
commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;
- if (jbd2_log_start_commit(journal, commit_tid))
- jbd2_log_wait_commit(journal, commit_tid);
- else if (journal->j_flags & JBD2_BARRIER)
+ if (jbd2_log_start_commit(journal, commit_tid)) {
+ /*
+ * When the journal is on a different device than the
+ * fs data disk, we need to issue the barrier in
+ * writeback mode. (In ordered mode, the jbd2 layer
+ * will take care of issuing the barrier. In
+ * data=journal, all of the data blocks are written to
+ * the journal device.)
+ */
+ if (ext4_should_writeback_data(inode) &&
+ (journal->j_fs_dev != journal->j_dev) &&
+ (journal->j_flags & JBD2_BARRIER))
+ blkdev_issue_flush(inode->i_sb->s_bdev, NULL);
+ ret = jbd2_log_wait_commit(journal, commit_tid);
+ } else if (journal->j_flags & JBD2_BARRIER)
blkdev_issue_flush(inode->i_sb->s_bdev, NULL);
return ret;
}
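ext4_sync_parent() above starts from the freshly linked inode and walks upward: as long as the current inode still carries the NEWENTRY mark, it clears the mark, steps to the parent directory, and flushes that directory's buffers. A toy model of the walk, not the kernel code (the structures stand in for inodes and dentries):

#include <stdbool.h>
#include <stddef.h>

struct demo_node {
        bool new_entry;                 /* set when this node was newly linked */
        struct demo_node *parent;       /* parent directory, NULL at the root */
};

/* Stand-in for sync_mapping_buffers() on the parent directory's mapping. */
static void demo_flush_dir(struct demo_node *dir)
{
        (void)dir;
}

static void demo_sync_new_parents(struct demo_node *inode)
{
        while (inode && inode->new_entry) {
                inode->new_entry = false;
                if (!inode->parent)
                        break;
                inode = inode->parent;
                demo_flush_dir(inode);
        }
}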
file:f3624ead4f6c5136189e759d0bba8d0c9c2ca356 -> file:55a93f5bb0031a9c3836601fbe51374033310bd5
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -244,57 +244,50 @@ void ext4_free_inode(handle_t *handle, s
if (fatal)
goto error_return;
- /* Ok, now we can actually update the inode bitmaps.. */
- cleared = ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
- bit, bitmap_bh->b_data);
- if (!cleared)
- ext4_error(sb, "ext4_free_inode",
- "bit already cleared for inode %lu", ino);
- else {
- gdp = ext4_get_group_desc(sb, block_group, &bh2);
-
+ fatal = -ESRCH;
+ gdp = ext4_get_group_desc(sb, block_group, &bh2);
+ if (gdp) {
BUFFER_TRACE(bh2, "get_write_access");
fatal = ext4_journal_get_write_access(handle, bh2);
- if (fatal) goto error_return;
+ }
+ ext4_lock_group(sb, block_group);
+ cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
+ if (fatal || !cleared) {
+ ext4_unlock_group(sb, block_group);
+ goto out;
+ }
- if (gdp) {
- ext4_lock_group(sb, block_group);
- count = ext4_free_inodes_count(sb, gdp) + 1;
- ext4_free_inodes_set(sb, gdp, count);
- if (is_directory) {
- count = ext4_used_dirs_count(sb, gdp) - 1;
- ext4_used_dirs_set(sb, gdp, count);
- if (sbi->s_log_groups_per_flex) {
- ext4_group_t f;
-
- f = ext4_flex_group(sbi, block_group);
- atomic_dec(&sbi->s_flex_groups[f].free_inodes);
- }
+ count = ext4_free_inodes_count(sb, gdp) + 1;
+ ext4_free_inodes_set(sb, gdp, count);
+ if (is_directory) {
+ count = ext4_used_dirs_count(sb, gdp) - 1;
+ ext4_used_dirs_set(sb, gdp, count);
+ percpu_counter_dec(&sbi->s_dirs_counter);
+ }
+ gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
+ ext4_unlock_group(sb, block_group);
- }
- gdp->bg_checksum = ext4_group_desc_csum(sbi,
- block_group, gdp);
- ext4_unlock_group(sb, block_group);
- percpu_counter_inc(&sbi->s_freeinodes_counter);
- if (is_directory)
- percpu_counter_dec(&sbi->s_dirs_counter);
+ percpu_counter_inc(&sbi->s_freeinodes_counter);
+ if (sbi->s_log_groups_per_flex) {
+ ext4_group_t f = ext4_flex_group(sbi, block_group);
- if (sbi->s_log_groups_per_flex) {
- ext4_group_t f;
+ atomic_inc(&sbi->s_flex_groups[f].free_inodes);
+ if (is_directory)
+ atomic_dec(&sbi->s_flex_groups[f].used_dirs);
+ }
+ BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
+ fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
+out:
+ if (cleared) {
+ BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
+ err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
+ if (!fatal)
+ fatal = err;
+ sb->s_dirt = 1;
+ } else
+ ext4_error(sb, "ext4_free_inode",
+ "bit already cleared for inode %lu", ino);
- f = ext4_flex_group(sbi, block_group);
- atomic_inc(&sbi->s_flex_groups[f].free_inodes);
- }
- }
- BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
- err = ext4_handle_dirty_metadata(handle, NULL, bh2);
- if (!fatal) fatal = err;
- }
- BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
- err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
- if (!fatal)
- fatal = err;
- sb->s_dirt = 1;
error_return:
brelse(bitmap_bh);
ext4_std_error(sb, fatal);
@@ -504,7 +497,7 @@ static int find_group_orlov(struct super
if (S_ISDIR(mode) &&
((parent == sb->s_root->d_inode) ||
- (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL))) {
+ (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
int best_ndir = inodes_per_group;
int ret = -1;
@@ -779,7 +772,7 @@ static int ext4_claim_inode(struct super
if (sbi->s_log_groups_per_flex) {
ext4_group_t f = ext4_flex_group(sbi, group);
- atomic_inc(&sbi->s_flex_groups[f].free_inodes);
+ atomic_inc(&sbi->s_flex_groups[f].used_dirs);
}
}
gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
@@ -904,7 +897,7 @@ repeat_in_this_group:
BUFFER_TRACE(inode_bitmap_bh,
"call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_metadata(handle,
- inode,
+ NULL,
inode_bitmap_bh);
if (err)
goto fail;
@@ -1029,7 +1022,8 @@ got:
inode->i_generation = sbi->s_next_generation++;
spin_unlock(&sbi->s_next_gen_lock);
- ei->i_state = EXT4_STATE_NEW;
+ ei->i_state_flags = 0;
+ ext4_set_inode_state(inode, EXT4_STATE_NEW);
ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
@@ -1050,7 +1044,7 @@ got:
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
/* set extent flag only for directory, file and normal symlink*/
if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
- EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
+ ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
ext4_ext_tree_init(handle, inode);
}
}
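The ext4_free_inode() rework above clears the bitmap bit and updates the per-group counters in one ext4_lock_group() section, moves the filesystem-wide counter updates outside it, and reports "bit already cleared" only afterwards. A compressed sketch of that ordering, with a pthread mutex standing in for the group spinlock and everything else simplified:

#include <pthread.h>
#include <stdbool.h>

struct demo_group {
        pthread_mutex_t lock;           /* stands in for ext4_lock_group() */
        unsigned long inode_bitmap;
        unsigned int free_inodes;       /* per-group count from the descriptor */
};

static bool demo_free_inode(struct demo_group *g, unsigned int bit,
                            unsigned long *fs_free_inodes)
{
        bool was_set;

        pthread_mutex_lock(&g->lock);
        was_set = g->inode_bitmap & (1UL << bit);
        if (was_set) {
                g->inode_bitmap &= ~(1UL << bit);
                g->free_inodes++;       /* group counters change under the lock */
        }
        pthread_mutex_unlock(&g->lock);

        if (was_set)
                (*fs_free_inodes)++;    /* fs-wide counter updated outside it */

        return was_set;                 /* false: the bit was already clear */
}

Doing the bit clear inside the locked section is also what lets the patch switch from ext4_clear_bit_atomic() to the plain ext4_clear_bit().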
file:e233879ebbcb0595651ced91b3ecafc8fc492685 -> file:99596fc88821e8de7a48b1b8dbea17a55fb79dea
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -957,7 +957,7 @@ static int ext4_ind_get_blocks(handle_t
int count = 0;
ext4_fsblk_t first_block = 0;
- J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
+ J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
depth = ext4_block_to_path(inode, iblock, offsets,
&blocks_to_boundary);
@@ -1051,81 +1051,115 @@ qsize_t *ext4_get_reserved_space(struct
return &EXT4_I(inode)->i_reserved_quota;
}
#endif
+
/*
* Calculate the number of metadata blocks need to reserve
- * to allocate @blocks for non extent file based file
+ * to allocate a new block at @lblock for a non-extent-based file
*/
-static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
+static int ext4_indirect_calc_metadata_amount(struct inode *inode,
+ sector_t lblock)
{
- int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
- int ind_blks, dind_blks, tind_blks;
-
- /* number of new indirect blocks needed */
- ind_blks = (blocks + icap - 1) / icap;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
+ int blk_bits;
- dind_blks = (ind_blks + icap - 1) / icap;
+ if (lblock < EXT4_NDIR_BLOCKS)
+ return 0;
- tind_blks = 1;
+ lblock -= EXT4_NDIR_BLOCKS;
- return ind_blks + dind_blks + tind_blks;
+ if (ei->i_da_metadata_calc_len &&
+ (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
+ ei->i_da_metadata_calc_len++;
+ return 0;
+ }
+ ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
+ ei->i_da_metadata_calc_len = 1;
+ blk_bits = order_base_2(lblock);
+ return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
}
/*
* Calculate the number of metadata blocks need to reserve
- * to allocate given number of blocks
+ * to allocate a block located at @lblock
*/
-static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
+static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
- if (!blocks)
- return 0;
-
- if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
- return ext4_ext_calc_metadata_amount(inode, blocks);
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ return ext4_ext_calc_metadata_amount(inode, lblock);
- return ext4_indirect_calc_metadata_amount(inode, blocks);
+ return ext4_indirect_calc_metadata_amount(inode, lblock);
}
-static void ext4_da_update_reserve_space(struct inode *inode, int used)
+/*
+ * Called with i_data_sem down, which is important since we can call
+ * ext4_discard_preallocations() from here.
+ */
+void ext4_da_update_reserve_space(struct inode *inode,
+ int used, int quota_claim)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- int total, mdb, mdb_free;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ int mdb_free = 0, allocated_meta_blocks = 0;
- spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
- /* recalculate the number of metablocks still need to be reserved */
- total = EXT4_I(inode)->i_reserved_data_blocks - used;
- mdb = ext4_calc_metadata_amount(inode, total);
-
- /* figure out how many metablocks to release */
- BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
- mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;
-
- if (mdb_free) {
- /* Account for allocated meta_blocks */
- mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;
+ spin_lock(&ei->i_block_reservation_lock);
+ if (unlikely(used > ei->i_reserved_data_blocks)) {
+ ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
+ "with only %d reserved data blocks\n",
+ __func__, inode->i_ino, used,
+ ei->i_reserved_data_blocks);
+ WARN_ON(1);
+ used = ei->i_reserved_data_blocks;
+ }
+
+ /* Update per-inode reservations */
+ ei->i_reserved_data_blocks -= used;
+ used += ei->i_allocated_meta_blocks;
+ ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
+ allocated_meta_blocks = ei->i_allocated_meta_blocks;
+ ei->i_allocated_meta_blocks = 0;
+ percpu_counter_sub(&sbi->s_dirtyblocks_counter, used);
- /* update fs dirty blocks counter */
+ if (ei->i_reserved_data_blocks == 0) {
+ /*
+ * We can release all of the reserved metadata blocks
+ * only when we have written all of the delayed
+ * allocation blocks.
+ */
+ mdb_free = ei->i_reserved_meta_blocks;
+ ei->i_reserved_meta_blocks = 0;
+ ei->i_da_metadata_calc_len = 0;
percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
- EXT4_I(inode)->i_allocated_meta_blocks = 0;
- EXT4_I(inode)->i_reserved_meta_blocks = mdb;
}
-
- /* update per-inode reservations */
- BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
- EXT4_I(inode)->i_reserved_data_blocks -= used;
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
- /*
- * free those over-booking quota for metadata blocks
- */
- if (mdb_free)
- vfs_dq_release_reservation_block(inode, mdb_free);
+ /* Update quota subsystem */
+ if (quota_claim) {
+ vfs_dq_claim_block(inode, used);
+ if (mdb_free)
+ vfs_dq_release_reservation_block(inode, mdb_free);
+ } else {
+ /*
+ * We did fallocate at an offset that is already delayed
+ * allocated, so on delayed-allocation writeback we should
+ * not update the quota for those blocks. But converting
+ * the fallocated region to an initialized region may have
+ * caused a metadata allocation, so claim quota for that.
+ */
+ if (allocated_meta_blocks)
+ vfs_dq_claim_block(inode, allocated_meta_blocks);
+ vfs_dq_release_reservation_block(inode, mdb_free + used -
+ allocated_meta_blocks);
+ }
/*
* If we have done all the pending block allocations and if
* there aren't any writers on the inode, we can discard the
* inode's preallocations.
*/
- if (!total && (atomic_read(&inode->i_writecount) == 0))
+ if ((ei->i_reserved_data_blocks == 0) &&
+ (atomic_read(&inode->i_writecount) == 0))
ext4_discard_preallocations(inode);
}
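The rewritten ext4_indirect_calc_metadata_amount() above charges metadata per delayed block instead of recomputing a batch estimate: the first block that lands in a new indirect-block window pays for the whole chain of index blocks at that offset, and later blocks in the same window add nothing. A standalone sketch of the idea; the constants and names are illustrative, not the kernel's:

#include <stdint.h>

#define DEMO_NDIR_BLOCKS 12     /* direct blocks mapped straight from the inode */
#define DEMO_ADDR_BITS   8      /* 256 block addresses per index block (illustrative) */

struct demo_calc_state {
        uint64_t last_window;   /* start of the window we last charged for */
        unsigned int pending;   /* delayed blocks accounted in that window */
};

/* Smallest order o with (1 << o) >= x; rough stand-in for order_base_2(). */
static unsigned int demo_order_base_2(uint64_t x)
{
        unsigned int o = 0;

        while ((1ULL << o) < x)
                o++;
        return o;
}

static int demo_metadata_needed(struct demo_calc_state *st, uint64_t lblock)
{
        uint64_t window_mask = ~((uint64_t)(1 << DEMO_ADDR_BITS) - 1);

        if (lblock < DEMO_NDIR_BLOCKS)
                return 0;               /* direct blocks need no index blocks */
        lblock -= DEMO_NDIR_BLOCKS;

        if (st->pending && (lblock & window_mask) == st->last_window) {
                st->pending++;          /* same window: already paid for */
                return 0;
        }
        st->last_window = lblock & window_mask;
        st->pending = 1;

        /* Roughly one index block per tree level needed at this offset. */
        return demo_order_base_2(lblock) / DEMO_ADDR_BITS + 1;
}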
@@ -1240,7 +1274,7 @@ int ext4_get_blocks(handle_t *handle, st
* file system block.
*/
down_read((&EXT4_I(inode)->i_data_sem));
- if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
bh, 0);
} else {
@@ -1302,7 +1336,7 @@ int ext4_get_blocks(handle_t *handle, st
* We need to check for EXT4 here because migrate
* could have changed the inode type in between
*/
- if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
bh, flags);
} else {
@@ -1315,20 +1349,22 @@ int ext4_get_blocks(handle_t *handle, st
* i_data's format changing. Force the migrate
* to fail by clearing migrate flags
*/
- EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE;
+ ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
}
- }
+ /*
+ * Update reserved blocks/metadata blocks after successful
+ * block allocation which had been deferred till now. We don't
+ * support fallocate for non extent files. So we can update
+ * reserve space here.
+ */
+ if ((retval > 0) &&
+ (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
+ ext4_da_update_reserve_space(inode, retval, 1);
+ }
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
EXT4_I(inode)->i_delalloc_reserved_flag = 0;
- /*
- * Update reserved blocks/metadata blocks after successful
- * block allocation which had been deferred till now.
- */
- if ((retval > 0) && (flags & EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE))
- ext4_da_update_reserve_space(inode, retval);
-
up_write((&EXT4_I(inode)->i_data_sem));
if (retval > 0 && buffer_mapped(bh)) {
int ret = check_block_validity(inode, "file system "
@@ -1800,7 +1836,7 @@ static int ext4_journalled_write_end(str
new_i_size = pos + copied;
if (new_i_size > inode->i_size)
i_size_write(inode, pos+copied);
- EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
+ ext4_set_inode_state(inode, EXT4_STATE_JDATA);
if (new_i_size > EXT4_I(inode)->i_disksize) {
ext4_update_i_disksize(inode, new_i_size);
ret2 = ext4_mark_inode_dirty(handle, inode);
@@ -1834,11 +1870,15 @@ static int ext4_journalled_write_end(str
return ret ? ret : copied;
}
-static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
+/*
+ * Reserve a single block located at lblock
+ */
+static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
{
int retries = 0;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- unsigned long md_needed, mdblocks, total = 0;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ unsigned long md_needed, md_reserved;
/*
* recalculate the amount of metadata blocks to reserve
@@ -1846,35 +1886,31 @@ static int ext4_da_reserve_space(struct
* worse case is one extent per block
*/
repeat:
- spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
- total = EXT4_I(inode)->i_reserved_data_blocks + nrblocks;
- mdblocks = ext4_calc_metadata_amount(inode, total);
- BUG_ON(mdblocks < EXT4_I(inode)->i_reserved_meta_blocks);
-
- md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
- total = md_needed + nrblocks;
- spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+ spin_lock(&ei->i_block_reservation_lock);
+ md_reserved = ei->i_reserved_meta_blocks;
+ md_needed = ext4_calc_metadata_amount(inode, lblock);
+ spin_unlock(&ei->i_block_reservation_lock);
/*
* Make quota reservation here to prevent quota overflow
* later. Real quota accounting is done at pages writeout
* time.
*/
- if (vfs_dq_reserve_block(inode, total))
+ if (vfs_dq_reserve_block(inode, md_needed + 1))
return -EDQUOT;
- if (ext4_claim_free_blocks(sbi, total)) {
- vfs_dq_release_reservation_block(inode, total);
+ if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
+ vfs_dq_release_reservation_block(inode, md_needed + 1);
if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
yield();
goto repeat;
}
return -ENOSPC;
}
- spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
- EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
- EXT4_I(inode)->i_reserved_meta_blocks += md_needed;
- spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+ spin_lock(&ei->i_block_reservation_lock);
+ ei->i_reserved_data_blocks++;
+ ei->i_reserved_meta_blocks += md_needed;
+ spin_unlock(&ei->i_block_reservation_lock);
return 0; /* success */
}
@@ -1882,49 +1918,46 @@ repeat:
static void ext4_da_release_space(struct inode *inode, int to_free)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- int total, mdb, mdb_free, release;
+ struct ext4_inode_info *ei = EXT4_I(inode);
if (!to_free)
return; /* Nothing to release, exit */
spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
- if (!EXT4_I(inode)->i_reserved_data_blocks) {
+ if (unlikely(to_free > ei->i_reserved_data_blocks)) {
/*
- * if there is no reserved blocks, but we try to free some
- * then the counter is messed up somewhere.
- * but since this function is called from invalidate
- * page, it's harmless to return without any action
- */
- printk(KERN_INFO "ext4 delalloc try to release %d reserved "
- "blocks for inode %lu, but there is no reserved "
- "data blocks\n", to_free, inode->i_ino);
- spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
- return;
+ * if there aren't enough reserved blocks, then the
+ * counter is messed up somewhere. Since this
+ * function is called from invalidate page, it's
+ * harmless to return without any action.
+ */
+ ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
+ "ino %lu, to_free %d with only %d reserved "
+ "data blocks\n", inode->i_ino, to_free,
+ ei->i_reserved_data_blocks);
+ WARN_ON(1);
+ to_free = ei->i_reserved_data_blocks;
}
+ ei->i_reserved_data_blocks -= to_free;
- /* recalculate the number of metablocks still need to be reserved */
- total = EXT4_I(inode)->i_reserved_data_blocks - to_free;
- mdb = ext4_calc_metadata_amount(inode, total);
-
- /* figure out how many metablocks to release */
- BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
- mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;
-
- release = to_free + mdb_free;
-
- /* update fs dirty blocks counter for truncate case */
- percpu_counter_sub(&sbi->s_dirtyblocks_counter, release);
+ if (ei->i_reserved_data_blocks == 0) {
+ /*
+ * We can release all of the reserved metadata blocks
+ * only when we have written all of the delayed
+ * allocation blocks.
+ */
+ to_free += ei->i_reserved_meta_blocks;
+ ei->i_reserved_meta_blocks = 0;
+ ei->i_da_metadata_calc_len = 0;
+ }
- /* update per-inode reservations */
- BUG_ON(to_free > EXT4_I(inode)->i_reserved_data_blocks);
- EXT4_I(inode)->i_reserved_data_blocks -= to_free;
+ /* update fs dirty blocks counter */
+ percpu_counter_sub(&sbi->s_dirtyblocks_counter, to_free);
- BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
- EXT4_I(inode)->i_reserved_meta_blocks = mdb;
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
- vfs_dq_release_reservation_block(inode, release);
+ vfs_dq_release_reservation_block(inode, to_free);
}
static void ext4_da_page_release_reservation(struct page *page,
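The release path just above boils down to: clamp to_free to what is actually reserved (warning instead of crashing on counter drift), subtract it from the per-inode data reservation, and only once that reaches zero hand back the whole metadata reservation as well. A condensed sketch, with an illustrative struct in place of the ext4 inode fields:

struct demo_reserve {
        unsigned int data_blocks;       /* reserved, not yet written delalloc blocks */
        unsigned int meta_blocks;       /* index blocks reserved on their behalf */
};

/* Returns the number of blocks to give back to the fs-wide dirty counter. */
static unsigned int demo_release(struct demo_reserve *r, unsigned int to_free)
{
        if (to_free > r->data_blocks)
                to_free = r->data_blocks;       /* counter drift: clamp, don't crash */
        r->data_blocks -= to_free;

        if (r->data_blocks == 0) {
                /* Last delayed block gone: the metadata reservation goes too. */
                to_free += r->meta_blocks;
                r->meta_blocks = 0;
        }
        return to_free;
}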
@@ -2229,10 +2262,10 @@ static int mpage_da_map_blocks(struct mp
* variables are updated after the blocks have been allocated.
*/
new.b_state = 0;
- get_blocks_flags = (EXT4_GET_BLOCKS_CREATE |
- EXT4_GET_BLOCKS_DELALLOC_RESERVE);
+ get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
if (mpd->b_state & (1 << BH_Delay))
- get_blocks_flags |= EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE;
+ get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
+
blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks,
&new, get_blocks_flags);
if (blks < 0) {
@@ -2261,7 +2294,7 @@ static int mpage_da_map_blocks(struct mp
ext4_msg(mpd->inode->i_sb, KERN_CRIT,
"delayed block allocation failed for inode %lu at "
"logical offset %llu with max blocks %zd with "
- "error %d\n", mpd->inode->i_ino,
+ "error %d", mpd->inode->i_ino,
(unsigned long long) next,
mpd->b_size >> mpd->inode->i_blkbits, err);
printk(KERN_CRIT "This should not happen!! "
@@ -2328,8 +2361,17 @@ static void mpage_add_bh_to_extent(struc
sector_t next;
int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
+ /*
+ * XXX Don't go larger than mballoc is willing to allocate.
+ * This is a stopgap solution; we eventually need to fold
+ * mpage_da_submit_io() into this function and then call
+ * ext4_get_blocks() multiple times in a loop.
+ */
+ if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
+ goto flush_it;
+
/* check if thereserved journal credits might overflow */
- if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) {
+ if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
if (nrblocks >= EXT4_MAX_TRANS_DATA) {
/*
* With non-extent format we are limited by the journal
@@ -2530,7 +2572,7 @@ static int ext4_da_get_block_prep(struct
* XXX: __block_prepare_write() unmaps passed block,
* is it OK?
*/
- ret = ext4_da_reserve_space(inode, 1);
+ ret = ext4_da_reserve_space(inode, iblock);
if (ret)
/* not enough space to reserve */
return ret;
@@ -2641,7 +2683,7 @@ static int __ext4_journalled_writepage(s
ret = err;
walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
- EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
+ ext4_set_inode_state(inode, EXT4_STATE_JDATA);
out:
return ret;
}
@@ -2794,7 +2836,7 @@ static int ext4_da_writepages_trans_bloc
* number of contiguous block. So we will limit
* number of contiguous block to a sane value
*/
- if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) &&
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
(max_blocks > EXT4_MAX_TRANS_DATA))
max_blocks = EXT4_MAX_TRANS_DATA;
@@ -2914,7 +2956,7 @@ retry:
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
- "%ld pages, ino %lu; err %d\n", __func__,
+ "%ld pages, ino %lu; err %d", __func__,
wbc->nr_to_write, inode->i_ino, ret);
goto out_writepages;
}
@@ -2989,7 +3031,7 @@ retry:
if (pages_skipped != wbc->pages_skipped)
ext4_msg(inode->i_sb, KERN_CRIT,
"This should not happen leaving %s "
- "with nr_to_write = %ld ret = %d\n",
+ "with nr_to_write = %ld ret = %d",
__func__, wbc->nr_to_write, ret);
/* Update index */
@@ -3005,8 +3047,7 @@ retry:
out_writepages:
if (!no_nrwrite_index_update)
wbc->no_nrwrite_index_update = 0;
- if (wbc->nr_to_write > nr_to_writebump)
- wbc->nr_to_write -= nr_to_writebump;
+ wbc->nr_to_write -= nr_to_writebump;
wbc->range_start = range_start;
trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
return ret;
@@ -3031,11 +3072,18 @@ static int ext4_nonda_switch(struct supe
if (2 * free_blocks < 3 * dirty_blocks ||
free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) {
/*
- * free block count is less that 150% of dirty blocks
- * or free blocks is less that watermark
+ * free block count is less than 150% of dirty blocks
+ * or free blocks is less than watermark
*/
return 1;
}
+ /*
+ * Even if we don't switch but are nearing capacity,
+ * start pushing delalloc when 1/2 of free blocks are dirty.
+ */
+ if (free_blocks < 2 * dirty_blocks)
+ writeback_inodes_sb_if_idle(sb);
+
return 0;
}
@@ -3043,7 +3091,7 @@ static int ext4_da_write_begin(struct fi
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
- int ret, retries = 0;
+ int ret, retries = 0, quota_retries = 0;
struct page *page;
pgoff_t index;
unsigned from, to;
@@ -3102,6 +3150,22 @@ retry:
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
+
+ if ((ret == -EDQUOT) &&
+ EXT4_I(inode)->i_reserved_meta_blocks &&
+ (quota_retries++ < 3)) {
+ /*
+ * Since we often over-estimate the number of meta
+ * data blocks required, we may sometimes get a
+ * spurious out-of-quota error even though there would
+ * be enough space once we write the data blocks and
+ * find out how many meta data blocks were _really_
+ * required. So try forcing the inode write to see if
+ * that helps.
+ */
+ write_inode_now(inode, (quota_retries == 3));
+ goto retry;
+ }
out:
return ret;
}
@@ -3290,7 +3354,8 @@ static sector_t ext4_bmap(struct address
filemap_write_and_wait(mapping);
}
- if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
+ if (EXT4_JOURNAL(inode) &&
+ ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
/*
* This is a REALLY heavyweight approach, but the use of
* bmap on dirty files is expected to be extremely rare:
@@ -3309,7 +3374,7 @@ static sector_t ext4_bmap(struct address
* everything they get.
*/
- EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
+ ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
journal = EXT4_JOURNAL(inode);
jbd2_journal_lock_updates(journal);
err = jbd2_journal_flush(journal);
@@ -3425,6 +3490,9 @@ retry:
* but cannot extend i_size. Bail out and pretend
* the write failed... */
ret = PTR_ERR(handle);
+ if (inode->i_nlink)
+ ext4_orphan_del(NULL, inode);
+
goto out;
}
if (inode->i_nlink)
@@ -3540,7 +3608,7 @@ static int ext4_end_aio_dio_nolock(ext4_
{
struct inode *inode = io->inode;
loff_t offset = io->offset;
- size_t size = io->size;
+ ssize_t size = io->size;
int ret = 0;
ext4_debug("end_aio_dio_onlock: io 0x%p from inode %lu,list->next 0x%p,"
@@ -3777,8 +3845,8 @@ static ssize_t ext4_ext_direct_IO(int rw
if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
ext4_free_io_end(iocb->private);
iocb->private = NULL;
- } else if (ret > 0 && (EXT4_I(inode)->i_state &
- EXT4_STATE_DIO_UNWRITTEN)) {
+ } else if (ret > 0 && ext4_test_inode_state(inode,
+ EXT4_STATE_DIO_UNWRITTEN)) {
int err;
/*
* for non AIO case, since the IO is already
@@ -3788,7 +3856,7 @@ static ssize_t ext4_ext_direct_IO(int rw
offset, ret);
if (err < 0)
ret = err;
- EXT4_I(inode)->i_state &= ~EXT4_STATE_DIO_UNWRITTEN;
+ ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
}
return ret;
}
@@ -3804,7 +3872,7 @@ static ssize_t ext4_direct_IO(int rw, st
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
- if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
@@ -4435,10 +4503,12 @@ void ext4_truncate(struct inode *inode)
if (!ext4_can_truncate(inode))
return;
+ ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
+
if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
- ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE;
+ ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
- if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
ext4_ext_truncate(inode);
return;
}
@@ -4722,7 +4792,7 @@ int ext4_get_inode_loc(struct inode *ino
{
/* We have all inode data except xattrs in memory here. */
return __ext4_get_inode_loc(inode, iloc,
- !(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
+ !ext4_test_inode_state(inode, EXT4_STATE_XATTR));
}
void ext4_set_inode_flags(struct inode *inode)
@@ -4816,7 +4886,7 @@ struct inode *ext4_iget(struct super_blo
}
inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
- ei->i_state = 0;
+ ei->i_state_flags = 0;
ei->i_dir_start_lookup = 0;
ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
/* We now have enough fields to check if the inode was active or not.
@@ -4899,7 +4969,7 @@ struct inode *ext4_iget(struct super_blo
EXT4_GOOD_OLD_INODE_SIZE +
ei->i_extra_isize;
if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
- ei->i_state |= EXT4_STATE_XATTR;
+ ext4_set_inode_state(inode, EXT4_STATE_XATTR);
}
} else
ei->i_extra_isize = 0;
@@ -5039,7 +5109,7 @@ static int ext4_do_update_inode(handle_t
/* For fields not not tracking in the in-memory inode,
* initialise them to zero for new inodes. */
- if (ei->i_state & EXT4_STATE_NEW)
+ if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
ext4_get_inode_flags(ei);
@@ -5103,7 +5173,7 @@ static int ext4_do_update_inode(handle_t
EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
sb->s_dirt = 1;
ext4_handle_sync(handle);
- err = ext4_handle_dirty_metadata(handle, inode,
+ err = ext4_handle_dirty_metadata(handle, NULL,
EXT4_SB(sb)->s_sbh);
}
}
@@ -5132,10 +5202,10 @@ static int ext4_do_update_inode(handle_t
}
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
- rc = ext4_handle_dirty_metadata(handle, inode, bh);
+ rc = ext4_handle_dirty_metadata(handle, NULL, bh);
if (!err)
err = rc;
- ei->i_state &= ~EXT4_STATE_NEW;
+ ext4_clear_inode_state(inode, EXT4_STATE_NEW);
ext4_update_inode_fsync_trans(handle, inode, 0);
out_brelse:
@@ -5200,7 +5270,7 @@ int ext4_write_inode(struct inode *inode
} else {
struct ext4_iloc iloc;
- err = ext4_get_inode_loc(inode, &iloc);
+ err = __ext4_get_inode_loc(inode, &iloc, 0);
if (err)
return err;
if (wait)
@@ -5213,6 +5283,7 @@ int ext4_write_inode(struct inode *inode
(unsigned long long)iloc.bh->b_blocknr);
err = -EIO;
}
+ brelse(iloc.bh);
}
return err;
}
@@ -5279,7 +5350,7 @@ int ext4_setattr(struct dentry *dentry,
}
if (attr->ia_valid & ATTR_SIZE) {
- if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
if (attr->ia_size > sbi->s_bitmap_maxbytes) {
@@ -5290,7 +5361,9 @@ int ext4_setattr(struct dentry *dentry,
}
if (S_ISREG(inode->i_mode) &&
- attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
+ attr->ia_valid & ATTR_SIZE &&
+ (attr->ia_size < inode->i_size ||
+ (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)))) {
handle_t *handle;
handle = ext4_journal_start(inode, 3);
@@ -5321,6 +5394,9 @@ int ext4_setattr(struct dentry *dentry,
goto err_out;
}
}
+ /* ext4_truncate will clear the flag */
+ if ((ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)))
+ ext4_truncate(inode);
}
rc = inode_setattr(inode, attr);
@@ -5395,7 +5471,7 @@ static int ext4_indirect_trans_blocks(st
static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
- if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
}
@@ -5559,8 +5635,8 @@ static int ext4_expand_extra_isize(struc
entry = IFIRST(header);
/* No extended attributes present */
- if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
- header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
+ if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
+ header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
new_extra_isize);
EXT4_I(inode)->i_extra_isize = new_extra_isize;
@@ -5604,7 +5680,7 @@ int ext4_mark_inode_dirty(handle_t *hand
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (ext4_handle_valid(handle) &&
EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
- !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
+ !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
/*
* We need extra buffer credits since we may write into EA block
* with this same handle. If journal_extend fails, then it will
@@ -5618,7 +5694,8 @@ int ext4_mark_inode_dirty(handle_t *hand
sbi->s_want_extra_isize,
iloc, handle);
if (ret) {
- EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
+ ext4_set_inode_state(inode,
+ EXT4_STATE_NO_EXPAND);
if (mnt_count !=
le16_to_cpu(sbi->s_es->s_mnt_count)) {
ext4_warning(inode->i_sb, __func__,
@@ -5685,7 +5762,7 @@ static int ext4_pin_inode(handle_t *hand
err = jbd2_journal_get_write_access(handle, iloc.bh);
if (!err)
err = ext4_handle_dirty_metadata(handle,
- inode,
+ NULL,
iloc.bh);
brelse(iloc.bh);
}
@@ -5729,9 +5806,9 @@ int ext4_change_inode_journal_flag(struc
*/
if (val)
- EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
+ ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
else
- EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
+ ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
ext4_set_aops(inode);
jbd2_journal_unlock_updates(journal);
file:b63d193126dbf759236a52f075af914bc921d40a -> file:bf5ae883b1bdc30ff55cbc616bab7c24f9d57dbb
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -92,6 +92,15 @@ long ext4_ioctl(struct file *filp, unsig
flags &= ~EXT4_EXTENTS_FL;
}
+ if (flags & EXT4_EOFBLOCKS_FL) {
+ /* we don't support adding EOFBLOCKS flag */
+ if (!(oldflags & EXT4_EOFBLOCKS_FL)) {
+ err = -EOPNOTSUPP;
+ goto flags_out;
+ }
+ } else if (oldflags & EXT4_EOFBLOCKS_FL)
+ ext4_truncate(inode);
+
handle = ext4_journal_start(inode, 1);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
@@ -249,7 +258,8 @@ setversion_out:
if (me.moved_len > 0)
file_remove_suid(donor_filp);
- if (copy_to_user((struct move_extent *)arg, &me, sizeof(me)))
+ if (copy_to_user((struct move_extent __user *)arg,
+ &me, sizeof(me)))
err = -EFAULT;
mext_out:
fput(donor_filp);
@@ -363,7 +373,30 @@ long ext4_compat_ioctl(struct file *file
case EXT4_IOC32_SETRSVSZ:
cmd = EXT4_IOC_SETRSVSZ;
break;
- case EXT4_IOC_GROUP_ADD:
+ case EXT4_IOC32_GROUP_ADD: {
+ struct compat_ext4_new_group_input __user *uinput;
+ struct ext4_new_group_input input;
+ mm_segment_t old_fs;
+ int err;
+
+ uinput = compat_ptr(arg);
+ err = get_user(input.group, &uinput->group);
+ err |= get_user(input.block_bitmap, &uinput->block_bitmap);
+ err |= get_user(input.inode_bitmap, &uinput->inode_bitmap);
+ err |= get_user(input.inode_table, &uinput->inode_table);
+ err |= get_user(input.blocks_count, &uinput->blocks_count);
+ err |= get_user(input.reserved_blocks,
+ &uinput->reserved_blocks);
+ if (err)
+ return -EFAULT;
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = ext4_ioctl(file, EXT4_IOC_GROUP_ADD,
+ (unsigned long) &input);
+ set_fs(old_fs);
+ return err;
+ }
+ case EXT4_IOC_MOVE_EXT:
break;
default:
return -ENOIOCTLCMD;
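The SETFLAGS hunk above enforces an asymmetric rule for the new EOFBLOCKS flag: userspace may keep or drop it, but never add it, and dropping it trims the blocks that had been preallocated past EOF. A tiny sketch of just that decision; the flag value and the truncate call are stand-ins:

#include <errno.h>

#define DEMO_EOFBLOCKS_FL 0x00400000    /* illustrative bit value */

static void demo_truncate(void)
{
        /* stand-in for ext4_truncate() trimming blocks past EOF */
}

static int demo_eofblocks_rule(unsigned int oldflags, unsigned int newflags)
{
        if (newflags & DEMO_EOFBLOCKS_FL) {
                if (!(oldflags & DEMO_EOFBLOCKS_FL))
                        return -EOPNOTSUPP;     /* adding the flag is refused */
        } else if (oldflags & DEMO_EOFBLOCKS_FL) {
                demo_truncate();                /* dropping it frees blocks past EOF */
        }
        return 0;
}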
file:7d7114818f8dfe7ec709cd1ac124eccc34ffbc6b -> file:04e07e227278d0bc13b22fda9e5e9a04107e5b7b
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -658,6 +658,27 @@ static void ext4_mb_mark_free_simple(str
}
}
+/*
+ * Cache the order of the largest free extent we have available in this block
+ * group.
+ */
+static void
+mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
+{
+ int i;
+ int bits;
+
+ grp->bb_largest_free_order = -1; /* uninit */
+
+ bits = sb->s_blocksize_bits + 1;
+ for (i = bits; i >= 0; i--) {
+ if (grp->bb_counters[i] > 0) {
+ grp->bb_largest_free_order = i;
+ break;
+ }
+ }
+}
+
static noinline_for_stack
void ext4_mb_generate_buddy(struct super_block *sb,
void *buddy, void *bitmap, ext4_group_t group)
@@ -700,6 +721,7 @@ void ext4_mb_generate_buddy(struct super
*/
grp->bb_free = free;
}
+ mb_set_largest_free_order(sb, grp);
clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
@@ -725,6 +747,9 @@ void ext4_mb_generate_buddy(struct super
* contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks.
* So it can have information regarding groups_per_page which
* is blocks_per_page/2
+ *
+ * Locking note: This routine takes the block group lock of all groups
+ * for this page; do not hold this lock when calling this routine!
*/
static int ext4_mb_init_cache(struct page *page, char *incore)
@@ -910,6 +935,11 @@ out:
return err;
}
+/*
+ * Locking note: This routine calls ext4_mb_init_cache(), which takes the
+ * block group lock of all groups for this page; do not hold the BG lock when
+ * calling this routine!
+ */
static noinline_for_stack
int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
{
@@ -1004,6 +1034,11 @@ err:
return ret;
}
+/*
+ * Locking note: This routine calls ext4_mb_init_cache(), which takes the
+ * block group lock of all groups for this page; do not hold the BG lock when
+ * calling this routine!
+ */
static noinline_for_stack int
ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
struct ext4_buddy *e4b)
@@ -1150,7 +1185,7 @@ err:
return ret;
}
-static void ext4_mb_release_desc(struct ext4_buddy *e4b)
+static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
{
if (e4b->bd_bitmap_page)
page_cache_release(e4b->bd_bitmap_page);
@@ -1300,6 +1335,7 @@ static void mb_free_blocks(struct inode
buddy = buddy2;
} while (1);
}
+ mb_set_largest_free_order(sb, e4b->bd_info);
mb_check_buddy(e4b);
}
@@ -1428,6 +1464,7 @@ static int mb_mark_used(struct ext4_budd
e4b->bd_info->bb_counters[ord]++;
e4b->bd_info->bb_counters[ord]++;
}
+ mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
mb_check_buddy(e4b);
@@ -1618,7 +1655,7 @@ int ext4_mb_try_best_found(struct ext4_a
}
ext4_unlock_group(ac->ac_sb, group);
- ext4_mb_release_desc(e4b);
+ ext4_mb_unload_buddy(e4b);
return 0;
}
@@ -1674,7 +1711,7 @@ int ext4_mb_find_by_goal(struct ext4_all
ext4_mb_use_best_found(ac, e4b);
}
ext4_unlock_group(ac->ac_sb, group);
- ext4_mb_release_desc(e4b);
+ ext4_mb_unload_buddy(e4b);
return 0;
}
@@ -1823,16 +1860,22 @@ void ext4_mb_scan_aligned(struct ext4_al
}
}
+/* This is now called BEFORE we load the buddy bitmap. */
static int ext4_mb_good_group(struct ext4_allocation_context *ac,
ext4_group_t group, int cr)
{
unsigned free, fragments;
- unsigned i, bits;
int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
BUG_ON(cr < 0 || cr >= 4);
- BUG_ON(EXT4_MB_GRP_NEED_INIT(grp));
+
+ /* We only do this if the grp has never been initialized */
+ if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
+ int ret = ext4_mb_init_group(ac->ac_sb, group);
+ if (ret)
+ return 0;
+ }
free = grp->bb_free;
fragments = grp->bb_fragments;
@@ -1845,17 +1888,16 @@ static int ext4_mb_good_group(struct ext
case 0:
BUG_ON(ac->ac_2order == 0);
+ if (grp->bb_largest_free_order < ac->ac_2order)
+ return 0;
+
/* Avoid using the first bg of a flexgroup for data files */
if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
(flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
((group % flex_size) == 0))
return 0;
- bits = ac->ac_sb->s_blocksize_bits + 1;
- for (i = ac->ac_2order; i <= bits; i++)
- if (grp->bb_counters[i] > 0)
- return 1;
- break;
+ return 1;
case 1:
if ((free / fragments) >= ac->ac_g_ex.fe_len)
return 1;
@@ -1966,7 +2008,7 @@ ext4_mb_regular_allocator(struct ext4_al
sbi = EXT4_SB(sb);
ngroups = ext4_get_groups_count(sb);
/* non-extent files are limited to low blocks/groups */
- if (!(EXT4_I(ac->ac_inode)->i_flags & EXT4_EXTENTS_FL))
+ if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
ngroups = sbi->s_blockfile_groups;
BUG_ON(ac->ac_status == AC_STATUS_FOUND);
@@ -2026,15 +2068,11 @@ repeat:
group = ac->ac_g_ex.fe_group;
for (i = 0; i < ngroups; group++, i++) {
- struct ext4_group_info *grp;
- struct ext4_group_desc *desc;
-
if (group == ngroups)
group = 0;
- /* quick check to skip empty groups */
- grp = ext4_get_group_info(sb, group);
- if (grp->bb_free == 0)
+ /* This now checks without needing the buddy page */
+ if (!ext4_mb_good_group(ac, group, cr))
continue;
err = ext4_mb_load_buddy(sb, group, &e4b);
@@ -2042,15 +2080,18 @@ repeat:
goto out;
ext4_lock_group(sb, group);
+
+ /*
+ * We need to check again after locking the
+ * block group
+ */
if (!ext4_mb_good_group(ac, group, cr)) {
- /* someone did allocation from this group */
ext4_unlock_group(sb, group);
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
continue;
}
ac->ac_groups_scanned++;
- desc = ext4_get_group_desc(sb, group, NULL);
if (cr == 0)
ext4_mb_simple_scan_group(ac, &e4b);
else if (cr == 1 &&
@@ -2060,7 +2101,7 @@ repeat:
ext4_mb_complex_scan_group(ac, &e4b);
ext4_unlock_group(sb, group);
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
if (ac->ac_status != AC_STATUS_CONTINUE)
break;
@@ -2150,7 +2191,7 @@ static int ext4_mb_seq_groups_show(struc
ext4_lock_group(sb, group);
memcpy(&sg, ext4_get_group_info(sb, group), i);
ext4_unlock_group(sb, group);
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
sg.info.bb_fragments, sg.info.bb_first_free);
@@ -2257,6 +2298,7 @@ int ext4_mb_add_groupinfo(struct super_b
INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
init_rwsem(&meta_group_info[i]->alloc_sem);
meta_group_info[i]->bb_free_root.rb_node = NULL;
+ meta_group_info[i]->bb_largest_free_order = -1; /* uninit */
#ifdef DOUBLE_CHECK
{
@@ -2537,6 +2579,23 @@ static void release_blocks_on_commit(jou
mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
entry->count, entry->group, entry);
+ if (test_opt(sb, DISCARD)) {
+ int ret;
+ ext4_fsblk_t discard_block;
+
+ discard_block = entry->start_blk +
+ ext4_group_first_block_no(sb, entry->group);
+ trace_ext4_discard_blocks(sb,
+ (unsigned long long)discard_block,
+ entry->count);
+ ret = sb_issue_discard(sb, discard_block, entry->count);
+ if (ret == EOPNOTSUPP) {
+ ext4_warning(sb, __func__,
+ "discard not supported, disabling");
+ clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
+ }
+ }
+
err = ext4_mb_load_buddy(sb, entry->group, &e4b);
/* we expect to find existing buddy because it's pinned */
BUG_ON(err != 0);
@@ -2558,21 +2617,8 @@ static void release_blocks_on_commit(jou
page_cache_release(e4b.bd_bitmap_page);
}
ext4_unlock_group(sb, entry->group);
- if (test_opt(sb, DISCARD)) {
- ext4_fsblk_t discard_block;
- struct ext4_super_block *es = EXT4_SB(sb)->s_es;
-
- discard_block = (ext4_fsblk_t)entry->group *
- EXT4_BLOCKS_PER_GROUP(sb)
- + entry->start_blk
- + le32_to_cpu(es->s_first_data_block);
- trace_ext4_discard_blocks(sb,
- (unsigned long long)discard_block,
- entry->count);
- sb_issue_discard(sb, discard_block, entry->count);
- }
kmem_cache_free(ext4_free_ext_cachep, entry);
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
}
mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
@@ -2755,12 +2801,6 @@ ext4_mb_mark_diskspace_used(struct ext4_
if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
/* release all the reserved blocks if non delalloc */
percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
- else {
- percpu_counter_sub(&sbi->s_dirtyblocks_counter,
- ac->ac_b_ex.fe_len);
- /* convert reserved quota blocks to real quota blocks */
- vfs_dq_claim_block(ac->ac_inode, ac->ac_b_ex.fe_len);
- }
if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi,
@@ -3136,7 +3176,7 @@ ext4_mb_use_preallocated(struct ext4_all
continue;
/* non-extent files can't have physical blocks past 2^32 */
- if (!(EXT4_I(ac->ac_inode)->i_flags & EXT4_EXTENTS_FL) &&
+ if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
pa->pa_pstart + pa->pa_len > EXT4_MAX_BLOCK_FILE_PHYS)
continue;
@@ -3715,7 +3755,7 @@ out:
ext4_unlock_group(sb, group);
if (ac)
kmem_cache_free(ext4_ac_cachep, ac);
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
put_bh(bitmap_bh);
return free;
}
@@ -3819,7 +3859,7 @@ repeat:
if (bitmap_bh == NULL) {
ext4_error(sb, __func__, "Error in reading block "
"bitmap for %u", group);
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
continue;
}
@@ -3828,7 +3868,7 @@ repeat:
ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
ext4_unlock_group(sb, group);
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
put_bh(bitmap_bh);
list_del(&pa->u.pa_tmp_list);
@@ -3944,7 +3984,7 @@ static void ext4_mb_group_or_file(struct
/* don't use group allocation for large files */
size = max(size, isize);
- if (size >= sbi->s_mb_stream_request) {
+ if (size > sbi->s_mb_stream_request) {
ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
return;
}
@@ -4092,7 +4132,7 @@ ext4_mb_discard_lg_preallocations(struct
ext4_mb_release_group_pa(&e4b, pa, ac);
ext4_unlock_group(sb, group);
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
list_del(&pa->u.pa_tmp_list);
call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
}
@@ -4594,7 +4634,7 @@ do_more:
atomic_add(count, &sbi->s_flex_groups[flex_group].free_blocks);
}
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
*freed += count;
file:864614974536f094e200d4dea099391548b07913 -> file:7901f133f967cad48e99718d6afcc71665050bc6
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -357,12 +357,12 @@ static int ext4_ext_swap_inode_data(hand
* happened after we started the migrate. We need to
* fail the migrate
*/
- if (!(EXT4_I(inode)->i_state & EXT4_STATE_EXT_MIGRATE)) {
+ if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
retval = -EAGAIN;
up_write(&EXT4_I(inode)->i_data_sem);
goto err_out;
} else
- EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE;
+ ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
/*
* We have the extent map build with the tmp inode.
* Now copy the i_data across
@@ -465,7 +465,7 @@ int ext4_ext_migrate(struct inode *inode
*/
if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_INCOMPAT_EXTENTS) ||
- (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
+ (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
return -EINVAL;
if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
@@ -494,14 +494,10 @@ int ext4_ext_migrate(struct inode *inode
}
i_size_write(tmp_inode, i_size_read(inode));
/*
- * We don't want the inode to be reclaimed
- * if we got interrupted in between. We have
- * this tmp inode carrying reference to the
- * data blocks of the original file. We set
- * the i_nlink to zero at the last stage after
- * switching the original file to extent format
+ * Set the i_nlink to zero so it will be deleted later
+ * when we drop the inode reference.
*/
- tmp_inode->i_nlink = 1;
+ tmp_inode->i_nlink = 0;
ext4_ext_tree_init(handle, tmp_inode);
ext4_orphan_add(handle, tmp_inode);
@@ -524,10 +520,20 @@ int ext4_ext_migrate(struct inode *inode
* allocation.
*/
down_read((&EXT4_I(inode)->i_data_sem));
- EXT4_I(inode)->i_state |= EXT4_STATE_EXT_MIGRATE;
+ ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
up_read((&EXT4_I(inode)->i_data_sem));
handle = ext4_journal_start(inode, 1);
+ if (IS_ERR(handle)) {
+ /*
+ * It is impossible to update on-disk structures without
+ * a handle, so just roll back in-core changes and leave other
+ * work to orphan_list_cleanup()
+ */
+ ext4_orphan_del(NULL, tmp_inode);
+ retval = PTR_ERR(handle);
+ goto out;
+ }
ei = EXT4_I(inode);
i_data = ei->i_data;
@@ -609,15 +615,8 @@ err_out:
/* Reset the extent details */
ext4_ext_tree_init(handle, tmp_inode);
-
- /*
- * Set the i_nlink to zero so that
- * generic_drop_inode really deletes the
- * inode
- */
- tmp_inode->i_nlink = 0;
-
ext4_journal_stop(handle);
+out:
unlock_new_inode(tmp_inode);
iput(tmp_inode);
file:f5b03a132a2ac1ff195a9c46d8e1427a8f0eac79 -> file:5b14d11438b7a5747c626453df5519f3fd70a593
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -252,6 +252,7 @@ mext_insert_across_blocks(handle_t *hand
}
o_start->ee_len = start_ext->ee_len;
+ eblock = le32_to_cpu(start_ext->ee_block);
new_flag = 1;
} else if (start_ext->ee_len && new_ext->ee_len &&
@@ -262,6 +263,7 @@ mext_insert_across_blocks(handle_t *hand
* orig |------------------------------|
*/
o_start->ee_len = start_ext->ee_len;
+ eblock = le32_to_cpu(start_ext->ee_block);
new_flag = 1;
} else if (!start_ext->ee_len && new_ext->ee_len &&
@@ -475,7 +477,6 @@ mext_leaf_block(handle_t *handle, struct
struct ext4_extent *oext, *o_start, *o_end, *prev_ext;
struct ext4_extent new_ext, start_ext, end_ext;
ext4_lblk_t new_ext_end;
- ext4_fsblk_t new_phys_end;
int oext_alen, new_ext_alen, end_ext_alen;
int depth = ext_depth(orig_inode);
int ret;
@@ -489,7 +490,6 @@ mext_leaf_block(handle_t *handle, struct
new_ext.ee_len = dext->ee_len;
new_ext_alen = ext4_ext_get_actual_len(&new_ext);
new_ext_end = le32_to_cpu(new_ext.ee_block) + new_ext_alen - 1;
- new_phys_end = ext_pblock(&new_ext) + new_ext_alen - 1;
/*
* Case: original extent is first
@@ -502,6 +502,7 @@ mext_leaf_block(handle_t *handle, struct
le32_to_cpu(oext->ee_block) + oext_alen) {
start_ext.ee_len = cpu_to_le16(le32_to_cpu(new_ext.ee_block) -
le32_to_cpu(oext->ee_block));
+ start_ext.ee_block = oext->ee_block;
copy_extent_status(oext, &start_ext);
} else if (oext > EXT_FIRST_EXTENT(orig_path[depth].p_hdr)) {
prev_ext = oext - 1;
@@ -515,6 +516,7 @@ mext_leaf_block(handle_t *handle, struct
start_ext.ee_len = cpu_to_le16(
ext4_ext_get_actual_len(prev_ext) +
new_ext_alen);
+ start_ext.ee_block = oext->ee_block;
copy_extent_status(prev_ext, &start_ext);
new_ext.ee_len = 0;
}
@@ -928,7 +930,7 @@ out2:
}
/**
- * mext_check_argumants - Check whether move extent can be done
+ * mext_check_arguments - Check whether move extent can be done
*
* @orig_inode: original inode
* @donor_inode: donor inode
@@ -949,14 +951,6 @@ mext_check_arguments(struct inode *orig_
unsigned int blkbits = orig_inode->i_blkbits;
unsigned int blocksize = 1 << blkbits;
- /* Regular file check */
- if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
- ext4_debug("ext4 move extent: The argument files should be "
- "regular file [ino:orig %lu, donor %lu]\n",
- orig_inode->i_ino, donor_inode->i_ino);
- return -EINVAL;
- }
-
if (donor_inode->i_mode & (S_ISUID|S_ISGID)) {
ext4_debug("ext4 move extent: suid or sgid is set"
" to donor file [ino:orig %lu, donor %lu]\n",
@@ -981,11 +975,11 @@ mext_check_arguments(struct inode *orig_
}
/* Ext4 move extent supports only extent based file */
- if (!(EXT4_I(orig_inode)->i_flags & EXT4_EXTENTS_FL)) {
+ if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
ext4_debug("ext4 move extent: orig file is not extents "
"based file [ino:orig %lu]\n", orig_inode->i_ino);
return -EOPNOTSUPP;
- } else if (!(EXT4_I(donor_inode)->i_flags & EXT4_EXTENTS_FL)) {
+ } else if (!(ext4_test_inode_flag(donor_inode, EXT4_INODE_EXTENTS))) {
ext4_debug("ext4 move extent: donor file is not extents "
"based file [ino:donor %lu]\n", donor_inode->i_ino);
return -EOPNOTSUPP;
@@ -1203,6 +1197,14 @@ ext4_move_extents(struct file *o_filp, s
orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
+
+ /* Regular file check */
+ if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
+ ext4_debug("ext4 move extent: The argument files should be "
+ "regular file [ino:orig %lu, donor %lu]\n",
+ orig_inode->i_ino, donor_inode->i_ino);
+ return -EINVAL;
+ }
/* Protect orig and donor inodes against a truncate */
ret1 = mext_inode_double_lock(orig_inode, donor_inode);
file:17a17e10dd605198ddfe2dfed530d4cfff6812c1 -> file:c3b6ad0fcc46628e978869f80d5ef343b904d0d5
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -660,7 +660,7 @@ int ext4_htree_fill_tree(struct file *di
dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n",
start_hash, start_minor_hash));
dir = dir_file->f_path.dentry->d_inode;
- if (!(EXT4_I(dir)->i_flags & EXT4_INDEX_FL)) {
+ if (!(ext4_test_inode_flag(dir, EXT4_INODE_INDEX))) {
hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
if (hinfo.hash_version <= DX_HASH_TEA)
hinfo.hash_version +=
@@ -805,7 +805,7 @@ static void ext4_update_dx_flag(struct i
{
if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_COMPAT_DIR_INDEX))
- EXT4_I(inode)->i_flags &= ~EXT4_INDEX_FL;
+ ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
}
/*
@@ -1424,7 +1424,7 @@ static int make_indexed_dir(handle_t *ha
brelse(bh);
return retval;
}
- EXT4_I(dir)->i_flags |= EXT4_INDEX_FL;
+ ext4_set_inode_flag(dir, EXT4_INODE_INDEX);
data1 = bh2->b_data;
memcpy (data1, de, len);
@@ -1497,7 +1497,7 @@ static int ext4_add_entry(handle_t *hand
retval = ext4_dx_add_entry(handle, dentry, inode);
if (!retval || (retval != ERR_BAD_DX_DIR))
return retval;
- EXT4_I(dir)->i_flags &= ~EXT4_INDEX_FL;
+ ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
dx_fallback++;
ext4_mark_inode_dirty(handle, dir);
}
@@ -1525,6 +1525,8 @@ static int ext4_add_entry(handle_t *hand
de->rec_len = ext4_rec_len_to_disk(blocksize, blocksize);
retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
brelse(bh);
+ if (retval == 0)
+ ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
return retval;
}
@@ -2020,11 +2022,18 @@ int ext4_orphan_add(handle_t *handle, st
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (err)
goto out_unlock;
+ /*
+ * Due to previous errors the inode may already be part of the
+ * on-disk orphan list. If so, skip the on-disk list modification.
+ */
+ if (NEXT_ORPHAN(inode) && NEXT_ORPHAN(inode) <=
+ (le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)))
+ goto mem_insert;
/* Insert this inode at the head of the on-disk orphan list... */
NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan);
EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
- err = ext4_handle_dirty_metadata(handle, inode, EXT4_SB(sb)->s_sbh);
+ err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
if (!err)
err = rc;
@@ -2037,6 +2046,7 @@ int ext4_orphan_add(handle_t *handle, st
*
* This is safe: on error we're going to ignore the orphan list
* anyway on the next recovery. */
+mem_insert:
if (!err)
list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
@@ -2096,7 +2106,7 @@ int ext4_orphan_del(handle_t *handle, st
if (err)
goto out_brelse;
sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
- err = ext4_handle_dirty_metadata(handle, inode, sbi->s_sbh);
+ err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
} else {
struct ext4_iloc iloc2;
struct inode *i_prev =
@@ -2284,7 +2294,7 @@ retry:
}
} else {
/* clear the extent format for fast symlink */
- EXT4_I(inode)->i_flags &= ~EXT4_EXTENTS_FL;
+ ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
inode->i_op = &ext4_fast_symlink_inode_operations;
memcpy((char *)&EXT4_I(inode)->i_data, symname, l);
inode->i_size = l-1;
file:3b2c5541d8a686fe7c780a512c0acd3e6771b3ab -> file:433ea27e265299389baddbe900b5bd4ce10b70b9
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -930,7 +930,8 @@ int ext4_group_add(struct super_block *s
percpu_counter_add(&sbi->s_freeinodes_counter,
EXT4_INODES_PER_GROUP(sb));
- if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
+ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
+ sbi->s_log_groups_per_flex) {
ext4_group_t flex_group;
flex_group = ext4_flex_group(sbi, input->group);
atomic_add(input->free_blocks_count,
file:92943f2ca2abcb036f8cad4681d0124ca44d78a0 -> file:54a05ccf52216abdaf70e06c0b6443b5c14fe005
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -227,6 +227,7 @@ handle_t *ext4_journal_start_sb(struct s
if (sb->s_flags & MS_RDONLY)
return ERR_PTR(-EROFS);
+ vfs_check_frozen(sb, SB_FREEZE_WRITE);
/* Special case here: if the journal has aborted behind our
* backs (eg. EIO in the commit thread), then we still need to
* take the FS itself readonly cleanly. */
@@ -702,6 +703,7 @@ static struct inode *ext4_alloc_inode(st
ei->i_reserved_data_blocks = 0;
ei->i_reserved_meta_blocks = 0;
ei->i_allocated_meta_blocks = 0;
+ ei->i_da_metadata_calc_len = 0;
ei->i_delalloc_reserved_flag = 0;
spin_lock_init(&(ei->i_block_reservation_lock));
#ifdef CONFIG_QUOTA
@@ -875,6 +877,8 @@ static int ext4_show_options(struct seq_
seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
if (test_opt(sb, JOURNAL_ASYNC_COMMIT))
seq_puts(seq, ",journal_async_commit");
+ else if (test_opt(sb, JOURNAL_CHECKSUM))
+ seq_puts(seq, ",journal_checksum");
if (test_opt(sb, NOBH))
seq_puts(seq, ",nobh");
if (test_opt(sb, I_VERSION))
@@ -2693,24 +2697,6 @@ static int ext4_fill_super(struct super_
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
spin_lock_init(&sbi->s_next_gen_lock);
- err = percpu_counter_init(&sbi->s_freeblocks_counter,
- ext4_count_free_blocks(sb));
- if (!err) {
- err = percpu_counter_init(&sbi->s_freeinodes_counter,
- ext4_count_free_inodes(sb));
- }
- if (!err) {
- err = percpu_counter_init(&sbi->s_dirs_counter,
- ext4_count_dirs(sb));
- }
- if (!err) {
- err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
- }
- if (err) {
- ext4_msg(sb, KERN_ERR, "insufficient memory");
- goto failed_mount3;
- }
-
sbi->s_stripe = ext4_get_stripe_size(sbi);
sbi->s_max_writeback_mb_bump = 128;
@@ -2830,7 +2816,20 @@ static int ext4_fill_super(struct super_
set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
no_journal:
-
+ err = percpu_counter_init(&sbi->s_freeblocks_counter,
+ ext4_count_free_blocks(sb));
+ if (!err)
+ err = percpu_counter_init(&sbi->s_freeinodes_counter,
+ ext4_count_free_inodes(sb));
+ if (!err)
+ err = percpu_counter_init(&sbi->s_dirs_counter,
+ ext4_count_dirs(sb));
+ if (!err)
+ err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
+ if (err) {
+ ext4_msg(sb, KERN_ERR, "insufficient memory");
+ goto failed_mount_wq;
+ }
if (test_opt(sb, NOBH)) {
if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) {
ext4_msg(sb, KERN_WARNING, "Ignoring nobh option - "
@@ -2905,7 +2904,7 @@ no_journal:
err = ext4_setup_system_zone(sb);
if (err) {
ext4_msg(sb, KERN_ERR, "failed to initialize system "
- "zone (%d)\n", err);
+ "zone (%d)", err);
goto failed_mount4;
}
@@ -2963,6 +2962,10 @@ failed_mount_wq:
jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
}
+ percpu_counter_destroy(&sbi->s_freeblocks_counter);
+ percpu_counter_destroy(&sbi->s_freeinodes_counter);
+ percpu_counter_destroy(&sbi->s_dirs_counter);
+ percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
failed_mount3:
if (sbi->s_flex_groups) {
if (is_vmalloc_addr(sbi->s_flex_groups))
@@ -2970,10 +2973,6 @@ failed_mount3:
else
kfree(sbi->s_flex_groups);
}
- percpu_counter_destroy(&sbi->s_freeblocks_counter);
- percpu_counter_destroy(&sbi->s_freeinodes_counter);
- percpu_counter_destroy(&sbi->s_dirs_counter);
- percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
@@ -3390,8 +3389,10 @@ int ext4_force_commit(struct super_block
return 0;
journal = EXT4_SB(sb)->s_journal;
- if (journal)
+ if (journal) {
+ vfs_check_frozen(sb, SB_FREEZE_WRITE);
ret = ext4_journal_force_commit(journal);
+ }
return ret;
}
@@ -3440,18 +3441,16 @@ static int ext4_freeze(struct super_bloc
* the journal.
*/
error = jbd2_journal_flush(journal);
- if (error < 0) {
- out:
- jbd2_journal_unlock_updates(journal);
- return error;
- }
+ if (error < 0)
+ goto out;
/* Journal blocked and flushed, clear needs_recovery flag. */
EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
error = ext4_commit_super(sb, 1);
- if (error)
- goto out;
- return 0;
+out:
+ /* we rely on s_frozen to stop further updates */
+ jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
+ return error;
}
/*
@@ -3468,7 +3467,6 @@ static int ext4_unfreeze(struct super_bl
EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
ext4_commit_super(sb, 1);
unlock_super(sb);
- jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
return 0;
}
@@ -4001,6 +3999,7 @@ static int __init init_ext4_fs(void)
{
int err;
+ ext4_check_flag_values();
err = init_ext4_system_zone();
if (err)
return err;
file:025701926f9aed63cee73c530ff718a41bdd3946 -> file:4de7d0a75c427b385bafcbb120127ce88532895e
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -267,7 +267,7 @@ ext4_xattr_ibody_get(struct inode *inode
void *end;
int error;
- if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR))
+ if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
return -ENODATA;
error = ext4_get_inode_loc(inode, &iloc);
if (error)
@@ -393,7 +393,7 @@ ext4_xattr_ibody_list(struct inode *inod
void *end;
int error;
- if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR))
+ if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
return 0;
error = ext4_get_inode_loc(inode, &iloc);
if (error)
@@ -816,7 +816,7 @@ inserted:
EXT4_I(inode)->i_block_group);
/* non-extent files can't have physical blocks past 2^32 */
- if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
block = ext4_new_meta_blocks(handle, inode,
@@ -824,7 +824,7 @@ inserted:
if (error)
goto cleanup;
- if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS);
ea_idebug(inode, "creating block %d", block);
@@ -903,7 +903,7 @@ ext4_xattr_ibody_find(struct inode *inod
is->s.base = is->s.first = IFIRST(header);
is->s.here = is->s.first;
is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
- if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) {
+ if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
error = ext4_xattr_check_names(IFIRST(header), is->s.end);
if (error)
return error;
@@ -935,10 +935,10 @@ ext4_xattr_ibody_set(handle_t *handle, s
header = IHDR(inode, ext4_raw_inode(&is->iloc));
if (!IS_LAST_ENTRY(s->first)) {
header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
- EXT4_I(inode)->i_state |= EXT4_STATE_XATTR;
+ ext4_set_inode_state(inode, EXT4_STATE_XATTR);
} else {
header->h_magic = cpu_to_le32(0);
- EXT4_I(inode)->i_state &= ~EXT4_STATE_XATTR;
+ ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
}
return 0;
}
@@ -981,8 +981,8 @@ ext4_xattr_set_handle(handle_t *handle,
if (strlen(name) > 255)
return -ERANGE;
down_write(&EXT4_I(inode)->xattr_sem);
- no_expand = EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND;
- EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
+ no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
+ ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
error = ext4_get_inode_loc(inode, &is.iloc);
if (error)
@@ -992,10 +992,10 @@ ext4_xattr_set_handle(handle_t *handle,
if (error)
goto cleanup;
- if (EXT4_I(inode)->i_state & EXT4_STATE_NEW) {
+ if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) {
struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc);
memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
- EXT4_I(inode)->i_state &= ~EXT4_STATE_NEW;
+ ext4_clear_inode_state(inode, EXT4_STATE_NEW);
}
error = ext4_xattr_ibody_find(inode, &i, &is);
@@ -1047,7 +1047,7 @@ ext4_xattr_set_handle(handle_t *handle,
ext4_xattr_update_super_block(handle, inode->i_sb);
inode->i_ctime = ext4_current_time(inode);
if (!value)
- EXT4_I(inode)->i_state &= ~EXT4_STATE_NO_EXPAND;
+ ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
/*
* The bh is consumed by ext4_mark_iloc_dirty, even with
@@ -1062,7 +1062,7 @@ cleanup:
brelse(is.iloc.bh);
brelse(bs.bh);
if (no_expand == 0)
- EXT4_I(inode)->i_state &= ~EXT4_STATE_NO_EXPAND;
+ ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
up_write(&EXT4_I(inode)->xattr_sem);
return error;
}
@@ -1327,6 +1327,8 @@ retry:
goto cleanup;
kfree(b_entry_name);
kfree(buffer);
+ b_entry_name = NULL;
+ buffer = NULL;
brelse(is->iloc.bh);
kfree(is);
kfree(bs);
file:3fc4e3ac7d84efdc76b4ddbae91f82f90f8c257d -> file:2168da1216475b2b9d3a9611730b77c89a3022d4
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -12,6 +12,7 @@
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
+#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/gfs2_ondisk.h>
@@ -26,61 +27,6 @@
#include "trans.h"
#include "util.h"
-#define ACL_ACCESS 1
-#define ACL_DEFAULT 0
-
-int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
- struct gfs2_ea_request *er, int *remove, mode_t *mode)
-{
- struct posix_acl *acl;
- int error;
-
- error = gfs2_acl_validate_remove(ip, access);
- if (error)
- return error;
-
- if (!er->er_data)
- return -EINVAL;
-
- acl = posix_acl_from_xattr(er->er_data, er->er_data_len);
- if (IS_ERR(acl))
- return PTR_ERR(acl);
- if (!acl) {
- *remove = 1;
- return 0;
- }
-
- error = posix_acl_valid(acl);
- if (error)
- goto out;
-
- if (access) {
- error = posix_acl_equiv_mode(acl, mode);
- if (!error)
- *remove = 1;
- else if (error > 0)
- error = 0;
- }
-
-out:
- posix_acl_release(acl);
- return error;
-}
-
-int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access)
-{
- if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl)
- return -EOPNOTSUPP;
- if (!is_owner_or_cap(&ip->i_inode))
- return -EPERM;
- if (S_ISLNK(ip->i_inode.i_mode))
- return -EOPNOTSUPP;
- if (!access && !S_ISDIR(ip->i_inode.i_mode))
- return -EACCES;
-
- return 0;
-}
-
static int acl_get(struct gfs2_inode *ip, const char *name,
struct posix_acl **acl, struct gfs2_ea_location *el,
char **datap, unsigned int *lenp)
@@ -277,3 +223,117 @@ out_brelse:
return error;
}
+static int gfs2_acl_type(const char *name)
+{
+ if (strcmp(name, GFS2_POSIX_ACL_ACCESS) == 0)
+ return ACL_TYPE_ACCESS;
+ if (strcmp(name, GFS2_POSIX_ACL_DEFAULT) == 0)
+ return ACL_TYPE_DEFAULT;
+ return -EINVAL;
+}
+
+static int gfs2_xattr_system_get(struct inode *inode, const char *name,
+ void *buffer, size_t size)
+{
+ int type;
+
+ type = gfs2_acl_type(name);
+ if (type < 0)
+ return type;
+
+ return gfs2_xattr_get(inode, GFS2_EATYPE_SYS, name, buffer, size);
+}
+
+static int gfs2_set_mode(struct inode *inode, mode_t mode)
+{
+ int error = 0;
+
+ if (mode != inode->i_mode) {
+ struct iattr iattr;
+
+ iattr.ia_valid = ATTR_MODE;
+ iattr.ia_mode = mode;
+
+ error = gfs2_setattr_simple(GFS2_I(inode), &iattr);
+ }
+
+ return error;
+}
+
+static int gfs2_xattr_system_set(struct inode *inode, const char *name,
+ const void *value, size_t size, int flags)
+{
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct posix_acl *acl = NULL;
+ int error = 0, type;
+
+ if (!sdp->sd_args.ar_posix_acl)
+ return -EOPNOTSUPP;
+
+ type = gfs2_acl_type(name);
+ if (type < 0)
+ return type;
+ if (flags & XATTR_CREATE)
+ return -EINVAL;
+ if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
+ return value ? -EACCES : 0;
+ if ((current_fsuid() != inode->i_uid) && !capable(CAP_FOWNER))
+ return -EPERM;
+ if (S_ISLNK(inode->i_mode))
+ return -EOPNOTSUPP;
+
+ if (!value)
+ goto set_acl;
+
+ acl = posix_acl_from_xattr(value, size);
+ if (!acl) {
+ /*
+ * acl_set_file(3) may request that we set default ACLs with
+ * zero length -- defend (gracefully) against that here.
+ */
+ goto out;
+ }
+ if (IS_ERR(acl)) {
+ error = PTR_ERR(acl);
+ goto out;
+ }
+
+ error = posix_acl_valid(acl);
+ if (error)
+ goto out_release;
+
+ error = -EINVAL;
+ if (acl->a_count > GFS2_ACL_MAX_ENTRIES)
+ goto out_release;
+
+ if (type == ACL_TYPE_ACCESS) {
+ mode_t mode = inode->i_mode;
+ error = posix_acl_equiv_mode(acl, &mode);
+
+ if (error <= 0) {
+ posix_acl_release(acl);
+ acl = NULL;
+
+ if (error < 0)
+ return error;
+ }
+
+ error = gfs2_set_mode(inode, mode);
+ if (error)
+ goto out_release;
+ }
+
+set_acl:
+ error = gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, value, size, 0);
+out_release:
+ posix_acl_release(acl);
+out:
+ return error;
+}
+
+struct xattr_handler gfs2_xattr_system_handler = {
+ .prefix = XATTR_SYSTEM_PREFIX,
+ .get = gfs2_xattr_system_get,
+ .set = gfs2_xattr_system_set,
+};
+
file:6751930bfb648e809eb90e028c0caa8458e1e5e8 -> file:cc954390b6daf3e633dd13d9bbb80982baf685a2
--- a/fs/gfs2/acl.h
+++ b/fs/gfs2/acl.h
@@ -13,26 +13,12 @@
#include "incore.h"
#define GFS2_POSIX_ACL_ACCESS "posix_acl_access"
-#define GFS2_POSIX_ACL_ACCESS_LEN 16
#define GFS2_POSIX_ACL_DEFAULT "posix_acl_default"
-#define GFS2_POSIX_ACL_DEFAULT_LEN 17
+#define GFS2_ACL_MAX_ENTRIES 25
-#define GFS2_ACL_IS_ACCESS(name, len) \
- ((len) == GFS2_POSIX_ACL_ACCESS_LEN && \
- !memcmp(GFS2_POSIX_ACL_ACCESS, (name), (len)))
-
-#define GFS2_ACL_IS_DEFAULT(name, len) \
- ((len) == GFS2_POSIX_ACL_DEFAULT_LEN && \
- !memcmp(GFS2_POSIX_ACL_DEFAULT, (name), (len)))
-
-struct gfs2_ea_request;
-
-int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
- struct gfs2_ea_request *er,
- int *remove, mode_t *mode);
-int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access);
-int gfs2_check_acl(struct inode *inode, int mask);
-int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip);
-int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
+extern int gfs2_check_acl(struct inode *inode, int mask);
+extern int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip);
+extern int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
+extern struct xattr_handler gfs2_xattr_system_handler;
#endif /* __ACL_DOT_H__ */
file:4eb308aa32342f0f51eb00cafefda3ecbb6ada17 -> file:b3fd1d8869464c22658dee350245e4d18d06a15f
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -218,6 +218,11 @@ static int do_gfs2_set_flags(struct file
if (error)
goto out_drop_write;
+ error = -EACCES;
+ if (!is_owner_or_cap(inode))
+ goto out;
+
+ error = 0;
flags = ip->i_diskflags;
new_flags = (flags & ~mask) | (reqflags & mask);
if ((new_flags ^ flags) == 0)
@@ -275,8 +280,10 @@ static int gfs2_set_flags(struct file *f
{
struct inode *inode = filp->f_path.dentry->d_inode;
u32 fsflags, gfsflags;
+
if (get_user(fsflags, ptr))
return -EFAULT;
+
gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
if (!S_ISDIR(inode->i_mode)) {
if (gfsflags & GFS2_DIF_INHERIT_JDATA)
@@ -606,7 +613,7 @@ static int gfs2_lock(struct file *file,
if (!(fl->fl_flags & FL_POSIX))
return -ENOLCK;
- if (__mandatory_lock(&ip->i_inode))
+ if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
return -ENOLCK;
if (cmd == F_CANCELLK) {
file:8a0f8ef6ee2700cc5ef62f5090b11d602b9f8b20 -> file:6b803540951ef9c37c905bf1197d51c0f065425e
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -1507,18 +1507,6 @@ static int gfs2_xattr_user_set(struct in
return gfs2_xattr_set(inode, GFS2_EATYPE_USR, name, value, size, flags);
}
-static int gfs2_xattr_system_get(struct inode *inode, const char *name,
- void *buffer, size_t size)
-{
- return gfs2_xattr_get(inode, GFS2_EATYPE_SYS, name, buffer, size);
-}
-
-static int gfs2_xattr_system_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
-{
- return gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, value, size, flags);
-}
-
static int gfs2_xattr_security_get(struct inode *inode, const char *name,
void *buffer, size_t size)
{
@@ -1543,12 +1531,6 @@ static struct xattr_handler gfs2_xattr_s
.set = gfs2_xattr_security_set,
};
-static struct xattr_handler gfs2_xattr_system_handler = {
- .prefix = XATTR_SYSTEM_PREFIX,
- .get = gfs2_xattr_system_get,
- .set = gfs2_xattr_system_set,
-};
-
struct xattr_handler *gfs2_xattr_handlers[] = {
&gfs2_xattr_user_handler,
&gfs2_xattr_security_handler,
file:ca0f5eb62b20e450b38ef84b52c64bc2a214bc17 -> file:886849370950f462627f0a9d2df692ec4d117d97
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -22,6 +22,7 @@
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
+#include <linux/blkdev.h>
#include <trace/events/jbd2.h>
/*
@@ -515,6 +516,20 @@ int jbd2_cleanup_journal_tail(journal_t
journal->j_tail_sequence = first_tid;
journal->j_tail = blocknr;
spin_unlock(&journal->j_state_lock);
+
+ /*
+ * If there is an external journal, we need to make sure that
+ * any data blocks that were recently written out --- perhaps
+ * by jbd2_log_do_checkpoint() --- are flushed out before we
+ * drop the transactions from the external journal. It's
+ * unlikely this will be necessary, especially with an
+ * appropriately sized journal, but we need this to guarantee
+ * correctness. Fortunately jbd2_cleanup_journal_tail()
+ * doesn't get called all that often.
+ */
+ if ((journal->j_fs_dev != journal->j_dev) &&
+ (journal->j_flags & JBD2_BARRIER))
+ blkdev_issue_flush(journal->j_fs_dev, NULL);
if (!(journal->j_flags & JBD2_ABORT))
jbd2_journal_update_superblock(journal, 1);
return 0;
file:8896c1d4febe590e1e6941d76f62833a04813201 -> file:09ab6ac6a075ab8a5d491b26db81f283e32b99f4
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -259,6 +259,7 @@ static int journal_submit_data_buffers(j
ret = err;
spin_lock(&journal->j_list_lock);
J_ASSERT(jinode->i_transaction == commit_transaction);
+ commit_transaction->t_flushed_data_blocks = 1;
jinode->i_flags &= ~JI_COMMIT_RUNNING;
wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
}
@@ -708,8 +709,17 @@ start_journal_io:
}
}
- /* Done it all: now write the commit record asynchronously. */
+ /*
+ * If the journal is not located on the file system device,
+ * then we must flush the file system device before we issue
+ * the commit record
+ */
+ if (commit_transaction->t_flushed_data_blocks &&
+ (journal->j_fs_dev != journal->j_dev) &&
+ (journal->j_flags & JBD2_BARRIER))
+ blkdev_issue_flush(journal->j_fs_dev, NULL);
+ /* Done it all: now write the commit record asynchronously. */
if (JBD2_HAS_INCOMPAT_FEATURE(journal,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
err = journal_submit_commit_record(journal, commit_transaction,
@@ -720,13 +730,6 @@ start_journal_io:
blkdev_issue_flush(journal->j_dev, NULL);
}
- /*
- * This is the right place to wait for data buffers both for ASYNC
- * and !ASYNC commit. If commit is ASYNC, we need to wait only after
- * the commit block went to disk (which happens above). If commit is
- * SYNC, we need to wait for data buffers before we start writing
- * commit block, which happens below in such setting.
- */
err = journal_finish_inode_data_buffers(journal, commit_transaction);
if (err) {
printk(KERN_WARNING
file:b7ca3a92a4dba0b23f555a75cac79fea1b6ae9e4 -> file:17af879e6e9ec157528f3bcab4ad7c01436abc68
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -2115,7 +2115,8 @@ static void __init jbd2_create_debugfs_e
{
jbd2_debugfs_dir = debugfs_create_dir("jbd2", NULL);
if (jbd2_debugfs_dir)
- jbd2_debug = debugfs_create_u8(JBD2_DEBUG_NAME, S_IRUGO,
+ jbd2_debug = debugfs_create_u8(JBD2_DEBUG_NAME,
+ S_IRUGO | S_IWUSR,
jbd2_debugfs_dir,
&jbd2_journal_enable_debug);
}
file:7f24a0bb08ca0a25d31352e9611273de87d2ee1b -> file:1aba0039f1c995ab0909664cc325a917ee5d8c0b
--- a/fs/jfs/resize.c
+++ b/fs/jfs/resize.c
@@ -81,6 +81,7 @@ int jfs_extendfs(struct super_block *sb,
struct inode *iplist[1];
struct jfs_superblock *j_sb, *j_sb2;
uint old_agsize;
+ int agsizechanged = 0;
struct buffer_head *bh, *bh2;
/* If the volume hasn't grown, get out now */
@@ -333,6 +334,9 @@ int jfs_extendfs(struct super_block *sb,
*/
if ((rc = dbExtendFS(ipbmap, XAddress, nblocks)))
goto error_out;
+
+ agsizechanged |= (bmp->db_agsize != old_agsize);
+
/*
* the map now has extended to cover additional nblocks:
* dn_mapsize = oldMapsize + nblocks;
@@ -432,7 +436,7 @@ int jfs_extendfs(struct super_block *sb,
* will correctly identify the new ag);
*/
/* if new AG size the same as old AG size, done! */
- if (bmp->db_agsize != old_agsize) {
+ if (agsizechanged) {
if ((rc = diExtendFS(ipimap, ipbmap)))
goto error_out;
file:99ea196f071f04d81c82fbc42bdcb000b9f0694d -> file:127ed5cf28667ee069b8597943a5f000c450280b
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -965,6 +965,8 @@ out_error:
static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_server *source)
{
target->flags = source->flags;
+ target->rsize = source->rsize;
+ target->wsize = source->wsize;
target->acregmin = source->acregmin;
target->acregmax = source->acregmax;
target->acdirmin = source->acdirmin;
@@ -1283,7 +1285,8 @@ static int nfs4_init_server(struct nfs_s
/* Initialise the client representation from the mount data */
server->flags = data->flags;
- server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR;
+ server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR|
+ NFS_CAP_POSIX_LOCK;
server->options = data->options;
/* Get a client record */
file:09f383795174d065f36a413a6f698b467a93c57c -> file:7f237d243be50cc2bb850a82bcc8df77e4017d6d
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -68,4 +68,10 @@ static inline int nfs_inode_return_deleg
}
#endif
+static inline int nfs_have_delegated_attributes(struct inode *inode)
+{
+ return nfs_have_delegation(inode, FMODE_READ) &&
+ !(NFS_I(inode)->cache_validity & NFS_INO_REVAL_FORCED);
+}
+
#endif
file:7cb298525eefd1a5ebcac1870ce08679447a3f2e -> file:a87cbd8bd0b9527f8e2b971ff3fd60c80c10c221
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -837,6 +837,8 @@ out_zap_parent:
/* If we have submounts, don't unhash ! */
if (have_submounts(dentry))
goto out_valid;
+ if (dentry->d_flags & DCACHE_DISCONNECTED)
+ goto out_valid;
shrink_dcache_parent(dentry);
}
d_drop(dentry);
@@ -1025,12 +1027,12 @@ static struct dentry *nfs_atomic_lookup(
res = NULL;
goto out;
/* This turned out not to be a regular file */
+ case -EISDIR:
case -ENOTDIR:
goto no_open;
case -ELOOP:
if (!(nd->intent.open.flags & O_NOFOLLOW))
goto no_open;
- /* case -EISDIR: */
/* case -EINVAL: */
default:
goto out;
@@ -1797,7 +1799,7 @@ static int nfs_access_get_cached(struct
cache = nfs_access_search_rbtree(inode, cred);
if (cache == NULL)
goto out;
- if (!nfs_have_delegation(inode, FMODE_READ) &&
+ if (!nfs_have_delegated_attributes(inode) &&
!time_in_range_open(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo))
goto out_stale;
res->jiffies = cache->jiffies;
file:f4d54ba97cc62653ffceedf568f4d4c987840682 -> file:c1fd68bf337d63ffa85dd16435a26527704e5c68
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -36,6 +36,19 @@ struct nfs_dns_ent {
};
+static void nfs_dns_ent_update(struct cache_head *cnew,
+ struct cache_head *ckey)
+{
+ struct nfs_dns_ent *new;
+ struct nfs_dns_ent *key;
+
+ new = container_of(cnew, struct nfs_dns_ent, h);
+ key = container_of(ckey, struct nfs_dns_ent, h);
+
+ memcpy(&new->addr, &key->addr, key->addrlen);
+ new->addrlen = key->addrlen;
+}
+
static void nfs_dns_ent_init(struct cache_head *cnew,
struct cache_head *ckey)
{
@@ -49,8 +62,7 @@ static void nfs_dns_ent_init(struct cach
new->hostname = kstrndup(key->hostname, key->namelen, GFP_KERNEL);
if (new->hostname) {
new->namelen = key->namelen;
- memcpy(&new->addr, &key->addr, key->addrlen);
- new->addrlen = key->addrlen;
+ nfs_dns_ent_update(cnew, ckey);
} else {
new->namelen = 0;
new->addrlen = 0;
@@ -234,7 +246,7 @@ static struct cache_detail nfs_dns_resol
.cache_show = nfs_dns_show,
.match = nfs_dns_match,
.init = nfs_dns_ent_init,
- .update = nfs_dns_ent_init,
+ .update = nfs_dns_ent_update,
.alloc = nfs_dns_ent_alloc,
};
file:393d40fd7eb9d8fd5dd5d784fc94b741d94e3912 -> file:61b3bf503ee157d8243259d41e3d7e13765b27f8
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -486,7 +486,8 @@ static int nfs_release_page(struct page
{
dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
- if (gfp & __GFP_WAIT)
+ /* Only do I/O if gfp is a superset of GFP_KERNEL */
+ if ((gfp & GFP_KERNEL) == GFP_KERNEL)
nfs_wb_page(page->mapping->host, page);
/* If PagePrivate() is set, then the page is not freeable */
if (PagePrivate(page))
file:faa091865ad05c956114ab7c7642994845717382 -> file:3c80474a265183ba30cced3dd405f3b641b9ec4b
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -759,7 +759,7 @@ int nfs_attribute_timeout(struct inode *
{
struct nfs_inode *nfsi = NFS_I(inode);
- if (nfs_have_delegation(inode, FMODE_READ))
+ if (nfs_have_delegated_attributes(inode))
return 0;
return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
}
file:6c200595099fa3ba3c52ce290ce05851104f6b01 -> file:3c7581bcdb0d3a10cd98cd25bae42daa3b7225a3
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1439,6 +1439,8 @@ static int _nfs4_proc_open(struct nfs4_o
nfs_post_op_update_inode(dir, o_res->dir_attr);
} else
nfs_refresh_inode(dir, o_res->dir_attr);
+ if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
+ server->caps &= ~NFS_CAP_POSIX_LOCK;
if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
status = _nfs4_proc_open_confirm(data);
if (status != 0)
@@ -1573,7 +1575,7 @@ static int _nfs4_do_open(struct inode *d
status = PTR_ERR(state);
if (IS_ERR(state))
goto err_opendata_put;
- if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0)
+ if (server->caps & NFS_CAP_POSIX_LOCK)
set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
nfs4_opendata_put(opendata);
nfs4_put_state_owner(sp);
file:a4cd1b79b71dd10c64fb12d9174be7ddd49ad2a0 -> file:5ec74cd5402cea6ad9a7928d651d9efd94db21ac
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -840,8 +840,8 @@ static void encode_attrs(struct xdr_stre
bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
*p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
*p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(iap->ia_mtime.tv_sec);
- *p++ = cpu_to_be32(iap->ia_mtime.tv_nsec);
+ *p++ = cpu_to_be32(iap->ia_atime.tv_sec);
+ *p++ = cpu_to_be32(iap->ia_atime.tv_nsec);
}
else if (iap->ia_valid & ATTR_ATIME) {
bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
file:a12c45b65dd42bc1712f23c7050923a6d83eed28 -> file:29d9d36cd5f431e603fcae5daccebbd5d62d7fa3
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -112,12 +112,10 @@ void nfs_unlock_request(struct nfs_page
*/
int nfs_set_page_tag_locked(struct nfs_page *req)
{
- struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode);
-
if (!nfs_lock_request_dontget(req))
return 0;
if (req->wb_page != NULL)
- radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
+ radix_tree_tag_set(&NFS_I(req->wb_context->path.dentry->d_inode)->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
return 1;
}
@@ -126,10 +124,10 @@ int nfs_set_page_tag_locked(struct nfs_p
*/
void nfs_clear_page_tag_locked(struct nfs_page *req)
{
- struct inode *inode = req->wb_context->path.dentry->d_inode;
- struct nfs_inode *nfsi = NFS_I(inode);
-
if (req->wb_page != NULL) {
+ struct inode *inode = req->wb_context->path.dentry->d_inode;
+ struct nfs_inode *nfsi = NFS_I(inode);
+
spin_lock(&inode->i_lock);
radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
nfs_unlock_request(req);
@@ -142,16 +140,22 @@ void nfs_clear_page_tag_locked(struct nf
* nfs_clear_request - Free up all resources allocated to the request
* @req:
*
- * Release page resources associated with a write request after it
- * has completed.
+ * Release page and open context resources associated with a read/write
+ * request after it has completed.
*/
void nfs_clear_request(struct nfs_page *req)
{
struct page *page = req->wb_page;
+ struct nfs_open_context *ctx = req->wb_context;
+
if (page != NULL) {
page_cache_release(page);
req->wb_page = NULL;
}
+ if (ctx != NULL) {
+ put_nfs_open_context(ctx);
+ req->wb_context = NULL;
+ }
}
@@ -165,9 +169,8 @@ static void nfs_free_request(struct kref
{
struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
- /* Release struct file or cached credential */
+ /* Release struct file and open context */
nfs_clear_request(req);
- put_nfs_open_context(req->wb_context);
nfs_page_free(req);
}
file:4bf23f6f93cf7b72833e4c2a6872e46ba6c15da6 -> file:a53b9e5068cdccc0a5850cbd7284e7d8d1fbc52b
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -534,6 +534,22 @@ static void nfs_show_mountd_options(stru
}
}
+#ifdef CONFIG_NFS_V4
+static void nfs_show_nfsv4_options(struct seq_file *m, struct nfs_server *nfss,
+ int showdefaults)
+{
+ struct nfs_client *clp = nfss->nfs_client;
+
+ seq_printf(m, ",clientaddr=%s", clp->cl_ipaddr);
+ seq_printf(m, ",minorversion=%u", clp->cl_minorversion);
+}
+#else
+static void nfs_show_nfsv4_options(struct seq_file *m, struct nfs_server *nfss,
+ int showdefaults)
+{
+}
+#endif
+
/*
* Describe the mount options in force on this server representation
*/
@@ -595,11 +611,9 @@ static void nfs_show_mount_options(struc
if (version != 4)
nfs_show_mountd_options(m, nfss, showdefaults);
+ else
+ nfs_show_nfsv4_options(m, nfss, showdefaults);
-#ifdef CONFIG_NFS_V4
- if (clp->rpc_ops->version == 4)
- seq_printf(m, ",clientaddr=%s", clp->cl_ipaddr);
-#endif
if (nfss->options & NFS_OPTION_FSCACHE)
seq_printf(m, ",fsc");
}
file:2153f9bdbebdcb3cb094a76c73b9948815b30e2d -> file:6ad6282e3076f4d16b6ff134253ac8a59b456063
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -2002,7 +2002,9 @@ nfs4_file_downgrade(struct file *filp, u
{
if (share_access & NFS4_SHARE_ACCESS_WRITE) {
drop_file_write_access(filp);
+ spin_lock(&filp->f_lock);
filp->f_mode = (filp->f_mode | FMODE_READ) & ~FMODE_WRITE;
+ spin_unlock(&filp->f_lock);
}
}
file:0fbd50cee1f60e8c437102fb4b2cb2cee53afbe1 -> file:12f62ff353d68124403cdf2613fdf89a6d477b34
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -168,10 +168,10 @@ static __be32 *read_buf(struct nfsd4_com
argp->p = page_address(argp->pagelist[0]);
argp->pagelist++;
if (argp->pagelen < PAGE_SIZE) {
- argp->end = p + (argp->pagelen>>2);
+ argp->end = argp->p + (argp->pagelen>>2);
argp->pagelen = 0;
} else {
- argp->end = p + (PAGE_SIZE>>2);
+ argp->end = argp->p + (PAGE_SIZE>>2);
argp->pagelen -= PAGE_SIZE;
}
memcpy(((char*)p)+avail, argp->p, (nbytes - avail));
@@ -1433,10 +1433,10 @@ nfsd4_decode_compound(struct nfsd4_compo
argp->p = page_address(argp->pagelist[0]);
argp->pagelist++;
if (argp->pagelen < PAGE_SIZE) {
- argp->end = p + (argp->pagelen>>2);
+ argp->end = argp->p + (argp->pagelen>>2);
argp->pagelen = 0;
} else {
- argp->end = p + (PAGE_SIZE>>2);
+ argp->end = argp->p + (PAGE_SIZE>>2);
argp->pagelen -= PAGE_SIZE;
}
}
@@ -2129,9 +2129,15 @@ out_acl:
* and this is the root of a cross-mounted filesystem.
*/
if (ignore_crossmnt == 0 &&
- exp->ex_path.mnt->mnt_root->d_inode == dentry->d_inode) {
- err = vfs_getattr(exp->ex_path.mnt->mnt_parent,
- exp->ex_path.mnt->mnt_mountpoint, &stat);
+ dentry == exp->ex_path.mnt->mnt_root) {
+ struct path path = exp->ex_path;
+ path_get(&path);
+ while (follow_up(&path)) {
+ if (path.dentry != path.mnt->mnt_root)
+ break;
+ }
+ err = vfs_getattr(path.mnt, path.dentry, &stat);
+ path_put(&path);
if (err)
goto out_nfserr;
}
file:67ea83eedd43eb7b69e896e1db54f8fd995a0d9e -> file:4d4e2d00d15ee88573fe1a6c289bf28836eb909e
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -136,7 +136,7 @@ u32 nfsd_supported_minorversion;
int nfsd_vers(int vers, enum vers_op change)
{
if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS)
- return -1;
+ return 0;
switch(change) {
case NFSD_SET:
nfsd_versions[vers] = nfsd_version[vers];
file:644e66727dd0e6f5464eb092b9d6db6caeac837f -> file:63e7b108ff8353c7e9dc7817aea5f89fe78d9560
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -781,6 +781,7 @@ nilfs_fill_super(struct super_block *sb,
sb->s_export_op = &nilfs_export_ops;
sb->s_root = NULL;
sb->s_time_gran = 1;
+ sb->s_bdi = nilfs->ns_bdi;
if (!nilfs_loaded(nilfs)) {
err = load_nilfs(nilfs, sbi);
file:fbeaec762103a91eacd396f47a999244303954ee -> file:d8fe53a6f8df53a1e495cf2aeb390ce46238ac79
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -30,6 +30,8 @@
#include "alloc.h"
#include "dlmglue.h"
#include "file.h"
+#include "inode.h"
+#include "journal.h"
#include "ocfs2_fs.h"
#include "xattr.h"
@@ -170,6 +172,60 @@ static struct posix_acl *ocfs2_get_acl(s
}
/*
+ * Helper function to set i_mode in memory and disk. Some call paths
+ * will not have di_bh or a journal handle to pass, in which case it
+ * will create its own.
+ */
+static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh,
+ handle_t *handle, umode_t new_mode)
+{
+ int ret, commit_handle = 0;
+ struct ocfs2_dinode *di;
+
+ if (di_bh == NULL) {
+ ret = ocfs2_read_inode_block(inode, &di_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ } else
+ get_bh(di_bh);
+
+ if (handle == NULL) {
+ handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
+ OCFS2_INODE_UPDATE_CREDITS);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto out_brelse;
+ }
+
+ commit_handle = 1;
+ }
+
+ di = (struct ocfs2_dinode *)di_bh->b_data;
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ inode->i_mode = new_mode;
+ di->i_mode = cpu_to_le16(inode->i_mode);
+
+ ocfs2_journal_dirty(handle, di_bh);
+
+out_commit:
+ if (commit_handle)
+ ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
+out_brelse:
+ brelse(di_bh);
+out:
+ return ret;
+}
+
+/*
* Set the access or default ACL of an inode.
*/
static int ocfs2_set_acl(handle_t *handle,
@@ -197,9 +253,14 @@ static int ocfs2_set_acl(handle_t *handl
if (ret < 0)
return ret;
else {
- inode->i_mode = mode;
if (ret == 0)
acl = NULL;
+
+ ret = ocfs2_acl_set_mode(inode, di_bh,
+ handle, mode);
+ if (ret)
+ return ret;
+
}
}
break;
@@ -287,6 +348,7 @@ int ocfs2_init_acl(handle_t *handle,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct posix_acl *acl = NULL;
int ret = 0;
+ mode_t mode;
if (!S_ISLNK(inode->i_mode)) {
if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
@@ -295,12 +357,17 @@ int ocfs2_init_acl(handle_t *handle,
if (IS_ERR(acl))
return PTR_ERR(acl);
}
- if (!acl)
- inode->i_mode &= ~current_umask();
+ if (!acl) {
+ mode = inode->i_mode & ~current_umask();
+ ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
+ if (ret) {
+ mlog_errno(ret);
+ goto cleanup;
+ }
+ }
}
if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) {
struct posix_acl *clone;
- mode_t mode;
if (S_ISDIR(inode->i_mode)) {
ret = ocfs2_set_acl(handle, inode, di_bh,
@@ -317,7 +384,7 @@ int ocfs2_init_acl(handle_t *handle,
mode = inode->i_mode;
ret = posix_acl_create_masq(clone, &mode);
if (ret >= 0) {
- inode->i_mode = mode;
+ ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
if (ret > 0) {
ret = ocfs2_set_acl(handle, inode,
di_bh, ACL_TYPE_ACCESS,
file:deb2b132ae5ed42b68fd11f58413f2ffa4779b83 -> file:5fc918ca25722038e19ebaa1c9880cbd72ef5cb1
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -591,8 +591,9 @@ static int ocfs2_direct_IO_get_blocks(st
goto bail;
}
- /* We should already CoW the refcounted extent. */
- BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);
+ /* We should already CoW the refcounted extent in case of create. */
+ BUG_ON(create && (ext_flags & OCFS2_EXT_REFCOUNTED));
+
/*
* get_more_blocks() expects us to describe a hole by clearing
* the mapped bit on bh_result().
file:d43d34a1dd31aa3225673cdbcaf7ed07620a0388 -> file:5a253bab122cfa541173ea9cb1c2821df42ca192
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -407,6 +407,7 @@ int ocfs2_write_super_or_backup(struct o
struct buffer_head *bh)
{
int ret = 0;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
mlog_entry_void();
@@ -426,6 +427,7 @@ int ocfs2_write_super_or_backup(struct o
get_bh(bh); /* for end_buffer_write_sync() */
bh->b_end_io = end_buffer_write_sync;
+ ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
submit_bh(WRITE, bh);
wait_on_buffer(bh);
file:02bf17808bdc50d1c8f72aa8f8a13664bed5a146 -> file:18bc101d603f8176ced2683936f50d8abea0183a
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlm/dlmfs.c
@@ -205,7 +205,7 @@ static ssize_t dlmfs_file_read(struct fi
if ((count + *ppos) > i_size_read(inode))
readlen = i_size_read(inode) - *ppos;
else
- readlen = count - *ppos;
+ readlen = count;
lvb_buf = kmalloc(readlen, GFP_NOFS);
if (!lvb_buf)
file:0297fb8982b861afdae1d974e9fdae96bf0a1f13 -> file:4c827d86547440bcbff79b54780cebc1cb1484a2
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -559,6 +559,7 @@ static int ocfs2_truncate_for_delete(str
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
+ handle = NULL;
mlog_errno(status);
goto out;
}
file:3a0df7a1b8109666b92dea616c1b45ce57a2add9 -> file:03a1ab83a65b08544c0b9c7493431b0a122ab35a
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -3995,6 +3995,9 @@ static int ocfs2_complete_reflink(struct
di->i_attr = s_di->i_attr;
if (preserve) {
+ t_inode->i_uid = s_inode->i_uid;
+ t_inode->i_gid = s_inode->i_gid;
+ t_inode->i_mode = s_inode->i_mode;
di->i_uid = s_di->i_uid;
di->i_gid = s_di->i_gid;
di->i_mode = s_di->i_mode;
file:c30b644d9572408816154db8c91c275d3136a0ac -> file:79b5dacf9312b0b5d616c915b09c00bdb92bbeb6
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -152,7 +152,7 @@ static u32 ocfs2_bits_per_group(struct o
#define do_error(fmt, ...) \
do{ \
- if (clean_error) \
+ if (resize) \
mlog(ML_ERROR, fmt "\n", ##__VA_ARGS__); \
else \
ocfs2_error(sb, fmt, ##__VA_ARGS__); \
@@ -160,7 +160,7 @@ static u32 ocfs2_bits_per_group(struct o
static int ocfs2_validate_gd_self(struct super_block *sb,
struct buffer_head *bh,
- int clean_error)
+ int resize)
{
struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
@@ -211,7 +211,7 @@ static int ocfs2_validate_gd_self(struct
static int ocfs2_validate_gd_parent(struct super_block *sb,
struct ocfs2_dinode *di,
struct buffer_head *bh,
- int clean_error)
+ int resize)
{
unsigned int max_bits;
struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
@@ -233,8 +233,11 @@ static int ocfs2_validate_gd_parent(stru
return -EINVAL;
}
- if (le16_to_cpu(gd->bg_chain) >=
- le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) {
+ /* In resize, we may meet the case bg_chain == cl_next_free_rec. */
+ if ((le16_to_cpu(gd->bg_chain) >
+ le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) ||
+ ((le16_to_cpu(gd->bg_chain) ==
+ le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) && !resize)) {
do_error("Group descriptor #%llu has bad chain %u",
(unsigned long long)bh->b_blocknr,
le16_to_cpu(gd->bg_chain));
file:6d2668fdc3848eb5b2be29d027c5634c727148f6 -> file:d42c30ceaee53e9e720ff733b4286473b63cb6c3
--- a/fs/reiserfs/dir.c
+++ b/fs/reiserfs/dir.c
@@ -45,8 +45,6 @@ static inline bool is_privroot_deh(struc
struct reiserfs_de_head *deh)
{
struct dentry *privroot = REISERFS_SB(dir->d_sb)->priv_root;
- if (reiserfs_expose_privroot(dir->d_sb))
- return 0;
return (dir == dir->d_parent && privroot->d_inode &&
deh->deh_objectid == INODE_PKEY(privroot->d_inode)->k_objectid);
}
file:6925b835a43b6f2f94e8a4217180b2e8d1735ab7 -> file:cc1caa26be52d149906f06403741dee43a598cfb
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -536,7 +536,7 @@ reiserfs_xattr_set_handle(struct reiserf
if (!err && new_size < i_size_read(dentry->d_inode)) {
struct iattr newattrs = {
.ia_ctime = current_fs_time(inode->i_sb),
- .ia_size = buffer_size,
+ .ia_size = new_size,
.ia_valid = ATTR_SIZE | ATTR_CTIME,
};
mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR);
@@ -952,21 +952,13 @@ int reiserfs_permission(struct inode *in
return generic_permission(inode, mask, NULL);
}
-/* This will catch lookups from the fs root to .reiserfs_priv */
-static int
-xattr_lookup_poison(struct dentry *dentry, struct qstr *q1, struct qstr *name)
-{
- struct dentry *priv_root = REISERFS_SB(dentry->d_sb)->priv_root;
- if (container_of(q1, struct dentry, d_name) == priv_root)
- return -ENOENT;
- if (q1->len == name->len &&
- !memcmp(q1->name, name->name, name->len))
- return 0;
- return 1;
+static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+ return -EPERM;
}
static const struct dentry_operations xattr_lookup_poison_ops = {
- .d_compare = xattr_lookup_poison,
+ .d_revalidate = xattr_hide_revalidate,
};
int reiserfs_lookup_privroot(struct super_block *s)
@@ -980,8 +972,7 @@ int reiserfs_lookup_privroot(struct supe
strlen(PRIVROOT_NAME));
if (!IS_ERR(dentry)) {
REISERFS_SB(s)->priv_root = dentry;
- if (!reiserfs_expose_privroot(s))
- s->s_root->d_op = &xattr_lookup_poison_ops;
+ dentry->d_op = &xattr_lookup_poison_ops;
if (dentry->d_inode)
dentry->d_inode->i_flags |= S_PRIVATE;
} else
file:b23a5450644608fddbc7d5146e6c20f638209450 -> file:4c7b14503716d3e01bfee8d48ab930f6fdbd008a
--- a/fs/xfs/linux-2.6/xfs_acl.c
+++ b/fs/xfs/linux-2.6/xfs_acl.c
@@ -250,8 +250,9 @@ xfs_set_mode(struct inode *inode, mode_t
if (mode != inode->i_mode) {
struct iattr iattr;
- iattr.ia_valid = ATTR_MODE;
+ iattr.ia_valid = ATTR_MODE | ATTR_CTIME;
iattr.ia_mode = mode;
+ iattr.ia_ctime = current_fs_time(inode->i_sb);
error = -xfs_setattr(XFS_I(inode), &iattr, XFS_ATTR_NOACL);
}
file:c2e30eea74dc2cd6344dd6710806465b029db422 -> file:7263002fac643f6da9df6ea561ac3cb36054908a
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -204,14 +204,17 @@ xfs_ioend_new_eof(
}
/*
- * Update on-disk file size now that data has been written to disk.
- * The current in-memory file size is i_size. If a write is beyond
- * eof i_new_size will be the intended file size until i_size is
- * updated. If this write does not extend all the way to the valid
- * file size then restrict this update to the end of the write.
+ * Update on-disk file size now that data has been written to disk. The
+ * current in-memory file size is i_size. If a write is beyond eof i_new_size
+ * will be the intended file size until i_size is updated. If this write does
+ * not extend all the way to the valid file size then restrict this update to
+ * the end of the write.
+ *
+ * This function does not block as blocking on the inode lock in IO completion
+ * can lead to IO completion order dependency deadlocks. If it can't get the
+ * inode ilock it will return EAGAIN. Callers must handle this.
*/
-
-STATIC void
+STATIC int
xfs_setfilesize(
xfs_ioend_t *ioend)
{
@@ -222,9 +225,11 @@ xfs_setfilesize(
ASSERT(ioend->io_type != IOMAP_READ);
if (unlikely(ioend->io_error))
- return;
+ return 0;
+
+ if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
+ return EAGAIN;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
isize = xfs_ioend_new_eof(ioend);
if (isize) {
ip->i_d.di_size = isize;
@@ -232,6 +237,28 @@ xfs_setfilesize(
}
xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return 0;
+}
+
+/*
+ * Schedule IO completion handling on a xfsdatad if this was
+ * the final hold on this ioend. If we are asked to wait,
+ * flush the workqueue.
+ */
+STATIC void
+xfs_finish_ioend(
+ xfs_ioend_t *ioend,
+ int wait)
+{
+ if (atomic_dec_and_test(&ioend->io_remaining)) {
+ struct workqueue_struct *wq;
+
+ wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
+ xfsconvertd_workqueue : xfsdatad_workqueue;
+ queue_work(wq, &ioend->io_work);
+ if (wait)
+ flush_workqueue(wq);
+ }
}
/*
@@ -243,9 +270,23 @@ xfs_end_bio_delalloc(
{
xfs_ioend_t *ioend =
container_of(work, xfs_ioend_t, io_work);
+ int error;
- xfs_setfilesize(ioend);
- xfs_destroy_ioend(ioend);
+ /*
+ * If we didn't complete processing of the ioend, requeue it to the
+ * tail of the workqueue for another attempt later. Otherwise destroy
+ * it.
+ */
+ error = xfs_setfilesize(ioend);
+ if (error == EAGAIN) {
+ atomic_inc(&ioend->io_remaining);
+ xfs_finish_ioend(ioend, 0);
+ /* ensure we don't spin on blocked ioends */
+ delay(1);
+ } else {
+ ASSERT(!error);
+ xfs_destroy_ioend(ioend);
+ }
}
/*
@@ -257,9 +298,23 @@ xfs_end_bio_written(
{
xfs_ioend_t *ioend =
container_of(work, xfs_ioend_t, io_work);
+ int error;
- xfs_setfilesize(ioend);
- xfs_destroy_ioend(ioend);
+ /*
+ * If we didn't complete processing of the ioend, requeue it to the
+ * tail of the workqueue for another attempt later. Otherwise destroy
+ * it.
+ */
+ error = xfs_setfilesize(ioend);
+ if (error == EAGAIN) {
+ atomic_inc(&ioend->io_remaining);
+ xfs_finish_ioend(ioend, 0);
+ /* ensure we don't spin on blocked ioends */
+ delay(1);
+ } else {
+ ASSERT(!error);
+ xfs_destroy_ioend(ioend);
+ }
}
/*
@@ -279,13 +334,25 @@ xfs_end_bio_unwritten(
size_t size = ioend->io_size;
if (likely(!ioend->io_error)) {
+ int error;
if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
- int error;
error = xfs_iomap_write_unwritten(ip, offset, size);
if (error)
ioend->io_error = error;
}
- xfs_setfilesize(ioend);
+ /*
+ * If we didn't complete processing of the ioend, requeue it to the
+ * tail of the workqueue for another attempt later. Otherwise destroy
+ * it.
+ */
+ error = xfs_setfilesize(ioend);
+ if (error == EAGAIN) {
+ atomic_inc(&ioend->io_remaining);
+ xfs_finish_ioend(ioend, 0);
+ /* ensure we don't spin on blocked ioends */
+ delay(1);
+ return;
+ }
}
xfs_destroy_ioend(ioend);
}
@@ -304,27 +371,6 @@ xfs_end_bio_read(
}
/*
- * Schedule IO completion handling on a xfsdatad if this was
- * the final hold on this ioend. If we are asked to wait,
- * flush the workqueue.
- */
-STATIC void
-xfs_finish_ioend(
- xfs_ioend_t *ioend,
- int wait)
-{
- if (atomic_dec_and_test(&ioend->io_remaining)) {
- struct workqueue_struct *wq = xfsdatad_workqueue;
- if (ioend->io_work.func == xfs_end_bio_unwritten)
- wq = xfsconvertd_workqueue;
-
- queue_work(wq, &ioend->io_work);
- if (wait)
- flush_workqueue(wq);
- }
-}
-
-/*
* Allocate and initialise an IO completion structure.
* We need to track unwritten extent write completion here initially.
* We'll need to extend this for updating the ondisk inode size later
file:cd42ef78f6b54e705ad604e3ccfc4eb1a3df3743 -> file:1f3b4b8f7dd45195b71e41f6ea4df2a6a0b110c9
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -573,8 +573,8 @@ xfs_vn_fallocate(
bf.l_len = len;
xfs_ilock(ip, XFS_IOLOCK_EXCL);
- error = xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf,
- 0, XFS_ATTR_NOLOCK);
+ error = -xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf,
+ 0, XFS_ATTR_NOLOCK);
if (!error && !(mode & FALLOC_FL_KEEP_SIZE) &&
offset + len > i_size_read(inode))
new_size = offset + len;
@@ -585,7 +585,7 @@ xfs_vn_fallocate(
iattr.ia_valid = ATTR_SIZE;
iattr.ia_size = new_size;
- error = xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
+ error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
}
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
file:18a4b8e11df2d4241bcfafd59297c30e961241ad -> file:d95bfa27c62a745cf9c2e7921c070dcfeeb3a6d0
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -930,13 +930,37 @@ xfs_fs_alloc_inode(
*/
STATIC void
xfs_fs_destroy_inode(
- struct inode *inode)
+ struct inode *inode)
{
- xfs_inode_t *ip = XFS_I(inode);
+ struct xfs_inode *ip = XFS_I(inode);
+
+ xfs_itrace_entry(ip);
XFS_STATS_INC(vn_reclaim);
- if (xfs_reclaim(ip))
- panic("%s: cannot reclaim 0x%p\n", __func__, inode);
+
+ /* bad inode, get out here ASAP */
+ if (is_bad_inode(inode))
+ goto out_reclaim;
+
+ xfs_ioend_wait(ip);
+
+ ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
+
+ /*
+ * We should never get here with one of the reclaim flags already set.
+ */
+ ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
+ ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
+
+ /*
+ * We always use background reclaim here because even if the
+ * inode is clean, it still may be under IO and hence we have
+ * to take the flush lock. The background reclaim path handles
+ * this more efficiently than we can here, so simply let background
+ * reclaim tear down all inodes.
+ */
+out_reclaim:
+ xfs_inode_set_reclaim_tag(ip);
}
/*
@@ -1140,6 +1164,7 @@ xfs_fs_put_super(
xfs_unmountfs(mp);
xfs_freesb(mp);
+ xfs_inode_shrinker_unregister(mp);
xfs_icsb_destroy_counters(mp);
xfs_close_devices(mp);
xfs_dmops_put(mp);
@@ -1299,6 +1324,8 @@ xfs_fs_remount(
/* ro -> rw */
if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
+ __uint64_t resblks;
+
mp->m_flags &= ~XFS_MOUNT_RDONLY;
if (mp->m_flags & XFS_MOUNT_BARRIER)
xfs_mountfs_check_barriers(mp);
@@ -1316,11 +1343,37 @@ xfs_fs_remount(
}
mp->m_update_flags = 0;
}
+
+ /*
+ * Fill out the reserve pool if it is empty. Use the stashed
+ * value if it is non-zero, otherwise go with the default.
+ */
+ if (mp->m_resblks_save) {
+ resblks = mp->m_resblks_save;
+ mp->m_resblks_save = 0;
+ } else {
+ resblks = mp->m_sb.sb_dblocks;
+ do_div(resblks, 20);
+ resblks = min_t(__uint64_t, resblks, 1024);
+ }
+ xfs_reserve_blocks(mp, &resblks, NULL);
}
/* rw -> ro */
if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
+ /*
+ * After we have synced the data but before we sync the
+ * metadata, we need to free up the reserve block pool so that
+ * the used block count in the superblock on disk is correct at
+ * the end of the remount. Stash the current reserve pool size
+ * so that if we get remounted rw, we can return it to the same
+ * size.
+ */
+ __uint64_t resblks = 0;
+
xfs_quiesce_data(mp);
+ mp->m_resblks_save = mp->m_resblks;
+ xfs_reserve_blocks(mp, &resblks, NULL);
xfs_quiesce_attr(mp);
mp->m_flags |= XFS_MOUNT_RDONLY;
}
@@ -1503,6 +1556,8 @@ xfs_fs_fill_super(
if (error)
goto fail_vnrele;
+ xfs_inode_shrinker_register(mp);
+
kfree(mtpt);
xfs_itrace_exit(XFS_I(sb->s_root->d_inode));
@@ -1842,6 +1897,7 @@ init_xfs_fs(void)
goto out_cleanup_procfs;
vfs_initquota();
+ xfs_inode_shrinker_init();
error = register_filesystem(&xfs_fs_type);
if (error)
@@ -1871,6 +1927,7 @@ exit_xfs_fs(void)
{
vfs_exitquota();
unregister_filesystem(&xfs_fs_type);
+ xfs_inode_shrinker_destroy();
xfs_sysctl_unregister();
xfs_cleanup_procfs();
xfs_buf_terminate();
file:961df0a22c7837e9a969a8ab73331c594d5a6116 -> file:c82683ad1484aea5d1fa1636619c25d51667707c
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -64,7 +64,6 @@ xfs_inode_ag_lookup(
* as the tree is sparse and a gang lookup walks to find
* the number of objects requested.
*/
- read_lock(&pag->pag_ici_lock);
if (tag == XFS_ICI_NO_TAG) {
nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
(void **)&ip, *first_index, 1);
@@ -73,7 +72,7 @@ xfs_inode_ag_lookup(
(void **)&ip, *first_index, 1, tag);
}
if (!nr_found)
- goto unlock;
+ return NULL;
/*
* Update the index for the next lookup. Catch overflows
@@ -83,13 +82,8 @@ xfs_inode_ag_lookup(
*/
*first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
if (*first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
- goto unlock;
-
+ return NULL;
return ip;
-
-unlock:
- read_unlock(&pag->pag_ici_lock);
- return NULL;
}
STATIC int
@@ -99,7 +93,9 @@ xfs_inode_ag_walk(
int (*execute)(struct xfs_inode *ip,
struct xfs_perag *pag, int flags),
int flags,
- int tag)
+ int tag,
+ int exclusive,
+ int *nr_to_scan)
{
struct xfs_perag *pag = &mp->m_perag[ag];
uint32_t first_index;
@@ -113,10 +109,20 @@ restart:
int error = 0;
xfs_inode_t *ip;
+ if (exclusive)
+ write_lock(&pag->pag_ici_lock);
+ else
+ read_lock(&pag->pag_ici_lock);
ip = xfs_inode_ag_lookup(mp, pag, &first_index, tag);
- if (!ip)
+ if (!ip) {
+ if (exclusive)
+ write_unlock(&pag->pag_ici_lock);
+ else
+ read_unlock(&pag->pag_ici_lock);
break;
+ }
+ /* execute releases pag->pag_ici_lock */
error = execute(ip, pag, flags);
if (error == EAGAIN) {
skipped++;
@@ -124,13 +130,12 @@ restart:
}
if (error)
last_error = error;
- /*
- * bail out if the filesystem is corrupted.
- */
+
+ /* bail out if the filesystem is corrupted. */
if (error == EFSCORRUPTED)
break;
- } while (1);
+ } while ((*nr_to_scan)--);
if (skipped) {
delay(1);
@@ -147,22 +152,31 @@ xfs_inode_ag_iterator(
int (*execute)(struct xfs_inode *ip,
struct xfs_perag *pag, int flags),
int flags,
- int tag)
+ int tag,
+ int exclusive,
+ int *nr_to_scan)
{
int error = 0;
int last_error = 0;
xfs_agnumber_t ag;
+ int nr;
+ nr = nr_to_scan ? *nr_to_scan : INT_MAX;
for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
if (!mp->m_perag[ag].pag_ici_init)
continue;
- error = xfs_inode_ag_walk(mp, ag, execute, flags, tag);
+ error = xfs_inode_ag_walk(mp, ag, execute, flags, tag,
+ exclusive, &nr);
if (error) {
last_error = error;
if (error == EFSCORRUPTED)
break;
}
+ if (nr <= 0)
+ break;
}
+ if (nr_to_scan)
+ *nr_to_scan = nr;
return XFS_ERROR(last_error);
}
@@ -173,30 +187,31 @@ xfs_sync_inode_valid(
struct xfs_perag *pag)
{
struct inode *inode = VFS_I(ip);
+ int error = EFSCORRUPTED;
/* nothing to sync during shutdown */
- if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
- read_unlock(&pag->pag_ici_lock);
- return EFSCORRUPTED;
- }
+ if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+ goto out_unlock;
- /*
- * If we can't get a reference on the inode, it must be in reclaim.
- * Leave it for the reclaim code to flush. Also avoid inodes that
- * haven't been fully initialised.
- */
- if (!igrab(inode)) {
- read_unlock(&pag->pag_ici_lock);
- return ENOENT;
- }
- read_unlock(&pag->pag_ici_lock);
+ /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
+ error = ENOENT;
+ if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
+ goto out_unlock;
+
+	/* If we can't grab the inode, it must be on its way to reclaim. */
+ if (!igrab(inode))
+ goto out_unlock;
- if (is_bad_inode(inode) || xfs_iflags_test(ip, XFS_INEW)) {
+ if (is_bad_inode(inode)) {
IRELE(ip);
- return ENOENT;
+ goto out_unlock;
}
- return 0;
+ /* inode is valid */
+ error = 0;
+out_unlock:
+ read_unlock(&pag->pag_ici_lock);
+ return error;
}
STATIC int
@@ -281,7 +296,7 @@ xfs_sync_data(
ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);
error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags,
- XFS_ICI_NO_TAG);
+ XFS_ICI_NO_TAG, 0, NULL);
if (error)
return XFS_ERROR(error);
@@ -303,7 +318,7 @@ xfs_sync_attr(
ASSERT((flags & ~SYNC_WAIT) == 0);
return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags,
- XFS_ICI_NO_TAG);
+ XFS_ICI_NO_TAG, 0, NULL);
}
STATIC int
@@ -663,67 +678,6 @@ xfs_syncd_stop(
kthread_stop(mp->m_sync_task);
}
-int
-xfs_reclaim_inode(
- xfs_inode_t *ip,
- int locked,
- int sync_mode)
-{
- xfs_perag_t *pag = xfs_get_perag(ip->i_mount, ip->i_ino);
-
- /* The hash lock here protects a thread in xfs_iget_core from
- * racing with us on linking the inode back with a vnode.
- * Once we have the XFS_IRECLAIM flag set it will not touch
- * us.
- */
- write_lock(&pag->pag_ici_lock);
- spin_lock(&ip->i_flags_lock);
- if (__xfs_iflags_test(ip, XFS_IRECLAIM) ||
- !__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {
- spin_unlock(&ip->i_flags_lock);
- write_unlock(&pag->pag_ici_lock);
- if (locked) {
- xfs_ifunlock(ip);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- }
- return -EAGAIN;
- }
- __xfs_iflags_set(ip, XFS_IRECLAIM);
- spin_unlock(&ip->i_flags_lock);
- write_unlock(&pag->pag_ici_lock);
- xfs_put_perag(ip->i_mount, pag);
-
- /*
- * If the inode is still dirty, then flush it out. If the inode
- * is not in the AIL, then it will be OK to flush it delwri as
- * long as xfs_iflush() does not keep any references to the inode.
- * We leave that decision up to xfs_iflush() since it has the
- * knowledge of whether it's OK to simply do a delwri flush of
- * the inode or whether we need to wait until the inode is
- * pulled from the AIL.
- * We get the flush lock regardless, though, just to make sure
- * we don't free it while it is being flushed.
- */
- if (!locked) {
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_iflock(ip);
- }
-
- /*
- * In the case of a forced shutdown we rely on xfs_iflush() to
- * wait for the inode to be unpinned before returning an error.
- */
- if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) {
- /* synchronize with xfs_iflush_done */
- xfs_iflock(ip);
- xfs_ifunlock(ip);
- }
-
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- xfs_ireclaim(ip);
- return 0;
-}
-
void
__xfs_inode_set_reclaim_tag(
struct xfs_perag *pag,
@@ -732,6 +686,7 @@ __xfs_inode_set_reclaim_tag(
radix_tree_tag_set(&pag->pag_ici_root,
XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
XFS_ICI_RECLAIM_TAG);
+ pag->pag_ici_reclaimable++;
}
/*
@@ -746,12 +701,12 @@ xfs_inode_set_reclaim_tag(
xfs_mount_t *mp = ip->i_mount;
xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino);
- read_lock(&pag->pag_ici_lock);
+ write_lock(&pag->pag_ici_lock);
spin_lock(&ip->i_flags_lock);
__xfs_inode_set_reclaim_tag(pag, ip);
__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
spin_unlock(&ip->i_flags_lock);
- read_unlock(&pag->pag_ici_lock);
+ write_unlock(&pag->pag_ici_lock);
xfs_put_perag(mp, pag);
}
@@ -763,22 +718,59 @@ __xfs_inode_clear_reclaim_tag(
{
radix_tree_tag_clear(&pag->pag_ici_root,
XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
+ pag->pag_ici_reclaimable--;
}
STATIC int
-xfs_reclaim_inode_now(
+xfs_reclaim_inode(
struct xfs_inode *ip,
struct xfs_perag *pag,
- int flags)
+ int sync_mode)
{
- /* ignore if already under reclaim */
- if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
- read_unlock(&pag->pag_ici_lock);
+ /*
+ * The radix tree lock here protects a thread in xfs_iget from racing
+ * with us starting reclaim on the inode. Once we have the
+ * XFS_IRECLAIM flag set it will not touch us.
+ */
+ spin_lock(&ip->i_flags_lock);
+ ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
+ if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
+ /* ignore as it is already under reclaim */
+ spin_unlock(&ip->i_flags_lock);
+ write_unlock(&pag->pag_ici_lock);
return 0;
}
- read_unlock(&pag->pag_ici_lock);
+ __xfs_iflags_set(ip, XFS_IRECLAIM);
+ spin_unlock(&ip->i_flags_lock);
+ write_unlock(&pag->pag_ici_lock);
+
+ /*
+ * If the inode is still dirty, then flush it out. If the inode
+ * is not in the AIL, then it will be OK to flush it delwri as
+ * long as xfs_iflush() does not keep any references to the inode.
+ * We leave that decision up to xfs_iflush() since it has the
+ * knowledge of whether it's OK to simply do a delwri flush of
+ * the inode or whether we need to wait until the inode is
+ * pulled from the AIL.
+ * We get the flush lock regardless, though, just to make sure
+ * we don't free it while it is being flushed.
+ */
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_iflock(ip);
- return xfs_reclaim_inode(ip, 0, flags);
+ /*
+ * In the case of a forced shutdown we rely on xfs_iflush() to
+ * wait for the inode to be unpinned before returning an error.
+ */
+ if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) {
+ /* synchronize with xfs_iflush_done */
+ xfs_iflock(ip);
+ xfs_ifunlock(ip);
+ }
+
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ xfs_ireclaim(ip);
+ return 0;
}
int
@@ -786,6 +778,89 @@ xfs_reclaim_inodes(
xfs_mount_t *mp,
int mode)
{
- return xfs_inode_ag_iterator(mp, xfs_reclaim_inode_now, mode,
- XFS_ICI_RECLAIM_TAG);
+ return xfs_inode_ag_iterator(mp, xfs_reclaim_inode, mode,
+ XFS_ICI_RECLAIM_TAG, 1, NULL);
+}
+
+/*
+ * Shrinker infrastructure.
+ *
+ * This is all far more complex than it needs to be. It adds a global list of
+ * mounts because the shrinkers can only call a global context. We need to make
+ * the shrinkers pass a context to avoid the need for global state.
+ */
+static LIST_HEAD(xfs_mount_list);
+static struct rw_semaphore xfs_mount_list_lock;
+
+static int
+xfs_reclaim_inode_shrink(
+ int nr_to_scan,
+ gfp_t gfp_mask)
+{
+ struct xfs_mount *mp;
+ xfs_agnumber_t ag;
+ int reclaimable = 0;
+
+ if (nr_to_scan) {
+ if (!(gfp_mask & __GFP_FS))
+ return -1;
+
+ down_read(&xfs_mount_list_lock);
+ list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
+ xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
+ XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan);
+ if (nr_to_scan <= 0)
+ break;
+ }
+ up_read(&xfs_mount_list_lock);
+ }
+
+ down_read(&xfs_mount_list_lock);
+ list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
+ for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
+
+ if (!mp->m_perag[ag].pag_ici_init)
+ continue;
+ reclaimable += mp->m_perag[ag].pag_ici_reclaimable;
+ }
+ }
+ up_read(&xfs_mount_list_lock);
+ return reclaimable;
+}
+
+static struct shrinker xfs_inode_shrinker = {
+ .shrink = xfs_reclaim_inode_shrink,
+ .seeks = DEFAULT_SEEKS,
+};
+
+void __init
+xfs_inode_shrinker_init(void)
+{
+ init_rwsem(&xfs_mount_list_lock);
+ register_shrinker(&xfs_inode_shrinker);
+}
+
+void
+xfs_inode_shrinker_destroy(void)
+{
+ ASSERT(list_empty(&xfs_mount_list));
+ unregister_shrinker(&xfs_inode_shrinker);
+}
+
+void
+xfs_inode_shrinker_register(
+ struct xfs_mount *mp)
+{
+ down_write(&xfs_mount_list_lock);
+ list_add_tail(&mp->m_mplist, &xfs_mount_list);
+ up_write(&xfs_mount_list_lock);
+}
+
+void
+xfs_inode_shrinker_unregister(
+ struct xfs_mount *mp)
+{
+ down_write(&xfs_mount_list_lock);
+ list_del(&mp->m_mplist);
+ up_write(&xfs_mount_list_lock);
}
file:27920eb7a820cbe77b2e7799bf441a5da8dd0930 -> file:0b28c13bdf9455a6a406b57d4785554e90bdb7c3
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -44,7 +44,6 @@ void xfs_quiesce_attr(struct xfs_mount *
void xfs_flush_inodes(struct xfs_inode *ip);
-int xfs_reclaim_inode(struct xfs_inode *ip, int locked, int sync_mode);
int xfs_reclaim_inodes(struct xfs_mount *mp, int mode);
void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
@@ -55,6 +54,11 @@ void __xfs_inode_clear_reclaim_tag(struc
int xfs_sync_inode_valid(struct xfs_inode *ip, struct xfs_perag *pag);
int xfs_inode_ag_iterator(struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
- int flags, int tag);
+ int flags, int tag, int write_lock, int *nr_to_scan);
+
+void xfs_inode_shrinker_init(void);
+void xfs_inode_shrinker_destroy(void);
+void xfs_inode_shrinker_register(struct xfs_mount *mp);
+void xfs_inode_shrinker_unregister(struct xfs_mount *mp);
#endif
file:a5346630dfae05d639a0cee2a24cadbaac057cad -> file:97b410c12794d5ddf52579625d4a256ed384d872
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -59,7 +59,7 @@ xfs_fill_statvfs_from_dquot(
be64_to_cpu(dp->d_blk_hardlimit);
if (limit && statp->f_blocks > limit) {
statp->f_blocks = limit;
- statp->f_bfree =
+ statp->f_bfree = statp->f_bavail =
(statp->f_blocks > be64_to_cpu(dp->d_bcount)) ?
(statp->f_blocks - be64_to_cpu(dp->d_bcount)) : 0;
}
file:5d1a3b98a6e68875a47dc283483a9be376cdce42 -> file:60fe35821cff740eff6f615a07c21fda93d2941e
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -893,7 +893,8 @@ xfs_qm_dqrele_all_inodes(
uint flags)
{
ASSERT(mp->m_quotainfo);
- xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, XFS_ICI_NO_TAG);
+ xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags,
+ XFS_ICI_NO_TAG, 0, NULL);
}
/*------------------------------------------------------------------------*/
file:a5d54bf4931b583dfd3729d670cd0694e7b7c507 -> file:381fba77b28d5bdd230fc12453cff08b82bbe0f0
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -215,6 +215,7 @@ typedef struct xfs_perag
int pag_ici_init; /* incore inode cache initialised */
rwlock_t pag_ici_lock; /* incore inode lock */
struct radix_tree_root pag_ici_root; /* incore inode cache root */
+ int pag_ici_reclaimable; /* reclaimable inodes */
#endif
} xfs_perag_t;
file:2cf944eb796daf4ca455865d3dd22c83ee6d13ee -> file:4cd1c23b77f00a48bb407cbd2061966b7a66c7e4
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -2703,45 +2703,35 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
xfs_mount_t *mp;
xfs_perag_busy_t *bsy;
xfs_agblock_t uend, bend;
- xfs_lsn_t lsn;
+ xfs_lsn_t lsn = 0;
int cnt;
mp = tp->t_mountp;
spin_lock(&mp->m_perag[agno].pagb_lock);
- cnt = mp->m_perag[agno].pagb_count;
-
uend = bno + len - 1;
- /* search pagb_list for this slot, skipping open slots */
- for (bsy = mp->m_perag[agno].pagb_list; cnt; bsy++) {
-
- /*
- * (start1,length1) within (start2, length2)
- */
- if (bsy->busy_tp != NULL) {
- bend = bsy->busy_start + bsy->busy_length - 1;
- if ((bno > bend) || (uend < bsy->busy_start)) {
- cnt--;
- } else {
- TRACE_BUSYSEARCH("xfs_alloc_search_busy",
- "found1", agno, bno, len, tp);
- break;
- }
- }
- }
-
/*
- * If a block was found, force the log through the LSN of the
- * transaction that freed the block
+ * search pagb_list for this slot, skipping open slots. We have to
+ * search the entire array as there may be multiple overlaps and
+ * we have to get the most recent LSN for the log force to push out
+ * all the transactions that span the range.
*/
- if (cnt) {
- TRACE_BUSYSEARCH("xfs_alloc_search_busy", "found", agno, bno, len, tp);
- lsn = bsy->busy_tp->t_commit_lsn;
- spin_unlock(&mp->m_perag[agno].pagb_lock);
- xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC);
- } else {
- TRACE_BUSYSEARCH("xfs_alloc_search_busy", "not-found", agno, bno, len, tp);
- spin_unlock(&mp->m_perag[agno].pagb_lock);
+ for (cnt = 0; cnt < mp->m_perag[agno].pagb_count; cnt++) {
+ bsy = &mp->m_perag[agno].pagb_list[cnt];
+ if (!bsy->busy_tp)
+ continue;
+ bend = bsy->busy_start + bsy->busy_length - 1;
+ if (bno > bend || uend < bsy->busy_start)
+ continue;
+
+ /* (start1,length1) within (start2, length2) */
+ if (XFS_LSN_CMP(bsy->busy_tp->t_commit_lsn, lsn) > 0)
+ lsn = bsy->busy_tp->t_commit_lsn;
}
+ spin_unlock(&mp->m_perag[agno].pagb_lock);
+ TRACE_BUSYSEARCH("xfs_alloc_search_busy", lsn ? "found" : "not-found",
+ agno, bno, len, tp);
+ if (lsn)
+ xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC);
}
file:ab89a7e94a0fd4b861114f656b99e5fcf0355a14 -> file:73c22278fdaf82955c66f02cf2b4b626f0713dcd
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -113,10 +113,82 @@ xfs_swapext(
return error;
}
+/*
+ * We need to check that the format of the data fork in the temporary inode is
+ * valid for the target inode before doing the swap. This is not a problem with
+ * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
+ * data fork depending on the space the attribute fork is taking so we can get
+ * invalid formats on the target inode.
+ *
+ * E.g. target has space for 7 extents in extent format, temp inode only has
+ * space for 6. If we defragment down to 7 extents, then the tmp format is a
+ * btree, but when swapped it needs to be in extent format. Hence we can't just
+ * blindly swap data forks on attr2 filesystems.
+ *
+ * Note that we check the swap in both directions so that we don't end up with
+ * a corrupt temporary inode, either.
+ *
+ * Note that fixing the way xfs_fsr sets up the attribute fork in the source
+ * inode will prevent this situation from occurring, so all we do here is
+ * reject and log the attempt. Basically we are putting the responsibility on
+ * userspace to get this right.
+ */
+static int
+xfs_swap_extents_check_format(
+ xfs_inode_t *ip, /* target inode */
+ xfs_inode_t *tip) /* tmp inode */
+{
+
+ /* Should never get a local format */
+ if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
+ tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
+ return EINVAL;
+
+ /*
+	 * if the target inode has fewer extents than the temporary inode then
+ * why did userspace call us?
+ */
+ if (ip->i_d.di_nextents < tip->i_d.di_nextents)
+ return EINVAL;
+
+ /*
+ * if the target inode is in extent form and the temp inode is in btree
+ * form then we will end up with the target inode in the wrong format
+	 * as we already know there are fewer extents in the temp inode.
+ */
+ if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
+ tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
+ return EINVAL;
+
+ /* Check temp in extent form to max in target */
+ if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
+ XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) > ip->i_df.if_ext_max)
+ return EINVAL;
+
+ /* Check target in extent form to max in temp */
+ if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
+ XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) > tip->i_df.if_ext_max)
+ return EINVAL;
+
+ /* Check root block of temp in btree form to max in target */
+ if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
+ XFS_IFORK_BOFF(ip) &&
+ tip->i_df.if_broot_bytes > XFS_IFORK_BOFF(ip))
+ return EINVAL;
+
+ /* Check root block of target in btree form to max in temp */
+ if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
+ XFS_IFORK_BOFF(tip) &&
+ ip->i_df.if_broot_bytes > XFS_IFORK_BOFF(tip))
+ return EINVAL;
+
+ return 0;
+}
+
int
xfs_swap_extents(
- xfs_inode_t *ip,
- xfs_inode_t *tip,
+ xfs_inode_t *ip, /* target inode */
+ xfs_inode_t *tip, /* tmp inode */
xfs_swapext_t *sxp)
{
xfs_mount_t *mp;
@@ -160,13 +232,6 @@ xfs_swap_extents(
goto out_unlock;
}
- /* Should never get a local format */
- if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
- tip->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
- error = XFS_ERROR(EINVAL);
- goto out_unlock;
- }
-
if (VN_CACHED(VFS_I(tip)) != 0) {
xfs_inval_cached_trace(tip, 0, -1, 0, -1);
error = xfs_flushinval_pages(tip, 0, -1,
@@ -189,13 +254,12 @@ xfs_swap_extents(
goto out_unlock;
}
- /*
- * If the target has extended attributes, the tmp file
- * must also in order to ensure the correct data fork
- * format.
- */
- if ( XFS_IFORK_Q(ip) != XFS_IFORK_Q(tip) ) {
- error = XFS_ERROR(EINVAL);
+ /* check inode formats now that data is flushed */
+ error = xfs_swap_extents_check_format(ip, tip);
+ if (error) {
+ xfs_fs_cmn_err(CE_NOTE, mp,
+ "%s: inode 0x%llx format is incompatible for exchanging.",
+ __FILE__, ip->i_ino);
goto out_unlock;
}
@@ -276,6 +340,16 @@ xfs_swap_extents(
*tifp = *tempifp; /* struct copy */
/*
+ * Fix the in-memory data fork values that are dependent on the fork
+ * offset in the inode. We can't assume they remain the same as attr2
+ * has dynamic fork offsets.
+ */
+ ifp->if_ext_max = XFS_IFORK_SIZE(ip, XFS_DATA_FORK) /
+ (uint)sizeof(xfs_bmbt_rec_t);
+ tifp->if_ext_max = XFS_IFORK_SIZE(tip, XFS_DATA_FORK) /
+ (uint)sizeof(xfs_bmbt_rec_t);
+
+ /*
* Fix the on-disk inode values
*/
tmp = (__uint64_t)ip->i_d.di_nblocks;
file:2d0b3e1da9e69094f798967e3ff432d679249f00 -> file:6f83f58c099fee2db91d80f6d31c51f3b46f6830
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -611,7 +611,7 @@ xfs_fs_log_dummy(
xfs_inode_t *ip;
int error;
- tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
+ tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
if (error) {
xfs_trans_cancel(tp, 0);
file:80e526489be5d7ee3dc415ec592fe2807a879a01 -> file:a04a72fbbb654f599971d69573cefcfe1999536d
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -228,13 +228,12 @@ xfs_iget_cache_hit(
xfs_itrace_exit_tag(ip, "xfs_iget.alloc");
/*
- * We need to set XFS_INEW atomically with clearing the
- * reclaimable tag so that we do have an indicator of the
- * inode still being initialized.
+ * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
+ * from stomping over us while we recycle the inode. We can't
+ * clear the radix tree reclaimable tag yet as it requires
+ * pag_ici_lock to be held exclusive.
*/
- ip->i_flags |= XFS_INEW;
- ip->i_flags &= ~XFS_IRECLAIMABLE;
- __xfs_inode_clear_reclaim_tag(mp, pag, ip);
+ ip->i_flags |= XFS_IRECLAIM;
spin_unlock(&ip->i_flags_lock);
read_unlock(&pag->pag_ici_lock);
@@ -253,7 +252,15 @@ xfs_iget_cache_hit(
__xfs_inode_set_reclaim_tag(pag, ip);
goto out_error;
}
+
+ write_lock(&pag->pag_ici_lock);
+ spin_lock(&ip->i_flags_lock);
+ ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM);
+ ip->i_flags |= XFS_INEW;
+ __xfs_inode_clear_reclaim_tag(mp, pag, ip);
inode->i_state = I_LOCK|I_NEW;
+ spin_unlock(&ip->i_flags_lock);
+ write_unlock(&pag->pag_ici_lock);
} else {
/* If the VFS inode is being torn down, pause and try again. */
if (!igrab(inode)) {
@@ -511,17 +518,21 @@ xfs_ireclaim(
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_perag *pag;
+ xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
XFS_STATS_INC(xs_ig_reclaims);
/*
- * Remove the inode from the per-AG radix tree. It doesn't matter
- * if it was never added to it because radix_tree_delete can deal
- * with that case just fine.
+ * Remove the inode from the per-AG radix tree.
+ *
+ * Because radix_tree_delete won't complain even if the item was never
+	 * added to the tree, assert that it's been there before to catch
+	 * problems with the inode lifetime early on.
*/
pag = xfs_get_perag(mp, ip->i_ino);
write_lock(&pag->pag_ici_lock);
- radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino));
+ if (!radix_tree_delete(&pag->pag_ici_root, agino))
+ ASSERT(0);
write_unlock(&pag->pag_ici_lock);
xfs_put_perag(mp, pag);
file:b92a4fa2a0a12eda161c2ff897bd820f5cbde1e8 -> file:523a1ae4964d6efef646469e1398c272cf1b0281
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2877,8 +2877,8 @@ xfs_iflush(
mp = ip->i_mount;
/*
- * If the inode isn't dirty, then just release the inode
- * flush lock and do nothing.
+ * If the inode isn't dirty, then just release the inode flush lock and
+ * do nothing.
*/
if (xfs_inode_clean(ip)) {
xfs_ifunlock(ip);
@@ -2904,6 +2904,19 @@ xfs_iflush(
xfs_iunpin_wait(ip);
/*
+ * For stale inodes we cannot rely on the backing buffer remaining
+ * stale in cache for the remaining life of the stale inode and so
+ * xfs_itobp() below may give us a buffer that no longer contains
+ * inodes below. We have to check this after ensuring the inode is
+ * unpinned so that it is safe to reclaim the stale inode after the
+ * flush call.
+ */
+ if (xfs_iflags_test(ip, XFS_ISTALE)) {
+ xfs_ifunlock(ip);
+ return 0;
+ }
+
+ /*
* This may have been unpinned because the filesystem is shutting
* down forcibly. If that's the case we must not write this inode
* to disk, because the log record didn't make it to disk!
file:67ae5555a30a6e8be0bd415b1596bea17854add5 -> file:7294abce6ef2f4d7d7e7ecae29dd377b14a4a2b3
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -860,8 +860,15 @@ xfs_iomap_write_unwritten(
* set up a transaction to convert the range of extents
* from unwritten to real. Do allocations in a loop until
* we have covered the range passed in.
+ *
+ * Note that we open code the transaction allocation here
+	 * to pass KM_NOFS--we can't risk recursing back into
+ * the filesystem here as we might be asked to write out
+ * the same inode that we complete here and might deadlock
+ * on the iolock.
*/
- tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
+ xfs_wait_for_freeze(mp, SB_FREEZE_TRANS);
+ tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS);
tp->t_flags |= XFS_TRANS_RESERVE;
error = xfs_trans_reserve(tp, resblks,
XFS_WRITE_LOG_RES(mp), 0,
file:fb17f8226b0955eddc3ee3130708750f72fa3f29 -> file:b5b0d80559108a085131f92d3ca8b1ba8a593828
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -3517,7 +3517,7 @@ xlog_do_recovery_pass(
{
xlog_rec_header_t *rhead;
xfs_daddr_t blk_no;
- xfs_caddr_t bufaddr, offset;
+ xfs_caddr_t offset;
xfs_buf_t *hbp, *dbp;
int error = 0, h_size;
int bblks, split_bblks;
@@ -3610,7 +3610,7 @@ xlog_do_recovery_pass(
/*
* Check for header wrapping around physical end-of-log
*/
- offset = NULL;
+ offset = XFS_BUF_PTR(hbp);
split_hblks = 0;
wrapped_hblks = 0;
if (blk_no + hblks <= log->l_logBBsize) {
@@ -3646,9 +3646,8 @@ xlog_do_recovery_pass(
* - order is important.
*/
wrapped_hblks = hblks - split_hblks;
- bufaddr = XFS_BUF_PTR(hbp);
error = XFS_BUF_SET_PTR(hbp,
- bufaddr + BBTOB(split_hblks),
+ offset + BBTOB(split_hblks),
BBTOB(hblks - split_hblks));
if (error)
goto bread_err2;
@@ -3658,14 +3657,10 @@ xlog_do_recovery_pass(
if (error)
goto bread_err2;
- error = XFS_BUF_SET_PTR(hbp, bufaddr,
+ error = XFS_BUF_SET_PTR(hbp, offset,
BBTOB(hblks));
if (error)
goto bread_err2;
-
- if (!offset)
- offset = xlog_align(log, 0,
- wrapped_hblks, hbp);
}
rhead = (xlog_rec_header_t *)offset;
error = xlog_valid_rec_header(log, rhead,
@@ -3685,7 +3680,7 @@ xlog_do_recovery_pass(
} else {
/* This log record is split across the
* physical end of log */
- offset = NULL;
+ offset = XFS_BUF_PTR(dbp);
split_bblks = 0;
if (blk_no != log->l_logBBsize) {
/* some data is before the physical
@@ -3714,9 +3709,8 @@ xlog_do_recovery_pass(
* _first_, then the log start (LR header end)
* - order is important.
*/
- bufaddr = XFS_BUF_PTR(dbp);
error = XFS_BUF_SET_PTR(dbp,
- bufaddr + BBTOB(split_bblks),
+ offset + BBTOB(split_bblks),
BBTOB(bblks - split_bblks));
if (error)
goto bread_err2;
@@ -3727,13 +3721,9 @@ xlog_do_recovery_pass(
if (error)
goto bread_err2;
- error = XFS_BUF_SET_PTR(dbp, bufaddr, h_size);
+ error = XFS_BUF_SET_PTR(dbp, offset, h_size);
if (error)
goto bread_err2;
-
- if (!offset)
- offset = xlog_align(log, wrapped_hblks,
- bblks - split_bblks, dbp);
}
xlog_unpack_data(rhead, offset, log);
if ((error = xlog_recover_process_data(log, rhash,
file:8b6c9e807efb7b8a47bf493563bd5d8dadde3f4f -> file:4d509f742bd20aba743310378c8c5a8a2b327363
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1471,7 +1471,7 @@ xfs_log_sbcount(
if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
return 0;
- tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT);
+ tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP);
error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
XFS_DEFAULT_LOG_COUNT);
if (error) {
file:a6c023bc0fb27191d895578fe910e052840a804a -> file:08fdb6d43efd0cfaffc07716bdbdbd1b21d5d4d5
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -209,6 +209,7 @@ typedef struct xfs_mount {
__uint64_t m_maxioffset; /* maximum inode offset */
__uint64_t m_resblks; /* total reserved blocks */
__uint64_t m_resblks_avail;/* available reserved blocks */
+ __uint64_t m_resblks_save; /* reserved blks @ remount,ro */
int m_dalign; /* stripe unit */
int m_swidth; /* stripe width */
int m_sinoalign; /* stripe unit inode alignment */
@@ -242,6 +243,7 @@ typedef struct xfs_mount {
wait_queue_head_t m_wait_single_sync_task;
__int64_t m_update_flags; /* sb flags we need to update
on the next remount,rw */
+ struct list_head m_mplist; /* inode shrinker mount list */
} xfs_mount_t;
/*
file:f5e4874c37d8ecfa55a77a1a1eeca31c7a6d27df -> file:726014d1c925f9cf398492a79528839e07790000
--- a/fs/xfs/xfs_rw.h
+++ b/fs/xfs/xfs_rw.h
@@ -37,13 +37,6 @@ xfs_fsb_to_db(struct xfs_inode *ip, xfs_
}
/*
- * Flags for xfs_free_eofblocks
- */
-#define XFS_FREE_EOF_LOCK (1<<0)
-#define XFS_FREE_EOF_NOLOCK (1<<1)
-
-
-/*
* helper function to extract extent size hint from inode
*/
STATIC_INLINE xfs_extlen_t
file:66b849358e62d16e9a01fc307bc5a03dc1605ffb -> file:237badcbac3bcbbc9fbbba8ecea9229cdac80f08
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -236,19 +236,20 @@ xfs_trans_alloc(
uint type)
{
xfs_wait_for_freeze(mp, SB_FREEZE_TRANS);
- return _xfs_trans_alloc(mp, type);
+ return _xfs_trans_alloc(mp, type, KM_SLEEP);
}
xfs_trans_t *
_xfs_trans_alloc(
xfs_mount_t *mp,
- uint type)
+ uint type,
+ uint memflags)
{
xfs_trans_t *tp;
atomic_inc(&mp->m_active_trans);
- tp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
+ tp = kmem_zone_zalloc(xfs_trans_zone, memflags);
tp->t_magic = XFS_TRANS_MAGIC;
tp->t_type = type;
tp->t_mountp = mp;
file:ed47fc77759c7cce24f98f283a717fae33fa9eea -> file:a0574f593f5272764adf820c12ad724c9d90011e
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -924,7 +924,7 @@ typedef struct xfs_trans {
* XFS transaction mechanism exported interfaces.
*/
xfs_trans_t *xfs_trans_alloc(struct xfs_mount *, uint);
-xfs_trans_t *_xfs_trans_alloc(struct xfs_mount *, uint);
+xfs_trans_t *_xfs_trans_alloc(struct xfs_mount *, uint, uint);
xfs_trans_t *xfs_trans_dup(xfs_trans_t *);
int xfs_trans_reserve(xfs_trans_t *, uint, uint, uint,
uint, uint);
file:b572f7e840e0b82a372225581d87c85c104dfec6 -> file:38a63244e75dda8d89fb1e17b0009ce1c491998b
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -69,7 +69,6 @@ xfs_setattr(
uint commit_flags=0;
uid_t uid=0, iuid=0;
gid_t gid=0, igid=0;
- int timeflags = 0;
struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2;
int need_iolock = 1;
@@ -134,16 +133,13 @@ xfs_setattr(
if (flags & XFS_ATTR_NOLOCK)
need_iolock = 0;
if (!(mask & ATTR_SIZE)) {
- if ((mask != (ATTR_CTIME|ATTR_ATIME|ATTR_MTIME)) ||
- (mp->m_flags & XFS_MOUNT_WSYNC)) {
- tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
- commit_flags = 0;
- if ((code = xfs_trans_reserve(tp, 0,
- XFS_ICHANGE_LOG_RES(mp), 0,
- 0, 0))) {
- lock_flags = 0;
- goto error_return;
- }
+ tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
+ commit_flags = 0;
+ code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp),
+ 0, 0, 0);
+ if (code) {
+ lock_flags = 0;
+ goto error_return;
}
} else {
if (DM_EVENT_ENABLED(ip, DM_EVENT_TRUNCATE) &&
@@ -294,15 +290,23 @@ xfs_setattr(
* or we are explicitly asked to change it. This handles
* the semantic difference between truncate() and ftruncate()
* as implemented in the VFS.
- */
- if (iattr->ia_size != ip->i_size || (mask & ATTR_CTIME))
- timeflags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
+ *
+ * The regular truncate() case without ATTR_CTIME and ATTR_MTIME
+ * is a special case where we need to update the times despite
+ * not having these flags set. For all other operations the
+	 * VFS sets these flags explicitly if it wants a timestamp
+ * update.
+ */
+ if (iattr->ia_size != ip->i_size &&
+ (!(mask & (ATTR_CTIME | ATTR_MTIME)))) {
+ iattr->ia_ctime = iattr->ia_mtime =
+ current_fs_time(inode->i_sb);
+ mask |= ATTR_CTIME | ATTR_MTIME;
+ }
if (iattr->ia_size > ip->i_size) {
ip->i_d.di_size = iattr->ia_size;
ip->i_size = iattr->ia_size;
- if (!(flags & XFS_ATTR_DMI))
- xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
} else if (iattr->ia_size <= ip->i_size ||
(iattr->ia_size == 0 && ip->i_d.di_nextents)) {
@@ -373,9 +377,6 @@ xfs_setattr(
ip->i_d.di_gid = gid;
inode->i_gid = gid;
}
-
- xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE);
- timeflags |= XFS_ICHGTIME_CHG;
}
/*
@@ -392,51 +393,37 @@ xfs_setattr(
inode->i_mode &= S_IFMT;
inode->i_mode |= mode & ~S_IFMT;
-
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- timeflags |= XFS_ICHGTIME_CHG;
}
/*
* Change file access or modified times.
*/
- if (mask & (ATTR_ATIME|ATTR_MTIME)) {
- if (mask & ATTR_ATIME) {
- inode->i_atime = iattr->ia_atime;
- ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
- ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
- ip->i_update_core = 1;
- }
- if (mask & ATTR_MTIME) {
- inode->i_mtime = iattr->ia_mtime;
- ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
- ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
- timeflags &= ~XFS_ICHGTIME_MOD;
- timeflags |= XFS_ICHGTIME_CHG;
- }
- if (tp && (mask & (ATTR_MTIME_SET|ATTR_ATIME_SET)))
- xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE);
+ if (mask & ATTR_ATIME) {
+ inode->i_atime = iattr->ia_atime;
+ ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
+ ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
+ ip->i_update_core = 1;
}
-
- /*
- * Change file inode change time only if ATTR_CTIME set
- * AND we have been called by a DMI function.
- */
-
- if ((flags & XFS_ATTR_DMI) && (mask & ATTR_CTIME)) {
+ if (mask & ATTR_CTIME) {
inode->i_ctime = iattr->ia_ctime;
ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
ip->i_update_core = 1;
- timeflags &= ~XFS_ICHGTIME_CHG;
+ }
+ if (mask & ATTR_MTIME) {
+ inode->i_mtime = iattr->ia_mtime;
+ ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
+ ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
+ ip->i_update_core = 1;
}
/*
- * Send out timestamp changes that need to be set to the
- * current time. Not done when called by a DMI function.
+ * And finally, log the inode core if any attribute in it
+ * has been changed.
*/
- if (timeflags && !(flags & XFS_ATTR_DMI))
- xfs_ichgtime(ip, timeflags);
+ if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE|
+ ATTR_ATIME|ATTR_CTIME|ATTR_MTIME))
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
XFS_STATS_INC(xs_ig_attrchg);
@@ -451,12 +438,10 @@ xfs_setattr(
* mix so this probably isn't worth the trouble to optimize.
*/
code = 0;
- if (tp) {
- if (mp->m_flags & XFS_MOUNT_WSYNC)
- xfs_trans_set_sync(tp);
+ if (mp->m_flags & XFS_MOUNT_WSYNC)
+ xfs_trans_set_sync(tp);
- code = xfs_trans_commit(tp, commit_flags);
- }
+ code = xfs_trans_commit(tp, commit_flags);
xfs_iunlock(ip, lock_flags);
@@ -612,7 +597,7 @@ xfs_fsync(
{
xfs_trans_t *tp;
int error = 0;
- int log_flushed = 0, changed = 1;
+ int log_flushed = 0;
xfs_itrace_entry(ip);
@@ -642,19 +627,11 @@ xfs_fsync(
* disk yet, the inode will be still be pinned. If it is,
* force the log.
*/
-
xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
if (xfs_ipincount(ip)) {
error = _xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
XFS_LOG_FORCE | XFS_LOG_SYNC,
&log_flushed);
- } else {
- /*
- * If the inode is not pinned and nothing has changed
- * we don't need to flush the cache.
- */
- changed = 0;
}
} else {
/*
@@ -689,7 +666,7 @@ xfs_fsync(
xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
- if ((ip->i_mount->m_flags & XFS_MOUNT_BARRIER) && changed) {
+ if (ip->i_mount->m_flags & XFS_MOUNT_BARRIER) {
/*
* If the log write didn't issue an ordered tag we need
* to flush the disk cache for the data device now.
@@ -709,6 +686,11 @@ xfs_fsync(
}
/*
+ * Flags for xfs_free_eofblocks
+ */
+#define XFS_FREE_EOF_TRYLOCK (1<<0)
+
+/*
* This is called by xfs_inactive to free any blocks beyond eof
* when the link count isn't zero and by xfs_dm_punch_hole() when
* punching a hole to EOF.
@@ -726,7 +708,6 @@ xfs_free_eofblocks(
xfs_filblks_t map_len;
int nimaps;
xfs_bmbt_irec_t imap;
- int use_iolock = (flags & XFS_FREE_EOF_LOCK);
/*
* Figure out if there are any blocks beyond the end
@@ -768,14 +749,19 @@ xfs_free_eofblocks(
* cache and we can't
* do that within a transaction.
*/
- if (use_iolock)
+ if (flags & XFS_FREE_EOF_TRYLOCK) {
+ if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
+ xfs_trans_cancel(tp, 0);
+ return 0;
+ }
+ } else {
xfs_ilock(ip, XFS_IOLOCK_EXCL);
+ }
error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE,
ip->i_size);
if (error) {
xfs_trans_cancel(tp, 0);
- if (use_iolock)
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
return error;
}
@@ -812,8 +798,7 @@ xfs_free_eofblocks(
error = xfs_trans_commit(tp,
XFS_TRANS_RELEASE_LOG_RES);
}
- xfs_iunlock(ip, (use_iolock ? (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)
- : XFS_ILOCK_EXCL));
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL);
}
return error;
}
@@ -1113,7 +1098,17 @@ xfs_release(
(ip->i_df.if_flags & XFS_IFEXTENTS)) &&
(!(ip->i_d.di_flags &
(XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
- error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK);
+
+ /*
+ * If we can't get the iolock just skip truncating
+ * the blocks past EOF because we could deadlock
+ * with the mmap_sem otherwise. We'll get another
+ * chance to drop them once the last reference to
+ * the inode is dropped, so we'll never leak blocks
+ * permanently.
+ */
+ error = xfs_free_eofblocks(mp, ip,
+ XFS_FREE_EOF_TRYLOCK);
if (error)
return error;
}
@@ -1184,7 +1179,7 @@ xfs_inactive(
(!(ip->i_d.di_flags &
(XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) ||
(ip->i_delayed_blks != 0)))) {
- error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK);
+ error = xfs_free_eofblocks(mp, ip, 0);
if (error)
return VN_INACTIVE_CACHE;
}
@@ -2456,46 +2451,6 @@ xfs_set_dmattrs(
return error;
}
-int
-xfs_reclaim(
- xfs_inode_t *ip)
-{
-
- xfs_itrace_entry(ip);
-
- ASSERT(!VN_MAPPED(VFS_I(ip)));
-
- /* bad inode, get out here ASAP */
- if (is_bad_inode(VFS_I(ip))) {
- xfs_ireclaim(ip);
- return 0;
- }
-
- xfs_ioend_wait(ip);
-
- ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
-
- /*
- * If we have nothing to flush with this inode then complete the
- * teardown now, otherwise break the link between the xfs inode and the
- * linux inode and clean up the xfs inode later. This avoids flushing
- * the inode to disk during the delete operation itself.
- *
- * When breaking the link, we need to set the XFS_IRECLAIMABLE flag
- * first to ensure that xfs_iunpin() will never see an xfs inode
- * that has a linux inode being reclaimed. Synchronisation is provided
- * by the i_flags_lock.
- */
- if (!ip->i_update_core && (ip->i_itemp == NULL)) {
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_iflock(ip);
- xfs_iflags_set(ip, XFS_IRECLAIMABLE);
- return xfs_reclaim_inode(ip, 1, XFS_IFLUSH_DELWRI_ELSE_SYNC);
- }
- xfs_inode_set_reclaim_tag(ip);
- return 0;
-}
-
/*
* xfs_alloc_file_space()
* This routine allocates disk space for the given file.
file:a9e102de71a19c35d1bac05181b910d496e180fa -> file:167a467403a59f0c4327bb0a23cbb369e0e9d7af
--- a/fs/xfs/xfs_vnodeops.h
+++ b/fs/xfs/xfs_vnodeops.h
@@ -38,7 +38,6 @@ int xfs_symlink(struct xfs_inode *dp, st
const char *target_path, mode_t mode, struct xfs_inode **ipp,
cred_t *credp);
int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state);
-int xfs_reclaim(struct xfs_inode *ip);
int xfs_change_file_space(struct xfs_inode *ip, int cmd,
xfs_flock64_t *bf, xfs_off_t offset, int attr_flags);
int xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name,
file:740ac3ad8fd06fc61b8e2dacc10054ec27469439 -> file:e7bdaafabc3f521349b297d9e62923b9b22499f2
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -48,7 +48,7 @@ struct acpi_power_register {
u8 space_id;
u8 bit_width;
u8 bit_offset;
- u8 reserved;
+ u8 access_size;
u64 address;
} __attribute__ ((packed));
@@ -74,6 +74,7 @@ struct acpi_processor_cx {
u32 power;
u32 usage;
u64 time;
+ u8 bm_sts_skip;
struct acpi_processor_cx_policy promotion;
struct acpi_processor_cx_policy demotion;
char desc[ACPI_CX_DESC_LEN];
file:e694263445f7b5dd48ff294cdd6ec4b76f3769a2 -> file:69206957b72c52efe1f9fe7d2479cfc931d7d7b7
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -131,7 +131,7 @@ static inline void dma_sync_single_range
debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
} else
- dma_sync_single_for_cpu(dev, addr, size, dir);
+ dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -148,7 +148,7 @@ static inline void dma_sync_single_range
debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
} else
- dma_sync_single_for_device(dev, addr, size, dir);
+ dma_sync_single_for_device(dev, addr + offset, size, dir);
}
static inline void
file:e6f3b120f51a5e4f9d51bf694b8ecba0b8d7503c -> file:0cbdccc4003d8dfc3a151841f3f63b049319d538
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -6,6 +6,7 @@
{0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \
@@ -375,6 +376,7 @@
{0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0, 0, 0}
#define r128_PCI_IDS \
file:07432a144204c6f33bbe61362c4ecfc6e4c9b576 -> file:c010b94625c26cec18c201cb0d93830e0f61c8ae
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -251,7 +251,6 @@ int acpi_check_mem_region(resource_size_
void __init acpi_no_s4_hw_signature(void);
void __init acpi_old_suspend_ordering(void);
void __init acpi_s4_no_nvs(void);
-void __init acpi_set_sci_en_on_resume(void);
#endif /* CONFIG_PM_SLEEP */
struct acpi_osc_context {
file:4fb357312b3bf4b4484879fa5ede0942e39fb15f -> file:89387962f5f57291b289846f92d55e8203ccaf11
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -1000,8 +1000,8 @@ static inline int ata_ok(u8 status)
static inline int lba_28_ok(u64 block, u32 n_block)
{
- /* check the ending block number */
- return ((block + n_block) < ((u64)1 << 28)) && (n_block <= 256);
+ /* check the ending block number: must be LESS THAN 0x0fffffff */
+ return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= 256);
}
static inline int lba_48_ok(u64 block, u32 n_block)
file:15e4eb713694442a1ed77d5972667ac69543fd8a -> file:7ffab7cb9ee3d6d1c00111ab968b25c9dd0bd778
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -357,6 +357,8 @@ struct ethtool_rxnfc {
__u32 flow_type;
/* The rx flow hash value or the rule DB size */
__u64 data;
+ /* The following fields are not valid and must not be used for
+ * the ETHTOOL_{G,X}RXFH commands. */
struct ethtool_rx_flow_spec fs;
__u32 rule_cnt;
__u32 rule_locs[0];
file:5fc9df401ec9d94069dd7753f711956d55229b30 -> file:b6a562291a6d00657d572b690a18e71c1236f50b
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1310,6 +1310,8 @@ extern int send_sigurg(struct fown_struc
#define MNT_FORCE 0x00000001 /* Attempt to forcibily umount */
#define MNT_DETACH 0x00000002 /* Just detach from the tree */
#define MNT_EXPIRE 0x00000004 /* Mark for expiry */
+#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
+#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */
extern struct list_head super_blocks;
extern spinlock_t sb_lock;
file:52e15e079c619894f680dfe8b62531358a7491aa -> file:e7660728a2ed970af93269bcc07d08631bb78469
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1098,6 +1098,8 @@ enum ieee80211_category {
WLAN_CATEGORY_SA_QUERY = 8,
WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9,
WLAN_CATEGORY_WMM = 17,
+ WLAN_CATEGORY_MESH_PLINK = 30, /* Pending ANA approval */
+ WLAN_CATEGORY_MESH_PATH_SEL = 32, /* Pending ANA approval */
WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126,
WLAN_CATEGORY_VENDOR_SPECIFIC = 127,
};
file:f1011f7f3d4142bf5771007661cec4cd8a6c3315 -> file:638ce4554c76e903d632720b3d25dea3350c745b
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -653,6 +653,7 @@ struct transaction_s
* waiting for it to finish.
*/
unsigned int t_synchronous_commit:1;
+ unsigned int t_flushed_data_blocks:1;
/*
* For use by the filesystem to store fs-specific data
file:b7bbb5ddd7aec861215887db80008b2b54543f79 -> file:c728a50f8dabdc56c9e53e744237228174179925
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -53,7 +53,7 @@ extern struct kmem_cache *kvm_vcpu_cache
*/
struct kvm_io_bus {
int dev_count;
-#define NR_IOBUS_DEVS 6
+#define NR_IOBUS_DEVS 200
struct kvm_io_device *devs[NR_IOBUS_DEVS];
};
@@ -116,6 +116,11 @@ struct kvm_memory_slot {
int user_alloc;
};
+static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
+{
+ return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+}
+
struct kvm_kernel_irq_routing_entry {
u32 gsi;
u32 type;
file:9e2fa999fe78e677ded85644d67f99af903b2643 -> file:397a3774e6e964a06cb4dbd99738684034e22a8b
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1170,8 +1170,8 @@ int write_one_page(struct page *page, in
void task_dirty_inc(struct task_struct *tsk);
/* readahead.c */
-#define VM_MAX_READAHEAD 4 /* kbytes */
-#define VM_MIN_READAHEAD 4 /* kbytes (includes current page) */
+#define VM_MAX_READAHEAD 128 /* kbytes */
+#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
pgoff_t offset, unsigned long nr_to_read);
file:63adefe56fbbcd370d08f863aefe2ecee7c6ebff -> file:67325bfc803eeea2f55f4c83527c904960ed1bd3
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2317,6 +2317,7 @@
#define PCI_VENDOR_ID_JMICRON 0x197B
#define PCI_DEVICE_ID_JMICRON_JMB360 0x2360
#define PCI_DEVICE_ID_JMICRON_JMB361 0x2361
+#define PCI_DEVICE_ID_JMICRON_JMB362 0x2362
#define PCI_DEVICE_ID_JMICRON_JMB363 0x2363
#define PCI_DEVICE_ID_JMICRON_JMB365 0x2365
#define PCI_DEVICE_ID_JMICRON_JMB366 0x2366
file:3d0a9ff24f01220f2957d8f3665eb3467dcafa6d -> file:a28c37d9dd9ec9185f65348da841ccf87a747f04
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -302,6 +302,7 @@ struct ssb_bus {
u16 chip_id;
u16 chip_rev;
u16 sprom_size; /* number of words in sprom */
+ u16 sprom_offset;
u8 chip_package;
/* List of devices (cores) on the backplane. */
file:4e27acf0a92f91f3f3052f96dcdfa4d142ca369e -> file:7600f38c5aa96b9d70bed32150bbc5c776ad8613
--- a/include/linux/ssb/ssb_driver_chipcommon.h
+++ b/include/linux/ssb/ssb_driver_chipcommon.h
@@ -46,6 +46,7 @@
#define SSB_PLLTYPE_7 0x00038000 /* 25Mhz, 4 dividers */
#define SSB_CHIPCO_CAP_PCTL 0x00040000 /* Power Control */
#define SSB_CHIPCO_CAP_OTPS 0x00380000 /* OTP size */
+#define SSB_CHIPCO_CAP_SPROM 0x40000000 /* SPROM present */
#define SSB_CHIPCO_CAP_OTPS_SHIFT 19
#define SSB_CHIPCO_CAP_OTPS_BASE 5
#define SSB_CHIPCO_CAP_JTAGM 0x00400000 /* JTAG master present */
@@ -564,6 +565,7 @@ struct ssb_chipcommon_pmu {
struct ssb_chipcommon {
struct ssb_device *dev;
u32 capabilities;
+ u32 status;
/* Fast Powerup Delay constant */
u16 fast_pwrup_delay;
struct ssb_chipcommon_pmu pmu;
file:9ae9082eaeb46eb2f9d9573a2706d339ab8c06ab -> file:b8be23ce191547126ea00f54fcf7a12a6b742fad
--- a/include/linux/ssb/ssb_regs.h
+++ b/include/linux/ssb/ssb_regs.h
@@ -170,7 +170,8 @@
#define SSB_SPROMSIZE_WORDS_R4 220
#define SSB_SPROMSIZE_BYTES_R123 (SSB_SPROMSIZE_WORDS_R123 * sizeof(u16))
#define SSB_SPROMSIZE_BYTES_R4 (SSB_SPROMSIZE_WORDS_R4 * sizeof(u16))
-#define SSB_SPROM_BASE 0x1000
+#define SSB_SPROM_BASE1 0x1000
+#define SSB_SPROM_BASE31 0x0800
#define SSB_SPROM_REVISION 0x107E
#define SSB_SPROM_REVISION_REV 0x00FF /* SPROM Revision number */
#define SSB_SPROM_REVISION_CRC 0xFF00 /* SPROM CRC8 value */
file:bf2a0c7488780b6096d2c5ccf93716fdea5b5b89 -> file:1dba6ee55203fec99992d211eb169938fcec918e
--- a/include/linux/tboot.h
+++ b/include/linux/tboot.h
@@ -150,6 +150,7 @@ extern int tboot_force_iommu(void);
#else
+#define tboot_enabled() 0
#define tboot_probe() do { } while (0)
#define tboot_shutdown(shutdown_type) do { } while (0)
#define tboot_sleep(sleep_state, pm1a_control, pm1b_control) \
file:f456534dcaf92615e4b6778bd17de23f227a5578 -> file:3e2576df213c609730cb38f14e5fa534fc21c896
--- a/include/math-emu/op-common.h
+++ b/include/math-emu/op-common.h
@@ -799,7 +799,7 @@ do { \
X##_e -= (_FP_W_TYPE_SIZE - rsize); \
X##_e = rsize - X##_e - 1; \
\
- if (_FP_FRACBITS_##fs < rsize && _FP_WFRACBITS_##fs < X##_e) \
+ if (_FP_FRACBITS_##fs < rsize && _FP_WFRACBITS_##fs <= X##_e) \
__FP_FRAC_SRS_1(ur_, (X##_e - _FP_WFRACBITS_##fs + 1), rsize);\
_FP_FRAC_DISASSEMBLE_##wc(X, ur_, rsize); \
if ((_FP_WFRACBITS_##fs - X##_e - 1) > 0) \
file:bfd23aeedc46e5176c965d64924a381462b44e1a -> file:ed3aea1605e89b6955e402a8b7a71c97119f42e0
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -139,20 +139,15 @@ enum {
#define ESCO_2EV5 0x0100
#define ESCO_3EV5 0x0200
-#define SCO_ESCO_MASK (ESCO_HV1 | ESCO_HV2 | ESCO_HV3)
-#define EDR_ESCO_MASK (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5)
-#define ALL_ESCO_MASK (SCO_ESCO_MASK | ESCO_EV3 | ESCO_EV4 | ESCO_EV5 | \
- EDR_ESCO_MASK)
+#define SCO_ESCO_MASK (ESCO_HV1 | ESCO_HV2 | ESCO_HV3)
+#define EDR_ESCO_MASK (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5)
/* ACL flags */
-#define ACL_START_NO_FLUSH 0x00
#define ACL_CONT 0x01
#define ACL_START 0x02
#define ACL_ACTIVE_BCAST 0x04
#define ACL_PICO_BCAST 0x08
-#define ACL_PB_MASK (ACL_CONT | ACL_START)
-
/* Baseband links */
#define SCO_LINK 0x00
#define ACL_LINK 0x01
@@ -191,7 +186,6 @@ enum {
#define LMP_EDR_ESCO_3M 0x40
#define LMP_EDR_3S_ESCO 0x80
-#define LMP_NO_FLUSH 0x01
#define LMP_SIMPLE_PAIR 0x08
/* Connection modes */
@@ -997,9 +991,6 @@ struct hci_conn_info {
__u8 out;
__u16 state;
__u32 link_mode;
- __u32 mtu;
- __u32 cnt;
- __u32 pkts;
};
struct hci_dev_req {
file:f5da4e6cfc2513e8912760f77c1fca1937fd8b63 -> file:7b640aeddb64d81af0c608becdc6d88631686773
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -326,15 +326,12 @@ void hci_acl_disconn(struct hci_conn *co
void hci_add_sco(struct hci_conn *conn, __u16 handle);
void hci_setup_sync(struct hci_conn *conn, __u16 handle);
-struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type,
- __u16 pkt_type, bdaddr_t *dst);
+struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type,
- __u16 pkt_type, bdaddr_t *dst,
- __u8 sec_level, __u8 auth_type);
+struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
int hci_conn_change_link_key(struct hci_conn *conn);
@@ -361,7 +358,7 @@ static inline void hci_conn_put(struct h
if (conn->state == BT_CONNECTED) {
timeo = msecs_to_jiffies(conn->disc_timeout);
if (!conn->out)
- timeo *= 20;
+ timeo *= 2;
} else
timeo = msecs_to_jiffies(10);
} else
@@ -479,7 +476,6 @@ void hci_conn_del_sysfs(struct hci_conn
#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
#define lmp_esco_capable(dev) ((dev)->features[3] & LMP_ESCO)
#define lmp_ssp_capable(dev) ((dev)->features[6] & LMP_SIMPLE_PAIR)
-#define lmp_no_flush_capable(dev) ((dev)->features[6] & LMP_NO_FLUSH)
/* ----- HCI protocols ----- */
struct hci_proto {
file:23f37b487977dc00b8e188f14768e797ade566d8 -> file:9516f4b4a3c235169404a1d589edfcb2eb378d46
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -70,7 +70,6 @@ struct l2cap_conninfo {
#define L2CAP_LM_TRUSTED 0x0008
#define L2CAP_LM_RELIABLE 0x0010
#define L2CAP_LM_SECURE 0x0020
-#define L2CAP_LM_FLUSHABLE 0x0040
/* L2CAP command codes */
#define L2CAP_COMMAND_REJ 0x01
@@ -317,7 +316,6 @@ struct l2cap_pinfo {
__u8 sec_level;
__u8 role_switch;
__u8 force_reliable;
- __u8 flushable;
__u8 conf_req[64];
__u8 conf_len;
file:c274993234e32dc8c6ea88b7d61e0f332227e6cb -> file:921d7b3c7f8d2098e6390d3d60195628cae55fc1
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -29,6 +29,7 @@
#define RFCOMM_CONN_TIMEOUT (HZ * 30)
#define RFCOMM_DISC_TIMEOUT (HZ * 20)
#define RFCOMM_AUTH_TIMEOUT (HZ * 25)
+#define RFCOMM_IDLE_TIMEOUT (HZ * 2)
#define RFCOMM_DEFAULT_MTU 127
#define RFCOMM_DEFAULT_CREDITS 7
@@ -154,6 +155,7 @@ struct rfcomm_msc {
struct rfcomm_session {
struct list_head list;
struct socket *sock;
+ struct timer_list timer;
unsigned long state;
unsigned long flags;
atomic_t refcnt;
file:924338ac322387084bf8ca2927a8ea1e5e811013 -> file:e28a2a7714713a4cbb05dbfd3cd0c514ede79d66
--- a/include/net/bluetooth/sco.h
+++ b/include/net/bluetooth/sco.h
@@ -37,7 +37,6 @@
struct sockaddr_sco {
sa_family_t sco_family;
bdaddr_t sco_bdaddr;
- __u16 sco_pkt_type;
};
/* SCO socket options */
@@ -73,8 +72,7 @@ struct sco_conn {
struct sco_pinfo {
struct bt_sock bt;
- __u16 pkt_type;
-
+ __u32 flags;
struct sco_conn *conn;
};
file:998c30fc89819f2d48140dd83ad82feb40e6f99b -> file:c39ed07929487457ae31f6d371409bde4fa430b0
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -908,6 +908,9 @@ enum ieee80211_tkip_key_type {
* @IEEE80211_HW_BEACON_FILTER:
* Hardware supports dropping of irrelevant beacon frames to
* avoid waking up cpu.
+ * @IEEE80211_HW_REPORTS_TX_ACK_STATUS:
+ * Hardware can provide ack status reports of Tx frames to
+ * the stack.
*/
enum ieee80211_hw_flags {
IEEE80211_HW_RX_INCLUDES_FCS = 1<<1,
@@ -924,6 +927,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12,
IEEE80211_HW_MFP_CAPABLE = 1<<13,
IEEE80211_HW_BEACON_FILTER = 1<<14,
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<15,
};
/**
file:0a474568b003053686f98e27d4102d34148a8ade -> file:89e54e932368430bcabad6f2210a6c510428ef4e
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -772,6 +772,7 @@ int sctp_user_addto_chunk(struct sctp_ch
struct iovec *data);
void sctp_chunk_free(struct sctp_chunk *);
void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
+void *sctp_addto_chunk_fixed(struct sctp_chunk *, int len, const void *data);
struct sctp_chunk *sctp_chunkify(struct sk_buff *,
const struct sctp_association *,
struct sock *);
file:a4b233318179b16e80997723b82959a45b89df90 -> file:91a4e4ff9a9bbeb28d66394299e3ef3c938e5b0c
--- a/include/scsi/scsi_bsg_fc.h
+++ b/include/scsi/scsi_bsg_fc.h
@@ -292,7 +292,7 @@ struct fc_bsg_request {
struct fc_bsg_rport_els r_els;
struct fc_bsg_rport_ct r_ct;
} rqst_data;
-};
+} __attribute__((packed));
/* response (request sense data) structure of the sg_io_v4 */
file:dacb8ef6700071238a3293799f57610909c6f22f -> file:4b6a4a3001677d1482caac5df7e1d6167dd025c2
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -43,7 +43,8 @@
tstruct \
char __data[0]; \
}; \
- static struct ftrace_event_call event_##name
+ static struct ftrace_event_call \
+ __attribute__((__aligned__(4))) event_##name
#undef __cpparg
#define __cpparg(arg...) arg
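
The include/trace/ftrace.h hunk above (and the matching kernel/trace/trace.h hunk later in this series) adds __attribute__((__aligned__(4))) to the generated ftrace_event_call declarations so the extern declaration and the definition agree on alignment; the event call objects are gathered into a common section and walked in order, so inconsistent alignment breaks that walk. A stand-alone illustration of the attribute, using a made-up struct in place of ftrace_event_call:

    #include <stdio.h>
    #include <stddef.h>

    /* Made-up stand-in for ftrace_event_call, just to show the attribute. */
    struct demo_event {
        char name[5];
    };

    /* The added attribute forces at least 4-byte alignment so that every
     * such object placed in a common section has a predictable layout. */
    static struct demo_event __attribute__((__aligned__(4))) demo = { "tick" };

    int main(void)
    {
        printf("alignment: %zu\n", (size_t)__alignof__(demo));
        printf("address %% 4 = %lu\n", (unsigned long)&demo % 4UL);
        return 0;
    }
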
file:ee9d69707c0afafc9d5a6d0d9b46146e4fafe3e4 -> file:d01bc14a9b3713f6f62ed6c2599a1ca95b0cf0e2
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -706,7 +706,7 @@ SYSCALL_DEFINE4(mq_open, const char __us
dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
if (IS_ERR(dentry)) {
error = PTR_ERR(dentry);
- goto out_err;
+ goto out_putfd;
}
mntget(ipc_ns->mq_mnt);
@@ -744,7 +744,6 @@ out:
mntput(ipc_ns->mq_mnt);
out_putfd:
put_unused_fd(fd);
-out_err:
fd = error;
out_upsem:
mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
file:f6c204f07ea6084c4849d52358d6b1b2aab1f0da -> file:180d1885f782e5d9e4e5872d166bf8df5738eae8
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -494,29 +494,26 @@ asmlinkage long compat_sys_sched_getaffi
{
int ret;
cpumask_var_t mask;
- unsigned long *k;
- unsigned int min_length = cpumask_size();
- if (nr_cpu_ids <= BITS_PER_COMPAT_LONG)
- min_length = sizeof(compat_ulong_t);
-
- if (len < min_length)
+ if ((len * BITS_PER_BYTE) < nr_cpu_ids)
+ return -EINVAL;
+ if (len & (sizeof(compat_ulong_t)-1))
return -EINVAL;
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
ret = sched_getaffinity(pid, mask);
- if (ret < 0)
- goto out;
+ if (ret == 0) {
+ size_t retlen = min_t(size_t, len, cpumask_size());
- k = cpumask_bits(mask);
- ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8);
- if (ret == 0)
- ret = min_length;
-
-out:
+ if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8))
+ ret = -EFAULT;
+ else
+ ret = retlen;
+ }
free_cpumask_var(mask);
+
return ret;
}
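
The kernel/compat.c hunk above rewrites the length validation in compat_sys_sched_getaffinity(): the user buffer must provide at least one bit per possible CPU and be a multiple of the compat word size, and on success the syscall now returns the number of bytes actually copied (min(len, cpumask_size())). A small user-space sketch of the same validation arithmetic; the typedef and the values passed in main() are illustrative stand-ins, not the kernel's:

    #include <stdio.h>
    #include <stddef.h>

    #define BITS_PER_BYTE 8

    /* Illustrative stand-in: the 32-bit word a compat (32-bit) task uses. */
    typedef unsigned int compat_ulong_t;

    static int affinity_len_ok(size_t len, unsigned int nr_cpu_ids)
    {
        if (len * BITS_PER_BYTE < nr_cpu_ids)
            return 0;                       /* cannot hold one bit per CPU */
        if (len & (sizeof(compat_ulong_t) - 1))
            return 0;                       /* not a multiple of the compat word */
        return 1;
    }

    int main(void)
    {
        /* 32 possible CPUs: 4 bytes is enough and aligned, 2 is too small,
         * 6 is big enough but not a multiple of 4. */
        printf("%d %d %d\n",
               affinity_len_ok(4, 32),
               affinity_len_ok(2, 32),
               affinity_len_ok(6, 32));
        return 0;
    }
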
file:dfa33e8ee2a9c286ec4b4f4e1be9cb0e8c85667a -> file:4b270e666212e29c083d6d012000794599438e73
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -870,7 +870,6 @@ SYSCALL_DEFINE2(delete_module, const cha
mutex_lock(&module_mutex);
/* Store the name of the last unloaded module for diagnostic purposes */
strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
- ddebug_remove_module(mod->name);
free_module(mod);
out:
@@ -1533,6 +1532,9 @@ static void free_module(struct module *m
remove_sect_attrs(mod);
mod_kobject_remove(mod);
+ /* Remove dynamic debug info */
+ ddebug_remove_module(mod->name);
+
/* Arch-specific cleanup. */
module_arch_cleanup(mod);
file:947b3ad551f8a925c39ef6f53f4f37c16f8a3ea3 -> file:f85644c878dd66b70101de3d504ddfdf6e46d32f
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -172,6 +172,13 @@ __mutex_lock_common(struct mutex *lock,
struct thread_info *owner;
/*
+ * If we own the BKL, then don't spin. The owner of
+ * the mutex might be waiting on us to release the BKL.
+ */
+ if (unlikely(current->lock_depth >= 0))
+ break;
+
+ /*
* If there's an owner, wait for it to either
* release the lock or go to sleep.
*/
file:413d101ca8eea782725305eedbaa397a36d814d5 -> file:183d437f4a0fb7c5ad96ad82ae3f6d28333e52e1
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -4510,8 +4510,8 @@ SYSCALL_DEFINE5(perf_event_open,
struct perf_event_context *ctx;
struct file *event_file = NULL;
struct file *group_file = NULL;
+ int event_fd;
int fput_needed = 0;
- int fput_needed2 = 0;
int err;
/* for future expandability... */
@@ -4532,12 +4532,18 @@ SYSCALL_DEFINE5(perf_event_open,
return -EINVAL;
}
+ event_fd = get_unused_fd_flags(O_RDWR);
+ if (event_fd < 0)
+ return event_fd;
+
/*
* Get the target context (task or percpu):
*/
ctx = find_get_context(pid, cpu);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto err_fd;
+ }
/*
* Look up the group leader (we will attach this event to it):
@@ -4577,13 +4583,11 @@ SYSCALL_DEFINE5(perf_event_open,
if (IS_ERR(event))
goto err_put_context;
- err = anon_inode_getfd("[perf_event]", &perf_fops, event, 0);
- if (err < 0)
- goto err_free_put_context;
-
- event_file = fget_light(err, &fput_needed2);
- if (!event_file)
+ event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
+ if (IS_ERR(event_file)) {
+ err = PTR_ERR(event_file);
goto err_free_put_context;
+ }
if (flags & PERF_FLAG_FD_OUTPUT) {
err = perf_event_set_output(event, group_fd);
@@ -4604,19 +4608,19 @@ SYSCALL_DEFINE5(perf_event_open,
list_add_tail(&event->owner_entry, &current->perf_event_list);
mutex_unlock(&current->perf_event_mutex);
-err_fput_free_put_context:
- fput_light(event_file, fput_needed2);
+ fput_light(group_file, fput_needed);
+ fd_install(event_fd, event_file);
+ return event_fd;
+err_fput_free_put_context:
+ fput(event_file);
err_free_put_context:
- if (err < 0)
- kfree(event);
-
+ free_event(event);
err_put_context:
- if (err < 0)
- put_ctx(ctx);
-
fput_light(group_file, fput_needed);
-
+ put_ctx(ctx);
+err_fd:
+ put_unused_fd(event_fd);
return err;
}
@@ -4981,12 +4985,22 @@ int perf_event_init_task(struct task_str
return ret;
}
+static void __init perf_event_init_all_cpus(void)
+{
+ int cpu;
+ struct perf_cpu_context *cpuctx;
+
+ for_each_possible_cpu(cpu) {
+ cpuctx = &per_cpu(perf_cpu_context, cpu);
+ __perf_event_init_context(&cpuctx->ctx, NULL);
+ }
+}
+
static void __cpuinit perf_event_init_cpu(int cpu)
{
struct perf_cpu_context *cpuctx;
cpuctx = &per_cpu(perf_cpu_context, cpu);
- __perf_event_init_context(&cpuctx->ctx, NULL);
spin_lock(&perf_resource_lock);
cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
@@ -5057,6 +5071,7 @@ static struct notifier_block __cpuinitda
void __init perf_event_init(void)
{
+ perf_event_init_all_cpus();
perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
(void *)(long)smp_processor_id());
perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
file:a55d3a367ae86a95e3788e6162717188bb05ad90 -> file:dfadc5b729f194905ded52faef8b2d2564dfd2ce
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -127,8 +127,10 @@ int __ref profile_init(void)
return 0;
prof_buffer = vmalloc(buffer_bytes);
- if (prof_buffer)
+ if (prof_buffer) {
+ memset(prof_buffer, 0, buffer_bytes);
return 0;
+ }
free_cpumask_var(prof_cpu_mask);
return -ENOMEM;
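
The kernel/profile.c fix above zeroes the freshly vmalloc()ed profile buffer, since vmalloc() hands back uninitialised memory and the profiler only ever increments the counters stored in that buffer. A minimal user-space analogue of why the explicit memset() is needed:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        size_t bytes = 1024 * sizeof(unsigned int);

        /* Like vmalloc(), malloc() returns uninitialised memory, so a buffer
         * of hit counters must be zeroed before anything increments it. */
        unsigned int *counters = malloc(bytes);
        if (!counters)
            return 1;
        memset(counters, 0, bytes);

        counters[42]++;                     /* safe: every slot started at 0 */
        printf("%u %u\n", counters[0], counters[42]);

        free(counters);
        return 0;
    }
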
file:6bd5ab842f55af19e388049663e2964acddc4448 -> file:d80812d39d6238378ccac64538056d762128f084
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1997,12 +1997,8 @@ static void set_curr_task_fair(struct rq
static void moved_group_fair(struct task_struct *p)
{
struct cfs_rq *cfs_rq = task_cfs_rq(p);
- s64 delta;
update_curr(cfs_rq);
- delta = (s64) (p->se.vruntime - cfs_rq->min_vruntime);
- if(delta > 0)
- p->se.vruntime = cfs_rq->min_vruntime;
place_entity(cfs_rq, &p->se, 1);
}
#endif
file:6dc4e5ef7a01aab7e92479455c3672ce6096461c -> file:0cccb6c460da74d4f8e03214002baf41b8577ca9
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3258,6 +3258,7 @@ void ftrace_graph_init_task(struct task_
{
/* Make sure we do not use the parent ret_stack */
t->ret_stack = NULL;
+ t->curr_ret_stack = -1;
if (ftrace_graph_active) {
struct ftrace_ret_stack *ret_stack;
@@ -3267,7 +3268,6 @@ void ftrace_graph_init_task(struct task_
GFP_KERNEL);
if (!ret_stack)
return;
- t->curr_ret_stack = -1;
atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
t->ftrace_timestamp = 0;
file:5dd017fea6f58bf8cbb3824770c99ba6f83fa4f2 -> file:c88b21c9fb64b9a99b2147ac4e1fa6bd290464fa
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2237,12 +2237,12 @@ ring_buffer_lock_reserve(struct ring_buf
if (ring_buffer_flags != RB_BUFFERS_ON)
return NULL;
- if (atomic_read(&buffer->record_disabled))
- return NULL;
-
/* If we are tracing schedule, we don't want to recurse */
resched = ftrace_preempt_disable();
+ if (atomic_read(&buffer->record_disabled))
+ goto out_nocheck;
+
if (trace_recursive_lock())
goto out_nocheck;
@@ -2474,11 +2474,11 @@ int ring_buffer_write(struct ring_buffer
if (ring_buffer_flags != RB_BUFFERS_ON)
return -EBUSY;
- if (atomic_read(&buffer->record_disabled))
- return -EBUSY;
-
resched = ftrace_preempt_disable();
+ if (atomic_read(&buffer->record_disabled))
+ goto out;
+
cpu = raw_smp_processor_id();
if (!cpumask_test_cpu(cpu, buffer->cpumask))
file:b20d3ec75de9c6f505e5a591d7d746013b88d6db -> file:3cfb60b4770b651d783832a7fa82b0e839aecf8e
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -748,10 +748,10 @@ out:
mutex_unlock(&trace_types_lock);
}
-static void __tracing_reset(struct trace_array *tr, int cpu)
+static void __tracing_reset(struct ring_buffer *buffer, int cpu)
{
ftrace_disable_cpu();
- ring_buffer_reset_cpu(tr->buffer, cpu);
+ ring_buffer_reset_cpu(buffer, cpu);
ftrace_enable_cpu();
}
@@ -763,7 +763,7 @@ void tracing_reset(struct trace_array *t
/* Make sure all commits have finished */
synchronize_sched();
- __tracing_reset(tr, cpu);
+ __tracing_reset(buffer, cpu);
ring_buffer_record_enable(buffer);
}
@@ -781,7 +781,7 @@ void tracing_reset_online_cpus(struct tr
tr->time_start = ftrace_now(tr->cpu);
for_each_online_cpu(cpu)
- __tracing_reset(tr, cpu);
+ __tracing_reset(buffer, cpu);
ring_buffer_record_enable(buffer);
}
@@ -858,6 +858,8 @@ void tracing_start(void)
goto out;
}
+ /* Prevent the buffers from switching */
+ __raw_spin_lock(&ftrace_max_lock);
buffer = global_trace.buffer;
if (buffer)
@@ -867,6 +869,8 @@ void tracing_start(void)
if (buffer)
ring_buffer_record_enable(buffer);
+ __raw_spin_unlock(&ftrace_max_lock);
+
ftrace_start();
out:
spin_unlock_irqrestore(&tracing_start_lock, flags);
@@ -888,6 +892,9 @@ void tracing_stop(void)
if (trace_stop_count++)
goto out;
+ /* Prevent the buffers from switching */
+ __raw_spin_lock(&ftrace_max_lock);
+
buffer = global_trace.buffer;
if (buffer)
ring_buffer_record_disable(buffer);
@@ -896,6 +903,8 @@ void tracing_stop(void)
if (buffer)
ring_buffer_record_disable(buffer);
+ __raw_spin_unlock(&ftrace_max_lock);
+
out:
spin_unlock_irqrestore(&tracing_start_lock, flags);
}
@@ -1162,6 +1171,13 @@ ftrace_trace_userstack(struct ring_buffe
if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
return;
+ /*
+ * NMIs can not handle page faults, even with fix ups.
+ * The save user stack can (and often does) fault.
+ */
+ if (unlikely(in_nmi()))
+ return;
+
event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
sizeof(*entry), flags, pc);
if (!event)
file:405cb850b75d9a308d04d198946e9b0e8da21a39 -> file:374d4eeea9eab4b5a45fde30d5da975bd777b06c
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -746,7 +746,8 @@ extern const char *__stop___trace_bprint
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \
- extern struct ftrace_event_call event_##call;
+ extern struct ftrace_event_call \
+ __attribute__((__aligned__(4))) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \
FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
file:5d7601b0287487321314c1d969f17425b339f9b1 -> file:f5a106eeb2bd532c258c0b0d83c422de17a7b924
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -401,7 +401,7 @@ static void clear_huge_page(struct page
{
int i;
- if (unlikely(sz > MAX_ORDER_NR_PAGES)) {
+ if (unlikely(sz/PAGE_SIZE > MAX_ORDER_NR_PAGES)) {
clear_gigantic_page(page, addr, sz);
return;
}
@@ -545,6 +545,7 @@ static void free_huge_page(struct page *
mapping = (struct address_space *) page_private(page);
set_page_private(page, 0);
+ page->mapping = NULL;
BUG_ON(page_count(page));
INIT_LIST_HEAD(&page->lru);
@@ -1007,7 +1008,7 @@ static struct page *alloc_huge_page(stru
page = alloc_buddy_huge_page(h, vma, addr);
if (!page) {
hugetlb_put_quota(inode->i_mapping, chg);
- return ERR_PTR(-VM_FAULT_OOM);
+ return ERR_PTR(-VM_FAULT_SIGBUS);
}
}
@@ -2095,8 +2096,10 @@ retry:
spin_lock(&inode->i_lock);
inode->i_blocks += blocks_per_huge_page(h);
spin_unlock(&inode->i_lock);
- } else
+ } else {
lock_page(page);
+ page->mapping = HUGETLB_POISON;
+ }
}
/*
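
In the first mm/hugetlb.c hunk above, clear_huge_page() was comparing sz, a size in bytes, against MAX_ORDER_NR_PAGES, a page count; dividing by PAGE_SIZE first puts both sides in the same unit. A toy illustration of the corrected comparison, with made-up constants:

    #include <stdio.h>

    #define PAGE_SIZE          4096UL
    #define MAX_ORDER_NR_PAGES 2048UL   /* illustrative, not the real config */

    int main(void)
    {
        unsigned long sz = 16UL << 20;  /* a 16 MB huge page, in bytes */

        /* Convert bytes to pages before comparing against a page count;
         * the original code compared the raw byte count against it. */
        if (sz / PAGE_SIZE > MAX_ORDER_NR_PAGES)
            printf("use the gigantic-page clear path\n");
        else
            printf("ordinary per-page clear loop\n");
        return 0;
    }
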
file:66035bf926120cff9c33b0fdcf8815dde0da2961 -> file:ba9a0aaa4b74c1cc4bcb96670c39fa7c57110c05
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2008,12 +2008,12 @@ int mem_cgroup_prepare_migration(struct
}
unlock_page_cgroup(pc);
+ *ptr = mem;
if (mem) {
- ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
+ ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false,
page);
css_put(&mem->css);
}
- *ptr = mem;
return ret;
}
file:4545d59442431e33d7d64e2f98c8f9d1a403780b -> file:f29d8d70884d7fb2e170983bf0e551b02740f617
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2122,8 +2122,8 @@ int mpol_parse_str(char *str, struct mem
char *rest = nodelist;
while (isdigit(*rest))
rest++;
- if (!*rest)
- err = 0;
+ if (*rest)
+ goto out;
}
break;
case MPOL_INTERLEAVE:
@@ -2132,7 +2132,6 @@ int mpol_parse_str(char *str, struct mem
*/
if (!nodelist)
nodes = node_states[N_HIGH_MEMORY];
- err = 0;
break;
case MPOL_LOCAL:
/*
@@ -2142,11 +2141,19 @@ int mpol_parse_str(char *str, struct mem
goto out;
mode = MPOL_PREFERRED;
break;
-
- /*
- * case MPOL_BIND: mpol_new() enforces non-empty nodemask.
- * case MPOL_DEFAULT: mpol_new() enforces empty nodemask, ignores flags.
- */
+ case MPOL_DEFAULT:
+ /*
+ * Insist on a empty nodelist
+ */
+ if (!nodelist)
+ err = 0;
+ goto out;
+ case MPOL_BIND:
+ /*
+ * Insist on a nodelist
+ */
+ if (!nodelist)
+ goto out;
}
mode_flags = 0;
@@ -2160,13 +2167,14 @@ int mpol_parse_str(char *str, struct mem
else if (!strcmp(flags, "relative"))
mode_flags |= MPOL_F_RELATIVE_NODES;
else
- err = 1;
+ goto out;
}
new = mpol_new(mode, mode_flags, &nodes);
if (IS_ERR(new))
- err = 1;
- else {
+ goto out;
+
+ {
int ret;
NODEMASK_SCRATCH(scratch);
if (scratch) {
@@ -2177,13 +2185,15 @@ int mpol_parse_str(char *str, struct mem
ret = -ENOMEM;
NODEMASK_SCRATCH_FREE(scratch);
if (ret) {
- err = 1;
mpol_put(new);
- } else if (no_context) {
- /* save for contextualization */
- new->w.user_nodemask = nodes;
+ goto out;
}
}
+ err = 0;
+ if (no_context) {
+ /* save for contextualization */
+ new->w.user_nodemask = nodes;
+ }
out:
/* Restore string for error message */
file:1707efb7cb0faf039035b5e8ca1f6f00de6a91fc -> file:49ff6c7468058ee485edde1f4b2d1369d117d6fe
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -161,13 +161,6 @@ static int bt_sock_create(struct net *ne
{
int err;
- if (proto == BTPROTO_RFCOMM || proto == BTPROTO_SCO ||
- proto == BTPROTO_L2CAP) {
- if (!current_has_bt())
- return -EPERM;
- } else if (!current_has_bt_admin())
- return -EPERM;
-
if (net != &init_net)
return -EAFNOSUPPORT;
file:2f4d30fd12276873595eb30e2f93a91910eddf66 -> file:fb040eb376c6bf7f382dadd2b88877a23e5358cc
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -196,8 +196,7 @@ static void hci_conn_idle(unsigned long
hci_conn_enter_sniff_mode(conn);
}
-struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type,
- __u16 pkt_type, bdaddr_t *dst)
+struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
struct hci_conn *conn;
@@ -222,22 +221,14 @@ struct hci_conn *hci_conn_add(struct hci
conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
break;
case SCO_LINK:
- if (!pkt_type)
- pkt_type = SCO_ESCO_MASK;
+ if (lmp_esco_capable(hdev))
+ conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
+ (hdev->esco_type & EDR_ESCO_MASK);
+ else
+ conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
+ break;
case ESCO_LINK:
- if (!pkt_type)
- pkt_type = ALL_ESCO_MASK;
- if (lmp_esco_capable(hdev)) {
- /* HCI Setup Synchronous Connection Command uses
- reverse logic on the EDR_ESCO_MASK bits */
- conn->pkt_type = (pkt_type ^ EDR_ESCO_MASK) &
- hdev->esco_type;
- } else {
- /* Legacy HCI Add Sco Connection Command uses a
- shifted bitmask */
- conn->pkt_type = (pkt_type << 5) & hdev->pkt_type &
- SCO_PTYPE_MASK;
- }
+ conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
break;
}
@@ -349,9 +340,7 @@ EXPORT_SYMBOL(hci_get_route);
/* Create SCO or ACL connection.
* Device _must_ be locked */
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type,
- __u16 pkt_type, bdaddr_t *dst,
- __u8 sec_level, __u8 auth_type)
+struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
{
struct hci_conn *acl;
struct hci_conn *sco;
@@ -359,7 +348,7 @@ struct hci_conn *hci_connect(struct hci_
BT_DBG("%s dst %s", hdev->name, batostr(dst));
if (!(acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst))) {
- if (!(acl = hci_conn_add(hdev, ACL_LINK, 0, dst)))
+ if (!(acl = hci_conn_add(hdev, ACL_LINK, dst)))
return NULL;
}
@@ -375,7 +364,7 @@ struct hci_conn *hci_connect(struct hci_
return acl;
if (!(sco = hci_conn_hash_lookup_ba(hdev, type, dst))) {
- if (!(sco = hci_conn_add(hdev, type, pkt_type, dst))) {
+ if (!(sco = hci_conn_add(hdev, type, dst))) {
hci_conn_put(acl);
return NULL;
}
@@ -388,9 +377,6 @@ struct hci_conn *hci_connect(struct hci_
if (acl->state == BT_CONNECTED &&
(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
- acl->power_save = 1;
- hci_conn_enter_active_mode(acl);
-
if (lmp_esco_capable(hdev))
hci_setup_sync(sco, acl->handle);
else
@@ -650,15 +636,6 @@ int hci_get_conn_list(void __user *arg)
(ci + n)->out = c->out;
(ci + n)->state = c->state;
(ci + n)->link_mode = c->link_mode;
- if (c->type == SCO_LINK) {
- (ci + n)->mtu = hdev->sco_mtu;
- (ci + n)->cnt = hdev->sco_cnt;
- (ci + n)->pkts = hdev->sco_pkts;
- } else {
- (ci + n)->mtu = hdev->acl_mtu;
- (ci + n)->cnt = hdev->acl_cnt;
- (ci + n)->pkts = hdev->acl_pkts;
- }
if (++n >= req.conn_num)
break;
}
@@ -695,15 +672,6 @@ int hci_get_conn_info(struct hci_dev *hd
ci.out = conn->out;
ci.state = conn->state;
ci.link_mode = conn->link_mode;
- if (req.type == SCO_LINK) {
- ci.mtu = hdev->sco_mtu;
- ci.cnt = hdev->sco_cnt;
- ci.pkts = hdev->sco_pkts;
- } else {
- ci.mtu = hdev->acl_mtu;
- ci.cnt = hdev->acl_cnt;
- ci.pkts = hdev->acl_pkts;
- }
}
hci_dev_unlock_bh(hdev);
file:84a9d75b0c61ede0e98150490abecee369f3fb0f -> file:e1da8f68759c3a989fc8cd8ddde64949c204f5a1
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1239,7 +1239,7 @@ int hci_send_acl(struct hci_conn *conn,
skb->dev = (void *) hdev;
bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
- hci_add_acl_hdr(skb, conn->handle, flags);
+ hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
if (!(list = skb_shinfo(skb)->frag_list)) {
/* Non fragmented */
@@ -1256,14 +1256,12 @@ int hci_send_acl(struct hci_conn *conn,
spin_lock_bh(&conn->data_q.lock);
__skb_queue_tail(&conn->data_q, skb);
- flags &= ~ACL_PB_MASK;
- flags |= ACL_CONT;
do {
skb = list; list = list->next;
skb->dev = (void *) hdev;
bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
- hci_add_acl_hdr(skb, conn->handle, flags);
+ hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
file:8c59afcc0c1de2b3c07e4b31fb7d5031b22b3409 -> file:e99fe385fba293d6a7fe2c558b7d454f669f52b8
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -579,7 +579,7 @@ static inline void hci_cs_create_conn(st
}
} else {
if (!conn) {
- conn = hci_conn_add(hdev, ACL_LINK, 0, &cp->bdaddr);
+ conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
if (conn) {
conn->out = 1;
conn->link_mode |= HCI_LM_MASTER;
@@ -964,9 +964,7 @@ static inline void hci_conn_request_evt(
conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
if (!conn) {
- /* pkt_type not yet used for incoming connections */
- if (!(conn = hci_conn_add(hdev, ev->link_type, 0,
- &ev->bdaddr))) {
+ if (!(conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr))) {
BT_ERR("No memmory for new connection");
hci_dev_unlock(hdev);
return;
@@ -1051,8 +1049,7 @@ static inline void hci_auth_complete_evt
if (conn) {
if (!ev->status)
conn->link_mode |= HCI_LM_AUTH;
- else
- conn->sec_level = BT_SECURITY_LOW;
+
clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
if (conn->state == BT_CONFIG) {
@@ -1701,9 +1698,7 @@ static inline void hci_sync_conn_complet
hci_conn_add_sysfs(conn);
break;
- case 0x10: /* Connection Accept Timeout */
case 0x1c: /* SCO interval rejected */
- case 0x1a: /* unsupported feature */
case 0x1f: /* Unspecified error */
if (conn->out && conn->attempt < 2) {
conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
file:e3596795c31915dbaaeb8fffb48a8a5904acb486 -> file:8d1c4a93eee9a0b225361e6555a09e40abd2c524
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -325,19 +325,13 @@ static inline u8 l2cap_get_ident(struct
static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
- u8 flags;
BT_DBG("code 0x%2.2x", code);
if (!skb)
return -ENOMEM;
- if (lmp_no_flush_capable(conn->hcon->hdev))
- flags = ACL_START_NO_FLUSH;
- else
- flags = ACL_START;
-
- return hci_send_acl(conn->hcon, skb, flags);
+ return hci_send_acl(conn->hcon, skb, 0);
}
static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
@@ -464,8 +458,7 @@ static void l2cap_conn_start(struct l2ca
struct sock *parent = bt_sk(sk)->parent;
rsp.result = cpu_to_le16(L2CAP_CR_PEND);
rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
- if (parent)
- parent->sk_data_ready(parent, 0);
+ parent->sk_data_ready(parent, 0);
} else {
sk->sk_state = BT_CONFIG;
@@ -777,7 +770,6 @@ static void l2cap_sock_init(struct sock
pi->sec_level = l2cap_pi(parent)->sec_level;
pi->role_switch = l2cap_pi(parent)->role_switch;
pi->force_reliable = l2cap_pi(parent)->force_reliable;
- pi->flushable = l2cap_pi(parent)->flushable;
} else {
pi->imtu = L2CAP_DEFAULT_MTU;
pi->omtu = 0;
@@ -786,7 +778,6 @@ static void l2cap_sock_init(struct sock
pi->sec_level = BT_SECURITY_LOW;
pi->role_switch = 0;
pi->force_reliable = 0;
- pi->flushable = 0;
}
/* Default config options */
@@ -962,7 +953,7 @@ static int l2cap_do_connect(struct sock
}
}
- hcon = hci_connect(hdev, ACL_LINK, 0, dst,
+ hcon = hci_connect(hdev, ACL_LINK, dst,
l2cap_pi(sk)->sec_level, auth_type);
if (!hcon)
goto done;
@@ -1267,18 +1258,11 @@ static void l2cap_drop_acked_frames(stru
static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct hci_conn *hcon = pi->conn->hcon;
int err;
- u16 flags;
BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
- if (lmp_no_flush_capable(hcon->hdev) && !l2cap_pi(sk)->flushable)
- flags = ACL_START_NO_FLUSH;
- else
- flags = ACL_START;
-
- err = hci_send_acl(hcon, skb, flags);
+ err = hci_send_acl(pi->conn->hcon, skb, 0);
if (err < 0)
kfree_skb(skb);
@@ -1763,7 +1747,6 @@ static int l2cap_sock_setsockopt_old(str
l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
- l2cap_pi(sk)->flushable = (opt & L2CAP_LM_FLUSHABLE);
break;
default:
@@ -1891,9 +1874,6 @@ static int l2cap_sock_getsockopt_old(str
if (l2cap_pi(sk)->force_reliable)
opt |= L2CAP_LM_RELIABLE;
- if (l2cap_pi(sk)->flushable)
- opt |= L2CAP_LM_FLUSHABLE;
-
if (put_user(opt, (u32 __user *) optval))
err = -EFAULT;
break;
@@ -2833,6 +2813,11 @@ static inline int l2cap_config_rsp(struc
int len = cmd->len - sizeof(*rsp);
char req[64];
+ if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
+ l2cap_send_disconn_req(conn, sk);
+ goto done;
+ }
+
/* throw out any old stored conf requests */
result = L2CAP_CONF_SUCCESS;
len = l2cap_parse_conf_rsp(sk, rsp->data,
@@ -3469,7 +3454,7 @@ static inline int l2cap_data_channel_sfr
}
break;
}
- kfree_skb(skb);
+
return 0;
}
@@ -3821,7 +3806,7 @@ static int l2cap_recv_acldata(struct hci
BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
- if (!(flags & ACL_CONT)) {
+ if (flags & ACL_START) {
struct l2cap_hdr *hdr;
int len;
@@ -3905,16 +3890,24 @@ static ssize_t l2cap_sysfs_show(struct c
struct sock *sk;
struct hlist_node *node;
char *str = buf;
+ int size = PAGE_SIZE;
read_lock_bh(&l2cap_sk_list.lock);
sk_for_each(sk, node, &l2cap_sk_list.head) {
struct l2cap_pinfo *pi = l2cap_pi(sk);
+ int len;
- str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
+ len = snprintf(str, size, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
+
+ size -= len;
+ if (size <= 0)
+ break;
+
+ str += len;
}
read_unlock_bh(&l2cap_sk_list.lock);
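
The sysfs show() rework in this l2cap.c hunk (and in the rfcomm and sco hunks that follow) replaces unbounded sprintf() calls with snprintf() plus a running byte budget that starts at PAGE_SIZE, so a long socket list can no longer write past the one-page sysfs buffer. A user-space sketch of the same bounding pattern, with a deliberately tiny buffer standing in for the page:

    #include <stdio.h>
    #include <string.h>

    #define BUF_SIZE 64     /* stands in for the PAGE_SIZE sysfs buffer */

    int main(void)
    {
        char buf[BUF_SIZE];
        char *str = buf;
        int size = BUF_SIZE;
        int i;

        buf[0] = '\0';
        for (i = 0; i < 100; i++) {
            /* snprintf() never writes more than 'size' bytes; 'len' is the
             * length it wanted, which may exceed the room that is left. */
            int len = snprintf(str, size, "entry %d state %d\n", i, i % 3);

            size -= len;
            if (size <= 0)
                break;              /* buffer full: stop, don't overflow */
            str += len;
        }

        fputs(buf, stdout);
        return 0;
    }
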
file:483b8ab62f062ea490fd0089023881623b72ae3b -> file:ef3abf28c12039cd9d538b2f826a410dc647d545
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -244,6 +244,32 @@ static inline int rfcomm_check_security(
auth_type);
}
+static void rfcomm_session_timeout(unsigned long arg)
+{
+ struct rfcomm_session *s = (void *) arg;
+
+ BT_DBG("session %p state %ld", s, s->state);
+
+ set_bit(RFCOMM_TIMED_OUT, &s->flags);
+ rfcomm_schedule(RFCOMM_SCHED_TIMEO);
+}
+
+static void rfcomm_session_set_timer(struct rfcomm_session *s, long timeout)
+{
+ BT_DBG("session %p state %ld timeout %ld", s, s->state, timeout);
+
+ if (!mod_timer(&s->timer, jiffies + timeout))
+ rfcomm_session_hold(s);
+}
+
+static void rfcomm_session_clear_timer(struct rfcomm_session *s)
+{
+ BT_DBG("session %p state %ld", s, s->state);
+
+ if (timer_pending(&s->timer) && del_timer(&s->timer))
+ rfcomm_session_put(s);
+}
+
/* ---- RFCOMM DLCs ---- */
static void rfcomm_dlc_timeout(unsigned long arg)
{
@@ -320,6 +346,7 @@ static void rfcomm_dlc_link(struct rfcom
rfcomm_session_hold(s);
+ rfcomm_session_clear_timer(s);
rfcomm_dlc_hold(d);
list_add(&d->list, &s->dlcs);
d->session = s;
@@ -335,6 +362,9 @@ static void rfcomm_dlc_unlink(struct rfc
d->session = NULL;
rfcomm_dlc_put(d);
+ if (list_empty(&s->dlcs))
+ rfcomm_session_set_timer(s, RFCOMM_IDLE_TIMEOUT);
+
rfcomm_session_put(s);
}
@@ -428,6 +458,7 @@ static int __rfcomm_dlc_close(struct rfc
switch (d->state) {
case BT_CONNECT:
+ case BT_CONFIG:
if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
set_bit(RFCOMM_AUTH_REJECT, &d->flags);
rfcomm_schedule(RFCOMM_SCHED_AUTH);
@@ -447,6 +478,7 @@ static int __rfcomm_dlc_close(struct rfc
break;
case BT_OPEN:
+ case BT_CONNECT2:
if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
set_bit(RFCOMM_AUTH_REJECT, &d->flags);
rfcomm_schedule(RFCOMM_SCHED_AUTH);
@@ -565,6 +597,8 @@ static struct rfcomm_session *rfcomm_ses
BT_DBG("session %p sock %p", s, sock);
+ setup_timer(&s->timer, rfcomm_session_timeout, (unsigned long) s);
+
INIT_LIST_HEAD(&s->dlcs);
s->state = state;
s->sock = sock;
@@ -596,6 +630,7 @@ static void rfcomm_session_del(struct rf
if (state == BT_CONNECTED)
rfcomm_send_disc(s, 0);
+ rfcomm_session_clear_timer(s);
sock_release(s->sock);
kfree(s);
@@ -637,6 +672,7 @@ static void rfcomm_session_close(struct
__rfcomm_dlc_close(d, err);
}
+ rfcomm_session_clear_timer(s);
rfcomm_session_put(s);
}
@@ -1111,8 +1147,7 @@ static int rfcomm_recv_ua(struct rfcomm_
break;
case BT_DISCONN:
- if (s->sock->sk->sk_state != BT_CLOSED)
- rfcomm_session_put(s);
+ rfcomm_session_put(s);
break;
}
}
@@ -1193,6 +1228,8 @@ void rfcomm_dlc_accept(struct rfcomm_dlc
rfcomm_send_ua(d->session, d->dlci);
+ rfcomm_dlc_clear_timer(d);
+
rfcomm_dlc_lock(d);
d->state = BT_CONNECTED;
d->state_change(d, 0);
@@ -1210,6 +1247,11 @@ static void rfcomm_check_accept(struct r
if (d->defer_setup) {
set_bit(RFCOMM_DEFER_SETUP, &d->flags);
rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT);
+
+ rfcomm_dlc_lock(d);
+ d->state = BT_CONNECT2;
+ d->state_change(d, 0);
+ rfcomm_dlc_unlock(d);
} else
rfcomm_dlc_accept(d);
} else {
@@ -1751,6 +1793,11 @@ static inline void rfcomm_process_dlcs(s
if (d->defer_setup) {
set_bit(RFCOMM_DEFER_SETUP, &d->flags);
rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT);
+
+ rfcomm_dlc_lock(d);
+ d->state = BT_CONNECT2;
+ d->state_change(d, 0);
+ rfcomm_dlc_unlock(d);
} else
rfcomm_dlc_accept(d);
}
@@ -1866,6 +1913,13 @@ static inline void rfcomm_process_sessio
struct rfcomm_session *s;
s = list_entry(p, struct rfcomm_session, list);
+ if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) {
+ s->state = BT_DISCONN;
+ rfcomm_send_disc(s, 0);
+ rfcomm_session_put(s);
+ continue;
+ }
+
if (s->state == BT_LISTEN) {
rfcomm_accept_connection(s);
continue;
@@ -2042,6 +2096,7 @@ static ssize_t rfcomm_dlc_sysfs_show(str
struct rfcomm_session *s;
struct list_head *pp, *p;
char *str = buf;
+ int size = PAGE_SIZE;
rfcomm_lock();
@@ -2050,11 +2105,21 @@ static ssize_t rfcomm_dlc_sysfs_show(str
list_for_each(pp, &s->dlcs) {
struct sock *sk = s->sock->sk;
struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list);
+ int len;
- str += sprintf(str, "%s %s %ld %d %d %d %d\n",
+ len = snprintf(str, size, "%s %s %ld %d %d %d %d\n",
batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
d->state, d->dlci, d->mtu, d->rx_credits, d->tx_credits);
+
+ size -= len;
+ if (size <= 0)
+ break;
+
+ str += len;
}
+
+ if (size <= 0)
+ break;
}
rfcomm_unlock();
file:8a20aaf1f2316676564501c49bb59c3fe56c898b -> file:30a36499ee7741afc4f6116c633273ee87032fae
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -1065,13 +1065,22 @@ static ssize_t rfcomm_sock_sysfs_show(st
struct sock *sk;
struct hlist_node *node;
char *str = buf;
+ int size = PAGE_SIZE;
read_lock_bh(&rfcomm_sk_list.lock);
sk_for_each(sk, node, &rfcomm_sk_list.head) {
- str += sprintf(str, "%s %s %d %d\n",
+ int len;
+
+ len = snprintf(str, size, "%s %s %d %d\n",
batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
sk->sk_state, rfcomm_pi(sk)->channel);
+
+ size -= len;
+ if (size <= 0)
+ break;
+
+ str += len;
}
read_unlock_bh(&rfcomm_sk_list.lock);
file:e4343cabedf31f32d389c6fdd656b34da08a54d7 -> file:5c0685eba947db01aa38d0ae9a4b3d1dd62e12a3
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -176,7 +176,6 @@ static int sco_connect(struct sock *sk)
{
bdaddr_t *src = &bt_sk(sk)->src;
bdaddr_t *dst = &bt_sk(sk)->dst;
- __u16 pkt_type = sco_pi(sk)->pkt_type;
struct sco_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
@@ -193,13 +192,10 @@ static int sco_connect(struct sock *sk)
if (lmp_esco_capable(hdev) && !disable_esco)
type = ESCO_LINK;
- else {
+ else
type = SCO_LINK;
- pkt_type &= SCO_ESCO_MASK;
- }
- hcon = hci_connect(hdev, type, pkt_type, dst,
- BT_SECURITY_LOW, HCI_AT_NO_BONDING);
+ hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
if (!hcon)
goto done;
@@ -455,22 +451,18 @@ static int sco_sock_create(struct net *n
return 0;
}
-static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
- struct sockaddr_sco sa;
+ struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
- bdaddr_t *src = &sa.sco_bdaddr;
- int len, err = 0;
+ bdaddr_t *src = &sa->sco_bdaddr;
+ int err = 0;
- BT_DBG("sk %p %s", sk, batostr(&sa.sco_bdaddr));
+ BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr));
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
- memset(&sa, 0, sizeof(sa));
- len = min_t(unsigned int, sizeof(sa), alen);
- memcpy(&sa, addr, len);
-
lock_sock(sk);
if (sk->sk_state != BT_OPEN) {
@@ -484,8 +476,7 @@ static int sco_sock_bind(struct socket *
err = -EADDRINUSE;
} else {
/* Save source address */
- bacpy(&bt_sk(sk)->src, &sa.sco_bdaddr);
- sco_pi(sk)->pkt_type = sa.sco_pkt_type;
+ bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
sk->sk_state = BT_BOUND;
}
@@ -498,34 +489,26 @@ done:
static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
+ struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
- struct sockaddr_sco sa;
- int len, err = 0;
+ int err = 0;
+
BT_DBG("sk %p", sk);
- if (!addr || addr->sa_family != AF_BLUETOOTH)
+ if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_sco))
return -EINVAL;
- memset(&sa, 0, sizeof(sa));
- len = min_t(unsigned int, sizeof(sa), alen);
- memcpy(&sa, addr, len);
-
- lock_sock(sk);
+ if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
+ return -EBADFD;
- if (sk->sk_type != SOCK_SEQPACKET) {
- err = -EINVAL;
- goto done;
- }
+ if (sk->sk_type != SOCK_SEQPACKET)
+ return -EINVAL;
- if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
- err = -EBADFD;
- goto done;
- }
+ lock_sock(sk);
/* Set destination address and psm */
- bacpy(&bt_sk(sk)->dst, &sa.sco_bdaddr);
- sco_pi(sk)->pkt_type = sa.sco_pkt_type;
+ bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr);
if ((err = sco_connect(sk)))
goto done;
@@ -631,7 +614,6 @@ static int sco_sock_getname(struct socke
bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst);
else
bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src);
- sa->sco_pkt_type = sco_pi(sk)->pkt_type;
return 0;
}
@@ -975,13 +957,22 @@ static ssize_t sco_sysfs_show(struct cla
struct sock *sk;
struct hlist_node *node;
char *str = buf;
+ int size = PAGE_SIZE;
read_lock_bh(&sco_sk_list.lock);
sk_for_each(sk, node, &sco_sk_list.head) {
- str += sprintf(str, "%s %s %d\n",
+ int len;
+
+ len = snprintf(str, size, "%s %s %d\n",
batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
sk->sk_state);
+
+ size -= len;
+ if (size <= 0)
+ break;
+
+ str += len;
}
read_unlock_bh(&sco_sk_list.lock);
file:4c12ddb5f5ee43ab546278ba5527c2e10939824b -> file:5aef51eb3d1f5fd01c2d6766e3062476f43440da
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -216,22 +216,34 @@ static int ethtool_get_drvinfo(struct ne
return 0;
}
-static int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
+static int ethtool_set_rxnfc(struct net_device *dev,
+ u32 cmd, void __user *useraddr)
{
- struct ethtool_rxnfc cmd;
+ struct ethtool_rxnfc info;
+ size_t info_size = sizeof(info);
if (!dev->ethtool_ops->set_rxnfc)
return -EOPNOTSUPP;
- if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+ /* struct ethtool_rxnfc was originally defined for
+ * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
+ * members. User-space might still be using that
+ * definition. */
+ if (cmd == ETHTOOL_SRXFH)
+ info_size = (offsetof(struct ethtool_rxnfc, data) +
+ sizeof(info.data));
+
+ if (copy_from_user(&info, useraddr, info_size))
return -EFAULT;
- return dev->ethtool_ops->set_rxnfc(dev, &cmd);
+ return dev->ethtool_ops->set_rxnfc(dev, &info);
}
-static int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
+static int ethtool_get_rxnfc(struct net_device *dev,
+ u32 cmd, void __user *useraddr)
{
struct ethtool_rxnfc info;
+ size_t info_size = sizeof(info);
const struct ethtool_ops *ops = dev->ethtool_ops;
int ret;
void *rule_buf = NULL;
@@ -239,13 +251,22 @@ static int ethtool_get_rxnfc(struct net_
if (!ops->get_rxnfc)
return -EOPNOTSUPP;
- if (copy_from_user(&info, useraddr, sizeof(info)))
+ /* struct ethtool_rxnfc was originally defined for
+ * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
+ * members. User-space might still be using that
+ * definition. */
+ if (cmd == ETHTOOL_GRXFH)
+ info_size = (offsetof(struct ethtool_rxnfc, data) +
+ sizeof(info.data));
+
+ if (copy_from_user(&info, useraddr, info_size))
return -EFAULT;
if (info.cmd == ETHTOOL_GRXCLSRLALL) {
if (info.rule_cnt > 0) {
- rule_buf = kmalloc(info.rule_cnt * sizeof(u32),
- GFP_USER);
+ if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
+ rule_buf = kmalloc(info.rule_cnt * sizeof(u32),
+ GFP_USER);
if (!rule_buf)
return -ENOMEM;
}
@@ -256,7 +277,7 @@ static int ethtool_get_rxnfc(struct net_
goto err_out;
ret = -EFAULT;
- if (copy_to_user(useraddr, &info, sizeof(info)))
+ if (copy_to_user(useraddr, &info, info_size))
goto err_out;
if (rule_buf) {
@@ -1111,12 +1132,12 @@ int dev_ethtool(struct net *net, struct
case ETHTOOL_GRXCLSRLCNT:
case ETHTOOL_GRXCLSRULE:
case ETHTOOL_GRXCLSRLALL:
- rc = ethtool_get_rxnfc(dev, useraddr);
+ rc = ethtool_get_rxnfc(dev, ethcmd, useraddr);
break;
case ETHTOOL_SRXFH:
case ETHTOOL_SRXCLSRLDEL:
case ETHTOOL_SRXCLSRLINS:
- rc = ethtool_set_rxnfc(dev, useraddr);
+ rc = ethtool_set_rxnfc(dev, ethcmd, useraddr);
break;
case ETHTOOL_GGRO:
rc = ethtool_get_gro(dev, useraddr);
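
The net/core/ethtool.c change above copes with an ABI wrinkle the added comments describe: for ETHTOOL_GRXFH/ETHTOOL_SRXFH, old user space passes a struct ethtool_rxnfc that ends right after the data member, so the kernel now copies only offsetof(struct ethtool_rxnfc, data) + sizeof(info.data) bytes for those two commands rather than the full, later-extended structure. A stand-alone sketch of computing such a legacy size; the struct layout below is a simplified stand-in, not the real UAPI definition:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Simplified stand-in for struct ethtool_rxnfc: the original ABI ended
     * after 'data'; later kernels appended the rule members. */
    struct rxnfc_demo {
        uint32_t cmd;
        uint32_t flow_type;
        uint64_t data;
        uint32_t rule_cnt;      /* added after the original ABI */
        uint32_t rule_locs[];
    };

    int main(void)
    {
        /* What legacy user space actually provides: everything up to and
         * including 'data', exactly as the hunk computes info_size. */
        size_t legacy_size = offsetof(struct rxnfc_demo, data) +
                             sizeof(((struct rxnfc_demo *)0)->data);

        printf("legacy size: %zu, full size: %zu\n",
               legacy_size, sizeof(struct rxnfc_demo));
        return 0;
    }
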
file:e587e6819698cbe7485fb336cb70177e3bf98ca4 -> file:e69625084481254a792512cb50ddf05fe332057a
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -945,7 +945,10 @@ static void neigh_update_hhs(struct neig
{
struct hh_cache *hh;
void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
- = neigh->dev->header_ops->cache_update;
+ = NULL;
+
+ if (neigh->dev->header_ops)
+ update = neigh->dev->header_ops->cache_update;
if (update) {
for (hh = neigh->hh; hh; hh = hh->hh_next) {
file:37731da4148164adc741da891a86b27e2484187d -> file:48759983b2db2d16f7495fabf4dbdef68872a102
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -164,7 +164,8 @@ static __init int dccpprobe_init(void)
if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
goto err0;
- ret = register_jprobe(&dccp_send_probe);
+ ret = try_then_request_module((register_jprobe(&dccp_send_probe) == 0),
+ "dccp");
if (ret)
goto err1;
file:5a7f00cd15ce7e194a0c3f1c1cda956c0eae2843 -> file:1264ad0d9da1cdbb020fd2b7e0ffbb3af2fe469e
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -95,9 +95,11 @@ static void send_reset(struct net *net,
fl.fl_ip_dport = otcph.source;
security_skb_classify_flow(oldskb, &fl);
dst = ip6_route_output(net, NULL, &fl);
- if (dst == NULL)
+ if (dst == NULL || dst->error) {
+ dst_release(dst);
return;
- if (dst->error || xfrm_lookup(net, &dst, &fl, NULL, 0))
+ }
+ if (xfrm_lookup(net, &dst, &fl, NULL, 0))
return;
hh_len = (dst->dev->hard_header_len + 15)&~15;
file:4b6a539cb87ab7f1a2b82fe62a24b576f9062bf8 -> file:bfc8737292bdb3438a38d030eb7c933ab8b941c9
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -63,6 +63,7 @@ struct nf_ct_frag6_queue
struct inet_frag_queue q;
__be32 id; /* fragment id */
+ u32 user;
struct in6_addr saddr;
struct in6_addr daddr;
file:4d5543af3123e02201f872f7ac4f3d1cc45ea88d -> file:ea367cf9a052263a8db0a46aad61a5f8dde92db8
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -15,8 +15,12 @@ comment "CFG80211 needs to be enabled fo
if MAC80211 != n
+config MAC80211_HAS_RC
+ def_bool n
+
config MAC80211_RC_PID
bool "PID controller based rate control algorithm" if EMBEDDED
+ select MAC80211_HAS_RC
---help---
This option enables a TX rate control algorithm for
mac80211 that uses a PID controller to select the TX
@@ -24,12 +28,14 @@ config MAC80211_RC_PID
config MAC80211_RC_MINSTREL
bool "Minstrel" if EMBEDDED
+ select MAC80211_HAS_RC
default y
---help---
This option enables the 'minstrel' TX rate control algorithm
choice
prompt "Default rate control algorithm"
+ depends on MAC80211_HAS_RC
default MAC80211_RC_DEFAULT_MINSTREL
---help---
This option selects the default rate control algorithm
@@ -62,6 +68,9 @@ config MAC80211_RC_DEFAULT
endif
+comment "Some wireless drivers require a rate control algorithm"
+ depends on MAC80211_HAS_RC=n
+
config MAC80211_MESH
bool "Enable mac80211 mesh networking (pre-802.11s) support"
depends on MAC80211 && EXPERIMENTAL
file:89e238b001de936e4585f2eae73b3d09ae8cb3f7 -> file:2e08921b7cf6ecf1994c3deeb192b89aa3fac936
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -177,10 +177,10 @@ static void sta_addba_resp_timer_expired
/* check if the TID waits for addBA response */
spin_lock_bh(&sta->lock);
- if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK)) !=
+ if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK |
+ HT_AGG_STATE_REQ_STOP_BA_MSK)) !=
HT_ADDBA_REQUESTED_MSK) {
spin_unlock_bh(&sta->lock);
- *state = HT_AGG_STATE_IDLE;
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "timer expired on tid %d but we are not "
"(or no longer) expecting addBA response there",
file:5a461641e94ee8cfbd6d9c7d82b2f7dcc1f235ea -> file:ca62bfe229dd501f8064698aa8605c552dc1601f
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -264,6 +264,7 @@ enum ieee80211_sta_flags {
IEEE80211_STA_DISABLE_11N = BIT(4),
IEEE80211_STA_CSA_RECEIVED = BIT(5),
IEEE80211_STA_MFP_ENABLED = BIT(6),
+ IEEE80211_STA_NULLFUNC_ACKED = BIT(7),
};
/* flags for MLME request */
file:797f53942e5f870093ff771b3d3bb523c664044a -> file:19fbd25a705b22d3d220bc8f7ea6266a20564c68
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -441,6 +441,7 @@ void ieee80211_tx_status(struct ieee8021
rcu_read_lock();
sband = local->hw.wiphy->bands[info->band];
+ fc = hdr->frame_control;
sta = sta_info_get(local, hdr->addr1);
@@ -522,6 +523,20 @@ void ieee80211_tx_status(struct ieee8021
local->dot11FailedCount++;
}
+ if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) &&
+ (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
+ !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
+ local->ps_sdata && !(local->scanning)) {
+ if (info->flags & IEEE80211_TX_STAT_ACK) {
+ local->ps_sdata->u.mgd.flags |=
+ IEEE80211_STA_NULLFUNC_ACKED;
+ ieee80211_queue_work(&local->hw,
+ &local->dynamic_ps_enable_work);
+ } else
+ mod_timer(&local->dynamic_ps_timer, jiffies +
+ msecs_to_jiffies(10));
+ }
+
/* this was a transmitted frame, but now we want to reuse it */
skb_orphan(skb);
file:6cae2954963d3bd8abb5599a20932e28a5bad4e7 -> file:5bea319e3e54b7d17cad18f4a71a353b3b5b6f68
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -269,12 +269,6 @@ static void ieee80211_send_assoc(struct
if (wk->bss->wmm_used)
wmm = 1;
- /* get all rates supported by the device and the AP as
- * some APs don't like getting a superset of their rates
- * in the association request (e.g. D-Link DAP 1353 in
- * b-only mode) */
- rates_len = ieee80211_compatible_rates(wk->bss, sband, &rates);
-
if ((wk->bss->cbss.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
(local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
@@ -309,6 +303,17 @@ static void ieee80211_send_assoc(struct
*pos++ = wk->ssid_len;
memcpy(pos, wk->ssid, wk->ssid_len);
+ if (wk->bss->supp_rates_len) {
+ /* get all rates supported by the device and the AP as
+ * some APs don't like getting a superset of their rates
+ * in the association request (e.g. D-Link DAP 1353 in
+ * b-only mode) */
+ rates_len = ieee80211_compatible_rates(wk->bss, sband, &rates);
+ } else {
+ rates = ~0;
+ rates_len = sband->n_bitrates;
+ }
+
/* add all rates which were marked to be used above */
supp_rates_len = rates_len;
if (supp_rates_len > 8)
@@ -650,8 +655,11 @@ static void ieee80211_enable_ps(struct i
} else {
if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
ieee80211_send_nullfunc(local, sdata, 1);
- conf->flags |= IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+
+ if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
+ conf->flags |= IEEE80211_CONF_PS;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ }
}
}
@@ -742,6 +750,7 @@ void ieee80211_dynamic_ps_enable_work(st
container_of(work, struct ieee80211_local,
dynamic_ps_enable_work);
struct ieee80211_sub_if_data *sdata = local->ps_sdata;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
/* can only happen when PS was just disabled anyway */
if (!sdata)
@@ -750,11 +759,16 @@ void ieee80211_dynamic_ps_enable_work(st
if (local->hw.conf.flags & IEEE80211_CONF_PS)
return;
- if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
+ if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
+ (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)))
ieee80211_send_nullfunc(local, sdata, 1);
- local->hw.conf.flags |= IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) ||
+ (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
+ ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
+ local->hw.conf.flags |= IEEE80211_CONF_PS;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ }
}
void ieee80211_dynamic_ps_timer(unsigned long data)
@@ -2458,6 +2472,7 @@ int ieee80211_mgd_assoc(struct ieee80211
list_add(&wk->list, &ifmgd->work_list);
ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
+ ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
for (i = 0; i < req->crypto.n_ciphers_pairwise; i++)
if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
file:16c6cdc6e8af363b8ffeda0e6f301dead6440c83 -> file:38499c4e7106c514b22efbf679c4bd957f6a61ef
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1220,7 +1220,8 @@ ieee80211_drop_unencrypted(struct ieee80
(rx->key || rx->sdata->drop_unencrypted)))
return -EACCES;
if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
- if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
+ if (unlikely(!ieee80211_has_protected(fc) &&
+ ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
rx->key))
return -EACCES;
/* BIP does not use Protected field, so need to check MMIE */
@@ -1590,6 +1591,7 @@ static ieee80211_rx_result debug_noinlin
ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
{
struct net_device *dev = rx->dev;
+ struct ieee80211_local *local = rx->local;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
__le16 fc = hdr->frame_control;
int err;
@@ -1612,6 +1614,13 @@ ieee80211_rx_h_data(struct ieee80211_rx_
dev->stats.rx_packets++;
dev->stats.rx_bytes += rx->skb->len;
+ if (ieee80211_is_data(hdr->frame_control) &&
+ !is_multicast_ether_addr(hdr->addr1) &&
+ local->hw.conf.dynamic_ps_timeout > 0 && local->ps_sdata) {
+ mod_timer(&local->dynamic_ps_timer, jiffies +
+ msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
+ }
+
ieee80211_deliver_skb(rx);
return RX_QUEUED;
@@ -1809,6 +1818,11 @@ ieee80211_rx_h_action(struct ieee80211_r
return RX_CONTINUE;
}
break;
+ case WLAN_CATEGORY_MESH_PLINK:
+ case WLAN_CATEGORY_MESH_PATH_SEL:
+ if (ieee80211_vif_is_mesh(&sdata->vif))
+ return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
+ break;
default:
/* do not process rejected action frames */
if (mgmt->u.action.category & 0x80)
file:1a4190947970b45f3f945ce7ddafceeb91f3fad9 -> file:169111ad6c9947431773a39b27e57ddcc638ce63
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -62,7 +62,7 @@ ieee80211_bss_info_update(struct ieee802
bool beacon)
{
struct ieee80211_bss *bss;
- int clen;
+ int clen, srlen;
s32 signal = 0;
if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
@@ -94,23 +94,24 @@ ieee80211_bss_info_update(struct ieee802
if (bss->dtim_period == 0)
bss->dtim_period = 1;
- bss->supp_rates_len = 0;
+ /* replace old supported rates if we get new values */
+ srlen = 0;
if (elems->supp_rates) {
- clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
+ clen = IEEE80211_MAX_SUPP_RATES;
if (clen > elems->supp_rates_len)
clen = elems->supp_rates_len;
- memcpy(&bss->supp_rates[bss->supp_rates_len], elems->supp_rates,
- clen);
- bss->supp_rates_len += clen;
+ memcpy(bss->supp_rates, elems->supp_rates, clen);
+ srlen += clen;
}
if (elems->ext_supp_rates) {
- clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
+ clen = IEEE80211_MAX_SUPP_RATES - srlen;
if (clen > elems->ext_supp_rates_len)
clen = elems->ext_supp_rates_len;
- memcpy(&bss->supp_rates[bss->supp_rates_len],
- elems->ext_supp_rates, clen);
- bss->supp_rates_len += clen;
+ memcpy(bss->supp_rates + srlen, elems->ext_supp_rates, clen);
+ srlen += clen;
}
+ if (srlen)
+ bss->supp_rates_len = srlen;
bss->wmm_used = elems->wmm_param || elems->wmm_info;
@@ -408,6 +409,16 @@ static int __ieee80211_start_scan(struct
if (local->scan_req)
return -EBUSY;
+ if (req != local->int_scan_req &&
+ sdata->vif.type == NL80211_IFTYPE_STATION &&
+ !list_empty(&ifmgd->work_list)) {
+ /* actually wait for the work it's doing to finish/time out */
+ set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
+ local->scan_req = req;
+ local->scan_sdata = sdata;
+ return 0;
+ }
+
if (local->ops->hw_scan) {
u8 *ies;
int ielen;
@@ -428,14 +439,6 @@ static int __ieee80211_start_scan(struct
local->scan_req = req;
local->scan_sdata = sdata;
- if (req != local->int_scan_req &&
- sdata->vif.type == NL80211_IFTYPE_STATION &&
- !list_empty(&ifmgd->work_list)) {
- /* actually wait for the work it's doing to finish/time out */
- set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
- return 0;
- }
-
if (local->ops->hw_scan)
__set_bit(SCAN_HW_SCANNING, &local->scanning);
else
file:441f68e3f3790c5a0c9012927f25901643f87ce2 -> file:b1d79046257b39137f21f6e7c961064d906fa98a
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -496,7 +496,8 @@ ieee80211_tx_h_rate_ctrl(struct ieee8021
struct ieee80211_hdr *hdr = (void *)tx->skb->data;
struct ieee80211_supported_band *sband;
struct ieee80211_rate *rate;
- int i, len;
+ int i;
+ u32 len;
bool inval = false, rts = false, short_preamble = false;
struct ieee80211_tx_rate_control txrc;
u32 sta_flags;
@@ -505,7 +506,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee8021
sband = tx->local->hw.wiphy->bands[tx->channel->band];
- len = min_t(int, tx->skb->len + FCS_LEN,
+ len = min_t(u32, tx->skb->len + FCS_LEN,
tx->local->hw.wiphy->frag_threshold);
/* set up the tx rate control struct we give the RC algo */
@@ -1881,6 +1882,7 @@ static bool ieee80211_tx_pending_skb(str
void ieee80211_tx_pending(unsigned long data)
{
struct ieee80211_local *local = (struct ieee80211_local *)data;
+ struct ieee80211_sub_if_data *sdata;
unsigned long flags;
int i;
bool txok;
@@ -1921,6 +1923,11 @@ void ieee80211_tx_pending(unsigned long
if (!txok)
break;
}
+
+ if (skb_queue_empty(&local->pending[i]))
+ list_for_each_entry_rcu(sdata, &local->interfaces, list)
+ netif_tx_wake_queue(
+ netdev_get_tx_queue(sdata->dev, i));
}
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
file:553cffe9ab37045acf717e6b11022bc0381fc0e3 -> file:31b10850905fced1908ae098ad94dc62a911c3b9
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -280,13 +280,13 @@ static void __ieee80211_wake_queue(struc
/* someone still has this queue stopped */
return;
- if (!skb_queue_empty(&local->pending[queue]))
+ if (skb_queue_empty(&local->pending[queue])) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdata, &local->interfaces, list)
+ netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
+ rcu_read_unlock();
+ } else
tasklet_schedule(&local->tx_pending_tasklet);
-
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list)
- netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
- rcu_read_unlock();
}
void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -1137,6 +1137,14 @@ int ieee80211_reconfig(struct ieee80211_
}
}
+ rcu_read_lock();
+ if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
+ list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ ieee80211_sta_tear_down_BA_sessions(sta);
+ }
+ }
+ rcu_read_unlock();
+
/* add back keys */
list_for_each_entry(sdata, &local->interfaces, list)
if (netif_running(sdata->dev))
file:27c30cf933daa9092def2dfc342f8f05ef036d5f -> file:95682e56902398486bec7fe20432db6c98cc9535
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -146,6 +146,7 @@ static inline int ip_vs_conn_hash(struct
hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
ct_write_lock(hash);
+ spin_lock(&cp->lock);
if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
list_add(&cp->c_list, &ip_vs_conn_tab[hash]);
@@ -158,6 +159,7 @@ static inline int ip_vs_conn_hash(struct
ret = 0;
}
+ spin_unlock(&cp->lock);
ct_write_unlock(hash);
return ret;
@@ -177,6 +179,7 @@ static inline int ip_vs_conn_unhash(stru
hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
ct_write_lock(hash);
+ spin_lock(&cp->lock);
if (cp->flags & IP_VS_CONN_F_HASHED) {
list_del(&cp->c_list);
@@ -186,6 +189,7 @@ static inline int ip_vs_conn_unhash(stru
} else
ret = 0;
+ spin_unlock(&cp->lock);
ct_write_unlock(hash);
return ret;
file:eb0ceb8465270d355d44842b1b82e91e41b6830e -> file:2f181aa07b0567dc0f443ed680146c5ffa6d8b07
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -173,10 +173,10 @@ recent_entry_init(struct recent_table *t
static void recent_entry_update(struct recent_table *t, struct recent_entry *e)
{
+ e->index %= ip_pkt_list_tot;
e->stamps[e->index++] = jiffies;
if (e->index > e->nstamps)
e->nstamps = e->index;
- e->index %= ip_pkt_list_tot;
list_move_tail(&e->lru_list, &t->lru_list);
}
@@ -260,7 +260,7 @@ recent_mt(const struct sk_buff *skb, con
for (i = 0; i < e->nstamps; i++) {
if (info->seconds && time_after(time, e->stamps[i]))
continue;
- if (++hits >= info->hit_count) {
+ if (!info->hit_count || ++hits >= info->hit_count) {
ret = !ret;
break;
}
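
The xt_recent change above wraps e->index before it is used to index stamps[] instead of afterwards, which keeps the store strictly inside the ip_pkt_list_tot-sized array regardless of the value the index arrives with, and the second hunk lets a rule with hit_count 0 match without requiring a counted hit. A small demonstration of the wrap-before-use ordering on a toy ring of timestamps:

    #include <stdio.h>

    #define NSTAMPS 4   /* stands in for ip_pkt_list_tot */

    int main(void)
    {
        unsigned long stamps[NSTAMPS] = { 0 };
        unsigned int index = 0, nstamps = 0, i;
        unsigned long now;

        for (now = 100; now < 110; now++) {
            /* Wrap before the index is used, as the fix does, so the store
             * can never land one slot past the end of the array. */
            index %= NSTAMPS;
            stamps[index++] = now;
            if (index > nstamps)
                nstamps = index;
        }

        for (i = 0; i < nstamps; i++)
            printf("stamps[%u] = %lu\n", i, stamps[i]);
        return 0;
    }
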
file:9d881a61ac0262e5145437a6e2116b714c4ac631 -> file:8579b4f2f24f5bd5077bdaa9b142b1e292daeb53
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -107,7 +107,7 @@ static const struct sctp_paramhdr prsctp
cpu_to_be16(sizeof(struct sctp_paramhdr)),
};
-/* A helper to initialize to initialize an op error inside a
+/* A helper to initialize an op error inside a
* provided chunk, as most cause codes will be embedded inside an
* abort chunk.
*/
@@ -124,6 +124,29 @@ void sctp_init_cause(struct sctp_chunk
chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
}
+/* A helper to initialize an op error inside a
+ * provided chunk, as most cause codes will be embedded inside an
+ * abort chunk. Differs from sctp_init_cause in that it won't oops
+ * if there isn't enough space in the op error chunk
+ */
+int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code,
+ size_t paylen)
+{
+ sctp_errhdr_t err;
+ __u16 len;
+
+ /* Cause code constants are now defined in network order. */
+ err.cause = cause_code;
+ len = sizeof(sctp_errhdr_t) + paylen;
+ err.length = htons(len);
+
+ if (skb_tailroom(chunk->skb) < len)
+ return -ENOSPC;
+ chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk,
+ sizeof(sctp_errhdr_t),
+ &err);
+ return 0;
+}
/* 3.3.2 Initiation (INIT) (1)
*
* This chunk is used to initiate a SCTP association between two
@@ -1125,6 +1148,24 @@ nodata:
return retval;
}
+/* Create an Operation Error chunk of a fixed size,
+ * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT)
+ * This is a helper function to allocate an error chunk
+ * for those invalid parameter codes for which we may not want
+ * to report all the errors, if the incoming chunk is large
+ */
+static inline struct sctp_chunk *sctp_make_op_error_fixed(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk)
+{
+ size_t size = asoc ? asoc->pathmtu : 0;
+
+ if (!size)
+ size = SCTP_DEFAULT_MAXSEGMENT;
+
+ return sctp_make_op_error_space(asoc, chunk, size);
+}
+
/* Create an Operation Error chunk. */
struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
@@ -1365,6 +1406,18 @@ void *sctp_addto_chunk(struct sctp_chunk
return target;
}
+/* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient
+ * space in the chunk
+ */
+void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk,
+ int len, const void *data)
+{
+ if (skb_tailroom(chunk->skb) >= len)
+ return sctp_addto_chunk(chunk, len, data);
+ else
+ return NULL;
+}
+
/* Append bytes from user space to the end of a chunk. Will panic if
* chunk is not big enough.
* Returns a kernel err value.
@@ -1968,13 +2021,12 @@ static sctp_ierror_t sctp_process_unk_pa
* returning multiple unknown parameters.
*/
if (NULL == *errp)
- *errp = sctp_make_op_error_space(asoc, chunk,
- ntohs(chunk->chunk_hdr->length));
+ *errp = sctp_make_op_error_fixed(asoc, chunk);
if (*errp) {
- sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
+ sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
WORD_ROUND(ntohs(param.p->length)));
- sctp_addto_chunk(*errp,
+ sctp_addto_chunk_fixed(*errp,
WORD_ROUND(ntohs(param.p->length)),
param.v);
} else {
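
The sm_make_chunk.c hunks introduce _fixed variants of the error-chunk helpers that check skb_tailroom() and simply refuse to append when the chunk is full, rather than oopsing, and switch the unknown-parameter reporting over to them so a large incoming chunk cannot overflow the fixed-size error chunk. A user-space sketch of the same append-only-if-it-fits idea on a toy fixed-capacity buffer (names and sizes here are illustrative):

    #include <stdio.h>
    #include <string.h>

    /* Toy fixed-capacity buffer, loosely modelled on the idea behind
     * sctp_addto_chunk_fixed(): append only when the data still fits. */
    struct chunk_demo {
        char   data[32];
        size_t used;
    };

    static void *addto_chunk_fixed(struct chunk_demo *c, size_t len, const void *src)
    {
        if (sizeof(c->data) - c->used < len)
            return NULL;                    /* no tailroom left: refuse */
        memcpy(c->data + c->used, src, len);
        c->used += len;
        return c->data + c->used - len;     /* where the payload landed */
    }

    int main(void)
    {
        struct chunk_demo c = { .used = 0 };

        printf("first append:  %s\n",
               addto_chunk_fixed(&c, 20, "aaaaaaaaaaaaaaaaaaaa") ? "ok" : "no room");
        printf("second append: %s\n",
               addto_chunk_fixed(&c, 20, "bbbbbbbbbbbbbbbbbbbb") ? "ok" : "no room");
        return 0;
    }
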
file:9c5a19d9dbcea17569acee79b7c51990270a712a -> file:2370ab49dd4b16eed44550cba208885c08b846e3
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1273,9 +1273,8 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
rqstp->rq_release_snd_buf = priv_release_snd_buf;
return 0;
out_free:
- for (i--; i >= 0; i--) {
- __free_page(rqstp->rq_enc_pages[i]);
- }
+ rqstp->rq_enc_pages_num = i;
+ priv_release_snd_buf(rqstp);
out:
return -EAGAIN;
}
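
The auth_gss.c hunk above simplifies the error path of alloc_enc_pages(): instead of an open-coded backwards loop freeing whatever pages had been allocated, it records how many pages were obtained (rq_enc_pages_num = i) and reuses the existing priv_release_snd_buf() helper, keeping allocation and release in one place. A user-space sketch of that record-progress-and-reuse-the-release-path pattern:

    #include <stdio.h>
    #include <stdlib.h>

    #define NPAGES 8

    struct req_demo {
        void *pages[NPAGES];
        int   npages;               /* how many entries are valid */
    };

    /* One release helper, used on the normal path and the error path alike. */
    static void release_pages(struct req_demo *r)
    {
        int i;

        for (i = 0; i < r->npages; i++)
            free(r->pages[i]);
        r->npages = 0;
    }

    static int alloc_pages_demo(struct req_demo *r)
    {
        int i;

        for (i = 0; i < NPAGES; i++) {
            r->pages[i] = malloc(4096);
            if (!r->pages[i]) {
                /* Record how far we got and reuse the release helper,
                 * mirroring rq_enc_pages_num = i; priv_release_snd_buf(). */
                r->npages = i;
                release_pages(r);
                return -1;
            }
        }
        r->npages = NPAGES;
        return 0;
    }

    int main(void)
    {
        struct req_demo r = { .npages = 0 };

        printf("alloc: %d\n", alloc_pages_demo(&r));
        release_pages(&r);
        return 0;
    }
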
file:49278f830367eec97d34e862992d6f7f16e1e899 -> file:27a23785a50d3aea8e4e1e8aa78e8949d9c46c72
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -587,6 +587,8 @@ static struct dentry *__rpc_lookup_creat
struct dentry *dentry;
dentry = __rpc_lookup_create(parent, name);
+ if (IS_ERR(dentry))
+ return dentry;
if (dentry->d_inode == NULL)
return dentry;
dput(dentry);
file:0266ccada52af7eabbc833cc912c75f63cae49c4 -> file:3fbd6ba788e5c6b3e3ebc8252e059cb9972e87ac
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -711,8 +711,10 @@ int svc_recv(struct svc_rqst *rqstp, lon
spin_unlock_bh(&pool->sp_lock);
len = 0;
- if (test_bit(XPT_LISTENER, &xprt->xpt_flags) &&
- !test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
+ if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
+ dprintk("svc_recv: found XPT_CLOSE\n");
+ svc_delete_xprt(xprt);
+ } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
struct svc_xprt *newxpt;
newxpt = xprt->xpt_ops->xpo_accept(xprt);
if (newxpt) {
@@ -738,7 +740,7 @@ int svc_recv(struct svc_rqst *rqstp, lon
svc_xprt_received(newxpt);
}
svc_xprt_received(xprt);
- } else if (!test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
+ } else {
dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
rqstp, pool->sp_id, xprt,
atomic_read(&xprt->xpt_ref.refcount));
@@ -751,11 +753,6 @@ int svc_recv(struct svc_rqst *rqstp, lon
dprintk("svc: got len=%d\n", len);
}
- if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
- dprintk("svc_recv: found XPT_CLOSE\n");
- svc_delete_xprt(xprt);
- }
-
/* No data, incomplete (TCP) read, or accept() */
if (len == 0 || len == -EAGAIN) {
rqstp->rq_res.len = 0;
@@ -901,11 +898,8 @@ void svc_delete_xprt(struct svc_xprt *xp
if (test_bit(XPT_TEMP, &xprt->xpt_flags))
serv->sv_tmpcnt--;
- for (dr = svc_deferred_dequeue(xprt); dr;
- dr = svc_deferred_dequeue(xprt)) {
- svc_xprt_put(xprt);
+ while ((dr = svc_deferred_dequeue(xprt)) != NULL)
kfree(dr);
- }
svc_xprt_put(xprt);
spin_unlock_bh(&serv->sv_lock);
file:117f68a8aa40c2d220b0dbd0ab3fda6ed6a6d523 -> file:97cc3de7432e83702ae7913cdf4eb5d93bd4594f
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -655,23 +655,25 @@ static struct unix_gid *unix_gid_lookup(
return NULL;
}
-static int unix_gid_find(uid_t uid, struct group_info **gip,
- struct svc_rqst *rqstp)
+static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp)
{
- struct unix_gid *ug = unix_gid_lookup(uid);
+ struct unix_gid *ug;
+ struct group_info *gi;
+ int ret;
+
+ ug = unix_gid_lookup(uid);
if (!ug)
- return -EAGAIN;
- switch (cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle)) {
+ return ERR_PTR(-EAGAIN);
+ ret = cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle);
+ switch (ret) {
case -ENOENT:
- *gip = NULL;
- return 0;
+ return ERR_PTR(-ENOENT);
case 0:
- *gip = ug->gi;
- get_group_info(*gip);
+ gi = get_group_info(ug->gi);
cache_put(&ug->h, &unix_gid_cache);
- return 0;
+ return gi;
default:
- return -EAGAIN;
+ return ERR_PTR(-EAGAIN);
}
}
@@ -681,6 +683,8 @@ svcauth_unix_set_client(struct svc_rqst
struct sockaddr_in *sin;
struct sockaddr_in6 *sin6, sin6_storage;
struct ip_map *ipm;
+ struct group_info *gi;
+ struct svc_cred *cred = &rqstp->rq_cred;
switch (rqstp->rq_addr.ss_family) {
case AF_INET:
@@ -722,6 +726,17 @@ svcauth_unix_set_client(struct svc_rqst
ip_map_cached_put(rqstp, ipm);
break;
}
+
+ gi = unix_gid_find(cred->cr_uid, rqstp);
+ switch (PTR_ERR(gi)) {
+ case -EAGAIN:
+ return SVC_DROP;
+ case -ENOENT:
+ break;
+ default:
+ put_group_info(cred->cr_group_info);
+ cred->cr_group_info = gi;
+ }
return SVC_OK;
}
@@ -818,19 +833,11 @@ svcauth_unix_accept(struct svc_rqst *rqs
slen = svc_getnl(argv); /* gids length */
if (slen > 16 || (len -= (slen + 2)*4) < 0)
goto badcred;
- if (unix_gid_find(cred->cr_uid, &cred->cr_group_info, rqstp)
- == -EAGAIN)
+ cred->cr_group_info = groups_alloc(slen);
+ if (cred->cr_group_info == NULL)
return SVC_DROP;
- if (cred->cr_group_info == NULL) {
- cred->cr_group_info = groups_alloc(slen);
- if (cred->cr_group_info == NULL)
- return SVC_DROP;
- for (i = 0; i < slen; i++)
- GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
- } else {
- for (i = 0; i < slen ; i++)
- svc_getnl(argv);
- }
+ for (i = 0; i < slen; i++)
+ GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
*authp = rpc_autherr_badverf;
return SVC_DENIED;
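
For illustration only (not part of the patch): unix_gid_find() now returns either a valid group_info pointer or an error encoded with ERR_PTR(), and the caller dispatches on PTR_ERR(). The sketch below re-implements just enough of that idiom in user space to show how an errno value travels inside a pointer; lookup() and its keys are hypothetical.

#include <stdio.h>
#include <errno.h>

/* Illustrative only: a minimal user-space rendition of the kernel's
 * ERR_PTR()/PTR_ERR()/IS_ERR() idiom used by unix_gid_find() above. */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical lookup: returns a valid object, or an errno encoded
 * in the pointer itself when the caller should drop or fall back. */
static void *lookup(int key)
{
	static int obj = 42;

	if (key < 0)
		return ERR_PTR(-EAGAIN);
	if (key == 0)
		return ERR_PTR(-ENOENT);
	return &obj;
}

int main(void)
{
	void *p = lookup(-1);

	if (IS_ERR(p))
		printf("lookup failed: %ld\n", PTR_ERR(p));
	else
		printf("lookup ok\n");
	return 0;
}
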
file:1c246a4f491e9ee5126a8bace259b32a91955421 -> file:70b0a227dca42bee16f94c971114f3753d52aee6
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -968,6 +968,7 @@ static int svc_tcp_recv_record(struct sv
return len;
err_delete:
set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
+ svc_xprt_received(&svsk->sk_xprt);
err_again:
return -EAGAIN;
}
file:37c5475ba258b51b22c6722aeb605741c2732051 -> file:b6fcf68dc35cdd3a2fd08942151c5771338f05cb
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -238,7 +238,8 @@ struct sock_xprt {
* State of TCP reply receive
*/
__be32 tcp_fraghdr,
- tcp_xid;
+ tcp_xid,
+ tcp_calldir;
u32 tcp_offset,
tcp_reclen;
@@ -961,7 +962,7 @@ static inline void xs_tcp_read_calldir(s
{
size_t len, used;
u32 offset;
- __be32 calldir;
+ char *p;
/*
* We want transport->tcp_offset to be 8 at the end of this routine
@@ -970,26 +971,33 @@ static inline void xs_tcp_read_calldir(s
* transport->tcp_offset is 4 (after having already read the xid).
*/
offset = transport->tcp_offset - sizeof(transport->tcp_xid);
- len = sizeof(calldir) - offset;
+ len = sizeof(transport->tcp_calldir) - offset;
dprintk("RPC: reading CALL/REPLY flag (%Zu bytes)\n", len);
- used = xdr_skb_read_bits(desc, &calldir, len);
+ p = ((char *) &transport->tcp_calldir) + offset;
+ used = xdr_skb_read_bits(desc, p, len);
transport->tcp_offset += used;
if (used != len)
return;
transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
- transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
- transport->tcp_flags |= TCP_RCV_COPY_DATA;
/*
* We don't yet have the XDR buffer, so we will write the calldir
* out after we get the buffer from the 'struct rpc_rqst'
*/
- if (ntohl(calldir) == RPC_REPLY)
+ switch (ntohl(transport->tcp_calldir)) {
+ case RPC_REPLY:
+ transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
+ transport->tcp_flags |= TCP_RCV_COPY_DATA;
transport->tcp_flags |= TCP_RPC_REPLY;
- else
+ break;
+ case RPC_CALL:
+ transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
+ transport->tcp_flags |= TCP_RCV_COPY_DATA;
transport->tcp_flags &= ~TCP_RPC_REPLY;
- dprintk("RPC: reading %s CALL/REPLY flag %08x\n",
- (transport->tcp_flags & TCP_RPC_REPLY) ?
- "reply for" : "request with", calldir);
+ break;
+ default:
+ dprintk("RPC: invalid request message type\n");
+ xprt_force_disconnect(&transport->xprt);
+ }
xs_tcp_check_fraghdr(transport);
}
@@ -1009,12 +1017,10 @@ static inline void xs_tcp_read_common(st
/*
* Save the RPC direction in the XDR buffer
*/
- __be32 calldir = transport->tcp_flags & TCP_RPC_REPLY ?
- htonl(RPC_REPLY) : 0;
-
memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
- &calldir, sizeof(calldir));
- transport->tcp_copied += sizeof(calldir);
+ &transport->tcp_calldir,
+ sizeof(transport->tcp_calldir));
+ transport->tcp_copied += sizeof(transport->tcp_calldir);
transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
}
@@ -1926,6 +1932,11 @@ static void xs_tcp_setup_socket(struct r
case -EALREADY:
xprt_clear_connecting(xprt);
return;
+ case -EINVAL:
+ /* Happens, for instance, if the user specified a link
+ * local IPv6 address without a scope-id.
+ */
+ goto out;
}
out_eagain:
status = -EAGAIN;
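
For illustration only (not part of the patch): keeping tcp_calldir inside the transport lets a field that arrives split across TCP segments be completed later by writing into it at the already-consumed offset. A minimal sketch of that resume-at-offset pattern; struct rx_state and feed() are invented names.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Illustrative only: resume a partially received fixed-size field by
 * copying into it at the already-consumed offset, as xs_tcp_read_calldir()
 * now does with transport->tcp_calldir. */
struct rx_state {
	uint32_t calldir;	/* field being assembled */
	size_t   have;		/* bytes of it received so far */
};

/* Returns 1 once the whole field has arrived. */
static int feed(struct rx_state *st, const void *data, size_t len)
{
	size_t want = sizeof(st->calldir) - st->have;

	if (len > want)
		len = want;
	memcpy((char *)&st->calldir + st->have, data, len);
	st->have += len;
	return st->have == sizeof(st->calldir);
}

int main(void)
{
	struct rx_state st = { 0, 0 };
	const unsigned char part1[2] = { 0, 0 }, part2[2] = { 0, 1 };

	feed(&st, part1, sizeof(part1));
	if (feed(&st, part2, sizeof(part2)))
		printf("calldir assembled: %u bytes\n", (unsigned)st.have);
	return 0;
}
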
file:327011fcc407941721d0e207f0e69b1f81c2b162 -> file:78091375ca120988ee81e4a1841bc4176103837f
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -45,10 +45,10 @@
#define MAX_ADDR_STR 32
-static struct media *media_list = NULL;
+static struct media media_list[MAX_MEDIA];
static u32 media_count = 0;
-struct bearer *tipc_bearers = NULL;
+struct bearer tipc_bearers[MAX_BEARERS];
/**
* media_name_valid - validate media name
@@ -108,9 +108,11 @@ int tipc_register_media(u32 media_type,
int res = -EINVAL;
write_lock_bh(&tipc_net_lock);
- if (!media_list)
- goto exit;
+ if (tipc_mode != TIPC_NET_MODE) {
+ warn("Media <%s> rejected, not in networked mode yet\n", name);
+ goto exit;
+ }
if (!media_name_valid(name)) {
warn("Media <%s> rejected, illegal name\n", name);
goto exit;
@@ -660,33 +662,10 @@ int tipc_disable_bearer(const char *name
-int tipc_bearer_init(void)
-{
- int res;
-
- write_lock_bh(&tipc_net_lock);
- tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC);
- media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC);
- if (tipc_bearers && media_list) {
- res = 0;
- } else {
- kfree(tipc_bearers);
- kfree(media_list);
- tipc_bearers = NULL;
- media_list = NULL;
- res = -ENOMEM;
- }
- write_unlock_bh(&tipc_net_lock);
- return res;
-}
-
void tipc_bearer_stop(void)
{
u32 i;
- if (!tipc_bearers)
- return;
-
for (i = 0; i < MAX_BEARERS; i++) {
if (tipc_bearers[i].active)
tipc_bearers[i].publ.blocked = 1;
@@ -695,10 +674,6 @@ void tipc_bearer_stop(void)
if (tipc_bearers[i].active)
bearer_disable(tipc_bearers[i].publ.name);
}
- kfree(tipc_bearers);
- kfree(media_list);
- tipc_bearers = NULL;
- media_list = NULL;
media_count = 0;
}
file:ca5734892713d19f9d6cc37b1fc48b684cc5d539 -> file:000228e93f9ef75e3dd008b4b76b4a9f56b963c6
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -114,7 +114,7 @@ struct bearer_name {
struct link;
-extern struct bearer *tipc_bearers;
+extern struct bearer tipc_bearers[];
void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
struct sk_buff *tipc_media_get_names(void);
file:7906608bf510f4d6eec420e739256e8658647be6 -> file:f25b1cdb64eb069cba0bf991df72930248ba371c
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -116,7 +116,8 @@
*/
DEFINE_RWLOCK(tipc_net_lock);
-struct network tipc_net = { NULL };
+struct _zone *tipc_zones[256] = { NULL, };
+struct network tipc_net = { tipc_zones };
struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref)
{
@@ -158,28 +159,12 @@ void tipc_net_send_external_routes(u32 d
}
}
-static int net_init(void)
-{
- memset(&tipc_net, 0, sizeof(tipc_net));
- tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC);
- if (!tipc_net.zones) {
- return -ENOMEM;
- }
- return 0;
-}
-
static void net_stop(void)
{
u32 z_num;
- if (!tipc_net.zones)
- return;
-
- for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
+ for (z_num = 1; z_num <= tipc_max_zones; z_num++)
tipc_zone_delete(tipc_net.zones[z_num]);
- }
- kfree(tipc_net.zones);
- tipc_net.zones = NULL;
}
static void net_route_named_msg(struct sk_buff *buf)
@@ -282,9 +267,7 @@ int tipc_net_start(u32 addr)
tipc_named_reinit();
tipc_port_reinit();
- if ((res = tipc_bearer_init()) ||
- (res = net_init()) ||
- (res = tipc_cltr_init()) ||
+ if ((res = tipc_cltr_init()) ||
(res = tipc_bclink_init())) {
return res;
}
file:68b321997d4cbe6dbe764f9b20b191135f9aed3f -> file:376798fd6b16dda652711cc164d5817ff0648520
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -385,6 +385,8 @@ int rdev_set_freq(struct cfg80211_regist
struct wireless_dev *for_wdev,
int freq, enum nl80211_channel_type channel_type);
+u16 cfg80211_calculate_bitrate(struct rate_info *rate);
+
#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond)
#else
file:ca3c92a0a14f26898c5e07a137a54393e1279fdd -> file:b75e718e1ad1467fbb8332c7289f3983fad9ef1a
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1562,39 +1562,6 @@ static int parse_station_flags(struct ge
return 0;
}
-static u16 nl80211_calculate_bitrate(struct rate_info *rate)
-{
- int modulation, streams, bitrate;
-
- if (!(rate->flags & RATE_INFO_FLAGS_MCS))
- return rate->legacy;
-
- /* the formula below does only work for MCS values smaller than 32 */
- if (rate->mcs >= 32)
- return 0;
-
- modulation = rate->mcs & 7;
- streams = (rate->mcs >> 3) + 1;
-
- bitrate = (rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) ?
- 13500000 : 6500000;
-
- if (modulation < 4)
- bitrate *= (modulation + 1);
- else if (modulation == 4)
- bitrate *= (modulation + 2);
- else
- bitrate *= (modulation + 3);
-
- bitrate *= streams;
-
- if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
- bitrate = (bitrate / 9) * 10;
-
- /* do NOT round down here */
- return (bitrate + 50000) / 100000;
-}
-
static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
int flags, struct net_device *dev,
u8 *mac_addr, struct station_info *sinfo)
@@ -1641,8 +1608,8 @@ static int nl80211_send_station(struct s
if (!txrate)
goto nla_put_failure;
- /* nl80211_calculate_bitrate will return 0 for mcs >= 32 */
- bitrate = nl80211_calculate_bitrate(&sinfo->txrate);
+ /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
+ bitrate = cfg80211_calculate_bitrate(&sinfo->txrate);
if (bitrate > 0)
NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);
file:3fc2df86278fcf7390c5d2b3cce484a7abb04f4d -> file:a6a38b17645fda302831ce1817db16fcd6b44b90
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -682,3 +682,36 @@ int cfg80211_change_iface(struct cfg8021
return err;
}
+
+u16 cfg80211_calculate_bitrate(struct rate_info *rate)
+{
+ int modulation, streams, bitrate;
+
+ if (!(rate->flags & RATE_INFO_FLAGS_MCS))
+ return rate->legacy;
+
+ /* the formula below does only work for MCS values smaller than 32 */
+ if (rate->mcs >= 32)
+ return 0;
+
+ modulation = rate->mcs & 7;
+ streams = (rate->mcs >> 3) + 1;
+
+ bitrate = (rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) ?
+ 13500000 : 6500000;
+
+ if (modulation < 4)
+ bitrate *= (modulation + 1);
+ else if (modulation == 4)
+ bitrate *= (modulation + 2);
+ else
+ bitrate *= (modulation + 3);
+
+ bitrate *= streams;
+
+ if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
+ bitrate = (bitrate / 9) * 10;
+
+ /* do NOT round down here */
+ return (bitrate + 50000) / 100000;
+}
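
For illustration only (not part of the patch): as a sanity check of the formula above, take MCS 7 at 40 MHz with a short guard interval. Then modulation = 7 and streams = 1, so bitrate = 13,500,000 * (7 + 3) = 135,000,000; the short-GI adjustment gives 135,000,000 / 9 * 10 = 150,000,000, and the function returns (150,000,000 + 50,000) / 100,000 = 1500, i.e. 150.0 Mbps in the function's 100 kbit/s return units.
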
file:561a45cf2a6af562784113cedfd0952108895f9e -> file:6a60c5a2eac83401f197aa892a116473e9874046
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -1227,10 +1227,7 @@ int cfg80211_wext_giwrate(struct net_dev
if (!(sinfo.filled & STATION_INFO_TX_BITRATE))
return -EOPNOTSUPP;
- rate->value = 0;
-
- if (!(sinfo.txrate.flags & RATE_INFO_FLAGS_MCS))
- rate->value = 100000 * sinfo.txrate.legacy;
+ rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate);
return 0;
}
file:f7496c6a022b7c2213f061c17b435bf0328085ed -> file:3d78d69a629bcd5bbafc644bb3cca0a513e39565
--- a/security/inode.c
+++ b/security/inode.c
@@ -168,13 +168,13 @@ static int create_by_name(const char *na
mutex_lock(&parent->d_inode->i_mutex);
*dentry = lookup_one_len(name, parent, strlen(name));
- if (!IS_ERR(dentry)) {
+ if (!IS_ERR(*dentry)) {
if ((mode & S_IFMT) == S_IFDIR)
error = mkdir(parent->d_inode, *dentry, mode);
else
error = create(parent->d_inode, *dentry, mode);
} else
- error = PTR_ERR(dentry);
+ error = PTR_ERR(*dentry);
mutex_unlock(&parent->d_inode->i_mutex);
return error;
file:8ec02746ca993fa697da600757fbcd21b319e2f7 -> file:e031952a49e133ed45f613ff0c06af57db3c7ec4
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -524,9 +524,8 @@ struct key *find_keyring_by_name(const c
struct key *keyring;
int bucket;
- keyring = ERR_PTR(-EINVAL);
if (!name)
- goto error;
+ return ERR_PTR(-EINVAL);
bucket = keyring_hash(name);
@@ -553,17 +552,18 @@ struct key *find_keyring_by_name(const c
KEY_SEARCH) < 0)
continue;
- /* we've got a match */
- atomic_inc(&keyring->usage);
- read_unlock(&keyring_name_lock);
- goto error;
+ /* we've got a match but we might end up racing with
+ * key_cleanup() if the keyring is currently 'dead'
+ * (ie. it has a zero usage count) */
+ if (!atomic_inc_not_zero(&keyring->usage))
+ continue;
+ goto out;
}
}
- read_unlock(&keyring_name_lock);
keyring = ERR_PTR(-ENOKEY);
-
- error:
+out:
+ read_unlock(&keyring_name_lock);
return keyring;
} /* end find_keyring_by_name() */
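
For illustration only (not part of the patch): find_keyring_by_name() now uses atomic_inc_not_zero() so it never revives a keyring whose usage count has already dropped to zero. Below is a user-space sketch of that "take a reference only if not already dead" pattern using C11 atomics; get_ref_not_zero is an invented name.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: take a reference only when the object is not
 * already dead (refcount zero), mirroring atomic_inc_not_zero(). */
static bool get_ref_not_zero(atomic_int *refcount)
{
	int old = atomic_load(refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refcount, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;			/* object is being torn down */
}

int main(void)
{
	atomic_int live = 2, dead = 0;

	printf("%d %d\n", get_ref_not_zero(&live), get_ref_not_zero(&dead));
	return 0;	/* prints "1 0" */
}
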
file:5c23afb31ece464ad0165e1a3fb052a8969244c5 -> file:931cfda6e1f9ff2c2030a04335fd4d2a47820e99
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -509,7 +509,7 @@ try_again:
ret = install_thread_keyring();
if (ret < 0) {
- key = ERR_PTR(ret);
+ key_ref = ERR_PTR(ret);
goto error;
}
goto reget_creds;
@@ -527,7 +527,7 @@ try_again:
ret = install_process_keyring();
if (ret < 0) {
- key = ERR_PTR(ret);
+ key_ref = ERR_PTR(ret);
goto error;
}
goto reget_creds;
@@ -586,7 +586,7 @@ try_again:
case KEY_SPEC_GROUP_KEYRING:
/* group keyrings are not yet supported */
- key = ERR_PTR(-EINVAL);
+ key_ref = ERR_PTR(-EINVAL);
goto error;
case KEY_SPEC_REQKEY_AUTH_KEY:
file:03fe63ed55bda1a1cfacc95138e409e8ef53f0d2 -> file:9ac7bfd3bbdd386fedd677077f9ac834fd643c17
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -336,8 +336,10 @@ static int construct_alloc_key(struct ke
key_already_present:
mutex_unlock(&key_construction_mutex);
- if (dest_keyring)
+ if (dest_keyring) {
+ __key_link(dest_keyring, key_ref_to_ptr(key_ref));
up_write(&dest_keyring->sem);
+ }
mutex_unlock(&user->cons_lock);
key_put(key);
*_key = key = key_ref_to_ptr(key_ref);
@@ -428,6 +430,11 @@ struct key *request_key_and_link(struct
if (!IS_ERR(key_ref)) {
key = key_ref_to_ptr(key_ref);
+ if (dest_keyring) {
+ construct_get_dest_keyring(&dest_keyring);
+ key_link(dest_keyring, key);
+ key_put(dest_keyring);
+ }
} else if (PTR_ERR(key_ref) != -EAGAIN) {
key = ERR_CAST(key_ref);
} else {
file:68c7348d1acc6628f4c6121207edc34f6a7f707a -> file:04b6145d767f96093423d5f733e6fe1b6a4e5687
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -128,7 +128,7 @@ int ebitmap_netlbl_export(struct ebitmap
cmap_idx = delta / NETLBL_CATMAP_MAPSIZE;
cmap_sft = delta % NETLBL_CATMAP_MAPSIZE;
c_iter->bitmap[cmap_idx]
- |= e_iter->maps[cmap_idx] << cmap_sft;
+ |= e_iter->maps[i] << cmap_sft;
}
e_iter = e_iter->next;
}
file:ab73edf2c89a1fe4e996c5f2d7ea6c751fba2d30 -> file:7ba779d9e2e797c55e40d629c23e7fc67c2779f9
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -314,10 +314,10 @@ int snd_pcm_hw_refine(struct snd_pcm_sub
if (!params->info)
params->info = hw->info & ~SNDRV_PCM_INFO_FIFO_IN_FRAMES;
if (!params->fifo_size) {
- if (snd_mask_min(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT]) ==
- snd_mask_max(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT]) &&
- snd_mask_min(&params->masks[SNDRV_PCM_HW_PARAM_CHANNELS]) ==
- snd_mask_max(&params->masks[SNDRV_PCM_HW_PARAM_CHANNELS])) {
+ m = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ if (snd_mask_min(m) == snd_mask_max(m) &&
+ snd_interval_min(i) == snd_interval_max(i)) {
changed = substream->ops->ioctl(substream,
SNDRV_PCM_IOCTL1_FIFO_SIZE, params);
if (changed < 0)
file:67ca4403c1c6d00a1b7cdbf172b4326be5d6cf86 -> file:e7efcef7b812e382c1dc477e2b973e151431adbf
--- a/sound/pci/ac97/ac97_patch.c
+++ b/sound/pci/ac97/ac97_patch.c
@@ -1867,12 +1867,14 @@ static unsigned int ad1981_jacks_blackli
0x10140523, /* Thinkpad R40 */
0x10140534, /* Thinkpad X31 */
0x10140537, /* Thinkpad T41p */
+ 0x1014053e, /* Thinkpad R40e */
0x10140554, /* Thinkpad T42p/R50p */
0x10140567, /* Thinkpad T43p 2668-G7U */
0x10140581, /* Thinkpad X41-2527 */
0x10280160, /* Dell Dimension 2400 */
0x104380b0, /* Asus A7V8X-MX */
0x11790241, /* Toshiba Satellite A-15 S127 */
+ 0x1179ff10, /* Toshiba P500 */
0x144dc01a, /* Samsung NP-X20C004/SEG */
0 /* end */
};
file:ddcd4a9fd7e69f1cf63fcbc0d79adc8868c4237a -> file:78c8736638d3173b8b991ab4a7bab4d79aed3db0
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -941,13 +941,21 @@ static snd_pcm_uframes_t snd_cmipci_pcm_
struct snd_pcm_substream *substream)
{
size_t ptr;
- unsigned int reg;
+ unsigned int reg, rem, tries;
+
if (!rec->running)
return 0;
#if 1 // this seems better..
reg = rec->ch ? CM_REG_CH1_FRAME2 : CM_REG_CH0_FRAME2;
- ptr = rec->dma_size - (snd_cmipci_read_w(cm, reg) + 1);
- ptr >>= rec->shift;
+ for (tries = 0; tries < 3; tries++) {
+ rem = snd_cmipci_read_w(cm, reg);
+ if (rem < rec->dma_size)
+ goto ok;
+ }
+ printk(KERN_ERR "cmipci: invalid PCM pointer: %#x\n", rem);
+ return SNDRV_PCM_POS_XRUN;
+ok:
+ ptr = (rec->dma_size - (rem + 1)) >> rec->shift;
#else
reg = rec->ch ? CM_REG_CH1_FRAME1 : CM_REG_CH0_FRAME1;
ptr = snd_cmipci_read(cm, reg) - rec->offset;
file:1305f7ca02c3c2726049dc448dec6b7fa36a63cd -> file:641d7f07392c63edea8bcf8a9bc71957914239cd
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -1821,7 +1821,9 @@ static irqreturn_t snd_echo_interrupt(in
/* The hardware doesn't tell us which substream caused the irq,
thus we have to check all running substreams. */
for (ss = 0; ss < DSP_MAXPIPES; ss++) {
- if ((substream = chip->substream[ss])) {
+ substream = chip->substream[ss];
+ if (substream && ((struct audiopipe *)substream->runtime->
+ private_data)->state == PIPE_STATE_STARTED) {
period = pcm_pointer(substream) /
substream->runtime->period_size;
if (period != chip->last_period[ss]) {
file:fec8724b9125c50c8dc0d2f432167302e31a5ee8 -> file:cc2a5a2a147b2f9fb656ef5a348f363f7b5ec246
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -116,6 +116,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},
"{Intel, ICH9},"
"{Intel, ICH10},"
"{Intel, PCH},"
+ "{Intel, CPT},"
"{Intel, SCH},"
"{ATI, SB450},"
"{ATI, SB600},"
@@ -437,6 +438,7 @@ struct azx {
/* driver types */
enum {
AZX_DRIVER_ICH,
+ AZX_DRIVER_PCH,
AZX_DRIVER_SCH,
AZX_DRIVER_ATI,
AZX_DRIVER_ATIHDMI,
@@ -451,6 +453,7 @@ enum {
static char *driver_short_names[] __devinitdata = {
[AZX_DRIVER_ICH] = "HDA Intel",
+ [AZX_DRIVER_PCH] = "HDA Intel PCH",
[AZX_DRIVER_SCH] = "HDA Intel MID",
[AZX_DRIVER_ATI] = "HDA ATI SB",
[AZX_DRIVER_ATIHDMI] = "HDA ATI HDMI",
@@ -1039,6 +1042,7 @@ static void azx_init_pci(struct azx *chi
0x01, NVIDIA_HDA_ENABLE_COHBIT);
break;
case AZX_DRIVER_SCH:
+ case AZX_DRIVER_PCH:
pci_read_config_word(chip->pci, INTEL_SCH_HDA_DEVC, &snoop);
if (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP) {
pci_write_config_word(chip->pci, INTEL_SCH_HDA_DEVC,
@@ -2222,9 +2226,25 @@ static int azx_dev_free(struct snd_devic
* white/black-listing for position_fix
*/
static struct snd_pci_quirk position_fix_list[] __devinitdata = {
+ SND_PCI_QUIRK(0x1025, 0x009f, "Acer Aspire 5110", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1028, 0x01f6, "Dell Latitude 131L", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1849, 0x0888, "775Dual-VSTA", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x8086, 0x2503, "DG965OT AAD63733-203", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
{}
};
@@ -2312,6 +2332,7 @@ static void __devinit check_probe_mask(s
static struct snd_pci_quirk msi_white_list[] __devinitdata = {
SND_PCI_QUIRK(0x103c, 0x30f7, "HP Pavilion dv4t-1300", 1),
SND_PCI_QUIRK(0x103c, 0x3607, "HP Compa CQ40", 1),
+ SND_PCI_QUIRK(0x107b, 0x0380, "Gateway M-6866", 1),
{}
};
@@ -2328,6 +2349,13 @@ static void __devinit check_msi(struct a
"hda_intel: msi for device %04x:%04x set to %d\n",
q->subvendor, q->subdevice, q->value);
chip->msi = q->value;
+ return;
+ }
+
+ /* NVidia chipsets seem to cause troubles with MSI */
+ if (chip->driver_type == AZX_DRIVER_NVIDIA) {
+ printk(KERN_INFO "hda_intel: Disable MSI for Nvidia chipset\n");
+ chip->msi = 0;
}
}
@@ -2377,6 +2405,7 @@ static int __devinit azx_create(struct s
if (bdl_pos_adj[dev] < 0) {
switch (chip->driver_type) {
case AZX_DRIVER_ICH:
+ case AZX_DRIVER_PCH:
bdl_pos_adj[dev] = 1;
break;
default:
@@ -2651,6 +2680,9 @@ static struct pci_device_id azx_ids[] =
{ PCI_DEVICE(0x8086, 0x3a6e), .driver_data = AZX_DRIVER_ICH },
/* PCH */
{ PCI_DEVICE(0x8086, 0x3b56), .driver_data = AZX_DRIVER_ICH },
+ { PCI_DEVICE(0x8086, 0x3b57), .driver_data = AZX_DRIVER_ICH },
+ /* CPT */
+ { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH },
/* SCH */
{ PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH },
/* ATI SB 450/600 */
file:2d603f6aba6319bbf149a8dc5b966aec328aba8e -> file:bd0794e09a8947d6bfc6f298501eba5ee35040e9
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -1003,7 +1003,7 @@ static struct snd_pci_quirk ad1986a_cfg_
SND_PCI_QUIRK(0x1043, 0x81cb, "ASUS M2N", AD1986A_3STACK),
SND_PCI_QUIRK(0x1043, 0x8234, "ASUS M2N", AD1986A_3STACK),
SND_PCI_QUIRK(0x10de, 0xcb84, "ASUS A8N-VM", AD1986A_3STACK),
- SND_PCI_QUIRK(0x1179, 0xff40, "Toshiba", AD1986A_LAPTOP_EAPD),
+ SND_PCI_QUIRK(0x1179, 0xff40, "Toshiba Satellite L40-10Q", AD1986A_3STACK),
SND_PCI_QUIRK(0x144d, 0xb03c, "Samsung R55", AD1986A_3STACK),
SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_LAPTOP),
SND_PCI_QUIRK(0x144d, 0xc024, "Samsung P50", AD1986A_SAMSUNG_P50),
@@ -1789,6 +1789,14 @@ static int patch_ad1981(struct hda_codec
case AD1981_THINKPAD:
spec->mixers[0] = ad1981_thinkpad_mixers;
spec->input_mux = &ad1981_thinkpad_capture_source;
+		/* set the upper-limit for mixer amp to 0dB to avoid
+		 * possible damage by overloading
+ */
+ snd_hda_override_amp_caps(codec, 0x11, HDA_INPUT,
+ (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
+ (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
+ (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
+ (1 << AC_AMPCAP_MUTE_SHIFT));
break;
case AD1981_TOSHIBA:
spec->mixers[0] = ad1981_hp_mixers;
file:905859d4f4dfdca60561e9bafd52492e8a053933 -> file:9d855f476f7a3fc38f17fa96b7d24322b577f875
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -1175,9 +1175,12 @@ static int patch_cxt5045(struct hda_code
switch (codec->subsystem_id >> 16) {
case 0x103c:
- /* HP laptop has a really bad sound over 0dB on NID 0x17.
- * Fix max PCM level to 0 dB
- * (originall it has 0x2b steps with 0dB offset 0x14)
+ case 0x1631:
+ case 0x1734:
+ case 0x17aa:
+ /* HP, Packard Bell, Fujitsu-Siemens & Lenovo laptops have
+ * really bad sound over 0dB on NID 0x17. Fix max PCM level to
+ * 0 dB (originally it has 0x2b steps with 0dB offset 0x14)
*/
snd_hda_override_amp_caps(codec, 0x17, HDA_INPUT,
(0x14 << AC_AMPCAP_OFFSET_SHIFT) |
@@ -1581,6 +1584,21 @@ static int patch_cxt5047(struct hda_code
#endif
}
spec->vmaster_nid = 0x13;
+
+ switch (codec->subsystem_id >> 16) {
+ case 0x103c:
+ /* HP laptops have really bad sound over 0 dB on NID 0x10.
+ * Fix max PCM level to 0 dB (originally it has 0x1e steps
+ * with 0 dB offset 0x17)
+ */
+ snd_hda_override_amp_caps(codec, 0x10, HDA_INPUT,
+ (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
+ (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
+ (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
+ (1 << AC_AMPCAP_MUTE_SHIFT));
+ break;
+ }
+
return 0;
}
@@ -2333,6 +2351,8 @@ static struct snd_pci_quirk cxt5066_cfg_
SND_PCI_QUIRK(0x1028, 0x02f5, "Dell",
CXT5066_DELL_LAPTOP),
SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
+ SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
+ SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
{}
};
file:911dd1fec22ed286feaa411d18b97d4d92204095 -> file:aa2ec237c197aee11e1401a9bf45265415199ffd
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -400,6 +400,8 @@ static int alc_mux_enum_info(struct snd_
unsigned int mux_idx = snd_ctl_get_ioffidx(kcontrol, &uinfo->id);
if (mux_idx >= spec->num_mux_defs)
mux_idx = 0;
+ if (!spec->input_mux[mux_idx].num_items && mux_idx > 0)
+ mux_idx = 0;
return snd_hda_input_mux_info(&spec->input_mux[mux_idx], uinfo);
}
@@ -428,6 +430,8 @@ static int alc_mux_enum_put(struct snd_k
mux_idx = adc_idx >= spec->num_mux_defs ? 0 : adc_idx;
imux = &spec->input_mux[mux_idx];
+ if (!imux->num_items && mux_idx > 0)
+ imux = &spec->input_mux[0];
type = get_wcaps_type(get_wcaps(codec, nid));
if (type == AC_WID_AUD_MIX) {
@@ -3967,7 +3971,7 @@ static struct snd_pci_quirk alc880_cfg_t
SND_PCI_QUIRK(0x1695, 0x4012, "EPox EP-5LDA", ALC880_5ST_DIG),
SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_F1734),
SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FUJITSU),
- SND_PCI_QUIRK(0x1734, 0x10ac, "FSC", ALC880_UNIWILL),
+ SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_F1734),
SND_PCI_QUIRK(0x1734, 0x10b0, "Fujitsu", ALC880_FUJITSU),
SND_PCI_QUIRK(0x1854, 0x0018, "LG LW20", ALC880_LG_LW),
SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_LG),
@@ -6248,6 +6252,7 @@ static const char *alc260_models[ALC260_
static struct snd_pci_quirk alc260_cfg_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x007b, "Acer C20x", ALC260_ACER),
+ SND_PCI_QUIRK(0x1025, 0x007f, "Acer", ALC260_WILL),
SND_PCI_QUIRK(0x1025, 0x008f, "Acer", ALC260_ACER),
SND_PCI_QUIRK(0x1509, 0x4540, "Favorit 100XS", ALC260_FAVORIT100),
SND_PCI_QUIRK(0x103c, 0x2808, "HP d5700", ALC260_HP_3013),
@@ -6277,7 +6282,7 @@ static struct alc_config_preset alc260_p
.num_dacs = ARRAY_SIZE(alc260_dac_nids),
.dac_nids = alc260_dac_nids,
.num_adc_nids = ARRAY_SIZE(alc260_dual_adc_nids),
- .adc_nids = alc260_adc_nids,
+ .adc_nids = alc260_dual_adc_nids,
.num_channel_mode = ARRAY_SIZE(alc260_modes),
.channel_mode = alc260_modes,
.input_mux = &alc260_capture_source,
@@ -8890,6 +8895,7 @@ static struct snd_pci_quirk alc882_cfg_t
SND_PCI_QUIRK(0x1462, 0xaa08, "MSI", ALC883_TARGA_2ch_DIG),
SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG),
+ SND_PCI_QUIRK(0x1558, 0x0571, "Clevo laptop M570U", ALC883_3ST_6ch_DIG),
SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720),
SND_PCI_QUIRK(0x1558, 0x0722, "Clevo laptop M720SR", ALC883_CLEVO_M720),
SND_PCI_QUIRK(0x1558, 0x5409, "Clevo laptop M540R", ALC883_CLEVO_M540R),
@@ -8917,7 +8923,7 @@ static struct snd_pci_quirk alc882_cfg_t
SND_PCI_QUIRK(0x8086, 0x0022, "DX58SO", ALC889_INTEL),
SND_PCI_QUIRK(0x8086, 0x0021, "Intel IbexPeak", ALC889A_INTEL),
SND_PCI_QUIRK(0x8086, 0x3b56, "Intel IbexPeak", ALC889A_INTEL),
- SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC883_3ST_6ch),
+ SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC882_6ST_DIG),
{}
};
@@ -8931,10 +8937,12 @@ static struct snd_pci_quirk alc882_ssid_
SND_PCI_QUIRK(0x106b, 0x1000, "iMac 24", ALC885_IMAC24),
SND_PCI_QUIRK(0x106b, 0x2800, "AppleTV", ALC885_IMAC24),
SND_PCI_QUIRK(0x106b, 0x2c00, "MacbookPro rev3", ALC885_MBP3),
+ SND_PCI_QUIRK(0x106b, 0x3000, "iMac", ALC889A_MB31),
SND_PCI_QUIRK(0x106b, 0x3600, "Macbook 3,1", ALC889A_MB31),
SND_PCI_QUIRK(0x106b, 0x3800, "MacbookPro 4,1", ALC885_MBP3),
SND_PCI_QUIRK(0x106b, 0x3e00, "iMac 24 Aluminum", ALC885_IMAC24),
SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC885_MB5),
+ SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC885_MB5),
/* FIXME: HP jack sense seems not working for MBP 5,1 or 5,2,
* so apparently no perfect solution yet
*/
@@ -9743,6 +9751,8 @@ static void alc882_auto_init_input_src(s
continue;
mux_idx = c >= spec->num_mux_defs ? 0 : c;
imux = &spec->input_mux[mux_idx];
+ if (!imux->num_items && mux_idx > 0)
+ imux = &spec->input_mux[0];
for (idx = 0; idx < conns; idx++) {
/* if the current connection is the selected one,
* unmute it as default - otherwise mute it
@@ -10613,6 +10623,13 @@ static struct hda_verb alc262_lenovo_300
{}
};
+static struct hda_verb alc262_lenovo_3000_init_verbs[] = {
+ /* Front Mic pin: input vref at 50% */
+ {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF50},
+ {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
+ {}
+};
+
static struct hda_input_mux alc262_fujitsu_capture_source = {
.num_items = 3,
.items = {
@@ -11680,7 +11697,8 @@ static struct alc_config_preset alc262_p
[ALC262_LENOVO_3000] = {
.mixers = { alc262_lenovo_3000_mixer },
.init_verbs = { alc262_init_verbs, alc262_EAPD_verbs,
- alc262_lenovo_3000_unsol_verbs },
+ alc262_lenovo_3000_unsol_verbs,
+ alc262_lenovo_3000_init_verbs },
.num_dacs = ARRAY_SIZE(alc262_dac_nids),
.dac_nids = alc262_dac_nids,
.hp_nid = 0x03,
file:86de305fc9f225ef382f6a7bdfbd438a2f97840f -> file:fee411b43ef6849d8673544360bb5af8240e3ed4
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -1592,6 +1592,10 @@ static struct snd_pci_quirk stac92hd73xx
"Dell Studio 1555", STAC_DELL_M6_DMIC),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd,
"Dell Studio 1557", STAC_DELL_M6_DMIC),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe,
+ "Dell Studio XPS 1645", STAC_DELL_M6_BOTH),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413,
+ "Dell Studio 1558", STAC_DELL_M6_BOTH),
{} /* terminator */
};
@@ -1712,6 +1716,8 @@ static struct snd_pci_quirk stac92hd71bx
"HP HDX", STAC_HP_HDX), /* HDX16 */
SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x3620,
"HP dv6", STAC_HP_DV5),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3061,
+ "HP dv6", STAC_HP_DV5), /* HP dv6-1110ax */
SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x7010,
"HP", STAC_HP_DV5),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0233,
@@ -2053,12 +2059,12 @@ static struct snd_pci_quirk stac927x_cfg
SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_INTEL, 0xff00, 0x2000,
"Intel D965", STAC_D965_3ST),
/* Dell 3 stack systems */
- SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f7, "Dell XPS M1730", STAC_DELL_3ST),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01dd, "Dell Dimension E520", STAC_DELL_3ST),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ed, "Dell ", STAC_DELL_3ST),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f4, "Dell ", STAC_DELL_3ST),
/* Dell 3 stack systems with verb table in BIOS */
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f3, "Dell Inspiron 1420", STAC_DELL_BIOS),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f7, "Dell XPS M1730", STAC_DELL_BIOS),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0227, "Dell Vostro 1400 ", STAC_DELL_BIOS),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022e, "Dell ", STAC_DELL_BIOS),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022f, "Dell Inspiron 1525", STAC_DELL_BIOS),
file:3e1c20ae2f1c2c42c115c293bc26fdd6c660124f -> file:726fd4b92e19e8d564adef229c9c5201a0785963
--- a/sound/pci/ice1712/maya44.c
+++ b/sound/pci/ice1712/maya44.c
@@ -347,7 +347,7 @@ static int maya_gpio_sw_put(struct snd_k
/* known working input slots (0-4) */
#define MAYA_LINE_IN 1 /* in-2 */
-#define MAYA_MIC_IN 4 /* in-5 */
+#define MAYA_MIC_IN 3 /* in-4 */
static void wm8776_select_input(struct snd_maya44 *chip, int idx, int line)
{
@@ -393,8 +393,8 @@ static int maya_rec_src_put(struct snd_k
int changed;
mutex_lock(&chip->mutex);
- changed = maya_set_gpio_bits(chip->ice, GPIO_MIC_RELAY,
- sel ? GPIO_MIC_RELAY : 0);
+ changed = maya_set_gpio_bits(chip->ice, 1 << GPIO_MIC_RELAY,
+ sel ? (1 << GPIO_MIC_RELAY) : 0);
wm8776_select_input(chip, 0, sel ? MAYA_MIC_IN : MAYA_LINE_IN);
mutex_unlock(&chip->mutex);
return changed;
file:75283fbb4b3fd1723d62a73baf663c14f37f41d3 -> file:c2311f85a331605d2c525c564704f07d5829b0fa
--- a/sound/pci/maestro3.c
+++ b/sound/pci/maestro3.c
@@ -849,6 +849,7 @@ struct snd_m3 {
struct snd_kcontrol *master_switch;
struct snd_kcontrol *master_volume;
struct tasklet_struct hwvol_tq;
+ unsigned int in_suspend;
#ifdef CONFIG_PM
u16 *suspend_mem;
@@ -884,6 +885,7 @@ static struct pci_device_id snd_m3_ids[]
MODULE_DEVICE_TABLE(pci, snd_m3_ids);
static struct snd_pci_quirk m3_amp_quirk_list[] __devinitdata = {
+ SND_PCI_QUIRK(0x0E11, 0x0094, "Compaq Evo N600c", 0x0c),
SND_PCI_QUIRK(0x10f7, 0x833e, "Panasonic CF-28", 0x0d),
SND_PCI_QUIRK(0x10f7, 0x833d, "Panasonic CF-72", 0x0d),
SND_PCI_QUIRK(0x1033, 0x80f1, "NEC LM800J/7", 0x03),
@@ -1613,6 +1615,11 @@ static void snd_m3_update_hw_volume(unsi
outb(0x88, chip->iobase + SHADOW_MIX_REG_MASTER);
outb(0x88, chip->iobase + HW_VOL_COUNTER_MASTER);
+	/* Ignore spurious HV interrupts during suspend / resume; this avoids
+	   mistaking them for a mute button press. */
+ if (chip->in_suspend)
+ return;
+
if (!chip->master_switch || !chip->master_volume)
return;
@@ -2424,6 +2431,7 @@ static int m3_suspend(struct pci_dev *pc
if (chip->suspend_mem == NULL)
return 0;
+ chip->in_suspend = 1;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
snd_pcm_suspend_all(chip->pcm);
snd_ac97_suspend(chip->ac97);
@@ -2497,6 +2505,7 @@ static int m3_resume(struct pci_dev *pci
snd_m3_hv_init(chip);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
+ chip->in_suspend = 0;
return 0;
}
#endif /* CONFIG_PM */
file:a83d1968a8450f49816add1bf2bbc2ffbba962c6 -> file:32f98535c0b436f9028d16147e1f5efab4b71aab
--- a/sound/pci/mixart/mixart.c
+++ b/sound/pci/mixart/mixart.c
@@ -1161,13 +1161,15 @@ static long snd_mixart_BA0_read(struct s
unsigned long count, unsigned long pos)
{
struct mixart_mgr *mgr = entry->private_data;
+ unsigned long maxsize;
- count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
- if(count <= 0)
+ if (pos >= MIXART_BA0_SIZE)
return 0;
- if(pos + count > MIXART_BA0_SIZE)
- count = (long)(MIXART_BA0_SIZE - pos);
- if(copy_to_user_fromio(buf, MIXART_MEM( mgr, pos ), count))
+ maxsize = MIXART_BA0_SIZE - pos;
+ if (count > maxsize)
+ count = maxsize;
+ count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
+ if (copy_to_user_fromio(buf, MIXART_MEM(mgr, pos), count))
return -EFAULT;
return count;
}
@@ -1180,13 +1182,15 @@ static long snd_mixart_BA1_read(struct s
unsigned long count, unsigned long pos)
{
struct mixart_mgr *mgr = entry->private_data;
+ unsigned long maxsize;
- count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
- if(count <= 0)
+ if (pos > MIXART_BA1_SIZE)
return 0;
- if(pos + count > MIXART_BA1_SIZE)
- count = (long)(MIXART_BA1_SIZE - pos);
- if(copy_to_user_fromio(buf, MIXART_REG( mgr, pos ), count))
+ maxsize = MIXART_BA1_SIZE - pos;
+ if (count > maxsize)
+ count = maxsize;
+ count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
+ if (copy_to_user_fromio(buf, MIXART_REG(mgr, pos), count))
return -EFAULT;
return count;
}
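
For illustration only (not part of the patch): the BA0/BA1 fixes reorder the checks so the position is validated and the length clamped before the 4-byte alignment mask is applied. A self-contained sketch of that clamping order, with an invented REGION_SIZE and helper name:

#include <stdio.h>
#include <string.h>

#define REGION_SIZE 4096UL

/* Illustrative only: clamp a (pos, count) read request against a fixed
 * region before copying, in the same order of checks as the mixart fix:
 * reject reads past the end, clamp to the remaining bytes, then round
 * the length down to a multiple of 4. */
static size_t bounded_read(char *dst, const char *region,
			   unsigned long pos, unsigned long count)
{
	unsigned long maxsize;

	if (pos >= REGION_SIZE)
		return 0;
	maxsize = REGION_SIZE - pos;
	if (count > maxsize)
		count = maxsize;
	count &= ~3UL;			/* multiple of 4 bytes */
	memcpy(dst, region + pos, count);
	return count;
}

int main(void)
{
	char region[REGION_SIZE], dst[16];

	memset(region, 'x', sizeof(region));
	/* ask for 10 bytes starting 6 bytes before the end:
	 * clamped to 6, then rounded down to 4 */
	printf("%zu\n", bounded_read(dst, region, REGION_SIZE - 6, 10));
	return 0;
}
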
file:8a332d2f615cfc01e4612a9eb459628e8596c59b -> file:03d6aea19749031f53404682748ef6186ae0edf0
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -1791,6 +1791,12 @@ static struct ac97_quirk ac97_quirks[] =
.type = AC97_TUNE_HP_ONLY
},
{
+ .subvendor = 0x110a,
+ .subdevice = 0x0079,
+ .name = "Fujitsu Siemens D1289",
+ .type = AC97_TUNE_HP_ONLY
+ },
+ {
.subvendor = 0x1019,
.subdevice = 0x0a81,
.name = "ECS K7VTA3",
file:4d47bc4f74284764ec24434167371d3245009df8 -> file:10c7550393ba93be17886d637aec0ad7aaacc362
--- a/sound/soc/codecs/ak4104.c
+++ b/sound/soc/codecs/ak4104.c
@@ -90,12 +90,10 @@ static int ak4104_spi_write(struct snd_s
if (reg >= codec->reg_cache_size)
return -EINVAL;
- reg &= AK4104_REG_MASK;
- reg |= AK4104_WRITE;
-
/* only write to the hardware if value has changed */
if (cache[reg] != value) {
- u8 tmp[2] = { reg, value };
+ u8 tmp[2] = { (reg & AK4104_REG_MASK) | AK4104_WRITE, value };
+
if (spi_write(spi, tmp, sizeof(tmp))) {
dev_err(&spi->dev, "SPI write failed\n");
return -EIO;
file:2089fe7d1ba64523f2962410604fbaae38958575 -> file:9f9bcd839cba78805318bf89fa499bf9a1abd982
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -423,8 +423,8 @@ static const struct soc_enum wm8350_enum
SOC_ENUM_SINGLE(WM8350_INPUT_MIXER_VOLUME, 15, 2, wm8350_lr),
};
-static DECLARE_TLV_DB_LINEAR(pre_amp_tlv, -1200, 3525);
-static DECLARE_TLV_DB_LINEAR(out_pga_tlv, -5700, 600);
+static DECLARE_TLV_DB_SCALE(pre_amp_tlv, -1200, 3525, 0);
+static DECLARE_TLV_DB_SCALE(out_pga_tlv, -5700, 600, 0);
static DECLARE_TLV_DB_SCALE(dac_pcm_tlv, -7163, 36, 1);
static DECLARE_TLV_DB_SCALE(adc_pcm_tlv, -12700, 50, 1);
static DECLARE_TLV_DB_SCALE(out_mix_tlv, -1500, 300, 1);
file:b9ef4d9152211a7649249e0f6a96c50f484147b0 -> file:775195bf59f9b0fb9b13e3dfa57e4e8be6e06ddc
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -106,21 +106,21 @@ static void wm8400_codec_reset(struct sn
wm8400_reset_codec_reg_cache(wm8400->wm8400);
}
-static const DECLARE_TLV_DB_LINEAR(rec_mix_tlv, -1500, 600);
+static const DECLARE_TLV_DB_SCALE(rec_mix_tlv, -1500, 600, 0);
-static const DECLARE_TLV_DB_LINEAR(in_pga_tlv, -1650, 3000);
+static const DECLARE_TLV_DB_SCALE(in_pga_tlv, -1650, 3000, 0);
-static const DECLARE_TLV_DB_LINEAR(out_mix_tlv, -2100, 0);
+static const DECLARE_TLV_DB_SCALE(out_mix_tlv, -2100, 0, 0);
-static const DECLARE_TLV_DB_LINEAR(out_pga_tlv, -7300, 600);
+static const DECLARE_TLV_DB_SCALE(out_pga_tlv, -7300, 600, 0);
-static const DECLARE_TLV_DB_LINEAR(out_omix_tlv, -600, 0);
+static const DECLARE_TLV_DB_SCALE(out_omix_tlv, -600, 0, 0);
-static const DECLARE_TLV_DB_LINEAR(out_dac_tlv, -7163, 0);
+static const DECLARE_TLV_DB_SCALE(out_dac_tlv, -7163, 0, 0);
-static const DECLARE_TLV_DB_LINEAR(in_adc_tlv, -7163, 1763);
+static const DECLARE_TLV_DB_SCALE(in_adc_tlv, -7163, 1763, 0);
-static const DECLARE_TLV_DB_LINEAR(out_sidetone_tlv, -3600, 0);
+static const DECLARE_TLV_DB_SCALE(out_sidetone_tlv, -3600, 0, 0);
static int wm8400_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
@@ -439,7 +439,7 @@ static int outmixer_event (struct snd_so
/* INMIX dB values */
static const unsigned int in_mix_tlv[] = {
TLV_DB_RANGE_HEAD(1),
- 0,7, TLV_DB_LINEAR_ITEM(-1200, 600),
+ 0,7, TLV_DB_SCALE_ITEM(-1200, 600, 0),
};
/* Left In PGA Connections */
file:a9829aa26e53591a0574d44f884c5fccee5f0b29 -> file:8286c62e4df3051fe06d00073f5f861765e42f92
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -93,7 +93,6 @@ SOC_DAPM_SINGLE("Bypass Switch", WM8776_
static const struct snd_soc_dapm_widget wm8776_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("AUX"),
-SND_SOC_DAPM_INPUT("AUX"),
SND_SOC_DAPM_INPUT("AIN1"),
SND_SOC_DAPM_INPUT("AIN2"),
file:2d702db4131db36f17d854621c99767777c64c17 -> file:253159cedea627c61769e846ab91f4d3ddf641fe
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -110,21 +110,21 @@ static const u16 wm8990_reg[] = {
#define wm8990_reset(c) snd_soc_write(c, WM8990_RESET, 0)
-static const DECLARE_TLV_DB_LINEAR(rec_mix_tlv, -1500, 600);
+static const DECLARE_TLV_DB_SCALE(rec_mix_tlv, -1500, 600, 0);
-static const DECLARE_TLV_DB_LINEAR(in_pga_tlv, -1650, 3000);
+static const DECLARE_TLV_DB_SCALE(in_pga_tlv, -1650, 3000, 0);
-static const DECLARE_TLV_DB_LINEAR(out_mix_tlv, 0, -2100);
+static const DECLARE_TLV_DB_SCALE(out_mix_tlv, 0, -2100, 0);
-static const DECLARE_TLV_DB_LINEAR(out_pga_tlv, -7300, 600);
+static const DECLARE_TLV_DB_SCALE(out_pga_tlv, -7300, 600, 0);
-static const DECLARE_TLV_DB_LINEAR(out_omix_tlv, -600, 0);
+static const DECLARE_TLV_DB_SCALE(out_omix_tlv, -600, 0, 0);
-static const DECLARE_TLV_DB_LINEAR(out_dac_tlv, -7163, 0);
+static const DECLARE_TLV_DB_SCALE(out_dac_tlv, -7163, 0, 0);
-static const DECLARE_TLV_DB_LINEAR(in_adc_tlv, -7163, 1763);
+static const DECLARE_TLV_DB_SCALE(in_adc_tlv, -7163, 1763, 0);
-static const DECLARE_TLV_DB_LINEAR(out_sidetone_tlv, -3600, 0);
+static const DECLARE_TLV_DB_SCALE(out_sidetone_tlv, -3600, 0, 0);
static int wm899x_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
@@ -450,7 +450,7 @@ static int outmixer_event(struct snd_soc
/* INMIX dB values */
static const unsigned int in_mix_tlv[] = {
TLV_DB_RANGE_HEAD(1),
- 0, 7, TLV_DB_LINEAR_ITEM(-1200, 600),
+ 0, 7, TLV_DB_SCALE_ITEM(-1200, 600, 0),
};
/* Left In PGA Connections */
file:f26a1254b0bb595a1d3e260d694655e7ba662573 -> file:79633eae6439a5948b0a663ce61ea86e1f4624a6
--- a/sound/usb/usbaudio.c
+++ b/sound/usb/usbaudio.c
@@ -3326,6 +3326,32 @@ static int snd_usb_cm6206_boot_quirk(str
}
/*
+ * This call will put the synth in "USB send" mode, i.e. it will send MIDI
+ * messages through USB (this is disabled at startup). The synth will
+ * acknowledge by sending a sysex on endpoint 0x85 and by displaying a USB
+ * sign on its LCD. Values here are chosen based on sniffing USB traffic
+ * under Windows.
+ */
+static int snd_usb_accessmusic_boot_quirk(struct usb_device *dev)
+{
+ int err, actual_length;
+
+ /* "midi send" enable */
+ static const u8 seq[] = { 0x4e, 0x73, 0x52, 0x01 };
+
+ void *buf = kmemdup(seq, ARRAY_SIZE(seq), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ err = usb_interrupt_msg(dev, usb_sndintpipe(dev, 0x05), buf,
+ ARRAY_SIZE(seq), &actual_length, 1000);
+ kfree(buf);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+/*
* Setup quirks
*/
#define AUDIOPHILE_SET 0x01 /* if set, parse device_setup */
@@ -3616,6 +3642,12 @@ static void *snd_usb_audio_probe(struct
goto __err_val;
}
+ /* Access Music VirusTI Desktop */
+ if (id == USB_ID(0x133e, 0x0815)) {
+ if (snd_usb_accessmusic_boot_quirk(dev) < 0)
+ goto __err_val;
+ }
+
/*
* found a config. now register to ALSA
*/
file:0eff19ceb7e1f9d74653d80c402d3930a6be8bf8 -> file:64d8d2e6026df684a1a0c298f5aabbeb361c57f1
--- a/sound/usb/usbmidi.c
+++ b/sound/usb/usbmidi.c
@@ -931,6 +931,8 @@ static void snd_usbmidi_output_drain(str
DEFINE_WAIT(wait);
long timeout = msecs_to_jiffies(50);
+ if (ep->umidi->disconnected)
+ return;
/*
* The substream buffer is empty, but some data might still be in the
* currently active URBs, so we have to wait for those to complete.
@@ -1075,14 +1077,21 @@ static unsigned int snd_usbmidi_count_bi
* Frees an output endpoint.
* May be called when ep hasn't been initialized completely.
*/
-static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint* ep)
+static void snd_usbmidi_out_endpoint_clear(struct snd_usb_midi_out_endpoint *ep)
{
unsigned int i;
for (i = 0; i < OUTPUT_URBS; ++i)
- if (ep->urbs[i].urb)
+ if (ep->urbs[i].urb) {
free_urb_and_buffer(ep->umidi, ep->urbs[i].urb,
ep->max_transfer);
+ ep->urbs[i].urb = NULL;
+ }
+}
+
+static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint *ep)
+{
+ snd_usbmidi_out_endpoint_clear(ep);
kfree(ep);
}
@@ -1201,15 +1210,18 @@ void snd_usbmidi_disconnect(struct list_
usb_kill_urb(ep->out->urbs[j].urb);
if (umidi->usb_protocol_ops->finish_out_endpoint)
umidi->usb_protocol_ops->finish_out_endpoint(ep->out);
+ ep->out->active_urbs = 0;
+ if (ep->out->drain_urbs) {
+ ep->out->drain_urbs = 0;
+ wake_up(&ep->out->drain_wait);
+ }
}
if (ep->in)
for (j = 0; j < INPUT_URBS; ++j)
usb_kill_urb(ep->in->urbs[j]);
/* free endpoints here; later call can result in Oops */
- if (ep->out) {
- snd_usbmidi_out_endpoint_delete(ep->out);
- ep->out = NULL;
- }
+ if (ep->out)
+ snd_usbmidi_out_endpoint_clear(ep->out);
if (ep->in) {
snd_usbmidi_in_endpoint_delete(ep->in);
ep->in = NULL;
@@ -1360,6 +1372,12 @@ static struct port_info {
EXTERNAL_PORT(0x086a, 0x0001, 8, "%s Broadcast"),
EXTERNAL_PORT(0x086a, 0x0002, 8, "%s Broadcast"),
EXTERNAL_PORT(0x086a, 0x0003, 4, "%s Broadcast"),
+ /* Access Music Virus TI */
+ EXTERNAL_PORT(0x133e, 0x0815, 0, "%s MIDI"),
+ PORT_INFO(0x133e, 0x0815, 1, "%s Synth", 0,
+ SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC |
+ SNDRV_SEQ_PORT_TYPE_HARDWARE |
+ SNDRV_SEQ_PORT_TYPE_SYNTHESIZER),
};
static struct port_info *find_port_info(struct snd_usb_midi* umidi, int number)
file:f6f201eb24ceeb00145b406d6fbf18bf09d08817 -> file:391e02f5b1996ac0e829b77c6fc306faaf543add
--- a/sound/usb/usbquirks.h
+++ b/sound/usb/usbquirks.h
@@ -2050,6 +2050,33 @@ YAMAHA_DEVICE(0x7010, "UB99"),
}
},
+/* Access Music devices */
+{
+ /* VirusTI Desktop */
+ USB_DEVICE_VENDOR_SPEC(0x133e, 0x0815),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = &(const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 3,
+ .type = QUIRK_MIDI_FIXED_ENDPOINT,
+ .data = &(const struct snd_usb_midi_endpoint_info) {
+ .out_cables = 0x0003,
+ .in_cables = 0x0003
+ }
+ },
+ {
+ .ifnum = 4,
+ .type = QUIRK_IGNORE_INTERFACE
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
+
/* */
{
/* aka. Serato Scratch Live DJ Box */
file:bdd3b7ecad0a6dedbf6fc673f7daf3e40c9c49ce -> file:bd498d496952a6f78973b0e6b8422552009770d0
--- a/tools/perf/Documentation/Makefile
+++ b/tools/perf/Documentation/Makefile
@@ -24,7 +24,10 @@ DOC_MAN1=$(patsubst %.txt,%.1,$(MAN1_TXT
DOC_MAN5=$(patsubst %.txt,%.5,$(MAN5_TXT))
DOC_MAN7=$(patsubst %.txt,%.7,$(MAN7_TXT))
+# Make the path relative to DESTDIR, not prefix
+ifndef DESTDIR
prefix?=$(HOME)
+endif
bindir?=$(prefix)/bin
htmldir?=$(prefix)/share/doc/perf-doc
pdfdir?=$(prefix)/share/doc/perf-doc
@@ -32,7 +35,6 @@ mandir?=$(prefix)/share/man
man1dir=$(mandir)/man1
man5dir=$(mandir)/man5
man7dir=$(mandir)/man7
-# DESTDIR=
ASCIIDOC=asciidoc
ASCIIDOC_EXTRA = --unsafe
file:7e190d522cd5848123b0d98220e6555fd3e70c69 -> file:719d0283c9f383f565ad087aa5b7fb919ec57338
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -218,7 +218,10 @@ STRIP ?= strip
# runtime figures out where they are based on the path to the executable.
# This can help installing the suite in a relocatable way.
+# Make the path relative to DESTDIR, not to prefix
+ifndef DESTDIR
prefix = $(HOME)
+endif
bindir_relative = bin
bindir = $(prefix)/$(bindir_relative)
mandir = share/man
@@ -235,7 +238,6 @@ sysconfdir = $(prefix)/etc
ETC_PERFCONFIG = etc/perfconfig
endif
lib = lib
-# DESTDIR=
export prefix bindir sharedir sysconfdir
file:43cf3ea9e088fd86953637450036ecb56f3964fd -> file:a31a8cd67570384be6b622c8e643c5f46a79d566
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -48,6 +48,9 @@ static inline void callchain_init(struct
INIT_LIST_HEAD(&node->brothers);
INIT_LIST_HEAD(&node->children);
INIT_LIST_HEAD(&node->val);
+
+ node->parent = NULL;
+ node->hit = 0;
}
static inline u64 cumul_hits(struct callchain_node *node)
file:7495ce3473448cd45ee1dba9e0ae441c83245ca4 -> file:4f3434fbb086a2e20ea662ba28530d20040acadc
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1226,7 +1226,7 @@ skip_lpage:
/* Allocate page dirty bitmap if needed */
if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
- unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
+ unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
new.dirty_bitmap = vmalloc(dirty_bytes);
if (!new.dirty_bitmap)
@@ -1309,7 +1309,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
{
struct kvm_memory_slot *memslot;
int r, i;
- int n;
+ unsigned long n;
unsigned long any = 0;
r = -EINVAL;
@@ -1321,7 +1321,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
if (!memslot->dirty_bitmap)
goto out;
- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ n = kvm_dirty_bitmap_bytes(memslot);
for (i = 0; !any && i < n/sizeof(long); ++i)
any = memslot->dirty_bitmap[i];
@@ -1663,10 +1663,13 @@ void mark_page_dirty(struct kvm *kvm, gf
memslot = gfn_to_memslot_unaliased(kvm, gfn);
if (memslot && memslot->dirty_bitmap) {
unsigned long rel_gfn = gfn - memslot->base_gfn;
+ unsigned long *p = memslot->dirty_bitmap +
+ rel_gfn / BITS_PER_LONG;
+ int offset = rel_gfn % BITS_PER_LONG;
/* avoid RMW */
- if (!test_bit(rel_gfn, memslot->dirty_bitmap))
- set_bit(rel_gfn, memslot->dirty_bitmap);
+ if (!test_bit(offset, p))
+ set_bit(offset, p);
}
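
For illustration only (not part of the patch): mark_page_dirty() now splits the dirty-page index into a long-word pointer plus a bit offset before testing and setting it. The small sketch below shows the same word/offset arithmetic on an ordinary array; set_bitmap_bit is an invented name.

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(long))

/* Illustrative only: decompose a bit index into the containing long word
 * and the bit position inside that word before testing/setting it. */
static void set_bitmap_bit(unsigned long *bitmap, unsigned long bit)
{
	unsigned long *word = bitmap + bit / BITS_PER_LONG;
	unsigned int offset = bit % BITS_PER_LONG;

	if (!(*word & (1UL << offset)))	/* avoid a needless RMW */
		*word |= 1UL << offset;
}

int main(void)
{
	unsigned long bitmap[2] = { 0, 0 };

	set_bitmap_bit(bitmap, 3);
	set_bitmap_bit(bitmap, BITS_PER_LONG + 1);
	printf("%lx %lx\n", bitmap[0], bitmap[1]);	/* prints "8 2" */
	return 0;
}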
}