author    Thomas Gleixner <tglx@linutronix.de>    2010-04-27 11:11:07 +0200
committer Thomas Gleixner <tglx@linutronix.de>    2010-04-27 11:11:07 +0200
commit    846bcf3923056965c3fc0c9ba811c10d25af75e5 (patch)
tree      c39bd4273a006da2fb1f727ebb7eb1108f5526c8
parent    863c3ad87b10617464fc52c21fc7e31987910559 (diff)
parent    3e7ad8ed979ce9e3ad897dd87c50bf577966f89c (diff)
download  lwn-846bcf3923056965c3fc0c9ba811c10d25af75e5.tar.gz
          lwn-846bcf3923056965c3fc0c9ba811c10d25af75e5.zip
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-2.6.33.y into rt/2.6.33

Conflicts:
	Makefile
	arch/x86/include/asm/rwsem.h

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  Documentation/i2c/busses/i2c-i801 | 3
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/boot/compressed/head.S | 2
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c | 9
-rw-r--r--  arch/powerpc/kvm/book3s.c | 5
-rw-r--r--  arch/powerpc/platforms/pseries/offline_states.h | 22
-rw-r--r--  arch/s390/mm/vmem.c | 11
-rw-r--r--  arch/sh/include/asm/elf.h | 6
-rw-r--r--  arch/sh/kernel/smp.c | 1
-rw-r--r--  arch/sparc/kernel/ptrace_32.c | 4
-rw-r--r--  arch/sparc/kernel/ptrace_64.c | 4
-rw-r--r--  arch/um/sys-x86_64/Makefile | 3
-rw-r--r--  arch/x86/Kconfig.cpu | 2
-rw-r--r--  arch/x86/include/asm/rwsem.h | 81
-rw-r--r--  arch/x86/include/asm/smp.h | 9
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 6
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c | 5
-rw-r--r--  arch/x86/kernel/aperture_64.c | 15
-rw-r--r--  arch/x86/kernel/apic/apic.c | 2
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 250
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 104
-rw-r--r--  arch/x86/kernel/crash.c | 6
-rw-r--r--  arch/x86/kernel/hpet.c | 8
-rw-r--r--  arch/x86/kernel/kgdb.c | 2
-rw-r--r--  arch/x86/kernel/mpparse.c | 4
-rw-r--r--  arch/x86/kernel/pci-gart_64.c | 3
-rw-r--r--  arch/x86/kvm/mmu.c | 11
-rw-r--r--  arch/x86/kvm/svm.c | 25
-rw-r--r--  arch/x86/kvm/vmx.c | 37
-rw-r--r--  arch/x86/kvm/x86.c | 45
-rw-r--r--  arch/x86/lib/Makefile | 3
-rw-r--r--  arch/x86/lib/cache-smp.c | 19
-rw-r--r--  arch/x86/lib/rwsem_64.S | 81
-rw-r--r--  arch/x86/pci/irq.c | 2
-rw-r--r--  arch/x86/power/hibernate_asm_32.S | 15
-rw-r--r--  drivers/acpi/acpica/exprep.c | 17
-rw-r--r--  drivers/acpi/ec.c | 35
-rw-r--r--  drivers/ata/ahci.c | 6
-rw-r--r--  drivers/ata/ata_piix.c | 8
-rw-r--r--  drivers/ata/libata-core.c | 3
-rw-r--r--  drivers/ata/pata_via.c | 1
-rw-r--r--  drivers/char/agp/intel-agp.c | 15
-rw-r--r--  drivers/char/raw.c | 1
-rw-r--r--  drivers/char/tty_io.c | 2
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 1
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 11
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_tv.c | 29
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 2
-rw-r--r--  drivers/hid/hid-gyration.c | 5
-rw-r--r--  drivers/hwmon/sht15.c | 13
-rw-r--r--  drivers/i2c/busses/Kconfig | 5
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 5
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 2
-rw-r--r--  drivers/input/sparse-keymap.c | 2
-rw-r--r--  drivers/input/tablet/wacom_sys.c | 12
-rw-r--r--  drivers/md/dm-ioctl.c | 19
-rw-r--r--  drivers/md/dm.c | 7
-rw-r--r--  drivers/md/dm.h | 4
-rw-r--r--  drivers/md/linear.c | 12
-rw-r--r--  drivers/md/multipath.c | 20
-rw-r--r--  drivers/md/raid0.c | 13
-rw-r--r--  drivers/md/raid10.c | 28
-rw-r--r--  drivers/net/e1000e/netdev.c | 2
-rw-r--r--  drivers/net/r8169.c | 4
-rw-r--r--  drivers/net/wireless/ath/ar9170/usb.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 3
-rw-r--r--  drivers/net/wireless/b43/Kconfig | 6
-rw-r--r--  drivers/net/wireless/b43/Makefile | 2
-rw-r--r--  drivers/net/wireless/b43/b43.h | 12
-rw-r--r--  drivers/net/wireless/b43/dma.c | 2
-rw-r--r--  drivers/net/wireless/b43/main.c | 11
-rw-r--r--  drivers/net/wireless/b43/pio.h | 40
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.c | 11
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-scan.c | 27
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c | 4
-rw-r--r--  drivers/pci/pci.c | 24
-rw-r--r--  drivers/scsi/libiscsi.c | 5
-rw-r--r--  drivers/usb/class/cdc-acm.c | 43
-rw-r--r--  drivers/usb/class/cdc-acm.h | 1
-rw-r--r--  drivers/video/backlight/mbp_nvidia_bl.c | 45
-rw-r--r--  drivers/video/sunxvr500.c | 24
-rw-r--r--  drivers/watchdog/hpwdt.c | 2
-rw-r--r--  drivers/watchdog/iTCO_wdt.c | 99
-rw-r--r--  fs/9p/vfs_file.c | 2
-rw-r--r--  fs/block_dev.c | 3
-rw-r--r--  fs/cifs/cifssmb.c | 19
-rw-r--r--  fs/ecryptfs/inode.c | 108
-rw-r--r--  fs/ecryptfs/super.c | 1
-rw-r--r--  fs/ext4/ext4.h | 6
-rw-r--r--  fs/ext4/extents.c | 2
-rw-r--r--  fs/ext4/inode.c | 2
-rw-r--r--  fs/fat/namei_vfat.c | 6
-rw-r--r--  fs/nfs/client.c | 3
-rw-r--r--  fs/nfs/dir.c | 2
-rw-r--r--  fs/nfs/nfs4proc.c | 4
-rw-r--r--  fs/nfsd/nfs4xdr.c | 12
-rw-r--r--  fs/ocfs2/acl.c | 77
-rw-r--r--  fs/ocfs2/suballoc.c | 13
-rw-r--r--  fs/proc/base.c | 5
-rw-r--r--  fs/quota/dquot.c | 12
-rw-r--r--  fs/reiserfs/super.c | 10
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 93
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c | 10
-rw-r--r--  fs/xfs/xfs_iget.c | 19
-rw-r--r--  include/drm/drm_pciids.h | 2
-rw-r--r--  include/linux/dm-ioctl.h | 9
-rw-r--r--  include/linux/freezer.h | 7
-rw-r--r--  include/linux/fs.h | 1
-rw-r--r--  include/linux/kfifo.h | 3
-rw-r--r--  include/linux/kvm_host.h | 7
-rw-r--r--  include/linux/module.h | 2
-rw-r--r--  include/linux/nfs_fs_sb.h | 1
-rw-r--r--  include/linux/pci.h | 5
-rw-r--r--  include/linux/pci_ids.h | 3
-rw-r--r--  kernel/cgroup_freezer.c | 9
-rw-r--r--  kernel/irq/manage.c | 10
-rw-r--r--  kernel/lockdep.c | 6
-rw-r--r--  kernel/module.c | 2
-rw-r--r--  kernel/power/process.c | 2
-rw-r--r--  kernel/sched.c | 10
-rw-r--r--  mm/readahead.c | 2
-rw-r--r--  net/mac80211/mesh.c | 3
-rw-r--r--  net/mac80211/mesh_hwmp.c | 4
-rw-r--r--  net/mac80211/rx.c | 5
-rw-r--r--  net/mac80211/tx.c | 6
-rw-r--r--  net/mac80211/util.c | 20
-rw-r--r--  sound/pci/hda/hda_intel.c | 10
-rw-r--r--  sound/pci/hda/patch_analog.c | 8
-rw-r--r--  sound/pci/hda/patch_realtek.c | 1
-rw-r--r--  sound/pci/mixart/mixart.c | 24
-rw-r--r--  sound/usb/usbmidi.c | 24
-rw-r--r--  virt/kvm/kvm_main.c | 13
144 files changed, 1492 insertions(+), 681 deletions(-)
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index 81c0c59a60ea..e1bb5b261693 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -15,7 +15,8 @@ Supported adapters:
* Intel 82801I (ICH9)
* Intel EP80579 (Tolapai)
* Intel 82801JI (ICH10)
- * Intel PCH
+ * Intel 3400/5 Series (PCH)
+ * Intel Cougar Point (PCH)
Datasheets: Publicly available at the Intel website
Authors:
diff --git a/Makefile b/Makefile
index 70d0b75bfb3a..9991903de8b7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 33
-EXTRAVERSION = .2-rt13
+EXTRAVERSION = .3-rt13
NAME = Man-Eating Seals of Antiquity
# *DOCUMENTATION*
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 6b84a0471171..cbeb6e0de5cd 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -172,7 +172,7 @@ not_angel:
adr r0, LC0
ARM( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip, sp})
THUMB( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip} )
- THUMB( ldr sp, [r0, #28] )
+ THUMB( ldr sp, [r0, #32] )
subs r0, r0, r1 @ calculate the delta offset
@ if delta is zero, we are
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 5fdeec5fddcf..d76279aaaea1 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1794,7 +1794,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
{
struct kvm_memory_slot *memslot;
int r, i;
- long n, base;
+ long base;
+ unsigned long n;
unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
@@ -1807,7 +1808,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
if (!memslot->dirty_bitmap)
goto out;
- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ n = kvm_dirty_bitmap_bytes(memslot);
base = memslot->base_gfn / BITS_PER_LONG;
for (i = 0; i < n/sizeof(long); ++i) {
@@ -1823,7 +1824,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log)
{
int r;
- int n;
+ unsigned long n;
struct kvm_memory_slot *memslot;
int is_dirty = 0;
@@ -1841,7 +1842,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
if (is_dirty) {
kvm_flush_remote_tlbs(kvm);
memslot = &kvm->memslots[log->slot];
- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ n = kvm_dirty_bitmap_bytes(memslot);
memset(memslot->dirty_bitmap, 0, n);
}
r = 0;
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 3e294bd9b8c6..e6dc59558fc1 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -848,7 +848,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_vcpu *vcpu;
ulong ga, ga_end;
int is_dirty = 0;
- int r, n;
+ int r;
+ unsigned long n;
down_write(&kvm->slots_lock);
@@ -866,7 +867,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
kvm_for_each_vcpu(n, vcpu, kvm)
kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ n = kvm_dirty_bitmap_bytes(memslot);
memset(memslot->dirty_bitmap, 0, n);
}
diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h
index 22574e0d9d91..202d8692c6a2 100644
--- a/arch/powerpc/platforms/pseries/offline_states.h
+++ b/arch/powerpc/platforms/pseries/offline_states.h
@@ -9,10 +9,30 @@ enum cpu_state_vals {
CPU_MAX_OFFLINE_STATES
};
+#ifdef CONFIG_HOTPLUG_CPU
extern enum cpu_state_vals get_cpu_current_state(int cpu);
extern void set_cpu_current_state(int cpu, enum cpu_state_vals state);
-extern enum cpu_state_vals get_preferred_offline_state(int cpu);
extern void set_preferred_offline_state(int cpu, enum cpu_state_vals state);
extern void set_default_offline_state(int cpu);
+#else
+static inline enum cpu_state_vals get_cpu_current_state(int cpu)
+{
+ return CPU_STATE_ONLINE;
+}
+
+static inline void set_cpu_current_state(int cpu, enum cpu_state_vals state)
+{
+}
+
+static inline void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
+{
+}
+
+static inline void set_default_offline_state(int cpu)
+{
+}
+#endif
+
+extern enum cpu_state_vals get_preferred_offline_state(int cpu);
extern int start_secondary(void);
#endif
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 300ab012b0fd..5f91a38d7592 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -70,12 +70,8 @@ static pte_t __ref *vmem_pte_alloc(void)
pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
if (!pte)
return NULL;
- if (MACHINE_HAS_HPAGE)
- clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY | _PAGE_CO,
- PTRS_PER_PTE * sizeof(pte_t));
- else
- clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
- PTRS_PER_PTE * sizeof(pte_t));
+ clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
+ PTRS_PER_PTE * sizeof(pte_t));
return pte;
}
@@ -116,8 +112,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
(address + HPAGE_SIZE <= start + size) &&
(address >= HPAGE_SIZE)) {
- pte_val(pte) |= _SEGMENT_ENTRY_LARGE |
- _SEGMENT_ENTRY_CO;
+ pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
pmd_val(*pm_dir) = pte_val(pte);
address += HPAGE_SIZE - PAGE_SIZE;
continue;
diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
index ac04255022b6..ce830faeebbf 100644
--- a/arch/sh/include/asm/elf.h
+++ b/arch/sh/include/asm/elf.h
@@ -211,7 +211,9 @@ extern void __kernel_vsyscall;
#define VSYSCALL_AUX_ENT \
if (vdso_enabled) \
- NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
+ else \
+ NEW_AUX_ENT(AT_IGNORE, 0);
#else
#define VSYSCALL_AUX_ENT
#endif /* CONFIG_VSYSCALL */
@@ -219,7 +221,7 @@ extern void __kernel_vsyscall;
#ifdef CONFIG_SH_FPU
#define FPU_AUX_ENT NEW_AUX_ENT(AT_FPUCW, FPSCR_INIT)
#else
-#define FPU_AUX_ENT
+#define FPU_AUX_ENT NEW_AUX_ENT(AT_IGNORE, 0)
#endif
extern int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 983e0792d5f3..1d19c199c73d 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -69,6 +69,7 @@ asmlinkage void __cpuinit start_secondary(void)
unsigned int cpu;
struct mm_struct *mm = &init_mm;
+ enable_mmu();
atomic_inc(&mm->mm_count);
atomic_inc(&mm->mm_users);
current->active_mm = mm;
diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c
index 7e3dfd9bb97e..e608f397e11f 100644
--- a/arch/sparc/kernel/ptrace_32.c
+++ b/arch/sparc/kernel/ptrace_32.c
@@ -65,6 +65,7 @@ static int genregs32_get(struct task_struct *target,
*k++ = regs->u_regs[pos++];
reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
+ reg_window -= 16;
for (; count > 0 && pos < 32; count--) {
if (get_user(*k++, &reg_window[pos++]))
return -EFAULT;
@@ -76,6 +77,7 @@ static int genregs32_get(struct task_struct *target,
}
reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
+ reg_window -= 16;
for (; count > 0 && pos < 32; count--) {
if (get_user(reg, &reg_window[pos++]) ||
put_user(reg, u++))
@@ -141,6 +143,7 @@ static int genregs32_set(struct task_struct *target,
regs->u_regs[pos++] = *k++;
reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
+ reg_window -= 16;
for (; count > 0 && pos < 32; count--) {
if (put_user(*k++, &reg_window[pos++]))
return -EFAULT;
@@ -153,6 +156,7 @@ static int genregs32_set(struct task_struct *target,
}
reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
+ reg_window -= 16;
for (; count > 0 && pos < 32; count--) {
if (get_user(reg, u++) ||
put_user(reg, &reg_window[pos++]))
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 2f6524d1a817..aa90da08bf61 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -492,6 +492,7 @@ static int genregs32_get(struct task_struct *target,
*k++ = regs->u_regs[pos++];
reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
+ reg_window -= 16;
if (target == current) {
for (; count > 0 && pos < 32; count--) {
if (get_user(*k++, &reg_window[pos++]))
@@ -516,6 +517,7 @@ static int genregs32_get(struct task_struct *target,
}
reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
+ reg_window -= 16;
if (target == current) {
for (; count > 0 && pos < 32; count--) {
if (get_user(reg, &reg_window[pos++]) ||
@@ -599,6 +601,7 @@ static int genregs32_set(struct task_struct *target,
regs->u_regs[pos++] = *k++;
reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
+ reg_window -= 16;
if (target == current) {
for (; count > 0 && pos < 32; count--) {
if (put_user(*k++, &reg_window[pos++]))
@@ -625,6 +628,7 @@ static int genregs32_set(struct task_struct *target,
}
reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
+ reg_window -= 16;
if (target == current) {
for (; count > 0 && pos < 32; count--) {
if (get_user(reg, u++) ||
diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile
index 2201e9c20e4a..c1ea9eb04466 100644
--- a/arch/um/sys-x86_64/Makefile
+++ b/arch/um/sys-x86_64/Makefile
@@ -8,7 +8,8 @@ obj-y = bug.o bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \
setjmp.o signal.o stub.o stub_segv.o syscalls.o syscall_table.o \
sysrq.o ksyms.o tls.o
-subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o
+subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o \
+ lib/rwsem_64.o
subarch-obj-$(CONFIG_MODULES) += kernel/module.o
ldt-y = ../sys-i386/ldt.o
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index f20ddf84a893..a19829374e6a 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -319,7 +319,7 @@ config X86_L1_CACHE_SHIFT
config X86_XADD
def_bool y
- depends on X86_32 && !M386
+ depends on X86_64 || !M386
config X86_PPRO_FENCE
bool "PentiumPro memory ordering errata workaround"
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 605191838e02..42f8a37db6fb 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -41,6 +41,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/lockdep.h>
+#include <asm/asm.h>
struct rwsem_waiter;
@@ -55,17 +56,28 @@ extern asmregparm struct rw_anon_semaphore *
/*
* the semaphore definition
+ *
+ * The bias values and the counter type limits the number of
+ * potential readers/writers to 32767 for 32 bits and 2147483647
+ * for 64 bits.
*/
-#define RWSEM_UNLOCKED_VALUE 0x00000000
-#define RWSEM_ACTIVE_BIAS 0x00000001
-#define RWSEM_ACTIVE_MASK 0x0000ffff
-#define RWSEM_WAITING_BIAS (-0x00010000)
+#ifdef CONFIG_X86_64
+# define RWSEM_ACTIVE_MASK 0xffffffffL
+#else
+# define RWSEM_ACTIVE_MASK 0x0000ffffL
+#endif
+
+#define RWSEM_UNLOCKED_VALUE 0x00000000L
+#define RWSEM_ACTIVE_BIAS 0x00000001L
+#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+typedef signed long rwsem_count_t;
+
struct rw_anon_semaphore {
- signed long count;
+ rwsem_count_t count;
raw_spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -104,7 +116,7 @@ do { \
static inline void __down_read(struct rw_anon_semaphore *sem)
{
asm volatile("# beginning down_read\n\t"
- LOCK_PREFIX " incl (%%eax)\n\t"
+ LOCK_PREFIX _ASM_INC "(%1)\n\t"
/* adds 0x00000001, returns the old value */
" jns 1f\n"
" call call_rwsem_down_read_failed\n"
@@ -120,14 +132,14 @@ static inline void __down_read(struct rw_anon_semaphore *sem)
*/
static inline int __down_read_trylock(struct rw_anon_semaphore *sem)
{
- __s32 result, tmp;
+ rwsem_count_t result, tmp;
asm volatile("# beginning __down_read_trylock\n\t"
- " movl %0,%1\n\t"
+ " mov %0,%1\n\t"
"1:\n\t"
- " movl %1,%2\n\t"
- " addl %3,%2\n\t"
+ " mov %1,%2\n\t"
+ " add %3,%2\n\t"
" jle 2f\n\t"
- LOCK_PREFIX " cmpxchgl %2,%0\n\t"
+ LOCK_PREFIX " cmpxchg %2,%0\n\t"
" jnz 1b\n\t"
"2:\n\t"
"# ending __down_read_trylock\n\t"
@@ -143,13 +155,13 @@ static inline int __down_read_trylock(struct rw_anon_semaphore *sem)
static inline void
__down_write_nested(struct rw_anon_semaphore *sem, int subclass)
{
- int tmp;
+ rwsem_count_t tmp;
tmp = RWSEM_ACTIVE_WRITE_BIAS;
asm volatile("# beginning down_write\n\t"
- LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
/* subtract 0x0000ffff, returns the old value */
- " testl %%edx,%%edx\n\t"
+ " test %1,%1\n\t"
/* was the count 0 before? */
" jz 1f\n"
" call call_rwsem_down_write_failed\n"
@@ -170,9 +182,9 @@ static inline void __down_write(struct rw_anon_semaphore *sem)
*/
static inline int __down_write_trylock(struct rw_anon_semaphore *sem)
{
- signed long ret = cmpxchg(&sem->count,
- RWSEM_UNLOCKED_VALUE,
- RWSEM_ACTIVE_WRITE_BIAS);
+ rwsem_count_t ret = cmpxchg(&sem->count,
+ RWSEM_UNLOCKED_VALUE,
+ RWSEM_ACTIVE_WRITE_BIAS);
if (ret == RWSEM_UNLOCKED_VALUE)
return 1;
return 0;
@@ -183,9 +195,9 @@ static inline int __down_write_trylock(struct rw_anon_semaphore *sem)
*/
static inline void __up_read(struct rw_anon_semaphore *sem)
{
- __s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
+ rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
asm volatile("# beginning __up_read\n\t"
- LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
/* subtracts 1, returns the old value */
" jns 1f\n\t"
" call call_rwsem_wake\n"
@@ -201,18 +213,18 @@ static inline void __up_read(struct rw_anon_semaphore *sem)
*/
static inline void __up_write(struct rw_anon_semaphore *sem)
{
+ rwsem_count_t tmp;
asm volatile("# beginning __up_write\n\t"
- " movl %2,%%edx\n\t"
- LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
/* tries to transition
0xffff0001 -> 0x00000000 */
" jz 1f\n"
" call call_rwsem_wake\n"
"1:\n\t"
"# ending __up_write\n"
- : "+m" (sem->count)
- : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
- : "memory", "cc", "edx");
+ : "+m" (sem->count), "=d" (tmp)
+ : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS)
+ : "memory", "cc");
}
/*
@@ -221,33 +233,38 @@ static inline void __up_write(struct rw_anon_semaphore *sem)
static inline void __downgrade_write(struct rw_anon_semaphore *sem)
{
asm volatile("# beginning __downgrade_write\n\t"
- LOCK_PREFIX " addl %2,(%%eax)\n\t"
- /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
+ LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
+ /*
+ * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
+ * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
+ */
" jns 1f\n\t"
" call call_rwsem_downgrade_wake\n"
"1:\n\t"
"# ending __downgrade_write\n"
: "+m" (sem->count)
- : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
+ : "a" (sem), "er" (-RWSEM_WAITING_BIAS)
: "memory", "cc");
}
/*
* implement atomic add functionality
*/
-static inline void rwsem_atomic_add(int delta, struct rw_anon_semaphore *sem)
+static inline void rwsem_atomic_add(rwsem_count_t delta,
+ struct rw_anon_semaphore *sem)
{
- asm volatile(LOCK_PREFIX "addl %1,%0"
+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
: "+m" (sem->count)
- : "ir" (delta));
+ : "er" (delta));
}
/*
* implement exchange and add functionality
*/
-static inline int rwsem_atomic_update(int delta, struct rw_anon_semaphore *sem)
+static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
+ struct rw_anon_semaphore *sem)
{
- int tmp = delta;
+ rwsem_count_t tmp = delta;
asm volatile(LOCK_PREFIX "xadd %0,%1"
: "+r" (tmp), "+m" (sem->count)
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 1e796782cd7b..4cfc90824068 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -135,6 +135,8 @@ int native_cpu_disable(void);
void native_cpu_die(unsigned int cpu);
void native_play_dead(void);
void play_dead_common(void);
+void wbinvd_on_cpu(int cpu);
+int wbinvd_on_all_cpus(void);
void native_send_call_func_ipi(const struct cpumask *mask);
void native_send_call_func_single_ipi(int cpu);
@@ -147,6 +149,13 @@ static inline int num_booting_cpus(void)
{
return cpumask_weight(cpu_callout_mask);
}
+#else /* !CONFIG_SMP */
+#define wbinvd_on_cpu(cpu) wbinvd()
+static inline int wbinvd_on_all_cpus(void)
+{
+ wbinvd();
+ return 0;
+}
#endif /* CONFIG_SMP */
extern unsigned disabled_cpus __cpuinitdata;
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index adb0ba025702..2e775169bdf0 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -2298,7 +2298,7 @@ static void cleanup_domain(struct protection_domain *domain)
list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
struct device *dev = dev_data->dev;
- do_detach(dev);
+ __detach_device(dev);
atomic_set(&dev_data->bind, 0);
}
@@ -2379,9 +2379,7 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
free_pagetable(domain);
- domain_id_free(domain->id);
-
- kfree(domain);
+ protection_domain_free(domain);
dom->priv = NULL;
}
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 9dc91b431470..883d61990623 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -1288,6 +1288,8 @@ static int __init amd_iommu_init(void)
if (ret)
goto free;
+ enable_iommus();
+
if (iommu_pass_through)
ret = amd_iommu_init_passthrough();
else
@@ -1300,8 +1302,6 @@ static int __init amd_iommu_init(void)
amd_iommu_init_notifier();
- enable_iommus();
-
if (iommu_pass_through)
goto out;
@@ -1315,6 +1315,7 @@ out:
return ret;
free:
+ disable_iommus();
amd_iommu_uninit_devices();
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index f147a95fd84a..19f2c703638e 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -394,6 +394,7 @@ void __init gart_iommu_hole_init(void)
for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
int bus;
int dev_base, dev_limit;
+ u32 ctl;
bus = bus_dev_ranges[i].bus;
dev_base = bus_dev_ranges[i].dev_base;
@@ -407,7 +408,19 @@ void __init gart_iommu_hole_init(void)
gart_iommu_aperture = 1;
x86_init.iommu.iommu_init = gart_iommu_init;
- aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7;
+ ctl = read_pci_config(bus, slot, 3,
+ AMD64_GARTAPERTURECTL);
+
+ /*
+ * Before we do anything else disable the GART. It may
+ * still be enabled if we boot into a crash-kernel here.
+ * Reconfiguring the GART while it is enabled could have
+ * unknown side-effects.
+ */
+ ctl &= ~GARTEN;
+ write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
+
+ aper_order = (ctl >> 1) & 7;
aper_size = (32 * 1024 * 1024) << aper_order;
aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
aper_base <<= 25;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index dfca210f6a10..d4df51725290 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1640,8 +1640,10 @@ int __init APIC_init_uniprocessor(void)
}
#endif
+#ifndef CONFIG_SMP
enable_IR_x2apic();
default_setup_apic_routing();
+#endif
verify_local_APIC();
connect_bsp_APIC();
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index fc6c8ef92dcc..d440123c556f 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -18,6 +18,7 @@
#include <asm/processor.h>
#include <linux/smp.h>
#include <asm/k8.h>
+#include <asm/smp.h>
#define LVL_1_INST 1
#define LVL_1_DATA 2
@@ -150,7 +151,8 @@ struct _cpuid4_info {
union _cpuid4_leaf_ebx ebx;
union _cpuid4_leaf_ecx ecx;
unsigned long size;
- unsigned long can_disable;
+ bool can_disable;
+ unsigned int l3_indices;
DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};
@@ -160,7 +162,8 @@ struct _cpuid4_info_regs {
union _cpuid4_leaf_ebx ebx;
union _cpuid4_leaf_ecx ecx;
unsigned long size;
- unsigned long can_disable;
+ bool can_disable;
+ unsigned int l3_indices;
};
unsigned short num_cache_leaves;
@@ -290,6 +293,36 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
(ebx->split.ways_of_associativity + 1) - 1;
}
+struct _cache_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct _cpuid4_info *, char *);
+ ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
+};
+
+#ifdef CONFIG_CPU_SUP_AMD
+static unsigned int __cpuinit amd_calc_l3_indices(void)
+{
+ /*
+ * We're called over smp_call_function_single() and therefore
+ * are on the correct cpu.
+ */
+ int cpu = smp_processor_id();
+ int node = cpu_to_node(cpu);
+ struct pci_dev *dev = node_to_k8_nb_misc(node);
+ unsigned int sc0, sc1, sc2, sc3;
+ u32 val = 0;
+
+ pci_read_config_dword(dev, 0x1C4, &val);
+
+ /* calculate subcache sizes */
+ sc0 = !(val & BIT(0));
+ sc1 = !(val & BIT(4));
+ sc2 = !(val & BIT(8)) + !(val & BIT(9));
+ sc3 = !(val & BIT(12)) + !(val & BIT(13));
+
+ return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
+}
+
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
@@ -299,12 +332,103 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
if (boot_cpu_data.x86 == 0x11)
return;
- /* see erratum #382 */
- if ((boot_cpu_data.x86 == 0x10) && (boot_cpu_data.x86_model < 0x8))
+ /* see errata #382 and #388 */
+ if ((boot_cpu_data.x86 == 0x10) &&
+ ((boot_cpu_data.x86_model < 0x8) ||
+ (boot_cpu_data.x86_mask < 0x1)))
return;
- this_leaf->can_disable = 1;
+ this_leaf->can_disable = true;
+ this_leaf->l3_indices = amd_calc_l3_indices();
+}
+
+static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
+ unsigned int index)
+{
+ int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+ int node = amd_get_nb_id(cpu);
+ struct pci_dev *dev = node_to_k8_nb_misc(node);
+ unsigned int reg = 0;
+
+ if (!this_leaf->can_disable)
+ return -EINVAL;
+
+ if (!dev)
+ return -EINVAL;
+
+ pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
+ return sprintf(buf, "0x%08x\n", reg);
+}
+
+#define SHOW_CACHE_DISABLE(index) \
+static ssize_t \
+show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
+{ \
+ return show_cache_disable(this_leaf, buf, index); \
+}
+SHOW_CACHE_DISABLE(0)
+SHOW_CACHE_DISABLE(1)
+
+static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
+ const char *buf, size_t count, unsigned int index)
+{
+ int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+ int node = amd_get_nb_id(cpu);
+ struct pci_dev *dev = node_to_k8_nb_misc(node);
+ unsigned long val = 0;
+
+#define SUBCACHE_MASK (3UL << 20)
+#define SUBCACHE_INDEX 0xfff
+
+ if (!this_leaf->can_disable)
+ return -EINVAL;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!dev)
+ return -EINVAL;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ /* do not allow writes outside of allowed bits */
+ if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
+ ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
+ return -EINVAL;
+
+ val |= BIT(30);
+ pci_write_config_dword(dev, 0x1BC + index * 4, val);
+ /*
+ * We need to WBINVD on a core on the node containing the L3 cache which
+ * indices we disable therefore a simple wbinvd() is not sufficient.
+ */
+ wbinvd_on_cpu(cpu);
+ pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
+ return count;
+}
+
+#define STORE_CACHE_DISABLE(index) \
+static ssize_t \
+store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
+ const char *buf, size_t count) \
+{ \
+ return store_cache_disable(this_leaf, buf, count, index); \
}
+STORE_CACHE_DISABLE(0)
+STORE_CACHE_DISABLE(1)
+
+static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
+ show_cache_disable_0, store_cache_disable_0);
+static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
+ show_cache_disable_1, store_cache_disable_1);
+
+#else /* CONFIG_CPU_SUP_AMD */
+static void __cpuinit
+amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
+{
+};
+#endif /* CONFIG_CPU_SUP_AMD */
static int
__cpuinit cpuid4_cache_lookup_regs(int index,
@@ -711,82 +835,6 @@ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
#define to_object(k) container_of(k, struct _index_kobject, kobj)
#define to_attr(a) container_of(a, struct _cache_attr, attr)
-static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
- unsigned int index)
-{
- int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
- int node = cpu_to_node(cpu);
- struct pci_dev *dev = node_to_k8_nb_misc(node);
- unsigned int reg = 0;
-
- if (!this_leaf->can_disable)
- return -EINVAL;
-
- if (!dev)
- return -EINVAL;
-
- pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
- return sprintf(buf, "%x\n", reg);
-}
-
-#define SHOW_CACHE_DISABLE(index) \
-static ssize_t \
-show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
-{ \
- return show_cache_disable(this_leaf, buf, index); \
-}
-SHOW_CACHE_DISABLE(0)
-SHOW_CACHE_DISABLE(1)
-
-static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
- const char *buf, size_t count, unsigned int index)
-{
- int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
- int node = cpu_to_node(cpu);
- struct pci_dev *dev = node_to_k8_nb_misc(node);
- unsigned long val = 0;
- unsigned int scrubber = 0;
-
- if (!this_leaf->can_disable)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (!dev)
- return -EINVAL;
-
- if (strict_strtoul(buf, 10, &val) < 0)
- return -EINVAL;
-
- val |= 0xc0000000;
-
- pci_read_config_dword(dev, 0x58, &scrubber);
- scrubber &= ~0x1f000000;
- pci_write_config_dword(dev, 0x58, scrubber);
-
- pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
- wbinvd();
- pci_write_config_dword(dev, 0x1BC + index * 4, val);
- return count;
-}
-
-#define STORE_CACHE_DISABLE(index) \
-static ssize_t \
-store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
- const char *buf, size_t count) \
-{ \
- return store_cache_disable(this_leaf, buf, count, index); \
-}
-STORE_CACHE_DISABLE(0)
-STORE_CACHE_DISABLE(1)
-
-struct _cache_attr {
- struct attribute attr;
- ssize_t (*show)(struct _cpuid4_info *, char *);
- ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
-};
-
#define define_one_ro(_name) \
static struct _cache_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)
@@ -801,23 +849,28 @@ define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);
-static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
- show_cache_disable_0, store_cache_disable_0);
-static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
- show_cache_disable_1, store_cache_disable_1);
+#define DEFAULT_SYSFS_CACHE_ATTRS \
+ &type.attr, \
+ &level.attr, \
+ &coherency_line_size.attr, \
+ &physical_line_partition.attr, \
+ &ways_of_associativity.attr, \
+ &number_of_sets.attr, \
+ &size.attr, \
+ &shared_cpu_map.attr, \
+ &shared_cpu_list.attr
static struct attribute *default_attrs[] = {
- &type.attr,
- &level.attr,
- &coherency_line_size.attr,
- &physical_line_partition.attr,
- &ways_of_associativity.attr,
- &number_of_sets.attr,
- &size.attr,
- &shared_cpu_map.attr,
- &shared_cpu_list.attr,
+ DEFAULT_SYSFS_CACHE_ATTRS,
+ NULL
+};
+
+static struct attribute *default_l3_attrs[] = {
+ DEFAULT_SYSFS_CACHE_ATTRS,
+#ifdef CONFIG_CPU_SUP_AMD
&cache_disable_0.attr,
&cache_disable_1.attr,
+#endif
NULL
};
@@ -908,6 +961,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
unsigned int cpu = sys_dev->id;
unsigned long i, j;
struct _index_kobject *this_object;
+ struct _cpuid4_info *this_leaf;
int retval;
retval = cpuid4_cache_sysfs_init(cpu);
@@ -926,6 +980,14 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
this_object = INDEX_KOBJECT_PTR(cpu, i);
this_object->cpu = cpu;
this_object->index = i;
+
+ this_leaf = CPUID4_INFO_IDX(cpu, i);
+
+ if (this_leaf->can_disable)
+ ktype_cache.default_attrs = default_l3_attrs;
+ else
+ ktype_cache.default_attrs = default_attrs;
+
retval = kobject_init_and_add(&(this_object->kobj),
&ktype_cache,
per_cpu(ici_cache_kobject, cpu),
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 98819b32bb5f..c7ca8e26aa60 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -245,6 +245,97 @@ static u64 __read_mostly hw_cache_event_ids
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
+static const u64 westmere_hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
+ [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */
+ [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
+ [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
+ },
+ },
+ [ C(L1I ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
+ [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
+ [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
+ [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
+ [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
+ },
+ },
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
+ [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */
+ [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
+ [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(BPU ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
+ [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
static __initconst u64 nehalem_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -2118,6 +2209,7 @@ static __init int intel_pmu_init(void)
* Install the hw-cache-events table:
*/
switch (boot_cpu_data.x86_model) {
+
case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
@@ -2129,7 +2221,9 @@ static __init int intel_pmu_init(void)
event_constraints = intel_core_event_constraints;
break;
default:
- case 26:
+ case 26: /* 45 nm nehalem, "Bloomfield" */
+ case 30: /* 45 nm nehalem, "Lynnfield" */
+ case 46: /* 45 nm nehalem-ex, "Beckton" */
memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -2142,6 +2236,14 @@ static __init int intel_pmu_init(void)
pr_cont("Atom events, ");
break;
+
+ case 37: /* 32 nm nehalem, "Clarkdale" */
+ case 44: /* 32 nm nehalem, "Gulftown" */
+ memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+
+ pr_cont("Westmere events, ");
+ break;
}
return 0;
}
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index a4849c10a77e..ebd4c51d096a 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -27,7 +27,6 @@
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
-#include <asm/x86_init.h>
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
@@ -103,10 +102,5 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
#ifdef CONFIG_HPET_TIMER
hpet_disable();
#endif
-
-#ifdef CONFIG_X86_64
- x86_platform.iommu_shutdown();
-#endif
-
crash_save_cpu(regs, safe_smp_processor_id());
}
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index ad80a1c718c6..773afc9274a1 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -399,9 +399,15 @@ static int hpet_next_event(unsigned long delta,
* then we might have a real hardware problem. We can not do
* much about it here, but at least alert the user/admin with
* a prominent warning.
+ * An erratum on some chipsets (ICH9,..), results in comparator read
+ * immediately following a write returning old value. Workaround
+ * for this is to read this value second time, when first
+ * read returns old value.
*/
- WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt,
+ if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) {
+ WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt,
KERN_WARNING "hpet: compare register read back failed.\n");
+ }
return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
}
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index bfba6019d762..b2258ca91003 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -618,8 +618,8 @@ int kgdb_arch_init(void)
* portion of kgdb because this operation requires mutexs to
* complete.
*/
+ hw_breakpoint_init(&attr);
attr.bp_addr = (unsigned long)kgdb_arch_init;
- attr.type = PERF_TYPE_BREAKPOINT;
attr.bp_len = HW_BREAKPOINT_LEN_1;
attr.bp_type = HW_BREAKPOINT_W;
attr.disabled = 1;
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index a2c1edd2d3ac..e81030f71a8f 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -664,7 +664,7 @@ static void __init smp_reserve_memory(struct mpf_intel *mpf)
{
unsigned long size = get_mpc_size(mpf->physptr);
- reserve_early(mpf->physptr, mpf->physptr+size, "MP-table mpc");
+ reserve_early_overlap_ok(mpf->physptr, mpf->physptr+size, "MP-table mpc");
}
static int __init smp_scan_config(unsigned long base, unsigned long length)
@@ -693,7 +693,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
mpf, (u64)virt_to_phys(mpf));
mem = virt_to_phys(mpf);
- reserve_early(mem, mem + sizeof(*mpf), "MP-table mpf");
+ reserve_early_overlap_ok(mem, mem + sizeof(*mpf), "MP-table mpf");
if (mpf->physptr)
smp_reserve_memory(mpf);
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 34de53b46f87..4f41b29fde98 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -564,6 +564,9 @@ static void enable_gart_translations(void)
enable_gart_translation(dev, __pa(agp_gatt_table));
}
+
+ /* Flush the GART-TLB to remove stale entries */
+ k8_flush_garts();
}
/*
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 89a49fb46a27..28c3d814c092 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1502,8 +1502,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
for_each_sp(pages, sp, parents, i) {
kvm_mmu_zap_page(kvm, sp);
mmu_pages_clear_parents(&parents);
+ zapped++;
}
- zapped += pages.nr;
kvm_mmu_pages_init(parent, &parents, &pages);
}
@@ -1554,14 +1554,16 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
*/
if (used_pages > kvm_nr_mmu_pages) {
- while (used_pages > kvm_nr_mmu_pages) {
+ while (used_pages > kvm_nr_mmu_pages &&
+ !list_empty(&kvm->arch.active_mmu_pages)) {
struct kvm_mmu_page *page;
page = container_of(kvm->arch.active_mmu_pages.prev,
struct kvm_mmu_page, link);
- kvm_mmu_zap_page(kvm, page);
+ used_pages -= kvm_mmu_zap_page(kvm, page);
used_pages--;
}
+ kvm_nr_mmu_pages = used_pages;
kvm->arch.n_free_mmu_pages = 0;
}
else
@@ -1608,7 +1610,8 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
&& !sp->role.invalid) {
pgprintk("%s: zap %lx %x\n",
__func__, gfn, sp->role.word);
- kvm_mmu_zap_page(kvm, sp);
+ if (kvm_mmu_zap_page(kvm, sp))
+ nn = bucket->first;
}
}
}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1d9b33843c80..d42e191b17fa 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -698,29 +698,28 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
if (err)
goto free_svm;
+ err = -ENOMEM;
page = alloc_page(GFP_KERNEL);
- if (!page) {
- err = -ENOMEM;
+ if (!page)
goto uninit;
- }
- err = -ENOMEM;
msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
if (!msrpm_pages)
- goto uninit;
+ goto free_page1;
nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
if (!nested_msrpm_pages)
- goto uninit;
-
- svm->msrpm = page_address(msrpm_pages);
- svm_vcpu_init_msrpm(svm->msrpm);
+ goto free_page2;
hsave_page = alloc_page(GFP_KERNEL);
if (!hsave_page)
- goto uninit;
+ goto free_page3;
+
svm->nested.hsave = page_address(hsave_page);
+ svm->msrpm = page_address(msrpm_pages);
+ svm_vcpu_init_msrpm(svm->msrpm);
+
svm->nested.msrpm = page_address(nested_msrpm_pages);
svm->vmcb = page_address(page);
@@ -737,6 +736,12 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
return &svm->vcpu;
+free_page3:
+ __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page2:
+ __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page1:
+ __free_page(page);
uninit:
kvm_vcpu_uninit(&svm->vcpu);
free_svm:
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8a8e13965076..3acbe194e525 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -61,6 +61,8 @@ module_param_named(unrestricted_guest,
static int __read_mostly emulate_invalid_guest_state = 0;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);
+#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
+
/*
* These 2 parameters are used to config the controls for Pause-Loop Exiting:
* ple_gap: upper bound on the amount of time between two successive
@@ -115,7 +117,7 @@ struct vcpu_vmx {
} host_state;
struct {
int vm86_active;
- u8 save_iopl;
+ ulong save_rflags;
struct kvm_save_segment {
u16 selector;
unsigned long base;
@@ -787,18 +789,23 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
- unsigned long rflags;
+ unsigned long rflags, save_rflags;
rflags = vmcs_readl(GUEST_RFLAGS);
- if (to_vmx(vcpu)->rmode.vm86_active)
- rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
+ if (to_vmx(vcpu)->rmode.vm86_active) {
+ rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+ save_rflags = to_vmx(vcpu)->rmode.save_rflags;
+ rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
+ }
return rflags;
}
static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
- if (to_vmx(vcpu)->rmode.vm86_active)
+ if (to_vmx(vcpu)->rmode.vm86_active) {
+ to_vmx(vcpu)->rmode.save_rflags = rflags;
rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
+ }
vmcs_writel(GUEST_RFLAGS, rflags);
}
@@ -1431,8 +1438,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
flags = vmcs_readl(GUEST_RFLAGS);
- flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
- flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
+ flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+ flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
vmcs_writel(GUEST_RFLAGS, flags);
vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
@@ -1501,8 +1508,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
flags = vmcs_readl(GUEST_RFLAGS);
- vmx->rmode.save_iopl
- = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+ vmx->rmode.save_rflags = flags;
flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
@@ -2719,6 +2725,12 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
kvm_queue_exception(vcpu, vec);
return 1;
case BP_VECTOR:
+ /*
+ * Update instruction length as we may reinject the exception
+ * from user space while in guest debugging mode.
+ */
+ to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
+ vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
return 0;
/* fall through */
@@ -2841,6 +2853,13 @@ static int handle_exception(struct kvm_vcpu *vcpu)
kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
/* fall through */
case BP_VECTOR:
+ /*
+ * Update instruction length as we may reinject #BP from
+ * user space while in guest debugging mode. Reading it for
+ * #DB as well causes no harm, it is not used in that case.
+ */
+ vmx->vcpu.arch.event_exit_inst_len =
+ vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
kvm_run->exit_reason = KVM_EXIT_DEBUG;
kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
kvm_run->debug.arch.exception = ex_no;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d325c6345bab..55e20f8c518f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -384,21 +384,16 @@ out:
void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
if (cr0 & CR0_RESERVED_BITS) {
- printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
- cr0, vcpu->arch.cr0);
kvm_inject_gp(vcpu, 0);
return;
}
if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
- printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
kvm_inject_gp(vcpu, 0);
return;
}
if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
- printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
- "and a clear PE flag\n");
kvm_inject_gp(vcpu, 0);
return;
}
@@ -409,15 +404,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
int cs_db, cs_l;
if (!is_pae(vcpu)) {
- printk(KERN_DEBUG "set_cr0: #GP, start paging "
- "in long mode while PAE is disabled\n");
kvm_inject_gp(vcpu, 0);
return;
}
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
if (cs_l) {
- printk(KERN_DEBUG "set_cr0: #GP, start paging "
- "in long mode while CS.L == 1\n");
kvm_inject_gp(vcpu, 0);
return;
@@ -425,8 +416,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
} else
#endif
if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
- printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
- "reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
@@ -453,28 +442,23 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
if (cr4 & CR4_RESERVED_BITS) {
- printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
if (is_long_mode(vcpu)) {
if (!(cr4 & X86_CR4_PAE)) {
- printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
- "in long mode\n");
kvm_inject_gp(vcpu, 0);
return;
}
} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
&& ((cr4 ^ old_cr4) & pdptr_bits)
&& !load_pdptrs(vcpu, vcpu->arch.cr3)) {
- printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
if (cr4 & X86_CR4_VMXE) {
- printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
kvm_inject_gp(vcpu, 0);
return;
}
@@ -495,21 +479,16 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
if (is_long_mode(vcpu)) {
if (cr3 & CR3_L_MODE_RESERVED_BITS) {
- printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
} else {
if (is_pae(vcpu)) {
if (cr3 & CR3_PAE_RESERVED_BITS) {
- printk(KERN_DEBUG
- "set_cr3: #GP, reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
- printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
- "reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
@@ -541,7 +520,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr3);
void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
if (cr8 & CR8_RESERVED_BITS) {
- printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
kvm_inject_gp(vcpu, 0);
return;
}
@@ -595,15 +573,12 @@ static u32 emulated_msrs[] = {
static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
if (efer & efer_reserved_bits) {
- printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
- efer);
kvm_inject_gp(vcpu, 0);
return;
}
if (is_paging(vcpu)
&& (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
- printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
kvm_inject_gp(vcpu, 0);
return;
}
@@ -613,7 +588,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
- printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
kvm_inject_gp(vcpu, 0);
return;
}
@@ -624,7 +598,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
- printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
kvm_inject_gp(vcpu, 0);
return;
}
@@ -913,9 +886,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
if (msr >= MSR_IA32_MC0_CTL &&
msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
u32 offset = msr - MSR_IA32_MC0_CTL;
- /* only 0 or all 1s can be written to IA32_MCi_CTL */
+ /* only 0 or all 1s can be written to IA32_MCi_CTL
+ * some Linux kernels though clear bit 10 in bank 4 to
+ * workaround a BIOS/GART TBL issue on AMD K8s, ignore
+ * this to avoid an uncatched #GP in the guest
+ */
if ((offset & 0x3) == 0 &&
- data != 0 && data != ~(u64)0)
+ data != 0 && (data | (1 << 10)) != ~(u64)0)
return -1;
vcpu->arch.mce_banks[offset] = data;
break;
@@ -2366,7 +2343,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log)
{
int r;
- int n;
+ unsigned long n;
struct kvm_memory_slot *memslot;
int is_dirty = 0;
@@ -2382,7 +2359,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
kvm_mmu_slot_remove_write_access(kvm, log->slot);
spin_unlock(&kvm->mmu_lock);
memslot = &kvm->memslots[log->slot];
- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ n = kvm_dirty_bitmap_bytes(memslot);
memset(memslot->dirty_bitmap, 0, n);
}
r = 0;
@@ -4599,6 +4576,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
int ret = 0;
u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
+ u32 desc_limit;
old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
@@ -4621,7 +4599,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
}
}
- if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
+ desc_limit = get_desc_limit(&nseg_desc);
+ if (!nseg_desc.p ||
+ ((desc_limit < 0x67 && (nseg_desc.type & 8)) ||
+ desc_limit < 0x2b)) {
kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
return 1;
}
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index cffd754f3039..ddef409c2816 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -14,7 +14,7 @@ $(obj)/inat.o: $(obj)/inat-tables.c
clean-files := inat-tables.c
-obj-$(CONFIG_SMP) += msr-smp.o
+obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
lib-y := delay.o
lib-y += thunk_$(BITS).o
@@ -39,4 +39,5 @@ else
lib-y += thunk_64.o clear_page_64.o copy_page_64.o
lib-y += memmove_64.o memset_64.o
lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
+ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem_64.o
endif
diff --git a/arch/x86/lib/cache-smp.c b/arch/x86/lib/cache-smp.c
new file mode 100644
index 000000000000..a3c668875038
--- /dev/null
+++ b/arch/x86/lib/cache-smp.c
@@ -0,0 +1,19 @@
+#include <linux/smp.h>
+#include <linux/module.h>
+
+static void __wbinvd(void *dummy)
+{
+ wbinvd();
+}
+
+void wbinvd_on_cpu(int cpu)
+{
+ smp_call_function_single(cpu, __wbinvd, NULL, 1);
+}
+EXPORT_SYMBOL(wbinvd_on_cpu);
+
+int wbinvd_on_all_cpus(void)
+{
+ return on_each_cpu(__wbinvd, NULL, 1);
+}
+EXPORT_SYMBOL(wbinvd_on_all_cpus);
diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
new file mode 100644
index 000000000000..15acecf0d7aa
--- /dev/null
+++ b/arch/x86/lib/rwsem_64.S
@@ -0,0 +1,81 @@
+/*
+ * x86-64 rwsem wrappers
+ *
+ * This interfaces the inline asm code to the slow-path
+ * C routines. We need to save the call-clobbered regs
+ * that the asm does not mark as clobbered, and move the
+ * argument from %rax to %rdi.
+ *
+ * NOTE! We don't need to save %rax, because the functions
+ * will always return the semaphore pointer in %rax (which
+ * is also the input argument to these helpers)
+ *
+ * The following can clobber %rdx because the asm clobbers it:
+ * call_rwsem_down_write_failed
+ * call_rwsem_wake
+ * but %rdi, %rsi, %rcx, %r8-r11 always need saving.
+ */
+
+#include <linux/linkage.h>
+#include <asm/rwlock.h>
+#include <asm/alternative-asm.h>
+#include <asm/frame.h>
+#include <asm/dwarf2.h>
+
+#define save_common_regs \
+ pushq %rdi; \
+ pushq %rsi; \
+ pushq %rcx; \
+ pushq %r8; \
+ pushq %r9; \
+ pushq %r10; \
+ pushq %r11
+
+#define restore_common_regs \
+ popq %r11; \
+ popq %r10; \
+ popq %r9; \
+ popq %r8; \
+ popq %rcx; \
+ popq %rsi; \
+ popq %rdi
+
+/* Fix up special calling conventions */
+ENTRY(call_rwsem_down_read_failed)
+ save_common_regs
+ pushq %rdx
+ movq %rax,%rdi
+ call rwsem_down_read_failed
+ popq %rdx
+ restore_common_regs
+ ret
+ ENDPROC(call_rwsem_down_read_failed)
+
+ENTRY(call_rwsem_down_write_failed)
+ save_common_regs
+ movq %rax,%rdi
+ call rwsem_down_write_failed
+ restore_common_regs
+ ret
+ ENDPROC(call_rwsem_down_write_failed)
+
+ENTRY(call_rwsem_wake)
+ decw %dx /* do nothing if still outstanding active readers */
+ jnz 1f
+ save_common_regs
+ movq %rax,%rdi
+ call rwsem_wake
+ restore_common_regs
+1: ret
+ ENDPROC(call_rwsem_wake)
+
+/* Fix up special calling conventions */
+ENTRY(call_rwsem_downgrade_wake)
+ save_common_regs
+ pushq %rdx
+ movq %rax,%rdi
+ call rwsem_downgrade_wake
+ popq %rdx
+ restore_common_regs
+ ret
+ ENDPROC(call_rwsem_downgrade_wake)
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 0696d506c4ad..b02f6d8ac922 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -590,6 +590,8 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
case PCI_DEVICE_ID_INTEL_ICH10_1:
case PCI_DEVICE_ID_INTEL_ICH10_2:
case PCI_DEVICE_ID_INTEL_ICH10_3:
+ case PCI_DEVICE_ID_INTEL_CPT_LPC1:
+ case PCI_DEVICE_ID_INTEL_CPT_LPC2:
r->name = "PIIX/ICH";
r->get = pirq_piix_get;
r->set = pirq_piix_set;
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
index b641388d8286..ad47daeafa4e 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -27,10 +27,17 @@ ENTRY(swsusp_arch_suspend)
ret
ENTRY(restore_image)
+ movl mmu_cr4_features, %ecx
movl resume_pg_dir, %eax
subl $__PAGE_OFFSET, %eax
movl %eax, %cr3
+ jecxz 1f # cr4 Pentium and higher, skip if zero
+ andl $~(X86_CR4_PGE), %ecx
+ movl %ecx, %cr4; # turn off PGE
+ movl %cr3, %eax; # flush TLB
+ movl %eax, %cr3
+1:
movl restore_pblist, %edx
.p2align 4,,7
@@ -54,16 +61,8 @@ done:
movl $swapper_pg_dir, %eax
subl $__PAGE_OFFSET, %eax
movl %eax, %cr3
- /* Flush TLB, including "global" things (vmalloc) */
movl mmu_cr4_features, %ecx
jecxz 1f # cr4 Pentium and higher, skip if zero
- movl %ecx, %edx
- andl $~(X86_CR4_PGE), %edx
- movl %edx, %cr4; # turn off PGE
-1:
- movl %cr3, %eax; # flush TLB
- movl %eax, %cr3
- jecxz 1f # cr4 Pentium and higher, skip if zero
movl %ecx, %cr4; # turn PGE back on
1:
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 52fec07064f0..83b62521d8d3 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -468,6 +468,23 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
acpi_ut_add_reference(obj_desc->field.region_obj);
+ /* allow full data read from EC address space */
+ if (obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_EC) {
+ if (obj_desc->common_field.bit_length > 8) {
+ unsigned width =
+ ACPI_ROUND_BITS_UP_TO_BYTES(
+ obj_desc->common_field.bit_length);
+ /* access_bit_width is u8, don't overflow it */
+ if (width > 8)
+ width = 8;
+ obj_desc->common_field.access_byte_width =
+ width;
+ obj_desc->common_field.access_bit_width =
+ 8 * width;
+ }
+ }
+
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
obj_desc->field.start_field_bit_offset,
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index e497639abfa7..e29382508718 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -601,12 +601,12 @@ static u32 acpi_ec_gpe_handler(void *data)
static acpi_status
acpi_ec_space_handler(u32 function, acpi_physical_address address,
- u32 bits, acpi_integer *value,
+ u32 bits, acpi_integer *value64,
void *handler_context, void *region_context)
{
struct acpi_ec *ec = handler_context;
- int result = 0, i;
- u8 temp = 0;
+ int result = 0, i, bytes = bits / 8;
+ u8 *value = (u8 *)value64;
if ((address > 0xFF) || !value || !handler_context)
return AE_BAD_PARAMETER;
@@ -614,32 +614,15 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
if (function != ACPI_READ && function != ACPI_WRITE)
return AE_BAD_PARAMETER;
- if (bits != 8 && acpi_strict)
- return AE_BAD_PARAMETER;
-
- if (EC_FLAGS_MSI)
+ if (EC_FLAGS_MSI || bits > 8)
acpi_ec_burst_enable(ec);
- if (function == ACPI_READ) {
- result = acpi_ec_read(ec, address, &temp);
- *value = temp;
- } else {
- temp = 0xff & (*value);
- result = acpi_ec_write(ec, address, temp);
- }
-
- for (i = 8; unlikely(bits - i > 0); i += 8) {
- ++address;
- if (function == ACPI_READ) {
- result = acpi_ec_read(ec, address, &temp);
- (*value) |= ((acpi_integer)temp) << i;
- } else {
- temp = 0xff & ((*value) >> i);
- result = acpi_ec_write(ec, address, temp);
- }
- }
+ for (i = 0; i < bytes; ++i, ++address, ++value)
+ result = (function == ACPI_READ) ?
+ acpi_ec_read(ec, address, value) :
+ acpi_ec_write(ec, address, *value);
- if (EC_FLAGS_MSI)
+ if (EC_FLAGS_MSI || bits > 8)
acpi_ec_burst_disable(ec);
switch (result) {
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 9e2feb6ce241..462200d93d71 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -570,6 +570,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
+ { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
+ { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
+ { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 6f3f2257d0f0..b5f614b9c245 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -291,6 +291,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
{ 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (PCH) */
{ 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ /* SATA Controller IDE (CPT) */
+ { 0x8086, 0x1c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ /* SATA Controller IDE (CPT) */
+ { 0x8086, 0x1c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ /* SATA Controller IDE (CPT) */
+ { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (CPT) */
+ { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
{ } /* terminate list */
};
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 6728328f3bea..2401c9cf31b7 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4348,6 +4348,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
{ "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
+ { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
+
/* devices which puke on READ_NATIVE_MAX */
{ "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index be7c39552980..ad647502d35c 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -697,6 +697,7 @@ static const struct pci_device_id via[] = {
{ PCI_VDEVICE(VIA, 0x3164), },
{ PCI_VDEVICE(VIA, 0x5324), },
{ PCI_VDEVICE(VIA, 0xC409), VIA_IDFLAG_SINGLE },
+ { PCI_VDEVICE(VIA, 0x9001), VIA_IDFLAG_SINGLE },
{ },
};
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 3999a5f25f38..8a713f1e9653 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
+#include <asm/smp.h>
#include "agp.h"
/*
@@ -815,12 +816,6 @@ static void intel_i830_setup_flush(void)
intel_i830_fini_flush();
}
-static void
-do_wbinvd(void *null)
-{
- wbinvd();
-}
-
/* The chipset_flush interface needs to get data that has already been
* flushed out of the CPU all the way out to main memory, because the GPU
* doesn't snoop those buffers.
@@ -837,12 +832,10 @@ static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
memset(pg, 0, 1024);
- if (cpu_has_clflush) {
+ if (cpu_has_clflush)
clflush_cache_range(pg, 1024);
- } else {
- if (on_each_cpu(do_wbinvd, NULL, 1) != 0)
- printk(KERN_ERR "Timed out waiting for cache flush.\n");
- }
+ else if (wbinvd_on_all_cpus() != 0)
+ printk(KERN_ERR "Timed out waiting for cache flush.\n");
}
/* The intel i830 automatically initializes the agp aperture during POST.
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 64acd05f71c8..9abc3a19d53a 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -247,6 +247,7 @@ static const struct file_operations raw_fops = {
.aio_read = generic_file_aio_read,
.write = do_sync_write,
.aio_write = blkdev_aio_write,
+ .fsync = block_fsync,
.open = raw_open,
.release= raw_release,
.ioctl = raw_ioctl,
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index dcb9083ecde0..76253cf27028 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1423,6 +1423,8 @@ static void release_one_tty(struct work_struct *work)
list_del_init(&tty->tty_files);
file_list_unlock();
+ put_pid(tty->pgrp);
+ put_pid(tty->session);
free_tty_struct(tty);
}
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 7d0f00a935fa..99907c32ea29 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -104,6 +104,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("%s is disconnected\n",
drm_get_connector_name(connector));
+ drm_mode_connector_update_edid_property(connector, NULL);
goto prune;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ab6c97330412..bfd0e4acfee0 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -85,6 +85,8 @@ static struct edid_quirk {
/* Envision Peripherals, Inc. EN-7100e */
{ "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
+ /* Envision EN2028 */
+ { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
/* Funai Electronics PM36B */
{ "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
@@ -707,15 +709,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
mode->vsync_end = mode->vsync_start + vsync_pulse_width;
mode->vtotal = mode->vdisplay + vblank;
- /* perform the basic check for the detailed timing */
- if (mode->hsync_end > mode->htotal ||
- mode->vsync_end > mode->vtotal) {
- drm_mode_destroy(dev, mode);
- DRM_DEBUG_KMS("Incorrect detailed timing. "
- "Sync is beyond the blank.\n");
- return NULL;
- }
-
/* Some EDIDs have bogus h/vtotal values */
if (mode->hsync_end > mode->htotal)
mode->htotal = mode->hsync_end + 1;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 08d14df3bb42..4804872f8b19 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -140,14 +140,16 @@ int drm_open(struct inode *inode, struct file *filp)
spin_unlock(&dev->count_lock);
}
out:
- mutex_lock(&dev->struct_mutex);
- if (minor->type == DRM_MINOR_LEGACY) {
- BUG_ON((dev->dev_mapping != NULL) &&
- (dev->dev_mapping != inode->i_mapping));
- if (dev->dev_mapping == NULL)
- dev->dev_mapping = inode->i_mapping;
+ if (!retcode) {
+ mutex_lock(&dev->struct_mutex);
+ if (minor->type == DRM_MINOR_LEGACY) {
+ if (dev->dev_mapping == NULL)
+ dev->dev_mapping = inode->i_mapping;
+ else if (dev->dev_mapping != inode->i_mapping)
+ retcode = -ENODEV;
+ }
+ mutex_unlock(&dev->struct_mutex);
}
- mutex_unlock(&dev->struct_mutex);
return retcode;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 93031a75d112..1238bc981bb4 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -899,6 +899,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Clientron U800",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
+ },
+ },
{ } /* terminating entry */
};
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index d75788feac6c..b1f929d4e1aa 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -881,11 +881,16 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
uint8_t attr = U8((*ptr)++), shift;
uint32_t saved, dst;
int dptr = *ptr;
+ uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ /* op needs the full dst value */
+ dst = saved;
shift = atom_get_src(ctx, attr, ptr);
SDEBUG(" shift: %d\n", shift);
dst <<= shift;
+ dst &= atom_arg_mask[dst_align];
+ dst >>= atom_arg_shift[dst_align];
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
@@ -895,11 +900,16 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
uint8_t attr = U8((*ptr)++), shift;
uint32_t saved, dst;
int dptr = *ptr;
+ uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ /* op needs the full dst value */
+ dst = saved;
shift = atom_get_src(ctx, attr, ptr);
SDEBUG(" shift: %d\n", shift);
dst >>= shift;
+ dst &= atom_arg_mask[dst_align];
+ dst >>= atom_arg_shift[dst_align];
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 43b55a030b4d..5bdfaf2780d8 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -364,11 +364,12 @@ void r300_gpu_init(struct radeon_device *rdev)
r100_hdp_reset(rdev);
/* FIXME: rv380 one pipes ? */
- if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
+ if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
+ (rdev->family == CHIP_R350)) {
/* r300,r350 */
rdev->num_gb_pipes = 2;
} else {
- /* rv350,rv370,rv380 */
+ /* rv350,rv370,rv380,r300 AD */
rdev->num_gb_pipes = 1;
}
rdev->num_z_pipes = 1;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index e7b19440102e..81b832eec095 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -670,7 +670,9 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
dac = RBIOS8(dac_info + 0x3) & 0xf;
p_dac->ps2_pdac_adj = (bg << 8) | (dac);
}
- found = 1;
+ /* if the values are all zeros, use the table */
+ if (p_dac->ps2_pdac_adj)
+ found = 1;
}
out:
@@ -812,7 +814,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
bg = RBIOS8(dac_info + 0x10) & 0xf;
dac = RBIOS8(dac_info + 0x11) & 0xf;
tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
- found = 1;
+ /* if the values are all zeros, use the table */
+ if (tv_dac->ps2_tvdac_adj)
+ found = 1;
} else if (rev > 1) {
bg = RBIOS8(dac_info + 0xc) & 0xf;
dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf;
@@ -825,7 +829,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
bg = RBIOS8(dac_info + 0xe) & 0xf;
dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf;
tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
- found = 1;
+ /* if the values are all zeros, use the table */
+ if (tv_dac->ps2_tvdac_adj)
+ found = 1;
}
tv_dac->tv_std = radeon_combios_get_tv_info(rdev);
}
@@ -842,7 +848,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
(bg << 16) | (dac << 20);
tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
- found = 1;
+ /* if the values are all zeros, use the table */
+ if (tv_dac->ps2_tvdac_adj)
+ found = 1;
} else {
bg = RBIOS8(dac_info + 0x4) & 0xf;
dac = RBIOS8(dac_info + 0x5) & 0xf;
@@ -850,7 +858,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
(bg << 16) | (dac << 20);
tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
- found = 1;
+ /* if the values are all zeros, use the table */
+ if (tv_dac->ps2_tvdac_adj)
+ found = 1;
}
} else {
DRM_INFO("No TV DAC info found in BIOS\n");
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 65f81942f399..2bdfbcd6d0e6 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -162,12 +162,14 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
{
struct drm_device *dev = connector->dev;
struct drm_connector *conflict;
+ struct radeon_connector *radeon_conflict;
int i;
list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
if (conflict == connector)
continue;
+ radeon_conflict = to_radeon_connector(conflict);
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (conflict->encoder_ids[i] == 0)
break;
@@ -177,6 +179,9 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
if (conflict->status != connector_status_connected)
continue;
+ if (radeon_conflict->use_digital)
+ continue;
+
if (priority == true) {
DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
@@ -315,7 +320,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
radeon_encoder = to_radeon_encoder(encoder);
if (!radeon_encoder->enc_priv)
return 0;
- if (rdev->is_atom_bios) {
+ if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) {
struct radeon_encoder_atom_dac *dac_int;
dac_int = radeon_encoder->enc_priv;
dac_int->tv_std = val;
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 06123ba31d31..f129bbb8bbc8 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -417,8 +417,9 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
return -EBUSY;
}
-static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
+static void radeon_init_pipes(struct drm_device *dev)
{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
uint32_t gb_tile_config, gb_pipe_sel = 0;
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) {
@@ -436,11 +437,12 @@ static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
} else {
/* R3xx */
- if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
+ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 &&
+ dev->pdev->device != 0x4144) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) {
dev_priv->num_gb_pipes = 2;
} else {
- /* R3Vxx */
+ /* RV3xx/R300 AD */
dev_priv->num_gb_pipes = 1;
}
}
@@ -736,7 +738,7 @@ static int radeon_do_engine_reset(struct drm_device * dev)
/* setup the raster pipes */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
- radeon_init_pipes(dev_priv);
+ radeon_init_pipes(dev);
/* Reset the CP ring */
radeon_do_cp_reset(dev_priv);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index e9d085021c1f..9933c2c70648 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -193,11 +193,13 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
radeon_bo_list_fence(&parser->validated, parser->ib->fence);
}
radeon_bo_list_unreserve(&parser->validated);
- for (i = 0; i < parser->nrelocs; i++) {
- if (parser->relocs[i].gobj) {
- mutex_lock(&parser->rdev->ddev->struct_mutex);
- drm_gem_object_unreference(parser->relocs[i].gobj);
- mutex_unlock(&parser->rdev->ddev->struct_mutex);
+ if (parser->relocs != NULL) {
+ for (i = 0; i < parser->nrelocs; i++) {
+ if (parser->relocs[i].gobj) {
+ mutex_lock(&parser->rdev->ddev->struct_mutex);
+ drm_gem_object_unreference(parser->relocs[i].gobj);
+ mutex_unlock(&parser->rdev->ddev->struct_mutex);
+ }
}
}
kfree(parser->track);
@@ -246,7 +248,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
r = radeon_cs_parser_relocs(&parser);
if (r) {
- DRM_ERROR("Failed to parse relocation !\n");
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to parse relocation %d!\n", r);
radeon_cs_parser_fini(&parser, r);
mutex_unlock(&rdev->cs_mutex);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index f76ae34878d7..727a4622be93 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -655,6 +655,14 @@ int radeon_device_init(struct radeon_device *rdev,
return r;
radeon_check_arguments(rdev);
+ /* All of the newer IGP chips have an internal GART.
+ * However, some rs4xx report as AGP, so remove that here.
+ */
+ if ((rdev->family >= CHIP_RS400) &&
+ (rdev->flags & RADEON_IS_IGP)) {
+ rdev->flags &= ~RADEON_IS_AGP;
+ }
+
if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
radeon_agp_disable(rdev);
}
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 3c91724457ca..7626bd501ffc 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1276,8 +1276,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
atombios_dac_setup(encoder, ATOM_ENABLE);
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
- atombios_tv_setup(encoder, ATOM_ENABLE);
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+ atombios_tv_setup(encoder, ATOM_ENABLE);
+ else
+ atombios_tv_setup(encoder, ATOM_DISABLE);
+ }
break;
}
atombios_apply_encoder_quirks(encoder, adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index 417684daef4c..f2ed27c8055b 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -57,6 +57,10 @@
#define NTSC_TV_PLL_N_14 693
#define NTSC_TV_PLL_P_14 7
+#define PAL_TV_PLL_M_14 19
+#define PAL_TV_PLL_N_14 353
+#define PAL_TV_PLL_P_14 5
+
#define VERT_LEAD_IN_LINES 2
#define FRAC_BITS 0xe
#define FRAC_MASK 0x3fff
@@ -205,9 +209,24 @@ static const struct radeon_tv_mode_constants available_tv_modes[] = {
630627, /* defRestart */
347, /* crtcPLL_N */
14, /* crtcPLL_M */
- 8, /* crtcPLL_postDiv */
+ 8, /* crtcPLL_postDiv */
1022, /* pixToTV */
},
+ { /* PAL timing for 14 MHz ref clk */
+ 800, /* horResolution */
+ 600, /* verResolution */
+ TV_STD_PAL, /* standard */
+ 1131, /* horTotal */
+ 742, /* verTotal */
+ 813, /* horStart */
+ 840, /* horSyncStart */
+ 633, /* verSyncStart */
+ 708369, /* defRestart */
+ 211, /* crtcPLL_N */
+ 9, /* crtcPLL_M */
+ 8, /* crtcPLL_postDiv */
+ 759, /* pixToTV */
+ },
};
#define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes)
@@ -242,7 +261,7 @@ static const struct radeon_tv_mode_constants *radeon_legacy_tv_get_std_mode(stru
if (pll->reference_freq == 2700)
const_ptr = &available_tv_modes[1];
else
- const_ptr = &available_tv_modes[1]; /* FIX ME */
+ const_ptr = &available_tv_modes[3];
}
return const_ptr;
}
@@ -685,9 +704,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
n = PAL_TV_PLL_N_27;
p = PAL_TV_PLL_P_27;
} else {
- m = PAL_TV_PLL_M_27;
- n = PAL_TV_PLL_N_27;
- p = PAL_TV_PLL_P_27;
+ m = PAL_TV_PLL_M_14;
+ n = PAL_TV_PLL_N_14;
+ p = PAL_TV_PLL_P_14;
}
}
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index c3818562a13e..a27c09f33f86 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -175,7 +175,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
- tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1);
+ tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
index cab13e8c7d29..62416e6baeca 100644
--- a/drivers/hid/hid-gyration.c
+++ b/drivers/hid/hid-gyration.c
@@ -53,10 +53,13 @@ static int gyration_input_mapping(struct hid_device *hdev, struct hid_input *hi,
static int gyration_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
- struct input_dev *input = field->hidinput->input;
+
+ if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
+ return 0;
if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK &&
(usage->hid & 0xff) == 0x82) {
+ struct input_dev *input = field->hidinput->input;
input_event(input, usage->type, usage->code, 1);
input_sync(input);
input_event(input, usage->type, usage->code, 0);
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 864a371f6eb9..fbc997ee67d9 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -302,13 +302,13 @@ error_ret:
**/
static inline int sht15_calc_temp(struct sht15_data *data)
{
- int d1 = 0;
+ int d1 = temppoints[0].d1;
int i;
- for (i = 1; i < ARRAY_SIZE(temppoints); i++)
+ for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--)
/* Find pointer to interpolate */
if (data->supply_uV > temppoints[i - 1].vdd) {
- d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd)
+ d1 = (data->supply_uV - temppoints[i - 1].vdd)
* (temppoints[i].d1 - temppoints[i - 1].d1)
/ (temppoints[i].vdd - temppoints[i - 1].vdd)
+ temppoints[i - 1].d1;
@@ -541,7 +541,12 @@ static int __devinit sht15_probe(struct platform_device *pdev)
/* If a regulator is available, query what the supply voltage actually is!*/
data->reg = regulator_get(data->dev, "vcc");
if (!IS_ERR(data->reg)) {
- data->supply_uV = regulator_get_voltage(data->reg);
+ int voltage;
+
+ voltage = regulator_get_voltage(data->reg);
+ if (voltage)
+ data->supply_uV = voltage;
+
regulator_enable(data->reg);
/* setup a notifier block to update this if another device
* causes the voltage to change */
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 5f318ce29770..cb9f95cd51b1 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -77,7 +77,7 @@ config I2C_AMD8111
will be called i2c-amd8111.
config I2C_I801
- tristate "Intel 82801 (ICH)"
+ tristate "Intel 82801 (ICH/PCH)"
depends on PCI
help
If you say yes to this option, support will be included for the Intel
@@ -97,7 +97,8 @@ config I2C_I801
ICH9
Tolapai
ICH10
- PCH
+ 3400/5 Series (PCH)
+ Cougar Point (PCH)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 5574be2ae6f9..e361da73b986 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -41,7 +41,8 @@
Tolapai 0x5032 32 hard yes yes yes
ICH10 0x3a30 32 hard yes yes yes
ICH10 0x3a60 32 hard yes yes yes
- PCH 0x3b30 32 hard yes yes yes
+ 3400/5 Series (PCH) 0x3b30 32 hard yes yes yes
+ Cougar Point (PCH) 0x1c22 32 hard yes yes yes
Features supported by this driver:
Software PEC no
@@ -580,6 +581,7 @@ static struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) },
{ 0, }
};
@@ -709,6 +711,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
case PCI_DEVICE_ID_INTEL_ICH10_4:
case PCI_DEVICE_ID_INTEL_ICH10_5:
case PCI_DEVICE_ID_INTEL_PCH_SMBUS:
+ case PCI_DEVICE_ID_INTEL_CPT_SMBUS:
i801_features |= FEATURE_I2C_BLOCK_READ;
/* fall through */
case PCI_DEVICE_ID_INTEL_82801DB_3:
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 30bdf427ee6d..f8302c267743 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -752,6 +752,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
if (++priv->tx_outstanding == ipoib_sendq_size) {
ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
tx->qp->qp_num);
+ if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
+ ipoib_warn(priv, "request notify on send CQ failed\n");
netif_stop_queue(dev);
}
}
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
index fbd3987af57f..e8d65b372034 100644
--- a/drivers/input/sparse-keymap.c
+++ b/drivers/input/sparse-keymap.c
@@ -161,7 +161,7 @@ int sparse_keymap_setup(struct input_dev *dev,
return 0;
err_out:
- kfree(keymap);
+ kfree(map);
return error;
}
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 072f33b3b2b0..e53ddc5d0fa1 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -644,13 +644,15 @@ static int wacom_resume(struct usb_interface *intf)
int rv;
mutex_lock(&wacom->lock);
- if (wacom->open) {
+
+ /* switch to wacom mode first */
+ wacom_query_tablet_data(intf, features);
+
+ if (wacom->open)
rv = usb_submit_urb(wacom->irq, GFP_NOIO);
- /* switch to wacom mode if needed */
- if (!wacom_retrieve_hid_descriptor(intf, features))
- wacom_query_tablet_data(intf, features);
- } else
+ else
rv = 0;
+
mutex_unlock(&wacom->lock);
return rv;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index e3cf5686d0aa..d7500e1c26f2 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -285,7 +285,8 @@ retry:
up_write(&_hash_lock);
}
-static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
+static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old,
+ const char *new)
{
char *new_name, *old_name;
struct hash_cell *hc;
@@ -344,7 +345,8 @@ static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
dm_table_put(table);
}
- dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie);
+ if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie))
+ *flags |= DM_UEVENT_GENERATED_FLAG;
dm_put(hc->md);
up_write(&_hash_lock);
@@ -736,10 +738,10 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
__hash_remove(hc);
up_write(&_hash_lock);
- dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr);
+ if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
+ param->flags |= DM_UEVENT_GENERATED_FLAG;
dm_put(md);
- param->data_size = 0;
return 0;
}
@@ -773,7 +775,9 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
return r;
param->data_size = 0;
- return dm_hash_rename(param->event_nr, param->name, new_name);
+
+ return dm_hash_rename(param->event_nr, &param->flags, param->name,
+ new_name);
}
static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
@@ -899,8 +903,8 @@ static int do_resume(struct dm_ioctl *param)
if (dm_suspended_md(md)) {
r = dm_resume(md);
- if (!r)
- dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr);
+ if (!r && !dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr))
+ param->flags |= DM_UEVENT_GENERATED_FLAG;
}
if (old_map)
@@ -1477,6 +1481,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
{
/* Always clear this flag */
param->flags &= ~DM_BUFFER_FULL_FLAG;
+ param->flags &= ~DM_UEVENT_GENERATED_FLAG;
/* Ignores parameters */
if (cmd == DM_REMOVE_ALL_CMD ||
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index fa786b9d8ba7..fe8889e5e355 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2618,18 +2618,19 @@ out:
/*-----------------------------------------------------------------
* Event notification.
*---------------------------------------------------------------*/
-void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
unsigned cookie)
{
char udev_cookie[DM_COOKIE_LENGTH];
char *envp[] = { udev_cookie, NULL };
if (!cookie)
- kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
+ return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
else {
snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
DM_COOKIE_ENV_VAR_NAME, cookie);
- kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
+ return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
+ action, envp);
}
}
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 8dadaa5bc396..bad1724d4869 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -125,8 +125,8 @@ void dm_stripe_exit(void);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md);
-void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
- unsigned cookie);
+int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+ unsigned cookie);
int dm_io_init(void);
void dm_io_exit(void);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 00435bd20699..001317b50034 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -172,12 +172,14 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
- * violating it, so limit ->max_sector to one PAGE, as
- * a one page request is never in violation.
+ * violating it, so limit max_phys_segments to 1 lying within
+ * a single page.
*/
- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+ blk_queue_max_phys_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
+ }
conf->array_sectors += rdev->sectors;
cnt++;
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 32a662fc55c9..f9ee99f9a94c 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -301,14 +301,16 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
- * violating it, so limit ->max_sector to one PAGE, as
- * a one page request is never in violation.
+ * violating it, so limit ->max_phys_segments to one, lying
+ * within a single page.
* (Note: it is very unlikely that a device with
* merge_bvec_fn will be involved in multipath.)
*/
- if (q->merge_bvec_fn &&
- queue_max_sectors(q) > (PAGE_SIZE>>9))
- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+ if (q->merge_bvec_fn) {
+ blk_queue_max_phys_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
+ }
conf->working_disks++;
mddev->degraded--;
@@ -476,9 +478,11 @@ static int multipath_run (mddev_t *mddev)
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, not that we ever expect a device with
* a merge_bvec_fn to be involved in multipath */
- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+ blk_queue_max_phys_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
+ }
if (!test_bit(Faulty, &rdev->flags))
conf->working_disks++;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 77605cdceaf1..41ee9deed250 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -176,14 +176,15 @@ static int create_strip_zones(mddev_t *mddev)
disk_stack_limits(mddev->gendisk, rdev1->bdev,
rdev1->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
- * violating it, so limit ->max_sector to one PAGE, as
- * a one page request is never in violation.
+ * violating it, so limit ->max_phys_segments to 1, lying within
+ * a single page.
*/
- if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
-
+ if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) {
+ blk_queue_max_phys_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
+ }
if (!smallest || (rdev1->sectors < smallest->sectors))
smallest = rdev1;
cnt++;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d119b7b75e71..047c468e3d28 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1155,13 +1155,17 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
- /* as we don't honour merge_bvec_fn, we must never risk
- * violating it, so limit ->max_sector to one PAGE, as
- * a one page request is never in violation.
+ /* as we don't honour merge_bvec_fn, we must
+ * never risk violating it, so limit
+ * ->max_phys_segments to one lying within a single
+ * page, as a one page request is never in
+ * violation.
*/
- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+ blk_queue_max_phys_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
+ }
p->head_position = 0;
rdev->raid_disk = mirror;
@@ -2255,12 +2259,14 @@ static int run(mddev_t *mddev)
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
- * violating it, so limit ->max_sector to one PAGE, as
- * a one page request is never in violation.
+ * violating it, so limit max_phys_segments to 1 lying
+ * within a single page.
*/
- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+ blk_queue_max_phys_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
+ }
disk->head_position = 0;
}
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 57f149b75fbe..4d353d25d60b 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -660,6 +660,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
i = 0;
}
+ if (i == tx_ring->next_to_use)
+ break;
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
}
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 67d414b061d7..3db85daa3671 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -3255,8 +3255,8 @@ static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
if (max_frame != 16383)
- printk(KERN_WARNING "WARNING! Changing of MTU on this NIC"
- "May lead to frame reception errors!\n");
+ printk(KERN_WARNING PFX "WARNING! Changing of MTU on this "
+ "NIC may lead to frame reception errors!\n");
tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
}
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index e0799d924057..0387658d1e14 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -414,7 +414,7 @@ static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd,
spin_unlock_irqrestore(&aru->common.cmdlock, flags);
usb_fill_int_urb(urb, aru->udev,
- usb_sndbulkpipe(aru->udev, AR9170_EP_CMD),
+ usb_sndintpipe(aru->udev, AR9170_EP_CMD),
aru->common.cmdbuf, plen + 4,
ar9170_usb_tx_urb_complete, NULL, 1);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 33a10716af8b..7b1eab4d85cb 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2721,8 +2721,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
all_wiphys_idle = ath9k_all_wiphys_idle(sc);
ath9k_set_wiphy_idle(aphy, idle);
- if (!idle && all_wiphys_idle)
- enable_radio = true;
+ enable_radio = (!idle && all_wiphys_idle);
/*
* After we unlock here its possible another wiphy
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 64c12e1bced3..0a00d42642cd 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -78,11 +78,11 @@ config B43_SDIO
If unsure, say N.
-# Data transfers to the device via PIO
-# This is only needed on PCMCIA and SDIO devices. All others can do DMA properly.
+# Data transfers to the device via PIO. We want it as a fallback even
+# if we can do DMA.
config B43_PIO
bool
- depends on B43 && (B43_SDIO || B43_PCMCIA || B43_FORCE_PIO)
+ depends on B43
select SSB_BLOCKIO
default y
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index 84772a2542dc..5e83b6f0a3a0 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -12,7 +12,7 @@ b43-y += xmit.o
b43-y += lo.o
b43-y += wa.o
b43-y += dma.o
-b43-$(CONFIG_B43_PIO) += pio.o
+b43-y += pio.o
b43-y += rfkill.o
b43-$(CONFIG_B43_LEDS) += leds.o
b43-$(CONFIG_B43_PCMCIA) += pcmcia.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index c484cc253892..7df822e84da0 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -694,6 +694,7 @@ struct b43_wldev {
bool radio_hw_enable; /* saved state of radio hardware enabled state */
bool qos_enabled; /* TRUE, if QoS is used. */
bool hwcrypto_enabled; /* TRUE, if HW crypto acceleration is enabled. */
+ bool use_pio; /* TRUE if next init should use PIO */
/* PHY/Radio device. */
struct b43_phy phy;
@@ -822,11 +823,9 @@ struct b43_wl {
/* The device LEDs. */
struct b43_leds leds;
-#ifdef CONFIG_B43_PIO
/* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */
u8 pio_scratchspace[110] __attribute__((__aligned__(8)));
u8 pio_tailspace[4] __attribute__((__aligned__(8)));
-#endif /* CONFIG_B43_PIO */
};
static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw)
@@ -877,20 +876,15 @@ static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value)
static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
{
-#ifdef CONFIG_B43_PIO
return dev->__using_pio_transfers;
-#else
- return 0;
-#endif
}
#ifdef CONFIG_B43_FORCE_PIO
-# define B43_FORCE_PIO 1
+# define B43_PIO_DEFAULT 1
#else
-# define B43_FORCE_PIO 0
+# define B43_PIO_DEFAULT 0
#endif
-
/* Message printing */
void b43info(struct b43_wl *wl, const char *fmt, ...)
__attribute__ ((format(printf, 2, 3)));
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 88d1fd02d40a..615af22c49fd 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1653,7 +1653,6 @@ void b43_dma_tx_resume(struct b43_wldev *dev)
b43_power_saving_ctl_bits(dev, 0);
}
-#ifdef CONFIG_B43_PIO
static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
u16 mmio_base, bool enable)
{
@@ -1687,4 +1686,3 @@ void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
mmio_base = b43_dmacontroller_base(type, engine_index);
direct_fifo_rx(dev, type, mmio_base, enable);
}
-#endif /* CONFIG_B43_PIO */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 629c166cc512..9eb4f5ead6ff 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -102,6 +102,9 @@ int b43_modparam_verbose = B43_VERBOSITY_DEFAULT;
module_param_named(verbose, b43_modparam_verbose, int, 0644);
MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug");
+int b43_modparam_pio = B43_PIO_DEFAULT;
+module_param_named(pio, b43_modparam_pio, int, 0644);
+MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
static const struct ssb_device_id b43_ssb_tbl[] = {
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5),
@@ -1790,8 +1793,9 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
dma_reason[4], dma_reason[5]);
b43err(dev->wl, "This device does not support DMA "
"on your system. Please use PIO instead.\n");
- b43err(dev->wl, "CONFIG_B43_FORCE_PIO must be set in "
- "your kernel configuration.\n");
+ /* Fall back to PIO transfers if we get fatal DMA errors! */
+ dev->use_pio = 1;
+ b43_controller_restart(dev, "DMA error");
return;
}
if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
@@ -4358,7 +4362,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) ||
(dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) ||
- B43_FORCE_PIO) {
+ dev->use_pio) {
dev->__using_pio_transfers = 1;
err = b43_pio_init(dev);
} else {
@@ -4826,6 +4830,7 @@ static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl)
if (!wldev)
goto out;
+ wldev->use_pio = b43_modparam_pio;
wldev->dev = dev;
wldev->wl = wl;
b43_set_status(wldev, B43_STAT_UNINIT);
diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/b43/pio.h
index 7dd649c9ddad..7b3c42f93a16 100644
--- a/drivers/net/wireless/b43/pio.h
+++ b/drivers/net/wireless/b43/pio.h
@@ -55,8 +55,6 @@
#define B43_PIO_MAX_NR_TXPACKETS 32
-#ifdef CONFIG_B43_PIO
-
struct b43_pio_txpacket {
/* Pointer to the TX queue we belong to. */
struct b43_pio_txqueue *queue;
@@ -169,42 +167,4 @@ void b43_pio_rx(struct b43_pio_rxqueue *q);
void b43_pio_tx_suspend(struct b43_wldev *dev);
void b43_pio_tx_resume(struct b43_wldev *dev);
-
-#else /* CONFIG_B43_PIO */
-
-
-static inline int b43_pio_init(struct b43_wldev *dev)
-{
- return 0;
-}
-static inline void b43_pio_free(struct b43_wldev *dev)
-{
-}
-static inline void b43_pio_stop(struct b43_wldev *dev)
-{
-}
-static inline int b43_pio_tx(struct b43_wldev *dev,
- struct sk_buff *skb)
-{
- return 0;
-}
-static inline void b43_pio_handle_txstatus(struct b43_wldev *dev,
- const struct b43_txstatus *status)
-{
-}
-static inline void b43_pio_get_tx_stats(struct b43_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
-}
-static inline void b43_pio_rx(struct b43_pio_rxqueue *q)
-{
-}
-static inline void b43_pio_tx_suspend(struct b43_wldev *dev)
-{
-}
-static inline void b43_pio_tx_resume(struct b43_wldev *dev)
-{
-}
-
-#endif /* CONFIG_B43_PIO */
#endif /* B43_PIO_H_ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 31462813bac0..3b4c5a4610af 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -581,6 +581,8 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
+ /* reset to 0 to enable all the queues first */
+ priv->txq_ctx_active_msk = 0;
/* Map each Tx/cmd queue to its corresponding fifo */
for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
int ac = default_queue_to_tx_fifo[i];
@@ -2008,7 +2010,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
"%d index %d\n", scd_ssn , index);
freed = iwl_tx_queue_reclaim(priv, txq_id, index);
- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+ if (qc)
+ iwl_free_tfds_in_queue(priv, sta_id,
+ tid, freed);
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark) &&
@@ -2035,13 +2039,14 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
freed = iwl_tx_queue_reclaim(priv, txq_id, index);
if (qc && likely(sta_id != IWL_INVALID_STATION))
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+ else if (sta_id == IWL_INVALID_STATION)
+ IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark))
iwl_wake_queue(priv, txq_id);
}
-
if (qc && likely(sta_id != IWL_INVALID_STATION))
iwl_txq_check_empty(priv, sta_id, tid, txq_id);
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index cffaae772d51..c610e5fbd718 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -657,6 +657,8 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
+ /* reset to 0 to enable all the queues first */
+ priv->txq_ctx_active_msk = 0;
/* map qos queues to fifos one-to-one */
for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) {
int ac = iwl5000_default_queue_to_tx_fifo[i];
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 1c9866daf815..5622a55a939e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2461,7 +2461,7 @@ static int iwl_setup_mac(struct iwl_priv *priv)
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
- hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY |
+ hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
WIPHY_FLAG_DISABLE_BEACON_HINTS;
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index fa1c89ba6459..8f1b8509cd66 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -404,21 +404,6 @@ EXPORT_SYMBOL(iwl_init_scan_params);
static int iwl_scan_initiate(struct iwl_priv *priv)
{
- if (!iwl_is_ready_rf(priv)) {
- IWL_DEBUG_SCAN(priv, "Aborting scan due to not ready.\n");
- return -EIO;
- }
-
- if (test_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
- return -EAGAIN;
- }
-
- if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
- return -EAGAIN;
- }
-
IWL_DEBUG_INFO(priv, "Starting scan...\n");
set_bit(STATUS_SCANNING, &priv->status);
priv->scan_start = jiffies;
@@ -449,6 +434,18 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
goto out_unlock;
}
+ if (test_bit(STATUS_SCANNING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
/* We don't schedule scan within next_scan_jiffies period.
* Avoid scanning during possible EAPOL exchange, return
* success immediately.
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index f29786521bc3..adbb3ea4d865 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -1926,7 +1926,7 @@ static void iwl3945_init_hw_rates(struct iwl_priv *priv,
{
int i;
- for (i = 0; i < IWL_RATE_COUNT; i++) {
+ for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
rates[i].bitrate = iwl3945_rates[i].ieee * 5;
rates[i].hw_value = i; /* Rate scaling will work on indexes */
rates[i].hw_value_short = i;
@@ -3903,7 +3903,7 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
- hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY |
+ hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
WIPHY_FLAG_DISABLE_BEACON_HINTS;
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 8fb70082b45a..be792519adcd 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2612,6 +2612,23 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
return 0;
}
+/* Some architectures require additional programming to enable VGA */
+static arch_set_vga_state_t arch_set_vga_state;
+
+void __init pci_register_set_vga_state(arch_set_vga_state_t func)
+{
+ arch_set_vga_state = func; /* NULL disables */
+}
+
+static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
+ unsigned int command_bits, bool change_bridge)
+{
+ if (arch_set_vga_state)
+ return arch_set_vga_state(dev, decode, command_bits,
+ change_bridge);
+ return 0;
+}
+
/**
* pci_set_vga_state - set VGA decode state on device and parents if requested
* @dev: the PCI device
@@ -2625,9 +2642,15 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
struct pci_bus *bus;
struct pci_dev *bridge;
u16 cmd;
+ int rc;
WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
+ /* ARCH specific VGA enables */
+ rc = pci_set_vga_state_arch(dev, decode, command_bits, change_bridge);
+ if (rc)
+ return rc;
+
pci_read_config_word(dev, PCI_COMMAND, &cmd);
if (decode == true)
cmd |= command_bits;
@@ -2874,4 +2897,3 @@ EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
-
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index c28a712fd4db..e6b67f2d3229 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -3027,14 +3027,15 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
session->state = ISCSI_STATE_TERMINATE;
else if (conn->stop_stage != STOP_CONN_RECOVER)
session->state = ISCSI_STATE_IN_RECOVERY;
+
+ old_stop_stage = conn->stop_stage;
+ conn->stop_stage = flag;
spin_unlock_bh(&session->lock);
del_timer_sync(&conn->transport_timer);
iscsi_suspend_tx(conn);
spin_lock_bh(&session->lock);
- old_stop_stage = conn->stop_stage;
- conn->stop_stage = flag;
conn->c_stage = ISCSI_CONN_STOPPED;
spin_unlock_bh(&session->lock);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 34d4eb98829e..db6b07136ea3 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -170,6 +170,7 @@ static void acm_write_done(struct acm *acm, struct acm_wb *wb)
{
wb->use = 0;
acm->transmitting--;
+ usb_autopm_put_interface_async(acm->control);
}
/*
@@ -211,9 +212,12 @@ static int acm_write_start(struct acm *acm, int wbn)
}
dbg("%s susp_count: %d", __func__, acm->susp_count);
+ usb_autopm_get_interface_async(acm->control);
if (acm->susp_count) {
- acm->delayed_wb = wb;
- schedule_work(&acm->waker);
+ if (!acm->delayed_wb)
+ acm->delayed_wb = wb;
+ else
+ usb_autopm_put_interface_async(acm->control);
spin_unlock_irqrestore(&acm->write_lock, flags);
return 0; /* A white lie */
}
@@ -534,23 +538,6 @@ static void acm_softint(struct work_struct *work)
tty_kref_put(tty);
}
-static void acm_waker(struct work_struct *waker)
-{
- struct acm *acm = container_of(waker, struct acm, waker);
- int rv;
-
- rv = usb_autopm_get_interface(acm->control);
- if (rv < 0) {
- dev_err(&acm->dev->dev, "Autopm failure in %s\n", __func__);
- return;
- }
- if (acm->delayed_wb) {
- acm_start_wb(acm, acm->delayed_wb);
- acm->delayed_wb = NULL;
- }
- usb_autopm_put_interface(acm->control);
-}
-
/*
* TTY handlers
*/
@@ -1178,7 +1165,6 @@ made_compressed_probe:
acm->urb_task.func = acm_rx_tasklet;
acm->urb_task.data = (unsigned long) acm;
INIT_WORK(&acm->work, acm_softint);
- INIT_WORK(&acm->waker, acm_waker);
init_waitqueue_head(&acm->drain_wait);
spin_lock_init(&acm->throttle_lock);
spin_lock_init(&acm->write_lock);
@@ -1343,7 +1329,6 @@ static void stop_data_traffic(struct acm *acm)
tasklet_enable(&acm->urb_task);
cancel_work_sync(&acm->work);
- cancel_work_sync(&acm->waker);
}
static void acm_disconnect(struct usb_interface *intf)
@@ -1435,6 +1420,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
static int acm_resume(struct usb_interface *intf)
{
struct acm *acm = usb_get_intfdata(intf);
+ struct acm_wb *wb;
int rv = 0;
int cnt;
@@ -1449,6 +1435,21 @@ static int acm_resume(struct usb_interface *intf)
mutex_lock(&acm->mutex);
if (acm->port.count) {
rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
+
+ spin_lock_irq(&acm->write_lock);
+ if (acm->delayed_wb) {
+ wb = acm->delayed_wb;
+ acm->delayed_wb = NULL;
+ spin_unlock_irq(&acm->write_lock);
+ acm_start_wb(acm, wb);
+ } else {
+ spin_unlock_irq(&acm->write_lock);
+ }
+
+ /*
+ * delayed error checking because we must
+ * do the write path at all costs
+ */
if (rv < 0)
goto err_out;
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index c4a0ee8ffccf..519eb638b6e9 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -112,7 +112,6 @@ struct acm {
struct mutex mutex;
struct usb_cdc_line_coding line; /* bits, stop, parity */
struct work_struct work; /* work queue entry for line discipline waking up */
- struct work_struct waker;
wait_queue_head_t drain_wait; /* close processing */
struct tasklet_struct urb_task; /* rx processing */
spinlock_t throttle_lock; /* synchronize throtteling and read callback */
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
index 2e78b0784bdc..9804ee9a79d8 100644
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ b/drivers/video/backlight/mbp_nvidia_bl.c
@@ -139,6 +139,51 @@ static int mbp_dmi_match(const struct dmi_system_id *id)
static const struct dmi_system_id __initdata mbp_device_table[] = {
{
.callback = mbp_dmi_match,
+ .ident = "MacBook 1,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
+ },
+ .driver_data = (void *)&intel_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
+ .ident = "MacBook 2,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook2,1"),
+ },
+ .driver_data = (void *)&intel_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
+ .ident = "MacBook 3,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook3,1"),
+ },
+ .driver_data = (void *)&intel_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
+ .ident = "MacBook 4,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,1"),
+ },
+ .driver_data = (void *)&intel_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
+ .ident = "MacBook 4,2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,2"),
+ },
+ .driver_data = (void *)&intel_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
.ident = "MacBookPro 3,1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
diff --git a/drivers/video/sunxvr500.c b/drivers/video/sunxvr500.c
index 4cd50497264d..3803745d6eee 100644
--- a/drivers/video/sunxvr500.c
+++ b/drivers/video/sunxvr500.c
@@ -242,11 +242,27 @@ static int __devinit e3d_set_fbinfo(struct e3d_info *ep)
static int __devinit e3d_pci_register(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
+ struct device_node *of_node;
+ const char *device_type;
struct fb_info *info;
struct e3d_info *ep;
unsigned int line_length;
int err;
+ of_node = pci_device_to_OF_node(pdev);
+ if (!of_node) {
+ printk(KERN_ERR "e3d: Cannot find OF node of %s\n",
+ pci_name(pdev));
+ return -ENODEV;
+ }
+
+ device_type = of_get_property(of_node, "device_type", NULL);
+ if (!device_type) {
+ printk(KERN_INFO "e3d: Ignoring secondary output device "
+ "at %s\n", pci_name(pdev));
+ return -ENODEV;
+ }
+
err = pci_enable_device(pdev);
if (err < 0) {
printk(KERN_ERR "e3d: Cannot enable PCI device %s\n",
@@ -265,13 +281,7 @@ static int __devinit e3d_pci_register(struct pci_dev *pdev,
ep->info = info;
ep->pdev = pdev;
spin_lock_init(&ep->lock);
- ep->of_node = pci_device_to_OF_node(pdev);
- if (!ep->of_node) {
- printk(KERN_ERR "e3d: Cannot find OF node of %s\n",
- pci_name(pdev));
- err = -ENODEV;
- goto err_release_fb;
- }
+ ep->of_node = of_node;
/* Read the PCI base register of the frame buffer, which we
* need in order to interpret the RAMDAC_VID_*FB* values in
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index a6c5674c78e6..0b9190754e24 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -443,7 +443,7 @@ static void hpwdt_ping(void)
static int hpwdt_change_timer(int new_margin)
{
/* Arbitrary, can't find the card's limits */
- if (new_margin < 30 || new_margin > 600) {
+ if (new_margin < 5 || new_margin > 600) {
printk(KERN_WARNING
"hpwdt: New value passed in is invalid: %d seconds.\n",
new_margin);
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 4bdb7f1a9077..e2ebe084986b 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -115,8 +115,37 @@ enum iTCO_chipsets {
TCO_3420, /* 3420 */
TCO_3450, /* 3450 */
TCO_EP80579, /* EP80579 */
- TCO_CPTD, /* CPT Desktop */
- TCO_CPTM, /* CPT Mobile */
+ TCO_CPT1, /* Cougar Point */
+ TCO_CPT2, /* Cougar Point Desktop */
+ TCO_CPT3, /* Cougar Point Mobile */
+ TCO_CPT4, /* Cougar Point */
+ TCO_CPT5, /* Cougar Point */
+ TCO_CPT6, /* Cougar Point */
+ TCO_CPT7, /* Cougar Point */
+ TCO_CPT8, /* Cougar Point */
+ TCO_CPT9, /* Cougar Point */
+ TCO_CPT10, /* Cougar Point */
+ TCO_CPT11, /* Cougar Point */
+ TCO_CPT12, /* Cougar Point */
+ TCO_CPT13, /* Cougar Point */
+ TCO_CPT14, /* Cougar Point */
+ TCO_CPT15, /* Cougar Point */
+ TCO_CPT16, /* Cougar Point */
+ TCO_CPT17, /* Cougar Point */
+ TCO_CPT18, /* Cougar Point */
+ TCO_CPT19, /* Cougar Point */
+ TCO_CPT20, /* Cougar Point */
+ TCO_CPT21, /* Cougar Point */
+ TCO_CPT22, /* Cougar Point */
+ TCO_CPT23, /* Cougar Point */
+ TCO_CPT24, /* Cougar Point */
+ TCO_CPT25, /* Cougar Point */
+ TCO_CPT26, /* Cougar Point */
+ TCO_CPT27, /* Cougar Point */
+ TCO_CPT28, /* Cougar Point */
+ TCO_CPT29, /* Cougar Point */
+ TCO_CPT30, /* Cougar Point */
+ TCO_CPT31, /* Cougar Point */
};
static struct {
@@ -173,8 +202,37 @@ static struct {
{"3420", 2},
{"3450", 2},
{"EP80579", 2},
- {"CPT Desktop", 2},
- {"CPT Mobile", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
+ {"Cougar Point", 2},
{NULL, 0}
};
@@ -259,8 +317,37 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = {
{ ITCO_PCI_DEVICE(0x3b14, TCO_3420)},
{ ITCO_PCI_DEVICE(0x3b16, TCO_3450)},
{ ITCO_PCI_DEVICE(0x5031, TCO_EP80579)},
- { ITCO_PCI_DEVICE(0x1c42, TCO_CPTD)},
- { ITCO_PCI_DEVICE(0x1c43, TCO_CPTM)},
+ { ITCO_PCI_DEVICE(0x1c41, TCO_CPT1)},
+ { ITCO_PCI_DEVICE(0x1c42, TCO_CPT2)},
+ { ITCO_PCI_DEVICE(0x1c43, TCO_CPT3)},
+ { ITCO_PCI_DEVICE(0x1c44, TCO_CPT4)},
+ { ITCO_PCI_DEVICE(0x1c45, TCO_CPT5)},
+ { ITCO_PCI_DEVICE(0x1c46, TCO_CPT6)},
+ { ITCO_PCI_DEVICE(0x1c47, TCO_CPT7)},
+ { ITCO_PCI_DEVICE(0x1c48, TCO_CPT8)},
+ { ITCO_PCI_DEVICE(0x1c49, TCO_CPT9)},
+ { ITCO_PCI_DEVICE(0x1c4a, TCO_CPT10)},
+ { ITCO_PCI_DEVICE(0x1c4b, TCO_CPT11)},
+ { ITCO_PCI_DEVICE(0x1c4c, TCO_CPT12)},
+ { ITCO_PCI_DEVICE(0x1c4d, TCO_CPT13)},
+ { ITCO_PCI_DEVICE(0x1c4e, TCO_CPT14)},
+ { ITCO_PCI_DEVICE(0x1c4f, TCO_CPT15)},
+ { ITCO_PCI_DEVICE(0x1c50, TCO_CPT16)},
+ { ITCO_PCI_DEVICE(0x1c51, TCO_CPT17)},
+ { ITCO_PCI_DEVICE(0x1c52, TCO_CPT18)},
+ { ITCO_PCI_DEVICE(0x1c53, TCO_CPT19)},
+ { ITCO_PCI_DEVICE(0x1c54, TCO_CPT20)},
+ { ITCO_PCI_DEVICE(0x1c55, TCO_CPT21)},
+ { ITCO_PCI_DEVICE(0x1c56, TCO_CPT22)},
+ { ITCO_PCI_DEVICE(0x1c57, TCO_CPT23)},
+ { ITCO_PCI_DEVICE(0x1c58, TCO_CPT24)},
+ { ITCO_PCI_DEVICE(0x1c59, TCO_CPT25)},
+ { ITCO_PCI_DEVICE(0x1c5a, TCO_CPT26)},
+ { ITCO_PCI_DEVICE(0x1c5b, TCO_CPT27)},
+ { ITCO_PCI_DEVICE(0x1c5c, TCO_CPT28)},
+ { ITCO_PCI_DEVICE(0x1c5d, TCO_CPT29)},
+ { ITCO_PCI_DEVICE(0x1c5e, TCO_CPT30)},
+ { ITCO_PCI_DEVICE(0x1c5f, TCO_CPT31)},
{ 0, }, /* End of list */
};
MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 74a0461a9ac0..92f9590429b2 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -114,7 +114,7 @@ static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
P9_DPRINTK(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
/* No mandatory locks */
- if (__mandatory_lock(inode))
+ if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
return -ENOLCK;
if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
diff --git a/fs/block_dev.c b/fs/block_dev.c
index d11d0289f3d2..8db62b2b6df8 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -404,7 +404,7 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
* NULL first argument is nfsd_sync_dir() and that's not a directory.
*/
-static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
+int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
struct block_device *bdev = I_BDEV(filp->f_mapping->host);
int error;
@@ -418,6 +418,7 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
error = 0;
return error;
}
+EXPORT_SYMBOL(block_fsync);
/*
* pseudo-fs
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 63de4d6a8756..bc3114cb10de 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1430,6 +1430,8 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
__u32 bytes_sent;
__u16 byte_count;
+ *nbytes = 0;
+
/* cFYI(1, ("write at %lld %d bytes", offset, count));*/
if (tcon->ses == NULL)
return -ECONNABORTED;
@@ -1512,11 +1514,18 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
cifs_stats_inc(&tcon->num_writes);
if (rc) {
cFYI(1, ("Send error in write = %d", rc));
- *nbytes = 0;
} else {
*nbytes = le16_to_cpu(pSMBr->CountHigh);
*nbytes = (*nbytes) << 16;
*nbytes += le16_to_cpu(pSMBr->Count);
+
+ /*
+ * Mask off the high 16 bits when the byte count returned by the
+ * server is greater than the byte count requested by the client. Some
+ * OS/2 servers are known to set incorrect CountHigh values.
+ */
+ if (*nbytes > count)
+ *nbytes &= 0xFFFF;
}
cifs_buf_release(pSMB);
@@ -1605,6 +1614,14 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
*nbytes = le16_to_cpu(pSMBr->CountHigh);
*nbytes = (*nbytes) << 16;
*nbytes += le16_to_cpu(pSMBr->Count);
+
+ /*
+ * Mask off the high 16 bits when the byte count returned by the
+ * server is greater than the byte count requested by the client. OS/2
+ * servers are known to set incorrect CountHigh values.
+ */
+ if (*nbytes > count)
+ *nbytes &= 0xFFFF;
}
/* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 4a430ab4115c..23dc2af56dc6 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -647,38 +647,17 @@ out_lock:
return rc;
}
-static int
-ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
+static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
+ size_t *bufsiz)
{
+ struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
char *lower_buf;
- size_t lower_bufsiz;
- struct dentry *lower_dentry;
- struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
- char *plaintext_name;
- size_t plaintext_name_size;
+ size_t lower_bufsiz = PATH_MAX;
mm_segment_t old_fs;
int rc;
- lower_dentry = ecryptfs_dentry_to_lower(dentry);
- if (!lower_dentry->d_inode->i_op->readlink) {
- rc = -EINVAL;
- goto out;
- }
- mount_crypt_stat = &ecryptfs_superblock_to_private(
- dentry->d_sb)->mount_crypt_stat;
- /*
- * If the lower filename is encrypted, it will result in a significantly
- * longer name. If needed, truncate the name after decode and decrypt.
- */
- if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)
- lower_bufsiz = PATH_MAX;
- else
- lower_bufsiz = bufsiz;
- /* Released in this function */
lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL);
- if (lower_buf == NULL) {
- printk(KERN_ERR "%s: Out of memory whilst attempting to "
- "kmalloc [%zd] bytes\n", __func__, lower_bufsiz);
+ if (!lower_buf) {
rc = -ENOMEM;
goto out;
}
@@ -688,29 +667,31 @@ ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
(char __user *)lower_buf,
lower_bufsiz);
set_fs(old_fs);
- if (rc >= 0) {
- rc = ecryptfs_decode_and_decrypt_filename(&plaintext_name,
- &plaintext_name_size,
- dentry, lower_buf,
- rc);
- if (rc) {
- printk(KERN_ERR "%s: Error attempting to decode and "
- "decrypt filename; rc = [%d]\n", __func__,
- rc);
- goto out_free_lower_buf;
- }
- /* Check for bufsiz <= 0 done in sys_readlinkat() */
- rc = copy_to_user(buf, plaintext_name,
- min((size_t) bufsiz, plaintext_name_size));
- if (rc)
- rc = -EFAULT;
- else
- rc = plaintext_name_size;
- kfree(plaintext_name);
- fsstack_copy_attr_atime(dentry->d_inode, lower_dentry->d_inode);
- }
-out_free_lower_buf:
+ if (rc < 0)
+ goto out;
+ lower_bufsiz = rc;
+ rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry,
+ lower_buf, lower_bufsiz);
+out:
kfree(lower_buf);
+ return rc;
+}
+
+static int
+ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
+{
+ char *kbuf;
+ size_t kbufsiz, copied;
+ int rc;
+
+ rc = ecryptfs_readlink_lower(dentry, &kbuf, &kbufsiz);
+ if (rc)
+ goto out;
+ copied = min_t(size_t, bufsiz, kbufsiz);
+ rc = copy_to_user(buf, kbuf, copied) ? -EFAULT : copied;
+ kfree(kbuf);
+ fsstack_copy_attr_atime(dentry->d_inode,
+ ecryptfs_dentry_to_lower(dentry)->d_inode);
out:
return rc;
}
@@ -1015,6 +996,28 @@ out:
return rc;
}
+int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat)
+{
+ struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
+ int rc = 0;
+
+ mount_crypt_stat = &ecryptfs_superblock_to_private(
+ dentry->d_sb)->mount_crypt_stat;
+ generic_fillattr(dentry->d_inode, stat);
+ if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) {
+ char *target;
+ size_t targetsiz;
+
+ rc = ecryptfs_readlink_lower(dentry, &target, &targetsiz);
+ if (!rc) {
+ kfree(target);
+ stat->size = targetsiz;
+ }
+ }
+ return rc;
+}
+
int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
@@ -1039,7 +1042,7 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
lower_dentry = ecryptfs_dentry_to_lower(dentry);
if (!lower_dentry->d_inode->i_op->setxattr) {
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
goto out;
}
mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1057,7 +1060,7 @@ ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name,
int rc = 0;
if (!lower_dentry->d_inode->i_op->getxattr) {
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
goto out;
}
mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1084,7 +1087,7 @@ ecryptfs_listxattr(struct dentry *dentry, char *list, size_t size)
lower_dentry = ecryptfs_dentry_to_lower(dentry);
if (!lower_dentry->d_inode->i_op->listxattr) {
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
goto out;
}
mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1101,7 +1104,7 @@ static int ecryptfs_removexattr(struct dentry *dentry, const char *name)
lower_dentry = ecryptfs_dentry_to_lower(dentry);
if (!lower_dentry->d_inode->i_op->removexattr) {
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
goto out;
}
mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1132,6 +1135,7 @@ const struct inode_operations ecryptfs_symlink_iops = {
.put_link = ecryptfs_put_link,
.permission = ecryptfs_permission,
.setattr = ecryptfs_setattr,
+ .getattr = ecryptfs_getattr_link,
.setxattr = ecryptfs_setxattr,
.getxattr = ecryptfs_getxattr,
.listxattr = ecryptfs_listxattr,
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index b15a43a80ab7..1a037f77aa52 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -85,7 +85,6 @@ static void ecryptfs_destroy_inode(struct inode *inode)
if (lower_dentry->d_inode) {
fput(inode_info->lower_file);
inode_info->lower_file = NULL;
- d_drop(lower_dentry);
}
}
ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat);
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 874d169a193e..602d5ad6f5e7 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -139,8 +139,8 @@ typedef struct ext4_io_end {
struct inode *inode; /* file being written to */
unsigned int flag; /* unwritten or not */
int error; /* I/O error code */
- ext4_lblk_t offset; /* offset in the file */
- size_t size; /* size of the extent */
+ loff_t offset; /* offset in the file */
+ ssize_t size; /* size of the extent */
struct work_struct work; /* data work queue */
} ext4_io_end_t;
@@ -1744,7 +1744,7 @@ extern void ext4_ext_release(struct super_block *);
extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset,
loff_t len);
extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
- loff_t len);
+ ssize_t len);
extern int ext4_get_blocks(handle_t *handle, struct inode *inode,
sector_t block, unsigned int max_blocks,
struct buffer_head *bh, int flags);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 765a4826b118..c56877972b0e 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3603,7 +3603,7 @@ retry:
* Returns 0 on success.
*/
int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
- loff_t len)
+ ssize_t len)
{
handle_t *handle;
ext4_lblk_t block;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 89dbb3874943..c16c3b92d8da 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3551,7 +3551,7 @@ static int ext4_end_aio_dio_nolock(ext4_io_end_t *io)
{
struct inode *inode = io->inode;
loff_t offset = io->offset;
- size_t size = io->size;
+ ssize_t size = io->size;
int ret = 0;
ext4_debug("end_aio_dio_onlock: io 0x%p from inode %lu,list->next 0x%p,"
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index f565f24019b5..72646e2c0f48 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -309,7 +309,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
{
struct fat_mount_options *opts = &MSDOS_SB(dir->i_sb)->options;
wchar_t *ip, *ext_start, *end, *name_start;
- unsigned char base[9], ext[4], buf[8], *p;
+ unsigned char base[9], ext[4], buf[5], *p;
unsigned char charbuf[NLS_MAX_CHARSET_SIZE];
int chl, chi;
int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen;
@@ -467,7 +467,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
return 0;
}
- i = jiffies & 0xffff;
+ i = jiffies;
sz = (jiffies >> 16) & 0x7;
if (baselen > 2) {
baselen = numtail2_baselen;
@@ -476,7 +476,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
name_res[baselen + 4] = '~';
name_res[baselen + 5] = '1' + sz;
while (1) {
- sprintf(buf, "%04X", i);
+ snprintf(buf, sizeof(buf), "%04X", i & 0xffff);
memcpy(&name_res[baselen], buf, 4);
if (vfat_find_form(dir, name_res) < 0)
break;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index ee77713ce68b..bd39abc51508 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -1293,7 +1293,8 @@ static int nfs4_init_server(struct nfs_server *server,
/* Initialise the client representation from the mount data */
server->flags = data->flags;
- server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR;
+ server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR|
+ NFS_CAP_POSIX_LOCK;
server->options = data->options;
/* Get a client record */
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 8b5382efd454..af6948d6faf2 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1025,12 +1025,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
res = NULL;
goto out;
/* This turned out not to be a regular file */
+ case -EISDIR:
case -ENOTDIR:
goto no_open;
case -ELOOP:
if (!(nd->intent.open.flags & O_NOFOLLOW))
goto no_open;
- /* case -EISDIR: */
/* case -EINVAL: */
default:
goto out;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 375f0fae2c6a..ecf660204755 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1520,6 +1520,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
nfs_post_op_update_inode(dir, o_res->dir_attr);
} else
nfs_refresh_inode(dir, o_res->dir_attr);
+ if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
+ server->caps &= ~NFS_CAP_POSIX_LOCK;
if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
status = _nfs4_proc_open_confirm(data);
if (status != 0)
@@ -1660,7 +1662,7 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in
status = PTR_ERR(state);
if (IS_ERR(state))
goto err_opendata_put;
- if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0)
+ if (server->caps & NFS_CAP_POSIX_LOCK)
set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
nfs4_opendata_put(opendata);
nfs4_put_state_owner(sp);
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index a8587e90fd5a..bbf72d8f9fc0 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2121,9 +2121,15 @@ out_acl:
* and this is the root of a cross-mounted filesystem.
*/
if (ignore_crossmnt == 0 &&
- exp->ex_path.mnt->mnt_root->d_inode == dentry->d_inode) {
- err = vfs_getattr(exp->ex_path.mnt->mnt_parent,
- exp->ex_path.mnt->mnt_mountpoint, &stat);
+ dentry == exp->ex_path.mnt->mnt_root) {
+ struct path path = exp->ex_path;
+ path_get(&path);
+ while (follow_up(&path)) {
+ if (path.dentry != path.mnt->mnt_root)
+ break;
+ }
+ err = vfs_getattr(path.mnt, path.dentry, &stat);
+ path_put(&path);
if (err)
goto out_nfserr;
}
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 0501974bedd0..8ccf0f8c9cc8 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -30,6 +30,8 @@
#include "alloc.h"
#include "dlmglue.h"
#include "file.h"
+#include "inode.h"
+#include "journal.h"
#include "ocfs2_fs.h"
#include "xattr.h"
@@ -166,6 +168,60 @@ static struct posix_acl *ocfs2_get_acl(struct inode *inode, int type)
}
/*
+ * Helper function to set i_mode in memory and on disk. Some call paths
+ * will not have di_bh or a journal handle to pass, in which case this
+ * helper will create its own.
+ */
+static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh,
+ handle_t *handle, umode_t new_mode)
+{
+ int ret, commit_handle = 0;
+ struct ocfs2_dinode *di;
+
+ if (di_bh == NULL) {
+ ret = ocfs2_read_inode_block(inode, &di_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ } else
+ get_bh(di_bh);
+
+ if (handle == NULL) {
+ handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
+ OCFS2_INODE_UPDATE_CREDITS);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto out_brelse;
+ }
+
+ commit_handle = 1;
+ }
+
+ di = (struct ocfs2_dinode *)di_bh->b_data;
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ inode->i_mode = new_mode;
+ di->i_mode = cpu_to_le16(inode->i_mode);
+
+ ocfs2_journal_dirty(handle, di_bh);
+
+out_commit:
+ if (commit_handle)
+ ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
+out_brelse:
+ brelse(di_bh);
+out:
+ return ret;
+}
+
+/*
* Set the access or default ACL of an inode.
*/
static int ocfs2_set_acl(handle_t *handle,
@@ -193,9 +249,14 @@ static int ocfs2_set_acl(handle_t *handle,
if (ret < 0)
return ret;
else {
- inode->i_mode = mode;
if (ret == 0)
acl = NULL;
+
+ ret = ocfs2_acl_set_mode(inode, di_bh,
+ handle, mode);
+ if (ret)
+ return ret;
+
}
}
break;
@@ -283,6 +344,7 @@ int ocfs2_init_acl(handle_t *handle,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct posix_acl *acl = NULL;
int ret = 0;
+ mode_t mode;
if (!S_ISLNK(inode->i_mode)) {
if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
@@ -291,12 +353,17 @@ int ocfs2_init_acl(handle_t *handle,
if (IS_ERR(acl))
return PTR_ERR(acl);
}
- if (!acl)
- inode->i_mode &= ~current_umask();
+ if (!acl) {
+ mode = inode->i_mode & ~current_umask();
+ ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
+ if (ret) {
+ mlog_errno(ret);
+ goto cleanup;
+ }
+ }
}
if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) {
struct posix_acl *clone;
- mode_t mode;
if (S_ISDIR(inode->i_mode)) {
ret = ocfs2_set_acl(handle, inode, di_bh,
@@ -313,7 +380,7 @@ int ocfs2_init_acl(handle_t *handle,
mode = inode->i_mode;
ret = posix_acl_create_masq(clone, &mode);
if (ret >= 0) {
- inode->i_mode = mode;
+ ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
if (ret > 0) {
ret = ocfs2_set_acl(handle, inode,
di_bh, ACL_TYPE_ACCESS,
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index c30b644d9572..79b5dacf9312 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -152,7 +152,7 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)
#define do_error(fmt, ...) \
do{ \
- if (clean_error) \
+ if (resize) \
mlog(ML_ERROR, fmt "\n", ##__VA_ARGS__); \
else \
ocfs2_error(sb, fmt, ##__VA_ARGS__); \
@@ -160,7 +160,7 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)
static int ocfs2_validate_gd_self(struct super_block *sb,
struct buffer_head *bh,
- int clean_error)
+ int resize)
{
struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
@@ -211,7 +211,7 @@ static int ocfs2_validate_gd_self(struct super_block *sb,
static int ocfs2_validate_gd_parent(struct super_block *sb,
struct ocfs2_dinode *di,
struct buffer_head *bh,
- int clean_error)
+ int resize)
{
unsigned int max_bits;
struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
@@ -233,8 +233,11 @@ static int ocfs2_validate_gd_parent(struct super_block *sb,
return -EINVAL;
}
- if (le16_to_cpu(gd->bg_chain) >=
- le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) {
+ /* During a resize, we may encounter the case bg_chain == cl_next_free_rec. */
+ if ((le16_to_cpu(gd->bg_chain) >
+ le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) ||
+ ((le16_to_cpu(gd->bg_chain) ==
+ le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) && !resize)) {
do_error("Group descriptor #%llu has bad chain %u",
(unsigned long long)bh->b_blocknr,
le16_to_cpu(gd->bg_chain));
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 58324c299165..3cd449d23352 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -442,12 +442,13 @@ static const struct file_operations proc_lstats_operations = {
unsigned long badness(struct task_struct *p, unsigned long uptime);
static int proc_oom_score(struct task_struct *task, char *buffer)
{
- unsigned long points;
+ unsigned long points = 0;
struct timespec uptime;
do_posix_clock_monotonic_gettime(&uptime);
read_lock(&tasklist_lock);
- points = badness(task->group_leader, uptime.tv_sec);
+ if (pid_alive(task))
+ points = badness(task, uptime.tv_sec);
read_unlock(&tasklist_lock);
return sprintf(buffer, "%lu\n", points);
}
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 6e722c11ce13..6c9da00ddda2 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2321,34 +2321,34 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
if (di->dqb_valid & QIF_SPACE) {
dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace;
check_blim = 1;
- __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
+ set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
}
if (di->dqb_valid & QIF_BLIMITS) {
dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit);
dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit);
check_blim = 1;
- __set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
+ set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
}
if (di->dqb_valid & QIF_INODES) {
dm->dqb_curinodes = di->dqb_curinodes;
check_ilim = 1;
- __set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
+ set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
}
if (di->dqb_valid & QIF_ILIMITS) {
dm->dqb_isoftlimit = di->dqb_isoftlimit;
dm->dqb_ihardlimit = di->dqb_ihardlimit;
check_ilim = 1;
- __set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
+ set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
}
if (di->dqb_valid & QIF_BTIME) {
dm->dqb_btime = di->dqb_btime;
check_blim = 1;
- __set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
+ set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
}
if (di->dqb_valid & QIF_ITIME) {
dm->dqb_itime = di->dqb_itime;
check_ilim = 1;
- __set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
+ set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
}
if (check_blim) {
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index b4a7dd03bdb9..33bc410c6689 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -1619,10 +1619,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
save_mount_options(s, data);
sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
- if (!sbi) {
- errval = -ENOMEM;
- goto error_alloc;
- }
+ if (!sbi)
+ return -ENOMEM;
s->s_fs_info = sbi;
/* Set default values for options: non-aggressive tails, RO on errors */
REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
@@ -1879,12 +1877,12 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
return (0);
error:
- reiserfs_write_unlock(s);
-error_alloc:
if (jinit_done) { /* kill the commit thread, free journal ram */
journal_release_error(NULL, s);
}
+ reiserfs_write_unlock(s);
+
reiserfs_free_bitmap_cache(s);
if (SB_BUFFER_WITH_SB(s))
brelse(SB_BUFFER_WITH_SB(s));
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 66abe36c1213..1c65a2b3f4ee 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -163,14 +163,17 @@ xfs_ioend_new_eof(
}
/*
- * Update on-disk file size now that data has been written to disk.
- * The current in-memory file size is i_size. If a write is beyond
- * eof i_new_size will be the intended file size until i_size is
- * updated. If this write does not extend all the way to the valid
- * file size then restrict this update to the end of the write.
+ * Update on-disk file size now that data has been written to disk. The
+ * current in-memory file size is i_size. If a write is beyond eof i_new_size
+ * will be the intended file size until i_size is updated. If this write does
+ * not extend all the way to the valid file size then restrict this update to
+ * the end of the write.
+ *
+ * This function does not block as blocking on the inode lock in IO completion
+ * can lead to IO completion order dependency deadlocks. If it can't get the
+ * inode ilock it will return EAGAIN. Callers must handle this.
*/
-
-STATIC void
+STATIC int
xfs_setfilesize(
xfs_ioend_t *ioend)
{
@@ -181,9 +184,11 @@ xfs_setfilesize(
ASSERT(ioend->io_type != IOMAP_READ);
if (unlikely(ioend->io_error))
- return;
+ return 0;
+
+ if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
+ return EAGAIN;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
isize = xfs_ioend_new_eof(ioend);
if (isize) {
ip->i_d.di_size = isize;
@@ -191,6 +196,28 @@ xfs_setfilesize(
}
xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return 0;
+}
+
+/*
+ * Schedule IO completion handling on an xfsdatad if this was
+ * the final hold on this ioend. If we are asked to wait,
+ * flush the workqueue.
+ */
+STATIC void
+xfs_finish_ioend(
+ xfs_ioend_t *ioend,
+ int wait)
+{
+ if (atomic_dec_and_test(&ioend->io_remaining)) {
+ struct workqueue_struct *wq;
+
+ wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
+ xfsconvertd_workqueue : xfsdatad_workqueue;
+ queue_work(wq, &ioend->io_work);
+ if (wait)
+ flush_workqueue(wq);
+ }
}
/*
@@ -198,11 +225,11 @@ xfs_setfilesize(
*/
STATIC void
xfs_end_io(
- struct work_struct *work)
+ struct work_struct *work)
{
- xfs_ioend_t *ioend =
- container_of(work, xfs_ioend_t, io_work);
- struct xfs_inode *ip = XFS_I(ioend->io_inode);
+ xfs_ioend_t *ioend = container_of(work, xfs_ioend_t, io_work);
+ struct xfs_inode *ip = XFS_I(ioend->io_inode);
+ int error;
/*
* For unwritten extents we need to issue transactions to convert a
@@ -210,7 +237,6 @@ xfs_end_io(
*/
if (ioend->io_type == IOMAP_UNWRITTEN &&
likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
- int error;
error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
ioend->io_size);
@@ -222,30 +248,23 @@ xfs_end_io(
* We might have to update the on-disk file size after extending
* writes.
*/
- if (ioend->io_type != IOMAP_READ)
- xfs_setfilesize(ioend);
- xfs_destroy_ioend(ioend);
-}
-
-/*
- * Schedule IO completion handling on a xfsdatad if this was
- * the final hold on this ioend. If we are asked to wait,
- * flush the workqueue.
- */
-STATIC void
-xfs_finish_ioend(
- xfs_ioend_t *ioend,
- int wait)
-{
- if (atomic_dec_and_test(&ioend->io_remaining)) {
- struct workqueue_struct *wq;
-
- wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
- xfsconvertd_workqueue : xfsdatad_workqueue;
- queue_work(wq, &ioend->io_work);
- if (wait)
- flush_workqueue(wq);
+ if (ioend->io_type != IOMAP_READ) {
+ error = xfs_setfilesize(ioend);
+ ASSERT(!error || error == EAGAIN);
}
+
+ /*
+ * If we didn't complete processing of the ioend, requeue it to the
+ * tail of the workqueue for another attempt later. Otherwise destroy
+ * it.
+ */
+ if (error == EAGAIN) {
+ atomic_inc(&ioend->io_remaining);
+ xfs_finish_ioend(ioend, 0);
+ /* ensure we don't spin on blocked ioends */
+ delay(1);
+ } else
+ xfs_destroy_ioend(ioend);
}
/*
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 1f5e4bb5e970..6b6b39416ad3 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -613,7 +613,8 @@ xfssyncd(
set_freezable();
timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
for (;;) {
- timeleft = schedule_timeout_interruptible(timeleft);
+ if (list_empty(&mp->m_sync_list))
+ timeleft = schedule_timeout_interruptible(timeleft);
/* swsusp */
try_to_freeze();
if (kthread_should_stop() && list_empty(&mp->m_sync_list))
@@ -633,8 +634,7 @@ xfssyncd(
list_add_tail(&mp->m_sync_work.w_list,
&mp->m_sync_list);
}
- list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
- list_move(&work->w_list, &tmp);
+ list_splice_init(&mp->m_sync_list, &tmp);
spin_unlock(&mp->m_sync_lock);
list_for_each_entry_safe(work, n, &tmp, w_list) {
@@ -693,12 +693,12 @@ xfs_inode_set_reclaim_tag(
xfs_mount_t *mp = ip->i_mount;
xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino);
- read_lock(&pag->pag_ici_lock);
+ write_lock(&pag->pag_ici_lock);
spin_lock(&ip->i_flags_lock);
__xfs_inode_set_reclaim_tag(pag, ip);
__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
spin_unlock(&ip->i_flags_lock);
- read_unlock(&pag->pag_ici_lock);
+ write_unlock(&pag->pag_ici_lock);
xfs_put_perag(mp, pag);
}
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 155e798f30a1..fd21160a6238 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -190,13 +190,12 @@ xfs_iget_cache_hit(
trace_xfs_iget_reclaim(ip);
/*
- * We need to set XFS_INEW atomically with clearing the
- * reclaimable tag so that we do have an indicator of the
- * inode still being initialized.
+ * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
+ * from stomping over us while we recycle the inode. We can't
+ * clear the radix tree reclaimable tag yet as it requires
+ * pag_ici_lock to be held exclusive.
*/
- ip->i_flags |= XFS_INEW;
- ip->i_flags &= ~XFS_IRECLAIMABLE;
- __xfs_inode_clear_reclaim_tag(mp, pag, ip);
+ ip->i_flags |= XFS_IRECLAIM;
spin_unlock(&ip->i_flags_lock);
read_unlock(&pag->pag_ici_lock);
@@ -216,7 +215,15 @@ xfs_iget_cache_hit(
trace_xfs_iget_reclaim(ip);
goto out_error;
}
+
+ write_lock(&pag->pag_ici_lock);
+ spin_lock(&ip->i_flags_lock);
+ ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM);
+ ip->i_flags |= XFS_INEW;
+ __xfs_inode_clear_reclaim_tag(mp, pag, ip);
inode->i_state = I_NEW;
+ spin_unlock(&ip->i_flags_lock);
+ write_unlock(&pag->pag_ici_lock);
} else {
/* If the VFS inode is being torn down, pause and try again. */
if (!igrab(inode)) {
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index e6f3b120f51a..0cbdccc4003d 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -6,6 +6,7 @@
{0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \
@@ -375,6 +376,7 @@
{0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0, 0, 0}
#define r128_PCI_IDS \
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index aa95508d2f95..2c445e113790 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -266,9 +266,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 16
+#define DM_VERSION_MINOR 17
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2009-11-05)"
+#define DM_VERSION_EXTRA "-ioctl (2010-03-05)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
@@ -316,4 +316,9 @@ enum {
*/
#define DM_QUERY_INACTIVE_TABLE_FLAG (1 << 12) /* In */
+/*
+ * If set, a uevent was generated for which the caller may need to wait.
+ */
+#define DM_UEVENT_GENERATED_FLAG (1 << 13) /* Out */
+
#endif /* _LINUX_DM_IOCTL_H */
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 5a361f85cfec..da7e52b099f3 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -64,9 +64,12 @@ extern bool freeze_task(struct task_struct *p, bool sig_only);
extern void cancel_freezing(struct task_struct *p);
#ifdef CONFIG_CGROUP_FREEZER
-extern int cgroup_frozen(struct task_struct *task);
+extern int cgroup_freezing_or_frozen(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
-static inline int cgroup_frozen(struct task_struct *task) { return 0; }
+static inline int cgroup_freezing_or_frozen(struct task_struct *task)
+{
+ return 0;
+}
#endif /* !CONFIG_CGROUP_FREEZER */
/*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 917508a27ac7..5191f49c2fec 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2214,6 +2214,7 @@ extern int generic_segment_checks(const struct iovec *iov,
/* fs/block_dev.c */
extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos);
+extern int block_fsync(struct file *filp, struct dentry *dentry, int datasync);
/* fs/splice.c */
extern ssize_t generic_file_splice_read(struct file *, loff_t *,
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index ece0b1c33816..e117b1aee69c 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -86,7 +86,8 @@ union { \
*/
#define INIT_KFIFO(name) \
name = __kfifo_initializer(sizeof(name##kfifo_buffer) - \
- sizeof(struct kfifo), name##kfifo_buffer)
+ sizeof(struct kfifo), \
+ name##kfifo_buffer + sizeof(struct kfifo))
/**
* DEFINE_KFIFO - macro to define and initialize a kfifo
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index bd5a616d9373..1fe293e98a08 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -53,7 +53,7 @@ extern struct kmem_cache *kvm_vcpu_cache;
*/
struct kvm_io_bus {
int dev_count;
-#define NR_IOBUS_DEVS 6
+#define NR_IOBUS_DEVS 200
struct kvm_io_device *devs[NR_IOBUS_DEVS];
};
@@ -116,6 +116,11 @@ struct kvm_memory_slot {
int user_alloc;
};
+static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
+{
+ return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+}
+
struct kvm_kernel_irq_routing_entry {
u32 gsi;
u32 type;
diff --git a/include/linux/module.h b/include/linux/module.h
index 6cb1a3cab5d3..bd465d47216a 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -457,7 +457,7 @@ void symbol_put_addr(void *addr);
static inline local_t *__module_ref_addr(struct module *mod, int cpu)
{
#ifdef CONFIG_SMP
- return (local_t *) (mod->refptr + per_cpu_offset(cpu));
+ return (local_t *) per_cpu_ptr(mod->refptr, cpu);
#else
return &mod->ref;
#endif
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 34fc6be5bfcf..ebc48090c2a7 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -176,6 +176,7 @@ struct nfs_server {
#define NFS_CAP_ATIME (1U << 11)
#define NFS_CAP_CTIME (1U << 12)
#define NFS_CAP_MTIME (1U << 13)
+#define NFS_CAP_POSIX_LOCK (1U << 14)
/* maximum number of slots to use */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c1968f464c38..0afb5272d859 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -959,6 +959,11 @@ static inline int pci_proc_domain(struct pci_bus *bus)
}
#endif /* CONFIG_PCI_DOMAINS */
+/* some architectures require additional setup to direct VGA traffic */
+typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
+ unsigned int command_bits, bool change_bridge);
+extern void pci_register_set_vga_state(arch_set_vga_state_t func);
+
#else /* CONFIG_PCI is not enabled */
/*
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index cca8a044e2b6..0be824320580 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2417,6 +2417,9 @@
#define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21
#define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30
#define PCI_DEVICE_ID_INTEL_IOAT 0x1a38
+#define PCI_DEVICE_ID_INTEL_CPT_SMBUS 0x1c22
+#define PCI_DEVICE_ID_INTEL_CPT_LPC1 0x1c42
+#define PCI_DEVICE_ID_INTEL_CPT_LPC2 0x1c43
#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
#define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411
#define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 59e9ef6aab40..eb3f34d57419 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -47,17 +47,20 @@ static inline struct freezer *task_freezer(struct task_struct *task)
struct freezer, css);
}
-int cgroup_frozen(struct task_struct *task)
+int cgroup_freezing_or_frozen(struct task_struct *task)
{
struct freezer *freezer;
enum freezer_state state;
task_lock(task);
freezer = task_freezer(task);
- state = freezer->state;
+ if (!freezer->css.cgroup->parent)
+ state = CGROUP_THAWED; /* root cgroup can't be frozen */
+ else
+ state = freezer->state;
task_unlock(task);
- return state == CGROUP_FROZEN;
+ return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
}
/*
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 8f6284f9e620..b75d3d2d71f8 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -850,6 +850,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
if (new->flags & IRQF_ONESHOT)
desc->status |= IRQ_ONESHOT;
+ /*
+ * Force MSI interrupts to run with interrupts
+ * disabled. The multi vector cards can cause stack
+ * overflows due to nested interrupts when enough of
+ * them are directed to a core and fire at the same
+ * time.
+ */
+ if (desc->msi_desc)
+ new->flags |= IRQF_DISABLED;
+
if (!(desc->status & IRQ_NOAUTOEN)) {
desc->depth = 0;
desc->status &= ~IRQ_DISABLED;
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 1199bdaab194..8627512a2b8f 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -600,9 +600,9 @@ static int static_obj(void *obj)
* percpu var?
*/
for_each_possible_cpu(i) {
- start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
- + per_cpu_offset(i);
+ start = (unsigned long) per_cpu_ptr(&__per_cpu_start, i);
+ end = (unsigned long) per_cpu_ptr(&__per_cpu_start, i)
+ + PERCPU_ENOUGH_ROOM;
if ((addr >= start) && (addr < end))
return 1;
diff --git a/kernel/module.c b/kernel/module.c
index f82386bd9ee9..5b6ce39e5f65 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -405,7 +405,7 @@ static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size)
int cpu;
for_each_possible_cpu(cpu)
- memcpy(pcpudest + per_cpu_offset(cpu), from, size);
+ memcpy(per_cpu_ptr(pcpudest, cpu), from, size);
}
#else /* ... !CONFIG_SMP */
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 5ade1bdcf366..de5301557774 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -145,7 +145,7 @@ static void thaw_tasks(bool nosig_only)
if (nosig_only && should_send_signal(p))
continue;
- if (cgroup_frozen(p))
+ if (cgroup_freezing_or_frozen(p))
continue;
thaw_process(p);
diff --git a/kernel/sched.c b/kernel/sched.c
index e6b1ac499313..21c1cf2e27aa 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7050,7 +7050,9 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
int ret;
cpumask_var_t mask;
- if (len < cpumask_size())
+ if ((len * BITS_PER_BYTE) < nr_cpu_ids)
+ return -EINVAL;
+ if (len & (sizeof(unsigned long)-1))
return -EINVAL;
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
@@ -7058,10 +7060,12 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
ret = sched_getaffinity(pid, mask);
if (ret == 0) {
- if (copy_to_user(user_mask_ptr, mask, cpumask_size()))
+ size_t retlen = min_t(size_t, len, cpumask_size());
+
+ if (copy_to_user(user_mask_ptr, mask, retlen))
ret = -EFAULT;
else
- ret = cpumask_size();
+ ret = retlen;
}
free_cpumask_var(mask);
diff --git a/mm/readahead.c b/mm/readahead.c
index 337b20e946f6..fe1a069fb595 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -502,7 +502,7 @@ void page_cache_sync_readahead(struct address_space *mapping,
return;
/* be dumb */
- if (filp->f_mode & FMODE_RANDOM) {
+ if (filp && (filp->f_mode & FMODE_RANDOM)) {
force_page_cache_readahead(mapping, filp, offset, req_size);
return;
}
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 6a4331429598..ba1fadb01192 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -749,9 +749,6 @@ ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
switch (fc & IEEE80211_FCTL_STYPE) {
case IEEE80211_STYPE_ACTION:
- if (skb->len < IEEE80211_MIN_ACTION_SIZE)
- return RX_DROP_MONITOR;
- /* fall through */
case IEEE80211_STYPE_PROBE_RESP:
case IEEE80211_STYPE_BEACON:
skb_queue_tail(&ifmsh->skb_queue, skb);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index d28acb6b1f81..4eed81bde1a6 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -391,7 +391,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
if (SN_GT(mpath->sn, orig_sn) ||
(mpath->sn == orig_sn &&
action == MPATH_PREQ &&
- new_metric > mpath->metric)) {
+ new_metric >= mpath->metric)) {
process = false;
fresh_info = false;
}
@@ -611,7 +611,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr,
cpu_to_le32(orig_sn), 0, target_addr,
- cpu_to_le32(target_sn), mpath->next_hop->sta.addr, hopcount,
+ cpu_to_le32(target_sn), next_hop, hopcount,
ttl, cpu_to_le32(lifetime), cpu_to_le32(metric),
0, sdata);
rcu_read_unlock();
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 3e1ea43d5be6..a71c32c2778c 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2355,6 +2355,11 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
/* should never get here */
WARN_ON(1);
break;
+ case MESH_PLINK_CATEGORY:
+ case MESH_PATH_SEL_CATEGORY:
+ if (ieee80211_vif_is_mesh(&sdata->vif))
+ return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
+ break;
}
return 1;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 70c79c3013fa..1fdc0a562b47 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1945,6 +1945,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
void ieee80211_tx_pending(unsigned long data)
{
struct ieee80211_local *local = (struct ieee80211_local *)data;
+ struct ieee80211_sub_if_data *sdata;
unsigned long flags;
int i;
bool txok;
@@ -1983,6 +1984,11 @@ void ieee80211_tx_pending(unsigned long data)
if (!txok)
break;
}
+
+ if (skb_queue_empty(&local->pending[i]))
+ list_for_each_entry_rcu(sdata, &local->interfaces, list)
+ netif_tx_wake_queue(
+ netdev_get_tx_queue(sdata->dev, i));
}
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 3848140313f5..27212e8fcaf4 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -280,13 +280,13 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
/* someone still has this queue stopped */
return;
- if (!skb_queue_empty(&local->pending[queue]))
+ if (skb_queue_empty(&local->pending[queue])) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdata, &local->interfaces, list)
+ netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
+ rcu_read_unlock();
+ } else
tasklet_schedule(&local->tx_pending_tasklet);
-
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list)
- netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
- rcu_read_unlock();
}
void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -1145,6 +1145,14 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
}
+ rcu_read_lock();
+ if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
+ list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ ieee80211_sta_tear_down_BA_sessions(sta);
+ }
+ }
+ rcu_read_unlock();
+
/* add back keys */
list_for_each_entry(sdata, &local->interfaces, list)
if (netif_running(sdata->dev))
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 9ace8eb0207c..062a8b051b83 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -125,6 +125,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
"{Intel, ICH9},"
"{Intel, ICH10},"
"{Intel, PCH},"
+ "{Intel, CPT},"
"{Intel, SCH},"
"{ATI, SB450},"
"{ATI, SB600},"
@@ -449,6 +450,7 @@ struct azx {
/* driver types */
enum {
AZX_DRIVER_ICH,
+ AZX_DRIVER_PCH,
AZX_DRIVER_SCH,
AZX_DRIVER_ATI,
AZX_DRIVER_ATIHDMI,
@@ -463,6 +465,7 @@ enum {
static char *driver_short_names[] __devinitdata = {
[AZX_DRIVER_ICH] = "HDA Intel",
+ [AZX_DRIVER_PCH] = "HDA Intel PCH",
[AZX_DRIVER_SCH] = "HDA Intel MID",
[AZX_DRIVER_ATI] = "HDA ATI SB",
[AZX_DRIVER_ATIHDMI] = "HDA ATI HDMI",
@@ -1065,6 +1068,7 @@ static void azx_init_pci(struct azx *chip)
0x01, NVIDIA_HDA_ENABLE_COHBIT);
break;
case AZX_DRIVER_SCH:
+ case AZX_DRIVER_PCH:
pci_read_config_word(chip->pci, INTEL_SCH_HDA_DEVC, &snoop);
if (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP) {
pci_write_config_word(chip->pci, INTEL_SCH_HDA_DEVC,
@@ -2268,6 +2272,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB),
SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
{}
};
@@ -2357,6 +2362,8 @@ static struct snd_pci_quirk msi_black_list[] __devinitdata = {
SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
+ SND_PCI_QUIRK(0x1849, 0x0888, "ASRock", 0), /* Athlon64 X2 + nvidia */
+ SND_PCI_QUIRK(0xa0a0, 0x0575, "Aopen MZ915-M", 0), /* ICH6 */
{}
};
@@ -2431,6 +2438,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
if (bdl_pos_adj[dev] < 0) {
switch (chip->driver_type) {
case AZX_DRIVER_ICH:
+ case AZX_DRIVER_PCH:
bdl_pos_adj[dev] = 1;
break;
default:
@@ -2709,6 +2717,8 @@ static struct pci_device_id azx_ids[] = {
{ PCI_DEVICE(0x8086, 0x3a6e), .driver_data = AZX_DRIVER_ICH },
/* PCH */
{ PCI_DEVICE(0x8086, 0x3b56), .driver_data = AZX_DRIVER_ICH },
+ /* CPT */
+ { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH },
/* SCH */
{ PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH },
/* ATI SB 450/600 */
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 706944126d11..263bf3bd4793 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -1805,6 +1805,14 @@ static int patch_ad1981(struct hda_codec *codec)
case AD1981_THINKPAD:
spec->mixers[0] = ad1981_thinkpad_mixers;
spec->input_mux = &ad1981_thinkpad_capture_source;
+ /* set the upper limit of the mixer amp to 0dB to avoid
+ * possible damage from overloading
+ */
+ snd_hda_override_amp_caps(codec, 0x11, HDA_INPUT,
+ (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
+ (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
+ (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
+ (1 << AC_AMPCAP_MUTE_SHIFT));
break;
case AD1981_TOSHIBA:
spec->mixers[0] = ad1981_hp_mixers;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a79f84119967..bd8a567c9367 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -9074,6 +9074,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = {
SND_PCI_QUIRK(0x1462, 0xaa08, "MSI", ALC883_TARGA_2ch_DIG),
SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG),
+ SND_PCI_QUIRK(0x1558, 0x0571, "Clevo laptop M570U", ALC883_3ST_6ch_DIG),
SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720),
SND_PCI_QUIRK(0x1558, 0x0722, "Clevo laptop M720SR", ALC883_CLEVO_M720),
SND_PCI_QUIRK(0x1558, 0x5409, "Clevo laptop M540R", ALC883_CLEVO_M540R),
diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c
index a83d1968a845..32f98535c0b4 100644
--- a/sound/pci/mixart/mixart.c
+++ b/sound/pci/mixart/mixart.c
@@ -1161,13 +1161,15 @@ static long snd_mixart_BA0_read(struct snd_info_entry *entry, void *file_private
unsigned long count, unsigned long pos)
{
struct mixart_mgr *mgr = entry->private_data;
+ unsigned long maxsize;
- count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
- if(count <= 0)
+ if (pos >= MIXART_BA0_SIZE)
return 0;
- if(pos + count > MIXART_BA0_SIZE)
- count = (long)(MIXART_BA0_SIZE - pos);
- if(copy_to_user_fromio(buf, MIXART_MEM( mgr, pos ), count))
+ maxsize = MIXART_BA0_SIZE - pos;
+ if (count > maxsize)
+ count = maxsize;
+ count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
+ if (copy_to_user_fromio(buf, MIXART_MEM(mgr, pos), count))
return -EFAULT;
return count;
}
@@ -1180,13 +1182,15 @@ static long snd_mixart_BA1_read(struct snd_info_entry *entry, void *file_private
unsigned long count, unsigned long pos)
{
struct mixart_mgr *mgr = entry->private_data;
+ unsigned long maxsize;
- count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
- if(count <= 0)
+ if (pos > MIXART_BA1_SIZE)
return 0;
- if(pos + count > MIXART_BA1_SIZE)
- count = (long)(MIXART_BA1_SIZE - pos);
- if(copy_to_user_fromio(buf, MIXART_REG( mgr, pos ), count))
+ maxsize = MIXART_BA1_SIZE - pos;
+ if (count > maxsize)
+ count = maxsize;
+ count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
+ if (copy_to_user_fromio(buf, MIXART_REG(mgr, pos), count))
return -EFAULT;
return count;
}
diff --git a/sound/usb/usbmidi.c b/sound/usb/usbmidi.c
index b2da478a0fae..c7cb207963f5 100644
--- a/sound/usb/usbmidi.c
+++ b/sound/usb/usbmidi.c
@@ -984,6 +984,8 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
DEFINE_WAIT(wait);
long timeout = msecs_to_jiffies(50);
+ if (ep->umidi->disconnected)
+ return;
/*
* The substream buffer is empty, but some data might still be in the
* currently active URBs, so we have to wait for those to complete.
@@ -1121,14 +1123,21 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi* umidi,
* Frees an output endpoint.
* May be called when ep hasn't been initialized completely.
*/
-static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint* ep)
+static void snd_usbmidi_out_endpoint_clear(struct snd_usb_midi_out_endpoint *ep)
{
unsigned int i;
for (i = 0; i < OUTPUT_URBS; ++i)
- if (ep->urbs[i].urb)
+ if (ep->urbs[i].urb) {
free_urb_and_buffer(ep->umidi, ep->urbs[i].urb,
ep->max_transfer);
+ ep->urbs[i].urb = NULL;
+ }
+}
+
+static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint *ep)
+{
+ snd_usbmidi_out_endpoint_clear(ep);
kfree(ep);
}
@@ -1260,15 +1269,18 @@ void snd_usbmidi_disconnect(struct list_head* p)
usb_kill_urb(ep->out->urbs[j].urb);
if (umidi->usb_protocol_ops->finish_out_endpoint)
umidi->usb_protocol_ops->finish_out_endpoint(ep->out);
+ ep->out->active_urbs = 0;
+ if (ep->out->drain_urbs) {
+ ep->out->drain_urbs = 0;
+ wake_up(&ep->out->drain_wait);
+ }
}
if (ep->in)
for (j = 0; j < INPUT_URBS; ++j)
usb_kill_urb(ep->in->urbs[j]);
/* free endpoints here; later call can result in Oops */
- if (ep->out) {
- snd_usbmidi_out_endpoint_delete(ep->out);
- ep->out = NULL;
- }
+ if (ep->out)
+ snd_usbmidi_out_endpoint_clear(ep->out);
if (ep->in) {
snd_usbmidi_in_endpoint_delete(ep->in);
ep->in = NULL;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6d0e484b40f3..bb06fca047f2 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -638,7 +638,7 @@ skip_lpage:
/* Allocate page dirty bitmap if needed */
if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
- unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
+ unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
new.dirty_bitmap = vmalloc(dirty_bytes);
if (!new.dirty_bitmap)
@@ -721,7 +721,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
{
struct kvm_memory_slot *memslot;
int r, i;
- int n;
+ unsigned long n;
unsigned long any = 0;
r = -EINVAL;
@@ -733,7 +733,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
if (!memslot->dirty_bitmap)
goto out;
- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ n = kvm_dirty_bitmap_bytes(memslot);
for (i = 0; !any && i < n/sizeof(long); ++i)
any = memslot->dirty_bitmap[i];
@@ -1075,10 +1075,13 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
memslot = gfn_to_memslot_unaliased(kvm, gfn);
if (memslot && memslot->dirty_bitmap) {
unsigned long rel_gfn = gfn - memslot->base_gfn;
+ unsigned long *p = memslot->dirty_bitmap +
+ rel_gfn / BITS_PER_LONG;
+ int offset = rel_gfn % BITS_PER_LONG;
/* avoid RMW */
- if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
- generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
+ if (!generic_test_le_bit(offset, p))
+ generic___set_le_bit(offset, p);
}
}