author		Tejun Heo <tj@kernel.org>	2014-02-10 19:34:30 -0500
committer	Tejun Heo <tj@kernel.org>	2014-02-10 19:34:30 -0500
commit		a8fa94e0f2ab665f9aa665195618178b61ed8304 (patch)
tree		4060502fffd7f56d379372d517f8dcc31bf63955
parent		9561a8961c708ff6ba3e71a817af0f16bdc1d885 (diff)
parent		b28a960c42fcd9cfc987441fa6d1c1a471f0f9ed (diff)
Merge branch 'master' into driver-core-next-test-merge-rc2
da9846ae1518 ("kernfs: make kernfs_deactivate() honor KERNFS_LOCKDEP
flag") in driver-core-linus conflicts with kernfs_drain() updates in
driver-core-next. The former just adds the missing KERNFS_LOCKDEP
checks which are already handled by kernfs_lockdep() checks in
driver-core-next. The conflict can be resolved by taking code from
driver-core-next.
Conflicts:
fs/kernfs/dir.c
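
For reference, the kernfs_lockdep() helper kept from driver-core-next gates every lockdep annotation on the per-node KERNFS_LOCKDEP flag, which is what makes the explicit flag checks added by da9846ae1518 redundant. The pattern, as it appears in the fs/kernfs/dir.c code included further down:

	static bool kernfs_lockdep(struct kernfs_node *kn)
	{
	#ifdef CONFIG_DEBUG_LOCK_ALLOC
		/* only nodes that opted in carry a valid dep_map */
		return kn->flags & KERNFS_LOCKDEP;
	#else
		return false;
	#endif
	}

	/* every annotation site is wrapped in the check, e.g.: */
	if (kernfs_lockdep(kn))
		rwsem_release(&kn->dep_map, 1, _RET_IP_);

Because every annotation site goes through this helper, taking the driver-core-next version of the file preserves both fixes.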
203 files changed, 4354 insertions, 972 deletions
diff --git a/Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt
new file mode 100644
index 0000000..aee38e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt
@@ -0,0 +1,18 @@
+TI-NSPIRE interrupt controller
+
+Required properties:
+- compatible: Compatible property value should be "lsi,zevio-intc".
+
+- reg: Physical base address of the controller and length of memory mapped
+	region.
+
+- interrupt-controller : Identifies the node as an interrupt controller
+
+Example:
+
+interrupt-controller {
+	compatible = "lsi,zevio-intc";
+	interrupt-controller;
+	reg = <0xDC000000 0x1000>;
+	#interrupt-cells = <1>;
+};
diff --git a/Documentation/dvb/contributors.txt b/Documentation/dvb/contributors.txt
index 47c3009..731a009 100644
--- a/Documentation/dvb/contributors.txt
+++ b/Documentation/dvb/contributors.txt
@@ -78,7 +78,7 @@ Peter Beutner <p.beutner@gmx.net>
 Wilson Michaels <wilsonmichaels@earthlink.net>
   for the lgdt330x frontend driver, and various bugfixes
 
-Michael Krufky <mkrufky@m1k.net>
+Michael Krufky <mkrufky@linuxtv.org>
   for maintaining v4l/dvb inter-tree dependencies
 
 Taylor Jacob <rtjacob@earthlink.net>
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 8f441da..7116fda 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1726,16 +1726,16 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			option description.
 
 	memmap=nn[KMG]@ss[KMG]
-			[KNL] Force usage of a specific region of memory
-			Region of memory to be used, from ss to ss+nn.
+			[KNL] Force usage of a specific region of memory.
+			Region of memory to be used is from ss to ss+nn.
 
 	memmap=nn[KMG]#ss[KMG]
 			[KNL,ACPI] Mark specific memory as ACPI data.
-			Region of memory to be used, from ss to ss+nn.
+			Region of memory to be marked is from ss to ss+nn.
 
 	memmap=nn[KMG]$ss[KMG]
 			[KNL,ACPI] Mark specific memory as reserved.
-			Region of memory to be used, from ss to ss+nn.
+			Region of memory to be reserved is from ss to ss+nn.
 			Example: Exclude memory from 0x18690000-0x1869ffff
 			         memmap=64K$0x18690000
 			         or
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index dd4327f..27bbcfc 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -36,6 +36,7 @@ config ARM64
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS
 	select HAVE_MEMBLOCK
+	select HAVE_PATA_PLATFORM
 	select HAVE_PERF_EVENTS
 	select IRQ_DOMAIN
 	select MODULES_USE_ELF_RELA
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 84139be..7959dd0 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
@@ -19,6 +18,7 @@
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
@@ -27,6 +27,7 @@
 CONFIG_ARCH_VEXPRESS=y
 CONFIG_ARCH_XGENE=y
 CONFIG_SMP=y
 CONFIG_PREEMPT=y
+CONFIG_CMA=y
 CONFIG_CMDLINE="console=ttyAMA0"
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_COMPAT=y
@@ -42,14 +43,17 @@
 CONFIG_IP_PNP_BOOTP=y
 # CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
-CONFIG_BLK_DEV=y
+CONFIG_DMA_CMA=y
 CONFIG_SCSI=y
 # CONFIG_SCSI_PROC_FS is not set
 CONFIG_BLK_DEV_SD=y
 # CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
-CONFIG_MII=y
 CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
 # CONFIG_WLAN is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_SERIO_I8042 is not set
@@ -62,13 +66,19 @@
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_FB=y
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_USB_SUPPORT is not set
+CONFIG_USB=y
+CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_ARMMMCI=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 01de5aa..0237f08 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -54,8 +54,7 @@ static inline void atomic_add(int i, atomic_t *v)
 "	stxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }
 
 static inline int atomic_add_return(int i, atomic_t *v)
@@ -64,14 +63,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	int result;
 
 	asm volatile("// atomic_add_return\n"
-"1:	ldaxr	%w0, %2\n"
+"1:	ldxr	%w0, %2\n"
 "	add	%w0, %w0, %w3\n"
 "	stlxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
@@ -86,8 +86,7 @@ static inline void atomic_sub(int i, atomic_t *v)
 "	stxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }
 
 static inline int atomic_sub_return(int i, atomic_t *v)
@@ -96,14 +95,15 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	int result;
 
 	asm volatile("// atomic_sub_return\n"
-"1:	ldaxr	%w0, %2\n"
+"1:	ldxr	%w0, %2\n"
 "	sub	%w0, %w0, %w3\n"
 "	stlxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
@@ -112,17 +112,20 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	unsigned long tmp;
 	int oldval;
 
+	smp_mb();
+
 	asm volatile("// atomic_cmpxchg\n"
-"1:	ldaxr	%w1, %2\n"
+"1:	ldxr	%w1, %2\n"
 "	cmp	%w1, %w3\n"
 "	b.ne	2f\n"
-"	stlxr	%w0, %w4, %2\n"
+"	stxr	%w0, %w4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
 	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
 	: "Ir" (old), "r" (new)
-	: "cc", "memory");
+	: "cc");
 
+	smp_mb();
 	return oldval;
 }
 
@@ -173,8 +176,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 "	stxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }
 
 static inline long atomic64_add_return(long i, atomic64_t *v)
@@ -183,14 +185,15 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_add_return\n"
-"1:	ldaxr	%0, %2\n"
+"1:	ldxr	%0, %2\n"
 "	add	%0, %0, %3\n"
 "	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
@@ -205,8 +208,7 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 "	stxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }
 
 static inline long atomic64_sub_return(long i, atomic64_t *v)
@@ -215,14 +217,15 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_sub_return\n"
-"1:	ldaxr	%0, %2\n"
+"1:	ldxr	%0, %2\n"
 "	sub	%0, %0, %3\n"
 "	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
@@ -231,17 +234,20 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
 	long oldval;
 	unsigned long res;
 
+	smp_mb();
+
 	asm volatile("// atomic64_cmpxchg\n"
-"1:	ldaxr	%1, %2\n"
+"1:	ldxr	%1, %2\n"
 "	cmp	%1, %3\n"
 "	b.ne	2f\n"
-"	stlxr	%w0, %4, %2\n"
+"	stxr	%w0, %4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
 	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
 	: "Ir" (old), "r" (new)
-	: "cc", "memory");
+	: "cc");
 
+	smp_mb();
 	return oldval;
 }
 
@@ -253,11 +259,12 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_dec_if_positive\n"
-"1:	ldaxr	%0, %2\n"
+"1:	ldxr	%0, %2\n"
 "	subs	%0, %0, #1\n"
 "	b.mi	2f\n"
 "	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b\n"
+"	dmb	ish\n"
 "2:"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	:
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 78e20ba..409ca37 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -25,7 +25,7 @@
 #define wfi()		asm volatile("wfi" : : : "memory")
 
 #define isb()		asm volatile("isb" : : : "memory")
-#define dsb()		asm volatile("dsb sy" : : : "memory")
+#define dsb(opt)	asm volatile("dsb sy" : : : "memory")
 
 #define mb()		dsb()
 #define rmb()		asm volatile("dsb ld" : : : "memory")
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index fea9ee3..88932498 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -116,6 +116,7 @@ extern void flush_dcache_page(struct page *);
 static inline void __flush_icache_all(void)
 {
 	asm("ic	ialluis");
+	dsb();
 }
 
 #define flush_dcache_mmap_lock(mapping) \
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 56166d7..57c0fa7 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -29,44 +29,45 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 	switch (size) {
 	case 1:
 		asm volatile("//	__xchg1\n"
-		"1:	ldaxrb	%w0, %2\n"
+		"1:	ldxrb	%w0, %2\n"
 		"	stlxrb	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
 			: "r" (x)
-			: "cc", "memory");
+			: "memory");
 		break;
 	case 2:
 		asm volatile("//	__xchg2\n"
-		"1:	ldaxrh	%w0, %2\n"
+		"1:	ldxrh	%w0, %2\n"
 		"	stlxrh	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
 			: "r" (x)
-			: "cc", "memory");
+			: "memory");
 		break;
 	case 4:
 		asm volatile("//	__xchg4\n"
-		"1:	ldaxr	%w0, %2\n"
+		"1:	ldxr	%w0, %2\n"
 		"	stlxr	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
 			: "r" (x)
-			: "cc", "memory");
+			: "memory");
 		break;
 	case 8:
 		asm volatile("//	__xchg8\n"
-		"1:	ldaxr	%0, %2\n"
+		"1:	ldxr	%0, %2\n"
 		"	stlxr	%w1, %3, %2\n"
 		"	cbnz	%w1, 1b\n"
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
 			: "r" (x)
-			: "cc", "memory");
+			: "memory");
 		break;
 	default:
 		BUILD_BUG();
 	}
 
+	smp_mb();
 	return ret;
 }
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 7883412..c4a7f94 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -42,7 +42,7 @@
 #define ESR_EL1_EC_SP_ALIGN	(0x26)
 #define ESR_EL1_EC_FP_EXC32	(0x28)
 #define ESR_EL1_EC_FP_EXC64	(0x2C)
-#define ESR_EL1_EC_SERRROR	(0x2F)
+#define ESR_EL1_EC_SERROR	(0x2F)
 #define ESR_EL1_EC_BREAKPT_EL0	(0x30)
 #define ESR_EL1_EC_BREAKPT_EL1	(0x31)
 #define ESR_EL1_EC_SOFTSTP_EL0	(0x32)
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 78cc3ab..5f750dc 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -24,10 +24,11 @@
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)	\
 	asm volatile(							\
-"1:	ldaxr	%w1, %2\n"						\
+"1:	ldxr	%w1, %2\n"						\
 	insn "\n"							\
 "2:	stlxr	%w3, %w0, %2\n"						\
 "	cbnz	%w3, 1b\n"						\
+"	dmb	ish\n"							\
 "3:\n"								\
 "	.pushsection .fixup,\"ax\"\n"					\
 "	.align	2\n"							\
@@ -40,7 +41,7 @@
 "	.popsection\n"							\
 	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp)	\
 	: "r" (oparg), "Ir" (-EFAULT)					\
-	: "cc", "memory")
+	: "memory")
 
 static inline int
 futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
@@ -111,11 +112,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		return -EFAULT;
 
 	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-"1:	ldaxr	%w1, %2\n"
+"1:	ldxr	%w1, %2\n"
 "	sub	%w3, %w1, %w4\n"
 "	cbnz	%w3, 3f\n"
 "2:	stlxr	%w3, %w5, %2\n"
 "	cbnz	%w3, 1b\n"
+"	dmb	ish\n"
 "3:\n"
 "	.pushsection .fixup,\"ax\"\n"
 "4:	mov	%w0, %w6\n"
@@ -127,7 +129,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 "	.popsection\n"
 	: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
 	: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
-	: "cc", "memory");
+	: "memory");
 
 	*uval = val;
 	return ret;
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index c98ef47..0eb3986 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -231,7 +231,7 @@
 #define ESR_EL2_EC_SP_ALIGN	(0x26)
 #define ESR_EL2_EC_FP_EXC32	(0x28)
 #define ESR_EL2_EC_FP_EXC64	(0x2C)
-#define ESR_EL2_EC_SERRROR	(0x2F)
+#define ESR_EL2_EC_SERROR	(0x2F)
 #define ESR_EL2_EC_BREAKPT	(0x30)
 #define ESR_EL2_EC_BREAKPT_HYP	(0x31)
 #define ESR_EL2_EC_SOFTSTP	(0x32)
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 3d5cf06..c45b7b1 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -132,7 +132,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 	"	cbnz	%w0, 2b\n"
 	: "=&r" (tmp), "+Q" (rw->lock)
 	: "r" (0x80000000)
-	: "cc", "memory");
+	: "memory");
 }
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -146,7 +146,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 	"1:\n"
 	: "=&r" (tmp), "+Q" (rw->lock)
 	: "r" (0x80000000)
-	: "cc", "memory");
+	: "memory");
 
 	return !tmp;
 }
@@ -187,7 +187,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 	"	cbnz	%w1, 2b\n"
 	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
 	:
-	: "cc", "memory");
+	: "memory");
 }
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
@@ -201,7 +201,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 	"	cbnz	%w1, 1b\n"
 	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
 	:
-	: "cc", "memory");
+	: "memory");
 }
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
@@ -216,7 +216,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 	"1:\n"
 	: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
 	:
-	: "cc", "memory");
+	: "memory");
 
 	return !tmp2;
 }
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 58125bf..bb8eb8a 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -399,7 +399,10 @@ __SYSCALL(374, compat_sys_sendmmsg)
 __SYSCALL(375, sys_setns)
 __SYSCALL(376, compat_sys_process_vm_readv)
 __SYSCALL(377, compat_sys_process_vm_writev)
-__SYSCALL(378, sys_ni_syscall)			/* 378 for kcmp */
+__SYSCALL(378, sys_kcmp)
+__SYSCALL(379, sys_finit_module)
+__SYSCALL(380, sys_sched_setattr)
+__SYSCALL(381, sys_sched_getattr)
 
 #define __NR_compat_syscalls		379
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
index 63c48ff..7787208 100644
--- a/arch/arm64/kernel/kuser32.S
+++ b/arch/arm64/kernel/kuser32.S
@@ -38,12 +38,13 @@ __kuser_cmpxchg64:			// 0xffff0f60
	.inst	0xe92d00f0		//	push	{r4, r5, r6, r7}
	.inst	0xe1c040d0		//	ldrd	r4, r5, [r0]
	.inst	0xe1c160d0		//	ldrd	r6, r7, [r1]
-	.inst	0xe1b20e9f		// 1:	ldaexd	r0, r1, [r2]
+	.inst	0xe1b20f9f		// 1:	ldrexd	r0, r1, [r2]
	.inst	0xe0303004		//	eors	r3, r0, r4
	.inst	0x00313005		//	eoreqs	r3, r1, r5
	.inst	0x01a23e96		//	stlexdeq r3, r6, [r2]
	.inst	0x03330001		//	teqeq	r3, #1
	.inst	0x0afffff9		//	beq	1b
+	.inst	0xf57ff05b		//	dmb	ish
	.inst	0xe2730000		//	rsbs	r0, r3, #0
	.inst	0xe8bd00f0		//	pop	{r4, r5, r6, r7}
	.inst	0xe12fff1e		//	bx	lr
@@ -55,11 +56,12 @@ __kuser_memory_barrier:			// 0xffff0fa0
	.align	5
 
__kuser_cmpxchg:			// 0xffff0fc0
-	.inst	0xe1923e9f		// 1:	ldaex	r3, [r2]
+	.inst	0xe1923f9f		// 1:	ldrex	r3, [r2]
	.inst	0xe0533000		//	subs	r3, r3, r0
	.inst	0x01823e91		//	stlexeq	r3, r1, [r2]
	.inst	0x03330001		//	teqeq	r3, #1
	.inst	0x0afffffa		//	beq	1b
+	.inst	0xf57ff05b		//	dmb	ish
	.inst	0xe2730000		//	rsbs	r0, r3, #0
	.inst	0xe12fff1e		//	bx	lr
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 65d40cf..a7149ca 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -238,6 +238,8 @@ void update_vsyscall(struct timekeeper *tk)
 	vdso_data->use_syscall			= use_syscall;
 	vdso_data->xtime_coarse_sec		= xtime_coarse.tv_sec;
 	vdso_data->xtime_coarse_nsec		= xtime_coarse.tv_nsec;
+	vdso_data->wtm_clock_sec		= tk->wall_to_monotonic.tv_sec;
+	vdso_data->wtm_clock_nsec		= tk->wall_to_monotonic.tv_nsec;
 
 	if (!use_syscall) {
 		vdso_data->cs_cycle_last	= tk->clock->cycle_last;
@@ -245,8 +247,6 @@ void update_vsyscall(struct timekeeper *tk)
 		vdso_data->xtime_clock_nsec	= tk->xtime_nsec;
 		vdso_data->cs_mult		= tk->mult;
 		vdso_data->cs_shift		= tk->shift;
-		vdso_data->wtm_clock_sec	= tk->wall_to_monotonic.tv_sec;
-		vdso_data->wtm_clock_nsec	= tk->wall_to_monotonic.tv_nsec;
 	}
 
 	smp_wmb();
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index d8064af..6d20b7d 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -48,7 +48,7 @@ $(obj-vdso): %.o: %.S
 
 # Actual build commands
 quiet_cmd_vdsold = VDSOL $@
-      cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@
+      cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
 quiet_cmd_vdsoas = VDSOA $@
       cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index f0a6d10..fe652ff 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -103,6 +103,8 @@ ENTRY(__kernel_clock_gettime)
	bl	__do_get_tspec
	seqcnt_check w9, 1b
 
+	mov	x30, x2
+
	cmp	w0, #CLOCK_MONOTONIC
	b.ne	6f
 
@@ -118,6 +120,9 @@ ENTRY(__kernel_clock_gettime)
	ccmp	w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
	b.ne	8f
 
+	/* xtime_coarse_nsec is already right-shifted */
+	mov	x12, #0
+
	/* Get coarse timespec. */
	adr	vdso_data, _vdso_data
3:	seqcnt_acquire
@@ -156,7 +161,7 @@ ENTRY(__kernel_clock_gettime)
	lsr	x11, x11, x12
	stp	x10, x11, [x1, #TSPEC_TV_SEC]
	mov	x0, xzr
-	ret	x2
+	ret
7:
	mov	x30, x2
8:	/* Syscall fallback. */
diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
index e5db797..7dac371 100644
--- a/arch/arm64/lib/bitops.S
+++ b/arch/arm64/lib/bitops.S
@@ -46,11 +46,12 @@ ENTRY(	\name	)
	mov	x2, #1
	add	x1, x1, x0, lsr #3	// Get word offset
	lsl	x4, x2, x3		// Create mask
-1:	ldaxr	x2, [x1]
+1:	ldxr	x2, [x1]
	lsr	x0, x2, x3		// Save old value of bit
	\instr	x2, x2, x4		// toggle bit
	stlxr	w5, x2, [x1]
	cbnz	w5, 1b
+	dmb	ish
	and	x0, x0, #1
3:	ret
ENDPROC(\name	)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 45b5ab5..fbd7678 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -45,6 +45,7 @@ static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
 	if (IS_ENABLED(CONFIG_DMA_CMA)) {
 		struct page *page;
 
+		size = PAGE_ALIGN(size);
 		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
 							get_order(size));
 		if (!page)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index f557ebb..f8dc7e8 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -203,10 +203,18 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 	do {
 		next = pmd_addr_end(addr, end);
 		/* try section mapping first */
-		if (((addr | next | phys) & ~SECTION_MASK) == 0)
+		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
+			pmd_t old_pmd =*pmd;
 			set_pmd(pmd, __pmd(phys | prot_sect_kernel));
-		else
+			/*
+			 * Check for previous table entries created during
+			 * boot (__create_page_tables) and flush them.
+			 */
+			if (!pmd_none(old_pmd))
+				flush_tlb_all();
+		} else {
 			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
+		}
 		phys += next - addr;
 	} while (pmd++, addr = next, addr != end);
 }
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 7083cda..62c6101 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -32,17 +32,10 @@
 
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	pgd_t *new_pgd;
-
 	if (PGD_SIZE == PAGE_SIZE)
-		new_pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+		return (pgd_t *)get_zeroed_page(GFP_KERNEL);
 	else
-		new_pgd = kzalloc(PGD_SIZE, GFP_KERNEL);
-
-	if (!new_pgd)
-		return NULL;
-
-	return new_pgd;
+		return kzalloc(PGD_SIZE, GFP_KERNEL);
 }
 
 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index afd45e0..ae763d8b 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -11,7 +11,7 @@
 
-#define NR_syscalls		312 /* length of syscall table */
+#define NR_syscalls		314 /* length of syscall table */
 
 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 34fd6fe..715e85f 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -325,5 +325,7 @@
 #define __NR_process_vm_writev		1333
 #define __NR_accept4			1334
 #define __NR_finit_module		1335
+#define __NR_sched_setattr		1336
+#define __NR_sched_getattr		1337
 
 #endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index ddea607f..fa8d61a 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1773,6 +1773,8 @@ sys_call_table:
	data8 sys_process_vm_writev
	data8 sys_accept4
	data8 sys_finit_module			// 1335
+	data8 sys_sched_setattr
+	data8 sys_sched_getattr
	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
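
Across ia64 above and mips below, wiring up sched_setattr/sched_getattr follows the same three-part invariant: a new __NR_* number, a matching entry at that offset in the syscall table, and an enlarged table-length constant. The ia64 numbers in this hunk check out arithmetically (ia64 syscall numbers start at 1024, so 1024 + 314 - 1 = 1337 = __NR_sched_getattr); a throwaway sketch of the invariant, not kernel code:

	#include <assert.h>

	#define IA64_SYSCALL_BASE	1024	/* ia64 syscall numbers start here */
	#define __NR_sched_getattr	1337	/* from the hunk above */
	#define NR_syscalls		314	/* new length of the syscall table */

	int main(void)
	{
		/* the highest syscall number must still index into the table */
		assert(__NR_sched_getattr - IA64_SYSCALL_BASE < NR_syscalls);
		return 0;
	}

The .org directive at the end of the ia64 sys_call_table enforces the same thing at assembly time: per its comment, it guards against failures to increase NR_syscalls.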
diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
index 11f3ad2..5483906 100644
--- a/arch/mips/alchemy/devboards/db1000.c
+++ b/arch/mips/alchemy/devboards/db1000.c
@@ -534,13 +534,10 @@ static int __init db1000_dev_init(void)
 		s0 = AU1100_GPIO1_INT;
 		s1 = AU1100_GPIO4_INT;
 
+		gpio_request(19, "sd0_cd");
+		gpio_request(20, "sd1_cd");
 		gpio_direction_input(19);	/* sd0 cd# */
 		gpio_direction_input(20);	/* sd1 cd# */
-		gpio_direction_input(21);	/* touch pendown# */
-		gpio_direction_input(207);	/* SPI MISO */
-		gpio_direction_output(208, 0);	/* SPI MOSI */
-		gpio_direction_output(209, 1);	/* SPI SCK */
-		gpio_direction_output(210, 1);	/* SPI CS# */
 
 		/* spi_gpio on SSI0 pins */
 		pfc = __raw_readl((void __iomem *)SYS_PINFUNC);
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index cfe092f..6b97495 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -74,6 +74,8 @@ static inline int __enable_fpu(enum fpu_mode mode)
 	default:
 		BUG();
 	}
+
+	return SIGFPE;
 }
 
 #define __disable_fpu()							\
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 1dee279..d6e154a 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -369,16 +369,18 @@
 #define __NR_process_vm_writev		(__NR_Linux + 346)
 #define __NR_kcmp			(__NR_Linux + 347)
 #define __NR_finit_module		(__NR_Linux + 348)
+#define __NR_sched_setattr		(__NR_Linux + 349)
+#define __NR_sched_getattr		(__NR_Linux + 350)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls		348
+#define __NR_Linux_syscalls		350
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux			4000
-#define __NR_O32_Linux_syscalls		348
+#define __NR_O32_Linux_syscalls		350
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
@@ -695,16 +697,18 @@
 #define __NR_kcmp			(__NR_Linux + 306)
 #define __NR_finit_module		(__NR_Linux + 307)
 #define __NR_getdents64			(__NR_Linux + 308)
+#define __NR_sched_setattr		(__NR_Linux + 309)
+#define __NR_sched_getattr		(__NR_Linux + 310)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
 */
-#define __NR_Linux_syscalls		308
+#define __NR_Linux_syscalls		310
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux			5000
-#define __NR_64_Linux_syscalls		308
+#define __NR_64_Linux_syscalls		310
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
@@ -1025,15 +1029,17 @@
 #define __NR_process_vm_writev		(__NR_Linux + 310)
 #define __NR_kcmp			(__NR_Linux + 311)
 #define __NR_finit_module		(__NR_Linux + 312)
+#define __NR_sched_setattr		(__NR_Linux + 313)
+#define __NR_sched_getattr		(__NR_Linux + 314)
 
 /*
  * Offset of the last N32 flavoured syscall
 */
-#define __NR_Linux_syscalls		312
+#define __NR_Linux_syscalls		314
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux			6000
-#define __NR_N32_Linux_syscalls		312
+#define __NR_N32_Linux_syscalls		314
 
 #endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index e8e541b..a5b14f4 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -563,3 +563,5 @@ EXPORT(sys_call_table)
	PTR	sys_process_vm_writev
	PTR	sys_kcmp
	PTR	sys_finit_module
+	PTR	sys_sched_setattr
+	PTR	sys_sched_getattr		/* 4350 */
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 57e3742..b56e254 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -425,4 +425,6 @@ EXPORT(sys_call_table)
	PTR	sys_kcmp
	PTR	sys_finit_module
	PTR	sys_getdents64
+	PTR	sys_sched_setattr
+	PTR	sys_sched_getattr		/* 5310 */
	.size	sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 2f48f59..f7e5b72 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -418,4 +418,6 @@ EXPORT(sysn32_call_table)
	PTR	compat_sys_process_vm_writev	/* 6310 */
	PTR	sys_kcmp
	PTR	sys_finit_module
+	PTR	sys_sched_setattr
+	PTR	sys_sched_getattr
	.size	sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index f1acdb4..6788727d 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -541,4 +541,6 @@ EXPORT(sys32_call_table)
	PTR	compat_sys_process_vm_writev
	PTR	sys_kcmp
	PTR	sys_finit_module
+	PTR	sys_sched_setattr
+	PTR	sys_sched_getattr		/* 4350 */
	.size	sys32_call_table,.-sys32_call_table
diff --git a/arch/parisc/hpux/fs.c b/arch/parisc/hpux/fs.c
index 88d0962..2bedafe 100644
--- a/arch/parisc/hpux/fs.c
+++ b/arch/parisc/hpux/fs.c
@@ -33,22 +33,9 @@
 
 int hpux_execve(struct pt_regs *regs)
 {
-	int error;
-	struct filename *filename;
-
-	filename = getname((const char __user *) regs->gr[26]);
-	error = PTR_ERR(filename);
-	if (IS_ERR(filename))
-		goto out;
-
-	error = do_execve(filename->name,
+	return do_execve(getname((const char __user *) regs->gr[26]),
			  (const char __user *const __user *) regs->gr[25],
			  (const char __user *const __user *) regs->gr[24]);
-
-	putname(filename);
-
-out:
-	return error;
 }
 
 struct hpux_dirent {
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index b3feabd..cf3c008 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -25,6 +25,7 @@
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/spinlock.h>
 #include "crypt_s390.h"
 
 #define AES_KEYLEN_128		1
@@ -32,6 +33,7 @@
 #define AES_KEYLEN_256		4
 
 static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
 static char keylen_flag;
 
 struct s390_aes_ctx {
@@ -758,43 +760,67 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 	return aes_set_key(tfm, in_key, key_len);
 }
 
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+	unsigned int i, n;
+
+	/* only use complete blocks, max. PAGE_SIZE */
+	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
+	for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
+		memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
+		       AES_BLOCK_SIZE);
+		crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
+	}
+	return n;
+}
+
 static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
 			 struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
 {
 	int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
-	unsigned int i, n, nbytes;
-	u8 buf[AES_BLOCK_SIZE];
-	u8 *out, *in;
+	unsigned int n, nbytes;
+	u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
+	u8 *out, *in, *ctrptr = ctrbuf;
 
 	if (!walk->nbytes)
 		return ret;
 
-	memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
+	if (spin_trylock(&ctrblk_lock))
+		ctrptr = ctrblk;
+
+	memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
 	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
 		while (nbytes >= AES_BLOCK_SIZE) {
-			/* only use complete blocks, max. PAGE_SIZE */
-			n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
-						 nbytes & ~(AES_BLOCK_SIZE - 1);
-			for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
-				memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
-				       AES_BLOCK_SIZE);
-				crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
-			}
-			ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
-			if (ret < 0 || ret != n)
+			if (ctrptr == ctrblk)
+				n = __ctrblk_init(ctrptr, nbytes);
+			else
+				n = AES_BLOCK_SIZE;
+			ret = crypt_s390_kmctr(func, sctx->key, out, in,
+					       n, ctrptr);
+			if (ret < 0 || ret != n) {
+				if (ctrptr == ctrblk)
+					spin_unlock(&ctrblk_lock);
 				return -EIO;
+			}
 			if (n > AES_BLOCK_SIZE)
-				memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
+				memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
 				       AES_BLOCK_SIZE);
-			crypto_inc(ctrblk, AES_BLOCK_SIZE);
+			crypto_inc(ctrptr, AES_BLOCK_SIZE);
 			out += n;
 			in += n;
 			nbytes -= n;
 		}
 		ret = blkcipher_walk_done(desc, walk, nbytes);
 	}
+	if (ctrptr == ctrblk) {
+		if (nbytes)
+			memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
+		else
+			memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
+		spin_unlock(&ctrblk_lock);
+	}
 	/*
 	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
 	 */
@@ -802,14 +828,15 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
 		ret = crypt_s390_kmctr(func, sctx->key, buf, in,
-				       AES_BLOCK_SIZE, ctrblk);
+				       AES_BLOCK_SIZE, ctrbuf);
 		if (ret < 0 || ret != AES_BLOCK_SIZE)
 			return -EIO;
 		memcpy(out, buf, nbytes);
-		crypto_inc(ctrblk, AES_BLOCK_SIZE);
+		crypto_inc(ctrbuf, AES_BLOCK_SIZE);
 		ret = blkcipher_walk_done(desc, walk, 0);
+		memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
 	}
-	memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
+
 	return ret;
 }
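
The ctr_aes_crypt() rewrite above replaces unserialized use of the shared ctrblk page with spin_trylock(&ctrblk_lock): the winner batches up to PAGE_SIZE of counter blocks in the shared page, while anyone else falls back to a single on-stack block instead of spinning. A userspace sketch of that trylock-or-local-buffer idea (pthreads; all names illustrative):

	#include <pthread.h>
	#include <string.h>

	#define BLOCK_SIZE 16

	static unsigned char shared_buf[4096];		/* like ctrblk */
	static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;

	static void process_blocks(const unsigned char *iv)
	{
		unsigned char local_buf[BLOCK_SIZE];	/* like ctrbuf */
		unsigned char *buf = local_buf;
		size_t batch = sizeof(local_buf);

		if (pthread_mutex_trylock(&shared_lock) == 0) {
			buf = shared_buf;		/* won the lock: big batches */
			batch = sizeof(shared_buf);
		}

		memcpy(buf, iv, BLOCK_SIZE);
		/* ... generate/encrypt up to 'batch' bytes of counter blocks ... */
		(void)batch;

		if (buf == shared_buf)
			pthread_mutex_unlock(&shared_lock);
	}

The error paths in the kernel version must remember the same distinction, which is why every early return now checks ctrptr == ctrblk before unlocking.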
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 200f2a1..0a5aac8 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -25,6 +25,7 @@
 #define DES3_KEY_SIZE	(3 * DES_KEY_SIZE)
 
 static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
 
 struct s390_des_ctx {
 	u8 iv[DES_BLOCK_SIZE];
@@ -105,29 +106,35 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
 }
 
 static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
-			    u8 *iv, struct blkcipher_walk *walk)
+			    struct blkcipher_walk *walk)
 {
+	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	int ret = blkcipher_walk_virt(desc, walk);
 	unsigned int nbytes = walk->nbytes;
+	struct {
+		u8 iv[DES_BLOCK_SIZE];
+		u8 key[DES3_KEY_SIZE];
+	} param;
 
 	if (!nbytes)
 		goto out;
 
-	memcpy(iv, walk->iv, DES_BLOCK_SIZE);
+	memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
+	memcpy(param.key, ctx->key, DES3_KEY_SIZE);
 	do {
 		/* only use complete blocks */
 		unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
 		u8 *out = walk->dst.virt.addr;
 		u8 *in = walk->src.virt.addr;
 
-		ret = crypt_s390_kmc(func, iv, out, in, n);
+		ret = crypt_s390_kmc(func, &param, out, in, n);
 		if (ret < 0 || ret != n)
 			return -EIO;
 
 		nbytes &= DES_BLOCK_SIZE - 1;
 		ret = blkcipher_walk_done(desc, walk, nbytes);
 	} while ((nbytes = walk->nbytes));
-	memcpy(walk->iv, iv, DES_BLOCK_SIZE);
+	memcpy(walk->iv, param.iv, DES_BLOCK_SIZE);
 
out:
 	return ret;
@@ -179,22 +186,20 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc,
 			   struct scatterlist *dst, struct scatterlist *src,
 			   unsigned int nbytes)
 {
-	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk);
+	return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk);
 }
 
 static int cbc_des_decrypt(struct blkcipher_desc *desc,
 			   struct scatterlist *dst, struct scatterlist *src,
 			   unsigned int nbytes)
 {
-	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk);
+	return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk);
 }
 
 static struct crypto_alg cbc_des_alg = {
@@ -327,22 +332,20 @@ static int cbc_des3_encrypt(struct blkcipher_desc *desc,
 			    struct scatterlist *dst, struct scatterlist *src,
 			    unsigned int nbytes)
 {
-	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk);
+	return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk);
 }
 
 static int cbc_des3_decrypt(struct blkcipher_desc *desc,
 			    struct scatterlist *dst, struct scatterlist *src,
 			    unsigned int nbytes)
 {
-	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk);
+	return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk);
 }
 
 static struct crypto_alg cbc_des3_alg = {
@@ -366,54 +369,80 @@ static struct crypto_alg cbc_des3_alg = {
 	}
 };
 
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+	unsigned int i, n;
+
+	/* align to block size, max. PAGE_SIZE */
+	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
+	for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
+		memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE);
+		crypto_inc(ctrptr + i, DES_BLOCK_SIZE);
+	}
+	return n;
+}
+
 static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
-			    struct s390_des_ctx *ctx, struct blkcipher_walk *walk)
+			    struct s390_des_ctx *ctx,
+			    struct blkcipher_walk *walk)
 {
 	int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
-	unsigned int i, n, nbytes;
-	u8 buf[DES_BLOCK_SIZE];
-	u8 *out, *in;
+	unsigned int n, nbytes;
+	u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE];
+	u8 *out, *in, *ctrptr = ctrbuf;
+
+	if (!walk->nbytes)
+		return ret;
 
-	memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE);
+	if (spin_trylock(&ctrblk_lock))
+		ctrptr = ctrblk;
+
+	memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE);
 	while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
 		while (nbytes >= DES_BLOCK_SIZE) {
-			/* align to block size, max. PAGE_SIZE */
-			n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
-						 nbytes & ~(DES_BLOCK_SIZE - 1);
-			for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
-				memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE,
-				       DES_BLOCK_SIZE);
-				crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
-			}
-			ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
-			if (ret < 0 || ret != n)
+			if (ctrptr == ctrblk)
+				n = __ctrblk_init(ctrptr, nbytes);
+			else
+				n = DES_BLOCK_SIZE;
+			ret = crypt_s390_kmctr(func, ctx->key, out, in,
+					       n, ctrptr);
+			if (ret < 0 || ret != n) {
+				if (ctrptr == ctrblk)
+					spin_unlock(&ctrblk_lock);
 				return -EIO;
+			}
 			if (n > DES_BLOCK_SIZE)
-				memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
+				memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE,
 				       DES_BLOCK_SIZE);
-			crypto_inc(ctrblk, DES_BLOCK_SIZE);
+			crypto_inc(ctrptr, DES_BLOCK_SIZE);
 			out += n;
 			in += n;
 			nbytes -= n;
 		}
 		ret = blkcipher_walk_done(desc, walk, nbytes);
 	}
-
+	if (ctrptr == ctrblk) {
+		if (nbytes)
+			memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE);
+		else
+			memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
+		spin_unlock(&ctrblk_lock);
+	}
 	/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
 	if (nbytes) {
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
 		ret = crypt_s390_kmctr(func, ctx->key, buf, in,
-				       DES_BLOCK_SIZE, ctrblk);
+				       DES_BLOCK_SIZE, ctrbuf);
 		if (ret < 0 || ret != DES_BLOCK_SIZE)
 			return -EIO;
 		memcpy(out, buf, nbytes);
-		crypto_inc(ctrblk, DES_BLOCK_SIZE);
+		crypto_inc(ctrbuf, DES_BLOCK_SIZE);
 		ret = blkcipher_walk_done(desc, walk, 0);
+		memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE);
 	}
-	memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE);
 	return ret;
 }
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 940e50e..0af5250 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -444,6 +444,7 @@ config X86_INTEL_MID
	bool "Intel MID platform support"
	depends on X86_32
	depends on X86_EXTENDED_PLATFORM
+	depends on X86_PLATFORM_DEVICES
	depends on PCI
	depends on PCI_GOANY
	depends on X86_IO_APIC
@@ -1051,9 +1052,9 @@ config MICROCODE_INTEL
	  This options enables microcode patch loading support for Intel
	  processors.
 
-	  For latest news and information on obtaining all the required
-	  Intel ingredients for this driver, check:
-	  <http://www.urbanmyth.org/microcode/>.
+	  For the current Intel microcode data package go to
+	  <https://downloadcenter.intel.com> and search for
+	  'Linux Processor Microcode Data File'.
 
 config MICROCODE_AMD
	bool "AMD microcode loading support"
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 0f3621e..321a52c 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -184,6 +184,7 @@ config HAVE_MMIOTRACE_SUPPORT
 config X86_DECODER_SELFTEST
	bool "x86 instruction decoder selftest"
	depends on DEBUG_KERNEL && KPROBES
+	depends on !COMPILE_TEST
	---help---
	 Perform x86 instruction decoder selftests at build time.
	 This option is useful for checking the sanity of x86 instruction
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index a54ee1d..aaac3b2 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -19,7 +19,7 @@ extern int amd_cache_northbridges(void);
 extern void amd_flush_garts(void);
 extern int amd_numa_init(void);
 extern int amd_get_subcaches(int);
-extern int amd_set_subcaches(int, int);
+extern int amd_set_subcaches(int, unsigned long);
 
 struct amd_l3_cache {
	unsigned indices;
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index e6d90ba..04905bfc 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -62,7 +62,7 @@ static inline void __flush_tlb_all(void)
 
 static inline void __flush_tlb_one(unsigned long addr)
 {
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
 }
 
@@ -93,13 +93,13 @@ static inline void __flush_tlb_one(unsigned long addr)
 */
 static inline void __flush_tlb_up(void)
 {
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();
 }
 
 static inline void flush_tlb_all(void)
 {
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb_all();
 }
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 787e1bb..3e276eb 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -52,8 +52,7 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
 extern int m2p_add_override(unsigned long mfn, struct page *page,
			    struct gnttab_map_grant_ref *kmap_op);
 extern int m2p_remove_override(struct page *page,
-			       struct gnttab_map_grant_ref *kmap_op,
-			       unsigned long mfn);
+			       struct gnttab_map_grant_ref *kmap_op);
 extern struct page *m2p_find_override(unsigned long mfn);
 extern unsigned long m2p_find_override_pfn(unsigned long mfn,
					   unsigned long pfn);
@@ -122,7 +121,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
		pfn = m2p_find_override_pfn(mfn, ~0);
	}
 
-	/* 
+	/*
	 * pfn is ~0 if there are no entries in the m2p for mfn or if the
	 * entry doesn't map back to the mfn and m2p_override doesn't have a
	 * valid entry for it.
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 59554dc..dec8de4 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -179,7 +179,7 @@ int amd_get_subcaches(int cpu)
	return (mask >> (4 * cuid)) & 0xf;
 }
 
-int amd_set_subcaches(int cpu, int mask)
+int amd_set_subcaches(int cpu, unsigned long mask)
 {
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index d3153e2..c67ffa6 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -767,10 +767,7 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 
 static void cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
 {
-	tlb_flushall_shift = 5;
-
-	if (c->x86 <= 0x11)
-		tlb_flushall_shift = 4;
+	tlb_flushall_shift = 6;
 }
 
 static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 3db61c6..5cd9bfa 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -640,21 +640,17 @@ static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
	case 0x61d: /* six-core 45 nm xeon "Dunnington" */
		tlb_flushall_shift = -1;
		break;
+	case 0x63a: /* Ivybridge */
+		tlb_flushall_shift = 2;
+		break;
	case 0x61a: /* 45 nm nehalem, "Bloomfield" */
	case 0x61e: /* 45 nm nehalem, "Lynnfield" */
	case 0x625: /* 32 nm nehalem, "Clarkdale" */
	case 0x62c: /* 32 nm nehalem, "Gulftown" */
	case 0x62e: /* 45 nm nehalem-ex, "Beckton" */
	case 0x62f: /* 32 nm Xeon E7 */
-		tlb_flushall_shift = 6;
-		break;
	case 0x62a: /* SandyBridge */
	case 0x62d: /* SandyBridge, "Romely-EP" */
-		tlb_flushall_shift = 5;
-		break;
-	case 0x63a: /* Ivybridge */
-		tlb_flushall_shift = 1;
-		break;
	default:
		tlb_flushall_shift = 6;
	}
diff --git a/arch/x86/kernel/cpu/microcode/amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c
index 8384c0f..617a9e2 100644
--- a/arch/x86/kernel/cpu/microcode/amd_early.c
+++ b/arch/x86/kernel/cpu/microcode/amd_early.c
@@ -285,6 +285,15 @@ static void __init collect_cpu_sig_on_bsp(void *arg)
 
	uci->cpu_sig.sig = cpuid_eax(0x00000001);
 }
+
+static void __init get_bsp_sig(void)
+{
+	unsigned int bsp = boot_cpu_data.cpu_index;
+	struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
+
+	if (!uci->cpu_sig.sig)
+		smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
+}
 #else
 void load_ucode_amd_ap(void)
 {
@@ -337,31 +346,37 @@ void load_ucode_amd_ap(void)
 
 int __init save_microcode_in_initrd_amd(void)
 {
+	unsigned long cont;
	enum ucode_state ret;
	u32 eax;
 
-#ifdef CONFIG_X86_32
-	unsigned int bsp = boot_cpu_data.cpu_index;
-	struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
-
-	if (!uci->cpu_sig.sig)
-		smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
+	if (!container)
+		return -EINVAL;
 
+#ifdef CONFIG_X86_32
+	get_bsp_sig();
+	cont = (unsigned long)container;
+#else
	/*
-	 * Take into account the fact that the ramdisk might get relocated
-	 * and therefore we need to recompute the container's position in
-	 * virtual memory space.
+	 * We need the physical address of the container for both bitness since
+	 * boot_params.hdr.ramdisk_image is a physical address.
	 */
-	container = (u8 *)(__va((u32)relocated_ramdisk) +
-			   ((u32)container - boot_params.hdr.ramdisk_image));
+	cont = __pa(container);
 #endif
+
+	/*
+	 * Take into account the fact that the ramdisk might get relocated and
+	 * therefore we need to recompute the container's position in virtual
+	 * memory space.
+	 */
+	if (relocated_ramdisk)
+		container = (u8 *)(__va(relocated_ramdisk) +
+			     (cont - boot_params.hdr.ramdisk_image));
+
	if (ucode_new_rev)
		pr_info("microcode: updated early to new patch_level=0x%08x\n",
			ucode_new_rev);
 
-	if (!container)
-		return -EINVAL;
-
	eax   = cpuid_eax(0x00000001);
	eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index ce2d0a2..0e25a1b 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -683,7 +683,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
	}
 
	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();
 
	/* Save MTRR state */
@@ -697,7 +697,7 @@ static void post_set(void) __releases(set_atomicity_lock)
 {
	/* Flush TLBs (no need to flush caches - they are disabled) */
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();
 
	/* Intel (P6) standard MTRRs */
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index dbb6087..d99f31d 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -266,6 +266,14 @@ __visible void smp_trace_x86_platform_ipi(struct pt_regs *regs)
 EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
 
 #ifdef CONFIG_HOTPLUG_CPU
+
+/* These two declarations are only used in check_irq_vectors_for_cpu_disable()
+ * below, which is protected by stop_machine().  Putting them on the stack
+ * results in a stack frame overflow.  Dynamically allocating could result in a
+ * failure so declare these two cpumasks as global.
+ */
+static struct cpumask affinity_new, online_new;
+
 /*
 * This cpu is going to be removed and its vectors migrated to the remaining
 * online cpus.  Check to see if there are enough vectors in the remaining cpus.
@@ -277,7 +285,6 @@ int check_irq_vectors_for_cpu_disable(void)
	unsigned int this_cpu, vector, this_count, count;
	struct irq_desc *desc;
	struct irq_data *data;
-	struct cpumask affinity_new, online_new;
 
	this_cpu = smp_processor_id();
	cpumask_copy(&online_new, cpu_online_mask);
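
The comment in the irq.c hunk above is easy to verify with arithmetic: struct cpumask is NR_CPUS bits wide, so with a large CONFIG_NR_CPUS=8192 (an assumed value for illustration) each mask is 1 KB, and the two locals would have claimed 2 KB of a kernel stack that is only 8 KB in total:

	#include <stdio.h>

	#define NR_CPUS 8192	/* assumed CONFIG_NR_CPUS, for illustration only */

	struct cpumask { unsigned long bits[NR_CPUS / (8 * sizeof(long))]; };

	int main(void)
	{
		/* 8192 bits / 8 = 1024 bytes per mask, 2048 for the pair */
		printf("%zu bytes per cpumask\n", sizeof(struct cpumask));
		return 0;
	}

The stop_machine() protection noted in the hunk's comment is what makes the two file-scope masks safe to share despite no longer being per-call.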
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 04ee1e2..7c6acd4 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -571,3 +571,40 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
			quirk_amd_nb_node);
 
 #endif
+
+#ifdef CONFIG_PCI
+/*
+ * Processor does not ensure DRAM scrub read/write sequence
+ * is atomic wrt accesses to CC6 save state area. Therefore
+ * if a concurrent scrub read/write access is to same address
+ * the entry may appear as if it is not written. This quirk
+ * applies to Fam16h models 00h-0Fh
+ *
+ * See "Revision Guide" for AMD F16h models 00h-0fh,
+ * document 51810 rev. 3.04, Nov 2013
+ */
+static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
+{
+	u32 val;
+
+	/*
+	 * Suggested workaround:
+	 * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b
+	 */
+	pci_read_config_dword(dev, 0x58, &val);
+	if (val & 0x1F) {
+		val &= ~(0x1F);
+		pci_write_config_dword(dev, 0x58, val);
+	}
+
+	pci_read_config_dword(dev, 0x5C, &val);
+	if (val & BIT(0)) {
+		val &= ~BIT(0);
+		pci_write_config_dword(dev, 0x5c, val);
+	}
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
+			amd_disable_seq_and_redirect_scrub);
+
+#endif
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 81b2750..27aa0455 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -493,14 +493,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
		struct numa_memblk *mb = &mi->blk[i];
		memblock_set_node(mb->start, mb->end - mb->start,
				  &memblock.memory, mb->nid);
-
-		/*
-		 * At this time, all memory regions reserved by memblock are
-		 * used by the kernel. Set the nid in memblock.reserved will
-		 * mark out all the nodes the kernel resides in.
-		 */
-		memblock_set_node(mb->start, mb->end - mb->start,
-				  &memblock.reserved, mb->nid);
	}
 
	/*
@@ -565,10 +557,21 @@ static void __init numa_init_array(void)
 static void __init numa_clear_kernel_node_hotplug(void)
 {
	int i, nid;
-	nodemask_t numa_kernel_nodes;
+	nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
	unsigned long start, end;
	struct memblock_type *type = &memblock.reserved;
 
+	/*
+	 * At this time, all memory regions reserved by memblock are
+	 * used by the kernel. Set the nid in memblock.reserved will
+	 * mark out all the nodes the kernel resides in.
+	 */
+	for (i = 0; i < numa_meminfo.nr_blks; i++) {
+		struct numa_memblk *mb = &numa_meminfo.blk[i];
+		memblock_set_node(mb->start, mb->end - mb->start,
+				  &memblock.reserved, mb->nid);
+	}
+
	/* Mark all kernel nodes. */
	for (i = 0; i < type->cnt; i++)
		node_set(type->regions[i].nid, numa_kernel_nodes);
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 0342d27..47b6436 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -52,6 +52,8 @@ void memory_present(int nid, unsigned long start, unsigned long end)
			nid, start, end);
	printk(KERN_DEBUG "  Setting physnode_map array to node %d for pfns:\n", nid);
	printk(KERN_DEBUG "  ");
+	start = round_down(start, PAGES_PER_SECTION);
+	end = round_up(end, PAGES_PER_SECTION);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		physnode_map[pfn / PAGES_PER_SECTION] = nid;
		printk(KERN_CONT "%lx ", pfn);
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 1a25187..1953e9c 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -42,15 +42,25 @@ static __init inline int srat_disabled(void)
	return acpi_numa < 0;
 }
 
-/* Callback for SLIT parsing */
+/*
+ * Callback for SLIT parsing.  pxm_to_node() returns NUMA_NO_NODE for
+ * I/O localities since SRAT does not list them.  I/O localities are
+ * not supported at this point.
+ */
 void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 {
	int i, j;
 
-	for (i = 0; i < slit->locality_count; i++)
-		for (j = 0; j < slit->locality_count; j++)
+	for (i = 0; i < slit->locality_count; i++) {
+		if (pxm_to_node(i) == NUMA_NO_NODE)
+			continue;
+		for (j = 0; j < slit->locality_count; j++) {
+			if (pxm_to_node(j) == NUMA_NO_NODE)
+				continue;
			numa_set_distance(pxm_to_node(i), pxm_to_node(j),
				slit->entry[slit->locality_count * i + j]);
+		}
+	}
 }
 
 /* Callback for Proximity Domain -> x2APIC mapping */
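
The SLIT fix above is about indexing: ACPI stores the locality distance matrix flattened row-major, so the distance from locality i to j lives at entry[locality_count * i + j], and a PXM with no matching node (pxm_to_node() == NUMA_NO_NODE, e.g. an I/O locality) must be skipped before that index is ever used to set a node distance. A tiny standalone illustration of the layout:

	#include <stdio.h>

	int main(void)
	{
		/* 3 localities, row-major; 10 is the conventional "local" distance */
		unsigned char entry[] = { 10, 20, 30,
					  20, 10, 30,
					  30, 30, 10 };
		unsigned int locality_count = 3, i = 0, j = 2;

		printf("distance(%u -> %u) = %u\n", i, j,
		       entry[locality_count * i + j]);	/* prints 30 */
		return 0;
	}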
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index ae699b3..dd8dda1 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -103,7 +103,7 @@ static void flush_tlb_func(void *info)
	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;
 
-	count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL)
			local_flush_tlb();
@@ -131,7 +131,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
	info.flush_start = start;
	info.flush_end = end;
 
-	count_vm_event(NR_TLB_REMOTE_FLUSH);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (is_uv_system()) {
		unsigned int cpu;
 
@@ -151,44 +151,19 @@ void flush_tlb_current_task(void)
 
	preempt_disable();
 
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	local_flush_tlb();
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
 }
 
-/*
- * It can find out the THP large page, or
- * HUGETLB page in tlb_flush when THP disabled
- */
-static inline unsigned long has_large_page(struct mm_struct *mm,
-				 unsigned long start, unsigned long end)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	unsigned long addr = ALIGN(start, HPAGE_SIZE);
-	for (; addr < end; addr += HPAGE_SIZE) {
-		pgd = pgd_offset(mm, addr);
-		if (likely(!pgd_none(*pgd))) {
-			pud = pud_offset(pgd, addr);
-			if (likely(!pud_none(*pud))) {
-				pmd = pmd_offset(pud, addr);
-				if (likely(!pmd_none(*pmd)))
-					if (pmd_large(*pmd))
-						return addr;
-			}
-		}
-	}
-	return 0;
-}
-
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
 {
	unsigned long addr;
	unsigned act_entries, tlb_entries = 0;
+	unsigned long nr_base_pages;
 
	preempt_disable();
	if (current->active_mm != mm)
@@ -210,21 +185,20 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
		tlb_entries = tlb_lli_4k[ENTRIES];
	else
		tlb_entries = tlb_lld_4k[ENTRIES];
+
	/* Assume all of TLB entries was occupied by this task */
-	act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;
+	act_entries = tlb_entries >> tlb_flushall_shift;
+	act_entries = mm->total_vm > act_entries ? act_entries : mm->total_vm;
+	nr_base_pages = (end - start) >> PAGE_SHIFT;
 
	/* tlb_flushall_shift is on balance point, details in commit log */
-	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift) {
-		count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	if (nr_base_pages > act_entries) {
+		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
	} else {
-		if (has_large_page(mm, start, end)) {
-			local_flush_tlb();
-			goto flush_all;
-		}
		/* flush range by one by one 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE) {
-			count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
+			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
			__flush_tlb_single(addr);
		}
 
@@ -262,7 +236,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
 
 static void do_flush_tlb_all(void *info)
 {
-	count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
@@ -270,7 +244,7 @@ static void do_flush_tlb_all(void *info)
 
 void flush_tlb_all(void)
 {
-	count_vm_event(NR_TLB_REMOTE_FLUSH);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
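
The rewritten flush_tlb_mm_range() heuristic above compares the number of 4K pages in the range against tlb_entries >> tlb_flushall_shift (capped by the task's total_vm). With the Ivybridge value from the intel.c hunk (tlb_flushall_shift = 2) and an assumed 512-entry 4K DTLB, ranges over 128 pages take the full flush. The decision in isolation:

	#include <stdio.h>

	static int use_full_flush(unsigned long nr_base_pages,
				  unsigned long tlb_entries, int tlb_flushall_shift,
				  unsigned long total_vm)
	{
		unsigned long act_entries = tlb_entries >> tlb_flushall_shift;

		if (total_vm < act_entries)	/* can't occupy more than the task maps */
			act_entries = total_vm;
		return nr_base_pages > act_entries;
	}

	int main(void)
	{
		printf("%d\n", use_full_flush(200, 512, 2, 100000));	/* 1: full flush */
		printf("%d\n", use_full_flush(64, 512, 2, 100000));	/* 0: per-page invlpg */
		return 0;
	}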
+ */ + if (cpu_has_pse) + set_in_cr4(X86_CR4_PSE); + + if (cpu_has_pge) + set_in_cr4(X86_CR4_PGE); } /* diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 8009acb..696c694 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -899,6 +899,13 @@ int m2p_add_override(unsigned long mfn, struct page *page, "m2p_add_override: pfn %lx not mapped", pfn)) return -EINVAL; } + WARN_ON(PagePrivate(page)); + SetPagePrivate(page); + set_page_private(page, mfn); + page->index = pfn_to_mfn(pfn); + + if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) + return -ENOMEM; if (kmap_op != NULL) { if (!PageHighMem(page)) { @@ -937,16 +944,19 @@ int m2p_add_override(unsigned long mfn, struct page *page, } EXPORT_SYMBOL_GPL(m2p_add_override); int m2p_remove_override(struct page *page, - struct gnttab_map_grant_ref *kmap_op, - unsigned long mfn) + struct gnttab_map_grant_ref *kmap_op) { unsigned long flags; + unsigned long mfn; unsigned long pfn; unsigned long uninitialized_var(address); unsigned level; pte_t *ptep = NULL; pfn = page_to_pfn(page); + mfn = get_phys_to_machine(pfn); + if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) + return -EINVAL; if (!PageHighMem(page)) { address = (unsigned long)__va(pfn << PAGE_SHIFT); @@ -960,7 +970,10 @@ int m2p_remove_override(struct page *page, spin_lock_irqsave(&m2p_override_lock, flags); list_del(&page->lru); spin_unlock_irqrestore(&m2p_override_lock, flags); + WARN_ON(!PagePrivate(page)); + ClearPagePrivate(page); + set_phys_to_machine(pfn, page->index); if (kmap_op != NULL) { if (!PageHighMem(page)) { struct multicall_space mcs; @@ -0,0 +1,1248 @@ +/* + * fs/kernfs/dir.c - kernfs directory implementation + * + * Copyright (c) 2001-3 Patrick Mochel + * Copyright (c) 2007 SUSE Linux Products GmbH + * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org> + * + * This file is released under the GPLv2. 
+ */ + +#include <linux/sched.h> +#include <linux/fs.h> +#include <linux/namei.h> +#include <linux/idr.h> +#include <linux/slab.h> +#include <linux/security.h> +#include <linux/hash.h> + +#include "kernfs-internal.h" + +DEFINE_MUTEX(kernfs_mutex); + +#define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb) + +static bool kernfs_active(struct kernfs_node *kn) +{ + lockdep_assert_held(&kernfs_mutex); + return atomic_read(&kn->active) >= 0; +} + +static bool kernfs_lockdep(struct kernfs_node *kn) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + return kn->flags & KERNFS_LOCKDEP; +#else + return false; +#endif +} + +/** + * kernfs_name_hash + * @name: Null terminated string to hash + * @ns: Namespace tag to hash + * + * Returns 31 bit hash of ns + name (so it fits in an off_t) + */ +static unsigned int kernfs_name_hash(const char *name, const void *ns) +{ + unsigned long hash = init_name_hash(); + unsigned int len = strlen(name); + while (len--) + hash = partial_name_hash(*name++, hash); + hash = (end_name_hash(hash) ^ hash_ptr((void *)ns, 31)); + hash &= 0x7fffffffU; + /* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */ + if (hash < 1) + hash += 2; + if (hash >= INT_MAX) + hash = INT_MAX - 1; + return hash; +} + +static int kernfs_name_compare(unsigned int hash, const char *name, + const void *ns, const struct kernfs_node *kn) +{ + if (hash != kn->hash) + return hash - kn->hash; + if (ns != kn->ns) + return ns - kn->ns; + return strcmp(name, kn->name); +} + +static int kernfs_sd_compare(const struct kernfs_node *left, + const struct kernfs_node *right) +{ + return kernfs_name_compare(left->hash, left->name, left->ns, right); +} + +/** + * kernfs_link_sibling - link kernfs_node into sibling rbtree + * @kn: kernfs_node of interest + * + * Link @kn into its sibling rbtree which starts from + * @kn->parent->dir.children. + * + * Locking: + * mutex_lock(kernfs_mutex) + * + * RETURNS: + * 0 on success, -EEXIST on failure. + */ +static int kernfs_link_sibling(struct kernfs_node *kn) +{ + struct rb_node **node = &kn->parent->dir.children.rb_node; + struct rb_node *parent = NULL; + + if (kernfs_type(kn) == KERNFS_DIR) + kn->parent->dir.subdirs++; + + while (*node) { + struct kernfs_node *pos; + int result; + + pos = rb_to_kn(*node); + parent = *node; + result = kernfs_sd_compare(kn, pos); + if (result < 0) + node = &pos->rb.rb_left; + else if (result > 0) + node = &pos->rb.rb_right; + else + return -EEXIST; + } + /* add new node and rebalance the tree */ + rb_link_node(&kn->rb, parent, node); + rb_insert_color(&kn->rb, &kn->parent->dir.children); + return 0; +} + +/** + * kernfs_unlink_sibling - unlink kernfs_node from sibling rbtree + * @kn: kernfs_node of interest + * + * Try to unlink @kn from its sibling rbtree which starts from + * kn->parent->dir.children. Returns %true if @kn was actually + * removed, %false if @kn wasn't on the rbtree. + * + * Locking: + * mutex_lock(kernfs_mutex) + */ +static bool kernfs_unlink_sibling(struct kernfs_node *kn) +{ + if (RB_EMPTY_NODE(&kn->rb)) + return false; + + if (kernfs_type(kn) == KERNFS_DIR) + kn->parent->dir.subdirs--; + + rb_erase(&kn->rb, &kn->parent->dir.children); + RB_CLEAR_NODE(&kn->rb); + return true; +} + +/** + * kernfs_get_active - get an active reference to kernfs_node + * @kn: kernfs_node to get an active reference to + * + * Get an active reference of @kn. This function is a noop if @kn + * is NULL. + * + * RETURNS: + * Pointer to @kn on success, NULL on failure.
+ */ +struct kernfs_node *kernfs_get_active(struct kernfs_node *kn) +{ + if (unlikely(!kn)) + return NULL; + + if (!atomic_inc_unless_negative(&kn->active)) + return NULL; + + if (kernfs_lockdep(kn)) + rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_); + return kn; +} + +/** + * kernfs_put_active - put an active reference to kernfs_node + * @kn: kernfs_node to put an active reference to + * + * Put an active reference to @kn. This function is a noop if @kn + * is NULL. + */ +void kernfs_put_active(struct kernfs_node *kn) +{ + struct kernfs_root *root = kernfs_root(kn); + int v; + + if (unlikely(!kn)) + return; + + if (kernfs_lockdep(kn)) + rwsem_release(&kn->dep_map, 1, _RET_IP_); + v = atomic_dec_return(&kn->active); + if (likely(v != KN_DEACTIVATED_BIAS)) + return; + + wake_up_all(&root->deactivate_waitq); +} + +/** + * kernfs_drain - drain kernfs_node + * @kn: kernfs_node to drain + * + * Drain existing usages and nuke all existing mmaps of @kn. Multiple + * removers may invoke this function concurrently on @kn and all will + * return after draining is complete. + */ +static void kernfs_drain(struct kernfs_node *kn) + __releases(&kernfs_mutex) __acquires(&kernfs_mutex) +{ + struct kernfs_root *root = kernfs_root(kn); + + lockdep_assert_held(&kernfs_mutex); + WARN_ON_ONCE(kernfs_active(kn)); + + mutex_unlock(&kernfs_mutex); + + if (kernfs_lockdep(kn)) { + rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_); + if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS) + lock_contended(&kn->dep_map, _RET_IP_); + } + + /* but everyone should wait for draining */ + wait_event(root->deactivate_waitq, + atomic_read(&kn->active) == KN_DEACTIVATED_BIAS); + + if (kernfs_lockdep(kn)) { + lock_acquired(&kn->dep_map, _RET_IP_); + rwsem_release(&kn->dep_map, 1, _RET_IP_); + } + + kernfs_unmap_bin_file(kn); + + mutex_lock(&kernfs_mutex); +} + +/** + * kernfs_get - get a reference count on a kernfs_node + * @kn: the target kernfs_node + */ +void kernfs_get(struct kernfs_node *kn) +{ + if (kn) { + WARN_ON(!atomic_read(&kn->count)); + atomic_inc(&kn->count); + } +} +EXPORT_SYMBOL_GPL(kernfs_get); + +/** + * kernfs_put - put a reference count on a kernfs_node + * @kn: the target kernfs_node + * + * Put a reference count of @kn and destroy it if it reached zero. + */ +void kernfs_put(struct kernfs_node *kn) +{ + struct kernfs_node *parent; + struct kernfs_root *root; + + if (!kn || !atomic_dec_and_test(&kn->count)) + return; + root = kernfs_root(kn); + repeat: + /* + * Moving/renaming is always done while holding a reference. + * kn->parent won't change beneath us. + */ + parent = kn->parent; + + WARN_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS, + "kernfs_put: %s/%s: released with incorrect active_ref %d\n", + parent ?
parent->name : "", kn->name, atomic_read(&kn->active)); + + if (kernfs_type(kn) == KERNFS_LINK) + kernfs_put(kn->symlink.target_kn); + if (!(kn->flags & KERNFS_STATIC_NAME)) + kfree(kn->name); + if (kn->iattr) { + if (kn->iattr->ia_secdata) + security_release_secctx(kn->iattr->ia_secdata, + kn->iattr->ia_secdata_len); + simple_xattrs_free(&kn->iattr->xattrs); + } + kfree(kn->iattr); + ida_simple_remove(&root->ino_ida, kn->ino); + kmem_cache_free(kernfs_node_cache, kn); + + kn = parent; + if (kn) { + if (atomic_dec_and_test(&kn->count)) + goto repeat; + } else { + /* just released the root kn, free @root too */ + ida_destroy(&root->ino_ida); + kfree(root); + } +} +EXPORT_SYMBOL_GPL(kernfs_put); + +static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags) +{ + struct kernfs_node *kn; + + if (flags & LOOKUP_RCU) + return -ECHILD; + + /* Always perform fresh lookup for negatives */ + if (!dentry->d_inode) + goto out_bad_unlocked; + + kn = dentry->d_fsdata; + mutex_lock(&kernfs_mutex); + + /* The kernfs node has been deactivated */ + if (!kernfs_active(kn)) + goto out_bad; + + /* The kernfs node has been moved? */ + if (dentry->d_parent->d_fsdata != kn->parent) + goto out_bad; + + /* The kernfs node has been renamed */ + if (strcmp(dentry->d_name.name, kn->name) != 0) + goto out_bad; + + /* The kernfs node has been moved to a different namespace */ + if (kn->parent && kernfs_ns_enabled(kn->parent) && + kernfs_info(dentry->d_sb)->ns != kn->ns) + goto out_bad; + + mutex_unlock(&kernfs_mutex); +out_valid: + return 1; +out_bad: + mutex_unlock(&kernfs_mutex); +out_bad_unlocked: + /* + * @dentry doesn't match the underlying kernfs node, drop the + * dentry and force lookup. If we have submounts we must allow the + * vfs caches to lie about the state of the filesystem to prevent + * leaks and other nasty things, so use check_submounts_and_drop() + * instead of d_drop(). + */ + if (check_submounts_and_drop(dentry) != 0) + goto out_valid; + + return 0; +} + +static void kernfs_dop_release(struct dentry *dentry) +{ + kernfs_put(dentry->d_fsdata); +} + +const struct dentry_operations kernfs_dops = { + .d_revalidate = kernfs_dop_revalidate, + .d_release = kernfs_dop_release, +}; + +static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root, + const char *name, umode_t mode, + unsigned flags) +{ + char *dup_name = NULL; + struct kernfs_node *kn; + int ret; + + if (!(flags & KERNFS_STATIC_NAME)) { + name = dup_name = kstrdup(name, GFP_KERNEL); + if (!name) + return NULL; + } + + kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL); + if (!kn) + goto err_out1; + + ret = ida_simple_get(&root->ino_ida, 1, 0, GFP_KERNEL); + if (ret < 0) + goto err_out2; + kn->ino = ret; + + atomic_set(&kn->count, 1); + atomic_set(&kn->active, KN_DEACTIVATED_BIAS); + RB_CLEAR_NODE(&kn->rb); + + kn->name = name; + kn->mode = mode; + kn->flags = flags; + + return kn; + + err_out2: + kmem_cache_free(kernfs_node_cache, kn); + err_out1: + kfree(dup_name); + return NULL; +} + +struct kernfs_node *kernfs_new_node(struct kernfs_node *parent, + const char *name, umode_t mode, + unsigned flags) +{ + struct kernfs_node *kn; + + kn = __kernfs_new_node(kernfs_root(parent), name, mode, flags); + if (kn) { + kernfs_get(parent); + kn->parent = parent; + } + return kn; +} + +/** + * kernfs_add_one - add kernfs_node to parent without warning + * @kn: kernfs_node to be added + * + * The caller must already have initialized @kn->parent. 
This + * function increments nlink of the parent's inode if @kn is a + * directory and links @kn into the parent's children list. + * + * RETURNS: + * 0 on success, -EEXIST if entry with the given name already + * exists. + */ +int kernfs_add_one(struct kernfs_node *kn) +{ + struct kernfs_node *parent = kn->parent; + struct kernfs_iattrs *ps_iattr; + bool has_ns; + int ret; + + mutex_lock(&kernfs_mutex); + + ret = -EINVAL; + has_ns = kernfs_ns_enabled(parent); + if (WARN(has_ns != (bool)kn->ns, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n", + has_ns ? "required" : "invalid", parent->name, kn->name)) + goto out_unlock; + + if (kernfs_type(parent) != KERNFS_DIR) + goto out_unlock; + + ret = -ENOENT; + if ((parent->flags & KERNFS_ACTIVATED) && !kernfs_active(parent)) + goto out_unlock; + + kn->hash = kernfs_name_hash(kn->name, kn->ns); + + ret = kernfs_link_sibling(kn); + if (ret) + goto out_unlock; + + /* Update timestamps on the parent */ + ps_iattr = parent->iattr; + if (ps_iattr) { + struct iattr *ps_iattrs = &ps_iattr->ia_iattr; + ps_iattrs->ia_ctime = ps_iattrs->ia_mtime = CURRENT_TIME; + } + + mutex_unlock(&kernfs_mutex); + + /* + * Activate the new node unless CREATE_DEACTIVATED is requested. + * If not activated here, the kernfs user is responsible for + * activating the node with kernfs_activate(). A node which hasn't + * been activated is not visible to userland and its removal won't + * trigger deactivation. + */ + if (!(kernfs_root(kn)->flags & KERNFS_ROOT_CREATE_DEACTIVATED)) + kernfs_activate(kn); + return 0; + +out_unlock: + mutex_unlock(&kernfs_mutex); + return ret; +} + +/** + * kernfs_find_ns - find kernfs_node with the given name + * @parent: kernfs_node to search under + * @name: name to look for + * @ns: the namespace tag to use + * + * Look for kernfs_node with name @name under @parent. Returns pointer to + * the found kernfs_node on success, %NULL on failure. + */ +static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent, + const unsigned char *name, + const void *ns) +{ + struct rb_node *node = parent->dir.children.rb_node; + bool has_ns = kernfs_ns_enabled(parent); + unsigned int hash; + + lockdep_assert_held(&kernfs_mutex); + + if (has_ns != (bool)ns) { + WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n", + has_ns ? "required" : "invalid", parent->name, name); + return NULL; + } + + hash = kernfs_name_hash(name, ns); + while (node) { + struct kernfs_node *kn; + int result; + + kn = rb_to_kn(node); + result = kernfs_name_compare(hash, name, ns, kn); + if (result < 0) + node = node->rb_left; + else if (result > 0) + node = node->rb_right; + else + return kn; + } + return NULL; +} + +/** + * kernfs_find_and_get_ns - find and get kernfs_node with the given name + * @parent: kernfs_node to search under + * @name: name to look for + * @ns: the namespace tag to use + * + * Look for kernfs_node with name @name under @parent and get a reference + * if found. This function may sleep and returns pointer to the found + * kernfs_node on success, %NULL on failure.
+ */ +struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent, + const char *name, const void *ns) +{ + struct kernfs_node *kn; + + mutex_lock(&kernfs_mutex); + kn = kernfs_find_ns(parent, name, ns); + kernfs_get(kn); + mutex_unlock(&kernfs_mutex); + + return kn; +} +EXPORT_SYMBOL_GPL(kernfs_find_and_get_ns); + +/** + * kernfs_create_root - create a new kernfs hierarchy + * @scops: optional syscall operations for the hierarchy + * @priv: opaque data associated with the new directory + * + * Returns the root of the new hierarchy on success, ERR_PTR() value on + * failure. + */ +struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops, + void *priv) +{ + struct kernfs_root *root; + struct kernfs_node *kn; + + root = kzalloc(sizeof(*root), GFP_KERNEL); + if (!root) + return ERR_PTR(-ENOMEM); + + ida_init(&root->ino_ida); + + kn = __kernfs_new_node(root, "", S_IFDIR | S_IRUGO | S_IXUGO, + KERNFS_DIR); + if (!kn) { + ida_destroy(&root->ino_ida); + kfree(root); + return ERR_PTR(-ENOMEM); + } + + kernfs_activate(kn); + kn->priv = priv; + kn->dir.root = root; + + root->syscall_ops = scops; + root->kn = kn; + init_waitqueue_head(&root->deactivate_waitq); + + return root; +} + +/** + * kernfs_destroy_root - destroy a kernfs hierarchy + * @root: root of the hierarchy to destroy + * + * Destroy the hierarchy anchored at @root by removing all existing + * directories and destroying @root. + */ +void kernfs_destroy_root(struct kernfs_root *root) +{ + kernfs_remove(root->kn); /* will also free @root */ +} + +/** + * kernfs_create_dir_ns - create a directory + * @parent: parent in which to create a new directory + * @name: name of the new directory + * @mode: mode of the new directory + * @priv: opaque data associated with the new directory + * @ns: optional namespace tag of the directory + * + * Returns the created node on success, ERR_PTR() value on failure. 
+ */ +struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent, + const char *name, umode_t mode, + void *priv, const void *ns) +{ + struct kernfs_node *kn; + int rc; + + /* allocate */ + kn = kernfs_new_node(parent, name, mode | S_IFDIR, KERNFS_DIR); + if (!kn) + return ERR_PTR(-ENOMEM); + + kn->dir.root = parent->dir.root; + kn->ns = ns; + kn->priv = priv; + + /* link in */ + rc = kernfs_add_one(kn); + if (!rc) + return kn; + + kernfs_put(kn); + return ERR_PTR(rc); +} + +static struct dentry *kernfs_iop_lookup(struct inode *dir, + struct dentry *dentry, + unsigned int flags) +{ + struct dentry *ret; + struct kernfs_node *parent = dentry->d_parent->d_fsdata; + struct kernfs_node *kn; + struct inode *inode; + const void *ns = NULL; + + mutex_lock(&kernfs_mutex); + + if (kernfs_ns_enabled(parent)) + ns = kernfs_info(dir->i_sb)->ns; + + kn = kernfs_find_ns(parent, dentry->d_name.name, ns); + + /* no such entry */ + if (!kn || !kernfs_active(kn)) { + ret = NULL; + goto out_unlock; + } + kernfs_get(kn); + dentry->d_fsdata = kn; + + /* attach dentry and inode */ + inode = kernfs_get_inode(dir->i_sb, kn); + if (!inode) { + ret = ERR_PTR(-ENOMEM); + goto out_unlock; + } + + /* instantiate and hash dentry */ + ret = d_materialise_unique(dentry, inode); + out_unlock: + mutex_unlock(&kernfs_mutex); + return ret; +} + +static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry, + umode_t mode) +{ + struct kernfs_node *parent = dir->i_private; + struct kernfs_syscall_ops *scops = kernfs_root(parent)->syscall_ops; + int ret; + + if (!scops || !scops->mkdir) + return -EPERM; + + if (!kernfs_get_active(parent)) + return -ENODEV; + + ret = scops->mkdir(parent, dentry->d_name.name, mode); + + kernfs_put_active(parent); + return ret; +} + +static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry) +{ + struct kernfs_node *kn = dentry->d_fsdata; + struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops; + int ret; + + if (!scops || !scops->rmdir) + return -EPERM; + + if (!kernfs_get_active(kn)) + return -ENODEV; + + ret = scops->rmdir(kn); + + kernfs_put_active(kn); + return ret; +} + +static int kernfs_iop_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry) +{ + struct kernfs_node *kn = old_dentry->d_fsdata; + struct kernfs_node *new_parent = new_dir->i_private; + struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops; + int ret; + + if (!scops || !scops->rename) + return -EPERM; + + if (!kernfs_get_active(kn)) + return -ENODEV; + + if (!kernfs_get_active(new_parent)) { + kernfs_put_active(kn); + return -ENODEV; + } + + ret = scops->rename(kn, new_parent, new_dentry->d_name.name); + + kernfs_put_active(new_parent); + kernfs_put_active(kn); + return ret; +} + +const struct inode_operations kernfs_dir_iops = { + .lookup = kernfs_iop_lookup, + .permission = kernfs_iop_permission, + .setattr = kernfs_iop_setattr, + .getattr = kernfs_iop_getattr, + .setxattr = kernfs_iop_setxattr, + .removexattr = kernfs_iop_removexattr, + .getxattr = kernfs_iop_getxattr, + .listxattr = kernfs_iop_listxattr, + + .mkdir = kernfs_iop_mkdir, + .rmdir = kernfs_iop_rmdir, + .rename = kernfs_iop_rename, +}; + +static struct kernfs_node *kernfs_leftmost_descendant(struct kernfs_node *pos) +{ + struct kernfs_node *last; + + while (true) { + struct rb_node *rbn; + + last = pos; + + if (kernfs_type(pos) != KERNFS_DIR) + break; + + rbn = rb_first(&pos->dir.children); + if (!rbn) + break; + + pos = rb_to_kn(rbn); + } + + return last; +} 
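[Editor's sketch, not part of the original patch: kernfs_leftmost_descendant() above and kernfs_next_descendant_post() below together implement an iterative post-order walk over a subtree. A minimal illustration of how the pair is driven, mirroring the loop kernfs_activate() uses further down — visit() is a hypothetical callback, and kernfs_mutex must be held:]

static void walk_subtree_post_order(struct kernfs_node *root)
{
	struct kernfs_node *pos = NULL;

	/* each call yields the next node in post order; @root itself comes last */
	while ((pos = kernfs_next_descendant_post(pos, root)))
		visit(pos);	/* visit() is a hypothetical per-node callback */
}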
+ +/** + * kernfs_next_descendant_post - find the next descendant for post-order walk + * @pos: the current position (%NULL to initiate traversal) + * @root: kernfs_node whose descendants to walk + * + * Find the next descendant to visit for post-order traversal of @root's + * descendants. @root is included in the iteration and is the last node to + * be visited. + */ +static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos, + struct kernfs_node *root) +{ + struct rb_node *rbn; + + lockdep_assert_held(&kernfs_mutex); + + /* if first iteration, visit leftmost descendant which may be root */ + if (!pos) + return kernfs_leftmost_descendant(root); + + /* if we visited @root, we're done */ + if (pos == root) + return NULL; + + /* if there's an unvisited sibling, visit its leftmost descendant */ + rbn = rb_next(&pos->rb); + if (rbn) + return kernfs_leftmost_descendant(rb_to_kn(rbn)); + + /* no sibling left, visit parent */ + return pos->parent; +} + +/** + * kernfs_activate - activate a node which started deactivated + * @kn: kernfs_node whose subtree is to be activated + * + * If the root has KERNFS_ROOT_CREATE_DEACTIVATED set, a newly created node + * needs to be explicitly activated. A node which hasn't been activated + * isn't visible to userland and deactivation is skipped during its + * removal. This is useful to construct atomic init sequences where + * creation of multiple nodes should either succeed or fail atomically. + * + * The caller is responsible for ensuring that this function is not called + * after kernfs_remove*() is invoked on @kn. + */ +void kernfs_activate(struct kernfs_node *kn) +{ + struct kernfs_node *pos; + + mutex_lock(&kernfs_mutex); + + pos = NULL; + while ((pos = kernfs_next_descendant_post(pos, kn))) { + if (!pos || (pos->flags & KERNFS_ACTIVATED)) + continue; + + WARN_ON_ONCE(pos->parent && RB_EMPTY_NODE(&pos->rb)); + WARN_ON_ONCE(atomic_read(&pos->active) != KN_DEACTIVATED_BIAS); + + atomic_sub(KN_DEACTIVATED_BIAS, &pos->active); + pos->flags |= KERNFS_ACTIVATED; + } + + mutex_unlock(&kernfs_mutex); +} + +static void __kernfs_remove(struct kernfs_node *kn) +{ + struct kernfs_node *pos; + + lockdep_assert_held(&kernfs_mutex); + + /* + * Short-circuit if @kn has already finished removal. This is for + * kernfs_remove_self() which plays with active ref after removal. + */ + if (!kn || RB_EMPTY_NODE(&kn->rb)) + return; + + pr_debug("kernfs %s: removing\n", kn->name); + + /* prevent any new usage under @kn by deactivating all nodes */ + pos = NULL; + while ((pos = kernfs_next_descendant_post(pos, kn))) + if (kernfs_active(pos)) + atomic_add(KN_DEACTIVATED_BIAS, &pos->active); + + /* deactivate and unlink the subtree node-by-node */ + do { + pos = kernfs_leftmost_descendant(kn); + + /* + * kernfs_drain() drops kernfs_mutex temporarily and @pos's + * base ref could have been put by someone else by the time + * the function returns. Make sure it doesn't go away + * underneath us. + */ + kernfs_get(pos); + + /* + * Drain iff @kn was activated. This avoids draining and + * its lockdep annotations for nodes which have never been + * activated and allows embedding kernfs_remove() in create + * error paths without worrying about draining. + */ + if (kn->flags & KERNFS_ACTIVATED) + kernfs_drain(pos); + else + WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS); + + /* + * kernfs_unlink_sibling() succeeds once per node. Use it + * to decide who's responsible for cleanups.
+ */ + if (!pos->parent || kernfs_unlink_sibling(pos)) { + struct kernfs_iattrs *ps_iattr = + pos->parent ? pos->parent->iattr : NULL; + + /* update timestamps on the parent */ + if (ps_iattr) { + ps_iattr->ia_iattr.ia_ctime = CURRENT_TIME; + ps_iattr->ia_iattr.ia_mtime = CURRENT_TIME; + } + + kernfs_put(pos); + } + + kernfs_put(pos); + } while (pos != kn); +} + +/** + * kernfs_remove - remove a kernfs_node recursively + * @kn: the kernfs_node to remove + * + * Remove @kn along with all its subdirectories and files. + */ +void kernfs_remove(struct kernfs_node *kn) +{ + mutex_lock(&kernfs_mutex); + __kernfs_remove(kn); + mutex_unlock(&kernfs_mutex); +} + +/** + * kernfs_break_active_protection - break out of active protection + * @kn: the self kernfs_node + * + * The caller must be running off of a kernfs operation which is invoked + * with an active reference - e.g. one of kernfs_ops. Each invocation of + * this function must also be matched with an invocation of + * kernfs_unbreak_active_protection(). + * + * This function releases the active reference of @kn the caller is + * holding. Once this function is called, @kn may be removed at any point + * and the caller is solely responsible for ensuring that the objects it + * dereferences are accessible. + */ +void kernfs_break_active_protection(struct kernfs_node *kn) +{ + /* + * Take ourselves out of the active ref dependency chain. If + * we're called without an active ref, lockdep will complain. + */ + kernfs_put_active(kn); +} + +/** + * kernfs_unbreak_active_protection - undo kernfs_break_active_protection() + * @kn: the self kernfs_node + * + * If kernfs_break_active_protection() was called, this function must be + * invoked before finishing the kernfs operation. Note that while this + * function restores the active reference, it doesn't and can't actually + * restore the active protection - @kn may already have been removed or + * be in the process of being removed. Once kernfs_break_active_protection() + * is invoked, that protection is irreversibly gone for the kernfs + * operation instance. + * + * While this function may be called at any point after + * kernfs_break_active_protection() is invoked, its most useful location + * would be right before the enclosing kernfs operation returns. + */ +void kernfs_unbreak_active_protection(struct kernfs_node *kn) +{ + /* + * @kn->active could be in any state; however, the increment we do + * here will be undone as soon as the enclosing kernfs operation + * finishes and this temporary bump can't break anything. If @kn + * is alive, nothing changes. If @kn is being deactivated, the + * soon-to-follow put will either finish deactivation or restore + * deactivated state. If @kn is already removed, the temporary + * bump is guaranteed to be gone before @kn is released. + */ + atomic_inc(&kn->active); + if (kernfs_lockdep(kn)) + rwsem_acquire(&kn->dep_map, 0, 1, _RET_IP_); +} + +/** + * kernfs_remove_self - remove a kernfs_node from its own method + * @kn: the self kernfs_node to remove + * + * The caller must be running off of a kernfs operation which is invoked + * with an active reference - e.g. one of kernfs_ops. This can be used to + * implement a file operation which deletes itself. + * + * For example, the "delete" file for a sysfs device directory can be + * implemented by invoking kernfs_remove_self() on the "delete" file + * itself. This function breaks the circular dependency of trying to + * deactivate self while holding an active ref itself.
It isn't necessary + * to modify the usual removal path to use kernfs_remove_self(). The + * "delete" implementation can simply invoke kernfs_remove_self() on self + * before proceeding with the usual removal path. kernfs will ignore later + * kernfs_remove() on self. + * + * kernfs_remove_self() can be called multiple times concurrently on the + * same kernfs_node. Only the first one actually performs removal and + * returns %true. All others will wait until the kernfs operation which + * won self-removal finishes and return %false. Note that the losers wait + * for the completion of not only the winning kernfs_remove_self() but also + * the whole kernfs_ops which won the arbitration. This can be used to + * guarantee, for example, that all concurrent writes to a "delete" file + * finish only after the whole operation is complete. + */ +bool kernfs_remove_self(struct kernfs_node *kn) +{ + bool ret; + + mutex_lock(&kernfs_mutex); + kernfs_break_active_protection(kn); + + /* + * SUICIDAL is used to arbitrate among competing invocations. Only + * the first one will actually perform removal. When the removal + * is complete, SUICIDED is set and the active ref is restored + * while holding kernfs_mutex. The ones which lost arbitration + * wait for SUICIDED && drained, which can happen only after the + * enclosing kernfs operation which executed the winning instance + * of kernfs_remove_self() has finished. + */ + if (!(kn->flags & KERNFS_SUICIDAL)) { + kn->flags |= KERNFS_SUICIDAL; + __kernfs_remove(kn); + kn->flags |= KERNFS_SUICIDED; + ret = true; + } else { + wait_queue_head_t *waitq = &kernfs_root(kn)->deactivate_waitq; + DEFINE_WAIT(wait); + + while (true) { + prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE); + + if ((kn->flags & KERNFS_SUICIDED) && + atomic_read(&kn->active) == KN_DEACTIVATED_BIAS) + break; + + mutex_unlock(&kernfs_mutex); + schedule(); + mutex_lock(&kernfs_mutex); + } + finish_wait(waitq, &wait); + WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb)); + ret = false; + } + + /* + * This must be done while holding kernfs_mutex; otherwise, waiting + * for SUICIDED && deactivated could finish prematurely. + */ + kernfs_unbreak_active_protection(kn); + + mutex_unlock(&kernfs_mutex); + return ret; +} + +/** + * kernfs_remove_by_name_ns - find a kernfs_node by name and remove it + * @parent: parent of the target + * @name: name of the kernfs_node to remove + * @ns: namespace tag of the kernfs_node to remove + * + * Look for the kernfs_node with @name and @ns under @parent and remove it. + * Returns 0 on success, -ENOENT if such entry doesn't exist.
+ */ +int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name, + const void *ns) +{ + struct kernfs_node *kn; + + if (!parent) { + WARN(1, KERN_WARNING "kernfs: can not remove '%s', no directory\n", + name); + return -ENOENT; + } + + mutex_lock(&kernfs_mutex); + + kn = kernfs_find_ns(parent, name, ns); + if (kn) + __kernfs_remove(kn); + + mutex_unlock(&kernfs_mutex); + + if (kn) + return 0; + else + return -ENOENT; +} + +/** + * kernfs_rename_ns - move and rename a kernfs_node + * @kn: target node + * @new_parent: new parent to put @kn under + * @new_name: new name + * @new_ns: new namespace tag + */ +int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent, + const char *new_name, const void *new_ns) +{ + int error; + + mutex_lock(&kernfs_mutex); + + error = -ENOENT; + if (!kernfs_active(kn) || !kernfs_active(new_parent)) + goto out; + + error = 0; + if ((kn->parent == new_parent) && (kn->ns == new_ns) && + (strcmp(kn->name, new_name) == 0)) + goto out; /* nothing to rename */ + + error = -EEXIST; + if (kernfs_find_ns(new_parent, new_name, new_ns)) + goto out; + + /* rename kernfs_node */ + if (strcmp(kn->name, new_name) != 0) { + error = -ENOMEM; + new_name = kstrdup(new_name, GFP_KERNEL); + if (!new_name) + goto out; + + if (kn->flags & KERNFS_STATIC_NAME) + kn->flags &= ~KERNFS_STATIC_NAME; + else + kfree(kn->name); + + kn->name = new_name; + } + + /* + * Move to the appropriate place in the appropriate directory's rbtree. + */ + kernfs_unlink_sibling(kn); + kernfs_get(new_parent); + kernfs_put(kn->parent); + kn->ns = new_ns; + kn->hash = kernfs_name_hash(kn->name, kn->ns); + kn->parent = new_parent; + kernfs_link_sibling(kn); + + error = 0; + out: + mutex_unlock(&kernfs_mutex); + return error; +} + +/* Relationship between s_mode and the DT_xxx types */ +static inline unsigned char dt_type(struct kernfs_node *kn) +{ + return (kn->mode >> 12) & 15; +} + +static int kernfs_dir_fop_release(struct inode *inode, struct file *filp) +{ + kernfs_put(filp->private_data); + return 0; +} + +static struct kernfs_node *kernfs_dir_pos(const void *ns, + struct kernfs_node *parent, loff_t hash, struct kernfs_node *pos) +{ + if (pos) { + int valid = kernfs_active(pos) && + pos->parent == parent && hash == pos->hash; + kernfs_put(pos); + if (!valid) + pos = NULL; + } + if (!pos && (hash > 1) && (hash < INT_MAX)) { + struct rb_node *node = parent->dir.children.rb_node; + while (node) { + pos = rb_to_kn(node); + + if (hash < pos->hash) + node = node->rb_left; + else if (hash > pos->hash) + node = node->rb_right; + else + break; + } + } + /* Skip over entries which are dying/dead or in the wrong namespace */ + while (pos && (!kernfs_active(pos) || pos->ns != ns)) { + struct rb_node *node = rb_next(&pos->rb); + if (!node) + pos = NULL; + else + pos = rb_to_kn(node); + } + return pos; +} + +static struct kernfs_node *kernfs_dir_next_pos(const void *ns, + struct kernfs_node *parent, ino_t ino, struct kernfs_node *pos) +{ + pos = kernfs_dir_pos(ns, parent, ino, pos); + if (pos) + do { + struct rb_node *node = rb_next(&pos->rb); + if (!node) + pos = NULL; + else + pos = rb_to_kn(node); + } while (pos && (!kernfs_active(pos) || pos->ns != ns)); + return pos; +} + +static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx) +{ + struct dentry *dentry = file->f_path.dentry; + struct kernfs_node *parent = dentry->d_fsdata; + struct kernfs_node *pos = file->private_data; + const void *ns = NULL; + + if (!dir_emit_dots(file, ctx)) + return 0; +
mutex_lock(&kernfs_mutex); + + if (kernfs_ns_enabled(parent)) + ns = kernfs_info(dentry->d_sb)->ns; + + for (pos = kernfs_dir_pos(ns, parent, ctx->pos, pos); + pos; + pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) { + const char *name = pos->name; + unsigned int type = dt_type(pos); + int len = strlen(name); + ino_t ino = pos->ino; + + ctx->pos = pos->hash; + file->private_data = pos; + kernfs_get(pos); + + mutex_unlock(&kernfs_mutex); + if (!dir_emit(ctx, name, len, ino, type)) + return 0; + mutex_lock(&kernfs_mutex); + } + mutex_unlock(&kernfs_mutex); + file->private_data = NULL; + ctx->pos = INT_MAX; + return 0; +} + +static loff_t kernfs_dir_fop_llseek(struct file *file, loff_t offset, + int whence) +{ + struct inode *inode = file_inode(file); + loff_t ret; + + mutex_lock(&inode->i_mutex); + ret = generic_file_llseek(file, offset, whence); + mutex_unlock(&inode->i_mutex); + + return ret; +} + +const struct file_operations kernfs_dir_fops = { + .read = generic_read_dir, + .iterate = kernfs_fop_readdir, + .release = kernfs_dir_fop_release, + .llseek = kernfs_dir_fop_llseek, +}; diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 470e754..018a428 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -549,7 +549,7 @@ static ssize_t acpi_battery_alarm_store(struct device *dev, { unsigned long x; struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev)); - if (sscanf(buf, "%ld\n", &x) == 1) + if (sscanf(buf, "%lu\n", &x) == 1) battery->alarm = x/1000; if (acpi_battery_present(battery)) acpi_battery_set_alarm(battery); diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c index 50fe34f..75c28ea 100644 --- a/drivers/acpi/proc.c +++ b/drivers/acpi/proc.c @@ -60,7 +60,7 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) seq_printf(seq, "%c%-8s %s:%s\n", dev->wakeup.flags.run_wake ? '*' : ' ', (device_may_wakeup(&dev->dev) || - (ldev && device_may_wakeup(ldev))) ? + device_may_wakeup(ldev)) ? "enabled" : "disabled", ldev->bus ? 
ldev->bus->name : "no-bus", dev_name(ldev)); diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 7384158..57b053f 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -484,7 +484,6 @@ static void acpi_device_hotplug(void *data, u32 src) static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data) { u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; - struct acpi_scan_handler *handler = data; struct acpi_device *adev; acpi_status status; @@ -500,7 +499,10 @@ static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data) break; case ACPI_NOTIFY_EJECT_REQUEST: acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n"); - if (!handler->hotplug.enabled) { + if (!adev->handler) + goto err_out; + + if (!adev->handler->hotplug.enabled) { acpi_handle_err(handle, "Eject disabled\n"); ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED; goto err_out; diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index 0347a37..85e3b61 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -99,10 +99,6 @@ acpi_extract_package(union acpi_object *package, union acpi_object *element = &(package->package.elements[i]); - if (!element) { - return AE_BAD_DATA; - } - switch (element->type) { case ACPI_TYPE_INTEGER: diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index f0447d3..a697b77 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -170,6 +170,14 @@ static struct dmi_system_id video_detect_dmi_table[] = { }, { .callback = video_detect_force_vendor, + .ident = "HP EliteBook Revolve 810", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook Revolve 810 G1"), + }, + }, + { + .callback = video_detect_force_vendor, .ident = "Lenovo Yoga 13", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index 1f14ac4..51824d1 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -46,7 +46,6 @@ #define NVME_Q_DEPTH 1024 #define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) #define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion)) -#define NVME_MINORS 64 #define ADMIN_TIMEOUT (60 * HZ) static int nvme_major; @@ -58,6 +57,17 @@ module_param(use_threaded_interrupts, int, 0); static DEFINE_SPINLOCK(dev_list_lock); static LIST_HEAD(dev_list); static struct task_struct *nvme_thread; +static struct workqueue_struct *nvme_workq; + +static void nvme_reset_failed_dev(struct work_struct *ws); + +struct async_cmd_info { + struct kthread_work work; + struct kthread_worker *worker; + u32 result; + int status; + void *ctx; +}; /* * An NVM Express queue. 
Each device has at least two (one for admin @@ -66,6 +76,7 @@ static struct task_struct *nvme_thread; struct nvme_queue { struct device *q_dmadev; struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ spinlock_t q_lock; struct nvme_command *sq_cmds; volatile struct nvme_completion *cqes; @@ -80,9 +91,11 @@ struct nvme_queue { u16 sq_head; u16 sq_tail; u16 cq_head; + u16 qid; u8 cq_phase; u8 cqe_seen; u8 q_suspended; + struct async_cmd_info cmdinfo; unsigned long cmdid_data[]; }; @@ -97,6 +110,7 @@ static inline void _nvme_check_size(void) BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64); BUILD_BUG_ON(sizeof(struct nvme_features) != 64); BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64); + BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64); BUILD_BUG_ON(sizeof(struct nvme_command) != 64); BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096); BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096); @@ -111,6 +125,7 @@ struct nvme_cmd_info { nvme_completion_fn fn; void *ctx; unsigned long timeout; + int aborted; }; static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq) @@ -154,6 +169,7 @@ static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, info[cmdid].fn = handler; info[cmdid].ctx = ctx; info[cmdid].timeout = jiffies + timeout; + info[cmdid].aborted = 0; return cmdid; } @@ -172,6 +188,7 @@ static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx, #define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE) #define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE) #define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE) +#define CMD_CTX_ABORT (0x31C + CMD_CTX_BASE) static void special_completion(struct nvme_dev *dev, void *ctx, struct nvme_completion *cqe) @@ -180,6 +197,10 @@ static void special_completion(struct nvme_dev *dev, void *ctx, return; if (ctx == CMD_CTX_FLUSH) return; + if (ctx == CMD_CTX_ABORT) { + ++dev->abort_limit; + return; + } if (ctx == CMD_CTX_COMPLETED) { dev_warn(&dev->pci_dev->dev, "completed id %d twice on queue %d\n", @@ -196,6 +217,15 @@ static void special_completion(struct nvme_dev *dev, void *ctx, dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx); } +static void async_completion(struct nvme_dev *dev, void *ctx, + struct nvme_completion *cqe) +{ + struct async_cmd_info *cmdinfo = ctx; + cmdinfo->result = le32_to_cpup(&cqe->result); + cmdinfo->status = le16_to_cpup(&cqe->status) >> 1; + queue_kthread_work(cmdinfo->worker, &cmdinfo->work); +} + /* * Called with local interrupts disabled and the q_lock held. May not sleep. 
*/ @@ -693,7 +723,7 @@ static int nvme_process_cq(struct nvme_queue *nvmeq) if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) return 0; - writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride)); + writel(head, nvmeq->q_db + nvmeq->dev->db_stride); nvmeq->cq_head = head; nvmeq->cq_phase = phase; @@ -804,12 +834,34 @@ int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd, return cmdinfo.status; } +static int nvme_submit_async_cmd(struct nvme_queue *nvmeq, + struct nvme_command *cmd, + struct async_cmd_info *cmdinfo, unsigned timeout) +{ + int cmdid; + + cmdid = alloc_cmdid_killable(nvmeq, cmdinfo, async_completion, timeout); + if (cmdid < 0) + return cmdid; + cmdinfo->status = -EINTR; + cmd->common.command_id = cmdid; + nvme_submit_cmd(nvmeq, cmd); + return 0; +} + int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd, u32 *result) { return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT); } +static int nvme_submit_admin_cmd_async(struct nvme_dev *dev, + struct nvme_command *cmd, struct async_cmd_info *cmdinfo) +{ + return nvme_submit_async_cmd(dev->queues[0], cmd, cmdinfo, + ADMIN_TIMEOUT); +} + static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) { int status; @@ -920,6 +972,56 @@ int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11, } /** + * nvme_abort_cmd - Attempt aborting a command + * @cmdid: Command id of a timed out IO + * @queue: The queue with timed out IO + * + * Schedule controller reset if the command was already aborted once before and + * still hasn't been returned to the driver, or if this is the admin queue. + */ +static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq) +{ + int a_cmdid; + struct nvme_command cmd; + struct nvme_dev *dev = nvmeq->dev; + struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); + + if (!nvmeq->qid || info[cmdid].aborted) { + if (work_busy(&dev->reset_work)) + return; + list_del_init(&dev->node); + dev_warn(&dev->pci_dev->dev, + "I/O %d QID %d timeout, reset controller\n", cmdid, + nvmeq->qid); + PREPARE_WORK(&dev->reset_work, nvme_reset_failed_dev); + queue_work(nvme_workq, &dev->reset_work); + return; + } + + if (!dev->abort_limit) + return; + + a_cmdid = alloc_cmdid(dev->queues[0], CMD_CTX_ABORT, special_completion, + ADMIN_TIMEOUT); + if (a_cmdid < 0) + return; + + memset(&cmd, 0, sizeof(cmd)); + cmd.abort.opcode = nvme_admin_abort_cmd; + cmd.abort.cid = cmdid; + cmd.abort.sqid = cpu_to_le16(nvmeq->qid); + cmd.abort.command_id = a_cmdid; + + --dev->abort_limit; + info[cmdid].aborted = 1; + info[cmdid].timeout = jiffies + ADMIN_TIMEOUT; + + dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid, + nvmeq->qid); + nvme_submit_cmd(dev->queues[0], &cmd); +} + +/** * nvme_cancel_ios - Cancel outstanding I/Os * @queue: The queue to cancel I/Os on * @timeout: True to only cancel I/Os which have timed out @@ -942,7 +1044,12 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout) continue; if (info[cmdid].ctx == CMD_CTX_CANCELLED) continue; - dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid); + if (timeout && nvmeq->dev->initialized) { + nvme_abort_cmd(cmdid, nvmeq); + continue; + } + dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid, + nvmeq->qid); ctx = cancel_cmdid(nvmeq, cmdid, &fn); fn(nvmeq->dev, ctx, &cqe); } @@ -964,26 +1071,31 @@ static void nvme_free_queue(struct nvme_queue *nvmeq) kfree(nvmeq); } -static void nvme_free_queues(struct nvme_dev *dev) +static void nvme_free_queues(struct nvme_dev 
*dev, int lowest) { int i; - for (i = dev->queue_count - 1; i >= 0; i--) { + for (i = dev->queue_count - 1; i >= lowest; i--) { nvme_free_queue(dev->queues[i]); dev->queue_count--; dev->queues[i] = NULL; } } -static void nvme_disable_queue(struct nvme_dev *dev, int qid) +/** + * nvme_suspend_queue - put queue into suspended state + * @nvmeq - queue to suspend + * + * Returns 1 if already suspended, 0 otherwise. + */ +static int nvme_suspend_queue(struct nvme_queue *nvmeq) { - struct nvme_queue *nvmeq = dev->queues[qid]; - int vector = dev->entry[nvmeq->cq_vector].vector; + int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector; spin_lock_irq(&nvmeq->q_lock); if (nvmeq->q_suspended) { spin_unlock_irq(&nvmeq->q_lock); - return; + return 1; } nvmeq->q_suspended = 1; spin_unlock_irq(&nvmeq->q_lock); @@ -991,18 +1103,35 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid) irq_set_affinity_hint(vector, NULL); free_irq(vector, nvmeq); - /* Don't tell the adapter to delete the admin queue */ - if (qid) { - adapter_delete_sq(dev, qid); - adapter_delete_cq(dev, qid); - } + return 0; +} +static void nvme_clear_queue(struct nvme_queue *nvmeq) +{ spin_lock_irq(&nvmeq->q_lock); nvme_process_cq(nvmeq); nvme_cancel_ios(nvmeq, false); spin_unlock_irq(&nvmeq->q_lock); } +static void nvme_disable_queue(struct nvme_dev *dev, int qid) +{ + struct nvme_queue *nvmeq = dev->queues[qid]; + + if (!nvmeq) + return; + if (nvme_suspend_queue(nvmeq)) + return; + + /* Don't tell the adapter to delete the admin queue. + * Don't tell a removed adapter to delete IO queues. */ + if (qid && readl(&dev->bar->csts) != -1) { + adapter_delete_sq(dev, qid); + adapter_delete_cq(dev, qid); + } + nvme_clear_queue(nvmeq); +} + static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth, int vector) { @@ -1025,15 +1154,18 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, nvmeq->q_dmadev = dmadev; nvmeq->dev = dev; + snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d", + dev->instance, qid); spin_lock_init(&nvmeq->q_lock); nvmeq->cq_head = 0; nvmeq->cq_phase = 1; init_waitqueue_head(&nvmeq->sq_full); init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread); bio_list_init(&nvmeq->sq_cong); - nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)]; + nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; nvmeq->q_depth = depth; nvmeq->cq_vector = vector; + nvmeq->qid = qid; nvmeq->q_suspended = 1; dev->queue_count++; @@ -1052,11 +1184,10 @@ static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq, { if (use_threaded_interrupts) return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector, - nvme_irq_check, nvme_irq, - IRQF_DISABLED | IRQF_SHARED, + nvme_irq_check, nvme_irq, IRQF_SHARED, name, nvmeq); return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq, - IRQF_DISABLED | IRQF_SHARED, name, nvmeq); + IRQF_SHARED, name, nvmeq); } static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) @@ -1067,7 +1198,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) nvmeq->sq_tail = 0; nvmeq->cq_head = 0; nvmeq->cq_phase = 1; - nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)]; + nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; memset(nvmeq->cmdid_data, 0, extra); memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth)); nvme_cancel_ios(nvmeq, false); @@ -1087,13 +1218,13 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) if (result < 0) goto release_cq; - result = queue_request_irq(dev, nvmeq, "nvme"); + result = 
queue_request_irq(dev, nvmeq, nvmeq->irqname); if (result < 0) goto release_sq; - spin_lock(&nvmeq->q_lock); + spin_lock_irq(&nvmeq->q_lock); nvme_init_queue(nvmeq, qid); - spin_unlock(&nvmeq->q_lock); + spin_unlock_irq(&nvmeq->q_lock); return result; @@ -1205,13 +1336,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) if (result) return result; - result = queue_request_irq(dev, nvmeq, "nvme admin"); + result = queue_request_irq(dev, nvmeq, nvmeq->irqname); if (result) return result; - spin_lock(&nvmeq->q_lock); + spin_lock_irq(&nvmeq->q_lock); nvme_init_queue(nvmeq, 0); - spin_unlock(&nvmeq->q_lock); + spin_unlock_irq(&nvmeq->q_lock); return result; } @@ -1487,10 +1618,47 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, } } +#ifdef CONFIG_COMPAT +static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + struct nvme_ns *ns = bdev->bd_disk->private_data; + + switch (cmd) { + case SG_IO: + return nvme_sg_io32(ns, arg); + } + return nvme_ioctl(bdev, mode, cmd, arg); +} +#else +#define nvme_compat_ioctl NULL +#endif + +static int nvme_open(struct block_device *bdev, fmode_t mode) +{ + struct nvme_ns *ns = bdev->bd_disk->private_data; + struct nvme_dev *dev = ns->dev; + + kref_get(&dev->kref); + return 0; +} + +static void nvme_free_dev(struct kref *kref); + +static void nvme_release(struct gendisk *disk, fmode_t mode) +{ + struct nvme_ns *ns = disk->private_data; + struct nvme_dev *dev = ns->dev; + + kref_put(&dev->kref, nvme_free_dev); +} + static const struct block_device_operations nvme_fops = { .owner = THIS_MODULE, .ioctl = nvme_ioctl, - .compat_ioctl = nvme_ioctl, + .compat_ioctl = nvme_compat_ioctl, + .open = nvme_open, + .release = nvme_release, }; static void nvme_resubmit_bios(struct nvme_queue *nvmeq) @@ -1514,13 +1682,25 @@ static void nvme_resubmit_bios(struct nvme_queue *nvmeq) static int nvme_kthread(void *data) { - struct nvme_dev *dev; + struct nvme_dev *dev, *next; while (!kthread_should_stop()) { set_current_state(TASK_INTERRUPTIBLE); spin_lock(&dev_list_lock); - list_for_each_entry(dev, &dev_list, node) { + list_for_each_entry_safe(dev, next, &dev_list, node) { int i; + if (readl(&dev->bar->csts) & NVME_CSTS_CFS && + dev->initialized) { + if (work_busy(&dev->reset_work)) + continue; + list_del_init(&dev->node); + dev_warn(&dev->pci_dev->dev, + "Failed status, reset controller\n"); + PREPARE_WORK(&dev->reset_work, + nvme_reset_failed_dev); + queue_work(nvme_workq, &dev->reset_work); + continue; + } for (i = 0; i < dev->queue_count; i++) { struct nvme_queue *nvmeq = dev->queues[i]; if (!nvmeq) @@ -1541,33 +1721,6 @@ static int nvme_kthread(void *data) return 0; } -static DEFINE_IDA(nvme_index_ida); - -static int nvme_get_ns_idx(void) -{ - int index, error; - - do { - if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL)) - return -1; - - spin_lock(&dev_list_lock); - error = ida_get_new(&nvme_index_ida, &index); - spin_unlock(&dev_list_lock); - } while (error == -EAGAIN); - - if (error) - index = -1; - return index; -} - -static void nvme_put_ns_idx(int index) -{ - spin_lock(&dev_list_lock); - ida_remove(&nvme_index_ida, index); - spin_unlock(&dev_list_lock); -} - static void nvme_config_discard(struct nvme_ns *ns) { u32 logical_block_size = queue_logical_block_size(ns->queue); @@ -1601,7 +1754,7 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid, ns->dev = dev; ns->queue->queuedata = ns; - disk = alloc_disk(NVME_MINORS); + disk = alloc_disk(0); if (!disk) 
goto out_free_queue; ns->ns_id = nsid; @@ -1614,12 +1767,12 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid, blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); disk->major = nvme_major; - disk->minors = NVME_MINORS; - disk->first_minor = NVME_MINORS * nvme_get_ns_idx(); + disk->first_minor = 0; disk->fops = &nvme_fops; disk->private_data = ns; disk->queue = ns->queue; disk->driverfs_dev = &dev->pci_dev->dev; + disk->flags = GENHD_FL_EXT_DEVT; sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid); set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9)); @@ -1635,15 +1788,6 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid, return NULL; } -static void nvme_ns_free(struct nvme_ns *ns) -{ - int index = ns->disk->first_minor / NVME_MINORS; - put_disk(ns->disk); - nvme_put_ns_idx(index); - blk_cleanup_queue(ns->queue); - kfree(ns); -} - static int set_queue_count(struct nvme_dev *dev, int count) { int status; @@ -1659,11 +1803,12 @@ static int set_queue_count(struct nvme_dev *dev, int count) static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) { - return 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3)); + return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride); } static int nvme_setup_io_queues(struct nvme_dev *dev) { + struct nvme_queue *adminq = dev->queues[0]; struct pci_dev *pdev = dev->pci_dev; int result, cpu, i, vecs, nr_io_queues, size, q_depth; @@ -1690,7 +1835,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) } /* Deregister the admin queue's interrupt */ - free_irq(dev->entry[0].vector, dev->queues[0]); + free_irq(dev->entry[0].vector, adminq); vecs = nr_io_queues; for (i = 0; i < vecs; i++) @@ -1728,9 +1873,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) */ nr_io_queues = vecs; - result = queue_request_irq(dev, dev->queues[0], "nvme admin"); + result = queue_request_irq(dev, adminq, adminq->irqname); if (result) { - dev->queues[0]->q_suspended = 1; + adminq->q_suspended = 1; goto free_queues; } @@ -1739,9 +1884,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) for (i = dev->queue_count - 1; i > nr_io_queues; i--) { struct nvme_queue *nvmeq = dev->queues[i]; - spin_lock(&nvmeq->q_lock); + spin_lock_irq(&nvmeq->q_lock); nvme_cancel_ios(nvmeq, false); - spin_unlock(&nvmeq->q_lock); + spin_unlock_irq(&nvmeq->q_lock); nvme_free_queue(nvmeq); dev->queue_count--; @@ -1782,7 +1927,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) return 0; free_queues: - nvme_free_queues(dev); + nvme_free_queues(dev, 1); return result; } @@ -1794,6 +1939,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) */ static int nvme_dev_add(struct nvme_dev *dev) { + struct pci_dev *pdev = dev->pci_dev; int res; unsigned nn, i; struct nvme_ns *ns; @@ -1803,8 +1949,7 @@ static int nvme_dev_add(struct nvme_dev *dev) dma_addr_t dma_addr; int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12; - mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr, - GFP_KERNEL); + mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL); if (!mem) return -ENOMEM; @@ -1817,13 +1962,14 @@ static int nvme_dev_add(struct nvme_dev *dev) ctrl = mem; nn = le32_to_cpup(&ctrl->nn); dev->oncs = le16_to_cpup(&ctrl->oncs); + dev->abort_limit = ctrl->acl + 1; memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn)); memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn)); memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr)); if (ctrl->mdts) dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9); - if 
((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) && - (dev->pci_dev->device == 0x0953) && ctrl->vs[3]) + if ((pdev->vendor == PCI_VENDOR_ID_INTEL) && + (pdev->device == 0x0953) && ctrl->vs[3]) dev->stripe_size = 1 << (ctrl->vs[3] + shift); id_ns = mem; @@ -1871,16 +2017,21 @@ static int nvme_dev_map(struct nvme_dev *dev) dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) goto disable; - pci_set_drvdata(pdev, dev); dev->bar = ioremap(pci_resource_start(pdev, 0), 8192); if (!dev->bar) goto disable; - - dev->db_stride = NVME_CAP_STRIDE(readq(&dev->bar->cap)); + if (readl(&dev->bar->csts) == -1) { + result = -ENODEV; + goto unmap; + } + dev->db_stride = 1 << NVME_CAP_STRIDE(readq(&dev->bar->cap)); dev->dbs = ((void __iomem *)dev->bar) + 4096; return 0; + unmap: + iounmap(dev->bar); + dev->bar = NULL; disable: pci_release_regions(pdev); disable_pci: @@ -1898,37 +2049,183 @@ static void nvme_dev_unmap(struct nvme_dev *dev) if (dev->bar) { iounmap(dev->bar); dev->bar = NULL; + pci_release_regions(dev->pci_dev); } - pci_release_regions(dev->pci_dev); if (pci_is_enabled(dev->pci_dev)) pci_disable_device(dev->pci_dev); } +struct nvme_delq_ctx { + struct task_struct *waiter; + struct kthread_worker *worker; + atomic_t refcount; +}; + +static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev) +{ + dq->waiter = current; + mb(); + + for (;;) { + set_current_state(TASK_KILLABLE); + if (!atomic_read(&dq->refcount)) + break; + if (!schedule_timeout(ADMIN_TIMEOUT) || + fatal_signal_pending(current)) { + set_current_state(TASK_RUNNING); + + nvme_disable_ctrl(dev, readq(&dev->bar->cap)); + nvme_disable_queue(dev, 0); + + send_sig(SIGKILL, dq->worker->task, 1); + flush_kthread_worker(dq->worker); + return; + } + } + set_current_state(TASK_RUNNING); +} + +static void nvme_put_dq(struct nvme_delq_ctx *dq) +{ + atomic_dec(&dq->refcount); + if (dq->waiter) + wake_up_process(dq->waiter); +} + +static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq) +{ + atomic_inc(&dq->refcount); + return dq; +} + +static void nvme_del_queue_end(struct nvme_queue *nvmeq) +{ + struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx; + + nvme_clear_queue(nvmeq); + nvme_put_dq(dq); +} + +static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode, + kthread_work_func_t fn) +{ + struct nvme_command c; + + memset(&c, 0, sizeof(c)); + c.delete_queue.opcode = opcode; + c.delete_queue.qid = cpu_to_le16(nvmeq->qid); + + init_kthread_work(&nvmeq->cmdinfo.work, fn); + return nvme_submit_admin_cmd_async(nvmeq->dev, &c, &nvmeq->cmdinfo); +} + +static void nvme_del_cq_work_handler(struct kthread_work *work) +{ + struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, + cmdinfo.work); + nvme_del_queue_end(nvmeq); +} + +static int nvme_delete_cq(struct nvme_queue *nvmeq) +{ + return adapter_async_del_queue(nvmeq, nvme_admin_delete_cq, + nvme_del_cq_work_handler); +} + +static void nvme_del_sq_work_handler(struct kthread_work *work) +{ + struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, + cmdinfo.work); + int status = nvmeq->cmdinfo.status; + + if (!status) + status = nvme_delete_cq(nvmeq); + if (status) + nvme_del_queue_end(nvmeq); +} + +static int nvme_delete_sq(struct nvme_queue *nvmeq) +{ + return adapter_async_del_queue(nvmeq, nvme_admin_delete_sq, + nvme_del_sq_work_handler); +} + +static void nvme_del_queue_start(struct kthread_work *work) +{ + struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, + cmdinfo.work); + allow_signal(SIGKILL); + if (nvme_delete_sq(nvmeq)) + 
nvme_del_queue_end(nvmeq); +} + +static void nvme_disable_io_queues(struct nvme_dev *dev) +{ + int i; + DEFINE_KTHREAD_WORKER_ONSTACK(worker); + struct nvme_delq_ctx dq; + struct task_struct *kworker_task = kthread_run(kthread_worker_fn, + &worker, "nvme%d", dev->instance); + + if (IS_ERR(kworker_task)) { + dev_err(&dev->pci_dev->dev, + "Failed to create queue del task\n"); + for (i = dev->queue_count - 1; i > 0; i--) + nvme_disable_queue(dev, i); + return; + } + + dq.waiter = NULL; + atomic_set(&dq.refcount, 0); + dq.worker = &worker; + for (i = dev->queue_count - 1; i > 0; i--) { + struct nvme_queue *nvmeq = dev->queues[i]; + + if (nvme_suspend_queue(nvmeq)) + continue; + nvmeq->cmdinfo.ctx = nvme_get_dq(&dq); + nvmeq->cmdinfo.worker = dq.worker; + init_kthread_work(&nvmeq->cmdinfo.work, nvme_del_queue_start); + queue_kthread_work(dq.worker, &nvmeq->cmdinfo.work); + } + nvme_wait_dq(&dq, dev); + kthread_stop(kworker_task); +} + static void nvme_dev_shutdown(struct nvme_dev *dev) { int i; - for (i = dev->queue_count - 1; i >= 0; i--) - nvme_disable_queue(dev, i); + dev->initialized = 0; spin_lock(&dev_list_lock); list_del_init(&dev->node); spin_unlock(&dev_list_lock); - if (dev->bar) + if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) { + for (i = dev->queue_count - 1; i >= 0; i--) { + struct nvme_queue *nvmeq = dev->queues[i]; + nvme_suspend_queue(nvmeq); + nvme_clear_queue(nvmeq); + } + } else { + nvme_disable_io_queues(dev); nvme_shutdown_ctrl(dev); + nvme_disable_queue(dev, 0); + } nvme_dev_unmap(dev); } static void nvme_dev_remove(struct nvme_dev *dev) { - struct nvme_ns *ns, *next; + struct nvme_ns *ns; - list_for_each_entry_safe(ns, next, &dev->namespaces, list) { - list_del(&ns->list); - del_gendisk(ns->disk); - nvme_ns_free(ns); + list_for_each_entry(ns, &dev->namespaces, list) { + if (ns->disk->flags & GENHD_FL_UP) + del_gendisk(ns->disk); + if (!blk_queue_dying(ns->queue)) + blk_cleanup_queue(ns->queue); } } @@ -1985,14 +2282,22 @@ static void nvme_release_instance(struct nvme_dev *dev) spin_unlock(&dev_list_lock); } +static void nvme_free_namespaces(struct nvme_dev *dev) +{ + struct nvme_ns *ns, *next; + + list_for_each_entry_safe(ns, next, &dev->namespaces, list) { + list_del(&ns->list); + put_disk(ns->disk); + kfree(ns); + } +} + static void nvme_free_dev(struct kref *kref) { struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref); - nvme_dev_remove(dev); - nvme_dev_shutdown(dev); - nvme_free_queues(dev); - nvme_release_instance(dev); - nvme_release_prp_pools(dev); + + nvme_free_namespaces(dev); kfree(dev->queues); kfree(dev->entry); kfree(dev); @@ -2056,6 +2361,7 @@ static int nvme_dev_start(struct nvme_dev *dev) return result; disable: + nvme_disable_queue(dev, 0); spin_lock(&dev_list_lock); list_del_init(&dev->node); spin_unlock(&dev_list_lock); @@ -2064,6 +2370,71 @@ static int nvme_dev_start(struct nvme_dev *dev) return result; } +static int nvme_remove_dead_ctrl(void *arg) +{ + struct nvme_dev *dev = (struct nvme_dev *)arg; + struct pci_dev *pdev = dev->pci_dev; + + if (pci_get_drvdata(pdev)) + pci_stop_and_remove_bus_device(pdev); + kref_put(&dev->kref, nvme_free_dev); + return 0; +} + +static void nvme_remove_disks(struct work_struct *ws) +{ + int i; + struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work); + + nvme_dev_remove(dev); + spin_lock(&dev_list_lock); + for (i = dev->queue_count - 1; i > 0; i--) { + BUG_ON(!dev->queues[i] || !dev->queues[i]->q_suspended); + nvme_free_queue(dev->queues[i]); + dev->queue_count--; + 
dev->queues[i] = NULL; + } + spin_unlock(&dev_list_lock); +} + +static int nvme_dev_resume(struct nvme_dev *dev) +{ + int ret; + + ret = nvme_dev_start(dev); + if (ret && ret != -EBUSY) + return ret; + if (ret == -EBUSY) { + spin_lock(&dev_list_lock); + PREPARE_WORK(&dev->reset_work, nvme_remove_disks); + queue_work(nvme_workq, &dev->reset_work); + spin_unlock(&dev_list_lock); + } + dev->initialized = 1; + return 0; +} + +static void nvme_dev_reset(struct nvme_dev *dev) +{ + nvme_dev_shutdown(dev); + if (nvme_dev_resume(dev)) { + dev_err(&dev->pci_dev->dev, "Device failed to resume\n"); + kref_get(&dev->kref); + if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d", + dev->instance))) { + dev_err(&dev->pci_dev->dev, + "Failed to start controller remove task\n"); + kref_put(&dev->kref, nvme_free_dev); + } + } +} + +static void nvme_reset_failed_dev(struct work_struct *ws) +{ + struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work); + nvme_dev_reset(dev); +} + static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int result = -ENOMEM; @@ -2082,8 +2453,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto free; INIT_LIST_HEAD(&dev->namespaces); + INIT_WORK(&dev->reset_work, nvme_reset_failed_dev); dev->pci_dev = pdev; - + pci_set_drvdata(pdev, dev); result = nvme_set_instance(dev); if (result) goto free; @@ -2099,6 +2471,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto release_pools; } + kref_init(&dev->kref); result = nvme_dev_add(dev); if (result) goto shutdown; @@ -2113,15 +2486,16 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (result) goto remove; - kref_init(&dev->kref); + dev->initialized = 1; return 0; remove: nvme_dev_remove(dev); + nvme_free_namespaces(dev); shutdown: nvme_dev_shutdown(dev); release_pools: - nvme_free_queues(dev); + nvme_free_queues(dev, 0); nvme_release_prp_pools(dev); release: nvme_release_instance(dev); @@ -2132,10 +2506,28 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) return result; } +static void nvme_shutdown(struct pci_dev *pdev) +{ + struct nvme_dev *dev = pci_get_drvdata(pdev); + nvme_dev_shutdown(dev); +} + static void nvme_remove(struct pci_dev *pdev) { struct nvme_dev *dev = pci_get_drvdata(pdev); + + spin_lock(&dev_list_lock); + list_del_init(&dev->node); + spin_unlock(&dev_list_lock); + + pci_set_drvdata(pdev, NULL); + flush_work(&dev->reset_work); misc_deregister(&dev->miscdev); + nvme_dev_remove(dev); + nvme_dev_shutdown(dev); + nvme_free_queues(dev, 0); + nvme_release_instance(dev); + nvme_release_prp_pools(dev); kref_put(&dev->kref, nvme_free_dev); } @@ -2159,13 +2551,12 @@ static int nvme_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct nvme_dev *ndev = pci_get_drvdata(pdev); - int ret; - ret = nvme_dev_start(ndev); - /* XXX: should remove gendisks if resume fails */ - if (ret) - nvme_free_queues(ndev); - return ret; + if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) { + PREPARE_WORK(&ndev->reset_work, nvme_reset_failed_dev); + queue_work(nvme_workq, &ndev->reset_work); + } + return 0; } static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume); @@ -2192,6 +2583,7 @@ static struct pci_driver nvme_driver = { .id_table = nvme_id_table, .probe = nvme_probe, .remove = nvme_remove, + .shutdown = nvme_shutdown, .driver = { .pm = &nvme_dev_pm_ops, }, @@ -2206,9 +2598,14 @@ static int __init nvme_init(void) if 
(IS_ERR(nvme_thread)) return PTR_ERR(nvme_thread); + result = -ENOMEM; + nvme_workq = create_singlethread_workqueue("nvme"); + if (!nvme_workq) + goto kill_kthread; + result = register_blkdev(nvme_major, "nvme"); if (result < 0) - goto kill_kthread; + goto kill_workq; else if (result > 0) nvme_major = result; @@ -2219,6 +2616,8 @@ static int __init nvme_init(void) unregister_blkdev: unregister_blkdev(nvme_major, "nvme"); + kill_workq: + destroy_workqueue(nvme_workq); kill_kthread: kthread_stop(nvme_thread); return result; @@ -2228,6 +2627,7 @@ static void __exit nvme_exit(void) { pci_unregister_driver(&nvme_driver); unregister_blkdev(nvme_major, "nvme"); + destroy_workqueue(nvme_workq); kthread_stop(nvme_thread); } diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c index 4a4ff4e..4a0ceb6 100644 --- a/drivers/block/nvme-scsi.c +++ b/drivers/block/nvme-scsi.c @@ -25,6 +25,7 @@ #include <linux/bio.h> #include <linux/bitops.h> #include <linux/blkdev.h> +#include <linux/compat.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/fs.h> @@ -3038,6 +3039,152 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr) return retcode; } +#ifdef CONFIG_COMPAT +typedef struct sg_io_hdr32 { + compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */ + compat_int_t dxfer_direction; /* [i] data transfer direction */ + unsigned char cmd_len; /* [i] SCSI command length ( <= 16 bytes) */ + unsigned char mx_sb_len; /* [i] max length to write to sbp */ + unsigned short iovec_count; /* [i] 0 implies no scatter gather */ + compat_uint_t dxfer_len; /* [i] byte count of data transfer */ + compat_uint_t dxferp; /* [i], [*io] points to data transfer memory + or scatter gather list */ + compat_uptr_t cmdp; /* [i], [*i] points to command to perform */ + compat_uptr_t sbp; /* [i], [*o] points to sense_buffer memory */ + compat_uint_t timeout; /* [i] MAX_UINT->no timeout (unit: millisec) */ + compat_uint_t flags; /* [i] 0 -> default, see SG_FLAG... 
*/ + compat_int_t pack_id; /* [i->o] unused internally (normally) */ + compat_uptr_t usr_ptr; /* [i->o] unused internally */ + unsigned char status; /* [o] scsi status */ + unsigned char masked_status; /* [o] shifted, masked scsi status */ + unsigned char msg_status; /* [o] messaging level data (optional) */ + unsigned char sb_len_wr; /* [o] byte count actually written to sbp */ + unsigned short host_status; /* [o] errors from host adapter */ + unsigned short driver_status; /* [o] errors from software driver */ + compat_int_t resid; /* [o] dxfer_len - actual_transferred */ + compat_uint_t duration; /* [o] time taken by cmd (unit: millisec) */ + compat_uint_t info; /* [o] auxiliary information */ +} sg_io_hdr32_t; /* 64 bytes long (on sparc32) */ + +typedef struct sg_iovec32 { + compat_uint_t iov_base; + compat_uint_t iov_len; +} sg_iovec32_t; + +static int sg_build_iovec(sg_io_hdr_t __user *sgio, void __user *dxferp, u16 iovec_count) +{ + sg_iovec_t __user *iov = (sg_iovec_t __user *) (sgio + 1); + sg_iovec32_t __user *iov32 = dxferp; + int i; + + for (i = 0; i < iovec_count; i++) { + u32 base, len; + + if (get_user(base, &iov32[i].iov_base) || + get_user(len, &iov32[i].iov_len) || + put_user(compat_ptr(base), &iov[i].iov_base) || + put_user(len, &iov[i].iov_len)) + return -EFAULT; + } + + if (put_user(iov, &sgio->dxferp)) + return -EFAULT; + return 0; +} + +int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg) +{ + sg_io_hdr32_t __user *sgio32 = (sg_io_hdr32_t __user *)arg; + sg_io_hdr_t __user *sgio; + u16 iovec_count; + u32 data; + void __user *dxferp; + int err; + int interface_id; + + if (get_user(interface_id, &sgio32->interface_id)) + return -EFAULT; + if (interface_id != 'S') + return -EINVAL; + + if (get_user(iovec_count, &sgio32->iovec_count)) + return -EFAULT; + + { + void __user *top = compat_alloc_user_space(0); + void __user *new = compat_alloc_user_space(sizeof(sg_io_hdr_t) + + (iovec_count * sizeof(sg_iovec_t))); + if (new > top) + return -EINVAL; + + sgio = new; + } + + /* Ok, now construct. 
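+ * the 64-bit sg_io_hdr_t in compat-allocated user space: runs of
+ * fields whose layout matches in both structs are copied wholesale
+ * with copy_in_user(), while pointer-sized fields are widened one
+ * at a time with compat_ptr().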
*/ + if (copy_in_user(&sgio->interface_id, &sgio32->interface_id, + (2 * sizeof(int)) + + (2 * sizeof(unsigned char)) + + (1 * sizeof(unsigned short)) + + (1 * sizeof(unsigned int)))) + return -EFAULT; + + if (get_user(data, &sgio32->dxferp)) + return -EFAULT; + dxferp = compat_ptr(data); + if (iovec_count) { + if (sg_build_iovec(sgio, dxferp, iovec_count)) + return -EFAULT; + } else { + if (put_user(dxferp, &sgio->dxferp)) + return -EFAULT; + } + + { + unsigned char __user *cmdp; + unsigned char __user *sbp; + + if (get_user(data, &sgio32->cmdp)) + return -EFAULT; + cmdp = compat_ptr(data); + + if (get_user(data, &sgio32->sbp)) + return -EFAULT; + sbp = compat_ptr(data); + + if (put_user(cmdp, &sgio->cmdp) || + put_user(sbp, &sgio->sbp)) + return -EFAULT; + } + + if (copy_in_user(&sgio->timeout, &sgio32->timeout, + 3 * sizeof(int))) + return -EFAULT; + + if (get_user(data, &sgio32->usr_ptr)) + return -EFAULT; + if (put_user(compat_ptr(data), &sgio->usr_ptr)) + return -EFAULT; + + err = nvme_sg_io(ns, sgio); + if (err >= 0) { + void __user *datap; + + if (copy_in_user(&sgio32->pack_id, &sgio->pack_id, + sizeof(int)) || + get_user(datap, &sgio->usr_ptr) || + put_user((u32)(unsigned long)datap, + &sgio32->usr_ptr) || + copy_in_user(&sgio32->status, &sgio->status, + (4 * sizeof(unsigned char)) + + (2 * sizeof(unsigned short)) + + (3 * sizeof(int)))) + err = -EFAULT; + } + + return err; +} +#endif + int nvme_sg_get_version_num(int __user *ip) { return put_user(sg_version_num, ip); diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index da18046..4b97b86 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -285,7 +285,8 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST || !rb_next(&persistent_gnt->node)) { - ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap); + ret = gnttab_unmap_refs(unmap, NULL, pages, + segs_to_unmap); BUG_ON(ret); put_free_pages(blkif, pages, segs_to_unmap); segs_to_unmap = 0; @@ -320,7 +321,8 @@ static void unmap_purged_grants(struct work_struct *work) pages[segs_to_unmap] = persistent_gnt->page; if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) { - ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap); + ret = gnttab_unmap_refs(unmap, NULL, pages, + segs_to_unmap); BUG_ON(ret); put_free_pages(blkif, pages, segs_to_unmap); segs_to_unmap = 0; @@ -328,7 +330,7 @@ static void unmap_purged_grants(struct work_struct *work) kfree(persistent_gnt); } if (segs_to_unmap > 0) { - ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap); + ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap); BUG_ON(ret); put_free_pages(blkif, pages, segs_to_unmap); } @@ -668,14 +670,15 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif, GNTMAP_host_map, pages[i]->handle); pages[i]->handle = BLKBACK_INVALID_HANDLE; if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) { - ret = gnttab_unmap_refs(unmap, unmap_pages, invcount); + ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, + invcount); BUG_ON(ret); put_free_pages(blkif, unmap_pages, invcount); invcount = 0; } } if (invcount) { - ret = gnttab_unmap_refs(unmap, unmap_pages, invcount); + ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount); BUG_ON(ret); put_free_pages(blkif, unmap_pages, invcount); } @@ -737,7 +740,7 @@ again: } if (segs_to_map) { - ret = gnttab_map_refs(map, pages_to_gnt, segs_to_map); + ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map); 
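+ /* blkback passes NULL for the new second argument (the
+ * kmap_ops-style array in this kernel series): it has no separate
+ * kernel-address mappings to set up, so only the grant map
+ * operations built above are processed. */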
BUG_ON(ret); } diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index feea87c..6928d09 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -890,12 +890,10 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf, } else { /* Failback to copying a page */ struct page *page = alloc_page(GFP_KERNEL); - char *src = buf->ops->map(pipe, buf, 1); - char *dst; + char *src; if (!page) return -ENOMEM; - dst = kmap(page); offset = sd->pos & ~PAGE_MASK; @@ -903,9 +901,8 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf, if (len + offset > PAGE_SIZE) len = PAGE_SIZE - offset; - memcpy(dst + offset, src + buf->offset, len); - - kunmap(page); + src = buf->ops->map(pipe, buf, 1); + memcpy(page_address(page) + offset, src + buf->offset, len); buf->ops->unmap(pipe, buf, src); sg_set_page(&(sgl->sg[sgl->n]), page, len, offset); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 7e257b2..79606f473 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -57,6 +57,7 @@ struct sample { int32_t core_pct_busy; u64 aperf; u64 mperf; + unsigned long long tsc; int freq; }; @@ -96,6 +97,7 @@ struct cpudata { u64 prev_aperf; u64 prev_mperf; + unsigned long long prev_tsc; int sample_ptr; struct sample samples[SAMPLE_COUNT]; }; @@ -548,30 +550,41 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu, struct sample *sample) { u64 core_pct; - core_pct = div64_u64(int_tofp(sample->aperf * 100), - sample->mperf); - sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000); + u64 c0_pct; - sample->core_pct_busy = core_pct; + core_pct = div64_u64(sample->aperf * 100, sample->mperf); + + c0_pct = div64_u64(sample->mperf * 100, sample->tsc); + sample->freq = fp_toint( + mul_fp(int_tofp(cpu->pstate.max_pstate), + int_tofp(core_pct * 1000))); + + sample->core_pct_busy = mul_fp(int_tofp(core_pct), + div_fp(int_tofp(c0_pct + 1), int_tofp(100))); } static inline void intel_pstate_sample(struct cpudata *cpu) { u64 aperf, mperf; + unsigned long long tsc; rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_MPERF, mperf); + tsc = native_read_tsc(); cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT; cpu->samples[cpu->sample_ptr].aperf = aperf; cpu->samples[cpu->sample_ptr].mperf = mperf; + cpu->samples[cpu->sample_ptr].tsc = tsc; cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf; cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf; + cpu->samples[cpu->sample_ptr].tsc -= cpu->prev_tsc; intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]); cpu->prev_aperf = aperf; cpu->prev_mperf = mperf; + cpu->prev_tsc = tsc; } static inline void intel_pstate_set_sample_time(struct cpudata *cpu) diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c index 3f65dd6..a28640f 100644 --- a/drivers/gpu/drm/ast/ast_fb.c +++ b/drivers/gpu/drm/ast/ast_fb.c @@ -65,7 +65,7 @@ static void ast_dirty_update(struct ast_fbdev *afbdev, * then the BO is being moved and we should * store up the damage until later. 
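 * (Note the reserve below is attempted only when sleeping is
 * allowed; in atomic context the damage is simply stored up as
 * described above.)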
*/ - if (!drm_can_sleep()) + if (drm_can_sleep()) ret = ast_bo_reserve(bo, true); if (ret) { if (ret != -EBUSY) diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c index 2fd4a92..32bbba0 100644 --- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c +++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c @@ -39,7 +39,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev, * then the BO is being moved and we should * store up the damage until later. */ - if (!drm_can_sleep()) + if (drm_can_sleep()) ret = cirrus_bo_reserve(bo, true); if (ret) { if (ret != -EBUSY) diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c index f9adc27..13b7dd8 100644 --- a/drivers/gpu/drm/mgag200/mgag200_fb.c +++ b/drivers/gpu/drm/mgag200/mgag200_fb.c @@ -41,7 +41,7 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev, * then the BO is being moved and we should * store up the damage until later. */ - if (!drm_can_sleep()) + if (drm_can_sleep()) ret = mgag200_bo_reserve(bo, true); if (ret) { if (ret != -EBUSY) diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index b8583f2..9683747 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1519,11 +1519,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector, (mga_vga_calculate_mode_bandwidth(mode, bpp) > (32700 * 1024))) { return MODE_BANDWIDTH; - } else if (mode->type == G200_EH && + } else if (mdev->type == G200_EH && (mga_vga_calculate_mode_bandwidth(mode, bpp) > (37500 * 1024))) { return MODE_BANDWIDTH; - } else if (mode->type == G200_ER && + } else if (mdev->type == G200_ER && (mga_vga_calculate_mode_bandwidth(mode, bpp) > (55000 * 1024))) { return MODE_BANDWIDTH; diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 7b399dc..2812c7d1a 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -1007,8 +1007,22 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case R_008C64_SQ_VSTMP_RING_SIZE: case R_0288C8_SQ_GS_VERT_ITEMSIZE: /* get value to populate the IB don't remove */ - tmp =radeon_get_ib_value(p, idx); - ib[idx] = 0; + /*tmp =radeon_get_ib_value(p, idx); + ib[idx] = 0;*/ + break; + case SQ_ESGS_RING_BASE: + case SQ_GSVS_RING_BASE: + case SQ_ESTMP_RING_BASE: + case SQ_GSTMP_RING_BASE: + case SQ_PSTMP_RING_BASE: + case SQ_VSTMP_RING_BASE: + r = radeon_cs_packet_next_reloc(p, &reloc, 0); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); break; case SQ_CONFIG: track->sq_config = radeon_get_ib_value(p, idx); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index ec8c388..84a1bbb7 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -78,9 +78,10 @@ * 2.34.0 - Add CIK tiling mode array query * 2.35.0 - Add CIK macrotile mode array query * 2.36.0 - Fix CIK DCE tiling setup + * 2.37.0 - allow GS ring setup on r6xx/r7xx */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 36 +#define KMS_DRIVER_MINOR 37 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600 index 20bfbda..ec0c682 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r600 +++ 
b/drivers/gpu/drm/radeon/reg_srcs/r600 @@ -18,6 +18,7 @@ r600 0x9400 0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL 0x00028A40 VGT_GS_MODE 0x00028A6C VGT_GS_OUT_PRIM_TYPE +0x00028B38 VGT_GS_MAX_VERT_OUT 0x000088C8 VGT_GS_PER_ES 0x000088E8 VGT_GS_PER_VS 0x000088D4 VGT_GS_VERTEX_REUSE diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c index 3707985..53b51c4 100644 --- a/drivers/gpu/drm/ttm/ttm_object.c +++ b/drivers/gpu/drm/ttm/ttm_object.c @@ -292,7 +292,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, if (ret == 0) { ref = drm_hash_entry(hash, struct ttm_ref_object, hash); - if (!kref_get_unless_zero(&ref->kref)) { + if (kref_get_unless_zero(&ref->kref)) { rcu_read_unlock(); break; } diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 9af9908..75f3190 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -380,6 +380,9 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm) pgoff_t i; struct page **page = ttm->pages; + if (ttm->page_flags & TTM_PAGE_FLAG_SG) + return; + for (i = 0; i < ttm->num_pages; ++i) { (*page)->mapping = NULL; (*page++)->index = 0; diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h index d95335c..b645647 100644 --- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h +++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h @@ -2583,4 +2583,28 @@ typedef union { float f; } SVGA3dDevCapResult; +typedef enum { + SVGA3DCAPS_RECORD_UNKNOWN = 0, + SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100, + SVGA3DCAPS_RECORD_DEVCAPS = 0x100, + SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff, +} SVGA3dCapsRecordType; + +typedef +struct SVGA3dCapsRecordHeader { + uint32 length; + SVGA3dCapsRecordType type; +} +SVGA3dCapsRecordHeader; + +typedef +struct SVGA3dCapsRecord { + SVGA3dCapsRecordHeader header; + uint32 data[1]; +} +SVGA3dCapsRecord; + + +typedef uint32 SVGA3dCapPair[2]; + #endif /* _SVGA3D_REG_H_ */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index 82c41da..9426c53 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c @@ -37,7 +37,7 @@ struct vmw_user_context { -typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *); +typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool); static void vmw_user_context_free(struct vmw_resource *res); static struct vmw_resource * @@ -50,9 +50,11 @@ static int vmw_gb_context_unbind(struct vmw_resource *res, bool readback, struct ttm_validate_buffer *val_buf); static int vmw_gb_context_destroy(struct vmw_resource *res); -static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi); -static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi); -static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi); +static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind); +static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi, + bool rebind); +static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind); +static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs); static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs); static uint64_t vmw_user_context_size; @@ -111,10 +113,14 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) if (res->func->destroy == vmw_gb_context_destroy) { mutex_lock(&dev_priv->cmdbuf_mutex); + mutex_lock(&dev_priv->binding_mutex); + (void) vmw_context_binding_state_kill + (&container_of(res, struct vmw_user_context, res)->cbs); 
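+ /* Kill all bindings before the destroy command is sent, with
+ * both cmdbuf_mutex and binding_mutex held so nothing can be
+ * bound to the context in between. */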
(void) vmw_gb_context_destroy(res); if (dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid) __vmw_execbuf_release_pinned_bo(dev_priv, NULL); + mutex_unlock(&dev_priv->binding_mutex); mutex_unlock(&dev_priv->cmdbuf_mutex); return; } @@ -328,7 +334,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res, BUG_ON(bo->mem.mem_type != VMW_PL_MOB); mutex_lock(&dev_priv->binding_mutex); - vmw_context_binding_state_kill(&uctx->cbs); + vmw_context_binding_state_scrub(&uctx->cbs); submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); @@ -378,10 +384,6 @@ static int vmw_gb_context_destroy(struct vmw_resource *res) SVGA3dCmdHeader header; SVGA3dCmdDestroyGBContext body; } *cmd; - struct vmw_user_context *uctx = - container_of(res, struct vmw_user_context, res); - - BUG_ON(!list_empty(&uctx->cbs.list)); if (likely(res->id == -1)) return 0; @@ -528,8 +530,9 @@ out_unlock: * vmw_context_scrub_shader - scrub a shader binding from a context. * * @bi: single binding information. + * @rebind: Whether to issue a bind instead of scrub command. */ -static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi) +static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind) { struct vmw_private *dev_priv = bi->ctx->dev_priv; struct { @@ -548,7 +551,8 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi) cmd->header.size = sizeof(cmd->body); cmd->body.cid = bi->ctx->id; cmd->body.type = bi->i1.shader_type; - cmd->body.shid = SVGA3D_INVALID_ID; + cmd->body.shid = + cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID); vmw_fifo_commit(dev_priv, sizeof(*cmd)); return 0; @@ -559,8 +563,10 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi) * from a context. * * @bi: single binding information. + * @rebind: Whether to issue a bind instead of scrub command. */ -static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi) +static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi, + bool rebind) { struct vmw_private *dev_priv = bi->ctx->dev_priv; struct { @@ -579,7 +585,8 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi) cmd->header.size = sizeof(cmd->body); cmd->body.cid = bi->ctx->id; cmd->body.type = bi->i1.rt_type; - cmd->body.target.sid = SVGA3D_INVALID_ID; + cmd->body.target.sid = + cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID); cmd->body.target.face = 0; cmd->body.target.mipmap = 0; vmw_fifo_commit(dev_priv, sizeof(*cmd)); @@ -591,11 +598,13 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi) * vmw_context_scrub_texture - scrub a texture binding from a context. * * @bi: single binding information. + * @rebind: Whether to issue a bind instead of scrub command. * * TODO: Possibly complement this function with a function that takes * a list of texture bindings and combines them to a single command. */ -static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi) +static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, + bool rebind) { struct vmw_private *dev_priv = bi->ctx->dev_priv; struct { @@ -619,7 +628,8 @@ static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi) cmd->body.c.cid = bi->ctx->id; cmd->body.s1.stage = bi->i1.texture_stage; cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; - cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID; + cmd->body.s1.value = + cpu_to_le32((rebind) ? 
bi->res->id : SVGA3D_INVALID_ID); vmw_fifo_commit(dev_priv, sizeof(*cmd)); return 0; @@ -692,6 +702,7 @@ int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs, vmw_context_binding_drop(loc); loc->bi = *bi; + loc->bi.scrubbed = false; list_add_tail(&loc->ctx_list, &cbs->list); INIT_LIST_HEAD(&loc->res_list); @@ -727,12 +738,11 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs, if (loc->bi.ctx != NULL) vmw_context_binding_drop(loc); - loc->bi = *bi; - list_add_tail(&loc->ctx_list, &cbs->list); - if (bi->res != NULL) + if (bi->res != NULL) { + loc->bi = *bi; + list_add_tail(&loc->ctx_list, &cbs->list); list_add_tail(&loc->res_list, &bi->res->binding_head); - else - INIT_LIST_HEAD(&loc->res_list); + } } /** @@ -746,7 +756,10 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs, */ static void vmw_context_binding_kill(struct vmw_ctx_binding *cb) { - (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi); + if (!cb->bi.scrubbed) { + (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false); + cb->bi.scrubbed = true; + } vmw_context_binding_drop(cb); } @@ -768,6 +781,27 @@ static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs) } /** + * vmw_context_binding_state_scrub - Scrub all bindings associated with a + * struct vmw_ctx_binding state structure. + * + * @cbs: Pointer to the context binding state tracker. + * + * Emits commands to scrub all bindings associated with the + * context binding state tracker. + */ +static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs) +{ + struct vmw_ctx_binding *entry; + + list_for_each_entry(entry, &cbs->list, ctx_list) { + if (!entry->bi.scrubbed) { + (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false); + entry->bi.scrubbed = true; + } + } +} + +/** * vmw_context_binding_res_list_kill - Kill all bindings on a * resource binding list * @@ -785,6 +819,27 @@ void vmw_context_binding_res_list_kill(struct list_head *head) } /** + * vmw_context_binding_res_list_scrub - Scrub all bindings on a + * resource binding list + * + * @head: list head of resource binding list + * + * Scrub all bindings associated with a specific resource. Typically + * called before the resource is evicted. + */ +void vmw_context_binding_res_list_scrub(struct list_head *head) +{ + struct vmw_ctx_binding *entry; + + list_for_each_entry(entry, head, res_list) { + if (!entry->bi.scrubbed) { + (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false); + entry->bi.scrubbed = true; + } + } +} + +/** * vmw_context_binding_state_transfer - Commit staged binding info * * @ctx: Pointer to context to commit the staged binding info to. @@ -803,3 +858,50 @@ void vmw_context_binding_state_transfer(struct vmw_resource *ctx, list_for_each_entry_safe(entry, next, &from->list, ctx_list) vmw_context_binding_transfer(&uctx->cbs, &entry->bi); } + +/** + * vmw_context_rebind_all - Rebind all scrubbed bindings of a context + * + * @ctx: The context resource + * + * Walks through the context binding list and rebinds all scrubbed + * resources. 
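+ * Returns 0 on success or the first error returned by a rebind;
+ * bindings whose backing resource has lost its device id are
+ * skipped with a warning instead of being rebound.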
+ */ +int vmw_context_rebind_all(struct vmw_resource *ctx) +{ + struct vmw_ctx_binding *entry; + struct vmw_user_context *uctx = + container_of(ctx, struct vmw_user_context, res); + struct vmw_ctx_binding_state *cbs = &uctx->cbs; + int ret; + + list_for_each_entry(entry, &cbs->list, ctx_list) { + if (likely(!entry->bi.scrubbed)) + continue; + + if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id == + SVGA3D_INVALID_ID)) + continue; + + ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true); + if (unlikely(ret != 0)) + return ret; + + entry->bi.scrubbed = false; + } + + return 0; +} + +/** + * vmw_context_binding_list - Return a list of context bindings + * + * @ctx: The context resource + * + * Returns the current list of bindings of the given context. Note that + * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked. + */ +struct list_head *vmw_context_binding_list(struct vmw_resource *ctx) +{ + return &(container_of(ctx, struct vmw_user_context, res)->cbs.list); +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 9893328..3bdc0ad 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -941,6 +941,7 @@ static void vmw_postclose(struct drm_device *dev, drm_master_put(&vmw_fp->locked_master); } + vmw_compat_shader_man_destroy(vmw_fp->shman); ttm_object_file_release(&vmw_fp->tfile); kfree(vmw_fp); } @@ -960,11 +961,17 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) if (unlikely(vmw_fp->tfile == NULL)) goto out_no_tfile; + vmw_fp->shman = vmw_compat_shader_man_create(dev_priv); + if (IS_ERR(vmw_fp->shman)) + goto out_no_shman; + file_priv->driver_priv = vmw_fp; dev_priv->bdev.dev_mapping = dev->dev_mapping; return 0; +out_no_shman: + ttm_object_file_release(&vmw_fp->tfile); out_no_tfile: kfree(vmw_fp); return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 554e7fa..ecaa302 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -75,10 +75,14 @@ #define VMW_RES_FENCE ttm_driver_type3 #define VMW_RES_SHADER ttm_driver_type4 +struct vmw_compat_shader_manager; + struct vmw_fpriv { struct drm_master *locked_master; struct ttm_object_file *tfile; struct list_head fence_events; + bool gb_aware; + struct vmw_compat_shader_manager *shman; }; struct vmw_dma_buffer { @@ -272,6 +276,7 @@ struct vmw_ctx_bindinfo { struct vmw_resource *ctx; struct vmw_resource *res; enum vmw_ctx_binding_type bt; + bool scrubbed; union { SVGA3dShaderType shader_type; SVGA3dRenderTargetType rt_type; @@ -318,7 +323,7 @@ struct vmw_sw_context{ struct drm_open_hash res_ht; bool res_ht_initialized; bool kernel; /**< is the called made from the kernel */ - struct ttm_object_file *tfile; + struct vmw_fpriv *fp; struct list_head validate_nodes; struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS]; uint32_t cur_reloc; @@ -336,6 +341,7 @@ struct vmw_sw_context{ bool needs_post_query_barrier; struct vmw_resource *error_resource; struct vmw_ctx_binding_state staged_bindings; + struct list_head staged_shaders; }; struct vmw_legacy_display; @@ -569,6 +575,8 @@ struct vmw_user_resource_conv; extern void vmw_resource_unreference(struct vmw_resource **p_res); extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); +extern struct vmw_resource * +vmw_resource_reference_unless_doomed(struct vmw_resource *res); extern int vmw_resource_validate(struct vmw_resource *res); extern int 
vmw_resource_reserve(struct vmw_resource *res, bool no_backup); extern bool vmw_resource_needs_backup(const struct vmw_resource *res); @@ -957,6 +965,9 @@ extern void vmw_context_binding_state_transfer(struct vmw_resource *res, struct vmw_ctx_binding_state *cbs); extern void vmw_context_binding_res_list_kill(struct list_head *head); +extern void vmw_context_binding_res_list_scrub(struct list_head *head); +extern int vmw_context_rebind_all(struct vmw_resource *ctx); +extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx); /* * Surface management - vmwgfx_surface.c @@ -991,6 +1002,28 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man, + SVGA3dShaderType shader_type, + u32 *user_key); +extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man, + struct list_head *list); +extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man, + struct list_head *list); +extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man, + u32 user_key, + SVGA3dShaderType shader_type, + struct list_head *list); +extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man, + u32 user_key, const void *bytecode, + SVGA3dShaderType shader_type, + size_t size, + struct ttm_object_file *tfile, + struct list_head *list); +extern struct vmw_compat_shader_manager * +vmw_compat_shader_man_create(struct vmw_private *dev_priv); +extern void +vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man); + /** * Inline helper functions diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 7a5f1eb..269b85c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -114,8 +114,10 @@ static void vmw_resource_list_unreserve(struct list_head *list, * persistent context binding tracker. */ if (unlikely(val->staged_bindings)) { - vmw_context_binding_state_transfer - (val->res, val->staged_bindings); + if (!backoff) { + vmw_context_binding_state_transfer + (val->res, val->staged_bindings); + } kfree(val->staged_bindings); val->staged_bindings = NULL; } @@ -178,6 +180,44 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context, } /** + * vmw_resource_context_res_add - Put resources previously bound to a context on + * the validation list + * + * @dev_priv: Pointer to a device private structure + * @sw_context: Pointer to a software context used for this command submission + * @ctx: Pointer to the context resource + * + * This function puts all resources that were previously bound to @ctx on + * the resource validation list. 
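+ * Bindings whose resource is already being destroyed are skipped,
+ * using vmw_resource_reference_unless_doomed() to detect them.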
This is part of the context state reemission + */ +static int vmw_resource_context_res_add(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + struct vmw_resource *ctx) +{ + struct list_head *binding_list; + struct vmw_ctx_binding *entry; + int ret = 0; + struct vmw_resource *res; + + mutex_lock(&dev_priv->binding_mutex); + binding_list = vmw_context_binding_list(ctx); + + list_for_each_entry(entry, binding_list, ctx_list) { + res = vmw_resource_reference_unless_doomed(entry->bi.res); + if (unlikely(res == NULL)) + continue; + + ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL); + vmw_resource_unreference(&res); + if (unlikely(ret != 0)) + break; + } + + mutex_unlock(&dev_priv->binding_mutex); + return ret; +} + +/** * vmw_resource_relocation_add - Add a relocation to the relocation list * * @list: Pointer to head of relocation list. @@ -233,8 +273,12 @@ static void vmw_resource_relocations_apply(uint32_t *cb, { struct vmw_resource_relocation *rel; - list_for_each_entry(rel, list, head) - cb[rel->offset] = rel->res->id; + list_for_each_entry(rel, list, head) { + if (likely(rel->res != NULL)) + cb[rel->offset] = rel->res->id; + else + cb[rel->offset] = SVGA_3D_CMD_NOP; + } } static int vmw_cmd_invalid(struct vmw_private *dev_priv, @@ -379,22 +423,27 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context) } /** - * vmw_cmd_res_check - Check that a resource is present and if so, put it + * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it * on the resource validate list unless it's already there. * * @dev_priv: Pointer to a device private structure. * @sw_context: Pointer to the software context. * @res_type: Resource type. * @converter: User-space visisble type specific information. - * @id: Pointer to the location in the command buffer currently being + * @id: user-space resource id handle. + * @id_loc: Pointer to the location in the command buffer currently being * parsed from where the user-space resource id handle is located. + * @p_val: Pointer to pointer to resource validalidation node. Populated + * on exit. 
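+ * Unlike vmw_cmd_res_check(), the handle is passed by value in @id,
+ * so a caller may substitute a translated handle (for example one
+ * obtained from the compat shader manager) while relocations are
+ * still recorded against the original location @id_loc in the
+ * command buffer.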
*/ -static int vmw_cmd_res_check(struct vmw_private *dev_priv, - struct vmw_sw_context *sw_context, - enum vmw_res_type res_type, - const struct vmw_user_resource_conv *converter, - uint32_t *id, - struct vmw_resource_val_node **p_val) +static int +vmw_cmd_compat_res_check(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + enum vmw_res_type res_type, + const struct vmw_user_resource_conv *converter, + uint32_t id, + uint32_t *id_loc, + struct vmw_resource_val_node **p_val) { struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type]; @@ -402,7 +451,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv, struct vmw_resource_val_node *node; int ret; - if (*id == SVGA3D_INVALID_ID) { + if (id == SVGA3D_INVALID_ID) { if (p_val) *p_val = NULL; if (res_type == vmw_res_context) { @@ -417,7 +466,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv, * resource */ - if (likely(rcache->valid && *id == rcache->handle)) { + if (likely(rcache->valid && id == rcache->handle)) { const struct vmw_resource *res = rcache->res; rcache->node->first_usage = false; @@ -426,28 +475,28 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv, return vmw_resource_relocation_add (&sw_context->res_relocations, res, - id - sw_context->buf_start); + id_loc - sw_context->buf_start); } ret = vmw_user_resource_lookup_handle(dev_priv, - sw_context->tfile, - *id, + sw_context->fp->tfile, + id, converter, &res); if (unlikely(ret != 0)) { DRM_ERROR("Could not find or use resource 0x%08x.\n", - (unsigned) *id); + (unsigned) id); dump_stack(); return ret; } rcache->valid = true; rcache->res = res; - rcache->handle = *id; + rcache->handle = id; ret = vmw_resource_relocation_add(&sw_context->res_relocations, res, - id - sw_context->buf_start); + id_loc - sw_context->buf_start); if (unlikely(ret != 0)) goto out_no_reloc; @@ -459,7 +508,11 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv, if (p_val) *p_val = node; - if (node->first_usage && res_type == vmw_res_context) { + if (dev_priv->has_mob && node->first_usage && + res_type == vmw_res_context) { + ret = vmw_resource_context_res_add(dev_priv, sw_context, res); + if (unlikely(ret != 0)) + goto out_no_reloc; node->staged_bindings = kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); if (node->staged_bindings == NULL) { @@ -481,6 +534,59 @@ out_no_reloc: } /** + * vmw_cmd_res_check - Check that a resource is present and if so, put it + * on the resource validate list unless it's already there. + * + * @dev_priv: Pointer to a device private structure. + * @sw_context: Pointer to the software context. + * @res_type: Resource type. + * @converter: User-space visisble type specific information. + * @id_loc: Pointer to the location in the command buffer currently being + * parsed from where the user-space resource id handle is located. + * @p_val: Pointer to pointer to resource validalidation node. Populated + * on exit. + */ +static int +vmw_cmd_res_check(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + enum vmw_res_type res_type, + const struct vmw_user_resource_conv *converter, + uint32_t *id_loc, + struct vmw_resource_val_node **p_val) +{ + return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type, + converter, *id_loc, id_loc, p_val); +} + +/** + * vmw_rebind_contexts - Rebind all resources previously bound to + * referenced contexts. + * + * @sw_context: Pointer to the software context. + * + * Rebind context binding points that have been scrubbed because of eviction. 
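+ * Returns 0 on success or the error from vmw_context_rebind_all();
+ * -ERESTARTSYS is passed through silently, other errors are logged.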
+ */ +static int vmw_rebind_contexts(struct vmw_sw_context *sw_context) +{ + struct vmw_resource_val_node *val; + int ret; + + list_for_each_entry(val, &sw_context->resource_list, head) { + if (likely(!val->staged_bindings)) + continue; + + ret = vmw_context_rebind_all(val->res); + if (unlikely(ret != 0)) { + if (ret != -ERESTARTSYS) + DRM_ERROR("Failed to rebind context.\n"); + return ret; + } + } + + return 0; +} + +/** * vmw_cmd_cid_check - Check a command header for valid context information. * * @dev_priv: Pointer to a device private structure. @@ -767,7 +873,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, struct vmw_relocation *reloc; int ret; - ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); + ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); if (unlikely(ret != 0)) { DRM_ERROR("Could not find or use MOB buffer.\n"); return -EINVAL; @@ -828,7 +934,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, struct vmw_relocation *reloc; int ret; - ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); + ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); if (unlikely(ret != 0)) { DRM_ERROR("Could not find or use GMR region.\n"); return -EINVAL; @@ -1127,7 +1233,8 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); - vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header); + vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, + header); out_no_surface: vmw_dmabuf_unreference(&vmw_bo); @@ -1478,6 +1585,98 @@ static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv, &cmd->body.sid, NULL); } + +/** + * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE + * command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. + */ +static int vmw_cmd_shader_define(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_shader_define_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdDefineShader body; + } *cmd; + int ret; + size_t size; + + cmd = container_of(header, struct vmw_shader_define_cmd, + header); + + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, + user_context_converter, &cmd->body.cid, + NULL); + if (unlikely(ret != 0)) + return ret; + + if (unlikely(!dev_priv->has_mob)) + return 0; + + size = cmd->header.size - sizeof(cmd->body); + ret = vmw_compat_shader_add(sw_context->fp->shman, + cmd->body.shid, cmd + 1, + cmd->body.type, size, + sw_context->fp->tfile, + &sw_context->staged_shaders); + if (unlikely(ret != 0)) + return ret; + + return vmw_resource_relocation_add(&sw_context->res_relocations, + NULL, &cmd->header.id - + sw_context->buf_start); + + return 0; +} + +/** + * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY + * command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. 
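+ * On guest-backed devices the legacy command is not sent to the
+ * device: the shader is dropped from the per-file compat shader
+ * list and the command itself is patched to SVGA_3D_CMD_NOP via a
+ * relocation with a NULL resource.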
+ */ +static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_shader_destroy_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdDestroyShader body; + } *cmd; + int ret; + + cmd = container_of(header, struct vmw_shader_destroy_cmd, + header); + + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, + user_context_converter, &cmd->body.cid, + NULL); + if (unlikely(ret != 0)) + return ret; + + if (unlikely(!dev_priv->has_mob)) + return 0; + + ret = vmw_compat_shader_remove(sw_context->fp->shman, + cmd->body.shid, + cmd->body.type, + &sw_context->staged_shaders); + if (unlikely(ret != 0)) + return ret; + + return vmw_resource_relocation_add(&sw_context->res_relocations, + NULL, &cmd->header.id - + sw_context->buf_start); + + return 0; +} + /** * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER * command @@ -1509,10 +1708,18 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, if (dev_priv->has_mob) { struct vmw_ctx_bindinfo bi; struct vmw_resource_val_node *res_node; - - ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader, - user_shader_converter, - &cmd->body.shid, &res_node); + u32 shid = cmd->body.shid; + + if (shid != SVGA3D_INVALID_ID) + (void) vmw_compat_shader_lookup(sw_context->fp->shman, + cmd->body.type, + &shid); + + ret = vmw_cmd_compat_res_check(dev_priv, sw_context, + vmw_res_shader, + user_shader_converter, + shid, + &cmd->body.shid, &res_node); if (unlikely(ret != 0)) return ret; @@ -1527,6 +1734,39 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, } /** + * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST + * command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. 
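+ * On guest-backed devices the command id is rewritten in place to
+ * SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE once the context check
+ * has passed.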
+ */ +static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_set_shader_const_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdSetShaderConst body; + } *cmd; + int ret; + + cmd = container_of(header, struct vmw_set_shader_const_cmd, + header); + + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, + user_context_converter, &cmd->body.cid, + NULL); + if (unlikely(ret != 0)) + return ret; + + if (dev_priv->has_mob) + header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE; + + return 0; +} + +/** * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER * command * @@ -1634,14 +1874,14 @@ static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = { true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check, false, false, false), - VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check, - true, true, false), - VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check, - true, true, false), + VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define, + true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy, + true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader, true, false, false), - VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check, - true, true, false), + VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const, + true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check, @@ -2171,7 +2411,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, } else sw_context->kernel = true; - sw_context->tfile = vmw_fpriv(file_priv)->tfile; + sw_context->fp = vmw_fpriv(file_priv); sw_context->cur_reloc = 0; sw_context->cur_val_buf = 0; sw_context->fence_flags = 0; @@ -2188,16 +2428,17 @@ int vmw_execbuf_process(struct drm_file *file_priv, goto out_unlock; sw_context->res_ht_initialized = true; } + INIT_LIST_HEAD(&sw_context->staged_shaders); INIT_LIST_HEAD(&resource_list); ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, command_size); if (unlikely(ret != 0)) - goto out_err; + goto out_err_nores; ret = vmw_resources_reserve(sw_context); if (unlikely(ret != 0)) - goto out_err; + goto out_err_nores; ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes); if (unlikely(ret != 0)) @@ -2225,6 +2466,12 @@ int vmw_execbuf_process(struct drm_file *file_priv, goto out_err; } + if (dev_priv->has_mob) { + ret = vmw_rebind_contexts(sw_context); + if (unlikely(ret != 0)) + goto out_err; + } + cmd = vmw_fifo_reserve(dev_priv, command_size); if (unlikely(cmd == NULL)) { DRM_ERROR("Failed reserving fifo space for commands.\n"); @@ -2276,6 +2523,8 @@ int vmw_execbuf_process(struct drm_file *file_priv, } list_splice_init(&sw_context->resource_list, &resource_list); + vmw_compat_shaders_commit(sw_context->fp->shman, + &sw_context->staged_shaders); mutex_unlock(&dev_priv->cmdbuf_mutex); /* @@ -2289,10 +2538,11 @@ int vmw_execbuf_process(struct drm_file *file_priv, out_unlock_binding: mutex_unlock(&dev_priv->binding_mutex); out_err: - vmw_resource_relocations_free(&sw_context->res_relocations); - vmw_free_relocations(sw_context); ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes); +out_err_nores: vmw_resource_list_unreserve(&sw_context->resource_list, true); + vmw_resource_relocations_free(&sw_context->res_relocations); + vmw_free_relocations(sw_context); 
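+ /* out_err_nores is entered when command checking or resource
+ * reservation fails before any validation buffers were reserved,
+ * so the ttm_eu_backoff_reservation() above is skipped. */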
vmw_clear_validations(sw_context); if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid)) @@ -2301,6 +2551,8 @@ out_unlock: list_splice_init(&sw_context->resource_list, &resource_list); error_resource = sw_context->error_resource; sw_context->error_resource = NULL; + vmw_compat_shaders_revert(sw_context->fp->shman, + &sw_context->staged_shaders); mutex_unlock(&dev_priv->cmdbuf_mutex); /* diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 116c497..f9881f9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -29,12 +29,18 @@ #include <drm/vmwgfx_drm.h> #include "vmwgfx_kms.h" +struct svga_3d_compat_cap { + SVGA3dCapsRecordHeader header; + SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX]; +}; + int vmw_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct vmw_private *dev_priv = vmw_priv(dev); struct drm_vmw_getparam_arg *param = (struct drm_vmw_getparam_arg *)data; + struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); switch (param->param) { case DRM_VMW_PARAM_NUM_STREAMS: @@ -60,6 +66,11 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, __le32 __iomem *fifo_mem = dev_priv->mmio_virt; const struct vmw_fifo_state *fifo = &dev_priv->fifo; + if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) { + param->value = SVGA3D_HWVERSION_WS8_B1; + break; + } + param->value = ioread32(fifo_mem + ((fifo->capabilities & @@ -69,17 +80,26 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, break; } case DRM_VMW_PARAM_MAX_SURF_MEMORY: - param->value = dev_priv->memory_size; + if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) && + !vmw_fp->gb_aware) + param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2; + else + param->value = dev_priv->memory_size; break; case DRM_VMW_PARAM_3D_CAPS_SIZE: - if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) - param->value = SVGA3D_DEVCAP_MAX; + if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) && + vmw_fp->gb_aware) + param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t); + else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) + param->value = sizeof(struct svga_3d_compat_cap) + + sizeof(uint32_t); else param->value = (SVGA_FIFO_3D_CAPS_LAST - - SVGA_FIFO_3D_CAPS + 1); - param->value *= sizeof(uint32_t); + SVGA_FIFO_3D_CAPS + 1) * + sizeof(uint32_t); break; case DRM_VMW_PARAM_MAX_MOB_MEMORY: + vmw_fp->gb_aware = true; param->value = dev_priv->max_mob_pages * PAGE_SIZE; break; default: @@ -91,6 +111,38 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, return 0; } +static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce, + size_t size) +{ + struct svga_3d_compat_cap *compat_cap = + (struct svga_3d_compat_cap *) bounce; + unsigned int i; + size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs); + unsigned int max_size; + + if (size < pair_offset) + return -EINVAL; + + max_size = (size - pair_offset) / sizeof(SVGA3dCapPair); + + if (max_size > SVGA3D_DEVCAP_MAX) + max_size = SVGA3D_DEVCAP_MAX; + + compat_cap->header.length = + (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32); + compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS; + + mutex_lock(&dev_priv->hw_mutex); + for (i = 0; i < max_size; ++i) { + vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); + compat_cap->pairs[i][0] = i; + compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP); + } + mutex_unlock(&dev_priv->hw_mutex); + + return 0; +} + int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, struct drm_file 
*file_priv) @@ -104,41 +156,49 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, void *bounce; int ret; bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS); + struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); if (unlikely(arg->pad64 != 0)) { DRM_ERROR("Illegal GET_3D_CAP argument.\n"); return -EINVAL; } - if (gb_objects) - size = SVGA3D_DEVCAP_MAX; + if (gb_objects && vmw_fp->gb_aware) + size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t); + else if (gb_objects) + size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t); else - size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1); - - size *= sizeof(uint32_t); + size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) * + sizeof(uint32_t); if (arg->max_size < size) size = arg->max_size; - bounce = vmalloc(size); + bounce = vzalloc(size); if (unlikely(bounce == NULL)) { DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n"); return -ENOMEM; } - if (gb_objects) { - int i; + if (gb_objects && vmw_fp->gb_aware) { + int i, num; uint32_t *bounce32 = (uint32_t *) bounce; + num = size / sizeof(uint32_t); + if (num > SVGA3D_DEVCAP_MAX) + num = SVGA3D_DEVCAP_MAX; + mutex_lock(&dev_priv->hw_mutex); - for (i = 0; i < SVGA3D_DEVCAP_MAX; ++i) { + for (i = 0; i < num; ++i) { vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); } mutex_unlock(&dev_priv->hw_mutex); - + } else if (gb_objects) { + ret = vmw_fill_compat_cap(dev_priv, bounce, size); + if (unlikely(ret != 0)) + goto out_err; } else { - fifo_mem = dev_priv->mmio_virt; memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); } @@ -146,6 +206,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, ret = copy_to_user(buffer, bounce, size); if (ret) ret = -EFAULT; +out_err: vfree(bounce); if (unlikely(ret != 0)) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index 4910e7b..d4a5a19 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -134,6 +134,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv, cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) { DRM_ERROR("Failed reserving FIFO space for OTable setup.\n"); + ret = -ENOMEM; goto out_no_fifo; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 6fdd82d..2aa4bc6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -88,6 +88,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res) return res; } +struct vmw_resource * +vmw_resource_reference_unless_doomed(struct vmw_resource *res) +{ + return kref_get_unless_zero(&res->kref) ? res : NULL; +} /** * vmw_resource_release_id - release a resource id to the id manager. 
@@ -136,8 +141,12 @@ static void vmw_resource_release(struct kref *kref) vmw_dmabuf_unreference(&res->backup); } - if (likely(res->hw_destroy != NULL)) + if (likely(res->hw_destroy != NULL)) { res->hw_destroy(res); + mutex_lock(&dev_priv->binding_mutex); + vmw_context_binding_res_list_kill(&res->binding_head); + mutex_unlock(&dev_priv->binding_mutex); + } id = res->id; if (res->res_free != NULL) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index 1457ec4b..217d941 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -29,6 +29,8 @@ #include "vmwgfx_resource_priv.h" #include "ttm/ttm_placement.h" +#define VMW_COMPAT_SHADER_HT_ORDER 12 + struct vmw_shader { struct vmw_resource res; SVGA3dShaderType type; @@ -40,6 +42,50 @@ struct vmw_user_shader { struct vmw_shader shader; }; +/** + * enum vmw_compat_shader_state - Staging state for compat shaders + */ +enum vmw_compat_shader_state { + VMW_COMPAT_COMMITED, + VMW_COMPAT_ADD, + VMW_COMPAT_DEL +}; + +/** + * struct vmw_compat_shader - Metadata for compat shaders. + * + * @handle: The TTM handle of the guest backed shader. + * @tfile: The struct ttm_object_file the guest backed shader is registered + * with. + * @hash: Hash item for lookup. + * @head: List head for staging lists or the compat shader manager list. + * @state: Staging state. + * + * The structure is protected by the cmdbuf lock. + */ +struct vmw_compat_shader { + u32 handle; + struct ttm_object_file *tfile; + struct drm_hash_item hash; + struct list_head head; + enum vmw_compat_shader_state state; +}; + +/** + * struct vmw_compat_shader_manager - Compat shader manager. + * + * @shaders: Hash table containing staged and commited compat shaders + * @list: List of commited shaders. + * @dev_priv: Pointer to a device private structure. + * + * @shaders and @list are protected by the cmdbuf mutex for now. + */ +struct vmw_compat_shader_manager { + struct drm_open_hash shaders; + struct list_head list; + struct vmw_private *dev_priv; +}; + static void vmw_user_shader_free(struct vmw_resource *res); static struct vmw_resource * vmw_user_shader_base_to_res(struct ttm_base_object *base); @@ -258,7 +304,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res) return 0; mutex_lock(&dev_priv->binding_mutex); - vmw_context_binding_res_list_kill(&res->binding_head); + vmw_context_binding_res_list_scrub(&res->binding_head); cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) { @@ -325,13 +371,81 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, TTM_REF_USAGE); } +int vmw_shader_alloc(struct vmw_private *dev_priv, + struct vmw_dma_buffer *buffer, + size_t shader_size, + size_t offset, + SVGA3dShaderType shader_type, + struct ttm_object_file *tfile, + u32 *handle) +{ + struct vmw_user_shader *ushader; + struct vmw_resource *res, *tmp; + int ret; + + /* + * Approximate idr memory usage with 128 bytes. It will be limited + * by maximum number_of shaders anyway. 
+ */ + if (unlikely(vmw_user_shader_size == 0)) + vmw_user_shader_size = + ttm_round_pot(sizeof(struct vmw_user_shader)) + 128; + + ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), + vmw_user_shader_size, + false, true); + if (unlikely(ret != 0)) { + if (ret != -ERESTARTSYS) + DRM_ERROR("Out of graphics memory for shader " + "creation.\n"); + goto out; + } + + ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); + if (unlikely(ushader == NULL)) { + ttm_mem_global_free(vmw_mem_glob(dev_priv), + vmw_user_shader_size); + ret = -ENOMEM; + goto out; + } + + res = &ushader->shader.res; + ushader->base.shareable = false; + ushader->base.tfile = NULL; + + /* + * From here on, the destructor takes over resource freeing. + */ + + ret = vmw_gb_shader_init(dev_priv, res, shader_size, + offset, shader_type, buffer, + vmw_user_shader_free); + if (unlikely(ret != 0)) + goto out; + + tmp = vmw_resource_reference(res); + ret = ttm_base_object_init(tfile, &ushader->base, false, + VMW_RES_SHADER, + &vmw_user_shader_base_release, NULL); + + if (unlikely(ret != 0)) { + vmw_resource_unreference(&tmp); + goto out_err; + } + + if (handle) + *handle = ushader->base.hash.key; +out_err: + vmw_resource_unreference(&res); +out: + return ret; +} + + int vmw_shader_define_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct vmw_private *dev_priv = vmw_priv(dev); - struct vmw_user_shader *ushader; - struct vmw_resource *res; - struct vmw_resource *tmp; struct drm_vmw_shader_create_arg *arg = (struct drm_vmw_shader_create_arg *)data; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; @@ -373,69 +487,324 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data, goto out_bad_arg; } - /* - * Approximate idr memory usage with 128 bytes. It will be limited - * by maximum number of shaders anyway. - */ + ret = ttm_read_lock(&vmaster->lock, true); + if (unlikely(ret != 0)) + goto out_bad_arg; - if (unlikely(vmw_user_shader_size == 0)) - vmw_user_shader_size = ttm_round_pot(sizeof(*ushader)) - + 128; + ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset, + shader_type, tfile, &arg->shader_handle); - ret = ttm_read_lock(&vmaster->lock, true); + ttm_read_unlock(&vmaster->lock); +out_bad_arg: + vmw_dmabuf_unreference(&buffer); + return ret; +} + +/** + * vmw_compat_shader_lookup - Look up a compat shader + * + * @man: Pointer to the compat shader manager. + * @shader_type: The shader type that, combined with the user_key, identifies + * the shader. + * @user_key: On entry, this should be a pointer to the user_key. + * On successful exit, it will contain the guest-backed shader's TTM handle. + * + * Returns 0 on success. Non-zero on failure, in which case the value pointed + * to by @user_key is unmodified. + */ +int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man, + SVGA3dShaderType shader_type, + u32 *user_key) +{ + struct drm_hash_item *hash; + int ret; + unsigned long key = *user_key | (shader_type << 24); + + ret = drm_ht_find_item(&man->shaders, key, &hash); if (unlikely(ret != 0)) return ret; - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - vmw_user_shader_size, - false, true); - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for shader" " creation.\n"); - goto out_unlock; + *user_key = drm_hash_entry(hash, struct vmw_compat_shader, + hash)->handle; + + return 0; +} + +/** + * vmw_compat_shader_free - Free a compat shader. + * + * @man: Pointer to the compat shader manager.
+ * @entry: Pointer to a struct vmw_compat_shader. + * + * Frees a struct vmw_compat_shader entry and drops its reference to the + * guest backed shader. + */ +static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man, + struct vmw_compat_shader *entry) +{ + list_del(&entry->head); + WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash)); + WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle, + TTM_REF_USAGE)); + kfree(entry); +} + +/** + * vmw_compat_shaders_commit - Commit a list of compat shader actions. + * + * @man: Pointer to the compat shader manager. + * @list: Caller's list of compat shader actions. + * + * This function commits a list of compat shader additions or removals. + * It is typically called when the execbuf ioctl call triggering these + * actions has committed the fifo contents to the device. + */ +void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man, + struct list_head *list) +{ + struct vmw_compat_shader *entry, *next; + + list_for_each_entry_safe(entry, next, list, head) { + list_del(&entry->head); + switch (entry->state) { + case VMW_COMPAT_ADD: + entry->state = VMW_COMPAT_COMMITED; + list_add_tail(&entry->head, &man->list); + break; + case VMW_COMPAT_DEL: + ttm_ref_object_base_unref(entry->tfile, entry->handle, + TTM_REF_USAGE); + kfree(entry); + break; + default: + BUG(); + break; + } } +} - ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); - if (unlikely(ushader == NULL)) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_user_shader_size); - ret = -ENOMEM; - goto out_unlock; +/** + * vmw_compat_shaders_revert - Revert a list of compat shader actions + * + * @man: Pointer to the compat shader manager. + * @list: Caller's list of compat shader actions. + * + * This function reverts a list of compat shader additions or removals. + * It is typically called when the execbuf ioctl call triggering these + * actions failed for some reason, and the command stream was never + * submitted. + */ +void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man, + struct list_head *list) +{ + struct vmw_compat_shader *entry, *next; + int ret; + + list_for_each_entry_safe(entry, next, list, head) { + switch (entry->state) { + case VMW_COMPAT_ADD: + vmw_compat_shader_free(man, entry); + break; + case VMW_COMPAT_DEL: + ret = drm_ht_insert_item(&man->shaders, &entry->hash); + list_del(&entry->head); + list_add_tail(&entry->head, &man->list); + entry->state = VMW_COMPAT_COMMITED; + break; + default: + BUG(); + break; + } } +} - res = &ushader->shader.res; - ushader->base.shareable = false; - ushader->base.tfile = NULL; +/** + * vmw_compat_shader_remove - Stage a compat shader for removal. + * + * @man: Pointer to the compat shader manager + * @user_key: The key that is used to identify the shader. The key is + * unique to the shader type. + * @shader_type: Shader type. + * @list: Caller's list of staged shader actions. + * + * This function stages a compat shader for removal and removes the key from + * the shader manager's hash table. If the shader was previously only staged + * for addition, it is completely removed (but the execbuf code may keep a + * reference if it was bound to a context between addition and removal). If + * it was previously committed to the manager, it is staged for removal.
+ */ +int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man, + u32 user_key, SVGA3dShaderType shader_type, + struct list_head *list) +{ + struct vmw_compat_shader *entry; + struct drm_hash_item *hash; + int ret; - /* - * From here on, the destructor takes over resource freeing. - */ + ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24), + &hash); + if (likely(ret != 0)) + return -EINVAL; - ret = vmw_gb_shader_init(dev_priv, res, arg->size, - arg->offset, shader_type, buffer, - vmw_user_shader_free); + entry = drm_hash_entry(hash, struct vmw_compat_shader, hash); + + switch (entry->state) { + case VMW_COMPAT_ADD: + vmw_compat_shader_free(man, entry); + break; + case VMW_COMPAT_COMMITED: + (void) drm_ht_remove_item(&man->shaders, &entry->hash); + list_del(&entry->head); + entry->state = VMW_COMPAT_DEL; + list_add_tail(&entry->head, list); + break; + default: + BUG(); + break; + } + + return 0; +} + +/** + * vmw_compat_shader_add - Create a compat shader and add the + * key to the manager + * + * @man: Pointer to the compat shader manager + * @user_key: The key that is used to identify the shader. The key is + * unique to the shader type. + * @bytecode: Pointer to the bytecode of the shader. + * @shader_type: Shader type. + * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is + * to be created with. + * @list: Caller's list of staged shader actions. + * + * Note that only the key is added to the shader manager's hash table. + * The shader is not yet added to the shader manager's list of shaders. + */ +int vmw_compat_shader_add(struct vmw_compat_shader_manager *man, + u32 user_key, const void *bytecode, + SVGA3dShaderType shader_type, + size_t size, + struct ttm_object_file *tfile, + struct list_head *list) +{ + struct vmw_dma_buffer *buf; + struct ttm_bo_kmap_obj map; + bool is_iomem; + struct vmw_compat_shader *compat; + u32 handle; + int ret; + + if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16) + return -EINVAL; + + /* Allocate and pin a DMA buffer */ + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (unlikely(buf == NULL)) + return -ENOMEM; + + ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement, + true, vmw_dmabuf_bo_free); if (unlikely(ret != 0)) - goto out_unlock; + goto out; - tmp = vmw_resource_reference(res); - ret = ttm_base_object_init(tfile, &ushader->base, false, - VMW_RES_SHADER, - &vmw_user_shader_base_release, NULL); + ret = ttm_bo_reserve(&buf->base, false, true, false, NULL); + if (unlikely(ret != 0)) + goto no_reserve; + /* Map and copy shader bytecode. 
*/ + ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT, + &map); if (unlikely(ret != 0)) { - vmw_resource_unreference(&tmp); - goto out_err; + ttm_bo_unreserve(&buf->base); + goto no_reserve; } - arg->shader_handle = ushader->base.hash.key; -out_err: - vmw_resource_unreference(&res); -out_unlock: - ttm_read_unlock(&vmaster->lock); -out_bad_arg: - vmw_dmabuf_unreference(&buffer); + memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size); + WARN_ON(is_iomem); + + ttm_bo_kunmap(&map); + ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true); + WARN_ON(ret != 0); + ttm_bo_unreserve(&buf->base); + + /* Create a guest-backed shader container backed by the dma buffer */ + ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type, + tfile, &handle); + vmw_dmabuf_unreference(&buf); + if (unlikely(ret != 0)) + goto no_reserve; + /* + * Create a compat shader structure and stage it for insertion + * in the manager + */ + compat = kzalloc(sizeof(*compat), GFP_KERNEL); + if (compat == NULL) + goto no_compat; + + compat->hash.key = user_key | (shader_type << 24); + ret = drm_ht_insert_item(&man->shaders, &compat->hash); + if (unlikely(ret != 0)) + goto out_invalid_key; + + compat->state = VMW_COMPAT_ADD; + compat->handle = handle; + compat->tfile = tfile; + list_add_tail(&compat->head, list); + return 0; + +out_invalid_key: + kfree(compat); +no_compat: + ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); +no_reserve: +out: return ret; +} + +/** + * vmw_compat_shader_man_create - Create a compat shader manager + * + * @dev_priv: Pointer to a device private structure. + * + * Typically done at file open time. If successful returns a pointer to a + * compat shader manager. Otherwise returns an error pointer. + */ +struct vmw_compat_shader_manager * +vmw_compat_shader_man_create(struct vmw_private *dev_priv) +{ + struct vmw_compat_shader_manager *man; + int ret; + + man = kzalloc(sizeof(*man), GFP_KERNEL); + + man->dev_priv = dev_priv; + INIT_LIST_HEAD(&man->list); + ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER); + if (ret == 0) + return man; + + kfree(man); + return ERR_PTR(ret); +} + +/** + * vmw_compat_shader_man_destroy - Destroy a compat shader manager + * + * @man: Pointer to the shader manager to destroy. + * + * Typically done at file close time. 
+ */ +void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man) +{ + struct vmw_compat_shader *entry, *next; + + mutex_lock(&man->dev_priv->cmdbuf_mutex); + list_for_each_entry_safe(entry, next, &man->list, head) + vmw_compat_shader_free(man, entry); + mutex_unlock(&man->dev_priv->cmdbuf_mutex); + kfree(man); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 979da1c..82468d9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -908,8 +908,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, rep->size_addr; if (user_sizes) - ret = copy_to_user(user_sizes, srf->sizes, - srf->num_sizes * sizeof(*srf->sizes)); + ret = copy_to_user(user_sizes, &srf->base_size, + sizeof(srf->base_size)); if (unlikely(ret != 0)) { DRM_ERROR("copy_to_user failed %p %u\n", user_sizes, srf->num_sizes); @@ -1111,7 +1111,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res) return 0; mutex_lock(&dev_priv->binding_mutex); - vmw_context_binding_res_list_kill(&res->binding_head); + vmw_context_binding_res_list_scrub(&res->binding_head); cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) { diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c index 029ecab..73b3865 100644 --- a/drivers/hwmon/da9055-hwmon.c +++ b/drivers/hwmon/da9055-hwmon.c @@ -278,10 +278,6 @@ static int da9055_hwmon_probe(struct platform_device *pdev) if (hwmon_irq < 0) return hwmon_irq; - hwmon_irq = regmap_irq_get_virq(hwmon->da9055->irq_data, hwmon_irq); - if (hwmon_irq < 0) - return hwmon_irq; - ret = devm_request_threaded_irq(&pdev->dev, hwmon_irq, NULL, da9055_auxadc_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 3cbf66e..291d11f 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -90,7 +90,8 @@ struct pmbus_data { u32 flags; /* from platform data */ - int exponent; /* linear mode: exponent for output voltages */ + int exponent[PMBUS_PAGES]; + /* linear mode: exponent for output voltages */ const struct pmbus_driver_info *info; @@ -410,7 +411,7 @@ static long pmbus_reg2data_linear(struct pmbus_data *data, long val; if (sensor->class == PSC_VOLTAGE_OUT) { /* LINEAR16 */ - exponent = data->exponent; + exponent = data->exponent[sensor->page]; mantissa = (u16) sensor->data; } else { /* LINEAR11 */ exponent = ((s16)sensor->data) >> 11; @@ -516,7 +517,7 @@ static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor) #define MIN_MANTISSA (511 * 1000) static u16 pmbus_data2reg_linear(struct pmbus_data *data, - enum pmbus_sensor_classes class, long val) + struct pmbus_sensor *sensor, long val) { s16 exponent = 0, mantissa; bool negative = false; @@ -525,7 +526,7 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data, if (val == 0) return 0; - if (class == PSC_VOLTAGE_OUT) { + if (sensor->class == PSC_VOLTAGE_OUT) { /* LINEAR16 does not support negative voltages */ if (val < 0) return 0; @@ -534,10 +535,10 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data, * For a static exponent, we don't have a choice * but to adjust the value to it.
*/ - if (data->exponent < 0) - val <<= -data->exponent; + if (data->exponent[sensor->page] < 0) + val <<= -data->exponent[sensor->page]; else - val >>= data->exponent; + val >>= data->exponent[sensor->page]; val = DIV_ROUND_CLOSEST(val, 1000); return val & 0xffff; } @@ -548,14 +549,14 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data, } /* Power is in uW. Convert to mW before converting. */ - if (class == PSC_POWER) + if (sensor->class == PSC_POWER) val = DIV_ROUND_CLOSEST(val, 1000L); /* * For simplicity, convert fan data to milli-units * before calculating the exponent. */ - if (class == PSC_FAN) + if (sensor->class == PSC_FAN) val = val * 1000; /* Reduce large mantissa until it fits into 10 bit */ @@ -585,22 +586,22 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data, } static u16 pmbus_data2reg_direct(struct pmbus_data *data, - enum pmbus_sensor_classes class, long val) + struct pmbus_sensor *sensor, long val) { long m, b, R; - m = data->info->m[class]; - b = data->info->b[class]; - R = data->info->R[class]; + m = data->info->m[sensor->class]; + b = data->info->b[sensor->class]; + R = data->info->R[sensor->class]; /* Power is in uW. Adjust R and b. */ - if (class == PSC_POWER) { + if (sensor->class == PSC_POWER) { R -= 3; b *= 1000; } /* Calculate Y = (m * X + b) * 10^R */ - if (class != PSC_FAN) { + if (sensor->class != PSC_FAN) { R -= 3; /* Adjust R and b for data in milli-units */ b *= 1000; } @@ -619,7 +620,7 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data, } static u16 pmbus_data2reg_vid(struct pmbus_data *data, - enum pmbus_sensor_classes class, long val) + struct pmbus_sensor *sensor, long val) { val = clamp_val(val, 500, 1600); @@ -627,20 +628,20 @@ static u16 pmbus_data2reg_vid(struct pmbus_data *data, } static u16 pmbus_data2reg(struct pmbus_data *data, - enum pmbus_sensor_classes class, long val) + struct pmbus_sensor *sensor, long val) { u16 regval; - switch (data->info->format[class]) { + switch (data->info->format[sensor->class]) { case direct: - regval = pmbus_data2reg_direct(data, class, val); + regval = pmbus_data2reg_direct(data, sensor, val); break; case vid: - regval = pmbus_data2reg_vid(data, class, val); + regval = pmbus_data2reg_vid(data, sensor, val); break; case linear: default: - regval = pmbus_data2reg_linear(data, class, val); + regval = pmbus_data2reg_linear(data, sensor, val); break; } return regval; @@ -746,7 +747,7 @@ static ssize_t pmbus_set_sensor(struct device *dev, return -EINVAL; mutex_lock(&data->update_lock); - regval = pmbus_data2reg(data, sensor->class, val); + regval = pmbus_data2reg(data, sensor, val); ret = _pmbus_write_word_data(client, sensor->page, sensor->reg, regval); if (ret < 0) rv = ret; @@ -1643,12 +1644,13 @@ static int pmbus_find_attributes(struct i2c_client *client, * This function is called for all chips. 
*/ static int pmbus_identify_common(struct i2c_client *client, - struct pmbus_data *data) + struct pmbus_data *data, int page) { int vout_mode = -1; - if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) - vout_mode = _pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE); + if (pmbus_check_byte_register(client, page, PMBUS_VOUT_MODE)) + vout_mode = _pmbus_read_byte_data(client, page, + PMBUS_VOUT_MODE); if (vout_mode >= 0 && vout_mode != 0xff) { /* * Not all chips support the VOUT_MODE command, @@ -1659,7 +1661,7 @@ static int pmbus_identify_common(struct i2c_client *client, if (data->info->format[PSC_VOLTAGE_OUT] != linear) return -ENODEV; - data->exponent = ((s8)(vout_mode << 3)) >> 3; + data->exponent[page] = ((s8)(vout_mode << 3)) >> 3; break; case 1: /* VID mode */ if (data->info->format[PSC_VOLTAGE_OUT] != vid) @@ -1674,7 +1676,7 @@ static int pmbus_identify_common(struct i2c_client *client, } } - pmbus_clear_fault_page(client, 0); + pmbus_clear_fault_page(client, page); return 0; } @@ -1682,7 +1684,7 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data, struct pmbus_driver_info *info) { struct device *dev = &client->dev; - int ret; + int page, ret; /* * Some PMBus chips don't support PMBUS_STATUS_BYTE, so try @@ -1715,10 +1717,12 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data, return -ENODEV; } - ret = pmbus_identify_common(client, data); - if (ret < 0) { - dev_err(dev, "Failed to identify chip capabilities\n"); - return ret; + for (page = 0; page < info->pages; page++) { + ret = pmbus_identify_common(client, data, page); + if (ret < 0) { + dev_err(dev, "Failed to identify chip capabilities\n"); + return ret; + } } return 0; } diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 86b484c..5194afb 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o +obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index 9300bc3..5409564 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -381,7 +381,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs) ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS) & PCI_MSI_DOORBELL_MASK; - writel(~PCI_MSI_DOORBELL_MASK, per_cpu_int_base + + writel(~msimask, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); for (msinr = PCI_MSI_DOORBELL_START; @@ -407,7 +407,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs) ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS) & IPI_DOORBELL_MASK; - writel(~IPI_DOORBELL_MASK, per_cpu_int_base + + writel(~ipimask, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); /* Handle all pending doorbells */ diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c new file mode 100644 index 0000000..8ed04c4 --- /dev/null +++ b/drivers/irqchip/irq-zevio.c @@ -0,0 +1,127 @@ +/* + * linux/drivers/irqchip/irq-zevio.c + * + * Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. 
+ * + */ + +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +#include <asm/mach/irq.h> +#include <asm/exception.h> + +#include "irqchip.h" + +#define IO_STATUS 0x000 +#define IO_RAW_STATUS 0x004 +#define IO_ENABLE 0x008 +#define IO_DISABLE 0x00C +#define IO_CURRENT 0x020 +#define IO_RESET 0x028 +#define IO_MAX_PRIOTY 0x02C + +#define IO_IRQ_BASE 0x000 +#define IO_FIQ_BASE 0x100 + +#define IO_INVERT_SEL 0x200 +#define IO_STICKY_SEL 0x204 +#define IO_PRIORITY_SEL 0x300 + +#define MAX_INTRS 32 +#define FIQ_START MAX_INTRS + +static struct irq_domain *zevio_irq_domain; +static void __iomem *zevio_irq_io; + +static void zevio_irq_ack(struct irq_data *irqd) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(irqd); + struct irq_chip_regs *regs = + &container_of(irqd->chip, struct irq_chip_type, chip)->regs; + + readl(gc->reg_base + regs->ack); +} + +static asmlinkage void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs) +{ + int irqnr; + + while (readl(zevio_irq_io + IO_STATUS)) { + irqnr = readl(zevio_irq_io + IO_CURRENT); + irqnr = irq_find_mapping(zevio_irq_domain, irqnr); + handle_IRQ(irqnr, regs); + }; +} + +static void __init zevio_init_irq_base(void __iomem *base) +{ + /* Disable all interrupts */ + writel(~0, base + IO_DISABLE); + + /* Accept interrupts of all priorities */ + writel(0xF, base + IO_MAX_PRIOTY); + + /* Reset existing interrupts */ + readl(base + IO_RESET); +} + +static int __init zevio_of_init(struct device_node *node, + struct device_node *parent) +{ + unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; + struct irq_chip_generic *gc; + int ret; + + if (WARN_ON(zevio_irq_io || zevio_irq_domain)) + return -EBUSY; + + zevio_irq_io = of_iomap(node, 0); + BUG_ON(!zevio_irq_io); + + /* Do not invert interrupt status bits */ + writel(~0, zevio_irq_io + IO_INVERT_SEL); + + /* Disable sticky interrupts */ + writel(0, zevio_irq_io + IO_STICKY_SEL); + + /* We don't use IRQ priorities. Set each IRQ to highest priority. 
*/ + memset_io(zevio_irq_io + IO_PRIORITY_SEL, 0, MAX_INTRS * sizeof(u32)); + + /* Init IRQ and FIQ */ + zevio_init_irq_base(zevio_irq_io + IO_IRQ_BASE); + zevio_init_irq_base(zevio_irq_io + IO_FIQ_BASE); + + zevio_irq_domain = irq_domain_add_linear(node, MAX_INTRS, + &irq_generic_chip_ops, NULL); + BUG_ON(!zevio_irq_domain); + + ret = irq_alloc_domain_generic_chips(zevio_irq_domain, MAX_INTRS, 1, + "zevio_intc", handle_level_irq, + clr, 0, IRQ_GC_INIT_MASK_CACHE); + BUG_ON(ret); + + gc = irq_get_domain_generic_chip(zevio_irq_domain, 0); + gc->reg_base = zevio_irq_io; + gc->chip_types[0].chip.irq_ack = zevio_irq_ack; + gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg; + gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg; + gc->chip_types[0].regs.mask = IO_IRQ_BASE + IO_ENABLE; + gc->chip_types[0].regs.enable = IO_IRQ_BASE + IO_ENABLE; + gc->chip_types[0].regs.disable = IO_IRQ_BASE + IO_DISABLE; + gc->chip_types[0].regs.ack = IO_IRQ_BASE + IO_RESET; + + set_handle_irq(zevio_handle_irq); + + pr_info("TI-NSPIRE classic IRQ controller\n"); + return 0; +} + +IRQCHIP_DECLARE(zevio_irq, "lsi,zevio-intc", zevio_of_init); diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c index 68f768a..a6c3c9e 100644 --- a/drivers/media/dvb-frontends/cx24117.c +++ b/drivers/media/dvb-frontends/cx24117.c @@ -1176,7 +1176,7 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config, switch (demod) { case 0: - dev_err(&state->priv->i2c->dev, + dev_err(&i2c->dev, "%s: Error attaching frontend %d\n", KBUILD_MODNAME, demod); goto error1; @@ -1200,12 +1200,6 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config, state->demod = demod - 1; state->priv = priv; - /* test i2c bus for ack */ - if (demod == 0) { - if (cx24117_readreg(state, 0x00) < 0) - goto error3; - } - dev_info(&state->priv->i2c->dev, "%s: Attaching frontend %d\n", KBUILD_MODNAME, state->demod); @@ -1216,8 +1210,6 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config, state->frontend.demodulator_priv = state; return &state->frontend; -error3: - kfree(state); error2: cx24117_release_priv(priv); error1: diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c index 4bf0575..8a8e1ec 100644 --- a/drivers/media/dvb-frontends/nxt200x.c +++ b/drivers/media/dvb-frontends/nxt200x.c @@ -2,7 +2,7 @@ * Support for NXT2002 and NXT2004 - VSB/QAM * * Copyright (C) 2005 Kirk Lapray <kirk.lapray@gmail.com> - * Copyright (C) 2006 Michael Krufky <mkrufky@m1k.net> + * Copyright (C) 2006-2014 Michael Krufky <mkrufky@linuxtv.org> * based on nxt2002 by Taylor Jacob <rtjacob@earthlink.net> * and nxt2004 by Jean-Francois Thibert <jeanfrancois@sagetv.com> * diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c index 1effc21..9bbd665 100644 --- a/drivers/media/i2c/adv7842.c +++ b/drivers/media/i2c/adv7842.c @@ -2554,7 +2554,7 @@ static int adv7842_core_init(struct v4l2_subdev *sd) sdp_write_and_or(sd, 0xdd, 0xf0, pdata->sdp_free_run_force | (pdata->sdp_free_run_cbar_en << 1) | (pdata->sdp_free_run_man_col_en << 2) | - (pdata->sdp_free_run_force << 3)); + (pdata->sdp_free_run_auto << 3)); /* TODO from platform data */ cp_write(sd, 0x69, 0x14); /* Enable CP CSC */ diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c index 4b83811..77e10e0 100644 --- a/drivers/media/i2c/s5k5baf.c +++ b/drivers/media/i2c/s5k5baf.c @@ -478,25 +478,33 @@ static void s5k5baf_write_arr_seq(struct s5k5baf *state, u16 addr, u16 
count, const u16 *seq) { struct i2c_client *c = v4l2_get_subdevdata(&state->sd); - __be16 buf[count + 1]; - int ret, n; + __be16 buf[65]; s5k5baf_i2c_write(state, REG_CMDWR_ADDR, addr); if (state->error) return; + v4l2_dbg(3, debug, c, "i2c_write_seq(count=%d): %*ph\n", count, + min(2 * count, 64), seq); + buf[0] = __constant_cpu_to_be16(REG_CMD_BUF); - for (n = 1; n <= count; ++n) - buf[n] = cpu_to_be16(*seq++); - n *= 2; - ret = i2c_master_send(c, (char *)buf, n); - v4l2_dbg(3, debug, c, "i2c_write_seq(count=%d): %*ph\n", count, - min(2 * count, 64), seq - count); + while (count > 0) { + int n = min_t(int, count, ARRAY_SIZE(buf) - 1); + int ret, i; - if (ret != n) { - v4l2_err(c, "i2c_write_seq: error during transfer (%d)\n", ret); - state->error = ret; + for (i = 1; i <= n; ++i) + buf[i] = cpu_to_be16(*seq++); + + i *= 2; + ret = i2c_master_send(c, (char *)buf, i); + if (ret != i) { + v4l2_err(c, "i2c_write_seq: error during transfer (%d)\n", ret); + state->error = ret; + break; + } + + count -= n; } } diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c index d85cb0a..6662b49 100644 --- a/drivers/media/pci/bt8xx/bttv-cards.c +++ b/drivers/media/pci/bt8xx/bttv-cards.c @@ -2426,7 +2426,7 @@ struct tvcard bttv_tvcards[] = { }, /* ---- card 0x87---------------------------------- */ [BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE] = { - /* Michael Krufky <mkrufky@m1k.net> */ + /* Michael Krufky <mkrufky@linuxtv.org> */ .name = "DViCO FusionHDTV 5 Lite", .tuner_type = TUNER_LG_TDVS_H06XF, /* TDVS-H064F */ .tuner_addr = ADDR_UNSET, diff --git a/drivers/media/pci/bt8xx/bttv-gpio.c b/drivers/media/pci/bt8xx/bttv-gpio.c index 922e823..3f364b7 100644 --- a/drivers/media/pci/bt8xx/bttv-gpio.c +++ b/drivers/media/pci/bt8xx/bttv-gpio.c @@ -98,7 +98,7 @@ int bttv_sub_add_device(struct bttv_core *core, char *name) err = device_register(&sub->dev); if (0 != err) { - kfree(sub); + put_device(&sub->dev); return err; } pr_info("%d: add subdevice \"%s\"\n", core->nr, dev_name(&sub->dev)); diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c index d45e7f6..c9b2350 100644 --- a/drivers/media/pci/saa7134/saa7134-cards.c +++ b/drivers/media/pci/saa7134/saa7134-cards.c @@ -2590,7 +2590,7 @@ struct saa7134_board saa7134_boards[] = { }}, }, [SAA7134_BOARD_AVERMEDIA_AVERTVHD_A180] = { - /* Michael Krufky <mkrufky@m1k.net> + /* Michael Krufky <mkrufky@linuxtv.org> * Uses Alps Electric TDHU2, containing NXT2004 ATSC Decoder * AFAIK, there is no analog demod, thus, * no support for analog television. 
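The s5k5baf_write_arr_seq() rework a few hunks above drops the variable-length on-stack buffer in favour of a fixed 65-entry one and streams the register sequence out in bounded chunks, re-sending the command word ahead of each transfer and stopping on a short write. The standalone C sketch below shows the same bounded-chunk loop shape; xfer_send() merely stands in for i2c_master_send(), and the names, chunk size, and command value here are illustrative, not taken from the driver.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical transport: pretend to send n bytes, returning n on success. */
static int xfer_send(const uint8_t *buf, int n)
{
	return n;
}

#define CHUNK_WORDS 64	/* max payload words per transfer */

/*
 * Send 'count' 16-bit words as big-endian bytes, prefixing every
 * transfer with a command word and never putting more than
 * CHUNK_WORDS payload words on the bus at once.
 */
static int send_seq(uint16_t cmd, const uint16_t *seq, int count)
{
	uint8_t buf[2 * (CHUNK_WORDS + 1)];

	while (count > 0) {
		int n = count < CHUNK_WORDS ? count : CHUNK_WORDS;
		int i, len = 2 * (n + 1);

		buf[0] = cmd >> 8;
		buf[1] = cmd & 0xff;
		for (i = 0; i < n; i++) {
			buf[2 * i + 2] = seq[i] >> 8;	/* big-endian */
			buf[2 * i + 3] = seq[i] & 0xff;
		}
		if (xfer_send(buf, len) != len)
			return -1;	/* short transfer: report the error */

		seq += n;
		count -= n;
	}
	return 0;
}

int main(void)
{
	uint16_t regs[150] = { 0 };

	/* 150 words go out as chunks of 64, 64 and 22 payload words. */
	printf("send_seq: %d\n", send_seq(0x0f12, regs, 150));
	return 0;
}

Bounding the buffer both removes the variable-length array (a stack-overflow risk for large sequences) and lets the loop bail out mid-sequence on the first failed transfer, which is what the driver fix records in state->error.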
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c index a7dfd07..da2fc86 100644 --- a/drivers/media/platform/exynos4-is/fimc-core.c +++ b/drivers/media/platform/exynos4-is/fimc-core.c @@ -1027,7 +1027,8 @@ static int fimc_probe(struct platform_device *pdev) return 0; err_gclk: - clk_disable(fimc->clock[CLK_GATE]); + if (!pm_runtime_enabled(dev)) + clk_disable(fimc->clock[CLK_GATE]); err_sd: fimc_unregister_capture_subdev(fimc); err_sclk: @@ -1036,6 +1037,7 @@ err_sclk: return ret; } +#ifdef CONFIG_PM_RUNTIME static int fimc_runtime_resume(struct device *dev) { struct fimc_dev *fimc = dev_get_drvdata(dev); @@ -1068,6 +1070,7 @@ static int fimc_runtime_suspend(struct device *dev) dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state); return ret; } +#endif #ifdef CONFIG_PM_SLEEP static int fimc_resume(struct device *dev) diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c index 1234734..779ec3c 100644 --- a/drivers/media/platform/exynos4-is/fimc-lite.c +++ b/drivers/media/platform/exynos4-is/fimc-lite.c @@ -1563,7 +1563,7 @@ static int fimc_lite_probe(struct platform_device *pdev) if (!pm_runtime_enabled(dev)) { ret = clk_enable(fimc->clock); if (ret < 0) - goto err_clk_put; + goto err_sd; } fimc->alloc_ctx = vb2_dma_contig_init_ctx(dev); @@ -1579,7 +1579,8 @@ static int fimc_lite_probe(struct platform_device *pdev) return 0; err_clk_dis: - clk_disable(fimc->clock); + if (!pm_runtime_enabled(dev)) + clk_disable(fimc->clock); err_sd: fimc_lite_unregister_capture_subdev(fimc); err_clk_put: @@ -1587,6 +1588,7 @@ err_clk_put: return ret; } +#ifdef CONFIG_PM_RUNTIME static int fimc_lite_runtime_resume(struct device *dev) { struct fimc_lite *fimc = dev_get_drvdata(dev); @@ -1602,6 +1604,7 @@ static int fimc_lite_runtime_suspend(struct device *dev) clk_disable(fimc->clock); return 0; } +#endif #ifdef CONFIG_PM_SLEEP static int fimc_lite_resume(struct device *dev) diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c index a1c78c8..7d68d0b 100644 --- a/drivers/media/platform/s5p-jpeg/jpeg-core.c +++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c @@ -175,7 +175,7 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = { { .name = "YUV 4:2:0 planar, Y/CbCr", .fourcc = V4L2_PIX_FMT_NV12, - .depth = 16, + .depth = 12, .colplanes = 2, .h_align = 1, .v_align = 1, @@ -188,10 +188,10 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = { { .name = "YUV 4:2:0 planar, Y/CbCr", .fourcc = V4L2_PIX_FMT_NV12, - .depth = 16, - .colplanes = 4, + .depth = 12, + .colplanes = 2, .h_align = 4, - .v_align = 1, + .v_align = 4, .flags = SJPEG_FMT_FLAG_ENC_OUTPUT | SJPEG_FMT_FLAG_DEC_CAPTURE | SJPEG_FMT_FLAG_S5P | diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c index 8f9b2cea..8ede8ea 100644 --- a/drivers/media/usb/dvb-usb-v2/af9035.c +++ b/drivers/media/usb/dvb-usb-v2/af9035.c @@ -1539,6 +1539,8 @@ static const struct usb_device_id af9035_id_table[] = { &af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 
2)", NULL) }, { DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05, &af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) }, + { DVB_USB_DEVICE(USB_VID_HAUPPAUGE, 0xf900, + &af9035_props, "Hauppauge WinTV-MiniStick 2", NULL) }, { } }; MODULE_DEVICE_TABLE(usb, af9035_id_table); diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c index d83df4b..0a98d04 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c @@ -1,7 +1,7 @@ /* * mxl111sf-demod.c - driver for the MaxLinear MXL111SF DVB-T demodulator * - * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com> + * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -601,7 +601,7 @@ struct dvb_frontend *mxl111sf_demod_attach(struct mxl111sf_state *mxl_state, EXPORT_SYMBOL_GPL(mxl111sf_demod_attach); MODULE_DESCRIPTION("MaxLinear MxL111SF DVB-T demodulator driver"); -MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>"); +MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.1"); diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h index 3f3f8bf..2d4530f 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h @@ -1,7 +1,7 @@ /* * mxl111sf-demod.h - driver for the MaxLinear MXL111SF DVB-T demodulator * - * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com> + * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c index e4121cb..a619410 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c @@ -1,7 +1,7 @@ /* * mxl111sf-gpio.c - driver for the MaxLinear MXL111SF * - * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com> + * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h index 0220f54..b85a577 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h @@ -1,7 +1,7 @@ /* * mxl111sf-gpio.h - driver for the MaxLinear MXL111SF * - * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com> + * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c index 3443455..a101d06 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c @@ -1,7 +1,7 @@ /* * mxl111sf-i2c.c - driver for the MaxLinear MXL111SF * - * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com> + * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the 
GNU General Public License as published by diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h index a57a45f..4657621 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h @@ -1,7 +1,7 @@ /* * mxl111sf-i2c.h - driver for the MaxLinear MXL111SF * - * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com> + * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c index b741b3a..f6b3480 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c @@ -1,7 +1,7 @@ /* * mxl111sf-phy.c - driver for the MaxLinear MXL111SF * - * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com> + * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h index f075607..0643738 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h @@ -1,7 +1,7 @@ /* * mxl111sf-phy.h - driver for the MaxLinear MXL111SF * - * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com> + * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h index 17831b0..89bf115 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h @@ -1,7 +1,7 @@ /* * mxl111sf-reg.h - driver for the MaxLinear MXL111SF * - * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com> + * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c index 879c529..a8d2c70 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c @@ -1,7 +1,7 @@ /* * mxl111sf-tuner.c - driver for the MaxLinear MXL111SF CMOS tuner * - * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com> + * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -512,7 +512,7 @@ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe, EXPORT_SYMBOL_GPL(mxl111sf_tuner_attach); MODULE_DESCRIPTION("MaxLinear MxL111SF CMOS tuner driver"); -MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>"); +MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.1"); diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h index 90f583e..2046db2 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h @@ 
-1,7 +1,7 @@ /* * mxl111sf-tuner.h - driver for the MaxLinear MXL111SF CMOS tuner * - * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com> + * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -68,7 +68,7 @@ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe, #else static inline struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe, - struct mxl111sf_state *mxl_state + struct mxl111sf_state *mxl_state, struct mxl111sf_tuner_config *cfg) { printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c index 08240e4..c7304fa 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010 Michael Krufky (mkrufky@kernellabs.com) + * Copyright (C) 2010-2014 Michael Krufky (mkrufky@linuxtv.org) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -105,7 +105,7 @@ int mxl111sf_read_reg(struct mxl111sf_state *state, u8 addr, u8 *data) ret = -EINVAL; } - pr_debug("R: (0x%02x, 0x%02x)\n", addr, *data); + pr_debug("R: (0x%02x, 0x%02x)\n", addr, buf[1]); fail: return ret; } @@ -1421,7 +1421,7 @@ static struct usb_driver mxl111sf_usb_driver = { module_usb_driver(mxl111sf_usb_driver); -MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>"); +MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>"); MODULE_DESCRIPTION("Driver for MaxLinear MxL111SF"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.h b/drivers/media/usb/dvb-usb-v2/mxl111sf.h index 9816de8..8516c01 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf.h +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010 Michael Krufky (mkrufky@kernellabs.com) + * Copyright (C) 2010-2014 Michael Krufky (mkrufky@linuxtv.org) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c index 2f0c89c..c563896 100644 --- a/drivers/media/usb/hdpvr/hdpvr-core.c +++ b/drivers/media/usb/hdpvr/hdpvr-core.c @@ -198,7 +198,6 @@ static int device_authorization(struct hdpvr_device *dev) hex_dump_to_buffer(response, 8, 16, 1, print_buf, 5*buf_size+1, 0); v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, " response: %s\n", print_buf); - kfree(print_buf); #endif msleep(100); @@ -214,6 +213,9 @@ static int device_authorization(struct hdpvr_device *dev) retval = ret != 8; unlock: mutex_unlock(&dev->usbc_mutex); +#ifdef HDPVR_DEBUG + kfree(print_buf); +#endif return retval; } diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c index ee52b9f4..f7902fe 100644 --- a/drivers/media/v4l2-core/v4l2-dv-timings.c +++ b/drivers/media/v4l2-core/v4l2-dv-timings.c @@ -515,6 +515,7 @@ bool v4l2_detect_gtf(unsigned frame_height, aspect.denominator = 9; } image_width = ((image_height * aspect.numerator) / aspect.denominator); + image_width = (image_width + GTF_CELL_GRAN/2) & ~(GTF_CELL_GRAN - 1); /* Horizontal */ if (default_gtf) diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c 
b/drivers/media/v4l2-core/videobuf-dma-contig.c index 65411adc..7e6b209 100644 --- a/drivers/media/v4l2-core/videobuf-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf-dma-contig.c @@ -66,14 +66,11 @@ static void __videobuf_dc_free(struct device *dev, static void videobuf_vm_open(struct vm_area_struct *vma) { struct videobuf_mapping *map = vma->vm_private_data; - struct videobuf_queue *q = map->q; - dev_dbg(q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", + dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map, map->count, vma->vm_start, vma->vm_end); - videobuf_queue_lock(q); map->count++; - videobuf_queue_unlock(q); } static void videobuf_vm_close(struct vm_area_struct *vma) @@ -85,11 +82,12 @@ static void videobuf_vm_close(struct vm_area_struct *vma) dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map, map->count, vma->vm_start, vma->vm_end); - videobuf_queue_lock(q); - if (!--map->count) { + map->count--; + if (0 == map->count) { struct videobuf_dma_contig_memory *mem; dev_dbg(q->dev, "munmap %p q=%p\n", map, q); + videobuf_queue_lock(q); /* We need first to cancel streams, before unmapping */ if (q->streaming) @@ -128,8 +126,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma) kfree(map); + videobuf_queue_unlock(q); } - videobuf_queue_unlock(q); } static const struct vm_operations_struct videobuf_vm_ops = { diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c index 9db674c..828e7c1 100644 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c @@ -338,14 +338,11 @@ EXPORT_SYMBOL_GPL(videobuf_dma_free); static void videobuf_vm_open(struct vm_area_struct *vma) { struct videobuf_mapping *map = vma->vm_private_data; - struct videobuf_queue *q = map->q; dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map, map->count, vma->vm_start, vma->vm_end); - videobuf_queue_lock(q); map->count++; - videobuf_queue_unlock(q); } static void videobuf_vm_close(struct vm_area_struct *vma) @@ -358,9 +355,10 @@ static void videobuf_vm_close(struct vm_area_struct *vma) dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map, map->count, vma->vm_start, vma->vm_end); - videobuf_queue_lock(q); - if (!--map->count) { + map->count--; + if (0 == map->count) { dprintk(1, "munmap %p q=%p\n", map, q); + videobuf_queue_lock(q); for (i = 0; i < VIDEO_MAX_FRAME; i++) { if (NULL == q->bufs[i]) continue; @@ -376,9 +374,9 @@ static void videobuf_vm_close(struct vm_area_struct *vma) q->bufs[i]->baddr = 0; q->ops->buf_release(q, q->bufs[i]); } + videobuf_queue_unlock(q); kfree(map); } - videobuf_queue_unlock(q); return; } diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c index 1365c65..2ff7fcc 100644 --- a/drivers/media/v4l2-core/videobuf-vmalloc.c +++ b/drivers/media/v4l2-core/videobuf-vmalloc.c @@ -54,14 +54,11 @@ MODULE_LICENSE("GPL"); static void videobuf_vm_open(struct vm_area_struct *vma) { struct videobuf_mapping *map = vma->vm_private_data; - struct videobuf_queue *q = map->q; dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map, map->count, vma->vm_start, vma->vm_end); - videobuf_queue_lock(q); map->count++; - videobuf_queue_unlock(q); } static void videobuf_vm_close(struct vm_area_struct *vma) @@ -73,11 +70,12 @@ static void videobuf_vm_close(struct vm_area_struct *vma) dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map, map->count, vma->vm_start, vma->vm_end); - videobuf_queue_lock(q); - if (!--map->count) { + 
map->count--; + if (0 == map->count) { struct videobuf_vmalloc_memory *mem; dprintk(1, "munmap %p q=%p\n", map, q); + videobuf_queue_lock(q); /* We need first to cancel streams, before unmapping */ if (q->streaming) @@ -116,8 +114,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma) kfree(map); + videobuf_queue_unlock(q); } - videobuf_queue_unlock(q); return; } diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index 5a5fb7f..a127925 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -1776,6 +1776,11 @@ static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type) return 0; } + if (!q->num_buffers) { + dprintk(1, "streamon: no buffers have been allocated\n"); + return -EINVAL; + } + /* * If any buffers were queued before streamon, * we can now pass them to driver for processing. diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index cd929ae..e2a783f 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -210,10 +210,29 @@ static void post_dock_fixups(acpi_handle not_used, u32 event, void *data) } } +static void dock_event(acpi_handle handle, u32 type, void *data) +{ + struct acpiphp_context *context; + + mutex_lock(&acpiphp_context_lock); + context = acpiphp_get_context(handle); + if (!context || WARN_ON(context->handle != handle) + || context->func.parent->is_going_away) { + mutex_unlock(&acpiphp_context_lock); + return; + } + get_bridge(context->func.parent); + acpiphp_put_context(context); + mutex_unlock(&acpiphp_context_lock); + + hotplug_event(handle, type, data); + + put_bridge(context->func.parent); +} static const struct acpi_dock_ops acpiphp_dock_ops = { .fixup = post_dock_fixups, - .handler = hotplug_event, + .handler = dock_event, }; /* Check whether the PCI device is managed by native PCIe hotplug driver */ @@ -441,7 +460,9 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge) list_del(&bridge->list); mutex_unlock(&bridge_mutex); + mutex_lock(&acpiphp_context_lock); bridge->is_going_away = true; + mutex_unlock(&acpiphp_context_lock); } /** @@ -742,7 +763,7 @@ static void trim_stale_devices(struct pci_dev *dev) /* The device is a bridge, so check the bus below it.
*/ pm_runtime_get_sync(&dev->dev); - list_for_each_entry_safe(child, tmp, &bus->devices, bus_list) + list_for_each_entry_safe_reverse(child, tmp, &bus->devices, bus_list) trim_stale_devices(child); pm_runtime_put(&dev->dev); @@ -773,8 +794,8 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge) ; /* do nothing */ } else if (get_slot_status(slot) == ACPI_STA_ALL) { /* remove stale devices if any */ - list_for_each_entry_safe(dev, tmp, &bus->devices, - bus_list) + list_for_each_entry_safe_reverse(dev, tmp, + &bus->devices, bus_list) if (PCI_SLOT(dev->devfn) == slot->device) trim_stale_devices(dev); @@ -805,7 +826,7 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus) int i; unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM; - list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) { + list_for_each_entry_safe_reverse(dev, tmp, &bus->devices, bus_list) { for (i=0; i<PCI_BRIDGE_RESOURCES; i++) { struct resource *res = &dev->resource[i]; if ((res->flags & type_mask) && !res->start && @@ -829,7 +850,11 @@ void acpiphp_check_host_bridge(acpi_handle handle) bridge = acpiphp_handle_to_bridge(handle); if (bridge) { + pci_lock_rescan_remove(); + acpiphp_check_bridge(bridge); + + pci_unlock_rescan_remove(); put_bridge(bridge); } } @@ -852,6 +877,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data) mutex_unlock(&acpiphp_context_lock); + pci_lock_rescan_remove(); acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); switch (type) { @@ -905,6 +931,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data) break; } + pci_unlock_rescan_remove(); if (bridge) put_bridge(bridge); } @@ -915,11 +942,9 @@ static void hotplug_event_work(void *data, u32 type) acpi_handle handle = context->handle; acpi_scan_lock_acquire(); - pci_lock_rescan_remove(); hotplug_event(handle, type, context); - pci_unlock_rescan_remove(); acpi_scan_lock_release(); acpi_evaluate_hotplug_ost(handle, type, ACPI_OST_SC_SUCCESS, NULL); put_bridge(context->func.parent); @@ -937,6 +962,7 @@ static void handle_hotplug_event(acpi_handle handle, u32 type, void *data) { struct acpiphp_context *context; u32 ost_code = ACPI_OST_SC_SUCCESS; + acpi_status status; switch (type) { case ACPI_NOTIFY_BUS_CHECK: @@ -972,13 +998,20 @@ static void handle_hotplug_event(acpi_handle handle, u32 type, void *data) mutex_lock(&acpiphp_context_lock); context = acpiphp_get_context(handle); - if (context && !WARN_ON(context->handle != handle)) { - get_bridge(context->func.parent); - acpiphp_put_context(context); - acpi_hotplug_execute(hotplug_event_work, context, type); + if (!context || WARN_ON(context->handle != handle) + || context->func.parent->is_going_away) + goto err_out; + + get_bridge(context->func.parent); + acpiphp_put_context(context); + status = acpi_hotplug_execute(hotplug_event_work, context, type); + if (ACPI_SUCCESS(status)) { mutex_unlock(&acpiphp_context_lock); return; } + put_bridge(context->func.parent); + + err_out: mutex_unlock(&acpiphp_context_lock); ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 5ee61a4..c0fe609 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -851,7 +851,9 @@ static struct pinctrl *create_pinctrl(struct device *dev) kref_init(&p->users); /* Add the pinctrl handle to the global list */ + mutex_lock(&pinctrl_list_mutex); list_add_tail(&p->node, &pinctrl_list); + mutex_unlock(&pinctrl_list_mutex); return p; } @@ -1642,8 +1644,10 @@ static void pinctrl_init_device_debugfs(struct 
pinctrl_dev *pctldev) device_root, pctldev, &pinctrl_groups_ops); debugfs_create_file("gpio-ranges", S_IFREG | S_IRUGO, device_root, pctldev, &pinctrl_gpioranges_ops); - pinmux_init_device_debugfs(device_root, pctldev); - pinconf_init_device_debugfs(device_root, pctldev); + if (pctldev->desc->pmxops) + pinmux_init_device_debugfs(device_root, pctldev); + if (pctldev->desc->confops) + pinconf_init_device_debugfs(device_root, pctldev); } static void pinctrl_remove_device_debugfs(struct pinctrl_dev *pctldev) diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index 38c6f8b..d990e33 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -1286,22 +1286,22 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type) switch (type) { case IRQ_TYPE_EDGE_RISING: - irq_set_handler(d->irq, handle_simple_irq); + __irq_set_handler_locked(d->irq, handle_simple_irq); writel_relaxed(mask, pio + PIO_ESR); writel_relaxed(mask, pio + PIO_REHLSR); break; case IRQ_TYPE_EDGE_FALLING: - irq_set_handler(d->irq, handle_simple_irq); + __irq_set_handler_locked(d->irq, handle_simple_irq); writel_relaxed(mask, pio + PIO_ESR); writel_relaxed(mask, pio + PIO_FELLSR); break; case IRQ_TYPE_LEVEL_LOW: - irq_set_handler(d->irq, handle_level_irq); + __irq_set_handler_locked(d->irq, handle_level_irq); writel_relaxed(mask, pio + PIO_LSR); writel_relaxed(mask, pio + PIO_FELLSR); break; case IRQ_TYPE_LEVEL_HIGH: - irq_set_handler(d->irq, handle_level_irq); + __irq_set_handler_locked(d->irq, handle_level_irq); writel_relaxed(mask, pio + PIO_LSR); writel_relaxed(mask, pio + PIO_REHLSR); break; @@ -1310,7 +1310,7 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type) * disable additional interrupt modes: * fall back to default behavior */ - irq_set_handler(d->irq, handle_simple_irq); + __irq_set_handler_locked(d->irq, handle_simple_irq); writel_relaxed(mask, pio + PIO_AIMDR); return 0; case IRQ_TYPE_NONE: diff --git a/drivers/pinctrl/pinctrl-imx1-core.c b/drivers/pinctrl/pinctrl-imx1-core.c index 17aecde..815384b 100644 --- a/drivers/pinctrl/pinctrl-imx1-core.c +++ b/drivers/pinctrl/pinctrl-imx1-core.c @@ -45,7 +45,7 @@ struct imx1_pinctrl { #define MX1_DDIR 0x00 #define MX1_OCR 0x04 #define MX1_ICONFA 0x0c -#define MX1_ICONFB 0x10 +#define MX1_ICONFB 0x14 #define MX1_GIUS 0x20 #define MX1_GPR 0x38 #define MX1_PUEN 0x40 @@ -97,13 +97,13 @@ static void imx1_write_2bit(struct imx1_pinctrl *ipctl, unsigned int pin_id, u32 old_val; u32 new_val; - dev_dbg(ipctl->dev, "write: register 0x%p offset %d value 0x%x\n", - reg, offset, value); - /* Use the next register if the pin's port pin number is >=16 */ if (pin_id % 32 >= 16) reg += 0x04; + dev_dbg(ipctl->dev, "write: register 0x%p offset %d value 0x%x\n", + reg, offset, value); + /* Get current state of pins */ old_val = readl(reg); old_val &= mask; @@ -139,7 +139,7 @@ static int imx1_read_2bit(struct imx1_pinctrl *ipctl, unsigned int pin_id, u32 reg_offset) { void __iomem *reg = imx1_mem(ipctl, pin_id) + reg_offset; - int offset = pin_id % 16; + int offset = (pin_id % 16) * 2; /* Use the next register if the pin's port pin number is >=16 */ if (pin_id % 32 >= 16) diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c index a2e93a2..e767355 100644 --- a/drivers/pinctrl/pinctrl-tegra.c +++ b/drivers/pinctrl/pinctrl-tegra.c @@ -645,7 +645,7 @@ int tegra_pinctrl_probe(struct platform_device *pdev, GFP_KERNEL); if (!pmx->regs) { dev_err(&pdev->dev, "Can't alloc regs pointer\n"); - return -ENODEV; + 
return -ENOMEM; } for (i = 0; i < pmx->nbanks; i++) { diff --git a/drivers/pinctrl/sirf/pinctrl-prima2.c b/drivers/pinctrl/sirf/pinctrl-prima2.c index 37b4265..dde0285 100644 --- a/drivers/pinctrl/sirf/pinctrl-prima2.c +++ b/drivers/pinctrl/sirf/pinctrl-prima2.c @@ -413,7 +413,7 @@ static const struct sirfsoc_padmux ac97_padmux = { .funcval = 0, }; -static const unsigned ac97_pins[] = { 33, 34, 35, 36 }; +static const unsigned ac97_pins[] = { 43, 44, 45, 46 }; static const struct sirfsoc_muxmask spi1_muxmask[] = { { diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c index b28d1af..9802b67 100644 --- a/drivers/pinctrl/vt8500/pinctrl-wmt.c +++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c @@ -276,7 +276,20 @@ static int wmt_pctl_dt_node_to_map_pull(struct wmt_pinctrl_data *data, if (!configs) return -ENOMEM; - configs[0] = pull; + switch (pull) { + case 0: + configs[0] = PIN_CONFIG_BIAS_DISABLE; + break; + case 1: + configs[0] = PIN_CONFIG_BIAS_PULL_DOWN; + break; + case 2: + configs[0] = PIN_CONFIG_BIAS_PULL_UP; + break; + default: + configs[0] = PIN_CONFIG_BIAS_DISABLE; + dev_err(data->dev, "invalid pull state %d - disabling\n", pull); + } map->type = PIN_MAP_TYPE_CONFIGS_PIN; map->data.configs.group_or_pin = data->groups[group]; diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c index 77b46d0..e10febe 100644 --- a/drivers/regulator/ab3100.c +++ b/drivers/regulator/ab3100.c @@ -498,7 +498,7 @@ static int ab3100_regulator_register(struct platform_device *pdev, struct ab3100_platform_data *plfdata, struct regulator_init_data *init_data, struct device_node *np, - int id) + unsigned long id) { struct regulator_desc *desc; struct ab3100_regulator *reg; @@ -646,7 +646,7 @@ ab3100_regulator_of_probe(struct platform_device *pdev, struct device_node *np) err = ab3100_regulator_register( pdev, NULL, ab3100_regulator_matches[i].init_data, ab3100_regulator_matches[i].of_node, - (int) ab3100_regulator_matches[i].driver_data); + (unsigned long)ab3100_regulator_matches[i].driver_data); if (err) { ab3100_regulators_remove(pdev); return err; diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index b38a6b6..16a309e 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1272,6 +1272,8 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev, if (r->dev.parent && node == r->dev.of_node) return r; + *ret = -EPROBE_DEFER; + return NULL; } else { /* * If we couldn't even get the node then it's @@ -1312,7 +1314,7 @@ static struct regulator *_regulator_get(struct device *dev, const char *id, struct regulator_dev *rdev; struct regulator *regulator = ERR_PTR(-EPROBE_DEFER); const char *devname = NULL; - int ret = -EPROBE_DEFER; + int ret; if (id == NULL) { pr_err("get() with no identifier\n"); @@ -1322,6 +1324,11 @@ static struct regulator *_regulator_get(struct device *dev, const char *id, if (dev) devname = dev_name(dev); + if (have_full_constraints()) + ret = -ENODEV; + else + ret = -EPROBE_DEFER; + mutex_lock(®ulator_list_mutex); rdev = regulator_dev_lookup(dev, id, &ret); diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index d9e5579..cd0b9e35 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c @@ -441,6 +441,7 @@ common_reg: for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) { if (!reg_np) { config.init_data = pdata->regulators[i].initdata; + config.of_node = pdata->regulators[i].reg_node; } else { config.init_data = rdata[i].init_data; config.of_node = rdata[i].of_node; diff 
--git a/drivers/staging/media/go7007/go7007-loader.c b/drivers/staging/media/go7007/go7007-loader.c index 10bb41c..eecb1f2 100644 --- a/drivers/staging/media/go7007/go7007-loader.c +++ b/drivers/staging/media/go7007/go7007-loader.c @@ -59,7 +59,7 @@ static int go7007_loader_probe(struct usb_interface *interface, if (usbdev->descriptor.bNumConfigurations != 1) { dev_err(&interface->dev, "can't handle multiple config\n"); - return -ENODEV; + goto failed2; } vendor = le16_to_cpu(usbdev->descriptor.idVendor); @@ -108,6 +108,7 @@ static int go7007_loader_probe(struct usb_interface *interface, return 0; failed2: + usb_put_dev(usbdev); dev_err(&interface->dev, "probe failed\n"); return -ENODEV; } @@ -115,6 +116,7 @@ failed2: static void go7007_loader_disconnect(struct usb_interface *interface) { dev_info(&interface->dev, "disconnect\n"); + usb_put_dev(interface_to_usbdev(interface)); usb_set_intfdata(interface, NULL); } diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 4c4c566..79d2589 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -223,6 +223,7 @@ config SA1100_WATCHDOG config DW_WATCHDOG tristate "Synopsys DesignWare watchdog" + depends on HAS_IOMEM help Say Y here if to include support for the Synopsys DesignWare watchdog timer found in many chips. diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 34a2704..073b4a1 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -284,10 +284,8 @@ static int map_grant_pages(struct grant_map *map) } pr_debug("map %d+%d\n", map->index, map->count); - err = gnttab_map_refs_userspace(map->map_ops, - use_ptemod ? map->kmap_ops : NULL, - map->pages, - map->count); + err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL, + map->pages, map->count); if (err) return err; @@ -317,10 +315,9 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) } } - err = gnttab_unmap_refs_userspace(map->unmap_ops + offset, - use_ptemod ? map->kmap_ops + offset : NULL, - map->pages + offset, - pages); + err = gnttab_unmap_refs(map->unmap_ops + offset, + use_ptemod ? 
map->kmap_ops + offset : NULL, map->pages + offset, + pages); if (err) return err; diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 8ee13e2..b84e3ab 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -928,17 +928,15 @@ void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count) } EXPORT_SYMBOL_GPL(gnttab_batch_copy); -int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, +int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, struct gnttab_map_grant_ref *kmap_ops, - struct page **pages, unsigned int count, - bool m2p_override) + struct page **pages, unsigned int count) { int i, ret; bool lazy = false; pte_t *pte; - unsigned long mfn, pfn; + unsigned long mfn; - BUG_ON(kmap_ops && !m2p_override); ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count); if (ret) return ret; @@ -957,12 +955,10 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT, map_ops[i].dev_bus_addr >> PAGE_SHIFT); } - return 0; + return ret; } - if (m2p_override && - !in_interrupt() && - paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) { + if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) { arch_enter_lazy_mmu_mode(); lazy = true; } @@ -979,20 +975,8 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, } else { mfn = PFN_DOWN(map_ops[i].dev_bus_addr); } - pfn = page_to_pfn(pages[i]); - - WARN_ON(PagePrivate(pages[i])); - SetPagePrivate(pages[i]); - set_page_private(pages[i], mfn); - - pages[i]->index = pfn_to_mfn(pfn); - if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) { - ret = -ENOMEM; - goto out; - } - if (m2p_override) - ret = m2p_add_override(mfn, pages[i], kmap_ops ? - &kmap_ops[i] : NULL); + ret = m2p_add_override(mfn, pages[i], kmap_ops ? 
+ &kmap_ops[i] : NULL); if (ret) goto out; } @@ -1003,32 +987,15 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, return ret; } - -int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, - struct page **pages, unsigned int count) -{ - return __gnttab_map_refs(map_ops, NULL, pages, count, false); -} EXPORT_SYMBOL_GPL(gnttab_map_refs); -int gnttab_map_refs_userspace(struct gnttab_map_grant_ref *map_ops, - struct gnttab_map_grant_ref *kmap_ops, - struct page **pages, unsigned int count) -{ - return __gnttab_map_refs(map_ops, kmap_ops, pages, count, true); -} -EXPORT_SYMBOL_GPL(gnttab_map_refs_userspace); - -int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, +int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, struct gnttab_map_grant_ref *kmap_ops, - struct page **pages, unsigned int count, - bool m2p_override) + struct page **pages, unsigned int count) { int i, ret; bool lazy = false; - unsigned long pfn, mfn; - BUG_ON(kmap_ops && !m2p_override); ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count); if (ret) return ret; @@ -1039,33 +1006,17 @@ int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT, INVALID_P2M_ENTRY); } - return 0; + return ret; } - if (m2p_override && - !in_interrupt() && - paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) { + if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) { arch_enter_lazy_mmu_mode(); lazy = true; } for (i = 0; i < count; i++) { - pfn = page_to_pfn(pages[i]); - mfn = get_phys_to_machine(pfn); - if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) { - ret = -EINVAL; - goto out; - } - - set_page_private(pages[i], INVALID_P2M_ENTRY); - WARN_ON(!PagePrivate(pages[i])); - ClearPagePrivate(pages[i]); - set_phys_to_machine(pfn, pages[i]->index); - if (m2p_override) - ret = m2p_remove_override(pages[i], - kmap_ops ? - &kmap_ops[i] : NULL, - mfn); + ret = m2p_remove_override(pages[i], kmap_ops ? + &kmap_ops[i] : NULL); if (ret) goto out; } @@ -1076,22 +1027,8 @@ int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, return ret; } - -int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *map_ops, - struct page **pages, unsigned int count) -{ - return __gnttab_unmap_refs(map_ops, NULL, pages, count, false); -} EXPORT_SYMBOL_GPL(gnttab_unmap_refs); -int gnttab_unmap_refs_userspace(struct gnttab_unmap_grant_ref *map_ops, - struct gnttab_map_grant_ref *kmap_ops, - struct page **pages, unsigned int count) -{ - return __gnttab_unmap_refs(map_ops, kmap_ops, pages, count, true); -} -EXPORT_SYMBOL_GPL(gnttab_unmap_refs_userspace); - static unsigned nr_status_frames(unsigned nr_grant_frames) { BUG_ON(grefs_per_grant_frame == 0); diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index 49a62b4..0e8388e 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c @@ -92,11 +92,11 @@ #include <linux/slab.h> #include <linux/buffer_head.h> #include <linux/mutex.h> -#include <linux/crc32c.h> #include <linux/genhd.h> #include <linux/blkdev.h> #include "ctree.h" #include "disk-io.h" +#include "hash.h" #include "transaction.h" #include "extent_io.h" #include "volumes.h" @@ -1823,7 +1823,7 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state, size_t sublen = i ? 
PAGE_CACHE_SIZE : (PAGE_CACHE_SIZE - BTRFS_CSUM_SIZE); - crc = crc32c(crc, data, sublen); + crc = btrfs_crc32c(crc, data, sublen); } btrfs_csum_final(crc, csum); if (memcmp(csum, h->csum, state->csum_size)) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index e2600cd..b01fb6c 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -1010,6 +1010,8 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, bytes = min(bytes, working_bytes); kaddr = kmap_atomic(page_out); memcpy(kaddr + *pg_offset, buf + buf_offset, bytes); + if (*pg_index == (vcnt - 1) && *pg_offset == 0) + memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes); kunmap_atomic(kaddr); flush_dcache_page(page_out); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 0e69295..5215f04 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -26,7 +26,6 @@ #include <linux/workqueue.h> #include <linux/kthread.h> #include <linux/freezer.h> -#include <linux/crc32c.h> #include <linux/slab.h> #include <linux/migrate.h> #include <linux/ratelimit.h> @@ -35,6 +34,7 @@ #include <asm/unaligned.h> #include "ctree.h" #include "disk-io.h" +#include "hash.h" #include "transaction.h" #include "btrfs_inode.h" #include "volumes.h" @@ -244,7 +244,7 @@ out: u32 btrfs_csum_data(char *data, u32 seed, size_t len) { - return crc32c(seed, data, len); + return btrfs_crc32c(seed, data, len); } void btrfs_csum_final(u32 crc, char *result) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9c9ecc9..32312e0 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2385,6 +2385,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, spin_unlock(&delayed_refs->lock); locked_ref = NULL; cond_resched(); + count++; continue; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5c4ab9c..184e9cb 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2629,7 +2629,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) EXTENT_DEFRAG, 1, cached_state); if (ret) { u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item); - if (last_snapshot >= BTRFS_I(inode)->generation) + if (0 && last_snapshot >= BTRFS_I(inode)->generation) /* the inode is shared */ new = record_old_file_extents(inode, ordered_extent); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index b013489..383ab45 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -4525,7 +4525,7 @@ static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg) spin_lock(&root->fs_info->super_lock); strcpy(super_block->label, label); spin_unlock(&root->fs_info->super_lock); - ret = btrfs_end_transaction(trans, root); + ret = btrfs_commit_transaction(trans, root); out_unlock: mnt_drop_write_file(file); @@ -4668,7 +4668,7 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg) if (ret) return ret; - trans = btrfs_start_transaction(root, 1); + trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) return PTR_ERR(trans); @@ -4689,7 +4689,7 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg) btrfs_set_super_incompat_flags(super_block, newflags); spin_unlock(&root->fs_info->super_lock); - return btrfs_end_transaction(trans, root); + return btrfs_commit_transaction(trans, root); } long btrfs_ioctl(struct file *file, unsigned int diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 730dce3..9c8d1a3 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -24,12 +24,12 @@ #include <linux/xattr.h> #include <linux/posix_acl_xattr.h> #include 
<linux/radix-tree.h> -#include <linux/crc32c.h> #include <linux/vmalloc.h> #include <linux/string.h> #include "send.h" #include "backref.h" +#include "hash.h" #include "locking.h" #include "disk-io.h" #include "btrfs_inode.h" @@ -620,7 +620,7 @@ static int send_cmd(struct send_ctx *sctx) hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr)); hdr->crc = 0; - crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size); + crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size); hdr->crc = cpu_to_le32(crc); ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size, @@ -2774,8 +2774,6 @@ static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino) return 0; } -#ifdef CONFIG_BTRFS_ASSERT - static int del_waiting_dir_move(struct send_ctx *sctx, u64 ino) { struct rb_node *n = sctx->waiting_dir_moves.rb_node; @@ -2796,8 +2794,6 @@ static int del_waiting_dir_move(struct send_ctx *sctx, u64 ino) return -ENOENT; } -#endif - static int add_pending_dir_move(struct send_ctx *sctx, u64 parent_ino) { struct rb_node **p = &sctx->pending_dir_moves.rb_node; @@ -2902,7 +2898,9 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) } sctx->send_progress = sctx->cur_ino + 1; - ASSERT(del_waiting_dir_move(sctx, pm->ino) == 0); + ret = del_waiting_dir_move(sctx, pm->ino); + ASSERT(ret == 0); + ret = get_cur_path(sctx, pm->ino, pm->gen, to_path); if (ret < 0) goto out; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index c02f633..97cc241 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -1996,7 +1996,7 @@ static void __exit exit_btrfs_fs(void) btrfs_hash_exit(); } -module_init(init_btrfs_fs) +late_initcall(init_btrfs_fs); module_exit(exit_btrfs_fs) MODULE_LICENSE("GPL"); diff --git a/fs/buffer.c b/fs/buffer.c index 651dba1..27265a8 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -654,14 +654,16 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode); static void __set_page_dirty(struct page *page, struct address_space *mapping, int warn) { - spin_lock_irq(&mapping->tree_lock); + unsigned long flags; + + spin_lock_irqsave(&mapping->tree_lock, flags); if (page->mapping) { /* Race with truncate? 
*/ WARN_ON_ONCE(warn && !PageUptodate(page)); account_page_dirtied(page, mapping); radix_tree_tag_set(&mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY); } - spin_unlock_irq(&mapping->tree_lock); + spin_unlock_irqrestore(&mapping->tree_lock, flags); __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); } diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 853d6d1..a7eda8e 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -2559,8 +2559,8 @@ cifs_writev(struct kiocb *iocb, const struct iovec *iov, if (rc > 0) { ssize_t err; - err = generic_write_sync(file, pos, rc); - if (err < 0 && rc > 0) + err = generic_write_sync(file, iocb->ki_pos - rc, rc); + if (err < 0) rc = err; } @@ -748,11 +748,10 @@ EXPORT_SYMBOL(setup_arg_pages); #endif /* CONFIG_MMU */ -struct file *open_exec(const char *name) +static struct file *do_open_exec(struct filename *name) { struct file *file; int err; - struct filename tmp = { .name = name }; static const struct open_flags open_exec_flags = { .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC, .acc_mode = MAY_EXEC | MAY_OPEN, @@ -760,7 +759,7 @@ struct file *open_exec(const char *name) .lookup_flags = LOOKUP_FOLLOW, }; - file = do_filp_open(AT_FDCWD, &tmp, &open_exec_flags); + file = do_filp_open(AT_FDCWD, name, &open_exec_flags); if (IS_ERR(file)) goto out; @@ -784,6 +783,12 @@ exit: fput(file); return ERR_PTR(err); } + +struct file *open_exec(const char *name) +{ + struct filename tmp = { .name = name }; + return do_open_exec(&tmp); +} EXPORT_SYMBOL(open_exec); int kernel_read(struct file *file, loff_t offset, @@ -1162,7 +1167,7 @@ int prepare_bprm_creds(struct linux_binprm *bprm) return -ENOMEM; } -void free_bprm(struct linux_binprm *bprm) +static void free_bprm(struct linux_binprm *bprm) { free_arg_pages(bprm); if (bprm->cred) { @@ -1432,7 +1437,7 @@ static int exec_binprm(struct linux_binprm *bprm) /* * sys_execve() executes a new program. 
*/ -static int do_execve_common(const char *filename, +static int do_execve_common(struct filename *filename, struct user_arg_ptr argv, struct user_arg_ptr envp) { @@ -1441,6 +1446,9 @@ static int do_execve_common(const char *filename, struct files_struct *displaced; int retval; + if (IS_ERR(filename)) + return PTR_ERR(filename); + /* * We move the actual failure in case of RLIMIT_NPROC excess from * set*uid() to execve() because too many poorly written programs @@ -1473,7 +1481,7 @@ static int do_execve_common(const char *filename, check_unsafe_exec(bprm); current->in_execve = 1; - file = open_exec(filename); + file = do_open_exec(filename); retval = PTR_ERR(file); if (IS_ERR(file)) goto out_unmark; @@ -1481,8 +1489,7 @@ static int do_execve_common(const char *filename, sched_exec(); bprm->file = file; - bprm->filename = filename; - bprm->interp = filename; + bprm->filename = bprm->interp = filename->name; retval = bprm_mm_init(bprm); if (retval) @@ -1523,6 +1530,7 @@ static int do_execve_common(const char *filename, acct_update_integrals(current); task_numa_free(current); free_bprm(bprm); + putname(filename); if (displaced) put_files_struct(displaced); return retval; @@ -1544,10 +1552,11 @@ out_files: if (displaced) reset_files_struct(displaced); out_ret: + putname(filename); return retval; } -int do_execve(const char *filename, +int do_execve(struct filename *filename, const char __user *const __user *__argv, const char __user *const __user *__envp) { @@ -1557,7 +1566,7 @@ int do_execve(const char *filename, } #ifdef CONFIG_COMPAT -static int compat_do_execve(const char *filename, +static int compat_do_execve(struct filename *filename, const compat_uptr_t __user *__argv, const compat_uptr_t __user *__envp) { @@ -1607,25 +1616,13 @@ SYSCALL_DEFINE3(execve, const char __user *const __user *, argv, const char __user *const __user *, envp) { - struct filename *path = getname(filename); - int error = PTR_ERR(path); - if (!IS_ERR(path)) { - error = do_execve(path->name, argv, envp); - putname(path); - } - return error; + return do_execve(getname(filename), argv, envp); } #ifdef CONFIG_COMPAT asmlinkage long compat_sys_execve(const char __user * filename, const compat_uptr_t __user * argv, const compat_uptr_t __user * envp) { - struct filename *path = getname(filename); - int error = PTR_ERR(path); - if (!IS_ERR(path)) { - error = compat_do_execve(path->name, argv, envp); - putname(path); - } - return error; + return compat_do_execve(getname(filename), argv, envp); } #endif diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 43e64f6..1a50739 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -152,7 +152,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov, if (ret > 0) { ssize_t err; - err = generic_write_sync(file, pos, ret); + err = generic_write_sync(file, iocb->ki_pos - ret, ret); if (err < 0 && ret > 0) ret = err; } diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c index 3bd5ee4..46325d5 100644 --- a/fs/jfs/xattr.c +++ b/fs/jfs/xattr.c @@ -854,9 +854,6 @@ int jfs_setxattr(struct dentry *dentry, const char *name, const void *value, int rc; tid_t tid; - if ((rc = can_set_xattr(inode, name, value, value_len))) - return rc; - /* * If this is a request for a synthetic attribute in the system.* * namespace use the generic infrastructure to resolve a handler @@ -865,6 +862,9 @@ int jfs_setxattr(struct dentry *dentry, const char *name, const void *value, if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) return generic_setxattr(dentry, name, value, value_len, flags); + 
if ((rc = can_set_xattr(inode, name, value, value_len))) + return rc; + if (value == NULL) { /* empty EA, do not remove */ value = ""; value_len = 0; @@ -1034,9 +1034,6 @@ int jfs_removexattr(struct dentry *dentry, const char *name) int rc; tid_t tid; - if ((rc = can_set_xattr(inode, name, NULL, 0))) - return rc; - /* * If this is a request for a synthetic attribute in the system.* * namespace use the generic infrastructure to resolve a handler @@ -1045,6 +1042,9 @@ int jfs_removexattr(struct dentry *dentry, const char *name) if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) return generic_removexattr(dentry, name); + if ((rc = can_set_xattr(inode, name, NULL, 0))) + return rc; + tid = txBegin(inode->i_sb, 0); mutex_lock(&ji->commit_mutex); rc = __jfs_setxattr(tid, dentry->d_inode, name, NULL, 0, XATTR_REPLACE); @@ -1061,7 +1061,7 @@ int jfs_removexattr(struct dentry *dentry, const char *name) * attributes are handled directly. */ const struct xattr_handler *jfs_xattr_handlers[] = { -#ifdef JFS_POSIX_ACL +#ifdef CONFIG_JFS_POSIX_ACL &posix_acl_access_xattr_handler, &posix_acl_default_xattr_handler, #endif @@ -196,6 +196,7 @@ recopy: goto error; result->uptr = filename; + result->aname = NULL; audit_getname(result); return result; @@ -210,6 +211,35 @@ getname(const char __user * filename) return getname_flags(filename, 0, NULL); } +/* + * The "getname_kernel()" interface doesn't do pathnames longer + * than EMBEDDED_NAME_MAX. Deal with it - you're a kernel user. + */ +struct filename * +getname_kernel(const char * filename) +{ + struct filename *result; + char *kname; + int len; + + len = strlen(filename); + if (len >= EMBEDDED_NAME_MAX) + return ERR_PTR(-ENAMETOOLONG); + + result = __getname(); + if (unlikely(!result)) + return ERR_PTR(-ENOMEM); + + kname = (char *)result + sizeof(*result); + result->name = kname; + result->uptr = NULL; + result->aname = NULL; + result->separate = false; + + strlcpy(kname, filename, EMBEDDED_NAME_MAX); + return result; +} + #ifdef CONFIG_AUDITSYSCALL void putname(struct filename *name) { diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c index 9a5ca03..871d6ed 100644 --- a/fs/nfs/nfs3acl.c +++ b/fs/nfs/nfs3acl.c @@ -80,7 +80,7 @@ struct posix_acl *nfs3_get_acl(struct inode *inode, int type) } if (res.acl_access != NULL) { - if (posix_acl_equiv_mode(res.acl_access, NULL) || + if ((posix_acl_equiv_mode(res.acl_access, NULL) == 0) || res.acl_access->a_count == 0) { posix_acl_release(res.acl_access); res.acl_access = NULL; @@ -113,7 +113,7 @@ getout: return ERR_PTR(status); } -int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl, +static int __nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl, struct posix_acl *dfacl) { struct nfs_server *server = NFS_SERVER(inode); @@ -198,6 +198,15 @@ out: return status; } +int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl, + struct posix_acl *dfacl) +{ + int ret; + ret = __nfs3_proc_setacls(inode, acl, dfacl); + return (ret == -EOPNOTSUPP) ? 
0 : ret; + +} + int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type) { struct posix_acl *alloc = NULL, *dfacl = NULL; @@ -225,7 +234,7 @@ int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type) if (IS_ERR(alloc)) goto fail; } - status = nfs3_proc_setacls(inode, acl, dfacl); + status = __nfs3_proc_setacls(inode, acl, dfacl); posix_acl_release(alloc); return status; @@ -233,25 +242,6 @@ fail: return PTR_ERR(alloc); } -int nfs3_proc_set_default_acl(struct inode *dir, struct inode *inode, - umode_t mode) -{ - struct posix_acl *default_acl, *acl; - int error; - - error = posix_acl_create(dir, &mode, &default_acl, &acl); - if (error) - return (error == -EOPNOTSUPP) ? 0 : error; - - error = nfs3_proc_setacls(inode, acl, default_acl); - - if (acl) - posix_acl_release(acl); - if (default_acl) - posix_acl_release(default_acl); - return error; -} - const struct xattr_handler *nfs3_xattr_handlers[] = { &posix_acl_access_xattr_handler, &posix_acl_default_xattr_handler, diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index dbb3e1f..860ad26 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -170,7 +170,7 @@ void nfs41_shutdown_client(struct nfs_client *clp) void nfs40_shutdown_client(struct nfs_client *clp) { if (clp->cl_slot_tbl) { - nfs4_release_slot_table(clp->cl_slot_tbl); + nfs4_shutdown_slot_table(clp->cl_slot_tbl); kfree(clp->cl_slot_tbl); } } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 42da6af..2da6a69 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1620,15 +1620,15 @@ static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata) { struct nfs4_opendata *data = calldata; - nfs40_setup_sequence(data->o_arg.server, &data->o_arg.seq_args, - &data->o_res.seq_res, task); + nfs40_setup_sequence(data->o_arg.server, &data->c_arg.seq_args, + &data->c_res.seq_res, task); } static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) { struct nfs4_opendata *data = calldata; - nfs40_sequence_done(task, &data->o_res.seq_res); + nfs40_sequence_done(task, &data->c_res.seq_res); data->rpc_status = task->tk_status; if (data->rpc_status == 0) { @@ -1686,7 +1686,7 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) }; int status; - nfs4_init_sequence(&data->o_arg.seq_args, &data->o_res.seq_res, 1); + nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1); kref_get(&data->kref); data->rpc_done = 0; data->rpc_status = 0; diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c index cf883c7..e799dc3 100644 --- a/fs/nfs/nfs4session.c +++ b/fs/nfs/nfs4session.c @@ -231,14 +231,23 @@ out: return ret; } +/* + * nfs4_release_slot_table - release all slot table entries + */ +static void nfs4_release_slot_table(struct nfs4_slot_table *tbl) +{ + nfs4_shrink_slot_table(tbl, 0); +} + /** - * nfs4_release_slot_table - release resources attached to a slot table + * nfs4_shutdown_slot_table - release resources attached to a slot table * @tbl: slot table to shut down * */ -void nfs4_release_slot_table(struct nfs4_slot_table *tbl) +void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl) { - nfs4_shrink_slot_table(tbl, 0); + nfs4_release_slot_table(tbl); + rpc_destroy_wait_queue(&tbl->slot_tbl_waitq); } /** @@ -422,7 +431,7 @@ void nfs41_update_target_slotid(struct nfs4_slot_table *tbl, spin_unlock(&tbl->slot_tbl_lock); } -static void nfs4_destroy_session_slot_tables(struct nfs4_session *session) +static void nfs4_release_session_slot_tables(struct nfs4_session *session) { 
nfs4_release_slot_table(&session->fc_slot_table); nfs4_release_slot_table(&session->bc_slot_table); @@ -450,7 +459,7 @@ int nfs4_setup_session_slot_tables(struct nfs4_session *ses) if (status && tbl->slots == NULL) /* Fore and back channel share a connection so get * both slot tables or neither */ - nfs4_destroy_session_slot_tables(ses); + nfs4_release_session_slot_tables(ses); return status; } @@ -470,6 +479,12 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp) return session; } +static void nfs4_destroy_session_slot_tables(struct nfs4_session *session) +{ + nfs4_shutdown_slot_table(&session->fc_slot_table); + nfs4_shutdown_slot_table(&session->bc_slot_table); +} + void nfs4_destroy_session(struct nfs4_session *session) { struct rpc_xprt *xprt; diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h index 2323061..b34ada9 100644 --- a/fs/nfs/nfs4session.h +++ b/fs/nfs/nfs4session.h @@ -74,7 +74,7 @@ enum nfs4_session_state { extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl, unsigned int max_reqs, const char *queue); -extern void nfs4_release_slot_table(struct nfs4_slot_table *tbl); +extern void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl); extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl); extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot); extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl); diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index ea4ba9d..db9bd8a 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -2134,7 +2134,7 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos); mutex_unlock(&inode->i_mutex); if (ret > 0) { - int err = generic_write_sync(file, pos, ret); + int err = generic_write_sync(file, iocb->ki_pos - ret, ret); if (err < 0) ret = err; } diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 8750ae1..aada580 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -4742,6 +4742,7 @@ int ocfs2_add_clusters_in_btree(handle_t *handle, enum ocfs2_alloc_restarted *reason_ret) { int status = 0, err = 0; + int need_free = 0; int free_extents; enum ocfs2_alloc_restarted reason = RESTART_NONE; u32 bit_off, num_bits; @@ -4796,7 +4797,8 @@ int ocfs2_add_clusters_in_btree(handle_t *handle, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); - goto leave; + need_free = 1; + goto bail; } block = ocfs2_clusters_to_blocks(osb->sb, bit_off); @@ -4807,7 +4809,8 @@ int ocfs2_add_clusters_in_btree(handle_t *handle, num_bits, flags, meta_ac); if (status < 0) { mlog_errno(status); - goto leave; + need_free = 1; + goto bail; } ocfs2_journal_dirty(handle, et->et_root_bh); @@ -4821,6 +4824,19 @@ int ocfs2_add_clusters_in_btree(handle_t *handle, reason = RESTART_TRANS; } +bail: + if (need_free) { + if (data_ac->ac_which == OCFS2_AC_USE_LOCAL) + ocfs2_free_local_alloc_bits(osb, handle, data_ac, + bit_off, num_bits); + else + ocfs2_free_clusters(handle, + data_ac->ac_inode, + data_ac->ac_bh, + ocfs2_clusters_to_blocks(osb->sb, bit_off), + num_bits); + } + leave: if (reason_ret) *reason_ret = reason; @@ -6805,6 +6821,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, struct buffer_head *di_bh) { int ret, i, has_data, num_pages = 0; + int need_free = 0; + u32 bit_off, num; handle_t *handle; u64 uninitialized_var(block); struct ocfs2_inode_info *oi = OCFS2_I(inode); @@ -6850,7 +6868,6 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, } if (has_data) { - 
u32 bit_off, num; unsigned int page_end; u64 phys; @@ -6886,6 +6903,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages); if (ret) { mlog_errno(ret); + need_free = 1; goto out_commit; } @@ -6896,6 +6914,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, ret = ocfs2_read_inline_data(inode, pages[0], di_bh); if (ret) { mlog_errno(ret); + need_free = 1; goto out_commit; } @@ -6927,6 +6946,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, ret = ocfs2_insert_extent(handle, &et, 0, block, 1, 0, NULL); if (ret) { mlog_errno(ret); + need_free = 1; goto out_commit; } @@ -6938,6 +6958,18 @@ out_commit: dquot_free_space_nodirty(inode, ocfs2_clusters_to_bytes(osb->sb, 1)); + if (need_free) { + if (data_ac->ac_which == OCFS2_AC_USE_LOCAL) + ocfs2_free_local_alloc_bits(osb, handle, data_ac, + bit_off, num); + else + ocfs2_free_clusters(handle, + data_ac->ac_inode, + data_ac->ac_bh, + ocfs2_clusters_to_blocks(osb->sb, bit_off), + num); + } + ocfs2_commit_trans(osb, handle); out_unlock: diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index cd5496b..0440134 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c @@ -781,6 +781,48 @@ bail: return status; } +int ocfs2_free_local_alloc_bits(struct ocfs2_super *osb, + handle_t *handle, + struct ocfs2_alloc_context *ac, + u32 bit_off, + u32 num_bits) +{ + int status, start; + u32 clear_bits; + struct inode *local_alloc_inode; + void *bitmap; + struct ocfs2_dinode *alloc; + struct ocfs2_local_alloc *la; + + BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL); + + local_alloc_inode = ac->ac_inode; + alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data; + la = OCFS2_LOCAL_ALLOC(alloc); + + bitmap = la->la_bitmap; + start = bit_off - le32_to_cpu(la->la_bm_off); + clear_bits = num_bits; + + status = ocfs2_journal_access_di(handle, + INODE_CACHE(local_alloc_inode), + osb->local_alloc_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + while (clear_bits--) + ocfs2_clear_bit(start++, bitmap); + + le32_add_cpu(&alloc->id1.bitmap1.i_used, -num_bits); + ocfs2_journal_dirty(handle, osb->local_alloc_bh); + +bail: + return status; +} + static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc) { u32 count; diff --git a/fs/ocfs2/localalloc.h b/fs/ocfs2/localalloc.h index 1be9b58..44a7d1f 100644 --- a/fs/ocfs2/localalloc.h +++ b/fs/ocfs2/localalloc.h @@ -55,6 +55,12 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb, u32 *bit_off, u32 *num_bits); +int ocfs2_free_local_alloc_bits(struct ocfs2_super *osb, + handle_t *handle, + struct ocfs2_alloc_context *ac, + u32 bit_off, + u32 num_bits); + void ocfs2_local_alloc_seen_free_bits(struct ocfs2_super *osb, unsigned int num_clusters); void ocfs2_la_enable_worker(struct work_struct *work); diff --git a/fs/posix_acl.c b/fs/posix_acl.c index 38bae5a..11c54fd 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c @@ -521,8 +521,11 @@ posix_acl_chmod(struct inode *inode, umode_t mode) return -EOPNOTSUPP; acl = get_acl(inode, ACL_TYPE_ACCESS); - if (IS_ERR_OR_NULL(acl)) + if (IS_ERR_OR_NULL(acl)) { + if (acl == ERR_PTR(-EOPNOTSUPP)) + return 0; return PTR_ERR(acl); + } ret = __posix_acl_chmod(&acl, GFP_KERNEL, mode); if (ret) @@ -544,14 +547,15 @@ posix_acl_create(struct inode *dir, umode_t *mode, goto no_acl; p = get_acl(dir, ACL_TYPE_DEFAULT); - if (IS_ERR(p)) + if (IS_ERR(p)) { + if (p == ERR_PTR(-EOPNOTSUPP)) + goto apply_umask; return PTR_ERR(p); - - if 
(!p) { - *mode &= ~current_umask(); - goto no_acl; } + if (!p) + goto apply_umask; + *acl = posix_acl_clone(p, GFP_NOFS); if (!*acl) return -ENOMEM; @@ -575,6 +579,8 @@ posix_acl_create(struct inode *dir, umode_t *mode, } return 0; +apply_umask: + *mode &= ~current_umask(); no_acl: *default_acl = NULL; *acl = NULL; @@ -222,23 +222,6 @@ SYSCALL_DEFINE1(fdatasync, unsigned int, fd) return do_fsync(fd, 1); } -/** - * generic_write_sync - perform syncing after a write if file / inode is sync - * @file: file to which the write happened - * @pos: offset where the write started - * @count: length of the write - * - * This is just a simple wrapper about our general syncing function. - */ -int generic_write_sync(struct file *file, loff_t pos, loff_t count) -{ - if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host)) - return 0; - return vfs_fsync_range(file, pos, pos + count - 1, - (file->f_flags & __O_SYNC) ? 0 : 1); -} -EXPORT_SYMBOL(generic_write_sync); - /* * sys_sync_file_range() permits finely controlled syncing over a segment of * a file in the range offset .. (offset+nbytes-1) inclusive. If nbytes is diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 2e7989e..64b48ea 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -799,7 +799,7 @@ xfs_file_aio_write( XFS_STATS_ADD(xs_write_bytes, ret); /* Handle various SYNC-type writes */ - err = generic_write_sync(file, pos, ret); + err = generic_write_sync(file, iocb->ki_pos - ret, ret); if (err < 0) ret = err; } diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index fd8bf32..b4a745d 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -115,7 +115,6 @@ extern int copy_strings_kernel(int argc, const char *const *argv, extern int prepare_bprm_creds(struct linux_binprm *bprm); extern void install_exec_creds(struct linux_binprm *bprm); extern void set_binfmt(struct linux_binfmt *new); -extern void free_bprm(struct linux_binprm *); extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t); #endif /* _LINUX_BINFMTS_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 09f553c..6082956 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2079,6 +2079,7 @@ extern struct file * dentry_open(const struct path *, int, const struct cred *); extern int filp_close(struct file *, fl_owner_t id); extern struct filename *getname(const char __user *); +extern struct filename *getname_kernel(const char *); enum { FILE_CREATED = 1, @@ -2273,7 +2274,13 @@ extern int filemap_fdatawrite_range(struct address_space *mapping, extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync); extern int vfs_fsync(struct file *file, int datasync); -extern int generic_write_sync(struct file *file, loff_t pos, loff_t count); +static inline int generic_write_sync(struct file *file, loff_t pos, loff_t count) +{ + if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host)) + return 0; + return vfs_fsync_range(file, pos, pos + count - 1, + (file->f_flags & __O_SYNC) ? 0 : 1); +} extern void emergency_sync(void); extern void emergency_remount(void); #ifdef CONFIG_BLOCK diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 3ccfcec..b2fb167 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -379,12 +379,14 @@ struct nfs_openres { * Arguments to the open_confirm call. 
*/ struct nfs_open_confirmargs { + struct nfs4_sequence_args seq_args; const struct nfs_fh * fh; nfs4_stateid * stateid; struct nfs_seqid * seqid; }; struct nfs_open_confirmres { + struct nfs4_sequence_res seq_res; nfs4_stateid stateid; struct nfs_seqid * seqid; }; diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 26ebcf4..69ae03f 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -80,13 +80,14 @@ struct nvme_dev { struct dma_pool *prp_small_pool; int instance; int queue_count; - int db_stride; + u32 db_stride; u32 ctrl_config; struct msix_entry *entry; struct nvme_bar __iomem *bar; struct list_head namespaces; struct kref kref; struct miscdevice miscdev; + struct work_struct reset_work; char name[12]; char serial[20]; char model[40]; @@ -94,6 +95,8 @@ struct nvme_dev { u32 max_hw_sectors; u32 stripe_size; u16 oncs; + u16 abort_limit; + u8 initialized; }; /* @@ -165,6 +168,7 @@ int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11, struct sg_io_hdr; int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr); +int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg); int nvme_sg_get_version_num(int __user *ip); #endif /* _LINUX_NVME_H */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index e464b4e..d1fe1a7 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -228,9 +228,9 @@ PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1) TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback) PAGEFLAG(MappedToDisk, mappedtodisk) -/* PG_readahead is only used for file reads; PG_reclaim is only for writes */ +/* PG_readahead is only used for reads; PG_reclaim is only for writes */ PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim) -PAGEFLAG(Readahead, reclaim) /* Reminder to do async read-ahead */ +PAGEFLAG(Readahead, reclaim) TESTCLEARFLAG(Readahead, reclaim) #ifdef CONFIG_HIGHMEM /* diff --git a/include/linux/sched.h b/include/linux/sched.h index 68a0e84..a781dec 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -128,6 +128,7 @@ struct bio_list; struct fs_struct; struct perf_event_context; struct blk_plug; +struct filename; /* * List of flags we want to share for kernel threads, @@ -2311,7 +2312,7 @@ extern void do_group_exit(int); extern int allow_signal(int); extern int disallow_signal(int); -extern int do_execve(const char *, +extern int do_execve(struct filename *, const char __user * const __user *, const char __user * const __user *); extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index c557c6d..3a712e2 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -71,12 +71,14 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, THP_ZERO_PAGE_ALLOC, THP_ZERO_PAGE_ALLOC_FAILED, #endif +#ifdef CONFIG_DEBUG_TLBFLUSH #ifdef CONFIG_SMP NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */ NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */ -#endif +#endif /* CONFIG_SMP */ NR_TLB_LOCAL_FLUSH_ALL, NR_TLB_LOCAL_FLUSH_ONE, +#endif /* CONFIG_DEBUG_TLBFLUSH */ NR_VM_EVENT_ITEMS }; diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index a67b384..67ce70c 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -83,6 +83,14 @@ static inline void vm_events_fold_cpu(int cpu) #define count_vm_numa_events(x, y) do { (void)(y); } while (0) #endif /* CONFIG_NUMA_BALANCING */ 
+#ifdef CONFIG_DEBUG_TLBFLUSH +#define count_vm_tlb_event(x) count_vm_event(x) +#define count_vm_tlb_events(x, y) count_vm_events(x, y) +#else +#define count_vm_tlb_event(x) do {} while (0) +#define count_vm_tlb_events(x, y) do { (void)(y); } while (0) +#endif + #define __count_zone_vm_events(item, zone, delta) \ __count_vm_events(item##_NORMAL - ZONE_NORMAL + \ zone_idx(zone), delta) diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h index 989c04e..e5ab622 100644 --- a/include/uapi/linux/nvme.h +++ b/include/uapi/linux/nvme.h @@ -350,6 +350,16 @@ struct nvme_delete_queue { __u32 rsvd11[5]; }; +struct nvme_abort_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[9]; + __le16 sqid; + __u16 cid; + __u32 rsvd11[5]; +}; + struct nvme_download_firmware { __u8 opcode; __u8 flags; @@ -384,6 +394,7 @@ struct nvme_command { struct nvme_download_firmware dlfw; struct nvme_format_cmd format; struct nvme_dsm_cmd dsm; + struct nvme_abort_cmd abort; }; }; diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h index 7ad033d..a5af2a2 100644 --- a/include/xen/grant_table.h +++ b/include/xen/grant_table.h @@ -191,15 +191,11 @@ void gnttab_free_auto_xlat_frames(void); #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr)) int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, + struct gnttab_map_grant_ref *kmap_ops, struct page **pages, unsigned int count); -int gnttab_map_refs_userspace(struct gnttab_map_grant_ref *map_ops, - struct gnttab_map_grant_ref *kmap_ops, - struct page **pages, unsigned int count); int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, + struct gnttab_map_grant_ref *kunmap_ops, struct page **pages, unsigned int count); -int gnttab_unmap_refs_userspace(struct gnttab_unmap_grant_ref *unmap_ops, - struct gnttab_map_grant_ref *kunmap_ops, - struct page **pages, unsigned int count); /* Perform a batch of grant map/copy operations. Retry every batch slot * for which the hypervisor returns GNTST_eagain. 
This is typically due diff --git a/init/main.c b/init/main.c index 2fd9cef..eb03090 100644 --- a/init/main.c +++ b/init/main.c @@ -812,7 +812,7 @@ void __init load_default_modules(void) static int run_init_process(const char *init_filename) { argv_init[0] = init_filename; - return do_execve(init_filename, + return do_execve(getname_kernel(init_filename), (const char __user *const __user *)argv_init, (const char __user *const __user *)envp_init); } diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 10176cd..7aef2f4 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -1719,7 +1719,7 @@ void audit_putname(struct filename *name) struct audit_context *context = current->audit_context; BUG_ON(!context); - if (!context->in_syscall) { + if (!name->aname || !context->in_syscall) { #if AUDIT_DEBUG == 2 printk(KERN_ERR "%s:%d(:%d): final_putname(%p)\n", __FILE__, __LINE__, context->serial, name); diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index 4a1fef0..07cbdfe 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig @@ -40,6 +40,7 @@ config IRQ_EDGE_EOI_HANDLER # Generic configurable interrupt chip implementation config GENERIC_IRQ_CHIP bool + select IRQ_DOMAIN # Generic irq_domain hw <--> linux irq number translation config IRQ_DOMAIN diff --git a/kernel/kmod.c b/kernel/kmod.c index b086006..6b375af 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -239,7 +239,7 @@ static int ____call_usermodehelper(void *data) commit_creds(new); - retval = do_execve(sub_info->path, + retval = do_execve(getname_kernel(sub_info->path), (const char __user *const __user *)sub_info->argv, (const char __user *const __user *)sub_info->envp); if (!retval) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index dbf94a7..a48abea 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -119,7 +119,7 @@ menu "Compile-time checks and compiler options" config DEBUG_INFO bool "Compile the kernel with debug info" - depends on DEBUG_KERNEL + depends on DEBUG_KERNEL && !COMPILE_TEST help If you say Y here the resulting kernel image will include debugging info resulting in a larger kernel image. diff --git a/lib/Makefile b/lib/Makefile index 126b34f..48140e3 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o +GCOV_PROFILE_hweight.o := n CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o diff --git a/mm/filemap.c b/mm/filemap.c index d56d3c1..7a13f6a 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2553,8 +2553,8 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov, if (ret > 0) { ssize_t err; - err = generic_write_sync(file, pos, ret); - if (err < 0 && ret > 0) + err = generic_write_sync(file, iocb->ki_pos - ret, ret); + if (err < 0) ret = err; } return ret; diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 2d30e2c..7106cb1 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2173,11 +2173,12 @@ int __set_page_dirty_nobuffers(struct page *page) if (!TestSetPageDirty(page)) { struct address_space *mapping = page_mapping(page); struct address_space *mapping2; + unsigned long flags; if (!mapping) return 1; - spin_lock_irq(&mapping->tree_lock); + spin_lock_irqsave(&mapping->tree_lock, flags); mapping2 = page_mapping(page); if (mapping2) { /* Race with truncate? 
*/ BUG_ON(mapping2 != mapping); @@ -2186,7 +2187,7 @@ int __set_page_dirty_nobuffers(struct page *page) radix_tree_tag_set(&mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY); } - spin_unlock_irq(&mapping->tree_lock); + spin_unlock_irqrestore(&mapping->tree_lock, flags); if (mapping->host) { /* !PageAnon && !swapper_space */ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); diff --git a/mm/swap_state.c b/mm/swap_state.c index 98e85e9..e76ace3 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -63,6 +63,8 @@ unsigned long total_swapcache_pages(void) return ret; } +static atomic_t swapin_readahead_hits = ATOMIC_INIT(4); + void show_swap_cache_info(void) { printk("%lu pages in swap cache\n", total_swapcache_pages()); @@ -286,8 +288,11 @@ struct page * lookup_swap_cache(swp_entry_t entry) page = find_get_page(swap_address_space(entry), entry.val); - if (page) + if (page) { INC_CACHE_INFO(find_success); + if (TestClearPageReadahead(page)) + atomic_inc(&swapin_readahead_hits); + } INC_CACHE_INFO(find_total); return page; @@ -389,6 +394,50 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, return found_page; } +static unsigned long swapin_nr_pages(unsigned long offset) +{ + static unsigned long prev_offset; + unsigned int pages, max_pages, last_ra; + static atomic_t last_readahead_pages; + + max_pages = 1 << ACCESS_ONCE(page_cluster); + if (max_pages <= 1) + return 1; + + /* + * This heuristic has been found to work well on both sequential and + * random loads, swapping to hard disk or to SSD: please don't ask + * what the "+ 2" means, it just happens to work well, that's all. + */ + pages = atomic_xchg(&swapin_readahead_hits, 0) + 2; + if (pages == 2) { + /* + * We can have no readahead hits to judge by: but must not get + * stuck here forever, so check for an adjacent offset instead + * (and don't even bother to check whether swap type is same). + */ + if (offset != prev_offset + 1 && offset != prev_offset - 1) + pages = 1; + prev_offset = offset; + } else { + unsigned int roundup = 4; + while (roundup < pages) + roundup <<= 1; + pages = roundup; + } + + if (pages > max_pages) + pages = max_pages; + + /* Don't shrink readahead too fast */ + last_ra = atomic_read(&last_readahead_pages) / 2; + if (pages < last_ra) + pages = last_ra; + atomic_set(&last_readahead_pages, pages); + + return pages; +} + /** * swapin_readahead - swap in pages in hope we need them soon * @entry: swap entry of this memory @@ -412,11 +461,16 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, struct vm_area_struct *vma, unsigned long addr) { struct page *page; - unsigned long offset = swp_offset(entry); + unsigned long entry_offset = swp_offset(entry); + unsigned long offset = entry_offset; unsigned long start_offset, end_offset; - unsigned long mask = (1UL << page_cluster) - 1; + unsigned long mask; struct blk_plug plug; + mask = swapin_nr_pages(offset) - 1; + if (!mask) + goto skip; + /* Read a page_cluster sized and aligned cluster around offset. 
*/ start_offset = offset & ~mask; end_offset = offset | mask; @@ -430,10 +484,13 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, gfp_mask, vma, addr); if (!page) continue; + if (offset != entry_offset) + SetPageReadahead(page); page_cache_release(page); } blk_finish_plug(&plug); lru_add_drain(); /* Push any new pages onto the LRU now */ +skip: return read_swap_cache_async(entry, gfp_mask, vma, addr); } diff --git a/mm/swapfile.c b/mm/swapfile.c index c6c13b05..4a7f7e6 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1923,7 +1923,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) p->swap_map = NULL; cluster_info = p->cluster_info; p->cluster_info = NULL; - p->flags = 0; frontswap_map = frontswap_map_get(p); spin_unlock(&p->lock); spin_unlock(&swap_lock); @@ -1949,6 +1948,16 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) mutex_unlock(&inode->i_mutex); } filp_close(swap_file, NULL); + + /* + * Clear the SWP_USED flag after all resources are freed so that swapon + * can reuse this swap_info in alloc_swap_info() safely. It is ok to + * not hold p->lock after we cleared its SWP_WRITEOK. + */ + spin_lock(&swap_lock); + p->flags = 0; + spin_unlock(&swap_lock); + err = 0; atomic_inc(&proc_poll_event); wake_up_interruptible(&proc_poll_wait); diff --git a/mm/vmstat.c b/mm/vmstat.c index 7249614..def5dd2 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -851,12 +851,14 @@ const char * const vmstat_text[] = { "thp_zero_page_alloc", "thp_zero_page_alloc_failed", #endif +#ifdef CONFIG_DEBUG_TLBFLUSH #ifdef CONFIG_SMP "nr_tlb_remote_flush", "nr_tlb_remote_flush_received", -#endif +#endif /* CONFIG_SMP */ "nr_tlb_local_flush_all", "nr_tlb_local_flush_one", +#endif /* CONFIG_DEBUG_TLBFLUSH */ #endif /* CONFIG_VM_EVENTS_COUNTERS */ }; diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 0e478a0..30efc5c 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -840,9 +840,13 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor, if (!cursor->bvec_iter.bi_size) { bio = bio->bi_next; - cursor->bvec_iter = bio->bi_iter; + cursor->bio = bio; + if (bio) + cursor->bvec_iter = bio->bi_iter; + else + memset(&cursor->bvec_iter, 0, + sizeof(cursor->bvec_iter)); } - cursor->bio = bio; if (!cursor->last_piece) { BUG_ON(!cursor->resid); diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 010ff3b..0676f2b 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1427,6 +1427,40 @@ static void __send_queued(struct ceph_osd_client *osdc) } /* + * Caller should hold map_sem for read and request_mutex. + */ +static int __ceph_osdc_start_request(struct ceph_osd_client *osdc, + struct ceph_osd_request *req, + bool nofail) +{ + int rc; + + __register_request(osdc, req); + req->r_sent = 0; + req->r_got_reply = 0; + rc = __map_request(osdc, req, 0); + if (rc < 0) { + if (nofail) { + dout("osdc_start_request failed map, " + " will retry %lld\n", req->r_tid); + rc = 0; + } else { + __unregister_request(osdc, req); + } + return rc; + } + + if (req->r_osd == NULL) { + dout("send_request %p no up osds in pg\n", req); + ceph_monc_request_next_osdmap(&osdc->client->monc); + } else { + __send_queued(osdc); + } + + return 0; +} + +/* * Timeout callback, called every N seconds when 1 or more osd * requests has been active for more than N seconds. 
When this * happens, we ping all OSDs with requests who have timed out to @@ -1653,6 +1687,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, osdmap_epoch = ceph_decode_32(&p); /* lookup */ + down_read(&osdc->map_sem); mutex_lock(&osdc->request_mutex); req = __lookup_request(osdc, tid); if (req == NULL) { @@ -1709,7 +1744,6 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, dout("redirect pool %lld\n", redir.oloc.pool); __unregister_request(osdc, req); - mutex_unlock(&osdc->request_mutex); req->r_target_oloc = redir.oloc; /* struct */ @@ -1721,10 +1755,10 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, * successfully. In the future we might want to follow * original request's nofail setting here. */ - err = ceph_osdc_start_request(osdc, req, true); + err = __ceph_osdc_start_request(osdc, req, true); BUG_ON(err); - goto done; + goto out_unlock; } already_completed = req->r_got_reply; @@ -1742,8 +1776,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, req->r_got_reply = 1; } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) { dout("handle_reply tid %llu dup ack\n", tid); - mutex_unlock(&osdc->request_mutex); - goto done; + goto out_unlock; } dout("handle_reply tid %llu flags %d\n", tid, flags); @@ -1758,6 +1791,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, __unregister_request(osdc, req); mutex_unlock(&osdc->request_mutex); + up_read(&osdc->map_sem); if (!already_completed) { if (req->r_unsafe_callback && @@ -1775,10 +1809,14 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, complete_request(req); } -done: +out: dout("req=%p req->r_linger=%d\n", req, req->r_linger); ceph_osdc_put_request(req); return; +out_unlock: + mutex_unlock(&osdc->request_mutex); + up_read(&osdc->map_sem); + goto out; bad_put: req->r_result = -EIO; @@ -1791,6 +1829,7 @@ bad_put: ceph_osdc_put_request(req); bad_mutex: mutex_unlock(&osdc->request_mutex); + up_read(&osdc->map_sem); bad: pr_err("corrupt osd_op_reply got %d %d\n", (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len)); @@ -2351,34 +2390,16 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req, bool nofail) { - int rc = 0; + int rc; down_read(&osdc->map_sem); mutex_lock(&osdc->request_mutex); - __register_request(osdc, req); - req->r_sent = 0; - req->r_got_reply = 0; - rc = __map_request(osdc, req, 0); - if (rc < 0) { - if (nofail) { - dout("osdc_start_request failed map, " - " will retry %lld\n", req->r_tid); - rc = 0; - } else { - __unregister_request(osdc, req); - } - goto out_unlock; - } - if (req->r_osd == NULL) { - dout("send_request %p no up osds in pg\n", req); - ceph_monc_request_next_osdmap(&osdc->client->monc); - } else { - __send_queued(osdc); - } - rc = 0; -out_unlock: + + rc = __ceph_osdc_start_request(osdc, req, nofail); + mutex_unlock(&osdc->request_mutex); up_read(&osdc->map_sem); + return rc; } EXPORT_SYMBOL(ceph_osdc_start_request); @@ -2504,9 +2525,12 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client) err = -ENOMEM; osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify"); if (!osdc->notify_wq) - goto out_msgpool; + goto out_msgpool_reply; + return 0; +out_msgpool_reply: + ceph_msgpool_destroy(&osdc->msgpool_op_reply); out_msgpool: ceph_msgpool_destroy(&osdc->msgpool_op); out_mempool: diff --git a/security/Kconfig b/security/Kconfig index e9c6ac7..beb86b5 100644 --- 
a/security/Kconfig +++ b/security/Kconfig @@ -103,7 +103,7 @@ config INTEL_TXT config LSM_MMAP_MIN_ADDR int "Low address space for LSM to protect from user allocation" depends on SECURITY && SECURITY_SELINUX - default 32768 if ARM + default 32768 if ARM || (ARM64 && COMPAT) default 65536 help This is the portion of low virtual memory which should be protected diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c index 332ac8a..2df7b90 100644 --- a/security/selinux/nlmsgtab.c +++ b/security/selinux/nlmsgtab.c @@ -17,6 +17,7 @@ #include <linux/inet_diag.h> #include <linux/xfrm.h> #include <linux/audit.h> +#include <linux/sock_diag.h> #include "flask.h" #include "av_permissions.h" @@ -78,6 +79,7 @@ static struct nlmsg_perm nlmsg_tcpdiag_perms[] = { { TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, { DCCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, + { SOCK_DIAG_BY_FAMILY, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, }; static struct nlmsg_perm nlmsg_xfrm_perms[] = diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index c93c211..5d0144e 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -1232,6 +1232,10 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len, struct context context; int rc = 0; + /* An empty security context is never valid. */ + if (!scontext_len) + return -EINVAL; + if (!ss_initialized) { int i; diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index 7a426ed..df3652a 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c @@ -244,6 +244,19 @@ static void ad_fixup_inv_jack_detect(struct hda_codec *codec, } } +/* Toshiba Satellite L40 implements EAPD in a standard way unlike others */ +static void ad1986a_fixup_eapd(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct ad198x_spec *spec = codec->spec; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + codec->inv_eapd = 0; + spec->gen.keep_eapd_on = 1; + spec->eapd_nid = 0x1b; + } +} + enum { AD1986A_FIXUP_INV_JACK_DETECT, AD1986A_FIXUP_ULTRA, @@ -251,6 +264,7 @@ enum { AD1986A_FIXUP_3STACK, AD1986A_FIXUP_LAPTOP, AD1986A_FIXUP_LAPTOP_IMIC, + AD1986A_FIXUP_EAPD, }; static const struct hda_fixup ad1986a_fixups[] = { @@ -311,6 +325,10 @@ static const struct hda_fixup ad1986a_fixups[] = { .chained_before = 1, .chain_id = AD1986A_FIXUP_LAPTOP, }, + [AD1986A_FIXUP_EAPD] = { + .type = HDA_FIXUP_FUNC, + .v.func = ad1986a_fixup_eapd, + }, }; static const struct snd_pci_quirk ad1986a_fixup_tbl[] = { @@ -318,6 +336,7 @@ static const struct snd_pci_quirk ad1986a_fixup_tbl[] = { SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8100, "ASUS P5", AD1986A_FIXUP_3STACK), SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8200, "ASUS M2", AD1986A_FIXUP_3STACK), SND_PCI_QUIRK(0x10de, 0xcb84, "ASUS A8N-VM", AD1986A_FIXUP_3STACK), + SND_PCI_QUIRK(0x1179, 0xff40, "Toshiba Satellite L40", AD1986A_FIXUP_EAPD), SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_FIXUP_LAPTOP), SND_PCI_QUIRK_MASK(0x144d, 0xff00, 0xc000, "Samsung", AD1986A_FIXUP_SAMSUNG), SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_FIXUP_ULTRA), @@ -472,6 +491,8 @@ static int ad1983_add_spdif_mux_ctl(struct hda_codec *codec) static int patch_ad1983(struct hda_codec *codec) { struct ad198x_spec *spec; + static hda_nid_t conn_0c[] = { 0x08 }; + static hda_nid_t conn_0d[] = { 0x09 }; int err; err = alloc_ad_spec(codec); @@ -479,8 +500,14 @@ static int patch_ad1983(struct hda_codec *codec) return err; spec = codec->spec; + 
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 7a426ed..df3652a 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -244,6 +244,19 @@ static void ad_fixup_inv_jack_detect(struct hda_codec *codec,
 	}
 }
 
+/* Toshiba Satellite L40 implements EAPD in a standard way unlike others */
+static void ad1986a_fixup_eapd(struct hda_codec *codec,
+			       const struct hda_fixup *fix, int action)
+{
+	struct ad198x_spec *spec = codec->spec;
+
+	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+		codec->inv_eapd = 0;
+		spec->gen.keep_eapd_on = 1;
+		spec->eapd_nid = 0x1b;
+	}
+}
+
 enum {
 	AD1986A_FIXUP_INV_JACK_DETECT,
 	AD1986A_FIXUP_ULTRA,
@@ -251,6 +264,7 @@ enum {
 	AD1986A_FIXUP_3STACK,
 	AD1986A_FIXUP_LAPTOP,
 	AD1986A_FIXUP_LAPTOP_IMIC,
+	AD1986A_FIXUP_EAPD,
 };
 
 static const struct hda_fixup ad1986a_fixups[] = {
@@ -311,6 +325,10 @@ static const struct hda_fixup ad1986a_fixups[] = {
 		.chained_before = 1,
 		.chain_id = AD1986A_FIXUP_LAPTOP,
 	},
+	[AD1986A_FIXUP_EAPD] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = ad1986a_fixup_eapd,
+	},
 };
 
 static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
@@ -318,6 +336,7 @@ static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
 	SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8100, "ASUS P5", AD1986A_FIXUP_3STACK),
 	SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8200, "ASUS M2", AD1986A_FIXUP_3STACK),
 	SND_PCI_QUIRK(0x10de, 0xcb84, "ASUS A8N-VM", AD1986A_FIXUP_3STACK),
+	SND_PCI_QUIRK(0x1179, 0xff40, "Toshiba Satellite L40", AD1986A_FIXUP_EAPD),
 	SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_FIXUP_LAPTOP),
 	SND_PCI_QUIRK_MASK(0x144d, 0xff00, 0xc000, "Samsung", AD1986A_FIXUP_SAMSUNG),
 	SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_FIXUP_ULTRA),
@@ -472,6 +491,8 @@ static int ad1983_add_spdif_mux_ctl(struct hda_codec *codec)
 static int patch_ad1983(struct hda_codec *codec)
 {
 	struct ad198x_spec *spec;
+	static hda_nid_t conn_0c[] = { 0x08 };
+	static hda_nid_t conn_0d[] = { 0x09 };
 	int err;
 
 	err = alloc_ad_spec(codec);
@@ -479,8 +500,14 @@ static int patch_ad1983(struct hda_codec *codec)
 		return err;
 	spec = codec->spec;
 
+	spec->gen.mixer_nid = 0x0e;
 	spec->gen.beep_nid = 0x10;
 	set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
+
+	/* limit the loopback routes not to confuse the parser */
+	snd_hda_override_conn_list(codec, 0x0c, ARRAY_SIZE(conn_0c), conn_0c);
+	snd_hda_override_conn_list(codec, 0x0d, ARRAY_SIZE(conn_0d), conn_0d);
+
 	err = ad198x_parse_auto_config(codec, false);
 	if (err < 0)
 		goto error;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 56a8f18..d9693ca 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1821,6 +1821,7 @@ enum {
 	ALC889_FIXUP_IMAC91_VREF,
 	ALC889_FIXUP_MBA11_VREF,
 	ALC889_FIXUP_MBA21_VREF,
+	ALC889_FIXUP_MP11_VREF,
 	ALC882_FIXUP_INV_DMIC,
 	ALC882_FIXUP_NO_PRIMARY_HP,
 	ALC887_FIXUP_ASUS_BASS,
@@ -2190,6 +2191,12 @@ static const struct hda_fixup alc882_fixups[] = {
 		.chained = true,
 		.chain_id = ALC889_FIXUP_MBP_VREF,
 	},
+	[ALC889_FIXUP_MP11_VREF] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc889_fixup_mba11_vref,
+		.chained = true,
+		.chain_id = ALC885_FIXUP_MACPRO_GPIO,
+	},
 	[ALC882_FIXUP_INV_DMIC] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc_fixup_inv_dmic_0x12,
@@ -2253,7 +2260,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
 	SND_PCI_QUIRK(0x106b, 0x00a1, "Macbook", ALC889_FIXUP_MBP_VREF),
 	SND_PCI_QUIRK(0x106b, 0x00a4, "MacbookPro 4,1", ALC889_FIXUP_MBP_VREF),
-	SND_PCI_QUIRK(0x106b, 0x0c00, "Mac Pro", ALC885_FIXUP_MACPRO_GPIO),
+	SND_PCI_QUIRK(0x106b, 0x0c00, "Mac Pro", ALC889_FIXUP_MP11_VREF),
 	SND_PCI_QUIRK(0x106b, 0x1000, "iMac 24", ALC885_FIXUP_MACPRO_GPIO),
 	SND_PCI_QUIRK(0x106b, 0x2800, "AppleTV", ALC885_FIXUP_MACPRO_GPIO),
 	SND_PCI_QUIRK(0x106b, 0x2c00, "MacbookPro rev3", ALC889_FIXUP_MBP_VREF),
@@ -4427,6 +4434,9 @@ static void alc269_fill_coef(struct hda_codec *codec)
 	if (spec->codec_variant != ALC269_TYPE_ALC269VB)
 		return;
 
+	/* ALC271X doesn't seem to support these COEFs (bko#52181) */
+	if (!strcmp(codec->chip_name, "ALC271X"))
+		return;
 
 	if ((alc_get_coef0(codec) & 0x00ff) < 0x015) {
 		alc_write_coef_idx(codec, 0xf, 0x960b);
diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
index de9408b..e05a86b 100644
--- a/sound/usb/Kconfig
+++ b/sound/usb/Kconfig
@@ -14,6 +14,7 @@ config SND_USB_AUDIO
 	select SND_HWDEP
 	select SND_RAWMIDI
 	select SND_PCM
+	select BITREVERSE
 	help
 	  Say Y here to include support for USB audio and
 	  USB MIDI devices.
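Both HDA changes ride on the same quirk machinery: a PCI subsystem ID selects an entry in the fixup table, and entries can chain, so ALC889_FIXUP_MP11_VREF applies the MacBook Air vref function and then chains on to the existing Mac Pro GPIO setup. A toy model of that chain walk, with hypothetical types and a depth guard (the real traversal lives in the HDA core, not here):

#include <stdio.h>

struct mini_fixup {
    const char *name;
    int chained;   /* nonzero: continue with chain_id after this entry */
    int chain_id;
};

static void apply_fixup_chain(const struct mini_fixup *tab, int id)
{
    int depth;

    for (depth = 0; depth < 10; depth++) { /* guard against cycles */
        printf("applying %s\n", tab[id].name);
        if (!tab[id].chained)
            break;
        id = tab[id].chain_id;
    }
}

int main(void)
{
    enum { MACPRO_GPIO, MP11_VREF };
    static const struct mini_fixup tab[] = {
        [MACPRO_GPIO] = { "ALC885_FIXUP_MACPRO_GPIO", 0, 0 },
        [MP11_VREF]   = { "ALC889_FIXUP_MP11_VREF", 1, MACPRO_GPIO },
    };

    apply_fixup_chain(tab, MP11_VREF); /* vref fixup, then GPIO setup */
    return 0;
}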
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index cfede86..b22dbb1 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -63,11 +63,35 @@ static int build_id_cache__kcore_dir(char *dir, size_t sz)
 	return 0;
 }
 
+static bool same_kallsyms_reloc(const char *from_dir, char *to_dir)
+{
+	char from[PATH_MAX];
+	char to[PATH_MAX];
+	const char *name;
+	u64 addr1 = 0, addr2 = 0;
+	int i;
+
+	scnprintf(from, sizeof(from), "%s/kallsyms", from_dir);
+	scnprintf(to, sizeof(to), "%s/kallsyms", to_dir);
+
+	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+		addr1 = kallsyms__get_function_start(from, name);
+		if (addr1)
+			break;
+	}
+
+	if (name)
+		addr2 = kallsyms__get_function_start(to, name);
+
+	return addr1 == addr2;
+}
+
 static int build_id_cache__kcore_existing(const char *from_dir, char *to_dir,
 					  size_t to_dir_sz)
 {
 	char from[PATH_MAX];
 	char to[PATH_MAX];
+	char to_subdir[PATH_MAX];
 	struct dirent *dent;
 	int ret = -1;
 	DIR *d;
@@ -86,10 +110,11 @@ static int build_id_cache__kcore_existing(const char *from_dir, char *to_dir,
 			continue;
 		scnprintf(to, sizeof(to), "%s/%s/modules", to_dir,
 			  dent->d_name);
-		if (!compare_proc_modules(from, to)) {
-			scnprintf(to, sizeof(to), "%s/%s", to_dir,
-				  dent->d_name);
-			strlcpy(to_dir, to, to_dir_sz);
+		scnprintf(to_subdir, sizeof(to_subdir), "%s/%s",
+			  to_dir, dent->d_name);
+		if (!compare_proc_modules(from, to) &&
+		    same_kallsyms_reloc(from_dir, to_subdir)) {
+			strlcpy(to_dir, to_subdir, to_dir_sz);
 			ret = 0;
 			break;
 		}
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 3c394bf..af47531 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -287,10 +287,7 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
 	 * have no _text sometimes.
 	 */
 	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-						 machine, "_text");
-	if (err < 0)
-		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-							 machine, "_stext");
+						 machine);
 	if (err < 0)
 		pr_err("Couldn't record guest kernel [%d]'s reference"
 		       " relocation symbol.\n", machine->pid);
@@ -457,10 +454,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
 	}
 
 	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-						 machine, "_text");
-	if (err < 0)
-		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-							 machine, "_stext");
+						 machine);
 	if (err < 0)
 		pr_err("Couldn't record kernel reference relocation symbol\n"
 		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index 67e5d0c..63a0e6f 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -454,7 +454,6 @@ So to start with, in order to add HAVE_PERF_EVENTS to your Kconfig, you
 will need at least this:
 	- asm/perf_event.h - a basic stub will suffice at first
 	- support for atomic64 types (and associated helper functions)
-	- set_perf_event_pending() implemented
 
 If your architecture does have hardware capabilities, you can override the
 weak stub hw_perf_event_init() to register hardware counters.
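Instead of hard-coding "_text" with an "_stext" retry, the tools now walk ref_reloc_sym_names[] and take the first reference symbol a given kallsyms file actually contains, via kallsyms__get_function_start(). A standalone approximation of that helper, parsing the /proc/kallsyms text format directly rather than going through perf's kallsyms__parse():

#include <stdio.h>
#include <string.h>

/* Return the address of 'want' in a kallsyms-format file, 0 if absent. */
static unsigned long long kallsyms_start(const char *path, const char *want)
{
    unsigned long long addr, found = 0;
    char line[512], type, name[256];
    FILE *f = fopen(path, "r");

    if (!f)
        return 0;
    while (fgets(line, sizeof(line), f)) {
        if (sscanf(line, "%llx %c %255s", &addr, &type, name) == 3 &&
            !strcmp(name, want)) {
            found = addr;
            break;
        }
    }
    fclose(f);
    return found;
}

int main(void)
{
    /* same fallback order as ref_reloc_sym_names[] */
    static const char *names[] = { "_text", "_stext", NULL };
    unsigned long long addr = 0;
    int i;

    for (i = 0; names[i] && !addr; i++)
        addr = kallsyms_start("/proc/kallsyms", names[i]);
    printf("reference symbol at %#llx\n", addr);
    return 0;
}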
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 7daa806..e84fa26 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -100,8 +100,8 @@
 
 #ifdef __aarch64__
 #define mb()		asm volatile("dmb ish" ::: "memory")
-#define wmb()		asm volatile("dmb ishld" ::: "memory")
-#define rmb()		asm volatile("dmb ishst" ::: "memory")
+#define wmb()		asm volatile("dmb ishst" ::: "memory")
+#define rmb()		asm volatile("dmb ishld" ::: "memory")
 #define cpu_relax()	asm volatile("yield" ::: "memory")
 #endif
 
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index 2bd13ed..3d90880 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -26,7 +26,6 @@ int test__vmlinux_matches_kallsyms(void)
 	struct map *kallsyms_map, *vmlinux_map;
 	struct machine kallsyms, vmlinux;
 	enum map_type type = MAP__FUNCTION;
-	struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };
 	u64 mem_start, mem_end;
 
 	/*
@@ -70,14 +69,6 @@ int test__vmlinux_matches_kallsyms(void)
 	 */
 	kallsyms_map = machine__kernel_map(&kallsyms, type);
 
-	sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
-	if (sym == NULL) {
-		pr_debug("dso__find_symbol_by_name ");
-		goto out;
-	}
-
-	ref_reloc_sym.addr = UM(sym->start);
-
 	/*
 	 * Step 5:
 	 *
@@ -89,7 +80,6 @@ int test__vmlinux_matches_kallsyms(void)
 	}
 
 	vmlinux_map = machine__kernel_map(&vmlinux, type);
-	map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;
 
 	/*
 	 * Step 6:
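The perf.h hunk fixes swapped AArch64 barriers: wmb() is a write barrier and needs the store variant "dmb ishst", while rmb() is a read barrier and needs the load variant "dmb ishld"; the previous definitions had the two mnemonics crossed. The classic message-passing pattern the corrected macros order (an illustrative sketch; the non-arm64 branch here is only a compiler barrier, not a hardware one):

#ifdef __aarch64__
#define wmb()   asm volatile("dmb ishst" ::: "memory") /* order stores */
#define rmb()   asm volatile("dmb ishld" ::: "memory") /* order loads  */
#else
#define wmb()   asm volatile("" ::: "memory")
#define rmb()   asm volatile("" ::: "memory")
#endif

static volatile int data, ready;

void producer(void)
{
    data = 42;
    wmb();      /* data must be visible before ready is set */
    ready = 1;
}

int consumer(void)
{
    while (!ready)
        ;       /* spin until the flag is observed */
    rmb();      /* don't let the data load pass the ready load */
    return data;
}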
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 1fc1c2f..b0f3ca8 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -470,23 +470,32 @@ static int find_symbol_cb(void *arg, const char *name, char type,
 	return 1;
 }
 
+u64 kallsyms__get_function_start(const char *kallsyms_filename,
+				 const char *symbol_name)
+{
+	struct process_symbol_args args = { .name = symbol_name, };
+
+	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
+		return 0;
+
+	return args.start;
+}
+
 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
 				       perf_event__handler_t process,
-				       struct machine *machine,
-				       const char *symbol_name)
+				       struct machine *machine)
 {
 	size_t size;
-	const char *filename, *mmap_name;
-	char path[PATH_MAX];
+	const char *mmap_name;
 	char name_buff[PATH_MAX];
 	struct map *map;
+	struct kmap *kmap;
 	int err;
 	/*
 	 * We should get this from /sys/kernel/sections/.text, but till that is
 	 * available use this, and after it is use this as a fallback for older
 	 * kernels.
 	 */
-	struct process_symbol_args args = { .name = symbol_name, };
 	union perf_event *event = zalloc((sizeof(event->mmap) +
 					  machine->id_hdr_size));
 	if (event == NULL) {
@@ -502,30 +511,19 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
 		 * see kernel/perf_event.c __perf_event_mmap
 		 */
 		event->header.misc = PERF_RECORD_MISC_KERNEL;
-		filename = "/proc/kallsyms";
 	} else {
 		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
-		if (machine__is_default_guest(machine))
-			filename = (char *) symbol_conf.default_guest_kallsyms;
-		else {
-			sprintf(path, "%s/proc/kallsyms", machine->root_dir);
-			filename = path;
-		}
-	}
-
-	if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) {
-		free(event);
-		return -ENOENT;
 	}
 
 	map = machine->vmlinux_maps[MAP__FUNCTION];
+	kmap = map__kmap(map);
 	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
-			"%s%s", mmap_name, symbol_name) + 1;
+			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
 	size = PERF_ALIGN(size, sizeof(u64));
 	event->mmap.header.type = PERF_RECORD_MMAP;
 	event->mmap.header.size = (sizeof(event->mmap) -
 			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
-	event->mmap.pgoff = args.start;
+	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
 	event->mmap.start = map->start;
 	event->mmap.len   = map->end - event->mmap.start;
 	event->mmap.pid   = machine->pid;
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index faf6e21..851fa06 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -214,8 +214,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
 				   struct machine *machine, bool mmap_data);
 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
 				       perf_event__handler_t process,
-				       struct machine *machine,
-				       const char *symbol_name);
+				       struct machine *machine);
 
 int perf_event__synthesize_modules(struct perf_tool *tool,
 				   perf_event__handler_t process,
@@ -279,4 +278,7 @@ size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf(union perf_event *event, FILE *fp);
 
+u64 kallsyms__get_function_start(const char *kallsyms_filename,
+				 const char *symbol_name);
+
 #endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/include/asm/hash.h b/tools/perf/util/include/asm/hash.h
new file mode 100644
index 0000000..d82b170b
--- /dev/null
+++ b/tools/perf/util/include/asm/hash.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_GENERIC_HASH_H
+#define __ASM_GENERIC_HASH_H
+
+/* Stub */
+
+#endif /* __ASM_GENERIC_HASH_H */
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index ded7459..c872991 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -496,19 +496,22 @@ static int symbol__in_kernel(void *arg, const char *name,
 	return 1;
 }
 
+static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
+					   size_t bufsz)
+{
+	if (machine__is_default_guest(machine))
+		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
+	else
+		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
+}
+
 /* Figure out the start address of kernel map from /proc/kallsyms */
 static u64 machine__get_kernel_start_addr(struct machine *machine)
 {
-	const char *filename;
-	char path[PATH_MAX];
+	char filename[PATH_MAX];
 	struct process_args args;
 
-	if (machine__is_default_guest(machine))
-		filename = (char *)symbol_conf.default_guest_kallsyms;
-	else {
-		sprintf(path, "%s/proc/kallsyms", machine->root_dir);
-		filename = path;
-	}
+	machine__get_kallsyms_filename(machine, filename, PATH_MAX);
 
 	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
 		return 0;
@@ -829,9 +832,25 @@ static int machine__create_modules(struct machine *machine)
 	return 0;
 }
 
+const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
+
 int machine__create_kernel_maps(struct machine *machine)
 {
 	struct dso *kernel = machine__get_kernel(machine);
+	char filename[PATH_MAX];
+	const char *name;
+	u64 addr = 0;
+	int i;
+
+	machine__get_kallsyms_filename(machine, filename, PATH_MAX);
+
+	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+		addr = kallsyms__get_function_start(filename, name);
+		if (addr)
+			break;
+	}
+	if (!addr)
+		return -1;
 
 	if (kernel == NULL ||
 	    __machine__create_kernel_maps(machine, kernel) < 0)
@@ -850,6 +869,13 @@ int machine__create_kernel_maps(struct machine *machine)
 	 * Now that we have all the maps created, just set the ->end of them:
 	 */
 	map_groups__fixup_end(&machine->kmaps);
+
+	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
+					     addr)) {
+		machine__destroy_kernel_maps(machine);
+		return -1;
+	}
+
 	return 0;
 }
 
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 4771330..f77e91e 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -18,6 +18,8 @@ union perf_event;
 #define	HOST_KERNEL_ID			(-1)
 #define	DEFAULT_GUEST_KERNEL_ID		(0)
 
+extern const char *ref_reloc_sym_names[];
+
 struct machine {
 	struct rb_node	  rb_node;
 	pid_t		  pid;
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 3b97513..39cd2d0 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -39,6 +39,7 @@ void map__init(struct map *map, enum map_type type,
 	map->start    = start;
 	map->end      = end;
 	map->pgoff    = pgoff;
+	map->reloc    = 0;
 	map->dso      = dso;
 	map->map_ip   = map__map_ip;
 	map->unmap_ip = map__unmap_ip;
@@ -288,7 +289,7 @@ u64 map__rip_2objdump(struct map *map, u64 rip)
 	if (map->dso->rel)
 		return rip - map->pgoff;
 
-	return map->unmap_ip(map, rip);
+	return map->unmap_ip(map, rip) - map->reloc;
 }
 
 /**
@@ -311,7 +312,7 @@ u64 map__objdump_2mem(struct map *map, u64 ip)
 	if (map->dso->rel)
 		return map->unmap_ip(map, ip + map->pgoff);
 
-	return ip;
+	return ip + map->reloc;
 }
 
 void map_groups__init(struct map_groups *mg)
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 18068c6..257e513 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -36,6 +36,7 @@ struct map {
 	bool			erange_warned;
 	u32			priv;
 	u64			pgoff;
+	u64			reloc;
 	u32			maj, min; /* only valid for MMAP2 record */
 	u64			ino;      /* only valid for MMAP2 record */
 	u64			ino_generation;/* only valid for MMAP2 record */
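The new map->reloc field records how far the running kernel sits from the address vmlinux was linked at: dso__load_sym() (next hunk) computes it as ref_reloc_sym->addr, the run-time address taken from the synthesized mmap event, minus unrelocated_addr from the ELF symbol table, and the map__rip_2objdump()/map__objdump_2mem() pair subtract or add it when translating between the two views. A worked example with made-up numbers (the real conversions also go through unmap_ip(), elided here):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    /* hypothetical: kernel booted 2 MiB above its link address */
    uint64_t unrelocated = 0xffffffff81000000ULL; /* _text in the ELF     */
    uint64_t runtime     = 0xffffffff81200000ULL; /* _text from kallsyms  */
    uint64_t reloc = runtime - unrelocated;       /* -> map->reloc        */

    uint64_t rip = 0xffffffff81234567ULL;         /* sampled address      */
    /* map__rip_2objdump(): runtime address -> address objdump prints */
    printf("objdump sees %#" PRIx64 "\n", rip - reloc);

    uint64_t objdump_addr = 0xffffffff81001234ULL;
    /* map__objdump_2mem(): objdump address -> runtime address */
    printf("runtime addr %#" PRIx64 "\n", objdump_addr + reloc);
    return 0;
}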
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 7594567..3e9f336 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -751,6 +751,8 @@ int dso__load_sym(struct dso *dso, struct map *map,
 			if (strcmp(elf_name, kmap->ref_reloc_sym->name))
 				continue;
 			kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
+			map->reloc = kmap->ref_reloc_sym->addr -
+				     kmap->ref_reloc_sym->unrelocated_addr;
 			break;
 		}
 	}
@@ -922,6 +924,7 @@ int dso__load_sym(struct dso *dso, struct map *map,
 				  (u64)shdr.sh_offset);
 			sym.st_value -= shdr.sh_addr - shdr.sh_offset;
 		}
+new_symbol:
 		/*
 		 * We need to figure out if the object was created from C++ sources
 		 * DWARF DW_compile_unit has this, but we don't always have access
@@ -933,7 +936,6 @@ int dso__load_sym(struct dso *dso, struct map *map,
 			if (demangled != NULL)
 				elf_name = demangled;
 		}
-new_symbol:
 		f = symbol__new(sym.st_value, sym.st_size,
 				GELF_ST_BIND(sym.st_info), elf_name);
 		free(demangled);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 39ce9ad..a9d758a 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -627,7 +627,7 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
 * kernel range is broken in several maps, named [kernel].N, as we don't have
 * the original ELF section names vmlinux have.
 */
-static int dso__split_kallsyms(struct dso *dso, struct map *map,
+static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
			       symbol_filter_t filter)
 {
 	struct map_groups *kmaps = map__kmap(map)->kmaps;
@@ -692,6 +692,12 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map,
 			char dso_name[PATH_MAX];
 			struct dso *ndso;
 
+			if (delta) {
+				/* Kernel was relocated at boot time */
+				pos->start -= delta;
+				pos->end -= delta;
+			}
+
 			if (count == 0) {
 				curr_map = map;
 				goto filter_symbol;
@@ -721,6 +727,10 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map,
 			curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
 			map_groups__insert(kmaps, curr_map);
 			++kernel_range;
+		} else if (delta) {
+			/* Kernel was relocated at boot time */
+			pos->start -= delta;
+			pos->end -= delta;
 		}
 filter_symbol:
 		if (filter && filter(curr_map, pos)) {
@@ -976,6 +986,23 @@ static int validate_kcore_modules(const char *kallsyms_filename,
 	return 0;
 }
 
+static int validate_kcore_addresses(const char *kallsyms_filename,
+				    struct map *map)
+{
+	struct kmap *kmap = map__kmap(map);
+
+	if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
+		u64 start;
+
+		start = kallsyms__get_function_start(kallsyms_filename,
+						     kmap->ref_reloc_sym->name);
+		if (start != kmap->ref_reloc_sym->addr)
+			return -EINVAL;
+	}
+
+	return validate_kcore_modules(kallsyms_filename, map);
+}
+
 struct kcore_mapfn_data {
 	struct dso *dso;
 	enum map_type type;
@@ -1019,8 +1046,8 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
 				 kallsyms_filename))
 		return -EINVAL;
 
-	/* All modules must be present at their original addresses */
-	if (validate_kcore_modules(kallsyms_filename, map))
+	/* Modules and kernel must be present at their original addresses */
+	if (validate_kcore_addresses(kallsyms_filename, map))
 		return -EINVAL;
 
 	md.dso = dso;
@@ -1113,15 +1140,41 @@ out_err:
 	return -EINVAL;
 }
 
+/*
+ * If the kernel is relocated at boot time, kallsyms won't match. Compute the
+ * delta based on the relocation reference symbol.
+ */
+static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
+{
+	struct kmap *kmap = map__kmap(map);
+	u64 addr;
+
+	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
+		return 0;
+
+	addr = kallsyms__get_function_start(filename,
+					    kmap->ref_reloc_sym->name);
+	if (!addr)
+		return -1;
+
+	*delta = addr - kmap->ref_reloc_sym->addr;
+	return 0;
+}
+
 int dso__load_kallsyms(struct dso *dso, const char *filename,
 		       struct map *map, symbol_filter_t filter)
 {
+	u64 delta = 0;
+
 	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
 		return -1;
 
 	if (dso__load_all_kallsyms(dso, filename, map) < 0)
 		return -1;
 
+	if (kallsyms__delta(map, filename, &delta))
+		return -1;
+
 	symbols__fixup_duplicate(&dso->symbols[map->type]);
 	symbols__fixup_end(&dso->symbols[map->type]);
 
@@ -1133,7 +1186,7 @@ int dso__load_kallsyms(struct dso *dso, const char *filename,
 	if (!dso__load_kcore(dso, map, filename))
 		return dso__split_kallsyms_for_kcore(dso, map, filter);
 	else
-		return dso__split_kallsyms(dso, map, filter);
+		return dso__split_kallsyms(dso, map, delta, filter);
 }
 
 static int dso__load_perf_map(struct dso *dso, struct map *map,
@@ -1424,7 +1477,7 @@ static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
 			continue;
 		scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
 			  "%s/%s/kallsyms", dir, dent->d_name);
-		if (!validate_kcore_modules(kallsyms_filename, map)) {
+		if (!validate_kcore_addresses(kallsyms_filename, map)) {
 			strlcpy(dir, kallsyms_filename, dir_sz);
 			ret = 0;
 			break;
@@ -1479,7 +1532,7 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
 	if (fd != -1) {
 		close(fd);
 		/* If module maps match go with /proc/kallsyms */
-		if (!validate_kcore_modules("/proc/kallsyms", map))
+		if (!validate_kcore_addresses("/proc/kallsyms", map))
 			goto proc_kallsyms;
 	}
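kallsyms__delta() covers the same boot-time relocation on the kallsyms path: if the kallsyms file now being read places the reference symbol somewhere other than the address recorded in kmap->ref_reloc_sym->addr, dso__split_kallsyms() shifts every parsed symbol back by the difference so it lines up with the session. Again with made-up numbers:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    /* hypothetical addresses for the reference symbol */
    uint64_t recorded = 0xffffffff81000000ULL; /* kmap->ref_reloc_sym->addr */
    uint64_t current  = 0xffffffff81200000ULL; /* from the kallsyms file    */
    uint64_t delta    = current - recorded;    /* kallsyms__delta() result  */

    uint64_t sym_start = 0xffffffff81234560ULL; /* a symbol as parsed */
    /* dso__split_kallsyms() shifts each symbol back into session terms */
    printf("adjusted start %#" PRIx64 "\n", sym_start - delta);
    return 0;
}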