157 files changed, 1247 insertions, 608 deletions
diff --git a/Documentation/networking/dpaa2/overview.rst b/Documentation/networking/dpaa2/overview.rst index 79fede4..d638b5a 100644 --- a/Documentation/networking/dpaa2/overview.rst +++ b/Documentation/networking/dpaa2/overview.rst @@ -1,5 +1,6 @@ .. include:: <isonum.txt> +========================================================= DPAA2 (Data Path Acceleration Architecture Gen2) Overview ========================================================= diff --git a/MAINTAINERS b/MAINTAINERS index f6a9b08..7cebd5b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5444,6 +5444,7 @@ F: drivers/iommu/exynos-iommu.c EZchip NPS platform support M: Vineet Gupta <vgupta@synopsys.com> +M: Ofer Levi <oferle@mellanox.com> S: Supported F: arch/arc/plat-eznps F: arch/arc/boot/dts/eznps.dts diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 9cf59fc..5151d81 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -50,6 +50,9 @@ config ARC select HAVE_KERNEL_LZMA select ARCH_HAS_PTE_SPECIAL +config ARCH_HAS_CACHE_LINE_SIZE + def_bool y + config MIGHT_HAVE_PCI bool diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h index 8486f32..ff7d323 100644 --- a/arch/arc/include/asm/cache.h +++ b/arch/arc/include/asm/cache.h @@ -48,7 +48,9 @@ }) /* Largest line length for either L1 or L2 is 128 bytes */ -#define ARCH_DMA_MINALIGN 128 +#define SMP_CACHE_BYTES 128 +#define cache_line_size() SMP_CACHE_BYTES +#define ARCH_DMA_MINALIGN SMP_CACHE_BYTES extern void arc_cache_init(void); extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h index d5da211..03d6bb0 100644 --- a/arch/arc/include/asm/delay.h +++ b/arch/arc/include/asm/delay.h @@ -17,8 +17,11 @@ #ifndef __ASM_ARC_UDELAY_H #define __ASM_ARC_UDELAY_H +#include <asm-generic/types.h> #include <asm/param.h> /* HZ */ +extern unsigned long loops_per_jiffy; + static inline void __delay(unsigned long loops) { __asm__ __volatile__( diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c index 9dbe645..25c6319 100644 --- a/arch/arc/mm/cache.c +++ b/arch/arc/mm/cache.c @@ -1038,7 +1038,7 @@ void flush_cache_mm(struct mm_struct *mm) void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr, unsigned long pfn) { - unsigned int paddr = pfn << PAGE_SHIFT; + phys_addr_t paddr = pfn << PAGE_SHIFT; u_vaddr &= PAGE_MASK; @@ -1058,8 +1058,9 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long u_vaddr) { /* TBD: do we really need to clear the kernel mapping */ - __flush_dcache_page(page_address(page), u_vaddr); - __flush_dcache_page(page_address(page), page_address(page)); + __flush_dcache_page((phys_addr_t)page_address(page), u_vaddr); + __flush_dcache_page((phys_addr_t)page_address(page), + (phys_addr_t)page_address(page)); } @@ -1246,6 +1247,16 @@ void __init arc_cache_init_master(void) } } + /* + * Check that SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) is larger + * or equal to any cache line length. 
+ */ + BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES, + "SMP_CACHE_BYTES must be >= any cache line length"); + if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES)) + panic("L2 Cache line [%d] > kernel Config [%d]\n", + l2_line_sz, SMP_CACHE_BYTES); + /* Note that SLC disable not formally supported till HS 3.0 */ if (is_isa_arcv2() && l2_line_sz && !slc_enable) arc_slc_disable(); diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index 8c10718..ec47e60 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c @@ -129,14 +129,59 @@ int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, return ret; } +/* + * Cache operations depending on function and direction argument, inspired by + * https://lkml.org/lkml/2018/5/18/979 + * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20] + * dma-mapping: provide a generic dma-noncoherent implementation)" + * + * | map == for_device | unmap == for_cpu + * |---------------------------------------------------------------- + * TO_DEV | writeback writeback | none none + * FROM_DEV | invalidate invalidate | invalidate* invalidate* + * BIDIR | writeback+inv writeback+inv | invalidate invalidate + * + * [*] needed for CPU speculative prefetches + * + * NOTE: we don't check the validity of direction argument as it is done in + * upper layer functions (in include/linux/dma-mapping.h) + */ + void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, size_t size, enum dma_data_direction dir) { - dma_cache_wback(paddr, size); + switch (dir) { + case DMA_TO_DEVICE: + dma_cache_wback(paddr, size); + break; + + case DMA_FROM_DEVICE: + dma_cache_inv(paddr, size); + break; + + case DMA_BIDIRECTIONAL: + dma_cache_wback_inv(paddr, size); + break; + + default: + break; + } } void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, size_t size, enum dma_data_direction dir) { - dma_cache_inv(paddr, size); + switch (dir) { + case DMA_TO_DEVICE: + break; + + /* FROM_DEVICE invalidate needed if speculative CPU prefetch only */ + case DMA_FROM_DEVICE: + case DMA_BIDIRECTIONAL: + dma_cache_inv(paddr, size); + break; + + default: + break; + } } diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h index 0c7d110..4f6a167 100644 --- a/arch/arc/plat-eznps/include/plat/ctop.h +++ b/arch/arc/plat-eznps/include/plat/ctop.h @@ -21,6 +21,7 @@ #error "Incorrect ctop.h include" #endif +#include <linux/types.h> #include <soc/nps/common.h> /* core auxiliary registers */ @@ -143,6 +144,15 @@ struct nps_host_reg_gim_p_int_dst { }; /* AUX registers definition */ +struct nps_host_reg_aux_dpc { + union { + struct { + u32 ien:1, men:1, hen:1, reserved:29; + }; + u32 value; + }; +}; + struct nps_host_reg_aux_udmc { union { struct { diff --git a/arch/arc/plat-eznps/mtm.c b/arch/arc/plat-eznps/mtm.c index 2388de3..ed0077e 100644 --- a/arch/arc/plat-eznps/mtm.c +++ b/arch/arc/plat-eznps/mtm.c @@ -15,6 +15,8 @@ */ #include <linux/smp.h> +#include <linux/init.h> +#include <linux/kernel.h> #include <linux/io.h> #include <linux/log2.h> #include <asm/arcregs.h> @@ -157,10 +159,10 @@ void mtm_enable_core(unsigned int cpu) /* Verify and set the value of the mtm hs counter */ static int __init set_mtm_hs_ctr(char *ctr_str) { - long hs_ctr; + int hs_ctr; int ret; - ret = kstrtol(ctr_str, 0, &hs_ctr); + ret = kstrtoint(ctr_str, 0, &hs_ctr); if (ret || hs_ctr > MT_HS_CNT_MAX || hs_ctr < MT_HS_CNT_MIN) { pr_err("** Invalid @nps_mtm_hs_ctr [%d] needs to be [%d:%d] (incl)\n", diff --git a/arch/arm/kernel/entry-common.S 
b/arch/arm/kernel/entry-common.S index 106a146..746565a 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -48,6 +48,7 @@ saved_pc .req lr * from those features make this path too inefficient. */ ret_fast_syscall: +__ret_fast_syscall: UNWIND(.fnstart ) UNWIND(.cantunwind ) disable_irq_notrace @ disable interrupts @@ -78,6 +79,7 @@ fast_work_pending: * call. */ ret_fast_syscall: +__ret_fast_syscall: UNWIND(.fnstart ) UNWIND(.cantunwind ) str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 @@ -255,7 +257,7 @@ local_restart: tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls? bne __sys_trace - invoke_syscall tbl, scno, r10, ret_fast_syscall + invoke_syscall tbl, scno, r10, __ret_fast_syscall add r1, sp, #S_OFF 2: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE) diff --git a/arch/arm/mach-rpc/ecard.c b/arch/arm/mach-rpc/ecard.c index 8db62cc..04b2f22 100644 --- a/arch/arm/mach-rpc/ecard.c +++ b/arch/arm/mach-rpc/ecard.c @@ -212,7 +212,7 @@ static DEFINE_MUTEX(ecard_mutex); */ static void ecard_init_pgtables(struct mm_struct *mm) { - struct vm_area_struct vma; + struct vm_area_struct vma = TLB_FLUSH_VMA(mm, VM_EXEC); /* We want to set up the page tables for the following mapping: * Virtual Physical @@ -237,9 +237,6 @@ static void ecard_init_pgtables(struct mm_struct *mm) memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE)); - vma_init(&vma, mm); - vma.vm_flags = VM_EXEC; - flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE); flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE); } diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 7cf0b1a..8a10f1d 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -488,9 +488,13 @@ static int gcm_decrypt(struct aead_request *req) err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); } - if (walk.nbytes) - pmull_gcm_encrypt_block(iv, iv, NULL, + if (walk.nbytes) { + kernel_neon_begin(); + pmull_gcm_encrypt_block(iv, iv, ctx->aes_key.key_enc, num_rounds(&ctx->aes_key)); + kernel_neon_end(); + } + } else { __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, num_rounds(&ctx->aes_key)); diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index d87f2d6..0ad1cf2 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h @@ -37,9 +37,7 @@ static inline void __tlb_remove_table(void *_table) static inline void tlb_flush(struct mmu_gather *tlb) { - struct vm_area_struct vma; - - vma_init(&vma, tlb->mm); + struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0); /* * The ASID allocator will either invalidate the ASID or mark diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 1854e49..192b3ba 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -108,13 +108,10 @@ static pte_t get_clear_flush(struct mm_struct *mm, unsigned long pgsize, unsigned long ncontig) { - struct vm_area_struct vma; pte_t orig_pte = huge_ptep_get(ptep); bool valid = pte_valid(orig_pte); unsigned long i, saddr = addr; - vma_init(&vma, mm); - for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) { pte_t pte = ptep_get_and_clear(mm, addr, ptep); @@ -127,8 +124,10 @@ static pte_t get_clear_flush(struct mm_struct *mm, orig_pte = pte_mkdirty(orig_pte); } - if (valid) + if (valid) { + struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0); flush_tlb_range(&vma, saddr, addr); + } return orig_pte; } @@ -147,10 +146,9 @@ static void clear_flush(struct mm_struct *mm, unsigned long pgsize, unsigned long 
ncontig) { - struct vm_area_struct vma; + struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0); unsigned long i, saddr = addr; - vma_init(&vma, mm); for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) pte_clear(mm, addr, ptep); diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h index db89e73..516355a 100644 --- a/arch/ia64/include/asm/tlb.h +++ b/arch/ia64/include/asm/tlb.h @@ -115,12 +115,11 @@ ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned flush_tlb_all(); } else { /* - * XXX fix me: flush_tlb_range() should take an mm pointer instead of a - * vma pointer. + * flush_tlb_range() takes a vma instead of a mm pointer because + * some architectures want the vm_flags for ITLB/DTLB flush. */ - struct vm_area_struct vma; + struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0); - vma_init(&vma, tlb->mm); /* flush the address range from the tlb: */ flush_tlb_range(&vma, start, end); /* now flush the virt. page-table area mapping the address range: */ diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index e6c6dfd..3b85c3e 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -116,6 +116,7 @@ ia64_init_addr_space (void) */ vma = vm_area_alloc(current->mm); if (vma) { + vma_set_anonymous(vma); vma->vm_start = current->thread.rbs_bot & PAGE_MASK; vma->vm_end = vma->vm_start + PAGE_SIZE; vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; @@ -133,6 +134,7 @@ ia64_init_addr_space (void) if (!(current->personality & MMAP_PAGE_ZERO)) { vma = vm_area_alloc(current->mm); if (vma) { + vma_set_anonymous(vma); vma->vm_end = PAGE_SIZE; vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT); vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 79d570c..b2f89b6 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -143,24 +143,33 @@ static inline void mm_context_remove_copro(struct mm_struct *mm) { int c; - c = atomic_dec_if_positive(&mm->context.copros); - - /* Detect imbalance between add and remove */ - WARN_ON(c < 0); - /* - * Need to broadcast a global flush of the full mm before - * decrementing active_cpus count, as the next TLBI may be - * local and the nMMU and/or PSL need to be cleaned up. - * Should be rare enough so that it's acceptable. + * When removing the last copro, we need to broadcast a global + * flush of the full mm, as the next TLBI may be local and the + * nMMU and/or PSL need to be cleaned up. + * + * Both the 'copros' and 'active_cpus' counts are looked at in + * flush_all_mm() to determine the scope (local/global) of the + * TLBIs, so we need to flush first before decrementing + * 'copros'. If this API is used by several callers for the + * same context, it can lead to over-flushing. It's hopefully + * not common enough to be a problem. * * Skip on hash, as we don't know how to do the proper flush * for the time being. Invalidations will remain global if - * used on hash. + * used on hash. Note that we can't drop 'copros' either, as + * it could make some invalidations local with no flush + * in-between. 
*/ - if (c == 0 && radix_enabled()) { + if (radix_enabled()) { flush_all_mm(mm); - dec_mm_active_cpus(mm); + + c = atomic_dec_if_positive(&mm->context.copros); + /* Detect imbalance between add and remove */ + WARN_ON(c < 0); + + if (c == 0) + dec_mm_active_cpus(mm); } } #else diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index fe9733f..471aac3 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -42,6 +42,8 @@ #include <asm/ppc-pci.h> #include <asm/eeh.h> +#include "../../../drivers/pci/pci.h" + /* hose_spinlock protects accesses to the the phb_bitmap. */ static DEFINE_SPINLOCK(hose_spinlock); LIST_HEAD(hose_list); @@ -1014,7 +1016,7 @@ void pcibios_setup_bus_devices(struct pci_bus *bus) /* Cardbus can call us to add new devices to a bus, so ignore * those who are already fully discovered */ - if (dev->is_added) + if (pci_dev_is_added(dev)) continue; pcibios_setup_device(dev); diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 5bd0eb6..70b2e1e 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -46,6 +46,7 @@ #include "powernv.h" #include "pci.h" +#include "../../../../drivers/pci/pci.h" #define PNV_IODA1_M64_NUM 16 /* Number of M64 BARs */ #define PNV_IODA1_M64_SEGS 8 /* Segments per M64 BAR */ @@ -3138,7 +3139,7 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev) struct pci_dn *pdn; int mul, total_vfs; - if (!pdev->is_physfn || pdev->is_added) + if (!pdev->is_physfn || pci_dev_is_added(pdev)) return; pdn = pci_get_pdn(pdev); diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 139f0af..8a4868a 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -71,6 +71,7 @@ #include <asm/security_features.h> #include "pseries.h" +#include "../../../../drivers/pci/pci.h" int CMO_PrPSP = -1; int CMO_SecPSP = -1; @@ -664,7 +665,7 @@ static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev) const int *indexes; struct device_node *dn = pci_device_to_OF_node(pdev); - if (!pdev->is_physfn || pdev->is_added) + if (!pdev->is_physfn || pci_dev_is_added(pdev)) return; /*Firmware must support open sriov otherwise dont configure*/ indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL); diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index ac67828..410b263 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -13,6 +13,7 @@ generic-y += local64.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h generic-y += module.h +generic-y += msi.h generic-y += preempt.h generic-y += rwsem.h generic-y += serial.h diff --git a/arch/sparc/include/asm/msi.h b/arch/sparc/include/asm/msi.h deleted file mode 100644 index 3c17c10..0000000 --- a/arch/sparc/include/asm/msi.h +++ /dev/null @@ -1,32 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * msi.h: Defines specific to the MBus - Sbus - Interface. - * - * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) - * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) - */ - -#ifndef _SPARC_MSI_H -#define _SPARC_MSI_H - -/* - * Locations of MSI Registers. - */ -#define MSI_MBUS_ARBEN 0xe0001008 /* MBus Arbiter Enable register */ - -/* - * Useful bits in the MSI Registers. 
- */ -#define MSI_ASYNC_MODE 0x80000000 /* Operate the MSI asynchronously */ - - -static inline void msi_set_sync(void) -{ - __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t" - "andn %%g3, %2, %%g3\n\t" - "sta %%g3, [%0] %1\n\t" : : - "r" (MSI_MBUS_ARBEN), - "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3"); -} - -#endif /* !(_SPARC_MSI_H) */ diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c index 2ef8cfa..f0eba72 100644 --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c @@ -814,7 +814,7 @@ static void __init get_tick_patch(void) } } -static void init_tick_ops(struct sparc64_tick_ops *ops) +static void __init init_tick_ops(struct sparc64_tick_ops *ops) { unsigned long freq, quotient, tick; diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index 1d70c3f..be9cb00 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -37,7 +37,6 @@ #include <asm/mbus.h> #include <asm/page.h> #include <asm/asi.h> -#include <asm/msi.h> #include <asm/smp.h> #include <asm/io.h> @@ -116,6 +115,25 @@ static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp) set_pte((pte_t *)ctxp, pte); } +/* + * Locations of MSI Registers. + */ +#define MSI_MBUS_ARBEN 0xe0001008 /* MBus Arbiter Enable register */ + +/* + * Useful bits in the MSI Registers. + */ +#define MSI_ASYNC_MODE 0x80000000 /* Operate the MSI asynchronously */ + +static void msi_set_sync(void) +{ + __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t" + "andn %%g3, %2, %%g3\n\t" + "sta %%g3, [%0] %1\n\t" : : + "r" (MSI_MBUS_ARBEN), + "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3"); +} + void pmd_set(pmd_t *pmdp, pte_t *ptep) { unsigned long ptp; /* Physical address, shifted right by 4 */ diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index e30da9a..5d8e317 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -7893,6 +7893,8 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) HRTIMER_MODE_REL_PINNED); vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; + vmx->nested.vpid02 = allocate_vpid(); + vmx->nested.vmxon = true; return 0; @@ -8480,21 +8482,20 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) /* Emulate the VMPTRST instruction */ static int handle_vmptrst(struct kvm_vcpu *vcpu) { - unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); - gva_t vmcs_gva; + unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION); + u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); + gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr; struct x86_exception e; + gva_t gva; if (!nested_vmx_check_permission(vcpu)) return 1; - if (get_vmx_mem_address(vcpu, exit_qualification, - vmx_instruction_info, true, &vmcs_gva)) + if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva)) return 1; /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ - if (kvm_write_guest_virt_system(vcpu, vmcs_gva, - (void *)&to_vmx(vcpu)->nested.current_vmptr, - sizeof(u64), &e)) { + if (kvm_write_guest_virt_system(vcpu, gva, (void *)¤t_vmptr, + sizeof(gpa_t), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } @@ -10370,11 +10371,9 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) goto free_vmcs; } - if (nested) { + if (nested) nested_vmx_setup_ctls_msrs(&vmx->nested.msrs, kvm_vcpu_apicv_active(&vmx->vcpu)); - vmx->nested.vpid02 = allocate_vpid(); - } vmx->nested.posted_intr_nv = -1; vmx->nested.current_vmptr = -1ull; @@ -10391,7 +10390,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, 
unsigned int id) return &vmx->vcpu; free_vmcs: - free_vpid(vmx->nested.vpid02); free_loaded_vmcs(vmx->loaded_vmcs); free_msrs: kfree(vmx->guest_msrs); diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c index 5579987..8f6cc71 100644 --- a/arch/x86/net/bpf_jit_comp32.c +++ b/arch/x86/net/bpf_jit_comp32.c @@ -1441,8 +1441,8 @@ static void emit_prologue(u8 **pprog, u32 stack_depth) /* sub esp,STACK_SIZE */ EMIT2_off32(0x81, 0xEC, STACK_SIZE); - /* sub ebp,SCRATCH_SIZE+4+12*/ - EMIT3(0x83, add_1reg(0xE8, IA32_EBP), SCRATCH_SIZE + 16); + /* sub ebp,SCRATCH_SIZE+12*/ + EMIT3(0x83, add_1reg(0xE8, IA32_EBP), SCRATCH_SIZE + 12); /* xor ebx,ebx */ EMIT2(0x31, add_2reg(0xC0, IA32_EBX, IA32_EBX)); @@ -1475,8 +1475,8 @@ static void emit_epilogue(u8 **pprog, u32 stack_depth) /* mov edx,dword ptr [ebp+off]*/ EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(r0[1])); - /* add ebp,SCRATCH_SIZE+4+12*/ - EMIT3(0x83, add_1reg(0xC0, IA32_EBP), SCRATCH_SIZE + 16); + /* add ebp,SCRATCH_SIZE+12*/ + EMIT3(0x83, add_1reg(0xC0, IA32_EBP), SCRATCH_SIZE + 12); /* mov ebx,dword ptr [ebp-12]*/ EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), -12); diff --git a/block/blk-core.c b/block/blk-core.c index f84a9b7..ee33590 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2155,11 +2155,12 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part) if (part->policy && op_is_write(bio_op(bio))) { char b[BDEVNAME_SIZE]; - printk(KERN_ERR + WARN_ONCE(1, "generic_make_request: Trying to write " "to read-only block-device %s (partno %d)\n", bio_devname(bio, b), part->partno); - return true; + /* Older lvm-tools actually trigger this */ + return false; } return false; diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 09b2ee6..3de0836 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -271,7 +271,7 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data) * test and set the bit before assining ->rqs[]. 
*/ rq = tags->rqs[bitnr]; - if (rq && blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) + if (rq && blk_mq_request_started(rq)) iter_data->fn(rq, iter_data->data, reserved); return true; diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index f8fecfe..9706613 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -879,6 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev) #define LPSS_GPIODEF0_DMA_LLP BIT(13) static DEFINE_MUTEX(lpss_iosf_mutex); +static bool lpss_iosf_d3_entered; static void lpss_iosf_enter_d3_state(void) { @@ -921,6 +922,9 @@ static void lpss_iosf_enter_d3_state(void) iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE, LPSS_IOSF_GPIODEF0, value1, mask1); + + lpss_iosf_d3_entered = true; + exit: mutex_unlock(&lpss_iosf_mutex); } @@ -935,6 +939,11 @@ static void lpss_iosf_exit_d3_state(void) mutex_lock(&lpss_iosf_mutex); + if (!lpss_iosf_d3_entered) + goto exit; + + lpss_iosf_d3_entered = false; + iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE, LPSS_IOSF_GPIODEF0, value1, mask1); @@ -944,13 +953,13 @@ static void lpss_iosf_exit_d3_state(void) iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE, LPSS_IOSF_PMCSR, value2, mask2); +exit: mutex_unlock(&lpss_iosf_mutex); } -static int acpi_lpss_suspend(struct device *dev, bool runtime) +static int acpi_lpss_suspend(struct device *dev, bool wakeup) { struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); - bool wakeup = runtime || device_may_wakeup(dev); int ret; if (pdata->dev_desc->flags & LPSS_SAVE_CTX) @@ -963,14 +972,14 @@ static int acpi_lpss_suspend(struct device *dev, bool runtime) * wrong status for devices being about to be powered off. See * lpss_iosf_enter_d3_state() for further information. */ - if ((runtime || !pm_suspend_via_firmware()) && + if (acpi_target_system_state() == ACPI_STATE_S0 && lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) lpss_iosf_enter_d3_state(); return ret; } -static int acpi_lpss_resume(struct device *dev, bool runtime) +static int acpi_lpss_resume(struct device *dev) { struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); int ret; @@ -979,8 +988,7 @@ static int acpi_lpss_resume(struct device *dev, bool runtime) * This call is kept first to be in symmetry with * acpi_lpss_runtime_suspend() one. */ - if ((runtime || !pm_resume_via_firmware()) && - lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) + if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) lpss_iosf_exit_d3_state(); ret = acpi_dev_resume(dev); @@ -1004,12 +1012,12 @@ static int acpi_lpss_suspend_late(struct device *dev) return 0; ret = pm_generic_suspend_late(dev); - return ret ? ret : acpi_lpss_suspend(dev, false); + return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev)); } static int acpi_lpss_resume_early(struct device *dev) { - int ret = acpi_lpss_resume(dev, false); + int ret = acpi_lpss_resume(dev); return ret ? ret : pm_generic_resume_early(dev); } @@ -1024,7 +1032,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev) static int acpi_lpss_runtime_resume(struct device *dev) { - int ret = acpi_lpss_resume(dev, true); + int ret = acpi_lpss_resume(dev); return ret ? 
ret : pm_generic_runtime_resume(dev); } diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c index ee840be..44f35ab 100644 --- a/drivers/acpi/acpica/psloop.c +++ b/drivers/acpi/acpica/psloop.c @@ -709,15 +709,20 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state) } else if ((walk_state-> parse_flags & ACPI_PARSE_MODULE_LEVEL) + && status != AE_CTRL_TRANSFER && ACPI_FAILURE(status)) { /* - * ACPI_PARSE_MODULE_LEVEL means that we are loading a table by - * executing it as a control method. However, if we encounter - * an error while loading the table, we need to keep trying to - * load the table rather than aborting the table load. Set the - * status to AE_OK to proceed with the table load. If we get a - * failure at this point, it means that the dispatcher got an - * error while processing Op (most likely an AML operand error. + * ACPI_PARSE_MODULE_LEVEL flag means that we are currently + * loading a table by executing it as a control method. + * However, if we encounter an error while loading the table, + * we need to keep trying to load the table rather than + * aborting the table load (setting the status to AE_OK + * continues the table load). If we get a failure at this + * point, it means that the dispatcher got an error while + * processing Op (most likely an AML operand error) or a + * control method was called from module level and the + * dispatcher returned AE_CTRL_TRANSFER. In the latter case, + * leave the status alone, there's nothing wrong with it. */ status = AE_OK; } diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 3c39712..d4ed002 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -311,12 +311,20 @@ static DEFINE_MUTEX(intel_pstate_limits_lock); #ifdef CONFIG_ACPI -static bool intel_pstate_get_ppc_enable_status(void) +static bool intel_pstate_acpi_pm_profile_server(void) { if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER || acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER) return true; + return false; +} + +static bool intel_pstate_get_ppc_enable_status(void) +{ + if (intel_pstate_acpi_pm_profile_server()) + return true; + return acpi_ppc; } @@ -459,6 +467,11 @@ static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *pol static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) { } + +static inline bool intel_pstate_acpi_pm_profile_server(void) +{ + return false; +} #endif static inline void update_turbo_state(void) @@ -1841,7 +1854,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum) intel_pstate_hwp_enable(cpu); id = x86_match_cpu(intel_pstate_hwp_boost_ids); - if (id) + if (id && intel_pstate_acpi_pm_profile_server()) hwp_boost = true; } diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 1c6cbda..09d823d 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -266,6 +266,8 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, return; } + count -= initial; + if (initial) asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ : "+S"(input), "+D"(output) @@ -273,7 +275,7 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ : "+S"(input), "+D"(output) - : "d"(control_word), "b"(key), "c"(count - initial)); + : "d"(control_word), "b"(key), "c"(count)); } static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, @@ 
-284,6 +286,8 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, if (count < cbc_fetch_blocks) return cbc_crypt(input, output, key, iv, control_word, count); + count -= initial; + if (initial) asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ : "+S" (input), "+D" (output), "+a" (iv) @@ -291,7 +295,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ : "+S" (input), "+D" (output), "+a" (iv) - : "d" (control_word), "b" (key), "c" (count-initial)); + : "d" (control_word), "b" (key), "c" (count)); return iv; } diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 73021b3..dd3ff2f 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -429,6 +429,18 @@ static void adv7511_hpd_work(struct work_struct *work) else status = connector_status_disconnected; + /* + * The bridge resets its registers on unplug. So when we get a plug + * event and we're already supposed to be powered, cycle the bridge to + * restore its state. + */ + if (status == connector_status_connected && + adv7511->connector.status == connector_status_disconnected && + adv7511->powered) { + regcache_mark_dirty(adv7511->regmap); + adv7511_power_on(adv7511); + } + if (adv7511->connector.status != status) { adv7511->connector.status = status; if (status == connector_status_disconnected) diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 130da51..81e3219 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1510,8 +1510,9 @@ int drm_atomic_helper_async_check(struct drm_device *dev, { struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; - struct drm_plane *plane; - struct drm_plane_state *old_plane_state, *new_plane_state; + struct drm_plane *plane = NULL; + struct drm_plane_state *old_plane_state = NULL; + struct drm_plane_state *new_plane_state = NULL; const struct drm_plane_helper_funcs *funcs; int i, n_planes = 0; @@ -1527,7 +1528,8 @@ int drm_atomic_helper_async_check(struct drm_device *dev, if (n_planes != 1) return -EINVAL; - if (!new_plane_state->crtc) + if (!new_plane_state->crtc || + old_plane_state->crtc != new_plane_state->crtc) return -EINVAL; funcs = plane->helper_private; diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c index 3c4000f..f973d28 100644 --- a/drivers/gpu/drm/drm_context.c +++ b/drivers/gpu/drm/drm_context.c @@ -372,7 +372,7 @@ int drm_legacy_addctx(struct drm_device *dev, void *data, ctx->handle = drm_legacy_ctxbitmap_next(dev); } DRM_DEBUG("%d\n", ctx->handle); - if (ctx->handle == -1) { + if (ctx->handle < 0) { DRM_DEBUG("Not enough free contexts.\n"); /* Should this return -EBUSY instead? 
*/ return -ENOMEM; diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 1d34619..a951ec7 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -320,6 +320,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) vc4_state->x_scaling[0] = VC4_SCALING_TPZ; if (vc4_state->y_scaling[0] == VC4_SCALING_NONE) vc4_state->y_scaling[0] = VC4_SCALING_TPZ; + } else { + vc4_state->x_scaling[1] = VC4_SCALING_NONE; + vc4_state->y_scaling[1] = VC4_SCALING_NONE; } vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE && diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index cc06e84..583d3a1 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1984,15 +1984,64 @@ static int modify_qp(struct ib_uverbs_file *file, goto release_qp; } - if ((cmd->base.attr_mask & IB_QP_AV) && - !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) { - ret = -EINVAL; - goto release_qp; + if ((cmd->base.attr_mask & IB_QP_AV)) { + if (!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) { + ret = -EINVAL; + goto release_qp; + } + + if (cmd->base.attr_mask & IB_QP_STATE && + cmd->base.qp_state == IB_QPS_RTR) { + /* We are in INIT->RTR TRANSITION (if we are not, + * this transition will be rejected in subsequent checks). + * In the INIT->RTR transition, we cannot have IB_QP_PORT set, + * but the IB_QP_STATE flag is required. + * + * Since kernel 3.14 (commit dbf727de7440), the uverbs driver, + * when IB_QP_AV is set, has required inclusion of a valid + * port number in the primary AV. (AVs are created and handled + * differently for infiniband and ethernet (RoCE) ports). + * + * Check the port number included in the primary AV against + * the port number in the qp struct, which was set (and saved) + * in the RST->INIT transition. + */ + if (cmd->base.dest.port_num != qp->real_qp->port) { + ret = -EINVAL; + goto release_qp; + } + } else { + /* We are in SQD->SQD. (If we are not, this transition will + * be rejected later in the verbs layer checks). + * Check for both IB_QP_PORT and IB_QP_AV, these can be set + * together in the SQD->SQD transition. + * + * If only IP_QP_AV was set, add in IB_QP_PORT as well (the + * verbs layer driver does not track primary port changes + * resulting from path migration. Thus, in SQD, if the primary + * AV is modified, the primary port should also be modified). + * + * Note that in this transition, the IB_QP_STATE flag + * is not allowed. 
+ */ + if (((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT)) + == (IB_QP_AV | IB_QP_PORT)) && + cmd->base.port_num != cmd->base.dest.port_num) { + ret = -EINVAL; + goto release_qp; + } + if ((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT)) + == IB_QP_AV) { + cmd->base.attr_mask |= IB_QP_PORT; + cmd->base.port_num = cmd->base.dest.port_num; + } + } } if ((cmd->base.attr_mask & IB_QP_ALT_PATH) && (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) || - !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) { + !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num) || + cmd->base.alt_port_num != cmd->base.alt_dest.port_num)) { ret = -EINVAL; goto release_qp; } diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c index edb35a5..a99fc0c 100644 --- a/drivers/media/platform/vsp1/vsp1_drm.c +++ b/drivers/media/platform/vsp1/vsp1_drm.c @@ -728,9 +728,6 @@ EXPORT_SYMBOL_GPL(vsp1_du_setup_lif); */ void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index) { - struct vsp1_device *vsp1 = dev_get_drvdata(dev); - - mutex_lock(&vsp1->drm->lock); } EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin); @@ -846,6 +843,7 @@ void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index, drm_pipe->crc = cfg->crc; + mutex_lock(&vsp1->drm->lock); vsp1_du_pipeline_setup_inputs(vsp1, pipe); vsp1_du_pipeline_configure(pipe); mutex_unlock(&vsp1->drm->lock); diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c index fcfab66..81b150e 100644 --- a/drivers/media/rc/bpf-lirc.c +++ b/drivers/media/rc/bpf-lirc.c @@ -174,6 +174,7 @@ static int lirc_bpf_detach(struct rc_dev *rcdev, struct bpf_prog *prog) rcu_assign_pointer(raw->progs, new_array); bpf_prog_array_free(old_array); + bpf_prog_put(prog); unlock: mutex_unlock(&ir_raw_handler_lock); return ret; diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c index 2e0066b..e794890 100644 --- a/drivers/media/rc/rc-ir-raw.c +++ b/drivers/media/rc/rc-ir-raw.c @@ -30,13 +30,13 @@ static int ir_raw_event_thread(void *data) while (kfifo_out(&raw->kfifo, &ev, 1)) { if (is_timing_event(ev)) { if (ev.duration == 0) - dev_err(&dev->dev, "nonsensical timing event of duration 0"); + dev_warn_once(&dev->dev, "nonsensical timing event of duration 0"); if (is_timing_event(raw->prev_ev) && !is_transition(&ev, &raw->prev_ev)) - dev_err(&dev->dev, "two consecutive events of type %s", - TO_STR(ev.pulse)); + dev_warn_once(&dev->dev, "two consecutive events of type %s", + TO_STR(ev.pulse)); if (raw->prev_ev.reset && ev.pulse == 0) - dev_err(&dev->dev, "timing event after reset should be pulse"); + dev_warn_once(&dev->dev, "timing event after reset should be pulse"); } list_for_each_entry(handler, &ir_raw_handler_list, list) if (dev->enabled_protocols & diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 2e222d9..ca68e1d 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -679,6 +679,14 @@ static void ir_timer_repeat(struct timer_list *t) spin_unlock_irqrestore(&dev->keylock, flags); } +static unsigned int repeat_period(int protocol) +{ + if (protocol >= ARRAY_SIZE(protocols)) + return 100; + + return protocols[protocol].repeat_period; +} + /** * rc_repeat() - signals that a key is still pressed * @dev: the struct rc_dev descriptor of the device @@ -691,7 +699,7 @@ void rc_repeat(struct rc_dev *dev) { unsigned long flags; unsigned int timeout = nsecs_to_jiffies(dev->timeout) + - msecs_to_jiffies(protocols[dev->last_protocol].repeat_period); + 
msecs_to_jiffies(repeat_period(dev->last_protocol)); struct lirc_scancode sc = { .scancode = dev->last_scancode, .rc_proto = dev->last_protocol, .keycode = dev->keypressed ? dev->last_keycode : KEY_RESERVED, @@ -803,7 +811,7 @@ void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u32 scancode, if (dev->keypressed) { dev->keyup_jiffies = jiffies + nsecs_to_jiffies(dev->timeout) + - msecs_to_jiffies(protocols[protocol].repeat_period); + msecs_to_jiffies(repeat_period(protocol)); mod_timer(&dev->timer_keyup, dev->keyup_jiffies); } spin_unlock_irqrestore(&dev->keylock, flags); diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 75f781c..de4e6e5 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -293,9 +293,10 @@ static void mxcmci_swap_buffers(struct mmc_data *data) int i; for_each_sg(data->sg, sg, data->sg_len, i) { - void *buf = kmap_atomic(sg_page(sg) + sg->offset; + void *buf = kmap_atomic(sg_page(sg) + sg->offset); buffer_swap32(buf, sg->length); kunmap_atomic(buf); + } } #else static inline void mxcmci_swap_buffers(struct mmc_data *data) {} diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 63e3844..217b790 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1717,6 +1717,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, goto err_upper_unlink; } + bond->nest_level = dev_get_nest_level(bond_dev) + 1; + /* If the mode uses primary, then the following is handled by * bond_change_active_slave(). */ @@ -1764,7 +1766,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, if (bond_mode_can_use_xmit_hash(bond)) bond_update_slave_arr(bond, NULL); - bond->nest_level = dev_get_nest_level(bond_dev); netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n", slave_dev->name, @@ -3415,6 +3416,13 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res, } } +static int bond_get_nest_level(struct net_device *bond_dev) +{ + struct bonding *bond = netdev_priv(bond_dev); + + return bond->nest_level; +} + static void bond_get_stats(struct net_device *bond_dev, struct rtnl_link_stats64 *stats) { @@ -3423,7 +3431,7 @@ static void bond_get_stats(struct net_device *bond_dev, struct list_head *iter; struct slave *slave; - spin_lock(&bond->stats_lock); + spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev)); memcpy(stats, &bond->bond_stats, sizeof(*stats)); rcu_read_lock(); @@ -4227,6 +4235,7 @@ static const struct net_device_ops bond_netdev_ops = { .ndo_neigh_setup = bond_neigh_setup, .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, + .ndo_get_lock_subclass = bond_get_nest_level, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_netpoll_setup = bond_netpoll_setup, .ndo_netpoll_cleanup = bond_netpoll_cleanup, @@ -4725,6 +4734,7 @@ static int bond_init(struct net_device *bond_dev) if (!bond->wq) return -ENOMEM; + bond->nest_level = SINGLE_DEPTH_NESTING; netdev_lockdep_set_classes(bond_dev); list_add_tail(&bond->bond_list, &bn->dev_list); diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 12ff002..b7dfd41 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -1072,6 +1072,7 @@ static void ems_usb_disconnect(struct usb_interface *intf) usb_free_urb(dev->intr_urb); kfree(dev->intr_in_buffer); + kfree(dev->tx_msg_buffer); } } diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 9ef07a0..bb28c70 
100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2617,7 +2617,6 @@ static const struct mv88e6xxx_ops mv88e6085_ops = { .rmu_disable = mv88e6085_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, - .serdes_power = mv88e6341_serdes_power, }; static const struct mv88e6xxx_ops mv88e6095_ops = { @@ -2783,6 +2782,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .serdes_power = mv88e6341_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, }; @@ -2960,7 +2960,6 @@ static const struct mv88e6xxx_ops mv88e6175_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, - .serdes_power = mv88e6341_serdes_power, }; static const struct mv88e6xxx_ops mv88e6176_ops = { @@ -3336,6 +3335,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .serdes_power = mv88e6341_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, }; diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 1b9d3130..17f12c1 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -333,6 +333,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); + io_sq->dma_addr_bits = ena_dev->dma_addr_bits; io_sq->desc_entry_size = (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? sizeof(struct ena_eth_io_tx_desc) : diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 4b5d625..8a3a60b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -1111,14 +1111,14 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata) if (pdata->tx_pause != pdata->phy.tx_pause) { new_state = 1; - pdata->hw_if.config_tx_flow_control(pdata); pdata->tx_pause = pdata->phy.tx_pause; + pdata->hw_if.config_tx_flow_control(pdata); } if (pdata->rx_pause != pdata->phy.rx_pause) { new_state = 1; - pdata->hw_if.config_rx_flow_control(pdata); pdata->rx_pause = pdata->phy.rx_pause; + pdata->hw_if.config_rx_flow_control(pdata); } /* Speed support */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index bc03c17..a8926e9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3072,6 +3072,7 @@ static void cxgb_del_udp_tunnel(struct net_device *netdev, adapter->geneve_port = 0; t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0); + break; default: return; } @@ -3157,6 +3158,7 @@ static void cxgb_add_udp_tunnel(struct net_device *netdev, t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F); + break; default: return; } diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 90c645b..60641e20 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2047,28 +2047,42 @@ static int enic_stop(struct net_device *netdev) return 0; } +static int _enic_change_mtu(struct net_device *netdev, int new_mtu) +{ + bool running = netif_running(netdev); + int err = 
0; + + ASSERT_RTNL(); + if (running) { + err = enic_stop(netdev); + if (err) + return err; + } + + netdev->mtu = new_mtu; + + if (running) { + err = enic_open(netdev); + if (err) + return err; + } + + return 0; +} + static int enic_change_mtu(struct net_device *netdev, int new_mtu) { struct enic *enic = netdev_priv(netdev); - int running = netif_running(netdev); if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) return -EOPNOTSUPP; - if (running) - enic_stop(netdev); - - netdev->mtu = new_mtu; - if (netdev->mtu > enic->port_mtu) netdev_warn(netdev, - "interface MTU (%d) set higher than port MTU (%d)\n", - netdev->mtu, enic->port_mtu); + "interface MTU (%d) set higher than port MTU (%d)\n", + netdev->mtu, enic->port_mtu); - if (running) - enic_open(netdev); - - return 0; + return _enic_change_mtu(netdev, new_mtu); } static void enic_change_mtu_work(struct work_struct *work) @@ -2076,47 +2090,9 @@ static void enic_change_mtu_work(struct work_struct *work) struct enic *enic = container_of(work, struct enic, change_mtu_work); struct net_device *netdev = enic->netdev; int new_mtu = vnic_dev_mtu(enic->vdev); - int err; - unsigned int i; - - new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu)); rtnl_lock(); - - /* Stop RQ */ - del_timer_sync(&enic->notify_timer); - - for (i = 0; i < enic->rq_count; i++) - napi_disable(&enic->napi[i]); - - vnic_intr_mask(&enic->intr[0]); - enic_synchronize_irqs(enic); - err = vnic_rq_disable(&enic->rq[0]); - if (err) { - rtnl_unlock(); - netdev_err(netdev, "Unable to disable RQ.\n"); - return; - } - vnic_rq_clean(&enic->rq[0], enic_free_rq_buf); - vnic_cq_clean(&enic->cq[0]); - vnic_intr_clean(&enic->intr[0]); - - /* Fill RQ with new_mtu-sized buffers */ - netdev->mtu = new_mtu; - vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); - /* Need at least one buffer on ring to get going */ - if (vnic_rq_desc_used(&enic->rq[0]) == 0) { - rtnl_unlock(); - netdev_err(netdev, "Unable to alloc receive buffers.\n"); - return; - } - - /* Start RQ */ - vnic_rq_enable(&enic->rq[0]); - napi_enable(&enic->napi[0]); - vnic_intr_unmask(&enic->intr[0]); - enic_notify_timer_start(enic); - + (void)_enic_change_mtu(netdev, new_mtu); rtnl_unlock(); netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu); @@ -2916,7 +2892,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ enic->port_mtu = enic->config.mtu; - (void)enic_change_mtu(netdev, enic->port_mtu); err = enic_set_mac_addr(netdev, enic->mac_addr); if (err) { @@ -3006,6 +2981,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* MTU range: 68 - 9000 */ netdev->min_mtu = ENIC_MIN_MTU; netdev->max_mtu = ENIC_MAX_MTU; + netdev->mtu = enic->port_mtu; err = register_netdev(netdev); if (err) { diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 5b12272..09e9da1 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -983,6 +983,7 @@ static int nic_dev_init(struct pci_dev *pdev) hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS, nic_dev, link_status_event_handler); + SET_NETDEV_DEV(netdev, &pdev->dev); err = register_netdev(netdev); if (err) { dev_err(&pdev->dev, "Failed to register netdev\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 86bc9ac..e33afa8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -1172,6 +1172,8 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv) struct mlx5_core_dev *mdev = priv->mdev; int err; + priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP; + if (!MLX5_DSCP_SUPPORTED(mdev)) return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index dae4156..c592678 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3712,7 +3712,8 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, if (!reset) { params->sw_mtu = new_mtu; - set_mtu_cb(priv); + if (set_mtu_cb) + set_mtu_cb(priv); netdev->mtu = params->sw_mtu; goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index dd01ad4..40dba9e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1696,7 +1696,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) int vport_num; int err; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return 0; esw_info(dev, @@ -1765,7 +1765,7 @@ abort: void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) { - if (!esw || !MLX5_VPORT_MANAGER(esw->dev)) + if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev)) return; esw_info(esw->dev, "cleanup\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index af3bb2f..b7c21eb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -76,6 +76,7 @@ void mlx5i_init(struct mlx5_core_dev *mdev, void *ppriv) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); + u16 max_mtu; /* priv init */ priv->mdev = mdev; @@ -84,6 +85,9 @@ void mlx5i_init(struct mlx5_core_dev *mdev, priv->ppriv = ppriv; mutex_init(&priv->state_lock); + mlx5_query_port_max_mtu(mdev, &max_mtu, 1); + netdev->mtu = max_mtu; + mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev), netdev->mtu); mlx5i_build_nic_params(mdev, &priv->channels.params); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 3c0d882..f6f6a56 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -327,12 +327,16 @@ static void mlxsw_afa_resource_add(struct mlxsw_afa_block *block, list_add(&resource->list, &block->resource_list); } +static void mlxsw_afa_resource_del(struct mlxsw_afa_resource *resource) +{ + list_del(&resource->list); +} + static void mlxsw_afa_resources_destroy(struct mlxsw_afa_block *block) { struct mlxsw_afa_resource *resource, *tmp; list_for_each_entry_safe(resource, tmp, &block->resource_list, list) { - list_del(&resource->list); resource->destructor(block, resource); } } @@ -530,6 +534,7 @@ static void mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block, struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref) { + mlxsw_afa_resource_del(&fwd_entry_ref->resource); mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry); kfree(fwd_entry_ref); } @@ -579,6 +584,7 @@ static void mlxsw_afa_counter_destroy(struct mlxsw_afa_block *block, struct mlxsw_afa_counter *counter) { + mlxsw_afa_resource_del(&counter->resource); block->afa->ops->counter_index_put(block->afa->ops_priv, counter->counter_index); kfree(counter); 
@@ -626,8 +632,8 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block, char *oneact; char *actions; - if (WARN_ON(block->finished)) - return NULL; + if (block->finished) + return ERR_PTR(-EINVAL); if (block->cur_act_index + action_size > block->afa->max_acts_per_set) { struct mlxsw_afa_set *set; @@ -637,7 +643,7 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block, */ set = mlxsw_afa_set_create(false); if (!set) - return NULL; + return ERR_PTR(-ENOBUFS); set->prev = block->cur_set; block->cur_act_index = 0; block->cur_set->next = set; @@ -724,8 +730,8 @@ int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block, MLXSW_AFA_VLAN_CODE, MLXSW_AFA_VLAN_SIZE); - if (!act) - return -ENOBUFS; + if (IS_ERR(act)) + return PTR_ERR(act); mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP, MLXSW_AFA_VLAN_CMD_SET_OUTER, vid, MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp, @@ -806,8 +812,8 @@ int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block) MLXSW_AFA_TRAPDISC_CODE, MLXSW_AFA_TRAPDISC_SIZE); - if (!act) - return -ENOBUFS; + if (IS_ERR(act)) + return PTR_ERR(act); mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP, MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD, 0); return 0; @@ -820,8 +826,8 @@ int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id) MLXSW_AFA_TRAPDISC_CODE, MLXSW_AFA_TRAPDISC_SIZE); - if (!act) - return -ENOBUFS; + if (IS_ERR(act)) + return PTR_ERR(act); mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP, MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD, trap_id); @@ -836,8 +842,8 @@ int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block, MLXSW_AFA_TRAPDISC_CODE, MLXSW_AFA_TRAPDISC_SIZE); - if (!act) - return -ENOBUFS; + if (IS_ERR(act)) + return PTR_ERR(act); mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP, MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, trap_id); @@ -856,6 +862,7 @@ static void mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block, struct mlxsw_afa_mirror *mirror) { + mlxsw_afa_resource_del(&mirror->resource); block->afa->ops->mirror_del(block->afa->ops_priv, mirror->local_in_port, mirror->span_id, @@ -908,8 +915,8 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block, char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAPDISC_CODE, MLXSW_AFA_TRAPDISC_SIZE); - if (!act) - return -ENOBUFS; + if (IS_ERR(act)) + return PTR_ERR(act); mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP, MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, 0); mlxsw_afa_trapdisc_mirror_pack(act, true, mirror_agent); @@ -996,8 +1003,8 @@ int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE, MLXSW_AFA_FORWARD_SIZE); - if (!act) { - err = -ENOBUFS; + if (IS_ERR(act)) { + err = PTR_ERR(act); goto err_append_action; } mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_PBS, @@ -1052,8 +1059,8 @@ int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block, { char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_POLCNT_CODE, MLXSW_AFA_POLCNT_SIZE); - if (!act) - return -ENOBUFS; + if (IS_ERR(act)) + return PTR_ERR(act); mlxsw_afa_polcnt_pack(act, MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES, counter_index); return 0; @@ -1123,8 +1130,8 @@ int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid) char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_VIRFWD_CODE, MLXSW_AFA_VIRFWD_SIZE); - if (!act) - return -ENOBUFS; + if 
(IS_ERR(act)) + return PTR_ERR(act); mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid); return 0; } @@ -1193,8 +1200,8 @@ int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block, char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_MCROUTER_CODE, MLXSW_AFA_MCROUTER_SIZE); - if (!act) - return -ENOBUFS; + if (IS_ERR(act)) + return PTR_ERR(act); mlxsw_afa_mcrouter_pack(act, MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP, expected_irif, min_mtu, rmid_valid, kvdl_index); return 0; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 1decf3a..e57d237 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -80,7 +80,7 @@ nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port) return NFP_REPR_TYPE_VF; } - return NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC; + return __NFP_REPR_TYPE_MAX; } static struct net_device * @@ -91,6 +91,8 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id) u8 port = 0; repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port); + if (repr_type > NFP_REPR_TYPE_MAX) + return NULL; reprs = rcu_dereference(app->reprs[repr_type]); if (!reprs) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 60f59ab..ef6a8d3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -53,7 +53,7 @@ #include "dwmac1000.h" #include "hwif.h" -#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) +#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES) #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) /* Module parameters */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 8d375e5..6a393b1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -257,7 +257,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev, return -ENOMEM; /* Enable pci device */ - ret = pcim_enable_device(pdev); + ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", __func__); @@ -300,9 +300,45 @@ static int stmmac_pci_probe(struct pci_dev *pdev, static void stmmac_pci_remove(struct pci_dev *pdev) { stmmac_dvr_remove(&pdev->dev); + pci_disable_device(pdev); } -static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume); +static int stmmac_pci_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + + ret = stmmac_suspend(dev); + if (ret) + return ret; + + ret = pci_save_state(pdev); + if (ret) + return ret; + + pci_disable_device(pdev); + pci_wake_from_d3(pdev, true); + return 0; +} + +static int stmmac_pci_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + + pci_restore_state(pdev); + pci_set_power_state(pdev, PCI_D0); + + ret = pci_enable_device(pdev); + if (ret) + return ret; + + pci_set_master(pdev); + + return stmmac_resume(dev); +} + +static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume); /* synthetic ID, no official vendor */ #define PCI_VENDOR_ID_STMMAC 0x700 diff --git a/drivers/net/netdevsim/devlink.c b/drivers/net/netdevsim/devlink.c index ba663e5..5135fc3 100644 --- a/drivers/net/netdevsim/devlink.c +++ b/drivers/net/netdevsim/devlink.c @@ -207,6 +207,7 @@ void nsim_devlink_teardown(struct netdevsim *ns) struct net *net = nsim_to_net(ns); bool *reg_devlink = 
net_generic(net, nsim_devlink_id); + devlink_resources_unregister(ns->devlink, NULL); devlink_unregister(ns->devlink); devlink_free(ns->devlink); ns->devlink = NULL; diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c index 0831b71..0c5b68e 100644 --- a/drivers/net/phy/mdio-mux-bcm-iproc.c +++ b/drivers/net/phy/mdio-mux-bcm-iproc.c @@ -218,7 +218,7 @@ out: static int mdio_mux_iproc_remove(struct platform_device *pdev) { - struct iproc_mdiomux_desc *md = dev_get_platdata(&pdev->dev); + struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev); mdio_mux_uninit(md->mux_handle); mdiobus_unregister(md->mii_bus); diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index ed10d49..aeca484 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1242,6 +1242,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev) mod_timer(&dev->stat_monitor, jiffies + STAT_UPDATE_TIMER); } + + tasklet_schedule(&dev->bh); } return ret; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 53085c6..2b6ec92 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -586,7 +586,8 @@ static struct sk_buff *receive_small(struct net_device *dev, struct receive_queue *rq, void *buf, void *ctx, unsigned int len, - unsigned int *xdp_xmit) + unsigned int *xdp_xmit, + unsigned int *rbytes) { struct sk_buff *skb; struct bpf_prog *xdp_prog; @@ -601,6 +602,7 @@ static struct sk_buff *receive_small(struct net_device *dev, int err; len -= vi->hdr_len; + *rbytes += len; rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); @@ -705,11 +707,13 @@ static struct sk_buff *receive_big(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, void *buf, - unsigned int len) + unsigned int len, + unsigned int *rbytes) { struct page *page = buf; struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); + *rbytes += len - vi->hdr_len; if (unlikely(!skb)) goto err; @@ -727,7 +731,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, void *buf, void *ctx, unsigned int len, - unsigned int *xdp_xmit) + unsigned int *xdp_xmit, + unsigned int *rbytes) { struct virtio_net_hdr_mrg_rxbuf *hdr = buf; u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); @@ -740,6 +745,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, int err; head_skb = NULL; + *rbytes += len - vi->hdr_len; rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); @@ -877,6 +883,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, goto err_buf; } + *rbytes += len; page = virt_to_head_page(buf); truesize = mergeable_ctx_to_truesize(ctx); @@ -932,6 +939,7 @@ err_skb: dev->stats.rx_length_errors++; break; } + *rbytes += len; page = virt_to_head_page(buf); put_page(page); } @@ -942,14 +950,13 @@ xdp_xmit: return NULL; } -static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, - void *buf, unsigned int len, void **ctx, - unsigned int *xdp_xmit) +static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, + void *buf, unsigned int len, void **ctx, + unsigned int *xdp_xmit, unsigned int *rbytes) { struct net_device *dev = vi->dev; struct sk_buff *skb; struct virtio_net_hdr_mrg_rxbuf *hdr; - int ret; if (unlikely(len < vi->hdr_len + ETH_HLEN)) { pr_debug("%s: short packet %i\n", dev->name, len); @@ -961,23 +968,22 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, } else { put_page(virt_to_head_page(buf)); } - return 0; + return; } if 
(vi->mergeable_rx_bufs) - skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit); + skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit, + rbytes); else if (vi->big_packets) - skb = receive_big(dev, vi, rq, buf, len); + skb = receive_big(dev, vi, rq, buf, len, rbytes); else - skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit); + skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, rbytes); if (unlikely(!skb)) - return 0; + return; hdr = skb_vnet_hdr(skb); - ret = skb->len; - if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -994,12 +1000,11 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, ntohs(skb->protocol), skb->len, skb->pkt_type); napi_gro_receive(&rq->napi, skb); - return ret; + return; frame_err: dev->stats.rx_frame_errors++; dev_kfree_skb(skb); - return 0; } /* Unlike mergeable buffers, all buffers are allocated to the @@ -1249,13 +1254,13 @@ static int virtnet_receive(struct receive_queue *rq, int budget, while (received < budget && (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) { - bytes += receive_buf(vi, rq, buf, len, ctx, xdp_xmit); + receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &bytes); received++; } } else { while (received < budget && (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { - bytes += receive_buf(vi, rq, buf, len, NULL, xdp_xmit); + receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &bytes); received++; } } diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 90a4ad9..b3a1b6f 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -1362,7 +1362,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ case 0x001: printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name); break; - case 0x010: + case 0x002: printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name); break; default: diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 45928b5..4fffa69 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -1785,7 +1785,8 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo) fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY; fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM; fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL; - fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus); + /* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */ + fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1; fwreq->bus_nr = devinfo->pdev->bus->number; return fwreq; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index e20c30b..c8ea63d 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -178,6 +178,17 @@ const struct iwl_cfg iwl9260_2ac_cfg = { .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, }; +const struct iwl_cfg iwl9260_killer_2ac_cfg = { + .name = "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)", + .fw_name_pre = IWL9260A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + const struct iwl_cfg iwl9270_2ac_cfg = { .name = "Intel(R) Dual Band 
Wireless AC 9270", .fw_name_pre = IWL9260A_FW_PRE, @@ -267,6 +278,34 @@ const struct iwl_cfg iwl9560_2ac_cfg_soc = { .soc_latency = 5000, }; +const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = { + .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, +}; + +const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = { + .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, +}; + const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = { .name = "Intel(R) Dual Band Wireless AC 9460", .fw_name_pre = IWL9000A_FW_PRE, @@ -327,6 +366,36 @@ const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = { .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK }; +const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = { + .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, + .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK +}; + +const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk = { + .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, + .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK +}; + MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index c503b26..84a8168 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -551,6 +551,7 @@ extern const struct iwl_cfg iwl8275_2ac_cfg; extern const struct iwl_cfg iwl4165_2ac_cfg; extern const struct iwl_cfg iwl9160_2ac_cfg; extern const struct iwl_cfg iwl9260_2ac_cfg; +extern const struct iwl_cfg iwl9260_killer_2ac_cfg; extern const struct iwl_cfg iwl9270_2ac_cfg; extern const struct iwl_cfg iwl9460_2ac_cfg; extern const struct iwl_cfg iwl9560_2ac_cfg; @@ -558,10 +559,14 @@ extern const struct iwl_cfg iwl9460_2ac_cfg_soc; extern const struct iwl_cfg iwl9461_2ac_cfg_soc; extern const struct iwl_cfg iwl9462_2ac_cfg_soc; extern const struct iwl_cfg iwl9560_2ac_cfg_soc; 
+extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc; +extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc; extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk; +extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk; +extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl22000_2ac_cfg_hr; extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb; extern const struct iwl_cfg iwl22000_2ac_cfg_jf; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 38234bd..8520523 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -545,6 +545,9 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x1550, iwl9260_killer_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)}, @@ -554,6 +557,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, @@ -578,6 +582,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)}, @@ -604,6 +610,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_cfg_soc)}, @@ -630,6 +638,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x31DC, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x31DC, 0x1030, iwl9560_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x31DC, 0x1551, iwl9560_killer_s_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x1552, iwl9560_killer_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)}, @@ -656,6 +666,8 @@ static const struct 
pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x34F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)}, @@ -682,6 +694,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)}, @@ -708,6 +722,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)}, @@ -743,6 +759,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x9DF0, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)}, @@ -771,6 +789,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)}, @@ -797,6 +817,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0xA370, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0xA370, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)}, diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index a57daec..2d8812d 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -87,6 +87,7 @@ struct netfront_cb { /* IRQ name is queue name with "-tx" or "-rx" appended */ #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) +static DECLARE_WAIT_QUEUE_HEAD(module_load_q); static DECLARE_WAIT_QUEUE_HEAD(module_unload_q); struct netfront_stats { 
@@ -1330,6 +1331,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) netif_carrier_off(netdev); xenbus_switch_state(dev, XenbusStateInitialising); + wait_event(module_load_q, + xenbus_read_driver_state(dev->otherend) != + XenbusStateClosed && + xenbus_read_driver_state(dev->otherend) != + XenbusStateUnknown); return netdev; exit: diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 35b7fc8..5cb40b2 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -330,7 +330,7 @@ void pci_bus_add_device(struct pci_dev *dev) return; } - dev->is_added = 1; + pci_dev_assign_added(dev, true); } EXPORT_SYMBOL_GPL(pci_bus_add_device); @@ -347,14 +347,14 @@ void pci_bus_add_devices(const struct pci_bus *bus) list_for_each_entry(dev, &bus->devices, bus_list) { /* Skip already-added devices */ - if (dev->is_added) + if (pci_dev_is_added(dev)) continue; pci_bus_add_device(dev); } list_for_each_entry(dev, &bus->devices, bus_list) { /* Skip if device attach failed */ - if (!dev->is_added) + if (!pci_dev_is_added(dev)) continue; child = dev->subordinate; if (child) diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c index 4d6c20e..cf0aa7c 100644 --- a/drivers/pci/controller/pcie-mobiveil.c +++ b/drivers/pci/controller/pcie-mobiveil.c @@ -107,7 +107,7 @@ #define CFG_WINDOW_TYPE 0 #define IO_WINDOW_TYPE 1 #define MEM_WINDOW_TYPE 2 -#define IB_WIN_SIZE (256 * 1024 * 1024 * 1024) +#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024) #define MAX_PIO_WINDOWS 8 /* Parameters for the waiting for link up routine */ diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 3a17b29..ef0b1b6 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -509,7 +509,7 @@ static void enable_slot(struct acpiphp_slot *slot) list_for_each_entry(dev, &bus->devices, bus_list) { /* Assume that newly added devices are powered on already. 
*/ - if (!dev->is_added) + if (!pci_dev_is_added(dev)) dev->current_state = PCI_D0; } diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 882f1f9..0881725 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -288,6 +288,7 @@ struct pci_sriov { /* pci_dev priv_flags */ #define PCI_DEV_DISCONNECTED 0 +#define PCI_DEV_ADDED 1 static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused) { @@ -300,6 +301,16 @@ static inline bool pci_dev_is_disconnected(const struct pci_dev *dev) return test_bit(PCI_DEV_DISCONNECTED, &dev->priv_flags); } +static inline void pci_dev_assign_added(struct pci_dev *dev, bool added) +{ + assign_bit(PCI_DEV_ADDED, &dev->priv_flags, added); +} + +static inline bool pci_dev_is_added(const struct pci_dev *dev) +{ + return test_bit(PCI_DEV_ADDED, &dev->priv_flags); +} + #ifdef CONFIG_PCI_ATS void pci_restore_ats_state(struct pci_dev *dev); #else diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index ac876e3..611adcd 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -2433,13 +2433,13 @@ int pci_scan_slot(struct pci_bus *bus, int devfn) dev = pci_scan_single_device(bus, devfn); if (!dev) return 0; - if (!dev->is_added) + if (!pci_dev_is_added(dev)) nr++; for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) { dev = pci_scan_single_device(bus, devfn + fn); if (dev) { - if (!dev->is_added) + if (!pci_dev_is_added(dev)) nr++; dev->multifunction = 1; } diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index 6f072ea..5e3d0dc 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -19,11 +19,12 @@ static void pci_stop_dev(struct pci_dev *dev) { pci_pme_active(dev, false); - if (dev->is_added) { + if (pci_dev_is_added(dev)) { device_release_driver(&dev->dev); pci_proc_detach_device(dev); pci_remove_sysfs_dev_files(dev); - dev->is_added = 0; + + pci_dev_assign_added(dev, false); } if (dev->bus->self) diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index d609383..c972cc2 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -284,11 +284,11 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) */ if (opcode != ISCSI_OP_SCSI_DATA_OUT) { iscsi_conn_printk(KERN_INFO, conn, - "task [op %x/%x itt " + "task [op %x itt " "0x%x/0x%x] " "rejected.\n", - task->hdr->opcode, opcode, - task->itt, task->hdr_itt); + opcode, task->itt, + task->hdr_itt); return -EACCES; } /* @@ -297,10 +297,10 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) */ if (conn->session->fast_abort) { iscsi_conn_printk(KERN_INFO, conn, - "task [op %x/%x itt " + "task [op %x itt " "0x%x/0x%x] fast abort.\n", - task->hdr->opcode, opcode, - task->itt, task->hdr_itt); + opcode, task->itt, + task->hdr_itt); return -EACCES; } break; diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 89a4999..c873156 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2141,6 +2141,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) msleep(1000); qla24xx_disable_vp(vha); + qla2x00_wait_for_sess_deletion(vha); vha->flags.delete_progress = 1; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index f68eb60..2660a48 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -214,6 +214,7 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *, int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *); int qla24xx_async_abort_cmd(srb_t *); int 
qla24xx_post_relogin_work(struct scsi_qla_host *vha); +void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *); /* * Global Functions in qla_mid.c source file. diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 2c35b0b..7a37440 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -3708,6 +3708,10 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) return rval; done_free_sp: + spin_lock_irqsave(&vha->hw->vport_slock, flags); + list_del(&sp->elem); + spin_unlock_irqrestore(&vha->hw->vport_slock, flags); + if (sp->u.iocb_cmd.u.ctarg.req) { dma_free_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index db0e327..1b19b954 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1489,11 +1489,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, wait_for_completion(&tm_iocb->u.tmf.comp); - rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ? - QLA_SUCCESS : QLA_FUNCTION_FAILED; + rval = tm_iocb->u.tmf.data; - if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) { - ql_dbg(ql_dbg_taskm, vha, 0x8030, + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x8030, "TM IOCB failed (%x).\n", rval); } diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 37ae0f6..59fd5a9 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -222,6 +222,8 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag) sp->fcport = fcport; sp->iocbs = 1; sp->vha = qpair->vha; + INIT_LIST_HEAD(&sp->elem); + done: if (!sp) QLA_QPAIR_MARK_NOT_BUSY(qpair); diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 9fa5a25..7756106 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -631,6 +631,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) unsigned long flags; fc_port_t *fcport = NULL; + if (!vha->hw->flags.fw_started) + return; + /* Setup to process RIO completion. 
*/ handle_cnt = 0; if (IS_CNA_CAPABLE(ha)) diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 7e875f5..f0ec13d 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -4220,6 +4220,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; + if (!ha->flags.fw_started) + return QLA_SUCCESS; + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, "Entered %s.\n", __func__); @@ -4289,6 +4292,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; + if (!ha->flags.fw_started) + return QLA_SUCCESS; + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, "Entered %s.\n", __func__); diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index f6f0a75..aa727d0 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -152,11 +152,18 @@ int qla24xx_disable_vp(scsi_qla_host_t *vha) { unsigned long flags; - int ret; + int ret = QLA_SUCCESS; + fc_port_t *fcport; + + if (vha->hw->flags.fw_started) + ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); - ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); + list_for_each_entry(fcport, &vha->vp_fcports, list) + fcport->logout_on_delete = 0; + + qla2x00_mark_all_devices_lost(vha, 0); /* Remove port id from vp target map */ spin_lock_irqsave(&vha->hw->hardware_lock, flags); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 9f309e5..1fbd16c 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -303,6 +303,7 @@ static void qla2x00_free_device(scsi_qla_host_t *); static int qla2xxx_map_queues(struct Scsi_Host *shost); static void qla2x00_destroy_deferred_work(struct qla_hw_data *); + struct scsi_host_template qla2xxx_driver_template = { .module = THIS_MODULE, .name = QLA2XXX_DRIVER_NAME, @@ -1147,7 +1148,7 @@ static inline int test_fcport_count(scsi_qla_host_t *vha) * qla2x00_wait_for_sess_deletion can only be called from remove_one. * it has dependency on UNLOADING flag to stop device discovery */ -static void +void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha) { qla2x00_mark_all_devices_lost(vha, 0); @@ -3603,6 +3604,8 @@ qla2x00_remove_one(struct pci_dev *pdev) base_vha = pci_get_drvdata(pdev); ha = base_vha->hw; + ql_log(ql_log_info, base_vha, 0xb079, + "Removing driver\n"); /* Indicate device removal to prevent future board_disable and wait * until any pending board_disable has completed. 
*/ @@ -3625,6 +3628,21 @@ qla2x00_remove_one(struct pci_dev *pdev) } qla2x00_wait_for_hba_ready(base_vha); + if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) { + if (ha->flags.fw_started) + qla2x00_abort_isp_cleanup(base_vha); + } else if (!IS_QLAFX00(ha)) { + if (IS_QLA8031(ha)) { + ql_dbg(ql_dbg_p3p, base_vha, 0xb07e, + "Clearing fcoe driver presence.\n"); + if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS) + ql_dbg(ql_dbg_p3p, base_vha, 0xb079, + "Error while clearing DRV-Presence.\n"); + } + + qla2x00_try_to_stop_firmware(base_vha); + } + qla2x00_wait_for_sess_deletion(base_vha); /* @@ -3648,14 +3666,6 @@ qla2x00_remove_one(struct pci_dev *pdev) qla2x00_delete_all_vps(ha, base_vha); - if (IS_QLA8031(ha)) { - ql_dbg(ql_dbg_p3p, base_vha, 0xb07e, - "Clearing fcoe driver presence.\n"); - if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS) - ql_dbg(ql_dbg_p3p, base_vha, 0xb079, - "Error while clearing DRV-Presence.\n"); - } - qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); qla2x00_dfs_remove(base_vha); @@ -3715,24 +3725,6 @@ qla2x00_free_device(scsi_qla_host_t *vha) qla2x00_stop_timer(vha); qla25xx_delete_queues(vha); - - if (ha->flags.fce_enabled) - qla2x00_disable_fce_trace(vha, NULL, NULL); - - if (ha->eft) - qla2x00_disable_eft_trace(vha); - - if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) { - if (ha->flags.fw_started) - qla2x00_abort_isp_cleanup(vha); - } else { - if (ha->flags.fw_started) { - /* Stop currently executing firmware. */ - qla2x00_try_to_stop_firmware(vha); - ha->flags.fw_started = 0; - } - } - vha->flags.online = 0; /* turn-off interrupts on the card */ @@ -6028,8 +6020,9 @@ qla2x00_do_dpc(void *data) set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); } - if (test_and_clear_bit(ISP_ABORT_NEEDED, - &base_vha->dpc_flags)) { + if (test_and_clear_bit + (ISP_ABORT_NEEDED, &base_vha->dpc_flags) && + !test_bit(UNLOADING, &base_vha->dpc_flags)) { ql_dbg(ql_dbg_dpc, base_vha, 0x4007, "ISP abort scheduled.\n"); diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 04458eb..4499c78 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -1880,6 +1880,9 @@ qla24xx_beacon_off(struct scsi_qla_host *vha) if (IS_P3P_TYPE(ha)) return QLA_SUCCESS; + if (!ha->flags.fw_started) + return QLA_SUCCESS; + ha->beacon_blink_led = 0; if (IS_QLA2031(ha) || IS_QLA27XX(ha)) diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index cd2fdac..ba9ba0e 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -1741,15 +1741,11 @@ sg_start_req(Sg_request *srp, unsigned char *cmd) * * With scsi-mq enabled, there are a fixed number of preallocated * requests equal in number to shost->can_queue. If all of the - * preallocated requests are already in use, then using GFP_ATOMIC with - * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL - * will cause blk_get_request() to sleep until an active command - * completes, freeing up a request. Neither option is ideal, but - * GFP_KERNEL is the better choice to prevent userspace from getting an - * unexpected EWOULDBLOCK. - * - * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually - * does not sleep except under memory pressure. + * preallocated requests are already in use, then blk_get_request() + * will sleep until an active command completes, freeing up a request. 
+ * Although waiting in an asynchronous interface is less than ideal, we + * do not want to use BLK_MQ_REQ_NOWAIT here because userspace might + * not expect an EWOULDBLOCK from this condition. */ rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0); @@ -2185,6 +2181,7 @@ sg_add_sfp(Sg_device * sdp) write_lock_irqsave(&sdp->sfd_lock, iflags); if (atomic_read(&sdp->detaching)) { write_unlock_irqrestore(&sdp->sfd_lock, iflags); + kfree(sfp); return ERR_PTR(-ENODEV); } list_add_tail(&sfp->sfd_siblings, &sdp->sfds); diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index a1a0025..d5d33e1 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -402,6 +402,8 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) fput(asma->file); goto out; } + } else { + vma_set_anonymous(vma); } if (vma->vm_file) diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c index 514986b..25eb389 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_target.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c @@ -652,6 +652,7 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk) struct iscsi_param *param; u32 mrdsl, mbl; u32 max_npdu, max_iso_npdu; + u32 max_iso_payload; if (conn->login->leading_connection) { param = iscsi_find_param_from_key(MAXBURSTLENGTH, @@ -670,8 +671,10 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk) mrdsl = conn_ops->MaxRecvDataSegmentLength; max_npdu = mbl / mrdsl; - max_iso_npdu = CXGBIT_MAX_ISO_PAYLOAD / - (ISCSI_HDR_LEN + mrdsl + + max_iso_payload = rounddown(CXGBIT_MAX_ISO_PAYLOAD, csk->emss); + + max_iso_npdu = max_iso_payload / + (ISCSI_HDR_LEN + mrdsl + cxgbit_digest_len[csk->submode]); csk->max_iso_npdu = min(max_npdu, max_iso_npdu); @@ -741,6 +744,9 @@ static int cxgbit_set_params(struct iscsi_conn *conn) if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl) conn_ops->MaxRecvDataSegmentLength = cdev->mdsl; + if (cxgbit_set_digest(csk)) + return -1; + if (conn->login->leading_connection) { param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL, conn->param_list); @@ -764,7 +770,7 @@ static int cxgbit_set_params(struct iscsi_conn *conn) if (is_t5(cdev->lldi.adapter_type)) goto enable_ddp; else - goto enable_digest; + return 0; } if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) { @@ -781,10 +787,6 @@ enable_ddp: } } -enable_digest: - if (cxgbit_set_digest(csk)) - return -1; - return 0; } diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 6b237e3..3988c09 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -513,7 +513,9 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info, tell_host(vb, vb->inflate_vq); /* balloon's page migration 2nd step -- deflate "page" */ + spin_lock_irqsave(&vb_dev_info->pages_lock, flags); balloon_page_delete(page); + spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags); vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; set_page_pfns(vb, vb->pfns, page); tell_host(vb, vb->deflate_vq); @@ -1443,7 +1443,7 @@ iomap_bmap(struct address_space *mapping, sector_t bno, const struct iomap_ops *ops) { struct inode *inode = mapping->host; - loff_t pos = bno >> inode->i_blkbits; + loff_t pos = bno << inode->i_blkbits; unsigned blocksize = i_blocksize(inode); if (filemap_write_and_wait(mapping)) diff --git a/fs/jfs/jfs_dinode.h b/fs/jfs/jfs_dinode.h index 395c4c0..1682a87 100644 --- a/fs/jfs/jfs_dinode.h +++ 
b/fs/jfs/jfs_dinode.h @@ -115,6 +115,13 @@ struct dinode { dxd_t _dxd; /* 16: */ union { __le32 _rdev; /* 4: */ + /* + * The fast symlink area + * is expected to overflow + * into _inlineea when + * needed (which will clear + * INLINEEA). + */ u8 _fastsymlink[128]; } _u; u8 _inlineea[128]; diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h index 1f26d19..9940a1e 100644 --- a/fs/jfs/jfs_incore.h +++ b/fs/jfs/jfs_incore.h @@ -87,6 +87,7 @@ struct jfs_inode_info { struct { unchar _unused[16]; /* 16: */ dxd_t _dxd; /* 16: */ + /* _inline may overflow into _inline_ea when needed */ unchar _inline[128]; /* 128: inline symlink */ /* _inline_ea may overlay the last part of * file._xtroot if maxentry = XTROOTINITSLOT diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 1b9264f..f085714 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c @@ -967,8 +967,7 @@ static int __init init_jfs_fs(void) jfs_inode_cachep = kmem_cache_create_usercopy("jfs_ip", sizeof(struct jfs_inode_info), 0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT, - offsetof(struct jfs_inode_info, i_inline), - sizeof_field(struct jfs_inode_info, i_inline), + offsetof(struct jfs_inode_info, i_inline), IDATASIZE, init_once); if (jfs_inode_cachep == NULL) return -ENOMEM; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 6dd1468..f6c4ccd 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -6466,34 +6466,34 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) if (data->arg.new_lock && !data->cancelled) { data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS); if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) - break; + goto out_restart; } - if (data->arg.new_lock_owner != 0) { nfs_confirm_seqid(&lsp->ls_seqid, 0); nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid); set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); - goto out_done; - } else if (nfs4_update_lock_stateid(lsp, &data->res.stateid)) - goto out_done; - + } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) + goto out_restart; break; case -NFS4ERR_BAD_STATEID: case -NFS4ERR_OLD_STATEID: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_EXPIRED: if (data->arg.new_lock_owner != 0) { - if (nfs4_stateid_match(&data->arg.open_stateid, + if (!nfs4_stateid_match(&data->arg.open_stateid, &lsp->ls_state->open_stateid)) - goto out_done; - } else if (nfs4_stateid_match(&data->arg.lock_stateid, + goto out_restart; + } else if (!nfs4_stateid_match(&data->arg.lock_stateid, &lsp->ls_stateid)) - goto out_done; + goto out_restart; } - if (!data->cancelled) - rpc_restart_call_prepare(task); out_done: dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status); + return; +out_restart: + if (!data->cancelled) + rpc_restart_call_prepare(task); + goto out_done; } static void nfs4_lock_release(void *calldata) @@ -6502,7 +6502,7 @@ static void nfs4_lock_release(void *calldata) dprintk("%s: begin!\n", __func__); nfs_free_seqid(data->arg.open_seqid); - if (data->cancelled) { + if (data->cancelled && data->rpc_status == 0) { struct rpc_task *task; task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, data->arg.lock_seqid); diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c index 2751476..f098b9f 100644 --- a/fs/squashfs/block.c +++ b/fs/squashfs/block.c @@ -167,6 +167,8 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length, } if (compressed) { + if (!msblk->stream) + goto read_failure; length = squashfs_decompress(msblk, bh, b, offset, length, output); if (length < 0) diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c index fcff2e0..f1c1430 
100644 --- a/fs/squashfs/file.c +++ b/fs/squashfs/file.c @@ -374,13 +374,29 @@ static int read_blocklist(struct inode *inode, int index, u64 *block) return squashfs_block_size(size); } +void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail) +{ + int copied; + void *pageaddr; + + pageaddr = kmap_atomic(page); + copied = squashfs_copy_data(pageaddr, buffer, offset, avail); + memset(pageaddr + copied, 0, PAGE_SIZE - copied); + kunmap_atomic(pageaddr); + + flush_dcache_page(page); + if (copied == avail) + SetPageUptodate(page); + else + SetPageError(page); +} + /* Copy data into page cache */ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, int bytes, int offset) { struct inode *inode = page->mapping->host; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; - void *pageaddr; int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; int start_index = page->index & ~mask, end_index = start_index | mask; @@ -406,12 +422,7 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, if (PageUptodate(push_page)) goto skip_page; - pageaddr = kmap_atomic(push_page); - squashfs_copy_data(pageaddr, buffer, offset, avail); - memset(pageaddr + avail, 0, PAGE_SIZE - avail); - kunmap_atomic(pageaddr); - flush_dcache_page(push_page); - SetPageUptodate(push_page); + squashfs_fill_page(push_page, buffer, offset, avail); skip_page: unlock_page(push_page); if (i != page->index) @@ -420,10 +431,9 @@ skip_page: } /* Read datablock stored packed inside a fragment (tail-end packed block) */ -static int squashfs_readpage_fragment(struct page *page) +static int squashfs_readpage_fragment(struct page *page, int expected) { struct inode *inode = page->mapping->host; - struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb, squashfs_i(inode)->fragment_block, squashfs_i(inode)->fragment_size); @@ -434,23 +444,16 @@ static int squashfs_readpage_fragment(struct page *page) squashfs_i(inode)->fragment_block, squashfs_i(inode)->fragment_size); else - squashfs_copy_cache(page, buffer, i_size_read(inode) & - (msblk->block_size - 1), + squashfs_copy_cache(page, buffer, expected, squashfs_i(inode)->fragment_offset); squashfs_cache_put(buffer); return res; } -static int squashfs_readpage_sparse(struct page *page, int index, int file_end) +static int squashfs_readpage_sparse(struct page *page, int expected) { - struct inode *inode = page->mapping->host; - struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; - int bytes = index == file_end ? - (i_size_read(inode) & (msblk->block_size - 1)) : - msblk->block_size; - - squashfs_copy_cache(page, NULL, bytes, 0); + squashfs_copy_cache(page, NULL, expected, 0); return 0; } @@ -460,6 +463,9 @@ static int squashfs_readpage(struct file *file, struct page *page) struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; int index = page->index >> (msblk->block_log - PAGE_SHIFT); int file_end = i_size_read(inode) >> msblk->block_log; + int expected = index == file_end ? 
+ (i_size_read(inode) & (msblk->block_size - 1)) : + msblk->block_size; int res; void *pageaddr; @@ -478,11 +484,11 @@ static int squashfs_readpage(struct file *file, struct page *page) goto error_out; if (bsize == 0) - res = squashfs_readpage_sparse(page, index, file_end); + res = squashfs_readpage_sparse(page, expected); else - res = squashfs_readpage_block(page, block, bsize); + res = squashfs_readpage_block(page, block, bsize, expected); } else - res = squashfs_readpage_fragment(page); + res = squashfs_readpage_fragment(page, expected); if (!res) return 0; diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c index f2310d2..a9ba8d9 100644 --- a/fs/squashfs/file_cache.c +++ b/fs/squashfs/file_cache.c @@ -20,7 +20,7 @@ #include "squashfs.h" /* Read separately compressed datablock and memcopy into page cache */ -int squashfs_readpage_block(struct page *page, u64 block, int bsize) +int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected) { struct inode *i = page->mapping->host; struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb, @@ -31,7 +31,7 @@ int squashfs_readpage_block(struct page *page, u64 block, int bsize) ERROR("Unable to read page, block %llx, size %x\n", block, bsize); else - squashfs_copy_cache(page, buffer, buffer->length, 0); + squashfs_copy_cache(page, buffer, expected, 0); squashfs_cache_put(buffer); return res; diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c index cb485d8..80db1b8 100644 --- a/fs/squashfs/file_direct.c +++ b/fs/squashfs/file_direct.c @@ -21,10 +21,11 @@ #include "page_actor.h" static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, - int pages, struct page **page); + int pages, struct page **page, int bytes); /* Read separately compressed datablock directly into page cache */ -int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) +int squashfs_readpage_block(struct page *target_page, u64 block, int bsize, + int expected) { struct inode *inode = target_page->mapping->host; @@ -83,7 +84,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) * using an intermediate buffer. 
*/ res = squashfs_read_cache(target_page, block, bsize, pages, - page); + page, expected); if (res < 0) goto mark_errored; @@ -95,6 +96,11 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) if (res < 0) goto mark_errored; + if (res != expected) { + res = -EIO; + goto mark_errored; + } + /* Last page may have trailing bytes not filled */ bytes = res % PAGE_SIZE; if (bytes) { @@ -138,13 +144,12 @@ out: static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, - int pages, struct page **page) + int pages, struct page **page, int bytes) { struct inode *i = target_page->mapping->host; struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb, block, bsize); - int bytes = buffer->length, res = buffer->error, n, offset = 0; - void *pageaddr; + int res = buffer->error, n, offset = 0; if (res) { ERROR("Unable to read page, block %llx, size %x\n", block, @@ -159,12 +164,7 @@ static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, if (page[n] == NULL) continue; - pageaddr = kmap_atomic(page[n]); - squashfs_copy_data(pageaddr, buffer, offset, avail); - memset(pageaddr + avail, 0, PAGE_SIZE - avail); - kunmap_atomic(pageaddr); - flush_dcache_page(page[n]); - SetPageUptodate(page[n]); + squashfs_fill_page(page[n], buffer, offset, avail); unlock_page(page[n]); if (page[n] != target_page) put_page(page[n]); diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c index 86ad9a4..0681fea 100644 --- a/fs/squashfs/fragment.c +++ b/fs/squashfs/fragment.c @@ -49,11 +49,16 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment, u64 *fragment_block) { struct squashfs_sb_info *msblk = sb->s_fs_info; - int block = SQUASHFS_FRAGMENT_INDEX(fragment); - int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment); - u64 start_block = le64_to_cpu(msblk->fragment_index[block]); + int block, offset, size; struct squashfs_fragment_entry fragment_entry; - int size; + u64 start_block; + + if (fragment >= msblk->fragments) + return -EIO; + block = SQUASHFS_FRAGMENT_INDEX(fragment); + offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment); + + start_block = le64_to_cpu(msblk->fragment_index[block]); size = squashfs_read_metadata(sb, &fragment_entry, &start_block, &offset, sizeof(fragment_entry)); diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h index 887d6d2..f89f8a7 100644 --- a/fs/squashfs/squashfs.h +++ b/fs/squashfs/squashfs.h @@ -67,11 +67,12 @@ extern __le64 *squashfs_read_fragment_index_table(struct super_block *, u64, u64, unsigned int); /* file.c */ +void squashfs_fill_page(struct page *, struct squashfs_cache_entry *, int, int); void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int, int); /* file_xxx.c */ -extern int squashfs_readpage_block(struct page *, u64, int); +extern int squashfs_readpage_block(struct page *, u64, int, int); /* id.c */ extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *); diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h index 1da565c..ef69c31 100644 --- a/fs/squashfs/squashfs_fs_sb.h +++ b/fs/squashfs/squashfs_fs_sb.h @@ -75,6 +75,7 @@ struct squashfs_sb_info { unsigned short block_log; long long bytes_used; unsigned int inodes; + unsigned int fragments; int xattr_ids; }; #endif diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index 8a73b97..40e6573 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c @@ -175,6 +175,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) 
msblk->inode_table = le64_to_cpu(sblk->inode_table_start); msblk->directory_table = le64_to_cpu(sblk->directory_table_start); msblk->inodes = le32_to_cpu(sblk->inodes); + msblk->fragments = le32_to_cpu(sblk->fragments); flags = le16_to_cpu(sblk->flags); TRACE("Found valid superblock on %pg\n", sb->s_bdev); @@ -185,7 +186,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) TRACE("Filesystem size %lld bytes\n", msblk->bytes_used); TRACE("Block size %d\n", msblk->block_size); TRACE("Number of inodes %d\n", msblk->inodes); - TRACE("Number of fragments %d\n", le32_to_cpu(sblk->fragments)); + TRACE("Number of fragments %d\n", msblk->fragments); TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids)); TRACE("sblk->inode_table_start %llx\n", msblk->inode_table); TRACE("sblk->directory_table_start %llx\n", msblk->directory_table); @@ -272,7 +273,7 @@ allocate_id_index_table: sb->s_export_op = &squashfs_export_ops; handle_fragments: - fragments = le32_to_cpu(sblk->fragments); + fragments = msblk->fragments; if (fragments == 0) goto check_directory_table; diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 594d192..bad9cea 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -633,8 +633,10 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, /* the various vma->vm_userfaultfd_ctx still points to it */ down_write(&mm->mmap_sem); for (vma = mm->mmap; vma; vma = vma->vm_next) - if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) + if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) { vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; + vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING); + } up_write(&mm->mmap_sem); userfaultfd_ctx_put(release_new_ctx); diff --git a/include/linux/mm.h b/include/linux/mm.h index 7ba6d35..68a5121 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -466,6 +466,9 @@ static inline void vma_set_anonymous(struct vm_area_struct *vma) vma->vm_ops = NULL; } +/* flush_tlb_range() takes a vma, not a mm, and can care about flags */ +#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) } + struct mmu_gather; struct inode; diff --git a/include/linux/pci.h b/include/linux/pci.h index abd5d5e..c133ccf 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -368,7 +368,6 @@ struct pci_dev { unsigned int transparent:1; /* Subtractive decode bridge */ unsigned int multifunction:1; /* Multi-function device */ - unsigned int is_added:1; unsigned int is_busmaster:1; /* Is busmaster */ unsigned int no_msi:1; /* May not use MSI */ unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */ @@ -427,6 +427,17 @@ static int shm_split(struct vm_area_struct *vma, unsigned long addr) return 0; } +static unsigned long shm_pagesize(struct vm_area_struct *vma) +{ + struct file *file = vma->vm_file; + struct shm_file_data *sfd = shm_file_data(file); + + if (sfd->vm_ops->pagesize) + return sfd->vm_ops->pagesize(vma); + + return PAGE_SIZE; +} + #ifdef CONFIG_NUMA static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new) { @@ -554,6 +565,7 @@ static const struct vm_operations_struct shm_vm_ops = { .close = shm_close, /* callback for when the vm-area is released */ .fault = shm_fault, .split = shm_split, + .pagesize = shm_pagesize, #if defined(CONFIG_NUMA) .set_policy = shm_set_policy, .get_policy = shm_get_policy, diff --git a/kernel/auditsc.c b/kernel/auditsc.c index ceb1c45..80d672a 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -1279,8 +1279,12 @@ static void show_special(struct audit_context 
*context, int *call_panic) break; case AUDIT_KERN_MODULE: audit_log_format(ab, "name="); - audit_log_untrustedstring(ab, context->module.name); - kfree(context->module.name); + if (context->module.name) { + audit_log_untrustedstring(ab, context->module.name); + kfree(context->module.name); + } else + audit_log_format(ab, "(null)"); + break; } audit_log_end(ab); @@ -2411,8 +2415,9 @@ void __audit_log_kern_module(char *name) { struct audit_context *context = audit_context(); - context->module.name = kmalloc(strlen(name) + 1, GFP_KERNEL); - strcpy(context->module.name, name); + context->module.name = kstrdup(name, GFP_KERNEL); + if (!context->module.name) + audit_log_lost("out of memory in __audit_log_kern_module"); context->type = AUDIT_KERN_MODULE; } diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 544e58f..2aa55d030 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -378,7 +378,7 @@ static int array_map_check_btf(const struct bpf_map *map, const struct btf *btf, return -EINVAL; value_type = btf_type_id_size(btf, &btf_value_id, &value_size); - if (!value_type || value_size > map->value_size) + if (!value_type || value_size != map->value_size) return -EINVAL; return 0; diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 9704934..2590700 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -1519,9 +1519,9 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env, { bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION; const struct btf_member *member; + u32 meta_needed, last_offset; struct btf *btf = env->btf; u32 struct_size = t->size; - u32 meta_needed; u16 i; meta_needed = btf_type_vlen(t) * sizeof(*member); @@ -1534,6 +1534,7 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env, btf_verifier_log_type(env, t, NULL); + last_offset = 0; for_each_member(i, t, member) { if (!btf_name_offset_valid(btf, member->name_off)) { btf_verifier_log_member(env, t, member, @@ -1555,6 +1556,16 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env, return -EINVAL; } + /* + * ">" instead of ">=" because the last member could be + * "char a[0];" + */ + if (last_offset > member->offset) { + btf_verifier_log_member(env, t, member, + "Invalid member bits_offset"); + return -EINVAL; + } + if (BITS_ROUNDUP_BYTES(member->offset) > struct_size) { btf_verifier_log_member(env, t, member, "Memmber bits_offset exceeds its struct size"); @@ -1562,6 +1573,7 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env, } btf_verifier_log_member(env, t, member, NULL); + last_offset = member->offset; } return meta_needed; diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index daeabd7..9a8b7ba 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -1068,6 +1068,13 @@ static int irq_setup_forced_threading(struct irqaction *new) if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) return 0; + /* + * No further action required for interrupts which are requested as + * threaded interrupts already + */ + if (new->handler == irq_default_primary_handler) + return 0; + new->flags |= IRQF_ONESHOT; /* @@ -1075,7 +1082,7 @@ static int irq_setup_forced_threading(struct irqaction *new) * thread handler. We force thread them as well by creating a * secondary action. 
*/ - if (new->handler != irq_default_primary_handler && new->thread_fn) { + if (new->handler && new->thread_fn) { /* Allocate the secondary action */ new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL); if (!new->secondary) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 039ddbc..3103099 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3167,6 +3167,13 @@ static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf) return 0; } +/* + * When a new function is introduced to vm_operations_struct and added + * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops. + * This is because under System V memory model, mappings created via + * shmget/shmat with "huge page" specified are backed by hugetlbfs files, + * their original vm_ops are overwritten with shm_vm_ops. + */ const struct vm_operations_struct hugetlb_vm_ops = { .fault = hugetlb_vm_op_fault, .open = hugetlb_vm_op_open, diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 8c0280b..b2173f7 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4037,6 +4037,14 @@ static struct cftype mem_cgroup_legacy_files[] = { static DEFINE_IDR(mem_cgroup_idr); +static void mem_cgroup_id_remove(struct mem_cgroup *memcg) +{ + if (memcg->id.id > 0) { + idr_remove(&mem_cgroup_idr, memcg->id.id); + memcg->id.id = 0; + } +} + static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) { VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0); @@ -4047,8 +4055,7 @@ static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) { VM_BUG_ON(atomic_read(&memcg->id.ref) < n); if (atomic_sub_and_test(n, &memcg->id.ref)) { - idr_remove(&mem_cgroup_idr, memcg->id.id); - memcg->id.id = 0; + mem_cgroup_id_remove(memcg); /* Memcg ID pins CSS */ css_put(&memcg->css); @@ -4185,8 +4192,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void) idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); return memcg; fail: - if (memcg->id.id > 0) - idr_remove(&mem_cgroup_idr, memcg->id.id); + mem_cgroup_id_remove(memcg); __mem_cgroup_free(memcg); return NULL; } @@ -4245,6 +4251,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) return &memcg->css; fail: + mem_cgroup_id_remove(memcg); mem_cgroup_free(memcg); return ERR_PTR(-ENOMEM); } diff --git a/mm/memory.c b/mm/memory.c index 7206a63..dab1511 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1417,11 +1417,9 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, do { next = pmd_addr_end(addr, end); if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { - if (next - addr != HPAGE_PMD_SIZE) { - VM_BUG_ON_VMA(vma_is_anonymous(vma) && - !rwsem_is_locked(&tlb->mm->mmap_sem), vma); + if (next - addr != HPAGE_PMD_SIZE) __split_huge_pmd(vma, pmd, addr, false, NULL); - } else if (zap_huge_pmd(tlb, vma, pmd, addr)) + else if (zap_huge_pmd(tlb, vma, pmd, addr)) goto next; /* fall through */ } diff --git a/net/core/dev.c b/net/core/dev.c index a5aa1c7..559a912 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -7149,16 +7149,19 @@ int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len) dev->tx_queue_len = new_len; res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev); res = notifier_to_errno(res); - if (res) { - netdev_err(dev, - "refused to change device tx_queue_len\n"); - dev->tx_queue_len = orig_len; - return res; - } - return dev_qdisc_change_tx_queue_len(dev); + if (res) + goto err_rollback; + res = dev_qdisc_change_tx_queue_len(dev); + if (res) + goto err_rollback; } return 0; + +err_rollback: + netdev_err(dev, "refused to change 
device tx_queue_len\n"); + dev->tx_queue_len = orig_len; + return res; } /** diff --git a/net/core/filter.c b/net/core/filter.c index 06da770..9dfd145 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1712,24 +1712,26 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = { BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb, u32, offset, void *, to, u32, len, u32, start_header) { + u8 *end = skb_tail_pointer(skb); + u8 *net = skb_network_header(skb); + u8 *mac = skb_mac_header(skb); u8 *ptr; - if (unlikely(offset > 0xffff || len > skb_headlen(skb))) + if (unlikely(offset > 0xffff || len > (end - mac))) goto err_clear; switch (start_header) { case BPF_HDR_START_MAC: - ptr = skb_mac_header(skb) + offset; + ptr = mac + offset; break; case BPF_HDR_START_NET: - ptr = skb_network_header(skb) + offset; + ptr = net + offset; break; default: goto err_clear; } - if (likely(ptr >= skb_mac_header(skb) && - ptr + len <= skb_tail_pointer(skb))) { + if (likely(ptr >= mac && ptr + len <= end)) { memcpy(to, ptr, len); return 0; } diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c index e7e626f..e450985 100644 --- a/net/core/lwt_bpf.c +++ b/net/core/lwt_bpf.c @@ -217,7 +217,7 @@ static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog, if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME]) return -EINVAL; - prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_KERNEL); + prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC); if (!prog->name) return -ENOMEM; diff --git a/net/core/xdp.c b/net/core/xdp.c index 9d1f220..6771f18 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -345,7 +345,8 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct, rcu_read_lock(); /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */ xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); - xa->zc_alloc->free(xa->zc_alloc, handle); + if (!WARN_ON_ONCE(!xa)) + xa->zc_alloc->free(xa->zc_alloc, handle); rcu_read_unlock(); default: /* Not possible, checked in xdp_rxq_info_reg_mem_model() */ diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 1e3b6a6..732369c 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1248,6 +1248,9 @@ int dsa_slave_suspend(struct net_device *slave_dev) { struct dsa_port *dp = dsa_slave_to_port(slave_dev); + if (!netif_running(slave_dev)) + return 0; + netif_device_detach(slave_dev); rtnl_lock(); @@ -1261,6 +1264,9 @@ int dsa_slave_resume(struct net_device *slave_dev) { struct dsa_port *dp = dsa_slave_to_port(slave_dev); + if (!netif_running(slave_dev)) + return 0; + netif_device_attach(slave_dev); rtnl_lock(); diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index e46cdd3..2998b0e 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -292,19 +292,19 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb) return ip_hdr(skb)->daddr; in_dev = __in_dev_get_rcu(dev); - BUG_ON(!in_dev); net = dev_net(dev); scope = RT_SCOPE_UNIVERSE; if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) { + bool vmark = in_dev && IN_DEV_SRC_VMARK(in_dev); struct flowi4 fl4 = { .flowi4_iif = LOOPBACK_IFINDEX, .flowi4_oif = l3mdev_master_ifindex_rcu(dev), .daddr = ip_hdr(skb)->saddr, .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), .flowi4_scope = scope, - .flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0, + .flowi4_mark = vmark ? 
skb->mark : 0, }; if (!fib_lookup(net, &fl4, &res, 0)) return FIB_RES_PREFSRC(net, res); diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 28fef7d..75151be 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1387,7 +1387,8 @@ static void ip_mc_hash_remove(struct in_device *in_dev, /* * A socket has joined a multicast group on device dev. */ -void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, unsigned int mode) +static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, + unsigned int mode) { struct ip_mc_list *im; #ifdef CONFIG_IP_MULTICAST diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index 1e4cf3a..0d70608 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -157,9 +157,6 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf, { struct inet_frag_queue *q; - if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) - return NULL; - q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC); if (!q) return NULL; @@ -204,6 +201,9 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key) { struct inet_frag_queue *fq; + if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) + return NULL; + rcu_read_lock(); fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 8e9528e..d14d741 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -383,11 +383,16 @@ found: int i = end - next->ip_defrag_offset; /* overlap is 'i' bytes */ if (i < next->len) { + int delta = -next->truesize; + /* Eat head of the next overlapped fragment * and leave the loop. The next ones cannot overlap. */ if (!pskb_pull(next, i)) goto err; + delta += next->truesize; + if (delta) + add_frag_mem_limit(qp->q.net, delta); next->ip_defrag_offset += i; qp->q.meat -= i; if (next->ip_summed != CHECKSUM_UNNECESSARY) diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 58e2f47..4bfff3c 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -354,6 +354,10 @@ static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain) /* Reduce delayed ACKs by rounding up cwnd to the next even number. */ cwnd = (cwnd + 1) & ~1U; + /* Ensure gain cycling gets inflight above BDP even for small BDPs. */ + if (bbr->mode == BBR_PROBE_BW && gain > BBR_UNIT) + cwnd += 2; + return cwnd; } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 3bcd30a..f9dcb29 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -246,8 +246,15 @@ static void tcp_ecn_queue_cwr(struct tcp_sock *tp) static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb) { - if (tcp_hdr(skb)->cwr) + if (tcp_hdr(skb)->cwr) { tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; + + /* If the sender is telling us it has entered CWR, then its + * cwnd may be very low (even just 1 packet), so we should ACK + * immediately. 
+ */ + tcp_enter_quickack_mode((struct sock *)tp, 2); + } } static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 97513f3..88a7579 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -669,8 +669,10 @@ skip_cow: sg_init_table(sg, nfrags); ret = skb_to_sgvec(skb, sg, 0, skb->len); - if (unlikely(ret < 0)) + if (unlikely(ret < 0)) { + kfree(tmp); goto out; + } skb->ip_summed = CHECKSUM_NONE; diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index b7f28de..c72ae3a 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -480,10 +480,6 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) goto tx_err_dst_release; } - skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev))); - skb_dst_set(skb, dst); - skb->dev = skb_dst(skb)->dev; - mtu = dst_mtu(dst); if (!skb->ignore_df && skb->len > mtu) { skb_dst_update_pmtu(skb, mtu); @@ -498,9 +494,14 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) htonl(mtu)); } - return -EMSGSIZE; + err = -EMSGSIZE; + goto tx_err_dst_release; } + skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev))); + skb_dst_set(skb, dst); + skb->dev = skb_dst(skb)->dev; + err = dst_output(t->net, skb->sk, skb); if (net_xmit_eval(err) == 0) { struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index e398797..cf6cca2 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1201,13 +1201,18 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel, l2tp_session_get(sock_net(sk), tunnel, stats.session_id); - if (session && session->pwtype == L2TP_PWTYPE_PPP) { - err = pppol2tp_session_ioctl(session, cmd, - arg); + if (!session) { + err = -EBADR; + break; + } + if (session->pwtype != L2TP_PWTYPE_PPP) { l2tp_session_dec_refcount(session); - } else { err = -EBADR; + break; } + + err = pppol2tp_session_ioctl(session, cmd, arg); + l2tp_session_dec_refcount(session); break; } #ifdef CONFIG_XFRM diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 393573a..56704d9 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -63,6 +63,7 @@ #include <linux/hash.h> #include <linux/genetlink.h> #include <linux/net_namespace.h> +#include <linux/nospec.h> #include <net/net_namespace.h> #include <net/netns/generic.h> @@ -679,6 +680,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol, if (protocol < 0 || protocol >= MAX_LINKS) return -EPROTONOSUPPORT; + protocol = array_index_nospec(protocol, MAX_LINKS); netlink_lock_table(); #ifdef CONFIG_MODULES @@ -1009,6 +1011,11 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, return err; } + if (nlk->ngroups == 0) + groups = 0; + else if (nlk->ngroups < 8*sizeof(groups)) + groups &= (1UL << nlk->ngroups) - 1; + bound = nlk->bound; if (bound) { /* Ensure nlk->portid is up-to-date. */ diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c index b891a91..c038e02 100644 --- a/net/openvswitch/meter.c +++ b/net/openvswitch/meter.c @@ -211,6 +211,7 @@ static struct dp_meter *dp_meter_create(struct nlattr **a) if (!meter) return ERR_PTR(-ENOMEM); + meter->id = nla_get_u32(a[OVS_METER_ATTR_ID]); meter->used = div_u64(ktime_get_ns(), 1000 * 1000); meter->kbps = a[OVS_METER_ATTR_KBPS] ? 
1 : 0; meter->keep_stats = !a[OVS_METER_ATTR_CLEAR]; @@ -280,6 +281,10 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info) u32 meter_id; bool failed; + if (!a[OVS_METER_ATTR_ID]) { + return -ENODEV; + } + meter = dp_meter_create(a); if (IS_ERR_OR_NULL(meter)) return PTR_ERR(meter); @@ -298,11 +303,6 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info) goto exit_unlock; } - if (!a[OVS_METER_ATTR_ID]) { - err = -ENODEV; - goto exit_unlock; - } - meter_id = nla_get_u32(a[OVS_METER_ATTR_ID]); /* Cannot fail after this. */ diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c index 48332a6..d152e48 100644 --- a/net/rds/ib_frmr.c +++ b/net/rds/ib_frmr.c @@ -344,6 +344,11 @@ struct rds_ib_mr *rds_ib_reg_frmr(struct rds_ib_device *rds_ibdev, struct rds_ib_frmr *frmr; int ret; + if (!ic) { + /* TODO: Add FRWR support for RDS_GET_MR using proxy qp*/ + return ERR_PTR(-EOPNOTSUPP); + } + do { if (ibmr) rds_ib_free_frmr(ibmr, true); diff --git a/net/rds/ib_mr.h b/net/rds/ib_mr.h index 0ea4ab0..655f01d 100644 --- a/net/rds/ib_mr.h +++ b/net/rds/ib_mr.h @@ -115,7 +115,8 @@ void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo); void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *); void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, - struct rds_sock *rs, u32 *key_ret); + struct rds_sock *rs, u32 *key_ret, + struct rds_connection *conn); void rds_ib_sync_mr(void *trans_private, int dir); void rds_ib_free_mr(void *trans_private, int invalidate); void rds_ib_flush_mrs(void); diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c index e678699..2e49a40 100644 --- a/net/rds/ib_rdma.c +++ b/net/rds/ib_rdma.c @@ -537,11 +537,12 @@ void rds_ib_flush_mrs(void) } void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, - struct rds_sock *rs, u32 *key_ret) + struct rds_sock *rs, u32 *key_ret, + struct rds_connection *conn) { struct rds_ib_device *rds_ibdev; struct rds_ib_mr *ibmr = NULL; - struct rds_ib_connection *ic = rs->rs_conn->c_transport_data; + struct rds_ib_connection *ic = NULL; int ret; rds_ibdev = rds_ib_get_device(rs->rs_bound_addr); @@ -550,6 +551,9 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, goto out; } + if (conn) + ic = conn->c_transport_data; + if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) { ret = -ENODEV; goto out; @@ -559,17 +563,18 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret); else ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret); - if (ibmr) - rds_ibdev = NULL; - - out: - if (!ibmr) + if (IS_ERR(ibmr)) { + ret = PTR_ERR(ibmr); pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret); + } else { + return ibmr; + } + out: if (rds_ibdev) rds_ib_dev_put(rds_ibdev); - return ibmr; + return ERR_PTR(ret); } void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool) diff --git a/net/rds/rdma.c b/net/rds/rdma.c index 634cfcb..80920e4 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c @@ -170,7 +170,8 @@ static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages, } static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args, - u64 *cookie_ret, struct rds_mr **mr_ret) + u64 *cookie_ret, struct rds_mr **mr_ret, + struct rds_conn_path *cp) { struct rds_mr *mr = NULL, *found; unsigned int nr_pages; @@ -269,7 +270,8 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args, * Note that dma_map() implies that pending writes are * flushed to RAM, 
so no dma_sync is needed here. */ trans_private = rs->rs_transport->get_mr(sg, nents, rs, - &mr->r_key); + &mr->r_key, + cp ? cp->cp_conn : NULL); if (IS_ERR(trans_private)) { for (i = 0 ; i < nents; i++) @@ -330,7 +332,7 @@ int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen) sizeof(struct rds_get_mr_args))) return -EFAULT; - return __rds_rdma_map(rs, &args, NULL, NULL); + return __rds_rdma_map(rs, &args, NULL, NULL, NULL); } int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen) @@ -354,7 +356,7 @@ int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen) new_args.cookie_addr = args.cookie_addr; new_args.flags = args.flags; - return __rds_rdma_map(rs, &new_args, NULL, NULL); + return __rds_rdma_map(rs, &new_args, NULL, NULL, NULL); } /* @@ -782,7 +784,8 @@ int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm, rm->m_rdma_cookie != 0) return -EINVAL; - return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr); + return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, + &rm->rdma.op_rdma_mr, rm->m_conn_path); } /* diff --git a/net/rds/rds.h b/net/rds/rds.h index f2272fb..60b3b78 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -464,6 +464,8 @@ struct rds_message { struct scatterlist *op_sg; } data; }; + + struct rds_conn_path *m_conn_path; }; /* @@ -544,7 +546,8 @@ struct rds_transport { unsigned int avail); void (*exit)(void); void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg, - struct rds_sock *rs, u32 *key_ret); + struct rds_sock *rs, u32 *key_ret, + struct rds_connection *conn); void (*sync_mr)(void *trans_private, int direction); void (*free_mr)(void *trans_private, int invalidate); void (*flush_mrs)(void); diff --git a/net/rds/send.c b/net/rds/send.c index 94c7f74..59f17a2 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -1169,6 +1169,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) rs->rs_conn = conn; } + if (conn->c_trans->t_mp_capable) + cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)]; + else + cpath = &conn->c_path[0]; + + rm->m_conn_path = cpath; + /* Parse any control messages the user may have included. 
*/ ret = rds_cmsg_send(rs, rm, msg, &allocated_mr); if (ret) { @@ -1192,11 +1199,6 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) goto out; } - if (conn->c_trans->t_mp_capable) - cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)]; - else - cpath = &conn->c_path[0]; - if (rds_destroy_pending(conn)) { ret = -EAGAIN; goto out; diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index a9a9be5..9d1e298 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -116,9 +116,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx, while (*pp) { parent = *pp; xcall = rb_entry(parent, struct rxrpc_call, sock_node); - if (user_call_ID < call->user_call_ID) + if (user_call_ID < xcall->user_call_ID) pp = &(*pp)->rb_left; - else if (user_call_ID > call->user_call_ID) + else if (user_call_ID > xcall->user_call_ID) pp = &(*pp)->rb_right; else goto id_in_use; diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index a7e8d63..9bde1e4 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c @@ -233,7 +233,8 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc, /* force immediate tx of current consumer cursor, but * under send_lock to guarantee arrival in seqno-order */ - smc_tx_sndbuf_nonempty(conn); + if (smc->sk.sk_state != SMC_INIT) + smc_tx_sndbuf_nonempty(conn); } } diff --git a/net/socket.c b/net/socket.c index 8563362..8c24d5d 100644 --- a/net/socket.c +++ b/net/socket.c @@ -89,6 +89,7 @@ #include <linux/magic.h> #include <linux/slab.h> #include <linux/xattr.h> +#include <linux/nospec.h> #include <linux/uaccess.h> #include <asm/unistd.h> @@ -2522,6 +2523,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) if (call < 1 || call > SYS_SENDMMSG) return -EINVAL; + call = array_index_nospec(call, SYS_SENDMMSG + 1); len = nargs[call]; if (len > sizeof(a)) @@ -2688,7 +2690,8 @@ EXPORT_SYMBOL(sock_unregister); bool sock_is_registered(int family) { - return family < NPROTO && rcu_access_pointer(net_families[family]); + return family < NPROTO && + rcu_access_pointer(net_families[array_index_nospec(family, NPROTO)]); } static int __init sock_init(void) diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 72335c2..4e937cd 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -84,10 +84,8 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len) { int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len); - if (err) { - xdp_return_buff(xdp); + if (err) xs->rx_dropped++; - } return err; } diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h index 52ecaf7..8a64b15 100644 --- a/net/xdp/xsk_queue.h +++ b/net/xdp/xsk_queue.h @@ -250,7 +250,7 @@ static inline bool xskq_full_desc(struct xsk_queue *q) static inline bool xskq_empty_desc(struct xsk_queue *q) { - return xskq_nb_free(q, q->prod_tail, 1) == q->nentries; + return xskq_nb_free(q, q->prod_tail, q->nentries) == q->nentries; } void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 5f48251..7c5e897 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2286,6 +2286,9 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE) return make_blackhole(net, dst_orig->ops->family, dst_orig); + if (IS_ERR(dst)) + dst_release(dst_orig); + return dst; } EXPORT_SYMBOL(xfrm_lookup_route); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 080035f..33878e6 100644 --- 
a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1025,10 +1025,12 @@ static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb, { struct sock *nlsk = rcu_dereference(net->xfrm.nlsk); - if (nlsk) - return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC); - else - return -1; + if (!nlsk) { + kfree_skb(skb); + return -EPIPE; + } + + return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC); } static inline unsigned int xfrm_spdinfo_msgsize(void) @@ -1671,9 +1673,11 @@ static inline unsigned int userpolicy_type_attrsize(void) #ifdef CONFIG_XFRM_SUB_POLICY static int copy_to_user_policy_type(u8 type, struct sk_buff *skb) { - struct xfrm_userpolicy_type upt = { - .type = type, - }; + struct xfrm_userpolicy_type upt; + + /* Sadly there are two holes in struct xfrm_userpolicy_type */ + memset(&upt, 0, sizeof(upt)); + upt.type = type; return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt); } diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index 097b1a5..f74a8bc 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -36,6 +36,7 @@ #include <assert.h> #include <errno.h> #include <fcntl.h> +#include <linux/kernel.h> #include <stdbool.h> #include <stdio.h> #include <stdlib.h> @@ -90,7 +91,8 @@ static bool map_is_map_of_progs(__u32 type) static void *alloc_value(struct bpf_map_info *info) { if (map_is_per_cpu(info->type)) - return malloc(info->value_size * get_possible_cpus()); + return malloc(round_up(info->value_size, 8) * + get_possible_cpus()); else return malloc(info->value_size); } @@ -161,9 +163,10 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key, jsonw_name(json_wtr, "value"); print_hex_data_json(value, info->value_size); } else { - unsigned int i, n; + unsigned int i, n, step; n = get_possible_cpus(); + step = round_up(info->value_size, 8); jsonw_name(json_wtr, "key"); print_hex_data_json(key, info->key_size); @@ -176,7 +179,7 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key, jsonw_int_field(json_wtr, "cpu", i); jsonw_name(json_wtr, "value"); - print_hex_data_json(value + i * info->value_size, + print_hex_data_json(value + i * step, info->value_size); jsonw_end_object(json_wtr); @@ -207,9 +210,10 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key, printf("\n"); } else { - unsigned int i, n; + unsigned int i, n, step; n = get_possible_cpus(); + step = round_up(info->value_size, 8); printf("key:\n"); fprint_hex(stdout, key, info->key_size, " "); @@ -217,7 +221,7 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key, for (i = 0; i < n; i++) { printf("value (CPU %02d):%c", i, info->value_size > 16 ? 
'\n' : ' '); - fprint_hex(stdout, value + i * info->value_size, + fprint_hex(stdout, value + i * step, info->value_size, " "); printf("\n"); } diff --git a/tools/include/uapi/linux/btf.h b/tools/include/uapi/linux/btf.h index 0b5ddbe..972265f 100644 --- a/tools/include/uapi/linux/btf.h +++ b/tools/include/uapi/linux/btf.h @@ -76,7 +76,7 @@ struct btf_type { */ #define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24) #define BTF_INT_OFFSET(VAL) (((VAL & 0x00ff0000)) >> 16) -#define BTF_INT_BITS(VAL) ((VAL) & 0x0000ffff) +#define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff) /* Attributes stored in the BTF_INT_ENCODING */ #define BTF_INT_SIGNED (1 << 0) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 8c54a4b..2d270c5 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -2,7 +2,6 @@ /* Copyright (c) 2018 Facebook */ #include <stdlib.h> -#include <stdint.h> #include <string.h> #include <unistd.h> #include <errno.h> @@ -27,13 +26,13 @@ struct btf { struct btf_type **types; const char *strings; void *nohdr_data; - uint32_t nr_types; - uint32_t types_size; - uint32_t data_size; + __u32 nr_types; + __u32 types_size; + __u32 data_size; int fd; }; -static const char *btf_name_by_offset(const struct btf *btf, uint32_t offset) +static const char *btf_name_by_offset(const struct btf *btf, __u32 offset) { if (offset < btf->hdr->str_len) return &btf->strings[offset]; @@ -45,7 +44,7 @@ static int btf_add_type(struct btf *btf, struct btf_type *t) { if (btf->types_size - btf->nr_types < 2) { struct btf_type **new_types; - u32 expand_by, new_size; + __u32 expand_by, new_size; if (btf->types_size == BTF_MAX_NR_TYPES) return -E2BIG; @@ -72,7 +71,7 @@ static int btf_add_type(struct btf *btf, struct btf_type *t) static int btf_parse_hdr(struct btf *btf, btf_print_fn_t err_log) { const struct btf_header *hdr = btf->hdr; - u32 meta_left; + __u32 meta_left; if (btf->data_size < sizeof(struct btf_header)) { elog("BTF header not found\n"); @@ -151,7 +150,7 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log) while (next_type < end_type) { struct btf_type *t = next_type; - uint16_t vlen = BTF_INFO_VLEN(t->info); + __u16 vlen = BTF_INFO_VLEN(t->info); int err; next_type += sizeof(*t); @@ -190,8 +189,7 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log) return 0; } -static const struct btf_type *btf_type_by_id(const struct btf *btf, - uint32_t type_id) +const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id) { if (type_id > btf->nr_types) return NULL; @@ -209,7 +207,7 @@ static bool btf_type_is_void_or_null(const struct btf_type *t) return !t || btf_type_is_void(t); } -static int64_t btf_type_size(const struct btf_type *t) +static __s64 btf_type_size(const struct btf_type *t) { switch (BTF_INFO_KIND(t->info)) { case BTF_KIND_INT: @@ -226,15 +224,15 @@ static int64_t btf_type_size(const struct btf_type *t) #define MAX_RESOLVE_DEPTH 32 -int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id) +__s64 btf__resolve_size(const struct btf *btf, __u32 type_id) { const struct btf_array *array; const struct btf_type *t; - uint32_t nelems = 1; - int64_t size = -1; + __u32 nelems = 1; + __s64 size = -1; int i; - t = btf_type_by_id(btf, type_id); + t = btf__type_by_id(btf, type_id); for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) { size = btf_type_size(t); @@ -259,7 +257,7 @@ int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id) return -EINVAL; } - t = btf_type_by_id(btf, type_id); + t = 
btf__type_by_id(btf, type_id); } if (size < 0) @@ -271,9 +269,9 @@ int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id) return nelems * size; } -int32_t btf__find_by_name(const struct btf *btf, const char *type_name) +__s32 btf__find_by_name(const struct btf *btf, const char *type_name) { - uint32_t i; + __u32 i; if (!strcmp(type_name, "void")) return 0; @@ -302,10 +300,9 @@ void btf__free(struct btf *btf) free(btf); } -struct btf *btf__new(uint8_t *data, uint32_t size, - btf_print_fn_t err_log) +struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log) { - uint32_t log_buf_size = 0; + __u32 log_buf_size = 0; char *log_buf = NULL; struct btf *btf; int err; diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 74bb344..e2a09a1 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -4,19 +4,21 @@ #ifndef __BPF_BTF_H #define __BPF_BTF_H -#include <stdint.h> +#include <linux/types.h> #define BTF_ELF_SEC ".BTF" struct btf; +struct btf_type; typedef int (*btf_print_fn_t)(const char *, ...) __attribute__((format(printf, 1, 2))); void btf__free(struct btf *btf); -struct btf *btf__new(uint8_t *data, uint32_t size, btf_print_fn_t err_log); -int32_t btf__find_by_name(const struct btf *btf, const char *type_name); -int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id); +struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log); +__s32 btf__find_by_name(const struct btf *btf, const char *type_name); +const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 id); +__s64 btf__resolve_size(const struct btf *btf, __u32 type_id); int btf__fd(const struct btf *btf); #endif diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index a1e96b5..1aafdbe 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -36,6 +36,7 @@ #include <linux/err.h> #include <linux/kernel.h> #include <linux/bpf.h> +#include <linux/btf.h> #include <linux/list.h> #include <linux/limits.h> #include <sys/stat.h> @@ -216,8 +217,8 @@ struct bpf_map { size_t offset; int map_ifindex; struct bpf_map_def def; - uint32_t btf_key_type_id; - uint32_t btf_value_type_id; + __u32 btf_key_type_id; + __u32 btf_value_type_id; void *priv; bpf_map_clear_priv_t clear_priv; }; @@ -1014,68 +1015,72 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr, static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf) { + const struct btf_type *container_type; + const struct btf_member *key, *value; struct bpf_map_def *def = &map->def; const size_t max_name = 256; - int64_t key_size, value_size; - int32_t key_id, value_id; - char name[max_name]; + char container_name[max_name]; + __s64 key_size, value_size; + __s32 container_id; - /* Find key type by name from BTF */ - if (snprintf(name, max_name, "%s_key", map->name) == max_name) { - pr_warning("map:%s length of BTF key_type:%s_key is too long\n", + if (snprintf(container_name, max_name, "____btf_map_%s", map->name) == + max_name) { + pr_warning("map:%s length of '____btf_map_%s' is too long\n", map->name, map->name); return -EINVAL; } - key_id = btf__find_by_name(btf, name); - if (key_id < 0) { - pr_debug("map:%s key_type:%s cannot be found in BTF\n", - map->name, name); - return key_id; + container_id = btf__find_by_name(btf, container_name); + if (container_id < 0) { + pr_debug("map:%s container_name:%s cannot be found in BTF. 
Missing BPF_ANNOTATE_KV_PAIR?\n", + map->name, container_name); + return container_id; } - key_size = btf__resolve_size(btf, key_id); - if (key_size < 0) { - pr_warning("map:%s key_type:%s cannot get the BTF type_size\n", - map->name, name); - return key_size; + container_type = btf__type_by_id(btf, container_id); + if (!container_type) { + pr_warning("map:%s cannot find BTF type for container_id:%u\n", + map->name, container_id); + return -EINVAL; } - if (def->key_size != key_size) { - pr_warning("map:%s key_type:%s has BTF type_size:%u != key_size:%u\n", - map->name, name, (unsigned int)key_size, def->key_size); + if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT || + BTF_INFO_VLEN(container_type->info) < 2) { + pr_warning("map:%s container_name:%s is an invalid container struct\n", + map->name, container_name); return -EINVAL; } - /* Find value type from BTF */ - if (snprintf(name, max_name, "%s_value", map->name) == max_name) { - pr_warning("map:%s length of BTF value_type:%s_value is too long\n", - map->name, map->name); - return -EINVAL; + key = (struct btf_member *)(container_type + 1); + value = key + 1; + + key_size = btf__resolve_size(btf, key->type); + if (key_size < 0) { + pr_warning("map:%s invalid BTF key_type_size\n", + map->name); + return key_size; } - value_id = btf__find_by_name(btf, name); - if (value_id < 0) { - pr_debug("map:%s value_type:%s cannot be found in BTF\n", - map->name, name); - return value_id; + if (def->key_size != key_size) { + pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n", + map->name, (__u32)key_size, def->key_size); + return -EINVAL; } - value_size = btf__resolve_size(btf, value_id); + value_size = btf__resolve_size(btf, value->type); if (value_size < 0) { - pr_warning("map:%s value_type:%s cannot get the BTF type_size\n", - map->name, name); + pr_warning("map:%s invalid BTF value_type_size\n", map->name); return value_size; } if (def->value_size != value_size) { - pr_warning("map:%s value_type:%s has BTF type_size:%u != value_size:%u\n", - map->name, name, (unsigned int)value_size, def->value_size); + pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n", + map->name, (__u32)value_size, def->value_size); return -EINVAL; } - map->btf_key_type_id = key_id; - map->btf_value_type_id = value_id; + map->btf_key_type_id = key->type; + map->btf_value_type_id = value->type; return 0; } @@ -2089,12 +2094,12 @@ const char *bpf_map__name(struct bpf_map *map) return map ? map->name : NULL; } -uint32_t bpf_map__btf_key_type_id(const struct bpf_map *map) +__u32 bpf_map__btf_key_type_id(const struct bpf_map *map) { return map ? map->btf_key_type_id : 0; } -uint32_t bpf_map__btf_value_type_id(const struct bpf_map *map) +__u32 bpf_map__btf_value_type_id(const struct bpf_map *map) { return map ? 
map->btf_value_type_id : 0; } @@ -2268,8 +2273,8 @@ bpf_perf_event_read_simple(void *mem, unsigned long size, volatile struct perf_event_mmap_page *header = mem; __u64 data_tail = header->data_tail; __u64 data_head = header->data_head; + int ret = LIBBPF_PERF_EVENT_ERROR; void *base, *begin, *end; - int ret; asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */ if (data_head == data_tail) diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 0997653..b33ae02 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -244,8 +244,8 @@ bpf_map__next(struct bpf_map *map, struct bpf_object *obj); int bpf_map__fd(struct bpf_map *map); const struct bpf_map_def *bpf_map__def(struct bpf_map *map); const char *bpf_map__name(struct bpf_map *map); -uint32_t bpf_map__btf_key_type_id(const struct bpf_map *map); -uint32_t bpf_map__btf_value_type_id(const struct bpf_map *map); +__u32 bpf_map__btf_key_type_id(const struct bpf_map *map); +__u32 bpf_map__btf_value_type_id(const struct bpf_map *map); typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *); int bpf_map__set_priv(struct bpf_map *map, void *priv, diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8 index d39e4ff..a6db83a 100644 --- a/tools/power/x86/turbostat/turbostat.8 +++ b/tools/power/x86/turbostat/turbostat.8 @@ -106,7 +106,7 @@ The system configuration dump (if --quiet is not used) is followed by statistics \fBC1%, C2%, C3%\fP The residency percentage that Linux requested C1, C2, C3.... The system summary is the average of all CPUs in the system. Note that these are software, reflecting what was requested. The hardware counters reflect what was actually achieved. \fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states. These numbers are from hardware residency counters. \fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor. -\fBPkgTtmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor. +\fBPkgTmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor. \fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms. \fBGFXMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz. \fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. These numbers are from hardware residency counters. @@ -114,7 +114,7 @@ The system configuration dump (if --quiet is not used) is followed by statistics \fBCorWatt\fP Watts consumed by the core part of the package. \fBGFXWatt\fP Watts consumed by the Graphics part of the package -- available only on client processors. \fBRAMWatt\fP Watts consumed by the DRAM DIMMS -- available only on server processors. -\fBPKG_%\fP percent of the interval that RAPL throttling was active on the Package. +\fBPKG_%\fP percent of the interval that RAPL throttling was active on the Package. Note that the system summary is the sum of the package throttling time, and thus may be higher than 100% on a multi-package system. Note that the meaning of this field is model specific. For example, some hardware increments this counter when RAPL responds to thermal limits, but does not increment this counter when RAPL responds to power limits. Comparing PkgWatt and PkgTmp to system limits is necessary. 
\fBRAM_%\fP percent of the interval that RAPL throttling was active on DRAM. .fi .SH TOO MUCH INFORMATION EXAMPLE diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 4d14bbb..980bd9d 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -1163,9 +1163,7 @@ void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_ if (!printed || !summary_only) print_header("\t"); - if (topo.num_cpus > 1) - format_counters(&average.threads, &average.cores, - &average.packages); + format_counters(&average.threads, &average.cores, &average.packages); printed = 1; @@ -1692,7 +1690,7 @@ void get_apic_id(struct thread_data *t) t->x2apic_id = edx; if (debug && (t->apic_id != t->x2apic_id)) - fprintf(stderr, "cpu%d: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id); + fprintf(outf, "cpu%d: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id); } /* @@ -2473,55 +2471,43 @@ int get_core_id(int cpu) void set_node_data(void) { - char path[80]; - FILE *filep; - int pkg, node, cpu; - - struct pkg_node_info { - int count; - int min; - } *pni; - - pni = calloc(topo.num_packages, sizeof(struct pkg_node_info)); - if (!pni) - err(1, "calloc pkg_node_count"); - - for (pkg = 0; pkg < topo.num_packages; pkg++) - pni[pkg].min = topo.num_cpus; - - for (node = 0; node <= topo.max_node_num; node++) { - /* find the "first" cpu in the node */ - sprintf(path, "/sys/bus/node/devices/node%d/cpulist", node); - filep = fopen(path, "r"); - if (!filep) - continue; - fscanf(filep, "%d", &cpu); - fclose(filep); - - pkg = cpus[cpu].physical_package_id; - pni[pkg].count++; - - if (node < pni[pkg].min) - pni[pkg].min = node; - } - - for (pkg = 0; pkg < topo.num_packages; pkg++) - if (pni[pkg].count > topo.nodes_per_pkg) - topo.nodes_per_pkg = pni[0].count; - - /* Fake 1 node per pkg for machines that don't - * expose nodes and thus avoid -nan results - */ - if (topo.nodes_per_pkg == 0) - topo.nodes_per_pkg = 1; - - for (cpu = 0; cpu < topo.num_cpus; cpu++) { - pkg = cpus[cpu].physical_package_id; - node = cpus[cpu].physical_node_id; - cpus[cpu].logical_node_id = node - pni[pkg].min; + int pkg, node, lnode, cpu, cpux; + int cpu_count; + + /* initialize logical_node_id */ + for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu) + cpus[cpu].logical_node_id = -1; + + cpu_count = 0; + for (pkg = 0; pkg < topo.num_packages; pkg++) { + lnode = 0; + for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu) { + if (cpus[cpu].physical_package_id != pkg) + continue; + /* find a cpu with an unset logical_node_id */ + if (cpus[cpu].logical_node_id != -1) + continue; + cpus[cpu].logical_node_id = lnode; + node = cpus[cpu].physical_node_id; + cpu_count++; + /* + * find all matching cpus on this pkg and set + * the logical_node_id + */ + for (cpux = cpu; cpux <= topo.max_cpu_num; cpux++) { + if ((cpus[cpux].physical_package_id == pkg) && + (cpus[cpux].physical_node_id == node)) { + cpus[cpux].logical_node_id = lnode; + cpu_count++; + } + } + lnode++; + if (lnode > topo.nodes_per_pkg) + topo.nodes_per_pkg = lnode; + } + if (cpu_count >= topo.max_cpu_num) + break; } - free(pni); - } int get_physical_node_id(struct cpu_topology *thiscpu) @@ -4471,7 +4457,9 @@ void process_cpuid() family = (fms >> 8) & 0xf; model = (fms >> 4) & 0xf; stepping = fms & 0xf; - if (family == 6 || family == 0xf) + if (family == 0xf) + family += (fms >> 20) & 0xff; + if (family >= 6) model += ((fms >> 16) & 0xf) << 4; if (!quiet) { @@ -4840,16 +4828,8 @@ void topology_probe() 
siblings = get_thread_siblings(&cpus[i]); if (siblings > max_siblings) max_siblings = siblings; - if (cpus[i].thread_id != -1) + if (cpus[i].thread_id == 0) topo.num_cores++; - - if (debug > 1) - fprintf(outf, - "cpu %d pkg %d node %d core %d thread %d\n", - i, cpus[i].physical_package_id, - cpus[i].physical_node_id, - cpus[i].physical_core_id, - cpus[i].thread_id); } topo.cores_per_node = max_core_id + 1; @@ -4875,6 +4855,20 @@ void topology_probe() topo.threads_per_core = max_siblings; if (debug > 1) fprintf(outf, "max_siblings %d\n", max_siblings); + + if (debug < 1) + return; + + for (i = 0; i <= topo.max_cpu_num; ++i) { + fprintf(outf, + "cpu %d pkg %d node %d lnode %d core %d thread %d\n", + i, cpus[i].physical_package_id, + cpus[i].physical_node_id, + cpus[i].logical_node_id, + cpus[i].physical_core_id, + cpus[i].thread_id); + } + } void @@ -5102,7 +5096,7 @@ int get_and_dump_counters(void) } void print_version() { - fprintf(outf, "turbostat version 18.06.20" + fprintf(outf, "turbostat version 18.07.27" " - Len Brown <lenb@kernel.org>\n"); } diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index f2f28b6..810de20 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h @@ -158,6 +158,15 @@ struct bpf_map_def { unsigned int numa_node; }; +#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val) \ + struct ____btf_map_##name { \ + type_key key; \ + type_val value; \ + }; \ + struct ____btf_map_##name \ + __attribute__ ((section(".maps." #name), used)) \ + ____btf_map_##name = { } + static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) = (void *) BPF_FUNC_skb_load_bytes; static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) = diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index 3619f30..ffdd277 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c @@ -247,6 +247,34 @@ static struct btf_raw_test raw_tests[] = { .max_entries = 4, }, +{ + .descr = "struct test #3 Invalid member offset", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* int64 */ /* [2] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), + + /* struct A { */ /* [3] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 16), + BTF_MEMBER_ENC(NAME_TBD, 1, 64), /* int m; */ + BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* int64 n; */ + /* } */ + BTF_END_RAW, + }, + .str_sec = "\0A\0m\0n\0", + .str_sec_size = sizeof("\0A\0m\0n\0"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_test3_map", + .key_size = sizeof(int), + .value_size = 16, + .key_type_id = 1, + .value_type_id = 3, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid member bits_offset", +}, + /* Test member exceeds the size of struct. 
* * struct A { @@ -479,7 +507,7 @@ static struct btf_raw_test raw_tests[] = { .key_size = sizeof(int), .value_size = sizeof(void *) * 4, .key_type_id = 1, - .value_type_id = 4, + .value_type_id = 5, .max_entries = 4, }, @@ -1264,6 +1292,88 @@ static struct btf_raw_test raw_tests[] = { .err_str = "type != 0", }, +{ + .descr = "arraymap invalid btf key (a bit field)", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* 32 bit int with 32 bit offset */ /* [2] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 32, 32, 8), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "array_map_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 2, + .value_type_id = 1, + .max_entries = 4, + .map_create_err = true, +}, + +{ + .descr = "arraymap invalid btf key (!= 32 bits)", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* 16 bit int with 0 bit offset */ /* [2] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 16, 2), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "array_map_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 2, + .value_type_id = 1, + .max_entries = 4, + .map_create_err = true, +}, + +{ + .descr = "arraymap invalid btf value (too small)", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "array_map_check_btf", + .key_size = sizeof(int), + /* btf_value_size < map->value_size */ + .value_size = sizeof(__u64), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .map_create_err = true, +}, + +{ + .descr = "arraymap invalid btf value (too big)", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "array_map_check_btf", + .key_size = sizeof(int), + /* btf_value_size > map->value_size */ + .value_size = sizeof(__u16), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .map_create_err = true, +}, + }; /* struct btf_raw_test raw_tests[] */ static const char *get_next_str(const char *start, const char *end) @@ -2023,7 +2133,7 @@ static struct btf_raw_test pprint_test = { BTF_ENUM_ENC(NAME_TBD, 2), BTF_ENUM_ENC(NAME_TBD, 3), /* struct pprint_mapv */ /* [16] */ - BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 8), 28), + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 8), 32), BTF_MEMBER_ENC(NAME_TBD, 11, 0), /* uint32_t ui32 */ BTF_MEMBER_ENC(NAME_TBD, 10, 32), /* uint16_t ui16 */ BTF_MEMBER_ENC(NAME_TBD, 12, 64), /* int32_t si32 */ diff --git a/tools/testing/selftests/bpf/test_btf_haskv.c b/tools/testing/selftests/bpf/test_btf_haskv.c index 8c7ca09..b21b876 100644 --- a/tools/testing/selftests/bpf/test_btf_haskv.c +++ b/tools/testing/selftests/bpf/test_btf_haskv.c @@ -10,11 +10,6 @@ struct ipv_counts { unsigned int v6; }; -typedef int btf_map_key; -typedef struct ipv_counts btf_map_value; -btf_map_key dumm_key; -btf_map_value dummy_value; - struct bpf_map_def SEC("maps") btf_map = { .type = BPF_MAP_TYPE_ARRAY, .key_size = sizeof(int), @@ -22,6 +17,8 @@ struct bpf_map_def SEC("maps") btf_map = { .max_entries = 4, }; +BPF_ANNOTATE_KV_PAIR(btf_map, int, struct ipv_counts); + struct 
dummy_tracepoint_args { unsigned long long pad; struct sock *sock; diff --git a/tools/testing/selftests/bpf/test_lwt_seg6local.sh b/tools/testing/selftests/bpf/test_lwt_seg6local.sh index 270fa8f..785eabf 100755 --- a/tools/testing/selftests/bpf/test_lwt_seg6local.sh +++ b/tools/testing/selftests/bpf/test_lwt_seg6local.sh @@ -115,14 +115,14 @@ ip netns exec ns2 ip -6 route add fb00::6 encap bpf in obj test_lwt_seg6local.o ip netns exec ns2 ip -6 route add fd00::1 dev veth3 via fb00::43 scope link ip netns exec ns3 ip -6 route add fc42::1 dev veth5 via fb00::65 -ip netns exec ns3 ip -6 route add fd00::1 encap seg6local action End.BPF obj test_lwt_seg6local.o sec add_egr_x dev veth4 +ip netns exec ns3 ip -6 route add fd00::1 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec add_egr_x dev veth4 -ip netns exec ns4 ip -6 route add fd00::2 encap seg6local action End.BPF obj test_lwt_seg6local.o sec pop_egr dev veth6 +ip netns exec ns4 ip -6 route add fd00::2 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec pop_egr dev veth6 ip netns exec ns4 ip -6 addr add fc42::1 dev lo ip netns exec ns4 ip -6 route add fd00::3 dev veth7 via fb00::87 ip netns exec ns5 ip -6 route add fd00::4 table 117 dev veth9 via fb00::109 -ip netns exec ns5 ip -6 route add fd00::3 encap seg6local action End.BPF obj test_lwt_seg6local.o sec inspect_t dev veth8 +ip netns exec ns5 ip -6 route add fd00::3 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec inspect_t dev veth8 ip netns exec ns6 ip -6 addr add fb00::6/16 dev lo ip netns exec ns6 ip -6 addr add fd00::4/16 dev lo diff --git a/tools/testing/selftests/net/tcp_mmap.c b/tools/testing/selftests/net/tcp_mmap.c index 77f7627..e8c5dff 100644 --- a/tools/testing/selftests/net/tcp_mmap.c +++ b/tools/testing/selftests/net/tcp_mmap.c @@ -402,7 +402,7 @@ int main(int argc, char *argv[]) exit(1); } - fd = socket(AF_INET6, SOCK_STREAM, 0); + fd = socket(cfg_family, SOCK_STREAM, 0); if (fd == -1) { perror("socket"); exit(1); diff --git a/tools/virtio/asm/barrier.h b/tools/virtio/asm/barrier.h index 0ac3caf..d0351f8 100644 --- a/tools/virtio/asm/barrier.h +++ b/tools/virtio/asm/barrier.h @@ -13,8 +13,8 @@ } while (0); /* Weak barriers should be used. If not - it's a bug */ # define mb() abort() -# define rmb() abort() -# define wmb() abort() +# define dma_rmb() abort() +# define dma_wmb() abort() #else #error Please fill in barrier macros #endif diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h index fca8381..fb22bcc 100644 --- a/tools/virtio/linux/kernel.h +++ b/tools/virtio/linux/kernel.h @@ -52,6 +52,11 @@ static inline void *kmalloc(size_t s, gfp_t gfp) return __kmalloc_fake; return malloc(s); } +static inline void *kmalloc_array(unsigned n, size_t s, gfp_t gfp) +{ + return kmalloc(n * s, gfp); +} + static inline void *kzalloc(size_t s, gfp_t gfp) { void *p = kmalloc(s, gfp); |
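
The dev_change_tx_queue_len() hunks above convert the notifier failure path and the new qdisc update to a single rollback label, so every error restores the original queue length. Reconstructed from those hunks (the early range check on new_len is elided), the function now reads roughly as follows; this is a sketch assembled from the diff, not a verbatim copy of the file:

int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
	unsigned int orig_len = dev->tx_queue_len;
	int res;

	/* ... range check on new_len elided ... */

	if (new_len != orig_len) {
		dev->tx_queue_len = new_len;
		res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
		res = notifier_to_errno(res);
		if (res)
			goto err_rollback;
		res = dev_qdisc_change_tx_queue_len(dev);
		if (res)
			goto err_rollback;
	}
	return 0;

err_rollback:
	netdev_err(dev, "refused to change device tx_queue_len\n");
	dev->tx_queue_len = orig_len;
	return res;
}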
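
The af_netlink, socketcall and sock_is_registered changes all apply the same Spectre-v1 hardening pattern: validate the user-controlled index first, then clamp it with array_index_nospec() from <linux/nospec.h> before using it to index a kernel array. A minimal sketch of the pattern, with an illustrative table and bound rather than the real netlink or socketcall arrays:

#include <linux/nospec.h>

#define TABLE_SIZE 32			/* illustrative bound */

static void *table[TABLE_SIZE];

static void *table_lookup(int idx)
{
	if (idx < 0 || idx >= TABLE_SIZE)
		return NULL;
	/* Clamp the already-validated index so that a mispredicted branch
	 * cannot speculatively load past the end of the array.
	 */
	idx = array_index_nospec(idx, TABLE_SIZE);
	return table[idx];
}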
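
The xfrm_user change replaces a designated initializer with an explicit memset() because struct xfrm_userpolicy_type contains padding holes, and a designated initializer does not guarantee that those holes are zeroed, so stale kernel stack bytes could leak to userspace through the netlink attribute. The same pattern applies to any structure copied out whole; a minimal illustration with a hypothetical struct, not the real xfrm layout:

#include <string.h>

struct example_uapi {			/* hypothetical struct with implicit padding */
	unsigned char type;
	/* 3 bytes of compiler-inserted padding here */
	unsigned int reserved;
};

static void fill_example(struct example_uapi *out, unsigned char type)
{
	/* Zero the whole object first so the padding bytes are defined
	 * before the struct is copied to userspace.
	 */
	memset(out, 0, sizeof(*out));
	out->type = type;
}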
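
The bpftool map.c change rounds each per-CPU value slot up to 8 bytes because the kernel lays out per-CPU map values at 8-byte-aligned strides, so reading CPU i's copy at value + i * value_size is wrong whenever value_size is not a multiple of 8. A small userspace sketch of the corrected addressing (the round_up macro below is a generic illustration; bpftool uses the helper from the tools copy of linux/kernel.h, and the sizes are made up):

#include <stdio.h>

#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int value_size = 12;	/* illustrative per-CPU value size */
	unsigned int ncpus = 4;
	unsigned int step = round_up(value_size, 8);	/* 16: the real per-CPU stride */

	for (unsigned int i = 0; i < ncpus; i++)
		printf("cpu %u copy starts at offset %u (not %u)\n",
		       i, i * step, i * value_size);
	return 0;
}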
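
The libbpf and selftest changes replace the old "<map>_key"/"<map>_value" BTF lookup with a single container struct named ____btf_map_<map>, emitted by the new BPF_ANNOTATE_KV_PAIR() macro; libbpf then takes the key and value type ids from the container's first two members. A minimal sketch of how a BPF C program would use the macro defined in bpf_helpers.h above (the map and its key/value types are illustrative):

#include <linux/types.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"

struct flow_key {
	__u32 saddr;
	__u32 daddr;
};

struct flow_stats {
	__u64 packets;
	__u64 bytes;
};

struct bpf_map_def SEC("maps") flow_map = {
	.type        = BPF_MAP_TYPE_HASH,
	.key_size    = sizeof(struct flow_key),
	.value_size  = sizeof(struct flow_stats),
	.max_entries = 1024,
};

/* Emits "struct ____btf_map_flow_map { struct flow_key key; struct flow_stats value; }"
 * so libbpf can resolve the map's key/value BTF types by the container name.
 */
BPF_ANNOTATE_KV_PAIR(flow_map, struct flow_key, struct flow_stats);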
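
The turbostat process_cpuid() fix follows the standard x86 CPUID convention: the extended family bits are added only when the base family is 0xf (the old code never added them, so AMD family 0x17 parts were reported as family 0xf), and the extended model bits are folded in for any family >= 6. For example, an fms value of 0x00810f10 now decodes to family 0x17, model 0x11, stepping 0. A short standalone sketch of the decode, assuming fms has already been read from CPUID leaf 1:

#include <stdio.h>

int main(void)
{
	unsigned int fms = 0x00810f10;	/* illustrative CPUID.1.EAX value (AMD family 17h) */
	unsigned int family, model, stepping;

	family = (fms >> 8) & 0xf;
	model = (fms >> 4) & 0xf;
	stepping = fms & 0xf;
	if (family == 0xf)
		family += (fms >> 20) & 0xff;		/* extended family */
	if (family >= 6)
		model += ((fms >> 16) & 0xf) << 4;	/* extended model */

	printf("family 0x%x model 0x%x stepping 0x%x\n", family, model, stepping);
	return 0;
}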