731 files changed, 18644 insertions, 13932 deletions
diff --git a/Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt b/Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt index d3bbdded..168f1be 100644 --- a/Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt +++ b/Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt @@ -6,11 +6,14 @@ Required properties: - spi-max-frequency: maximal bus speed, should be set to 7500000 depends sync or async operation mode - reg: the chipselect index - - interrupts: the interrupt generated by the device + - interrupts: the interrupt generated by the device. Non high-level + can occur deadlocks while handling isr. Optional properties: - reset-gpio: GPIO spec for the rstn pin - sleep-gpio: GPIO spec for the slp_tr pin + - xtal-trim: u8 value for fine tuning the internal capacitance + arrays of xtal pins: 0 = +0 pF, 0xf = +4.5 pF Example: @@ -18,6 +21,7 @@ Example: compatible = "atmel,at86rf231"; spi-max-frequency = <7500000>; reg = <0>; - interrupts = <19 1>; + interrupts = <19 4>; interrupt-parent = <&gpio3>; + xtal-trim = /bits/ 8 <0x06>; }; diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt index aaa6964..ba19d67 100644 --- a/Documentation/devicetree/bindings/net/macb.txt +++ b/Documentation/devicetree/bindings/net/macb.txt @@ -2,10 +2,13 @@ Required properties: - compatible: Should be "cdns,[<chip>-]{macb|gem}" - Use "cdns,at91sam9260-macb" Atmel at91sam9260 and at91sam9263 SoCs. + Use "cdns,at91sam9260-macb" for Atmel at91sam9 SoCs or the 10/100Mbit IP + available on sama5d3 SoCs. Use "cdns,at32ap7000-macb" for other 10/100 usage or use the generic form: "cdns,macb". Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on the Cadence GEM, or the generic form: "cdns,gem". + Use "cdns,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs. + Use "cdns,sama5d4-gem" for the Gigabit IP available on Atmel sama5d4 SoCs. - reg: Address and length of the register set for the device - interrupts: Should contain macb interrupt - phy-mode: See ethernet.txt file in the same directory. diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 1b8c964..4412f69 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -388,6 +388,16 @@ tcp_mtu_probing - INTEGER 1 - Disabled by default, enabled when an ICMP black hole detected 2 - Always enabled, use initial MSS of tcp_base_mss. +tcp_probe_interval - INTEGER + Controls how often to start TCP Packetization-Layer Path MTU + Discovery reprobe. The default is reprobing every 10 minutes as + per RFC4821. + +tcp_probe_threshold - INTEGER + Controls when TCP Packetization-Layer Path MTU Discovery probing + will stop in respect to the width of search range in bytes. Default + is 8 bytes. + tcp_no_metrics_save - BOOLEAN By default, TCP saves various connection metrics in the route cache when the connection closes, so that connections established in the diff --git a/Documentation/networking/ipvs-sysctl.txt b/Documentation/networking/ipvs-sysctl.txt index 7a3c047..3ba7095 100644 --- a/Documentation/networking/ipvs-sysctl.txt +++ b/Documentation/networking/ipvs-sysctl.txt @@ -22,6 +22,27 @@ backup_only - BOOLEAN If set, disable the director function while the server is in backup mode to avoid packet loops for DR/TUN methods. +conn_reuse_mode - INTEGER + 1 - default + + Controls how ipvs will deal with connections that are detected + port reuse. 
It is a bitmap, with the values being: + + 0: disable any special handling on port reuse. The new + connection will be delivered to the same real server that was + servicing the previous connection. This will effectively + disable expire_nodest_conn. + + bit 1: enable rescheduling of new connections when it is safe. + That is, whenever expire_nodest_conn and for TCP sockets, when + the connection is in TIME_WAIT state (which is only possible if + you use NAT mode). + + bit 2: it is bit 1 plus, for TCP connections, when connections + are in FIN_WAIT state, as this is the last state seen by load + balancer in Direct Routing mode. This bit helps on adding new + real servers to a very busy cluster. + conntrack - BOOLEAN 0 - disabled (default) not 0 - enabled diff --git a/Documentation/networking/mpls-sysctl.txt b/Documentation/networking/mpls-sysctl.txt new file mode 100644 index 0000000..639ddf0 --- /dev/null +++ b/Documentation/networking/mpls-sysctl.txt @@ -0,0 +1,20 @@ +/proc/sys/net/mpls/* Variables: + +platform_labels - INTEGER + Number of entries in the platform label table. It is not + possible to configure forwarding for label values equal to or + greater than the number of platform labels. + + A dense utliziation of the entries in the platform label table + is possible and expected aas the platform labels are locally + allocated. + + If the number of platform label table entries is set to 0 no + label will be recognized by the kernel and mpls forwarding + will be disabled. + + Reducing this value will remove all label routing entries that + no longer fit in the table. + + Possible values: 0 - 1048575 + Default: 0 diff --git a/Documentation/networking/pktgen.txt b/Documentation/networking/pktgen.txt index 6915c6b..0344f1d 100644 --- a/Documentation/networking/pktgen.txt +++ b/Documentation/networking/pktgen.txt @@ -3,13 +3,11 @@ HOWTO for the linux packet generator ------------------------------------ -Date: 041221 - -Enable CONFIG_NET_PKTGEN to compile and build pktgen.o either in kernel -or as module. Module is preferred. insmod pktgen if needed. Once running -pktgen creates a thread on each CPU where each thread has affinity to its CPU. -Monitoring and controlling is done via /proc. Easiest to select a suitable -a sample script and configure. +Enable CONFIG_NET_PKTGEN to compile and build pktgen either in-kernel +or as a module. A module is preferred; modprobe pktgen if needed. Once +running, pktgen creates a thread for each CPU with affinity to that CPU. +Monitoring and controlling is done via /proc. It is easiest to select a +suitable sample script and configure that. On a dual CPU: @@ -27,7 +25,7 @@ For monitoring and control pktgen creates: Tuning NIC for max performance ============================== -The default NIC setting are (likely) not tuned for pktgen's artificial +The default NIC settings are (likely) not tuned for pktgen's artificial overload type of benchmarking, as this could hurt the normal use-case. Specifically increasing the TX ring buffer in the NIC: @@ -35,20 +33,20 @@ Specifically increasing the TX ring buffer in the NIC: A larger TX ring can improve pktgen's performance, while it can hurt in the general case, 1) because the TX ring buffer might get larger -than the CPUs L1/L2 cache, 2) because it allow more queueing in the +than the CPU's L1/L2 cache, 2) because it allows more queueing in the NIC HW layer (which is bad for bufferbloat). 
-One should be careful to conclude, that packets/descriptors in the HW +One should hesitate to conclude that packets/descriptors in the HW TX ring cause delay. Drivers usually delay cleaning up the -ring-buffers (for various performance reasons), thus packets stalling -the TX ring, might just be waiting for cleanup. +ring-buffers for various performance reasons, and packets stalling +the TX ring might just be waiting for cleanup. -This cleanup issues is specifically the case, for the driver ixgbe -(Intel 82599 chip). This driver (ixgbe) combine TX+RX ring cleanups, +This cleanup issue is specifically the case for the driver ixgbe +(Intel 82599 chip). This driver (ixgbe) combines TX+RX ring cleanups, and the cleanup interval is affected by the ethtool --coalesce setting of parameter "rx-usecs". -For ixgbe use e.g "30" resulting in approx 33K interrupts/sec (1/30*10^6): +For ixgbe use e.g. "30" resulting in approx 33K interrupts/sec (1/30*10^6): # ethtool -C ethX rx-usecs 30 @@ -60,15 +58,16 @@ Running: Stopped: eth1 Result: OK: max_before_softirq=10000 -Most important the devices assigned to thread. Note! A device can only belong -to one thread. +Most important are the devices assigned to the thread. Note that a +device can only belong to one thread. Viewing devices =============== -Parm section holds configured info. Current hold running stats. -Result is printed after run or after interruption. Example: +The Params section holds configured information. The Current section +holds running statistics. The Result is printed after a run or after +interruption. Example: /proc/net/pktgen/eth1 @@ -93,7 +92,8 @@ Result: OK: 13101142(c12220741+d880401) usec, 10000000 (60byte,0frags) Configuring threads and devices ================================ -This is done via the /proc interface easiest done via pgset in the scripts +This is done via the /proc interface, and most easily done via pgset +as defined in the sample scripts. Examples: @@ -192,10 +192,11 @@ Examples: pgset "rate 300M" set rate to 300 Mb/s pgset "ratep 1000000" set rate to 1Mpps -Example scripts -=============== +Sample scripts +============== -A collection of small tutorial scripts for pktgen is in examples dir. +A collection of small tutorial scripts for pktgen is in the +samples/pktgen directory: pktgen.conf-1-1 # 1 CPU 1 dev pktgen.conf-1-2 # 1 CPU 2 dev @@ -206,25 +207,26 @@ pktgen.conf-1-1-ip6 # 1 CPU 1 dev ipv6 pktgen.conf-1-1-ip6-rdos # 1 CPU 1 dev ipv6 w. route DoS pktgen.conf-1-1-flows # 1 CPU 1 dev multiple flows. -Run in shell: ./pktgen.conf-X-Y It does all the setup including sending. +Run in shell: ./pktgen.conf-X-Y +This does all the setup including sending. Interrupt affinity =================== -Note when adding devices to a specific CPU there good idea to also assign -/proc/irq/XX/smp_affinity so the TX-interrupts gets bound to the same CPU. -as this reduces cache bouncing when freeing skb's. +Note that when adding devices to a specific CPU it is a good idea to +also assign /proc/irq/XX/smp_affinity so that the TX interrupts are bound +to the same CPU. This reduces cache bouncing when freeing skbs. 
Enable IPsec ============ -Default IPsec transformation with ESP encapsulation plus Transport mode -could be enabled by simply setting: +Default IPsec transformation with ESP encapsulation plus transport mode +can be enabled by simply setting: pgset "flag IPSEC" pgset "flows 1" To avoid breaking existing testbed scripts for using AH type and tunnel mode, -user could use "pgset spi SPI_VALUE" to specify which formal of transformation +you can use "pgset spi SPI_VALUE" to specify which transformation mode to employ. diff --git a/Documentation/networking/s2io.txt b/Documentation/networking/s2io.txt index d2a9f43..0362a42 100644 --- a/Documentation/networking/s2io.txt +++ b/Documentation/networking/s2io.txt @@ -38,7 +38,7 @@ The corresponding adapter's LED will blink multiple times. 3. Features supported: a. Jumbo frames. Xframe I/II supports MTU up to 9600 bytes, -modifiable using ifconfig command. +modifiable using ip command. b. Offloads. Supports checksum offload(TCP/UDP/IP) on transmit and receive, TSO. diff --git a/Documentation/networking/vxge.txt b/Documentation/networking/vxge.txt index bb76c66..abfec24 100644 --- a/Documentation/networking/vxge.txt +++ b/Documentation/networking/vxge.txt @@ -39,7 +39,7 @@ iii) PCI-SIG's I/O Virtualization iv) Jumbo frames X3100 Series supports MTU up to 9600 bytes, modifiable using - ifconfig command. + ip command. v) Offloads supported: (Enabled by default) Checksum offload (TCP/UDP/IP) on transmit and receive paths diff --git a/MAINTAINERS b/MAINTAINERS index 69cc89f..42e221c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8335,7 +8335,6 @@ F: block/partitions/ibm.c S390 NETWORK DRIVERS M: Ursula Braun <ursula.braun@de.ibm.com> -M: Frank Blaschka <blaschka@linux.vnet.ibm.com> M: linux390@de.ibm.com L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ @@ -9810,7 +9809,7 @@ F: include/linux/wl12xx.h TIPC NETWORK LAYER M: Jon Maloy <jon.maloy@ericsson.com> -M: Allan Stephens <allan.stephens@windriver.com> +M: Ying Xue <ying.xue@windriver.com> L: netdev@vger.kernel.org (core kernel code) L: tipc-discussion@lists.sourceforge.net (user apps, general discussion) W: http://tipc.sourceforge.net/ diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi index fff0ee6..9f7c737 100644 --- a/arch/arm/boot/dts/at91sam9260.dtsi +++ b/arch/arm/boot/dts/at91sam9260.dtsi @@ -842,7 +842,7 @@ }; macb0: ethernet@fffc4000 { - compatible = "cdns,at32ap7000-macb", "cdns,macb"; + compatible = "cdns,at91sam9260-macb", "cdns,macb"; reg = <0xfffc4000 0x100>; interrupts = <21 IRQ_TYPE_LEVEL_HIGH 3>; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi index 1f67bb4..340179e 100644 --- a/arch/arm/boot/dts/at91sam9263.dtsi +++ b/arch/arm/boot/dts/at91sam9263.dtsi @@ -845,7 +845,7 @@ }; macb0: ethernet@fffbc000 { - compatible = "cdns,at32ap7000-macb", "cdns,macb"; + compatible = "cdns,at91sam9260-macb", "cdns,macb"; reg = <0xfffbc000 0x100>; interrupts = <21 IRQ_TYPE_LEVEL_HIGH 3>; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi index ee80aa9..586eab7 100644 --- a/arch/arm/boot/dts/at91sam9g45.dtsi +++ b/arch/arm/boot/dts/at91sam9g45.dtsi @@ -956,7 +956,7 @@ }; macb0: ethernet@fffbc000 { - compatible = "cdns,at32ap7000-macb", "cdns,macb"; + compatible = "cdns,at91sam9260-macb", "cdns,macb"; reg = <0xfffbc000 0x100>; interrupts = <25 IRQ_TYPE_LEVEL_HIGH 3>; pinctrl-names = "default"; diff --git 
a/arch/arm/boot/dts/at91sam9x5_macb0.dtsi b/arch/arm/boot/dts/at91sam9x5_macb0.dtsi index 57e89d1..73d7e30 100644 --- a/arch/arm/boot/dts/at91sam9x5_macb0.dtsi +++ b/arch/arm/boot/dts/at91sam9x5_macb0.dtsi @@ -53,7 +53,7 @@ }; macb0: ethernet@f802c000 { - compatible = "cdns,at32ap7000-macb", "cdns,macb"; + compatible = "cdns,at91sam9260-macb", "cdns,macb"; reg = <0xf802c000 0x100>; interrupts = <24 IRQ_TYPE_LEVEL_HIGH 3>; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/at91sam9x5_macb1.dtsi b/arch/arm/boot/dts/at91sam9x5_macb1.dtsi index 663676c..d81980c 100644 --- a/arch/arm/boot/dts/at91sam9x5_macb1.dtsi +++ b/arch/arm/boot/dts/at91sam9x5_macb1.dtsi @@ -41,7 +41,7 @@ }; macb1: ethernet@f8030000 { - compatible = "cdns,at32ap7000-macb", "cdns,macb"; + compatible = "cdns,at91sam9260-macb", "cdns,macb"; reg = <0xf8030000 0x100>; interrupts = <27 IRQ_TYPE_LEVEL_HIGH 3>; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/sama5d3_emac.dtsi b/arch/arm/boot/dts/sama5d3_emac.dtsi index fe2af92..b4544cf 100644 --- a/arch/arm/boot/dts/sama5d3_emac.dtsi +++ b/arch/arm/boot/dts/sama5d3_emac.dtsi @@ -41,7 +41,7 @@ }; macb1: ethernet@f802c000 { - compatible = "cdns,at32ap7000-macb", "cdns,macb"; + compatible = "cdns,at91sam9260-macb", "cdns,macb"; reg = <0xf802c000 0x100>; interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>; pinctrl-names = "default"; diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 22b0940..5084bdc 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -126,7 +126,7 @@ config PPC select IRQ_FORCED_THREADING select HAVE_RCU_TABLE_FREE if SMP select HAVE_SYSCALL_TRACEPOINTS - select HAVE_BPF_JIT if PPC64 + select HAVE_BPF_JIT select HAVE_ARCH_JUMP_LABEL select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_HAS_GCOV_PROFILE_ALL diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h index 21be8ae..dc85dcb 100644 --- a/arch/powerpc/include/asm/asm-compat.h +++ b/arch/powerpc/include/asm/asm-compat.h @@ -23,6 +23,8 @@ #define PPC_STL stringify_in_c(std) #define PPC_STLU stringify_in_c(stdu) #define PPC_LCMPI stringify_in_c(cmpdi) +#define PPC_LCMPLI stringify_in_c(cmpldi) +#define PPC_LCMP stringify_in_c(cmpd) #define PPC_LONG stringify_in_c(.llong) #define PPC_LONG_ALIGN stringify_in_c(.balign 8) #define PPC_TLNEI stringify_in_c(tdnei) @@ -52,6 +54,8 @@ #define PPC_STL stringify_in_c(stw) #define PPC_STLU stringify_in_c(stwu) #define PPC_LCMPI stringify_in_c(cmpwi) +#define PPC_LCMPLI stringify_in_c(cmplwi) +#define PPC_LCMP stringify_in_c(cmpw) #define PPC_LONG stringify_in_c(.long) #define PPC_LONG_ALIGN stringify_in_c(.balign 4) #define PPC_TLNEI stringify_in_c(twnei) diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 03cd858..2eadde0 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -212,6 +212,8 @@ #define PPC_INST_LWZ 0x80000000 #define PPC_INST_STD 0xf8000000 #define PPC_INST_STDU 0xf8000001 +#define PPC_INST_STW 0x90000000 +#define PPC_INST_STWU 0x94000000 #define PPC_INST_MFLR 0x7c0802a6 #define PPC_INST_MTLR 0x7c0803a6 #define PPC_INST_CMPWI 0x2c000000 diff --git a/arch/powerpc/net/Makefile b/arch/powerpc/net/Makefile index 266b395..1306a58 100644 --- a/arch/powerpc/net/Makefile +++ b/arch/powerpc/net/Makefile @@ -1,4 +1,4 @@ # # Arch-specific network modules # -obj-$(CONFIG_BPF_JIT) += bpf_jit_64.o bpf_jit_comp.o +obj-$(CONFIG_BPF_JIT) += bpf_jit_asm.o bpf_jit_comp.o diff --git a/arch/powerpc/net/bpf_jit.h 
b/arch/powerpc/net/bpf_jit.h index c406aa9..889fd19 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -10,12 +10,25 @@ #ifndef _BPF_JIT_H #define _BPF_JIT_H +#ifdef CONFIG_PPC64 +#define BPF_PPC_STACK_R3_OFF 48 #define BPF_PPC_STACK_LOCALS 32 #define BPF_PPC_STACK_BASIC (48+64) #define BPF_PPC_STACK_SAVE (18*8) #define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \ BPF_PPC_STACK_SAVE) #define BPF_PPC_SLOWPATH_FRAME (48+64) +#else +#define BPF_PPC_STACK_R3_OFF 24 +#define BPF_PPC_STACK_LOCALS 16 +#define BPF_PPC_STACK_BASIC (24+32) +#define BPF_PPC_STACK_SAVE (18*4) +#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \ + BPF_PPC_STACK_SAVE) +#define BPF_PPC_SLOWPATH_FRAME (24+32) +#endif + +#define REG_SZ (BITS_PER_LONG/8) /* * Generated code register usage: @@ -57,7 +70,11 @@ DECLARE_LOAD_FUNC(sk_load_half); DECLARE_LOAD_FUNC(sk_load_byte); DECLARE_LOAD_FUNC(sk_load_byte_msh); +#ifdef CONFIG_PPC64 #define FUNCTION_DESCR_SIZE 24 +#else +#define FUNCTION_DESCR_SIZE 0 +#endif /* * 16-bit immediate helper macros: HA() is for use with sign-extending instrs @@ -86,7 +103,12 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); #define PPC_LIS(r, i) PPC_ADDIS(r, 0, i) #define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \ ___PPC_RA(base) | ((i) & 0xfffc)) - +#define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \ + ___PPC_RA(base) | ((i) & 0xfffc)) +#define PPC_STW(r, base, i) EMIT(PPC_INST_STW | ___PPC_RS(r) | \ + ___PPC_RA(base) | ((i) & 0xfffc)) +#define PPC_STWU(r, base, i) EMIT(PPC_INST_STWU | ___PPC_RS(r) | \ + ___PPC_RA(base) | ((i) & 0xfffc)) #define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \ ___PPC_RA(base) | IMM_L(i)) @@ -98,6 +120,17 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); ___PPC_RA(base) | IMM_L(i)) #define PPC_LHBRX(r, base, b) EMIT(PPC_INST_LHBRX | ___PPC_RT(r) | \ ___PPC_RA(base) | ___PPC_RB(b)) + +#ifdef CONFIG_PPC64 +#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0) +#define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0) +#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0) +#else +#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0) +#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0) +#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0) +#endif + /* Convenience helpers for the above with 'far' offsets: */ #define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LBZ(r, base, i); \ else { PPC_ADDIS(r, base, IMM_HA(i)); \ @@ -115,6 +148,29 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); else { PPC_ADDIS(r, base, IMM_HA(i)); \ PPC_LHZ(r, r, IMM_L(i)); } } while(0) +#ifdef CONFIG_PPC64 +#define PPC_LL_OFFS(r, base, i) do { PPC_LD_OFFS(r, base, i); } while(0) +#else +#define PPC_LL_OFFS(r, base, i) do { PPC_LWZ_OFFS(r, base, i); } while(0) +#endif + +#ifdef CONFIG_SMP +#ifdef CONFIG_PPC64 +#define PPC_BPF_LOAD_CPU(r) \ + do { BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct, paca_index) != 2); \ + PPC_LHZ_OFFS(r, 13, offsetof(struct paca_struct, paca_index)); \ + } while (0) +#else +#define PPC_BPF_LOAD_CPU(r) \ + do { BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4); \ + PPC_LHZ_OFFS(r, (1 & ~(THREAD_SIZE - 1)), \ + offsetof(struct thread_info, cpu)); \ + } while(0) +#endif +#else +#define PPC_BPF_LOAD_CPU(r) do { PPC_LI(r, 0); } while(0) +#endif + #define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i)) #define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i)) #define 
PPC_CMPLWI(a, i) EMIT(PPC_INST_CMPLWI | ___PPC_RA(a) | IMM_L(i)) @@ -196,6 +252,12 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); PPC_ORI(d, d, (uintptr_t)(i) & 0xffff); \ } } while (0); +#ifdef CONFIG_PPC64 +#define PPC_FUNC_ADDR(d,i) do { PPC_LI64(d, i); } while(0) +#else +#define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0) +#endif + #define PPC_LHBRX_OFFS(r, base, i) \ do { PPC_LI32(r, i); PPC_LHBRX(r, r, base); } while(0) #ifdef __LITTLE_ENDIAN__ diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_asm.S index 8f87d92..8ff5a3b 100644 --- a/arch/powerpc/net/bpf_jit_64.S +++ b/arch/powerpc/net/bpf_jit_asm.S @@ -34,13 +34,13 @@ */ .globl sk_load_word sk_load_word: - cmpdi r_addr, 0 + PPC_LCMPI r_addr, 0 blt bpf_slow_path_word_neg .globl sk_load_word_positive_offset sk_load_word_positive_offset: /* Are we accessing past headlen? */ subi r_scratch1, r_HL, 4 - cmpd r_scratch1, r_addr + PPC_LCMP r_scratch1, r_addr blt bpf_slow_path_word /* Nope, just hitting the header. cr0 here is eq or gt! */ #ifdef __LITTLE_ENDIAN__ @@ -52,12 +52,12 @@ sk_load_word_positive_offset: .globl sk_load_half sk_load_half: - cmpdi r_addr, 0 + PPC_LCMPI r_addr, 0 blt bpf_slow_path_half_neg .globl sk_load_half_positive_offset sk_load_half_positive_offset: subi r_scratch1, r_HL, 2 - cmpd r_scratch1, r_addr + PPC_LCMP r_scratch1, r_addr blt bpf_slow_path_half #ifdef __LITTLE_ENDIAN__ lhbrx r_A, r_D, r_addr @@ -68,11 +68,11 @@ sk_load_half_positive_offset: .globl sk_load_byte sk_load_byte: - cmpdi r_addr, 0 + PPC_LCMPI r_addr, 0 blt bpf_slow_path_byte_neg .globl sk_load_byte_positive_offset sk_load_byte_positive_offset: - cmpd r_HL, r_addr + PPC_LCMP r_HL, r_addr ble bpf_slow_path_byte lbzx r_A, r_D, r_addr blr @@ -83,11 +83,11 @@ sk_load_byte_positive_offset: */ .globl sk_load_byte_msh sk_load_byte_msh: - cmpdi r_addr, 0 + PPC_LCMPI r_addr, 0 blt bpf_slow_path_byte_msh_neg .globl sk_load_byte_msh_positive_offset sk_load_byte_msh_positive_offset: - cmpd r_HL, r_addr + PPC_LCMP r_HL, r_addr ble bpf_slow_path_byte_msh lbzx r_X, r_D, r_addr rlwinm r_X, r_X, 2, 32-4-2, 31-2 @@ -101,13 +101,13 @@ sk_load_byte_msh_positive_offset: */ #define bpf_slow_path_common(SIZE) \ mflr r0; \ - std r0, 16(r1); \ + PPC_STL r0, PPC_LR_STKOFF(r1); \ /* R3 goes in parameter space of caller's frame */ \ - std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \ - std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \ - std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \ - addi r5, r1, BPF_PPC_STACK_BASIC+(2*8); \ - stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \ + PPC_STL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \ + PPC_STL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \ + PPC_STL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \ + addi r5, r1, BPF_PPC_STACK_BASIC+(2*REG_SZ); \ + PPC_STLU r1, -BPF_PPC_SLOWPATH_FRAME(r1); \ /* R3 = r_skb, as passed */ \ mr r4, r_addr; \ li r6, SIZE; \ @@ -115,19 +115,19 @@ sk_load_byte_msh_positive_offset: nop; \ /* R3 = 0 on success */ \ addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \ - ld r0, 16(r1); \ - ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \ - ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \ + PPC_LL r0, PPC_LR_STKOFF(r1); \ + PPC_LL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \ + PPC_LL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \ mtlr r0; \ - cmpdi r3, 0; \ + PPC_LCMPI r3, 0; \ blt bpf_error; /* cr0 = LT */ \ - ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \ + PPC_LL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \ /* Great success! 
*/ bpf_slow_path_word: bpf_slow_path_common(4) /* Data value is on stack, and cr0 != LT */ - lwz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1) + lwz r_A, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1) blr bpf_slow_path_half: @@ -154,12 +154,12 @@ bpf_slow_path_byte_msh: */ #define sk_negative_common(SIZE) \ mflr r0; \ - std r0, 16(r1); \ + PPC_STL r0, PPC_LR_STKOFF(r1); \ /* R3 goes in parameter space of caller's frame */ \ - std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \ - std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \ - std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \ - stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \ + PPC_STL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \ + PPC_STL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \ + PPC_STL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \ + PPC_STLU r1, -BPF_PPC_SLOWPATH_FRAME(r1); \ /* R3 = r_skb, as passed */ \ mr r4, r_addr; \ li r5, SIZE; \ @@ -167,19 +167,19 @@ bpf_slow_path_byte_msh: nop; \ /* R3 != 0 on success */ \ addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \ - ld r0, 16(r1); \ - ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \ - ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \ + PPC_LL r0, PPC_LR_STKOFF(r1); \ + PPC_LL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \ + PPC_LL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \ mtlr r0; \ - cmpldi r3, 0; \ + PPC_LCMPLI r3, 0; \ beq bpf_error_slow; /* cr0 = EQ */ \ mr r_addr, r3; \ - ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \ + PPC_LL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \ /* Great success! */ bpf_slow_path_word_neg: lis r_scratch1,-32 /* SKF_LL_OFF */ - cmpd r_addr, r_scratch1 /* addr < SKF_* */ + PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */ blt bpf_error /* cr0 = LT */ .globl sk_load_word_negative_offset sk_load_word_negative_offset: @@ -189,7 +189,7 @@ sk_load_word_negative_offset: bpf_slow_path_half_neg: lis r_scratch1,-32 /* SKF_LL_OFF */ - cmpd r_addr, r_scratch1 /* addr < SKF_* */ + PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */ blt bpf_error /* cr0 = LT */ .globl sk_load_half_negative_offset sk_load_half_negative_offset: @@ -199,7 +199,7 @@ sk_load_half_negative_offset: bpf_slow_path_byte_neg: lis r_scratch1,-32 /* SKF_LL_OFF */ - cmpd r_addr, r_scratch1 /* addr < SKF_* */ + PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */ blt bpf_error /* cr0 = LT */ .globl sk_load_byte_negative_offset sk_load_byte_negative_offset: @@ -209,7 +209,7 @@ sk_load_byte_negative_offset: bpf_slow_path_byte_msh_neg: lis r_scratch1,-32 /* SKF_LL_OFF */ - cmpd r_addr, r_scratch1 /* addr < SKF_* */ + PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */ blt bpf_error /* cr0 = LT */ .globl sk_load_byte_msh_negative_offset sk_load_byte_msh_negative_offset: @@ -221,7 +221,7 @@ sk_load_byte_msh_negative_offset: bpf_error_slow: /* fabricate a cr0 = lt */ li r_scratch1, -1 - cmpdi r_scratch1, 0 + PPC_LCMPI r_scratch1, 0 bpf_error: /* Entered with cr0 = lt */ li r3, 0 diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index d1916b5..17cea18 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -1,8 +1,9 @@ -/* bpf_jit_comp.c: BPF JIT compiler for PPC64 +/* bpf_jit_comp.c: BPF JIT compiler * * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation * * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com) + * Ported to ppc32 by Denis Kirjanov <kda@linux-powerpc.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -36,11 +37,11 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image, if 
(ctx->seen & SEEN_DATAREF) { /* If we call any helpers (for loads), save LR */ EMIT(PPC_INST_MFLR | __PPC_RT(R0)); - PPC_STD(0, 1, 16); + PPC_BPF_STL(0, 1, PPC_LR_STKOFF); /* Back up non-volatile regs. */ - PPC_STD(r_D, 1, -(8*(32-r_D))); - PPC_STD(r_HL, 1, -(8*(32-r_HL))); + PPC_BPF_STL(r_D, 1, -(REG_SZ*(32-r_D))); + PPC_BPF_STL(r_HL, 1, -(REG_SZ*(32-r_HL))); } if (ctx->seen & SEEN_MEM) { /* @@ -49,11 +50,10 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image, */ for (i = r_M; i < (r_M+16); i++) { if (ctx->seen & (1 << (i-r_M))) - PPC_STD(i, 1, -(8*(32-i))); + PPC_BPF_STL(i, 1, -(REG_SZ*(32-i))); } } - EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) | - (-BPF_PPC_STACKFRAME & 0xfffc)); + PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME); } if (ctx->seen & SEEN_DATAREF) { @@ -67,7 +67,7 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image, data_len)); PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len)); PPC_SUB(r_HL, r_HL, r_scratch1); - PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data)); + PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data)); } if (ctx->seen & SEEN_XREG) { @@ -99,16 +99,16 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) { PPC_ADDI(1, 1, BPF_PPC_STACKFRAME); if (ctx->seen & SEEN_DATAREF) { - PPC_LD(0, 1, 16); + PPC_BPF_LL(0, 1, PPC_LR_STKOFF); PPC_MTLR(0); - PPC_LD(r_D, 1, -(8*(32-r_D))); - PPC_LD(r_HL, 1, -(8*(32-r_HL))); + PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D))); + PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL))); } if (ctx->seen & SEEN_MEM) { /* Restore any saved non-vol registers */ for (i = r_M; i < (r_M+16); i++) { if (ctx->seen & (1 << (i-r_M))) - PPC_LD(i, 1, -(8*(32-i))); + PPC_BPF_LL(i, 1, -(REG_SZ*(32-i))); } } } @@ -355,7 +355,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, ifindex) != 4); BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2); - PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff, + PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff, dev)); PPC_CMPDI(r_scratch1, 0); if (ctx->pc_ret0 != -1) { @@ -411,20 +411,8 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, PPC_SRWI(r_A, r_A, 5); break; case BPF_ANC | SKF_AD_CPU: -#ifdef CONFIG_SMP - /* - * PACA ptr is r13: - * raw_smp_processor_id() = local_paca->paca_index - */ - BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct, - paca_index) != 2); - PPC_LHZ_OFFS(r_A, 13, - offsetof(struct paca_struct, paca_index)); -#else - PPC_LI(r_A, 0); -#endif + PPC_BPF_LOAD_CPU(r_A); break; - /*** Absolute loads from packet header/data ***/ case BPF_LD | BPF_W | BPF_ABS: func = CHOOSE_LOAD_FUNC(K, sk_load_word); @@ -437,7 +425,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, common_load: /* Load from [K]. */ ctx->seen |= SEEN_DATAREF; - PPC_LI64(r_scratch1, func); + PPC_FUNC_ADDR(r_scratch1, func); PPC_MTLR(r_scratch1); PPC_LI32(r_addr, K); PPC_BLRL(); @@ -463,7 +451,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, * in the helper functions. 
*/ ctx->seen |= SEEN_DATAREF | SEEN_XREG; - PPC_LI64(r_scratch1, func); + PPC_FUNC_ADDR(r_scratch1, func); PPC_MTLR(r_scratch1); PPC_ADDI(r_addr, r_X, IMM_L(K)); if (K >= 32768) @@ -685,9 +673,11 @@ void bpf_jit_compile(struct bpf_prog *fp) if (image) { bpf_flush_icache(code_base, code_base + (proglen/4)); +#ifdef CONFIG_PPC64 /* Function descriptor nastiness: Address + TOC */ ((u64 *)image)[0] = (u64)code_base; ((u64 *)image)[1] = local_paca->kernel_toc; +#endif fp->bpf_func = (void *)image; fp->jited = true; } diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index f238720..0220e7d 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c @@ -79,7 +79,6 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = { {.irq = IRQIO_TAP, .name = "TAP", .desc = "[I/O] Tape"}, {.irq = IRQIO_VMR, .name = "VMR", .desc = "[I/O] Unit Record Devices"}, {.irq = IRQIO_LCS, .name = "LCS", .desc = "[I/O] LCS"}, - {.irq = IRQIO_CLW, .name = "CLW", .desc = "[I/O] CLAW"}, {.irq = IRQIO_CTC, .name = "CTC", .desc = "[I/O] CTC"}, {.irq = IRQIO_APB, .name = "APB", .desc = "[I/O] AP Bus"}, {.irq = IRQIO_ADM, .name = "ADM", .desc = "[I/O] EADM Subchannel"}, diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 01da360..0a465e0 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -34,8 +34,8 @@ struct hash_ctx { struct ahash_request req; }; -static int hash_sendmsg(struct kiocb *unused, struct socket *sock, - struct msghdr *msg, size_t ignored) +static int hash_sendmsg(struct socket *sock, struct msghdr *msg, + size_t ignored) { int limit = ALG_MAX_PAGES * PAGE_SIZE; struct sock *sk = sock->sk; @@ -139,8 +139,8 @@ unlock: return err ?: size; } -static int hash_recvmsg(struct kiocb *unused, struct socket *sock, - struct msghdr *msg, size_t len, int flags) +static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + int flags) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c index 67f612c..3acba0a 100644 --- a/crypto/algif_rng.c +++ b/crypto/algif_rng.c @@ -55,8 +55,8 @@ struct rng_ctx { struct crypto_rng *drng; }; -static int rng_recvmsg(struct kiocb *unused, struct socket *sock, - struct msghdr *msg, size_t len, int flags) +static int rng_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + int flags) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 0c8a1e5..b9743dc 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -239,8 +239,8 @@ static void skcipher_data_wakeup(struct sock *sk) rcu_read_unlock(); } -static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock, - struct msghdr *msg, size_t size) +static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, + size_t size) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); @@ -424,8 +424,8 @@ unlock: return err ?: size; } -static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock, - struct msghdr *msg, size_t ignored, int flags) +static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg, + size_t ignored, int flags) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig index 0ee48be..9be17d3 100644 --- a/drivers/bcma/Kconfig +++ b/drivers/bcma/Kconfig @@ -1,6 +1,6 @@ config BCMA_POSSIBLE bool - depends on HAS_IOMEM && HAS_DMA + depends on HAS_IOMEM && HAS_DMA && PCI default y menu "Broadcom specific AMBA" 
@@ -26,6 +26,7 @@ config BCMA_HOST_PCI_POSSIBLE config BCMA_HOST_PCI bool "Support for BCMA on PCI-host bus" depends on BCMA_HOST_PCI_POSSIBLE + select BCMA_DRIVER_PCI default y config BCMA_DRIVER_PCI_HOSTMODE @@ -44,6 +45,22 @@ config BCMA_HOST_SOC If unsure, say N +# TODO: make it depend on PCI when ready +config BCMA_DRIVER_PCI + bool + default y + help + BCMA bus may have many versions of PCIe core. This driver + supports: + 1) PCIe core working in clientmode + 2) PCIe Gen 2 clientmode core + + In general PCIe (Gen 2) clientmode core is required on PCIe + hosted buses. It's responsible for initialization and basic + hardware management. + This driver is also prerequisite for a hostmode PCIe core + support. + config BCMA_DRIVER_MIPS bool "BCMA Broadcom MIPS core driver" depends on BCMA && MIPS diff --git a/drivers/bcma/Makefile b/drivers/bcma/Makefile index 838b4b9..f32af9b 100644 --- a/drivers/bcma/Makefile +++ b/drivers/bcma/Makefile @@ -3,8 +3,8 @@ bcma-y += driver_chipcommon.o driver_chipcommon_pmu.o bcma-y += driver_chipcommon_b.o bcma-$(CONFIG_BCMA_SFLASH) += driver_chipcommon_sflash.o bcma-$(CONFIG_BCMA_NFLASH) += driver_chipcommon_nflash.o -bcma-y += driver_pci.o -bcma-y += driver_pcie2.o +bcma-$(CONFIG_BCMA_DRIVER_PCI) += driver_pci.o +bcma-$(CONFIG_BCMA_DRIVER_PCI) += driver_pcie2.o bcma-$(CONFIG_BCMA_DRIVER_PCI_HOSTMODE) += driver_pci_host.o bcma-$(CONFIG_BCMA_DRIVER_MIPS) += driver_mips.o bcma-$(CONFIG_BCMA_DRIVER_GMAC_CMN) += driver_gmac_cmn.o diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h index ac6c5fc..5a1d224 100644 --- a/drivers/bcma/bcma_private.h +++ b/drivers/bcma/bcma_private.h @@ -26,6 +26,7 @@ bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value, int timeout); void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core); void bcma_init_bus(struct bcma_bus *bus); +void bcma_unregister_cores(struct bcma_bus *bus); int bcma_bus_register(struct bcma_bus *bus); void bcma_bus_unregister(struct bcma_bus *bus); int __init bcma_bus_early_register(struct bcma_bus *bus); @@ -42,6 +43,9 @@ int bcma_bus_scan(struct bcma_bus *bus); int bcma_sprom_get(struct bcma_bus *bus); /* driver_chipcommon.c */ +void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc); +void bcma_core_chipcommon_init(struct bcma_drv_cc *cc); +void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable); #ifdef CONFIG_BCMA_DRIVER_MIPS void bcma_chipco_serial_init(struct bcma_drv_cc *cc); extern struct platform_device bcma_pflash_dev; @@ -52,6 +56,8 @@ int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb); void bcma_core_chipcommon_b_free(struct bcma_drv_cc_b *ccb); /* driver_chipcommon_pmu.c */ +void bcma_pmu_early_init(struct bcma_drv_cc *cc); +void bcma_pmu_init(struct bcma_drv_cc *cc); u32 bcma_pmu_get_alp_clock(struct bcma_drv_cc *cc); u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc); @@ -101,6 +107,14 @@ static inline void __exit bcma_host_soc_unregister_driver(void) /* driver_pci.c */ u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address); +void bcma_core_pci_early_init(struct bcma_drv_pci *pc); +void bcma_core_pci_init(struct bcma_drv_pci *pc); +void bcma_core_pci_up(struct bcma_drv_pci *pc); +void bcma_core_pci_down(struct bcma_drv_pci *pc); + +/* driver_pcie2.c */ +void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2); +void bcma_core_pcie2_up(struct bcma_drv_pcie2 *pcie2); extern int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc); @@ -117,6 +131,39 @@ static inline void 
bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc) } #endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */ +/************************************************** + * driver_mips.c + **************************************************/ + +#ifdef CONFIG_BCMA_DRIVER_MIPS +unsigned int bcma_core_mips_irq(struct bcma_device *dev); +void bcma_core_mips_early_init(struct bcma_drv_mips *mcore); +void bcma_core_mips_init(struct bcma_drv_mips *mcore); +#else +static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev) +{ + return 0; +} +static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) +{ +} +static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) +{ +} +#endif + +/************************************************** + * driver_gmac_cmn.c + **************************************************/ + +#ifdef CONFIG_BCMA_DRIVER_GMAC_CMN +void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc); +#else +static inline void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc) +{ +} +#endif + #ifdef CONFIG_BCMA_DRIVER_GPIO /* driver_gpio.c */ int bcma_gpio_init(struct bcma_drv_cc *cc); diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c index 598a6cd..dce34fb 100644 --- a/drivers/bcma/driver_gpio.c +++ b/drivers/bcma/driver_gpio.c @@ -76,7 +76,7 @@ static void bcma_gpio_free(struct gpio_chip *chip, unsigned gpio) bcma_chipco_gpio_pullup(cc, 1 << gpio, 0); } -#if IS_BUILTIN(CONFIG_BCM47XX) +#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X) static int bcma_gpio_to_irq(struct gpio_chip *chip, unsigned gpio) { struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip); @@ -215,7 +215,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc) chip->set = bcma_gpio_set_value; chip->direction_input = bcma_gpio_direction_input; chip->direction_output = bcma_gpio_direction_output; -#if IS_BUILTIN(CONFIG_BCM47XX) +#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X) chip->to_irq = bcma_gpio_to_irq; #endif #if IS_BUILTIN(CONFIG_OF) diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c index 7866664..cfd35bc 100644 --- a/drivers/bcma/driver_pci.c +++ b/drivers/bcma/driver_pci.c @@ -282,21 +282,21 @@ void bcma_core_pci_power_save(struct bcma_bus *bus, bool up) } EXPORT_SYMBOL_GPL(bcma_core_pci_power_save); -int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, +int bcma_core_pci_irq_ctl(struct bcma_bus *bus, struct bcma_device *core, bool enable) { struct pci_dev *pdev; u32 coremask, tmp; int err = 0; - if (!pc || core->bus->hosttype != BCMA_HOSTTYPE_PCI) { + if (bus->hosttype != BCMA_HOSTTYPE_PCI) { /* This bcma device is not on a PCI host-bus. So the IRQs are * not routed through the PCI core. * So we must not enable routing through the PCI core. 
*/ goto out; } - pdev = pc->core->bus->host_pci; + pdev = bus->host_pci; err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp); if (err) @@ -328,28 +328,12 @@ static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend) bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG); } -void bcma_core_pci_up(struct bcma_bus *bus) +void bcma_core_pci_up(struct bcma_drv_pci *pc) { - struct bcma_drv_pci *pc; - - if (bus->hosttype != BCMA_HOSTTYPE_PCI) - return; - - pc = &bus->drv_pci[0]; - bcma_core_pci_extend_L1timer(pc, true); } -EXPORT_SYMBOL_GPL(bcma_core_pci_up); -void bcma_core_pci_down(struct bcma_bus *bus) +void bcma_core_pci_down(struct bcma_drv_pci *pc) { - struct bcma_drv_pci *pc; - - if (bus->hosttype != BCMA_HOSTTYPE_PCI) - return; - - pc = &bus->drv_pci[0]; - bcma_core_pci_extend_L1timer(pc, false); } -EXPORT_SYMBOL_GPL(bcma_core_pci_down); diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c index c8a6b74..c42cec7 100644 --- a/drivers/bcma/driver_pci_host.c +++ b/drivers/bcma/driver_pci_host.c @@ -11,6 +11,7 @@ #include "bcma_private.h" #include <linux/pci.h> +#include <linux/slab.h> #include <linux/export.h> #include <linux/bcma/bcma.h> #include <asm/paccess.h> diff --git a/drivers/bcma/driver_pcie2.c b/drivers/bcma/driver_pcie2.c index e4be537..b1a6e32 100644 --- a/drivers/bcma/driver_pcie2.c +++ b/drivers/bcma/driver_pcie2.c @@ -10,6 +10,7 @@ #include "bcma_private.h" #include <linux/bcma/bcma.h> +#include <linux/pci.h> /************************************************** * R/W ops. @@ -156,14 +157,23 @@ static void pciedev_reg_pm_clk_period(struct bcma_drv_pcie2 *pcie2) void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2) { - struct bcma_chipinfo *ci = &pcie2->core->bus->chipinfo; + struct bcma_bus *bus = pcie2->core->bus; + struct bcma_chipinfo *ci = &bus->chipinfo; u32 tmp; tmp = pcie2_read32(pcie2, BCMA_CORE_PCIE2_SPROM(54)); if ((tmp & 0xe) >> 1 == 2) bcma_core_pcie2_cfg_write(pcie2, 0x4e0, 0x17); - /* TODO: Do we need pcie_reqsize? */ + switch (bus->chipinfo.id) { + case BCMA_CHIP_ID_BCM4360: + case BCMA_CHIP_ID_BCM4352: + pcie2->reqsize = 1024; + break; + default: + pcie2->reqsize = 128; + break; + } if (ci->id == BCMA_CHIP_ID_BCM4360 && ci->rev > 3) bcma_core_pcie2_war_delay_perst_enab(pcie2, true); @@ -173,3 +183,18 @@ void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2) pciedev_crwlpciegen2_180(pcie2); pciedev_crwlpciegen2_182(pcie2); } + +/************************************************** + * Runtime ops. 
+ **************************************************/ + +void bcma_core_pcie2_up(struct bcma_drv_pcie2 *pcie2) +{ + struct bcma_bus *bus = pcie2->core->bus; + struct pci_dev *dev = bus->host_pci; + int err; + + err = pcie_set_readrq(dev, pcie2->reqsize); + if (err) + bcma_err(bus, "Error setting PCI_EXP_DEVCTL_READRQ: %d\n", err); +} diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c index 53c6a8a..a62a2f9 100644 --- a/drivers/bcma/host_pci.c +++ b/drivers/bcma/host_pci.c @@ -213,16 +213,26 @@ static int bcma_host_pci_probe(struct pci_dev *dev, /* Initialize struct, detect chip */ bcma_init_bus(bus); + /* Scan bus to find out generation of PCIe core */ + err = bcma_bus_scan(bus); + if (err) + goto err_pci_unmap_mmio; + + if (bcma_find_core(bus, BCMA_CORE_PCIE2)) + bus->host_is_pcie2 = true; + /* Register */ err = bcma_bus_register(bus); if (err) - goto err_pci_unmap_mmio; + goto err_unregister_cores; pci_set_drvdata(dev, bus); out: return err; +err_unregister_cores: + bcma_unregister_cores(bus); err_pci_unmap_mmio: pci_iounmap(dev, bus->mmio); err_pci_release_regions: @@ -283,9 +293,12 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4365) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43b1) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) }, /* 0xa8db, BCM43217 (sic!) */ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43228) }, /* 0xa8dc */ @@ -310,3 +323,31 @@ void __exit bcma_host_pci_exit(void) { pci_unregister_driver(&bcma_pci_bridge_driver); } + +/************************************************** + * Runtime ops for drivers. 
+ **************************************************/ + +/* See also pcicore_up */ +void bcma_host_pci_up(struct bcma_bus *bus) +{ + if (bus->hosttype != BCMA_HOSTTYPE_PCI) + return; + + if (bus->host_is_pcie2) + bcma_core_pcie2_up(&bus->drv_pcie2); + else + bcma_core_pci_up(&bus->drv_pci[0]); +} +EXPORT_SYMBOL_GPL(bcma_host_pci_up); + +/* See also pcicore_down */ +void bcma_host_pci_down(struct bcma_bus *bus) +{ + if (bus->hosttype != BCMA_HOSTTYPE_PCI) + return; + + if (!bus->host_is_pcie2) + bcma_core_pci_down(&bus->drv_pci[0]); +} +EXPORT_SYMBOL_GPL(bcma_host_pci_down); diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 38bde6e..9635f10 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -363,7 +363,7 @@ static int bcma_register_devices(struct bcma_bus *bus) return 0; } -static void bcma_unregister_cores(struct bcma_bus *bus) +void bcma_unregister_cores(struct bcma_bus *bus) { struct bcma_device *core, *tmp; diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index de4c849..288547a 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -65,6 +65,7 @@ static const struct usb_device_id ath3k_table[] = { /* Atheros AR3011 with sflash firmware*/ { USB_DEVICE(0x0489, 0xE027) }, { USB_DEVICE(0x0489, 0xE03D) }, + { USB_DEVICE(0x04F2, 0xAFF1) }, { USB_DEVICE(0x0930, 0x0215) }, { USB_DEVICE(0x0CF3, 0x3002) }, { USB_DEVICE(0x0CF3, 0xE019) }, diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h index e75f8ee..086f0ec 100644 --- a/drivers/bluetooth/btmrvl_drv.h +++ b/drivers/bluetooth/btmrvl_drv.h @@ -111,6 +111,7 @@ struct btmrvl_private { /* Vendor specific Bluetooth commands */ #define BT_CMD_PSCAN_WIN_REPORT_ENABLE 0xFC03 +#define BT_CMD_ROUTE_SCO_TO_HOST 0xFC1D #define BT_CMD_SET_BDADDR 0xFC22 #define BT_CMD_AUTO_SLEEP_MODE 0xFC23 #define BT_CMD_HOST_SLEEP_CONFIG 0xFC59 diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c index 4135977..de05deb 100644 --- a/drivers/bluetooth/btmrvl_main.c +++ b/drivers/bluetooth/btmrvl_main.c @@ -230,6 +230,18 @@ int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd) } EXPORT_SYMBOL_GPL(btmrvl_send_module_cfg_cmd); +static int btmrvl_enable_sco_routing_to_host(struct btmrvl_private *priv) +{ + int ret; + u8 subcmd = 0; + + ret = btmrvl_send_sync_cmd(priv, BT_CMD_ROUTE_SCO_TO_HOST, &subcmd, 1); + if (ret) + BT_ERR("BT_CMD_ROUTE_SCO_TO_HOST command failed: %#x", ret); + + return ret; +} + int btmrvl_pscan_window_reporting(struct btmrvl_private *priv, u8 subcmd) { struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; @@ -558,6 +570,8 @@ static int btmrvl_setup(struct hci_dev *hdev) btmrvl_check_device_tree(priv); + btmrvl_enable_sco_routing_to_host(priv); + btmrvl_pscan_window_reporting(priv, 0x01); priv->btmrvl_dev.psmode = 1; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 8bfc4c2..8c1bf61 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -159,6 +159,7 @@ static const struct usb_device_id blacklist_table[] = { /* Atheros 3011 with sflash firmware */ { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x04f2, 0xaff1), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE }, @@ -339,16 +340,6 @@ struct btusb_data { int (*recv_bulk)(struct btusb_data *data, 
void *buffer, int count); }; -static int btusb_wait_on_bit_timeout(void *word, int bit, unsigned long timeout, - unsigned mode) -{ - might_sleep(); - if (!test_bit(bit, word)) - return 0; - return out_of_line_wait_on_bit_timeout(word, bit, bit_wait_timeout, - mode, timeout); -} - static inline void btusb_free_frags(struct btusb_data *data) { unsigned long flags; @@ -2197,9 +2188,9 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) * and thus just timeout if that happens and fail the setup * of this device. */ - err = btusb_wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING, - msecs_to_jiffies(5000), - TASK_INTERRUPTIBLE); + err = wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING, + TASK_INTERRUPTIBLE, + msecs_to_jiffies(5000)); if (err == 1) { BT_ERR("%s: Firmware loading interrupted", hdev->name); err = -EINTR; @@ -2250,9 +2241,9 @@ done: */ BT_INFO("%s: Waiting for device to boot", hdev->name); - err = btusb_wait_on_bit_timeout(&data->flags, BTUSB_BOOTING, - msecs_to_jiffies(1000), - TASK_INTERRUPTIBLE); + err = wait_on_bit_timeout(&data->flags, BTUSB_BOOTING, + TASK_INTERRUPTIBLE, + msecs_to_jiffies(1000)); if (err == 1) { BT_ERR("%s: Device boot interrupted", hdev->name); @@ -2332,6 +2323,27 @@ static int btusb_set_bdaddr_intel(struct hci_dev *hdev, const bdaddr_t *bdaddr) return 0; } +static int btusb_shutdown_intel(struct hci_dev *hdev) +{ + struct sk_buff *skb; + long ret; + + /* Some platforms have an issue with BT LED when the interface is + * down or BT radio is turned off, which takes 5 seconds to BT LED + * goes off. This command turns off the BT LED immediately. + */ + skb = __hci_cmd_sync(hdev, 0xfc3f, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + ret = PTR_ERR(skb); + BT_ERR("%s: turning off Intel device LED failed (%ld)", + hdev->name, ret); + return ret; + } + kfree_skb(skb); + + return 0; +} + static int btusb_set_bdaddr_marvell(struct hci_dev *hdev, const bdaddr_t *bdaddr) { @@ -2355,6 +2367,23 @@ static int btusb_set_bdaddr_marvell(struct hci_dev *hdev, return 0; } +static const struct { + u16 subver; + const char *name; +} bcm_subver_table[] = { + { 0x210b, "BCM43142A0" }, /* 001.001.011 */ + { 0x2112, "BCM4314A0" }, /* 001.001.018 */ + { 0x2118, "BCM20702A0" }, /* 001.001.024 */ + { 0x2126, "BCM4335A0" }, /* 001.001.038 */ + { 0x220e, "BCM20702A1" }, /* 001.002.014 */ + { 0x230f, "BCM4354A2" }, /* 001.003.015 */ + { 0x4106, "BCM4335B0" }, /* 002.001.006 */ + { 0x410e, "BCM20702B0" }, /* 002.001.014 */ + { 0x6109, "BCM4335C0" }, /* 003.001.009 */ + { 0x610c, "BCM4354" }, /* 003.001.012 */ + { } +}; + #define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}}) static int btusb_setup_bcm_patchram(struct hci_dev *hdev) @@ -2367,29 +2396,20 @@ static int btusb_setup_bcm_patchram(struct hci_dev *hdev) size_t fw_size; const struct hci_command_hdr *cmd; const u8 *cmd_param; - u16 opcode; + u16 opcode, subver, rev; + const char *hw_name = NULL; struct sk_buff *skb; struct hci_rp_read_local_version *ver; struct hci_rp_read_bd_addr *bda; long ret; - - snprintf(fw_name, sizeof(fw_name), "brcm/%s-%04x-%04x.hcd", - udev->product ? 
udev->product : "BCM", - le16_to_cpu(udev->descriptor.idVendor), - le16_to_cpu(udev->descriptor.idProduct)); - - ret = request_firmware(&fw, fw_name, &hdev->dev); - if (ret < 0) { - BT_INFO("%s: BCM: patch %s not found", hdev->name, fw_name); - return 0; - } + int i; /* Reset */ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { ret = PTR_ERR(skb); BT_ERR("%s: HCI_OP_RESET failed (%ld)", hdev->name, ret); - goto done; + return ret; } kfree_skb(skb); @@ -2400,23 +2420,43 @@ static int btusb_setup_bcm_patchram(struct hci_dev *hdev) ret = PTR_ERR(skb); BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)", hdev->name, ret); - goto done; + return ret; } if (skb->len != sizeof(*ver)) { BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch", hdev->name); kfree_skb(skb); - ret = -EIO; - goto done; + return -EIO; } ver = (struct hci_rp_read_local_version *)skb->data; - BT_INFO("%s: BCM: patching hci_ver=%02x hci_rev=%04x lmp_ver=%02x " - "lmp_subver=%04x", hdev->name, ver->hci_ver, ver->hci_rev, - ver->lmp_ver, ver->lmp_subver); + rev = le16_to_cpu(ver->hci_rev); + subver = le16_to_cpu(ver->lmp_subver); kfree_skb(skb); + for (i = 0; bcm_subver_table[i].name; i++) { + if (subver == bcm_subver_table[i].subver) { + hw_name = bcm_subver_table[i].name; + break; + } + } + + BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name, + hw_name ? : "BCM", (subver & 0x7000) >> 13, + (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff); + + snprintf(fw_name, sizeof(fw_name), "brcm/%s-%4.4x-%4.4x.hcd", + hw_name ? : "BCM", + le16_to_cpu(udev->descriptor.idVendor), + le16_to_cpu(udev->descriptor.idProduct)); + + ret = request_firmware(&fw, fw_name, &hdev->dev); + if (ret < 0) { + BT_INFO("%s: BCM: patch %s not found", hdev->name, fw_name); + return 0; + } + /* Start Download */ skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { @@ -2494,11 +2534,14 @@ reset_fw: } ver = (struct hci_rp_read_local_version *)skb->data; - BT_INFO("%s: BCM: firmware hci_ver=%02x hci_rev=%04x lmp_ver=%02x " - "lmp_subver=%04x", hdev->name, ver->hci_ver, ver->hci_rev, - ver->lmp_ver, ver->lmp_subver); + rev = le16_to_cpu(ver->hci_rev); + subver = le16_to_cpu(ver->lmp_subver); kfree_skb(skb); + BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name, + hw_name ? 
: "BCM", (subver & 0x7000) >> 13, + (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff); + /* Read BD Address */ skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL, HCI_INIT_TIMEOUT); @@ -2709,6 +2752,7 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_INTEL) { hdev->setup = btusb_setup_intel; + hdev->shutdown = btusb_shutdown_intel; hdev->set_bdaddr = btusb_set_bdaddr_intel; set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); } diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 2c68da1..f4ea80d 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -237,18 +237,6 @@ static int fwnet_header_create(struct sk_buff *skb, struct net_device *net, return -net->hard_header_len; } -static int fwnet_header_rebuild(struct sk_buff *skb) -{ - struct fwnet_header *h = (struct fwnet_header *)skb->data; - - if (get_unaligned_be16(&h->h_proto) == ETH_P_IP) - return arp_find((unsigned char *)&h->h_dest, skb); - - dev_notice(&skb->dev->dev, "unable to resolve type %04x addresses\n", - be16_to_cpu(h->h_proto)); - return 0; -} - static int fwnet_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type) { @@ -282,7 +270,6 @@ static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr) static const struct header_ops fwnet_header_ops = { .create = fwnet_header_create, - .rebuild = fwnet_header_rebuild, .cache = fwnet_header_cache, .cache_update = fwnet_header_cache_update, .parse = fwnet_header_parse, diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index 94affa5..546b7e8 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c @@ -1951,38 +1951,6 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev, return len; } -/* We don't need to send arp, because we have point-to-point connections. */ -static int -isdn_net_rebuild_header(struct sk_buff *skb) -{ - struct net_device *dev = skb->dev; - isdn_net_local *lp = netdev_priv(dev); - int ret = 0; - - if (lp->p_encap == ISDN_NET_ENCAP_ETHER) { - struct ethhdr *eth = (struct ethhdr *) skb->data; - - /* - * Only ARP/IP is currently supported - */ - - if (eth->h_proto != htons(ETH_P_IP)) { - printk(KERN_WARNING - "isdn_net: %s don't know how to resolve type %d addresses?\n", - dev->name, (int) eth->h_proto); - memcpy(eth->h_source, dev->dev_addr, dev->addr_len); - return 0; - } - /* - * Try to get ARP to resolve the header. 
- */ -#ifdef CONFIG_INET - ret = arp_find(eth->h_dest, skb); -#endif - } - return ret; -} - static int isdn_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type) { @@ -2005,7 +1973,6 @@ static void isdn_header_cache_update(struct hh_cache *hh, static const struct header_ops isdn_header_ops = { .create = isdn_net_header, - .rebuild = isdn_net_rebuild_header, .cache = isdn_header_cache, .cache_update = isdn_header_cache_update, }; diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index 84b3592..8dc7290 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c @@ -112,8 +112,8 @@ mISDN_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) } static int -mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, int flags) +mISDN_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + int flags) { struct sk_buff *skb; struct sock *sk = sock->sk; @@ -173,8 +173,7 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock, } static int -mISDN_sock_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len) +mISDN_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct sk_buff *skb; diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c index 686d327..4a77cb0 100644 --- a/drivers/media/dvb-core/dvb_net.c +++ b/drivers/media/dvb-core/dvb_net.c @@ -1190,7 +1190,6 @@ static int dvb_net_stop(struct net_device *dev) static const struct header_ops dvb_header_ops = { .create = eth_header, .parse = eth_header_parse, - .rebuild = eth_rebuild_header, }; diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 09de683..10f71c73 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -104,7 +104,6 @@ EXPORT_SYMBOL(arcnet_timeout); static int arcnet_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len); -static int arcnet_rebuild_header(struct sk_buff *skb); static int go_tx(struct net_device *dev); static int debug = ARCNET_DEBUG; @@ -312,7 +311,6 @@ static int choose_mtu(void) static const struct header_ops arcnet_header_ops = { .create = arcnet_header, - .rebuild = arcnet_rebuild_header, }; static const struct net_device_ops arcnet_netdev_ops = { @@ -538,59 +536,6 @@ static int arcnet_header(struct sk_buff *skb, struct net_device *dev, return proto->build_header(skb, dev, type, _daddr); } - -/* - * Rebuild the ARCnet hard header. This is called after an ARP (or in the - * future other address resolution) has completed on this sk_buff. We now - * let ARP fill in the destination field. - */ -static int arcnet_rebuild_header(struct sk_buff *skb) -{ - struct net_device *dev = skb->dev; - struct arcnet_local *lp = netdev_priv(dev); - int status = 0; /* default is failure */ - unsigned short type; - uint8_t daddr=0; - struct ArcProto *proto; - /* - * XXX: Why not use skb->mac_len? - */ - if (skb->network_header - skb->mac_header != 2) { - BUGMSG(D_NORMAL, - "rebuild_header: shouldn't be here! (hdrsize=%d)\n", - (int)(skb->network_header - skb->mac_header)); - return 0; - } - type = *(uint16_t *) skb_pull(skb, 2); - BUGMSG(D_DURING, "rebuild header for protocol %Xh\n", type); - - if (type == ETH_P_IP) { -#ifdef CONFIG_INET - BUGMSG(D_DURING, "rebuild header for ethernet protocol %Xh\n", type); - status = arp_find(&daddr, skb) ? 
1 : 0; - BUGMSG(D_DURING, " rebuilt: dest is %d; protocol %Xh\n", - daddr, type); -#endif - } else { - BUGMSG(D_NORMAL, - "I don't understand ethernet protocol %Xh addresses!\n", type); - dev->stats.tx_errors++; - dev->stats.tx_aborted_errors++; - } - - /* if we couldn't resolve the address... give up. */ - if (!status) - return 0; - - /* add the _real_ header this time! */ - proto = arc_proto_map[lp->default_proto[daddr]]; - proto->build_header(skb, dev, type, daddr); - - return 1; /* success */ -} - - - /* Called by the kernel in order to transmit a packet. */ netdev_tx_t arcnet_send_packet(struct sk_buff *skb, struct net_device *dev) diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index cfc4a9c..f61b287 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -38,6 +38,7 @@ #define AD_STANDBY 0x2 #define AD_MAX_TX_IN_SECOND 3 #define AD_COLLECTOR_MAX_DELAY 0 +#define AD_MONITOR_CHURNED 0x1000 /* Timer definitions (43.4.4 in the 802.3ad standard) */ #define AD_FAST_PERIODIC_TIME 1 @@ -1013,16 +1014,19 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) /* check if state machine should change state */ /* first, check if port was reinitialized */ - if (port->sm_vars & AD_PORT_BEGIN) + if (port->sm_vars & AD_PORT_BEGIN) { port->sm_rx_state = AD_RX_INITIALIZE; + port->sm_vars |= AD_MONITOR_CHURNED; /* check if port is not enabled */ - else if (!(port->sm_vars & AD_PORT_BEGIN) + } else if (!(port->sm_vars & AD_PORT_BEGIN) && !port->is_enabled && !(port->sm_vars & AD_PORT_MOVED)) port->sm_rx_state = AD_RX_PORT_DISABLED; /* check if new lacpdu arrived */ else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) || (port->sm_rx_state == AD_RX_DEFAULTED) || (port->sm_rx_state == AD_RX_CURRENT))) { + if (port->sm_rx_state != AD_RX_CURRENT) + port->sm_vars |= AD_MONITOR_CHURNED; port->sm_rx_timer_counter = 0; port->sm_rx_state = AD_RX_CURRENT; } else { @@ -1100,9 +1104,11 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) */ port->partner_oper.port_state &= ~AD_STATE_SYNCHRONIZATION; port->sm_vars &= ~AD_PORT_MATCHED; + port->partner_oper.port_state |= AD_STATE_LACP_TIMEOUT; port->partner_oper.port_state |= AD_STATE_LACP_ACTIVITY; port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT)); port->actor_oper_port_state |= AD_STATE_EXPIRED; + port->sm_vars |= AD_MONITOR_CHURNED; break; case AD_RX_DEFAULTED: __update_default_selected(port); @@ -1132,6 +1138,45 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) } /** + * ad_churn_machine - handle port churn's state machine + * @port: the port we're looking at + * + */ +static void ad_churn_machine(struct port *port) +{ + if (port->sm_vars & AD_MONITOR_CHURNED) { + port->sm_vars &= ~AD_MONITOR_CHURNED; + port->sm_churn_actor_state = AD_CHURN_MONITOR; + port->sm_churn_partner_state = AD_CHURN_MONITOR; + port->sm_churn_actor_timer_counter = + __ad_timer_to_ticks(AD_ACTOR_CHURN_TIMER, 0); + port->sm_churn_partner_timer_counter = + __ad_timer_to_ticks(AD_PARTNER_CHURN_TIMER, 0); + return; + } + if (port->sm_churn_actor_timer_counter && + !(--port->sm_churn_actor_timer_counter) && + port->sm_churn_actor_state == AD_CHURN_MONITOR) { + if (port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION) { + port->sm_churn_actor_state = AD_NO_CHURN; + } else { + port->churn_actor_count++; + port->sm_churn_actor_state = AD_CHURN; + } + } + if (port->sm_churn_partner_timer_counter && + 
!(--port->sm_churn_partner_timer_counter) && + port->sm_churn_partner_state == AD_CHURN_MONITOR) { + if (port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) { + port->sm_churn_partner_state = AD_NO_CHURN; + } else { + port->churn_partner_count++; + port->sm_churn_partner_state = AD_CHURN; + } + } +} + +/** * ad_tx_machine - handle a port's tx state machine * @port: the port we're looking at */ @@ -1745,6 +1790,13 @@ static void ad_initialize_port(struct port *port, int lacp_fast) port->next_port_in_aggregator = NULL; port->transaction_id = 0; + port->sm_churn_actor_timer_counter = 0; + port->sm_churn_actor_state = 0; + port->churn_actor_count = 0; + port->sm_churn_partner_timer_counter = 0; + port->sm_churn_partner_state = 0; + port->churn_partner_count = 0; + memcpy(&port->lacpdu, &lacpdu, sizeof(lacpdu)); } } @@ -2164,6 +2216,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work) ad_port_selection_logic(port, &update_slave_arr); ad_mux_machine(port, &update_slave_arr); ad_tx_machine(port); + ad_churn_machine(port); /* turn off the BEGIN bit, since we already handled it */ if (port->sm_vars & AD_PORT_BEGIN) @@ -2485,6 +2538,9 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, if (skb->protocol != PKT_TYPE_LACPDU) return RX_HANDLER_ANOTHER; + if (!MAC_ADDRESS_EQUAL(eth_hdr(skb)->h_dest, lacpdu_mcast_addr)) + return RX_HANDLER_ANOTHER; + lacpdu = skb_header_pointer(skb, 0, sizeof(_lacpdu), &_lacpdu); if (!lacpdu) return RX_HANDLER_ANOTHER; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b979c26..c026ce9 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -928,6 +928,39 @@ static inline void slave_disable_netpoll(struct slave *slave) static void bond_poll_controller(struct net_device *bond_dev) { + struct bonding *bond = netdev_priv(bond_dev); + struct slave *slave = NULL; + struct list_head *iter; + struct ad_info ad_info; + struct netpoll_info *ni; + const struct net_device_ops *ops; + + if (BOND_MODE(bond) == BOND_MODE_8023AD) + if (bond_3ad_get_active_agg_info(bond, &ad_info)) + return; + + rcu_read_lock_bh(); + bond_for_each_slave_rcu(bond, slave, iter) { + ops = slave->dev->netdev_ops; + if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller) + continue; + + if (BOND_MODE(bond) == BOND_MODE_8023AD) { + struct aggregator *agg = + SLAVE_AD_INFO(slave)->port.aggregator; + + if (agg && + agg->aggregator_identifier != ad_info.aggregator_id) + continue; + } + + ni = rcu_dereference_bh(slave->dev->npinfo); + if (down_trylock(&ni->dev_lock)) + continue; + ops->ndo_poll_controller(slave->dev); + up(&ni->dev_lock); + } + rcu_read_unlock_bh(); } static void bond_netpoll_cleanup(struct net_device *bond_dev) @@ -2900,6 +2933,8 @@ static int bond_slave_netdev_event(unsigned long event, if (old_duplex != slave->duplex) bond_3ad_adapter_duplex_changed(slave); } + /* Fallthrough */ + case NETDEV_DOWN: /* Refresh slave-array if applicable! * If the setup does not use miimon or arpmon (mode-specific!), * then these events will not cause the slave-array to be @@ -2911,10 +2946,6 @@ static int bond_slave_netdev_event(unsigned long event, if (bond_mode_uses_xmit_hash(bond)) bond_update_slave_arr(bond, NULL); break; - case NETDEV_DOWN: - if (bond_mode_uses_xmit_hash(bond)) - bond_update_slave_arr(bond, NULL); - break; case NETDEV_CHANGEMTU: /* TODO: Should slaves be allowed to * independently alter their MTU? 
For diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c index 976f5ad..62694cf 100644 --- a/drivers/net/bonding/bond_procfs.c +++ b/drivers/net/bonding/bond_procfs.c @@ -176,18 +176,51 @@ static void bond_info_show_slave(struct seq_file *seq, slave->link_failure_count); seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr); + seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id); if (BOND_MODE(bond) == BOND_MODE_8023AD) { - const struct aggregator *agg - = SLAVE_AD_INFO(slave)->port.aggregator; + const struct port *port = &SLAVE_AD_INFO(slave)->port; + const struct aggregator *agg = port->aggregator; - if (agg) + if (agg) { seq_printf(seq, "Aggregator ID: %d\n", agg->aggregator_identifier); - else + seq_printf(seq, "Actor Churn State: %s\n", + bond_3ad_churn_desc(port->sm_churn_actor_state)); + seq_printf(seq, "Partner Churn State: %s\n", + bond_3ad_churn_desc(port->sm_churn_partner_state)); + seq_printf(seq, "Actor Churned Count: %d\n", + port->churn_actor_count); + seq_printf(seq, "Partner Churned Count: %d\n", + port->churn_partner_count); + + seq_puts(seq, "details actor lacp pdu:\n"); + seq_printf(seq, " system priority: %d\n", + port->actor_system_priority); + seq_printf(seq, " port key: %d\n", + port->actor_oper_port_key); + seq_printf(seq, " port priority: %d\n", + port->actor_port_priority); + seq_printf(seq, " port number: %d\n", + port->actor_port_number); + seq_printf(seq, " port state: %d\n", + port->actor_oper_port_state); + + seq_puts(seq, "details partner lacp pdu:\n"); + seq_printf(seq, " system priority: %d\n", + port->partner_oper.system_priority); + seq_printf(seq, " oper key: %d\n", + port->partner_oper.key); + seq_printf(seq, " port priority: %d\n", + port->partner_oper.port_priority); + seq_printf(seq, " port number: %d\n", + port->partner_oper.port_number); + seq_printf(seq, " port state: %d\n", + port->partner_oper.port_state); + } else { seq_puts(seq, "Aggregator ID: N/A\n"); + } } - seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id); } static int bond_info_seq_show(struct seq_file *seq, void *v) diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c index e7a6363..27ad312 100644 --- a/drivers/net/can/bfin_can.c +++ b/drivers/net/can/bfin_can.c @@ -20,13 +20,121 @@ #include <linux/can/dev.h> #include <linux/can/error.h> -#include <asm/bfin_can.h> #include <asm/portmux.h> #define DRV_NAME "bfin_can" #define BFIN_CAN_TIMEOUT 100 #define TX_ECHO_SKB_MAX 1 +/* transmit and receive channels */ +#define TRANSMIT_CHL 24 +#define RECEIVE_STD_CHL 0 +#define RECEIVE_EXT_CHL 4 +#define RECEIVE_RTR_CHL 8 +#define RECEIVE_EXT_RTR_CHL 12 +#define MAX_CHL_NUMBER 32 + +/* All Blackfin system MMRs are padded to 32bits even if the register + * itself is only 16bits. 
So use a helper macro to streamline this + */ +#define __BFP(m) u16 m; u16 __pad_##m + +/* bfin can registers layout */ +struct bfin_can_mask_regs { + __BFP(aml); + __BFP(amh); +}; + +struct bfin_can_channel_regs { + /* data[0,2,4,6] -> data{0,1,2,3} while data[1,3,5,7] is padding */ + u16 data[8]; + __BFP(dlc); + __BFP(tsv); + __BFP(id0); + __BFP(id1); +}; + +struct bfin_can_regs { + /* global control and status registers */ + __BFP(mc1); /* offset 0x00 */ + __BFP(md1); /* offset 0x04 */ + __BFP(trs1); /* offset 0x08 */ + __BFP(trr1); /* offset 0x0c */ + __BFP(ta1); /* offset 0x10 */ + __BFP(aa1); /* offset 0x14 */ + __BFP(rmp1); /* offset 0x18 */ + __BFP(rml1); /* offset 0x1c */ + __BFP(mbtif1); /* offset 0x20 */ + __BFP(mbrif1); /* offset 0x24 */ + __BFP(mbim1); /* offset 0x28 */ + __BFP(rfh1); /* offset 0x2c */ + __BFP(opss1); /* offset 0x30 */ + u32 __pad1[3]; + __BFP(mc2); /* offset 0x40 */ + __BFP(md2); /* offset 0x44 */ + __BFP(trs2); /* offset 0x48 */ + __BFP(trr2); /* offset 0x4c */ + __BFP(ta2); /* offset 0x50 */ + __BFP(aa2); /* offset 0x54 */ + __BFP(rmp2); /* offset 0x58 */ + __BFP(rml2); /* offset 0x5c */ + __BFP(mbtif2); /* offset 0x60 */ + __BFP(mbrif2); /* offset 0x64 */ + __BFP(mbim2); /* offset 0x68 */ + __BFP(rfh2); /* offset 0x6c */ + __BFP(opss2); /* offset 0x70 */ + u32 __pad2[3]; + __BFP(clock); /* offset 0x80 */ + __BFP(timing); /* offset 0x84 */ + __BFP(debug); /* offset 0x88 */ + __BFP(status); /* offset 0x8c */ + __BFP(cec); /* offset 0x90 */ + __BFP(gis); /* offset 0x94 */ + __BFP(gim); /* offset 0x98 */ + __BFP(gif); /* offset 0x9c */ + __BFP(control); /* offset 0xa0 */ + __BFP(intr); /* offset 0xa4 */ + __BFP(version); /* offset 0xa8 */ + __BFP(mbtd); /* offset 0xac */ + __BFP(ewr); /* offset 0xb0 */ + __BFP(esr); /* offset 0xb4 */ + u32 __pad3[2]; + __BFP(ucreg); /* offset 0xc0 */ + __BFP(uccnt); /* offset 0xc4 */ + __BFP(ucrc); /* offset 0xc8 */ + __BFP(uccnf); /* offset 0xcc */ + u32 __pad4[1]; + __BFP(version2); /* offset 0xd4 */ + u32 __pad5[10]; + + /* channel(mailbox) mask and message registers */ + struct bfin_can_mask_regs msk[MAX_CHL_NUMBER]; /* offset 0x100 */ + struct bfin_can_channel_regs chl[MAX_CHL_NUMBER]; /* offset 0x200 */ +}; + +#undef __BFP + +#define SRS 0x0001 /* Software Reset */ +#define SER 0x0008 /* Stuff Error */ +#define BOIM 0x0008 /* Enable Bus Off Interrupt */ +#define CCR 0x0080 /* CAN Configuration Mode Request */ +#define CCA 0x0080 /* Configuration Mode Acknowledge */ +#define SAM 0x0080 /* Sampling */ +#define AME 0x8000 /* Acceptance Mask Enable */ +#define RMLIM 0x0080 /* Enable RX Message Lost Interrupt */ +#define RMLIS 0x0080 /* RX Message Lost IRQ Status */ +#define RTR 0x4000 /* Remote Frame Transmission Request */ +#define BOIS 0x0008 /* Bus Off IRQ Status */ +#define IDE 0x2000 /* Identifier Extension */ +#define EPIS 0x0004 /* Error-Passive Mode IRQ Status */ +#define EPIM 0x0004 /* Enable Error-Passive Mode Interrupt */ +#define EWTIS 0x0001 /* TX Error Count IRQ Status */ +#define EWRIS 0x0002 /* RX Error Count IRQ Status */ +#define BEF 0x0040 /* Bit Error Flag */ +#define FER 0x0080 /* Form Error Flag */ +#define SMR 0x0020 /* Sleep Mode Request */ +#define SMACK 0x0008 /* Sleep Mode Acknowledge */ + /* * bfin can private data */ @@ -78,8 +186,8 @@ static int bfin_can_set_bittiming(struct net_device *dev) if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) timing |= SAM; - bfin_write(®->clock, clk); - bfin_write(®->timing, timing); + writew(clk, ®->clock); + writew(timing, ®->timing); netdev_info(dev, "setting 
CLOCK=0x%04x TIMING=0x%04x\n", clk, timing); @@ -94,16 +202,14 @@ static void bfin_can_set_reset_mode(struct net_device *dev) int i; /* disable interrupts */ - bfin_write(®->mbim1, 0); - bfin_write(®->mbim2, 0); - bfin_write(®->gim, 0); + writew(0, ®->mbim1); + writew(0, ®->mbim2); + writew(0, ®->gim); /* reset can and enter configuration mode */ - bfin_write(®->control, SRS | CCR); - SSYNC(); - bfin_write(®->control, CCR); - SSYNC(); - while (!(bfin_read(®->control) & CCA)) { + writew(SRS | CCR, ®->control); + writew(CCR, ®->control); + while (!(readw(®->control) & CCA)) { udelay(10); if (--timeout == 0) { netdev_err(dev, "fail to enter configuration mode\n"); @@ -116,34 +222,33 @@ static void bfin_can_set_reset_mode(struct net_device *dev) * by writing to CAN Mailbox Configuration Registers 1 and 2 * For all bits: 0 - Mailbox disabled, 1 - Mailbox enabled */ - bfin_write(®->mc1, 0); - bfin_write(®->mc2, 0); + writew(0, ®->mc1); + writew(0, ®->mc2); /* Set Mailbox Direction */ - bfin_write(®->md1, 0xFFFF); /* mailbox 1-16 are RX */ - bfin_write(®->md2, 0); /* mailbox 17-32 are TX */ + writew(0xFFFF, ®->md1); /* mailbox 1-16 are RX */ + writew(0, ®->md2); /* mailbox 17-32 are TX */ /* RECEIVE_STD_CHL */ for (i = 0; i < 2; i++) { - bfin_write(®->chl[RECEIVE_STD_CHL + i].id0, 0); - bfin_write(®->chl[RECEIVE_STD_CHL + i].id1, AME); - bfin_write(®->chl[RECEIVE_STD_CHL + i].dlc, 0); - bfin_write(®->msk[RECEIVE_STD_CHL + i].amh, 0x1FFF); - bfin_write(®->msk[RECEIVE_STD_CHL + i].aml, 0xFFFF); + writew(0, ®->chl[RECEIVE_STD_CHL + i].id0); + writew(AME, ®->chl[RECEIVE_STD_CHL + i].id1); + writew(0, ®->chl[RECEIVE_STD_CHL + i].dlc); + writew(0x1FFF, ®->msk[RECEIVE_STD_CHL + i].amh); + writew(0xFFFF, ®->msk[RECEIVE_STD_CHL + i].aml); } /* RECEIVE_EXT_CHL */ for (i = 0; i < 2; i++) { - bfin_write(®->chl[RECEIVE_EXT_CHL + i].id0, 0); - bfin_write(®->chl[RECEIVE_EXT_CHL + i].id1, AME | IDE); - bfin_write(®->chl[RECEIVE_EXT_CHL + i].dlc, 0); - bfin_write(®->msk[RECEIVE_EXT_CHL + i].amh, 0x1FFF); - bfin_write(®->msk[RECEIVE_EXT_CHL + i].aml, 0xFFFF); + writew(0, ®->chl[RECEIVE_EXT_CHL + i].id0); + writew(AME | IDE, ®->chl[RECEIVE_EXT_CHL + i].id1); + writew(0, ®->chl[RECEIVE_EXT_CHL + i].dlc); + writew(0x1FFF, ®->msk[RECEIVE_EXT_CHL + i].amh); + writew(0xFFFF, ®->msk[RECEIVE_EXT_CHL + i].aml); } - bfin_write(®->mc2, BIT(TRANSMIT_CHL - 16)); - bfin_write(®->mc1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL)); - SSYNC(); + writew(BIT(TRANSMIT_CHL - 16), ®->mc2); + writew(BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL), ®->mc1); priv->can.state = CAN_STATE_STOPPED; } @@ -157,9 +262,9 @@ static void bfin_can_set_normal_mode(struct net_device *dev) /* * leave configuration mode */ - bfin_write(®->control, bfin_read(®->control) & ~CCR); + writew(readw(®->control) & ~CCR, ®->control); - while (bfin_read(®->status) & CCA) { + while (readw(®->status) & CCA) { udelay(10); if (--timeout == 0) { netdev_err(dev, "fail to leave configuration mode\n"); @@ -170,26 +275,25 @@ static void bfin_can_set_normal_mode(struct net_device *dev) /* * clear _All_ tx and rx interrupts */ - bfin_write(®->mbtif1, 0xFFFF); - bfin_write(®->mbtif2, 0xFFFF); - bfin_write(®->mbrif1, 0xFFFF); - bfin_write(®->mbrif2, 0xFFFF); + writew(0xFFFF, ®->mbtif1); + writew(0xFFFF, ®->mbtif2); + writew(0xFFFF, ®->mbrif1); + writew(0xFFFF, ®->mbrif2); /* * clear global interrupt status register */ - bfin_write(®->gis, 0x7FF); /* overwrites with '1' */ + writew(0x7FF, ®->gis); /* overwrites with '1' */ /* * Initialize Interrupts * - set bits in the mailbox 
interrupt mask register * - global interrupt mask */ - bfin_write(®->mbim1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL)); - bfin_write(®->mbim2, BIT(TRANSMIT_CHL - 16)); + writew(BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL), ®->mbim1); + writew(BIT(TRANSMIT_CHL - 16), ®->mbim2); - bfin_write(®->gim, EPIM | BOIM | RMLIM); - SSYNC(); + writew(EPIM | BOIM | RMLIM, ®->gim); } static void bfin_can_start(struct net_device *dev) @@ -226,7 +330,7 @@ static int bfin_can_get_berr_counter(const struct net_device *dev, struct bfin_can_priv *priv = netdev_priv(dev); struct bfin_can_regs __iomem *reg = priv->membase; - u16 cec = bfin_read(®->cec); + u16 cec = readw(®->cec); bec->txerr = cec >> 8; bec->rxerr = cec; @@ -252,28 +356,28 @@ static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev) /* fill id */ if (id & CAN_EFF_FLAG) { - bfin_write(®->chl[TRANSMIT_CHL].id0, id); + writew(id, ®->chl[TRANSMIT_CHL].id0); val = ((id & 0x1FFF0000) >> 16) | IDE; } else val = (id << 2); if (id & CAN_RTR_FLAG) val |= RTR; - bfin_write(®->chl[TRANSMIT_CHL].id1, val | AME); + writew(val | AME, ®->chl[TRANSMIT_CHL].id1); /* fill payload */ for (i = 0; i < 8; i += 2) { val = ((7 - i) < dlc ? (data[7 - i]) : 0) + ((6 - i) < dlc ? (data[6 - i] << 8) : 0); - bfin_write(®->chl[TRANSMIT_CHL].data[i], val); + writew(val, ®->chl[TRANSMIT_CHL].data[i]); } /* fill data length code */ - bfin_write(®->chl[TRANSMIT_CHL].dlc, dlc); + writew(dlc, ®->chl[TRANSMIT_CHL].dlc); can_put_echo_skb(skb, dev, 0); /* set transmit request */ - bfin_write(®->trs2, BIT(TRANSMIT_CHL - 16)); + writew(BIT(TRANSMIT_CHL - 16), ®->trs2); return 0; } @@ -296,26 +400,26 @@ static void bfin_can_rx(struct net_device *dev, u16 isrc) /* get id */ if (isrc & BIT(RECEIVE_EXT_CHL)) { /* extended frame format (EFF) */ - cf->can_id = ((bfin_read(®->chl[RECEIVE_EXT_CHL].id1) + cf->can_id = ((readw(®->chl[RECEIVE_EXT_CHL].id1) & 0x1FFF) << 16) - + bfin_read(®->chl[RECEIVE_EXT_CHL].id0); + + readw(®->chl[RECEIVE_EXT_CHL].id0); cf->can_id |= CAN_EFF_FLAG; obj = RECEIVE_EXT_CHL; } else { /* standard frame format (SFF) */ - cf->can_id = (bfin_read(®->chl[RECEIVE_STD_CHL].id1) + cf->can_id = (readw(®->chl[RECEIVE_STD_CHL].id1) & 0x1ffc) >> 2; obj = RECEIVE_STD_CHL; } - if (bfin_read(®->chl[obj].id1) & RTR) + if (readw(®->chl[obj].id1) & RTR) cf->can_id |= CAN_RTR_FLAG; /* get data length code */ - cf->can_dlc = get_can_dlc(bfin_read(®->chl[obj].dlc) & 0xF); + cf->can_dlc = get_can_dlc(readw(®->chl[obj].dlc) & 0xF); /* get payload */ for (i = 0; i < 8; i += 2) { - val = bfin_read(®->chl[obj].data[i]); + val = readw(®->chl[obj].data[i]); cf->data[7 - i] = (7 - i) < cf->can_dlc ? val : 0; cf->data[6 - i] = (6 - i) < cf->can_dlc ? 
(val >> 8) : 0; } @@ -369,7 +473,7 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status) if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING || state == CAN_STATE_ERROR_PASSIVE)) { - u16 cec = bfin_read(®->cec); + u16 cec = readw(®->cec); u8 rxerr = cec; u8 txerr = cec >> 8; @@ -420,23 +524,23 @@ static irqreturn_t bfin_can_interrupt(int irq, void *dev_id) struct net_device_stats *stats = &dev->stats; u16 status, isrc; - if ((irq == priv->tx_irq) && bfin_read(®->mbtif2)) { + if ((irq == priv->tx_irq) && readw(®->mbtif2)) { /* transmission complete interrupt */ - bfin_write(®->mbtif2, 0xFFFF); + writew(0xFFFF, ®->mbtif2); stats->tx_packets++; - stats->tx_bytes += bfin_read(®->chl[TRANSMIT_CHL].dlc); + stats->tx_bytes += readw(®->chl[TRANSMIT_CHL].dlc); can_get_echo_skb(dev, 0); netif_wake_queue(dev); - } else if ((irq == priv->rx_irq) && bfin_read(®->mbrif1)) { + } else if ((irq == priv->rx_irq) && readw(®->mbrif1)) { /* receive interrupt */ - isrc = bfin_read(®->mbrif1); - bfin_write(®->mbrif1, 0xFFFF); + isrc = readw(®->mbrif1); + writew(0xFFFF, ®->mbrif1); bfin_can_rx(dev, isrc); - } else if ((irq == priv->err_irq) && bfin_read(®->gis)) { + } else if ((irq == priv->err_irq) && readw(®->gis)) { /* error interrupt */ - isrc = bfin_read(®->gis); - status = bfin_read(®->esr); - bfin_write(®->gis, 0x7FF); + isrc = readw(®->gis); + status = readw(®->esr); + writew(0x7FF, ®->gis); bfin_can_err(dev, isrc, status); } else { return IRQ_NONE; @@ -556,16 +660,10 @@ static int bfin_can_probe(struct platform_device *pdev) goto exit; } - if (!request_mem_region(res_mem->start, resource_size(res_mem), - dev_name(&pdev->dev))) { - err = -EBUSY; - goto exit; - } - /* request peripheral pins */ err = peripheral_request_list(pdata, dev_name(&pdev->dev)); if (err) - goto exit_mem_release; + goto exit; dev = alloc_bfin_candev(); if (!dev) { @@ -574,7 +672,13 @@ static int bfin_can_probe(struct platform_device *pdev) } priv = netdev_priv(dev); - priv->membase = (void __iomem *)res_mem->start; + + priv->membase = devm_ioremap_resource(&pdev->dev, res_mem); + if (IS_ERR(priv->membase)) { + err = PTR_ERR(priv->membase); + goto exit_peri_pin_free; + } + priv->rx_irq = rx_irq->start; priv->tx_irq = tx_irq->start; priv->err_irq = err_irq->start; @@ -606,8 +710,6 @@ exit_candev_free: free_candev(dev); exit_peri_pin_free: peripheral_free_list(pdata); -exit_mem_release: - release_mem_region(res_mem->start, resource_size(res_mem)); exit: return err; } @@ -616,15 +718,11 @@ static int bfin_can_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct bfin_can_priv *priv = netdev_priv(dev); - struct resource *res; bfin_can_set_reset_mode(dev); unregister_candev(dev); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, resource_size(res)); - peripheral_free_list(priv->pin_list); free_candev(dev); @@ -641,9 +739,8 @@ static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg) if (netif_running(dev)) { /* enter sleep mode */ - bfin_write(®->control, bfin_read(®->control) | SMR); - SSYNC(); - while (!(bfin_read(®->intr) & SMACK)) { + writew(readw(®->control) | SMR, ®->control); + while (!(readw(®->intr) & SMACK)) { udelay(10); if (--timeout == 0) { netdev_err(dev, "fail to enter sleep mode\n"); @@ -663,8 +760,7 @@ static int bfin_can_resume(struct platform_device *pdev) if (netif_running(dev)) { /* leave sleep mode */ - bfin_write(®->intr, 0); - SSYNC(); + writew(0, ®->intr); } return 0; diff --git 
a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 4daffb2..cedb572 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -23,6 +23,7 @@ #include <linux/of_address.h> #include <net/dsa.h> #include <linux/ethtool.h> +#include <linux/if_bridge.h> #include "bcm_sf2.h" #include "bcm_sf2_regs.h" @@ -299,10 +300,14 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, if (port == 7) intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF)); - /* Set this port, and only this one to be in the default VLAN */ + /* Set this port, and only this one to be in the default VLAN, + * if member of a bridge, restore its membership prior to + * bringing down this port. + */ reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); reg &= ~PORT_VLAN_CTRL_MASK; reg |= (1 << port); + reg |= priv->port_sts[port].vlan_ctl_mask; core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port)); bcm_sf2_imp_vlan_setup(ds, cpu_port); @@ -400,6 +405,151 @@ static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port, return 0; } +/* Fast-ageing of ARL entries for a given port, equivalent to an ARL + * flush for that port. + */ +static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + unsigned int timeout = 1000; + u32 reg; + + core_writel(priv, port, CORE_FAST_AGE_PORT); + + reg = core_readl(priv, CORE_FAST_AGE_CTRL); + reg |= EN_AGE_PORT | FAST_AGE_STR_DONE; + core_writel(priv, reg, CORE_FAST_AGE_CTRL); + + do { + reg = core_readl(priv, CORE_FAST_AGE_CTRL); + if (!(reg & FAST_AGE_STR_DONE)) + break; + + cpu_relax(); + } while (timeout--); + + if (!timeout) + return -ETIMEDOUT; + + return 0; +} + +static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port, + u32 br_port_mask) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + unsigned int i; + u32 reg, p_ctl; + + p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); + + for (i = 0; i < priv->hw_params.num_ports; i++) { + if (!((1 << i) & br_port_mask)) + continue; + + /* Add this local port to the remote port VLAN control + * membership and update the remote port bitmask + */ + reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i)); + reg |= 1 << port; + core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i)); + priv->port_sts[i].vlan_ctl_mask = reg; + + p_ctl |= 1 << i; + } + + /* Configure the local port VLAN control membership to include + * remote ports and update the local port bitmask + */ + core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port)); + priv->port_sts[port].vlan_ctl_mask = p_ctl; + + return 0; +} + +static int bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port, + u32 br_port_mask) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + unsigned int i; + u32 reg, p_ctl; + + p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); + + for (i = 0; i < priv->hw_params.num_ports; i++) { + /* Don't touch the remaining ports */ + if (!((1 << i) & br_port_mask)) + continue; + + reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i)); + reg &= ~(1 << port); + core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i)); + priv->port_sts[port].vlan_ctl_mask = reg; + + /* Prevent self removal to preserve isolation */ + if (port != i) + p_ctl &= ~(1 << i); + } + + core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port)); + priv->port_sts[port].vlan_ctl_mask = p_ctl; + + return 0; +} + +static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port, + u8 state) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + u8 hw_state, cur_hw_state; + int ret = 0; + u32 reg; + + reg = core_readl(priv, 
CORE_G_PCTL_PORT(port)); + cur_hw_state = reg >> G_MISTP_STATE_SHIFT; + + switch (state) { + case BR_STATE_DISABLED: + hw_state = G_MISTP_DIS_STATE; + break; + case BR_STATE_LISTENING: + hw_state = G_MISTP_LISTEN_STATE; + break; + case BR_STATE_LEARNING: + hw_state = G_MISTP_LEARN_STATE; + break; + case BR_STATE_FORWARDING: + hw_state = G_MISTP_FWD_STATE; + break; + case BR_STATE_BLOCKING: + hw_state = G_MISTP_BLOCK_STATE; + break; + default: + pr_err("%s: invalid STP state: %d\n", __func__, state); + return -EINVAL; + } + + /* Fast-age ARL entries if we are moving a port from Learning or + * Forwarding state to Disabled, Blocking or Listening state + */ + if (cur_hw_state != hw_state) { + if (cur_hw_state & 4 && !(hw_state & 4)) { + ret = bcm_sf2_sw_fast_age_port(ds, port); + if (ret) { + pr_err("%s: fast-ageing failed\n", __func__); + return ret; + } + } + } + + reg = core_readl(priv, CORE_G_PCTL_PORT(port)); + reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT); + reg |= hw_state; + core_writel(priv, reg, CORE_G_PCTL_PORT(port)); + + return 0; +} + static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id) { struct bcm_sf2_priv *priv = dev_id; @@ -916,6 +1066,9 @@ static struct dsa_switch_driver bcm_sf2_switch_driver = { .port_disable = bcm_sf2_port_disable, .get_eee = bcm_sf2_sw_get_eee, .set_eee = bcm_sf2_sw_set_eee, + .port_join_bridge = bcm_sf2_sw_br_join, + .port_leave_bridge = bcm_sf2_sw_br_leave, + .port_stp_update = bcm_sf2_sw_br_set_stp_state, }; static int __init bcm_sf2_init(void) diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index 7b7053d..22e2ebf 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -46,6 +46,8 @@ struct bcm_sf2_port_status { unsigned int link; struct ethtool_eee eee; + + u32 vlan_ctl_mask; }; struct bcm_sf2_priv { diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h index cabdfa5..fa4e6e7 100644 --- a/drivers/net/dsa/bcm_sf2_regs.h +++ b/drivers/net/dsa/bcm_sf2_regs.h @@ -163,6 +163,21 @@ #define EN_CHIP_RST (1 << 6) #define EN_SW_RESET (1 << 4) +#define CORE_FAST_AGE_CTRL 0x00220 +#define EN_FAST_AGE_STATIC (1 << 0) +#define EN_AGE_DYNAMIC (1 << 1) +#define EN_AGE_PORT (1 << 2) +#define EN_AGE_VLAN (1 << 3) +#define EN_AGE_SPT (1 << 4) +#define EN_AGE_MCAST (1 << 5) +#define FAST_AGE_STR_DONE (1 << 7) + +#define CORE_FAST_AGE_PORT 0x00224 +#define AGE_PORT_MASK 0xf + +#define CORE_FAST_AGE_VID 0x00228 +#define AGE_VID_MASK 0x3fff + #define CORE_LNKSTS 0x00400 #define LNK_STS_MASK 0x1ff diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index aa33d16..9808c86 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -51,8 +51,11 @@ static int mv88e6171_switch_reset(struct dsa_switch *ds) /* Wait for transmit queues to drain. */ usleep_range(2000, 4000); - /* Reset the switch. */ - REG_WRITE(REG_GLOBAL, 0x04, 0xc400); + /* Reset the switch. Keep PPU active. The PPU needs to be + * active to support indirect phy register accesses through + * global registers 0x18 and 0x19. + */ + REG_WRITE(REG_GLOBAL, 0x04, 0xc000); /* Wait up to one second for reset to complete. */ timeout = jiffies + 1 * HZ; @@ -83,11 +86,10 @@ static int mv88e6171_setup_global(struct dsa_switch *ds) int ret; int i; - /* Disable the PHY polling unit (since there won't be any - * external PHYs to poll), don't discard packets with - * excessive collisions, and mask all interrupt sources. + /* Discard packets with excessive collisions, mask all + * interrupt sources, enable PPU. 
*/ - REG_WRITE(REG_GLOBAL, 0x04, 0x0000); + REG_WRITE(REG_GLOBAL, 0x04, 0x6000); /* Set the default address aging time to 5 minutes, and * enable address learn messages to be sent to all message @@ -336,7 +338,7 @@ mv88e6171_phy_read(struct dsa_switch *ds, int port, int regnum) int ret; mutex_lock(&ps->phy_mutex); - ret = mv88e6xxx_phy_read(ds, addr, regnum); + ret = mv88e6xxx_phy_read_indirect(ds, addr, regnum); mutex_unlock(&ps->phy_mutex); return ret; } @@ -350,7 +352,7 @@ mv88e6171_phy_write(struct dsa_switch *ds, int ret; mutex_lock(&ps->phy_mutex); - ret = mv88e6xxx_phy_write(ds, addr, regnum, val); + ret = mv88e6xxx_phy_write_indirect(ds, addr, regnum, val); mutex_unlock(&ps->phy_mutex); return ret; } diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index e13adc7..7bc5998 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -22,59 +22,6 @@ #include <net/dsa.h> #include "mv88e6xxx.h" -static int mv88e6352_wait(struct dsa_switch *ds, int reg, int offset, u16 mask) -{ - unsigned long timeout = jiffies + HZ / 10; - - while (time_before(jiffies, timeout)) { - int ret; - - ret = REG_READ(reg, offset); - if (!(ret & mask)) - return 0; - - usleep_range(1000, 2000); - } - return -ETIMEDOUT; -} - -static inline int mv88e6352_phy_wait(struct dsa_switch *ds) -{ - return mv88e6352_wait(ds, REG_GLOBAL2, 0x18, 0x8000); -} - -static inline int mv88e6352_eeprom_load_wait(struct dsa_switch *ds) -{ - return mv88e6352_wait(ds, REG_GLOBAL2, 0x14, 0x0800); -} - -static inline int mv88e6352_eeprom_busy_wait(struct dsa_switch *ds) -{ - return mv88e6352_wait(ds, REG_GLOBAL2, 0x14, 0x8000); -} - -static int __mv88e6352_phy_read(struct dsa_switch *ds, int addr, int regnum) -{ - int ret; - - REG_WRITE(REG_GLOBAL2, 0x18, 0x9800 | (addr << 5) | regnum); - - ret = mv88e6352_phy_wait(ds); - if (ret < 0) - return ret; - - return REG_READ(REG_GLOBAL2, 0x19); -} - -static int __mv88e6352_phy_write(struct dsa_switch *ds, int addr, int regnum, - u16 val) -{ - REG_WRITE(REG_GLOBAL2, 0x19, val); - REG_WRITE(REG_GLOBAL2, 0x18, 0x9400 | (addr << 5) | regnum); - - return mv88e6352_phy_wait(ds); -} - static char *mv88e6352_probe(struct device *host_dev, int sw_addr) { struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); @@ -346,12 +293,12 @@ static int mv88e6352_phy_page_read(struct dsa_switch *ds, int ret; mutex_lock(&ps->phy_mutex); - ret = __mv88e6352_phy_write(ds, port, 0x16, page); + ret = mv88e6xxx_phy_write_indirect(ds, port, 0x16, page); if (ret < 0) goto error; - ret = __mv88e6352_phy_read(ds, port, reg); + ret = mv88e6xxx_phy_read_indirect(ds, port, reg); error: - __mv88e6352_phy_write(ds, port, 0x16, 0x0); + mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0); mutex_unlock(&ps->phy_mutex); return ret; } @@ -363,13 +310,13 @@ static int mv88e6352_phy_page_write(struct dsa_switch *ds, int ret; mutex_lock(&ps->phy_mutex); - ret = __mv88e6352_phy_write(ds, port, 0x16, page); + ret = mv88e6xxx_phy_write_indirect(ds, port, 0x16, page); if (ret < 0) goto error; - ret = __mv88e6352_phy_write(ds, port, reg, val); + ret = mv88e6xxx_phy_write_indirect(ds, port, reg, val); error: - __mv88e6352_phy_write(ds, port, 0x16, 0x0); + mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0); mutex_unlock(&ps->phy_mutex); return ret; } @@ -482,7 +429,7 @@ mv88e6352_phy_read(struct dsa_switch *ds, int port, int regnum) return addr; mutex_lock(&ps->phy_mutex); - ret = __mv88e6352_phy_read(ds, addr, regnum); + ret = mv88e6xxx_phy_read_indirect(ds, addr, regnum); mutex_unlock(&ps->phy_mutex); 
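/*
 * Editor's illustrative sketch (not part of the patch): the mv88e6171 and
 * mv88e6352 hunks above drop their chip-local PHY helpers in favour of the
 * shared mv88e6xxx indirect accessors, which all follow the same Global2
 * sequence - write a command word to offset 0x18, poll until the busy bit
 * (0x8000) clears, then move data through offset 0x19.  The stand-alone
 * model below mimics that sequence against a fake register pair so the
 * command encoding (0x9800 = read, 0x9400 = write, PHY address in bits 9:5,
 * register number in bits 4:0) is easy to see.  All helper and variable
 * names here are made up for the example; they are not the driver's API.
 */
#include <stdint.h>
#include <stdio.h>

#define SMI_BUSY   0x8000
#define SMI_READ   0x9800
#define SMI_WRITE  0x9400

static uint16_t smi_cmd;   /* stands in for Global2 register 0x18 */
static uint16_t smi_data;  /* stands in for Global2 register 0x19 */

static void fake_hw_complete(void)
{
	/* a real switch clears the busy bit itself once the MDIO cycle ends */
	smi_cmd &= ~SMI_BUSY;
}

static int indirect_phy_read(int addr, int regnum)
{
	smi_cmd = SMI_READ | (addr << 5) | regnum;
	fake_hw_complete();        /* normally: poll until !(smi_cmd & SMI_BUSY) */
	return smi_data;
}

static void indirect_phy_write(int addr, int regnum, uint16_t val)
{
	smi_data = val;
	smi_cmd = SMI_WRITE | (addr << 5) | regnum;
	fake_hw_complete();
}

int main(void)
{
	indirect_phy_write(3, 16, 0x0300);
	printf("phy 3 reg 16 = 0x%04x\n", indirect_phy_read(3, 16));
	return 0;
}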
return ret; @@ -499,7 +446,7 @@ mv88e6352_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) return addr; mutex_lock(&ps->phy_mutex); - ret = __mv88e6352_phy_write(ds, addr, regnum, val); + ret = mv88e6xxx_phy_write_indirect(ds, addr, regnum, val); mutex_unlock(&ps->phy_mutex); return ret; @@ -553,7 +500,7 @@ static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr) if (ret < 0) goto error; - ret = mv88e6352_eeprom_busy_wait(ds); + ret = mv88e6xxx_eeprom_busy_wait(ds); if (ret < 0) goto error; @@ -576,7 +523,7 @@ static int mv88e6352_get_eeprom(struct dsa_switch *ds, eeprom->magic = 0xc3ec4951; - ret = mv88e6352_eeprom_load_wait(ds); + ret = mv88e6xxx_eeprom_load_wait(ds); if (ret < 0) return ret; @@ -657,7 +604,7 @@ static int mv88e6352_write_eeprom_word(struct dsa_switch *ds, int addr, if (ret < 0) goto error; - ret = mv88e6352_eeprom_busy_wait(ds); + ret = mv88e6xxx_eeprom_busy_wait(ds); error: mutex_unlock(&ps->eeprom_mutex); return ret; @@ -681,7 +628,7 @@ static int mv88e6352_set_eeprom(struct dsa_switch *ds, len = eeprom->len; eeprom->len = 0; - ret = mv88e6352_eeprom_load_wait(ds); + ret = mv88e6xxx_eeprom_load_wait(ds); if (ret < 0) return ret; @@ -770,6 +717,8 @@ struct dsa_switch_driver mv88e6352_switch_driver = { .get_strings = mv88e6352_get_strings, .get_ethtool_stats = mv88e6352_get_ethtool_stats, .get_sset_count = mv88e6352_get_sset_count, + .set_eee = mv88e6xxx_set_eee, + .get_eee = mv88e6xxx_get_eee, #ifdef CONFIG_NET_DSA_HWMON .get_temp = mv88e6352_get_temp, .get_temp_limit = mv88e6352_get_temp_limit, diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 3e7e31a..c18ffc9 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -596,6 +596,110 @@ error: } #endif /* CONFIG_NET_DSA_HWMON */ +static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask) +{ + unsigned long timeout = jiffies + HZ / 10; + + while (time_before(jiffies, timeout)) { + int ret; + + ret = REG_READ(reg, offset); + if (!(ret & mask)) + return 0; + + usleep_range(1000, 2000); + } + return -ETIMEDOUT; +} + +int mv88e6xxx_phy_wait(struct dsa_switch *ds) +{ + return mv88e6xxx_wait(ds, REG_GLOBAL2, 0x18, 0x8000); +} + +int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds) +{ + return mv88e6xxx_wait(ds, REG_GLOBAL2, 0x14, 0x0800); +} + +int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds) +{ + return mv88e6xxx_wait(ds, REG_GLOBAL2, 0x14, 0x8000); +} + +int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum) +{ + int ret; + + REG_WRITE(REG_GLOBAL2, 0x18, 0x9800 | (addr << 5) | regnum); + + ret = mv88e6xxx_phy_wait(ds); + if (ret < 0) + return ret; + + return REG_READ(REG_GLOBAL2, 0x19); +} + +int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr, int regnum, + u16 val) +{ + REG_WRITE(REG_GLOBAL2, 0x19, val); + REG_WRITE(REG_GLOBAL2, 0x18, 0x9400 | (addr << 5) | regnum); + + return mv88e6xxx_phy_wait(ds); +} + +int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) +{ + int reg; + + reg = mv88e6xxx_phy_read_indirect(ds, port, 16); + if (reg < 0) + return -EOPNOTSUPP; + + e->eee_enabled = !!(reg & 0x0200); + e->tx_lpi_enabled = !!(reg & 0x0100); + + reg = REG_READ(REG_PORT(port), 0); + e->eee_active = !!(reg & 0x0040); + + return 0; +} + +static int mv88e6xxx_eee_enable_set(struct dsa_switch *ds, int port, + bool eee_enabled, bool tx_lpi_enabled) +{ + int reg, nreg; + + reg = mv88e6xxx_phy_read_indirect(ds, port, 16); + if (reg < 0) + return reg; + + nreg = reg & 
~0x0300; + if (eee_enabled) + nreg |= 0x0200; + if (tx_lpi_enabled) + nreg |= 0x0100; + + if (nreg != reg) + return mv88e6xxx_phy_write_indirect(ds, port, 16, nreg); + + return 0; +} + +int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, + struct phy_device *phydev, struct ethtool_eee *e) +{ + int ret; + + ret = mv88e6xxx_eee_enable_set(ds, port, e->eee_enabled, + e->tx_lpi_enabled); + if (ret) + return -EOPNOTSUPP; + + return 0; +} + static int __init mv88e6xxx_init(void) { #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131) diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 03e397e..5fd42ce 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -82,6 +82,15 @@ int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port); void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, struct ethtool_regs *regs, void *_p); int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp); +int mv88e6xxx_phy_wait(struct dsa_switch *ds); +int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds); +int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds); +int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum); +int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr, int regnum, + u16 val); +int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e); +int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, + struct phy_device *phydev, struct ethtool_eee *e); extern struct dsa_switch_driver mv88e6131_switch_driver; extern struct dsa_switch_driver mv88e6123_61_65_switch_driver; diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index f3470d9..bab01c84 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -757,7 +757,7 @@ static void emac_shutdown(struct net_device *dev) /* Disable all interrupt */ writel(0, db->membase + EMAC_INT_CTL_REG); - /* clear interupt status */ + /* clear interrupt status */ reg_val = readl(db->membase + EMAC_INT_STA_REG); writel(reg_val, db->membase + EMAC_INT_STA_REG); diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 6725dc0..fd9296a 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -105,11 +105,11 @@ static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum) /* set MDIO address */ csrwr32((mii_id & 0x1f), priv->mac_dev, - tse_csroffs(mdio_phy0_addr)); + tse_csroffs(mdio_phy1_addr)); /* get the data */ return csrrd32(priv->mac_dev, - tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff; + tse_csroffs(mdio_phy1) + regnum * 4) & 0xffff; } static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum, @@ -120,10 +120,10 @@ static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum, /* set MDIO address */ csrwr32((mii_id & 0x1f), priv->mac_dev, - tse_csroffs(mdio_phy0_addr)); + tse_csroffs(mdio_phy1_addr)); /* write the data */ - csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4); + csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy1) + regnum * 4); return 0; } @@ -1098,8 +1098,12 @@ static int tse_open(struct net_device *dev) spin_lock(&priv->mac_cfg_lock); ret = reset_mac(priv); + /* Note that reset_mac will fail if the clocks are gated by the PHY + * due to the PHY being put into isolation or power down mode. + * This is not an error if reset fails due to no clock. 
+ */ if (ret) - netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret); + netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret); ret = init_mac(priv); spin_unlock(&priv->mac_cfg_lock); @@ -1203,8 +1207,12 @@ static int tse_shutdown(struct net_device *dev) spin_lock(&priv->tx_lock); ret = reset_mac(priv); + /* Note that reset_mac will fail if the clocks are gated by the PHY + * due to the PHY being put into isolation or power down mode. + * This is not an error if reset fails due to no clock. + */ if (ret) - netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret); + netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret); priv->dmaops->reset_dma(priv); free_skbufs(dev); diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 4c2ae22..94960055 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -723,13 +723,13 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget) * the last correctly noting the error. */ if(status & ERR_BIT) { - /* reseting flags */ + /* resetting flags */ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; goto err_next_pkt; } /* check for STP and ENP */ if(!((status & STP_BIT) && (status & ENP_BIT))){ - /* reseting flags */ + /* resetting flags */ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; goto err_next_pkt; } diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h index a75092d..7cdb185 100644 --- a/drivers/net/ethernet/amd/amd8111e.h +++ b/drivers/net/ethernet/amd/amd8111e.h @@ -614,7 +614,7 @@ typedef enum { /* Assume contoller gets data 10 times the maximum processing time */ #define REPEAT_CNT 10 -/* amd8111e decriptor flag definitions */ +/* amd8111e descriptor flag definitions */ typedef enum { OWN_BIT = (1 << 15), diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 11d6e65..8eb37e0 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1708,7 +1708,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ if (!is_valid_ether_addr(dev->dev_addr)) - memset(dev->dev_addr, 0, ETH_ALEN); + eth_zero_addr(dev->dev_addr); if (pcnet32_debug & NETIF_MSG_PROBE) { pr_cont(" %pM", dev->dev_addr); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 13e8f95..1eea3e5 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -620,7 +620,7 @@ struct xgbe_hw_features { unsigned int mgk; /* PMT magic packet */ unsigned int mmc; /* RMON module */ unsigned int aoe; /* ARP Offload */ - unsigned int ts; /* IEEE 1588-2008 Adavanced Timestamp */ + unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */ unsigned int eee; /* Energy Efficient Ethernet */ unsigned int tx_coe; /* Tx Checksum Offload */ unsigned int rx_coe; /* Rx Checksum Offload */ diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c index 842fe76..7fcaf0d 100644 --- a/drivers/net/ethernet/apple/mace.c +++ b/drivers/net/ethernet/apple/mace.c @@ -720,7 +720,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) mace_reset(dev); /* * XXX mace likes to hang the machine after a xmtfs error. 
- * This is hard to reproduce, reseting *may* help + * This is hard to reproduce, resetting *may* help */ } cp = mp->tx_cmds + NCMDS_TX * i; diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c index 6e66127..89914ca 100644 --- a/drivers/net/ethernet/apple/macmace.c +++ b/drivers/net/ethernet/apple/macmace.c @@ -575,7 +575,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) mace_reset(dev); /* * XXX mace likes to hang the machine after a xmtfs error. - * This is hard to reproduce, reseting *may* help + * This is hard to reproduce, resetting *may* help */ } /* dma should have finished */ diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c index 52fdfe2..a8b80c5 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c @@ -307,7 +307,7 @@ void atl1c_start_phy_polling(struct atl1c_hw *hw, u16 clk_sel) /* * atl1c_read_phy_core - * core funtion to read register in PHY via MDIO control regsiter. + * core function to read register in PHY via MDIO control regsiter. * ext: extension register (see IEEE 802.3) * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0) * reg: reg to read @@ -356,7 +356,7 @@ int atl1c_read_phy_core(struct atl1c_hw *hw, bool ext, u8 dev, /* * atl1c_write_phy_core - * core funtion to write to register in PHY via MDIO control regsiter. + * core function to write to register in PHY via MDIO control register. * ext: extension register (see IEEE 802.3) * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0) * reg: reg to write diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 587f63e..932bd18 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -752,7 +752,7 @@ static void atl1c_patch_assign(struct atl1c_hw *hw) if (hw->device_id == PCI_DEVICE_ID_ATHEROS_L2C_B2 && hw->revision_id == L2CB_V21) { - /* config acess mode */ + /* config access mode */ pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR, REG_PCIE_DEV_MISC_CTRL); pci_read_config_dword(pdev, REG_PCIE_IND_ACC_DATA, &misc_ctrl); diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 41a3c98..ee4fdfe 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -71,12 +71,12 @@ config BCMGENET Broadcom BCM7xxx Set Top Box family chipset. config BNX2 - tristate "QLogic NetXtremeII support" + tristate "QLogic bnx2 support" depends on PCI select CRC32 select FW_LOADER ---help--- - This driver supports QLogic NetXtremeII gigabit Ethernet cards. + This driver supports QLogic bnx2 gigabit Ethernet cards. To compile this driver as a module, choose M here: the module will be called bnx2. This is recommended. @@ -87,8 +87,8 @@ config CNIC select BNX2 select UIO ---help--- - This driver supports offload features of QLogic NetXtremeII - gigabit Ethernet cards. + This driver supports offload features of QLogic bnx2 gigabit + Ethernet cards. To compile this driver as a module, choose M here: the module will be called cnic. This is recommended. diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 02bf0b8..2b66ef3 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -1,7 +1,7 @@ -/* bnx2.c: QLogic NX2 network driver. +/* bnx2.c: QLogic bnx2 network driver. 
* * Copyright (c) 2004-2014 Broadcom Corporation - * Copyright (c) 2014 QLogic Corporation + * Copyright (c) 2014-2015 QLogic Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -58,8 +58,8 @@ #include "bnx2_fw.h" #define DRV_MODULE_NAME "bnx2" -#define DRV_MODULE_VERSION "2.2.5" -#define DRV_MODULE_RELDATE "December 20, 2013" +#define DRV_MODULE_VERSION "2.2.6" +#define DRV_MODULE_RELDATE "January 29, 2014" #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw" #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw" #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw" @@ -72,10 +72,10 @@ #define TX_TIMEOUT (5*HZ) static char version[] = - "QLogic NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + "QLogic " DRV_MODULE_NAME " Gigabit Ethernet Driver v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>"); -MODULE_DESCRIPTION("QLogic NetXtreme II BCM5706/5708/5709/5716 Driver"); +MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); MODULE_FIRMWARE(FW_MIPS_FILE_06); @@ -4984,8 +4984,6 @@ bnx2_init_chip(struct bnx2 *bp) bp->idle_chk_status_idx = 0xffff; - bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE; - /* Set up how to generate a link change interrupt. */ BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); @@ -7710,17 +7708,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) return 0; } -static netdev_features_t -bnx2_fix_features(struct net_device *dev, netdev_features_t features) -{ - struct bnx2 *bp = netdev_priv(dev); - - if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) - features |= NETIF_F_HW_VLAN_CTAG_RX; - - return features; -} - static int bnx2_set_features(struct net_device *dev, netdev_features_t features) { @@ -8527,7 +8514,6 @@ static const struct net_device_ops bnx2_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = bnx2_change_mac_addr, .ndo_change_mtu = bnx2_change_mtu, - .ndo_fix_features = bnx2_fix_features, .ndo_set_features = bnx2_set_features, .ndo_tx_timeout = bnx2_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER @@ -8578,6 +8564,9 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->features |= dev->hw_features; dev->priv_flags |= IFF_UNICAST_FLT; + if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) + dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; + if ((rc = register_netdev(dev))) { dev_err(&pdev->dev, "Cannot register net device\n"); goto error; diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h index 28df35d..f92f76c 100644 --- a/drivers/net/ethernet/broadcom/bnx2.h +++ b/drivers/net/ethernet/broadcom/bnx2.h @@ -1,7 +1,7 @@ -/* bnx2.h: QLogic NX2 network driver. +/* bnx2.h: QLogic bnx2 network driver. * * Copyright (c) 2004-2014 Broadcom Corporation - * Copyright (c) 2014 QLogic Corporation + * Copyright (c) 2014-2015 QLogic Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2_fw.h b/drivers/net/ethernet/broadcom/bnx2_fw.h index 7db79c2..b0f2cca 100644 --- a/drivers/net/ethernet/broadcom/bnx2_fw.h +++ b/drivers/net/ethernet/broadcom/bnx2_fw.h @@ -1,7 +1,7 @@ -/* bnx2_fw.h: QLogic NX2 network driver. 
+/* bnx2_fw.h: QLogic bnx2 network driver. * * Copyright (c) 2004, 2005, 2006, 2007 Broadcom Corporation - * Copyright (c) 2014 QLogic Corporation + * Copyright (c) 2014-2015 QLogic Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index ffe4e00..e3d853c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -2446,7 +2446,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) } packet = skb_put(skb, pkt_size); memcpy(packet, bp->dev->dev_addr, ETH_ALEN); - memset(packet + ETH_ALEN, 0, ETH_ALEN); + eth_zero_addr(packet + ETH_ALEN); memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN)); for (i = ETH_HLEN; i < pkt_size; i++) packet[i] = (unsigned char) (i & 0xff); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h index bd90e50..d6e1975 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h @@ -278,7 +278,7 @@ static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode, } -/* congestion managment port init api description +/* congestion management port init api description * the api works as follows: * the driver should pass the cmng_init_input struct, the port_init function * will prepare the required internal ram structure which will be passed back diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 778e4cd..b7c77b2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -563,7 +563,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_nig( * Will return the NIG ETS registers to init values.Except * credit_upper_bound. * That isn't used in this configuration (No WFQ is enabled) and will be -* configured acording to spec +* configured according to spec *. ******************************************************************************/ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params, @@ -680,7 +680,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf( * Will return the PBF ETS registers to init values.Except * credit_upper_bound. * That isn't used in this configuration (No WFQ is enabled) and will be -* configured acording to spec +* configured according to spec *. ******************************************************************************/ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params) @@ -738,7 +738,7 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params) } /****************************************************************************** * Description: -* E3B0 disable will return basicly the values to init values. +* E3B0 disable will return basically the values to init values. *. ******************************************************************************/ static int bnx2x_ets_e3b0_disabled(const struct link_params *params, @@ -761,7 +761,7 @@ static int bnx2x_ets_e3b0_disabled(const struct link_params *params, /****************************************************************************** * Description: -* Disable will return basicly the values to init values. +* Disable will return basically the values to init values. 
* ******************************************************************************/ int bnx2x_ets_disabled(struct link_params *params, @@ -2938,7 +2938,7 @@ static int bnx2x_eee_initial_config(struct link_params *params, { vars->eee_status |= ((u32) mode) << SHMEM_EEE_SUPPORTED_SHIFT; - /* Propogate params' bits --> vars (for migration exposure) */ + /* Propagate params' bits --> vars (for migration exposure) */ if (params->eee_mode & EEE_MODE_ENABLE_LPI) vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT; else @@ -13308,7 +13308,7 @@ static void bnx2x_check_over_curr(struct link_params *params, vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG; } -/* Returns 0 if no change occured since last check; 1 otherwise. */ +/* Returns 0 if no change occurred since last check; 1 otherwise. */ static u8 bnx2x_analyze_link_error(struct link_params *params, struct link_vars *vars, u32 status, u32 phy_flag, u32 link_flag, u8 notify) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index bef750a..2ee12b5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -11546,13 +11546,13 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp) /* Disable iSCSI OOO if MAC configuration is invalid. */ if (!is_valid_ether_addr(iscsi_mac)) { bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; - memset(iscsi_mac, 0, ETH_ALEN); + eth_zero_addr(iscsi_mac); } /* Disable FCoE if MAC configuration is invalid. */ if (!is_valid_ether_addr(fip_mac)) { bp->flags |= NO_FCOE_FLAG; - memset(bp->fip_mac, 0, ETH_ALEN); + eth_zero_addr(bp->fip_mac); } } @@ -11563,7 +11563,7 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp) int port = BP_PORT(bp); /* Zero primary MAC configuration */ - memset(bp->dev->dev_addr, 0, ETH_ALEN); + eth_zero_addr(bp->dev->dev_addr); if (BP_NOMCP(bp)) { BNX2X_ERROR("warning: random MAC workaround active\n"); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 6fe547c..0770e4b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -29,7 +29,7 @@ #define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND (0x1<<1) /* [RW 1] Initiate the ATC array - reset all the valid bits */ #define ATC_REG_ATC_INIT_ARRAY 0x1100b8 -/* [R 1] ATC initalization done */ +/* [R 1] ATC initialization done */ #define ATC_REG_ATC_INIT_DONE 0x1100bc /* [RC 6] Interrupt register #0 read clear */ #define ATC_REG_ATC_INT_STS_CLR 0x1101c0 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index e5aca2d..8638d6c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -2693,7 +2693,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); else /* function has not been loaded yet. 
Show mac as 0s */ - memset(&ivi->mac, 0, ETH_ALEN); + eth_zero_addr(ivi->mac); /* vlan */ if (bulletin->valid_bitmap & (1 << VLAN_VALID)) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index d160829..612cafb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -1620,7 +1620,7 @@ void bnx2x_memset_stats(struct bnx2x *bp) if (bp->port.pmf && bp->port.port_stx) bnx2x_port_stats_base_init(bp); - /* mark the end of statistics initializiation */ + /* mark the end of statistics initialization */ bp->stats_init = false; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index be40eab..15b2d16 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -800,7 +800,7 @@ int bnx2x_vfpf_config_rss(struct bnx2x *bp, req->rss_key_size = T_ETH_RSS_KEY; req->rss_result_mask = params->rss_result_mask; - /* flags handled individually for backward/forward compatability */ + /* flags handled individually for backward/forward compatibility */ if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED)) req->rss_flags |= VFPF_RSS_MODE_DISABLED; if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR)) @@ -1869,7 +1869,7 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf, rss.rss_obj = &vf->rss_conf_obj; rss.rss_result_mask = rss_tlv->rss_result_mask; - /* flags handled individually for backward/forward compatability */ + /* flags handled individually for backward/forward compatibility */ rss.rss_flags = 0; rss.ramrod_flags = 0; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index f05fab6..17c145f 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -1,7 +1,7 @@ /* cnic.c: QLogic CNIC core network driver. * * Copyright (c) 2006-2014 Broadcom Corporation - * Copyright (c) 2014 QLogic Corporation + * Copyright (c) 2014-2015 QLogic Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -58,11 +58,11 @@ #define CNIC_MODULE_NAME "cnic" static char version[] = - "QLogic NetXtreme II CNIC Driver " CNIC_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n"; + "QLogic " CNIC_MODULE_NAME "Driver v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n"; MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) " "Chen (zongxi@broadcom.com"); -MODULE_DESCRIPTION("QLogic NetXtreme II CNIC Driver"); +MODULE_DESCRIPTION("QLogic cnic Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(CNIC_MODULE_VERSION); diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h index 8bb36c1c..ef6125b 100644 --- a/drivers/net/ethernet/broadcom/cnic_if.h +++ b/drivers/net/ethernet/broadcom/cnic_if.h @@ -1,7 +1,7 @@ -/* cnic_if.h: QLogic CNIC core network driver. +/* cnic_if.h: QLogic cnic core network driver. 
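The memset-to-eth_zero_addr conversions in the bnx2x hunks above all zero a 6-byte MAC address; eth_zero_addr() is simply the etherdevice helper that wraps that memset. A minimal userspace sketch of the equivalence (ETH_ALEN and the helper are redefined locally here purely for illustration, since this is not built against kernel headers):

/* Sketch of the eth_zero_addr() conversions above.  In the kernel,
 * ETH_ALEN comes from <linux/if_ether.h> and eth_zero_addr() from
 * <linux/etherdevice.h>; both are redefined here so the example stands alone.
 */
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static void eth_zero_addr(unsigned char *addr)
{
	memset(addr, 0x00, ETH_ALEN);	/* same effect as the open-coded memset being replaced */
}

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };

	eth_zero_addr(mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}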
* * Copyright (c) 2006-2014 Broadcom Corporation - * Copyright (c) 2014 QLogic Corporation + * Copyright (c) 2014-2015 QLogic Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -15,8 +15,8 @@ #include "bnx2x/bnx2x_mfw_req.h" -#define CNIC_MODULE_VERSION "2.5.20" -#define CNIC_MODULE_RELDATE "March 14, 2014" +#define CNIC_MODULE_VERSION "2.5.21" +#define CNIC_MODULE_RELDATE "January 29, 2015" #define CNIC_ULP_RDMA 0 #define CNIC_ULP_ISCSI 1 diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 6befde6..275be56 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -54,8 +54,10 @@ /* Default highest priority queue for multi queue support */ #define GENET_Q0_PRIORITY 0 -#define GENET_DEFAULT_BD_CNT \ - (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt) +#define GENET_Q16_RX_BD_CNT \ + (TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q) +#define GENET_Q16_TX_BD_CNT \ + (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q) #define RX_BUF_LENGTH 2048 #define SKB_ALIGNMENT 32 @@ -923,7 +925,7 @@ static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, tx_cb_ptr = ring->cbs; tx_cb_ptr += ring->write_ptr - ring->cb_ptr; - tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE; + /* Advancing local write pointer */ if (ring->write_ptr == ring->end_ptr) ring->write_ptr = ring->cb_ptr; @@ -978,39 +980,32 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, struct bcmgenet_tx_ring *ring) { struct bcmgenet_priv *priv = netdev_priv(dev); - int last_tx_cn, last_c_index, num_tx_bds; struct enet_cb *tx_cb_ptr; struct netdev_queue *txq; unsigned int pkts_compl = 0; - unsigned int bds_compl; unsigned int c_index; + unsigned int txbds_ready; + unsigned int txbds_processed = 0; /* Compute how many buffers are transmitted since last xmit call */ c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); - txq = netdev_get_tx_queue(dev, ring->queue); - - last_c_index = ring->c_index; - num_tx_bds = ring->size; + c_index &= DMA_C_INDEX_MASK; - c_index &= (num_tx_bds - 1); - - if (c_index >= last_c_index) - last_tx_cn = c_index - last_c_index; + if (likely(c_index >= ring->c_index)) + txbds_ready = c_index - ring->c_index; else - last_tx_cn = num_tx_bds - last_c_index + c_index; + txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index; netif_dbg(priv, tx_done, dev, - "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n", - __func__, ring->index, - c_index, last_tx_cn, last_c_index); + "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n", + __func__, ring->index, ring->c_index, c_index, txbds_ready); /* Reclaim transmitted buffers */ - while (last_tx_cn-- > 0) { - tx_cb_ptr = ring->cbs + last_c_index; - bds_compl = 0; + while (txbds_processed < txbds_ready) { + tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr]; if (tx_cb_ptr->skb) { pkts_compl++; - bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1; + dev->stats.tx_packets++; dev->stats.tx_bytes += tx_cb_ptr->skb->len; dma_unmap_single(&dev->dev, dma_unmap_addr(tx_cb_ptr, dma_addr), @@ -1026,20 +1021,23 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, DMA_TO_DEVICE); dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0); } - dev->stats.tx_packets++; - ring->free_bds += bds_compl; - last_c_index++; - last_c_index &= 
(num_tx_bds - 1); + txbds_processed++; + if (likely(ring->clean_ptr < ring->end_ptr)) + ring->clean_ptr++; + else + ring->clean_ptr = ring->cb_ptr; } + ring->free_bds += txbds_processed; + ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK; + if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { + txq = netdev_get_tx_queue(dev, ring->queue); if (netif_tx_queue_stopped(txq)) netif_tx_wake_queue(txq); } - ring->c_index = c_index; - return pkts_compl; } @@ -1365,16 +1363,7 @@ static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb) } dma_unmap_addr_set(cb, dma_addr, mapping); - /* assign packet, prepare descriptor, and advance pointer */ - - dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping); - - /* turn on the newly assigned BD for DMA to use */ - priv->rx_bd_assign_index++; - priv->rx_bd_assign_index &= (priv->num_rx_bds - 1); - - priv->rx_bd_assign_ptr = priv->rx_bds + - (priv->rx_bd_assign_index * DMA_DESC_SIZE); + dmadesc_set_addr(priv, cb->bd_addr, mapping); return 0; } @@ -1383,8 +1372,10 @@ static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb) * this could be called from bottom half, or from NAPI polling method. */ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, + unsigned int index, unsigned int budget) { + struct bcmgenet_rx_ring *ring = &priv->rx_rings[index]; struct net_device *dev = priv->dev; struct enet_cb *cb; struct sk_buff *skb; @@ -1395,21 +1386,21 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, unsigned int p_index; unsigned int chksum_ok = 0; - p_index = bcmgenet_rdma_ring_readl(priv, DESC_INDEX, RDMA_PROD_INDEX); + p_index = bcmgenet_rdma_ring_readl(priv, index, RDMA_PROD_INDEX); p_index &= DMA_P_INDEX_MASK; - if (p_index < priv->rx_c_index) - rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - - priv->rx_c_index + p_index; + if (likely(p_index >= ring->c_index)) + rxpkttoprocess = p_index - ring->c_index; else - rxpkttoprocess = p_index - priv->rx_c_index; + rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index + + p_index; netif_dbg(priv, rx_status, dev, "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess); while ((rxpktprocessed < rxpkttoprocess) && (rxpktprocessed < budget)) { - cb = &priv->rx_cbs[priv->rx_read_ptr]; + cb = &priv->rx_cbs[ring->read_ptr]; skb = cb->skb; /* We do not have a backing SKB, so we do not have a @@ -1432,10 +1423,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, if (!priv->desc_64b_en) { dma_length_status = - dmadesc_get_length_status(priv, - priv->rx_bds + - (priv->rx_read_ptr * - DMA_DESC_SIZE)); + dmadesc_get_length_status(priv, cb->bd_addr); } else { struct status_64 *status; @@ -1451,8 +1439,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, netif_dbg(priv, rx_status, dev, "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n", - __func__, p_index, priv->rx_c_index, - priv->rx_read_ptr, dma_length_status); + __func__, p_index, ring->c_index, + ring->read_ptr, dma_length_status); if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { netif_err(priv, rx_status, dev, @@ -1530,25 +1518,31 @@ refill: } rxpktprocessed++; - priv->rx_read_ptr++; - priv->rx_read_ptr &= (priv->num_rx_bds - 1); + if (likely(ring->read_ptr < ring->end_ptr)) + ring->read_ptr++; + else + ring->read_ptr = ring->cb_ptr; + + ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK; + bcmgenet_rdma_ring_writel(priv, index, ring->c_index, RDMA_CONS_INDEX); } return rxpktprocessed; } /* Assign skb to RX DMA descriptor. 
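Both the rewritten Tx reclaim path and the per-ring Rx path above compute how many descriptors the hardware has consumed since the last pass from a free-running index that wraps at DMA_C_INDEX_MASK + 1. A minimal sketch of that arithmetic, with the mask value assumed to be 0xffff and the ring state reduced to plain integers (the real driver reads the indices from the TDMA_CONS_INDEX/RDMA_PROD_INDEX ring registers):

/* Sketch of the wrapping consumer-index arithmetic used by
 * __bcmgenet_tx_reclaim() and bcmgenet_desc_rx() above.  The hardware index
 * is free-running modulo (DMA_C_INDEX_MASK + 1), so the number of newly
 * completed descriptors is the difference between the hardware index and the
 * last index seen by software, accounting for one possible wrap.
 */
#include <stdio.h>

#define DMA_C_INDEX_MASK 0xffff	/* assumed value for this sketch */

static unsigned int bds_ready(unsigned int hw_index, unsigned int sw_index)
{
	hw_index &= DMA_C_INDEX_MASK;

	if (hw_index >= sw_index)
		return hw_index - sw_index;

	/* the hardware index wrapped past the mask since the last pass */
	return (DMA_C_INDEX_MASK + 1) - sw_index + hw_index;
}

int main(void)
{
	/* no wrap: 0x0014 - 0x0010 = 4 descriptors ready */
	printf("%u\n", bds_ready(0x0014, 0x0010));
	/* wrap: 0x10003 & 0xffff = 3, last seen 0xfffe -> 5 descriptors ready */
	printf("%u\n", bds_ready(0x10003, 0xfffe));
	return 0;
}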
*/ -static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv) +static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, + struct bcmgenet_rx_ring *ring) { struct enet_cb *cb; int ret = 0; int i; - netif_dbg(priv, hw, priv->dev, "%s:\n", __func__); + netif_dbg(priv, hw, priv->dev, "%s\n", __func__); /* loop here for each buffer needing assign */ - for (i = 0; i < priv->num_rx_bds; i++) { - cb = &priv->rx_cbs[priv->rx_bd_assign_index]; + for (i = 0; i < ring->size; i++) { + cb = ring->cbs + i; if (cb->skb) continue; @@ -1710,17 +1704,14 @@ static int init_umac(struct bcmgenet_priv *priv) return 0; } -/* Initialize all house-keeping variables for a TX ring, along - * with corresponding hardware registers - */ +/* Initialize a Tx ring along with corresponding hardware registers */ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, unsigned int index, unsigned int size, - unsigned int write_ptr, unsigned int end_ptr) + unsigned int start_ptr, unsigned int end_ptr) { struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; u32 words_per_bd = WORDS_PER_BD(priv); u32 flow_period_val = 0; - unsigned int first_bd; spin_lock_init(&ring->lock); ring->priv = priv; @@ -1735,12 +1726,13 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, ring->int_enable = bcmgenet_tx_ring_int_enable; ring->int_disable = bcmgenet_tx_ring_int_disable; } - ring->cbs = priv->tx_cbs + write_ptr; + ring->cbs = priv->tx_cbs + start_ptr; ring->size = size; + ring->clean_ptr = start_ptr; ring->c_index = 0; ring->free_bds = size; - ring->write_ptr = write_ptr; - ring->cb_ptr = write_ptr; + ring->write_ptr = start_ptr; + ring->cb_ptr = start_ptr; ring->end_ptr = end_ptr - 1; ring->prod_index = 0; @@ -1754,19 +1746,16 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, /* Disable rate control for now */ bcmgenet_tdma_ring_writel(priv, index, flow_period_val, TDMA_FLOW_PERIOD); - /* Unclassified traffic goes to ring 16 */ bcmgenet_tdma_ring_writel(priv, index, ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH), DMA_RING_BUF_SIZE); - first_bd = write_ptr; - /* Set start and end address, read and write pointers */ - bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, DMA_START_ADDR); - bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, TDMA_READ_PTR); - bcmgenet_tdma_ring_writel(priv, index, first_bd, + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, TDMA_WRITE_PTR); bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, DMA_END_ADDR); @@ -1785,118 +1774,176 @@ static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv, /* Initialize a RDMA ring */ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, - unsigned int index, unsigned int size) + unsigned int index, unsigned int size, + unsigned int start_ptr, unsigned int end_ptr) { + struct bcmgenet_rx_ring *ring = &priv->rx_rings[index]; u32 words_per_bd = WORDS_PER_BD(priv); int ret; - priv->num_rx_bds = TOTAL_DESC; - priv->rx_bds = priv->base + priv->hw_params->rdma_offset; - priv->rx_bd_assign_ptr = priv->rx_bds; - priv->rx_bd_assign_index = 0; - priv->rx_c_index = 0; - priv->rx_read_ptr = 0; - priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb), - GFP_KERNEL); - if (!priv->rx_cbs) - return -ENOMEM; + ring->index = index; + ring->cbs = priv->rx_cbs + start_ptr; + ring->size = size; + ring->c_index = 0; + ring->read_ptr = 
start_ptr; + ring->cb_ptr = start_ptr; + ring->end_ptr = end_ptr - 1; - ret = bcmgenet_alloc_rx_buffers(priv); - if (ret) { - kfree(priv->rx_cbs); + ret = bcmgenet_alloc_rx_buffers(priv, ring); + if (ret) return ret; - } - bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR); bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); + bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); bcmgenet_rdma_ring_writel(priv, index, ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH), DMA_RING_BUF_SIZE); - bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR); - bcmgenet_rdma_ring_writel(priv, index, - words_per_bd * size - 1, DMA_END_ADDR); bcmgenet_rdma_ring_writel(priv, index, (DMA_FC_THRESH_LO << DMA_XOFF_THRESHOLD_SHIFT) | DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH); - bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR); + + /* Set start and end address, read and write pointers */ + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + DMA_START_ADDR); + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + RDMA_READ_PTR); + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + RDMA_WRITE_PTR); + bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, + DMA_END_ADDR); return ret; } -/* init multi xmit queues, only available for GENET2+ - * the queue is partitioned as follows: +/* Initialize Tx queues * - * queue 0 - 3 is priority based, each one has 32 descriptors, + * Queues 0-3 are priority-based, each one has 32 descriptors, * with queue 0 being the highest priority queue. * - * queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT - * descriptors: 256 - (number of tx queues * bds per queues) = 128 - * descriptors. + * Queue 16 is the default Tx queue with + * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors. 
* - * The transmit control block pool is then partitioned as following: - * - tx_cbs[0...127] are for queue 16 - * - tx_ring_cbs[0] points to tx_cbs[128..159] - * - tx_ring_cbs[1] points to tx_cbs[160..191] - * - tx_ring_cbs[2] points to tx_cbs[192..223] - * - tx_ring_cbs[3] points to tx_cbs[224..255] + * The transmit control block pool is then partitioned as follows: + * - Tx queue 0 uses tx_cbs[0..31] + * - Tx queue 1 uses tx_cbs[32..63] + * - Tx queue 2 uses tx_cbs[64..95] + * - Tx queue 3 uses tx_cbs[96..127] + * - Tx queue 16 uses tx_cbs[128..255] */ -static void bcmgenet_init_multiq(struct net_device *dev) +static void bcmgenet_init_tx_queues(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); - unsigned int i, dma_enable; - u32 reg, dma_ctrl, ring_cfg = 0; + u32 i, dma_enable; + u32 dma_ctrl, ring_cfg; u32 dma_priority[3] = {0, 0, 0}; - if (!netif_is_multiqueue(dev)) { - netdev_warn(dev, "called with non multi queue aware HW\n"); - return; - } - dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); dma_enable = dma_ctrl & DMA_EN; dma_ctrl &= ~DMA_EN; bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); + dma_ctrl = 0; + ring_cfg = 0; + /* Enable strict priority arbiter mode */ bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); + /* Initialize Tx priority queues */ for (i = 0; i < priv->hw_params->tx_queues; i++) { - /* first 64 tx_cbs are reserved for default tx queue - * (ring 16) - */ - bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt, - i * priv->hw_params->bds_cnt, - (i + 1) * priv->hw_params->bds_cnt); - - /* Configure ring as descriptor ring and setup priority */ - ring_cfg |= 1 << i; - dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT); - + bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q, + i * priv->hw_params->tx_bds_per_q, + (i + 1) * priv->hw_params->tx_bds_per_q); + ring_cfg |= (1 << i); + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); dma_priority[DMA_PRIO_REG_INDEX(i)] |= ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i)); } - /* Set ring 16 priority and program the hardware registers */ + /* Initialize Tx default queue 16 */ + bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT, + priv->hw_params->tx_queues * + priv->hw_params->tx_bds_per_q, + TOTAL_DESC); + ring_cfg |= (1 << DESC_INDEX); + dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |= ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << DMA_PRIO_REG_SHIFT(DESC_INDEX)); + + /* Set Tx queue priorities */ bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0); bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1); bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); + /* Enable Tx queues */ + bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG); + + /* Enable Tx DMA */ + if (dma_enable) + dma_ctrl |= DMA_EN; + bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); +} + +/* Initialize Rx queues + * + * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be + * used to direct traffic to these queues. + * + * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors. 
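The queue layout described in the two comments above carves the fixed pool of TOTAL_DESC descriptors (and matching control blocks) between the priority queues and default queue 16. A small sketch of that partitioning arithmetic, using the GENET_V2+ values from the hw_params table further down as assumptions (4 Tx priority queues of 32 BDs each; the Rx side is left at 0 priority queues in this patch):

/* Sketch of how the 256-entry descriptor/control-block pool is split
 * between the priority queues and default queue 16, mirroring
 * bcmgenet_init_tx_queues()/bcmgenet_init_rx_queues() above.  The constants
 * are assumed from the GENET_V2+ hw_params entries shown later in this patch.
 */
#include <stdio.h>

#define TOTAL_DESC	256
#define TX_QUEUES	4
#define TX_BDS_PER_Q	32
#define RX_QUEUES	0
#define RX_BDS_PER_Q	0

int main(void)
{
	unsigned int q16_tx = TOTAL_DESC - TX_QUEUES * TX_BDS_PER_Q;	/* 256 - 4*32 = 128 */
	unsigned int q16_rx = TOTAL_DESC - RX_QUEUES * RX_BDS_PER_Q;
	unsigned int q;

	/* priority queue q owns tx_cbs[q * TX_BDS_PER_Q .. (q + 1) * TX_BDS_PER_Q - 1] */
	for (q = 0; q < TX_QUEUES; q++)
		printf("Tx queue %u: tx_cbs[%u..%u]\n", q,
		       q * TX_BDS_PER_Q, (q + 1) * TX_BDS_PER_Q - 1);

	/* queue 16 takes whatever is left, starting right after the last priority queue */
	printf("Tx queue 16: tx_cbs[%u..%u] (%u BDs)\n",
	       TX_QUEUES * TX_BDS_PER_Q, TOTAL_DESC - 1, q16_tx);
	printf("Rx queue 16: rx_cbs[0..%u] (%u BDs)\n", TOTAL_DESC - 1, q16_rx);
	return 0;
}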
+ */ +static int bcmgenet_init_rx_queues(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 i; + u32 dma_enable; + u32 dma_ctrl; + u32 ring_cfg; + int ret; + + dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL); + dma_enable = dma_ctrl & DMA_EN; + dma_ctrl &= ~DMA_EN; + bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); + + dma_ctrl = 0; + ring_cfg = 0; + + /* Initialize Rx priority queues */ + for (i = 0; i < priv->hw_params->rx_queues; i++) { + ret = bcmgenet_init_rx_ring(priv, i, + priv->hw_params->rx_bds_per_q, + i * priv->hw_params->rx_bds_per_q, + (i + 1) * + priv->hw_params->rx_bds_per_q); + if (ret) + return ret; + + ring_cfg |= (1 << i); + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + } + + /* Initialize Rx default queue 16 */ + ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT, + priv->hw_params->rx_queues * + priv->hw_params->rx_bds_per_q, + TOTAL_DESC); + if (ret) + return ret; + + ring_cfg |= (1 << DESC_INDEX); + dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); + /* Enable rings */ - reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG); - reg |= ring_cfg; - bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG); + bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG); /* Configure ring as descriptor ring and re-enable DMA if enabled */ - reg = bcmgenet_tdma_readl(priv, DMA_CTRL); - reg |= dma_ctrl; if (dma_enable) - reg |= DMA_EN; - bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + dma_ctrl |= DMA_EN; + bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); + + return 0; } static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) @@ -1985,19 +2032,36 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) static int bcmgenet_init_dma(struct bcmgenet_priv *priv) { int ret; + unsigned int i; + struct enet_cb *cb; + + netif_dbg(priv, hw, priv->dev, "%s\n", __func__); + + /* Init rDma */ + bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); + + /* Initialize common Rx ring structures */ + priv->rx_bds = priv->base + priv->hw_params->rdma_offset; + priv->num_rx_bds = TOTAL_DESC; + priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb), + GFP_KERNEL); + if (!priv->rx_cbs) + return -ENOMEM; - netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n"); + for (i = 0; i < priv->num_rx_bds; i++) { + cb = priv->rx_cbs + i; + cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE; + } - /* by default, enable ring 16 (descriptor based) */ - ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC); + /* Initialize Rx queues */ + ret = bcmgenet_init_rx_queues(priv->dev); if (ret) { - netdev_err(priv->dev, "failed to initialize RX ring\n"); + netdev_err(priv->dev, "failed to initialize Rx queues\n"); + bcmgenet_free_rx_buffers(priv); + kfree(priv->rx_cbs); return ret; } - /* init rDma */ - bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); - /* Init tDma */ bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); @@ -2011,14 +2075,13 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv) return -ENOMEM; } - /* initialize multi xmit queue */ - bcmgenet_init_multiq(priv->dev); + for (i = 0; i < priv->num_tx_bds; i++) { + cb = priv->tx_cbs + i; + cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE; + } - /* initialize special ring 16 */ - bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT, - priv->hw_params->tx_queues * - priv->hw_params->bds_cnt, - TOTAL_DESC); + /* Initialize Tx queues */ + bcmgenet_init_tx_queues(priv->dev); return 0; } @@ -2030,13 +2093,8 @@ static int bcmgenet_poll(struct napi_struct 
*napi, int budget) struct bcmgenet_priv, napi); unsigned int work_done; - work_done = bcmgenet_desc_rx(priv, budget); + work_done = bcmgenet_desc_rx(priv, DESC_INDEX, budget); - /* Advancing our consumer index*/ - priv->rx_c_index += work_done; - priv->rx_c_index &= DMA_C_INDEX_MASK; - bcmgenet_rdma_ring_writel(priv, DESC_INDEX, - priv->rx_c_index, RDMA_CONS_INDEX); if (work_done < budget) { napi_complete(napi); bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE, @@ -2499,8 +2557,9 @@ static const struct net_device_ops bcmgenet_netdev_ops = { static struct bcmgenet_hw_params bcmgenet_hw_params[] = { [GENET_V1] = { .tx_queues = 0, + .tx_bds_per_q = 0, .rx_queues = 0, - .bds_cnt = 0, + .rx_bds_per_q = 0, .bp_in_en_shift = 16, .bp_in_mask = 0xffff, .hfb_filter_cnt = 16, @@ -2512,8 +2571,9 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = { }, [GENET_V2] = { .tx_queues = 4, - .rx_queues = 4, - .bds_cnt = 32, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, .bp_in_en_shift = 16, .bp_in_mask = 0xffff, .hfb_filter_cnt = 16, @@ -2528,8 +2588,9 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = { }, [GENET_V3] = { .tx_queues = 4, - .rx_queues = 4, - .bds_cnt = 32, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, .bp_in_en_shift = 17, .bp_in_mask = 0x1ffff, .hfb_filter_cnt = 48, @@ -2544,8 +2605,9 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = { }, [GENET_V4] = { .tx_queues = 4, - .rx_queues = 4, - .bds_cnt = 32, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, .bp_in_en_shift = 17, .bp_in_mask = 0x1ffff, .hfb_filter_cnt = 48, @@ -2645,14 +2707,15 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) #endif pr_debug("Configuration for version: %d\n" - "TXq: %1d, RXq: %1d, BDs: %1d\n" + "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n" "BP << en: %2d, BP msk: 0x%05x\n" "HFB count: %2d, QTAQ msk: 0x%05x\n" "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n" "RDMA: 0x%05x, TDMA: 0x%05x\n" "Words/BD: %d\n", priv->version, - params->tx_queues, params->rx_queues, params->bds_cnt, + params->tx_queues, params->tx_bds_per_q, + params->rx_queues, params->rx_bds_per_q, params->bp_in_en_shift, params->bp_in_mask, params->hfb_filter_cnt, params->qtag_mask, params->tbuf_offset, params->hfb_offset, @@ -2680,8 +2743,9 @@ static int bcmgenet_probe(struct platform_device *pdev) struct resource *r; int err = -EIO; - /* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */ - dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1); + /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */ + dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, + GENET_MAX_MQ_CNT + 1); if (!dev) { dev_err(&pdev->dev, "can't allocate net device\n"); return -ENOMEM; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 0d370d1..17443db 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -503,8 +503,9 @@ enum bcmgenet_version { */ struct bcmgenet_hw_params { u8 tx_queues; + u8 tx_bds_per_q; u8 rx_queues; - u8 bds_cnt; + u8 rx_bds_per_q; u8 bp_in_en_shift; u32 bp_in_mask; u8 hfb_filter_cnt; @@ -525,6 +526,7 @@ struct bcmgenet_tx_ring { unsigned int queue; /* queue index */ struct enet_cb *cbs; /* tx ring buffer control block*/ unsigned int size; /* size of each tx ring */ + unsigned int clean_ptr; /* Tx ring clean pointer */ unsigned int c_index; /* last consumer index of each ring*/ unsigned int free_bds; /* # of free bds 
for each ring */ unsigned int write_ptr; /* Tx ring write pointer SW copy */ @@ -538,6 +540,16 @@ struct bcmgenet_tx_ring { struct bcmgenet_priv *priv; }; +struct bcmgenet_rx_ring { + unsigned int index; /* Rx ring index */ + struct enet_cb *cbs; /* Rx ring buffer control block */ + unsigned int size; /* Rx ring size */ + unsigned int c_index; /* Rx last consumer index */ + unsigned int read_ptr; /* Rx ring read pointer */ + unsigned int cb_ptr; /* Rx ring initial CB ptr */ + unsigned int end_ptr; /* Rx ring end CB ptr */ +}; + /* device context */ struct bcmgenet_priv { void __iomem *base; @@ -558,13 +570,11 @@ struct bcmgenet_priv { /* receive variables */ void __iomem *rx_bds; - void __iomem *rx_bd_assign_ptr; - int rx_bd_assign_index; struct enet_cb *rx_cbs; unsigned int num_rx_bds; unsigned int rx_buf_len; - unsigned int rx_read_ptr; - unsigned int rx_c_index; + + struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1]; /* other misc variables */ struct bcmgenet_hw_params *hw_params; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 23a019c..22b33da 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -7244,7 +7244,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget) if (tnapi == &tp->napi[1] && tp->rx_refill) continue; - napi_complete(napi); + napi_complete_done(napi, work_done); /* Reenable interrupts. */ tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); @@ -7337,7 +7337,7 @@ static int tg3_poll(struct napi_struct *napi, int budget) sblk->status &= ~SD_STATUS_UPDATED; if (likely(!tg3_has_work(tnapi))) { - napi_complete(napi); + napi_complete_done(napi, work_done); tg3_int_reenable(tnapi); break; } diff --git a/drivers/net/ethernet/brocade/Kconfig b/drivers/net/ethernet/brocade/Kconfig index 2641557..4e8c0b6 100644 --- a/drivers/net/ethernet/brocade/Kconfig +++ b/drivers/net/ethernet/brocade/Kconfig @@ -1,9 +1,9 @@ # -# Brocade device configuration +# QLogic BR-series device configuration # config NET_VENDOR_BROCADE - bool "Brocade devices" + bool "QLogic BR-series devices" default y depends on PCI ---help--- @@ -13,8 +13,8 @@ config NET_VENDOR_BROCADE Note that the answer to this question doesn't directly affect the kernel: saying N will just cause the configurator to skip all - the questions about Brocade cards. If you say Y, you will be asked for - your specific card in the following questions. + the questions about QLogic BR-series cards. If you say Y, you will be + asked for your specific card in the following questions. if NET_VENDOR_BROCADE diff --git a/drivers/net/ethernet/brocade/Makefile b/drivers/net/ethernet/brocade/Makefile index b58238d..fec10f9 100644 --- a/drivers/net/ethernet/brocade/Makefile +++ b/drivers/net/ethernet/brocade/Makefile @@ -1,5 +1,5 @@ # -# Makefile for the Brocade device drivers. +# Makefile for the QLogic BR-series device drivers. # obj-$(CONFIG_BNA) += bna/ diff --git a/drivers/net/ethernet/brocade/bna/Kconfig b/drivers/net/ethernet/brocade/bna/Kconfig index dc2eb52..fe01279 100644 --- a/drivers/net/ethernet/brocade/bna/Kconfig +++ b/drivers/net/ethernet/brocade/bna/Kconfig @@ -1,17 +1,17 @@ # -# Brocade network device configuration +# QLogic BR-series network device configuration # config BNA - tristate "Brocade 1010/1020 10Gb Ethernet Driver support" + tristate "QLogic BR-series 1010/1020/1860 10Gb Ethernet Driver support" depends on PCI ---help--- - This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet - cards. 
+ This driver supports QLogic BR-series 1010/1020/1860 10Gb CEE capable + Ethernet cards. To compile this driver as a module, choose M here: the module will be called bna. - For general information and support, go to the Brocade support + For general information and support, go to the QLogic support website at: - <http://support.brocade.com> + <http://support.qlogic.com> diff --git a/drivers/net/ethernet/brocade/bna/Makefile b/drivers/net/ethernet/brocade/bna/Makefile index 6027302..6e10b99 100644 --- a/drivers/net/ethernet/brocade/bna/Makefile +++ b/drivers/net/ethernet/brocade/bna/Makefile @@ -1,5 +1,6 @@ # -# Copyright (c) 2005-2010 Brocade Communications Systems, Inc. +# Copyright (c) 2005-2014 Brocade Communications Systems, Inc. +# Copyright (c) 2014-2015 QLogic Corporation. # All rights reserved. # diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c index 550d252..cf9f395 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_cee.c +++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include "bfa_cee.h" diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.h b/drivers/net/ethernet/brocade/bna/bfa_cee.h index 93fde63..d04eef5d 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_cee.h +++ b/drivers/net/ethernet/brocade/bna/bfa_cee.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFA_CEE_H__ diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h index ad004a4..af25d8e 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_cs.h +++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ /* BFA common services */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h index b7d8127..3bfd9da 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFA_DEFS_H__ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h index b39c5f2..a37326d 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFA_DEFS_CNA_H__ #define __BFA_DEFS_CNA_H__ @@ -134,7 +135,7 @@ struct bfa_cee_lldp_str { u8 value[BFA_CEE_LLDP_MAX_STRING_LEN]; }; -/* LLDP paramters */ +/* LLDP parameters */ struct bfa_cee_lldp_cfg { struct bfa_cee_lldp_str chassis_id; struct bfa_cee_lldp_str port_id; diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h index 7fb396f..7a45cd0 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFA_DEFS_MFG_COMM_H__ #define __BFA_DEFS_MFG_COMM_H__ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h index ea9af9a..a43b560 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFA_DEFS_STATUS_H__ #define __BFA_DEFS_STATUS_H__ diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 354ae97..594a2ab 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include "bfa_ioc.h" @@ -1339,7 +1340,7 @@ bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1, return true; } -/* Returns TRUE if major minor and maintainence are same. +/* Returns TRUE if major minor and maintenance are same. * If patch version are same, check for MD5 Checksum to be same. */ static bool @@ -2763,7 +2764,7 @@ bfa_nw_ioc_notify_register(struct bfa_ioc *ioc, list_add_tail(¬ify->qe, &ioc->notify_q); } -#define BFA_MFG_NAME "Brocade" +#define BFA_MFG_NAME "QLogic" static void bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc, struct bfa_adapter_attr *ad_attr) diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h index 20cff7d..effb715 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFA_IOC_H__ diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c index d639558..2e72445 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include "bfa_ioc.h" @@ -698,7 +699,7 @@ bfa_ioc_ct2_sclk_init(void __iomem *rb) /* * Ignore mode and program for the max clock (which is FC16) - * Firmware/NFC will do the PLL init appropiately + * Firmware/NFC will do the PLL init appropriately */ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2); diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c index 55067d0..c07d5b9 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_msgq.c +++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ /* MSGQ module source file. */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.h b/drivers/net/ethernet/brocade/bna/bfa_msgq.h index a6a565a..66bc8b5 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_msgq.h +++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFA_MSGQ_H__ diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h index 8c563a7..2bcde40 100644 --- a/drivers/net/ethernet/brocade/bna/bfi.h +++ b/drivers/net/ethernet/brocade/bna/bfi.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFI_H__ #define __BFI_H__ @@ -158,8 +159,8 @@ enum bfi_asic_gen { }; enum bfi_asic_mode { - BFI_ASIC_MODE_FC = 1, /* FC upto 8G speed */ - BFI_ASIC_MODE_FC16 = 2, /* FC upto 16G speed */ + BFI_ASIC_MODE_FC = 1, /* FC up to 8G speed */ + BFI_ASIC_MODE_FC16 = 2, /* FC up to 16G speed */ BFI_ASIC_MODE_ETH = 3, /* Ethernet ports */ BFI_ASIC_MODE_COMBO = 4, /* FC 16G and Ethernet 10G port */ }; diff --git a/drivers/net/ethernet/brocade/bna/bfi_cna.h b/drivers/net/ethernet/brocade/bna/bfi_cna.h index 6704a43..bd605be 100644 --- a/drivers/net/ethernet/brocade/bna/bfi_cna.h +++ b/drivers/net/ethernet/brocade/bna/bfi_cna.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFI_CNA_H__ #define __BFI_CNA_H__ diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h index ae072dc..bccca3b 100644 --- a/drivers/net/ethernet/brocade/bna/bfi_enet.h +++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ /* BNA Hardware and Firmware Interface */ diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h index c49fa31..2835b51 100644 --- a/drivers/net/ethernet/brocade/bna/bfi_reg.h +++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,13 +11,14 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ /* - * bfi_reg.h ASIC register defines for all Brocade adapter ASICs + * bfi_reg.h ASIC register defines for all QLogic BR-series adapter ASICs */ #ifndef __BFI_REG_H__ @@ -221,7 +222,7 @@ enum { #define __PMM_1T_RESET_P 0x00000001 #define PMM_1T_RESET_REG_P1 0x00023c1c -/* Brocade 1860 Adapter specific defines */ +/* QLogic BR-series 1860 Adapter specific defines */ #define CT2_PCI_CPQ_BASE 0x00030000 #define CT2_PCI_APP_BASE 0x00030100 #define CT2_PCI_ETH_BASE 0x00030400 @@ -264,7 +265,7 @@ enum { #define CT2_HOSTFN_MSIX_VT_INDEX_MBOX_ERR (CT2_PCI_APP_BASE + 0x38) /* - * Brocade 1860 adapter CPQ block registers + * QLogic BR-series 1860 adapter CPQ block registers */ #define CT2_HOSTFN_LPU0_MBOX0 (CT2_PCI_CPQ_BASE + 0x00) #define CT2_HOSTFN_LPU1_MBOX0 (CT2_PCI_CPQ_BASE + 0x20) diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h index 1f51219..8ba72b1 100644 --- a/drivers/net/ethernet/brocade/bna/bna.h +++ b/drivers/net/ethernet/brocade/bna/bna.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BNA_H__ #define __BNA_H__ diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c index 903466e..deb8da6 100644 --- a/drivers/net/ethernet/brocade/bna/bna_enet.c +++ b/drivers/net/ethernet/brocade/bna/bna_enet.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include "bna.h" diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h index 2702d02..174af0e 100644 --- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h +++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ /* File for interrupt macros and functions */ @@ -362,7 +363,7 @@ struct bna_txq_wi_vector { /* TxQ Entry Structure * - * BEWARE: Load values into this structure with correct endianess. + * BEWARE: Load values into this structure with correct endianness. */ struct bna_txq_entry { union { diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c index 5fac411..8ab3a5f 100644 --- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include "bna.h" #include "bfi.h" diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h index 621547c..d0a7a56 100644 --- a/drivers/net/ethernet/brocade/bna/bna_types.h +++ b/drivers/net/ethernet/brocade/bna/bna_types.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BNA_TYPES_H__ #define __BNA_TYPES_H__ diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 7714d77..37072a8 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include <linux/bitops.h> #include <linux/netdevice.h> @@ -3867,7 +3868,7 @@ bnad_module_init(void) { int err; - pr_info("Brocade 10G Ethernet driver - version: %s\n", + pr_info("QLogic BR-series 10G Ethernet driver - version: %s\n", BNAD_VERSION); bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover); @@ -3894,7 +3895,7 @@ module_exit(bnad_module_exit); MODULE_AUTHOR("Brocade"); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver"); +MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver"); MODULE_VERSION(BNAD_VERSION); MODULE_FIRMWARE(CNA_FW_FILE_CT); MODULE_FIRMWARE(CNA_FW_FILE_CT2); diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h index 2842c18..7ead6c2 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.h +++ b/drivers/net/ethernet/brocade/bna/bnad.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BNAD_H__ #define __BNAD_H__ @@ -71,7 +72,7 @@ struct bnad_rx_ctrl { #define BNAD_NAME "bna" #define BNAD_NAME_LEN 64 -#define BNAD_VERSION "3.2.23.0" +#define BNAD_VERSION "3.2.25.1" #define BNAD_MAILBOX_MSIX_INDEX 0 #define BNAD_MAILBOX_MSIX_VECTORS 1 diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c index 619083a..72c8955 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c +++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include <linux/debugfs.h> diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index d26adac..12f344d 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include "cna.h" diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h index b3ff6d5..28e7d0f 100644 --- a/drivers/net/ethernet/brocade/bna/cna.h +++ b/drivers/net/ethernet/brocade/bna/cna.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2006-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2006-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __CNA_H__ @@ -37,8 +38,8 @@ extern char bfa_version[]; -#define CNA_FW_FILE_CT "ctfw-3.2.3.0.bin" -#define CNA_FW_FILE_CT2 "ct2fw-3.2.3.0.bin" +#define CNA_FW_FILE_CT "ctfw-3.2.5.1.bin" +#define CNA_FW_FILE_CT2 "ct2fw-3.2.5.1.bin" #define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */ #pragma pack(1) diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c index 6f72771..ebf462d 100644 --- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c +++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include <linux/firmware.h> #include "bnad.h" diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig index 321d2ad..1ba3e3a 100644 --- a/drivers/net/ethernet/cadence/Kconfig +++ b/drivers/net/ethernet/cadence/Kconfig @@ -4,7 +4,7 @@ config NET_CADENCE bool "Cadence devices" - depends on HAS_IOMEM && (ARM || AVR32 || MICROBLAZE || COMPILE_TEST) + depends on HAS_IOMEM default y ---help--- If you have a network (Ethernet) card belonging to this class, say Y. @@ -20,17 +20,9 @@ config NET_CADENCE if NET_CADENCE -config ARM_AT91_ETHER - tristate "AT91RM9200 Ethernet support" - depends on HAS_DMA && (ARCH_AT91 || COMPILE_TEST) - select MACB - ---help--- - If you wish to compile a kernel for the AT91RM9200 and enable - ethernet support, then you should always answer Y to this. - config MACB tristate "Cadence MACB/GEM support" - depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST) + depends on HAS_DMA select PHYLIB ---help--- The Cadence MACB ethernet interface is found on many Atmel AT32 and diff --git a/drivers/net/ethernet/cadence/Makefile b/drivers/net/ethernet/cadence/Makefile index 9068b83..91f79b1 100644 --- a/drivers/net/ethernet/cadence/Makefile +++ b/drivers/net/ethernet/cadence/Makefile @@ -2,5 +2,4 @@ # Makefile for the Atmel network device drivers. 
# -obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o obj-$(CONFIG_MACB) += macb.o diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c deleted file mode 100644 index 7ef55f5..0000000 --- a/drivers/net/ethernet/cadence/at91_ether.c +++ /dev/null @@ -1,481 +0,0 @@ -/* - * Ethernet driver for the Atmel AT91RM9200 (Thunder) - * - * Copyright (C) 2003 SAN People (Pty) Ltd - * - * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc. - * Initial version by Rick Bronson 01/11/2003 - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/dma-mapping.h> -#include <linux/ethtool.h> -#include <linux/platform_data/macb.h> -#include <linux/platform_device.h> -#include <linux/clk.h> -#include <linux/gfp.h> -#include <linux/phy.h> -#include <linux/io.h> -#include <linux/of.h> -#include <linux/of_device.h> -#include <linux/of_net.h> - -#include "macb.h" - -/* 1518 rounded up */ -#define MAX_RBUFF_SZ 0x600 -/* max number of receive buffers */ -#define MAX_RX_DESCR 9 - -/* Initialize and start the Receiver and Transmit subsystems */ -static int at91ether_start(struct net_device *dev) -{ - struct macb *lp = netdev_priv(dev); - dma_addr_t addr; - u32 ctl; - int i; - - lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev, - (MAX_RX_DESCR * - sizeof(struct macb_dma_desc)), - &lp->rx_ring_dma, GFP_KERNEL); - if (!lp->rx_ring) - return -ENOMEM; - - lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, - MAX_RX_DESCR * MAX_RBUFF_SZ, - &lp->rx_buffers_dma, GFP_KERNEL); - if (!lp->rx_buffers) { - dma_free_coherent(&lp->pdev->dev, - MAX_RX_DESCR * sizeof(struct macb_dma_desc), - lp->rx_ring, lp->rx_ring_dma); - lp->rx_ring = NULL; - return -ENOMEM; - } - - addr = lp->rx_buffers_dma; - for (i = 0; i < MAX_RX_DESCR; i++) { - lp->rx_ring[i].addr = addr; - lp->rx_ring[i].ctrl = 0; - addr += MAX_RBUFF_SZ; - } - - /* Set the Wrap bit on the last descriptor */ - lp->rx_ring[MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP); - - /* Reset buffer index */ - lp->rx_tail = 0; - - /* Program address of descriptor list in Rx Buffer Queue register */ - macb_writel(lp, RBQP, lp->rx_ring_dma); - - /* Enable Receive and Transmit */ - ctl = macb_readl(lp, NCR); - macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE)); - - return 0; -} - -/* Open the ethernet interface */ -static int at91ether_open(struct net_device *dev) -{ - struct macb *lp = netdev_priv(dev); - u32 ctl; - int ret; - - /* Clear internal statistics */ - ctl = macb_readl(lp, NCR); - macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT)); - - macb_set_hwaddr(lp); - - ret = at91ether_start(dev); - if (ret) - return ret; - - /* Enable MAC interrupts */ - macb_writel(lp, IER, MACB_BIT(RCOMP) | - MACB_BIT(RXUBR) | - MACB_BIT(ISR_TUND) | - MACB_BIT(ISR_RLE) | - MACB_BIT(TCOMP) | - MACB_BIT(ISR_ROVR) | - MACB_BIT(HRESP)); - - /* schedule a link state check */ - phy_start(lp->phy_dev); - - netif_start_queue(dev); - - return 0; -} - -/* Close the interface */ -static int at91ether_close(struct net_device *dev) -{ - struct macb *lp = netdev_priv(dev); - u32 ctl; - - /* Disable Receiver and Transmitter */ - ctl = macb_readl(lp, NCR); - macb_writel(lp, 
NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); - - /* Disable MAC interrupts */ - macb_writel(lp, IDR, MACB_BIT(RCOMP) | - MACB_BIT(RXUBR) | - MACB_BIT(ISR_TUND) | - MACB_BIT(ISR_RLE) | - MACB_BIT(TCOMP) | - MACB_BIT(ISR_ROVR) | - MACB_BIT(HRESP)); - - netif_stop_queue(dev); - - dma_free_coherent(&lp->pdev->dev, - MAX_RX_DESCR * sizeof(struct macb_dma_desc), - lp->rx_ring, lp->rx_ring_dma); - lp->rx_ring = NULL; - - dma_free_coherent(&lp->pdev->dev, - MAX_RX_DESCR * MAX_RBUFF_SZ, - lp->rx_buffers, lp->rx_buffers_dma); - lp->rx_buffers = NULL; - - return 0; -} - -/* Transmit packet */ -static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct macb *lp = netdev_priv(dev); - - if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) { - netif_stop_queue(dev); - - /* Store packet information (to free when Tx completed) */ - lp->skb = skb; - lp->skb_length = skb->len; - lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, - DMA_TO_DEVICE); - - /* Set address of the data in the Transmit Address register */ - macb_writel(lp, TAR, lp->skb_physaddr); - /* Set length of the packet in the Transmit Control register */ - macb_writel(lp, TCR, skb->len); - - } else { - netdev_err(dev, "%s called, but device is busy!\n", __func__); - return NETDEV_TX_BUSY; - } - - return NETDEV_TX_OK; -} - -/* Extract received frame from buffer descriptors and sent to upper layers. - * (Called from interrupt context) - */ -static void at91ether_rx(struct net_device *dev) -{ - struct macb *lp = netdev_priv(dev); - unsigned char *p_recv; - struct sk_buff *skb; - unsigned int pktlen; - - while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) { - p_recv = lp->rx_buffers + lp->rx_tail * MAX_RBUFF_SZ; - pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl); - skb = netdev_alloc_skb(dev, pktlen + 2); - if (skb) { - skb_reserve(skb, 2); - memcpy(skb_put(skb, pktlen), p_recv, pktlen); - - skb->protocol = eth_type_trans(skb, dev); - lp->stats.rx_packets++; - lp->stats.rx_bytes += pktlen; - netif_rx(skb); - } else { - lp->stats.rx_dropped++; - } - - if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH)) - lp->stats.multicast++; - - /* reset ownership bit */ - lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED); - - /* wrap after last buffer */ - if (lp->rx_tail == MAX_RX_DESCR - 1) - lp->rx_tail = 0; - else - lp->rx_tail++; - } -} - -/* MAC interrupt handler */ -static irqreturn_t at91ether_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct macb *lp = netdev_priv(dev); - u32 intstatus, ctl; - - /* MAC Interrupt Status register indicates what interrupts are pending. - * It is automatically cleared once read. 
- */ - intstatus = macb_readl(lp, ISR); - - /* Receive complete */ - if (intstatus & MACB_BIT(RCOMP)) - at91ether_rx(dev); - - /* Transmit complete */ - if (intstatus & MACB_BIT(TCOMP)) { - /* The TCOM bit is set even if the transmission failed */ - if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE))) - lp->stats.tx_errors++; - - if (lp->skb) { - dev_kfree_skb_irq(lp->skb); - lp->skb = NULL; - dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE); - lp->stats.tx_packets++; - lp->stats.tx_bytes += lp->skb_length; - } - netif_wake_queue(dev); - } - - /* Work-around for EMAC Errata section 41.3.1 */ - if (intstatus & MACB_BIT(RXUBR)) { - ctl = macb_readl(lp, NCR); - macb_writel(lp, NCR, ctl & ~MACB_BIT(RE)); - macb_writel(lp, NCR, ctl | MACB_BIT(RE)); - } - - if (intstatus & MACB_BIT(ISR_ROVR)) - netdev_err(dev, "ROVR error\n"); - - return IRQ_HANDLED; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void at91ether_poll_controller(struct net_device *dev) -{ - unsigned long flags; - - local_irq_save(flags); - at91ether_interrupt(dev->irq, dev); - local_irq_restore(flags); -} -#endif - -static const struct net_device_ops at91ether_netdev_ops = { - .ndo_open = at91ether_open, - .ndo_stop = at91ether_close, - .ndo_start_xmit = at91ether_start_xmit, - .ndo_get_stats = macb_get_stats, - .ndo_set_rx_mode = macb_set_rx_mode, - .ndo_set_mac_address = eth_mac_addr, - .ndo_do_ioctl = macb_ioctl, - .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = at91ether_poll_controller, -#endif -}; - -#if defined(CONFIG_OF) -static const struct of_device_id at91ether_dt_ids[] = { - { .compatible = "cdns,at91rm9200-emac" }, - { .compatible = "cdns,emac" }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, at91ether_dt_ids); -#endif - -/* Detect MAC & PHY and perform ethernet interface initialization */ -static int __init at91ether_probe(struct platform_device *pdev) -{ - struct macb_platform_data *board_data = dev_get_platdata(&pdev->dev); - struct resource *regs; - struct net_device *dev; - struct phy_device *phydev; - struct macb *lp; - int res; - u32 reg; - const char *mac; - - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!regs) - return -ENOENT; - - dev = alloc_etherdev(sizeof(struct macb)); - if (!dev) - return -ENOMEM; - - lp = netdev_priv(dev); - lp->pdev = pdev; - lp->dev = dev; - spin_lock_init(&lp->lock); - - /* physical base address */ - dev->base_addr = regs->start; - lp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); - if (!lp->regs) { - res = -ENOMEM; - goto err_free_dev; - } - - /* Clock */ - lp->pclk = devm_clk_get(&pdev->dev, "ether_clk"); - if (IS_ERR(lp->pclk)) { - res = PTR_ERR(lp->pclk); - goto err_free_dev; - } - clk_prepare_enable(lp->pclk); - - lp->hclk = ERR_PTR(-ENOENT); - lp->tx_clk = ERR_PTR(-ENOENT); - - /* Install the interrupt handler */ - dev->irq = platform_get_irq(pdev, 0); - res = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, 0, dev->name, dev); - if (res) - goto err_disable_clock; - - dev->netdev_ops = &at91ether_netdev_ops; - dev->ethtool_ops = &macb_ethtool_ops; - platform_set_drvdata(pdev, dev); - SET_NETDEV_DEV(dev, &pdev->dev); - - mac = of_get_mac_address(pdev->dev.of_node); - if (mac) - memcpy(lp->dev->dev_addr, mac, ETH_ALEN); - else - macb_get_hwaddr(lp); - - res = of_get_phy_mode(pdev->dev.of_node); - if (res < 0) { - if (board_data && board_data->is_rmii) - lp->phy_interface = PHY_INTERFACE_MODE_RMII; - else - 
lp->phy_interface = PHY_INTERFACE_MODE_MII; - } else { - lp->phy_interface = res; - } - - macb_writel(lp, NCR, 0); - - reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG); - if (lp->phy_interface == PHY_INTERFACE_MODE_RMII) - reg |= MACB_BIT(RM9200_RMII); - - macb_writel(lp, NCFGR, reg); - - /* Register the network interface */ - res = register_netdev(dev); - if (res) - goto err_disable_clock; - - res = macb_mii_init(lp); - if (res) - goto err_out_unregister_netdev; - - /* will be enabled in open() */ - netif_carrier_off(dev); - - phydev = lp->phy_dev; - netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - phydev->drv->name, dev_name(&phydev->dev), - phydev->irq); - - /* Display ethernet banner */ - netdev_info(dev, "AT91 ethernet at 0x%08lx int=%d (%pM)\n", - dev->base_addr, dev->irq, dev->dev_addr); - - return 0; - -err_out_unregister_netdev: - unregister_netdev(dev); -err_disable_clock: - clk_disable_unprepare(lp->pclk); -err_free_dev: - free_netdev(dev); - return res; -} - -static int at91ether_remove(struct platform_device *pdev) -{ - struct net_device *dev = platform_get_drvdata(pdev); - struct macb *lp = netdev_priv(dev); - - if (lp->phy_dev) - phy_disconnect(lp->phy_dev); - - mdiobus_unregister(lp->mii_bus); - kfree(lp->mii_bus->irq); - mdiobus_free(lp->mii_bus); - unregister_netdev(dev); - clk_disable_unprepare(lp->pclk); - free_netdev(dev); - - return 0; -} - -#ifdef CONFIG_PM -static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg) -{ - struct net_device *net_dev = platform_get_drvdata(pdev); - struct macb *lp = netdev_priv(net_dev); - - if (netif_running(net_dev)) { - netif_stop_queue(net_dev); - netif_device_detach(net_dev); - - clk_disable_unprepare(lp->pclk); - } - return 0; -} - -static int at91ether_resume(struct platform_device *pdev) -{ - struct net_device *net_dev = platform_get_drvdata(pdev); - struct macb *lp = netdev_priv(net_dev); - - if (netif_running(net_dev)) { - clk_prepare_enable(lp->pclk); - - netif_device_attach(net_dev); - netif_start_queue(net_dev); - } - return 0; -} -#else -#define at91ether_suspend NULL -#define at91ether_resume NULL -#endif - -static struct platform_driver at91ether_driver = { - .remove = at91ether_remove, - .suspend = at91ether_suspend, - .resume = at91ether_resume, - .driver = { - .name = "at91_ether", - .of_match_table = of_match_ptr(at91ether_dt_ids), - }, -}; - -module_platform_driver_probe(at91ether_driver, at91ether_probe); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver"); -MODULE_AUTHOR("Andrew Victor"); -MODULE_ALIAS("platform:at91_ether"); diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 81d4153..f032e2a 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -102,7 +102,7 @@ static void *macb_rx_buffer(struct macb *bp, unsigned int index) return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index); } -void macb_set_hwaddr(struct macb *bp) +static void macb_set_hwaddr(struct macb *bp) { u32 bottom; u16 top; @@ -120,9 +120,8 @@ void macb_set_hwaddr(struct macb *bp) macb_or_gem_writel(bp, SA4B, 0); macb_or_gem_writel(bp, SA4T, 0); } -EXPORT_SYMBOL_GPL(macb_set_hwaddr); -void macb_get_hwaddr(struct macb *bp) +static void macb_get_hwaddr(struct macb *bp) { struct macb_platform_data *pdata; u32 bottom; @@ -162,7 +161,6 @@ void macb_get_hwaddr(struct macb *bp) netdev_info(bp->dev, "invalid hw address, using random\n"); eth_hw_addr_random(bp->dev); } 
-EXPORT_SYMBOL_GPL(macb_get_hwaddr); static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { @@ -213,6 +211,9 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev) { long ferr, rate, rate_rounded; + if (!clk) + return; + switch (speed) { case SPEED_10: rate = 2500000; @@ -292,8 +293,7 @@ static void macb_handle_link_change(struct net_device *dev) spin_unlock_irqrestore(&bp->lock, flags); - if (!IS_ERR(bp->tx_clk)) - macb_set_tx_clk(bp->tx_clk, phydev->speed, dev); + macb_set_tx_clk(bp->tx_clk, phydev->speed, dev); if (status_change) { if (phydev->link) { @@ -357,7 +357,7 @@ static int macb_mii_probe(struct net_device *dev) return 0; } -int macb_mii_init(struct macb *bp) +static int macb_mii_init(struct macb *bp) { struct macb_platform_data *pdata; struct device_node *np; @@ -438,7 +438,6 @@ err_out_free_mdiobus: err_out: return err; } -EXPORT_SYMBOL_GPL(macb_mii_init); static void macb_update_stats(struct macb *bp) { @@ -449,7 +448,7 @@ static void macb_update_stats(struct macb *bp) WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); for(; p < end; p++, reg++) - *p += __raw_readl(reg); + *p += readl_relaxed(reg); } static int macb_halt_tx(struct macb *bp) @@ -1578,6 +1577,7 @@ static u32 macb_dbw(struct macb *bp) static void macb_configure_dma(struct macb *bp) { u32 dmacfg; + u32 tmp, ncr; if (macb_is_gem(bp)) { dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); @@ -1585,7 +1585,24 @@ static void macb_configure_dma(struct macb *bp) if (bp->dma_burst_length) dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); - dmacfg &= ~GEM_BIT(ENDIA); + dmacfg &= ~GEM_BIT(ENDIA_PKT); + + /* Find the CPU endianness by using the loopback bit of net_ctrl + * register. save it first. When the CPU is in big endian we + * need to program swaped mode for management descriptor access. + */ + ncr = macb_readl(bp, NCR); + __raw_writel(MACB_BIT(LLB), bp->regs + MACB_NCR); + tmp = __raw_readl(bp->regs + MACB_NCR); + + if (tmp == MACB_BIT(LLB)) + dmacfg &= ~GEM_BIT(ENDIA_DESC); + else + dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */ + + /* Restore net_ctrl */ + macb_writel(bp, NCR, ncr); + if (bp->dev->features & NETIF_F_HW_CSUM) dmacfg |= GEM_BIT(TXCOEN); else @@ -1723,7 +1740,7 @@ static void macb_sethashtable(struct net_device *dev) /* * Enable/Disable promiscuous and multicast modes. 
*/ -void macb_set_rx_mode(struct net_device *dev) +static void macb_set_rx_mode(struct net_device *dev) { unsigned long cfg; struct macb *bp = netdev_priv(dev); @@ -1764,7 +1781,6 @@ void macb_set_rx_mode(struct net_device *dev) macb_writel(bp, NCFGR, cfg); } -EXPORT_SYMBOL_GPL(macb_set_rx_mode); static int macb_open(struct net_device *dev) { @@ -1832,14 +1848,14 @@ static void gem_update_stats(struct macb *bp) for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { u32 offset = gem_statistics[i].offset; - u64 val = __raw_readl(bp->regs + offset); + u64 val = readl_relaxed(bp->regs + offset); bp->ethtool_stats[i] += val; *p += val; if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) { /* Add GEM_OCTTXH, GEM_OCTRXH */ - val = __raw_readl(bp->regs + offset + 4); + val = readl_relaxed(bp->regs + offset + 4); bp->ethtool_stats[i] += ((u64)val) << 32; *(++p) += val; } @@ -1917,7 +1933,7 @@ static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) } } -struct net_device_stats *macb_get_stats(struct net_device *dev) +static struct net_device_stats *macb_get_stats(struct net_device *dev) { struct macb *bp = netdev_priv(dev); struct net_device_stats *nstat = &bp->stats; @@ -1963,7 +1979,6 @@ struct net_device_stats *macb_get_stats(struct net_device *dev) return nstat; } -EXPORT_SYMBOL_GPL(macb_get_stats); static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { @@ -2025,7 +2040,7 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, } } -const struct ethtool_ops macb_ethtool_ops = { +static const struct ethtool_ops macb_ethtool_ops = { .get_settings = macb_get_settings, .set_settings = macb_set_settings, .get_regs_len = macb_get_regs_len, @@ -2033,7 +2048,6 @@ const struct ethtool_ops macb_ethtool_ops = { .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, }; -EXPORT_SYMBOL_GPL(macb_ethtool_ops); static const struct ethtool_ops gem_ethtool_ops = { .get_settings = macb_get_settings, @@ -2047,7 +2061,7 @@ static const struct ethtool_ops gem_ethtool_ops = { .get_sset_count = gem_get_sset_count, }; -int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct macb *bp = netdev_priv(dev); struct phy_device *phydev = bp->phy_dev; @@ -2060,7 +2074,6 @@ int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return phy_mii_ioctl(phydev, rq, cmd); } -EXPORT_SYMBOL_GPL(macb_ioctl); static int macb_set_features(struct net_device *netdev, netdev_features_t features) @@ -2112,44 +2125,15 @@ static const struct net_device_ops macb_netdev_ops = { .ndo_set_features = macb_set_features, }; -#if defined(CONFIG_OF) -static const struct macb_config pc302gem_config = { - .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, - .dma_burst_length = 16, -}; - -static const struct macb_config sama5d3_config = { - .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, - .dma_burst_length = 16, -}; - -static const struct macb_config sama5d4_config = { - .caps = 0, - .dma_burst_length = 4, -}; - -static const struct of_device_id macb_dt_ids[] = { - { .compatible = "cdns,at32ap7000-macb" }, - { .compatible = "cdns,at91sam9260-macb" }, - { .compatible = "cdns,macb" }, - { .compatible = "cdns,pc302-gem", .data = &pc302gem_config }, - { .compatible = "cdns,gem", .data = &pc302gem_config }, - { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, - { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, - { /* sentinel */ } 
-}; -MODULE_DEVICE_TABLE(of, macb_dt_ids); -#endif - /* * Configure peripheral capacities according to device tree * and integration options used */ static void macb_configure_caps(struct macb *bp) { - u32 dcfg; const struct of_device_id *match; const struct macb_config *config; + u32 dcfg; if (bp->pdev->dev.of_node) { match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node); @@ -2191,12 +2175,14 @@ static void macb_probe_queues(void __iomem *mem, *num_queues = 1; /* is it macb or gem ? */ - mid = __raw_readl(mem + MACB_MID); - if (MACB_BFEXT(IDNUM, mid) != 0x2) + mid = readl_relaxed(mem + MACB_MID); + + if (MACB_BFEXT(IDNUM, mid) < 0x2) return; /* bit 0 is never set but queue 0 always exists */ - *queue_mask = __raw_readl(mem + GEM_DCFG6) & 0xff; + *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff; + *queue_mask |= 0x1; for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q) @@ -2204,93 +2190,57 @@ static void macb_probe_queues(void __iomem *mem, (*num_queues)++; } -static int macb_probe(struct platform_device *pdev) +static int macb_init(struct platform_device *pdev) { - struct macb_platform_data *pdata; - struct resource *regs; - struct net_device *dev; - struct macb *bp; - struct macb_queue *queue; - struct phy_device *phydev; - u32 config; - int err = -ENXIO; - const char *mac; - void __iomem *mem; + struct net_device *dev = platform_get_drvdata(pdev); unsigned int hw_q, queue_mask, q, num_queues; - struct clk *pclk, *hclk, *tx_clk; - - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!regs) { - dev_err(&pdev->dev, "no mmio resource defined\n"); - goto err_out; - } + struct macb *bp = netdev_priv(dev); + struct macb_queue *queue; + int err; + u32 val; - pclk = devm_clk_get(&pdev->dev, "pclk"); - if (IS_ERR(pclk)) { - err = PTR_ERR(pclk); + bp->pclk = devm_clk_get(&pdev->dev, "pclk"); + if (IS_ERR(bp->pclk)) { + err = PTR_ERR(bp->pclk); dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err); - goto err_out; + return err; } - hclk = devm_clk_get(&pdev->dev, "hclk"); - if (IS_ERR(hclk)) { - err = PTR_ERR(hclk); + bp->hclk = devm_clk_get(&pdev->dev, "hclk"); + if (IS_ERR(bp->hclk)) { + err = PTR_ERR(bp->hclk); dev_err(&pdev->dev, "failed to get hclk (%u)\n", err); - goto err_out; + return err; } - tx_clk = devm_clk_get(&pdev->dev, "tx_clk"); + bp->tx_clk = devm_clk_get(&pdev->dev, "tx_clk"); + if (IS_ERR(bp->tx_clk)) + bp->tx_clk = NULL; - err = clk_prepare_enable(pclk); + err = clk_prepare_enable(bp->pclk); if (err) { dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); - goto err_out; + return err; } - err = clk_prepare_enable(hclk); + err = clk_prepare_enable(bp->hclk); if (err) { dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err); - goto err_out_disable_pclk; - } - - if (!IS_ERR(tx_clk)) { - err = clk_prepare_enable(tx_clk); - if (err) { - dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", - err); - goto err_out_disable_hclk; - } + goto err_disable_pclk; } - err = -ENOMEM; - mem = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); - if (!mem) { - dev_err(&pdev->dev, "failed to map registers, aborting.\n"); - goto err_out_disable_clocks; + err = clk_prepare_enable(bp->tx_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); + goto err_disable_hclk; } - macb_probe_queues(mem, &queue_mask, &num_queues); - dev = alloc_etherdev_mq(sizeof(*bp), num_queues); - if (!dev) - goto err_out_disable_clocks; - - SET_NETDEV_DEV(dev, &pdev->dev); - - bp = netdev_priv(dev); - bp->pdev = pdev; - bp->dev = dev; - bp->regs = mem; - bp->num_queues 
= num_queues; - bp->pclk = pclk; - bp->hclk = hclk; - bp->tx_clk = tx_clk; - - spin_lock_init(&bp->lock); - /* set the queue register mapping once for all: queue0 has a special * register mapping but we don't want to test the queue index then * compute the corresponding register offset at run time. */ + macb_probe_queues(bp->regs, &queue_mask, &num_queues); + for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) { if (!(queue_mask & (1 << hw_q))) continue; @@ -2319,27 +2269,21 @@ static int macb_probe(struct platform_device *pdev) */ queue->irq = platform_get_irq(pdev, q); err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt, - 0, dev->name, queue); + IRQF_SHARED, dev->name, queue); if (err) { dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n", queue->irq, err); - goto err_out_free_netdev; + goto err_disable_tx_clk; } INIT_WORK(&queue->tx_error_task, macb_tx_error_task); q++; } - dev->irq = bp->queues[0].irq; dev->netdev_ops = &macb_netdev_ops; netif_napi_add(dev, &bp->napi, macb_poll, 64); - dev->base_addr = regs->start; - - /* setup capacities */ - macb_configure_caps(bp); - /* setup appropriated routines according to adapter type */ if (macb_is_gem(bp)) { bp->max_tx_length = GEM_MAX_TX_LEN; @@ -2366,18 +2310,439 @@ static int macb_probe(struct platform_device *pdev) dev->hw_features &= ~NETIF_F_SG; dev->features = dev->hw_features; + val = 0; + if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) + val = GEM_BIT(RGMII); + else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && + (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) + val = MACB_BIT(RMII); + else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) + val = MACB_BIT(MII); + + if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) + val |= MACB_BIT(CLKEN); + + macb_or_gem_writel(bp, USRIO, val); + + /* setup capacities */ + macb_configure_caps(bp); + /* Set MII management clock divider */ - config = macb_mdc_clk_div(bp); - config |= macb_dbw(bp); - macb_writel(bp, NCFGR, config); + val = macb_mdc_clk_div(bp); + val |= macb_dbw(bp); + macb_writel(bp, NCFGR, val); + + return 0; + +err_disable_tx_clk: + clk_disable_unprepare(bp->tx_clk); + +err_disable_hclk: + clk_disable_unprepare(bp->hclk); + +err_disable_pclk: + clk_disable_unprepare(bp->pclk); + + return err; +} + +#if defined(CONFIG_OF) +/* 1518 rounded up */ +#define AT91ETHER_MAX_RBUFF_SZ 0x600 +/* max number of receive buffers */ +#define AT91ETHER_MAX_RX_DESCR 9 + +/* Initialize and start the Receiver and Transmit subsystems */ +static int at91ether_start(struct net_device *dev) +{ + struct macb *lp = netdev_priv(dev); + dma_addr_t addr; + u32 ctl; + int i; + + lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev, + (AT91ETHER_MAX_RX_DESCR * + sizeof(struct macb_dma_desc)), + &lp->rx_ring_dma, GFP_KERNEL); + if (!lp->rx_ring) + return -ENOMEM; + + lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, + AT91ETHER_MAX_RX_DESCR * + AT91ETHER_MAX_RBUFF_SZ, + &lp->rx_buffers_dma, GFP_KERNEL); + if (!lp->rx_buffers) { + dma_free_coherent(&lp->pdev->dev, + AT91ETHER_MAX_RX_DESCR * + sizeof(struct macb_dma_desc), + lp->rx_ring, lp->rx_ring_dma); + lp->rx_ring = NULL; + return -ENOMEM; + } + + addr = lp->rx_buffers_dma; + for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { + lp->rx_ring[i].addr = addr; + lp->rx_ring[i].ctrl = 0; + addr += AT91ETHER_MAX_RBUFF_SZ; + } + + /* Set the Wrap bit on the last descriptor */ + lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP); + + /* Reset buffer index */ + lp->rx_tail = 0; + + /* Program address of descriptor list in Rx Buffer Queue 
register */ + macb_writel(lp, RBQP, lp->rx_ring_dma); + + /* Enable Receive and Transmit */ + ctl = macb_readl(lp, NCR); + macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE)); + + return 0; +} + +/* Open the ethernet interface */ +static int at91ether_open(struct net_device *dev) +{ + struct macb *lp = netdev_priv(dev); + u32 ctl; + int ret; + + /* Clear internal statistics */ + ctl = macb_readl(lp, NCR); + macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT)); + + macb_set_hwaddr(lp); + + ret = at91ether_start(dev); + if (ret) + return ret; + + /* Enable MAC interrupts */ + macb_writel(lp, IER, MACB_BIT(RCOMP) | + MACB_BIT(RXUBR) | + MACB_BIT(ISR_TUND) | + MACB_BIT(ISR_RLE) | + MACB_BIT(TCOMP) | + MACB_BIT(ISR_ROVR) | + MACB_BIT(HRESP)); + + /* schedule a link state check */ + phy_start(lp->phy_dev); + + netif_start_queue(dev); + + return 0; +} + +/* Close the interface */ +static int at91ether_close(struct net_device *dev) +{ + struct macb *lp = netdev_priv(dev); + u32 ctl; + + /* Disable Receiver and Transmitter */ + ctl = macb_readl(lp, NCR); + macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); + + /* Disable MAC interrupts */ + macb_writel(lp, IDR, MACB_BIT(RCOMP) | + MACB_BIT(RXUBR) | + MACB_BIT(ISR_TUND) | + MACB_BIT(ISR_RLE) | + MACB_BIT(TCOMP) | + MACB_BIT(ISR_ROVR) | + MACB_BIT(HRESP)); + + netif_stop_queue(dev); + + dma_free_coherent(&lp->pdev->dev, + AT91ETHER_MAX_RX_DESCR * + sizeof(struct macb_dma_desc), + lp->rx_ring, lp->rx_ring_dma); + lp->rx_ring = NULL; + + dma_free_coherent(&lp->pdev->dev, + AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ, + lp->rx_buffers, lp->rx_buffers_dma); + lp->rx_buffers = NULL; + + return 0; +} + +/* Transmit packet */ +static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct macb *lp = netdev_priv(dev); + + if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) { + netif_stop_queue(dev); + + /* Store packet information (to free when Tx completed) */ + lp->skb = skb; + lp->skb_length = skb->len; + lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, + DMA_TO_DEVICE); + + /* Set address of the data in the Transmit Address register */ + macb_writel(lp, TAR, lp->skb_physaddr); + /* Set length of the packet in the Transmit Control register */ + macb_writel(lp, TCR, skb->len); - mac = of_get_mac_address(pdev->dev.of_node); + } else { + netdev_err(dev, "%s called, but device is busy!\n", __func__); + return NETDEV_TX_BUSY; + } + + return NETDEV_TX_OK; +} + +/* Extract received frame from buffer descriptors and sent to upper layers. 
+ * (Called from interrupt context) + */ +static void at91ether_rx(struct net_device *dev) +{ + struct macb *lp = netdev_priv(dev); + unsigned char *p_recv; + struct sk_buff *skb; + unsigned int pktlen; + + while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) { + p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ; + pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl); + skb = netdev_alloc_skb(dev, pktlen + 2); + if (skb) { + skb_reserve(skb, 2); + memcpy(skb_put(skb, pktlen), p_recv, pktlen); + + skb->protocol = eth_type_trans(skb, dev); + lp->stats.rx_packets++; + lp->stats.rx_bytes += pktlen; + netif_rx(skb); + } else { + lp->stats.rx_dropped++; + } + + if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH)) + lp->stats.multicast++; + + /* reset ownership bit */ + lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED); + + /* wrap after last buffer */ + if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) + lp->rx_tail = 0; + else + lp->rx_tail++; + } +} + +/* MAC interrupt handler */ +static irqreturn_t at91ether_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct macb *lp = netdev_priv(dev); + u32 intstatus, ctl; + + /* MAC Interrupt Status register indicates what interrupts are pending. + * It is automatically cleared once read. + */ + intstatus = macb_readl(lp, ISR); + + /* Receive complete */ + if (intstatus & MACB_BIT(RCOMP)) + at91ether_rx(dev); + + /* Transmit complete */ + if (intstatus & MACB_BIT(TCOMP)) { + /* The TCOM bit is set even if the transmission failed */ + if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE))) + lp->stats.tx_errors++; + + if (lp->skb) { + dev_kfree_skb_irq(lp->skb); + lp->skb = NULL; + dma_unmap_single(NULL, lp->skb_physaddr, + lp->skb_length, DMA_TO_DEVICE); + lp->stats.tx_packets++; + lp->stats.tx_bytes += lp->skb_length; + } + netif_wake_queue(dev); + } + + /* Work-around for EMAC Errata section 41.3.1 */ + if (intstatus & MACB_BIT(RXUBR)) { + ctl = macb_readl(lp, NCR); + macb_writel(lp, NCR, ctl & ~MACB_BIT(RE)); + macb_writel(lp, NCR, ctl | MACB_BIT(RE)); + } + + if (intstatus & MACB_BIT(ISR_ROVR)) + netdev_err(dev, "ROVR error\n"); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void at91ether_poll_controller(struct net_device *dev) +{ + unsigned long flags; + + local_irq_save(flags); + at91ether_interrupt(dev->irq, dev); + local_irq_restore(flags); +} +#endif + +static const struct net_device_ops at91ether_netdev_ops = { + .ndo_open = at91ether_open, + .ndo_stop = at91ether_close, + .ndo_start_xmit = at91ether_start_xmit, + .ndo_get_stats = macb_get_stats, + .ndo_set_rx_mode = macb_set_rx_mode, + .ndo_set_mac_address = eth_mac_addr, + .ndo_do_ioctl = macb_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = at91ether_poll_controller, +#endif +}; + +static int at91ether_init(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct macb *bp = netdev_priv(dev); + int err; + u32 reg; + + bp->pclk = devm_clk_get(&pdev->dev, "ether_clk"); + if (IS_ERR(bp->pclk)) + return PTR_ERR(bp->pclk); + + err = clk_prepare_enable(bp->pclk); + if (err) { + dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); + return err; + } + + dev->netdev_ops = &at91ether_netdev_ops; + dev->ethtool_ops = &macb_ethtool_ops; + + err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, + 0, dev->name, dev); + if (err) + goto err_disable_clk; + + macb_writel(bp, 
NCR, 0); + + reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG); + if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) + reg |= MACB_BIT(RM9200_RMII); + + macb_writel(bp, NCFGR, reg); + + return 0; + +err_disable_clk: + clk_disable_unprepare(bp->pclk); + + return err; +} + +static const struct macb_config at91sam9260_config = { + .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII, + .init = macb_init, +}; + +static const struct macb_config pc302gem_config = { + .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, + .dma_burst_length = 16, + .init = macb_init, +}; + +static const struct macb_config sama5d3_config = { + .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, + .dma_burst_length = 16, + .init = macb_init, +}; + +static const struct macb_config sama5d4_config = { + .caps = 0, + .dma_burst_length = 4, + .init = macb_init, +}; + +static const struct macb_config emac_config = { + .init = at91ether_init, +}; + +static const struct of_device_id macb_dt_ids[] = { + { .compatible = "cdns,at32ap7000-macb" }, + { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config }, + { .compatible = "cdns,macb" }, + { .compatible = "cdns,pc302-gem", .data = &pc302gem_config }, + { .compatible = "cdns,gem", .data = &pc302gem_config }, + { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, + { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, + { .compatible = "cdns,at91rm9200-emac", .data = &emac_config }, + { .compatible = "cdns,emac", .data = &emac_config }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, macb_dt_ids); +#endif /* CONFIG_OF */ + +static int macb_probe(struct platform_device *pdev) +{ + int (*init)(struct platform_device *) = macb_init; + struct device_node *np = pdev->dev.of_node; + const struct macb_config *macb_config = NULL; + unsigned int queue_mask, num_queues; + struct macb_platform_data *pdata; + struct phy_device *phydev; + struct net_device *dev; + struct resource *regs; + void __iomem *mem; + const char *mac; + struct macb *bp; + int err; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mem = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(mem)) + return PTR_ERR(mem); + + macb_probe_queues(mem, &queue_mask, &num_queues); + dev = alloc_etherdev_mq(sizeof(*bp), num_queues); + if (!dev) + return -ENOMEM; + + dev->base_addr = regs->start; + + SET_NETDEV_DEV(dev, &pdev->dev); + + bp = netdev_priv(dev); + bp->pdev = pdev; + bp->dev = dev; + bp->regs = mem; + bp->num_queues = num_queues; + spin_lock_init(&bp->lock); + + platform_set_drvdata(pdev, dev); + + dev->irq = platform_get_irq(pdev, 0); + if (dev->irq < 0) + return dev->irq; + + mac = of_get_mac_address(np); if (mac) memcpy(bp->dev->dev_addr, mac, ETH_ALEN); else macb_get_hwaddr(bp); - err = of_get_phy_mode(pdev->dev.of_node); + err = of_get_phy_mode(np); if (err < 0) { pdata = dev_get_platdata(&pdev->dev); if (pdata && pdata->is_rmii) @@ -2388,34 +2753,35 @@ static int macb_probe(struct platform_device *pdev) bp->phy_interface = err; } - if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) - macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII)); - else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) -#if defined(CONFIG_ARCH_AT91) - macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) | - MACB_BIT(CLKEN))); -#else - macb_or_gem_writel(bp, USRIO, 0); -#endif - else -#if defined(CONFIG_ARCH_AT91) - macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN)); -#else - macb_or_gem_writel(bp, USRIO, MACB_BIT(MII)); -#endif + if (np) { + const struct of_device_id 
*match; + + match = of_match_node(macb_dt_ids, np); + if (match) + macb_config = match->data; + } + + if (macb_config) { + bp->caps = macb_config->caps; + bp->dma_burst_length = macb_config->dma_burst_length; + init = macb_config->init; + } + + /* IP specific init */ + err = init(pdev); + if (err) + goto err_out_free_netdev; err = register_netdev(dev); if (err) { dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); - goto err_out_free_netdev; + goto err_disable_clocks; } err = macb_mii_init(bp); if (err) goto err_out_unregister_netdev; - platform_set_drvdata(pdev, dev); - netif_carrier_off(dev); netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n", @@ -2430,16 +2796,15 @@ static int macb_probe(struct platform_device *pdev) err_out_unregister_netdev: unregister_netdev(dev); + +err_disable_clocks: + clk_disable_unprepare(bp->tx_clk); + clk_disable_unprepare(bp->hclk); + clk_disable_unprepare(bp->pclk); + err_out_free_netdev: free_netdev(dev); -err_out_disable_clocks: - if (!IS_ERR(tx_clk)) - clk_disable_unprepare(tx_clk); -err_out_disable_hclk: - clk_disable_unprepare(hclk); -err_out_disable_pclk: - clk_disable_unprepare(pclk); -err_out: + return err; } @@ -2458,8 +2823,7 @@ static int macb_remove(struct platform_device *pdev) kfree(bp->mii_bus->irq); mdiobus_free(bp->mii_bus); unregister_netdev(dev); - if (!IS_ERR(bp->tx_clk)) - clk_disable_unprepare(bp->tx_clk); + clk_disable_unprepare(bp->tx_clk); clk_disable_unprepare(bp->hclk); clk_disable_unprepare(bp->pclk); free_netdev(dev); @@ -2477,8 +2841,7 @@ static int __maybe_unused macb_suspend(struct device *dev) netif_carrier_off(netdev); netif_device_detach(netdev); - if (!IS_ERR(bp->tx_clk)) - clk_disable_unprepare(bp->tx_clk); + clk_disable_unprepare(bp->tx_clk); clk_disable_unprepare(bp->hclk); clk_disable_unprepare(bp->pclk); @@ -2493,8 +2856,7 @@ static int __maybe_unused macb_resume(struct device *dev) clk_prepare_enable(bp->pclk); clk_prepare_enable(bp->hclk); - if (!IS_ERR(bp->tx_clk)) - clk_prepare_enable(bp->tx_clk); + clk_prepare_enable(bp->tx_clk); netif_device_attach(netdev); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index ff85619..bc6e35c 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -229,8 +229,10 @@ /* Bitfields in DMACFG. 
*/ #define GEM_FBLDO_OFFSET 0 /* fixed burst length for DMA */ #define GEM_FBLDO_SIZE 5 -#define GEM_ENDIA_OFFSET 7 /* endian swap mode for packet data access */ -#define GEM_ENDIA_SIZE 1 +#define GEM_ENDIA_DESC_OFFSET 6 /* endian swap mode for management descriptor access */ +#define GEM_ENDIA_DESC_SIZE 1 +#define GEM_ENDIA_PKT_OFFSET 7 /* endian swap mode for packet data access */ +#define GEM_ENDIA_PKT_SIZE 1 #define GEM_RXBMS_OFFSET 8 /* RX packet buffer memory size select */ #define GEM_RXBMS_SIZE 2 #define GEM_TXPBMS_OFFSET 10 /* TX packet buffer memory size select */ @@ -389,6 +391,8 @@ /* Capability mask bits */ #define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001 +#define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002 +#define MACB_CAPS_USRIO_DEFAULT_IS_MII 0x00000004 #define MACB_CAPS_FIFO_MODE 0x10000000 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 #define MACB_CAPS_SG_DISABLED 0x40000000 @@ -423,17 +427,17 @@ /* Register access macros */ #define macb_readl(port,reg) \ - __raw_readl((port)->regs + MACB_##reg) + readl_relaxed((port)->regs + MACB_##reg) #define macb_writel(port,reg,value) \ - __raw_writel((value), (port)->regs + MACB_##reg) + writel_relaxed((value), (port)->regs + MACB_##reg) #define gem_readl(port, reg) \ - __raw_readl((port)->regs + GEM_##reg) + readl_relaxed((port)->regs + GEM_##reg) #define gem_writel(port, reg, value) \ - __raw_writel((value), (port)->regs + GEM_##reg) + writel_relaxed((value), (port)->regs + GEM_##reg) #define queue_readl(queue, reg) \ - __raw_readl((queue)->bp->regs + (queue)->reg) + readl_relaxed((queue)->bp->regs + (queue)->reg) #define queue_writel(queue, reg, value) \ - __raw_writel((value), (queue)->bp->regs + (queue)->reg) + writel_relaxed((value), (queue)->bp->regs + (queue)->reg) /* Conditional GEM/MACB macros. These perform the operation to the correct * register dependent on whether the device is a GEM or a MACB. 
For registers @@ -750,6 +754,7 @@ struct macb_or_gem_ops { struct macb_config { u32 caps; unsigned int dma_burst_length; + int (*init)(struct platform_device *pdev); }; struct macb_queue { @@ -820,15 +825,6 @@ struct macb { u64 ethtool_stats[GEM_STATS_LEN]; }; -extern const struct ethtool_ops macb_ethtool_ops; - -int macb_mii_init(struct macb *bp); -int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); -struct net_device_stats *macb_get_stats(struct net_device *dev); -void macb_set_rx_mode(struct net_device *dev); -void macb_set_hwaddr(struct macb *bp); -void macb_get_hwaddr(struct macb *bp); - static inline bool macb_is_gem(struct macb *bp) { return !!(bp->caps & MACB_CAPS_MACB_IS_GEM); diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index 47bfea2..63efa0d 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -47,9 +47,9 @@ #define XGMAC_REMOTE_WAKE 0x00000700 /* Remote Wake-Up Frm Filter */ #define XGMAC_PMT 0x00000704 /* PMT Control and Status */ #define XGMAC_MMC_CTRL 0x00000800 /* XGMAC MMC Control */ -#define XGMAC_MMC_INTR_RX 0x00000804 /* Recieve Interrupt */ +#define XGMAC_MMC_INTR_RX 0x00000804 /* Receive Interrupt */ #define XGMAC_MMC_INTR_TX 0x00000808 /* Transmit Interrupt */ -#define XGMAC_MMC_INTR_MASK_RX 0x0000080c /* Recieve Interrupt Mask */ +#define XGMAC_MMC_INTR_MASK_RX 0x0000080c /* Receive Interrupt Mask */ #define XGMAC_MMC_INTR_MASK_TX 0x00000810 /* Transmit Interrupt Mask */ /* Hardware TX Statistics Counters */ @@ -153,7 +153,7 @@ #define XGMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ #define XGMAC_FLOW_CTRL_PT_SHIFT 16 #define XGMAC_FLOW_CTRL_DZQP 0x00000080 /* Disable Zero-Quanta Phase */ -#define XGMAC_FLOW_CTRL_PLT 0x00000020 /* Pause Low Threshhold */ +#define XGMAC_FLOW_CTRL_PLT 0x00000020 /* Pause Low Threshold */ #define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030 /* PLT MASK */ #define XGMAC_FLOW_CTRL_UP 0x00000008 /* Unicast Pause Frame Detect */ #define XGMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */ @@ -254,18 +254,18 @@ /* XGMAC Operation Mode Register */ #define XGMAC_OMR_TSF 0x00200000 /* TX FIFO Store and Forward */ #define XGMAC_OMR_FTF 0x00100000 /* Flush Transmit FIFO */ -#define XGMAC_OMR_TTC 0x00020000 /* Transmit Threshhold Ctrl */ +#define XGMAC_OMR_TTC 0x00020000 /* Transmit Threshold Ctrl */ #define XGMAC_OMR_TTC_MASK 0x00030000 -#define XGMAC_OMR_RFD 0x00006000 /* FC Deactivation Threshhold */ -#define XGMAC_OMR_RFD_MASK 0x00007000 /* FC Deact Threshhold MASK */ -#define XGMAC_OMR_RFA 0x00000600 /* FC Activation Threshhold */ -#define XGMAC_OMR_RFA_MASK 0x00000E00 /* FC Act Threshhold MASK */ +#define XGMAC_OMR_RFD 0x00006000 /* FC Deactivation Threshold */ +#define XGMAC_OMR_RFD_MASK 0x00007000 /* FC Deact Threshold MASK */ +#define XGMAC_OMR_RFA 0x00000600 /* FC Activation Threshold */ +#define XGMAC_OMR_RFA_MASK 0x00000E00 /* FC Act Threshold MASK */ #define XGMAC_OMR_EFC 0x00000100 /* Enable Hardware FC */ #define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */ #define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */ #define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */ -#define XGMAC_OMR_RTC_256 0x00000018 /* RX Threshhold Ctrl */ -#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshhold Ctrl MASK */ +#define XGMAC_OMR_RTC_256 0x00000018 /* RX Threshold Ctrl */ +#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshold Ctrl MASK */ /* XGMAC HW Features Register */ #define DMA_HW_FEAT_TXCOESEL 0x00010000 /* TX 
Checksum offload */ diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c index 184a8d5..a22768c 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@ -840,7 +840,7 @@ static int flash_wait_op(struct adapter *adapter, int attempts, int delay) * Read the specified number of 32-bit words from the serial flash. * If @byte_oriented is set the read data is stored as a byte array * (i.e., big-endian), otherwise as 32-bit words in the platform's - * natural endianess. + * natural endianness. */ static int t3_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords, u32 *data, int byte_oriented) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 97842d0..4555634 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -369,7 +369,7 @@ enum { MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */ MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */ MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */ - MAX_RDMA_CIQS = NCHAN, /* # of RDMA concentrator IQs */ + MAX_RDMA_CIQS = 32, /* # of RDMA concentrator IQs */ MAX_ISCSI_QUEUES = NCHAN, /* # of streaming iSCSI Rx queues */ }; @@ -599,8 +599,8 @@ struct sge { u16 rdmaqs; /* # of available RDMA Rx queues */ u16 rdmaciqs; /* # of available RDMA concentrator IQs */ u16 ofld_rxq[MAX_OFLD_QSETS]; - u16 rdma_rxq[NCHAN]; - u16 rdma_ciq[NCHAN]; + u16 rdma_rxq[MAX_RDMA_QUEUES]; + u16 rdma_ciq[MAX_RDMA_CIQS]; u16 timer_val[SGE_NTIMERS]; u8 counter_val[SGE_NCOUNTERS]; u32 fl_pg_order; /* large page allocation size */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 78854ce..0918c16 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -1769,6 +1769,8 @@ do { \ int n = min(4, adap->sge.rdmaqs - 4 * rdma_idx); S("QType:", "RDMA-CPL"); + S("Interface:", + rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A"); R("RspQ ID:", rspq.abs_id); R("RspQ size:", rspq.size); R("RspQE size:", rspq.iqe_len); @@ -1788,6 +1790,8 @@ do { \ int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx); S("QType:", "RDMA-CIQ"); + S("Interface:", + rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A"); R("RspQ ID:", rspq.abs_id); R("RspQ size:", rspq.size); R("RspQE size:", rspq.iqe_len); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index a22cf93..4af8a9f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -957,6 +957,28 @@ static void enable_rx(struct adapter *adap) } } +static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q, + unsigned int nq, unsigned int per_chan, int msi_idx, + u16 *ids) +{ + int i, err; + + for (i = 0; i < nq; i++, q++) { + if (msi_idx > 0) + msi_idx++; + err = t4_sge_alloc_rxq(adap, &q->rspq, false, + adap->port[i / per_chan], + msi_idx, q->fl.size ? 
&q->fl : NULL, + uldrx_handler); + if (err) + return err; + memset(&q->stats, 0, sizeof(q->stats)); + if (ids) + ids[i] = q->rspq.abs_id; + } + return 0; +} + /** * setup_sge_queues - configure SGE Tx/Rx/response queues * @adap: the adapter @@ -1018,51 +1040,27 @@ freeout: t4_free_sge_resources(adap); j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */ for_each_ofldrxq(s, i) { - struct sge_ofld_rxq *q = &s->ofldrxq[i]; - struct net_device *dev = adap->port[i / j]; - - if (msi_idx > 0) - msi_idx++; - err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx, - q->fl.size ? &q->fl : NULL, - uldrx_handler); - if (err) - goto freeout; - memset(&q->stats, 0, sizeof(q->stats)); - s->ofld_rxq[i] = q->rspq.abs_id; - err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev, + err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], + adap->port[i / j], s->fw_evtq.cntxt_id); if (err) goto freeout; } - for_each_rdmarxq(s, i) { - struct sge_ofld_rxq *q = &s->rdmarxq[i]; +#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids) do { \ + err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids); \ + if (err) \ + goto freeout; \ + if (msi_idx > 0) \ + msi_idx += nq; \ +} while (0) - if (msi_idx > 0) - msi_idx++; - err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i], - msi_idx, q->fl.size ? &q->fl : NULL, - uldrx_handler); - if (err) - goto freeout; - memset(&q->stats, 0, sizeof(q->stats)); - s->rdma_rxq[i] = q->rspq.abs_id; - } + ALLOC_OFLD_RXQS(s->ofldrxq, s->ofldqsets, j, s->ofld_rxq); + ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq); + j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */ + ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq); - for_each_rdmaciq(s, i) { - struct sge_ofld_rxq *q = &s->rdmaciq[i]; - - if (msi_idx > 0) - msi_idx++; - err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i], - msi_idx, q->fl.size ? &q->fl : NULL, - uldrx_handler); - if (err) - goto freeout; - memset(&q->stats, 0, sizeof(q->stats)); - s->rdma_ciq[i] = q->rspq.abs_id; - } +#undef ALLOC_OFLD_RXQS for_each_port(adap, i) { /* @@ -5368,7 +5366,7 @@ static int adap_init0(struct adapter *adap) adap->tids.stid_base = val[1]; adap->tids.nstids = val[2] - val[1] + 1; /* - * Setup server filter region. Divide the availble filter + * Setup server filter region. Divide the available filter * region into two parts. Regular filters get 1/3rd and server * filters get 2/3rd part. This is only enabled if workarond * path is enabled. @@ -5705,7 +5703,16 @@ static void cfg_queues(struct adapter *adap) s->ofldqsets = adap->params.nports; /* For RDMA one Rx queue per channel suffices */ s->rdmaqs = adap->params.nports; - s->rdmaciqs = adap->params.nports; + /* Try and allow at least 1 CIQ per cpu rounding down + * to the number of ports, with a minimum of 1 per port. + * A 2 port card in a 6 cpu system: 6 CIQs, 3 / port. + * A 4 port card in a 6 cpu system: 4 CIQs, 1 / port. + * A 4 port card in a 2 cpu system: 4 CIQs, 1 / port. 
+ */ + s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus()); + s->rdmaciqs = (s->rdmaciqs / adap->params.nports) * + adap->params.nports; + s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports); } for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { @@ -5791,12 +5798,17 @@ static void reduce_ethqs(struct adapter *adap, int n) static int enable_msix(struct adapter *adap) { int ofld_need = 0; - int i, want, need; + int i, want, need, allocated; struct sge *s = &adap->sge; unsigned int nchan = adap->params.nports; - struct msix_entry entries[MAX_INGQ + 1]; + struct msix_entry *entries; - for (i = 0; i < ARRAY_SIZE(entries); ++i) + entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1), + GFP_KERNEL); + if (!entries) + return -ENOMEM; + + for (i = 0; i < MAX_INGQ + 1; ++i) entries[i].entry = i; want = s->max_ethqsets + EXTRA_VECS; @@ -5813,29 +5825,39 @@ static int enable_msix(struct adapter *adap) #else need = adap->params.nports + EXTRA_VECS + ofld_need; #endif - want = pci_enable_msix_range(adap->pdev, entries, need, want); - if (want < 0) - return want; + allocated = pci_enable_msix_range(adap->pdev, entries, need, want); + if (allocated < 0) { + dev_info(adap->pdev_dev, "not enough MSI-X vectors left," + " not using MSI-X\n"); + kfree(entries); + return allocated; + } - /* - * Distribute available vectors to the various queue groups. + /* Distribute available vectors to the various queue groups. * Every group gets its minimum requirement and NIC gets top * priority for leftovers. */ - i = want - EXTRA_VECS - ofld_need; + i = allocated - EXTRA_VECS - ofld_need; if (i < s->max_ethqsets) { s->max_ethqsets = i; if (i < s->ethqsets) reduce_ethqs(adap, i); } if (is_offload(adap)) { - i = want - EXTRA_VECS - s->max_ethqsets; - i -= ofld_need - nchan; + if (allocated < want) { + s->rdmaqs = nchan; + s->rdmaciqs = nchan; + } + + /* leftovers go to OFLD */ + i = allocated - EXTRA_VECS - s->max_ethqsets - + s->rdmaqs - s->rdmaciqs; s->ofldqsets = (i / nchan) * nchan; /* round down */ } - for (i = 0; i < want; ++i) + for (i = 0; i < allocated; ++i) adap->msix_info[i].vec = entries[i].vector; + kfree(entries); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 853c389..1498d07 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -867,7 +867,7 @@ static int flash_wait_op(struct adapter *adapter, int attempts, int delay) * Read the specified number of 32-bit words from the serial flash. * If @byte_oriented is set the read data is stored as a byte array * (i.e., big-endian), otherwise as 32-bit words in the platform's - * natural endianess. + * natural endianness. */ int t4_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords, u32 *data, int byte_oriented) @@ -3558,7 +3558,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, * For the single-MTU buffers in unpacked mode we need to include * space for the SGE Control Packet Shift, 14 byte Ethernet header, * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet - * Padding boundry. All of these are accommodated in the Factory + * Padding boundary. All of these are accommodated in the Factory * Default Firmware Configuration File but we need to adjust it for * this host's cache line size. 
*/ @@ -4529,7 +4529,7 @@ int t4_init_tp_params(struct adapter *adap) PROTOCOL_F); /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID - * represents the presense of an Outer VLAN instead of a VNIC ID. + * represents the presence of an Outer VLAN instead of a VNIC ID. */ if ((adap->params.tp.ingress_config & VNIC_F) == 0) adap->params.tp.vnic_shift = -1; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 9b353a8..d136ca6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -36,7 +36,7 @@ #define _T4FW_INTERFACE_H_ enum fw_retval { - FW_SUCCESS = 0, /* completed sucessfully */ + FW_SUCCESS = 0, /* completed successfully */ FW_EPERM = 1, /* operation not permitted */ FW_ENOENT = 2, /* no such file or directory */ FW_EIO = 5, /* input/output error; hw bad */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 0545f0d..5ba14b3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -875,7 +875,7 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb) * Write Header (incorporated as part of the cpl_tx_pkt_lso and * cpl_tx_pkt structures), followed by either a TX Packet Write CPL * message or, if we're doing a Large Send Offload, an LSO CPL message - * with an embeded TX Packet Write CPL message. + * with an embedded TX Packet Write CPL message. */ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); if (skb_shinfo(skb)->gso_size) diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 1b5506d..c21e2e9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -339,7 +339,7 @@ int t4vf_port_init(struct adapter *adapter, int pidx) * @adapter: the adapter * * Issues a reset command to FW. For a Physical Function this would - * result in the Firmware reseting all of its state. For a Virtual + * result in the Firmware resetting all of its state. For a Virtual * Function this just resets the state associated with the VF. */ int t4vf_fw_reset(struct adapter *adapter) diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index d1c025f..6038304 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -1578,7 +1578,7 @@ out1: #ifndef CONFIG_CS89x0_PLATFORM /* - * This function converts the I/O port addres used by the cs89x0_probe() and + * This function converts the I/O port address used by the cs89x0_probe() and * init_module() functions to the I/O memory address used by the * cs89x0_probe1() function. 
*/ diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index a5179bf..204bd182 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -893,7 +893,7 @@ static int enic_set_vf_port(struct net_device *netdev, int vf, } else { memset(pp, 0, sizeof(*pp)); if (vf == PORT_SELF_VF) - memset(netdev->dev_addr, 0, ETH_ALEN); + eth_zero_addr(netdev->dev_addr); } } else { /* Set flag to indicate that the port assoc/disassoc @@ -903,14 +903,14 @@ static int enic_set_vf_port(struct net_device *netdev, int vf, /* If DISASSOCIATE, clean up all assigned/saved macaddresses */ if (pp->request == PORT_REQUEST_DISASSOCIATE) { - memset(pp->mac_addr, 0, ETH_ALEN); + eth_zero_addr(pp->mac_addr); if (vf == PORT_SELF_VF) - memset(netdev->dev_addr, 0, ETH_ALEN); + eth_zero_addr(netdev->dev_addr); } } if (vf == PORT_SELF_VF) - memset(pp->vf_mac, 0, ETH_ALEN); + eth_zero_addr(pp->vf_mac); return err; } diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c index 50a0077..afd8e78 100644 --- a/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -653,7 +653,7 @@ static void dmfe_init_dm910x(struct DEVICE *dev) if ( !(db->media_mode & DMFE_AUTO) ) db->op_mode = db->media_mode; /* Force Mode */ - /* Initialize Transmit/Receive decriptor and CR3/4 */ + /* Initialize Transmit/Receive descriptor and CR3/4 */ dmfe_descriptor_init(dev); /* Init CR6 to program DM910x operation */ diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c index 1c5916b..2c30c0c 100644 --- a/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -564,7 +564,7 @@ static void uli526x_init(struct net_device *dev) if ( !(db->media_mode & ULI526X_AUTO) ) db->op_mode = db->media_mode; /* Force Mode */ - /* Initialize Transmit/Receive decriptor and CR3/4 */ + /* Initialize Transmit/Receive descriptor and CR3/4 */ uli526x_descriptor_init(dev, ioaddr); /* Init CR6 to program M526X operation */ diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 27de37a..996bbc6 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -87,6 +87,7 @@ #define BE3_MAX_EVT_QS 16 #define BE3_SRIOV_MAX_EVT_QS 8 +#define MAX_RSS_IFACES 15 #define MAX_RX_QS 32 #define MAX_EVT_QS 32 #define MAX_TX_QS 32 @@ -238,10 +239,17 @@ struct be_tx_stats { struct u64_stats_sync sync_compl; }; +/* Structure to hold some data of interest obtained from a TX CQE */ +struct be_tx_compl_info { + u8 status; /* Completion status */ + u16 end_index; /* Completed TXQ Index */ +}; + struct be_tx_obj { u32 db_offset; struct be_queue_info q; struct be_queue_info cq; + struct be_tx_compl_info txcp; /* Remember the skbs that were transmitted */ struct sk_buff *sent_skb_list[TX_Q_LEN]; struct be_tx_stats stats; @@ -369,6 +377,7 @@ enum vf_state { #define BE_FLAGS_VXLAN_OFFLOADS BIT(8) #define BE_FLAGS_SETUP_DONE BIT(9) #define BE_FLAGS_EVT_INCOMPATIBLE_SFP BIT(10) +#define BE_FLAGS_ERR_DETECTION_SCHEDULED BIT(11) #define BE_UC_PMAC_COUNT 30 #define BE_VF_UC_PMAC_COUNT 2 @@ -403,8 +412,11 @@ struct be_resources { u16 max_tx_qs; u16 max_rss_qs; u16 max_rx_qs; + u16 max_cq_count; u16 max_uc_mac; /* Max UC MACs programmable */ u16 max_vlans; /* Number of vlans supported */ + u16 max_iface_count; + u16 max_mcc_count; u16 max_evt_qs; u32 if_cap_flags; u32 vf_if_cap_flags; /* VF if 
capability flags */ @@ -417,6 +429,39 @@ struct rss_info { u8 rss_hkey[RSS_HASH_KEY_LEN]; }; +/* Macros to read/write the 'features' word of be_wrb_params structure. + */ +#define BE_WRB_F_BIT(name) BE_WRB_F_##name##_BIT +#define BE_WRB_F_MASK(name) BIT_MASK(BE_WRB_F_##name##_BIT) + +#define BE_WRB_F_GET(word, name) \ + (((word) & (BE_WRB_F_MASK(name))) >> BE_WRB_F_BIT(name)) + +#define BE_WRB_F_SET(word, name, val) \ + ((word) |= (((val) << BE_WRB_F_BIT(name)) & BE_WRB_F_MASK(name))) + +/* Feature/offload bits */ +enum { + BE_WRB_F_CRC_BIT, /* Ethernet CRC */ + BE_WRB_F_IPCS_BIT, /* IP csum */ + BE_WRB_F_TCPCS_BIT, /* TCP csum */ + BE_WRB_F_UDPCS_BIT, /* UDP csum */ + BE_WRB_F_LSO_BIT, /* LSO */ + BE_WRB_F_LSO6_BIT, /* LSO6 */ + BE_WRB_F_VLAN_BIT, /* VLAN */ + BE_WRB_F_VLAN_SKIP_HW_BIT /* Skip VLAN tag (workaround) */ +}; + +/* The structure below provides a HW-agnostic abstraction of WRB params + * retrieved from a TX skb. This is in turn passed to chip specific routines + * during transmit, to set the corresponding params in the WRB. + */ +struct be_wrb_params { + u32 features; /* Feature bits */ + u16 vlan_tag; /* VLAN tag */ + u16 lso_mss; /* MSS for LSO */ +}; + struct be_adapter { struct pci_dev *pdev; struct net_device *netdev; @@ -447,6 +492,8 @@ struct be_adapter { /* Rx rings */ u16 num_rx_qs; + u16 num_rss_qs; + u16 need_def_rxq; struct be_rx_obj rx_obj[MAX_RX_QS]; u32 big_page_size; /* Compounded page size shared by rx wrbs */ @@ -461,7 +508,7 @@ struct be_adapter { struct delayed_work work; u16 work_counter; - struct delayed_work func_recovery_work; + struct delayed_work be_err_detection_work; u32 flags; u32 cmd_privileges; /* Ethtool knobs and info */ @@ -594,9 +641,8 @@ extern const struct ethtool_ops be_ethtool_ops; for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs; \ i++, rxo++) -/* Skip the default non-rss queue (last one)*/ #define for_all_rss_queues(adapter, rxo, i) \ - for (i = 0, rxo = &adapter->rx_obj[i]; i < (adapter->num_rx_qs - 1);\ + for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rss_qs; \ i++, rxo++) #define for_all_tx_queues(adapter, txo, i) \ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 36916cf..dc27839 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -635,73 +635,16 @@ static int lancer_wait_ready(struct be_adapter *adapter) for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) { sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); if (sliport_status & SLIPORT_STATUS_RDY_MASK) - break; - - msleep(1000); - } - - if (i == SLIPORT_READY_TIMEOUT) - return sliport_status ? 
: -1; - - return 0; -} - -static bool lancer_provisioning_error(struct be_adapter *adapter) -{ - u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0; - - sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); - if (sliport_status & SLIPORT_STATUS_ERR_MASK) { - sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET); - sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET); - - if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 && - sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2) - return true; - } - return false; -} - -int lancer_test_and_set_rdy_state(struct be_adapter *adapter) -{ - int status; - u32 sliport_status, err, reset_needed; - bool resource_error; + return 0; - resource_error = lancer_provisioning_error(adapter); - if (resource_error) - return -EAGAIN; + if (sliport_status & SLIPORT_STATUS_ERR_MASK && + !(sliport_status & SLIPORT_STATUS_RN_MASK)) + return -EIO; - status = lancer_wait_ready(adapter); - if (!status) { - sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); - err = sliport_status & SLIPORT_STATUS_ERR_MASK; - reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK; - if (err && reset_needed) { - iowrite32(SLI_PORT_CONTROL_IP_MASK, - adapter->db + SLIPORT_CONTROL_OFFSET); - - /* check if adapter has corrected the error */ - status = lancer_wait_ready(adapter); - sliport_status = ioread32(adapter->db + - SLIPORT_STATUS_OFFSET); - sliport_status &= (SLIPORT_STATUS_ERR_MASK | - SLIPORT_STATUS_RN_MASK); - if (status || sliport_status) - status = -1; - } else if (err || reset_needed) { - status = -1; - } + msleep(1000); } - /* Stop error recovery if error is not recoverable. - * No resource error is temporary errors and will go away - * when PF provisions resources. - */ - resource_error = lancer_provisioning_error(adapter); - if (resource_error) - status = -EAGAIN; - return status; + return sliport_status ? : -1; } int be_fw_wait_ready(struct be_adapter *adapter) @@ -720,6 +663,10 @@ int be_fw_wait_ready(struct be_adapter *adapter) } do { + /* There's no means to poll POST state on BE2/3 VFs */ + if (BEx_chip(adapter) && be_virtfn(adapter)) + return 0; + stage = be_POST_stage_get(adapter); if (stage == POST_STAGE_ARMFW_RDY) return 0; @@ -734,7 +681,7 @@ int be_fw_wait_ready(struct be_adapter *adapter) err: dev_err(dev, "POST timeout; stage=%#x\n", stage); - return -1; + return -ETIMEDOUT; } static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) @@ -2126,16 +2073,12 @@ int be_cmd_reset_function(struct be_adapter *adapter) int status; if (lancer_chip(adapter)) { + iowrite32(SLI_PORT_CONTROL_IP_MASK, + adapter->db + SLIPORT_CONTROL_OFFSET); status = lancer_wait_ready(adapter); - if (!status) { - iowrite32(SLI_PORT_CONTROL_IP_MASK, - adapter->db + SLIPORT_CONTROL_OFFSET); - status = lancer_test_and_set_rdy_state(adapter); - } - if (status) { + if (status) dev_err(&adapter->pdev->dev, "Adapter in non recoverable error\n"); - } return status; } @@ -3078,7 +3021,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, mac_count = resp->true_mac_count + resp->pseudo_mac_count; /* Mac list returned could contain one or more active mac_ids - * or one or more true or pseudo permanant mac addresses. + * or one or more true or pseudo permanent mac addresses. * If an active mac_id is present, return first active mac_id * found. 
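The reworked lancer_wait_ready() above collapses the old two-stage ready/error handling into one bounded polling loop: return success as soon as the RDY bit is set, give up immediately with -EIO when the error bit is latched without a reset pending, and otherwise retry once a second until the loop runs out. The stand-alone sketch below only illustrates that control flow; the register read is stubbed out and the mask values, timeout and error codes are simplified placeholders, not the real SLIPORT definitions.

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <errno.h>

#define STATUS_RDY_MASK  0x1          /* placeholder bit definitions */
#define STATUS_ERR_MASK  0x2
#define STATUS_RN_MASK   0x4
#define READY_TIMEOUT    30           /* poll up to 30 times, 1s apart */

/* stand-in for ioread32(adapter->db + SLIPORT_STATUS_OFFSET) */
static uint32_t read_status(void)
{
    static int polls;
    return ++polls < 3 ? 0 : STATUS_RDY_MASK;   /* "ready" on the 3rd poll */
}

static int wait_ready(void)
{
    uint32_t status;
    int i;

    for (i = 0; i < READY_TIMEOUT; i++) {
        status = read_status();
        if (status & STATUS_RDY_MASK)
            return 0;                           /* device is ready */

        /* error latched and no reset pending: unrecoverable, stop polling */
        if ((status & STATUS_ERR_MASK) && !(status & STATUS_RN_MASK))
            return -EIO;

        sleep(1);                               /* msleep(1000) equivalent */
    }
    return -ETIMEDOUT;                          /* never became ready */
}

int main(void)
{
    printf("wait_ready() = %d\n", wait_ready());
    return 0;
}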
*/ @@ -3133,7 +3076,7 @@ int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac) int status; bool pmac_valid = false; - memset(mac, 0, ETH_ALEN); + eth_zero_addr(mac); if (BEx_chip(adapter)) { if (be_physfn(adapter)) @@ -3634,12 +3577,12 @@ static void be_copy_nic_desc(struct be_resources *res, res->max_rss_qs = le16_to_cpu(desc->rssq_count); res->max_rx_qs = le16_to_cpu(desc->rq_count); res->max_evt_qs = le16_to_cpu(desc->eq_count); + res->max_cq_count = le16_to_cpu(desc->cq_count); + res->max_iface_count = le16_to_cpu(desc->iface_count); + res->max_mcc_count = le16_to_cpu(desc->mcc_count); /* Clear flags that driver is not interested in */ res->if_cap_flags = le32_to_cpu(desc->cap_flags) & BE_IF_CAP_FLAGS_WANT; - /* Need 1 RXQ as the default RXQ */ - if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs) - res->max_rss_qs -= 1; } /* Uses Mbox */ @@ -3701,7 +3644,7 @@ err: /* Will use MBOX only if MCCQ has not been created */ int be_cmd_get_profile_config(struct be_adapter *adapter, - struct be_resources *res, u8 domain) + struct be_resources *res, u8 query, u8 domain) { struct be_cmd_resp_get_profile_config *resp; struct be_cmd_req_get_profile_config *req; @@ -3711,7 +3654,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, struct be_nic_res_desc *nic; struct be_mcc_wrb wrb = {0}; struct be_dma_mem cmd; - u32 desc_count; + u16 desc_count; int status; memset(&cmd, 0, sizeof(struct be_dma_mem)); @@ -3730,12 +3673,19 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, req->hdr.version = 1; req->type = ACTIVE_PROFILE_TYPE; + /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the + * descriptors with all bits set to "1" for the fields which can be + * modified using SET_PROFILE_CONFIG cmd. + */ + if (query == RESOURCE_MODIFIABLE) + req->type |= QUERY_MODIFIABLE_FIELDS_TYPE; + status = be_cmd_notify_wait(adapter, &wrb); if (status) goto err; resp = cmd.va; - desc_count = le32_to_cpu(resp->desc_count); + desc_count = le16_to_cpu(resp->desc_count); pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param, desc_count); @@ -3860,23 +3810,80 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed, 1, version, domain); } +static void be_fill_vf_res_template(struct be_adapter *adapter, + struct be_resources pool_res, + u16 num_vfs, u16 num_vf_qs, + struct be_nic_res_desc *nic_vft) +{ + u32 vf_if_cap_flags = pool_res.vf_if_cap_flags; + struct be_resources res_mod = {0}; + + /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd, + * which are modifiable using SET_PROFILE_CONFIG cmd. + */ + be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0); + + /* If RSS IFACE capability flags are modifiable for a VF, set the + * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if + * more than 1 RSSQ is available for a VF. + * Otherwise, provision only 1 queue pair for VF. 
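The new RESOURCE_MODIFIABLE query above asks the firmware for a descriptor in which every field that SET_PROFILE_CONFIG may change reads back as all ones (FIELD_MODIFIABLE), and the body of be_fill_vf_res_template() just below then shares each modifiable pool count equally among the PF and its VFs by dividing by (num_vfs + 1). A toy version of that split, covering only a few fields and using made-up pool sizes:

#include <stdio.h>

#define FIELD_MODIFIABLE 0xFFFF

struct pool { unsigned max_uc_mac, max_vlans, max_iface_count; };

int main(void)
{
    struct pool pool = { 64, 128, 15 };           /* hypothetical PF pool */
    struct pool mod  = { FIELD_MODIFIABLE, FIELD_MODIFIABLE, 0 };
    unsigned num_vfs = 7, per_func = num_vfs + 1; /* the PF plus its VFs */

    /* only fields reported as modifiable are redistributed */
    if (mod.max_uc_mac == FIELD_MODIFIABLE)
        printf("uc_mac per function: %u\n", pool.max_uc_mac / per_func);  /* 8 */
    if (mod.max_vlans == FIELD_MODIFIABLE)
        printf("vlans per function:  %u\n", pool.max_vlans / per_func);   /* 16 */
    if (mod.max_iface_count != FIELD_MODIFIABLE)
        printf("iface count not modifiable, left as programmed\n");
    return 0;
}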
+ */ + if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) { + nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT); + if (num_vf_qs > 1) { + vf_if_cap_flags |= BE_IF_FLAGS_RSS; + if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS) + vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS; + } else { + vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS | + BE_IF_FLAGS_DEFQ_RSS); + } + + nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags); + } else { + num_vf_qs = 1; + } + + nic_vft->rq_count = cpu_to_le16(num_vf_qs); + nic_vft->txq_count = cpu_to_le16(num_vf_qs); + nic_vft->rssq_count = cpu_to_le16(num_vf_qs); + nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count / + (num_vfs + 1)); + + /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally + * among the PF and it's VFs, if the fields are changeable + */ + if (res_mod.max_uc_mac == FIELD_MODIFIABLE) + nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac / + (num_vfs + 1)); + + if (res_mod.max_vlans == FIELD_MODIFIABLE) + nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans / + (num_vfs + 1)); + + if (res_mod.max_iface_count == FIELD_MODIFIABLE) + nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count / + (num_vfs + 1)); + + if (res_mod.max_mcc_count == FIELD_MODIFIABLE) + nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count / + (num_vfs + 1)); +} + int be_cmd_set_sriov_config(struct be_adapter *adapter, - struct be_resources res, u16 num_vfs) + struct be_resources pool_res, u16 num_vfs, + u16 num_vf_qs) { struct { struct be_pcie_res_desc pcie; struct be_nic_res_desc nic_vft; } __packed desc; - u16 vf_q_count; - - if (BEx_chip(adapter) || lancer_chip(adapter)) - return 0; /* PF PCIE descriptor */ be_reset_pcie_desc(&desc.pcie); desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1; desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1; - desc.pcie.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT); + desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT); desc.pcie.pf_num = adapter->pdev->devfn; desc.pcie.sriov_state = num_vfs ? 
1 : 0; desc.pcie.num_vfs = cpu_to_le16(num_vfs); @@ -3885,32 +3892,12 @@ int be_cmd_set_sriov_config(struct be_adapter *adapter, be_reset_nic_desc(&desc.nic_vft); desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1; desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1; - desc.nic_vft.flags = (1 << VFT_SHIFT) | (1 << IMM_SHIFT) | - (1 << NOSV_SHIFT); + desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT); desc.nic_vft.pf_num = adapter->pdev->devfn; desc.nic_vft.vf_num = 0; - if (num_vfs && res.vf_if_cap_flags & BE_IF_FLAGS_RSS) { - /* If number of VFs requested is 8 less than max supported, - * assign 8 queue pairs to the PF and divide the remaining - * resources evenly among the VFs - */ - if (num_vfs < (be_max_vfs(adapter) - 8)) - vf_q_count = (res.max_rss_qs - 8) / num_vfs; - else - vf_q_count = res.max_rss_qs / num_vfs; - - desc.nic_vft.rq_count = cpu_to_le16(vf_q_count); - desc.nic_vft.txq_count = cpu_to_le16(vf_q_count); - desc.nic_vft.rssq_count = cpu_to_le16(vf_q_count - 1); - desc.nic_vft.cq_count = cpu_to_le16(3 * vf_q_count); - } else { - desc.nic_vft.txq_count = cpu_to_le16(1); - desc.nic_vft.rq_count = cpu_to_le16(1); - desc.nic_vft.rssq_count = cpu_to_le16(0); - /* One CQ for each TX, RX and MCCQ */ - desc.nic_vft.cq_count = cpu_to_le16(3); - } + be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs, + &desc.nic_vft); return be_cmd_set_profile_config(adapter, &desc, 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index db761e8e..53e903f 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -588,14 +588,15 @@ enum be_if_flags { BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200, BE_IF_FLAGS_PASS_L2_ERRORS = 0x400, BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800, - BE_IF_FLAGS_MULTICAST = 0x1000 + BE_IF_FLAGS_MULTICAST = 0x1000, + BE_IF_FLAGS_DEFQ_RSS = 0x1000000 }; #define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\ BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\ BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\ BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\ - BE_IF_FLAGS_UNTAGGED) + BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_DEFQ_RSS) #define BE_IF_FLAGS_ALL_PROMISCUOUS (BE_IF_FLAGS_PROMISCUOUS | \ BE_IF_FLAGS_VLAN_PROMISCUOUS |\ @@ -2021,6 +2022,7 @@ struct be_cmd_req_set_ext_fat_caps { #define PORT_RESOURCE_DESC_TYPE_V1 0x55 #define MAX_RESOURCE_DESC 264 +#define IF_CAPS_FLAGS_VALID_SHIFT 0 /* IF caps valid */ #define VFT_SHIFT 3 /* VF template */ #define IMM_SHIFT 6 /* Immediate */ #define NOSV_SHIFT 7 /* No save */ @@ -2131,20 +2133,28 @@ struct be_cmd_resp_get_func_config { u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1]; }; -#define ACTIVE_PROFILE_TYPE 0x2 +enum { + RESOURCE_LIMITS, + RESOURCE_MODIFIABLE +}; + struct be_cmd_req_get_profile_config { struct be_cmd_req_hdr hdr; u8 rsvd; +#define ACTIVE_PROFILE_TYPE 0x2 +#define QUERY_MODIFIABLE_FIELDS_TYPE BIT(3) u8 type; u16 rsvd1; }; struct be_cmd_resp_get_profile_config { struct be_cmd_resp_hdr hdr; - u32 desc_count; + __le16 desc_count; + u16 rsvd; u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1]; }; +#define FIELD_MODIFIABLE 0xFFFF struct be_cmd_req_set_profile_config { struct be_cmd_req_hdr hdr; u32 rsvd; @@ -2344,7 +2354,7 @@ int be_cmd_query_port_name(struct be_adapter *adapter); int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res); int be_cmd_get_profile_config(struct be_adapter *adapter, - 
struct be_resources *res, u8 domain); + struct be_resources *res, u8 query, u8 domain); int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile); int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, int vf_num); @@ -2355,4 +2365,5 @@ int be_cmd_set_logical_link_config(struct be_adapter *adapter, int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port); int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op); int be_cmd_set_sriov_config(struct be_adapter *adapter, - struct be_resources res, u16 num_vfs); + struct be_resources res, u16 num_vfs, + u16 num_vf_qs); diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 4d2de47..b765c24 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -1097,7 +1097,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter, return status; if (be_multi_rxq(adapter)) { - for (j = 0; j < 128; j += adapter->num_rx_qs - 1) { + for (j = 0; j < 128; j += adapter->num_rss_qs) { for_all_rss_queues(adapter, rxo, i) { if ((j + i) >= 128) break; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 0a81685..5652b00 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -30,6 +30,9 @@ MODULE_DESCRIPTION(DRV_DESC " " DRV_VER); MODULE_AUTHOR("Emulex Corporation"); MODULE_LICENSE("GPL"); +/* num_vfs module param is obsolete. + * Use sysfs method to enable/disable VFs. + */ static unsigned int num_vfs; module_param(num_vfs, uint, S_IRUGO); MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize"); @@ -727,48 +730,86 @@ static u16 skb_ip_proto(struct sk_buff *skb) ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr; } -static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, - struct sk_buff *skb, u32 wrb_cnt, u32 len, - bool skip_hw_vlan) +static inline bool be_is_txq_full(struct be_tx_obj *txo) { - u16 vlan_tag, proto; + return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len; +} - memset(hdr, 0, sizeof(*hdr)); +static inline bool be_can_txq_wake(struct be_tx_obj *txo) +{ + return atomic_read(&txo->q.used) < txo->q.len / 2; +} - SET_TX_WRB_HDR_BITS(crc, hdr, 1); +static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo) +{ + return atomic_read(&txo->q.used) > txo->pend_wrb_cnt; +} + +static void be_get_wrb_params_from_skb(struct be_adapter *adapter, + struct sk_buff *skb, + struct be_wrb_params *wrb_params) +{ + u16 proto; if (skb_is_gso(skb)) { - SET_TX_WRB_HDR_BITS(lso, hdr, 1); - SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size); + BE_WRB_F_SET(wrb_params->features, LSO, 1); + wrb_params->lso_mss = skb_shinfo(skb)->gso_size; if (skb_is_gso_v6(skb) && !lancer_chip(adapter)) - SET_TX_WRB_HDR_BITS(lso6, hdr, 1); + BE_WRB_F_SET(wrb_params->features, LSO6, 1); } else if (skb->ip_summed == CHECKSUM_PARTIAL) { if (skb->encapsulation) { - SET_TX_WRB_HDR_BITS(ipcs, hdr, 1); + BE_WRB_F_SET(wrb_params->features, IPCS, 1); proto = skb_inner_ip_proto(skb); } else { proto = skb_ip_proto(skb); } if (proto == IPPROTO_TCP) - SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1); + BE_WRB_F_SET(wrb_params->features, TCPCS, 1); else if (proto == IPPROTO_UDP) - SET_TX_WRB_HDR_BITS(udpcs, hdr, 1); + BE_WRB_F_SET(wrb_params->features, UDPCS, 1); } if (skb_vlan_tag_present(skb)) { - SET_TX_WRB_HDR_BITS(vlan, hdr, 1); - vlan_tag = be_get_tx_vlan_tag(adapter, skb); - 
SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag); + BE_WRB_F_SET(wrb_params->features, VLAN, 1); + wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb); } - SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt); - SET_TX_WRB_HDR_BITS(len, hdr, len); + BE_WRB_F_SET(wrb_params->features, CRC, 1); +} - /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0 - * When this hack is not needed, the evt bit is set while ringing DB +static void wrb_fill_hdr(struct be_adapter *adapter, + struct be_eth_hdr_wrb *hdr, + struct be_wrb_params *wrb_params, + struct sk_buff *skb) +{ + memset(hdr, 0, sizeof(*hdr)); + + SET_TX_WRB_HDR_BITS(crc, hdr, + BE_WRB_F_GET(wrb_params->features, CRC)); + SET_TX_WRB_HDR_BITS(ipcs, hdr, + BE_WRB_F_GET(wrb_params->features, IPCS)); + SET_TX_WRB_HDR_BITS(tcpcs, hdr, + BE_WRB_F_GET(wrb_params->features, TCPCS)); + SET_TX_WRB_HDR_BITS(udpcs, hdr, + BE_WRB_F_GET(wrb_params->features, UDPCS)); + + SET_TX_WRB_HDR_BITS(lso, hdr, + BE_WRB_F_GET(wrb_params->features, LSO)); + SET_TX_WRB_HDR_BITS(lso6, hdr, + BE_WRB_F_GET(wrb_params->features, LSO6)); + SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss); + + /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this + * hack is not needed, the evt bit is set while ringing DB. */ - if (skip_hw_vlan) - SET_TX_WRB_HDR_BITS(event, hdr, 1); + SET_TX_WRB_HDR_BITS(event, hdr, + BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW)); + SET_TX_WRB_HDR_BITS(vlan, hdr, + BE_WRB_F_GET(wrb_params->features, VLAN)); + SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag); + + SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb)); + SET_TX_WRB_HDR_BITS(len, hdr, skb->len); } static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, @@ -788,77 +829,124 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, } } -/* Returns the number of WRBs used up by the skb */ +/* Grab a WRB header for xmit */ +static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo) +{ + u16 head = txo->q.head; + + queue_head_inc(&txo->q); + return head; +} + +/* Set up the WRB header for xmit */ +static void be_tx_setup_wrb_hdr(struct be_adapter *adapter, + struct be_tx_obj *txo, + struct be_wrb_params *wrb_params, + struct sk_buff *skb, u16 head) +{ + u32 num_frags = skb_wrb_cnt(skb); + struct be_queue_info *txq = &txo->q; + struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head); + + wrb_fill_hdr(adapter, hdr, wrb_params, skb); + be_dws_cpu_to_le(hdr, sizeof(*hdr)); + + BUG_ON(txo->sent_skb_list[head]); + txo->sent_skb_list[head] = skb; + txo->last_req_hdr = head; + atomic_add(num_frags, &txq->used); + txo->last_req_wrb_cnt = num_frags; + txo->pend_wrb_cnt += num_frags; +} + +/* Setup a WRB fragment (buffer descriptor) for xmit */ +static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr, + int len) +{ + struct be_eth_wrb *wrb; + struct be_queue_info *txq = &txo->q; + + wrb = queue_head_node(txq); + wrb_fill(wrb, busaddr, len); + queue_head_inc(txq); +} + +/* Bring the queue back to the state it was in before be_xmit_enqueue() routine + * was invoked. The producer index is restored to the previous packet and the + * WRBs of the current packet are unmapped. Invoked to handle tx setup errors. 
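be_get_wrb_params_from_skb() and wrb_fill_hdr() above now communicate per-packet offload state through the BE_WRB_F_{SET,GET} helpers added to be.h earlier in this patch. The sketch below reproduces the same packing scheme as a self-contained user-space program; BIT_MASK is simplified and the feature enum is abbreviated, so bit positions differ from the full definition in be.h.

#include <stdio.h>
#include <stdint.h>

#define BIT_MASK(nr)        (1UL << (nr))
#define BE_WRB_F_BIT(name)  BE_WRB_F_##name##_BIT
#define BE_WRB_F_MASK(name) BIT_MASK(BE_WRB_F_##name##_BIT)
#define BE_WRB_F_GET(word, name) \
    (((word) & (BE_WRB_F_MASK(name))) >> BE_WRB_F_BIT(name))
#define BE_WRB_F_SET(word, name, val) \
    ((word) |= (((val) << BE_WRB_F_BIT(name)) & BE_WRB_F_MASK(name)))

enum {                               /* abbreviated feature bit list */
    BE_WRB_F_CRC_BIT,
    BE_WRB_F_IPCS_BIT,
    BE_WRB_F_TCPCS_BIT,
    BE_WRB_F_LSO_BIT,
    BE_WRB_F_VLAN_BIT,
};

int main(void)
{
    uint32_t features = 0;

    BE_WRB_F_SET(features, CRC, 1);  /* always request Ethernet CRC */
    BE_WRB_F_SET(features, LSO, 1);  /* mark the packet as LSO */

    printf("LSO=%lu VLAN=%lu\n",
           BE_WRB_F_GET(features, LSO), BE_WRB_F_GET(features, VLAN));
    return 0;                        /* prints: LSO=1 VLAN=0 */
}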
+ */ +static void be_xmit_restore(struct be_adapter *adapter, + struct be_tx_obj *txo, u16 head, bool map_single, + u32 copied) +{ + struct device *dev; + struct be_eth_wrb *wrb; + struct be_queue_info *txq = &txo->q; + + dev = &adapter->pdev->dev; + txq->head = head; + + /* skip the first wrb (hdr); it's not mapped */ + queue_head_inc(txq); + while (copied) { + wrb = queue_head_node(txq); + unmap_tx_frag(dev, wrb, map_single); + map_single = false; + copied -= le32_to_cpu(wrb->frag_len); + queue_head_inc(txq); + } + + txq->head = head; +} + +/* Enqueue the given packet for transmit. This routine allocates WRBs for the + * packet, dma maps the packet buffers and sets up the WRBs. Returns the number + * of WRBs used up by the packet. + */ static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo, - struct sk_buff *skb, bool skip_hw_vlan) + struct sk_buff *skb, + struct be_wrb_params *wrb_params) { u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb); struct device *dev = &adapter->pdev->dev; struct be_queue_info *txq = &txo->q; - struct be_eth_hdr_wrb *hdr; bool map_single = false; - struct be_eth_wrb *wrb; - dma_addr_t busaddr; u16 head = txq->head; + dma_addr_t busaddr; + int len; - hdr = queue_head_node(txq); - wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan); - be_dws_cpu_to_le(hdr, sizeof(*hdr)); - - queue_head_inc(txq); + head = be_tx_get_wrb_hdr(txo); if (skb->len > skb->data_len) { - int len = skb_headlen(skb); + len = skb_headlen(skb); busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(dev, busaddr)) goto dma_err; map_single = true; - wrb = queue_head_node(txq); - wrb_fill(wrb, busaddr, len); - queue_head_inc(txq); + be_tx_setup_wrb_frag(txo, busaddr, len); copied += len; } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + len = skb_frag_size(frag); - busaddr = skb_frag_dma_map(dev, frag, 0, - skb_frag_size(frag), DMA_TO_DEVICE); + busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE); if (dma_mapping_error(dev, busaddr)) goto dma_err; - wrb = queue_head_node(txq); - wrb_fill(wrb, busaddr, skb_frag_size(frag)); - queue_head_inc(txq); - copied += skb_frag_size(frag); + be_tx_setup_wrb_frag(txo, busaddr, len); + copied += len; } - BUG_ON(txo->sent_skb_list[head]); - txo->sent_skb_list[head] = skb; - txo->last_req_hdr = head; - atomic_add(wrb_cnt, &txq->used); - txo->last_req_wrb_cnt = wrb_cnt; - txo->pend_wrb_cnt += wrb_cnt; + be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head); be_tx_stats_update(txo, skb); return wrb_cnt; dma_err: - /* Bring the queue back to the state it was in before this - * routine was invoked. 
- */ - txq->head = head; - /* skip the first wrb (hdr); it's not mapped */ - queue_head_inc(txq); - while (copied) { - wrb = queue_head_node(txq); - unmap_tx_frag(dev, wrb, map_single); - map_single = false; - copied -= le32_to_cpu(wrb->frag_len); - adapter->drv_stats.dma_map_errors++; - queue_head_inc(txq); - } - txq->head = head; + adapter->drv_stats.dma_map_errors++; + be_xmit_restore(adapter, txo, head, map_single, copied); return 0; } @@ -869,7 +957,8 @@ static inline int qnq_async_evt_rcvd(struct be_adapter *adapter) static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, struct sk_buff *skb, - bool *skip_hw_vlan) + struct be_wrb_params + *wrb_params) { u16 vlan_tag = 0; @@ -886,8 +975,7 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to * skip VLAN insertion */ - if (skip_hw_vlan) - *skip_hw_vlan = true; + BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1); } if (vlan_tag) { @@ -905,8 +993,7 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, vlan_tag); if (unlikely(!skb)) return skb; - if (skip_hw_vlan) - *skip_hw_vlan = true; + BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1); } return skb; @@ -946,7 +1033,8 @@ static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb) static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, struct sk_buff *skb, - bool *skip_hw_vlan) + struct be_wrb_params + *wrb_params) { struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; unsigned int eth_hdr_len; @@ -970,7 +1058,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, */ if (be_pvid_tagging_enabled(adapter) && veh->h_vlan_proto == htons(ETH_P_8021Q)) - *skip_hw_vlan = true; + BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1); /* HW has a bug wherein it will calculate CSUM for VLAN * pkts even though it is disabled. @@ -978,7 +1066,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, */ if (skb->ip_summed != CHECKSUM_PARTIAL && skb_vlan_tag_present(skb)) { - skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); + skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params); if (unlikely(!skb)) goto err; } @@ -1000,7 +1088,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, */ if (be_ipv6_tx_stall_chk(adapter, skb) && be_vlan_tag_tx_chk(adapter, skb)) { - skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); + skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params); if (unlikely(!skb)) goto err; } @@ -1014,7 +1102,7 @@ err: static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, struct sk_buff *skb, - bool *skip_hw_vlan) + struct be_wrb_params *wrb_params) { /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or * less may cause a transmit stall on that port. 
So the work-around is @@ -1026,7 +1114,7 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, } if (BEx_chip(adapter) || lancer_chip(adapter)) { - skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan); + skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params); if (!skb) return NULL; } @@ -1060,24 +1148,26 @@ static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo) static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) { - bool skip_hw_vlan = false, flush = !skb->xmit_more; struct be_adapter *adapter = netdev_priv(netdev); u16 q_idx = skb_get_queue_mapping(skb); struct be_tx_obj *txo = &adapter->tx_obj[q_idx]; - struct be_queue_info *txq = &txo->q; + struct be_wrb_params wrb_params = { 0 }; + bool flush = !skb->xmit_more; u16 wrb_cnt; - skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan); + skb = be_xmit_workarounds(adapter, skb, &wrb_params); if (unlikely(!skb)) goto drop; - wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan); + be_get_wrb_params_from_skb(adapter, skb, &wrb_params); + + wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params); if (unlikely(!wrb_cnt)) { dev_kfree_skb_any(skb); goto drop; } - if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) { + if (be_is_txq_full(txo)) { netif_stop_subqueue(netdev, q_idx); tx_stats(txo)->tx_stops++; } @@ -1991,18 +2081,23 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed) } } -static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq) +static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo) { - struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq); + struct be_queue_info *tx_cq = &txo->cq; + struct be_tx_compl_info *txcp = &txo->txcp; + struct be_eth_tx_compl *compl = queue_tail_node(tx_cq); - if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0) + if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0) return NULL; + /* Ensure load ordering of valid bit dword and other dwords below */ rmb(); - be_dws_le_to_cpu(txcp, sizeof(*txcp)); + be_dws_le_to_cpu(compl, sizeof(*compl)); - txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0; + txcp->status = GET_TX_COMPL_BITS(status, compl); + txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl); + compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0; queue_tail_inc(tx_cq); return txcp; } @@ -2123,9 +2218,9 @@ static void be_tx_compl_clean(struct be_adapter *adapter) { u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0; struct device *dev = &adapter->pdev->dev; - struct be_tx_obj *txo; + struct be_tx_compl_info *txcp; struct be_queue_info *txq; - struct be_eth_tx_compl *txcp; + struct be_tx_obj *txo; int i, pending_txqs; /* Stop polling for compls when HW has been silent for 10ms */ @@ -2136,10 +2231,10 @@ static void be_tx_compl_clean(struct be_adapter *adapter) cmpl = 0; num_wrbs = 0; txq = &txo->q; - while ((txcp = be_tx_compl_get(&txo->cq))) { - end_idx = GET_TX_COMPL_BITS(wrb_index, txcp); - num_wrbs += be_tx_compl_process(adapter, txo, - end_idx); + while ((txcp = be_tx_compl_get(txo))) { + num_wrbs += + be_tx_compl_process(adapter, txo, + txcp->end_index); cmpl++; } if (cmpl) { @@ -2147,7 +2242,7 @@ static void be_tx_compl_clean(struct be_adapter *adapter) atomic_sub(num_wrbs, &txq->used); timeo = 0; } - if (atomic_read(&txq->used) == txo->pend_wrb_cnt) + if (!be_is_tx_compl_pending(txo)) pending_txqs--; } @@ -2362,13 +2457,19 @@ static int be_rx_cqs_create(struct be_adapter 
*adapter) int rc, i; /* We can create as many RSS rings as there are EQs. */ - adapter->num_rx_qs = adapter->num_evt_qs; + adapter->num_rss_qs = adapter->num_evt_qs; + + /* We'll use RSS only if atleast 2 RSS rings are supported. */ + if (adapter->num_rss_qs <= 1) + adapter->num_rss_qs = 0; - /* We'll use RSS only if atleast 2 RSS rings are supported. - * When RSS is used, we'll need a default RXQ for non-IP traffic. + adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq; + + /* When the interface is not capable of RSS rings (and there is no + * need to create a default RXQ) we'll still need one RXQ */ - if (adapter->num_rx_qs > 1) - adapter->num_rx_qs++; + if (adapter->num_rx_qs == 0) + adapter->num_rx_qs = 1; adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; for_all_rx_queues(adapter, rxo, i) { @@ -2387,8 +2488,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter) } dev_info(&adapter->pdev->dev, - "created %d RSS queue(s) and 1 default RX queue\n", - adapter->num_rx_qs - 1); + "created %d RX queue(s)\n", adapter->num_rx_qs); return 0; } @@ -2498,7 +2598,7 @@ loop_continue: return work_done; } -static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status) +static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status) { switch (status) { case BE_TX_COMP_HDR_PARSE_ERR: @@ -2513,7 +2613,7 @@ static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status) } } -static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status) +static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status) { switch (status) { case LANCER_TX_COMP_LSO_ERR: @@ -2538,22 +2638,18 @@ static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status) static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo, int idx) { - struct be_eth_tx_compl *txcp; int num_wrbs = 0, work_done = 0; - u32 compl_status; - u16 last_idx; + struct be_tx_compl_info *txcp; - while ((txcp = be_tx_compl_get(&txo->cq))) { - last_idx = GET_TX_COMPL_BITS(wrb_index, txcp); - num_wrbs += be_tx_compl_process(adapter, txo, last_idx); + while ((txcp = be_tx_compl_get(txo))) { + num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index); work_done++; - compl_status = GET_TX_COMPL_BITS(status, txcp); - if (compl_status) { + if (txcp->status) { if (lancer_chip(adapter)) - lancer_update_tx_err(txo, compl_status); + lancer_update_tx_err(txo, txcp->status); else - be_update_tx_err(txo, compl_status); + be_update_tx_err(txo, txcp->status); } } @@ -2564,7 +2660,7 @@ static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo, /* As Tx wrbs have been freed up, wake up netdev queue * if it was stopped due to lack of tx wrbs. 
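The be_is_txq_full()/be_can_txq_wake() helpers introduced above encode a simple hysteresis: the subqueue is stopped once fewer than one worst-case packet's worth of WRBs (BE_MAX_TX_FRAG_COUNT) remains free, but is only woken again after the ring has drained to half full, so a single completion cannot bounce the queue between stopped and running. A self-contained sketch of those thresholds, with invented ring and fragment sizes:

#include <stdio.h>
#include <stdbool.h>

#define TXQ_LEN            2048   /* hypothetical ring size */
#define MAX_TX_FRAG_COUNT  32     /* worst-case WRBs for one packet */

static bool txq_full(int used)     { return used + MAX_TX_FRAG_COUNT >= TXQ_LEN; }
static bool txq_can_wake(int used) { return used < TXQ_LEN / 2; }

int main(void)
{
    int used = TXQ_LEN - MAX_TX_FRAG_COUNT;            /* just hit the stop threshold */

    printf("stop queue? %d\n", txq_full(used));        /* 1: stop the netif subqueue */
    used -= MAX_TX_FRAG_COUNT;                         /* one packet's WRBs completed */
    printf("wake queue? %d\n", txq_can_wake(used));    /* 0: still above len/2, stay stopped */
    used = TXQ_LEN / 2 - 1;                            /* ring drained below half */
    printf("wake queue? %d\n", txq_can_wake(used));    /* 1: wake the subqueue */
    return 0;
}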
*/ if (__netif_subqueue_stopped(adapter->netdev, idx) && - atomic_read(&txo->q.used) < txo->q.len / 2) { + be_can_txq_wake(txo)) { netif_wake_subqueue(adapter->netdev, idx); } @@ -2756,12 +2852,12 @@ void be_detect_error(struct be_adapter *adapter) sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET); adapter->hw_error = true; + error_detected = true; /* Do not log error messages if its a FW reset */ if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 && sliport_err2 == SLIPORT_ERROR_FW_RESET2) { dev_info(dev, "Firmware update in progress\n"); } else { - error_detected = true; dev_err(dev, "Error detected in the card\n"); dev_err(dev, "ERR: sliport status 0x%x\n", sliport_status); @@ -3022,12 +3118,14 @@ static int be_rx_qs_create(struct be_adapter *adapter) return rc; } - /* The FW would like the default RXQ to be created first */ - rxo = default_rxo(adapter); - rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size, - adapter->if_handle, false, &rxo->rss_id); - if (rc) - return rc; + if (adapter->need_def_rxq || !adapter->num_rss_qs) { + rxo = default_rxo(adapter); + rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, + rx_frag_size, adapter->if_handle, + false, &rxo->rss_id); + if (rc) + return rc; + } for_all_rss_queues(adapter, rxo, i) { rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, @@ -3038,8 +3136,7 @@ static int be_rx_qs_create(struct be_adapter *adapter) } if (be_multi_rxq(adapter)) { - for (j = 0; j < RSS_INDIR_TABLE_LEN; - j += adapter->num_rx_qs - 1) { + for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) { for_all_rss_queues(adapter, rxo, i) { if ((j + i) >= RSS_INDIR_TABLE_LEN) break; @@ -3130,7 +3227,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable) int status = 0; u8 mac[ETH_ALEN]; - memset(mac, 0, ETH_ALEN); + eth_zero_addr(mac); cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, @@ -3275,6 +3372,14 @@ static void be_cancel_worker(struct be_adapter *adapter) } } +static void be_cancel_err_detection(struct be_adapter *adapter) +{ + if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) { + cancel_delayed_work_sync(&adapter->be_err_detection_work); + adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED; + } +} + static void be_mac_clear(struct be_adapter *adapter) { if (adapter->pmac_id) { @@ -3306,8 +3411,39 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter) } #endif +static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs) +{ + struct be_resources res = adapter->pool_res; + u16 num_vf_qs = 1; + + /* Distribute the queue resources equally among the PF and it's VFs + * Do not distribute queue resources in multi-channel configuration. + */ + if (num_vfs && !be_is_mc(adapter)) { + /* If number of VFs requested is 8 less than max supported, + * assign 8 queue pairs to the PF and divide the remaining + * resources evenly among the VFs + */ + if (num_vfs < (be_max_vfs(adapter) - 8)) + num_vf_qs = (res.max_rss_qs - 8) / num_vfs; + else + num_vf_qs = res.max_rss_qs / num_vfs; + + /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable + * interfaces per port. Provide RSS on VFs, only if number + * of VFs requested is less than MAX_RSS_IFACES limit. 
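With the default RXQ no longer counted as an RSS ring, the indirection-table loops above stride by num_rss_qs rather than num_rx_qs - 1. Assuming each slot is mapped to one RSS ring in turn (the per-slot assignment itself sits outside the hunks shown), the 128 entries simply cycle through the RSS rings, as this small sketch with a hypothetical ring count illustrates:

#include <stdio.h>

#define RSS_INDIR_TABLE_LEN 128

int main(void)
{
    int num_rss_qs = 4;                      /* e.g. 4 RSS rings, default RXQ excluded */
    int table[RSS_INDIR_TABLE_LEN];
    int i, j;

    for (j = 0; j < RSS_INDIR_TABLE_LEN; j += num_rss_qs)
        for (i = 0; i < num_rss_qs && j + i < RSS_INDIR_TABLE_LEN; i++)
            table[j + i] = i;                /* ring index for this slot */

    for (i = 0; i < 8; i++)
        printf("%d ", table[i]);             /* prints: 0 1 2 3 0 1 2 3 */
    printf("\n");
    return 0;
}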
+ */ + if (num_vfs >= MAX_RSS_IFACES) + num_vf_qs = 1; + } + return num_vf_qs; +} + static int be_clear(struct be_adapter *adapter) { + struct pci_dev *pdev = adapter->pdev; + u16 num_vf_qs; + be_cancel_worker(adapter); if (sriov_enabled(adapter)) @@ -3316,9 +3452,14 @@ static int be_clear(struct be_adapter *adapter) /* Re-configure FW to distribute resources evenly across max-supported * number of VFs, only when VFs are not already enabled. */ - if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev)) + if (skyhawk_chip(adapter) && be_physfn(adapter) && + !pci_vfs_assigned(pdev)) { + num_vf_qs = be_calculate_vf_qs(adapter, + pci_sriov_get_totalvfs(pdev)); be_cmd_set_sriov_config(adapter, adapter->pool_res, - pci_sriov_get_totalvfs(adapter->pdev)); + pci_sriov_get_totalvfs(pdev), + num_vf_qs); + } #ifdef CONFIG_BE2NET_VXLAN be_disable_vxlan_offloads(adapter); @@ -3343,7 +3484,7 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle, en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS | - BE_IF_FLAGS_RSS; + BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS; en_flags &= cap_flags; @@ -3367,6 +3508,7 @@ static int be_vfs_if_create(struct be_adapter *adapter) for_all_vfs(adapter, vf_cfg, vf) { if (!BE3_chip(adapter)) { status = be_cmd_get_profile_config(adapter, &res, + RESOURCE_LIMITS, vf + 1); if (!status) cap_flags = res.if_cap_flags; @@ -3533,7 +3675,8 @@ static void BEx_get_resources(struct be_adapter *adapter, /* On a SuperNIC profile, the driver needs to use the * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits */ - be_cmd_get_profile_config(adapter, &super_nic_res, 0); + be_cmd_get_profile_config(adapter, &super_nic_res, + RESOURCE_LIMITS, 0); /* Some old versions of BE3 FW don't report max_tx_qs value */ res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS; } else { @@ -3553,6 +3696,7 @@ static void BEx_get_resources(struct be_adapter *adapter, res->max_evt_qs = 1; res->if_cap_flags = BE_IF_CAP_FLAGS_WANT; + res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS; if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS)) res->if_cap_flags &= ~BE_IF_FLAGS_RSS; } @@ -3572,13 +3716,12 @@ static void be_setup_init(struct be_adapter *adapter) static int be_get_sriov_config(struct be_adapter *adapter) { - struct device *dev = &adapter->pdev->dev; struct be_resources res = {0}; int max_vfs, old_vfs; - /* Some old versions of BE3 FW don't report max_vfs value */ - be_cmd_get_profile_config(adapter, &res, 0); + be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0); + /* Some old versions of BE3 FW don't report max_vfs value */ if (BE3_chip(adapter) && !res.max_vfs) { max_vfs = pci_sriov_get_totalvfs(adapter->pdev); res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0; @@ -3586,35 +3729,49 @@ static int be_get_sriov_config(struct be_adapter *adapter) adapter->pool_res = res; - if (!be_max_vfs(adapter)) { - if (num_vfs) - dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n"); - adapter->num_vfs = 0; - return 0; - } - - pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter)); - - /* validate num_vfs module param */ + /* If during previous unload of the driver, the VFs were not disabled, + * then we cannot rely on the PF POOL limits for the TotalVFs value. + * Instead use the TotalVFs value stored in the pci-dev struct. 
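be_calculate_vf_qs() above splits the PF pool's RSS queues between the PF and its VFs: keep 8 queue pairs for the PF when there is head-room, divide the remainder evenly, and fall back to a single queue pair per VF once the requested VF count reaches the Skyhawk-R per-port limit of MAX_RSS_IFACES RSS-capable interfaces. A simplified sketch of that arithmetic (the multi-channel check is dropped and the pool sizes are made up):

#include <stdio.h>

#define MAX_RSS_IFACES 15

static int calc_vf_qs(int max_rss_qs, int max_vfs, int num_vfs)
{
    if (!num_vfs)
        return 1;
    if (num_vfs >= MAX_RSS_IFACES)      /* RSS on VFs only below the iface limit */
        return 1;
    if (num_vfs < max_vfs - 8)          /* leave 8 queue pairs for the PF */
        return (max_rss_qs - 8) / num_vfs;
    return max_rss_qs / num_vfs;
}

int main(void)
{
    printf("%d\n", calc_vf_qs(32, 30, 4));   /* (32 - 8) / 4 = 6 queue pairs per VF */
    printf("%d\n", calc_vf_qs(32, 30, 24));  /* 24 >= 15, so 1 queue pair per VF */
    return 0;
}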
+ */ old_vfs = pci_num_vf(adapter->pdev); if (old_vfs) { - dev_info(dev, "%d VFs are already enabled\n", old_vfs); - if (old_vfs != num_vfs) - dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs); + dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n", + old_vfs); + + adapter->pool_res.max_vfs = + pci_sriov_get_totalvfs(adapter->pdev); adapter->num_vfs = old_vfs; - } else { - if (num_vfs > be_max_vfs(adapter)) { - dev_info(dev, "Resources unavailable to init %d VFs\n", - num_vfs); - dev_info(dev, "Limiting to %d VFs\n", - be_max_vfs(adapter)); - } - adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter)); } return 0; } +static void be_alloc_sriov_res(struct be_adapter *adapter) +{ + int old_vfs = pci_num_vf(adapter->pdev); + u16 num_vf_qs; + int status; + + be_get_sriov_config(adapter); + + if (!old_vfs) + pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter)); + + /* When the HW is in SRIOV capable configuration, the PF-pool + * resources are given to PF during driver load, if there are no + * old VFs. This facility is not available in BE3 FW. + * Also, this is done by FW in Lancer chip. + */ + if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) { + num_vf_qs = be_calculate_vf_qs(adapter, 0); + status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0, + num_vf_qs); + if (status) + dev_err(&adapter->pdev->dev, + "Failed to optimize SRIOV resources\n"); + } +} + static int be_get_resources(struct be_adapter *adapter) { struct device *dev = &adapter->pdev->dev; @@ -3635,12 +3792,23 @@ static int be_get_resources(struct be_adapter *adapter) if (status) return status; + /* If a deafault RXQ must be created, we'll use up one RSSQ*/ + if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs && + !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)) + res.max_rss_qs -= 1; + /* If RoCE may be enabled stash away half the EQs for RoCE */ if (be_roce_supported(adapter)) res.max_evt_qs /= 2; adapter->res = res; } + /* If FW supports RSS default queue, then skip creating non-RSS + * queue for non-IP traffic. + */ + adapter->need_def_rxq = (be_if_cap_flags(adapter) & + BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1; + dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n", be_max_txqs(adapter), be_max_rxqs(adapter), be_max_rss(adapter), be_max_eqs(adapter), @@ -3649,47 +3817,33 @@ static int be_get_resources(struct be_adapter *adapter) be_max_uc(adapter), be_max_mc(adapter), be_max_vlans(adapter)); + /* Sanitize cfg_num_qs based on HW and platform limits */ + adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(), + be_max_qs(adapter)); return 0; } -static void be_sriov_config(struct be_adapter *adapter) -{ - struct device *dev = &adapter->pdev->dev; - int status; - - status = be_get_sriov_config(adapter); - if (status) { - dev_err(dev, "Failed to query SR-IOV configuration\n"); - dev_err(dev, "SR-IOV cannot be enabled\n"); - return; - } - - /* When the HW is in SRIOV capable configuration, the PF-pool - * resources are equally distributed across the max-number of - * VFs. The user may request only a subset of the max-vfs to be - * enabled. Based on num_vfs, redistribute the resources across - * num_vfs so that each VF will have access to more number of - * resources. This facility is not available in BE3 FW. - * Also, this is done by FW in Lancer chip. 
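be_get_resources() above now does the default-RXQ accounting explicitly: on firmware without the DEFQ_RSS capability one RSS queue is set aside for the non-RSS default ring (and need_def_rxq is set), and half of the event queues are stashed away when RoCE may be enabled. A toy walk-through with invented pool sizes:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
    int max_rss_qs = 16, max_rx_qs = 16, max_evt_qs = 32;  /* hypothetical pool */
    bool defq_rss_cap = false, roce = true;
    int need_def_rxq;

    /* no DEFQ_RSS capability: the default RXQ uses up one RSS queue */
    if (max_rss_qs && max_rss_qs == max_rx_qs && !defq_rss_cap)
        max_rss_qs -= 1;

    /* if RoCE may be enabled, keep half the EQs for it */
    if (roce)
        max_evt_qs /= 2;

    need_def_rxq = defq_rss_cap ? 0 : 1;

    printf("rss=%d evt=%d need_def_rxq=%d\n",
           max_rss_qs, max_evt_qs, need_def_rxq);  /* rss=15 evt=16 need_def_rxq=1 */
    return 0;
}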
- */ - if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) { - status = be_cmd_set_sriov_config(adapter, - adapter->pool_res, - adapter->num_vfs); - if (status) - dev_err(dev, "Failed to optimize SR-IOV resources\n"); - } -} - static int be_get_config(struct be_adapter *adapter) { + int status, level; u16 profile_id; - int status; + + status = be_cmd_get_cntl_attributes(adapter); + if (status) + return status; status = be_cmd_query_fw_cfg(adapter); if (status) return status; + if (BEx_chip(adapter)) { + level = be_cmd_get_fw_log_level(adapter); + adapter->msg_enable = + level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0; + } + + be_cmd_get_acpi_wol_cap(adapter); + be_cmd_query_port_name(adapter); if (be_physfn(adapter)) { @@ -3699,9 +3853,6 @@ static int be_get_config(struct be_adapter *adapter) "Using profile 0x%x\n", profile_id); } - if (!BE2_chip(adapter) && be_physfn(adapter)) - be_sriov_config(adapter); - status = be_get_resources(adapter); if (status) return status; @@ -3711,9 +3862,6 @@ static int be_get_config(struct be_adapter *adapter) if (!adapter->pmac_id) return -ENOMEM; - /* Sanitize cfg_num_qs based on HW and platform limits */ - adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter)); - return 0; } @@ -3747,6 +3895,13 @@ static void be_schedule_worker(struct be_adapter *adapter) adapter->flags |= BE_FLAGS_WORKER_SCHEDULED; } +static void be_schedule_err_detection(struct be_adapter *adapter) +{ + schedule_delayed_work(&adapter->be_err_detection_work, + msecs_to_jiffies(1000)); + adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED; +} + static int be_setup_queues(struct be_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -3829,16 +3984,61 @@ static inline int fw_major_num(const char *fw_ver) return fw_major; } +/* If any VFs are already enabled don't FLR the PF */ +static bool be_reset_required(struct be_adapter *adapter) +{ + return pci_num_vf(adapter->pdev) ? 
false : true; +} + +/* Wait for the FW to be ready and perform the required initialization */ +static int be_func_init(struct be_adapter *adapter) +{ + int status; + + status = be_fw_wait_ready(adapter); + if (status) + return status; + + if (be_reset_required(adapter)) { + status = be_cmd_reset_function(adapter); + if (status) + return status; + + /* Wait for interrupts to quiesce after an FLR */ + msleep(100); + + /* We can clear all errors when function reset succeeds */ + be_clear_all_error(adapter); + } + + /* Tell FW we're ready to fire cmds */ + status = be_cmd_fw_init(adapter); + if (status) + return status; + + /* Allow interrupts for other ULPs running on NIC function */ + be_intr_set(adapter, true); + + return 0; +} + static int be_setup(struct be_adapter *adapter) { struct device *dev = &adapter->pdev->dev; int status; + status = be_func_init(adapter); + if (status) + return status; + be_setup_init(adapter); if (!lancer_chip(adapter)) be_cmd_req_native_mode(adapter); + if (!BE2_chip(adapter) && be_physfn(adapter)) + be_alloc_sriov_res(adapter); + status = be_get_config(adapter); if (status) goto err; @@ -3879,8 +4079,6 @@ static int be_setup(struct be_adapter *adapter) be_set_rx_mode(adapter->netdev); - be_cmd_get_acpi_wol_cap(adapter); - status = be_cmd_set_flow_control(adapter, adapter->tx_fc, adapter->rx_fc); if (status) @@ -4790,6 +4988,142 @@ static void be_netdev_init(struct net_device *netdev) netdev->ethtool_ops = &be_ethtool_ops; } +static void be_cleanup(struct be_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + rtnl_lock(); + netif_device_detach(netdev); + if (netif_running(netdev)) + be_close(netdev); + rtnl_unlock(); + + be_clear(adapter); +} + +static int be_resume(struct be_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int status; + + status = be_setup(adapter); + if (status) + return status; + + if (netif_running(netdev)) { + status = be_open(netdev); + if (status) + return status; + } + + netif_device_attach(netdev); + + return 0; +} + +static int be_err_recover(struct be_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + int status; + + status = be_resume(adapter); + if (status) + goto err; + + dev_info(dev, "Adapter recovery successful\n"); + return 0; +err: + if (be_physfn(adapter)) + dev_err(dev, "Adapter recovery failed\n"); + else + dev_err(dev, "Re-trying adapter recovery\n"); + + return status; +} + +static void be_err_detection_task(struct work_struct *work) +{ + struct be_adapter *adapter = + container_of(work, struct be_adapter, + be_err_detection_work.work); + int status = 0; + + be_detect_error(adapter); + + if (adapter->hw_error) { + be_cleanup(adapter); + + /* As of now error recovery support is in Lancer only */ + if (lancer_chip(adapter)) + status = be_err_recover(adapter); + } + + /* Always attempt recovery on VFs */ + if (!status || be_virtfn(adapter)) + be_schedule_err_detection(adapter); +} + +static void be_log_sfp_info(struct be_adapter *adapter) +{ + int status; + + status = be_cmd_query_sfp_info(adapter); + if (!status) { + dev_err(&adapter->pdev->dev, + "Unqualified SFP+ detected on %c from %s part no: %s", + adapter->port_name, adapter->phy.vendor_name, + adapter->phy.vendor_pn); + } + adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP; +} + +static void be_worker(struct work_struct *work) +{ + struct be_adapter *adapter = + container_of(work, struct be_adapter, work.work); + struct be_rx_obj *rxo; + int i; + + /* when interrupts are not yet enabled, just reap any pending + 
* mcc completions + */ + if (!netif_running(adapter->netdev)) { + local_bh_disable(); + be_process_mcc(adapter); + local_bh_enable(); + goto reschedule; + } + + if (!adapter->stats_cmd_sent) { + if (lancer_chip(adapter)) + lancer_cmd_get_pport_stats(adapter, + &adapter->stats_cmd); + else + be_cmd_get_stats(adapter, &adapter->stats_cmd); + } + + if (be_physfn(adapter) && + MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0) + be_cmd_get_die_temperature(adapter); + + for_all_rx_queues(adapter, rxo, i) { + /* Replenish RX-queues starved due to memory + * allocation failures. + */ + if (rxo->rx_post_starved) + be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST); + } + + be_eqd_update(adapter); + + if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP) + be_log_sfp_info(adapter); + +reschedule: + adapter->work_counter++; + schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); +} + static void be_unmap_pci_bars(struct be_adapter *adapter) { if (adapter->csr) @@ -4821,6 +5155,12 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter) static int be_map_pci_bars(struct be_adapter *adapter) { u8 __iomem *addr; + u32 sli_intf; + + pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf); + adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >> + SLI_INTF_FAMILY_SHIFT; + adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0; if (BEx_chip(adapter) && be_physfn(adapter)) { adapter->csr = pci_iomap(adapter->pdev, 2, 0); @@ -4842,109 +5182,93 @@ pci_map_err: return -ENOMEM; } -static void be_ctrl_cleanup(struct be_adapter *adapter) +static void be_drv_cleanup(struct be_adapter *adapter) { struct be_dma_mem *mem = &adapter->mbox_mem_alloced; - - be_unmap_pci_bars(adapter); + struct device *dev = &adapter->pdev->dev; if (mem->va) - dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, - mem->dma); + dma_free_coherent(dev, mem->size, mem->va, mem->dma); mem = &adapter->rx_filter; if (mem->va) - dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, - mem->dma); + dma_free_coherent(dev, mem->size, mem->va, mem->dma); + + mem = &adapter->stats_cmd; + if (mem->va) + dma_free_coherent(dev, mem->size, mem->va, mem->dma); } -static int be_ctrl_init(struct be_adapter *adapter) +/* Allocate and initialize various fields in be_adapter struct */ +static int be_drv_init(struct be_adapter *adapter) { struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced; struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem; struct be_dma_mem *rx_filter = &adapter->rx_filter; - u32 sli_intf; - int status; - - pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf); - adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >> - SLI_INTF_FAMILY_SHIFT; - adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 
1 : 0; - - status = be_map_pci_bars(adapter); - if (status) - goto done; + struct be_dma_mem *stats_cmd = &adapter->stats_cmd; + struct device *dev = &adapter->pdev->dev; + int status = 0; mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; - mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev, - mbox_mem_alloc->size, + mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size, &mbox_mem_alloc->dma, GFP_KERNEL); - if (!mbox_mem_alloc->va) { - status = -ENOMEM; - goto unmap_pci_bars; - } + if (!mbox_mem_alloc->va) + return -ENOMEM; + mbox_mem_align->size = sizeof(struct be_mcc_mailbox); mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); rx_filter->size = sizeof(struct be_cmd_req_rx_filter); - rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev, - rx_filter->size, &rx_filter->dma, - GFP_KERNEL); + rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size, + &rx_filter->dma, GFP_KERNEL); if (!rx_filter->va) { status = -ENOMEM; goto free_mbox; } + if (lancer_chip(adapter)) + stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats); + else if (BE2_chip(adapter)) + stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0); + else if (BE3_chip(adapter)) + stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1); + else + stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2); + stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size, + &stats_cmd->dma, GFP_KERNEL); + if (!stats_cmd->va) { + status = -ENOMEM; + goto free_rx_filter; + } + mutex_init(&adapter->mbox_lock); spin_lock_init(&adapter->mcc_lock); spin_lock_init(&adapter->mcc_cq_lock); - init_completion(&adapter->et_cmd_compl); - pci_save_state(adapter->pdev); - return 0; -free_mbox: - dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size, - mbox_mem_alloc->va, mbox_mem_alloc->dma); - -unmap_pci_bars: - be_unmap_pci_bars(adapter); - -done: - return status; -} - -static void be_stats_cleanup(struct be_adapter *adapter) -{ - struct be_dma_mem *cmd = &adapter->stats_cmd; + pci_save_state(adapter->pdev); - if (cmd->va) - dma_free_coherent(&adapter->pdev->dev, cmd->size, - cmd->va, cmd->dma); -} + INIT_DELAYED_WORK(&adapter->work, be_worker); + INIT_DELAYED_WORK(&adapter->be_err_detection_work, + be_err_detection_task); -static int be_stats_init(struct be_adapter *adapter) -{ - struct be_dma_mem *cmd = &adapter->stats_cmd; + adapter->rx_fc = true; + adapter->tx_fc = true; - if (lancer_chip(adapter)) - cmd->size = sizeof(struct lancer_cmd_req_pport_stats); - else if (BE2_chip(adapter)) - cmd->size = sizeof(struct be_cmd_req_get_stats_v0); - else if (BE3_chip(adapter)) - cmd->size = sizeof(struct be_cmd_req_get_stats_v1); - else - /* ALL non-BE ASICs */ - cmd->size = sizeof(struct be_cmd_req_get_stats_v2); + /* Must be a power of 2 or else MODULO will BUG_ON */ + adapter->be_get_temp_freq = 64; - cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma, - GFP_KERNEL); - if (!cmd->va) - return -ENOMEM; return 0; + +free_rx_filter: + dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma); +free_mbox: + dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va, + mbox_mem_alloc->dma); + return status; } static void be_remove(struct pci_dev *pdev) @@ -4957,7 +5281,7 @@ static void be_remove(struct pci_dev *pdev) be_roce_dev_remove(adapter); be_intr_set(adapter, false); - cancel_delayed_work_sync(&adapter->func_recovery_work); + 
be_cancel_err_detection(adapter); unregister_netdev(adapter->netdev); @@ -4966,9 +5290,8 @@ static void be_remove(struct pci_dev *pdev) /* tell fw we're done with firing cmds */ be_cmd_fw_clean(adapter); - be_stats_cleanup(adapter); - - be_ctrl_cleanup(adapter); + be_unmap_pci_bars(adapter); + be_drv_cleanup(adapter); pci_disable_pcie_error_reporting(pdev); @@ -4978,156 +5301,6 @@ static void be_remove(struct pci_dev *pdev) free_netdev(adapter->netdev); } -static int be_get_initial_config(struct be_adapter *adapter) -{ - int status, level; - - status = be_cmd_get_cntl_attributes(adapter); - if (status) - return status; - - /* Must be a power of 2 or else MODULO will BUG_ON */ - adapter->be_get_temp_freq = 64; - - if (BEx_chip(adapter)) { - level = be_cmd_get_fw_log_level(adapter); - adapter->msg_enable = - level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0; - } - - adapter->cfg_num_qs = netif_get_num_default_rss_queues(); - return 0; -} - -static int lancer_recover_func(struct be_adapter *adapter) -{ - struct device *dev = &adapter->pdev->dev; - int status; - - status = lancer_test_and_set_rdy_state(adapter); - if (status) - goto err; - - if (netif_running(adapter->netdev)) - be_close(adapter->netdev); - - be_clear(adapter); - - be_clear_all_error(adapter); - - status = be_setup(adapter); - if (status) - goto err; - - if (netif_running(adapter->netdev)) { - status = be_open(adapter->netdev); - if (status) - goto err; - } - - dev_err(dev, "Adapter recovery successful\n"); - return 0; -err: - if (status == -EAGAIN) - dev_err(dev, "Waiting for resource provisioning\n"); - else - dev_err(dev, "Adapter recovery failed\n"); - - return status; -} - -static void be_func_recovery_task(struct work_struct *work) -{ - struct be_adapter *adapter = - container_of(work, struct be_adapter, func_recovery_work.work); - int status = 0; - - be_detect_error(adapter); - - if (adapter->hw_error && lancer_chip(adapter)) { - rtnl_lock(); - netif_device_detach(adapter->netdev); - rtnl_unlock(); - - status = lancer_recover_func(adapter); - if (!status) - netif_device_attach(adapter->netdev); - } - - /* In Lancer, for all errors other than provisioning error (-EAGAIN), - * no need to attempt further recovery. - */ - if (!status || status == -EAGAIN) - schedule_delayed_work(&adapter->func_recovery_work, - msecs_to_jiffies(1000)); -} - -static void be_log_sfp_info(struct be_adapter *adapter) -{ - int status; - - status = be_cmd_query_sfp_info(adapter); - if (!status) { - dev_err(&adapter->pdev->dev, - "Unqualified SFP+ detected on %c from %s part no: %s", - adapter->port_name, adapter->phy.vendor_name, - adapter->phy.vendor_pn); - } - adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP; -} - -static void be_worker(struct work_struct *work) -{ - struct be_adapter *adapter = - container_of(work, struct be_adapter, work.work); - struct be_rx_obj *rxo; - int i; - - /* when interrupts are not yet enabled, just reap any pending - * mcc completions */ - if (!netif_running(adapter->netdev)) { - local_bh_disable(); - be_process_mcc(adapter); - local_bh_enable(); - goto reschedule; - } - - if (!adapter->stats_cmd_sent) { - if (lancer_chip(adapter)) - lancer_cmd_get_pport_stats(adapter, - &adapter->stats_cmd); - else - be_cmd_get_stats(adapter, &adapter->stats_cmd); - } - - if (be_physfn(adapter) && - MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0) - be_cmd_get_die_temperature(adapter); - - for_all_rx_queues(adapter, rxo, i) { - /* Replenish RX-queues starved due to memory - * allocation failures. 
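The func_recovery_work removed here is replaced by the be_err_detection_work introduced earlier in this patch: a delayed work item that is armed at probe/resume, re-arms itself from its own handler, and is torn down with be_cancel_err_detection(). The minimal module below sketches only that arm/re-arm/cancel pattern; all names are invented and none of the be2net error handling is reproduced.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct delayed_work err_work;

static void err_detection_task(struct work_struct *work)
{
    /* detect errors, attempt recovery, then poll again in a second */
    schedule_delayed_work(&err_work, msecs_to_jiffies(1000));
}

static int __init demo_init(void)
{
    INIT_DELAYED_WORK(&err_work, err_detection_task);
    schedule_delayed_work(&err_work, msecs_to_jiffies(1000));
    return 0;
}

static void __exit demo_exit(void)
{
    /* mirror of be_cancel_err_detection(): stop and wait for the handler */
    cancel_delayed_work_sync(&err_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");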
- */ - if (rxo->rx_post_starved) - be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST); - } - - be_eqd_update(adapter); - - if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP) - be_log_sfp_info(adapter); - -reschedule: - adapter->work_counter++; - schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); -} - -/* If any VFs are already enabled don't FLR the PF */ -static bool be_reset_required(struct be_adapter *adapter) -{ - return pci_num_vf(adapter->pdev) ? false : true; -} - static char *mc_name(struct be_adapter *adapter) { char *str = ""; /* default */ @@ -5226,50 +5399,17 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) if (!status) dev_info(&pdev->dev, "PCIe error reporting enabled\n"); - status = be_ctrl_init(adapter); + status = be_map_pci_bars(adapter); if (status) goto free_netdev; - /* sync up with fw's ready state */ - if (be_physfn(adapter)) { - status = be_fw_wait_ready(adapter); - if (status) - goto ctrl_clean; - } - - if (be_reset_required(adapter)) { - status = be_cmd_reset_function(adapter); - if (status) - goto ctrl_clean; - - /* Wait for interrupts to quiesce after an FLR */ - msleep(100); - } - - /* Allow interrupts for other ULPs running on NIC function */ - be_intr_set(adapter, true); - - /* tell fw we're ready to fire cmds */ - status = be_cmd_fw_init(adapter); - if (status) - goto ctrl_clean; - - status = be_stats_init(adapter); + status = be_drv_init(adapter); if (status) - goto ctrl_clean; - - status = be_get_initial_config(adapter); - if (status) - goto stats_clean; - - INIT_DELAYED_WORK(&adapter->work, be_worker); - INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task); - adapter->rx_fc = true; - adapter->tx_fc = true; + goto unmap_bars; status = be_setup(adapter); if (status) - goto stats_clean; + goto drv_cleanup; be_netdev_init(netdev); status = register_netdev(netdev); @@ -5278,8 +5418,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) be_roce_dev_add(adapter); - schedule_delayed_work(&adapter->func_recovery_work, - msecs_to_jiffies(1000)); + be_schedule_err_detection(adapter); dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev), func_name(adapter), mc_name(adapter), adapter->port_name); @@ -5288,10 +5427,10 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) unsetup: be_clear(adapter); -stats_clean: - be_stats_cleanup(adapter); -ctrl_clean: - be_ctrl_cleanup(adapter); +drv_cleanup: + be_drv_cleanup(adapter); +unmap_bars: + be_unmap_pci_bars(adapter); free_netdev: free_netdev(netdev); rel_reg: @@ -5306,21 +5445,14 @@ do_none: static int be_suspend(struct pci_dev *pdev, pm_message_t state) { struct be_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; if (adapter->wol_en) be_setup_wol(adapter, true); be_intr_set(adapter, false); - cancel_delayed_work_sync(&adapter->func_recovery_work); + be_cancel_err_detection(adapter); - netif_device_detach(netdev); - if (netif_running(netdev)) { - rtnl_lock(); - be_close(netdev); - rtnl_unlock(); - } - be_clear(adapter); + be_cleanup(adapter); pci_save_state(pdev); pci_disable_device(pdev); @@ -5328,13 +5460,10 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state) return 0; } -static int be_resume(struct pci_dev *pdev) +static int be_pci_resume(struct pci_dev *pdev) { - int status = 0; struct be_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - - netif_device_detach(netdev); + int status = 0; status = 
pci_enable_device(pdev); if (status) @@ -5343,30 +5472,11 @@ static int be_resume(struct pci_dev *pdev) pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); - status = be_fw_wait_ready(adapter); + status = be_resume(adapter); if (status) return status; - status = be_cmd_reset_function(adapter); - if (status) - return status; - - be_intr_set(adapter, true); - /* tell fw we're ready to fire cmds */ - status = be_cmd_fw_init(adapter); - if (status) - return status; - - be_setup(adapter); - if (netif_running(netdev)) { - rtnl_lock(); - be_open(netdev); - rtnl_unlock(); - } - - schedule_delayed_work(&adapter->func_recovery_work, - msecs_to_jiffies(1000)); - netif_device_attach(netdev); + be_schedule_err_detection(adapter); if (adapter->wol_en) be_setup_wol(adapter, false); @@ -5386,7 +5496,7 @@ static void be_shutdown(struct pci_dev *pdev) be_roce_dev_shutdown(adapter); cancel_delayed_work_sync(&adapter->work); - cancel_delayed_work_sync(&adapter->func_recovery_work); + be_cancel_err_detection(adapter); netif_device_detach(adapter->netdev); @@ -5399,22 +5509,15 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct be_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; dev_err(&adapter->pdev->dev, "EEH error detected\n"); if (!adapter->eeh_error) { adapter->eeh_error = true; - cancel_delayed_work_sync(&adapter->func_recovery_work); + be_cancel_err_detection(adapter); - rtnl_lock(); - netif_device_detach(netdev); - if (netif_running(netdev)) - be_close(netdev); - rtnl_unlock(); - - be_clear(adapter); + be_cleanup(adapter); } if (state == pci_channel_io_perm_failure) @@ -5465,43 +5568,73 @@ static void be_eeh_resume(struct pci_dev *pdev) { int status = 0; struct be_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; dev_info(&adapter->pdev->dev, "EEH resume\n"); pci_save_state(pdev); - status = be_cmd_reset_function(adapter); + status = be_resume(adapter); if (status) goto err; - /* On some BE3 FW versions, after a HW reset, - * interrupts will remain disabled for each function. - * So, explicitly enable interrupts + be_schedule_err_detection(adapter); + return; +err: + dev_err(&adapter->pdev->dev, "EEH resume failed\n"); +} + +static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct be_adapter *adapter = pci_get_drvdata(pdev); + u16 num_vf_qs; + int status; + + if (!num_vfs) + be_vf_clear(adapter); + + adapter->num_vfs = num_vfs; + + if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) { + dev_warn(&pdev->dev, + "Cannot disable VFs while they are assigned\n"); + return -EBUSY; + } + + /* When the HW is in SRIOV capable configuration, the PF-pool resources + * are equally distributed across the max-number of VFs. The user may + * request only a subset of the max-vfs to be enabled. + * Based on num_vfs, redistribute the resources across num_vfs so that + * each VF will have access to more number of resources. + * This facility is not available in BE3 FW. + * Also, this is done by FW in Lancer chip. 
*/ - be_intr_set(adapter, true); + if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) { + num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs); + status = be_cmd_set_sriov_config(adapter, adapter->pool_res, + adapter->num_vfs, num_vf_qs); + if (status) + dev_err(&pdev->dev, + "Failed to optimize SR-IOV resources\n"); + } - /* tell fw we're ready to fire cmds */ - status = be_cmd_fw_init(adapter); + status = be_get_resources(adapter); if (status) - goto err; + return be_cmd_status(status); - status = be_setup(adapter); + /* Updating real_num_tx/rx_queues() requires rtnl_lock() */ + rtnl_lock(); + status = be_update_queues(adapter); + rtnl_unlock(); if (status) - goto err; + return be_cmd_status(status); - if (netif_running(netdev)) { - status = be_open(netdev); - if (status) - goto err; - } + if (adapter->num_vfs) + status = be_vf_setup(adapter); - schedule_delayed_work(&adapter->func_recovery_work, - msecs_to_jiffies(1000)); - netif_device_attach(netdev); - return; -err: - dev_err(&adapter->pdev->dev, "EEH resume failed\n"); + if (!status) + return adapter->num_vfs; + + return 0; } static const struct pci_error_handlers be_eeh_handlers = { @@ -5516,8 +5649,9 @@ static struct pci_driver be_driver = { .probe = be_probe, .remove = be_remove, .suspend = be_suspend, - .resume = be_resume, + .resume = be_pci_resume, .shutdown = be_shutdown, + .sriov_configure = be_pci_sriov_configure, .err_handler = &be_eeh_handlers }; @@ -5531,6 +5665,11 @@ static int __init be_init_module(void) rx_frag_size = 2048; } + if (num_vfs > 0) { + pr_info(DRV_NAME " : Module param num_vfs is obsolete."); + pr_info(DRV_NAME " : Use sysfs method to enable VFs\n"); + } + return pci_register_driver(&be_driver); } module_init(be_init_module); diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 1f9cf23..4585895 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -136,7 +136,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable) */ writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel)); - /* It is recommended to doulbe check the TMODE field in the + /* It is recommended to double check the TMODE field in the * TCSR register to be cleared before the first compare counter * is written into TCCR register. Just add a double check. 
*/ diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index e9c3a87..05f8839 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -414,7 +414,7 @@ enum cb_status { /** * cb_command - Command Block flags - * @cb_tx_nc: 0: controler does CRC (normal), 1: CRC from skb memory + * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory */ enum cb_command { cb_nop = 0x0000, diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 7f997d3..b548ef0 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -144,6 +144,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring, int *work_done, int work_to_do); +static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ +} static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring, int cleaned_count); @@ -516,6 +521,7 @@ void e1000_down(struct e1000_adapter *adapter) struct net_device *netdev = adapter->netdev; u32 rctl, tctl; + netif_carrier_off(netdev); /* disable receives in the hardware */ rctl = er32(RCTL); @@ -544,7 +550,6 @@ void e1000_down(struct e1000_adapter *adapter) adapter->link_speed = 0; adapter->link_duplex = 0; - netif_carrier_off(netdev); e1000_reset(adapter); e1000_clean_all_tx_rings(adapter); @@ -1111,7 +1116,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (e1000_read_mac_addr(hw)) e_err(probe, "EEPROM Read Error\n"); } - /* don't block initalization here due to bad MAC address */ + /* don't block initialization here due to bad MAC address */ memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); if (!is_valid_ether_addr(netdev->dev_addr)) @@ -3552,8 +3557,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) msleep(1); /* e1000_down has a dependency on max_frame_size */ hw->max_frame_size = max_frame; - if (netif_running(netdev)) + if (netif_running(netdev)) { + /* prevent buffers from being reallocated */ + adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers; e1000_down(adapter); + } /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN * means we reserve 2 more, this pushes us to allocate from the next diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index bb7ab3c..0570c66 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -141,6 +141,7 @@ #define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ #define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ #define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */ +#define E1000_RCTL_RDMTS_HEX 0x00010000 #define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ #define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ #define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 9416e5a..a69f09e 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -132,6 +132,7 @@ enum e1000_boards { board_pchlan, board_pch2lan, board_pch_lpt, + board_pch_spt }; struct e1000_ps_page { @@ -501,6 +502,7 @@ extern const 
struct e1000_info e1000_ich10_info; extern const struct e1000_info e1000_pch_info; extern const struct e1000_info e1000_pch2_info; extern const struct e1000_info e1000_pch_lpt_info; +extern const struct e1000_info e1000_pch_spt_info; extern const struct e1000_info e1000_es2_info; void e1000e_ptp_init(struct e1000_adapter *adapter); diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 865ce45..11f486e 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -896,18 +896,20 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: mask |= (1 << 18); break; default: break; } - if (mac->type == e1000_pch_lpt) + if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >> E1000_FWSM_WLOCK_MAC_SHIFT; for (i = 0; i < mac->rar_entry_count; i++) { - if (mac->type == e1000_pch_lpt) { + if ((mac->type == e1000_pch_lpt) || + (mac->type == e1000_pch_spt)) { /* Cannot test write-protected SHRAL[n] registers */ if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac))) continue; diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 72f5475..19e8c48 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -87,6 +87,10 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_I218_V2 0x15A1 #define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */ #define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_LM 0x156F /* SPT PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_V 0x1570 /* SPT PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* SPT-H PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* SPT-H PCH */ #define E1000_REVISION_4 4 @@ -108,6 +112,7 @@ enum e1000_mac_type { e1000_pchlan, e1000_pch2lan, e1000_pch_lpt, + e1000_pch_spt, }; enum e1000_media_type { @@ -153,6 +158,7 @@ enum e1000_bus_width { e1000_bus_width_pcie_x1, e1000_bus_width_pcie_x2, e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, e1000_bus_width_32, e1000_bus_width_64, e1000_bus_width_reserved diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 48b74a5..9d81c03 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -123,6 +123,14 @@ static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, u16 *data); static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, u8 size, u16 *data); +static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, + u32 *data); +static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 *data); +static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, + u32 offset, u32 data); +static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 dword); static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); static s32 e1000_led_on_ich8lan(struct e1000_hw *hw); @@ -229,7 +237,8 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) if (ret_val) return false; out: - if (hw->mac.type == e1000_pch_lpt) { + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { /* Unforce SMBus mode in PHY */ e1e_rphy_locked(hw, 
CV_SMB_CTRL, &phy_reg); phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; @@ -321,6 +330,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) */ switch (hw->mac.type) { case e1000_pch_lpt: + case e1000_pch_spt: if (e1000_phy_is_accessible_pchlan(hw)) break; @@ -461,6 +471,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) /* fall-through */ case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: /* In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again. */ @@ -590,35 +601,54 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 gfpreg, sector_base_addr, sector_end_addr; u16 i; - - /* Can't read flash registers if the register set isn't mapped. */ - if (!hw->flash_address) { - e_dbg("ERROR: Flash registers not mapped\n"); - return -E1000_ERR_CONFIG; - } + u32 nvm_size; nvm->type = e1000_nvm_flash_sw; - gfpreg = er32flash(ICH_FLASH_GFPREG); + if (hw->mac.type == e1000_pch_spt) { + /* in SPT, gfpreg doesn't exist. NVM size is taken from the + * STRAP register. This is because in SPT the GbE Flash region + * is no longer accessed through the flash registers. Instead, + * the mechanism has changed, and the Flash region access + * registers are now implemented in GbE memory space. + */ + nvm->flash_base_addr = 0; + nvm_size = (((er32(STRAP) >> 1) & 0x1F) + 1) + * NVM_SIZE_MULTIPLIER; + nvm->flash_bank_size = nvm_size / 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + /* Set the base address for flash register access */ + hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR; + } else { + /* Can't read flash registers if register set isn't mapped. */ + if (!hw->flash_address) { + e_dbg("ERROR: Flash registers not mapped\n"); + return -E1000_ERR_CONFIG; + } - /* sector_X_addr is a "sector"-aligned address (4096 bytes) - * Add 1 to sector_end_addr since this sector is included in - * the overall size. - */ - sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; - sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; + gfpreg = er32flash(ICH_FLASH_GFPREG); - /* flash_base_addr is byte-aligned */ - nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; + /* sector_X_addr is a "sector"-aligned address (4096 bytes) + * Add 1 to sector_end_addr since this sector is included in + * the overall size. + */ + sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; + sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; - /* find total size of the NVM, then cut in half since the total - * size represents two separate NVM banks. - */ - nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) - << FLASH_SECTOR_ADDR_SHIFT); - nvm->flash_bank_size /= 2; - /* Adjust to word count */ - nvm->flash_bank_size /= sizeof(u16); + /* flash_base_addr is byte-aligned */ + nvm->flash_base_addr = sector_base_addr + << FLASH_SECTOR_ADDR_SHIFT; + + /* find total size of the NVM, then cut in half since the total + * size represents two separate NVM banks. 
+ */ + nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) + << FLASH_SECTOR_ADDR_SHIFT); + nvm->flash_bank_size /= 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + } nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS; @@ -682,6 +712,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) mac->ops.rar_set = e1000_rar_set_pch2lan; /* fall-through */ case e1000_pch_lpt: + case e1000_pch_spt: case e1000_pchlan: /* check management mode */ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; @@ -699,7 +730,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) break; } - if (mac->type == e1000_pch_lpt) { + if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) { mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; mac->ops.rar_set = e1000_rar_set_pch_lpt; mac->ops.setup_physical_interface = @@ -919,8 +950,9 @@ release: /* clear FEXTNVM6 bit 8 on link down or 10/100 */ fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK; - if (!link || ((status & E1000_STATUS_SPEED_100) && - (status & E1000_STATUS_FD))) + if ((hw->phy.revision > 5) || !link || + ((status & E1000_STATUS_SPEED_100) && + (status & E1000_STATUS_FD))) goto update_fextnvm6; ret_val = e1e_rphy(hw, I217_INBAND_CTRL, ®); @@ -1100,6 +1132,21 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) if (ret_val) goto out; + /* Si workaround for ULP entry flow on i127/rev6 h/w. Enable + * LPLU and disable Gig speed when entering ULP + */ + if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) { + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS, + &phy_reg); + if (ret_val) + goto release; + phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS; + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS, + phy_reg); + if (ret_val) + goto release; + } + /* Force SMBus mode in PHY */ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); if (ret_val) @@ -1302,7 +1349,8 @@ out: static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; - s32 ret_val; + s32 ret_val, tipg_reg = 0; + u16 emi_addr, emi_val = 0; bool link; u16 phy_reg; @@ -1333,48 +1381,55 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) * the IPG and reduce Rx latency in the PHY. 
*/ if (((hw->mac.type == e1000_pch2lan) || - (hw->mac.type == e1000_pch_lpt)) && link) { + (hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) && link) { u32 reg; reg = er32(STATUS); + tipg_reg = er32(TIPG); + tipg_reg &= ~E1000_TIPG_IPGT_MASK; + if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) { - u16 emi_addr; + tipg_reg |= 0xFF; + /* Reduce Rx latency in analog PHY */ + emi_val = 0; + } else { - reg = er32(TIPG); - reg &= ~E1000_TIPG_IPGT_MASK; - reg |= 0xFF; - ew32(TIPG, reg); + /* Roll back the default values */ + tipg_reg |= 0x08; + emi_val = 1; + } - /* Reduce Rx latency in analog PHY */ - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; + ew32(TIPG, tipg_reg); - if (hw->mac.type == e1000_pch2lan) - emi_addr = I82579_RX_CONFIG; - else - emi_addr = I217_RX_CONFIG; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; - ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0); + if (hw->mac.type == e1000_pch2lan) + emi_addr = I82579_RX_CONFIG; + else + emi_addr = I217_RX_CONFIG; + ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val); - hw->phy.ops.release(hw); + hw->phy.ops.release(hw); - if (ret_val) - return ret_val; - } + if (ret_val) + return ret_val; } /* Work-around I218 hang issue */ if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) || - (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) { + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3) || + (hw->mac.type == e1000_pch_spt)) { ret_val = e1000_k1_workaround_lpt_lp(hw, link); if (ret_val) return ret_val; } - - if (hw->mac.type == e1000_pch_lpt) { + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { /* Set platform power management values for * Latency Tolerance Reporting (LTR) */ @@ -1386,6 +1441,19 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) /* Clear link partner's EEE ability */ hw->dev_spec.ich8lan.eee_lp_ability = 0; + /* FEXTNVM6 K1-off workaround */ + if (hw->mac.type == e1000_pch_spt) { + u32 pcieanacfg = er32(PCIEANACFG); + u32 fextnvm6 = er32(FEXTNVM6); + + if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE) + fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE; + else + fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE; + + ew32(FEXTNVM6, fextnvm6); + } + if (!link) return 0; /* No link detected */ @@ -1479,6 +1547,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: rc = e1000_init_phy_params_pchlan(hw); break; default: @@ -1929,6 +1998,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; break; default: @@ -2961,6 +3031,20 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) s32 ret_val; switch (hw->mac.type) { + /* In SPT, read from the CTRL_EXT reg instead of + * accessing the sector valid bits from the nvm + */ + case e1000_pch_spt: + *bank = er32(CTRL_EXT) + & E1000_CTRL_EXT_NVMVS; + if ((*bank == 0) || (*bank == 1)) { + e_dbg("ERROR: No valid NVM bank present\n"); + return -E1000_ERR_NVM; + } else { + *bank = *bank - 2; + return 0; + } + break; case e1000_ich8lan: case e1000_ich9lan: eecd = er32(EECD); @@ -3008,6 +3092,99 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) } 
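As a side note outside the hunks: the SPT case added to e1000_valid_nvm_bank_detect_ich8lan() above no longer derives the valid bank from the flash sector-valid bits; it takes the two NVMVS bits of CTRL_EXT, where raw values 0 and 1 mean no valid bank is present and 2 and 3 select bank 0 and 1 respectively. A minimal sketch of that mapping, assuming the er32(), e_dbg() and E1000_CTRL_EXT_NVMVS (0x3) definitions that appear elsewhere in this patch:

	/* Illustrative sketch only: mirrors the e1000_pch_spt case added to
	 * e1000_valid_nvm_bank_detect_ich8lan() in the hunk above.
	 */
	static s32 spt_detect_valid_nvm_bank(struct e1000_hw *hw, u32 *bank)
	{
		u32 nvmvs = er32(CTRL_EXT) & E1000_CTRL_EXT_NVMVS;

		if (nvmvs < 2) {
			e_dbg("ERROR: No valid NVM bank present\n");
			return -E1000_ERR_NVM;	/* raw 0 or 1: no valid sector */
		}

		*bank = nvmvs - 2;		/* raw 2 -> bank 0, raw 3 -> bank 1 */
		return 0;
	}
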
/** + * e1000_read_nvm_spt - NVM access for SPT + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to read. + * @words: Size of data to read in words. + * @data: pointer to the word(s) to read at offset. + * + * Reads a word(s) from the NVM + **/ +static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 act_offset; + s32 ret_val = 0; + u32 bank = 0; + u32 dword = 0; + u16 offset_to_read; + u16 i; + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + nvm->ops.acquire(hw); + + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + act_offset = (bank) ? nvm->flash_bank_size : 0; + act_offset += offset; + + ret_val = 0; + + for (i = 0; i < words; i += 2) { + if (words - i == 1) { + if (dev_spec->shadow_ram[offset + i].modified) { + data[i] = + dev_spec->shadow_ram[offset + i].value; + } else { + offset_to_read = act_offset + i - + ((act_offset + i) % 2); + ret_val = + e1000_read_flash_dword_ich8lan(hw, + offset_to_read, + &dword); + if (ret_val) + break; + if ((act_offset + i) % 2 == 0) + data[i] = (u16)(dword & 0xFFFF); + else + data[i] = (u16)((dword >> 16) & 0xFFFF); + } + } else { + offset_to_read = act_offset + i; + if (!(dev_spec->shadow_ram[offset + i].modified) || + !(dev_spec->shadow_ram[offset + i + 1].modified)) { + ret_val = + e1000_read_flash_dword_ich8lan(hw, + offset_to_read, + &dword); + if (ret_val) + break; + } + if (dev_spec->shadow_ram[offset + i].modified) + data[i] = + dev_spec->shadow_ram[offset + i].value; + else + data[i] = (u16)(dword & 0xFFFF); + if (dev_spec->shadow_ram[offset + i].modified) + data[i + 1] = + dev_spec->shadow_ram[offset + i + 1].value; + else + data[i + 1] = (u16)(dword >> 16 & 0xFFFF); + } + } + + nvm->ops.release(hw); + +out: + if (ret_val) + e_dbg("NVM read error: %d\n", ret_val); + + return ret_val; +} + +/** * e1000_read_nvm_ich8lan - Read word(s) from the NVM * @hw: pointer to the HW structure * @offset: The offset (in bytes) of the word(s) to read. @@ -3090,8 +3267,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) /* Clear FCERR and DAEL in hw status by writing 1 */ hsfsts.hsf_status.flcerr = 1; hsfsts.hsf_status.dael = 1; - - ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); + else + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); /* Either we should have a hardware SPI cycle in progress * bit to check against, in order to start a new cycle or @@ -3107,7 +3286,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) * Begin by setting Flash Cycle Done. */ hsfsts.hsf_status.flcdone = 1; - ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); + else + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); ret_val = 0; } else { s32 i; @@ -3128,7 +3310,11 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) * now set the Flash Cycle Done. 
*/ hsfsts.hsf_status.flcdone = 1; - ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, + hsfsts.regval & 0xFFFF); + else + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); } else { e_dbg("Flash controller busy, cannot get access\n"); } @@ -3151,9 +3337,16 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) u32 i = 0; /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ - hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); hsflctl.hsf_ctrl.flcgo = 1; - ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); /* wait till FDONE bit is set to 1 */ do { @@ -3170,6 +3363,23 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) } /** + * e1000_read_flash_dword_ich8lan - Read dword from flash + * @hw: pointer to the HW structure + * @offset: offset to data location + * @data: pointer to the location for storing the data + * + * Reads the flash dword at offset into data. Offset is converted + * to bytes before read. + **/ +static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset, + u32 *data) +{ + /* Must convert word offset into bytes. */ + offset <<= 1; + return e1000_read_flash_data32_ich8lan(hw, offset, data); +} + +/** * e1000_read_flash_word_ich8lan - Read word from flash * @hw: pointer to the HW structure * @offset: offset to data location @@ -3201,7 +3411,14 @@ static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, s32 ret_val; u16 word = 0; - ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); + /* In SPT, only 32 bits access is supported, + * so this function should not be called. + */ + if (hw->mac.type == e1000_pch_spt) + return -E1000_ERR_NVM; + else + ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); + if (ret_val) return ret_val; @@ -3287,6 +3504,82 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, } /** + * e1000_read_flash_data32_ich8lan - Read dword from NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the dword to read. + * @data: Pointer to the dword to store the value read. + * + * Reads a byte or word from the NVM using the flash access registers. + **/ + +static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, + u32 *data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + s32 ret_val = -E1000_ERR_NVM; + u8 count = 0; + + if (offset > ICH_FLASH_LINEAR_ADDR_MASK || + hw->mac.type != e1000_pch_spt) + return -E1000_ERR_NVM; + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + break; + /* In SPT, This register is in Lan memory space, not flash. + * Therefore, only 32 bit access is supported + */ + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; + + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; + /* In SPT, This register is in Lan memory space, not flash. 
+ * Therefore, only 32 bit access is supported + */ + ew32flash(ICH_FLASH_HSFSTS, (u32)hsflctl.regval << 16); + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_READ_COMMAND_TIMEOUT); + + /* Check if FCERR is set to 1, if set to 1, clear it + * and try the whole sequence a few more times, else + * read in (shift in) the Flash Data0, the order is + * least significant byte first msb to lsb + */ + if (!ret_val) { + *data = er32flash(ICH_FLASH_FDATA0); + break; + } else { + /* If we've gotten here, then things are probably + * completely hosed, but if the error condition is + * detected, it won't hurt to give it another try... + * ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr) { + /* Repeat for some time before giving up. */ + continue; + } else if (!hsfsts.hsf_status.flcdone) { + e_dbg("Timeout error - flash cycle did not complete.\n"); + break; + } + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** * e1000_write_nvm_ich8lan - Write word(s) to the NVM * @hw: pointer to the HW structure * @offset: The offset (in bytes) of the word(s) to write. @@ -3321,7 +3614,7 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, } /** - * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM + * e1000_update_nvm_checksum_spt - Update the checksum for NVM * @hw: pointer to the HW structure * * The NVM checksum is updated by calling the generic update_nvm_checksum, @@ -3331,13 +3624,13 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, * After a successful commit, the shadow ram is cleared and is ready for * future writes. **/ -static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) +static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 i, act_offset, new_bank_offset, old_bank_offset, bank; s32 ret_val; - u16 data; + u32 dword = 0; ret_val = e1000e_update_nvm_checksum_generic(hw); if (ret_val) @@ -3371,12 +3664,175 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) if (ret_val) goto release; } - - for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i += 2) { /* Determine whether to write the value stored * in the other NVM bank or a modified value stored * in the shadow RAM */ + ret_val = e1000_read_flash_dword_ich8lan(hw, + i + old_bank_offset, + &dword); + + if (dev_spec->shadow_ram[i].modified) { + dword &= 0xffff0000; + dword |= (dev_spec->shadow_ram[i].value & 0xffff); + } + if (dev_spec->shadow_ram[i + 1].modified) { + dword &= 0x0000ffff; + dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff) + << 16); + } + if (ret_val) + break; + + /* If the word is 0x13, then make sure the signature bits + * (15:14) are 11b until the commit has completed. + * This will allow us to write 10b which indicates the + * signature is valid. We want to do this after the write + * has completed so that we don't mark the segment valid + * while the write is still in progress + */ + if (i == E1000_ICH_NVM_SIG_WORD - 1) + dword |= E1000_ICH_NVM_SIG_MASK << 16; + + /* Convert offset to bytes. */ + act_offset = (i + new_bank_offset) << 1; + + usleep_range(100, 200); + + /* Write the data to the new bank. 
Offset in words */ + act_offset = i + new_bank_offset; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, + dword); + if (ret_val) + break; + } + + /* Don't bother writing the segment valid bits if sector + * programming failed. + */ + if (ret_val) { + /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ + e_dbg("Flash commit failed.\n"); + goto release; + } + + /* Finally validate the new segment by setting bit 15:14 + * to 10b in word 0x13 , this can be done without an + * erase as well since these bits are 11 to start with + * and we need to change bit 14 to 0b + */ + act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; + + /*offset in words but we read dword */ + --act_offset; + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); + + if (ret_val) + goto release; + + dword &= 0xBFFFFFFF; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); + + if (ret_val) + goto release; + + /* And invalidate the previously valid segment by setting + * its signature word (0x13) high_byte to 0b. This can be + * done without an erase because flash erase sets all bits + * to 1's. We can write 1's to 0's without an erase + */ + act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; + + /* offset in words but we read dword */ + act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1; + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); + + if (ret_val) + goto release; + + dword &= 0x00FFFFFF; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); + + if (ret_val) + goto release; + + /* Great! Everything worked, we can now clear the cached entries. */ + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + dev_spec->shadow_ram[i].modified = false; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + +release: + nvm->ops.release(hw); + + /* Reload the EEPROM, or else modifications will not appear + * until after the next adapter reset. + */ + if (!ret_val) { + nvm->ops.reload(hw); + usleep_range(10000, 20000); + } + +out: + if (ret_val) + e_dbg("NVM update error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM + * @hw: pointer to the HW structure + * + * The NVM checksum is updated by calling the generic update_nvm_checksum, + * which writes the checksum to the shadow ram. The changes in the shadow + * ram are then committed to the EEPROM by processing each bank at a time + * checking for the modified bit and writing only the pending changes. + * After a successful commit, the shadow ram is cleared and is ready for + * future writes. + **/ +static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 i, act_offset, new_bank_offset, old_bank_offset, bank; + s32 ret_val; + u16 data = 0; + + ret_val = e1000e_update_nvm_checksum_generic(hw); + if (ret_val) + goto out; + + if (nvm->type != e1000_nvm_flash_sw) + goto out; + + nvm->ops.acquire(hw); + + /* We're writing to the opposite bank so if we're on bank 1, + * write to bank 0 etc. 
We also need to erase the segment that + * is going to be written + */ + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + if (bank == 0) { + new_bank_offset = nvm->flash_bank_size; + old_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); + if (ret_val) + goto release; + } else { + old_bank_offset = nvm->flash_bank_size; + new_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); + if (ret_val) + goto release; + } + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { if (dev_spec->shadow_ram[i].modified) { data = dev_spec->shadow_ram[i].value; } else { @@ -3498,6 +3954,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) */ switch (hw->mac.type) { case e1000_pch_lpt: + case e1000_pch_spt: word = NVM_COMPAT; valid_csum_mask = NVM_COMPAT_VALID_CSUM; break; @@ -3583,9 +4040,13 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, s32 ret_val; u8 count = 0; - if (size < 1 || size > 2 || data > size * 0xff || - offset > ICH_FLASH_LINEAR_ADDR_MASK) - return -E1000_ERR_NVM; + if (hw->mac.type == e1000_pch_spt) { + if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + } else { + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + } flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + hw->nvm.flash_base_addr); @@ -3596,12 +4057,25 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, ret_val = e1000_flash_cycle_init_ich8lan(hw); if (ret_val) break; + /* In SPT, This register is in Lan memory space, not + * flash. Therefore, only 32 bit access is supported + */ + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); - hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ hsflctl.hsf_ctrl.fldbcount = size - 1; hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; - ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + /* In SPT, This register is in Lan memory space, + * not flash. Therefore, only 32 bit access is + * supported + */ + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); ew32flash(ICH_FLASH_FADDR, flash_linear_addr); @@ -3640,6 +4114,90 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, } /** +* e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM +* @hw: pointer to the HW structure +* @offset: The offset (in bytes) of the dwords to read. +* @data: The 4 bytes to write to the NVM. +* +* Writes one/two/four bytes to the NVM using the flash access registers. +**/ +static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, + u32 data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + s32 ret_val; + u8 count = 0; + + if (hw->mac.type == e1000_pch_spt) { + if (offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + } + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + break; + + /* In SPT, This register is in Lan memory space, not + * flash. 
Therefore, only 32 bit access is supported + */ + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) + >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + + hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; + + /* In SPT, This register is in Lan memory space, + * not flash. Therefore, only 32 bit access is + * supported + */ + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ew32flash(ICH_FLASH_FDATA0, data); + + /* check if FCERR is set to 1 , if set to 1, clear it + * and try the whole sequence a few more times else done + */ + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_WRITE_COMMAND_TIMEOUT); + + if (!ret_val) + break; + + /* If we're here, then things are most likely + * completely hosed, but if the error condition + * is detected, it won't hurt to give it another + * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + + if (hsfsts.hsf_status.flcerr) + /* Repeat for some time before giving up. */ + continue; + if (!hsfsts.hsf_status.flcdone) { + e_dbg("Timeout error - flash cycle did not complete.\n"); + break; + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** * e1000_write_flash_byte_ich8lan - Write a single byte to NVM * @hw: pointer to the HW structure * @offset: The index of the byte to read. @@ -3656,6 +4214,40 @@ static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, } /** +* e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM +* @hw: pointer to the HW structure +* @offset: The offset of the word to write. +* @dword: The dword to write to the NVM. +* +* Writes a single dword to the NVM using the flash access registers. +* Goes through a retry algorithm before giving up. +**/ +static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 dword) +{ + s32 ret_val; + u16 program_retries; + + /* Must convert word offset into bytes. */ + offset <<= 1; + ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword); + + if (!ret_val) + return ret_val; + for (program_retries = 0; program_retries < 100; program_retries++) { + e_dbg("Retrying Byte %8.8X at offset %u\n", dword, offset); + usleep_range(100, 200); + ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword); + if (!ret_val) + break; + } + if (program_retries == 100) + return -E1000_ERR_NVM; + + return 0; +} + +/** * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM * @hw: pointer to the HW structure * @offset: The offset of the byte to write. 
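As a side note between hunks: the new e1000_retry_write_flash_dword_ich8lan() boils down to converting the word offset to a byte offset (the flash interface is byte addressed), attempting the 32-bit write once, and on failure retrying with a short delay for up to 100 attempts before reporting an NVM error. A condensed sketch of that flow, assuming the e1000_write_flash_data32_ich8lan() helper added in the hunk above:

	/* Illustrative sketch of the SPT dword write-with-retry flow. */
	static s32 spt_write_dword_with_retry(struct e1000_hw *hw, u32 word_offset,
					      u32 dword)
	{
		u32 byte_offset = word_offset << 1;	/* words -> bytes */
		u16 attempt;

		for (attempt = 0; attempt <= 100; attempt++) {
			if (!e1000_write_flash_data32_ich8lan(hw, byte_offset, dword))
				return 0;
			usleep_range(100, 200);		/* brief back-off, as in the patch */
		}

		return -E1000_ERR_NVM;
	}
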
@@ -3759,9 +4351,18 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) /* Write a value 11 (block Erase) in Flash * Cycle field in hw flash control */ - hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = + er32flash(ICH_FLASH_HSFSTS) >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; - ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, + hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); /* Write the last 24 bits of an index within the * block into Flash Linear address field in Flash @@ -4180,7 +4781,8 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) ew32(RFCTL, reg); /* Enable ECC on Lynxpoint */ - if (hw->mac.type == e1000_pch_lpt) { + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { reg = er32(PBECCSTS); reg |= E1000_PBECCSTS_ECC_ENABLE; ew32(PBECCSTS, reg); @@ -4583,7 +5185,8 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || (device_id == E1000_DEV_ID_PCH_I218_LM3) || - (device_id == E1000_DEV_ID_PCH_I218_V3)) { + (device_id == E1000_DEV_ID_PCH_I218_V3) || + (hw->mac.type == e1000_pch_spt)) { u32 fextnvm6 = er32(FEXTNVM6); ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); @@ -5058,6 +5661,17 @@ static const struct e1000_nvm_operations ich8_nvm_ops = { .write = e1000_write_nvm_ich8lan, }; +static const struct e1000_nvm_operations spt_nvm_ops = { + .acquire = e1000_acquire_nvm_ich8lan, + .release = e1000_release_nvm_ich8lan, + .read = e1000_read_nvm_spt, + .update = e1000_update_nvm_checksum_spt, + .reload = e1000e_reload_nvm_generic, + .valid_led_default = e1000_valid_led_default_ich8lan, + .validate = e1000_validate_nvm_checksum_ich8lan, + .write = e1000_write_nvm_ich8lan, +}; + const struct e1000_info e1000_ich8_info = { .mac = e1000_ich8lan, .flags = FLAG_HAS_WOL @@ -5166,3 +5780,23 @@ const struct e1000_info e1000_pch_lpt_info = { .phy_ops = &ich8_phy_ops, .nvm_ops = &ich8_nvm_ops, }; + +const struct e1000_info e1000_pch_spt_info = { + .mac = e1000_pch_spt, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS + | FLAG2_HAS_EEE, + .pba = 26, + .max_hw_frame_size = 9018, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &spt_nvm_ops, +}; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 8066a49..770a573 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -95,9 +95,18 @@ #define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100 #define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200 +#define E1000_FEXTNVM6_K1_OFF_ENABLE 0x80000000 +/* bit for disabling packet buffer read */ +#define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000 #define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020 +#define K1_ENTRY_LATENCY 0 +#define K1_MIN_TIME 1 +#define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field */ +#define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs */ +#define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */ + #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL #define 
E1000_ICH_RAR_ENTRIES 7 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 1e8c40f..4be4576 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -70,6 +70,7 @@ static const struct e1000_info *e1000_info_tbl[] = { [board_pchlan] = &e1000_pch_info, [board_pch2lan] = &e1000_pch2_info, [board_pch_lpt] = &e1000_pch_lpt_info, + [board_pch_spt] = &e1000_pch_spt_info, }; struct e1000_reg_info { @@ -1796,7 +1797,8 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) } /* Reset on uncorrectable ECC error */ - if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { + if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt))) { u32 pbeccsts = er32(PBECCSTS); adapter->corr_errors += @@ -1876,7 +1878,8 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data) } /* Reset on uncorrectable ECC error */ - if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { + if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt))) { u32 pbeccsts = er32(PBECCSTS); adapter->corr_errors += @@ -2257,7 +2260,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) if (adapter->msix_entries) { ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); - } else if (hw->mac.type == e1000_pch_lpt) { + } else if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); } else { ew32(IMS, IMS_ENABLE_MASK); @@ -3014,6 +3018,19 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) ew32(TCTL, tctl); hw->mac.ops.config_collision_dist(hw); + + /* SPT Si errata workaround to avoid data corruption */ + if (hw->mac.type == e1000_pch_spt) { + u32 reg_val; + + reg_val = er32(IOSFPC); + reg_val |= E1000_RCTL_RDMTS_HEX; + ew32(IOSFPC, reg_val); + + reg_val = er32(TARC(0)); + reg_val |= E1000_TARC0_CB_MULTIQ_3_REQ; + ew32(TARC(0), reg_val); + } } /** @@ -3490,8 +3507,11 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) struct e1000_hw *hw = &adapter->hw; u32 incvalue, incperiod, shift; - /* Make sure clock is enabled on I217 before checking the frequency */ - if ((hw->mac.type == e1000_pch_lpt) && + /* Make sure clock is enabled on I217/I218/I219 before checking + * the frequency + */ + if (((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) && !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) && !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) { u32 fextnvm7 = er32(FEXTNVM7); @@ -3505,10 +3525,13 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) switch (hw->mac.type) { case e1000_pch2lan: case e1000_pch_lpt: - /* On I217, the clock frequency is 25MHz or 96MHz as - * indicated by the System Clock Frequency Indication + case e1000_pch_spt: + /* On I217, I218 and I219, the clock frequency is 25MHz + * or 96MHz as indicated by the System Clock Frequency + * Indication */ - if ((hw->mac.type != e1000_pch_lpt) || + if (((hw->mac.type != e1000_pch_lpt) && + (hw->mac.type != e1000_pch_spt)) || (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { /* Stable 96MHz frequency */ incperiod = INCPERIOD_96MHz; @@ -3875,6 +3898,7 @@ void e1000e_reset(struct e1000_adapter *adapter) break; case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: fc->refresh_time = 0x0400; if (adapter->netdev->mtu <= 
ETH_DATA_LEN) { @@ -4759,7 +4783,8 @@ static void e1000e_update_stats(struct e1000_adapter *adapter) adapter->stats.mgpdc += er32(MGTPDC); /* Correctable ECC Errors */ - if (hw->mac.type == e1000_pch_lpt) { + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { u32 pbeccsts = er32(PBECCSTS); adapter->corr_errors += @@ -6144,7 +6169,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) if (adapter->hw.phy.type == e1000_phy_igp_3) { e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); - } else if (hw->mac.type == e1000_pch_lpt) { + } else if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) /* ULP does not support wake from unicast, multicast * or broadcast. @@ -6807,7 +6833,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_ioremap; if ((adapter->flags & FLAG_HAS_FLASH) && - (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { + (pci_resource_flags(pdev, 1) & IORESOURCE_MEM) && + (hw->mac.type < e1000_pch_spt)) { flash_start = pci_resource_start(pdev, 1); flash_len = pci_resource_len(pdev, 1); adapter->hw.flash_address = ioremap(flash_start, flash_len); @@ -7043,7 +7070,7 @@ err_hw_init: kfree(adapter->tx_ring); kfree(adapter->rx_ring); err_sw_init: - if (adapter->hw.flash_address) + if ((adapter->hw.flash_address) && (hw->mac.type < e1000_pch_spt)) iounmap(adapter->hw.flash_address); e1000e_reset_interrupt_capability(adapter); err_flashmap: @@ -7116,7 +7143,8 @@ static void e1000_remove(struct pci_dev *pdev) kfree(adapter->rx_ring); iounmap(adapter->hw.hw_addr); - if (adapter->hw.flash_address) + if ((adapter->hw.flash_address) && + (adapter->hw.mac.type < e1000_pch_spt)) iounmap(adapter->hw.flash_address); pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); @@ -7213,6 +7241,10 @@ static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 978ef9c..1490f1e 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -221,7 +221,9 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) switch (hw->mac.type) { case e1000_pch2lan: case e1000_pch_lpt: - if ((hw->mac.type != e1000_pch_lpt) || + case e1000_pch_spt: + if (((hw->mac.type != e1000_pch_lpt) && + (hw->mac.type != e1000_pch_spt)) || (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { adapter->ptp_clock_info.max_adj = 24000000 - 1; break; diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h index ea235bb..85eefc4 100644 --- a/drivers/net/ethernet/intel/e1000e/regs.h +++ b/drivers/net/ethernet/intel/e1000e/regs.h @@ -38,6 +38,7 @@ #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ #define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ +#define E1000_PCIEANACFG 0x00F18 /* 
PCIE Analog Config */ #define E1000_FCT 0x00030 /* Flow Control Type - RW */ #define E1000_VET 0x00038 /* VLAN Ether Type - RW */ #define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ @@ -67,6 +68,7 @@ #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ #define E1000_PBS 0x01008 /* Packet Buffer Size */ #define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */ +#define E1000_IOSFPC 0x00F28 /* TX corrupted data */ #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ #define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ #define E1000_FLOP 0x0103C /* FLASH Opcode Register */ @@ -121,6 +123,7 @@ (0x054E4 + ((_i - 16) * 8))) #define E1000_SHRAL(_i) (0x05438 + ((_i) * 8)) #define E1000_SHRAH(_i) (0x0543C + ((_i) * 8)) +#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29) #define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ #define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ #define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 42eb434..59edfd4 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -439,6 +439,7 @@ extern char fm10k_driver_name[]; extern const char fm10k_driver_version[]; int fm10k_init_queueing_scheme(struct fm10k_intfc *interface); void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface); +__be16 fm10k_tx_encap_offload(struct sk_buff *skb); netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, struct fm10k_ring *tx_ring); void fm10k_tx_timeout_reset(struct fm10k_intfc *interface); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c index bf19dcc..6cfae6a 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c @@ -398,7 +398,7 @@ static void fm10k_update_hw_stats_rx_q(struct fm10k_hw *hw, /* Retrieve RX Owner Data */ id_rx = fm10k_read_reg(hw, FM10K_RXQCTL(idx)); - /* Process RX Ring*/ + /* Process RX Ring */ do { rx_drops = fm10k_read_hw_stats_32b(hw, FM10K_QPRDC(idx), &q->rx_drops); @@ -466,7 +466,6 @@ void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q, * Function invalidates the index values for the queues so any updates that * may have happened are ignored and the base for the queue stats is reset. 
**/ - void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count) { u32 i; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 651f53b..33b6106 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -1019,7 +1019,7 @@ static int fm10k_set_channels(struct net_device *dev, } static int fm10k_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct ethtool_ts_info *info) { struct fm10k_intfc *interface = netdev_priv(dev); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index 0601908..a02308f 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -275,7 +275,7 @@ s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid) if (vf_idx >= iov_data->num_vfs) return FM10K_ERR_PARAM; - /* determine if an update has occured and if so notify the VF */ + /* determine if an update has occurred and if so notify the VF */ vf_info = &iov_data->vf_info[vf_idx]; if (vf_info->sw_vid != pvid) { vf_info->sw_vid = pvid; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 84ab9ee..c325bc0 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -711,10 +711,6 @@ static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb) if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS) return NULL; - /* verify protocol is transparent Ethernet bridging */ - if (nvgre_hdr->proto != htons(ETH_P_TEB)) - return NULL; - /* report start of ethernet header */ if (nvgre_hdr->flags & NVGRE_TNI) return (struct ethhdr *)(nvgre_hdr + 1); @@ -722,15 +718,13 @@ static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb) return (struct ethhdr *)(&nvgre_hdr->tni); } -static __be16 fm10k_tx_encap_offload(struct sk_buff *skb) +__be16 fm10k_tx_encap_offload(struct sk_buff *skb) { + u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen; struct ethhdr *eth_hdr; - u8 l4_hdr = 0; -/* fm10k supports 184 octets of outer+inner headers. Minus 20 for inner L4. */ -#define FM10K_MAX_ENCAP_TRANSPORT_OFFSET 164 - if (skb_inner_transport_header(skb) - skb_mac_header(skb) > - FM10K_MAX_ENCAP_TRANSPORT_OFFSET) + if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || + skb->inner_protocol != htons(ETH_P_TEB)) return 0; switch (vlan_get_protocol(skb)) { @@ -760,12 +754,33 @@ static __be16 fm10k_tx_encap_offload(struct sk_buff *skb) switch (eth_hdr->h_proto) { case htons(ETH_P_IP): + inner_l4_hdr = inner_ip_hdr(skb)->protocol; + break; case htons(ETH_P_IPV6): + inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr; break; default: return 0; } + switch (inner_l4_hdr) { + case IPPROTO_TCP: + inner_l4_hlen = inner_tcp_hdrlen(skb); + break; + case IPPROTO_UDP: + inner_l4_hlen = 8; + break; + default: + return 0; + } + + /* The hardware allows tunnel offloads only if the combined inner and + * outer header is 184 bytes or less + */ + if (skb_inner_transport_header(skb) + inner_l4_hlen - + skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH) + return 0; + return eth_hdr->h_proto; } @@ -934,10 +949,10 @@ static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) { netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Memory barrier before checking head and tail */ smp_mb(); - /* We need to check again in a case another CPU has just - * made room available. 
*/ + /* Check again in a case another CPU has just made room available */ if (likely(fm10k_desc_unused(tx_ring) < size)) return -EBUSY; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index 9f5457c..14ee696 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -72,7 +72,7 @@ static bool fm10k_fifo_empty(struct fm10k_mbx_fifo *fifo) * @fifo: pointer to FIFO * @offset: offset to add to head * - * This function returns the indicies into the fifo based on head + offset + * This function returns the indices into the fifo based on head + offset **/ static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset) { @@ -84,7 +84,7 @@ static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset) * @fifo: pointer to FIFO * @offset: offset to add to tail * - * This function returns the indicies into the fifo based on tail + offset + * This function returns the indices into the fifo based on tail + offset **/ static u16 fm10k_fifo_tail_offset(struct fm10k_mbx_fifo *fifo, u16 offset) { @@ -326,7 +326,7 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len) * fm10k_mbx_write_copy - pulls data off of Tx FIFO and places it in mbmem * @mbx: pointer to mailbox * - * This function will take a seciton of the Rx FIFO and copy it into the + * This function will take a section of the Rx FIFO and copy it into the mbx->tail--; * mailbox memory. The offset in mbmem is based on the lower bits of the * tail and len determines the length to copy. @@ -418,7 +418,7 @@ static void fm10k_mbx_pull_head(struct fm10k_hw *hw, * @hw: pointer to hardware structure * @mbx: pointer to mailbox * - * This function will take a seciton of the mailbox memory and copy it + * This function will take a section of the mailbox memory and copy it * into the Rx FIFO. The offset is based on the lower bits of the * head and len determines the length to copy. **/ @@ -464,7 +464,7 @@ static void fm10k_mbx_read_copy(struct fm10k_hw *hw, * @tail: tail index of message * * This function will first validate the tail index and size for the - * incoming message. It then updates the acknowlegment number and + * incoming message. It then updates the acknowledgment number and * copies the data into the FIFO. It will return the number of messages * dequeued on success and a negative value on error. 
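
The __fm10k_maybe_stop_tx() hunk a little further up documents the barrier in the classic stop/recheck/wake protocol for a Tx queue: stop the subqueue, issue smp_mb() so the stopped state is globally visible, then re-read the free-descriptor count because the clean-up path on another CPU may have just made room. A simplified, self-contained sketch of that shape (the ring structure and helper below are illustrative stand-ins, not fm10k code):

#include <linux/errno.h>
#include <linux/netdevice.h>

struct tx_ring_state {
	u16 next_to_use;	/* producer index */
	u16 next_to_clean;	/* consumer index */
	u16 count;		/* descriptors in the ring */
};

static u16 ring_unused(const struct tx_ring_state *r)
{
	s16 unused = r->next_to_clean - r->next_to_use - 1;

	return unused < 0 ? unused + r->count : unused;
}

static int maybe_stop_tx(struct net_device *dev, u16 qidx,
			 const struct tx_ring_state *ring, u16 needed)
{
	if (likely(ring_unused(ring) >= needed))
		return 0;

	netif_stop_subqueue(dev, qidx);

	/* Make the stopped state visible before re-checking head/tail;
	 * pairs with a barrier on the Tx-clean side.
	 */
	smp_mb();

	/* Another CPU may have freed descriptors in the meantime. */
	if (likely(ring_unused(ring) < needed))
		return -EBUSY;

	netif_start_subqueue(dev, qidx);
	return 0;
}

The second check exists purely to avoid a lost wake-up; in the common case the queue stays stopped and the caller returns -EBUSY, exactly as the driver does.
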
**/ @@ -761,7 +761,7 @@ static s32 fm10k_mbx_enqueue_tx(struct fm10k_hw *hw, err = fm10k_fifo_enqueue(&mbx->tx, msg); } - /* if we failed trhead the error */ + /* if we failed treat the error */ if (err) { mbx->timeout = 0; mbx->tx_busy++; @@ -815,7 +815,7 @@ static void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) { u32 mbmem = mbx->mbmem_reg; - /* write new msg header to notify recepient of change */ + /* write new msg header to notify recipient of change */ fm10k_write_reg(hw, mbmem, mbx->mbx_hdr); /* write mailbox to sent interrupt */ @@ -1251,7 +1251,7 @@ static s32 fm10k_mbx_process_error(struct fm10k_hw *hw, /* we will need to pull all of the fields for verification */ head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); - /* we only have lower 10 bits of error number os add upper bits */ + /* we only have lower 10 bits of error number so add upper bits */ err_no = FM10K_MSG_HDR_FIELD_GET(*hdr, ERR_NO); err_no |= ~FM10K_MSG_HDR_MASK(ERR_NO); @@ -1548,7 +1548,7 @@ s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, mbx->timeout = 0; mbx->udelay = FM10K_MBX_INIT_DELAY; - /* initalize tail and head */ + /* initialize tail and head */ mbx->tail = 1; mbx->head = 1; @@ -1627,7 +1627,7 @@ static void fm10k_sm_mbx_connect_reset(struct fm10k_mbx_info *mbx) mbx->local = FM10K_SM_MBX_VERSION; mbx->remote = 0; - /* initalize tail and head */ + /* initialize tail and head */ mbx->tail = 1; mbx->head = 1; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index cfde8ba..d5b303d 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -356,7 +356,7 @@ static void fm10k_free_all_rx_resources(struct fm10k_intfc *interface) * fm10k_request_glort_range - Request GLORTs for use in configuring rules * @interface: board private structure * - * This function allocates a range of glorts for this inteface to use. + * This function allocates a range of glorts for this interface to use. 
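
As an aside, the fm10k_fifo_head_offset()/fm10k_fifo_tail_offset() helpers whose comments are corrected above return "the indices into the fifo based on head + offset", which reduces to wrapping an index into the mailbox FIFO. Assuming the FIFO size is a power of two (which is what makes the mask trick valid), the computation is simply:

#include <linux/types.h>

/* Wrap (base + offset) into a power-of-two sized FIFO. */
static u16 fifo_index(u16 base, u16 offset, u16 size)
{
	return (base + offset) & (size - 1);
}
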
**/ static void fm10k_request_glort_range(struct fm10k_intfc *interface) { @@ -781,7 +781,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) fm10k_mbx_lock(interface); - /* only need to update the VLAN if not in promiscous mode */ + /* only need to update the VLAN if not in promiscuous mode */ if (!(netdev->flags & IFF_PROMISC)) { err = hw->mac.ops.update_vlan(hw, vid, 0, set); if (err) @@ -970,7 +970,7 @@ static void fm10k_set_rx_mode(struct net_device *dev) fm10k_mbx_lock(interface); - /* syncronize all of the addresses */ + /* synchronize all of the addresses */ if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync); if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) @@ -1051,7 +1051,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) vid, true, 0); } - /* syncronize all of the addresses */ + /* synchronize all of the addresses */ if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync); if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) @@ -1350,6 +1350,16 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv) } } +static netdev_features_t fm10k_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + if (!skb->encapsulation || fm10k_tx_encap_offload(skb)) + return features; + + return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); +} + static const struct net_device_ops fm10k_netdev_ops = { .ndo_open = fm10k_open, .ndo_stop = fm10k_close, @@ -1372,6 +1382,7 @@ static const struct net_device_ops fm10k_netdev_ops = { .ndo_do_ioctl = fm10k_ioctl, .ndo_dfwd_add_station = fm10k_dfwd_add_station, .ndo_dfwd_del_station = fm10k_dfwd_del_station, + .ndo_features_check = fm10k_features_check, }; #define DEFAULT_DEBUG_LEVEL_SHIFT 3 diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 4f5892c..8978d55 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -648,7 +648,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface, /* Configure the Rx buffer size for one buff without split */ srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT; - /* Configure the Rx ring to supress loopback packets */ + /* Configure the Rx ring to suppress loopback packets */ srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS; fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 7e47119..159cd84 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -234,8 +234,7 @@ static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set) vid = (vid << 17) >> 17; /* verify the reserved 0 fields are 0 */ - if (len >= FM10K_VLAN_TABLE_VID_MAX || - vid >= FM10K_VLAN_TABLE_VID_MAX) + if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX) return FM10K_ERR_PARAM; /* Loop through the table updating all required VLANs */ @@ -312,7 +311,7 @@ bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort) } /** - * fm10k_update_uc_addr_pf - Update device unicast addresss + * fm10k_update_xc_addr_pf - Update device addresses * @hw: pointer to the HW structure * @glort: base resource tag for this request * @mac: MAC address to add/remove from table @@ -356,7 +355,7 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort, } /** - * 
fm10k_update_uc_addr_pf - Update device unicast addresss + * fm10k_update_uc_addr_pf - Update device unicast addresses * @hw: pointer to the HW structure * @glort: base resource tag for this request * @mac: MAC address to add/remove from table @@ -454,7 +453,7 @@ static void fm10k_update_int_moderator_pf(struct fm10k_hw *hw) break; } - /* always reset VFITR2[0] to point to last enabled PF vector*/ + /* always reset VFITR2[0] to point to last enabled PF vector */ fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i); /* reset ITR2[0] to point to last enabled PF vector */ @@ -812,7 +811,7 @@ static s32 fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx) if (vf_idx >= hw->iov.num_vfs) return FM10K_ERR_PARAM; - /* determine vector offset and count*/ + /* determine vector offset and count */ vf_v_idx = fm10k_vf_vector_index(hw, vf_idx); vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw); @@ -951,7 +950,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw, if (vf_info->mbx.ops.disconnect) vf_info->mbx.ops.disconnect(hw, &vf_info->mbx); - /* determine vector offset and count*/ + /* determine vector offset and count */ vf_v_idx = fm10k_vf_vector_index(hw, vf_idx); vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw); @@ -1035,7 +1034,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw, ((u32)vf_info->mac[2]); } - /* map queue pairs back to VF from last to first*/ + /* map queue pairs back to VF from last to first */ for (i = queues_per_pool; i--;) { fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx + i), tdbal); fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx + i), tdbah); @@ -1141,7 +1140,7 @@ static s32 fm10k_iov_report_timestamp_pf(struct fm10k_hw *hw, * * This function is a default handler for MSI-X requests from the VF. The * assumption is that in this case it is acceptable to just directly - * hand off the message form the VF to the underlying shared code. + * hand off the message from the VF to the underlying shared code. **/ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, struct fm10k_mbx_info *mbx) @@ -1160,7 +1159,7 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, * * This function is a default handler for MAC/VLAN requests from the VF. * The assumption is that in this case it is acceptable to just directly - * hand off the message form the VF to the underlying shared code. + * hand off the message from the VF to the underlying shared code. 
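
Several of the SR-IOV hunks above ("determine vector offset and count") deal with carving a contiguous per-VF slice out of the device's MSI-X vector space via fm10k_vf_vector_index() and fm10k_vectors_per_pool(). A toy model of that index arithmetic, with made-up names and without the PF-reserved vectors the real helpers account for:

#include <linux/types.h>

struct iov_layout {
	u16 first_vf_vector;	/* first vector index handed to VF 0 */
	u16 vectors_per_pool;	/* MSI-X vectors per VF pool */
};

static void vf_vector_range(const struct iov_layout *l, u16 vf_idx,
			    u16 *start, u16 *limit)
{
	*start = l->first_vf_vector + vf_idx * l->vectors_per_pool;
	*limit = *start + l->vectors_per_pool;
}
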
**/ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, struct fm10k_mbx_info *mbx) @@ -1404,7 +1403,7 @@ static void fm10k_update_hw_stats_pf(struct fm10k_hw *hw, &stats->vlan_drop); loopback_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_LOOPBACK_DROP, - &stats->loopback_drop); + &stats->loopback_drop); nodesc_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_NODESC_DROP, &stats->nodesc_drop); @@ -1573,7 +1572,7 @@ static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready) s32 ret_val = 0; u32 dma_ctrl2; - /* verify the switch is ready for interraction */ + /* verify the switch is ready for interaction */ dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2); if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY)) goto out; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c index fd0a05f..9b29d7b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c @@ -710,7 +710,7 @@ void fm10k_tlv_msg_test_create(u32 *msg, u32 attr_flags) /** * fm10k_tlv_msg_test - Validate all results on test message receive * @hw: Pointer to hardware structure - * @results: Pointer array to attributes in the mesage + * @results: Pointer array to attributes in the message * @mbx: Pointer to mailbox information structure * * This function does a check to verify all attributes match what the test diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index 7c6d9d5..4af9668 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -356,6 +356,9 @@ struct fm10k_hw; #define FM10K_QUEUE_DISABLE_TIMEOUT 100 #define FM10K_RESET_TIMEOUT 150 +/* Maximum supported combined inner and outer header length for encapsulation */ +#define FM10K_TUNNEL_HEADER_LENGTH 184 + /* VF registers */ #define FM10K_VFCTRL 0x00000 #define FM10K_VFCTRL_RST 0x00000008 @@ -593,7 +596,7 @@ struct fm10k_vf_info { u16 sw_vid; /* Switch API assigned VLAN */ u16 pf_vid; /* PF assigned Default VLAN */ u8 mac[ETH_ALEN]; /* PF Default MAC address */ - u8 vsi; /* VSI idenfifier */ + u8 vsi; /* VSI identifier */ u8 vf_idx; /* which VF this is */ u8 vf_flags; /* flags indicating what modes * are supported for the port diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index f0aa0f9..1721967 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -37,7 +37,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) if (err) return err; - /* If permenant address is set then we need to restore it */ + /* If permanent address is set then we need to restore it */ if (is_valid_ether_addr(perm_addr)) { bal = (((u32)perm_addr[3]) << 24) | (((u32)perm_addr[4]) << 16) | @@ -65,7 +65,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) * fm10k_reset_hw_vf - VF hardware reset * @hw: pointer to hardware structure * - * This function should return the hardare to a state similar to the + * This function should return the hardware to a state similar to the * one it is in after just being initialized. 
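
The FM10K_TUNNEL_HEADER_LENGTH definition just above (184 bytes of combined outer plus inner headers) is the limit the new length test in fm10k_tx_encap_offload() enforces, and the fm10k_features_check() handler added in fm10k_netdev.c uses that result to drop the checksum and GSO feature bits for any encapsulated skb the hardware cannot offload, so the stack falls back to software. A hedged sketch of that ndo_features_check shape; can_offload_encap() is a stand-in for the driver's fuller test, which also validates the tunnel type and inner L4 header:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static bool can_offload_encap(const struct sk_buff *skb)
{
	/* Stand-in check: combined outer+inner headers must fit the
	 * hardware limit (184 bytes here, mirroring the define above).
	 */
	return skb_inner_transport_header(skb) - skb_mac_header(skb) <= 184;
}

static netdev_features_t example_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	if (!skb->encapsulation || can_offload_encap(skb))
		return features;

	return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
}
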
**/ static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw) @@ -252,7 +252,7 @@ static s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw) } /** - * fm10k_update_uc_addr_vf - Update device unicast address + * fm10k_update_uc_addr_vf - Update device unicast addresses * @hw: pointer to the HW structure * @glort: unused * @mac: MAC address to add/remove from table @@ -282,7 +282,7 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort, memcmp(hw->mac.perm_addr, mac, ETH_ALEN)) return FM10K_ERR_PARAM; - /* add bit to notify us if this is a set of clear operation */ + /* add bit to notify us if this is a set or clear operation */ if (!add) vid |= FM10K_VLAN_CLEAR; @@ -295,7 +295,7 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort, } /** - * fm10k_update_mc_addr_vf - Update device multicast address + * fm10k_update_mc_addr_vf - Update device multicast addresses * @hw: pointer to the HW structure * @glort: unused * @mac: MAC address to add/remove from table @@ -319,7 +319,7 @@ static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort, if (!is_multicast_ether_addr(mac)) return FM10K_ERR_PARAM; - /* add bit to notify us if this is a set of clear operation */ + /* add bit to notify us if this is a set or clear operation */ if (!add) vid |= FM10K_VLAN_CLEAR; @@ -515,7 +515,7 @@ static s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb) * @hw: pointer to the hardware structure * * Function reads the content of 2 registers, combined to represent a 64 bit - * value measured in nanosecods. In order to guarantee the value is accurate + * value measured in nanoseconds. In order to guarantee the value is accurate * we check the 32 most significant bits both before and after reading the * 32 least significant bits to verify they didn't change as we were reading * the registers. diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index c405819..b4729ba 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile +++ b/drivers/net/ethernet/intel/i40e/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel Ethernet Controller XL710 Family Linux Driver -# Copyright(c) 2013 - 2014 Intel Corporation. +# Copyright(c) 2013 - 2015 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 2b65cdc..1c8bd7c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
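
The systime comment corrected above describes the standard way to read a 64-bit counter exposed as two 32-bit registers: read the high half, then the low half, then the high half again, and retry if the high half moved, so a carry between the two reads cannot produce a torn value. A generic sketch of that loop (the register pointers are placeholders for the real register map):

#include <linux/io.h>
#include <linux/types.h>

static u64 read_split_counter(void __iomem *hi_reg, void __iomem *lo_reg)
{
	u32 hi, lo;

	do {
		hi = readl(hi_reg);
		lo = readl(lo_reg);
	} while (hi != readl(hi_reg));

	return ((u64)hi << 32) | lo;
}
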
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -36,6 +36,7 @@ #include <linux/aer.h> #include <linux/netdevice.h> #include <linux/ioport.h> +#include <linux/iommu.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/string.h> @@ -49,6 +50,7 @@ #include <net/ip6_checksum.h> #include <linux/ethtool.h> #include <linux/if_vlan.h> +#include <linux/if_bridge.h> #include <linux/clocksource.h> #include <linux/net_tstamp.h> #include <linux/ptp_clock_kernel.h> @@ -70,6 +72,7 @@ #define I40E_MAX_NUM_DESCRIPTORS 4096 #define I40E_MAX_REGISTER 0x800000 +#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024) #define I40E_DEFAULT_NUM_DESCRIPTORS 512 #define I40E_REQ_DESCRIPTOR_MULTIPLE 32 #define I40E_MIN_NUM_DESCRIPTORS 64 @@ -94,6 +97,9 @@ #define I40E_QUEUE_WAIT_RETRY_LIMIT 10 #define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9) +/* Ethtool Private Flags */ +#define I40E_PRIV_FLAGS_NPAR_FLAG (1 << 0) + #define I40E_NVM_VERSION_LO_SHIFT 0 #define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) #define I40E_NVM_VERSION_HI_SHIFT 12 @@ -140,6 +146,7 @@ enum i40e_state_t { __I40E_CORE_RESET_REQUESTED, __I40E_GLOBAL_RESET_REQUESTED, __I40E_EMP_RESET_REQUESTED, + __I40E_EMP_RESET_INTR_RECEIVED, __I40E_FILTER_OVERFLOW_PROMISC, __I40E_SUSPENDED, __I40E_PTP_TX_IN_PROGRESS, @@ -168,6 +175,9 @@ struct i40e_lump_tracking { #define I40E_FDIR_MAX_RAW_PACKET_SIZE 512 #define I40E_FDIR_BUFFER_FULL_MARGIN 10 #define I40E_FDIR_BUFFER_HEAD_ROOM 32 +#define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4) + +#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4) enum i40e_fd_stat_idx { I40E_FD_STAT_ATR, @@ -232,17 +242,17 @@ struct i40e_pf { bool fc_autoneg_status; u16 eeprom_version; - u16 num_vmdq_vsis; /* num vmdq vsis this pf has set up */ + u16 num_vmdq_vsis; /* num vmdq vsis this PF has set up */ u16 num_vmdq_qps; /* num queue pairs per vmdq pool */ u16 num_vmdq_msix; /* num queue vectors per vmdq pool */ - u16 num_req_vfs; /* num vfs requested for this vf */ - u16 num_vf_qps; /* num queue pairs per vf */ + u16 num_req_vfs; /* num VFs requested for this VF */ + u16 num_vf_qps; /* num queue pairs per VF */ #ifdef I40E_FCOE - u16 num_fcoe_qps; /* num fcoe queues this pf has set up */ + u16 num_fcoe_qps; /* num fcoe queues this PF has set up */ u16 num_fcoe_msix; /* num queue vectors per fcoe pool */ #endif /* I40E_FCOE */ - u16 num_lan_qps; /* num lan queues this pf has set up */ - u16 num_lan_msix; /* num queue vectors for the base pf vsi */ + u16 num_lan_qps; /* num lan queues this PF has set up */ + u16 num_lan_msix; /* num queue vectors for the base PF vsi */ int queues_left; /* queues left unclaimed */ u16 rss_size; /* num queues in the RSS array */ u16 rss_size_max; /* HW defined max RSS queues */ @@ -269,7 +279,7 @@ struct i40e_pf { enum i40e_interrupt_policy int_policy; u16 rx_itr_default; u16 tx_itr_default; - u16 msg_enable; + u32 msg_enable; char int_name[I40E_INT_NAME_STR_LEN]; u16 adminq_work_limit; /* num of admin receive queue desc to process */ unsigned long service_timer_period; @@ -383,6 +393,9 @@ struct i40e_pf { bool ptp_tx; bool ptp_rx; u16 rss_table_size; + /* These are only valid in NPAR modes */ + u32 npar_max_bw; + u32 npar_min_bw; }; struct i40e_mac_filter { @@ -405,6 +418,7 @@ struct i40e_veb { u16 uplink_seid; u16 stats_idx; /* index of VEB parent */ u8 enabled_tc; + u16 bridge_mode; /* Bridge Mode (VEB/VEPA) */ u16 flags; u16 bw_limit; u8 
bw_max_quanta; @@ -461,6 +475,9 @@ struct i40e_vsi { u16 rx_itr_setting; u16 tx_itr_setting; + u16 rss_table_size; + u16 rss_size; + u16 max_frame; u16 rx_hdr_len; u16 rx_buf_len; @@ -478,6 +495,7 @@ struct i40e_vsi { u16 base_queue; /* vsi's first queue in hw array */ u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */ + u16 req_queue_pairs; /* User requested queue pairs */ u16 num_queue_pairs; /* Used tx and rx pairs */ u16 num_desc; enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */ @@ -504,6 +522,9 @@ struct i40e_vsi { /* VSI specific handlers */ irqreturn_t (*irq_handler)(int irq, void *data); + + /* current rxnfc data */ + struct ethtool_rxnfc rxnfc; /* current rss hash opts */ } ____cacheline_internodealigned_in_smp; struct i40e_netdev_priv { @@ -544,14 +565,14 @@ static inline char *i40e_fw_version_str(struct i40e_hw *hw) static char buf[32]; snprintf(buf, sizeof(buf), - "f%d.%d a%d.%d n%02x.%02x e%08x", - hw->aq.fw_maj_ver, hw->aq.fw_min_ver, + "f%d.%d.%05d a%d.%d n%x.%02x e%x", + hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, hw->aq.api_maj_ver, hw->aq.api_min_ver, (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >> I40E_NVM_VERSION_HI_SHIFT, (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >> I40E_NVM_VERSION_LO_SHIFT, - hw->nvm.eetrack); + (hw->nvm.eetrack & 0xffffff)); return buf; } @@ -593,7 +614,7 @@ static inline bool i40e_rx_is_programming_status(u64 qw) /** * i40e_get_fd_cnt_all - get the total FD filter space available - * @pf: pointer to the pf struct + * @pf: pointer to the PF struct **/ static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf) { @@ -618,9 +639,10 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet, int i40e_add_del_fdir(struct i40e_vsi *vsi, struct i40e_fdir_filter *input, bool add); void i40e_fdir_check_and_reenable(struct i40e_pf *pf); -int i40e_get_current_fd_count(struct i40e_pf *pf); -int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf); -int i40e_get_current_atr_cnt(struct i40e_pf *pf); +u32 i40e_get_current_fd_count(struct i40e_pf *pf); +u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf); +u32 i40e_get_current_atr_cnt(struct i40e_pf *pf); +u32 i40e_get_global_fd_count(struct i40e_pf *pf); bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features); void i40e_set_ethtool_ops(struct net_device *netdev); struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, @@ -680,6 +702,7 @@ int i40e_vlan_rx_add_vid(struct net_device *netdev, int i40e_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid); #endif +int i40e_open(struct net_device *netdev); int i40e_vsi_open(struct i40e_vsi *vsi); void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); @@ -690,7 +713,6 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi); struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, bool is_vf, bool is_netdev); #ifdef I40E_FCOE -int i40e_open(struct net_device *netdev); int i40e_close(struct net_device *netdev); int i40e_setup_tc(struct net_device *netdev, u8 tc); void i40e_netpoll(struct net_device *netdev); @@ -712,6 +734,7 @@ void i40e_fcoe_handle_status(struct i40e_ring *rx_ring, void i40e_vlan_stripping_enable(struct i40e_vsi *vsi); #ifdef CONFIG_I40E_DCB void i40e_dcbnl_flush_apps(struct i40e_pf *pf, + struct i40e_dcbx_config *old_cfg, struct i40e_dcbx_config *new_cfg); void i40e_dcbnl_set_all(struct i40e_vsi *vsi); void i40e_dcbnl_setup(struct i40e_vsi *vsi); @@ -727,4 +750,8 @@ int 
i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr); int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr); void i40e_ptp_init(struct i40e_pf *pf); void i40e_ptp_stop(struct i40e_pf *pf); +int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi); +i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf); +i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf); +i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf); #endif /* _I40E_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 77f6254..3e0d200 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -592,6 +592,7 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) ret_code = i40e_aq_get_firmware_version(hw, &hw->aq.fw_maj_ver, &hw->aq.fw_min_ver, + &hw->aq.fw_build, &hw->aq.api_maj_ver, &hw->aq.api_min_ver, NULL); @@ -605,7 +606,8 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) goto init_adminq_free_arq; /* get the NVM version info */ - i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version); + i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION, + &hw->nvm.version); i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo); i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi); hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h index de17b6f..28e519a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -93,6 +93,7 @@ struct i40e_adminq_info { u16 asq_buf_size; /* send queue buffer size */ u16 fw_maj_ver; /* firmware major version */ u16 fw_min_ver; /* firmware minor version */ + u32 fw_build; /* firmware build number */ u16 api_maj_ver; /* api major version */ u16 api_min_ver; /* api minor version */ bool nvm_release_on_done; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 6aea65d..d9f1fcb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
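
The i40e_fw_version_str() hunk earlier, together with the admin-queue change here that now also retrieves fw_build, means the reported string becomes "f<maj>.<min>.<build> a<api_maj>.<api_min> n<nvm> e<eetrack>". A rough sketch of assembling it from those pieces (parameter names are assumptions; the shifts and masks mirror the I40E_NVM_VERSION_* definitions shown earlier in the diff):

#include <linux/kernel.h>

static void fmt_fw_version(char *buf, size_t len,
			   u16 fw_maj, u16 fw_min, u32 fw_build,
			   u16 api_maj, u16 api_min,
			   u16 nvm_version, u32 eetrack)
{
	snprintf(buf, len, "f%d.%d.%05d a%d.%d n%x.%02x e%x",
		 fw_maj, fw_min, fw_build,
		 api_maj, api_min,
		 (nvm_version >> 12) & 0xf,	/* high nibble of NVM version */
		 nvm_version & 0xff,		/* low byte of NVM version */
		 eetrack & 0xffffff);
}
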
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -85,46 +85,53 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; u16 len = le16_to_cpu(aq_desc->datalen); - u8 *aq_buffer = (u8 *)buffer; - u32 data[4]; - u32 i = 0; + u8 *buf = (u8 *)buffer; + u16 i = 0; if ((!(mask & hw->debug_mask)) || (desc == NULL)) return; i40e_debug(hw, mask, "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", - aq_desc->opcode, aq_desc->flags, aq_desc->datalen, - aq_desc->retval); + le16_to_cpu(aq_desc->opcode), + le16_to_cpu(aq_desc->flags), + le16_to_cpu(aq_desc->datalen), + le16_to_cpu(aq_desc->retval)); i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", - aq_desc->cookie_high, aq_desc->cookie_low); + le32_to_cpu(aq_desc->cookie_high), + le32_to_cpu(aq_desc->cookie_low)); i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", - aq_desc->params.internal.param0, - aq_desc->params.internal.param1); + le32_to_cpu(aq_desc->params.internal.param0), + le32_to_cpu(aq_desc->params.internal.param1)); i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", - aq_desc->params.external.addr_high, - aq_desc->params.external.addr_low); + le32_to_cpu(aq_desc->params.external.addr_high), + le32_to_cpu(aq_desc->params.external.addr_low)); if ((buffer != NULL) && (aq_desc->datalen != 0)) { - memset(data, 0, sizeof(data)); i40e_debug(hw, mask, "AQ CMD Buffer:\n"); if (buf_len < len) len = buf_len; - for (i = 0; i < len; i++) { - data[((i % 16) / 4)] |= - ((u32)aq_buffer[i]) << (8 * (i % 4)); - if ((i % 16) == 15) { - i40e_debug(hw, mask, - "\t0x%04X %08X %08X %08X %08X\n", - i - 15, data[0], data[1], data[2], - data[3]); - memset(data, 0, sizeof(data)); - } + /* write the full 16-byte chunks */ + for (i = 0; i < (len - 16); i += 16) + i40e_debug(hw, mask, + "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", + i, buf[i], buf[i + 1], buf[i + 2], + buf[i + 3], buf[i + 4], buf[i + 5], + buf[i + 6], buf[i + 7], buf[i + 8], + buf[i + 9], buf[i + 10], buf[i + 11], + buf[i + 12], buf[i + 13], buf[i + 14], + buf[i + 15]); + /* write whatever's left over without overrunning the buffer */ + if (i < len) { + char d_buf[80]; + int j = 0; + + memset(d_buf, 0, sizeof(d_buf)); + j += sprintf(d_buf, "\t0x%04X ", i); + while (i < len) + j += sprintf(&d_buf[j], " %02X", buf[i++]); + i40e_debug(hw, mask, "%s\n", d_buf); } - if ((i % 16) != 0) - i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n", - i - (i % 16), data[0], data[1], data[2], - data[3]); } } @@ -534,7 +541,6 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = { I40E_PTT_UNUSED_ENTRY(255) }; - /** * i40e_init_shared_code - Initialize the shared code * @hw: pointer to hardware structure @@ -685,7 +691,7 @@ i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr) /** * i40e_pre_tx_queue_cfg - pre tx queue configure * @hw: pointer to the HW structure - * @queue: target pf queue index + * @queue: target PF queue index * @enable: state change request * * Handles hw requirement to indicate intention to enable @@ -827,6 +833,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) case I40E_PHY_TYPE_10GBASE_CR1: case I40E_PHY_TYPE_40GBASE_CR4: case I40E_PHY_TYPE_10GBASE_SFPP_CU: + case I40E_PHY_TYPE_40GBASE_AOC: + case I40E_PHY_TYPE_10GBASE_AOC: media = I40E_MEDIA_TYPE_DA; break; case I40E_PHY_TYPE_1000BASE_KX: @@ 
-947,7 +955,7 @@ void i40e_clear_hw(struct i40e_hw *hw) u32 val; u32 eol = 0x7ff; - /* get number of interrupts, queues, and vfs */ + /* get number of interrupts, queues, and VFs */ val = rd32(hw, I40E_GLPCI_CNF2); num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >> I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; @@ -1076,8 +1084,11 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) return gpio_val; } -#define I40E_LED0 22 +#define I40E_COMBINED_ACTIVITY 0xA +#define I40E_FILTER_ACTIVITY 0xE #define I40E_LINK_ACTIVITY 0xC +#define I40E_MAC_ACTIVITY 0xD +#define I40E_LED0 22 /** * i40e_led_get - return current on/off mode @@ -1090,6 +1101,7 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) **/ u32 i40e_led_get(struct i40e_hw *hw) { + u32 current_mode = 0; u32 mode = 0; int i; @@ -1102,6 +1114,20 @@ u32 i40e_led_get(struct i40e_hw *hw) if (!gpio_val) continue; + /* ignore gpio LED src mode entries related to the activity + * LEDs + */ + current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) + >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); + switch (current_mode) { + case I40E_COMBINED_ACTIVITY: + case I40E_FILTER_ACTIVITY: + case I40E_MAC_ACTIVITY: + continue; + default: + break; + } + mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT; break; @@ -1121,6 +1147,7 @@ u32 i40e_led_get(struct i40e_hw *hw) **/ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) { + u32 current_mode = 0; int i; if (mode & 0xfffffff0) @@ -1135,6 +1162,20 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) if (!gpio_val) continue; + /* ignore gpio LED src mode entries related to the activity + * LEDs + */ + current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) + >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); + switch (current_mode) { + case I40E_COMBINED_ACTIVITY: + case I40E_FILTER_ACTIVITY: + case I40E_MAC_ACTIVITY: + continue; + default: + break; + } + gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK; /* this & is a bit of paranoia, but serves as a range check */ gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & @@ -1298,14 +1339,14 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; } /* Update the link info */ - status = i40e_update_link_info(hw, true); + status = i40e_aq_get_link_info(hw, true, NULL, NULL); if (status) { /* Wait a little bit (on 40G cards it sometimes takes a really * long time for link to come back from the atomic reset) * and try once more */ msleep(1000); - status = i40e_update_link_info(hw, true); + status = i40e_aq_get_link_info(hw, true, NULL, NULL); } if (status) *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE; @@ -1441,6 +1482,10 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, else hw_link_info->lse_enable = false; + if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 && + hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) + hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; + /* save link status information */ if (link) *link = *hw_link_info; @@ -1453,35 +1498,6 @@ aq_get_link_info_exit: } /** - * i40e_update_link_info - * @hw: pointer to the hw struct - * @enable_lse: enable/disable LinkStatusEvent reporting - * - * Returns the link status of the adapter - **/ -i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse) -{ - struct i40e_aq_get_phy_abilities_resp abilities; - i40e_status status; - - status = i40e_aq_get_link_info(hw, enable_lse, NULL, NULL); - if (status) - return status; - - status = 
i40e_aq_get_phy_capabilities(hw, false, false, - &abilities, NULL); - if (status) - return status; - - if (abilities.abilities & I40E_AQ_PHY_AN_ENABLED) - hw->phy.link_info.an_enabled = true; - else - hw->phy.link_info.an_enabled = false; - - return status; -} - -/** * i40e_aq_set_phy_int_mask * @hw: pointer to the hw struct * @mask: interrupt mask to be set @@ -1760,6 +1776,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, * @hw: pointer to the hw struct * @fw_major_version: firmware major version * @fw_minor_version: firmware minor version + * @fw_build: firmware build number * @api_major_version: major queue version * @api_minor_version: minor queue version * @cmd_details: pointer to command details structure or NULL @@ -1768,6 +1785,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, **/ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, u16 *fw_major_version, u16 *fw_minor_version, + u32 *fw_build, u16 *api_major_version, u16 *api_minor_version, struct i40e_asq_cmd_details *cmd_details) { @@ -1781,13 +1799,15 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { - if (fw_major_version != NULL) + if (fw_major_version) *fw_major_version = le16_to_cpu(resp->fw_major); - if (fw_minor_version != NULL) + if (fw_minor_version) *fw_minor_version = le16_to_cpu(resp->fw_minor); - if (api_major_version != NULL) + if (fw_build) + *fw_build = le32_to_cpu(resp->fw_build); + if (api_major_version) *api_major_version = le16_to_cpu(resp->api_major); - if (api_minor_version != NULL) + if (api_minor_version) *api_minor_version = le16_to_cpu(resp->api_minor); } @@ -1817,7 +1837,7 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); - desc.flags |= cpu_to_le16(I40E_AQ_FLAG_SI); + desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); cmd->driver_major_ver = dv->major_version; cmd->driver_minor_ver = dv->minor_version; cmd->driver_build_ver = dv->build_version; @@ -1997,7 +2017,7 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, if (count == 0 || !mv_list || !hw) return I40E_ERR_PARAM; - buf_size = count * sizeof(struct i40e_aqc_add_macvlan_element_data); + buf_size = count * sizeof(*mv_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan); @@ -2039,7 +2059,7 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, if (count == 0 || !mv_list || !hw) return I40E_ERR_PARAM; - buf_size = count * sizeof(struct i40e_aqc_remove_macvlan_element_data); + buf_size = count * sizeof(*mv_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); @@ -2061,7 +2081,7 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, /** * i40e_aq_send_msg_to_vf * @hw: pointer to the hardware structure - * @vfid: vf id to send msg + * @vfid: VF id to send msg * @v_opcode: opcodes for VF-PF communication * @v_retval: return error code * @msg: pointer to the msg buffer @@ -2106,7 +2126,7 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, * Read the register using the admin queue commands **/ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, - u32 reg_addr, u64 *reg_val, + u32 reg_addr, u64 *reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; @@ -2117,17 +2137,15 @@ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, if 
(reg_val == NULL) return I40E_ERR_PARAM; - i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_debug_read_reg); + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); cmd_resp->address = cpu_to_le32(reg_addr); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { - *reg_val = ((u64)cmd_resp->value_high << 32) | - (u64)cmd_resp->value_low; - *reg_val = le64_to_cpu(*reg_val); + *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) | + (u64)le32_to_cpu(cmd_resp->value_low); } return status; @@ -3377,6 +3395,47 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, } /** + * i40e_aq_alternate_read + * @hw: pointer to the hardware structure + * @reg_addr0: address of first dword to be read + * @reg_val0: pointer for data read from 'reg_addr0' + * @reg_addr1: address of second dword to be read + * @reg_val1: pointer for data read from 'reg_addr1' + * + * Read one or two dwords from alternate structure. Fields are indicated + * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer + * is not passed then only register at 'reg_addr0' is read. + * + **/ +static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, + u32 reg_addr0, u32 *reg_val0, + u32 reg_addr1, u32 *reg_val1) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_alternate_write *cmd_resp = + (struct i40e_aqc_alternate_write *)&desc.params.raw; + i40e_status status; + + if (!reg_val0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); + cmd_resp->address0 = cpu_to_le32(reg_addr0); + cmd_resp->address1 = cpu_to_le32(reg_addr1); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + + if (!status) { + *reg_val0 = le32_to_cpu(cmd_resp->data0); + + if (reg_val1) + *reg_val1 = le32_to_cpu(cmd_resp->data1); + } + + return status; +} + +/** * i40e_aq_resume_port_tx * @hw: pointer to the hardware structure * @cmd_details: pointer to command details structure or NULL @@ -3440,3 +3499,79 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) break; } } + +/** + * i40e_read_bw_from_alt_ram + * @hw: pointer to the hardware structure + * @max_bw: pointer for max_bw read + * @min_bw: pointer for min_bw read + * @min_valid: pointer for bool that is true if min_bw is a valid value + * @max_valid: pointer for bool that is true if max_bw is a valid value + * + * Read bw from the alternate ram for the given pf + **/ +i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, + u32 *max_bw, u32 *min_bw, + bool *min_valid, bool *max_valid) +{ + i40e_status status; + u32 max_bw_addr, min_bw_addr; + + /* Calculate the address of the min/max bw registers */ + max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + + I40E_ALT_STRUCT_MAX_BW_OFFSET + + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); + min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + + I40E_ALT_STRUCT_MIN_BW_OFFSET + + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); + + /* Read the bandwidths from alt ram */ + status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, + min_bw_addr, min_bw); + + if (*min_bw & I40E_ALT_BW_VALID_MASK) + *min_valid = true; + else + *min_valid = false; + + if (*max_bw & I40E_ALT_BW_VALID_MASK) + *max_valid = true; + else + *max_valid = false; + + return status; +} + +/** + * i40e_aq_configure_partition_bw + * @hw: pointer to the hardware structure + * @bw_data: Buffer holding valid pfs and bw limits + * @cmd_details: pointer to command details + * + * Configure partitions guaranteed/max bw + **/ +i40e_status 
i40e_aq_configure_partition_bw(struct i40e_hw *hw, + struct i40e_aqc_configure_partition_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + i40e_status status; + struct i40e_aq_desc desc; + u16 bwd_size = sizeof(*bw_data); + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_configure_partition_bw); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + + if (bwd_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + desc.datalen = cpu_to_le16(bwd_size); + + status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, + cmd_details); + + return status; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 3ce4358..6e14667 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -459,7 +459,7 @@ static void i40e_cee_to_dcb_v1_config( sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0; /* Add APPs if Error is False and Oper/Sync is True */ - if (!err && sync && oper) { + if (!err) { /* CEE operating configuration supports FCoE/iSCSI/FIP only */ dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS; diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index a11c70c..400fb28 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -223,7 +223,7 @@ static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi, /** * i40e_dcbnl_del_app - Delete APP on all VSIs - * @pf: the corresponding pf + * @pf: the corresponding PF * @app: APP to delete * * Delete given APP from all the VSIs for given PF @@ -268,23 +268,22 @@ static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg, /** * i40e_dcbnl_flush_apps - Delete all removed APPs - * @pf: the corresponding pf + * @pf: the corresponding PF + * @old_cfg: old DCBX configuration data * @new_cfg: new DCBX configuration data * * Find and delete all APPs that are not present in the passed * DCB configuration **/ void i40e_dcbnl_flush_apps(struct i40e_pf *pf, + struct i40e_dcbx_config *old_cfg, struct i40e_dcbx_config *new_cfg) { struct i40e_dcb_app_priority_table app; - struct i40e_dcbx_config *dcbxcfg; - struct i40e_hw *hw = &pf->hw; int i; - dcbxcfg = &hw->local_dcbx_config; - for (i = 0; i < dcbxcfg->numapps; i++) { - app = dcbxcfg->app[i]; + for (i = 0; i < old_cfg->numapps; i++) { + app = old_cfg->app[i]; /* The APP is not available anymore delete it */ if (!i40e_dcbnl_find_app(new_cfg, &app)) i40e_dcbnl_del_app(pf, &app); @@ -306,9 +305,7 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi) if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) return; - /* Do not setup DCB NL ops for MFP mode */ - if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) - dev->dcbnl_ops = &dcbnl_ops; + dev->dcbnl_ops = &dcbnl_ops; /* Set initial IEEE DCB settings */ i40e_dcbnl_set_all(vsi); diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index c17ee77..daa8826 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -35,7 +35,7 @@ static struct dentry *i40e_dbg_root; /** * i40e_dbg_find_vsi - searches for the vsi with the given seid - * @pf - the pf structure to search for the vsi + * @pf - the PF structure to search for the vsi * @seid - seid of the vsi it is searching for **/ static struct i40e_vsi 
*i40e_dbg_find_vsi(struct i40e_pf *pf, int seid) @@ -54,7 +54,7 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid) /** * i40e_dbg_find_veb - searches for the veb with the given seid - * @pf - the pf structure to search for the veb + * @pf - the PF structure to search for the veb * @seid - seid of the veb it is searching for **/ static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid) @@ -112,7 +112,7 @@ static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer, /** * i40e_dbg_prep_dump_buf - * @pf: the pf we're working with + * @pf: the PF we're working with * @buflen: the desired buffer length * * Return positive if success, 0 if failed @@ -318,7 +318,7 @@ static const struct file_operations i40e_dbg_dump_fops = { * setup, adding or removing filters, or other things. Many of * these will be useful for some forms of unit testing. **************************************************************/ -static char i40e_dbg_command_buf[256] = "hello world"; +static char i40e_dbg_command_buf[256] = ""; /** * i40e_dbg_command_read - read for command datum @@ -390,6 +390,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) " netdev_registered = %i, current_netdev_flags = 0x%04x, state = %li flags = 0x%08lx\n", vsi->netdev_registered, vsi->current_netdev_flags, vsi->state, vsi->flags); + if (vsi == pf->vsi[pf->lan_vsi]) + dev_info(&pf->pdev->dev, "MAC address: %pM SAN MAC: %pM Port MAC: %pM\n", + pf->hw.mac.addr, + pf->hw.mac.san_addr, + pf->hw.mac.port_addr); list_for_each_entry(f, &vsi->mac_filter_list, list) { dev_info(&pf->pdev->dev, " mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d\n", @@ -675,7 +680,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) vsi->info.resp_reserved[8], vsi->info.resp_reserved[9], vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]); if (vsi->back) - dev_info(&pf->pdev->dev, " pf = %p\n", vsi->back); + dev_info(&pf->pdev->dev, " PF = %p\n", vsi->back); dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx); dev_info(&pf->pdev->dev, " tc_config: numtc = %d, enabled_tc = 0x%x\n", @@ -921,9 +926,10 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid) return; } dev_info(&pf->pdev->dev, - "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d\n", + "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n", veb->idx, veb->veb_idx, veb->stats_idx, veb->seid, - veb->uplink_seid); + veb->uplink_seid, + veb->bridge_mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); i40e_dbg_dump_eth_stats(pf, &veb->stats); } @@ -945,7 +951,7 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf) /** * i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR - * @pf: the pf that would be altered + * @pf: the PF that would be altered * @flag: flag that needs enabling or disabling * @enable: Enable/disable FD SD/ATR **/ @@ -957,7 +963,7 @@ static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable) pf->flags &= ~flag; pf->auto_disable_flags |= flag; } - dev_info(&pf->pdev->dev, "requesting a pf reset\n"); + dev_info(&pf->pdev->dev, "requesting a PF reset\n"); i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); } @@ -1487,11 +1493,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } else { dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n"); } - } else if (strncmp(&cmd_buf[12], "pf", 2) == 0) { - i40e_pf_reset_stats(pf); - dev_info(&pf->pdev->dev, "pf clear stats called\n"); + } else if (strncmp(&cmd_buf[12], "port", 4) == 0) { + if (pf->hw.partition_id == 1) { + i40e_pf_reset_stats(pf); + dev_info(&pf->pdev->dev, "port stats cleared\n"); + } else { + dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n"); + } } else { - dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats pf\n"); + dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n"); } } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) { struct i40e_aq_desc *desc; @@ -1897,7 +1907,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, dev_info(&pf->pdev->dev, " read <reg>\n"); dev_info(&pf->pdev->dev, " write <reg> <value>\n"); dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n"); - dev_info(&pf->pdev->dev, " clear_stats pf\n"); + dev_info(&pf->pdev->dev, " clear_stats port\n"); dev_info(&pf->pdev->dev, " pfr\n"); dev_info(&pf->pdev->dev, " corer\n"); dev_info(&pf->pdev->dev, " globr\n"); @@ -1935,7 +1945,7 @@ static const struct file_operations i40e_dbg_command_fops = { * The netdev_ops entry in debugfs is for giving the driver commands * to be executed from the netdev operations. **************************************************************/ -static char i40e_dbg_netdev_ops_buf[256] = "hello world"; +static char i40e_dbg_netdev_ops_buf[256] = ""; /** * i40e_dbg_netdev_ops - read for netdev_ops datum @@ -2123,8 +2133,8 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = { }; /** - * i40e_dbg_pf_init - setup the debugfs directory for the pf - * @pf: the pf that is starting up + * i40e_dbg_pf_init - setup the debugfs directory for the PF + * @pf: the PF that is starting up **/ void i40e_dbg_pf_init(struct i40e_pf *pf) { @@ -2160,8 +2170,8 @@ create_failed: } /** - * i40e_dbg_pf_exit - clear out the pf's debugfs entries - * @pf: the pf that is stopping + * i40e_dbg_pf_exit - clear out the PF's debugfs entries + * @pf: the PF that is stopping **/ void i40e_dbg_pf_exit(struct i40e_pf *pf) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index b8230dc..b7d0aaa 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -113,7 +113,6 @@ static struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast), I40E_PF_STAT("tx_errors", stats.eth.tx_errors), I40E_PF_STAT("rx_dropped", stats.eth.rx_discards), - I40E_PF_STAT("tx_dropped", stats.eth.tx_discards), I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down), I40E_PF_STAT("crc_errors", stats.crc_errors), I40E_PF_STAT("illegal_bytes", stats.illegal_bytes), @@ -218,6 +217,13 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = { #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN) +static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = { + "NPAR", +}; + +#define I40E_PRIV_FLAGS_STR_LEN \ + (sizeof(i40e_priv_flags_strings) / ETH_GSTRING_LEN) + /** * i40e_partition_setting_complaint - generic complaint for MFP restriction * @pf: the PF struct @@ -229,73 +235,20 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf) } /** - * i40e_get_settings - Get Link Speed and Duplex settings + * i40e_get_settings_link_up - Get the Link settings for when link is up + * @hw: hw structure + * @ecmd: ethtool command to fill in * @netdev: network interface device structure - * @ecmd: ethtool command * - * Reports speed/duplex settings based on media_type **/ -static int i40e_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static void i40e_get_settings_link_up(struct i40e_hw *hw, + struct ethtool_cmd *ecmd, + struct net_device *netdev) { - struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_pf *pf = np->vsi->back; - struct i40e_hw *hw = &pf->hw; struct i40e_link_status *hw_link_info = &hw->phy.link_info; - bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; u32 link_speed = hw_link_info->link_speed; - /* hardware is either in 40G mode or 10G mode - * NOTE: this section initializes supported and advertising - */ - if (!link_up) { - /* link is down and the driver needs to fall back on - * device ID to determine what kinds of info to display, - * it's mostly a guess that may change when link is up - */ - switch (hw->device_id) { - case I40E_DEV_ID_QSFP_A: - case I40E_DEV_ID_QSFP_B: - case I40E_DEV_ID_QSFP_C: - /* pluggable QSFP */ - ecmd->supported = SUPPORTED_40000baseSR4_Full | - SUPPORTED_40000baseCR4_Full | - SUPPORTED_40000baseLR4_Full; - ecmd->advertising = ADVERTISED_40000baseSR4_Full | - ADVERTISED_40000baseCR4_Full | - ADVERTISED_40000baseLR4_Full; - break; - case I40E_DEV_ID_KX_B: - /* backplane 40G */ - ecmd->supported = SUPPORTED_40000baseKR4_Full; - ecmd->advertising = ADVERTISED_40000baseKR4_Full; - break; - case I40E_DEV_ID_KX_C: - /* backplane 10G */ - ecmd->supported = SUPPORTED_10000baseKR_Full; - ecmd->advertising = ADVERTISED_10000baseKR_Full; - break; - case I40E_DEV_ID_10G_BASE_T: - ecmd->supported = SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full; - break; - default: - /* all the rest are 10G/1G */ - ecmd->supported = SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full; - break; - } - - /* skip phy_type use as it is zero when link is down */ - goto no_valid_phy_type; - } - + /* Initialize supported and advertised settings based on phy settings */ switch 
(hw_link_info->phy_type) { case I40E_PHY_TYPE_40GBASE_CR4: case I40E_PHY_TYPE_40GBASE_CR4_CU: @@ -304,6 +257,11 @@ static int i40e_get_settings(struct net_device *netdev, ecmd->advertising = ADVERTISED_Autoneg | ADVERTISED_40000baseCR4_Full; break; + case I40E_PHY_TYPE_XLAUI: + case I40E_PHY_TYPE_XLPPI: + case I40E_PHY_TYPE_40GBASE_AOC: + ecmd->supported = SUPPORTED_40000baseCR4_Full; + break; case I40E_PHY_TYPE_40GBASE_KR4: ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_40000baseKR4_Full; @@ -311,8 +269,6 @@ static int i40e_get_settings(struct net_device *netdev, ADVERTISED_40000baseKR4_Full; break; case I40E_PHY_TYPE_40GBASE_SR4: - case I40E_PHY_TYPE_XLPPI: - case I40E_PHY_TYPE_XLAUI: ecmd->supported = SUPPORTED_40000baseSR4_Full; break; case I40E_PHY_TYPE_40GBASE_LR4: @@ -334,55 +290,56 @@ static int i40e_get_settings(struct net_device *netdev, case I40E_PHY_TYPE_10GBASE_LR: case I40E_PHY_TYPE_1000BASE_SX: case I40E_PHY_TYPE_1000BASE_LX: - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->supported |= SUPPORTED_1000baseT_Full; + ecmd->supported = SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + break; + case I40E_PHY_TYPE_1000BASE_KX: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_1000baseKX_Full; + ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_1000baseKX_Full; break; - case I40E_PHY_TYPE_10GBASE_CR1_CU: - case I40E_PHY_TYPE_10GBASE_CR1: case I40E_PHY_TYPE_10GBASE_T: + case I40E_PHY_TYPE_1000BASE_T: + case I40E_PHY_TYPE_100BASE_TX: ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full; + ecmd->advertising = ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + ecmd->advertising |= ADVERTISED_100baseT_Full; + break; + case I40E_PHY_TYPE_10GBASE_CR1_CU: + case I40E_PHY_TYPE_10GBASE_CR1: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_10000baseT_Full; ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full; + ADVERTISED_10000baseT_Full; break; case I40E_PHY_TYPE_XAUI: case I40E_PHY_TYPE_XFI: case I40E_PHY_TYPE_SFI: case I40E_PHY_TYPE_10GBASE_SFPP_CU: + case I40E_PHY_TYPE_10GBASE_AOC: ecmd->supported = SUPPORTED_10000baseT_Full; break; - case I40E_PHY_TYPE_1000BASE_KX: - case I40E_PHY_TYPE_1000BASE_T: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full; - break; - case I40E_PHY_TYPE_100BASE_TX: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full; - break; case I40E_PHY_TYPE_SGMII: ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_1000baseT_Full | - 
ADVERTISED_100baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + ecmd->advertising |= ADVERTISED_100baseT_Full; break; default: /* if we got here and link is up something bad is afoot */ @@ -390,8 +347,118 @@ static int i40e_get_settings(struct net_device *netdev, hw_link_info->phy_type); } -no_valid_phy_type: - /* this is if autoneg is enabled or disabled */ + /* Set speed and duplex */ + switch (link_speed) { + case I40E_LINK_SPEED_40GB: + /* need a SPEED_40000 in ethtool.h */ + ethtool_cmd_speed_set(ecmd, 40000); + break; + case I40E_LINK_SPEED_10GB: + ethtool_cmd_speed_set(ecmd, SPEED_10000); + break; + case I40E_LINK_SPEED_1GB: + ethtool_cmd_speed_set(ecmd, SPEED_1000); + break; + case I40E_LINK_SPEED_100MB: + ethtool_cmd_speed_set(ecmd, SPEED_100); + break; + default: + break; + } + ecmd->duplex = DUPLEX_FULL; +} + +/** + * i40e_get_settings_link_down - Get the Link settings for when link is down + * @hw: hw structure + * @ecmd: ethtool command to fill in + * + * Reports link settings that can be determined when link is down + **/ +static void i40e_get_settings_link_down(struct i40e_hw *hw, + struct ethtool_cmd *ecmd) +{ + struct i40e_link_status *hw_link_info = &hw->phy.link_info; + + /* link is down and the driver needs to fall back on + * device ID to determine what kinds of info to display, + * it's mostly a guess that may change when link is up + */ + switch (hw->device_id) { + case I40E_DEV_ID_QSFP_A: + case I40E_DEV_ID_QSFP_B: + case I40E_DEV_ID_QSFP_C: + /* pluggable QSFP */ + ecmd->supported = SUPPORTED_40000baseSR4_Full | + SUPPORTED_40000baseCR4_Full | + SUPPORTED_40000baseLR4_Full; + ecmd->advertising = ADVERTISED_40000baseSR4_Full | + ADVERTISED_40000baseCR4_Full | + ADVERTISED_40000baseLR4_Full; + break; + case I40E_DEV_ID_KX_B: + /* backplane 40G */ + ecmd->supported = SUPPORTED_40000baseKR4_Full; + ecmd->advertising = ADVERTISED_40000baseKR4_Full; + break; + case I40E_DEV_ID_KX_C: + /* backplane 10G */ + ecmd->supported = SUPPORTED_10000baseKR_Full; + ecmd->advertising = ADVERTISED_10000baseKR_Full; + break; + case I40E_DEV_ID_10G_BASE_T: + ecmd->supported = SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full; + /* Figure out what has been requested */ + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + ecmd->advertising |= ADVERTISED_100baseT_Full; + break; + default: + /* all the rest are 10G/1G */ + ecmd->supported = SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full; + /* Figure out what has been requested */ + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + break; + } + + /* With no link speed and duplex are unknown */ + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; +} + +/** + * i40e_get_settings - Get Link Speed and Duplex settings + * @netdev: network interface device structure + * @ecmd: ethtool command + * + * Reports speed/duplex settings based on media_type + **/ +static int i40e_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct 
i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + struct i40e_link_status *hw_link_info = &hw->phy.link_info; + bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; + + if (link_up) + i40e_get_settings_link_up(hw, ecmd, netdev); + else + i40e_get_settings_link_down(hw, ecmd); + + /* Now set the settings that don't rely on link being up/down */ + + /* Set autoneg settings */ ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? AUTONEG_ENABLE : AUTONEG_DISABLE); @@ -424,11 +491,13 @@ no_valid_phy_type: break; } + /* Set transceiver */ ecmd->transceiver = XCVR_EXTERNAL; + /* Set flow control settings */ ecmd->supported |= SUPPORTED_Pause; - switch (hw->fc.current_mode) { + switch (hw->fc.requested_mode) { case I40E_FC_FULL: ecmd->advertising |= ADVERTISED_Pause; break; @@ -445,30 +514,6 @@ no_valid_phy_type: break; } - if (link_up) { - switch (link_speed) { - case I40E_LINK_SPEED_40GB: - /* need a SPEED_40000 in ethtool.h */ - ethtool_cmd_speed_set(ecmd, 40000); - break; - case I40E_LINK_SPEED_10GB: - ethtool_cmd_speed_set(ecmd, SPEED_10000); - break; - case I40E_LINK_SPEED_1GB: - ethtool_cmd_speed_set(ecmd, SPEED_1000); - break; - case I40E_LINK_SPEED_100MB: - ethtool_cmd_speed_set(ecmd, SPEED_100); - break; - default: - break; - } - ecmd->duplex = DUPLEX_FULL; - } else { - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; - } - return 0; } @@ -601,6 +646,8 @@ static int i40e_set_settings(struct net_device *netdev, config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; + /* save the requested speeds */ + hw->phy.link_info.requested_speeds = config.link_speed; /* set link and auto negotiation so changes take effect */ config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; /* If link is up put link down */ @@ -621,7 +668,7 @@ static int i40e_set_settings(struct net_device *netdev, return -EAGAIN; } - status = i40e_update_link_info(hw, true); + status = i40e_aq_get_link_info(hw, true, NULL, NULL); if (status) netdev_info(netdev, "Updating link info failed with error %d\n", status); @@ -767,7 +814,7 @@ static int i40e_set_pauseparam(struct net_device *netdev, err = -EAGAIN; } if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) { - netdev_info(netdev, "Set fc failed on the update_link_info call with error %d and status %d\n", + netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n", status, hw->aq.asq_last_status); err = -EAGAIN; } @@ -870,7 +917,9 @@ static int i40e_get_eeprom(struct net_device *netdev, cmd = (struct i40e_nvm_access *)eeprom; ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - if (ret_val) + if (ret_val && + ((hw->aq.asq_last_status != I40E_AQ_RC_EACCES) || + (hw->debug_mask & I40E_DEBUG_NVM))) dev_info(&pf->pdev->dev, "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", ret_val, hw->aq.asq_last_status, errno, @@ -974,7 +1023,10 @@ static int i40e_set_eeprom(struct net_device *netdev, cmd = (struct i40e_nvm_access *)eeprom; ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - if (ret_val && hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) + if (ret_val && + ((hw->aq.asq_last_status != I40E_AQ_RC_EPERM && + hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) || + (hw->debug_mask & I40E_DEBUG_NVM))) dev_info(&pf->pdev->dev, "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", ret_val, hw->aq.asq_last_status, errno, @@ -998,6 +1050,7 @@ static void 
i40e_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(pf->pdev), sizeof(drvinfo->bus_info)); + drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN; } static void i40e_get_ringparam(struct net_device *netdev, @@ -1176,7 +1229,7 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) case ETH_SS_TEST: return I40E_TEST_LEN; case ETH_SS_STATS: - if (vsi == pf->vsi[pf->lan_vsi]) { + if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) { int len = I40E_PF_STATS_LEN(netdev); if (pf->lan_veb != I40E_NO_VEB) @@ -1185,6 +1238,8 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) } else { return I40E_VSI_STATS_LEN(netdev); } + case ETH_SS_PRIV_FLAGS: + return I40E_PRIV_FLAGS_STR_LEN; default: return -EOPNOTSUPP; } @@ -1247,7 +1302,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, i += 2; } rcu_read_unlock(); - if (vsi != pf->vsi[pf->lan_vsi]) + if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) return; if (pf->lan_veb != I40E_NO_VEB) { @@ -1320,7 +1375,7 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i); p += ETH_GSTRING_LEN; } - if (vsi != pf->vsi[pf->lan_vsi]) + if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) return; if (pf->lan_veb != I40E_NO_VEB) { @@ -1358,6 +1413,15 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, } /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */ break; + case ETH_SS_PRIV_FLAGS: + for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { + memcpy(data, i40e_priv_flags_strings[i], + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + default: + break; } } @@ -1473,6 +1537,7 @@ static void i40e_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); + bool if_running = netif_running(netdev); struct i40e_pf *pf = np->vsi->back; if (eth_test->flags == ETH_TEST_FL_OFFLINE) { @@ -1480,6 +1545,12 @@ static void i40e_diag_test(struct net_device *netdev, netif_info(pf, drv, netdev, "offline testing starting\n"); set_bit(__I40E_TESTING, &pf->state); + /* If the device is online then take it offline */ + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + else + i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); /* Link test performed before hardware reset * so autoneg doesn't interfere with test result @@ -1502,6 +1573,9 @@ static void i40e_diag_test(struct net_device *netdev, clear_bit(__I40E_TESTING, &pf->state); i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + + if (if_running) + dev_open(netdev); } else { /* Online tests */ netif_info(pf, drv, netdev, "online testing starting\n"); @@ -1599,6 +1673,8 @@ static int i40e_set_phys_id(struct net_device *netdev, case ETHTOOL_ID_INACTIVE: i40e_led_set(hw, pf->led_status, false); break; + default: + break; } return 0; @@ -1703,6 +1779,11 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd) { cmd->data = 0; + if (pf->vsi[pf->lan_vsi]->rxnfc.data != 0) { + cmd->data = pf->vsi[pf->lan_vsi]->rxnfc.data; + cmd->flow_type = pf->vsi[pf->lan_vsi]->rxnfc.flow_type; + return 0; + } /* Report default options for RSS on i40e */ switch (cmd->flow_type) { case TCP_V4_FLOW: @@ -1974,6 +2055,9 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); i40e_flush(hw); + /* Save setting for future output/update */ 
+ pf->vsi[pf->lan_vsi]->rxnfc = *nfc; + return 0; } @@ -2281,10 +2365,6 @@ static int i40e_set_channels(struct net_device *dev, /* update feature limits from largest to smallest supported values */ /* TODO: Flow director limit, DCB etc */ - /* cap RSS limit */ - if (count > pf->rss_size_max) - count = pf->rss_size_max; - /* use rss_reconfig to rebuild with new queue count and update traffic * class queue mapping */ @@ -2295,6 +2375,133 @@ static int i40e_set_channels(struct net_device *dev, return -EINVAL; } +#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4) +/** + * i40e_get_rxfh_key_size - get the RSS hash key size + * @netdev: network interface device structure + * + * Returns the table size. + **/ +static u32 i40e_get_rxfh_key_size(struct net_device *netdev) +{ + return I40E_HKEY_ARRAY_SIZE; +} + +/** + * i40e_get_rxfh_indir_size - get the rx flow hash indirection table size + * @netdev: network interface device structure + * + * Returns the table size. + **/ +static u32 i40e_get_rxfh_indir_size(struct net_device *netdev) +{ + return I40E_HLUT_ARRAY_SIZE; +} + +static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u32 reg_val; + int i, j; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (!indir) + return 0; + + for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) { + reg_val = rd32(hw, I40E_PFQF_HLUT(i)); + indir[j++] = reg_val & 0xff; + indir[j++] = (reg_val >> 8) & 0xff; + indir[j++] = (reg_val >> 16) & 0xff; + indir[j++] = (reg_val >> 24) & 0xff; + } + + if (key) { + for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) { + reg_val = rd32(hw, I40E_PFQF_HKEY(i)); + key[j++] = (u8)(reg_val & 0xff); + key[j++] = (u8)((reg_val >> 8) & 0xff); + key[j++] = (u8)((reg_val >> 16) & 0xff); + key[j++] = (u8)((reg_val >> 24) & 0xff); + } + } + return 0; +} + +/** + * i40e_set_rxfh - set the rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * + * Returns -EINVAL if the table specifies an inavlid queue id, otherwise + * returns 0 after programming the table. + **/ +static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u32 reg_val; + int i, j; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (!indir) + return 0; + + for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) { + reg_val = indir[j++]; + reg_val |= indir[j++] << 8; + reg_val |= indir[j++] << 16; + reg_val |= indir[j++] << 24; + wr32(hw, I40E_PFQF_HLUT(i), reg_val); + } + + if (key) { + for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) { + reg_val = key[j++]; + reg_val |= key[j++] << 8; + reg_val |= key[j++] << 16; + reg_val |= key[j++] << 24; + wr32(hw, I40E_PFQF_HKEY(i), reg_val); + } + } + return 0; +} + +/** + * i40e_get_priv_flags - report device private flags + * @dev: network interface device structure + * + * The get string set count and the string set should be matched for each + * flag returned. Add new strings for each flag to the i40e_priv_flags_strings + * array. + * + * Returns a u32 bitmap of flags. 
+ **/ +static u32 i40e_get_priv_flags(struct net_device *dev) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + u32 ret_flags = 0; + + ret_flags |= pf->hw.func_caps.npar_enable ? + I40E_PRIV_FLAGS_NPAR_FLAG : 0; + + return ret_flags; +} + static const struct ethtool_ops i40e_ethtool_ops = { .get_settings = i40e_get_settings, .set_settings = i40e_set_settings, @@ -2323,9 +2530,14 @@ static const struct ethtool_ops i40e_ethtool_ops = { .get_ethtool_stats = i40e_get_ethtool_stats, .get_coalesce = i40e_get_coalesce, .set_coalesce = i40e_set_coalesce, + .get_rxfh_key_size = i40e_get_rxfh_key_size, + .get_rxfh_indir_size = i40e_get_rxfh_indir_size, + .get_rxfh = i40e_get_rxfh, + .set_rxfh = i40e_set_rxfh, .get_channels = i40e_get_channels, .set_channels = i40e_set_channels, .get_ts_info = i40e_get_ts_info, + .get_priv_flags = i40e_get_priv_flags, }; void i40e_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index 27c206e..1ca4845 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -24,7 +24,6 @@ * ******************************************************************************/ - #include <linux/if_ether.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> @@ -150,7 +149,7 @@ static inline bool i40e_fcoe_xid_is_valid(u16 xid) /** * i40e_fcoe_ddp_unmap - unmap the mapped sglist associated - * @pf: pointer to pf + * @pf: pointer to PF * @ddp: sw DDP context * * Unmap the scatter-gather list associated with the given SW DDP context @@ -269,7 +268,7 @@ out: /** * i40e_fcoe_sw_init - sets up the HW for FCoE - * @pf: pointer to pf + * @pf: pointer to PF * * Returns 0 if FCoE is supported otherwise the error code **/ @@ -329,7 +328,7 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf) /** * i40e_get_fcoe_tc_map - Return TC map for FCoE APP - * @pf: pointer to pf + * @pf: pointer to PF * **/ u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf) @@ -381,12 +380,11 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt) ctxt->pf_num = hw->pf_id; ctxt->vf_num = 0; ctxt->uplink_seid = vsi->uplink_seid; - ctxt->connection_type = 0x1; + ctxt->connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt->flags = I40E_AQ_VSI_TYPE_PF; /* FCoE VSI would need the following sections */ - info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID | - I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); + info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); /* FCoE VSI does not need these sections */ info->valid_sections &= cpu_to_le16(~(I40E_AQ_VSI_PROP_SECURITY_VALID | @@ -395,7 +393,12 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt) I40E_AQ_VSI_PROP_INGRESS_UP_VALID | I40E_AQ_VSI_PROP_EGRESS_UP_VALID)); - info->switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + if (i40e_is_vsi_uplink_mode_veb(vsi)) { + info->valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + info->switch_id = + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } enabled_tc = 
i40e_get_fcoe_tc_map(pf); i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true); @@ -1443,7 +1446,6 @@ static int i40e_fcoe_set_features(struct net_device *netdev, return 0; } - static const struct net_device_ops i40e_fcoe_netdev_ops = { .ndo_open = i40e_open, .ndo_stop = i40e_close, @@ -1470,6 +1472,11 @@ static const struct net_device_ops i40e_fcoe_netdev_ops = { .ndo_set_features = i40e_fcoe_set_features, }; +/* fcoe network device type */ +static struct device_type fcoe_netdev_type = { + .name = "fcoe", +}; + /** * i40e_fcoe_config_netdev - prepares the VSI context for creating a FCoE VSI * @vsi: pointer to the associated VSI struct @@ -1503,6 +1510,7 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi) strlcpy(netdev->name, "fcoe%d", IFNAMSIZ-1); netdev->mtu = FCOE_MTU; SET_NETDEV_DEV(netdev, &pf->pdev->dev); + SET_NETDEV_DEVTYPE(netdev, &fcoe_netdev_type); /* set different dev_port value 1 for FCoE netdev than the default * zero dev_port value for PF netdev, this helps biosdevname user * tool to differentiate them correctly while both attached to the @@ -1523,7 +1531,7 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi) /** * i40e_fcoe_vsi_setup - allocate and set up FCoE VSI - * @pf: the pf that VSI is associated with + * @pf: the PF that VSI is associated with * **/ void i40e_fcoe_vsi_setup(struct i40e_pf *pf) @@ -1550,7 +1558,7 @@ void i40e_fcoe_vsi_setup(struct i40e_pf *pf) vsi = i40e_vsi_setup(pf, I40E_VSI_FCOE, seid, 0); if (vsi) { dev_dbg(&pf->pdev->dev, - "Successfully created FCoE VSI seid %d id %d uplink_seid %d pf seid %d\n", + "Successfully created FCoE VSI seid %d id %d uplink_seid %d PF seid %d\n", vsi->seid, vsi->id, vsi->uplink_seid, seid); } else { dev_info(&pf->pdev->dev, "Failed to create FCoE VSI\n"); diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h index 21e0f58..0d49e2d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h @@ -37,7 +37,6 @@ #define I40E_FILTER_CONTEXT_DESC(R, i) \ (&(((struct i40e_fcoe_filter_context_desc *)((R)->desc))[i])) - /* receive queue descriptor filter status for FCoE */ #define I40E_RX_DESC_FLTSTAT_FCMASK 0x3 #define I40E_RX_DESC_FLTSTAT_NOMTCH 0x0 /* no ddp context match */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index 4627588..0079ad7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -856,7 +856,7 @@ static void i40e_write_dword(u8 *hmc_bits, if (ce_info->width < 32) mask = ((u32)1 << ce_info->width) - 1; else - mask = 0xFFFFFFFF; + mask = ~(u32)0; /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines @@ -908,7 +908,7 @@ static void i40e_write_qword(u8 *hmc_bits, if (ce_info->width < 64) mask = ((u64)1 << ce_info->width) - 1; else - mask = 0xFFFFFFFFFFFFFFFF; + mask = ~(u64)0; /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index dadda3c..c1eaab5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 
Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -39,7 +39,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 2 -#define DRV_VERSION_BUILD 6 +#define DRV_VERSION_BUILD 37 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -450,7 +450,7 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi) } /** - * i40e_pf_reset_stats - Reset all of the stats for the given pf + * i40e_pf_reset_stats - Reset all of the stats for the given PF * @pf: the PF to be reset **/ void i40e_pf_reset_stats(struct i40e_pf *pf) @@ -896,7 +896,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) } /** - * i40e_update_pf_stats - Update the pf statistics counters. + * i40e_update_pf_stats - Update the PF statistics counters. * @pf: the PF to be updated **/ static void i40e_update_pf_stats(struct i40e_pf *pf) @@ -919,11 +919,6 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) pf->stat_offsets_loaded, &osd->eth.rx_discards, &nsd->eth.rx_discards); - i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port), - pf->stat_offsets_loaded, - &osd->eth.tx_discards, - &nsd->eth.tx_discards); - i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), I40E_GLPRT_UPRCL(hw->port), pf->stat_offsets_loaded, @@ -1133,7 +1128,7 @@ void i40e_update_stats(struct i40e_vsi *vsi) * @vsi: the VSI to be searched * @macaddr: the MAC address * @vlan: the vlan - * @is_vf: make sure its a vf filter, else doesn't matter + * @is_vf: make sure its a VF filter, else doesn't matter * @is_netdev: make sure its a netdev filter, else doesn't matter * * Returns ptr to the filter object or NULL @@ -1161,7 +1156,7 @@ static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi, * i40e_find_mac - Find a mac addr in the macvlan filters list * @vsi: the VSI to be searched * @macaddr: the MAC address we are searching for - * @is_vf: make sure its a vf filter, else doesn't matter + * @is_vf: make sure its a VF filter, else doesn't matter * @is_netdev: make sure its a netdev filter, else doesn't matter * * Returns the first filter with the provided MAC address or NULL if @@ -1209,7 +1204,7 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi) * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans * @vsi: the VSI to be searched * @macaddr: the mac address to be filtered - * @is_vf: true if it is a vf + * @is_vf: true if it is a VF * @is_netdev: true if it is a netdev * * Goes through all the macvlan filters and adds a @@ -1270,7 +1265,7 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) * @vsi: the VSI to be searched * @macaddr: the MAC address * @vlan: the vlan - * @is_vf: make sure its a vf filter, else doesn't matter + * @is_vf: make sure its a VF filter, else doesn't matter * @is_netdev: make sure its a netdev filter, else doesn't matter * * Returns ptr to the filter object or NULL when no memory available. 
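[Editor's note: the macvlan filter helpers touched above share one filter list between the PF netdev and the VFs, so each entry carries ownership flags plus a use counter instead of being duplicated per consumer. A minimal sketch of that reference-counting pattern follows; the types and helper names are hypothetical and only loosely modeled on the i40e filter list, not the driver's actual structures.]

/* Simplified, self-contained illustration of a shared MAC/VLAN filter
 * entry that is reference-counted per consumer (PF netdev vs. VF).
 * All names here are made up for illustration.
 */
#include <stdbool.h>

struct demo_filter {
	unsigned char macaddr[6];
	short vlan;
	int counter;      /* number of consumers still using this entry */
	bool is_vf;       /* a VF has claimed this filter */
	bool is_netdev;   /* the PF netdev has claimed this filter */
};

/* Take a reference for a consumer; bump the counter only when that
 * consumer did not already hold the filter.
 */
static void demo_filter_get(struct demo_filter *f, bool is_vf, bool is_netdev)
{
	if (is_vf && !f->is_vf) {
		f->is_vf = true;
		f->counter++;
	} else if (is_netdev && !f->is_netdev) {
		f->is_netdev = true;
		f->counter++;
	}
}

/* Drop a reference; the caller may free the entry only once neither the
 * VF nor the netdev still needs it, mirroring the min_f check in the
 * i40e_del_filter() hunk below.
 */
static bool demo_filter_put(struct demo_filter *f, bool is_vf, bool is_netdev)
{
	if (is_vf && f->is_vf) {
		f->is_vf = false;
		f->counter--;
	} else if (is_netdev && f->is_netdev) {
		f->is_netdev = false;
		f->counter--;
	}
	return f->counter == 0;   /* true when the entry can be removed */
}

[End editor's note; patch continues.]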
@@ -1330,7 +1325,7 @@ add_filter_out: * @vsi: the VSI to be searched * @macaddr: the MAC address * @vlan: the vlan - * @is_vf: make sure it's a vf filter, else doesn't matter + * @is_vf: make sure it's a VF filter, else doesn't matter * @is_netdev: make sure it's a netdev filter, else doesn't matter **/ void i40e_del_filter(struct i40e_vsi *vsi, @@ -1357,7 +1352,7 @@ void i40e_del_filter(struct i40e_vsi *vsi, f->counter--; } } else { - /* make sure we don't remove a filter in use by vf or netdev */ + /* make sure we don't remove a filter in use by VF or netdev */ int min_f = 0; min_f += (f->is_vf ? 1 : 0); min_f += (f->is_netdev ? 1 : 0); @@ -1546,7 +1541,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, vsi->tc_config.tc_info[i].qoffset = offset; vsi->tc_config.tc_info[i].qcount = qcount; - /* find the power-of-2 of the number of queue pairs */ + /* find the next higher power-of-2 of num queue pairs */ num_qps = qcount; pow = 0; while (num_qps && ((1 << pow) < qcount)) { @@ -1576,6 +1571,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, /* Set actual Tx/Rx queue pairs */ vsi->num_queue_pairs = offset; + if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) { + if (vsi->req_queue_pairs > 0) + vsi->num_queue_pairs = vsi->req_queue_pairs; + else + vsi->num_queue_pairs = pf->num_lan_msix; + } /* Scheduler section valid can only be set for ADD VSI */ if (is_add) { @@ -2398,20 +2399,20 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring) struct i40e_vsi *vsi = ring->vsi; cpumask_var_t mask; - if (ring->q_vector && ring->netdev) { - /* Single TC mode enable XPS */ - if (vsi->tc_config.numtc <= 1 && - !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) { + if (!ring->q_vector || !ring->netdev) + return; + + /* Single TC mode enable XPS */ + if (vsi->tc_config.numtc <= 1) { + if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask, ring->queue_index); - } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) { - /* Disable XPS to allow selection based on TC */ - bitmap_zero(cpumask_bits(mask), nr_cpumask_bits); - netif_set_xps_queue(ring->netdev, mask, - ring->queue_index); - free_cpumask_var(mask); - } + } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) { + /* Disable XPS to allow selection based on TC */ + bitmap_zero(cpumask_bits(mask), nr_cpumask_bits); + netif_set_xps_queue(ring->netdev, mask, ring->queue_index); + free_cpumask_var(mask); } } @@ -2596,7 +2597,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); writel(0, ring->tail); - i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); + if (ring_is_ps_enabled(ring)) { + i40e_alloc_rx_headers(ring); + i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring)); + } else { + i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring)); + } return 0; } @@ -3183,7 +3189,7 @@ static irqreturn_t i40e_intr(int irq, void *data) pf->globr_count++; } else if (val == I40E_RESET_EMPR) { pf->empr_count++; - set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state); + set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state); } } @@ -3825,6 +3831,8 @@ static void i40e_reset_interrupt_capability(struct i40e_pf *pf) pci_disable_msix(pf->pdev); kfree(pf->msix_entries); pf->msix_entries = NULL; + kfree(pf->irq_pile); + pf->irq_pile = NULL; } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { pci_disable_msi(pf->pdev); } @@ -4021,7 +4029,7 @@ static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf) #endif /** * 
i40e_get_iscsi_tc_map - Return TC map for iSCSI APP - * @pf: pointer to pf + * @pf: pointer to PF * * Get TC map for ISCSI PF type that will include iSCSI TC * and LAN TC. @@ -4119,7 +4127,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) if (pf->hw.func_caps.iscsi) enabled_tc = i40e_get_iscsi_tc_map(pf); else - enabled_tc = pf->hw.func_caps.enabled_tcmap; + return 1; /* Only TC0 */ /* At least have TC0 */ enabled_tc = (enabled_tc ? enabled_tc : 0x1); @@ -4169,11 +4177,11 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); - /* MPF enabled and iSCSI PF type */ + /* MFP enabled and iSCSI PF type */ if (pf->hw.func_caps.iscsi) return i40e_get_iscsi_tc_map(pf); else - return pf->hw.func_caps.enabled_tcmap; + return i40e_pf_get_default_tc(pf); } /** @@ -4196,7 +4204,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); if (aq_ret) { dev_info(&pf->pdev->dev, - "couldn't get pf vsi bw config, err %d, aq_err %d\n", + "couldn't get PF vsi bw config, err %d, aq_err %d\n", aq_ret, pf->hw.aq.asq_last_status); return -EINVAL; } @@ -4206,7 +4214,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) NULL); if (aq_ret) { dev_info(&pf->pdev->dev, - "couldn't get pf vsi ets bw config, err %d, aq_err %d\n", + "couldn't get PF vsi ets bw config, err %d, aq_err %d\n", aq_ret, pf->hw.aq.asq_last_status); return -EINVAL; } @@ -4563,6 +4571,11 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) struct i40e_hw *hw = &pf->hw; int err = 0; + /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */ + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || + (pf->hw.aq.fw_maj_ver < 4)) + goto out; + /* Get the initial DCB configuration */ err = i40e_init_dcb(hw); if (!err) { @@ -4853,11 +4866,7 @@ exit: * * Returns 0 on success, negative value on failure **/ -#ifdef I40E_FCOE int i40e_open(struct net_device *netdev) -#else -static int i40e_open(struct net_device *netdev) -#endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -4967,7 +4976,7 @@ err_setup_tx: /** * i40e_fdir_filter_exit - Cleans up the Flow Director accounting - * @pf: Pointer to pf + * @pf: Pointer to PF * * This function destroys the hlist where all the Flow Director * filters were saved. @@ -5055,24 +5064,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) wr32(&pf->hw, I40E_GLGEN_RTRIG, val); i40e_flush(&pf->hw); - } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) { - - /* Request a Firmware Reset - * - * Same as Global reset, plus restarting the - * embedded firmware engine. 
- */ - /* enable EMP Reset */ - val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP); - val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK; - wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val); - - /* force the reset */ - val = rd32(&pf->hw, I40E_GLGEN_RTRIG); - val |= I40E_GLGEN_RTRIG_EMPFWR_MASK; - wr32(&pf->hw, I40E_GLGEN_RTRIG, val); - i40e_flush(&pf->hw); - } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) { /* Request a PF Reset @@ -5195,7 +5186,6 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, struct i40e_aqc_lldp_get_mib *mib = (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw; struct i40e_hw *hw = &pf->hw; - struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; struct i40e_dcbx_config tmp_dcbx_cfg; bool need_reconfig = false; int ret = 0; @@ -5228,8 +5218,10 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg)); /* Store the old configuration */ - tmp_dcbx_cfg = *dcbx_cfg; + memcpy(&tmp_dcbx_cfg, &hw->local_dcbx_config, sizeof(tmp_dcbx_cfg)); + /* Reset the old DCBx configuration data */ + memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config)); /* Get updated DCBX data from firmware */ ret = i40e_get_dcb_config(&pf->hw); if (ret) { @@ -5238,20 +5230,22 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, } /* No change detected in DCBX configs */ - if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) { + if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config, + sizeof(tmp_dcbx_cfg))) { dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); goto exit; } - need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, dcbx_cfg); + need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, + &hw->local_dcbx_config); - i40e_dcbnl_flush_apps(pf, dcbx_cfg); + i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); if (!need_reconfig) goto exit; /* Enable DCB tagging only when more than one TC */ - if (i40e_dcb_get_num_tc(dcbx_cfg) > 1) + if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) pf->flags |= I40E_FLAG_DCB_ENABLED; else pf->flags &= ~I40E_FLAG_DCB_ENABLED; @@ -5351,9 +5345,9 @@ static void i40e_service_event_complete(struct i40e_pf *pf) * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters * @pf: board private structure **/ -int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf) +u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf) { - int val, fcnt_prog; + u32 val, fcnt_prog; val = rd32(&pf->hw, I40E_PFQF_FDSTAT); fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK); @@ -5361,12 +5355,13 @@ int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf) } /** - * i40e_get_current_fd_count - Get the count of total FD filters programmed + * i40e_get_current_fd_count - Get total FD filters programmed for this PF * @pf: board private structure **/ -int i40e_get_current_fd_count(struct i40e_pf *pf) +u32 i40e_get_current_fd_count(struct i40e_pf *pf) { - int val, fcnt_prog; + u32 val, fcnt_prog; + val = rd32(&pf->hw, I40E_PFQF_FDSTAT); fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) + ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >> @@ -5375,6 +5370,21 @@ int i40e_get_current_fd_count(struct i40e_pf *pf) } /** + * i40e_get_global_fd_count - Get total FD filters programmed on device + * @pf: board private structure + **/ +u32 i40e_get_global_fd_count(struct i40e_pf *pf) +{ + u32 val, fcnt_prog; + + val = rd32(&pf->hw, I40E_GLQF_FDCNT_0); + fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) + + ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >> + I40E_GLQF_FDCNT_0_BESTCNT_SHIFT); 
+ return fcnt_prog; +} + +/** * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled * @pf: board private structure **/ @@ -5388,7 +5398,7 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) /* Check if, FD SB or ATR was auto disabled and if there is enough room * to re-enable */ - fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf); + fcnt_prog = i40e_get_global_fd_count(pf); fcnt_avail = pf->fdir_pf_filter_count; if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) || (pf->fd_add_err == 0) || @@ -5410,13 +5420,17 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) } #define I40E_MIN_FD_FLUSH_INTERVAL 10 +#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30 /** * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB * @pf: board private structure **/ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) { + unsigned long min_flush_time; int flush_wait_retry = 50; + bool disable_atr = false; + int fd_room; int reg; if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) @@ -5424,9 +5438,20 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) if (time_after(jiffies, pf->fd_flush_timestamp + (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) { - set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); + /* If the flush is happening too quick and we have mostly + * SB rules we should not re-enable ATR for some time. + */ + min_flush_time = pf->fd_flush_timestamp + + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ); + fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters; + + if (!(time_after(jiffies, min_flush_time)) && + (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) { + dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n"); + disable_atr = true; + } + pf->fd_flush_timestamp = jiffies; - pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED; pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; /* flush all filters */ wr32(&pf->hw, I40E_PFQF_CTL_1, @@ -5446,10 +5471,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) } else { /* replay sideband filters */ i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); - - pf->flags |= I40E_FLAG_FD_ATR_ENABLED; - pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; - pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; + if (!disable_atr) + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); } @@ -5460,7 +5483,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed * @pf: board private structure **/ -int i40e_get_current_atr_cnt(struct i40e_pf *pf) +u32 i40e_get_current_atr_cnt(struct i40e_pf *pf) { return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters; } @@ -5486,9 +5509,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) return; - if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) && - (i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) && - (i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count)) + if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) i40e_fdir_flush_and_replay(pf); i40e_fdir_check_and_reenable(pf); @@ -5919,6 +5940,94 @@ static void i40e_verify_eeprom(struct i40e_pf *pf) } /** + * i40e_enable_pf_switch_lb + * @pf: pointer to the PF structure + * + * enable switch loop back or die - no point in a return value + **/ +static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) +{ + 
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi_context ctxt; + int aq_ret; + + ctxt.seid = pf->main_vsi_seid; + ctxt.pf_num = pf->hw.pf_id; + ctxt.vf_num = 0; + aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "%s couldn't get PF vsi config, err %d, aq_err %d\n", + __func__, aq_ret, pf->hw.aq.asq_last_status); + return; + } + ctxt.flags = I40E_AQ_VSI_TYPE_PF; + ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + + aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "%s: update vsi switch failed, aq_err=%d\n", + __func__, vsi->back->hw.aq.asq_last_status); + } +} + +/** + * i40e_disable_pf_switch_lb + * @pf: pointer to the PF structure + * + * disable switch loop back or die - no point in a return value + **/ +static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) +{ + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi_context ctxt; + int aq_ret; + + ctxt.seid = pf->main_vsi_seid; + ctxt.pf_num = pf->hw.pf_id; + ctxt.vf_num = 0; + aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "%s couldn't get PF vsi config, err %d, aq_err %d\n", + __func__, aq_ret, pf->hw.aq.asq_last_status); + return; + } + ctxt.flags = I40E_AQ_VSI_TYPE_PF; + ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + + aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "%s: update vsi switch failed, aq_err=%d\n", + __func__, vsi->back->hw.aq.asq_last_status); + } +} + +/** + * i40e_config_bridge_mode - Configure the HW bridge mode + * @veb: pointer to the bridge instance + * + * Configure the loop back mode for the LAN VSI that is downlink to the + * specified HW bridge instance. It is expected this function is called + * when a new HW bridge is instantiated. + **/ +static void i40e_config_bridge_mode(struct i40e_veb *veb) +{ + struct i40e_pf *pf = veb->pf; + + dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", + veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); + if (veb->bridge_mode & BRIDGE_MODE_VEPA) + i40e_disable_pf_switch_lb(pf); + else + i40e_enable_pf_switch_lb(pf); +} + +/** * i40e_reconstitute_veb - rebuild the VEB and anything connected to it * @veb: pointer to the VEB instance * @@ -5964,8 +6073,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) if (ret) goto end_reconstitute; - /* Enable LB mode for the main VSI now that it is on a VEB */ - i40e_enable_pf_switch_lb(pf); + i40e_config_bridge_mode(veb); /* create the remaining VSIs attached to this VEB */ for (v = 0; v < pf->num_alloc_vsi; v++) { @@ -6137,7 +6245,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf) * i40e_prep_for_reset - prep for the core to reset * @pf: board private structure * - * Close up the VFs and other things in prep for pf Reset. + * Close up the VFs and other things in prep for PF Reset. 
**/ static void i40e_prep_for_reset(struct i40e_pf *pf) { @@ -6222,10 +6330,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) } /* re-verify the eeprom if we just had an EMP reset */ - if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) { - clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state); + if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state)) i40e_verify_eeprom(pf); - } i40e_clear_pxe_mode(hw); ret = i40e_get_capabilities(pf); @@ -6335,13 +6441,14 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) } } - msleep(75); - ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); - if (ret) { - dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", - pf->hw.aq.asq_last_status); + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || + (pf->hw.aq.fw_maj_ver < 4)) { + msleep(75); + ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); + if (ret) + dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); } - /* reinit the misc interrupt */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) ret = i40e_setup_misc_vector(pf); @@ -6364,7 +6471,7 @@ clear_recovery: } /** - * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild + * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild * @pf: board private structure * * Close up the VFs and other things in prep for a Core Reset, @@ -6378,7 +6485,7 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf) /** * i40e_handle_mdd_event - * @pf: pointer to the pf structure + * @pf: pointer to the PF structure * * Called from the MDD irq handler to identify possibly malicious vfs **/ @@ -6407,7 +6514,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) I40E_GL_MDET_TX_QUEUE_SHIFT) - pf->hw.func_caps.base_queue; if (netif_msg_tx_err(pf)) - dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n", + dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n", event, queue, pf_num, vf_num); wr32(hw, I40E_GL_MDET_TX, 0xffffffff); mdd_detected = true; @@ -6728,6 +6835,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) vsi->idx = vsi_idx; vsi->rx_itr_setting = pf->rx_itr_default; vsi->tx_itr_setting = pf->tx_itr_default; + vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ? + pf->rss_table_size : 64; vsi->netdev_registered = false; vsi->work_limit = I40E_DEFAULT_IRQ_WORK; INIT_LIST_HEAD(&vsi->mac_filter_list); @@ -6808,7 +6917,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi) goto unlock_vsi; } - /* updates the pf for this cleared vsi */ + /* updates the PF for this cleared vsi */ i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); @@ -6921,15 +7030,14 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) * * Work with the OS to set up the MSIX vectors needed. * - * Returns 0 on success, negative on failure + * Returns the number of vectors reserved or negative on failure **/ static int i40e_init_msix(struct i40e_pf *pf) { - i40e_status err = 0; struct i40e_hw *hw = &pf->hw; - int other_vecs = 0; + int vectors_left; int v_budget, i; - int vec; + int v_actual; if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) return -ENODEV; @@ -6951,24 +7059,62 @@ static int i40e_init_msix(struct i40e_pf *pf) * If we can't get what we want, we'll simplify to nearly nothing * and try again. 
If that still fails, we punt. */ - pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size); - pf->num_vmdq_msix = pf->num_vmdq_qps; - other_vecs = 1; - other_vecs += (pf->num_vmdq_vsis * pf->num_vmdq_msix); - if (pf->flags & I40E_FLAG_FD_SB_ENABLED) - other_vecs++; - - /* Scale down if necessary, and the rings will share vectors */ - pf->num_lan_msix = min_t(int, pf->num_lan_msix, - (hw->func_caps.num_msix_vectors - other_vecs)); - v_budget = pf->num_lan_msix + other_vecs; + vectors_left = hw->func_caps.num_msix_vectors; + v_budget = 0; + + /* reserve one vector for miscellaneous handler */ + if (vectors_left) { + v_budget++; + vectors_left--; + } + + /* reserve vectors for the main PF traffic queues */ + pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left); + vectors_left -= pf->num_lan_msix; + v_budget += pf->num_lan_msix; + + /* reserve one vector for sideband flow director */ + if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + if (vectors_left) { + v_budget++; + vectors_left--; + } else { + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + } + } #ifdef I40E_FCOE + /* can we reserve enough for FCoE? */ if (pf->flags & I40E_FLAG_FCOE_ENABLED) { - pf->num_fcoe_msix = pf->num_fcoe_qps; + if (!vectors_left) + pf->num_fcoe_msix = 0; + else if (vectors_left >= pf->num_fcoe_qps) + pf->num_fcoe_msix = pf->num_fcoe_qps; + else + pf->num_fcoe_msix = 1; v_budget += pf->num_fcoe_msix; + vectors_left -= pf->num_fcoe_msix; } + #endif + /* any vectors left over go for VMDq support */ + if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { + int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps; + int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted); + + /* if we're short on vectors for what's desired, we limit + * the queues per vmdq. If this is still more than are + * available, the user will need to change the number of + * queues/vectors used by the PF later with the ethtool + * channels command + */ + if (vmdq_vecs < vmdq_vecs_wanted) + pf->num_vmdq_qps = 1; + pf->num_vmdq_msix = pf->num_vmdq_qps; + + v_budget += vmdq_vecs; + vectors_left -= vmdq_vecs; + } pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); @@ -6977,9 +7123,9 @@ static int i40e_init_msix(struct i40e_pf *pf) for (i = 0; i < v_budget; i++) pf->msix_entries[i].entry = i; - vec = i40e_reserve_msix_vectors(pf, v_budget); + v_actual = i40e_reserve_msix_vectors(pf, v_budget); - if (vec != v_budget) { + if (v_actual != v_budget) { /* If we have limited resources, we will start with no vectors * for the special features and then allocate vectors to some * of these features based on the policy and at the end disable @@ -6992,26 +7138,30 @@ static int i40e_init_msix(struct i40e_pf *pf) pf->num_vmdq_msix = 0; } - if (vec < I40E_MIN_MSIX) { + if (v_actual < I40E_MIN_MSIX) { pf->flags &= ~I40E_FLAG_MSIX_ENABLED; kfree(pf->msix_entries); pf->msix_entries = NULL; return -ENODEV; - } else if (vec == I40E_MIN_MSIX) { + } else if (v_actual == I40E_MIN_MSIX) { /* Adjust for minimal MSIX use */ pf->num_vmdq_vsis = 0; pf->num_vmdq_qps = 0; pf->num_lan_qps = 1; pf->num_lan_msix = 1; - } else if (vec != v_budget) { + } else if (v_actual != v_budget) { + int vec; + /* reserve the misc vector */ - vec--; + vec = v_actual - 1; /* Scale vector usage down */ pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ pf->num_vmdq_vsis = 1; + pf->num_vmdq_qps = 1; + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; /* partition out the remaining vectors */ switch (vec) { @@ -7037,10 +7187,8 @@ static int i40e_init_msix(struct i40e_pf 
*pf) vec--; } #endif - pf->num_lan_msix = min_t(int, (vec / 2), - pf->num_lan_qps); - pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix), - I40E_DEFAULT_NUM_VMDQ_VSI); + /* give the rest to the PF */ + pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps); break; } } @@ -7057,7 +7205,7 @@ static int i40e_init_msix(struct i40e_pf *pf) pf->flags &= ~I40E_FLAG_FCOE_ENABLED; } #endif - return err; + return v_actual; } /** @@ -7134,11 +7282,12 @@ err_out: **/ static void i40e_init_interrupt_scheme(struct i40e_pf *pf) { - int err = 0; + int vectors = 0; + ssize_t size; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { - err = i40e_init_msix(pf); - if (err) { + vectors = i40e_init_msix(pf); + if (vectors < 0) { pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | #ifdef I40E_FCOE I40E_FLAG_FCOE_ENABLED | @@ -7158,18 +7307,26 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf) if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && (pf->flags & I40E_FLAG_MSI_ENABLED)) { dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); - err = pci_enable_msi(pf->pdev); - if (err) { - dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err); + vectors = pci_enable_msi(pf->pdev); + if (vectors < 0) { + dev_info(&pf->pdev->dev, "MSI init failed - %d\n", + vectors); pf->flags &= ~I40E_FLAG_MSI_ENABLED; } + vectors = 1; /* one MSI or Legacy vector */ } if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); + /* set up vector assignment tracking */ + size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors); + pf->irq_pile = kzalloc(size, GFP_KERNEL); + pf->irq_pile->num_entries = vectors; + pf->irq_pile->search_hint = 0; + /* track first vector for misc interrupts */ - err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1); + (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); } /** @@ -7219,6 +7376,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) static int i40e_config_rss(struct i40e_pf *pf) { u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1]; + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_hw *hw = &pf->hw; u32 lut = 0; int i, j; @@ -7236,15 +7394,14 @@ static int i40e_config_rss(struct i40e_pf *pf) wr32(hw, I40E_PFQF_HENA(0), (u32)hena); wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); + vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs); + /* Check capability and Set table size and register per hw expectation*/ reg_val = rd32(hw, I40E_PFQF_CTL_0); - if (hw->func_caps.rss_table_size == 512) { + if (pf->rss_table_size == 512) reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512; - pf->rss_table_size = 512; - } else { - pf->rss_table_size = 128; + else reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512; - } wr32(hw, I40E_PFQF_CTL_0, reg_val); /* Populate the LUT with max no. of queues in round robin fashion */ @@ -7257,7 +7414,7 @@ static int i40e_config_rss(struct i40e_pf *pf) * If LAN VSI is the only consumer for RSS then this requirement * is not necessary. 
*/ - if (j == pf->rss_size) + if (j == vsi->rss_size) j = 0; /* lut = 4-byte sliding window of 4 lut entries */ lut = (lut << 8) | (j & @@ -7281,15 +7438,19 @@ static int i40e_config_rss(struct i40e_pf *pf) **/ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) { + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + int new_rss_size; + if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) return 0; - queue_count = min_t(int, queue_count, pf->rss_size_max); + new_rss_size = min_t(int, queue_count, pf->rss_size_max); - if (queue_count != pf->rss_size) { + if (queue_count != vsi->num_queue_pairs) { + vsi->req_queue_pairs = queue_count; i40e_prep_for_reset(pf); - pf->rss_size = queue_count; + pf->rss_size = new_rss_size; i40e_reset_and_rebuild(pf, true); i40e_config_rss(pf); @@ -7299,6 +7460,128 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) } /** + * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition + * @pf: board private structure + **/ +i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf) +{ + i40e_status status; + bool min_valid, max_valid; + u32 max_bw, min_bw; + + status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, + &min_valid, &max_valid); + + if (!status) { + if (min_valid) + pf->npar_min_bw = min_bw; + if (max_valid) + pf->npar_max_bw = max_bw; + } + + return status; +} + +/** + * i40e_set_npar_bw_setting - Set BW settings for this PF partition + * @pf: board private structure + **/ +i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf) +{ + struct i40e_aqc_configure_partition_bw_data bw_data; + i40e_status status; + + /* Set the valid bit for this PF */ + bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id); + bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK; + bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK; + + /* Set the new bandwidths */ + status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); + + return status; +} + +/** + * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition + * @pf: board private structure + **/ +i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) +{ + /* Commit temporary BW setting to permanent NVM image */ + enum i40e_admin_queue_err last_aq_status; + i40e_status ret; + u16 nvm_word; + + if (pf->hw.partition_id != 1) { + dev_info(&pf->pdev->dev, + "Commit BW only works on partition 1! 
This is partition %d", + pf->hw.partition_id); + ret = I40E_NOT_SUPPORTED; + goto bw_commit_out; + } + + /* Acquire NVM for read access */ + ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); + last_aq_status = pf->hw.aq.asq_last_status; + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot acquire NVM for read access, err %d: aq_err %d\n", + ret, last_aq_status); + goto bw_commit_out; + } + + /* Read word 0x10 of NVM - SW compatibility word 1 */ + ret = i40e_aq_read_nvm(&pf->hw, + I40E_SR_NVM_CONTROL_WORD, + 0x10, sizeof(nvm_word), &nvm_word, + false, NULL); + /* Save off last admin queue command status before releasing + * the NVM + */ + last_aq_status = pf->hw.aq.asq_last_status; + i40e_release_nvm(&pf->hw); + if (ret) { + dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n", + ret, last_aq_status); + goto bw_commit_out; + } + + /* Wait a bit for NVM release to complete */ + msleep(50); + + /* Acquire NVM for write access */ + ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); + last_aq_status = pf->hw.aq.asq_last_status; + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot acquire NVM for write access, err %d: aq_err %d\n", + ret, last_aq_status); + goto bw_commit_out; + } + /* Write it back out unchanged to initiate update NVM, + * which will force a write of the shadow (alt) RAM to + * the NVM - thus storing the bandwidth values permanently. + */ + ret = i40e_aq_update_nvm(&pf->hw, + I40E_SR_NVM_CONTROL_WORD, + 0x10, sizeof(nvm_word), + &nvm_word, true, NULL); + /* Save off last admin queue command status before releasing + * the NVM + */ + last_aq_status = pf->hw.aq.asq_last_status; + i40e_release_nvm(&pf->hw); + if (ret) + dev_info(&pf->pdev->dev, + "BW settings NOT SAVED, err %d aq_err %d\n", + ret, last_aq_status); +bw_commit_out: + + return ret; +} + +/** * i40e_sw_init - Initialize general software structures (struct i40e_pf) * @pf: board private structure to initialize * @@ -7324,8 +7607,12 @@ static int i40e_sw_init(struct i40e_pf *pf) /* Set default capability flags */ pf->flags = I40E_FLAG_RX_CSUM_ENABLED | I40E_FLAG_MSI_ENABLED | - I40E_FLAG_MSIX_ENABLED | - I40E_FLAG_RX_1BUF_ENABLED; + I40E_FLAG_MSIX_ENABLED; + + if (iommu_present(&pci_bus_type)) + pf->flags |= I40E_FLAG_RX_PS_ENABLED; + else + pf->flags |= I40E_FLAG_RX_1BUF_ENABLED; /* Set default ITR */ pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF; @@ -7336,6 +7623,7 @@ static int i40e_sw_init(struct i40e_pf *pf) */ pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width; pf->rss_size = 1; + pf->rss_table_size = pf->hw.func_caps.rss_table_size; pf->rss_size_max = min_t(int, pf->rss_size_max, pf->hw.func_caps.num_tx_qp); if (pf->hw.func_caps.rss) { @@ -7347,6 +7635,13 @@ static int i40e_sw_init(struct i40e_pf *pf) if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) { pf->flags |= I40E_FLAG_MFP_ENABLED; dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); + if (i40e_get_npar_bw_setting(pf)) + dev_warn(&pf->pdev->dev, + "Could not get NPAR bw settings\n"); + else + dev_info(&pf->pdev->dev, + "Min BW = %8.8x, Max BW = %8.8x\n", + pf->npar_min_bw, pf->npar_max_bw); } /* FW/NVM is not yet fixed in this regard */ @@ -7354,11 +7649,11 @@ static int i40e_sw_init(struct i40e_pf *pf) (pf->hw.func_caps.fd_filters_best_effort > 0)) { pf->flags |= I40E_FLAG_FD_ATR_ENABLED; pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; - /* Setup a counter for fd_atr per pf */ + /* Setup a counter for fd_atr per PF */ pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id); if (!(pf->flags & 
I40E_FLAG_MFP_ENABLED)) { pf->flags |= I40E_FLAG_FD_SB_ENABLED; - /* Setup a counter for fd_sb per pf */ + /* Setup a counter for fd_sb per PF */ pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id); } else { dev_info(&pf->pdev->dev, @@ -7406,22 +7701,14 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; pf->qp_pile->search_hint = 0; - /* set up vector assignment tracking */ - size = sizeof(struct i40e_lump_tracking) - + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors); - pf->irq_pile = kzalloc(size, GFP_KERNEL); - if (!pf->irq_pile) { - kfree(pf->qp_pile); - err = -ENOMEM; - goto sw_init_done; - } - pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors; - pf->irq_pile->search_hint = 0; - pf->tx_timeout_recovery_level = 1; mutex_init(&pf->switch_mutex); + /* If NPAR is enabled nudge the Tx scheduler */ + if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf))) + i40e_set_npar_bw_setting(pf); + sw_init_done: return err; } @@ -7653,6 +7940,118 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return err; } +#ifdef HAVE_BRIDGE_ATTRIBS +/** + * i40e_ndo_bridge_setlink - Set the hardware bridge mode + * @dev: the netdev being configured + * @nlh: RTNL message + * + * Inserts a new hardware bridge if not already created and + * enables the bridging mode requested (VEB or VEPA). If the + * hardware bridge has already been inserted and the request + * is to change the mode then that requires a PF reset to + * allow rebuild of the components with required hardware + * bridge mode enabled. + **/ +static int i40e_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_veb *veb = NULL; + struct nlattr *attr, *br_spec; + int i, rem; + + /* Only for PF VSI for now */ + if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) + return -EOPNOTSUPP; + + /* Find the HW bridge for PF VSI */ + for (i = 0; i < I40E_MAX_VEB && !veb; i++) { + if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) + veb = pf->veb[i]; + } + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + mode = nla_get_u16(attr); + if ((mode != BRIDGE_MODE_VEPA) && + (mode != BRIDGE_MODE_VEB)) + return -EINVAL; + + /* Insert a new HW bridge */ + if (!veb) { + veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, + vsi->tc_config.enabled_tc); + if (veb) { + veb->bridge_mode = mode; + i40e_config_bridge_mode(veb); + } else { + /* No Bridge HW offload available */ + return -ENOENT; + } + break; + } else if (mode != veb->bridge_mode) { + /* Existing HW bridge but different mode needs reset */ + veb->bridge_mode = mode; + i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + break; + } + } + + return 0; +} + +/** + * i40e_ndo_bridge_getlink - Get the hardware bridge mode + * @skb: skb buff + * @pid: process id + * @seq: RTNL message seq # + * @dev: the netdev being configured + * @filter_mask: unused + * + * Return the mode in which the hardware bridge is operating in + * i.e VEB or VEPA. 
+ **/ +#ifdef HAVE_BRIDGE_FILTER +static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __always_unused filter_mask) +#else +static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev) +#endif /* HAVE_BRIDGE_FILTER */ +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_veb *veb = NULL; + int i; + + /* Only for PF VSI for now */ + if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) + return -EOPNOTSUPP; + + /* Find the HW bridge for the PF VSI */ + for (i = 0; i < I40E_MAX_VEB && !veb; i++) { + if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) + veb = pf->veb[i]; + } + + if (!veb) + return 0; + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode); +} +#endif /* HAVE_BRIDGE_ATTRIBS */ + static const struct net_device_ops i40e_netdev_ops = { .ndo_open = i40e_open, .ndo_stop = i40e_close, @@ -7687,6 +8086,10 @@ static const struct net_device_ops i40e_netdev_ops = { #endif .ndo_get_phys_port_id = i40e_get_phys_port_id, .ndo_fdb_add = i40e_ndo_fdb_add, +#ifdef HAVE_BRIDGE_ATTRIBS + .ndo_bridge_getlink = i40e_ndo_bridge_getlink, + .ndo_bridge_setlink = i40e_ndo_bridge_setlink, +#endif /* HAVE_BRIDGE_ATTRIBS */ }; /** @@ -7799,6 +8202,30 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi) } /** + * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB + * @vsi: the VSI being queried + * + * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode + **/ +int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi) +{ + struct i40e_veb *veb; + struct i40e_pf *pf = vsi->back; + + /* Uplink is not a bridge so default to VEB */ + if (vsi->veb_idx == I40E_NO_VEB) + return 1; + + veb = pf->veb[vsi->veb_idx]; + /* Uplink is a bridge in VEPA mode */ + if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA)) + return 0; + + /* Uplink is a bridge in VEB mode */ + return 1; +} + +/** * i40e_add_vsi - Add a VSI to the switch * @vsi: the VSI being configured * @@ -7830,7 +8257,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.flags = I40E_AQ_VSI_TYPE_PF; if (ret) { dev_info(&pf->pdev->dev, - "couldn't get pf vsi config, err %d, aq_err %d\n", + "couldn't get PF vsi config, err %d, aq_err %d\n", ret, pf->hw.aq.asq_last_status); return -ENOENT; } @@ -7883,12 +8310,14 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.pf_num = hw->pf_id; ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; - ctxt.connection_type = 0x1; /* regular data port */ + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_PF; - ctxt.info.valid_sections |= + if (i40e_is_vsi_uplink_mode_veb(vsi)) { + ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); - ctxt.info.switch_id = + ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); break; @@ -7896,16 +8325,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.pf_num = hw->pf_id; ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; - ctxt.connection_type = 0x1; /* regular data port */ + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; - ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); - /* This VSI is connected to VEB so the switch_id * should be set to zero by default. 
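[Editor's note] The i40e_is_vsi_uplink_mode_veb() helper added above encodes a small decision: a VSI whose uplink is not a bridge defaults to VEB behaviour, and only an uplink VEB explicitly configured for VEPA turns the loopback/switch-id handling off. A minimal user-space sketch of the same decision follows; the struct layout and the constant values are stand-ins for illustration, not the kernel's.

#include <stdio.h>

#define BRIDGE_MODE_VEB  0   /* illustrative values, not the kernel's */
#define BRIDGE_MODE_VEPA 1
#define NO_VEB          -1

struct veb { int bridge_mode; };
struct vsi { int veb_idx; };

/* Default to VEB unless an uplink bridge exists and is set to VEPA. */
static int uplink_mode_is_veb(const struct vsi *v, const struct veb *vebs)
{
    if (v->veb_idx == NO_VEB)
        return 1;                              /* no bridge above us */
    if (vebs[v->veb_idx].bridge_mode == BRIDGE_MODE_VEPA)
        return 0;                              /* uplink runs VEPA */
    return 1;                                  /* uplink runs VEB */
}

int main(void)
{
    struct veb vebs[2] = { { BRIDGE_MODE_VEB }, { BRIDGE_MODE_VEPA } };
    struct vsi a = { NO_VEB }, b = { 0 }, c = { 1 };

    printf("a: %d  b: %d  c: %d\n",
           uplink_mode_is_veb(&a, vebs),
           uplink_mode_is_veb(&b, vebs),
           uplink_mode_is_veb(&c, vebs));      /* expect 1 1 0 */
    return 0;
}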
*/ - ctxt.info.switch_id = 0; - ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + if (i40e_is_vsi_uplink_mode_veb(vsi)) { + ctxt.info.valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id = + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } /* Setup the VSI tx/rx queue map for TC0 only for now */ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); @@ -7915,15 +8346,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.pf_num = hw->pf_id; ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; ctxt.uplink_seid = vsi->uplink_seid; - ctxt.connection_type = 0x1; /* regular data port */ + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_VF; - ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); - /* This VSI is connected to VEB so the switch_id * should be set to zero by default. */ - ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + if (i40e_is_vsi_uplink_mode_veb(vsi)) { + ctxt.info.valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id = + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; @@ -8281,7 +8715,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, __func__); return NULL; } - i40e_enable_pf_switch_lb(pf); + i40e_config_bridge_mode(veb); } for (i = 0; i < I40E_MAX_VEB && !veb; i++) { if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) @@ -8724,7 +9158,7 @@ err_alloc: } /** - * i40e_setup_pf_switch_element - set pf vars based on switch type + * i40e_setup_pf_switch_element - set PF vars based on switch type * @pf: board private structure * @ele: element we are building info from * @num_reported: total number of elements @@ -8930,15 +9364,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) i40e_config_rss(pf); /* fill in link information and enable LSE reporting */ - i40e_update_link_info(&pf->hw, true); - i40e_link_event(pf); - - /* Initialize user-specific link properties */ - pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & - I40E_AQ_AN_COMPLETED) ? true : false); - - /* fill in link information and enable LSE reporting */ - i40e_update_link_info(&pf->hw, true); + i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); i40e_link_event(pf); /* Initialize user-specific link properties */ @@ -9008,7 +9434,11 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) pf->flags &= ~I40E_FLAG_DCB_CAPABLE; dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); } - pf->num_lan_qps = pf->rss_size_max; + pf->num_lan_qps = max_t(int, pf->rss_size_max, + num_online_cpus()); + pf->num_lan_qps = min_t(int, pf->num_lan_qps, + pf->hw.func_caps.num_tx_qp); + queues_left -= pf->num_lan_qps; } @@ -9061,7 +9491,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) * i40e_setup_pf_filter_control - Setup PF static filter control * @pf: PF to be setup * - * i40e_setup_pf_filter_control sets up a pf's initial filter control + * i40e_setup_pf_filter_control sets up a PF's initial filter control * settings. If PE/FCoE are enabled then it will also set the per PF * based filter sizes required for them. It also enables Flow director, * ethertype and macvlan type filter settings for the pf. 
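[Editor's note] The i40e_determine_queue_usage() hunk above now sizes the LAN queue pairs from both the RSS table width and the number of online CPUs, capped by the number of Tx queues the function exposes. The arithmetic reduces to a clamp; a small sketch with made-up capability numbers:

#include <stdio.h>

/* At least as wide as RSS can spread, at least one queue per online CPU,
 * but never more than the hardware function provides. */
static int pick_lan_qps(int rss_size_max, int online_cpus, int num_tx_qp)
{
    int qps = rss_size_max > online_cpus ? rss_size_max : online_cpus;

    return qps < num_tx_qp ? qps : num_tx_qp;
}

int main(void)
{
    /* hypothetical values, not read from any device */
    printf("%d\n", pick_lan_qps(64, 16, 128));   /* 64 */
    printf("%d\n", pick_lan_qps(8, 24, 128));    /* 24 */
    printf("%d\n", pick_lan_qps(64, 96, 72));    /* 72 */
    return 0;
}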
@@ -9106,8 +9536,10 @@ static void i40e_print_features(struct i40e_pf *pf) #ifdef CONFIG_PCI_IOV buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs); #endif - buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis, - pf->vsi[pf->lan_vsi]->num_queue_pairs); + buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ", + pf->hw.func_caps.num_vsis, + pf->vsi[pf->lan_vsi]->num_queue_pairs, + pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF"); if (pf->flags & I40E_FLAG_RSS_ENABLED) buf += sprintf(buf, "RSS "); @@ -9136,14 +9568,16 @@ static void i40e_print_features(struct i40e_pf *pf) * @pdev: PCI device information struct * @ent: entry in i40e_pci_tbl * - * i40e_probe initializes a pf identified by a pci_dev structure. - * The OS initialization, configuring of the pf private structure, + * i40e_probe initializes a PF identified by a pci_dev structure. + * The OS initialization, configuring of the PF private structure, * and a hardware reset occur. * * Returns 0 on success, negative on failure **/ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + struct i40e_aq_get_phy_abilities_resp abilities; + unsigned long ioremap_len; struct i40e_pf *pf; struct i40e_hw *hw; static u16 pfs_found; @@ -9195,8 +9629,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw = &pf->hw; hw->back = pf; - hw->hw_addr = ioremap(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); + + ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0), + I40E_MAX_CSR_SPACE); + + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len); if (!hw->hw_addr) { err = -EIO; dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n", @@ -9274,7 +9711,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&pdev->dev, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); - i40e_verify_eeprom(pf); /* Rev 0 hardware was never productized */ @@ -9409,13 +9845,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err); - msleep(75); - err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); - if (err) { - dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", - pf->hw.aq.asq_last_status); + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || + (pf->hw.aq.fw_maj_ver < 4)) { + msleep(75); + err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); + if (err) + dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); } - /* The main driver is (mostly) up and happy. We need to set this state * before setting up the misc vector or we get a race and the vector * ends up disabled forever. 
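[Editor's note] The probe hunk above stops issuing the manual link-restart workaround once the firmware is new enough to handle it itself: only pre-4 majors and 4.x releases below 4.33 still get the msleep(75) plus i40e_aq_set_link_restart_an() call. The version test is easy to get wrong at the boundary, so here is the predicate on its own with a few spot checks (pure illustration):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* True when firmware is old enough that the driver must restart
 * auto-negotiation by hand after setting the PHY interrupt mask. */
static bool needs_manual_link_restart(unsigned maj, unsigned min)
{
    return (maj == 4 && min < 33) || maj < 4;
}

int main(void)
{
    assert(needs_manual_link_restart(3, 99));    /* old major: yes  */
    assert(needs_manual_link_restart(4, 32));    /* 4.32: yes       */
    assert(!needs_manual_link_restart(4, 33));   /* 4.33: no        */
    assert(!needs_manual_link_restart(5, 0));    /* newer major: no */
    puts("boundary checks pass");
    return 0;
}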
@@ -9499,6 +9936,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); } + /* get the requested speeds from the fw */ + err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); + if (err) + dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n", + err); + pf->hw.phy.link_info.requested_speeds = abilities.link_speed; + /* print a string summarizing features */ i40e_print_features(pf); @@ -9517,7 +9961,6 @@ err_configure_lan_hmc: (void)i40e_shutdown_lan_hmc(hw); err_init_lan_hmc: kfree(pf->qp_pile); - kfree(pf->irq_pile); err_sw_init: err_adminq_setup: (void)i40e_shutdown_adminq(hw); @@ -9617,7 +10060,6 @@ static void i40e_remove(struct pci_dev *pdev) } kfree(pf->qp_pile); - kfree(pf->irq_pile); kfree(pf->vsi); iounmap(pf->hw.hw_addr); @@ -9844,6 +10286,7 @@ static int __init i40e_init_module(void) pr_info("%s: %s - version %s\n", i40e_driver_name, i40e_driver_string, i40e_driver_version_str); pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); + i40e_dbg_init(); return pci_register_driver(&i40e_driver); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 5defe0d..e49acd2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -164,15 +164,15 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) } /** - * i40e_read_nvm_word - Reads Shadow RAM + * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM * * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. **/ -i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, - u16 *data) +static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, + u16 *data) { i40e_status ret_code = I40E_ERR_TIMEOUT; u32 sr_reg; @@ -212,7 +212,21 @@ read_nvm_exit: } /** - * i40e_read_nvm_buffer - Reads Shadow RAM buffer + * i40e_read_nvm_word - Reads Shadow RAM + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. + **/ +i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, + u16 *data) +{ + return i40e_read_nvm_word_srctl(hw, offset, data); +} + +/** + * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). * @words: (in) number of words to read; (out) number of words actually read @@ -222,8 +236,8 @@ read_nvm_exit: * method. The buffer read is preceded by the NVM ownership take * and followed by the release. 
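[Editor's note] The i40e_nvm.c hunks above turn the exported i40e_read_nvm_word()/i40e_read_nvm_buffer() into thin wrappers around _srctl backends, leaving room for another read path (for example an AdminQ-backed one) to be selected later without touching callers. A generic sketch of that front-end/backend split; the backend names and the selection mechanism here are invented for illustration only.

#include <stdint.h>
#include <stdio.h>

enum nvm_backend { NVM_VIA_SRCTL, NVM_VIA_AQ };

/* Stub backends; real ones would poke GLNVM_SRCTL or send an AQ command. */
static int read_word_srctl(uint16_t offset, uint16_t *data)
{
    *data = 0x1234;                                /* pretend-read */
    printf("srctl read of word 0x%04x\n", offset);
    return 0;
}

static int read_word_aq(uint16_t offset, uint16_t *data)
{
    *data = 0x5678;
    printf("aq read of word 0x%04x\n", offset);
    return 0;
}

/* Exported front-end: callers never see which backend serviced them. */
static int read_nvm_word(enum nvm_backend be, uint16_t offset, uint16_t *data)
{
    return be == NVM_VIA_AQ ? read_word_aq(offset, data)
                            : read_word_srctl(offset, data);
}

int main(void)
{
    uint16_t w;

    read_nvm_word(NVM_VIA_SRCTL, 0x10, &w);
    read_nvm_word(NVM_VIA_AQ, 0x10, &w);
    return 0;
}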
**/ -i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, - u16 *words, u16 *data) +static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) { i40e_status ret_code = 0; u16 index, word; @@ -231,7 +245,7 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, /* Loop thru the selected region */ for (word = 0; word < *words; word++) { index = offset + word; - ret_code = i40e_read_nvm_word(hw, index, &data[word]); + ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]); if (ret_code) break; } @@ -243,6 +257,23 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, } /** + * i40e_read_nvm_buffer - Reads Shadow RAM buffer + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release. + **/ +i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) +{ + return i40e_read_nvm_buffer_srctl(hw, offset, words, data); +} + +/** * i40e_write_nvm_aq - Writes Shadow RAM. * @hw: pointer to the HW structure. * @module_pointer: module pointer location in words from the NVM beginning @@ -302,11 +333,18 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum) { i40e_status ret_code = 0; + struct i40e_virt_mem vmem; u16 pcie_alt_module = 0; u16 checksum_local = 0; u16 vpd_module = 0; - u16 word = 0; - u32 i = 0; + u16 *data; + u16 i = 0; + + ret_code = i40e_allocate_virt_mem(hw, &vmem, + I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16)); + if (ret_code) + goto i40e_calc_nvm_checksum_exit; + data = (u16 *)vmem.va; /* read pointer to VPD area */ ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module); @@ -317,7 +355,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, /* read pointer to PCIe Alt Auto-load module */ ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, - &pcie_alt_module); + &pcie_alt_module); if (ret_code) { ret_code = I40E_ERR_NVM_CHECKSUM; goto i40e_calc_nvm_checksum_exit; @@ -327,33 +365,40 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, * except the VPD and PCIe ALT Auto-load modules */ for (i = 0; i < hw->nvm.sr_size; i++) { + /* Read SR page */ + if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) { + u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS; + + ret_code = i40e_read_nvm_buffer(hw, i, &words, data); + if (ret_code) { + ret_code = I40E_ERR_NVM_CHECKSUM; + goto i40e_calc_nvm_checksum_exit; + } + } + /* Skip Checksum word */ if (i == I40E_SR_SW_CHECKSUM_WORD) - i++; + continue; /* Skip VPD module (convert byte size to word count) */ - if (i == (u32)vpd_module) { - i += (I40E_SR_VPD_MODULE_MAX_SIZE / 2); - if (i >= hw->nvm.sr_size) - break; + if ((i >= (u32)vpd_module) && + (i < ((u32)vpd_module + + (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) { + continue; } /* Skip PCIe ALT module (convert byte size to word count) */ - if (i == (u32)pcie_alt_module) { - i += (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2); - if (i >= hw->nvm.sr_size) - break; + if ((i >= (u32)pcie_alt_module) && + (i < ((u32)pcie_alt_module + + (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) { + continue; } - ret_code = i40e_read_nvm_word(hw, (u16)i, &word); - if (ret_code) { - ret_code = I40E_ERR_NVM_CHECKSUM; - goto 
i40e_calc_nvm_checksum_exit; - } - checksum_local += word; + checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS]; } *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local; i40e_calc_nvm_checksum_exit: + i40e_free_virt_mem(hw, &vmem); return ret_code; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 68e852a..fea0d37 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -66,6 +66,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink); i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, u16 *fw_major_version, u16 *fw_minor_version, + u32 *fw_build, u16 *api_major_version, u16 *api_minor_version, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, @@ -97,7 +98,6 @@ i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, bool enable_lse, struct i40e_link_status *link, struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse); i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw, u64 advt_reg, struct i40e_asq_cmd_details *cmd_details); @@ -247,6 +247,12 @@ void i40e_clear_hw(struct i40e_hw *hw); void i40e_clear_pxe_mode(struct i40e_hw *hw); bool i40e_get_link_status(struct i40e_hw *hw); i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr); +i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, + u32 *max_bw, u32 *min_bw, bool *min_valid, + bool *max_valid); +i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, + struct i40e_aqc_configure_partition_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr); i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, u32 pba_num_size); @@ -260,8 +266,6 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw); i40e_status i40e_acquire_nvm(struct i40e_hw *hw, enum i40e_aq_resource_access_type access); void i40e_release_nvm(struct i40e_hw *hw); -i40e_status i40e_read_nvm_srrd(struct i40e_hw *hw, u16 offset, - u16 *data); i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data); i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index 65d3c8b..522d6df 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -310,6 +310,10 @@ #define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) #define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 #define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) +#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7 +#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0 +#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT) #define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: 
CORER */ #define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 #define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) @@ -421,6 +425,8 @@ #define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26 +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT) #define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ #define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 #define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) @@ -484,7 +490,9 @@ #define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 #define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT) #define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 @@ -548,9 +556,6 @@ #define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) -#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */ -#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0 -#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT) #define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */ #define I40E_GLGEN_RTRIG_CORER_SHIFT 0 #define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) @@ -1066,7 +1071,7 @@ #define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) #define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 #define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) -#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */ +#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */ #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) #define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ @@ -1171,7 +1176,7 @@ #define I40E_VFINT_ITRN_MAX_INDEX 2 #define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 #define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) @@ -1803,9 +1808,6 @@ #define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, 
I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) -#define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */ -#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0 -#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT) #define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ #define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 #define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) @@ -1902,6 +1904,11 @@ #define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT) #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1 #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT) +#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */ +#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9 +#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT) +#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11 +#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT) #define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */ #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0 #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT) @@ -2374,20 +2381,20 @@ #define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT) #define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPRCH_MAX_INDEX 3 -#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT) +#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT) #define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPRCL_MAX_INDEX 3 -#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT) +#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT) #define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPTCH_MAX_INDEX 3 -#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT) +#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT) #define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPTCL_MAX_INDEX 3 -#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT) +#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT) #define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_CRCERRS_MAX_INDEX 3 #define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0 @@ -2620,10 +2627,6 @@ #define I40E_GLPRT_TDOLD_MAX_INDEX 3 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT) -#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_TDPC_MAX_INDEX 3 -#define I40E_GLPRT_TDPC_TDPC_SHIFT 0 -#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT) #define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* 
Reset: CORER */ #define I40E_GLPRT_UPRCH_MAX_INDEX 3 #define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0 @@ -2990,9 +2993,6 @@ #define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) -#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */ -#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0 -#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT) #define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 #define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) @@ -3258,7 +3258,7 @@ #define I40E_VFINT_ITRN1_MAX_INDEX 2 #define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 #define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */ +#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) #define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index bbf1b12..9b11f2e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -25,6 +25,7 @@ ******************************************************************************/ #include <linux/prefetch.h> +#include <net/busy_poll.h> #include "i40e.h" #include "i40e_prototype.h" @@ -44,7 +45,7 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, * i40e_program_fdir_filter - Program a Flow Director filter * @fdir_data: Packet data that will be filter parameters * @raw_packet: the pre-allocated packet buffer for FDir - * @pf: The pf pointer + * @pf: The PF pointer * @add: True for add/update, False for remove **/ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet, @@ -227,7 +228,7 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi, "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", fd_data->pctype, fd_data->fd_id, ret); err = true; - } else { + } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { if (add) dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n", @@ -302,7 +303,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", fd_data->pctype, fd_data->fd_id, ret); err = true; - } else { + } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { if (add) dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n", fd_data->pctype, fd_data->fd_id); @@ -375,7 +376,7 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", fd_data->pctype, fd_data->fd_id, ret); err = true; - } else { + } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { if (add) dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n", @@ -470,12 +471,27 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", rx_desc->wb.qword0.hi_dword.fd_id); + /* Check if the programming error is for ATR. + * If so, auto disable ATR and set a state for + * flush in progress. 
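[Editor's note] A few hunks back, the i40e_calc_nvm_checksum() rework reads the shadow RAM a sector at a time and skips the checksum word plus the VPD and PCIe-ALT auto-load windows as ranges instead of jumping the index past them. A user-space model of just the skip-and-sum arithmetic over an in-memory image follows; the sector-sized paging is dropped, and the sizes, base constant, and window offsets below are made up.

#include <stdint.h>
#include <stdio.h>

#define SR_WORDS        256u      /* toy shadow-RAM size, in 16-bit words  */
#define CHECKSUM_WORD   0x3Fu     /* word that holds the stored checksum   */
#define CHECKSUM_BASE   0xBABAu   /* stand-in for the fixed checksum base  */

/* Sum every word except the checksum word and two skip windows, then
 * fold so that (counted words + stored checksum) equals the base. */
static uint16_t calc_checksum(const uint16_t *sr,
                              unsigned vpd_start, unsigned vpd_len,
                              unsigned alt_start, unsigned alt_len)
{
    uint16_t sum = 0;
    unsigned i;

    for (i = 0; i < SR_WORDS; i++) {
        if (i == CHECKSUM_WORD)
            continue;
        if (i >= vpd_start && i < vpd_start + vpd_len)
            continue;
        if (i >= alt_start && i < alt_start + alt_len)
            continue;
        sum += sr[i];
    }
    return (uint16_t)(CHECKSUM_BASE - sum);
}

int main(void)
{
    uint16_t sr[SR_WORDS];
    unsigned i;

    for (i = 0; i < SR_WORDS; i++)
        sr[i] = (uint16_t)(i * 3 + 1);            /* arbitrary contents */

    sr[CHECKSUM_WORD] = calc_checksum(sr, 0x80, 16, 0xA0, 32);
    printf("stored checksum: 0x%04x\n", sr[CHECKSUM_WORD]);
    return 0;
}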
Next time we come here if flush is in + * progress do nothing, once flush is complete the state will + * be cleared. + */ + if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + return; + pf->fd_add_err++; /* store the current atr filter count */ pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf); + if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) && + (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { + pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED; + set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); + } + /* filter programming failed most likely due to table full */ - fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf); + fcnt_prog = i40e_get_global_fd_count(pf); fcnt_avail = pf->fdir_pf_filter_count; /* If ATR is running fcnt_prog can quickly change, * if we are very close to full, it makes sense to disable @@ -754,6 +770,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_desc = I40E_TX_DESC(tx_ring, 0); } + prefetch(tx_desc); + /* update budget accounting */ budget--; } while (likely(budget)); @@ -841,6 +859,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK; /* allow 00 to be written to the index */ @@ -1031,6 +1050,22 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) if (!rx_ring->rx_bi) return; + if (ring_is_ps_enabled(rx_ring)) { + int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; + + rx_bi = &rx_ring->rx_bi[0]; + if (rx_bi->hdr_buf) { + dma_free_coherent(dev, + bufsz, + rx_bi->hdr_buf, + rx_bi->dma); + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = 0; + rx_bi->hdr_buf = NULL; + } + } + } /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { rx_bi = &rx_ring->rx_bi[i]; @@ -1089,6 +1124,37 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring) } /** + * i40e_alloc_rx_headers - allocate rx header buffers + * @rx_ring: ring to alloc buffers + * + * Allocate rx header buffers for the entire ring. As these are static, + * this is only called when setting up a new ring. + **/ +void i40e_alloc_rx_headers(struct i40e_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + struct i40e_rx_buffer *rx_bi; + dma_addr_t dma; + void *buffer; + int buf_size; + int i; + + if (rx_ring->rx_bi[0].hdr_buf) + return; + /* Make sure the buffers don't cross cache line boundaries. 
*/ + buf_size = ALIGN(rx_ring->rx_hdr_len, 256); + buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, + &dma, GFP_KERNEL); + if (!buffer) + return; + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = dma + (i * buf_size); + rx_bi->hdr_buf = buffer + (i * buf_size); + } +} + +/** * i40e_setup_rx_descriptors - Allocate Rx descriptors * @rx_ring: Rx descriptor ring (for a specific queue) to setup * @@ -1148,11 +1214,76 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) } /** - * i40e_alloc_rx_buffers - Replace used receive buffers; packet split + * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace **/ -void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) +void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) +{ + u16 i = rx_ring->next_to_use; + union i40e_rx_desc *rx_desc; + struct i40e_rx_buffer *bi; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return; + + while (cleaned_count--) { + rx_desc = I40E_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_bi[i]; + + if (bi->skb) /* desc is in use */ + goto no_buffers; + if (!bi->page) { + bi->page = alloc_page(GFP_ATOMIC); + if (!bi->page) { + rx_ring->rx_stats.alloc_page_failed++; + goto no_buffers; + } + } + + if (!bi->page_dma) { + /* use a half page if we're re-using */ + bi->page_offset ^= PAGE_SIZE / 2; + bi->page_dma = dma_map_page(rx_ring->dev, + bi->page, + bi->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + if (dma_mapping_error(rx_ring->dev, + bi->page_dma)) { + rx_ring->rx_stats.alloc_page_failed++; + bi->page_dma = 0; + goto no_buffers; + } + } + + dma_sync_single_range_for_device(rx_ring->dev, + bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); + i++; + if (i == rx_ring->count) + i = 0; + } + +no_buffers: + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); +} + +/** + * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) { u16 i = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; @@ -1192,40 +1323,8 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) } } - if (ring_is_ps_enabled(rx_ring)) { - if (!bi->page) { - bi->page = alloc_page(GFP_ATOMIC); - if (!bi->page) { - rx_ring->rx_stats.alloc_page_failed++; - goto no_buffers; - } - } - - if (!bi->page_dma) { - /* use a half page if we're re-using */ - bi->page_offset ^= PAGE_SIZE / 2; - bi->page_dma = dma_map_page(rx_ring->dev, - bi->page, - bi->page_offset, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, - bi->page_dma)) { - rx_ring->rx_stats.alloc_page_failed++; - bi->page_dma = 0; - goto no_buffers; - } - } - - /* Refresh the desc even if buffer_addrs didn't change - * because each write-back erases this info. 
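[Editor's note] i40e_alloc_rx_headers() above carves one coherent allocation into per-descriptor header slots, rounding the slot size up to 256 bytes so no slot straddles that boundary. The slicing is plain pointer arithmetic; a host-memory sketch with malloc standing in for dma_alloc_coherent (which would also align the pool itself):

#include <stdio.h>
#include <stdlib.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

struct rx_slot { void *hdr_buf; };

int main(void)
{
    unsigned hdr_len = 96, count = 8, i;
    unsigned slot = ALIGN_UP(hdr_len, 256u);       /* 96 -> 256 */
    unsigned char *pool = malloc((size_t)slot * count);
    struct rx_slot slots[8];

    if (!pool)
        return 1;

    /* Every descriptor gets its own slice starting at a 256-byte
     * multiple inside the pool. */
    for (i = 0; i < count; i++)
        slots[i].hdr_buf = pool + (size_t)i * slot;

    for (i = 0; i < count; i++)
        printf("slot %u at offset %zu\n", i,
               (size_t)((unsigned char *)slots[i].hdr_buf - pool));

    free(pool);
    return 0;
}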
- */ - rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); - rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); - } else { - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); - rx_desc->read.hdr_addr = 0; - } + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + rx_desc->read.hdr_addr = 0; i++; if (i == rx_ring->count) i = 0; @@ -1279,10 +1378,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, struct iphdr *iph; __sum16 csum; - ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && - (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); - ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && - (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); + ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); skb->ip_summed = CHECKSUM_NONE; @@ -1410,13 +1509,13 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) } /** - * i40e_clean_rx_irq - Reclaim resources after receive completes + * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split * @rx_ring: rx ring to clean * @budget: how many cleans we're allowed * * Returns true if there's any budget left (e.g. the clean is finished) **/ -static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) +static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; @@ -1432,25 +1531,51 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) if (budget <= 0) return 0; - rx_desc = I40E_RX_DESC(rx_ring, i); - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; - - while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) { - union i40e_rx_desc *next_rxd; + do { struct i40e_rx_buffer *rx_bi; struct sk_buff *skb; u16 vlan_tag; + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= I40E_RX_BUFFER_WRITE) { + i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count); + cleaned_count = 0; + } + + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * DD bit is set. 
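[Editor's note] The packet-split refill path in i40e_alloc_rx_buffers_ps(), a little further up, reuses a single page for two receive buffers by XOR-toggling page_offset between 0 and PAGE_SIZE/2 each time the page is remapped. A tiny demonstration of the toggle; the 4 KiB page size is assumed purely for the printout.

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
    unsigned page_offset = 0, refill;

    /* Each refill hands out the half of the page not used last time. */
    for (refill = 0; refill < 4; refill++) {
        page_offset ^= PAGE_SIZE / 2;
        printf("refill %u uses bytes [%u, %u)\n",
               refill, page_offset, page_offset + PAGE_SIZE / 2);
    }
    return 0;
}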
+ */ + rmb(); if (i40e_rx_is_programming_status(qword)) { i40e_clean_programming_status(rx_ring, rx_desc); - I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd); - goto next_desc; + I40E_RX_INCREMENT(rx_ring, i); + continue; } rx_bi = &rx_ring->rx_bi[i]; skb = rx_bi->skb; - prefetch(skb->data); - + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_hdr_len); + if (!skb) + rx_ring->rx_stats.alloc_buff_failed++; + /* initialize queue mapping */ + skb_record_rx_queue(skb, rx_ring->queue_index); + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + } rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> @@ -1465,40 +1590,30 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; + prefetch(rx_bi->page); rx_bi->skb = NULL; - - /* This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc until we know the - * STATUS_DD bit is set - */ - rmb(); - - /* Get the header and possibly the whole packet - * If this is an skb from previous receive dma will be 0 - */ - if (rx_bi->dma) { - u16 len; - + cleaned_count++; + if (rx_hbo || rx_sph) { + int len; if (rx_hbo) len = I40E_RX_HDR_SIZE; - else if (rx_sph) - len = rx_header_len; - else if (rx_packet_len) - len = rx_packet_len; /* 1buf/no split found */ else - len = rx_header_len; /* split always mode */ - - skb_put(skb, len); - dma_unmap_single(rx_ring->dev, - rx_bi->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_bi->dma = 0; + len = rx_header_len; + memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); + } else if (skb->len == 0) { + int len; + + len = (rx_packet_len > skb_headlen(skb) ? + skb_headlen(skb) : rx_packet_len); + memcpy(__skb_put(skb, len), + rx_bi->page + rx_bi->page_offset, + len); + rx_bi->page_offset += len; + rx_packet_len -= len; } /* Get the rest of the data if this was a header split */ - if (ring_is_ps_enabled(rx_ring) && rx_packet_len) { - + if (rx_packet_len) { skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, rx_bi->page, rx_bi->page_offset, @@ -1520,22 +1635,16 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) DMA_FROM_DEVICE); rx_bi->page_dma = 0; } - I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd); + I40E_RX_INCREMENT(rx_ring, i); if (unlikely( !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { struct i40e_rx_buffer *next_buffer; next_buffer = &rx_ring->rx_bi[i]; - - if (ring_is_ps_enabled(rx_ring)) { - rx_bi->skb = next_buffer->skb; - rx_bi->dma = next_buffer->dma; - next_buffer->skb = skb; - next_buffer->dma = 0; - } + next_buffer->skb = skb; rx_ring->rx_stats.non_eop_descs++; - goto next_desc; + continue; } /* ERR_MASK will only have valid bits if EOP set */ @@ -1544,7 +1653,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) /* TODO: shouldn't we increment a counter indicating the * drop? 
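[Editor's note] The reworked i40e_clean_rx_irq_ps() loop above follows a common descriptor-ring shape: peek at the DD bit, stop if the NIC has not written the descriptor back yet, order the remaining reads after that check (the rmb() above), consume, then advance next_to_clean with wrap-around and refill buffers in batches rather than one at a time. A single-threaded mock of that control flow; there is no DMA or second agent here, so no barrier is needed, and the ring contents and batch size are invented.

#include <stdio.h>

#define RING_COUNT    8
#define REFILL_BATCH  2

struct desc { int dd; int len; };

int main(void)
{
    struct desc ring[RING_COUNT] = {
        { 1, 60 }, { 1, 1514 }, { 1, 42 }, { 0, 0 },   /* HW owns idx 3.. */
    };
    unsigned next_to_clean = 0, cleaned = 0, budget = 16, done = 0;

    while (done < budget) {
        struct desc *d = &ring[next_to_clean];

        if (!d->dd)                 /* descriptor not written back yet */
            break;
        /* the real driver issues rmb() here before reading the rest of *d */

        printf("idx %u: packet of %d bytes\n", next_to_clean, d->len);
        d->dd = 0;                  /* give the slot back */
        done++;

        if (++cleaned >= REFILL_BATCH) {
            printf("refill %u buffers\n", cleaned);
            cleaned = 0;
        }
        next_to_clean = (next_to_clean + 1) % RING_COUNT;   /* wrap */
    }
    printf("cleaned %u packets\n", done);
    return 0;
}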
*/ - goto next_desc; + continue; } skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), @@ -1570,33 +1679,149 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) #ifdef I40E_FCOE if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { dev_kfree_skb_any(skb); - goto next_desc; + continue; } #endif + skb_mark_napi_id(skb, &rx_ring->q_vector->napi); i40e_receive_skb(rx_ring, skb, vlan_tag); rx_ring->netdev->last_rx = jiffies; - budget--; -next_desc: rx_desc->wb.qword1.status_error_len = 0; - if (!budget) - break; - cleaned_count++; + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + rx_ring->q_vector->rx.total_packets += total_rx_packets; + rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +/** + * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer + * @rx_ring: rx ring to clean + * @budget: how many cleans we're allowed + * + * Returns number of packets cleaned + **/ +static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); + struct i40e_vsi *vsi = rx_ring->vsi; + union i40e_rx_desc *rx_desc; + u32 rx_error, rx_status; + u16 rx_packet_len; + u8 rx_ptype; + u64 qword; + u16 i; + + do { + struct i40e_rx_buffer *rx_bi; + struct sk_buff *skb; + u16 vlan_tag; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - i40e_alloc_rx_buffers(rx_ring, cleaned_count); + i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count); cleaned_count = 0; } - /* use prefetched values */ - rx_desc = next_rxd; + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; - } + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * DD bit is set. + */ + rmb(); + + if (i40e_rx_is_programming_status(qword)) { + i40e_clean_programming_status(rx_ring, rx_desc); + I40E_RX_INCREMENT(rx_ring, i); + continue; + } + rx_bi = &rx_ring->rx_bi[i]; + skb = rx_bi->skb; + prefetch(skb->data); + + rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + + rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> + I40E_RXD_QW1_ERROR_SHIFT; + rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; + rx_bi->skb = NULL; + cleaned_count++; + + /* Get the header and possibly the whole packet + * If this is an skb from previous receive dma will be 0 + */ + skb_put(skb, rx_packet_len); + dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_bi->dma = 0; + + I40E_RX_INCREMENT(rx_ring, i); + + if (unlikely( + !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + rx_ring->rx_stats.non_eop_descs++; + continue; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + dev_kfree_skb_any(skb); + /* TODO: shouldn't we increment a counter indicating the + * drop? 
+ */ + continue; + } + + skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), + i40e_ptype_to_hash(rx_ptype)); + if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { + i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & + I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> + I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT); + rx_ring->last_rx_timestamp = jiffies; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + + i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); + + vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) + : 0; +#ifdef I40E_FCOE + if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { + dev_kfree_skb_any(skb); + continue; + } +#endif + i40e_receive_skb(rx_ring, skb, vlan_tag); + + rx_ring->netdev->last_rx = jiffies; + rx_desc->wb.qword1.status_error_len = 0; + } while (likely(total_rx_packets < budget)); - rx_ring->next_to_clean = i; u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; @@ -1604,10 +1829,7 @@ next_desc: rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; - if (cleaned_count) - i40e_alloc_rx_buffers(rx_ring, cleaned_count); - - return budget > 0; + return total_rx_packets; } /** @@ -1628,6 +1850,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) bool clean_complete = true; bool arm_wb = false; int budget_per_ring; + int cleaned; if (test_bit(__I40E_DOWN, &vsi->state)) { napi_complete(napi); @@ -1647,8 +1870,14 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) */ budget_per_ring = max(budget/q_vector->num_ringpairs, 1); - i40e_for_each_ring(ring, q_vector->rx) - clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); + i40e_for_each_ring(ring, q_vector->rx) { + if (ring_is_ps_enabled(ring)) + cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); + else + cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + /* if we didn't clean as many as budgeted, we must be done */ + clean_complete &= (budget_per_ring != cleaned); + } /* If work not completed, return budget and polling will return */ if (!clean_complete) { @@ -1715,6 +1944,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) return; + if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) + return; + /* if sampling is disabled do nothing */ if (!tx_ring->atr_sample_rate) return; @@ -1838,6 +2070,9 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, tx_flags |= I40E_TX_FLAGS_SW_VLAN; } + if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED)) + goto out; + /* Insert 802.1p priority into VLAN header */ if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) || (skb->priority != TC_PRIO_CONTROL)) { @@ -1858,6 +2093,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, tx_flags |= I40E_TX_FLAGS_HW_VLAN; } } + +out: *flags = tx_flags; return 0; } @@ -1982,8 +2219,16 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, struct iphdr *this_ip_hdr; u32 network_hdr_len; u8 l4_hdr = 0; + u32 l4_tunnel = 0; if (skb->encapsulation) { + switch (ip_hdr(skb)->protocol) { + case IPPROTO_UDP: + l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; + break; + default: + return; + } network_hdr_len = skb_inner_network_header_len(skb); this_ip_hdr = inner_ip_hdr(skb); this_ipv6_hdr = 
inner_ipv6_hdr(skb); @@ -2006,8 +2251,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, /* Now set the ctx descriptor fields */ *cd_tunneling |= (skb_network_header_len(skb) >> 2) << - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | - I40E_TXD_CTX_UDP_TUNNELING | + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | + l4_tunnel | ((skb_inner_network_offset(skb) - skb_transport_offset(skb)) >> 1) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index dff0bae..4b0b810 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -96,6 +96,14 @@ enum i40e_dyn_idx_t { /* How many Rx Buffers do we bundle into one write to the hardware ? */ #define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define I40E_RX_INCREMENT(r, i) \ + do { \ + (i)++; \ + if ((i) == (r)->count) \ + i = 0; \ + r->next_to_clean = i; \ + } while (0) + #define I40E_RX_NEXT_DESC(r, i, n) \ do { \ (i)++; \ @@ -152,6 +160,7 @@ struct i40e_tx_buffer { struct i40e_rx_buffer { struct sk_buff *skb; + void *hdr_buf; dma_addr_t dma; struct page *page; dma_addr_t page_dma; @@ -224,8 +233,8 @@ struct i40e_ring { u16 rx_buf_len; u8 dtype; #define I40E_RX_DTYPE_NO_SPLIT 0 -#define I40E_RX_DTYPE_SPLIT_ALWAYS 1 -#define I40E_RX_DTYPE_HEADER_SPLIT 2 +#define I40E_RX_DTYPE_HEADER_SPLIT 1 +#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 u8 hsplit; #define I40E_RX_SPLIT_L2 0x1 #define I40E_RX_SPLIT_IP 0x2 @@ -281,7 +290,9 @@ struct i40e_ring_container { #define i40e_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); +void i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); +void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); +void i40e_alloc_rx_headers(struct i40e_ring *rxr); netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void i40e_clean_tx_ring(struct i40e_ring *tx_ring); void i40e_clean_rx_ring(struct i40e_ring *rx_ring); diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index e9901ef..83032d2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
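[Editor's note] The i40e_napi_poll() change just above now gets a packet count back from whichever clean routine ran and treats "used the whole per-ring budget" as "probably more work pending", so the vector stays in polling mode. The budget split and completion test reduce to the following (ring counts and numbers are illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool poll_rings(int budget, int num_ringpairs, const int *cleaned)
{
    int per_ring = budget / num_ringpairs;
    bool clean_complete = true;
    int i;

    if (per_ring < 1)
        per_ring = 1;               /* never hand a ring a zero budget */

    for (i = 0; i < num_ringpairs; i++)
        /* a ring that consumed its whole budget is assumed unfinished */
        clean_complete &= (cleaned[i] != per_ring);

    return clean_complete;          /* true -> napi_complete, re-arm IRQ */
}

int main(void)
{
    int light[2] = { 3, 0 }, busy[2] = { 32, 7 };

    printf("light load complete: %d\n", poll_rings(64, 2, light));  /* 1 */
    printf("busy ring complete:  %d\n", poll_rings(64, 2, busy));   /* 0 */
    return 0;
}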
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -175,12 +175,12 @@ struct i40e_link_status { u8 an_info; u8 ext_info; u8 loopback; - bool an_enabled; /* is Link Status Event notification to SW enabled */ bool lse_enable; u16 max_frame_size; bool crc_enable; u8 pacing; + u8 requested_speeds; }; struct i40e_phy_info { @@ -1143,7 +1143,7 @@ struct i40e_hw_port_stats { #define I40E_SR_EMP_MODULE_PTR 0x0F #define I40E_SR_PBA_FLAGS 0x15 #define I40E_SR_PBA_BLOCK_PTR 0x16 -#define I40E_SR_NVM_IMAGE_VERSION 0x18 +#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 #define I40E_SR_NVM_WAKE_ON_LAN 0x19 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 #define I40E_SR_NVM_EETRACK_LO 0x2D @@ -1401,6 +1401,19 @@ struct i40e_lldp_variables { u16 crc8; }; +/* Offsets into Alternate Ram */ +#define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */ +#define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */ +#define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */ +#define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */ +#define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */ +#define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */ + +/* Alternate Ram Bandwidth Masks */ +#define I40E_ALT_BW_VALUE_MASK 0xFF +#define I40E_ALT_BW_RELATIVE_MASK 0x40000000 +#define I40E_ALT_BW_VALID_MASK 0x80000000 + /* RSS Hash Table Size */ #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 #endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h index 61dd1b18..2d20af2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h @@ -59,31 +59,29 @@ * of the virtchnl_msg structure. */ enum i40e_virtchnl_ops { -/* VF sends req. to pf for the following - * ops. +/* The PF sends status change events to VFs using + * the I40E_VIRTCHNL_OP_EVENT opcode. + * VFs send requests to the PF using the other ops. */ I40E_VIRTCHNL_OP_UNKNOWN = 0, I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ - I40E_VIRTCHNL_OP_RESET_VF, - I40E_VIRTCHNL_OP_GET_VF_RESOURCES, - I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE, - I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE, - I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, - I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, - I40E_VIRTCHNL_OP_ENABLE_QUEUES, - I40E_VIRTCHNL_OP_DISABLE_QUEUES, - I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, - I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, - I40E_VIRTCHNL_OP_ADD_VLAN, - I40E_VIRTCHNL_OP_DEL_VLAN, - I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, - I40E_VIRTCHNL_OP_GET_STATS, - I40E_VIRTCHNL_OP_FCOE, - I40E_VIRTCHNL_OP_CONFIG_RSS, -/* PF sends status change events to vfs using - * the following op. - */ - I40E_VIRTCHNL_OP_EVENT, + I40E_VIRTCHNL_OP_RESET_VF = 2, + I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3, + I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, + I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8, + I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9, + I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10, + I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11, + I40E_VIRTCHNL_OP_ADD_VLAN = 12, + I40E_VIRTCHNL_OP_DEL_VLAN = 13, + I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + I40E_VIRTCHNL_OP_GET_STATS = 15, + I40E_VIRTCHNL_OP_FCOE = 16, + I40E_VIRTCHNL_OP_EVENT = 17, + I40E_VIRTCHNL_OP_CONFIG_RSS = 18, }; /* Virtual channel message descriptor. 
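[Editor's note] The i40e_virtchnl.h hunk above pins every opcode to an explicit number (I40E_VIRTCHNL_OP_EVENT becomes 17) because these values travel between PF and VF drivers that may be built from different trees; letting the compiler assign them implicitly would silently renumber the wire protocol whenever an entry is added or reordered. A small pattern for guarding such an ABI enum follows; the static assertions and the names are an illustration, not something taken from the driver.

#include <stdio.h>

enum wire_op {
    WIRE_OP_UNKNOWN = 0,
    WIRE_OP_VERSION = 1,    /* must ALWAYS be 1 */
    WIRE_OP_RESET   = 2,
    WIRE_OP_EVENT   = 17,   /* fixed slot on the wire */
};

/* Catch accidental renumbering at compile time. */
_Static_assert(WIRE_OP_VERSION == 1,  "version opcode is fixed by the ABI");
_Static_assert(WIRE_OP_EVENT   == 17, "event opcode is fixed by the ABI");

int main(void)
{
    printf("event opcode on the wire: %d\n", WIRE_OP_EVENT);
    return 0;
}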
This overlays the admin queue diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 40f042a..0a93684 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -30,8 +30,8 @@ /** * i40e_vc_disable_vf - * @pf: pointer to the pf info - * @vf: pointer to the vf info + * @pf: pointer to the PF info + * @vf: pointer to the VF info * * Disable the VF through a SW reset **/ @@ -48,10 +48,10 @@ static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf) /** * i40e_vc_isvalid_vsi_id - * @vf: pointer to the vf info - * @vsi_id: vf relative vsi id + * @vf: pointer to the VF info + * @vsi_id: VF relative VSI id * - * check for the valid vsi id + * check for the valid VSI id **/ static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id) { @@ -62,7 +62,7 @@ static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id) /** * i40e_vc_isvalid_queue_id - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @vsi_id: vsi id * @qid: vsi relative queue id * @@ -78,8 +78,8 @@ static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id, /** * i40e_vc_isvalid_vector_id - * @vf: pointer to the vf info - * @vector_id: vf relative vector id + * @vf: pointer to the VF info + * @vector_id: VF relative vector id * * check for the valid vector id **/ @@ -94,11 +94,11 @@ static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id) /** * i40e_vc_get_pf_queue_id - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @vsi_idx: index of VSI in PF struct * @vsi_queue_id: vsi relative queue id * - * return pf relative queue id + * return PF relative queue id **/ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx, u8 vsi_queue_id) @@ -120,7 +120,7 @@ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx, /** * i40e_config_irq_link_list - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @vsi_idx: index of VSI in PF struct * @vecmap: irq map info * @@ -220,7 +220,7 @@ irq_list_done: /** * i40e_config_vsi_tx_queue - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @vsi_idx: index of VSI in PF struct * @vsi_queue_id: vsi relative queue index * @info: config. info @@ -287,7 +287,7 @@ error_context: /** * i40e_config_vsi_rx_queue - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @vsi_idx: index of VSI in PF struct * @vsi_queue_id: vsi relative queue index * @info: config. 
info @@ -378,10 +378,10 @@ error_param: /** * i40e_alloc_vsi_res - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @type: type of VSI to allocate * - * alloc vf vsi context & resources + * alloc VF vsi context & resources **/ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) { @@ -394,7 +394,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) if (!vsi) { dev_err(&pf->pdev->dev, - "add vsi failed for vf %d, aq_err %d\n", + "add vsi failed for VF %d, aq_err %d\n", vf->vf_id, pf->hw.aq.asq_last_status); ret = -ENOENT; goto error_alloc_vsi_res; @@ -403,9 +403,6 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; vf->lan_vsi_index = vsi->idx; vf->lan_vsi_id = vsi->id; - dev_info(&pf->pdev->dev, - "VF %d assigned LAN VSI index %d, VSI id %d\n", - vf->vf_id, vsi->idx, vsi->id); /* If the port VLAN has been configured and then the * VF driver was removed then the VSI port VLAN * configuration was destroyed. Check if there is @@ -446,9 +443,9 @@ error_alloc_vsi_res: /** * i40e_enable_vf_mappings - * @vf: pointer to the vf info + * @vf: pointer to the VF info * - * enable vf mappings + * enable VF mappings **/ static void i40e_enable_vf_mappings(struct i40e_vf *vf) { @@ -496,9 +493,9 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf) /** * i40e_disable_vf_mappings - * @vf: pointer to the vf info + * @vf: pointer to the VF info * - * disable vf mappings + * disable VF mappings **/ static void i40e_disable_vf_mappings(struct i40e_vf *vf) { @@ -516,9 +513,9 @@ static void i40e_disable_vf_mappings(struct i40e_vf *vf) /** * i40e_free_vf_res - * @vf: pointer to the vf info + * @vf: pointer to the VF info * - * free vf resources + * free VF resources **/ static void i40e_free_vf_res(struct i40e_vf *vf) { @@ -571,9 +568,9 @@ static void i40e_free_vf_res(struct i40e_vf *vf) /** * i40e_alloc_vf_res - * @vf: pointer to the vf info + * @vf: pointer to the VF info * - * allocate vf resources + * allocate VF resources **/ static int i40e_alloc_vf_res(struct i40e_vf *vf) { @@ -589,11 +586,11 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf) set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); /* store the total qps number for the runtime - * vf req validation + * VF req validation */ vf->num_queue_pairs = total_queue_pairs; - /* vf is now completely initialized */ + /* VF is now completely initialized */ set_bit(I40E_VF_STAT_INIT, &vf->vf_states); error_alloc: @@ -607,7 +604,7 @@ error_alloc: #define VF_TRANS_PENDING_MASK 0x20 /** * i40e_quiesce_vf_pci - * @vf: pointer to the vf structure + * @vf: pointer to the VF structure * * Wait for VF PCI transactions to be cleared after reset. Returns -EIO * if the transactions never clear. @@ -634,10 +631,10 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf) /** * i40e_reset_vf - * @vf: pointer to the vf structure + * @vf: pointer to the VF structure * @flr: VFLR was issued or not * - * reset the vf + * reset the VF **/ void i40e_reset_vf(struct i40e_vf *vf, bool flr) { @@ -657,7 +654,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) * just need to clean up, so don't hit the VFRTRIG register. 
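/*
 * Illustrative sketch, not part of the patch: i40e_quiesce_vf_pci() above is a
 * bounded poll — re-read a status register until the "transactions pending"
 * bit clears, and give up with -EIO if it never does.  The register accessor,
 * delay helper and retry count below are invented for the example.
 */
#include <errno.h>
#include <stdint.h>

#define SKETCH_TRANS_PENDING	0x20	/* same idea as VF_TRANS_PENDING_MASK */
#define SKETCH_MAX_RETRIES	100

extern uint32_t sketch_read_vf_status(void);	/* assumed register read */
extern void sketch_delay_us(unsigned int us);	/* assumed busy-wait helper */

static int sketch_quiesce_vf(void)
{
	int i;

	for (i = 0; i < SKETCH_MAX_RETRIES; i++) {
		if (!(sketch_read_vf_status() & SKETCH_TRANS_PENDING))
			return 0;		/* all transactions drained */
		sketch_delay_us(10);
	}
	return -EIO;				/* never cleared: report failure */
}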
*/ if (!flr) { - /* reset vf using VPGEN_VFRTRIG reg */ + /* reset VF using VPGEN_VFRTRIG reg */ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); @@ -700,7 +697,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false); complete_reset: - /* reallocate vf resources to reset the VSI state */ + /* reallocate VF resources to reset the VSI state */ i40e_free_vf_res(vf); i40e_alloc_vf_res(vf); i40e_enable_vf_mappings(vf); @@ -713,78 +710,10 @@ complete_reset: } /** - * i40e_enable_pf_switch_lb - * @pf: pointer to the pf structure - * - * enable switch loop back or die - no point in a return value - **/ -void i40e_enable_pf_switch_lb(struct i40e_pf *pf) -{ - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; - struct i40e_vsi_context ctxt; - int aq_ret; - - ctxt.seid = pf->main_vsi_seid; - ctxt.pf_num = pf->hw.pf_id; - ctxt.vf_num = 0; - aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); - if (aq_ret) { - dev_info(&pf->pdev->dev, - "%s couldn't get pf vsi config, err %d, aq_err %d\n", - __func__, aq_ret, pf->hw.aq.asq_last_status); - return; - } - ctxt.flags = I40E_AQ_VSI_TYPE_PF; - ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); - ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); - - aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); - if (aq_ret) { - dev_info(&pf->pdev->dev, - "%s: update vsi switch failed, aq_err=%d\n", - __func__, vsi->back->hw.aq.asq_last_status); - } -} - -/** - * i40e_disable_pf_switch_lb - * @pf: pointer to the pf structure - * - * disable switch loop back or die - no point in a return value - **/ -static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) -{ - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; - struct i40e_vsi_context ctxt; - int aq_ret; - - ctxt.seid = pf->main_vsi_seid; - ctxt.pf_num = pf->hw.pf_id; - ctxt.vf_num = 0; - aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); - if (aq_ret) { - dev_info(&pf->pdev->dev, - "%s couldn't get pf vsi config, err %d, aq_err %d\n", - __func__, aq_ret, pf->hw.aq.asq_last_status); - return; - } - ctxt.flags = I40E_AQ_VSI_TYPE_PF; - ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); - ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); - - aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); - if (aq_ret) { - dev_info(&pf->pdev->dev, - "%s: update vsi switch failed, aq_err=%d\n", - __func__, vsi->back->hw.aq.asq_last_status); - } -} - -/** * i40e_free_vfs - * @pf: pointer to the pf structure + * @pf: pointer to the PF structure * - * free vf resources + * free VF resources **/ void i40e_free_vfs(struct i40e_pf *pf) { @@ -806,7 +735,7 @@ void i40e_free_vfs(struct i40e_pf *pf) msleep(20); /* let any messages in transit get finished up */ - /* free up vf resources */ + /* free up VF resources */ tmp = pf->num_alloc_vfs; pf->num_alloc_vfs = 0; for (i = 0; i < tmp; i++) { @@ -832,7 +761,6 @@ void i40e_free_vfs(struct i40e_pf *pf) bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); } - i40e_disable_pf_switch_lb(pf); } else { dev_warn(&pf->pdev->dev, "unable to disable SR-IOV because VFs are assigned.\n"); @@ -843,10 +771,10 @@ void i40e_free_vfs(struct i40e_pf *pf) #ifdef CONFIG_PCI_IOV /** * i40e_alloc_vfs - * @pf: pointer to the pf structure - * @num_alloc_vfs: number of vfs to allocate + * @pf: pointer to the PF structure + * 
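/*
 * Illustrative sketch, not part of the patch: the two helpers deleted above
 * (i40e_enable/disable_pf_switch_lb) were a read-modify-write of one VSI
 * context flag — fetch the current context, mark only the switch section as
 * valid, set or clear the ALLOW_LB bit, write the context back.  The struct
 * and accessors below are simplified stand-ins; the real switch_id field is
 * little-endian on the wire (hence the cpu_to_le16() calls in the removed code).
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_PROP_SWITCH_VALID  0x0001
#define SKETCH_SW_FLAG_ALLOW_LB   0x0020   /* placeholder bit */

struct sketch_vsi_ctx {
	uint16_t valid_sections;
	uint16_t switch_id;
};

extern int sketch_get_vsi_params(struct sketch_vsi_ctx *ctx);
extern int sketch_update_vsi_params(const struct sketch_vsi_ctx *ctx);

static int sketch_set_switch_loopback(bool enable)
{
	struct sketch_vsi_ctx ctx;
	int err = sketch_get_vsi_params(&ctx);

	if (err)
		return err;
	ctx.valid_sections = SKETCH_PROP_SWITCH_VALID;	/* touch only this section */
	if (enable)
		ctx.switch_id |= SKETCH_SW_FLAG_ALLOW_LB;
	else
		ctx.switch_id &= ~SKETCH_SW_FLAG_ALLOW_LB;
	return sketch_update_vsi_params(&ctx);
}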
@num_alloc_vfs: number of VFs to allocate * - * allocate vf resources + * allocate VF resources **/ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) { @@ -883,15 +811,14 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) /* assign default capabilities */ set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); vfs[i].spoofchk = true; - /* vf resources get allocated during reset */ + /* VF resources get allocated during reset */ i40e_reset_vf(&vfs[i], false); - /* enable vf vplan_qtable mappings */ + /* enable VF vplan_qtable mappings */ i40e_enable_vf_mappings(&vfs[i]); } pf->num_alloc_vfs = num_alloc_vfs; - i40e_enable_pf_switch_lb(pf); err_alloc: if (ret) i40e_free_vfs(pf); @@ -905,7 +832,7 @@ err_iov: /** * i40e_pci_sriov_enable * @pdev: pointer to a pci_dev structure - * @num_vfs: number of vfs to allocate + * @num_vfs: number of VFs to allocate * * Enable or change the number of VFs **/ @@ -945,7 +872,7 @@ err_out: /** * i40e_pci_sriov_configure * @pdev: pointer to a pci_dev structure - * @num_vfs: number of vfs to allocate + * @num_vfs: number of VFs to allocate * * Enable or change the number of VFs. Called when the user updates the number * of VFs in sysfs. @@ -970,13 +897,13 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) /** * i40e_vc_send_msg_to_vf - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @v_opcode: virtual channel opcode * @v_retval: virtual channel return value * @msg: pointer to the msg buffer * @msglen: msg length * - * send msg to vf + * send msg to VF **/ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen) @@ -1025,11 +952,11 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, /** * i40e_vc_send_resp_to_vf - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @opcode: operation code * @retval: return value * - * send resp msg to vf + * send resp msg to VF **/ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, enum i40e_virtchnl_ops opcode, @@ -1040,9 +967,9 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, /** * i40e_vc_get_version_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * - * called from the vf to request the API version used by the PF + * called from the VF to request the API version used by the PF **/ static int i40e_vc_get_version_msg(struct i40e_vf *vf) { @@ -1058,11 +985,11 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf) /** * i40e_vc_get_vf_resources_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf to request its resources + * called from the VF to request its resources **/ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf) { @@ -1109,7 +1036,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf) set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); err: - /* send the response back to the vf */ + /* send the response back to the VF */ ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, aq_ret, (u8 *)vfres, len); @@ -1119,13 +1046,13 @@ err: /** * i40e_vc_reset_vf_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf to reset itself, - * unlike other virtchnl messages, pf driver - * doesn't send the response back to the vf + * called from the VF to reset itself, + * unlike other virtchnl messages, PF driver + * doesn't send the response back to the VF **/ static 
void i40e_vc_reset_vf_msg(struct i40e_vf *vf) { @@ -1135,12 +1062,12 @@ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) /** * i40e_vc_config_promiscuous_mode_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf to configure the promiscuous mode of - * vf vsis + * called from the VF to configure the promiscuous mode of + * VF vsis **/ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) @@ -1167,7 +1094,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, allmulti, NULL); error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, aq_ret); @@ -1175,11 +1102,11 @@ error_param: /** * i40e_vc_config_queues_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf to configure the rx/tx + * called from the VF to configure the rx/tx * queues **/ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) @@ -1221,22 +1148,22 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) goto error_param; } } - /* set vsi num_queue_pairs in use to num configured by vf */ + /* set vsi num_queue_pairs in use to num configured by VF */ pf->vsi[vf->lan_vsi_index]->num_queue_pairs = qci->num_queue_pairs; error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, aq_ret); } /** * i40e_vc_config_irq_map_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf to configure the irq to + * called from the VF to configure the irq to * queue map **/ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) @@ -1288,18 +1215,18 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) i40e_config_irq_link_list(vf, vsi_id, map); } error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, aq_ret); } /** * i40e_vc_enable_queues_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf to enable all or specific queue(s) + * called from the VF to enable all or specific queue(s) **/ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { @@ -1326,18 +1253,18 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) if (i40e_vsi_control_rings(pf->vsi[vsi_id], true)) aq_ret = I40E_ERR_TIMEOUT; error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, aq_ret); } /** * i40e_vc_disable_queues_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf to disable all or specific + * called from the VF to disable all or specific * queue(s) **/ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) @@ -1366,18 +1293,18 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) aq_ret = I40E_ERR_TIMEOUT; error_param: - /* send the response to the vf */ + /* send 
the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES, aq_ret); } /** * i40e_vc_get_stats_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf to get vsi stats + * called from the VF to get vsi stats **/ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { @@ -1409,14 +1336,14 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) stats = vsi->eth_stats; error_param: - /* send the response back to the vf */ + /* send the response back to the VF */ return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret, (u8 *)&stats, sizeof(stats)); } /** * i40e_check_vf_permission - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @macaddr: pointer to the MAC Address being checked * * Check if the VF has permission to add or delete unicast MAC address @@ -1450,7 +1377,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr) /** * i40e_vc_add_mac_addr_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * @@ -1507,14 +1434,14 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, ret); } /** * i40e_vc_del_mac_addr_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * @@ -1558,14 +1485,14 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, ret); } /** * i40e_vc_add_vlan_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * @@ -1613,13 +1540,13 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) } error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret); } /** * i40e_vc_remove_vlan_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * @@ -1664,13 +1591,13 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) } error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret); } /** * i40e_vc_validate_vf_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * @msghndl: msg handle @@ -1776,14 +1703,14 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, /** * i40e_vc_process_vf_msg - * @pf: pointer to the pf structure - * @vf_id: source vf id + * @pf: pointer to the PF structure + * @vf_id: source VF id * @msg: pointer to the msg buffer * @msglen: msg length * @msghndl: msg handle * * called from the common aeq/arq handler to - * process request from vf + * process request from VF **/ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen) @@ -1801,7 
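/*
 * Illustrative sketch, not part of the patch: i40e_check_vf_permission() above
 * only appears here as a kernel-doc rename, so this is a guess at the shape of
 * such a check rather than the driver's exact logic — reject obviously invalid
 * unicast addresses, and when the VF is unprivileged and an administrator has
 * pinned a MAC, allow only that pinned address.  All names are invented.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct sketch_vf {
	bool privileged;          /* VF may manage its own filters */
	bool admin_set_mac;       /* host admin pinned default_addr */
	uint8_t default_addr[6];
};

static bool sketch_mac_is_zero(const uint8_t *a)
{
	static const uint8_t zero[6];
	return memcmp(a, zero, 6) == 0;
}

static int sketch_check_vf_mac(const struct sketch_vf *vf, const uint8_t *mac)
{
	if (sketch_mac_is_zero(mac) || (mac[0] & 1))	/* zero or multicast */
		return -1;
	if (!vf->privileged && vf->admin_set_mac &&
	    memcmp(mac, vf->default_addr, 6) != 0)
		return -2;				/* only the pinned MAC allowed */
	return 0;
}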
+1728,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen); if (ret) { - dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n", + dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", local_vf_id, v_opcode, msglen); return ret; } @@ -1849,7 +1776,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, break; case I40E_VIRTCHNL_OP_UNKNOWN: default: - dev_err(&pf->pdev->dev, "Unsupported opcode %d from vf %d\n", + dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", v_opcode, local_vf_id); ret = i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_NOT_IMPLEMENTED); @@ -1861,10 +1788,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, /** * i40e_vc_process_vflr_event - * @pf: pointer to the pf structure + * @pf: pointer to the PF structure * * called from the vlfr irq handler to - * free up vf resources and state variables + * free up VF resources and state variables **/ int i40e_vc_process_vflr_event(struct i40e_pf *pf) { @@ -1885,7 +1812,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; - /* read GLGEN_VFLRSTAT register to find out the flr vfs */ + /* read GLGEN_VFLRSTAT register to find out the flr VFs */ vf = &pf->vf[vf_id]; reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); if (reg & (1 << bit_idx)) { @@ -1902,7 +1829,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) /** * i40e_vc_vf_broadcast - * @pf: pointer to the pf structure + * @pf: pointer to the PF structure * @opcode: operation code * @retval: return value * @msg: pointer to the msg buffer @@ -1921,7 +1848,7 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf, for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; - /* Not all vfs are enabled so skip the ones that are not */ + /* Not all VFs are enabled so skip the ones that are not */ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) continue; @@ -1936,7 +1863,7 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf, /** * i40e_vc_notify_link_state - * @pf: pointer to the pf structure + * @pf: pointer to the PF structure * * send a link status message to all VFs on a given PF **/ @@ -1969,7 +1896,7 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf) /** * i40e_vc_notify_reset - * @pf: pointer to the pf structure + * @pf: pointer to the PF structure * * indicate a pending reset to all VFs on a given PF **/ @@ -1985,7 +1912,7 @@ void i40e_vc_notify_reset(struct i40e_pf *pf) /** * i40e_vc_notify_vf_reset - * @vf: pointer to the vf structure + * @vf: pointer to the VF structure * * indicate a pending reset to the given VF **/ @@ -2015,10 +1942,10 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) /** * i40e_ndo_set_vf_mac * @netdev: network interface device structure - * @vf_id: vf identifier + * @vf_id: VF identifier * @mac: mac address * - * program vf mac address + * program VF mac address **/ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) { @@ -2083,11 +2010,11 @@ error_param: /** * i40e_ndo_set_vf_port_vlan * @netdev: network interface device structure - * @vf_id: vf identifier + * @vf_id: VF identifier * @vlan_id: mac address * @qos: priority setting * - * program vf vlan id and/or qos + * program VF vlan id 
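/*
 * Illustrative sketch, not part of the patch: the VFLR handling above maps an
 * absolute VF number onto the GLGEN_VFLRSTAT register array — 32 VFs per
 * 32-bit register, so the register index is (vf_base_id + vf_id) / 32 and the
 * bit inside it is the remainder.  The same arithmetic over an ordinary array
 * standing in for the register bank:
 */
#include <stdbool.h>
#include <stdint.h>

static bool sketch_vf_reset_pending(const uint32_t *vflrstat,
				    unsigned int vf_base_id, unsigned int vf_id)
{
	unsigned int abs_id = vf_base_id + vf_id;
	unsigned int reg_idx = abs_id / 32;	/* which 32-bit status word */
	unsigned int bit_idx = abs_id % 32;	/* which bit inside that word */

	return (vflrstat[reg_idx] >> bit_idx) & 1u;
}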
and/or qos **/ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos) @@ -2196,10 +2123,10 @@ error_pvid: /** * i40e_ndo_set_vf_bw * @netdev: network interface device structure - * @vf_id: vf identifier - * @tx_rate: tx rate + * @vf_id: VF identifier + * @tx_rate: Tx rate * - * configure vf tx rate + * configure VF Tx rate **/ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, int max_tx_rate) @@ -2219,7 +2146,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, } if (min_tx_rate) { - dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for vf %d.\n", + dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n", min_tx_rate, vf_id); return -EINVAL; } @@ -2247,7 +2174,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, } if (max_tx_rate > speed) { - dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for vf %d.", + dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.", max_tx_rate, vf->vf_id); ret = -EINVAL; goto error; @@ -2276,10 +2203,10 @@ error: /** * i40e_ndo_get_vf_config * @netdev: network interface device structure - * @vf_id: vf identifier - * @ivi: vf configuration structure + * @vf_id: VF identifier + * @ivi: VF configuration structure * - * return vf configuration + * return VF configuration **/ int i40e_ndo_get_vf_config(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) @@ -2331,7 +2258,7 @@ error_param: /** * i40e_ndo_set_vf_link_state * @netdev: network interface device structure - * @vf_id: vf identifier + * @vf_id: VF identifier * @link: required link state * * Set the link state of a specified VF, regardless of physical link state @@ -2394,7 +2321,7 @@ error_out: /** * i40e_ndo_set_vf_spoofchk * @netdev: network interface device structure - * @vf_id: vf identifier + * @vf_id: VF identifier * @enable: flag to enable or disable feature * * Enable or disable VF spoof checking @@ -2427,7 +2354,8 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) ctxt.pf_num = pf->hw.pf_id; ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); if (enable) - ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK; + ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | + I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n", diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 9452f52..9c3a410 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
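/*
 * Illustrative sketch, not part of the patch: i40e_ndo_set_vf_bw() above
 * rejects any non-zero minimum rate (only a cap is supported) and rejects a
 * maximum rate above the current link speed.  The helper name below is
 * invented for the example; 0 means "no limit", otherwise a cap in Mbps.
 */
extern int sketch_link_speed_mbps(void);	/* assumed: current link speed */

static int sketch_validate_vf_rate(int min_tx_rate, int max_tx_rate)
{
	if (min_tx_rate)
		return -1;			/* minimum-rate guarantees unsupported */
	if (max_tx_rate > sketch_link_speed_mbps())
		return -1;			/* cap cannot exceed link speed */
	return 0;
}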
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -71,12 +71,12 @@ enum i40e_vf_capabilities { struct i40e_vf { struct i40e_pf *pf; - /* vf id in the pf space */ + /* VF id in the PF space */ u16 vf_id; - /* all vf vsis connect to the same parent */ + /* all VF vsis connect to the same parent */ enum i40e_switch_element_types parent_type; - /* vf Port Extender (PE) stag if used */ + /* VF Port Extender (PE) stag if used */ u16 stag; struct i40e_virtchnl_ether_addr default_lan_addr; @@ -91,7 +91,7 @@ struct i40e_vf { u8 lan_vsi_index; /* index into PF struct */ u8 lan_vsi_id; /* ID as used by firmware */ - u8 num_queue_pairs; /* num of qps assigned to vf vsis */ + u8 num_queue_pairs; /* num of qps assigned to VF vsis */ u64 num_mdd_events; /* num of mdd events detected */ u64 num_invalid_msgs; /* num of malformed or invalid msgs detected */ u64 num_valid_msgs; /* num of valid msgs detected */ @@ -100,7 +100,7 @@ struct i40e_vf { unsigned long vf_states; /* vf's runtime states */ unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ bool link_forced; - bool link_up; /* only valid if vf link is forced */ + bool link_up; /* only valid if VF link is forced */ bool spoofchk; }; @@ -113,7 +113,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf); void i40e_reset_vf(struct i40e_vf *vf, bool flr); void i40e_vc_notify_vf_reset(struct i40e_vf *vf); -/* vf configuration related iplink handlers */ +/* VF configuration related iplink handlers */ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos); @@ -126,6 +126,5 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable); void i40e_vc_notify_link_state(struct i40e_pf *pf); void i40e_vc_notify_reset(struct i40e_pf *pf); -void i40e_enable_pf_switch_lb(struct i40e_pf *pf); #endif /* _I40E_VIRTCHNL_PF_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h index 60f04e9..ef43d68 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h @@ -93,6 +93,7 @@ struct i40e_adminq_info { u16 asq_buf_size; /* send queue buffer size */ u16 fw_maj_ver; /* firmware major version */ u16 fw_min_ver; /* firmware minor version */ + u32 fw_build; /* firmware build number */ u16 api_maj_ver; /* api major version */ u16 api_min_ver; /* api minor version */ bool nvm_release_on_done; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 28c40c5..f07b9ff 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -85,46 +85,53 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; u16 len = le16_to_cpu(aq_desc->datalen); - u8 *aq_buffer = (u8 *)buffer; - u32 data[4]; - u32 i = 0; + u8 *buf = (u8 *)buffer; + u16 i = 0; if ((!(mask & hw->debug_mask)) || (desc == NULL)) return; i40e_debug(hw, mask, "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", - aq_desc->opcode, aq_desc->flags, aq_desc->datalen, - aq_desc->retval); + le16_to_cpu(aq_desc->opcode), + le16_to_cpu(aq_desc->flags), + le16_to_cpu(aq_desc->datalen), + le16_to_cpu(aq_desc->retval)); i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", - 
aq_desc->cookie_high, aq_desc->cookie_low); + le32_to_cpu(aq_desc->cookie_high), + le32_to_cpu(aq_desc->cookie_low)); i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", - aq_desc->params.internal.param0, - aq_desc->params.internal.param1); + le32_to_cpu(aq_desc->params.internal.param0), + le32_to_cpu(aq_desc->params.internal.param1)); i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", - aq_desc->params.external.addr_high, - aq_desc->params.external.addr_low); + le32_to_cpu(aq_desc->params.external.addr_high), + le32_to_cpu(aq_desc->params.external.addr_low)); if ((buffer != NULL) && (aq_desc->datalen != 0)) { - memset(data, 0, sizeof(data)); i40e_debug(hw, mask, "AQ CMD Buffer:\n"); if (buf_len < len) len = buf_len; - for (i = 0; i < len; i++) { - data[((i % 16) / 4)] |= - ((u32)aq_buffer[i]) << (8 * (i % 4)); - if ((i % 16) == 15) { - i40e_debug(hw, mask, - "\t0x%04X %08X %08X %08X %08X\n", - i - 15, data[0], data[1], data[2], - data[3]); - memset(data, 0, sizeof(data)); - } + /* write the full 16-byte chunks */ + for (i = 0; i < (len - 16); i += 16) + i40e_debug(hw, mask, + "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", + i, buf[i], buf[i + 1], buf[i + 2], + buf[i + 3], buf[i + 4], buf[i + 5], + buf[i + 6], buf[i + 7], buf[i + 8], + buf[i + 9], buf[i + 10], buf[i + 11], + buf[i + 12], buf[i + 13], buf[i + 14], + buf[i + 15]); + /* write whatever's left over without overrunning the buffer */ + if (i < len) { + char d_buf[80]; + int j = 0; + + memset(d_buf, 0, sizeof(d_buf)); + j += sprintf(d_buf, "\t0x%04X ", i); + while (i < len) + j += sprintf(&d_buf[j], " %02X", buf[i++]); + i40e_debug(hw, mask, "%s\n", d_buf); } - if ((i % 16) != 0) - i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n", - i - (i % 16), data[0], data[1], data[2], - data[3]); } } @@ -535,7 +542,6 @@ struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = { I40E_PTT_UNUSED_ENTRY(255) }; - /** * i40e_aq_send_msg_to_pf * @hw: pointer to the hardware structure diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index 9173834..58e37a4 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -59,8 +59,7 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void i40e_idle_aq(struct i40e_hw *hw); void i40evf_resume_aq(struct i40e_hw *hw); bool i40evf_check_asq_alive(struct i40e_hw *hw); -i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, - bool unloading); +i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); i40e_status i40e_set_mac_type(struct i40e_hw *hw); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h index c1f6a59..3cc7376 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_register.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h @@ -310,6 +310,10 @@ #define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) #define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 #define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) +#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7 +#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0 +#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT) #define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */ #define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 #define 
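/*
 * Illustrative sketch, not part of the patch: the i40evf_debug_aq() rewrite
 * above switches the admin-queue buffer dump from packed 32-bit words to plain
 * bytes — full 16-byte rows first, then whatever is left assembled into one
 * short line.  A self-contained version of the same loop:
 */
#include <stdint.h>
#include <stdio.h>

static void sketch_hexdump(const uint8_t *buf, size_t len)
{
	size_t i, j;

	for (i = 0; i + 16 <= len; i += 16) {		/* full 16-byte rows */
		printf("\t0x%04zX ", i);
		for (j = 0; j < 16; j++)
			printf(" %02X", buf[i + j]);
		printf("\n");
	}
	if (i < len) {					/* partial final row */
		printf("\t0x%04zX ", i);
		for (; i < len; i++)
			printf(" %02X", buf[i]);
		printf("\n");
	}
}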
I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) @@ -421,6 +425,8 @@ #define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26 +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT) #define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ #define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 #define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) @@ -484,7 +490,9 @@ #define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 #define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT) #define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 @@ -548,9 +556,6 @@ #define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) -#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */ -#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0 -#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT) #define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */ #define I40E_GLGEN_RTRIG_CORER_SHIFT 0 #define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) @@ -1066,7 +1071,7 @@ #define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) #define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 #define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) -#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */ +#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */ #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) #define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ @@ -1171,7 +1176,7 @@ #define I40E_VFINT_ITRN_MAX_INDEX 2 #define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 #define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) @@ -1803,9 +1808,6 @@ #define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) -#define I40E_GLPCI_LATCT 
0x0009C4B4 /* Reset: PCIR */ -#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0 -#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT) #define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ #define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 #define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) @@ -1902,6 +1904,11 @@ #define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT) #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1 #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT) +#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */ +#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9 +#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT) +#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11 +#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT) #define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */ #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0 #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT) @@ -2374,20 +2381,20 @@ #define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT) #define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPRCH_MAX_INDEX 3 -#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT) +#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT) #define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPRCL_MAX_INDEX 3 -#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT) +#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT) #define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPTCH_MAX_INDEX 3 -#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT) +#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT) #define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPTCL_MAX_INDEX 3 -#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT) +#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT) #define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_CRCERRS_MAX_INDEX 3 #define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0 @@ -2620,10 +2627,6 @@ #define I40E_GLPRT_TDOLD_MAX_INDEX 3 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT) -#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_TDPC_MAX_INDEX 3 -#define I40E_GLPRT_TDPC_TDPC_SHIFT 0 -#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT) #define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_UPRCH_MAX_INDEX 3 #define 
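/*
 * Illustrative sketch, not part of the patch: every entry in the register.h
 * hunks above is a _SHIFT/_MASK pair, where the mask is a field-width pattern
 * moved to its bit position (what the I40E_MASK(width_mask, shift) wrapper
 * expresses).  Generic field read/write helpers, with invented names:
 */
#include <stdint.h>

#define SKETCH_FIELD_MASK(width_mask, shift)   ((uint32_t)(width_mask) << (shift))

static uint32_t sketch_field_get(uint32_t reg, uint32_t mask, unsigned int shift)
{
	return (reg & mask) >> shift;
}

static uint32_t sketch_field_set(uint32_t reg, uint32_t mask, unsigned int shift,
				 uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Example: a 2-bit field at bits 9..10, like the GLTPH_CTRL DESC_PH field above. */
#define SKETCH_DESC_PH_SHIFT  9
#define SKETCH_DESC_PH_MASK   SKETCH_FIELD_MASK(0x3, SKETCH_DESC_PH_SHIFT)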
I40E_GLPRT_UPRCH_UPRCH_SHIFT 0 @@ -2990,9 +2993,6 @@ #define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) -#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */ -#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0 -#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT) #define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 #define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) @@ -3258,7 +3258,7 @@ #define I40E_VFINT_ITRN1_MAX_INDEX 2 #define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 #define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */ +#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) #define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 7088915..f41da5d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -25,6 +25,7 @@ ******************************************************************************/ #include <linux/prefetch.h> +#include <net/busy_poll.h> #include "i40evf.h" #include "i40e_prototype.h" @@ -288,6 +289,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_desc = I40E_TX_DESC(tx_ring, 0); } + prefetch(tx_desc); + /* update budget accounting */ budget--; } while (likely(budget)); @@ -368,6 +371,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { u32 val = I40E_VFINT_DYN_CTLN_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK; /* allow 00 to be written to the index */ @@ -529,6 +533,22 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) if (!rx_ring->rx_bi) return; + if (ring_is_ps_enabled(rx_ring)) { + int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; + + rx_bi = &rx_ring->rx_bi[0]; + if (rx_bi->hdr_buf) { + dma_free_coherent(dev, + bufsz, + rx_bi->hdr_buf, + rx_bi->dma); + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = 0; + rx_bi->hdr_buf = NULL; + } + } + } /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { rx_bi = &rx_ring->rx_bi[i]; @@ -587,6 +607,37 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring) } /** + * i40evf_alloc_rx_headers - allocate rx header buffers + * @rx_ring: ring to alloc buffers + * + * Allocate rx header buffers for the entire ring. As these are static, + * this is only called when setting up a new ring. + **/ +void i40evf_alloc_rx_headers(struct i40e_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + struct i40e_rx_buffer *rx_bi; + dma_addr_t dma; + void *buffer; + int buf_size; + int i; + + if (rx_ring->rx_bi[0].hdr_buf) + return; + /* Make sure the buffers don't cross cache line boundaries. 
*/ + buf_size = ALIGN(rx_ring->rx_hdr_len, 256); + buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, + &dma, GFP_KERNEL); + if (!buffer) + return; + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = dma + (i * buf_size); + rx_bi->hdr_buf = buffer + (i * buf_size); + } +} + +/** * i40evf_setup_rx_descriptors - Allocate Rx descriptors * @rx_ring: Rx descriptor ring (for a specific queue) to setup * @@ -646,11 +697,76 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) } /** - * i40evf_alloc_rx_buffers - Replace used receive buffers; packet split + * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace **/ -void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) +void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) +{ + u16 i = rx_ring->next_to_use; + union i40e_rx_desc *rx_desc; + struct i40e_rx_buffer *bi; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return; + + while (cleaned_count--) { + rx_desc = I40E_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_bi[i]; + + if (bi->skb) /* desc is in use */ + goto no_buffers; + if (!bi->page) { + bi->page = alloc_page(GFP_ATOMIC); + if (!bi->page) { + rx_ring->rx_stats.alloc_page_failed++; + goto no_buffers; + } + } + + if (!bi->page_dma) { + /* use a half page if we're re-using */ + bi->page_offset ^= PAGE_SIZE / 2; + bi->page_dma = dma_map_page(rx_ring->dev, + bi->page, + bi->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + if (dma_mapping_error(rx_ring->dev, + bi->page_dma)) { + rx_ring->rx_stats.alloc_page_failed++; + bi->page_dma = 0; + goto no_buffers; + } + } + + dma_sync_single_range_for_device(rx_ring->dev, + bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); + i++; + if (i == rx_ring->count) + i = 0; + } + +no_buffers: + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); +} + +/** + * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) { u16 i = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; @@ -690,40 +806,8 @@ void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) } } - if (ring_is_ps_enabled(rx_ring)) { - if (!bi->page) { - bi->page = alloc_page(GFP_ATOMIC); - if (!bi->page) { - rx_ring->rx_stats.alloc_page_failed++; - goto no_buffers; - } - } - - if (!bi->page_dma) { - /* use a half page if we're re-using */ - bi->page_offset ^= PAGE_SIZE / 2; - bi->page_dma = dma_map_page(rx_ring->dev, - bi->page, - bi->page_offset, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, - bi->page_dma)) { - rx_ring->rx_stats.alloc_page_failed++; - bi->page_dma = 0; - goto no_buffers; - } - } - - /* Refresh the desc even if buffer_addrs didn't change - * because each write-back erases this info. 
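/*
 * Illustrative sketch, not part of the patch: i40evf_alloc_rx_headers() above
 * makes one coherent allocation for the whole ring and hands every descriptor
 * a 256-byte-aligned slice of it, instead of per-descriptor allocations.  The
 * same slicing with plain malloc standing in for dma_alloc_coherent:
 */
#include <stdint.h>
#include <stdlib.h>

#define SKETCH_ALIGN(x, a)   (((x) + (a) - 1) & ~((size_t)(a) - 1))

struct sketch_hdr_slot {
	void *hdr_buf;           /* CPU address of this descriptor's slice */
	size_t offset;           /* stand-in for the per-slice DMA address */
};

static void *sketch_alloc_headers(struct sketch_hdr_slot *slots,
				  unsigned int count, size_t hdr_len)
{
	size_t slice = SKETCH_ALIGN(hdr_len, 256);  /* keep slices from sharing cache lines */
	uint8_t *block = malloc(slice * count);
	unsigned int i;

	if (!block)
		return NULL;
	for (i = 0; i < count; i++) {
		slots[i].hdr_buf = block + (size_t)i * slice;
		slots[i].offset = (size_t)i * slice;
	}
	return block;                                /* caller frees the single block */
}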
- */ - rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); - rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); - } else { - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); - rx_desc->read.hdr_addr = 0; - } + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + rx_desc->read.hdr_addr = 0; i++; if (i == rx_ring->count) i = 0; @@ -777,10 +861,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, struct iphdr *iph; __sum16 csum; - ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && - (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); - ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && - (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); + ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); skb->ip_summed = CHECKSUM_NONE; @@ -906,13 +990,13 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) } /** - * i40e_clean_rx_irq - Reclaim resources after receive completes + * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split * @rx_ring: rx ring to clean * @budget: how many cleans we're allowed * * Returns true if there's any budget left (e.g. the clean is finished) **/ -static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) +static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; @@ -925,20 +1009,46 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) u8 rx_ptype; u64 qword; - rx_desc = I40E_RX_DESC(rx_ring, i); - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; - - while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) { - union i40e_rx_desc *next_rxd; + do { struct i40e_rx_buffer *rx_bi; struct sk_buff *skb; u16 vlan_tag; + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= I40E_RX_BUFFER_WRITE) { + i40evf_alloc_rx_buffers_ps(rx_ring, cleaned_count); + cleaned_count = 0; + } + + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * DD bit is set. 
+ */ + rmb(); rx_bi = &rx_ring->rx_bi[i]; skb = rx_bi->skb; - prefetch(skb->data); - + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_hdr_len); + if (!skb) + rx_ring->rx_stats.alloc_buff_failed++; + /* initialize queue mapping */ + skb_record_rx_queue(skb, rx_ring->queue_index); + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + } rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> @@ -953,40 +1063,30 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; + prefetch(rx_bi->page); rx_bi->skb = NULL; - - /* This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc until we know the - * STATUS_DD bit is set - */ - rmb(); - - /* Get the header and possibly the whole packet - * If this is an skb from previous receive dma will be 0 - */ - if (rx_bi->dma) { - u16 len; - + cleaned_count++; + if (rx_hbo || rx_sph) { + int len; if (rx_hbo) len = I40E_RX_HDR_SIZE; - else if (rx_sph) - len = rx_header_len; - else if (rx_packet_len) - len = rx_packet_len; /* 1buf/no split found */ else - len = rx_header_len; /* split always mode */ - - skb_put(skb, len); - dma_unmap_single(rx_ring->dev, - rx_bi->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_bi->dma = 0; + len = rx_header_len; + memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); + } else if (skb->len == 0) { + int len; + + len = (rx_packet_len > skb_headlen(skb) ? + skb_headlen(skb) : rx_packet_len); + memcpy(__skb_put(skb, len), + rx_bi->page + rx_bi->page_offset, + len); + rx_bi->page_offset += len; + rx_packet_len -= len; } /* Get the rest of the data if this was a header split */ - if (ring_is_ps_enabled(rx_ring) && rx_packet_len) { - + if (rx_packet_len) { skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, rx_bi->page, rx_bi->page_offset, @@ -1008,22 +1108,16 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) DMA_FROM_DEVICE); rx_bi->page_dma = 0; } - I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd); + I40E_RX_INCREMENT(rx_ring, i); if (unlikely( !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { struct i40e_rx_buffer *next_buffer; next_buffer = &rx_ring->rx_bi[i]; - - if (ring_is_ps_enabled(rx_ring)) { - rx_bi->skb = next_buffer->skb; - rx_bi->dma = next_buffer->dma; - next_buffer->skb = skb; - next_buffer->dma = 0; - } + next_buffer->skb = skb; rx_ring->rx_stats.non_eop_descs++; - goto next_desc; + continue; } /* ERR_MASK will only have valid bits if EOP set */ @@ -1032,7 +1126,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) /* TODO: shouldn't we increment a counter indicating the * drop? */ - goto next_desc; + continue; } skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), @@ -1048,30 +1142,134 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ? 
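/*
 * Illustrative sketch, not part of the patch: both clean-up loops above first
 * test the descriptor-done (DD) bit and only then, after a read barrier
 * (rmb()), read the other descriptor fields, because the device writes the
 * fields before setting DD.  In portable C11 the same ordering can be shown
 * with an acquire load of the status word (a simplified stand-in, not kernel code):
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_DD_BIT  (1u << 0)

struct sketch_rx_desc {
	_Atomic uint32_t status;   /* device sets DD last */
	uint32_t length;           /* valid only once DD is observed */
};

static bool sketch_desc_ready(const struct sketch_rx_desc *d, uint32_t *len)
{
	/* acquire: later reads cannot be reordered before this load */
	if (!(atomic_load_explicit(&d->status, memory_order_acquire) & SKETCH_DD_BIT))
		return false;
	*len = d->length;          /* safe: ordered after the DD check */
	return true;
}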
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; +#ifdef I40E_FCOE + if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { + dev_kfree_skb_any(skb); + continue; + } +#endif + skb_mark_napi_id(skb, &rx_ring->q_vector->napi); i40e_receive_skb(rx_ring, skb, vlan_tag); rx_ring->netdev->last_rx = jiffies; - budget--; -next_desc: rx_desc->wb.qword1.status_error_len = 0; - if (!budget) - break; - cleaned_count++; + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + rx_ring->q_vector->rx.total_packets += total_rx_packets; + rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +/** + * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer + * @rx_ring: rx ring to clean + * @budget: how many cleans we're allowed + * + * Returns number of packets cleaned + **/ +static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); + struct i40e_vsi *vsi = rx_ring->vsi; + union i40e_rx_desc *rx_desc; + u32 rx_error, rx_status; + u16 rx_packet_len; + u8 rx_ptype; + u64 qword; + u16 i; + + do { + struct i40e_rx_buffer *rx_bi; + struct sk_buff *skb; + u16 vlan_tag; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - i40evf_alloc_rx_buffers(rx_ring, cleaned_count); + i40evf_alloc_rx_buffers_1buf(rx_ring, cleaned_count); cleaned_count = 0; } - /* use prefetched values */ - rx_desc = next_rxd; + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; - } + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * DD bit is set. + */ + rmb(); + + rx_bi = &rx_ring->rx_bi[i]; + skb = rx_bi->skb; + prefetch(skb->data); + + rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + + rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> + I40E_RXD_QW1_ERROR_SHIFT; + rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; + rx_bi->skb = NULL; + cleaned_count++; + + /* Get the header and possibly the whole packet + * If this is an skb from previous receive dma will be 0 + */ + skb_put(skb, rx_packet_len); + dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_bi->dma = 0; + + I40E_RX_INCREMENT(rx_ring, i); + + if (unlikely( + !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + rx_ring->rx_stats.non_eop_descs++; + continue; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + dev_kfree_skb_any(skb); + /* TODO: shouldn't we increment a counter indicating the + * drop? 
+ */ + continue; + } + + skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), + i40e_ptype_to_hash(rx_ptype)); + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + + i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); + + vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) + : 0; + i40e_receive_skb(rx_ring, skb, vlan_tag); + + rx_ring->netdev->last_rx = jiffies; + rx_desc->wb.qword1.status_error_len = 0; + } while (likely(total_rx_packets < budget)); - rx_ring->next_to_clean = i; u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; @@ -1079,10 +1277,7 @@ next_desc: rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; - if (cleaned_count) - i40evf_alloc_rx_buffers(rx_ring, cleaned_count); - - return budget > 0; + return total_rx_packets; } /** @@ -1103,6 +1298,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) bool clean_complete = true; bool arm_wb = false; int budget_per_ring; + int cleaned; if (test_bit(__I40E_DOWN, &vsi->state)) { napi_complete(napi); @@ -1122,8 +1318,14 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) */ budget_per_ring = max(budget/q_vector->num_ringpairs, 1); - i40e_for_each_ring(ring, q_vector->rx) - clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); + i40e_for_each_ring(ring, q_vector->rx) { + if (ring_is_ps_enabled(ring)) + cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); + else + cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + /* if we didn't clean as many as budgeted, we must be done */ + clean_complete &= (budget_per_ring != cleaned); + } /* If work not completed, return budget and polling will return */ if (!clean_complete) { @@ -1262,8 +1464,16 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, struct iphdr *this_ip_hdr; u32 network_hdr_len; u8 l4_hdr = 0; + u32 l4_tunnel = 0; if (skb->encapsulation) { + switch (ip_hdr(skb)->protocol) { + case IPPROTO_UDP: + l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; + break; + default: + return; + } network_hdr_len = skb_inner_network_header_len(skb); this_ip_hdr = inner_ip_hdr(skb); this_ipv6_hdr = inner_ipv6_hdr(skb); @@ -1286,8 +1496,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, /* Now set the ctx descriptor fields */ *cd_tunneling |= (skb_network_header_len(skb) >> 2) << - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | - I40E_TXD_CTX_UDP_TUNNELING | + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | + l4_tunnel | ((skb_inner_network_offset(skb) - skb_transport_offset(skb)) >> 1) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index c950a03..1e49bb1 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -96,6 +96,14 @@ enum i40e_dyn_idx_t { /* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ #define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define I40E_RX_INCREMENT(r, i) \ + do { \ + (i)++; \ + if ((i) == (r)->count) \ + i = 0; \ + r->next_to_clean = i; \ + } while (0) + #define I40E_RX_NEXT_DESC(r, i, n) \ do { \ (i)++; \ @@ -151,6 +159,7 @@ struct i40e_tx_buffer { struct i40e_rx_buffer { struct sk_buff *skb; + void *hdr_buf; dma_addr_t dma; struct page *page; dma_addr_t page_dma; @@ -223,8 +232,8 @@ struct i40e_ring { u16 rx_buf_len; u8 dtype; #define I40E_RX_DTYPE_NO_SPLIT 0 -#define I40E_RX_DTYPE_SPLIT_ALWAYS 1 -#define I40E_RX_DTYPE_HEADER_SPLIT 2 +#define I40E_RX_DTYPE_HEADER_SPLIT 1 +#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 u8 hsplit; #define I40E_RX_SPLIT_L2 0x1 #define I40E_RX_SPLIT_IP 0x2 @@ -278,7 +287,9 @@ struct i40e_ring_container { #define i40e_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -void i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); +void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); +void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); +void i40evf_alloc_rx_headers(struct i40e_ring *rxr); netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void i40evf_clean_tx_ring(struct i40e_ring *tx_ring); void i40evf_clean_rx_ring(struct i40e_ring *rx_ring); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 3d0fdaa..eba6e4b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -175,12 +175,12 @@ struct i40e_link_status { u8 an_info; u8 ext_info; u8 loopback; - bool an_enabled; /* is Link Status Event notification to SW enabled */ bool lse_enable; u16 max_frame_size; bool crc_enable; u8 pacing; + u8 requested_speeds; }; struct i40e_phy_info { @@ -1116,7 +1116,7 @@ struct i40e_hw_port_stats { /* Checksum and Shadow RAM pointers */ #define I40E_SR_NVM_CONTROL_WORD 0x00 #define I40E_SR_EMP_MODULE_PTR 0x0F -#define I40E_SR_NVM_IMAGE_VERSION 0x18 +#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 #define I40E_SR_NVM_WAKE_ON_LAN 0x19 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 #define I40E_SR_NVM_EETRACK_LO 0x2D diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h index e0c8208..59f62f0 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -59,31 +59,29 @@ * of the virtchnl_msg structure. */ enum i40e_virtchnl_ops { -/* VF sends req. to pf for the following - * ops. +/* The PF sends status change events to VFs using + * the I40E_VIRTCHNL_OP_EVENT opcode. + * VFs send requests to the PF using the other ops. 
*/ I40E_VIRTCHNL_OP_UNKNOWN = 0, I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ - I40E_VIRTCHNL_OP_RESET_VF, - I40E_VIRTCHNL_OP_GET_VF_RESOURCES, - I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE, - I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE, - I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, - I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, - I40E_VIRTCHNL_OP_ENABLE_QUEUES, - I40E_VIRTCHNL_OP_DISABLE_QUEUES, - I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, - I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, - I40E_VIRTCHNL_OP_ADD_VLAN, - I40E_VIRTCHNL_OP_DEL_VLAN, - I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, - I40E_VIRTCHNL_OP_GET_STATS, - I40E_VIRTCHNL_OP_FCOE, - I40E_VIRTCHNL_OP_CONFIG_RSS, -/* PF sends status change events to vfs using - * the following op. - */ - I40E_VIRTCHNL_OP_EVENT, + I40E_VIRTCHNL_OP_RESET_VF = 2, + I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3, + I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, + I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8, + I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9, + I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10, + I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11, + I40E_VIRTCHNL_OP_ADD_VLAN = 12, + I40E_VIRTCHNL_OP_DEL_VLAN = 13, + I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + I40E_VIRTCHNL_OP_GET_STATS = 15, + I40E_VIRTCHNL_OP_FCOE = 16, + I40E_VIRTCHNL_OP_EVENT = 17, + I40E_VIRTCHNL_OP_CONFIG_RSS = 18, }; /* Virtual channel message descriptor. This overlays the admin queue diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index 69b97ba..b68b731 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -29,7 +29,6 @@ #include <linux/uaccess.h> - struct i40evf_stats { char stat_string[ETH_GSTRING_LEN]; int stat_offset; @@ -180,7 +179,7 @@ static u32 i40evf_get_msglevel(struct net_device *netdev) } /** - * i40evf_get_msglevel - Set debug message level + * i40evf_set_msglevel - Set debug message level * @netdev: network interface device structure * @data: message level * @@ -191,6 +190,8 @@ static void i40evf_set_msglevel(struct net_device *netdev, u32 data) { struct i40evf_adapter *adapter = netdev_priv(netdev); + if (I40E_DEBUG_USER & data) + adapter->hw.debug_mask = data; adapter->msg_enable = data; } @@ -640,12 +641,14 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (!indir) return 0; - for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { - hlut_val = rd32(hw, I40E_VFQF_HLUT(i)); - indir[j++] = hlut_val & 0xff; - indir[j++] = (hlut_val >> 8) & 0xff; - indir[j++] = (hlut_val >> 16) & 0xff; - indir[j++] = (hlut_val >> 24) & 0xff; + if (indir) { + for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { + hlut_val = rd32(hw, I40E_VFQF_HLUT(i)); + indir[j++] = hlut_val & 0xff; + indir[j++] = (hlut_val >> 8) & 0xff; + indir[j++] = (hlut_val >> 16) & 0xff; + indir[j++] = (hlut_val >> 24) & 0xff; + } } return 0; } diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 8d8c201..812b120 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = "Intel(R) XL710/X710 Virtual Function Network Driver"; -#define DRV_VERSION "1.2.0" +#define DRV_VERSION "1.2.25" const char i40evf_driver_version[] = DRV_VERSION; static const char i40evf_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation."; @@ -244,6 +244,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask) if (mask & (1 << (i - 1))) { wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), I40E_VFINT_DYN_CTLN1_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); } } @@ -263,6 +264,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) if (mask & 1) { dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01); dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | I40E_VFINT_DYN_CTLN_CLEARPBA_MASK; wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl); } @@ -270,6 +272,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) if (mask & (1 << i)) { dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1)); dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | I40E_VFINT_DYN_CTLN_CLEARPBA_MASK; wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl); } @@ -524,7 +527,8 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter) int err; snprintf(adapter->misc_vector_name, - sizeof(adapter->misc_vector_name) - 1, "i40evf:mbx"); + sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx", + dev_name(&adapter->pdev->dev)); err = request_irq(adapter->msix_entries[0].vector, &i40evf_msix_aq, 0, adapter->misc_vector_name, netdev); @@ -761,13 +765,17 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, u8 *macaddr) { struct i40evf_mac_filter *f; + int count = 50; if (!macaddr) return NULL; while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section)) + &adapter->crit_section)) { udelay(1); + if (--count == 0) + return NULL; + } f = i40evf_find_filter(adapter, macaddr); if (!f) { @@ -828,6 +836,7 @@ static void i40evf_set_rx_mode(struct net_device *netdev) struct i40evf_mac_filter *f, *ftmp; struct netdev_hw_addr *uca; struct netdev_hw_addr *mca; + int count = 50; /* add addr if not already in the filter list */ netdev_for_each_uc_addr(uca, netdev) { @@ -838,8 +847,14 @@ static void i40evf_set_rx_mode(struct net_device *netdev) } while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section)) + &adapter->crit_section)) { udelay(1); + if (--count == 0) { + dev_err(&adapter->pdev->dev, + "Failed to get lock in %s\n", __func__); + return; + } + } /* remove filter if not in netdev list */ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { bool found = false; @@ -920,7 +935,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter) for (i = 0; i < adapter->num_active_queues; i++) { struct i40e_ring *ring = adapter->rx_rings[i]; - i40evf_alloc_rx_buffers(ring, ring->count); + i40evf_alloc_rx_buffers_1buf(ring, ring->count); ring->next_to_use = ring->count - 1; writel(ring->next_to_use, ring->tail); } @@ -959,6 +974,7 @@ void i40evf_down(struct i40evf_adapter *adapter) usleep_range(500, 1000); i40evf_irq_disable(adapter); + i40evf_napi_disable_all(adapter); /* remove all MAC filters */ list_for_each_entry(f, &adapter->mac_filter_list, list) { @@ -985,8 +1001,6 @@ void 
i40evf_down(struct i40evf_adapter *adapter) netif_tx_stop_all_queues(netdev); - i40evf_napi_disable_all(adapter); - msleep(20); netif_carrier_off(netdev); @@ -1481,9 +1495,11 @@ static void i40evf_reset_task(struct work_struct *work) struct i40evf_adapter *adapter = container_of(work, struct i40evf_adapter, reset_task); + struct net_device *netdev = adapter->netdev; struct i40e_hw *hw = &adapter->hw; - int i = 0, err; + struct i40evf_mac_filter *f; uint32_t rstat_val; + int i = 0, err; while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section)) @@ -1528,7 +1544,11 @@ static void i40evf_reset_task(struct work_struct *work) if (netif_running(adapter->netdev)) { set_bit(__I40E_DOWN, &adapter->vsi.state); - i40evf_down(adapter); + i40evf_irq_disable(adapter); + i40evf_napi_disable_all(adapter); + netif_tx_disable(netdev); + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); i40evf_free_traffic_irqs(adapter); i40evf_free_all_tx_resources(adapter); i40evf_free_all_rx_resources(adapter); @@ -1560,22 +1580,38 @@ static void i40evf_reset_task(struct work_struct *work) continue_reset: adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; - i40evf_down(adapter); + i40evf_irq_disable(adapter); + + if (netif_running(adapter->netdev)) { + i40evf_napi_disable_all(adapter); + netif_tx_disable(netdev); + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + } + adapter->state = __I40EVF_RESETTING; /* kill and reinit the admin queue */ if (i40evf_shutdown_adminq(hw)) - dev_warn(&adapter->pdev->dev, - "%s: Failed to destroy the Admin Queue resources\n", - __func__); + dev_warn(&adapter->pdev->dev, "Failed to shut down adminq\n"); + adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; err = i40evf_init_adminq(hw); if (err) - dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n", - __func__, err); + dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", + err); - adapter->aq_pending = 0; - adapter->aq_required = 0; i40evf_map_queues(adapter); + + /* re-add all MAC filters */ + list_for_each_entry(f, &adapter->mac_filter_list, list) { + f->add = true; + } + /* re-add all VLAN filters */ + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + f->add = true; + } + adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); mod_timer(&adapter->watchdog_timer, jiffies + 2); @@ -1977,7 +2013,7 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw) * * This task completes the work that was begun in probe. Due to the nature * of VF-PF communications, we may need to wait tens of milliseconds to get - * reponses back from the PF. Rather than busy-wait in probe and bog down the + * responses back from the PF. Rather than busy-wait in probe and bog down the * whole system, we'll do it in a task so we can sleep. * This task only runs during driver init. Once we've established * communications with the PF driver and set up our netdev, the watchdog @@ -2368,7 +2404,7 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state) } /** - * i40evf_resume - Power managment resume routine + * i40evf_resume - Power management resume routine * @pdev: PCI device information struct * * Called when the system (VM) is resumed from sleep/suspend. 
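Two behavioural points in the i40evf hunks above are easy to miss. First, the Rx clean routines now return the number of packets cleaned instead of a bool, and i40evf_napi_poll() treats a ring that consumed its whole per-ring budget (cleaned == budget_per_ring) as "not complete", so NAPI keeps polling it. Second, the paths that take the __I40EVF_IN_CRITICAL_TASK bit no longer spin indefinitely: they retry at most 50 times with a 1 us delay and then give up (returning NULL from i40evf_add_filter(), or logging "Failed to get lock" and returning from i40evf_set_rx_mode()). A minimal sketch of that bounded-acquire pattern — my_try_crit_section and its bit argument are illustrative names, not driver symbols:

	#include <linux/types.h>
	#include <linux/bitops.h>
	#include <linux/delay.h>

	/* Bounded busy-wait on an atomic flag bit: retry ~50 times, then fail
	 * instead of hanging the caller if the current owner never lets go.
	 * (Illustrative helper, not part of the driver.)
	 */
	static bool my_try_crit_section(int bit, unsigned long *state)
	{
		int count = 50;

		while (test_and_set_bit(bit, state)) {
			udelay(1);
			if (--count == 0)
				return false;	/* caller decides how to back off */
		}
		return true;	/* now owner; clear_bit(bit, state) when done */
	}

The reset task, for its part, now re-marks every MAC and VLAN filter (f->add = true) and sets I40EVF_FLAG_AQ_ADD_MAC_FILTER / I40EVF_FLAG_AQ_ADD_VLAN_FILTER in aq_required, so the filter table is replayed to the PF once the admin queue has been reinitialized.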
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index d20fc8e..52d01b8 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -30,7 +30,7 @@ * * Neither the 82576 nor the 82580 offer registers wide enough to hold * nanoseconds time values for very long. For the 82580, SYSTIM always - * counts nanoseconds, but the upper 24 bits are not availible. The + * counts nanoseconds, but the upper 24 bits are not available. The * frequency is adjusted by changing the 32 bit fractional nanoseconds * register, TIMINCA. * @@ -358,7 +358,7 @@ static int igb_ptp_settime_i210(struct ptp_clock_info *ptp, static void igb_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext) { u32 *ptr = pin < 2 ? ctrl : ctrl_ext; - u32 mask[IGB_N_SDP] = { + static const u32 mask[IGB_N_SDP] = { E1000_CTRL_SDP0_DIR, E1000_CTRL_SDP1_DIR, E1000_CTRL_EXT_SDP2_DIR, @@ -373,16 +373,16 @@ static void igb_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext) static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin) { - struct e1000_hw *hw = &igb->hw; - u32 aux0_sel_sdp[IGB_N_SDP] = { + static const u32 aux0_sel_sdp[IGB_N_SDP] = { AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3, }; - u32 aux1_sel_sdp[IGB_N_SDP] = { + static const u32 aux1_sel_sdp[IGB_N_SDP] = { AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3, }; - u32 ts_sdp_en[IGB_N_SDP] = { + static const u32 ts_sdp_en[IGB_N_SDP] = { TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN, }; + struct e1000_hw *hw = &igb->hw; u32 ctrl, ctrl_ext, tssdp = 0; ctrl = rd32(E1000_CTRL); @@ -409,28 +409,28 @@ static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin) static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin) { - struct e1000_hw *hw = &igb->hw; - u32 aux0_sel_sdp[IGB_N_SDP] = { + static const u32 aux0_sel_sdp[IGB_N_SDP] = { AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3, }; - u32 aux1_sel_sdp[IGB_N_SDP] = { + static const u32 aux1_sel_sdp[IGB_N_SDP] = { AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3, }; - u32 ts_sdp_en[IGB_N_SDP] = { + static const u32 ts_sdp_en[IGB_N_SDP] = { TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN, }; - u32 ts_sdp_sel_tt0[IGB_N_SDP] = { + static const u32 ts_sdp_sel_tt0[IGB_N_SDP] = { TS_SDP0_SEL_TT0, TS_SDP1_SEL_TT0, TS_SDP2_SEL_TT0, TS_SDP3_SEL_TT0, }; - u32 ts_sdp_sel_tt1[IGB_N_SDP] = { + static const u32 ts_sdp_sel_tt1[IGB_N_SDP] = { TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1, TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1, }; - u32 ts_sdp_sel_clr[IGB_N_SDP] = { + static const u32 ts_sdp_sel_clr[IGB_N_SDP] = { TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1, TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1, }; + struct e1000_hw *hw = &igb->hw; u32 ctrl, ctrl_ext, tssdp = 0; ctrl = rd32(E1000_CTRL); @@ -468,7 +468,7 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh; unsigned long flags; struct timespec ts; - int pin; + int pin = -1; s64 ns; switch (rq->type) { diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h index d9fa999..ae3f283 100644 --- a/drivers/net/ethernet/intel/igbvf/defines.h +++ b/drivers/net/ethernet/intel/igbvf/defines.h @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -29,94 +28,93 @@ #define _E1000_DEFINES_H_ /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ -#define REQ_TX_DESCRIPTOR_MULTIPLE 8 -#define REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 /* IVAR valid bit */ -#define E1000_IVAR_VALID 0x80 +#define E1000_IVAR_VALID 0x80 /* Receive Descriptor bit definitions */ -#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ -#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ -#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ -#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ -#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ -#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ -#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ -#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ - -#define E1000_RXDEXT_STATERR_LB 0x00040000 -#define E1000_RXDEXT_STATERR_CE 0x01000000 -#define E1000_RXDEXT_STATERR_SE 0x02000000 -#define E1000_RXDEXT_STATERR_SEQ 0x04000000 -#define E1000_RXDEXT_STATERR_CXE 0x10000000 -#define E1000_RXDEXT_STATERR_TCPE 0x20000000 -#define E1000_RXDEXT_STATERR_IPE 0x40000000 -#define E1000_RXDEXT_STATERR_RXE 0x80000000 - +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ + +#define E1000_RXDEXT_STATERR_LB 0x00040000 +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 /* Same mask, but for extended and packet split descriptors */ #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ - E1000_RXDEXT_STATERR_CE | \ - E1000_RXDEXT_STATERR_SE | \ - E1000_RXDEXT_STATERR_SEQ | \ - E1000_RXDEXT_STATERR_CXE | \ - E1000_RXDEXT_STATERR_RXE) + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) /* Device Control */ -#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ /* Device Status */ -#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ -#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ -#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ -#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ -#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ -#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ - -#define SPEED_10 10 -#define SPEED_100 100 -#define SPEED_1000 1000 -#define HALF_DUPLEX 1 -#define FULL_DUPLEX 2 +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_TXOFF 0x00000010 /* 
transmission paused */ +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 /* Transmit Descriptor bit definitions */ -#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ -#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ -#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ -#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define E1000_TXD_STAT_DD 0x00000001 /* Desc Done */ -#define MAX_JUMBO_FRAME_SIZE 0x3F00 +#define MAX_JUMBO_FRAME_SIZE 0x3F00 /* 802.1q VLAN Packet Size */ -#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ /* Error Codes */ -#define E1000_SUCCESS 0 -#define E1000_ERR_CONFIG 3 -#define E1000_ERR_MAC_INIT 5 -#define E1000_ERR_MBX 15 +#define E1000_SUCCESS 0 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_MBX 15 /* SRRCTL bit definitions */ -#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ -#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 -#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ -#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 -#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 -#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 -#define E1000_SRRCTL_DROP_EN 0x80000000 +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 -#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F -#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 +#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 /* Additional Descriptor Control definitions */ -#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ -#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Que */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Que */ /* Direct Cache Access (DCA) definitions */ -#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ -#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ +#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ #endif /* _E1000_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index 2178f87..c6996fe 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -36,7 +35,6 @@ #include "igbvf.h" #include <linux/if_vlan.h> - struct igbvf_stats { char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; @@ -74,7 +72,7 @@ static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = { #define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test) static int igbvf_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) + struct ethtool_cmd *ecmd) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -111,18 +109,18 @@ static int igbvf_get_settings(struct net_device *netdev, } static int igbvf_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) + struct ethtool_cmd *ecmd) { return -EOPNOTSUPP; } static void igbvf_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) + struct ethtool_pauseparam *pause) { } static int igbvf_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) + struct ethtool_pauseparam *pause) { return -EOPNOTSUPP; } @@ -130,12 +128,14 @@ static int igbvf_set_pauseparam(struct net_device *netdev, static u32 igbvf_get_msglevel(struct net_device *netdev) { struct igbvf_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; } static void igbvf_set_msglevel(struct net_device *netdev, u32 data) { struct igbvf_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; } @@ -146,7 +146,7 @@ static int igbvf_get_regs_len(struct net_device *netdev) } static void igbvf_get_regs(struct net_device *netdev, - struct ethtool_regs *regs, void *p) + struct ethtool_regs *regs, void *p) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -175,19 +175,19 @@ static int igbvf_get_eeprom_len(struct net_device *netdev) } static int igbvf_get_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) + struct ethtool_eeprom *eeprom, u8 *bytes) { return -EOPNOTSUPP; } static int igbvf_set_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) + struct ethtool_eeprom *eeprom, u8 *bytes) { return -EOPNOTSUPP; } static void igbvf_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) + struct ethtool_drvinfo *drvinfo) { struct igbvf_adapter *adapter = netdev_priv(netdev); @@ -201,7 +201,7 @@ static void igbvf_get_drvinfo(struct net_device *netdev, } static void igbvf_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct igbvf_ring *tx_ring = adapter->tx_ring; @@ -214,7 +214,7 @@ static void igbvf_get_ringparam(struct net_device *netdev, } static int igbvf_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct igbvf_ring *temp_ring; @@ -224,12 +224,12 @@ static int igbvf_set_ringparam(struct net_device *netdev, if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - new_rx_count = max(ring->rx_pending, (u32)IGBVF_MIN_RXD); - new_rx_count = min(new_rx_count, (u32)IGBVF_MAX_RXD); + new_rx_count = max_t(u32, ring->rx_pending, IGBVF_MIN_RXD); + new_rx_count = min_t(u32, new_rx_count, IGBVF_MAX_RXD); new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); - new_tx_count = max(ring->tx_pending, (u32)IGBVF_MIN_TXD); - new_tx_count = 
min(new_tx_count, (u32)IGBVF_MAX_TXD); + new_tx_count = max_t(u32, ring->tx_pending, IGBVF_MIN_TXD); + new_tx_count = min_t(u32, new_tx_count, IGBVF_MAX_TXD); new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); if ((new_tx_count == adapter->tx_ring->count) && @@ -239,7 +239,7 @@ static int igbvf_set_ringparam(struct net_device *netdev, } while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); if (!netif_running(adapter->netdev)) { adapter->tx_ring->count = new_tx_count; @@ -255,10 +255,9 @@ static int igbvf_set_ringparam(struct net_device *netdev, igbvf_down(adapter); - /* - * We can't just free everything and then setup again, + /* We can't just free everything and then setup again, * because the ISRs in MSI-X mode get passed pointers - * to the tx and rx ring structs. + * to the Tx and Rx ring structs. */ if (new_tx_count != adapter->tx_ring->count) { memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring)); @@ -283,7 +282,7 @@ static int igbvf_set_ringparam(struct net_device *netdev, igbvf_free_rx_resources(adapter->rx_ring); - memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring)); + memcpy(adapter->rx_ring, temp_ring, sizeof(struct igbvf_ring)); } err_setup: igbvf_up(adapter); @@ -307,14 +306,13 @@ static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data) } static void igbvf_diag_test(struct net_device *netdev, - struct ethtool_test *eth_test, u64 *data) + struct ethtool_test *eth_test, u64 *data) { struct igbvf_adapter *adapter = netdev_priv(netdev); set_bit(__IGBVF_TESTING, &adapter->state); - /* - * Link test performed before hardware reset so autoneg doesn't + /* Link test performed before hardware reset so autoneg doesn't * interfere with test result */ if (igbvf_link_test(adapter, &data[0])) @@ -325,20 +323,20 @@ static void igbvf_diag_test(struct net_device *netdev, } static void igbvf_get_wol(struct net_device *netdev, - struct ethtool_wolinfo *wol) + struct ethtool_wolinfo *wol) { wol->supported = 0; wol->wolopts = 0; } static int igbvf_set_wol(struct net_device *netdev, - struct ethtool_wolinfo *wol) + struct ethtool_wolinfo *wol) { return -EOPNOTSUPP; } static int igbvf_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec) { struct igbvf_adapter *adapter = netdev_priv(netdev); @@ -351,13 +349,13 @@ static int igbvf_get_coalesce(struct net_device *netdev, } static int igbvf_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) && - (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) { + (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) { adapter->current_itr = ec->rx_coalesce_usecs << 2; adapter->requested_itr = 1000000000 / (adapter->current_itr * 256); @@ -366,8 +364,7 @@ static int igbvf_set_coalesce(struct net_device *netdev, adapter->current_itr = IGBVF_START_ITR; adapter->requested_itr = ec->rx_coalesce_usecs; } else if (ec->rx_coalesce_usecs == 0) { - /* - * The user's desire is to turn off interrupt throttling + /* The user's desire is to turn off interrupt throttling * altogether, but due to HW limitations, we can't do that. 
* Instead we set a very small value in EITR, which would * allow ~967k interrupts per second, but allow the adapter's @@ -376,8 +373,9 @@ static int igbvf_set_coalesce(struct net_device *netdev, adapter->current_itr = 4; adapter->requested_itr = 1000000000 / (adapter->current_itr * 256); - } else + } else { return -EINVAL; + } writel(adapter->current_itr, hw->hw_addr + adapter->rx_ring->itr_register); @@ -388,15 +386,15 @@ static int igbvf_set_coalesce(struct net_device *netdev, static int igbvf_nway_reset(struct net_device *netdev) { struct igbvf_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) igbvf_reinit_locked(adapter); return 0; } - static void igbvf_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, - u64 *data) + struct ethtool_stats *stats, + u64 *data) { struct igbvf_adapter *adapter = netdev_priv(netdev); int i; @@ -404,19 +402,18 @@ static void igbvf_get_ethtool_stats(struct net_device *netdev, igbvf_update_stats(adapter); for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) { char *p = (char *)adapter + - igbvf_gstrings_stats[i].stat_offset; + igbvf_gstrings_stats[i].stat_offset; char *b = (char *)adapter + - igbvf_gstrings_stats[i].base_stat_offset; + igbvf_gstrings_stats[i].base_stat_offset; data[i] = ((igbvf_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) : - (*(u32 *)p - *(u32 *)b)); + sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) : + (*(u32 *)p - *(u32 *)b)); } - } static int igbvf_get_sset_count(struct net_device *dev, int stringset) { - switch(stringset) { + switch (stringset) { case ETH_SS_TEST: return IGBVF_TEST_LEN; case ETH_SS_STATS: @@ -427,7 +424,7 @@ static int igbvf_get_sset_count(struct net_device *dev, int stringset) } static void igbvf_get_strings(struct net_device *netdev, u32 stringset, - u8 *data) + u8 *data) { u8 *p = data; int i; diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h index 7d6a25c..f166baa 100644 --- a/drivers/net/ethernet/intel/igbvf/igbvf.h +++ b/drivers/net/ethernet/intel/igbvf/igbvf.h @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
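In igbvf_set_coalesce() above, the user's rx-usecs value is converted into ITR register units of roughly a quarter microsecond: current_itr = rx_coalesce_usecs << 2, and the corresponding interrupt rate is stored as requested_itr = 10^9 / (current_itr * 256), i.e. each ITR unit is treated as 256 ns. Worked through at the two validation bounds: rx_coalesce_usecs = 10 (IGBVF_MIN_ITR_USECS) gives current_itr = 40 and about 10^9 / (40 * 256) ≈ 97,656 interrupts/s, matching the "100000 irq/sec" note beside that define, while rx_coalesce_usecs = 10000 (IGBVF_MAX_ITR_USECS) gives roughly 98 interrupts/s, the "100 irq/sec" end of the range.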
@@ -43,10 +42,10 @@ struct igbvf_info; struct igbvf_adapter; /* Interrupt defines */ -#define IGBVF_START_ITR 488 /* ~8000 ints/sec */ -#define IGBVF_4K_ITR 980 -#define IGBVF_20K_ITR 196 -#define IGBVF_70K_ITR 56 +#define IGBVF_START_ITR 488 /* ~8000 ints/sec */ +#define IGBVF_4K_ITR 980 +#define IGBVF_20K_ITR 196 +#define IGBVF_70K_ITR 56 enum latency_range { lowest_latency = 0, @@ -55,56 +54,55 @@ enum latency_range { latency_invalid = 255 }; - /* Interrupt modes, as used by the IntMode parameter */ -#define IGBVF_INT_MODE_LEGACY 0 -#define IGBVF_INT_MODE_MSI 1 -#define IGBVF_INT_MODE_MSIX 2 +#define IGBVF_INT_MODE_LEGACY 0 +#define IGBVF_INT_MODE_MSI 1 +#define IGBVF_INT_MODE_MSIX 2 /* Tx/Rx descriptor defines */ -#define IGBVF_DEFAULT_TXD 256 -#define IGBVF_MAX_TXD 4096 -#define IGBVF_MIN_TXD 80 +#define IGBVF_DEFAULT_TXD 256 +#define IGBVF_MAX_TXD 4096 +#define IGBVF_MIN_TXD 80 -#define IGBVF_DEFAULT_RXD 256 -#define IGBVF_MAX_RXD 4096 -#define IGBVF_MIN_RXD 80 +#define IGBVF_DEFAULT_RXD 256 +#define IGBVF_MAX_RXD 4096 +#define IGBVF_MIN_RXD 80 -#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */ -#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */ +#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */ /* RX descriptor control thresholds. * PTHRESH - MAC will consider prefetch if it has fewer than this number of - * descriptors available in its onboard memory. - * Setting this to 0 disables RX descriptor prefetch. + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. * HTHRESH - MAC will only prefetch if there are at least this many descriptors - * available in host memory. - * If PTHRESH is 0, this should also be 0. + * available in host memory. + * If PTHRESH is 0, this should also be 0. * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back - * descriptors until either it has this many to write back, or the - * ITR timer expires. + * descriptors until either it has this many to write back, or the + * ITR timer expires. */ -#define IGBVF_RX_PTHRESH 16 -#define IGBVF_RX_HTHRESH 8 -#define IGBVF_RX_WTHRESH 1 +#define IGBVF_RX_PTHRESH 16 +#define IGBVF_RX_HTHRESH 8 +#define IGBVF_RX_WTHRESH 1 /* this is the size past which hardware will drop packets when setting LPE=0 */ -#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 -#define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */ +#define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */ /* How many Tx Descriptors do we need to call netif_wake_queue ? */ -#define IGBVF_TX_QUEUE_WAKE 32 +#define IGBVF_TX_QUEUE_WAKE 32 /* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ -#define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ -#define AUTO_ALL_MODES 0 -#define IGBVF_EEPROM_APME 0x0400 +#define AUTO_ALL_MODES 0 +#define IGBVF_EEPROM_APME 0x0400 -#define IGBVF_MNG_VLAN_NONE (-1) +#define IGBVF_MNG_VLAN_NONE (-1) /* Number of packet split data buffers (not including the header buffer) */ -#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) enum igbvf_boards { board_vf, @@ -116,8 +114,7 @@ struct igbvf_queue_stats { u64 bytes; }; -/* - * wrappers around a pointer to a socket buffer, +/* wrappers around a pointer to a socket buffer, * so a DMA handle can be stored along with the buffer */ struct igbvf_buffer { @@ -148,10 +145,10 @@ union igbvf_desc { struct igbvf_ring { struct igbvf_adapter *adapter; /* backlink */ - union igbvf_desc *desc; /* pointer to ring memory */ - dma_addr_t dma; /* phys address of ring */ - unsigned int size; /* length of ring in bytes */ - unsigned int count; /* number of desc. in ring */ + union igbvf_desc *desc; /* pointer to ring memory */ + dma_addr_t dma; /* phys address of ring */ + unsigned int size; /* length of ring in bytes */ + unsigned int count; /* number of desc. in ring */ u16 next_to_use; u16 next_to_clean; @@ -202,9 +199,7 @@ struct igbvf_adapter { u32 requested_itr; /* ints/sec or adaptive */ u32 current_itr; /* Actual ITR register value, not ints/sec */ - /* - * Tx - */ + /* Tx */ struct igbvf_ring *tx_ring /* One per active queue */ ____cacheline_aligned_in_smp; @@ -226,9 +221,7 @@ struct igbvf_adapter { u32 tx_fifo_size; u32 tx_dma_failed; - /* - * Rx - */ + /* Rx */ struct igbvf_ring *rx_ring; u32 rx_int_delay; @@ -249,7 +242,7 @@ struct igbvf_adapter { struct net_device *netdev; struct pci_dev *pdev; struct net_device_stats net_stats; - spinlock_t stats_lock; /* prevent concurrent stats updates */ + spinlock_t stats_lock; /* prevent concurrent stats updates */ /* structs defined in e1000_hw.h */ struct e1000_hw hw; @@ -286,16 +279,16 @@ struct igbvf_adapter { }; struct igbvf_info { - enum e1000_mac_type mac; - unsigned int flags; - u32 pba; - void (*init_ops)(struct e1000_hw *); - s32 (*get_variants)(struct igbvf_adapter *); + enum e1000_mac_type mac; + unsigned int flags; + u32 pba; + void (*init_ops)(struct e1000_hw *); + s32 (*get_variants)(struct igbvf_adapter *); }; /* hardware capability, feature, and workaround flags */ -#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0) -#define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1) +#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0) +#define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1) #define IGBVF_RX_DESC_ADV(R, i) \ (&((((R).desc))[i].rx_desc)) #define IGBVF_TX_DESC_ADV(R, i) \ diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c index b4b65bc..7b6cb4c 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.c +++ b/drivers/net/ethernet/intel/igbvf/mbx.c @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
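Back in igbvf.h, the Rx descriptor threshold defines read together as a concrete policy: with PTHRESH = 16, HTHRESH = 8 and WTHRESH = 1, the MAC prefetches more Rx descriptors only when its on-chip cache has dropped below 16 of them, and only if at least 8 are sitting ready in host memory; completed descriptors are written back as soon as a single one is pending (or when the ITR timer expires), so the driver sees completions promptly rather than in larger write-back batches.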
@@ -54,10 +53,10 @@ out: } /** - * e1000_poll_for_ack - Wait for message acknowledgement + * e1000_poll_for_ack - Wait for message acknowledgment * @hw: pointer to the HW structure * - * returns SUCCESS if it successfully received a message acknowledgement + * returns SUCCESS if it successfully received a message acknowledgment **/ static s32 e1000_poll_for_ack(struct e1000_hw *hw) { @@ -218,7 +217,7 @@ static s32 e1000_check_for_rst_vf(struct e1000_hw *hw) s32 ret_val = -E1000_ERR_MBX; if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD | - E1000_V2PMAILBOX_RSTI))) { + E1000_V2PMAILBOX_RSTI))) { ret_val = E1000_SUCCESS; hw->mbx.stats.rsts++; } @@ -239,7 +238,7 @@ static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw) /* Take ownership of the buffer */ ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); - /* reserve mailbox for vf use */ + /* reserve mailbox for VF use */ if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) ret_val = E1000_SUCCESS; @@ -283,7 +282,7 @@ out_no_write: } /** - * e1000_read_mbx_vf - Reads a message from the inbox intended for vf + * e1000_read_mbx_vf - Reads a message from the inbox intended for VF * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer @@ -315,17 +314,18 @@ out_no_read: } /** - * e1000_init_mbx_params_vf - set initial values for vf mailbox + * e1000_init_mbx_params_vf - set initial values for VF mailbox * @hw: pointer to the HW structure * - * Initializes the hw->mbx struct to correct values for vf mailbox + * Initializes the hw->mbx struct to correct values for VF mailbox */ s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) { struct e1000_mbx_info *mbx = &hw->mbx; /* start mailbox as timed out and let the reset_hw call set the timeout - * value to being communications */ + * value to being communications + */ mbx->timeout = 0; mbx->usec_delay = E1000_VF_MBX_INIT_DELAY; @@ -347,4 +347,3 @@ s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) return E1000_SUCCESS; } - diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h index 24370bc..f800bf8 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.h +++ b/drivers/net/ethernet/intel/igbvf/mbx.h @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
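In mbx.c above, e1000_obtain_mbx_lock_vf() is the VF side of the mailbox ownership handshake: the VF writes the VFU ("VF owns the mailbox buffer") bit into V2PMAILBOX(0) and then reads the register back, and the claim is treated as successful only if VFU is still set on read-back — otherwise the PF presumably holds the buffer and the caller retries later. Condensed from the hunks above into one readable function (same driver helpers; my_obtain_mbx_lock_vf is just an illustrative name):

	static s32 my_obtain_mbx_lock_vf(struct e1000_hw *hw)
	{
		s32 ret_val = -E1000_ERR_MBX;

		/* request ownership by setting VFU ... */
		ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);

		/* ... and confirm it took effect; if VFU did not stick, the
		 * error value set above is returned and the caller may retry
		 */
		if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU)
			ret_val = E1000_SUCCESS;

		return ret_val;
	}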
@@ -30,44 +29,44 @@ #include "vf.h" -#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ -#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ -#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ -#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ -#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ -#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ #define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ -#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ +#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ /* If it's a E1000_VF_* msg then it originates in the VF and is sent to the * PF. The reverse is true if it is E1000_PF_*. * Message ACK's are the value or'd with 0xF0000000 */ -#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with - * this are the ACK */ -#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with - * this are the NACK */ -#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still - clear to send requests */ +/* Messages below or'd with this are the ACK */ +#define E1000_VT_MSGTYPE_ACK 0x80000000 +/* Messages below or'd with this are the NACK */ +#define E1000_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define E1000_VT_MSGTYPE_CTS 0x20000000 /* We have a total wait time of 1s for vf mailbox posted messages */ -#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mailbox timeout */ -#define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */ +#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mbx timeout */ +#define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */ -#define E1000_VT_MSGINFO_SHIFT 16 +#define E1000_VT_MSGINFO_SHIFT 16 /* bits 23:16 are used for exra info for certain messages */ -#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) -#define E1000_VF_RESET 0x01 /* VF requests reset */ -#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ -#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ -#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ -#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define E1000_VF_RESET 0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ +#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ -#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ +#define E1000_PF_CONTROL_MSG 
0x0100 /* PF control message */ void e1000_init_mbx_ops_generic(struct e1000_hw *hw); s32 e1000_init_mbx_params_vf(struct e1000_hw *); diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index ebf9d4a..c17ea4b 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -66,26 +65,27 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *); static void igbvf_reset_interrupt_capability(struct igbvf_adapter *); static struct igbvf_info igbvf_vf_info = { - .mac = e1000_vfadapt, - .flags = 0, - .pba = 10, - .init_ops = e1000_init_function_pointers_vf, + .mac = e1000_vfadapt, + .flags = 0, + .pba = 10, + .init_ops = e1000_init_function_pointers_vf, }; static struct igbvf_info igbvf_i350_vf_info = { - .mac = e1000_vfadapt_i350, - .flags = 0, - .pba = 10, - .init_ops = e1000_init_function_pointers_vf, + .mac = e1000_vfadapt_i350, + .flags = 0, + .pba = 10, + .init_ops = e1000_init_function_pointers_vf, }; static const struct igbvf_info *igbvf_info_tbl[] = { - [board_vf] = &igbvf_vf_info, - [board_i350_vf] = &igbvf_i350_vf_info, + [board_vf] = &igbvf_vf_info, + [board_i350_vf] = &igbvf_i350_vf_info, }; /** * igbvf_desc_unused - calculate if we have unused descriptors + * @rx_ring: address of receive ring structure **/ static int igbvf_desc_unused(struct igbvf_ring *ring) { @@ -103,9 +103,9 @@ static int igbvf_desc_unused(struct igbvf_ring *ring) * @skb: pointer to sk_buff to be indicated to stack **/ static void igbvf_receive_skb(struct igbvf_adapter *adapter, - struct net_device *netdev, - struct sk_buff *skb, - u32 status, u16 vlan) + struct net_device *netdev, + struct sk_buff *skb, + u32 status, u16 vlan) { u16 vid; @@ -123,7 +123,7 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter, } static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, - u32 status_err, struct sk_buff *skb) + u32 status_err, struct sk_buff *skb) { skb_checksum_none_assert(skb); @@ -153,7 +153,7 @@ static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, * @cleaned_count: number of buffers to repopulate **/ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, - int cleaned_count) + int cleaned_count) { struct igbvf_adapter *adapter = rx_ring->adapter; struct net_device *netdev = adapter->netdev; @@ -188,8 +188,8 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, } buffer_info->page_dma = dma_map_page(&pdev->dev, buffer_info->page, - buffer_info->page_offset, - PAGE_SIZE / 2, + buffer_info->page_offset, + PAGE_SIZE / 2, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->page_dma)) { @@ -209,7 +209,7 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, buffer_info->skb = skb; buffer_info->dma = dma_map_single(&pdev->dev, skb->data, - bufsz, + bufsz, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { dev_kfree_skb(buffer_info->skb); @@ -219,14 +219,14 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, } } /* Refresh the desc even if buffer_addrs didn't change because - * each write-back erases this info. 
*/ + * each write-back erases this info. + */ if (adapter->rx_ps_hdr_size) { rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->page_dma); rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); } else { - rx_desc->read.pkt_addr = - cpu_to_le64(buffer_info->dma); + rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma); rx_desc->read.hdr_addr = 0; } @@ -247,7 +247,8 @@ no_buffers: /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, - * such as IA-64). */ + * such as IA-64). + */ wmb(); writel(i, adapter->hw.hw_addr + rx_ring->tail); } @@ -261,7 +262,7 @@ no_buffers: * is no guarantee that everything was cleaned **/ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, - int *work_done, int work_to_do) + int *work_done, int work_to_do) { struct igbvf_ring *rx_ring = adapter->rx_ring; struct net_device *netdev = adapter->netdev; @@ -292,8 +293,9 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, * that case, it fills the header buffer and spills the rest * into the page. */ - hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) & - E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; + hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) + & E1000_RXDADV_HDRBUFLEN_MASK) >> + E1000_RXDADV_HDRBUFLEN_SHIFT; if (hlen > adapter->rx_ps_hdr_size) hlen = adapter->rx_ps_hdr_size; @@ -306,7 +308,7 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, buffer_info->skb = NULL; if (!adapter->rx_ps_hdr_size) { dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_buffer_len, + adapter->rx_buffer_len, DMA_FROM_DEVICE); buffer_info->dma = 0; skb_put(skb, length); @@ -315,21 +317,21 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, if (!skb_shinfo(skb)->nr_frags) { dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_ps_hdr_size, + adapter->rx_ps_hdr_size, DMA_FROM_DEVICE); skb_put(skb, hlen); } if (length) { dma_unmap_page(&pdev->dev, buffer_info->page_dma, - PAGE_SIZE / 2, + PAGE_SIZE / 2, DMA_FROM_DEVICE); buffer_info->page_dma = 0; skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - buffer_info->page, - buffer_info->page_offset, - length); + buffer_info->page, + buffer_info->page_offset, + length); if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || (page_count(buffer_info->page) != 1)) @@ -370,7 +372,7 @@ send_up: skb->protocol = eth_type_trans(skb, netdev); igbvf_receive_skb(adapter, netdev, skb, staterr, - rx_desc->wb.upper.vlan); + rx_desc->wb.upper.vlan); next_desc: rx_desc->wb.upper.status_error = 0; @@ -402,7 +404,7 @@ next_desc: } static void igbvf_put_txbuf(struct igbvf_adapter *adapter, - struct igbvf_buffer *buffer_info) + struct igbvf_buffer *buffer_info) { if (buffer_info->dma) { if (buffer_info->mapped_as_page) @@ -431,7 +433,7 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter, * Return 0 on success, negative on failure **/ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring) + struct igbvf_ring *tx_ring) { struct pci_dev *pdev = adapter->pdev; int size; @@ -458,7 +460,7 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, err: vfree(tx_ring->buffer_info); dev_err(&adapter->pdev->dev, - "Unable to allocate memory for the transmit descriptor ring\n"); + "Unable to allocate memory for the transmit descriptor ring\n"); return -ENOMEM; } @@ -501,7 +503,7 @@ err: vfree(rx_ring->buffer_info); rx_ring->buffer_info = NULL; dev_err(&adapter->pdev->dev, 
- "Unable to allocate memory for the receive descriptor ring\n"); + "Unable to allocate memory for the receive descriptor ring\n"); return -ENOMEM; } @@ -578,13 +580,13 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring) for (i = 0; i < rx_ring->count; i++) { buffer_info = &rx_ring->buffer_info[i]; if (buffer_info->dma) { - if (adapter->rx_ps_hdr_size){ + if (adapter->rx_ps_hdr_size) { dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_ps_hdr_size, + adapter->rx_ps_hdr_size, DMA_FROM_DEVICE); } else { dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_buffer_len, + adapter->rx_buffer_len, DMA_FROM_DEVICE); } buffer_info->dma = 0; @@ -599,7 +601,7 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring) if (buffer_info->page_dma) dma_unmap_page(&pdev->dev, buffer_info->page_dma, - PAGE_SIZE / 2, + PAGE_SIZE / 2, DMA_FROM_DEVICE); put_page(buffer_info->page); buffer_info->page = NULL; @@ -638,7 +640,7 @@ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring) rx_ring->buffer_info = NULL; dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, - rx_ring->dma); + rx_ring->dma); rx_ring->desc = NULL; } @@ -649,13 +651,12 @@ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring) * @packets: the number of packets during this measurement interval * @bytes: the number of bytes during this measurement interval * - * Stores a new ITR value based on packets and byte - * counts during the last interrupt. The advantage of per interrupt - * computation is faster updates and more accurate ITR for the current - * traffic pattern. Constants in this function were computed - * based on theoretical maximum wire speed and thresholds were set based - * on testing data as well as attempting to minimize response time - * while increasing bulk throughput. + * Stores a new ITR value based on packets and byte counts during the last + * interrupt. The advantage of per interrupt computation is faster updates + * and more accurate ITR for the current traffic pattern. Constants in this + * function were computed based on theoretical maximum wire speed and thresholds + * were set based on testing data as well as attempting to minimize response + * time while increasing bulk throughput. **/ static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter, enum latency_range itr_setting, @@ -744,17 +745,15 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter) new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range); - if (new_itr != adapter->tx_ring->itr_val) { u32 current_itr = adapter->tx_ring->itr_val; - /* - * this attempts to bias the interrupt rate towards Bulk + /* this attempts to bias the interrupt rate towards Bulk * by adding intermediate steps when interrupt rate is * increasing */ new_itr = new_itr > current_itr ? - min(current_itr + (new_itr >> 2), new_itr) : - new_itr; + min(current_itr + (new_itr >> 2), new_itr) : + new_itr; adapter->tx_ring->itr_val = new_itr; adapter->tx_ring->set_itr = 1; @@ -772,9 +771,10 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter) if (new_itr != adapter->rx_ring->itr_val) { u32 current_itr = adapter->rx_ring->itr_val; + new_itr = new_itr > current_itr ? 
- min(current_itr + (new_itr >> 2), new_itr) : - new_itr; + min(current_itr + (new_itr >> 2), new_itr) : + new_itr; adapter->rx_ring->itr_val = new_itr; adapter->rx_ring->set_itr = 1; @@ -829,7 +829,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) segs = skb_shinfo(skb)->gso_segs ?: 1; /* multiply data chunks by size of headers */ bytecount = ((segs - 1) * skb_headlen(skb)) + - skb->len; + skb->len; total_packets += segs; total_bytes += bytecount; } @@ -849,9 +849,8 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) tx_ring->next_to_clean = i; - if (unlikely(count && - netif_carrier_ok(netdev) && - igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) { + if (unlikely(count && netif_carrier_ok(netdev) && + igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. */ @@ -902,8 +901,9 @@ static irqreturn_t igbvf_intr_msix_tx(int irq, void *data) adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; - /* auto mask will automatically reenable the interrupt when we write - * EICS */ + /* auto mask will automatically re-enable the interrupt when we write + * EICS + */ if (!igbvf_clean_tx_irq(tx_ring)) /* Ring was not completely cleaned, so fire another interrupt */ ew32(EICS, tx_ring->eims_value); @@ -941,15 +941,16 @@ static irqreturn_t igbvf_intr_msix_rx(int irq, void *data) #define IGBVF_NO_QUEUE -1 static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, - int tx_queue, int msix_vector) + int tx_queue, int msix_vector) { struct e1000_hw *hw = &adapter->hw; u32 ivar, index; /* 82576 uses a table-based method for assigning vectors. - Each queue has a single entry in the table to which we write - a vector number along with a "valid" bit. Sadly, the layout - of the table is somewhat counterintuitive. */ + * Each queue has a single entry in the table to which we write + * a vector number along with a "valid" bit. Sadly, the layout + * of the table is somewhat counterintuitive. + */ if (rx_queue > IGBVF_NO_QUEUE) { index = (rx_queue >> 1); ivar = array_er32(IVAR0, index); @@ -984,6 +985,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, /** * igbvf_configure_msix - Configure MSI-X hardware + * @adapter: board private structure * * igbvf_configure_msix sets up the hardware to properly * generate MSI-X interrupts. @@ -1027,6 +1029,7 @@ static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter) /** * igbvf_set_interrupt_capability - set MSI or MSI-X if supported + * @adapter: board private structure * * Attempt to configure interrupts using the best available * capabilities of the hardware and kernel. 
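The igbvf_set_itr() hunks above keep the interrupt throttle rate from jumping straight to a much larger value: when the newly computed ITR is above the current one, only current_itr + (new_itr >> 2) is applied per update, capped at the target. A minimal userspace sketch of that stepping rule follows; the function and variable names are illustrative and not taken from the driver.

#include <stdio.h>

/* Step the throttle value toward a new target the way the
 * "min(current_itr + (new_itr >> 2), new_itr)" expression above does:
 * decreases apply immediately, increases move by a quarter of the
 * target per update until the target is reached.
 */
static unsigned int step_itr(unsigned int current_itr, unsigned int new_itr)
{
	unsigned int stepped;

	if (new_itr <= current_itr)
		return new_itr;

	stepped = current_itr + (new_itr >> 2);
	return stepped < new_itr ? stepped : new_itr;
}

int main(void)
{
	unsigned int itr = 980;      /* illustrative starting value */
	unsigned int target = 7000;  /* illustrative target value   */

	while (itr != target) {
		itr = step_itr(itr, target);
		printf("itr -> %u\n", itr);
	}
	return 0;
}

Running the loop shows the value converging in a few intermediate steps rather than one jump, which is the smoothing the in-driver comment describes.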
@@ -1036,27 +1039,28 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter) int err = -ENOMEM; int i; - /* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */ + /* we allocate 3 vectors, 1 for Tx, 1 for Rx, one for PF messages */ adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry), - GFP_KERNEL); + GFP_KERNEL); if (adapter->msix_entries) { for (i = 0; i < 3; i++) adapter->msix_entries[i].entry = i; err = pci_enable_msix_range(adapter->pdev, - adapter->msix_entries, 3, 3); + adapter->msix_entries, 3, 3); } if (err < 0) { /* MSI-X failed */ dev_err(&adapter->pdev->dev, - "Failed to initialize MSI-X interrupts.\n"); + "Failed to initialize MSI-X interrupts.\n"); igbvf_reset_interrupt_capability(adapter); } } /** * igbvf_request_msix - Initialize MSI-X interrupts + * @adapter: board private structure * * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the * kernel. @@ -1075,8 +1079,8 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) } err = request_irq(adapter->msix_entries[vector].vector, - igbvf_intr_msix_tx, 0, adapter->tx_ring->name, - netdev); + igbvf_intr_msix_tx, 0, adapter->tx_ring->name, + netdev); if (err) goto out; @@ -1085,8 +1089,8 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) vector++; err = request_irq(adapter->msix_entries[vector].vector, - igbvf_intr_msix_rx, 0, adapter->rx_ring->name, - netdev); + igbvf_intr_msix_rx, 0, adapter->rx_ring->name, + netdev); if (err) goto out; @@ -1095,7 +1099,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) vector++; err = request_irq(adapter->msix_entries[vector].vector, - igbvf_msix_other, 0, netdev->name, netdev); + igbvf_msix_other, 0, netdev->name, netdev); if (err) goto out; @@ -1130,6 +1134,7 @@ static int igbvf_alloc_queues(struct igbvf_adapter *adapter) /** * igbvf_request_irq - initialize interrupts + * @adapter: board private structure * * Attempts to configure interrupts using the best available * capabilities of the hardware and kernel. @@ -1146,7 +1151,7 @@ static int igbvf_request_irq(struct igbvf_adapter *adapter) return err; dev_err(&adapter->pdev->dev, - "Unable to allocate interrupt, Error: %d\n", err); + "Unable to allocate interrupt, Error: %d\n", err); return err; } @@ -1164,6 +1169,7 @@ static void igbvf_free_irq(struct igbvf_adapter *adapter) /** * igbvf_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure **/ static void igbvf_irq_disable(struct igbvf_adapter *adapter) { @@ -1177,6 +1183,7 @@ static void igbvf_irq_disable(struct igbvf_adapter *adapter) /** * igbvf_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure **/ static void igbvf_irq_enable(struct igbvf_adapter *adapter) { @@ -1252,7 +1259,7 @@ static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, if (hw->mac.ops.set_vfta(hw, vid, false)) { dev_err(&adapter->pdev->dev, - "Failed to remove vlan id %d\n", vid); + "Failed to remove vlan id %d\n", vid); return -EINVAL; } clear_bit(vid, adapter->active_vlans); @@ -1298,7 +1305,7 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter) /* Turn off Relaxed Ordering on head write-backs. The writebacks * MUST be delivered in order or it will completely screw up - * our bookeeping. + * our bookkeeping. 
*/ dca_txctrl = er32(DCA_TXCTRL(0)); dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; @@ -1325,15 +1332,15 @@ static void igbvf_setup_srrctl(struct igbvf_adapter *adapter) u32 srrctl = 0; srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK | - E1000_SRRCTL_BSIZEHDR_MASK | - E1000_SRRCTL_BSIZEPKT_MASK); + E1000_SRRCTL_BSIZEHDR_MASK | + E1000_SRRCTL_BSIZEPKT_MASK); /* Enable queue drop to avoid head of line blocking */ srrctl |= E1000_SRRCTL_DROP_EN; /* Setup buffer sizes */ srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >> - E1000_SRRCTL_BSIZEPKT_SHIFT; + E1000_SRRCTL_BSIZEPKT_SHIFT; if (adapter->rx_buffer_len < 2048) { adapter->rx_ps_hdr_size = 0; @@ -1341,7 +1348,7 @@ static void igbvf_setup_srrctl(struct igbvf_adapter *adapter) } else { adapter->rx_ps_hdr_size = 128; srrctl |= adapter->rx_ps_hdr_size << - E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; + E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; } @@ -1369,8 +1376,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter) rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); - /* - * Setup the HW Rx Head and Tail Descriptor Pointers and + /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ rdba = rx_ring->dma; @@ -1441,10 +1447,11 @@ static void igbvf_configure(struct igbvf_adapter *adapter) igbvf_setup_srrctl(adapter); igbvf_configure_rx(adapter); igbvf_alloc_rx_buffers(adapter->rx_ring, - igbvf_desc_unused(adapter->rx_ring)); + igbvf_desc_unused(adapter->rx_ring)); } /* igbvf_reset - bring the hardware into a known good state + * @adapter: private board structure * * This function boots the hardware and enables some settings that * require a configuration cycle of the hardware - those cannot be @@ -1494,7 +1501,6 @@ int igbvf_up(struct igbvf_adapter *adapter) hw->mac.get_link_status = 1; mod_timer(&adapter->watchdog_timer, jiffies + 1); - return 0; } @@ -1504,8 +1510,7 @@ void igbvf_down(struct igbvf_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u32 rxdctl, txdctl; - /* - * signal that we're down so the interrupt handler does not + /* signal that we're down so the interrupt handler does not * reschedule our watchdog timer */ set_bit(__IGBVF_DOWN, &adapter->state); @@ -1547,7 +1552,7 @@ void igbvf_reinit_locked(struct igbvf_adapter *adapter) { might_sleep(); while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); igbvf_down(adapter); igbvf_up(adapter); clear_bit(__IGBVF_RESETTING, &adapter->state); @@ -1662,8 +1667,7 @@ static int igbvf_open(struct net_device *netdev) if (err) goto err_setup_rx; - /* - * before we allocate an interrupt, we must be ready to handle it. + /* before we allocate an interrupt, we must be ready to handle it. * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt * as soon as we call pci_request_irq, so we have to setup our * clean_rx handler before we do so. 
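igbvf_setup_srrctl() in the hunk above encodes the receive buffer length into SRRCTL as ALIGN(rx_buffer_len, 1024) >> E1000_SRRCTL_BSIZEPKT_SHIFT, i.e. the size in 1 KB units. A small sketch of that encoding, assuming the usual shift value of 10 and treating the register as a plain integer with no hardware access:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)         (((x) + (a) - 1) & ~((a) - 1))
#define SRRCTL_BSIZEPKT_SHIFT  10  /* assumed: packet buffer size in 1 KB units */

/* Round the buffer length up to 1 KB and express it in kilobytes,
 * the same arithmetic igbvf_setup_srrctl() uses for the size field.
 */
static uint32_t srrctl_pkt_size(uint32_t rx_buffer_len)
{
	return ALIGN_UP(rx_buffer_len, 1024) >> SRRCTL_BSIZEPKT_SHIFT;
}

int main(void)
{
	uint32_t lens[] = { 1522, 2048, 4096, 9234 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len %u -> SRRCTL size field %u (x 1 KB)\n",
		       lens[i], srrctl_pkt_size(lens[i]));
	return 0;
}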
@@ -1725,6 +1729,7 @@ static int igbvf_close(struct net_device *netdev) return 0; } + /** * igbvf_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure @@ -1753,15 +1758,15 @@ static int igbvf_set_mac(struct net_device *netdev, void *p) return 0; } -#define UPDATE_VF_COUNTER(reg, name) \ - { \ - u32 current_counter = er32(reg); \ - if (current_counter < adapter->stats.last_##name) \ - adapter->stats.name += 0x100000000LL; \ - adapter->stats.last_##name = current_counter; \ - adapter->stats.name &= 0xFFFFFFFF00000000LL; \ - adapter->stats.name |= current_counter; \ - } +#define UPDATE_VF_COUNTER(reg, name) \ +{ \ + u32 current_counter = er32(reg); \ + if (current_counter < adapter->stats.last_##name) \ + adapter->stats.name += 0x100000000LL; \ + adapter->stats.last_##name = current_counter; \ + adapter->stats.name &= 0xFFFFFFFF00000000LL; \ + adapter->stats.name |= current_counter; \ +} /** * igbvf_update_stats - Update the board statistics counters @@ -1772,8 +1777,7 @@ void igbvf_update_stats(struct igbvf_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - /* - * Prevent stats update while adapter is being reset, link is down + /* Prevent stats update while adapter is being reset, link is down * or if the pci connection is down. */ if (adapter->link_speed == 0) @@ -1832,7 +1836,7 @@ static bool igbvf_has_link(struct igbvf_adapter *adapter) **/ static void igbvf_watchdog(unsigned long data) { - struct igbvf_adapter *adapter = (struct igbvf_adapter *) data; + struct igbvf_adapter *adapter = (struct igbvf_adapter *)data; /* Do the rest outside of interrupt context */ schedule_work(&adapter->watchdog_task); @@ -1841,8 +1845,8 @@ static void igbvf_watchdog(unsigned long data) static void igbvf_watchdog_task(struct work_struct *work) { struct igbvf_adapter *adapter = container_of(work, - struct igbvf_adapter, - watchdog_task); + struct igbvf_adapter, + watchdog_task); struct net_device *netdev = adapter->netdev; struct e1000_mac_info *mac = &adapter->hw.mac; struct igbvf_ring *tx_ring = adapter->tx_ring; @@ -1855,8 +1859,8 @@ static void igbvf_watchdog_task(struct work_struct *work) if (link) { if (!netif_carrier_ok(netdev)) { mac->ops.get_link_up_info(&adapter->hw, - &adapter->link_speed, - &adapter->link_duplex); + &adapter->link_speed, + &adapter->link_duplex); igbvf_print_link_info(adapter); netif_carrier_on(netdev); @@ -1876,10 +1880,9 @@ static void igbvf_watchdog_task(struct work_struct *work) igbvf_update_stats(adapter); } else { tx_pending = (igbvf_desc_unused(tx_ring) + 1 < - tx_ring->count); + tx_ring->count); if (tx_pending) { - /* - * We've lost link, so the controller stops DMA, + /* We've lost link, so the controller stops DMA, * but we've got queued Tx work that's never going * to get done, so reset controller to flush Tx. * (Do the reset outside of interrupt context). 
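The UPDATE_VF_COUNTER macro reindented above extends a 32-bit hardware counter that can wrap into a monotonically increasing 64-bit software counter by noticing when a fresh reading is smaller than the previous one. A standalone sketch of the same technique; the struct and function names here are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

struct stat64 {
	uint64_t value; /* accumulated 64-bit counter */
	uint32_t last;  /* last raw 32-bit reading    */
};

/* Fold a 32-bit hardware reading into the 64-bit counter.  If the
 * reading went backwards the hardware counter wrapped, so bump the
 * upper 32 bits before replacing the low word - the same sequence as
 * UPDATE_VF_COUNTER above.
 */
static void update_counter(struct stat64 *s, uint32_t current)
{
	if (current < s->last)
		s->value += 0x100000000ULL;
	s->last = current;
	s->value &= 0xFFFFFFFF00000000ULL;
	s->value |= current;
}

int main(void)
{
	struct stat64 gprc = { 0, 0 };

	update_counter(&gprc, 0xFFFFFFF0); /* near the 32-bit limit */
	update_counter(&gprc, 0x00000010); /* wrapped around        */
	printf("64-bit counter: 0x%llx\n", (unsigned long long)gprc.value);
	return 0;
}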
@@ -1898,15 +1901,15 @@ static void igbvf_watchdog_task(struct work_struct *work) round_jiffies(jiffies + (2 * HZ))); } -#define IGBVF_TX_FLAGS_CSUM 0x00000001 -#define IGBVF_TX_FLAGS_VLAN 0x00000002 -#define IGBVF_TX_FLAGS_TSO 0x00000004 -#define IGBVF_TX_FLAGS_IPV4 0x00000008 -#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 -#define IGBVF_TX_FLAGS_VLAN_SHIFT 16 +#define IGBVF_TX_FLAGS_CSUM 0x00000001 +#define IGBVF_TX_FLAGS_VLAN 0x00000002 +#define IGBVF_TX_FLAGS_TSO 0x00000004 +#define IGBVF_TX_FLAGS_IPV4 0x00000008 +#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGBVF_TX_FLAGS_VLAN_SHIFT 16 static int igbvf_tso(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring, + struct igbvf_ring *tx_ring, struct sk_buff *skb, u32 tx_flags, u8 *hdr_len, __be16 protocol) { @@ -1930,17 +1933,18 @@ static int igbvf_tso(struct igbvf_adapter *adapter, if (protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; iph->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); + iph->daddr, 0, + IPPROTO_TCP, + 0); } else if (skb_is_gso_v6(skb)) { ipv6_hdr(skb)->payload_len = 0; tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); } i = tx_ring->next_to_use; @@ -1984,7 +1988,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter, } static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring, + struct igbvf_ring *tx_ring, struct sk_buff *skb, u32 tx_flags, __be16 protocol) { @@ -2005,8 +2009,7 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); if (skb->ip_summed == CHECKSUM_PARTIAL) info |= (skb_transport_header(skb) - - skb_network_header(skb)); - + skb_network_header(skb)); context_desc->vlan_macip_lens = cpu_to_le32(info); @@ -2055,6 +2058,10 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) netif_stop_queue(netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. 
+ */ smp_mb(); /* We need to check again just in case room has been made available */ @@ -2067,11 +2074,11 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) return 0; } -#define IGBVF_MAX_TXD_PWR 16 -#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR) +#define IGBVF_MAX_TXD_PWR 16 +#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR) static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring, + struct igbvf_ring *tx_ring, struct sk_buff *skb) { struct igbvf_buffer *buffer_info; @@ -2093,7 +2100,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { const struct skb_frag_struct *frag; @@ -2111,7 +2117,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, buffer_info->time_stamp = jiffies; buffer_info->mapped_as_page = true; buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, - DMA_TO_DEVICE); + DMA_TO_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; } @@ -2133,7 +2139,7 @@ dma_error: /* clear timestamp and dma mappings for remaining portion of packet */ while (count--) { - if (i==0) + if (i == 0) i += tx_ring->count; i--; buffer_info = &tx_ring->buffer_info[i]; @@ -2144,10 +2150,10 @@ dma_error: } static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring, + struct igbvf_ring *tx_ring, int tx_flags, int count, unsigned int first, u32 paylen, - u8 hdr_len) + u8 hdr_len) { union e1000_adv_tx_desc *tx_desc = NULL; struct igbvf_buffer *buffer_info; @@ -2155,7 +2161,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, unsigned int i; cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | - E1000_ADVTXD_DCMD_DEXT); + E1000_ADVTXD_DCMD_DEXT); if (tx_flags & IGBVF_TX_FLAGS_VLAN) cmd_type_len |= E1000_ADVTXD_DCMD_VLE; @@ -2182,7 +2188,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); tx_desc->read.cmd_type_len = - cpu_to_le32(cmd_type_len | buffer_info->length); + cpu_to_le32(cmd_type_len | buffer_info->length); tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); i++; if (i == tx_ring->count) @@ -2193,14 +2199,16 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, - * such as IA-64). */ + * such as IA-64). 
+ */ wmb(); tx_ring->buffer_info[first].next_to_watch = tx_desc; tx_ring->next_to_use = i; writel(i, adapter->hw.hw_addr + tx_ring->tail); /* we need this if more than one processor can write to our tail - * at a time, it syncronizes IO on IA64/Altix systems */ + * at a time, it synchronizes IO on IA64/Altix systems + */ mmiowb(); } @@ -2225,11 +2233,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, return NETDEV_TX_OK; } - /* - * need: count + 4 desc gap to keep tail from touching - * + 2 desc gap to keep tail from touching head, - * + 1 desc for skb->data, - * + 1 desc for context descriptor, + /* need: count + 4 desc gap to keep tail from touching + * + 2 desc gap to keep tail from touching head, + * + 1 desc for skb->data, + * + 1 desc for context descriptor, * head, otherwise try next time */ if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) { @@ -2258,11 +2265,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, if (tso) tx_flags |= IGBVF_TX_FLAGS_TSO; else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) && - (skb->ip_summed == CHECKSUM_PARTIAL)) + (skb->ip_summed == CHECKSUM_PARTIAL)) tx_flags |= IGBVF_TX_FLAGS_CSUM; - /* - * count reflects descriptors mapped, if 0 then mapping error + /* count reflects descriptors mapped, if 0 then mapping error * has occurred and we need to rewind the descriptor queue */ count = igbvf_tx_map_adv(adapter, tx_ring, skb); @@ -2313,6 +2319,7 @@ static void igbvf_tx_timeout(struct net_device *netdev) static void igbvf_reset_task(struct work_struct *work) { struct igbvf_adapter *adapter; + adapter = container_of(work, struct igbvf_adapter, reset_task); igbvf_reinit_locked(adapter); @@ -2356,14 +2363,13 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) } while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); /* igbvf_down has a dependency on max_frame_size */ adapter->max_frame_size = max_frame; if (netif_running(netdev)) igbvf_down(adapter); - /* - * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN * means we reserve 2 more, this pushes us to allocate from the next * larger slab size. * i.e. RXBUFFER_2048 --> size-4096 slab @@ -2382,15 +2388,14 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) adapter->rx_buffer_len = PAGE_SIZE / 2; #endif - /* adjust allocation if LPE protects us, and we aren't using SBP */ if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || - (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) + (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + - ETH_FCS_LEN; + ETH_FCS_LEN; dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", - netdev->mtu, new_mtu); + netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (netif_running(netdev)) @@ -2477,8 +2482,7 @@ static void igbvf_shutdown(struct pci_dev *pdev) } #ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling 'interrupt' - used by things like netconsole to send skbs +/* Polling 'interrupt' - used by things like netconsole to send skbs * without having to re-enable interrupts. It's not called while * the interrupt routine is executing. */ @@ -2503,7 +2507,7 @@ static void igbvf_netpoll(struct net_device *netdev) * this device has been detected. 
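igbvf_maybe_stop_tx() and igbvf_clean_tx_irq() above implement the usual flow control for a descriptor ring: the transmit path stops the queue when too few descriptors remain, issues a full barrier, and re-checks, while the cleanup path wakes the queue once enough descriptors are reclaimed. A simplified, single-threaded illustration of the free-descriptor arithmetic on a circular ring; ring_unused() and the other names are illustrative, and the barriers are omitted since they only matter with real concurrency.

#include <stdio.h>

#define RING_SIZE   256
#define WAKE_THRESH 32  /* assumed wake threshold, in the spirit of IGBVF_TX_QUEUE_WAKE */

struct ring {
	unsigned int next_to_use;   /* producer index */
	unsigned int next_to_clean; /* consumer index */
};

/* Free descriptors, keeping one slot unused so that
 * next_to_use == next_to_clean always means "empty", never "full".
 */
static unsigned int ring_unused(const struct ring *r)
{
	if (r->next_to_clean > r->next_to_use)
		return r->next_to_clean - r->next_to_use - 1;
	return RING_SIZE + r->next_to_clean - r->next_to_use - 1;
}

int main(void)
{
	struct ring r = { .next_to_use = 250, .next_to_clean = 10 };

	printf("unused = %u\n", ring_unused(&r));
	if (ring_unused(&r) < WAKE_THRESH)
		printf("queue would be stopped until cleanup frees slots\n");
	return 0;
}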
*/ static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + pci_channel_state_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct igbvf_adapter *adapter = netdev_priv(netdev); @@ -2583,7 +2587,7 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter) } static int igbvf_set_features(struct net_device *netdev, - netdev_features_t features) + netdev_features_t features) { struct igbvf_adapter *adapter = netdev_priv(netdev); @@ -2596,21 +2600,21 @@ static int igbvf_set_features(struct net_device *netdev, } static const struct net_device_ops igbvf_netdev_ops = { - .ndo_open = igbvf_open, - .ndo_stop = igbvf_close, - .ndo_start_xmit = igbvf_xmit_frame, - .ndo_get_stats = igbvf_get_stats, - .ndo_set_rx_mode = igbvf_set_multi, - .ndo_set_mac_address = igbvf_set_mac, - .ndo_change_mtu = igbvf_change_mtu, - .ndo_do_ioctl = igbvf_ioctl, - .ndo_tx_timeout = igbvf_tx_timeout, - .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid, + .ndo_open = igbvf_open, + .ndo_stop = igbvf_close, + .ndo_start_xmit = igbvf_xmit_frame, + .ndo_get_stats = igbvf_get_stats, + .ndo_set_rx_mode = igbvf_set_multi, + .ndo_set_mac_address = igbvf_set_mac, + .ndo_change_mtu = igbvf_change_mtu, + .ndo_do_ioctl = igbvf_ioctl, + .ndo_tx_timeout = igbvf_tx_timeout, + .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = igbvf_netpoll, + .ndo_poll_controller = igbvf_netpoll, #endif - .ndo_set_features = igbvf_set_features, + .ndo_set_features = igbvf_set_features, }; /** @@ -2645,8 +2649,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } else { err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, "No usable DMA " - "configuration, aborting\n"); + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); goto err_dma; } } @@ -2686,7 +2690,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = -EIO; adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); + pci_resource_len(pdev, 0)); if (!adapter->hw.hw_addr) goto err_ioremap; @@ -2712,16 +2716,16 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->bd_number = cards_found++; netdev->hw_features = NETIF_F_SG | - NETIF_F_IP_CSUM | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM; netdev->features = netdev->hw_features | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; @@ -2742,7 +2746,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) dev_info(&pdev->dev, "Error reading MAC address.\n"); else if (is_zero_ether_addr(adapter->hw.mac.addr)) - dev_info(&pdev->dev, "MAC address not assigned by administrator.\n"); + dev_info(&pdev->dev, + "MAC address not assigned by administrator.\n"); memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); } @@ -2751,11 +2756,11 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&pdev->dev, "Assigning random MAC address.\n"); eth_hw_addr_random(netdev); memcpy(adapter->hw.mac.addr, netdev->dev_addr, - netdev->addr_len); + netdev->addr_len); } 
setup_timer(&adapter->watchdog_timer, &igbvf_watchdog, - (unsigned long) adapter); + (unsigned long)adapter); INIT_WORK(&adapter->reset_task, igbvf_reset_task); INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task); @@ -2818,8 +2823,7 @@ static void igbvf_remove(struct pci_dev *pdev) struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - /* - * The watchdog timer may be rescheduled, so explicitly + /* The watchdog timer may be rescheduled, so explicitly * disable it from being rescheduled. */ set_bit(__IGBVF_DOWN, &adapter->state); @@ -2832,9 +2836,8 @@ static void igbvf_remove(struct pci_dev *pdev) igbvf_reset_interrupt_capability(adapter); - /* - * it is important to delete the napi struct prior to freeing the - * rx ring so that you do not end up with null pointer refs + /* it is important to delete the NAPI struct prior to freeing the + * Rx ring so that you do not end up with null pointer refs */ netif_napi_del(&adapter->rx_ring->napi); kfree(adapter->tx_ring); @@ -2866,17 +2869,17 @@ MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl); /* PCI Device API Driver */ static struct pci_driver igbvf_driver = { - .name = igbvf_driver_name, - .id_table = igbvf_pci_tbl, - .probe = igbvf_probe, - .remove = igbvf_remove, + .name = igbvf_driver_name, + .id_table = igbvf_pci_tbl, + .probe = igbvf_probe, + .remove = igbvf_remove, #ifdef CONFIG_PM /* Power Management Hooks */ - .suspend = igbvf_suspend, - .resume = igbvf_resume, + .suspend = igbvf_suspend, + .resume = igbvf_resume, #endif - .shutdown = igbvf_shutdown, - .err_handler = &igbvf_err_handler + .shutdown = igbvf_shutdown, + .err_handler = &igbvf_err_handler }; /** @@ -2888,6 +2891,7 @@ static struct pci_driver igbvf_driver = { static int __init igbvf_init_module(void) { int ret; + pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version); pr_info("%s\n", igbvf_copyright); @@ -2909,7 +2913,6 @@ static void __exit igbvf_exit_module(void) } module_exit(igbvf_exit_module); - MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/intel/igbvf/regs.h b/drivers/net/ethernet/intel/igbvf/regs.h index 7dc6341..86a7c12 100644 --- a/drivers/net/ethernet/intel/igbvf/regs.h +++ b/drivers/net/ethernet/intel/igbvf/regs.h @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -28,81 +27,81 @@ #ifndef _E1000_REGS_H_ #define _E1000_REGS_H_ -#define E1000_CTRL 0x00000 /* Device Control - RW */ -#define E1000_STATUS 0x00008 /* Device Status - RO */ -#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ -#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ -#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) -#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ -#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ -#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ -#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ -#define E1000_EIAM 0x01530 /* Ext. 
Interrupt Ack Auto Clear Mask - RW */ -#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ -#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ -/* - * Convenience macros +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ + +/* Convenience macros * * Note: "_n" is the queue number of the register to be written to. * * Example usage: * E1000_RDBAL_REG(current_rx_queue) */ -#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ - (0x0C000 + ((_n) * 0x40))) -#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ - (0x0C004 + ((_n) * 0x40))) -#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ - (0x0C008 + ((_n) * 0x40))) -#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ - (0x0C00C + ((_n) * 0x40))) -#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ - (0x0C010 + ((_n) * 0x40))) -#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ - (0x0C018 + ((_n) * 0x40))) -#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ - (0x0C028 + ((_n) * 0x40))) -#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ - (0x0E000 + ((_n) * 0x40))) -#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ - (0x0E004 + ((_n) * 0x40))) -#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ - (0x0E008 + ((_n) * 0x40))) -#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ - (0x0E010 + ((_n) * 0x40))) -#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ - (0x0E018 + ((_n) * 0x40))) -#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ - (0x0E028 + ((_n) * 0x40))) -#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) -#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) -#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ - (0x054E0 + ((_i - 16) * 8))) -#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ - (0x054E4 + ((_i - 16) * 8))) +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ + (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ + (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ + (0x0C008 + ((_n) * 0x40))) +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ + (0x0C00C + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ + (0x0C010 + ((_n) * 0x40))) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ + (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ + (0x0C028 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ + (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? 
(0x03804 + ((_n) * 0x100)) : \ + (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ + (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ + (0x0E010 + ((_n) * 0x40))) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ + (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ + (0x0E028 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) +#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) /* Statistics registers */ -#define E1000_VFGPRC 0x00F10 -#define E1000_VFGORC 0x00F18 -#define E1000_VFMPRC 0x00F3C -#define E1000_VFGPTC 0x00F14 -#define E1000_VFGOTC 0x00F34 -#define E1000_VFGOTLBC 0x00F50 -#define E1000_VFGPTLBC 0x00F44 -#define E1000_VFGORLBC 0x00F48 -#define E1000_VFGPRLBC 0x00F40 +#define E1000_VFGPRC 0x00F10 +#define E1000_VFGORC 0x00F18 +#define E1000_VFMPRC 0x00F3C +#define E1000_VFGPTC 0x00F14 +#define E1000_VFGOTC 0x00F34 +#define E1000_VFGOTLBC 0x00F50 +#define E1000_VFGPTLBC 0x00F44 +#define E1000_VFGORLBC 0x00F48 +#define E1000_VFGPRLBC 0x00F40 /* These act per VF so an array friendly macro is used */ -#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) -#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) +#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) /* Define macros for handling registers */ -#define er32(reg) readl(hw->hw_addr + E1000_##reg) -#define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg) +#define er32(reg) readl(hw->hw_addr + E1000_##reg) +#define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg) #define array_er32(reg, offset) \ readl(hw->hw_addr + E1000_##reg + (offset << 2)) #define array_ew32(reg, offset, val) \ writel((val), hw->hw_addr + E1000_##reg + (offset << 2)) -#define e1e_flush() er32(STATUS) +#define e1e_flush() er32(STATUS) #endif diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c index 955ad8c..a13baa9 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.c +++ b/drivers/net/ethernet/intel/igbvf/vf.c @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -25,17 +24,16 @@ *******************************************************************************/ - #include "vf.h" static s32 e1000_check_for_link_vf(struct e1000_hw *hw); static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, - u16 *duplex); + u16 *duplex); static s32 e1000_init_hw_vf(struct e1000_hw *hw); static s32 e1000_reset_hw_vf(struct e1000_hw *hw); static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, - u32, u32, u32); + u32, u32, u32); static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32); static s32 e1000_read_mac_addr_vf(struct e1000_hw *); static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool); @@ -94,7 +92,7 @@ void e1000_init_function_pointers_vf(struct e1000_hw *hw) * the status register's data which is often stale and inaccurate. 
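The realigned convenience macros in regs.h above (E1000_RDBAL(_n), E1000_TDT(_n), ...) encode the 82576 layout in which the first four queues sit in one register block with a 0x100 stride and higher-numbered queues in a second block with a 0x40 stride. A small sketch of that address computation, using the RDBAL offsets exactly as quoted in the hunk:

#include <stdint.h>
#include <stdio.h>

/* Per-queue RDBAL offset: queues 0-3 start at 0x02800 with a 0x100 stride,
 * queues 4 and up use 0x0C000 with a 0x40 stride, matching the
 * E1000_RDBAL(_n) macro above.
 */
static uint32_t rdbal_offset(unsigned int n)
{
	return n < 4 ? 0x02800 + n * 0x100 : 0x0C000 + n * 0x40;
}

int main(void)
{
	unsigned int n;

	for (n = 0; n < 6; n++)
		printf("RDBAL(%u) = 0x%05x\n", n, rdbal_offset(n));
	return 0;
}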
**/ static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, - u16 *duplex) + u16 *duplex) { s32 status; @@ -130,7 +128,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) u8 *addr = (u8 *)(&msgbuf[1]); u32 ctrl; - /* assert vf queue/interrupt reset */ + /* assert VF queue/interrupt reset */ ctrl = er32(CTRL); ew32(CTRL, ctrl | E1000_CTRL_RST); @@ -144,7 +142,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) /* mailbox timeout can now become active */ mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT; - /* notify pf of vf reset completion */ + /* notify PF of VF reset completion */ msgbuf[0] = E1000_VF_RESET; mbx->ops.write_posted(hw, msgbuf, 1); @@ -153,7 +151,8 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) /* set our "perm_addr" based on info provided by PF */ ret_val = mbx->ops.read_posted(hw, msgbuf, 3); if (!ret_val) { - if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK)) + if (msgbuf[0] == (E1000_VF_RESET | + E1000_VT_MSGTYPE_ACK)) memcpy(hw->mac.perm_addr, addr, ETH_ALEN); else ret_val = -E1000_ERR_MAC_INIT; @@ -194,15 +193,14 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) /* Register count multiplied by bits per register */ hash_mask = (hw->mac.mta_reg_count * 32) - 1; - /* - * The bit_shift is the number of left-shifts + /* The bit_shift is the number of left-shifts * where 0xFF would still fall within the hash mask. */ while (hash_mask >> bit_shift != 0xFF) bit_shift++; hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | - (((u16) mc_addr[5]) << bit_shift))); + (((u16)mc_addr[5]) << bit_shift))); return hash_value; } @@ -221,8 +219,8 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) * unless there are workarounds that change this. **/ static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, - u8 *mc_addr_list, u32 mc_addr_count, - u32 rar_used_count, u32 rar_count) + u8 *mc_addr_list, u32 mc_addr_count, + u32 rar_used_count, u32 rar_count) { struct e1000_mbx_info *mbx = &hw->mbx; u32 msgbuf[E1000_VFMAILBOX_SIZE]; @@ -305,7 +303,7 @@ void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size) * @addr: pointer to the receive address * @index: receive address array register **/ -static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index) +static void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, u32 index) { struct e1000_mbx_info *mbx = &hw->mbx; u32 msgbuf[3]; @@ -354,8 +352,7 @@ static s32 e1000_check_for_link_vf(struct e1000_hw *hw) s32 ret_val = E1000_SUCCESS; u32 in_msg = 0; - /* - * We only want to run this if there has been a rst asserted. + /* We only want to run this if there has been a rst asserted. 
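e1000_hash_mc_addr_vf() in the hunk above hashes a multicast address with the top bits of its last two bytes: hash_mask is (mta_reg_count * 32) - 1, bit_shift is how far 0xFF can be shifted left while still fitting under that mask, and the two bytes are combined under the mask. A standalone sketch of the same computation, assuming an mta_reg_count of 128 (a 4096-entry table) purely for the demonstration:

#include <stdint.h>
#include <stdio.h>

/* Same scheme as e1000_hash_mc_addr_vf(): derive bit_shift from the mask,
 * then mix bytes 4 and 5 of the multicast address.
 */
static uint32_t hash_mc_addr(const uint8_t *mc_addr, unsigned int mta_reg_count)
{
	uint32_t hash_mask = mta_reg_count * 32 - 1;
	unsigned int bit_shift = 0;

	while ((hash_mask >> bit_shift) != 0xFF)
		bit_shift++;

	return hash_mask & ((mc_addr[4] >> (8 - bit_shift)) |
			    ((uint16_t)mc_addr[5] << bit_shift));
}

int main(void)
{
	/* illustrative multicast MAC, e.g. 01:00:5e:00:00:fb (mDNS) */
	uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };

	printf("hash = 0x%03x\n", hash_mc_addr(mc, 128));
	return 0;
}

With a 4096-bit table the loop settles on bit_shift = 4, so the hash is effectively the top 12 bits of the last two address bytes.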
* in this case that could mean a link change, device reset, * or a virtual function reset */ @@ -367,31 +364,33 @@ static s32 e1000_check_for_link_vf(struct e1000_hw *hw) if (!mac->get_link_status) goto out; - /* if link status is down no point in checking to see if pf is up */ + /* if link status is down no point in checking to see if PF is up */ if (!(er32(STATUS) & E1000_STATUS_LU)) goto out; /* if the read failed it could just be a mailbox collision, best wait - * until we are called again and don't report an error */ + * until we are called again and don't report an error + */ if (mbx->ops.read(hw, &in_msg, 1)) goto out; /* if incoming message isn't clear to send we are waiting on response */ if (!(in_msg & E1000_VT_MSGTYPE_CTS)) { - /* message is not CTS and is NACK we must have lost CTS status */ + /* msg is not CTS and is NACK we must have lost CTS status */ if (in_msg & E1000_VT_MSGTYPE_NACK) ret_val = -E1000_ERR_MAC_INIT; goto out; } - /* the pf is talking, if we timed out in the past we reinit */ + /* the PF is talking, if we timed out in the past we reinit */ if (!mbx->timeout) { ret_val = -E1000_ERR_MAC_INIT; goto out; } /* if we passed all the tests above then the link is up and we no - * longer need to check for link */ + * longer need to check for link + */ mac->get_link_status = false; out: diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h index 57db3c6..0f1eca6 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.h +++ b/drivers/net/ethernet/intel/igbvf/vf.h @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -38,30 +37,29 @@ struct e1000_hw; -#define E1000_DEV_ID_82576_VF 0x10CA -#define E1000_DEV_ID_I350_VF 0x1520 -#define E1000_REVISION_0 0 -#define E1000_REVISION_1 1 -#define E1000_REVISION_2 2 -#define E1000_REVISION_3 3 -#define E1000_REVISION_4 4 +#define E1000_DEV_ID_82576_VF 0x10CA +#define E1000_DEV_ID_I350_VF 0x1520 +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 +#define E1000_REVISION_4 4 -#define E1000_FUNC_0 0 -#define E1000_FUNC_1 1 +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 -/* - * Receive Address Register Count +/* Receive Address Register Count * Number of high/low register pairs in the RAR. The RAR (Receive Address * Registers) holds the directed and multicast addresses that we monitor. * These entries are also used for MAC-based filtering. 
*/ -#define E1000_RAR_ENTRIES_VF 1 +#define E1000_RAR_ENTRIES_VF 1 /* Receive Descriptor - Advanced */ union e1000_adv_rx_desc { struct { - u64 pkt_addr; /* Packet buffer address */ - u64 hdr_addr; /* Header buffer address */ + u64 pkt_addr; /* Packet buffer address */ + u64 hdr_addr; /* Header buffer address */ } read; struct { struct { @@ -69,53 +67,53 @@ union e1000_adv_rx_desc { u32 data; struct { u16 pkt_info; /* RSS/Packet type */ - u16 hdr_info; /* Split Header, - * hdr buffer length */ + /* Split Header, hdr buffer length */ + u16 hdr_info; } hs_rss; } lo_dword; union { - u32 rss; /* RSS Hash */ + u32 rss; /* RSS Hash */ struct { - u16 ip_id; /* IP id */ - u16 csum; /* Packet Checksum */ + u16 ip_id; /* IP id */ + u16 csum; /* Packet Checksum */ } csum_ip; } hi_dword; } lower; struct { - u32 status_error; /* ext status/error */ - u16 length; /* Packet length */ - u16 vlan; /* VLAN tag */ + u32 status_error; /* ext status/error */ + u16 length; /* Packet length */ + u16 vlan; /* VLAN tag */ } upper; } wb; /* writeback */ }; -#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 -#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 /* Transmit Descriptor - Advanced */ union e1000_adv_tx_desc { struct { - u64 buffer_addr; /* Address of descriptor's data buf */ + u64 buffer_addr; /* Address of descriptor's data buf */ u32 cmd_type_len; u32 olinfo_status; } read; struct { - u64 rsvd; /* Reserved */ + u64 rsvd; /* Reserved */ u32 nxtseq_seed; u32 status; } wb; }; /* Adv Transmit Descriptor Config Masks */ -#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ -#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ -#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ -#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ -#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ -#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ -#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ -#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ /* Context descriptors */ struct e1000_adv_tx_context_desc { @@ -125,11 +123,11 @@ struct e1000_adv_tx_context_desc { u32 mss_l4len_idx; }; -#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ -#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ -#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ -#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ -#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define 
E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ enum e1000_mac_type { e1000_undefined = 0, @@ -262,5 +260,4 @@ struct e1000_hw { void e1000_rlpml_set_vf(struct e1000_hw *, u16); void e1000_init_function_pointers_vf(struct e1000_hw *hw); - #endif /* _E1000_VF_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 70cc4c5..21aea7e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2609,7 +2609,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) eicr = IXGBE_READ_REG(hw, IXGBE_EICS); /* The lower 16bits of the EICR register are for the queue interrupts - * which should be masked here in order to not accidently clear them if + * which should be masked here in order to not accidentally clear them if * the bits are high when ixgbe_msix_other is called. There is a race * condition otherwise which results in possible performance loss * especially if the ixgbe_msix_other interrupt is triggering @@ -3924,7 +3924,7 @@ static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter) for (i = 0; i < hw->mac.num_rar_entries; i++) { adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; - memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + eth_zero_addr(adapter->mac_table[i].addr); adapter->mac_table[i].queue = 0; } ixgbe_sync_mac_table(adapter); @@ -3992,7 +3992,7 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue) adapter->mac_table[i].queue == queue) { adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; - memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + eth_zero_addr(adapter->mac_table[i].addr); adapter->mac_table[i].queue = 0; ixgbe_sync_mac_table(adapter); return 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 79c00f5..bd46f5d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -488,7 +488,7 @@ static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter) * @work: pointer to the work struct * * This work item polls TSYNCTXCTL valid bit to determine when a Tx hardware - * timestamp has been taken for the current skb. It is necesary, because the + * timestamp has been taken for the current skb. It is necessary, because the * descriptor's "done" bit does not correlate with the timestamp event. */ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 7f37fe7..09a291b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -141,7 +141,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) * The 82599 supports up to 64 VFs per physical function * but this implementation limits allocation to 63 so that * basic networking resources are still available to the - * physical function. If the user requests greater thn + * physical function. If the user requests greater than * 63 VFs then it is an error - reset to default of zero. 
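The E1000_ADVTXD_* command masks realigned above are the bits that igbvf_tx_queue_adv(), earlier in this patch, ORs into each data descriptor's cmd_type_len word together with the buffer length. A tiny sketch of how such a command word is assembled from those masks; the length value and the want_vlan/want_tso flags are made up for the demonstration.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Advanced TX descriptor command bits, as listed in the hunk above */
#define ADVTXD_DTYP_DATA  0x00300000 /* Advanced Data Descriptor      */
#define ADVTXD_DCMD_IFCS  0x02000000 /* Insert FCS (Ethernet CRC)     */
#define ADVTXD_DCMD_DEXT  0x20000000 /* Descriptor extension (1=Adv)  */
#define ADVTXD_DCMD_VLE   0x40000000 /* VLAN packet enable            */
#define ADVTXD_DCMD_TSE   0x80000000 /* TCP segmentation enable       */

/* Base command bits plus the buffer length in the low bits, mirroring
 * the cmd_type_len | buffer_info->length write seen above.
 */
static uint32_t make_cmd_type_len(uint16_t len, bool want_vlan, bool want_tso)
{
	uint32_t cmd = ADVTXD_DTYP_DATA | ADVTXD_DCMD_IFCS | ADVTXD_DCMD_DEXT;

	if (want_vlan)
		cmd |= ADVTXD_DCMD_VLE;
	if (want_tso)
		cmd |= ADVTXD_DCMD_TSE;
	return cmd | len;
}

int main(void)
{
	printf("cmd_type_len = 0x%08x\n", make_cmd_type_len(1514, true, false));
	return 0;
}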
*/ adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, IXGBE_MAX_VFS_DRV_LIMIT); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index fc5ecee5..8451f9a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1690,7 +1690,7 @@ enum { #define IXGBE_MACC_FS 0x00040000 #define IXGBE_MAC_RX2TX_LPBK 0x00000002 -/* Veto Bit definiton */ +/* Veto Bit definition */ #define IXGBE_MMNGC_MNG_VETO 0x00000001 /* LINKS Bit Masks */ diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index cdb53be..f510a58 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -65,7 +65,7 @@ static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw) * ixgbevf_reset_hw_vf - Performs hardware reset * @hw: pointer to hardware structure * - * Resets the hardware by reseting the transmit and receive units, masks and + * Resets the hardware by resetting the transmit and receive units, masks and * clears all interrupts. **/ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index fdf3e38..3e8b1bf 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -1423,7 +1423,7 @@ static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add) { struct mvpp2_prs_entry pe; - /* Promiscous mode - Accept unknown packets */ + /* Promiscuous mode - Accept unknown packets */ if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) { /* Entry exist - update port only */ @@ -3402,7 +3402,7 @@ static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool for (i = 0; i < bm_pool->buf_num; i++) { u32 vaddr; - /* Get buffer virtual adress (indirect access) */ + /* Get buffer virtual address (indirect access) */ mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG); if (!vaddr) diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index a681d7c..20b3c7b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -1499,6 +1499,15 @@ static struct mlx4_cmd_info cmd_info[] = { .verify = NULL, .wrapper = mlx4_ACCESS_REG_wrapper, }, + { + .opcode = MLX4_CMD_CONGESTION_CTRL_OPCODE, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper, + }, /* Native multicast commands are not available for guests */ { .opcode = MLX4_CMD_QP_ATTACH, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c index c95ca25..cde14fa 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c @@ -36,6 +36,49 @@ #include "mlx4_en.h" +/* Definitions for QCN + */ + +struct mlx4_congestion_control_mb_prio_802_1_qau_params { + __be32 modify_enable_high; + __be32 modify_enable_low; + __be32 reserved1; + __be32 extended_enable; + __be32 rppp_max_rps; + __be32 rpg_time_reset; + __be32 rpg_byte_reset; + __be32 rpg_threshold; + __be32 rpg_max_rate; + __be32 rpg_ai_rate; + __be32 rpg_hai_rate; + __be32 rpg_gd; + __be32 rpg_min_dec_fac; + __be32 rpg_min_rate; + __be32 max_time_rise; + __be32 max_byte_rise; + __be32 max_qdelta; + __be32 min_qoffset; + __be32 gd_coefficient; + __be32 reserved2[5]; + 
__be32 cp_sample_base; + __be32 reserved3[39]; +}; + +struct mlx4_congestion_control_mb_prio_802_1_qau_statistics { + __be64 rppp_rp_centiseconds; + __be32 reserved1; + __be32 ignored_cnm; + __be32 rppp_created_rps; + __be32 estimated_total_rate; + __be32 max_active_rate_limiter_index; + __be32 dropped_cnms_busy_fw; + __be32 reserved2; + __be32 cnms_handled_successfully; + __be32 min_total_limiters_rate; + __be32 max_total_limiters_rate; + __be32 reserved3[4]; +}; + static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets) { @@ -242,6 +285,178 @@ static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev, return 0; } +#define RPG_ENABLE_BIT 31 +#define CN_TAG_BIT 30 + +static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev, + struct ieee_qcn *qcn) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn; + struct mlx4_cmd_mailbox *mailbox_out = NULL; + u64 mailbox_in_dma = 0; + u32 inmod = 0; + int i, err; + + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN)) + return -EOPNOTSUPP; + + mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev); + if (IS_ERR(mailbox_out)) + return -ENOMEM; + hw_qcn = + (struct mlx4_congestion_control_mb_prio_802_1_qau_params *) + mailbox_out->buf; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + inmod = priv->port | ((1 << i) << 8) | + (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16); + err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma, + mailbox_out->dma, + inmod, MLX4_CONGESTION_CONTROL_GET_PARAMS, + MLX4_CMD_CONGESTION_CTRL_OPCODE, + MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (err) { + mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out); + return err; + } + + qcn->rpg_enable[i] = + be32_to_cpu(hw_qcn->extended_enable) >> RPG_ENABLE_BIT; + qcn->rppp_max_rps[i] = + be32_to_cpu(hw_qcn->rppp_max_rps); + qcn->rpg_time_reset[i] = + be32_to_cpu(hw_qcn->rpg_time_reset); + qcn->rpg_byte_reset[i] = + be32_to_cpu(hw_qcn->rpg_byte_reset); + qcn->rpg_threshold[i] = + be32_to_cpu(hw_qcn->rpg_threshold); + qcn->rpg_max_rate[i] = + be32_to_cpu(hw_qcn->rpg_max_rate); + qcn->rpg_ai_rate[i] = + be32_to_cpu(hw_qcn->rpg_ai_rate); + qcn->rpg_hai_rate[i] = + be32_to_cpu(hw_qcn->rpg_hai_rate); + qcn->rpg_gd[i] = + be32_to_cpu(hw_qcn->rpg_gd); + qcn->rpg_min_dec_fac[i] = + be32_to_cpu(hw_qcn->rpg_min_dec_fac); + qcn->rpg_min_rate[i] = + be32_to_cpu(hw_qcn->rpg_min_rate); + qcn->cndd_state_machine[i] = + priv->cndd_state[i]; + } + mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out); + return 0; +} + +static int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev, + struct ieee_qcn *qcn) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn; + struct mlx4_cmd_mailbox *mailbox_in = NULL; + u64 mailbox_in_dma = 0; + u32 inmod = 0; + int i, err; +#define MODIFY_ENABLE_HIGH_MASK 0xc0000000 +#define MODIFY_ENABLE_LOW_MASK 0xffc00000 + + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN)) + return -EOPNOTSUPP; + + mailbox_in = mlx4_alloc_cmd_mailbox(priv->mdev->dev); + if (IS_ERR(mailbox_in)) + return -ENOMEM; + + mailbox_in_dma = mailbox_in->dma; + hw_qcn = + (struct mlx4_congestion_control_mb_prio_802_1_qau_params *)mailbox_in->buf; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + inmod = priv->port | ((1 << i) << 8) | + (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16); + + /* Before updating QCN parameter, + * need to set it's modify enable bit to 1 + */ + + hw_qcn->modify_enable_high = cpu_to_be32( + 
MODIFY_ENABLE_HIGH_MASK); + hw_qcn->modify_enable_low = cpu_to_be32(MODIFY_ENABLE_LOW_MASK); + + hw_qcn->extended_enable = cpu_to_be32(qcn->rpg_enable[i] << RPG_ENABLE_BIT); + hw_qcn->rppp_max_rps = cpu_to_be32(qcn->rppp_max_rps[i]); + hw_qcn->rpg_time_reset = cpu_to_be32(qcn->rpg_time_reset[i]); + hw_qcn->rpg_byte_reset = cpu_to_be32(qcn->rpg_byte_reset[i]); + hw_qcn->rpg_threshold = cpu_to_be32(qcn->rpg_threshold[i]); + hw_qcn->rpg_max_rate = cpu_to_be32(qcn->rpg_max_rate[i]); + hw_qcn->rpg_ai_rate = cpu_to_be32(qcn->rpg_ai_rate[i]); + hw_qcn->rpg_hai_rate = cpu_to_be32(qcn->rpg_hai_rate[i]); + hw_qcn->rpg_gd = cpu_to_be32(qcn->rpg_gd[i]); + hw_qcn->rpg_min_dec_fac = cpu_to_be32(qcn->rpg_min_dec_fac[i]); + hw_qcn->rpg_min_rate = cpu_to_be32(qcn->rpg_min_rate[i]); + priv->cndd_state[i] = qcn->cndd_state_machine[i]; + if (qcn->cndd_state_machine[i] == DCB_CNDD_INTERIOR_READY) + hw_qcn->extended_enable |= cpu_to_be32(1 << CN_TAG_BIT); + + err = mlx4_cmd(priv->mdev->dev, mailbox_in_dma, inmod, + MLX4_CONGESTION_CONTROL_SET_PARAMS, + MLX4_CMD_CONGESTION_CTRL_OPCODE, + MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (err) { + mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in); + return err; + } + } + mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in); + return 0; +} + +static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev, + struct ieee_qcn_stats *qcn_stats) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *hw_qcn_stats; + struct mlx4_cmd_mailbox *mailbox_out = NULL; + u64 mailbox_in_dma = 0; + u32 inmod = 0; + int i, err; + + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN)) + return -EOPNOTSUPP; + + mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev); + if (IS_ERR(mailbox_out)) + return -ENOMEM; + + hw_qcn_stats = + (struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *) + mailbox_out->buf; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + inmod = priv->port | ((1 << i) << 8) | + (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16); + err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma, + mailbox_out->dma, inmod, + MLX4_CONGESTION_CONTROL_GET_STATISTICS, + MLX4_CMD_CONGESTION_CTRL_OPCODE, + MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (err) { + mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out); + return err; + } + qcn_stats->rppp_rp_centiseconds[i] = + be64_to_cpu(hw_qcn_stats->rppp_rp_centiseconds); + qcn_stats->rppp_created_rps[i] = + be32_to_cpu(hw_qcn_stats->rppp_created_rps); + } + mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out); + return 0; +} + const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = { .ieee_getets = mlx4_en_dcbnl_ieee_getets, .ieee_setets = mlx4_en_dcbnl_ieee_setets, @@ -252,6 +467,9 @@ const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = { .getdcbx = mlx4_en_dcbnl_getdcbx, .setdcbx = mlx4_en_dcbnl_setdcbx, + .ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn, + .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn, + .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats, }; const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = { diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 2a210c4..c59ed92 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1685,7 +1685,7 @@ int mlx4_en_start_port(struct net_device *dev) } /* Attach rx QP to bradcast address */ - memset(&mc_list[10], 0xff, ETH_ALEN); + eth_broadcast_addr(&mc_list[10]); mc_list[5] = priv->port; /* needed for B0 steering 
support */ if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, priv->port, 0, MLX4_PROT_ETH, @@ -1788,7 +1788,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) } /* Detach All multicasts */ - memset(&mc_list[10], 0xff, ETH_ALEN); + eth_broadcast_addr(&mc_list[10]); mc_list[5] = priv->port; /* needed for B0 steering support */ mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, MLX4_PROT_ETH, priv->broadcast_id); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index a61009f..b66e03d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c @@ -66,7 +66,7 @@ static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv) ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr)); packet = (unsigned char *)skb_put(skb, packet_size); memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN); - memset(ethh->h_source, 0, ETH_ALEN); + eth_zero_addr(ethh->h_source); ethh->h_proto = htons(ETH_P_ARP); skb_set_mac_header(skb, 0); for (i = 0; i < packet_size; ++i) /* fill our packet */ diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 5a21e5d..242bcee 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -143,7 +143,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [18] = "More than 80 VFs support", [19] = "Performance optimized for limited rule configuration flow steering support", [20] = "Recoverable error events support", - [21] = "Port Remap support" + [21] = "Port Remap support", + [22] = "QCN support" }; int i; @@ -675,7 +676,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77 #define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a -#define QUERY_DEV_CAP_ETH_PROT_CTRL_OFFSET 0x7a +#define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET 0x7b #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 @@ -777,6 +778,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB; MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET); dev_cap->fs_max_num_qp_per_entry = field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET); + if (field & 0x1) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN; MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); dev_cap->stat_rate_support = stat_rate; MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); @@ -1149,6 +1153,11 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, DEV_CAP_EXT_2_FLAG_FSM); MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); + /* turn off QCN for guests */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET); + field &= 0xfe; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET); + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 1409d0cd6..0b16db0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -175,7 +175,7 @@ enum mlx4_res_tracker_free_type { /* *Virtual HCR structures. 
- * mlx4_vhcr is the sw representation, in machine endianess + * mlx4_vhcr is the sw representation, in machine endianness * * mlx4_vhcr_cmd is the formalized structure, the one that is passed * to FW to go through communication channel. diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 2a8268e..94553b5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -608,6 +608,7 @@ struct mlx4_en_priv { #ifdef CONFIG_MLX4_EN_DCB struct ieee_ets ets; u16 maxrate[IEEE_8021QAZ_MAX_TCS]; + enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS]; #endif #ifdef CONFIG_RFS_ACCEL spinlock_t filters_lock; diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index d97ca88..d43e259 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -3027,7 +3027,7 @@ int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave, /* Call the SW implementation of write_mtt: * - Prepare a dummy mtt struct - * - Translate inbox contents to simple addresses in host endianess */ + * - Translate inbox contents to simple addresses in host endianness */ mtt.offset = 0; /* TBD this is broken but I don't handle it since we don't really use it */ mtt.order = 0; diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 10988fb..6f332eb 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -4144,7 +4144,7 @@ static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr) for (i = 0; i < hw->addr_list_size; i++) { if (ether_addr_equal(hw->address[i], mac_addr)) { - memset(hw->address[i], 0, ETH_ALEN); + eth_zero_addr(hw->address[i]); writel(0, hw->io + ADD_ADDR_INCR * i + KS_ADD_ADDR_0_HI); return 0; diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 6c72e74..81d0f1c 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -150,7 +150,7 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev) priv->rx_head = 0; - /* reset the MAC controler TX/RX desciptor base address */ + /* reset the MAC controller TX/RX desciptor base address */ writel(priv->tx_base, priv->base + REG_TXR_BASE_ADDRESS); writel(priv->rx_base, priv->base + REG_RXR_BASE_ADDRESS); } diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index a4cdf2f..092dcae 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -1343,7 +1343,7 @@ static int init_nic(struct s2io_nic *nic) TX_PA_CFG_IGNORE_L2_ERR; writeq(val64, &bar0->tx_pa_cfg); - /* Rx DMA intialization. */ + /* Rx DMA initialization. 
*/ val64 = 0; for (i = 0; i < config->rx_ring_num; i++) { struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c index 4fe8ea9..f6fcf74 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c @@ -394,7 +394,7 @@ static void pch_gbe_get_pauseparam(struct net_device *netdev, } /** - * pch_gbe_set_pauseparam - Set pause paramters + * pch_gbe_set_pauseparam - Set pause parameters * @netdev: Network interface device structure * @pause: Pause parameters structure * Returns: diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c index 319d9d4..13d88a6 100644 --- a/drivers/net/ethernet/packetengines/hamachi.c +++ b/drivers/net/ethernet/packetengines/hamachi.c @@ -350,7 +350,7 @@ V. Recent Changes incorrectly defined and corrected (as per Michel Mueller). 02/23/1999 EPK Corrected the Tx full check to check that at least 4 slots - were available before reseting the tbusy and tx_full flags + were available before resetting the tbusy and tx_full flags (as per Michel Mueller). 03/11/1999 EPK Added Pete Wyckoff's hardware checksumming support. diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c index 716fc37..db80eb1 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c @@ -537,7 +537,7 @@ static void netxen_p2_nic_set_multi(struct net_device *netdev) u8 null_addr[ETH_ALEN]; int i; - memset(null_addr, 0, ETH_ALEN); + eth_zero_addr(null_addr); if (netdev->flags & IFF_PROMISC) { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index f3346a3..69f828e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -205,7 +205,7 @@ struct qlcnic_add_rings_mbx_out { * @phys_addr_{low|high}: DMA address of the transmit buffer * @cnsmr_index_{low|high}: host consumer index * @size: legth of transmit buffer ring - * @intr_id: interrput id + * @intr_id: interrupt id * @src: src of interrupt */ struct qlcnic_tx_mbx { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 2bb48d5..33669c2 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -269,7 +269,7 @@ static int qlcnic_83xx_idc_clear_registers(struct qlcnic_adapter *adapter, } QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, 0); - /* Clear gracefull reset bit */ + /* Clear graceful reset bit */ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); val &= ~QLC_83XX_IDC_GRACEFULL_RESET; QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val); @@ -889,7 +889,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) * @adapter: adapter structure * * Device will remain in this state until: - * Reset request ACK's are recieved from all the functions + * Reset request ACK's are received from all the functions * Wait time exceeds max time limit * * Returns: Error code or Success(0) diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 8011ef3..25800a1 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ 
-460,7 +460,7 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set) netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Set Mac addr %pM\n", addr); } else { - memset(zero_mac_addr, 0, ETH_ALEN); + eth_zero_addr(zero_mac_addr); addr = &zero_mac_addr[0]; netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Clearing MAC address\n"); diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 2c811f6..4a42e96 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -571,7 +571,7 @@ qcaspi_spi_thread(void *data) } /* can only handle other interrupts - * if sync has occured + * if sync has occurred */ if (qca->sync == QCASPI_SYNC_READY) { if (intr_cause & SPI_INT_PKT_AVLBL) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 736d5d1..7fb244f 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -52,7 +52,12 @@ NETIF_MSG_RX_ERR| \ NETIF_MSG_TX_ERR) +#define SH_ETH_OFFSET_DEFAULTS \ + [0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID + static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + [EDSR] = 0x0000, [EDMR] = 0x0400, [EDTRR] = 0x0408, @@ -132,9 +137,6 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = { [TSU_POST3] = 0x0078, [TSU_POST4] = 0x007c, [TSU_ADRH0] = 0x0100, - [TSU_ADRL0] = 0x0104, - [TSU_ADRH31] = 0x01f8, - [TSU_ADRL31] = 0x01fc, [TXNLCR0] = 0x0080, [TXALCR0] = 0x0084, @@ -151,6 +153,8 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = { }; static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + [EDSR] = 0x0000, [EDMR] = 0x0400, [EDTRR] = 0x0408, @@ -199,9 +203,6 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = { [TSU_ADSBSY] = 0x0060, [TSU_TEN] = 0x0064, [TSU_ADRH0] = 0x0100, - [TSU_ADRL0] = 0x0104, - [TSU_ADRH31] = 0x01f8, - [TSU_ADRL31] = 0x01fc, [TXNLCR0] = 0x0080, [TXALCR0] = 0x0084, @@ -210,6 +211,8 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = { }; static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + [ECMR] = 0x0300, [RFLR] = 0x0308, [ECSR] = 0x0310, @@ -256,6 +259,8 @@ static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = { }; static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + [ECMR] = 0x0100, [RFLR] = 0x0108, [ECSR] = 0x0110, @@ -308,6 +313,8 @@ static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = { }; static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + [EDMR] = 0x0000, [EDTRR] = 0x0004, [EDRRR] = 0x0008, @@ -392,8 +399,6 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { [FWALCR1] = 0x00b4, [TSU_ADRH0] = 0x0100, - [TSU_ADRL0] = 0x0104, - [TSU_ADRL31] = 0x01fc, }; static void sh_eth_rcv_snd_disable(struct net_device *ndev); @@ -588,6 +593,7 @@ static struct sh_eth_cpu_data sh7757_data = { .no_ade = 1, .rpadir = 1, .rpadir_value = 2 << 16, + .rtrate = 1, }; #define SH_GIGA_ETH_BASE 0xfee00000UL @@ -1411,6 +1417,9 @@ static int sh_eth_txfree(struct net_device *ndev) break; /* TACT bit must be checked before all the following reads */ rmb(); + netif_info(mdp, tx_done, ndev, + "tx entry %d status 0x%08x\n", + entry, edmac_to_cpu(mdp, txdesc->status)); /* Free the original skb. 
*/ if (mdp->tx_skbuff[entry]) { dma_unmap_single(&ndev->dev, txdesc->addr, @@ -1456,6 +1465,10 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) if (--boguscnt < 0) break; + netif_info(mdp, rx_status, ndev, + "rx entry %d status 0x%08x len %d\n", + entry, desc_status, pkt_len); + if (!(desc_status & RDFEND)) ndev->stats.rx_length_errors++; @@ -1500,6 +1513,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) netif_receive_skb(skb); ndev->stats.rx_packets++; ndev->stats.rx_bytes += pkt_len; + if (desc_status & RD_RFS8) + ndev->stats.multicast++; } entry = (++mdp->cur_rx) % mdp->num_rx_ring; rxdesc = &mdp->rx_ring[entry]; @@ -1542,7 +1557,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) /* If we don't need to check status, don't. -KDU */ if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) { /* fix the values for the next receiving if RDE is set */ - if (intr_status & EESR_RDE && mdp->reg_offset[RDFAR] != 0) { + if (intr_status & EESR_RDE && + mdp->reg_offset[RDFAR] != SH_ETH_OFFSET_INVALID) { u32 count = (sh_eth_read(ndev, RDFAR) - sh_eth_read(ndev, RDLAR)) >> 4; @@ -1929,6 +1945,192 @@ error_exit: return ret; } +/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the + * version must be bumped as well. Just adding registers up to that + * limit is fine, as long as the existing register indices don't + * change. + */ +#define SH_ETH_REG_DUMP_VERSION 1 +#define SH_ETH_REG_DUMP_MAX_REGS 256 + +static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct sh_eth_cpu_data *cd = mdp->cd; + u32 *valid_map; + size_t len; + + BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS); + + /* Dump starts with a bitmap that tells ethtool which + * registers are defined for this chip. + */ + len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32); + if (buf) { + valid_map = buf; + buf += len; + } else { + valid_map = NULL; + } + + /* Add a register to the dump, if it has a defined offset. + * This automatically skips most undefined registers, but for + * some it is also necessary to check a capability flag in + * struct sh_eth_cpu_data. 
+ */ +#define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32) +#define add_reg_from(reg, read_expr) do { \ + if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) { \ + if (buf) { \ + mark_reg_valid(reg); \ + *buf++ = read_expr; \ + } \ + ++len; \ + } \ + } while (0) +#define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg)) +#define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg)) + + add_reg(EDSR); + add_reg(EDMR); + add_reg(EDTRR); + add_reg(EDRRR); + add_reg(EESR); + add_reg(EESIPR); + add_reg(TDLAR); + add_reg(TDFAR); + add_reg(TDFXR); + add_reg(TDFFR); + add_reg(RDLAR); + add_reg(RDFAR); + add_reg(RDFXR); + add_reg(RDFFR); + add_reg(TRSCER); + add_reg(RMFCR); + add_reg(TFTR); + add_reg(FDR); + add_reg(RMCR); + add_reg(TFUCR); + add_reg(RFOCR); + if (cd->rmiimode) + add_reg(RMIIMODE); + add_reg(FCFTR); + if (cd->rpadir) + add_reg(RPADIR); + if (!cd->no_trimd) + add_reg(TRIMD); + add_reg(ECMR); + add_reg(ECSR); + add_reg(ECSIPR); + add_reg(PIR); + if (!cd->no_psr) + add_reg(PSR); + add_reg(RDMLR); + add_reg(RFLR); + add_reg(IPGR); + if (cd->apr) + add_reg(APR); + if (cd->mpr) + add_reg(MPR); + add_reg(RFCR); + add_reg(RFCF); + if (cd->tpauser) + add_reg(TPAUSER); + add_reg(TPAUSECR); + add_reg(GECMR); + if (cd->bculr) + add_reg(BCULR); + add_reg(MAHR); + add_reg(MALR); + add_reg(TROCR); + add_reg(CDCR); + add_reg(LCCR); + add_reg(CNDCR); + add_reg(CEFCR); + add_reg(FRECR); + add_reg(TSFRCR); + add_reg(TLFRCR); + add_reg(CERCR); + add_reg(CEECR); + add_reg(MAFCR); + if (cd->rtrate) + add_reg(RTRATE); + if (cd->hw_crc) + add_reg(CSMR); + if (cd->select_mii) + add_reg(RMII_MII); + add_reg(ARSTR); + if (cd->tsu) { + add_tsu_reg(TSU_CTRST); + add_tsu_reg(TSU_FWEN0); + add_tsu_reg(TSU_FWEN1); + add_tsu_reg(TSU_FCM); + add_tsu_reg(TSU_BSYSL0); + add_tsu_reg(TSU_BSYSL1); + add_tsu_reg(TSU_PRISL0); + add_tsu_reg(TSU_PRISL1); + add_tsu_reg(TSU_FWSL0); + add_tsu_reg(TSU_FWSL1); + add_tsu_reg(TSU_FWSLC); + add_tsu_reg(TSU_QTAG0); + add_tsu_reg(TSU_QTAG1); + add_tsu_reg(TSU_QTAGM0); + add_tsu_reg(TSU_QTAGM1); + add_tsu_reg(TSU_FWSR); + add_tsu_reg(TSU_FWINMK); + add_tsu_reg(TSU_ADQT0); + add_tsu_reg(TSU_ADQT1); + add_tsu_reg(TSU_VTAG0); + add_tsu_reg(TSU_VTAG1); + add_tsu_reg(TSU_ADSBSY); + add_tsu_reg(TSU_TEN); + add_tsu_reg(TSU_POST1); + add_tsu_reg(TSU_POST2); + add_tsu_reg(TSU_POST3); + add_tsu_reg(TSU_POST4); + if (mdp->reg_offset[TSU_ADRH0] != SH_ETH_OFFSET_INVALID) { + /* This is the start of a table, not just a single + * register. 
+ */ + if (buf) { + unsigned int i; + + mark_reg_valid(TSU_ADRH0); + for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++) + *buf++ = ioread32( + mdp->tsu_addr + + mdp->reg_offset[TSU_ADRH0] + + i * 4); + } + len += SH_ETH_TSU_CAM_ENTRIES * 2; + } + } + +#undef mark_reg_valid +#undef add_reg_from +#undef add_reg +#undef add_tsu_reg + + return len * 4; +} + +static int sh_eth_get_regs_len(struct net_device *ndev) +{ + return __sh_eth_get_regs(ndev, NULL); +} + +static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs, + void *buf) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + regs->version = SH_ETH_REG_DUMP_VERSION; + + pm_runtime_get_sync(&mdp->pdev->dev); + __sh_eth_get_regs(ndev, buf); + pm_runtime_put_sync(&mdp->pdev->dev); +} + static int sh_eth_nway_reset(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -2074,6 +2276,8 @@ static int sh_eth_set_ringparam(struct net_device *ndev, static const struct ethtool_ops sh_eth_ethtool_ops = { .get_settings = sh_eth_get_settings, .set_settings = sh_eth_set_settings, + .get_regs_len = sh_eth_get_regs_len, + .get_regs = sh_eth_get_regs, .nway_reset = sh_eth_nway_reset, .get_msglevel = sh_eth_get_msglevel, .set_msglevel = sh_eth_set_msglevel, @@ -2213,6 +2417,22 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_OK; } +/* The statistics registers have write-clear behaviour, which means we + * will lose any increment between the read and write. We mitigate + * this by only clearing when we read a non-zero value, so we will + * never falsely report a total of zero. + */ +static void +sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg) +{ + u32 delta = sh_eth_read(ndev, reg); + + if (delta) { + *stat += delta; + sh_eth_write(ndev, 0, reg); + } +} + static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -2223,21 +2443,18 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) if (!mdp->is_opened) return &ndev->stats; - ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR); - sh_eth_write(ndev, 0, TROCR); /* (write clear) */ - ndev->stats.collisions += sh_eth_read(ndev, CDCR); - sh_eth_write(ndev, 0, CDCR); /* (write clear) */ - ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR); - sh_eth_write(ndev, 0, LCCR); /* (write clear) */ + sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR); + sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR); + sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR); if (sh_eth_is_gether(mdp)) { - ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR); - sh_eth_write(ndev, 0, CERCR); /* (write clear) */ - ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR); - sh_eth_write(ndev, 0, CEECR); /* (write clear) */ + sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, + CERCR); + sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, + CEECR); } else { - ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR); - sh_eth_write(ndev, 0, CNDCR); /* (write clear) */ + sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, + CNDCR); } return &ndev->stats; diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 259d03f..06dbbe5 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -32,6 +32,10 @@ #define SH_ETH_TSU_CAM_ENTRIES 32 enum { + /* IMPORTANT: To keep ethtool register dump working, add new + * 
register names immediately before SH_ETH_MAX_REGISTER_OFFSET. + */ + /* E-DMAC registers */ EDSR = 0, EDMR, @@ -131,9 +135,7 @@ enum { TSU_POST3, TSU_POST4, TSU_ADRH0, - TSU_ADRL0, - TSU_ADRH31, - TSU_ADRL31, + /* TSU_ADR{H,L}{0..31} are assumed to be contiguous */ TXNLCR0, TXALCR0, @@ -491,6 +493,7 @@ struct sh_eth_cpu_data { unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */ unsigned shift_rd0:1; /* shift Rx descriptor word 0 right by 16 */ unsigned rmiimode:1; /* EtherC has RMIIMODE register */ + unsigned rtrate:1; /* EtherC has RTRATE register */ }; struct sh_eth_private { @@ -543,19 +546,29 @@ static inline void sh_eth_soft_swap(char *src, int len) #endif } +#define SH_ETH_OFFSET_INVALID ((u16) ~0) + static inline void sh_eth_write(struct net_device *ndev, u32 data, int enum_index) { struct sh_eth_private *mdp = netdev_priv(ndev); + u16 offset = mdp->reg_offset[enum_index]; - iowrite32(data, mdp->addr + mdp->reg_offset[enum_index]); + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) + return; + + iowrite32(data, mdp->addr + offset); } static inline u32 sh_eth_read(struct net_device *ndev, int enum_index) { struct sh_eth_private *mdp = netdev_priv(ndev); + u16 offset = mdp->reg_offset[enum_index]; + + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) + return ~0U; - return ioread32(mdp->addr + mdp->reg_offset[enum_index]); + return ioread32(mdp->addr + offset); } static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp, diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 9fb6948..65e1403 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -32,6 +32,9 @@ #include <linux/bitops.h> #include <net/switchdev.h> #include <net/rtnetlink.h> +#include <net/ip_fib.h> +#include <net/netevent.h> +#include <net/arp.h> #include <asm-generic/io-64-nonatomic-lo-hi.h> #include <generated/utsrelease.h> @@ -49,12 +52,12 @@ struct rocker_flow_tbl_key { enum rocker_of_dpa_table_id tbl_id; union { struct { - u32 in_lport; - u32 in_lport_mask; + u32 in_pport; + u32 in_pport_mask; enum rocker_of_dpa_table_id goto_tbl; } ig_port; struct { - u32 in_lport; + u32 in_pport; __be16 vlan_id; __be16 vlan_id_mask; enum rocker_of_dpa_table_id goto_tbl; @@ -62,8 +65,8 @@ struct rocker_flow_tbl_key { __be16 new_vlan_id; } vlan; struct { - u32 in_lport; - u32 in_lport_mask; + u32 in_pport; + u32 in_pport_mask; __be16 eth_type; u8 eth_dst[ETH_ALEN]; u8 eth_dst_mask[ETH_ALEN]; @@ -91,8 +94,8 @@ struct rocker_flow_tbl_key { bool copy_to_cpu; } bridge; struct { - u32 in_lport; - u32 in_lport_mask; + u32 in_pport; + u32 in_pport_mask; u8 eth_src[ETH_ALEN]; u8 eth_src_mask[ETH_ALEN]; u8 eth_dst[ETH_ALEN]; @@ -111,9 +114,10 @@ struct rocker_flow_tbl_key { struct rocker_flow_tbl_entry { struct hlist_node entry; - u32 ref_count; + u32 cmd; u64 cookie; struct rocker_flow_tbl_key key; + size_t key_len; u32 key_crc32; /* key */ }; @@ -148,7 +152,7 @@ struct rocker_fdb_tbl_entry { u32 key_crc32; /* key */ bool learned; struct rocker_fdb_tbl_key { - u32 lport; + u32 pport; u8 addr[ETH_ALEN]; __be16 vlan_id; } key; @@ -161,6 +165,16 @@ struct rocker_internal_vlan_tbl_entry { __be16 vlan_id; }; +struct rocker_neigh_tbl_entry { + struct hlist_node entry; + __be32 ip_addr; /* key */ + struct net_device *dev; + u32 ref_count; + u32 index; + u8 eth_dst[ETH_ALEN]; + bool ttl_check; +}; + struct rocker_desc_info { char *data; /* mapped */ size_t data_size; @@ -200,7 +214,7 @@ struct rocker_port { struct net_device *bridge_dev; struct 
rocker *rocker; unsigned int port_number; - u32 lport; + u32 pport; __be16 internal_vlan_id; int stp_state; u32 brport_flags; @@ -234,6 +248,9 @@ struct rocker { unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN]; DECLARE_HASHTABLE(internal_vlan_tbl, 8); spinlock_t internal_vlan_tbl_lock; + DECLARE_HASHTABLE(neigh_tbl, 16); + spinlock_t neigh_tbl_lock; + u32 neigh_tbl_next_index; }; static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; @@ -256,7 +273,6 @@ enum { ROCKER_PRIORITY_VLAN = 1, ROCKER_PRIORITY_TERM_MAC_UCAST = 0, ROCKER_PRIORITY_TERM_MAC_MCAST = 1, - ROCKER_PRIORITY_UNICAST_ROUTING = 1, ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1, ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2, ROCKER_PRIORITY_BRIDGING_VLAN = 3, @@ -789,7 +805,30 @@ static u32 __pos_inc(u32 pos, size_t limit) static int rocker_desc_err(struct rocker_desc_info *desc_info) { - return -(desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN); + int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN; + + switch (err) { + case ROCKER_OK: + return 0; + case -ROCKER_ENOENT: + return -ENOENT; + case -ROCKER_ENXIO: + return -ENXIO; + case -ROCKER_ENOMEM: + return -ENOMEM; + case -ROCKER_EEXIST: + return -EEXIST; + case -ROCKER_EINVAL: + return -EINVAL; + case -ROCKER_EMSGSIZE: + return -EMSGSIZE; + case -ROCKER_ENOTSUP: + return -EOPNOTSUPP; + case -ROCKER_ENOBUFS: + return -ENOBUFS; + } + + return -EINVAL; } static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info) @@ -1257,9 +1296,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable) u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); if (enable) - val |= 1ULL << rocker_port->lport; + val |= 1ULL << rocker_port->pport; else - val &= ~(1ULL << rocker_port->lport); + val &= ~(1ULL << rocker_port->pport); rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); } @@ -1312,11 +1351,11 @@ static int rocker_event_link_change(struct rocker *rocker, struct rocker_port *rocker_port; rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info); - if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT] || + if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] || !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]) return -EIO; port_number = - rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT]) - 1; + rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1; link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]); if (port_number >= rocker->port_count) @@ -1353,12 +1392,12 @@ static int rocker_event_mac_vlan_seen(struct rocker *rocker, __be16 vlan_id; rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info); - if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT] || + if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] || !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] || !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]) return -EIO; port_number = - rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT]) - 1; + rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1; addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]); vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]); @@ -1517,8 +1556,8 @@ rocker_cmd_get_port_settings_prep(struct rocker *rocker, cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); if (!cmd_info) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, - rocker_port->lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + 
rocker_port->pport)) return -EMSGSIZE; rocker_tlv_nest_end(desc_info, cmd_info); return 0; @@ -1606,8 +1645,8 @@ rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker, cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); if (!cmd_info) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, - rocker_port->lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) return -EMSGSIZE; if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, ethtool_cmd_speed(ecmd))) @@ -1637,8 +1676,8 @@ rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker, cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); if (!cmd_info) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, - rocker_port->lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) return -EMSGSIZE; if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, ETH_ALEN, macaddr)) @@ -1661,8 +1700,8 @@ rocker_cmd_set_port_learning_prep(struct rocker *rocker, cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); if (!cmd_info) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, - rocker_port->lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) return -EMSGSIZE; if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, !!(rocker_port->brport_flags & BR_LEARNING))) @@ -1715,11 +1754,11 @@ static int rocker_port_set_learning(struct rocker_port *rocker_port) static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, struct rocker_flow_tbl_entry *entry) { - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT, - entry->key.ig_port.in_lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.ig_port.in_pport)) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK, - entry->key.ig_port.in_lport_mask)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.ig_port.in_pport_mask)) return -EMSGSIZE; if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, entry->key.ig_port.goto_tbl)) @@ -1731,8 +1770,8 @@ static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info, struct rocker_flow_tbl_entry *entry) { - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT, - entry->key.vlan.in_lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.vlan.in_pport)) return -EMSGSIZE; if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, entry->key.vlan.vlan_id)) @@ -1754,11 +1793,11 @@ static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info, static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info, struct rocker_flow_tbl_entry *entry) { - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT, - entry->key.term_mac.in_lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.term_mac.in_pport)) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK, - entry->key.term_mac.in_lport_mask)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.term_mac.in_pport_mask)) return -EMSGSIZE; if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, 
entry->key.term_mac.eth_type)) @@ -1845,11 +1884,11 @@ static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info, static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info, struct rocker_flow_tbl_entry *entry) { - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT, - entry->key.acl.in_lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.acl.in_pport)) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK, - entry->key.acl.in_lport_mask)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.acl.in_pport_mask)) return -EMSGSIZE; if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, ETH_ALEN, entry->key.acl.eth_src)) @@ -1917,8 +1956,7 @@ static int rocker_cmd_flow_tbl_add(struct rocker *rocker, struct rocker_tlv *cmd_info; int err = 0; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, - ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD)) + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) return -EMSGSIZE; cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); if (!cmd_info) @@ -1975,8 +2013,7 @@ static int rocker_cmd_flow_tbl_del(struct rocker *rocker, const struct rocker_flow_tbl_entry *entry = priv; struct rocker_tlv *cmd_info; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, - ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL)) + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) return -EMSGSIZE; cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); if (!cmd_info) @@ -1993,7 +2030,7 @@ static int rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info, struct rocker_group_tbl_entry *entry) { - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_LPORT, + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT, ROCKER_GROUP_PORT_GET(entry->group_id))) return -EMSGSIZE; if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN, @@ -2145,9 +2182,9 @@ static int rocker_cmd_group_tbl_del(struct rocker *rocker, return 0; } -/***************************************** - * Flow, group, FDB, internal VLAN tables - *****************************************/ +/*************************************************** + * Flow, group, FDB, internal VLAN and neigh tables + ***************************************************/ static int rocker_init_tbls(struct rocker *rocker) { @@ -2163,6 +2200,9 @@ static int rocker_init_tbls(struct rocker *rocker) hash_init(rocker->internal_vlan_tbl); spin_lock_init(&rocker->internal_vlan_tbl_lock); + hash_init(rocker->neigh_tbl); + spin_lock_init(&rocker->neigh_tbl_lock); + return 0; } @@ -2173,6 +2213,7 @@ static void rocker_free_tbls(struct rocker *rocker) struct rocker_group_tbl_entry *group_entry; struct rocker_fdb_tbl_entry *fdb_entry; struct rocker_internal_vlan_tbl_entry *internal_vlan_entry; + struct rocker_neigh_tbl_entry *neigh_entry; struct hlist_node *tmp; int bkt; @@ -2196,16 +2237,22 @@ static void rocker_free_tbls(struct rocker *rocker) tmp, internal_vlan_entry, entry) hash_del(&internal_vlan_entry->entry); spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags); + + spin_lock_irqsave(&rocker->neigh_tbl_lock, flags); + hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry) + hash_del(&neigh_entry->entry); + spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags); } static struct rocker_flow_tbl_entry * rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match) { struct rocker_flow_tbl_entry *found; + 
size_t key_len = match->key_len ? match->key_len : sizeof(found->key); hash_for_each_possible(rocker->flow_tbl, found, entry, match->key_crc32) { - if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0) + if (memcmp(&found->key, &match->key, key_len) == 0) return found; } @@ -2218,42 +2265,34 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port, { struct rocker *rocker = rocker_port->rocker; struct rocker_flow_tbl_entry *found; + size_t key_len = match->key_len ? match->key_len : sizeof(found->key); unsigned long flags; - bool add_to_hw = false; - int err = 0; - match->key_crc32 = crc32(~0, &match->key, sizeof(match->key)); + match->key_crc32 = crc32(~0, &match->key, key_len); spin_lock_irqsave(&rocker->flow_tbl_lock, flags); found = rocker_flow_tbl_find(rocker, match); if (found) { - kfree(match); + match->cookie = found->cookie; + hash_del(&found->entry); + kfree(found); + found = match; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD; } else { found = match; found->cookie = rocker->flow_tbl_next_cookie++; - hash_add(rocker->flow_tbl, &found->entry, found->key_crc32); - add_to_hw = true; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD; } - found->ref_count++; + hash_add(rocker->flow_tbl, &found->entry, found->key_crc32); spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); - if (add_to_hw) { - err = rocker_cmd_exec(rocker, rocker_port, - rocker_cmd_flow_tbl_add, - found, NULL, NULL, nowait); - if (err) { - spin_lock_irqsave(&rocker->flow_tbl_lock, flags); - hash_del(&found->entry); - spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); - kfree(found); - } - } - - return err; + return rocker_cmd_exec(rocker, rocker_port, + rocker_cmd_flow_tbl_add, + found, NULL, NULL, nowait); } static int rocker_flow_tbl_del(struct rocker_port *rocker_port, @@ -2262,29 +2301,26 @@ static int rocker_flow_tbl_del(struct rocker_port *rocker_port, { struct rocker *rocker = rocker_port->rocker; struct rocker_flow_tbl_entry *found; + size_t key_len = match->key_len ? 
match->key_len : sizeof(found->key); unsigned long flags; - bool del_from_hw = false; int err = 0; - match->key_crc32 = crc32(~0, &match->key, sizeof(match->key)); + match->key_crc32 = crc32(~0, &match->key, key_len); spin_lock_irqsave(&rocker->flow_tbl_lock, flags); found = rocker_flow_tbl_find(rocker, match); if (found) { - found->ref_count--; - if (found->ref_count == 0) { - hash_del(&found->entry); - del_from_hw = true; - } + hash_del(&found->entry); + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL; } spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); kfree(match); - if (del_from_hw) { + if (found) { err = rocker_cmd_exec(rocker, rocker_port, rocker_cmd_flow_tbl_del, found, NULL, NULL, nowait); @@ -2311,7 +2347,7 @@ static int rocker_flow_tbl_do(struct rocker_port *rocker_port, } static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port, - int flags, u32 in_lport, u32 in_lport_mask, + int flags, u32 in_pport, u32 in_pport_mask, enum rocker_of_dpa_table_id goto_tbl) { struct rocker_flow_tbl_entry *entry; @@ -2322,15 +2358,15 @@ static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port, entry->key.priority = ROCKER_PRIORITY_IG_PORT; entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT; - entry->key.ig_port.in_lport = in_lport; - entry->key.ig_port.in_lport_mask = in_lport_mask; + entry->key.ig_port.in_pport = in_pport; + entry->key.ig_port.in_pport_mask = in_pport_mask; entry->key.ig_port.goto_tbl = goto_tbl; return rocker_flow_tbl_do(rocker_port, flags, entry); } static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port, - int flags, u32 in_lport, + int flags, u32 in_pport, __be16 vlan_id, __be16 vlan_id_mask, enum rocker_of_dpa_table_id goto_tbl, bool untagged, __be16 new_vlan_id) @@ -2343,7 +2379,7 @@ static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port, entry->key.priority = ROCKER_PRIORITY_VLAN; entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN; - entry->key.vlan.in_lport = in_lport; + entry->key.vlan.in_pport = in_pport; entry->key.vlan.vlan_id = vlan_id; entry->key.vlan.vlan_id_mask = vlan_id_mask; entry->key.vlan.goto_tbl = goto_tbl; @@ -2355,7 +2391,7 @@ static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port, } static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port, - u32 in_lport, u32 in_lport_mask, + u32 in_pport, u32 in_pport_mask, __be16 eth_type, const u8 *eth_dst, const u8 *eth_dst_mask, __be16 vlan_id, __be16 vlan_id_mask, bool copy_to_cpu, @@ -2378,8 +2414,8 @@ static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port, } entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; - entry->key.term_mac.in_lport = in_lport; - entry->key.term_mac.in_lport_mask = in_lport_mask; + entry->key.term_mac.in_pport = in_pport; + entry->key.term_mac.in_pport_mask = in_pport_mask; entry->key.term_mac.eth_type = eth_type; ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst); ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask); @@ -2444,9 +2480,34 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port, return rocker_flow_tbl_do(rocker_port, flags, entry); } +static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port, + __be16 eth_type, __be32 dst, + __be32 dst_mask, u32 priority, + enum rocker_of_dpa_table_id goto_tbl, + u32 group_id, int flags) +{ + struct rocker_flow_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; + entry->key.priority = 
priority; + entry->key.ucast_routing.eth_type = eth_type; + entry->key.ucast_routing.dst4 = dst; + entry->key.ucast_routing.dst4_mask = dst_mask; + entry->key.ucast_routing.goto_tbl = goto_tbl; + entry->key.ucast_routing.group_id = group_id; + entry->key_len = offsetof(struct rocker_flow_tbl_key, + ucast_routing.group_id); + + return rocker_flow_tbl_do(rocker_port, flags, entry); +} + static int rocker_flow_tbl_acl(struct rocker_port *rocker_port, - int flags, u32 in_lport, - u32 in_lport_mask, + int flags, u32 in_pport, + u32 in_pport_mask, const u8 *eth_src, const u8 *eth_src_mask, const u8 *eth_dst, const u8 *eth_dst_mask, __be16 eth_type, @@ -2472,8 +2533,8 @@ static int rocker_flow_tbl_acl(struct rocker_port *rocker_port, entry->key.priority = priority; entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - entry->key.acl.in_lport = in_lport; - entry->key.acl.in_lport_mask = in_lport_mask; + entry->key.acl.in_pport = in_pport; + entry->key.acl.in_pport_mask = in_pport_mask; if (eth_src) ether_addr_copy(entry->key.acl.eth_src, eth_src); @@ -2531,7 +2592,6 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port, struct rocker *rocker = rocker_port->rocker; struct rocker_group_tbl_entry *found; unsigned long flags; - int err = 0; spin_lock_irqsave(&rocker->group_tbl_lock, flags); @@ -2551,12 +2611,9 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port, spin_unlock_irqrestore(&rocker->group_tbl_lock, flags); - if (found->cmd) - err = rocker_cmd_exec(rocker, rocker_port, - rocker_cmd_group_tbl_add, - found, NULL, NULL, nowait); - - return err; + return rocker_cmd_exec(rocker, rocker_port, + rocker_cmd_group_tbl_add, + found, NULL, NULL, nowait); } static int rocker_group_tbl_del(struct rocker_port *rocker_port, @@ -2604,7 +2661,7 @@ static int rocker_group_tbl_do(struct rocker_port *rocker_port, static int rocker_group_l2_interface(struct rocker_port *rocker_port, int flags, __be16 vlan_id, - u32 out_lport, int pop_vlan) + u32 out_pport, int pop_vlan) { struct rocker_group_tbl_entry *entry; @@ -2612,7 +2669,7 @@ static int rocker_group_l2_interface(struct rocker_port *rocker_port, if (!entry) return -ENOMEM; - entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport); + entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); entry->l2_interface.pop_vlan = pop_vlan; return rocker_group_tbl_do(rocker_port, flags, entry); @@ -2652,13 +2709,253 @@ static int rocker_group_l2_flood(struct rocker_port *rocker_port, group_id); } +static int rocker_group_l3_unicast(struct rocker_port *rocker_port, + int flags, u32 index, u8 *src_mac, + u8 *dst_mac, __be16 vlan_id, + bool ttl_check, u32 pport) +{ + struct rocker_group_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + entry->group_id = ROCKER_GROUP_L3_UNICAST(index); + if (src_mac) + ether_addr_copy(entry->l3_unicast.eth_src, src_mac); + if (dst_mac) + ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac); + entry->l3_unicast.vlan_id = vlan_id; + entry->l3_unicast.ttl_check = ttl_check; + entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport); + + return rocker_group_tbl_do(rocker_port, flags, entry); +} + +static struct rocker_neigh_tbl_entry * + rocker_neigh_tbl_find(struct rocker *rocker, __be32 ip_addr) +{ + struct rocker_neigh_tbl_entry *found; + + hash_for_each_possible(rocker->neigh_tbl, found, + entry, be32_to_cpu(ip_addr)) + if (found->ip_addr == ip_addr) + return found; + + return NULL; +} + +static void 
_rocker_neigh_add(struct rocker *rocker, + struct rocker_neigh_tbl_entry *entry) +{ + entry->index = rocker->neigh_tbl_next_index++; + entry->ref_count++; + hash_add(rocker->neigh_tbl, &entry->entry, + be32_to_cpu(entry->ip_addr)); +} + +static void _rocker_neigh_del(struct rocker *rocker, + struct rocker_neigh_tbl_entry *entry) +{ + if (--entry->ref_count == 0) { + hash_del(&entry->entry); + kfree(entry); + } +} + +static void _rocker_neigh_update(struct rocker *rocker, + struct rocker_neigh_tbl_entry *entry, + u8 *eth_dst, bool ttl_check) +{ + if (eth_dst) { + ether_addr_copy(entry->eth_dst, eth_dst); + entry->ttl_check = ttl_check; + } else { + entry->ref_count++; + } +} + +static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port, + int flags, __be32 ip_addr, u8 *eth_dst) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_neigh_tbl_entry *entry; + struct rocker_neigh_tbl_entry *found; + unsigned long lock_flags; + __be16 eth_type = htons(ETH_P_IP); + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 group_id; + u32 priority = 0; + bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); + bool updating; + bool removing; + int err = 0; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags); + + found = rocker_neigh_tbl_find(rocker, ip_addr); + + updating = found && adding; + removing = found && !adding; + adding = !found && adding; + + if (adding) { + entry->ip_addr = ip_addr; + entry->dev = rocker_port->dev; + ether_addr_copy(entry->eth_dst, eth_dst); + entry->ttl_check = true; + _rocker_neigh_add(rocker, entry); + } else if (removing) { + memcpy(entry, found, sizeof(*entry)); + _rocker_neigh_del(rocker, found); + } else if (updating) { + _rocker_neigh_update(rocker, found, eth_dst, true); + memcpy(entry, found, sizeof(*entry)); + } else { + err = -ENOENT; + } + + spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags); + + if (err) + goto err_out; + + /* For each active neighbor, we have an L3 unicast group and + * a /32 route to the neighbor, which uses the L3 unicast + * group. The L3 unicast group can also be referred to by + * other routes' nexthops. + */ + + err = rocker_group_l3_unicast(rocker_port, flags, + entry->index, + rocker_port->dev->dev_addr, + entry->eth_dst, + rocker_port->internal_vlan_id, + entry->ttl_check, + rocker_port->pport); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) L3 unicast group index %d\n", + err, entry->index); + goto err_out; + } + + if (adding || removing) { + group_id = ROCKER_GROUP_L3_UNICAST(entry->index); + err = rocker_flow_tbl_ucast4_routing(rocker_port, + eth_type, ip_addr, + inet_make_mask(32), + priority, goto_tbl, + group_id, flags); + + if (err) + netdev_err(rocker_port->dev, + "Error (%d) /32 unicast route %pI4 group 0x%08x\n", + err, &entry->ip_addr, group_id); + } + +err_out: + if (!adding) + kfree(entry); + + return err; +} + +static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port, + __be32 ip_addr) +{ + struct net_device *dev = rocker_port->dev; + struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr); + int err = 0; + + if (!n) + n = neigh_create(&arp_tbl, &ip_addr, dev); + if (!n) + return -ENOMEM; + + /* If the neigh is already resolved, then go ahead and + * install the entry, otherwise start the ARP process to + * resolve the neigh. 
+ */ + + if (n->nud_state & NUD_VALID) + err = rocker_port_ipv4_neigh(rocker_port, 0, ip_addr, n->ha); + else + neigh_event_send(n, NULL); + + return err; +} + +static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags, + __be32 ip_addr, u32 *index) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_neigh_tbl_entry *entry; + struct rocker_neigh_tbl_entry *found; + unsigned long lock_flags; + bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); + bool updating; + bool removing; + bool resolved = true; + int err = 0; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags); + + found = rocker_neigh_tbl_find(rocker, ip_addr); + if (found) + *index = found->index; + + updating = found && adding; + removing = found && !adding; + adding = !found && adding; + + if (adding) { + entry->ip_addr = ip_addr; + entry->dev = rocker_port->dev; + _rocker_neigh_add(rocker, entry); + *index = entry->index; + resolved = false; + } else if (removing) { + _rocker_neigh_del(rocker, found); + } else if (updating) { + _rocker_neigh_update(rocker, found, NULL, false); + resolved = !is_zero_ether_addr(found->eth_dst); + } else { + err = -ENOENT; + } + + spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags); + + if (!adding) + kfree(entry); + + if (err) + return err; + + /* Resolved means neigh ip_addr is resolved to neigh mac. */ + + if (!resolved) + err = rocker_port_ipv4_resolve(rocker_port, ip_addr); + + return err; +} + static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port, int flags, __be16 vlan_id) { struct rocker_port *p; struct rocker *rocker = rocker_port->rocker; u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); - u32 group_ids[rocker->port_count]; + u32 group_ids[ROCKER_FP_PORTS_MAX]; u8 group_count = 0; int err; int i; @@ -2674,8 +2971,7 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port, continue; if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) { group_ids[group_count++] = - ROCKER_GROUP_L2_INTERFACE(vlan_id, - p->lport); + ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport); } } @@ -2700,7 +2996,7 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port, struct rocker *rocker = rocker_port->rocker; struct rocker_port *p; bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); - u32 out_lport; + u32 out_pport; int ref = 0; int err; int i; @@ -2711,14 +3007,14 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port, if (rocker_port->stp_state == BR_STATE_LEARNING || rocker_port->stp_state == BR_STATE_FORWARDING) { - out_lport = rocker_port->lport; + out_pport = rocker_port->pport; err = rocker_group_l2_interface(rocker_port, flags, - vlan_id, out_lport, + vlan_id, out_pport, pop_vlan); if (err) { netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 group for lport %d\n", - err, out_lport); + "Error (%d) port VLAN l2 group for pport %d\n", + err, out_pport); return err; } } @@ -2737,9 +3033,9 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port, if ((!adding || ref != 1) && (adding || ref != 0)) return 0; - out_lport = 0; + out_pport = 0; err = rocker_group_l2_interface(rocker_port, flags, - vlan_id, out_lport, + vlan_id, out_pport, pop_vlan); if (err) { netdev_err(rocker_port->dev, @@ -2799,9 +3095,9 @@ static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port, int flags, struct rocker_ctrl *ctrl, __be16 vlan_id) { - u32 in_lport = rocker_port->lport; - u32 in_lport_mask = 0xffffffff; 
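/* A minimal user-space sketch of the reference-counted neighbour table
 * driving _rocker_neigh_add()/_rocker_neigh_del() and rocker_port_ipv4_nh()
 * in the hunks above: one entry per next-hop IP, shared by every route that
 * points at it, freed only when the last reference is dropped.  The names
 * neigh_entry, neigh_add and neigh_put are illustrative and not part of the
 * driver, which additionally takes rocker->neigh_tbl_lock, records the
 * resolved MAC and hands out an L3 unicast group index.
 */
#include <stdint.h>
#include <stdlib.h>

struct neigh_entry {
	uint32_t ip_addr;		/* key */
	unsigned int ref_count;		/* number of routes using this hop */
	struct neigh_entry *next;	/* single hash bucket, for brevity */
};

static struct neigh_entry *bucket;

static struct neigh_entry *neigh_find(uint32_t ip_addr)
{
	struct neigh_entry *e;

	for (e = bucket; e; e = e->next)
		if (e->ip_addr == ip_addr)
			return e;
	return NULL;
}

/* Take (or create) a reference for a route whose next hop is ip_addr. */
static struct neigh_entry *neigh_add(uint32_t ip_addr)
{
	struct neigh_entry *e = neigh_find(ip_addr);

	if (e) {
		e->ref_count++;		/* existing entry: just bump the count */
		return e;
	}
	e = calloc(1, sizeof(*e));
	if (!e)
		return NULL;
	e->ip_addr = ip_addr;
	e->ref_count = 1;		/* new, still unresolved entry */
	e->next = bucket;
	bucket = e;
	return e;
}

/* Drop one reference; unlink and free on the last user. */
static void neigh_put(struct neigh_entry *e)
{
	struct neigh_entry **p;

	if (--e->ref_count)
		return;
	for (p = &bucket; *p; p = &(*p)->next) {
		if (*p == e) {
			*p = e->next;
			free(e);
			return;
		}
	}
}

int main(void)
{
	struct neigh_entry *nh = neigh_add(0x0a000001);	/* 10.0.0.1 */

	neigh_add(0x0a000001);	/* a second route shares the same entry */
	neigh_put(nh);		/* entry survives: one reference left */
	neigh_put(nh);		/* last reference: entry is freed */
	return 0;
}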
- u32 out_lport = 0; + u32 in_pport = rocker_port->pport; + u32 in_pport_mask = 0xffffffff; + u32 out_pport = 0; u8 *eth_src = NULL; u8 *eth_src_mask = NULL; __be16 vlan_id_mask = htons(0xffff); @@ -2809,11 +3105,11 @@ static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port, u8 ip_proto_mask = 0; u8 ip_tos = 0; u8 ip_tos_mask = 0; - u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport); + u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); int err; err = rocker_flow_tbl_acl(rocker_port, flags, - in_lport, in_lport_mask, + in_pport, in_pport_mask, eth_src, eth_src_mask, ctrl->eth_dst, ctrl->eth_dst_mask, ctrl->eth_type, @@ -2856,7 +3152,7 @@ static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port, int flags, struct rocker_ctrl *ctrl, __be16 vlan_id) { - u32 in_lport_mask = 0xffffffff; + u32 in_pport_mask = 0xffffffff; __be16 vlan_id_mask = htons(0xffff); int err; @@ -2864,7 +3160,7 @@ static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port, vlan_id = rocker_port->internal_vlan_id; err = rocker_flow_tbl_term_mac(rocker_port, - rocker_port->lport, in_lport_mask, + rocker_port->pport, in_pport_mask, ctrl->eth_type, ctrl->eth_dst, ctrl->eth_dst_mask, vlan_id, vlan_id_mask, ctrl->copy_to_cpu, @@ -2934,7 +3230,7 @@ static int rocker_port_vlan(struct rocker_port *rocker_port, int flags, { enum rocker_of_dpa_table_id goto_tbl = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; - u32 in_lport = rocker_port->lport; + u32 in_pport = rocker_port->pport; __be16 vlan_id = htons(vid); __be16 vlan_id_mask = htons(0xffff); __be16 internal_vlan_id; @@ -2978,7 +3274,7 @@ static int rocker_port_vlan(struct rocker_port *rocker_port, int flags, } err = rocker_flow_tbl_vlan(rocker_port, flags, - in_lport, vlan_id, vlan_id_mask, + in_pport, vlan_id, vlan_id_mask, goto_tbl, untagged, internal_vlan_id); if (err) netdev_err(rocker_port->dev, @@ -2990,20 +3286,20 @@ static int rocker_port_vlan(struct rocker_port *rocker_port, int flags, static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags) { enum rocker_of_dpa_table_id goto_tbl; - u32 in_lport; - u32 in_lport_mask; + u32 in_pport; + u32 in_pport_mask; int err; /* Normal Ethernet Frames. Matches pkts from any local physical * ports. Goto VLAN tbl. 
*/ - in_lport = 0; - in_lport_mask = 0xffff0000; + in_pport = 0; + in_pport_mask = 0xffff0000; goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN; err = rocker_flow_tbl_ig_port(rocker_port, flags, - in_lport, in_lport_mask, + in_pport, in_pport_mask, goto_tbl); if (err) netdev_err(rocker_port->dev, @@ -3047,7 +3343,7 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port, struct rocker_fdb_learn_work *lw; enum rocker_of_dpa_table_id goto_tbl = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - u32 out_lport = rocker_port->lport; + u32 out_pport = rocker_port->pport; u32 tunnel_id = 0; u32 group_id = ROCKER_GROUP_NONE; bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC); @@ -3055,7 +3351,7 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port, int err; if (rocker_port_is_bridged(rocker_port)) - group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport); + group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); if (!(flags & ROCKER_OP_FLAG_REFRESH)) { err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL, @@ -3114,7 +3410,7 @@ static int rocker_port_fdb(struct rocker_port *rocker_port, return -ENOMEM; fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED); - fdb->key.lport = rocker_port->lport; + fdb->key.pport = rocker_port->pport; ether_addr_copy(fdb->key.addr, addr); fdb->key.vlan_id = vlan_id; fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key)); @@ -3161,7 +3457,7 @@ static int rocker_port_fdb_flush(struct rocker_port *rocker_port) spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { - if (found->key.lport != rocker_port->lport) + if (found->key.pport != rocker_port->pport) continue; if (!found->learned) continue; @@ -3182,7 +3478,7 @@ err_out: static int rocker_port_router_mac(struct rocker_port *rocker_port, int flags, __be16 vlan_id) { - u32 in_lport_mask = 0xffffffff; + u32 in_pport_mask = 0xffffffff; __be16 eth_type; const u8 *dst_mac_mask = ff_mac; __be16 vlan_id_mask = htons(0xffff); @@ -3194,7 +3490,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port, eth_type = htons(ETH_P_IP); err = rocker_flow_tbl_term_mac(rocker_port, - rocker_port->lport, in_lport_mask, + rocker_port->pport, in_pport_mask, eth_type, rocker_port->dev->dev_addr, dst_mac_mask, vlan_id, vlan_id_mask, copy_to_cpu, flags); @@ -3203,7 +3499,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port, eth_type = htons(ETH_P_IPV6); err = rocker_flow_tbl_term_mac(rocker_port, - rocker_port->lport, in_lport_mask, + rocker_port->pport, in_pport_mask, eth_type, rocker_port->dev->dev_addr, dst_mac_mask, vlan_id, vlan_id_mask, copy_to_cpu, flags); @@ -3214,7 +3510,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port, static int rocker_port_fwding(struct rocker_port *rocker_port) { bool pop_vlan; - u32 out_lport; + u32 out_pport; __be16 vlan_id; u16 vid; int flags = ROCKER_OP_FLAG_NOWAIT; @@ -3231,19 +3527,19 @@ static int rocker_port_fwding(struct rocker_port *rocker_port) rocker_port->stp_state != BR_STATE_FORWARDING) flags |= ROCKER_OP_FLAG_REMOVE; - out_lport = rocker_port->lport; + out_pport = rocker_port->pport; for (vid = 1; vid < VLAN_N_VID; vid++) { if (!test_bit(vid, rocker_port->vlan_bitmap)) continue; vlan_id = htons(vid); pop_vlan = rocker_vlan_id_is_internal(vlan_id); err = rocker_group_l2_interface(rocker_port, flags, - vlan_id, out_lport, + vlan_id, out_pport, pop_vlan); if (err) { netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 group for lport %d\n", - err, out_lport); + 
"Error (%d) port VLAN l2 group for pport %d\n", + err, out_pport); return err; } } @@ -3302,6 +3598,26 @@ static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state) return rocker_port_fwding(rocker_port); } +static int rocker_port_fwd_enable(struct rocker_port *rocker_port) +{ + if (rocker_port_is_bridged(rocker_port)) + /* bridge STP will enable port */ + return 0; + + /* port is not bridged, so simulate going to FORWARDING state */ + return rocker_port_stp_update(rocker_port, BR_STATE_FORWARDING); +} + +static int rocker_port_fwd_disable(struct rocker_port *rocker_port) +{ + if (rocker_port_is_bridged(rocker_port)) + /* bridge STP will disable port */ + return 0; + + /* port is not bridged, so simulate going to DISABLED state */ + return rocker_port_stp_update(rocker_port, BR_STATE_DISABLED); +} + static struct rocker_internal_vlan_tbl_entry * rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex) { @@ -3387,6 +3703,51 @@ not_found: spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags); } +static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, __be32 dst, + int dst_len, struct fib_info *fi, u32 tb_id, + int flags) +{ + struct fib_nh *nh; + __be16 eth_type = htons(ETH_P_IP); + __be32 dst_mask = inet_make_mask(dst_len); + __be16 internal_vlan_id = rocker_port->internal_vlan_id; + u32 priority = fi->fib_priority; + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 group_id; + bool nh_on_port; + bool has_gw; + u32 index; + int err; + + /* XXX support ECMP */ + + nh = fi->fib_nh; + nh_on_port = (fi->fib_dev == rocker_port->dev); + has_gw = !!nh->nh_gw; + + if (has_gw && nh_on_port) { + err = rocker_port_ipv4_nh(rocker_port, flags, + nh->nh_gw, &index); + if (err) + return err; + + group_id = ROCKER_GROUP_L3_UNICAST(index); + } else { + /* Send to CPU for processing */ + group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0); + } + + err = rocker_flow_tbl_ucast4_routing(rocker_port, eth_type, dst, + dst_mask, priority, goto_tbl, + group_id, flags); + if (err) + netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n", + err, &dst); + + return err; +} + /***************** * Net device ops *****************/ @@ -3394,8 +3755,6 @@ not_found: static int rocker_port_open(struct net_device *dev) { struct rocker_port *rocker_port = netdev_priv(dev); - u8 stp_state = rocker_port_is_bridged(rocker_port) ? 
- BR_STATE_BLOCKING : BR_STATE_FORWARDING; int err; err = rocker_port_dma_rings_init(rocker_port); @@ -3418,9 +3777,9 @@ static int rocker_port_open(struct net_device *dev) goto err_request_rx_irq; } - err = rocker_port_stp_update(rocker_port, stp_state); + err = rocker_port_fwd_enable(rocker_port); if (err) - goto err_stp_update; + goto err_fwd_enable; napi_enable(&rocker_port->napi_tx); napi_enable(&rocker_port->napi_rx); @@ -3428,7 +3787,7 @@ static int rocker_port_open(struct net_device *dev) netif_start_queue(dev); return 0; -err_stp_update: +err_fwd_enable: free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); err_request_rx_irq: free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); @@ -3445,7 +3804,7 @@ static int rocker_port_stop(struct net_device *dev) rocker_port_set_enable(rocker_port, false); napi_disable(&rocker_port->napi_rx); napi_disable(&rocker_port->napi_tx); - rocker_port_stp_update(rocker_port, BR_STATE_DISABLED); + rocker_port_fwd_disable(rocker_port); free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); rocker_port_dma_rings_fini(rocker_port); @@ -3702,7 +4061,7 @@ static int rocker_port_fdb_dump(struct sk_buff *skb, spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { - if (found->key.lport != rocker_port->lport) + if (found->key.pport != rocker_port->pport) continue; if (idx < cb->args[0]) goto skip; @@ -3790,6 +4149,30 @@ static int rocker_port_switch_port_stp_update(struct net_device *dev, u8 state) return rocker_port_stp_update(rocker_port, state); } +static int rocker_port_switch_fib_ipv4_add(struct net_device *dev, + __be32 dst, int dst_len, + struct fib_info *fi, + u8 tos, u8 type, u32 tb_id) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int flags = 0; + + return rocker_port_fib_ipv4(rocker_port, dst, dst_len, + fi, tb_id, flags); +} + +static int rocker_port_switch_fib_ipv4_del(struct net_device *dev, + __be32 dst, int dst_len, + struct fib_info *fi, + u8 tos, u8 type, u32 tb_id) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int flags = ROCKER_OP_FLAG_REMOVE; + + return rocker_port_fib_ipv4(rocker_port, dst, dst_len, + fi, tb_id, flags); +} + static const struct net_device_ops rocker_port_netdev_ops = { .ndo_open = rocker_port_open, .ndo_stop = rocker_port_stop, @@ -3804,6 +4187,8 @@ static const struct net_device_ops rocker_port_netdev_ops = { .ndo_bridge_getlink = rocker_port_bridge_getlink, .ndo_switch_parent_id_get = rocker_port_switch_parent_id_get, .ndo_switch_port_stp_update = rocker_port_switch_port_stp_update, + .ndo_switch_fib_ipv4_add = rocker_port_switch_fib_ipv4_add, + .ndo_switch_fib_ipv4_del = rocker_port_switch_fib_ipv4_del, }; /******************** @@ -3882,8 +4267,8 @@ rocker_cmd_get_port_stats_prep(struct rocker *rocker, if (!cmd_stats) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_LPORT, - rocker_port->lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT, + rocker_port->pport)) return -EMSGSIZE; rocker_tlv_nest_end(desc_info, cmd_stats); @@ -3900,7 +4285,7 @@ rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker, struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1]; struct rocker_tlv *pattr; - u32 lport; + u32 pport; u64 *data = priv; int i; @@ -3912,11 +4297,11 @@ rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker, 
rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX, attrs[ROCKER_TLV_CMD_INFO]); - if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_LPORT]) + if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]) return -EIO; - lport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_LPORT]); - if (lport != rocker_port->lport) + pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]); + if (pport != rocker_port->pport) return -EIO; for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) { @@ -4104,7 +4489,7 @@ static void rocker_carrier_init(struct rocker_port *rocker_port) u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS); bool link_up; - link_up = link_status & (1 << rocker_port->lport); + link_up = link_status & (1 << rocker_port->pport); if (link_up) netif_carrier_on(rocker_port->dev); else @@ -4152,7 +4537,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) rocker_port->dev = dev; rocker_port->rocker = rocker; rocker_port->port_number = port_number; - rocker_port->lport = port_number + 1; + rocker_port->pport = port_number + 1; rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC; rocker_port_dev_addr_init(rocker, rocker_port); @@ -4164,8 +4549,9 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) NAPI_POLL_WEIGHT); rocker_carrier_init(rocker_port); - dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_HW_SWITCH_OFFLOAD; + dev->features |= NETIF_F_NETNS_LOCAL | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_SWITCH_OFFLOAD; err = register_netdev(dev); if (err) { @@ -4436,9 +4822,7 @@ static int rocker_port_bridge_join(struct rocker_port *rocker_port, rocker_port->internal_vlan_id = rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex); - err = rocker_port_vlan(rocker_port, 0, 0); - - return err; + return rocker_port_vlan(rocker_port, 0, 0); } static int rocker_port_bridge_leave(struct rocker_port *rocker_port) @@ -4458,6 +4842,11 @@ static int rocker_port_bridge_leave(struct rocker_port *rocker_port) rocker_port_internal_vlan_id_get(rocker_port, rocker_port->dev->ifindex); err = rocker_port_vlan(rocker_port, 0, 0); + if (err) + return err; + + if (rocker_port->dev->flags & IFF_UP) + err = rocker_port_fwd_enable(rocker_port); return err; } @@ -4503,6 +4892,48 @@ static struct notifier_block rocker_netdevice_nb __read_mostly = { .notifier_call = rocker_netdevice_event, }; +/************************************ + * Net event notifier event handler + ************************************/ + +static int rocker_neigh_update(struct net_device *dev, struct neighbour *n) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int flags = (n->nud_state & NUD_VALID) ? 
0 : ROCKER_OP_FLAG_REMOVE; + __be32 ip_addr = *(__be32 *)n->primary_key; + + return rocker_port_ipv4_neigh(rocker_port, flags, ip_addr, n->ha); +} + +static int rocker_netevent_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev; + struct neighbour *n = ptr; + int err; + + switch (event) { + case NETEVENT_NEIGH_UPDATE: + if (n->tbl != &arp_tbl) + return NOTIFY_DONE; + dev = n->dev; + if (!rocker_port_dev_check(dev)) + return NOTIFY_DONE; + err = rocker_neigh_update(dev, n); + if (err) + netdev_warn(dev, + "failed to handle neigh update (err %d)\n", + err); + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block rocker_netevent_nb __read_mostly = { + .notifier_call = rocker_netevent_event, +}; + /*********************** * Module init and exit ***********************/ @@ -4512,18 +4943,21 @@ static int __init rocker_module_init(void) int err; register_netdevice_notifier(&rocker_netdevice_nb); + register_netevent_notifier(&rocker_netevent_nb); err = pci_register_driver(&rocker_pci_driver); if (err) goto err_pci_register_driver; return 0; err_pci_register_driver: + unregister_netdevice_notifier(&rocker_netevent_nb); unregister_netdevice_notifier(&rocker_netdevice_nb); return err; } static void __exit rocker_module_exit(void) { + unregister_netevent_notifier(&rocker_netevent_nb); unregister_netdevice_notifier(&rocker_netdevice_nb); pci_unregister_driver(&rocker_pci_driver); } diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h index a5bc432..51e430d 100644 --- a/drivers/net/ethernet/rocker/rocker.h +++ b/drivers/net/ethernet/rocker/rocker.h @@ -14,6 +14,21 @@ #include <linux/types.h> +/* Return codes */ +enum { + ROCKER_OK = 0, + ROCKER_ENOENT = 2, + ROCKER_ENXIO = 6, + ROCKER_ENOMEM = 12, + ROCKER_EEXIST = 17, + ROCKER_EINVAL = 22, + ROCKER_EMSGSIZE = 90, + ROCKER_ENOTSUP = 95, + ROCKER_ENOBUFS = 105, +}; + +#define ROCKER_FP_PORTS_MAX 62 + #define PCI_VENDOR_ID_REDHAT 0x1b36 #define PCI_DEVICE_ID_REDHAT_ROCKER 0x0006 @@ -136,7 +151,7 @@ enum { enum { ROCKER_TLV_CMD_PORT_SETTINGS_UNSPEC, - ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, /* u32 */ + ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, /* u32 */ ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, /* u32 */ ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, /* u8 */ ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, /* u8 */ @@ -151,7 +166,7 @@ enum { enum { ROCKER_TLV_CMD_PORT_STATS_UNSPEC, - ROCKER_TLV_CMD_PORT_STATS_LPORT, /* u32 */ + ROCKER_TLV_CMD_PORT_STATS_PPORT, /* u32 */ ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, /* u64 */ ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, /* u64 */ @@ -191,7 +206,7 @@ enum { enum { ROCKER_TLV_EVENT_LINK_CHANGED_UNSPEC, - ROCKER_TLV_EVENT_LINK_CHANGED_LPORT, /* u32 */ + ROCKER_TLV_EVENT_LINK_CHANGED_PPORT, /* u32 */ ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP, /* u8 */ __ROCKER_TLV_EVENT_LINK_CHANGED_MAX, @@ -201,7 +216,7 @@ enum { enum { ROCKER_TLV_EVENT_MAC_VLAN_UNSPEC, - ROCKER_TLV_EVENT_MAC_VLAN_LPORT, /* u32 */ + ROCKER_TLV_EVENT_MAC_VLAN_PPORT, /* u32 */ ROCKER_TLV_EVENT_MAC_VLAN_MAC, /* binary */ ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, /* __be16 */ @@ -275,9 +290,9 @@ enum { ROCKER_TLV_OF_DPA_HARDTIME, /* u32 */ ROCKER_TLV_OF_DPA_IDLETIME, /* u32 */ ROCKER_TLV_OF_DPA_COOKIE, /* u64 */ - ROCKER_TLV_OF_DPA_IN_LPORT, /* u32 */ - ROCKER_TLV_OF_DPA_IN_LPORT_MASK, /* u32 */ - ROCKER_TLV_OF_DPA_OUT_LPORT, /* u32 */ + ROCKER_TLV_OF_DPA_IN_PPORT, /* u32 */ + ROCKER_TLV_OF_DPA_IN_PPORT_MASK, /* u32 */ + ROCKER_TLV_OF_DPA_OUT_PPORT, /* u32 */ ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, /* u16 */ 
ROCKER_TLV_OF_DPA_GROUP_ID, /* u32 */ ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, /* u32 */ @@ -291,7 +306,7 @@ enum { ROCKER_TLV_OF_DPA_NEW_VLAN_ID, /* __be16 */ ROCKER_TLV_OF_DPA_NEW_VLAN_PCP, /* u8 */ ROCKER_TLV_OF_DPA_TUNNEL_ID, /* u32 */ - ROCKER_TLV_OF_DPA_TUN_LOG_LPORT, /* u32 */ + ROCKER_TLV_OF_DPA_TUNNEL_LPORT, /* u32 */ ROCKER_TLV_OF_DPA_ETHERTYPE, /* __be16 */ ROCKER_TLV_OF_DPA_DST_MAC, /* binary */ ROCKER_TLV_OF_DPA_DST_MAC_MASK, /* binary */ diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index c8a01ee..413ea14a 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -422,11 +422,11 @@ static int init_tx_ring(struct device *dev, u8 queue_no, /* assign queue number */ tx_ring->queue_no = queue_no; - /* initalise counters */ + /* initialise counters */ tx_ring->dirty_tx = 0; tx_ring->cur_tx = 0; - /* initalise TX queue lock */ + /* initialise TX queue lock */ spin_lock_init(&tx_ring->tx_lock); return 0; @@ -515,7 +515,7 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no, goto err_free_rx_buffers; } - /* initalise counters */ + /* initialise counters */ rx_ring->cur_rx = 0; rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize); priv->dma_buf_sz = bfsize; @@ -837,7 +837,7 @@ static void sxgbe_restart_tx_queue(struct sxgbe_priv_data *priv, int queue_num) /* free the skbuffs of the ring */ tx_free_ring_skbufs(tx_ring); - /* initalise counters */ + /* initialise counters */ tx_ring->cur_tx = 0; tx_ring->dirty_tx = 0; @@ -1176,7 +1176,7 @@ static int sxgbe_open(struct net_device *dev) if (priv->phydev) phy_start(priv->phydev); - /* initalise TX coalesce parameters */ + /* initialise TX coalesce parameters */ sxgbe_tx_init_coalesce(priv); if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { @@ -1721,7 +1721,7 @@ static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi) * Description: * This function is a driver entry point whenever ifconfig command gets * executed to see device statistics. Statistics are number of - * bytes sent or received, errors occured etc. + * bytes sent or received, errors occurred etc. * Return value: * This function returns various statistical information of device. */ diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 2384824..33d2f9a 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -3215,7 +3215,7 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev, return status; } -/* Fake a successfull reset, which will be performed later in efx_io_resume. */ +/* Fake a successful reset, which will be performed later in efx_io_resume. */ static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev) { struct efx_nic *efx = pci_get_drvdata(pdev); diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 7597532..bb89e96 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -645,7 +645,7 @@ static bool efx_check_tx_flush_complete(struct efx_nic *efx) } /* Flush all the transmit queues, and continue flushing receive queues until - * they're all flushed. Wait for the DRAIN events to be recieved so that there + * they're all flushed. Wait for the DRAIN events to be received so that there * are no more RX and TX events left on any channel. 
*/ static int efx_farch_do_flush(struct efx_nic *efx) { @@ -1108,7 +1108,7 @@ efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) } /* If this flush done event corresponds to a &struct efx_rx_queue: If the flush - * was succesful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add + * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add * the RX queue back to the mask of RX queues in need of flushing. */ static void diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index a707fb5..e028de1 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -6497,7 +6497,7 @@ #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num)) -/* Raw buffer table entries, layed out as BUFTBL_ENTRY. */ +/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */ #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1 diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c index a8bbbad..fe83430 100644 --- a/drivers/net/ethernet/sfc/siena_sriov.c +++ b/drivers/net/ethernet/sfc/siena_sriov.c @@ -1067,7 +1067,7 @@ void efx_siena_sriov_probe(struct efx_nic *efx) } /* Copy the list of individual addresses into the vfdi_status.peers - * array and auxillary pages, protected by %local_lock. Drop that lock + * array and auxiliary pages, protected by %local_lock. Drop that lock * and then broadcast the address list to every VF. */ static void efx_siena_sriov_peer_work(struct work_struct *data) diff --git a/drivers/net/ethernet/sfc/vfdi.h b/drivers/net/ethernet/sfc/vfdi.h index ae044f4..f62901d 100644 --- a/drivers/net/ethernet/sfc/vfdi.h +++ b/drivers/net/ethernet/sfc/vfdi.h @@ -98,7 +98,7 @@ struct vfdi_endpoint { * @VFDI_OP_INIT_TXQ: Initialize SRAM entries and initialize a TXQ. * @VFDI_OP_FINI_ALL_QUEUES: Flush all queues, finalize all queues, then * finalize the SRAM entries. - * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targetting the given RXQ. + * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targeting the given RXQ. * @VFDI_OP_REMOVE_ALL_FILTERS: Remove all filters. * @VFDI_OP_SET_STATUS_PAGE: Set the DMA page(s) used for status updates * from PF and write the initial status. @@ -148,7 +148,7 @@ enum vfdi_op { * @u.init_txq.flags: Checksum offload flags. * @u.init_txq.addr: Array of length %u.init_txq.buf_count containing DMA * address of each page backing the transmit queue. - * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targetting + * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targeting * all traffic at this receive queue. * @u.mac_filter.flags: MAC filter flags. * @u.set_status_page.dma_addr: Base address for the &struct vfdi_status. 
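For context on the rocker hunks above: the driver subscribes to neighbour-table updates so resolved ARP entries can be pushed into the hardware L3 tables. A minimal sketch of that netevent subscription pattern follows; my_port_dev_check() and my_neigh_update() are placeholder stubs for illustration, not rocker's own functions.

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>

/* Placeholder: accept only netdevs owned by this driver. */
static bool my_port_dev_check(const struct net_device *dev)
{
	return false;
}

/* Placeholder: program or remove the host entry based on NUD state. */
static void my_neigh_update(struct net_device *dev, struct neighbour *n)
{
	bool valid = n->nud_state & NUD_VALID;
	__be32 ip = *(__be32 *)n->primary_key;

	(void)valid;
	(void)ip;
}

static int my_netevent_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct neighbour *n = ptr;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		if (n->tbl != &arp_tbl)		/* IPv4 neighbours only */
			return NOTIFY_DONE;
		if (!my_port_dev_check(n->dev))	/* only our own ports */
			return NOTIFY_DONE;
		my_neigh_update(n->dev, n);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_netevent_nb __read_mostly = {
	.notifier_call = my_netevent_event,
};

static int __init my_init(void)
{
	return register_netevent_notifier(&my_netevent_nb);
}

static void __exit my_exit(void)
{
	unregister_netevent_notifier(&my_netevent_nb);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");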
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 2965c6a..41047c9 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -843,7 +843,7 @@ static int smsc911x_phy_loopbacktest(struct net_device *dev) unsigned long flags; /* Initialise tx packet using broadcast destination address */ - memset(pdata->loopback_tx_pkt, 0xff, ETH_ALEN); + eth_broadcast_addr(pdata->loopback_tx_pkt); /* Use incrementing source address */ for (i = 6; i < 12; i++) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index e97074c..5a36bd2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -91,7 +91,9 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * STMMAC_RESOURCE_NAME); if (IS_ERR(dwmac->stmmac_rst)) { dev_info(dev, "Could not get reset control!\n"); - return -EINVAL; + if (PTR_ERR(dwmac->stmmac_rst) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dwmac->stmmac_rst = NULL; } dwmac->interface = of_get_phy_mode(np); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index a0ea84f..5336594 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -609,7 +609,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) * where, freq_div_ratio = clk_ptp_ref_i/50MHz * hence, addend = ((2^32) * 50MHz)/clk_ptp_ref_i; * NOTE: clk_ptp_ref_i should be >= 50MHz to - * achive 20ns accuracy. + * achieve 20ns accuracy. * * 2^x * y == (y << x), hence * 2^32 * 50000000 ==> (50000000 << 32) diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index fef5dec..74e9b14 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -2175,7 +2175,7 @@ static int gem_do_start(struct net_device *dev) } /* Mark us as attached again if we come from resume(), this has - * no effect if we weren't detatched and needs to be done now. + * no effect if we weren't detached and needs to be done now. 
+ * no effect if we weren't detached and needs to be done now.
*/ netif_device_attach(dev); @@ -2794,7 +2794,7 @@ static void gem_remove_one(struct pci_dev *pdev) unregister_netdev(dev); - /* Ensure reset task is truely gone */ + /* Ensure reset task is truly gone */ cancel_work_sync(&gp->reset_task); /* Free resources */ diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 22e0cad..401abf7 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -1411,6 +1411,8 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(err < 0)) { netdev_info(dev, "TX trigger error %d\n", err); d->hdr.state = VIO_DESC_FREE; + skb = port->tx_bufs[txi].skb; + port->tx_bufs[txi].skb = NULL; dev->stats.tx_carrier_errors++; goto out_dropped; } diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index a1bbaf6..b536b4c 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -726,7 +726,7 @@ static void cpsw_rx_handler(void *token, int len, int status) if (ndev_status && (status >= 0)) { /* The packet received is for the interface which * is already down and the other interface is up - * and running, intead of freeing which results + * and running, instead of freeing which results * in reducing of the number of rx descriptor in * DMA engine, requeue skb back to cpdma. */ diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index a31a8c3..9f14d8b 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1320,7 +1320,7 @@ static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp, if (addr) ether_addr_copy(naddr->addr, addr); else - memset(naddr->addr, 0, ETH_ALEN); + eth_zero_addr(naddr->addr); list_add_tail(&naddr->node, &netcp->addr_list); return naddr; diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index bb79928..ac62a5e 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -1065,7 +1065,7 @@ refill: /* * this call can fail, but for now, just leave this - * decriptor without skb + * descriptor without skb */ gelic_descr_prepare_rx(card, descr); diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c index 0a7f2e7..13214a6 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c @@ -1167,7 +1167,7 @@ static int gelic_wl_set_ap(struct net_device *netdev, } else { pr_debug("%s: clear bssid\n", __func__); clear_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat); - memset(wl->bssid, 0, ETH_ALEN); + eth_zero_addr(wl->bssid); } spin_unlock_irqrestore(&wl->lock, irqflag); pr_debug("%s: ->\n", __func__); @@ -1189,7 +1189,7 @@ static int gelic_wl_get_ap(struct net_device *netdev, memcpy(data->ap_addr.sa_data, wl->active_bssid, ETH_ALEN); } else - memset(data->ap_addr.sa_data, 0, ETH_ALEN); + eth_zero_addr(data->ap_addr.sa_data); spin_unlock_irqrestore(&wl->lock, irqflag); mutex_unlock(&wl->assoc_stat_lock); diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 17e2766..8fb807e 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -70,12 +70,14 @@ static const int multicast_filter_limit = 32; /* Operational parameters that are set at compile time. */ /* Keep the ring sizes a power of two for compile efficiency. 
- The compiler will convert <unsigned>'%'<2^N> into a bit mask. - Making the Tx ring too large decreases the effectiveness of channel - bonding and packet priority. - There are no ill effects from too-large receive rings. */ -#define TX_RING_SIZE 16 -#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */ + * The compiler will convert <unsigned>'%'<2^N> into a bit mask. + * Making the Tx ring too large decreases the effectiveness of channel + * bonding and packet priority. + * With BQL support, we can increase TX ring safely. + * There are no ill effects from too-large receive rings. + */ +#define TX_RING_SIZE 64 +#define TX_QUEUE_LEN (TX_RING_SIZE - 6) /* Limit ring entries actually used. */ #define RX_RING_SIZE 64 /* Operational parameters that usually are not changed. */ @@ -1295,6 +1297,7 @@ static void alloc_tbufs(struct net_device* dev) } rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma); + netdev_reset_queue(dev); } static void free_tbufs(struct net_device* dev) @@ -1795,6 +1798,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb, else rp->tx_ring[entry].tx_status = 0; + netdev_sent_queue(dev, skb->len); /* lock eth irq */ wmb(); rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn); @@ -1863,6 +1867,8 @@ static void rhine_tx(struct net_device *dev) struct rhine_private *rp = netdev_priv(dev); struct device *hwdev = dev->dev.parent; int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE; + unsigned int pkts_compl = 0, bytes_compl = 0; + struct sk_buff *skb; /* find and cleanup dirty tx descriptors */ while (rp->dirty_tx != rp->cur_tx) { @@ -1871,6 +1877,7 @@ static void rhine_tx(struct net_device *dev) entry, txstatus); if (txstatus & DescOwn) break; + skb = rp->tx_skbuff[entry]; if (txstatus & 0x8000) { netif_dbg(rp, tx_done, dev, "Transmit error, Tx status %08x\n", txstatus); @@ -1899,7 +1906,7 @@ static void rhine_tx(struct net_device *dev) (txstatus >> 3) & 0xF, txstatus & 0xF); u64_stats_update_begin(&rp->tx_stats.syncp); - rp->tx_stats.bytes += rp->tx_skbuff[entry]->len; + rp->tx_stats.bytes += skb->len; rp->tx_stats.packets++; u64_stats_update_end(&rp->tx_stats.syncp); } @@ -1907,13 +1914,17 @@ static void rhine_tx(struct net_device *dev) if (rp->tx_skbuff_dma[entry]) { dma_unmap_single(hwdev, rp->tx_skbuff_dma[entry], - rp->tx_skbuff[entry]->len, + skb->len, DMA_TO_DEVICE); } - dev_consume_skb_any(rp->tx_skbuff[entry]); + bytes_compl += skb->len; + pkts_compl++; + dev_consume_skb_any(skb); rp->tx_skbuff[entry] = NULL; entry = (++rp->dirty_tx) % TX_RING_SIZE; } + + netdev_completed_queue(dev, pkts_compl, bytes_compl); if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4) netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index a495931..f5498c2 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -56,7 +56,7 @@ MODULE_LICENSE("GPL"); #define W5100_S0_REGS 0x0400 #define W5100_S0_MR 0x0400 /* S0 Mode Register */ -#define S0_MR_MACRAW 0x04 /* MAC RAW mode (promiscous) */ +#define S0_MR_MACRAW 0x04 /* MAC RAW mode (promiscuous) */ #define S0_MR_MACRAW_MF 0x44 /* MAC RAW mode (filtered) */ #define W5100_S0_CR 0x0401 /* S0 Command Register */ #define S0_CR_OPEN 0x01 /* OPEN command */ diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 09322d9..ca0c631 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -63,7 +63,7 @@ MODULE_LICENSE("GPL"); #define IDR_W5300 0x5300 /* =0x5300 for 
WIZnet W5300 */ #define W5300_S0_MR 0x0200 /* S0 Mode Register */ #define S0_MR_CLOSED 0x0000 /* Close mode */ -#define S0_MR_MACRAW 0x0004 /* MAC RAW mode (promiscous) */ +#define S0_MR_MACRAW 0x0004 /* MAC RAW mode (promiscuous) */ #define S0_MR_MACRAW_MF 0x0044 /* MAC RAW mode (filtered) */ #define W5300_S0_CR 0x0202 /* S0 Command Register */ #define S0_CR_OPEN 0x0001 /* OPEN command */ diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 9e16a28..5138407 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -954,7 +954,7 @@ static void eth_set_mcast_list(struct net_device *dev) return; } - memset(diffs, 0, ETH_ALEN); + eth_zero_addr(diffs); addr = NULL; netdev_for_each_mc_addr(ha, dev) { diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index daca0de..7c4a415 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -247,6 +247,9 @@ static netdev_tx_t sp_xmit(struct sk_buff *skb, struct net_device *dev) { struct sixpack *sp = netdev_priv(dev); + if (skb->protocol == htons(ETH_P_IP)) + return ax25_ip_xmit(skb); + spin_lock_bh(&sp->lock); /* We were not busy, so we are now... :-) */ netif_stop_queue(dev); @@ -284,18 +287,6 @@ static int sp_close(struct net_device *dev) return 0; } -/* Return the frame type ID */ -static int sp_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, const void *daddr, - const void *saddr, unsigned len) -{ -#ifdef CONFIG_INET - if (type != ETH_P_AX25) - return ax25_hard_header(skb, dev, type, daddr, saddr, len); -#endif - return 0; -} - static int sp_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr_ax25 *sa = addr; @@ -309,20 +300,6 @@ static int sp_set_mac_address(struct net_device *dev, void *addr) return 0; } -static int sp_rebuild_header(struct sk_buff *skb) -{ -#ifdef CONFIG_INET - return ax25_rebuild_header(skb); -#else - return 0; -#endif -} - -static const struct header_ops sp_header_ops = { - .create = sp_header, - .rebuild = sp_rebuild_header, -}; - static const struct net_device_ops sp_netdev_ops = { .ndo_open = sp_open_dev, .ndo_stop = sp_close, @@ -337,7 +314,7 @@ static void sp_setup(struct net_device *dev) dev->destructor = free_netdev; dev->mtu = SIXP_MTU; dev->hard_header_len = AX25_MAX_HEADER_LEN; - dev->header_ops = &sp_header_ops; + dev->header_ops = &ax25_header_ops; dev->addr_len = AX25_ADDR_LEN; dev->type = ARPHRD_AX25; diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index a98c153..83c7cce 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -772,6 +772,9 @@ static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev) { struct baycom_state *bc = netdev_priv(dev); + if (skb->protocol == htons(ETH_P_IP)) + return ax25_ip_xmit(skb); + if (skb->data[0] != 0) { do_kiss_params(bc, skb->data, skb->len); dev_kfree_skb(skb); diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index c2894e4..63ff08a 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -251,6 +251,9 @@ static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev) struct net_device *orig_dev; int size; + if (skb->protocol == htons(ETH_P_IP)) + return ax25_ip_xmit(skb); + /* * Just to be *really* sure not to send anything if the interface * is down, the ethernet device may have gone. 
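The via-rhine hunks above add Byte Queue Limits (BQL) accounting so the stack can keep the enlarged TX ring shallow. The pattern is generic and comes from <linux/netdevice.h>; the wrappers below are only an illustrative sketch, not via-rhine code.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Call when the TX ring is (re)initialised so stale byte counts are dropped. */
static void my_tx_ring_init(struct net_device *dev)
{
	netdev_reset_queue(dev);
}

/* Call from ndo_start_xmit once the frame has been placed on the ring. */
static void my_tx_enqueued(struct net_device *dev, struct sk_buff *skb)
{
	netdev_sent_queue(dev, skb->len);
}

/* Call once per TX-completion pass with the totals reclaimed in that pass. */
static void my_tx_reclaimed(struct net_device *dev,
			    unsigned int pkts_compl, unsigned int bytes_compl)
{
	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}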
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index 0fad408..c3d3777 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c @@ -920,6 +920,9 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev) unsigned long flags; int i; + if (skb->protocol == htons(ETH_P_IP)) + return ax25_ip_xmit(skb); + /* Temporarily stop the scheduler feeding us packets */ netif_stop_queue(dev); diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index c67a272..49fe59b 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c @@ -404,6 +404,9 @@ static netdev_tx_t hdlcdrv_send_packet(struct sk_buff *skb, { struct hdlcdrv_state *sm = netdev_priv(dev); + if (skb->protocol == htons(ETH_P_IP)) + return ax25_ip_xmit(skb); + if (skb->data[0] != 0) { do_kiss_params(sm, skb->data, skb->len); dev_kfree_skb(skb); diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index f990bb1c..2ffbf13 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -529,6 +529,9 @@ static netdev_tx_t ax_xmit(struct sk_buff *skb, struct net_device *dev) { struct mkiss *ax = netdev_priv(dev); + if (skb->protocol == htons(ETH_P_IP)) + return ax25_ip_xmit(skb); + if (!netif_running(dev)) { printk(KERN_ERR "mkiss: %s: xmit call when iface is down\n", dev->name); return NETDEV_TX_BUSY; @@ -554,11 +557,9 @@ static netdev_tx_t ax_xmit(struct sk_buff *skb, struct net_device *dev) } /* We were not busy, so we are now... :-) */ - if (skb != NULL) { - netif_stop_queue(dev); - ax_encaps(dev, skb->data, skb->len); - kfree_skb(skb); - } + netif_stop_queue(dev); + ax_encaps(dev, skb->data, skb->len); + kfree_skb(skb); return NETDEV_TX_OK; } @@ -573,32 +574,6 @@ static int ax_open_dev(struct net_device *dev) return 0; } -#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) - -/* Return the frame type ID */ -static int ax_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, const void *daddr, - const void *saddr, unsigned len) -{ -#ifdef CONFIG_INET - if (type != ETH_P_AX25) - return ax25_hard_header(skb, dev, type, daddr, saddr, len); -#endif - return 0; -} - - -static int ax_rebuild_header(struct sk_buff *skb) -{ -#ifdef CONFIG_INET - return ax25_rebuild_header(skb); -#else - return 0; -#endif -} - -#endif /* CONFIG_{AX25,AX25_MODULE} */ - /* Open the low-level part of the AX25 channel. Easy! 
*/ static int ax_open(struct net_device *dev) { @@ -662,11 +637,6 @@ static int ax_close(struct net_device *dev) return 0; } -static const struct header_ops ax_header_ops = { - .create = ax_header, - .rebuild = ax_rebuild_header, -}; - static const struct net_device_ops ax_netdev_ops = { .ndo_open = ax_open_dev, .ndo_stop = ax_close, @@ -682,7 +652,7 @@ static void ax_setup(struct net_device *dev) dev->addr_len = 0; dev->type = ARPHRD_AX25; dev->tx_queue_len = 10; - dev->header_ops = &ax_header_ops; + dev->header_ops = &ax25_header_ops; dev->netdev_ops = &ax_netdev_ops; diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index 57be9e0..ce88df3 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c @@ -1639,6 +1639,9 @@ static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev) unsigned long flags; char kisscmd; + if (skb->protocol == htons(ETH_P_IP)) + return ax25_ip_xmit(skb); + if (skb->len > scc->stat.bufsize || skb->len < 2) { scc->dev_stat.tx_dropped++; /* bogus frame */ dev_kfree_skb(skb); diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 717433c..1a4729c 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -597,6 +597,9 @@ static netdev_tx_t yam_send_packet(struct sk_buff *skb, { struct yam_port *yp = netdev_priv(dev); + if (skb->protocol == htons(ETH_P_IP)) + return ax25_ip_xmit(skb); + skb_queue_tail(&yp->send_queue, skb); dev->trans_start = jiffies; return NETDEV_TX_OK; diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 384ca4f..4815843 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -634,6 +634,7 @@ struct netvsc_device { struct vmbus_channel *chn_table[NR_CPUS]; u32 send_table[VRSS_SEND_TAB_SIZE]; + u32 max_chn; u32 num_chn; atomic_t queue_sends[NR_CPUS]; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 15d82ed..a06bd66 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -687,6 +687,19 @@ static void netvsc_get_drvinfo(struct net_device *net, strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); } +static void netvsc_get_channels(struct net_device *net, + struct ethtool_channels *channel) +{ + struct net_device_context *net_device_ctx = netdev_priv(net); + struct hv_device *dev = net_device_ctx->device_ctx; + struct netvsc_device *nvdev = hv_get_drvdata(dev); + + if (nvdev) { + channel->max_combined = nvdev->max_chn; + channel->combined_count = nvdev->num_chn; + } +} + static int netvsc_change_mtu(struct net_device *ndev, int mtu) { struct net_device_context *ndevctx = netdev_priv(ndev); @@ -760,6 +773,7 @@ static void netvsc_poll_controller(struct net_device *net) static const struct ethtool_ops ethtool_ops = { .get_drvinfo = netvsc_get_drvinfo, .get_link = ethtool_op_get_link, + .get_channels = netvsc_get_channels, }; static const struct net_device_ops device_ops = { diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 7816d98..ca81de0 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1027,6 +1027,7 @@ int rndis_filter_device_add(struct hv_device *dev, /* Initialize the rndis device */ net_device = hv_get_drvdata(dev); + net_device->max_chn = 1; net_device->num_chn = 1; net_device->extension = rndis_device; @@ -1094,6 +1095,7 @@ int rndis_filter_device_add(struct hv_device *dev, if (ret || rsscap.num_recv_que < 2) goto out; + net_device->max_chn = 
rsscap.num_recv_que; net_device->num_chn = (num_online_cpus() < rsscap.num_recv_que) ? num_online_cpus() : rsscap.num_recv_que; if (net_device->num_chn == 1) @@ -1140,8 +1142,10 @@ int rndis_filter_device_add(struct hv_device *dev, ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn); out: - if (ret) + if (ret) { + net_device->max_chn = 1; net_device->num_chn = 1; + } return 0; /* return 0 because primary channel can be used alone */ err_dev_remv: diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 7b051ea..1d438bc 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -46,8 +46,6 @@ struct at86rf2xx_chip_data { u16 t_off_to_tx_on; u16 t_frame; u16 t_p_ack; - /* completion timeout for tx in msecs */ - u16 t_tx_timeout; int rssi_base_val; int (*set_channel)(struct at86rf230_local *, u8, u8); @@ -689,7 +687,7 @@ at86rf230_sync_state_change_complete(void *context) static int at86rf230_sync_state_change(struct at86rf230_local *lp, unsigned int state) { - int rc; + unsigned long rc; at86rf230_async_state_change(lp, &lp->state, state, at86rf230_sync_state_change_complete, @@ -1281,7 +1279,6 @@ static struct at86rf2xx_chip_data at86rf233_data = { .t_off_to_tx_on = 80, .t_frame = 4096, .t_p_ack = 545, - .t_tx_timeout = 2000, .rssi_base_val = -91, .set_channel = at86rf23x_set_channel, .get_desense_steps = at86rf23x_get_desens_steps @@ -1295,7 +1292,6 @@ static struct at86rf2xx_chip_data at86rf231_data = { .t_off_to_tx_on = 110, .t_frame = 4096, .t_p_ack = 545, - .t_tx_timeout = 2000, .rssi_base_val = -91, .set_channel = at86rf23x_set_channel, .get_desense_steps = at86rf23x_get_desens_steps @@ -1309,13 +1305,12 @@ static struct at86rf2xx_chip_data at86rf212_data = { .t_off_to_tx_on = 200, .t_frame = 4096, .t_p_ack = 545, - .t_tx_timeout = 2000, .rssi_base_val = -100, .set_channel = at86rf212_set_channel, .get_desense_steps = at86rf212_get_desens_steps }; -static int at86rf230_hw_init(struct at86rf230_local *lp) +static int at86rf230_hw_init(struct at86rf230_local *lp, u8 xtal_trim) { int rc, irq_type, irq_pol = IRQ_ACTIVE_HIGH; unsigned int dvdd; @@ -1326,7 +1321,12 @@ static int at86rf230_hw_init(struct at86rf230_local *lp) return rc; irq_type = irq_get_trigger_type(lp->spi->irq); - if (irq_type == IRQ_TYPE_EDGE_FALLING) + if (irq_type == IRQ_TYPE_EDGE_RISING || + irq_type == IRQ_TYPE_EDGE_FALLING) + dev_warn(&lp->spi->dev, + "Using edge triggered irq's are not recommended!\n"); + if (irq_type == IRQ_TYPE_EDGE_FALLING || + irq_type == IRQ_TYPE_LEVEL_LOW) irq_pol = IRQ_ACTIVE_LOW; rc = at86rf230_write_subreg(lp, SR_IRQ_POLARITY, irq_pol); @@ -1341,6 +1341,11 @@ static int at86rf230_hw_init(struct at86rf230_local *lp) if (rc) return rc; + /* reset values differs in at86rf231 and at86rf233 */ + rc = at86rf230_write_subreg(lp, SR_IRQ_MASK_MODE, 0); + if (rc) + return rc; + get_random_bytes(csma_seed, ARRAY_SIZE(csma_seed)); rc = at86rf230_write_subreg(lp, SR_CSMA_SEED_0, csma_seed[0]); if (rc) @@ -1362,6 +1367,45 @@ static int at86rf230_hw_init(struct at86rf230_local *lp) usleep_range(lp->data->t_sleep_cycle, lp->data->t_sleep_cycle + 100); + /* xtal_trim value is calculated by: + * CL = 0.5 * (CX + CTRIM + CPAR) + * + * whereas: + * CL = capacitor of used crystal + * CX = connected capacitors at xtal pins + * CPAR = in all at86rf2xx datasheets this is a constant value 3 pF, + * but this is different on each board setup. You need to fine + * tuning this value via CTRIM. + * CTRIM = variable capacitor setting. 
Resolution is 0.3 pF range is + * 0 pF upto 4.5 pF. + * + * Examples: + * atben transceiver: + * + * CL = 8 pF + * CX = 12 pF + * CPAR = 3 pF (We assume the magic constant from datasheet) + * CTRIM = 0.9 pF + * + * (12+0.9+3)/2 = 7.95 which is nearly at 8 pF + * + * xtal_trim = 0x3 + * + * openlabs transceiver: + * + * CL = 16 pF + * CX = 22 pF + * CPAR = 3 pF (We assume the magic constant from datasheet) + * CTRIM = 4.5 pF + * + * (22+4.5+3)/2 = 14.75 which is the nearest value to 16 pF + * + * xtal_trim = 0xf + */ + rc = at86rf230_write_subreg(lp, SR_XTAL_TRIM, xtal_trim); + if (rc) + return rc; + rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &dvdd); if (rc) return rc; @@ -1377,24 +1421,30 @@ static int at86rf230_hw_init(struct at86rf230_local *lp) return at86rf230_write_subreg(lp, SR_SLOTTED_OPERATION, 0); } -static struct at86rf230_platform_data * -at86rf230_get_pdata(struct spi_device *spi) +static int +at86rf230_get_pdata(struct spi_device *spi, int *rstn, int *slp_tr, + u8 *xtal_trim) { - struct at86rf230_platform_data *pdata; + struct at86rf230_platform_data *pdata = spi->dev.platform_data; + int ret; - if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node) - return spi->dev.platform_data; + if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node) { + if (!pdata) + return -ENOENT; - pdata = devm_kzalloc(&spi->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - goto done; + *rstn = pdata->rstn; + *slp_tr = pdata->slp_tr; + *xtal_trim = pdata->xtal_trim; + return 0; + } - pdata->rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0); - pdata->slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0); + *rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0); + *slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0); + ret = of_property_read_u8(spi->dev.of_node, "xtal-trim", xtal_trim); + if (ret < 0 && ret != -EINVAL) + return ret; - spi->dev.platform_data = pdata; -done: - return pdata; + return 0; } static int @@ -1501,43 +1551,43 @@ at86rf230_setup_spi_messages(struct at86rf230_local *lp) static int at86rf230_probe(struct spi_device *spi) { - struct at86rf230_platform_data *pdata; struct ieee802154_hw *hw; struct at86rf230_local *lp; unsigned int status; - int rc, irq_type; + int rc, irq_type, rstn, slp_tr; + u8 xtal_trim; if (!spi->irq) { dev_err(&spi->dev, "no IRQ specified\n"); return -EINVAL; } - pdata = at86rf230_get_pdata(spi); - if (!pdata) { - dev_err(&spi->dev, "no platform_data\n"); - return -EINVAL; + rc = at86rf230_get_pdata(spi, &rstn, &slp_tr, &xtal_trim); + if (rc < 0) { + dev_err(&spi->dev, "failed to parse platform_data: %d\n", rc); + return rc; } - if (gpio_is_valid(pdata->rstn)) { - rc = devm_gpio_request_one(&spi->dev, pdata->rstn, + if (gpio_is_valid(rstn)) { + rc = devm_gpio_request_one(&spi->dev, rstn, GPIOF_OUT_INIT_HIGH, "rstn"); if (rc) return rc; } - if (gpio_is_valid(pdata->slp_tr)) { - rc = devm_gpio_request_one(&spi->dev, pdata->slp_tr, + if (gpio_is_valid(slp_tr)) { + rc = devm_gpio_request_one(&spi->dev, slp_tr, GPIOF_OUT_INIT_LOW, "slp_tr"); if (rc) return rc; } /* Reset */ - if (gpio_is_valid(pdata->rstn)) { + if (gpio_is_valid(rstn)) { udelay(1); - gpio_set_value(pdata->rstn, 0); + gpio_set_value(rstn, 0); udelay(1); - gpio_set_value(pdata->rstn, 1); + gpio_set_value(rstn, 1); usleep_range(120, 240); } @@ -1571,7 +1621,7 @@ static int at86rf230_probe(struct spi_device *spi) spi_set_drvdata(spi, lp); - rc = at86rf230_hw_init(lp); + rc = at86rf230_hw_init(lp, xtal_trim); if (rc) goto free_dev; diff --git a/drivers/net/ipvlan/ipvlan_main.c 
b/drivers/net/ipvlan/ipvlan_main.c index 4f4099d..2950c37 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -336,7 +336,6 @@ static int ipvlan_hard_header(struct sk_buff *skb, struct net_device *dev, static const struct header_ops ipvlan_header_ops = { .create = ipvlan_hard_header, - .rebuild = eth_rebuild_header, .parse = eth_header_parse, .cache = eth_header_cache, .cache_update = eth_header_cache_update, diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 1df38bd..b5e3320 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -550,7 +550,6 @@ static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev, static const struct header_ops macvlan_hard_header_ops = { .create = macvlan_hard_header, - .rebuild = eth_rebuild_header, .parse = eth_header_parse, .cache = eth_header_cache, .cache_update = eth_header_cache_update, diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 27ecc5c..8362aef 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -1130,16 +1130,15 @@ static const struct file_operations macvtap_fops = { #endif }; -static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t total_len) +static int macvtap_sendmsg(struct socket *sock, struct msghdr *m, + size_t total_len) { struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); return macvtap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT); } -static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t total_len, - int flags) +static int macvtap_recvmsg(struct socket *sock, struct msghdr *m, + size_t total_len, int flags) { struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); int ret; diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index ba2f5e7..15731d1 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -47,6 +47,7 @@ #include <linux/netpoll.h> #include <linux/inet.h> #include <linux/configfs.h> +#include <linux/etherdevice.h> MODULE_AUTHOR("Maintainer: Matt Mackall <mpm@selenic.com>"); MODULE_DESCRIPTION("Console driver for network interfaces"); @@ -185,7 +186,7 @@ static struct netconsole_target *alloc_param_target(char *target_config) nt->np.local_port = 6665; nt->np.remote_port = 6666; mutex_init(&nt->mutex); - memset(nt->np.remote_mac, 0xff, ETH_ALEN); + eth_broadcast_addr(nt->np.remote_mac); /* Parse parameters and setup netpoll */ err = netpoll_parse_options(&nt->np, target_config); @@ -604,7 +605,7 @@ static struct config_item *make_netconsole_target(struct config_group *group, nt->np.local_port = 6665; nt->np.remote_port = 6666; mutex_init(&nt->mutex); - memset(nt->np.remote_mac, 0xff, ETH_ALEN); + eth_broadcast_addr(nt->np.remote_mac); /* Initialize the config_item member */ config_item_init_type_name(&nt->item, name, &netconsole_target_type); diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index d2408a5..ff059e1 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -455,6 +455,18 @@ out: return NET_RX_DROP; } +static void pppoe_unbind_sock_work(struct work_struct *work) +{ + struct pppox_sock *po = container_of(work, struct pppox_sock, + proto.pppoe.padt_work); + struct sock *sk = sk_pppox(po); + + lock_sock(sk); + pppox_unbind_sock(sk); + release_sock(sk); + sock_put(sk); +} + /************************************************************************ * * Receive a PPPoE Discovery frame. 
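The pppoe hunk above defers the PADT-triggered unbind to a workqueue because the discovery frame arrives in softirq context, where a sleeping lock_sock() cannot be taken. A generic sketch of that hand-off, with a kref standing in for the socket reference (names are illustrative, not pppoe's):

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_conn {
	struct kref ref;
	struct work_struct teardown_work;
};

static void my_conn_release(struct kref *ref)
{
	kfree(container_of(ref, struct my_conn, ref));
}

/* Runs in process context: sleeping locks (e.g. lock_sock()) are fine here. */
static void my_conn_teardown_work(struct work_struct *work)
{
	struct my_conn *c = container_of(work, struct my_conn, teardown_work);

	/* ... lock, unbind, unlock ... */
	kref_put(&c->ref, my_conn_release);	/* drop the ref handed to the work */
}

static void my_conn_init(struct my_conn *c)
{
	kref_init(&c->ref);
	INIT_WORK(&c->teardown_work, my_conn_teardown_work);
}

/* Called from softirq: the reference travels with the work item; if the
 * work was already pending, keep the existing ref and drop this one.
 */
static void my_conn_teardown_from_softirq(struct my_conn *c)
{
	kref_get(&c->ref);
	if (!schedule_work(&c->teardown_work))
		kref_put(&c->ref, my_conn_release);
}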
@@ -500,7 +512,8 @@ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev, } bh_unlock_sock(sk); - sock_put(sk); + if (!schedule_work(&po->proto.pppoe.padt_work)) + sock_put(sk); } abort: @@ -613,6 +626,8 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, lock_sock(sk); + INIT_WORK(&po->proto.pppoe.padt_work, pppoe_unbind_sock_work); + error = -EINVAL; if (sp->sa_protocol != PX_PROTO_OE) goto end; @@ -820,8 +835,8 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, return err; } -static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t total_len) +static int pppoe_sendmsg(struct socket *sock, struct msghdr *m, + size_t total_len) { struct sk_buff *skb; struct sock *sk = sock->sk; @@ -962,8 +977,8 @@ static const struct ppp_channel_ops pppoe_chan_ops = { .start_xmit = pppoe_xmit, }; -static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t total_len, int flags) +static int pppoe_recvmsg(struct socket *sock, struct msghdr *m, + size_t total_len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 7d39484..a23319f 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1935,6 +1935,9 @@ static netdev_features_t team_fix_features(struct net_device *dev, mask); } rcu_read_unlock(); + + features = netdev_add_tso_features(features, mask); + return features; } diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 857dca4..b96b94c 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1448,8 +1448,7 @@ static void tun_sock_write_space(struct sock *sk) kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); } -static int tun_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t total_len) +static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) { int ret; struct tun_file *tfile = container_of(sock, struct tun_file, socket); @@ -1464,8 +1463,7 @@ static int tun_sendmsg(struct kiocb *iocb, struct socket *sock, return ret; } -static int tun_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t total_len, +static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, int flags) { struct tun_file *tfile = container_of(sock, struct tun_file, socket); diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 5c55f11..724a9b5 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -188,6 +188,8 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes)); skb_put(skb, sizeof(padbytes)); } + + usbnet_set_skb_tx_stats(skb, 1); return skb; } diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index 8cfc3bb..4e2b26a 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -641,7 +641,7 @@ static void catc_set_multicast_list(struct net_device *netdev) u8 broadcast[ETH_ALEN]; u8 rx = RxEnable | RxPolarity | RxMultiCast; - memset(broadcast, 0xff, ETH_ALEN); + eth_broadcast_addr(broadcast); memset(catc->multicast, 0, 64); catc_multicast(broadcast, catc->multicast); @@ -880,7 +880,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id dev_dbg(dev, "Filling the multicast list.\n"); - memset(broadcast, 0xff, ETH_ALEN); + eth_broadcast_addr(broadcast); catc_multicast(broadcast, catc->multicast); catc_multicast(netdev->dev_addr, catc->multicast); 
catc_write_mem(catc, 0xfa80, catc->multicast, 64); diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 96fc8a5..e4b7a47 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -394,7 +394,7 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_ skb_put(skb, ETH_HLEN); skb_reset_mac_header(skb); eth_hdr(skb)->h_proto = proto; - memset(eth_hdr(skb)->h_source, 0, ETH_ALEN); + eth_zero_addr(eth_hdr(skb)->h_source); memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN); /* add datagram */ diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 80a844e..70cbea5 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1172,7 +1172,6 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) /* return skb */ ctx->tx_curr_skb = NULL; - dev->net->stats.tx_packets += ctx->tx_curr_frame_num; /* keep private stats: framing overhead and number of NTBs */ ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload; @@ -1184,6 +1183,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) */ dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload; + usbnet_set_skb_tx_stats(skb_out, n); + return skb_out; exit_no_skb: diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 778e915..111d907 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -1477,6 +1477,7 @@ static void tiocmget_intr_callback(struct urb *urb) struct uart_icount *icount; struct hso_serial_state_notification *serial_state_notification; struct usb_device *usb; + struct usb_interface *interface; int if_num; /* Sanity checks */ @@ -1494,7 +1495,9 @@ static void tiocmget_intr_callback(struct urb *urb) BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM); usb = serial->parent->usb; - if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber; + interface = serial->parent->interface; + + if_num = interface->cur_altsetting->desc.bInterfaceNumber; /* wIndex should be the USB interface number of the port to which the * notification applies, which should always be the Modem port. 
@@ -1675,6 +1678,7 @@ static int hso_serial_tiocmset(struct tty_struct *tty, unsigned long flags; int if_num; struct hso_serial *serial = tty->driver_data; + struct usb_interface *interface; /* sanity check */ if (!serial) { @@ -1685,7 +1689,8 @@ static int hso_serial_tiocmset(struct tty_struct *tty, if ((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM) return -EINVAL; - if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber; + interface = serial->parent->interface; + if_num = interface->cur_altsetting->desc.bInterfaceNumber; spin_lock_irqsave(&serial->serial_lock, flags); if (set & TIOCM_RTS) @@ -2808,7 +2813,7 @@ static int hso_get_config_data(struct usb_interface *interface) { struct usb_device *usbdev = interface_to_usbdev(interface); u8 *config_data = kmalloc(17, GFP_KERNEL); - u32 if_num = interface->altsetting->desc.bInterfaceNumber; + u32 if_num = interface->cur_altsetting->desc.bInterfaceNumber; s32 result; if (!config_data) @@ -2886,7 +2891,7 @@ static int hso_probe(struct usb_interface *interface, return -ENODEV; } - if_num = interface->altsetting->desc.bInterfaceNumber; + if_num = interface->cur_altsetting->desc.bInterfaceNumber; /* Get the interface/port specification from either driver_info or from * the device itself */ diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c index 8f37efd..5714107 100644 --- a/drivers/net/usb/lg-vl600.c +++ b/drivers/net/usb/lg-vl600.c @@ -201,7 +201,7 @@ static int vl600_rx_fixup(struct usbnet *dev, struct sk_buff *skb) &buf->data[sizeof(*ethhdr) + 0x12], ETH_ALEN); } else { - memset(ethhdr->h_source, 0, ETH_ALEN); + eth_zero_addr(ethhdr->h_source); memcpy(ethhdr->h_dest, dev->net->dev_addr, ETH_ALEN); /* Inbound IPv6 packets have an IPv4 ethertype (0x800) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 602dc66..f603f36 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -108,7 +108,7 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb) skb_push(skb, ETH_HLEN); skb_reset_mac_header(skb); eth_hdr(skb)->h_proto = proto; - memset(eth_hdr(skb)->h_source, 0, ETH_ALEN); + eth_zero_addr(eth_hdr(skb)->h_source); fix_dest: memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN); return 1; diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 438fc6b..5065538 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -104,7 +104,8 @@ #define USB_TX_AGG 0xd40a #define USB_RX_BUF_TH 0xd40c #define USB_USB_TIMER 0xd428 -#define USB_RX_EARLY_AGG 0xd42c +#define USB_RX_EARLY_TIMEOUT 0xd42c +#define USB_RX_EARLY_SIZE 0xd42e #define USB_PM_CTRL_STATUS 0xd432 #define USB_TX_DMA 0xd434 #define USB_TOLERANCE 0xd490 @@ -349,10 +350,10 @@ /* USB_MISC_0 */ #define PCUT_STATUS 0x0001 -/* USB_RX_EARLY_AGG */ -#define EARLY_AGG_SUPPER 0x0e832981 -#define EARLY_AGG_HIGH 0x0e837a12 -#define EARLY_AGG_SLOW 0x0e83ffff +/* USB_RX_EARLY_TIMEOUT */ +#define COALESCE_SUPER 85000U +#define COALESCE_HIGH 250000U +#define COALESCE_SLOW 524280U /* USB_WDT11_CTRL */ #define TIMER11_EN 0x0001 @@ -606,6 +607,7 @@ struct r8152 { u32 saved_wolopts; u32 msg_enable; u32 tx_qlen; + u32 coalesce; u16 ocp_base; u8 *intr_buff; u8 version; @@ -2142,28 +2144,19 @@ static int rtl8152_enable(struct r8152 *tp) return rtl_enable(tp); } -static void r8153_set_rx_agg(struct r8152 *tp) +static void r8153_set_rx_early_timeout(struct r8152 *tp) { - u8 speed; + u32 ocp_data = tp->coalesce / 8; - speed = rtl8152_get_speed(tp); - if (speed & _1000bps) { - if 
(tp->udev->speed == USB_SPEED_SUPER) { - ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, - RX_THR_SUPPER); - ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG, - EARLY_AGG_SUPPER); - } else { - ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, - RX_THR_HIGH); - ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG, - EARLY_AGG_HIGH); - } - } else { - ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_SLOW); - ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG, - EARLY_AGG_SLOW); - } + ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_TIMEOUT, ocp_data); +} + +static void r8153_set_rx_early_size(struct r8152 *tp) +{ + u32 mtu = tp->netdev->mtu; + u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 4; + + ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data); } static int rtl8153_enable(struct r8152 *tp) @@ -2173,7 +2166,8 @@ static int rtl8153_enable(struct r8152 *tp) set_tx_qlen(tp); rtl_set_eee_plus(tp); - r8153_set_rx_agg(tp); + r8153_set_rx_early_timeout(tp); + r8153_set_rx_early_size(tp); return rtl_enable(tp); } @@ -3719,6 +3713,61 @@ out: return ret; } +static int rtl8152_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coalesce) +{ + struct r8152 *tp = netdev_priv(netdev); + + switch (tp->version) { + case RTL_VER_01: + case RTL_VER_02: + return -EOPNOTSUPP; + default: + break; + } + + coalesce->rx_coalesce_usecs = tp->coalesce; + + return 0; +} + +static int rtl8152_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coalesce) +{ + struct r8152 *tp = netdev_priv(netdev); + int ret; + + switch (tp->version) { + case RTL_VER_01: + case RTL_VER_02: + return -EOPNOTSUPP; + default: + break; + } + + if (coalesce->rx_coalesce_usecs > COALESCE_SLOW) + return -EINVAL; + + ret = usb_autopm_get_interface(tp->intf); + if (ret < 0) + return ret; + + mutex_lock(&tp->control); + + if (tp->coalesce != coalesce->rx_coalesce_usecs) { + tp->coalesce = coalesce->rx_coalesce_usecs; + + if (netif_running(tp->netdev) && netif_carrier_ok(netdev)) + r8153_set_rx_early_timeout(tp); + } + + mutex_unlock(&tp->control); + + usb_autopm_put_interface(tp->intf); + + return ret; +} + static struct ethtool_ops ops = { .get_drvinfo = rtl8152_get_drvinfo, .get_settings = rtl8152_get_settings, @@ -3732,6 +3781,8 @@ static struct ethtool_ops ops = { .get_strings = rtl8152_get_strings, .get_sset_count = rtl8152_get_sset_count, .get_ethtool_stats = rtl8152_get_ethtool_stats, + .get_coalesce = rtl8152_get_coalesce, + .set_coalesce = rtl8152_set_coalesce, .get_eee = rtl_ethtool_get_eee, .set_eee = rtl_ethtool_set_eee, }; @@ -3783,6 +3834,7 @@ out: static int rtl8152_change_mtu(struct net_device *dev, int new_mtu) { struct r8152 *tp = netdev_priv(dev); + int ret; switch (tp->version) { case RTL_VER_01: @@ -3795,9 +3847,22 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu) if (new_mtu < 68 || new_mtu > RTL8153_MAX_MTU) return -EINVAL; + ret = usb_autopm_get_interface(tp->intf); + if (ret < 0) + return ret; + + mutex_lock(&tp->control); + dev->mtu = new_mtu; - return 0; + if (netif_running(dev) && netif_carrier_ok(dev)) + r8153_set_rx_early_size(tp); + + mutex_unlock(&tp->control); + + usb_autopm_put_interface(tp->intf); + + return ret; } static const struct net_device_ops rtl8152_netdev_ops = { @@ -3966,6 +4031,18 @@ static int rtl8152_probe(struct usb_interface *intf, tp->mii.reg_num_mask = 0x1f; tp->mii.phy_id = R8152_PHY_ID; + switch (udev->speed) { + case USB_SPEED_SUPER: + tp->coalesce = COALESCE_SUPER; + break; + case USB_SPEED_HIGH: + tp->coalesce 
= COALESCE_HIGH; + break; + default: + tp->coalesce = COALESCE_SLOW; + break; + } + intf->needs_remote_wakeup = 1; tp->rtl_ops.init(tp); diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index b94a0fb..7650cdc 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c @@ -144,6 +144,7 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb, skb_put(skb, sizeof(padbytes)); } + usbnet_set_skb_tx_stats(skb, 1); return skb; } diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 449835f..0f3ff28 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1188,8 +1188,7 @@ static void tx_complete (struct urb *urb) struct usbnet *dev = entry->dev; if (urb->status == 0) { - if (!(dev->driver_info->flags & FLAG_MULTI_PACKET)) - dev->net->stats.tx_packets++; + dev->net->stats.tx_packets += entry->packets; dev->net->stats.tx_bytes += entry->length; } else { dev->net->stats.tx_errors++; @@ -1348,6 +1347,8 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, urb->transfer_flags |= URB_ZERO_PACKET; } entry->length = urb->transfer_buffer_length = length; + if (!(info->flags & FLAG_MULTI_PACKET)) + usbnet_set_skb_tx_stats(skb, 1); spin_lock_irqsave(&dev->txq.lock, flags); retval = usb_autopm_get_interface_async(dev->intf); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 294214c..61c0840 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -819,6 +819,7 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) { struct Vmxnet3_TxDataDesc *tdd; + u8 protocol = 0; if (ctx->mss) { /* TSO */ ctx->eth_ip_hdr_size = skb_transport_offset(skb); @@ -831,16 +832,25 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, if (ctx->ipv4) { const struct iphdr *iph = ip_hdr(skb); - if (iph->protocol == IPPROTO_TCP) - ctx->l4_hdr_size = tcp_hdrlen(skb); - else if (iph->protocol == IPPROTO_UDP) - ctx->l4_hdr_size = sizeof(struct udphdr); - else - ctx->l4_hdr_size = 0; - } else { - /* for simplicity, don't copy L4 headers */ + protocol = iph->protocol; + } else if (ctx->ipv6) { + const struct ipv6hdr *ipv6h = ipv6_hdr(skb); + + protocol = ipv6h->nexthdr; + } + + switch (protocol) { + case IPPROTO_TCP: + ctx->l4_hdr_size = tcp_hdrlen(skb); + break; + case IPPROTO_UDP: + ctx->l4_hdr_size = sizeof(struct udphdr); + break; + default: ctx->l4_hdr_size = 0; + break; } + ctx->copy_size = min(ctx->eth_ip_hdr_size + ctx->l4_hdr_size, skb->len); } else { @@ -887,7 +897,7 @@ vmxnet3_prepare_tso(struct sk_buff *skb, iph->check = 0; tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0); - } else { + } else if (ctx->ipv6) { struct ipv6hdr *iph = ipv6_hdr(skb); tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0, @@ -938,6 +948,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, count = txd_estimate(skb); ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP)); + ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6)); ctx.mss = skb_shinfo(skb)->gso_size; if (ctx.mss) { diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index cd71c77..6bb769a 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -69,10 +69,10 @@ /* * Version numbers */ -#define VMXNET3_DRIVER_VERSION_STRING "1.3.4.0-k" +#define VMXNET3_DRIVER_VERSION_STRING "1.3.5.0-k" /* a 32-bit int, each byte encode a verion 
number in VMXNET3_DRIVER_VERSION */ -#define VMXNET3_DRIVER_VERSION_NUM 0x01030400 +#define VMXNET3_DRIVER_VERSION_NUM 0x01030500 #if defined(CONFIG_PCI_MSI) /* RSS only makes sense if MSI-X is supported. */ @@ -211,6 +211,7 @@ struct vmxnet3_tq_driver_stats { struct vmxnet3_tx_ctx { bool ipv4; + bool ipv6; u16 mss; u32 eth_ip_hdr_size; /* only valid for pkts requesting tso or csum * offloading diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index e71a2ce..6274432 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -2676,7 +2676,7 @@ static void wifi_setup(struct net_device *dev) dev->addr_len = ETH_ALEN; dev->tx_queue_len = 100; - memset(dev->broadcast,0xFF, ETH_ALEN); + eth_broadcast_addr(dev->broadcast); dev->flags = IFF_BROADCAST|IFF_MULTICAST; } @@ -3273,7 +3273,7 @@ static void airo_handle_link(struct airo_info *ai) } /* Send event to user space */ - memset(wrqu.ap_addr.sa_data, '\0', ETH_ALEN); + eth_zero_addr(wrqu.ap_addr.sa_data); wrqu.ap_addr.sa_family = ARPHRD_ETHER; wireless_send_event(ai->dev, SIOCGIWAP, &wrqu, NULL); } diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c index da92bfa..49219c5 100644 --- a/drivers/net/wireless/at76c50x-usb.c +++ b/drivers/net/wireless/at76c50x-usb.c @@ -1166,7 +1166,7 @@ static int at76_start_monitor(struct at76_priv *priv) int ret; memset(&scan, 0, sizeof(struct at76_req_scan)); - memset(scan.bssid, 0xff, ETH_ALEN); + eth_broadcast_addr(scan.bssid); scan.channel = priv->channel; scan.scan_type = SCAN_TYPE_PASSIVE; @@ -1427,7 +1427,7 @@ static int at76_startup_device(struct at76_priv *priv) at76_wait_completion(priv, CMD_STARTUP); /* remove BSSID from previous run */ - memset(priv->bssid, 0, ETH_ALEN); + eth_zero_addr(priv->bssid); priv->scanning = false; @@ -1973,7 +1973,7 @@ static int at76_hw_scan(struct ieee80211_hw *hw, ieee80211_stop_queues(hw); memset(&scan, 0, sizeof(struct at76_req_scan)); - memset(scan.bssid, 0xFF, ETH_ALEN); + eth_broadcast_addr(scan.bssid); if (req->n_ssids) { scan.scan_type = SCAN_TYPE_ACTIVE; diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index c18647b..0eddb20 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -39,7 +39,7 @@ struct ath10k_ce_pipe; #define CE_DESC_FLAGS_GATHER (1 << 0) #define CE_DESC_FLAGS_BYTE_SWAP (1 << 1) #define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC -#define CE_DESC_FLAGS_META_DATA_LSB 3 +#define CE_DESC_FLAGS_META_DATA_LSB 2 struct ce_desc { __le32 addr; diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 310e12b..c0e454b 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -436,16 +436,16 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode) static void ath10k_core_free_firmware_files(struct ath10k *ar) { - if (ar->board && !IS_ERR(ar->board)) + if (!IS_ERR(ar->board)) release_firmware(ar->board); - if (ar->otp && !IS_ERR(ar->otp)) + if (!IS_ERR(ar->otp)) release_firmware(ar->otp); - if (ar->firmware && !IS_ERR(ar->firmware)) + if (!IS_ERR(ar->firmware)) release_firmware(ar->firmware); - if (ar->cal_file && !IS_ERR(ar->cal_file)) + if (!IS_ERR(ar->cal_file)) release_firmware(ar->cal_file); ar->board = NULL; diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index d60e46f..f65310c3 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ 
b/drivers/net/wireless/ath/ath10k/core.h @@ -159,6 +159,25 @@ struct ath10k_fw_stats_peer { u32 peer_rx_rate; /* 10x only */ }; +struct ath10k_fw_stats_vdev { + struct list_head list; + + u32 vdev_id; + u32 beacon_snr; + u32 data_snr; + u32 num_tx_frames[4]; + u32 num_rx_frames; + u32 num_tx_frames_retries[4]; + u32 num_tx_frames_failures[4]; + u32 num_rts_fail; + u32 num_rts_success; + u32 num_rx_err; + u32 num_rx_discard; + u32 num_tx_not_acked; + u32 tx_rate_history[10]; + u32 beacon_rssi_history[10]; +}; + struct ath10k_fw_stats_pdev { struct list_head list; @@ -220,6 +239,7 @@ struct ath10k_fw_stats_pdev { struct ath10k_fw_stats { struct list_head pdevs; + struct list_head vdevs; struct list_head peers; }; @@ -288,6 +308,7 @@ struct ath10k_vif { bool is_started; bool is_up; bool spectral_enabled; + bool ps; u32 aid; u8 bssid[ETH_ALEN]; @@ -413,6 +434,12 @@ enum ath10k_fw_features { */ ATH10K_FW_FEATURE_WMI_10_2 = 4, + /* Some firmware revisions lack proper multi-interface client powersave + * implementation. Enabling PS could result in connection drops, + * traffic stalls, etc. + */ + ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT = 5, + /* keep last */ ATH10K_FW_FEATURE_COUNT, }; diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index d2281e5..301081d 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -243,6 +243,16 @@ static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head) } } +static void ath10k_debug_fw_stats_vdevs_free(struct list_head *head) +{ + struct ath10k_fw_stats_vdev *i, *tmp; + + list_for_each_entry_safe(i, tmp, head, list) { + list_del(&i->list); + kfree(i); + } +} + static void ath10k_debug_fw_stats_peers_free(struct list_head *head) { struct ath10k_fw_stats_peer *i, *tmp; @@ -258,6 +268,7 @@ static void ath10k_debug_fw_stats_reset(struct ath10k *ar) spin_lock_bh(&ar->data_lock); ar->debug.fw_stats_done = false; ath10k_debug_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs); + ath10k_debug_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs); ath10k_debug_fw_stats_peers_free(&ar->debug.fw_stats.peers); spin_unlock_bh(&ar->data_lock); } @@ -273,14 +284,27 @@ static size_t ath10k_debug_fw_stats_num_peers(struct list_head *head) return num; } +static size_t ath10k_debug_fw_stats_num_vdevs(struct list_head *head) +{ + struct ath10k_fw_stats_vdev *i; + size_t num = 0; + + list_for_each_entry(i, head, list) + ++num; + + return num; +} + void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_fw_stats stats = {}; bool is_start, is_started, is_end; size_t num_peers; + size_t num_vdevs; int ret; INIT_LIST_HEAD(&stats.pdevs); + INIT_LIST_HEAD(&stats.vdevs); INIT_LIST_HEAD(&stats.peers); spin_lock_bh(&ar->data_lock); @@ -308,6 +332,7 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) } num_peers = ath10k_debug_fw_stats_num_peers(&ar->debug.fw_stats.peers); + num_vdevs = ath10k_debug_fw_stats_num_vdevs(&ar->debug.fw_stats.vdevs); is_start = (list_empty(&ar->debug.fw_stats.pdevs) && !list_empty(&stats.pdevs)); is_end = (!list_empty(&ar->debug.fw_stats.pdevs) && @@ -330,7 +355,13 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) goto free; } + if (num_vdevs >= BITS_PER_LONG) { + ath10k_warn(ar, "dropping fw vdev stats\n"); + goto free; + } + list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers); + list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs); } 
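The vdev stats support added here reuses the accumulate-and-splice idiom already used for peer stats: entries parsed from the WMI event land on a temporary list, are counted with a plain list walk, and are then moved wholesale onto the long-lived debug list. A minimal sketch of the idiom, with hypothetical helper names and assuming only <linux/list.h>:

	static size_t count_entries(struct list_head *head)
	{
		struct list_head *pos;
		size_t n = 0;

		list_for_each(pos, head)
			n++;
		return n;
	}

	static void accumulate_stats(struct list_head *tmp, struct list_head *dst)
	{
		/* Moves every entry to the tail of dst and re-initialises tmp,
		 * so the shared error path can free tmp unconditionally.
		 */
		list_splice_tail_init(tmp, dst);
	}
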
complete(&ar->debug.fw_stats_complete); @@ -340,6 +371,7 @@ free: * resources if that is not the case. */ ath10k_debug_fw_stats_pdevs_free(&stats.pdevs); + ath10k_debug_fw_stats_vdevs_free(&stats.vdevs); ath10k_debug_fw_stats_peers_free(&stats.peers); unlock: @@ -363,7 +395,10 @@ static int ath10k_debug_fw_stats_request(struct ath10k *ar) reinit_completion(&ar->debug.fw_stats_complete); - ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT); + ret = ath10k_wmi_request_stats(ar, + WMI_STAT_PDEV | + WMI_STAT_VDEV | + WMI_STAT_PEER); if (ret) { ath10k_warn(ar, "could not request stats (%d)\n", ret); return ret; @@ -395,8 +430,11 @@ static void ath10k_fw_stats_fill(struct ath10k *ar, unsigned int len = 0; unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE; const struct ath10k_fw_stats_pdev *pdev; + const struct ath10k_fw_stats_vdev *vdev; const struct ath10k_fw_stats_peer *peer; size_t num_peers; + size_t num_vdevs; + int i; spin_lock_bh(&ar->data_lock); @@ -408,6 +446,7 @@ static void ath10k_fw_stats_fill(struct ath10k *ar, } num_peers = ath10k_debug_fw_stats_num_peers(&fw_stats->peers); + num_vdevs = ath10k_debug_fw_stats_num_vdevs(&fw_stats->vdevs); len += scnprintf(buf + len, buf_len - len, "\n"); len += scnprintf(buf + len, buf_len - len, "%30s\n", @@ -531,6 +570,65 @@ static void ath10k_fw_stats_fill(struct ath10k *ar, len += scnprintf(buf + len, buf_len - len, "\n"); len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", + "ath10k VDEV stats", num_vdevs); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "================="); + + list_for_each_entry(vdev, &fw_stats->vdevs, list) { + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "vdev id", vdev->vdev_id); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "beacon snr", vdev->beacon_snr); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "data snr", vdev->data_snr); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num rx frames", vdev->num_rx_frames); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num rts fail", vdev->num_rts_fail); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num rts success", vdev->num_rts_success); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num rx err", vdev->num_rx_err); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num rx discard", vdev->num_rx_discard); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num tx not acked", vdev->num_tx_not_acked); + + for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++) + len += scnprintf(buf + len, buf_len - len, + "%25s [%02d] %u\n", + "num tx frames", i, + vdev->num_tx_frames[i]); + + for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++) + len += scnprintf(buf + len, buf_len - len, + "%25s [%02d] %u\n", + "num tx frames retries", i, + vdev->num_tx_frames_retries[i]); + + for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++) + len += scnprintf(buf + len, buf_len - len, + "%25s [%02d] %u\n", + "num tx frames failures", i, + vdev->num_tx_frames_failures[i]); + + for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++) + len += scnprintf(buf + len, buf_len - len, + "%25s [%02d] 0x%08x\n", + "tx rate history", i, + vdev->tx_rate_history[i]); + + for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++) + len += scnprintf(buf + len, buf_len - len, + "%25s [%02d] %u\n", + "beacon rssi history", i, + vdev->beacon_rssi_history[i]); + + len += scnprintf(buf + len, buf_len - len, "\n"); + } + + len += scnprintf(buf + len, buf_len - len, 
"\n"); + len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", "ath10k PEER stats", num_peers); len += scnprintf(buf + len, buf_len - len, "%30s\n\n", "================="); @@ -1900,6 +1998,7 @@ int ath10k_debug_create(struct ath10k *ar) return -ENOMEM; INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs); + INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs); INIT_LIST_HEAD(&ar->debug.fw_stats.peers); return 0; diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index c1da44f..01a2b38 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -176,7 +176,7 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt) * automatically balances load wrt to CPU power. * * This probably comes at a cost of lower maximum throughput but - * improves the avarage and stability. */ + * improves the average and stability. */ spin_lock_bh(&htt->rx_ring.lock); num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt; num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit); diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index d6d2f0f..5d2db06 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -611,7 +611,7 @@ static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id) ret = ath10k_vdev_setup_sync(ar); if (ret) { - ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i: %d\n", + ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n", vdev_id, ret); return ret; } @@ -658,7 +658,7 @@ static int ath10k_monitor_vdev_stop(struct ath10k *ar) ret = ath10k_vdev_setup_sync(ar); if (ret) - ath10k_warn(ar, "failed to synchronise monitor vdev %i: %d\n", + ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n", ar->monitor_vdev_id, ret); ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n", @@ -927,8 +927,9 @@ static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, bool restart) ret = ath10k_vdev_setup_sync(ar); if (ret) { - ath10k_warn(ar, "failed to synchronise setup for vdev %i: %d\n", - arg.vdev_id, ret); + ath10k_warn(ar, + "failed to synchronize setup for vdev %i restart %d: %d\n", + arg.vdev_id, restart, ret); return ret; } @@ -966,7 +967,7 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif) ret = ath10k_vdev_setup_sync(ar); if (ret) { - ath10k_warn(ar, "failed to syncronise setup for vdev %i: %d\n", + ath10k_warn(ar, "failed to synchronize setup for vdev %i stop: %d\n", arvif->vdev_id, ret); return ret; } @@ -1182,7 +1183,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif, if (is_zero_ether_addr(arvif->bssid)) return; - memset(arvif->bssid, 0, ETH_ALEN); + eth_zero_addr(arvif->bssid); return; } @@ -1253,6 +1254,20 @@ static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif) return 0; } +static int ath10k_mac_ps_vif_count(struct ath10k *ar) +{ + struct ath10k_vif *arvif; + int num = 0; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) + if (arvif->ps) + num++; + + return num; +} + static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) { struct ath10k *ar = arvif->ar; @@ -1262,13 +1277,24 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) enum wmi_sta_ps_mode psmode; int ret; int ps_timeout; + bool enable_ps; lockdep_assert_held(&arvif->ar->conf_mutex); if (arvif->vif->type != NL80211_IFTYPE_STATION) return 0; - if (vif->bss_conf.ps) { + enable_ps = arvif->ps; + + if 
(enable_ps && ath10k_mac_ps_vif_count(ar) > 1 && + !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT, + ar->fw_features)) { + ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n", + arvif->vdev_id); + enable_ps = false; + } + + if (enable_ps) { psmode = WMI_STA_PS_MODE_ENABLED; param = WMI_STA_PS_PARAM_INACTIVITY_TIME; @@ -1781,6 +1807,68 @@ static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif, ath10k_smps_map[smps]); } +static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta_vht_cap vht_cap) +{ + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + int ret; + u32 param; + u32 value; + + if (!(ar->vht_cap_info & + (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))) + return 0; + + param = ar->wmi.vdev_param->txbf; + value = 0; + + if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED)) + return 0; + + /* The following logic is correct. If a remote STA advertises support + * for being a beamformer then we should enable us being a beamformee. + */ + + if (ar->vht_cap_info & + (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { + if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) + value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; + + if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) + value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE; + } + + if (ar->vht_cap_info & + (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { + if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) + value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; + + if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) + value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER; + } + + if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE) + value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; + + if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER) + value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; + + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value); + if (ret) { + ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n", + value, ret); + return ret; + } + + return 0; +} + /* can be called only in mac80211 callbacks due to `key_count` usage */ static void ath10k_bss_assoc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -1789,6 +1877,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, struct ath10k *ar = hw->priv; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); struct ieee80211_sta_ht_cap ht_cap; + struct ieee80211_sta_vht_cap vht_cap; struct wmi_peer_assoc_complete_arg peer_arg; struct ieee80211_sta *ap_sta; int ret; @@ -1811,6 +1900,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, /* ap_sta must be accessed only within rcu section which must be left * before calling ath10k_setup_peer_smps() which might sleep. 
*/ ht_cap = ap_sta->ht_cap; + vht_cap = ap_sta->vht_cap; ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg); if (ret) { @@ -1836,6 +1926,13 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, return; } + ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); + if (ret) { + ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n", + arvif->vdev_id, bss_conf->bssid, ret); + return; + } + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up (associated) bssid %pM aid %d\n", arvif->vdev_id, bss_conf->bssid, bss_conf->aid); @@ -1853,6 +1950,18 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, } arvif->is_up = true; + + /* Workaround: Some firmware revisions (tested with qca6174 + * WLAN.RM.2.0-00073) have buggy powersave state machine and must be + * poked with peer param command. + */ + ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid, + WMI_PEER_DUMMY_VAR, 1); + if (ret) { + ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n", + arvif->bssid, arvif->vdev_id, ret); + return; + } } static void ath10k_bss_disassoc(struct ieee80211_hw *hw, @@ -1860,6 +1969,7 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw, { struct ath10k *ar = hw->priv; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ieee80211_sta_vht_cap vht_cap = {}; int ret; lockdep_assert_held(&ar->conf_mutex); @@ -1874,6 +1984,13 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw, arvif->def_wep_key_idx = -1; + ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); + if (ret) { + ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n", + arvif->vdev_id, ret); + return; + } + arvif->is_up = false; } @@ -2554,6 +2671,17 @@ static int ath10k_start_scan(struct ath10k *ar, return -ETIMEDOUT; } + /* If we failed to start the scan, return error code at + * this point. This is probably due to some issue in the + * firmware, but no need to wedge the driver due to that... + */ + spin_lock_bh(&ar->data_lock); + if (ar->scan.state == ATH10K_SCAN_IDLE) { + spin_unlock_bh(&ar->data_lock); + return -EINVAL; + } + spin_unlock_bh(&ar->data_lock); + /* Add a 200ms margin to account for event/command processing */ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, msecs_to_jiffies(arg->max_scan_time+200)); @@ -3323,9 +3451,10 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, list_del(&arvif->list); if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { - ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr); + ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id, + vif->addr); if (ret) - ath10k_warn(ar, "failed to remove peer for AP vdev %i: %d\n", + ath10k_warn(ar, "failed to submit AP self-peer removal on vdev %i: %d\n", arvif->vdev_id, ret); kfree(arvif->u.ap.noa_data); @@ -3339,6 +3468,21 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n", arvif->vdev_id, ret); + /* Some firmware revisions don't notify host about self-peer removal + * until after associated vdev is deleted. 
+ */ + if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { + ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id, + vif->addr); + if (ret) + ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n", + arvif->vdev_id, ret); + + spin_lock_bh(&ar->data_lock); + ar->num_peers--; + spin_unlock_bh(&ar->data_lock); + } + ath10k_peer_cleanup(ar, arvif->vdev_id); mutex_unlock(&ar->conf_mutex); @@ -3534,7 +3678,9 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, } if (changed & BSS_CHANGED_PS) { - ret = ath10k_mac_vif_setup_ps(arvif); + arvif->ps = vif->bss_conf.ps; + + ret = ath10k_config_ps(ar); if (ret) ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n", arvif->vdev_id, ret); diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index e6972b0..7681237 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -104,7 +104,7 @@ static const struct ce_attr host_ce_config_wlan[] = { { .flags = CE_ATTR_FLAGS, .src_nentries = 0, - .src_sz_max = 512, + .src_sz_max = 2048, .dest_nentries = 512, }, @@ -174,7 +174,7 @@ static const struct ce_pipe_config target_ce_config_wlan[] = { .pipenum = __cpu_to_le32(1), .pipedir = __cpu_to_le32(PIPEDIR_IN), .nentries = __cpu_to_le32(32), - .nbytes_max = __cpu_to_le32(512), + .nbytes_max = __cpu_to_le32(2048), .flags = __cpu_to_le32(CE_ATTR_FLAGS), .reserved = __cpu_to_le32(0), }, diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h index 04dc4b9..c8b64e7 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h @@ -110,8 +110,7 @@ struct wmi_ops { bool deliver_cab); struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar, const struct wmi_wmm_params_all_arg *arg); - struct sk_buff *(*gen_request_stats)(struct ath10k *ar, - enum wmi_stats_id stats_id); + struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask); struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar, enum wmi_force_fw_hang_type type, u32 delay_ms); @@ -816,14 +815,14 @@ ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, } static inline int -ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id) +ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask) { struct sk_buff *skb; if (!ar->wmi.ops->gen_request_stats) return -EOPNOTSUPP; - skb = ar->wmi.ops->gen_request_stats(ar, stats_id); + skb = ar->wmi.ops->gen_request_stats(ar, stats_mask); if (IS_ERR(skb)) return PTR_ERR(skb); diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 71614ba..ee0c5f6 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -869,16 +869,57 @@ static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar, return 0; } +static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src, + struct ath10k_fw_stats_vdev *dst) +{ + int i; + + dst->vdev_id = __le32_to_cpu(src->vdev_id); + dst->beacon_snr = __le32_to_cpu(src->beacon_snr); + dst->data_snr = __le32_to_cpu(src->data_snr); + dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames); + dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail); + dst->num_rts_success = __le32_to_cpu(src->num_rts_success); + dst->num_rx_err = __le32_to_cpu(src->num_rx_err); + dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard); + dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked); + + for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++) + dst->num_tx_frames[i] = + 
__le32_to_cpu(src->num_tx_frames[i]); + + for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++) + dst->num_tx_frames_retries[i] = + __le32_to_cpu(src->num_tx_frames_retries[i]); + + for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++) + dst->num_tx_frames_failures[i] = + __le32_to_cpu(src->num_tx_frames_failures[i]); + + for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++) + dst->tx_rate_history[i] = + __le32_to_cpu(src->tx_rate_history[i]); + + for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++) + dst->beacon_rssi_history[i] = + __le32_to_cpu(src->beacon_rssi_history[i]); +} + static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb, struct ath10k_fw_stats *stats) { const void **tb; - const struct wmi_stats_event *ev; + const struct wmi_tlv_stats_ev *ev; const void *data; - u32 num_pdev_stats, num_vdev_stats, num_peer_stats; + u32 num_pdev_stats; + u32 num_vdev_stats; + u32 num_peer_stats; + u32 num_bcnflt_stats; + u32 num_chan_stats; size_t data_len; int ret; + int i; tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); if (IS_ERR(tb)) { @@ -899,8 +940,73 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar, num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); num_peer_stats = __le32_to_cpu(ev->num_peer_stats); + num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats); + num_chan_stats = __le32_to_cpu(ev->num_chan_stats); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i\n", + num_pdev_stats, num_vdev_stats, num_peer_stats, + num_bcnflt_stats, num_chan_stats); + + for (i = 0; i < num_pdev_stats; i++) { + const struct wmi_pdev_stats *src; + struct ath10k_fw_stats_pdev *dst; + + src = data; + if (data_len < sizeof(*src)) + return -EPROTO; + + data += sizeof(*src); + data_len -= sizeof(*src); + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ath10k_wmi_pull_pdev_stats_base(&src->base, dst); + ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst); + ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst); + list_add_tail(&dst->list, &stats->pdevs); + } + + for (i = 0; i < num_vdev_stats; i++) { + const struct wmi_tlv_vdev_stats *src; + struct ath10k_fw_stats_vdev *dst; + + src = data; + if (data_len < sizeof(*src)) + return -EPROTO; + + data += sizeof(*src); + data_len -= sizeof(*src); + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; - WARN_ON(1); /* FIXME: not implemented yet */ + ath10k_wmi_tlv_pull_vdev_stats(src, dst); + list_add_tail(&dst->list, &stats->vdevs); + } + + for (i = 0; i < num_peer_stats; i++) { + const struct wmi_10x_peer_stats *src; + struct ath10k_fw_stats_peer *dst; + + src = data; + if (data_len < sizeof(*src)) + return -EPROTO; + + data += sizeof(*src); + data_len -= sizeof(*src); + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ath10k_wmi_pull_peer_stats(&src->old, dst); + dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate); + list_add_tail(&dst->list, &stats->peers); + } kfree(tb); return 0; @@ -1604,14 +1710,12 @@ ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id, const struct wmi_wmm_params_all_arg *arg) { struct wmi_tlv_vdev_set_wmm_cmd *cmd; - struct wmi_wmm_params *wmm; struct wmi_tlv *tlv; struct sk_buff *skb; size_t len; void *ptr; - len = (sizeof(*tlv) + sizeof(*cmd)) + - (4 * (sizeof(*tlv) + sizeof(*wmm))); + len = sizeof(*tlv) + sizeof(*cmd); skb = ath10k_wmi_alloc_skb(ar, len); if (!skb) return ERR_PTR(-ENOMEM); @@ 
-1623,13 +1727,10 @@ ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id, cmd = (void *)tlv->value; cmd->vdev_id = __cpu_to_le32(vdev_id); - ptr += sizeof(*tlv); - ptr += sizeof(*cmd); - - ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be); - ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk); - ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi); - ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo); + ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be); + ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk); + ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi); + ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n"); return skb; @@ -2080,8 +2181,7 @@ ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar, } static struct sk_buff * -ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, - enum wmi_stats_id stats_id) +ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask) { struct wmi_request_stats_cmd *cmd; struct wmi_tlv *tlv; @@ -2095,7 +2195,7 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD); tlv->len = __cpu_to_le16(sizeof(*cmd)); cmd = (void *)tlv->value; - cmd->stats_id = __cpu_to_le32(stats_id); + cmd->stats_id = __cpu_to_le32(stats_mask); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n"); return skb; diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h index de68fe7..a6c8280 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h @@ -1302,8 +1302,14 @@ struct wmi_tlv_pdev_set_wmm_cmd { __le32 dg_type; /* no idea.. */ } __packed; +struct wmi_tlv_vdev_wmm_params { + __le32 dummy; + struct wmi_wmm_params params; +} __packed; + struct wmi_tlv_vdev_set_wmm_cmd { __le32 vdev_id; + struct wmi_tlv_vdev_wmm_params vdev_wmm_params[4]; } __packed; struct wmi_tlv_phyerr_ev { @@ -1439,6 +1445,15 @@ struct wmi_tlv_sta_keepalive_cmd { __le32 interval; /* in seconds */ } __packed; +struct wmi_tlv_stats_ev { + __le32 stats_id; /* WMI_STAT_ */ + __le32 num_pdev_stats; + __le32 num_vdev_stats; + __le32 num_peer_stats; + __le32 num_bcnflt_stats; + __le32 num_chan_stats; +} __packed; + void ath10k_wmi_tlv_attach(struct ath10k *ar); #endif diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index aeea1c7..c7ea77e 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1125,6 +1125,25 @@ static void ath10k_wmi_event_scan_started(struct ath10k *ar) } } +static void ath10k_wmi_event_scan_start_failed(struct ath10k *ar) +{ + lockdep_assert_held(&ar->data_lock); + + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: + ath10k_warn(ar, "received scan start failed event in an invalid scan state: %s (%d)\n", + ath10k_scan_state_str(ar->scan.state), + ar->scan.state); + break; + case ATH10K_SCAN_STARTING: + complete(&ar->scan.started); + __ath10k_scan_finish(ar); + break; + } +} + static void ath10k_wmi_event_scan_completed(struct ath10k *ar) { lockdep_assert_held(&ar->data_lock); @@ -1292,6 +1311,7 @@ int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb) break; case WMI_SCAN_EVENT_START_FAILED: ath10k_warn(ar, "received scan start failure event\n"); + ath10k_wmi_event_scan_start_failed(ar); break; case WMI_SCAN_EVENT_DEQUEUED: case 
WMI_SCAN_EVENT_PREEMPTED: @@ -4954,7 +4974,7 @@ ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar, } static struct sk_buff * -ath10k_wmi_op_gen_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id) +ath10k_wmi_op_gen_request_stats(struct ath10k *ar, u32 stats_mask) { struct wmi_request_stats_cmd *cmd; struct sk_buff *skb; @@ -4964,9 +4984,10 @@ ath10k_wmi_op_gen_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id) return ERR_PTR(-ENOMEM); cmd = (struct wmi_request_stats_cmd *)skb->data; - cmd->stats_id = __cpu_to_le32(stats_id); + cmd->stats_id = __cpu_to_le32(stats_mask); - ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id); + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats 0x%08x\n", + stats_mask); return skb; } diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 20ce360..adf935b 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -3057,8 +3057,12 @@ struct wmi_pdev_stats_peer { } __packed; enum wmi_stats_id { - WMI_REQUEST_PEER_STAT = 0x01, - WMI_REQUEST_AP_STAT = 0x02 + WMI_STAT_PEER = BIT(0), + WMI_STAT_AP = BIT(1), + WMI_STAT_PDEV = BIT(2), + WMI_STAT_VDEV = BIT(3), + WMI_STAT_BCNFLT = BIT(4), + WMI_STAT_VDEV_RATE = BIT(5), }; struct wlan_inst_rssi_args { @@ -3093,7 +3097,7 @@ struct wmi_pdev_suspend_cmd { } __packed; struct wmi_stats_event { - __le32 stats_id; /* %WMI_REQUEST_ */ + __le32 stats_id; /* WMI_STAT_ */ /* * number of pdev stats event structures * (wmi_pdev_stats) 0 or 1 @@ -3745,6 +3749,11 @@ enum wmi_10x_vdev_param { WMI_10X_VDEV_PARAM_VHT80_RATEMASK, }; +#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0) +#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1) +#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2) +#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3) + /* slot time long */ #define WMI_VDEV_SLOT_TIME_LONG 0x1 /* slot time short */ @@ -4436,7 +4445,8 @@ enum wmi_peer_param { WMI_PEER_AUTHORIZE = 0x3, WMI_PEER_CHAN_WIDTH = 0x4, WMI_PEER_NSS = 0x5, - WMI_PEER_USE_4ADDR = 0x6 + WMI_PEER_USE_4ADDR = 0x6, + WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */ }; struct wmi_peer_set_param_cmd { diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index bc9cb35..57a80e8 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -528,7 +528,7 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah, * together with the BSSID mask when matching addresses. 
*/ iter_data.hw_macaddr = common->macaddr; - memset(&iter_data.mask, 0xff, ETH_ALEN); + eth_broadcast_addr(iter_data.mask); iter_data.found_active = false; iter_data.need_set_hw_addr = true; iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED; diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index 85da63a..e297803 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -2033,7 +2033,7 @@ static int ath6kl_wow_sta(struct ath6kl *ar, struct ath6kl_vif *vif) int ret; /* Setup unicast pkt pattern */ - memset(mac_mask, 0xff, ETH_ALEN); + eth_broadcast_addr(mac_mask); ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi, vif->fw_vif_idx, WOW_LIST_ID, ETH_ALEN, 0, ndev->dev_addr, diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c index b42ba46..1af3fed 100644 --- a/drivers/net/wireless/ath/ath6kl/main.c +++ b/drivers/net/wireless/ath/ath6kl/main.c @@ -105,7 +105,7 @@ static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i) memset(&ar->ap_stats.sta[sta->aid - 1], 0, sizeof(struct wmi_per_sta_stat)); - memset(sta->mac, 0, ETH_ALEN); + eth_zero_addr(sta->mac); memset(sta->wpa_ie, 0, ATH6KL_MAX_IE); sta->aid = 0; sta->sta_flags = 0; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c index 7b94a6c..bd169fa 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c @@ -284,12 +284,12 @@ static void ar9003_mci_prep_interface(struct ath_hw *ah) AR_MCI_INTERRUPT_RX_MSG_CONT_RST); REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, AR_MCI_INTERRUPT_BT_PRI); - if (mci->is_2g) { + if (mci->is_2g && MCI_ANT_ARCH_PA_LNA_SHARED(mci)) { ar9003_mci_send_lna_transfer(ah, true); udelay(5); } - if ((mci->is_2g && !mci->update_2g5g)) { + if (mci->is_2g && !mci->update_2g5g && MCI_ANT_ARCH_PA_LNA_SHARED(mci)) { if (ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, AR_MCI_INTERRUPT_RX_MSG_LNA_INFO, @@ -593,7 +593,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type, if (!time_out) break; - offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data); + offset = ar9003_mci_get_next_gpm_offset(ah, &more_data); if (offset == MCI_GPM_INVALID) continue; @@ -657,7 +657,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type, time_out = 0; while (more_data == MCI_GPM_MORE) { - offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data); + offset = ar9003_mci_get_next_gpm_offset(ah, &more_data); if (offset == MCI_GPM_INVALID) break; @@ -771,8 +771,14 @@ exit: static void ar9003_mci_mute_bt(struct ath_hw *ah) { + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; + /* disable all MCI messages */ REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000); + REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS0, 0xffffffff); + REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS1, 0xffffffff); + REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS2, 0xffffffff); + REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS3, 0xffffffff); REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); /* wait pending HW messages to flush out */ @@ -783,9 +789,10 @@ static void ar9003_mci_mute_bt(struct ath_hw *ah) * 1. reset not after resuming from full sleep * 2. 
before reset MCI RX, to quiet BT and avoid MCI RX misalignment */ - ar9003_mci_send_lna_take(ah, true); - - udelay(5); + if (MCI_ANT_ARCH_PA_LNA_SHARED(mci)) { + ar9003_mci_send_lna_take(ah, true); + udelay(5); + } ar9003_mci_send_sys_sleeping(ah, true); } @@ -821,6 +828,80 @@ static void ar9003_mci_osla_setup(struct ath_hw *ah, bool enable) AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1); } +static void ar9003_mci_stat_setup(struct ath_hw *ah) +{ + struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; + + if (!AR_SREV_9565(ah)) + return; + + if (mci->config & ATH_MCI_CONFIG_MCI_STAT_DBG) { + REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL, + AR_MCI_DBG_CNT_CTRL_ENABLE, 1); + REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL, + AR_MCI_DBG_CNT_CTRL_BT_LINKID, + MCI_STAT_ALL_BT_LINKID); + } else { + REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL, + AR_MCI_DBG_CNT_CTRL_ENABLE, 0); + } +} + +static void ar9003_mci_set_btcoex_ctrl_9565_1ANT(struct ath_hw *ah) +{ + u32 regval; + + regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) | + SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) | + SM(1, AR_BTCOEX_CTRL_PA_SHARED) | + SM(1, AR_BTCOEX_CTRL_LNA_SHARED) | + SM(1, AR_BTCOEX_CTRL_NUM_ANTENNAS) | + SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK) | + SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) | + SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) | + SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); + + REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, + AR_BTCOEX_CTRL2_TX_CHAIN_MASK, 0x1); + REG_WRITE(ah, AR_BTCOEX_CTRL, regval); +} + +static void ar9003_mci_set_btcoex_ctrl_9565_2ANT(struct ath_hw *ah) +{ + u32 regval; + + regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) | + SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) | + SM(0, AR_BTCOEX_CTRL_PA_SHARED) | + SM(0, AR_BTCOEX_CTRL_LNA_SHARED) | + SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) | + SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK) | + SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) | + SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) | + SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); + + REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, + AR_BTCOEX_CTRL2_TX_CHAIN_MASK, 0x0); + REG_WRITE(ah, AR_BTCOEX_CTRL, regval); +} + +static void ar9003_mci_set_btcoex_ctrl_9462(struct ath_hw *ah) +{ + u32 regval; + + regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) | + SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) | + SM(1, AR_BTCOEX_CTRL_PA_SHARED) | + SM(1, AR_BTCOEX_CTRL_LNA_SHARED) | + SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) | + SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK) | + SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) | + SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) | + SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); + + REG_WRITE(ah, AR_BTCOEX_CTRL, regval); +} + int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, bool is_full_sleep) { @@ -831,11 +912,6 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, ath_dbg(common, MCI, "MCI Reset (full_sleep = %d, is_2g = %d)\n", is_full_sleep, is_2g); - if (!mci->gpm_addr && !mci->sched_addr) { - ath_err(common, "MCI GPM and schedule buffers are not allocated\n"); - return -ENOMEM; - } - if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) { ath_err(common, "BTCOEX control register is dead\n"); return -EINVAL; @@ -850,26 +926,17 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, * To avoid MCI state machine be affected by incoming remote MCI msgs, * MCI mode will be enabled later, right before reset the MCI TX and RX. 
*/ - - regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) | - SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) | - SM(1, AR_BTCOEX_CTRL_PA_SHARED) | - SM(1, AR_BTCOEX_CTRL_LNA_SHARED) | - SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) | - SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) | - SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); if (AR_SREV_9565(ah)) { - regval |= SM(1, AR_BTCOEX_CTRL_NUM_ANTENNAS) | - SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK); - REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, - AR_BTCOEX_CTRL2_TX_CHAIN_MASK, 0x1); + u8 ant = MS(mci->config, ATH_MCI_CONFIG_ANT_ARCH); + + if (ant == ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_SHARED) + ar9003_mci_set_btcoex_ctrl_9565_1ANT(ah); + else + ar9003_mci_set_btcoex_ctrl_9565_2ANT(ah); } else { - regval |= SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) | - SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK); + ar9003_mci_set_btcoex_ctrl_9462(ah); } - REG_WRITE(ah, AR_BTCOEX_CTRL, regval); - if (is_2g && !(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) ar9003_mci_osla_setup(ah, true); else @@ -926,23 +993,26 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX); REG_WRITE(ah, AR_MCI_COMMAND2, regval); - ar9003_mci_get_next_gpm_offset(ah, true, NULL); + /* Init GPM offset after MCI Reset Rx */ + ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET); REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) | SM(0x0000, AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM))); - REG_CLR_BIT(ah, AR_MCI_TX_CTRL, - AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); + if (MCI_ANT_ARCH_PA_LNA_SHARED(mci)) + REG_CLR_BIT(ah, AR_MCI_TX_CTRL, + AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); + else + REG_SET_BIT(ah, AR_MCI_TX_CTRL, + AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); ar9003_mci_observation_set_up(ah); mci->ready = true; ar9003_mci_prep_interface(ah); + ar9003_mci_stat_setup(ah); - if (AR_SREV_9565(ah)) - REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL, - AR_MCI_DBG_CNT_CTRL_ENABLE, 0); if (en_int) ar9003_mci_enable_interrupt(ah); @@ -1218,6 +1288,14 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type) } value &= AR_BTCOEX_CTRL_MCI_MODE_EN; break; + case MCI_STATE_INIT_GPM_OFFSET: + value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR); + + if (value < mci->gpm_len) + mci->gpm_idx = value; + else + mci->gpm_idx = 0; + break; case MCI_STATE_LAST_SCHD_MSG_OFFSET: value = MS(REG_READ(ah, AR_MCI_RX_STATUS), AR_MCI_RX_LAST_SCHD_MSG_INDEX); @@ -1364,21 +1442,11 @@ void ar9003_mci_check_gpm_offset(struct ath_hw *ah) mci->gpm_idx = 0; } -u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more) +u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, u32 *more) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; u32 offset, more_gpm = 0, gpm_ptr; - if (first) { - gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR); - - if (gpm_ptr >= mci->gpm_len) - gpm_ptr = 0; - - mci->gpm_idx = gpm_ptr; - return gpm_ptr; - } - /* * This could be useful to avoid new GPM message interrupt which * may lead to spurious interrupt after power sleep, or multiple diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h index 66d7ab9..e288611 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mci.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.h @@ -92,14 +92,36 @@ enum mci_gpm_coex_bt_update_flags_op { #define ATH_MCI_CONFIG_CLK_DIV 0x00003000 #define ATH_MCI_CONFIG_CLK_DIV_S 12 #define ATH_MCI_CONFIG_DISABLE_TUNING 0x00004000 +#define ATH_MCI_CONFIG_DISABLE_AIC 0x00008000 +#define ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN 0x007f0000 +#define 
ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN_S 16 +#define ATH_MCI_CONFIG_NO_QUIET_ACK 0x00800000 +#define ATH_MCI_CONFIG_NO_QUIET_ACK_S 23 +#define ATH_MCI_CONFIG_ANT_ARCH 0x07000000 +#define ATH_MCI_CONFIG_ANT_ARCH_S 24 +#define ATH_MCI_CONFIG_FORCE_QUIET_ACK 0x08000000 +#define ATH_MCI_CONFIG_FORCE_QUIET_ACK_S 27 +#define ATH_MCI_CONFIG_FORCE_2CHAIN_ACK 0x10000000 +#define ATH_MCI_CONFIG_MCI_STAT_DBG 0x20000000 #define ATH_MCI_CONFIG_MCI_WEIGHT_DBG 0x40000000 #define ATH_MCI_CONFIG_DISABLE_MCI 0x80000000 #define ATH_MCI_CONFIG_MCI_OBS_MASK (ATH_MCI_CONFIG_MCI_OBS_MCI | \ ATH_MCI_CONFIG_MCI_OBS_TXRX | \ ATH_MCI_CONFIG_MCI_OBS_BT) + #define ATH_MCI_CONFIG_MCI_OBS_GPIO 0x0000002F +#define ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_NON_SHARED 0x00 +#define ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_SHARED 0x01 +#define ATH_MCI_ANT_ARCH_2_ANT_PA_LNA_NON_SHARED 0x02 +#define ATH_MCI_ANT_ARCH_2_ANT_PA_LNA_SHARED 0x03 +#define ATH_MCI_ANT_ARCH_3_ANT 0x04 + +#define MCI_ANT_ARCH_PA_LNA_SHARED(mci) \ + ((MS(mci->config, ATH_MCI_CONFIG_ANT_ARCH) == ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_SHARED) || \ + (MS(mci->config, ATH_MCI_CONFIG_ANT_ARCH) == ATH_MCI_ANT_ARCH_2_ANT_PA_LNA_SHARED)) + enum mci_message_header { /* length of payload */ MCI_LNA_CTRL = 0x10, /* len = 0 */ MCI_CONT_NACK = 0x20, /* len = 0 */ @@ -188,20 +210,55 @@ enum mci_bt_state { MCI_BT_CAL }; +enum mci_ps_state { + MCI_PS_DISABLE, + MCI_PS_ENABLE, + MCI_PS_ENABLE_OFF, + MCI_PS_ENABLE_ON +}; + /* Type of state query */ enum mci_state_type { MCI_STATE_ENABLE, + MCI_STATE_INIT_GPM_OFFSET, + MCI_STATE_CHECK_GPM_OFFSET, + MCI_STATE_NEXT_GPM_OFFSET, + MCI_STATE_LAST_GPM_OFFSET, + MCI_STATE_BT, + MCI_STATE_SET_BT_SLEEP, MCI_STATE_SET_BT_AWAKE, + MCI_STATE_SET_BT_CAL_START, + MCI_STATE_SET_BT_CAL, MCI_STATE_LAST_SCHD_MSG_OFFSET, MCI_STATE_REMOTE_SLEEP, + MCI_STATE_CONT_STATUS, MCI_STATE_RESET_REQ_WAKE, MCI_STATE_SEND_WLAN_COEX_VERSION, + MCI_STATE_SET_BT_COEX_VERSION, + MCI_STATE_SEND_WLAN_CHANNELS, MCI_STATE_SEND_VERSION_QUERY, MCI_STATE_SEND_STATUS_QUERY, + MCI_STATE_NEED_FLUSH_BT_INFO, + MCI_STATE_SET_CONCUR_TX_PRI, MCI_STATE_RECOVER_RX, MCI_STATE_NEED_FTP_STOMP, + MCI_STATE_NEED_TUNING, + MCI_STATE_NEED_STAT_DEBUG, + MCI_STATE_SHARED_CHAIN_CONCUR_TX, + MCI_STATE_AIC_CAL, + MCI_STATE_AIC_START, + MCI_STATE_AIC_CAL_RESET, + MCI_STATE_AIC_CAL_SINGLE, + MCI_STATE_IS_AR9462, + MCI_STATE_IS_AR9565_1ANT, + MCI_STATE_IS_AR9565_2ANT, + MCI_STATE_WLAN_WEAK_SIGNAL, + MCI_STATE_SET_WLAN_PS_STATE, + MCI_STATE_GET_WLAN_PS_STATE, MCI_STATE_DEBUG, - MCI_STATE_NEED_FLUSH_BT_INFO, + MCI_STATE_STAT_DEBUG, + MCI_STATE_ALLOW_FCS, + MCI_STATE_SET_2G_CONTENTION, MCI_STATE_MAX }; @@ -255,7 +312,7 @@ int ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf, void ar9003_mci_cleanup(struct ath_hw *ah); void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr, u32 *rx_msg_intr); -u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more); +u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, u32 *more); void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor); void ar9003_mci_send_wlan_channels(struct ath_hw *ah); /* diff --git a/drivers/net/wireless/ath/ath9k/ar9003_wow.c b/drivers/net/wireless/ath/ath9k/ar9003_wow.c index 86bfc96..bea41df9 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_wow.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_wow.c @@ -20,11 +20,25 @@ #include "reg_wow.h" #include "hw-ops.h" +static void ath9k_hw_set_sta_powersave(struct ath_hw *ah) +{ + if (!ath9k_hw_mci_is_enabled(ah)) + goto set; + /* + * If MCI is being used, set PWR_SAV only 
when MCI's + * PS state is disabled. + */ + if (ar9003_mci_state(ah, MCI_STATE_GET_WLAN_PS_STATE) != MCI_PS_DISABLE) + return; +set: + REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); +} + static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); - REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); + ath9k_hw_set_sta_powersave(ah); /* set rx disable bit */ REG_WRITE(ah, AR_CR, AR_CR_RXD); @@ -44,6 +58,9 @@ static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah) REG_CLR_BIT(ah, AR_DIRECT_CONNECT, AR_DC_TSF2_ENABLE); } + if (ath9k_hw_mci_is_enabled(ah)) + REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2); + REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT); } @@ -74,8 +91,6 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah) for (i = 0; i < KAL_NUM_DESC_WORDS; i++) REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]); - REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]); - data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) | (KAL_TO_DS << 8) | (KAL_DURATION_ID << 16); data_word[1] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) | @@ -88,9 +103,11 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah) (ap_mac_addr[1] << 8) | (ap_mac_addr[0]); data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]); - if (AR_SREV_9462_20(ah)) { - /* AR9462 2.0 has an extra descriptor word (time based - * discard) compared to other chips */ + if (AR_SREV_9462_20_OR_LATER(ah) || AR_SREV_9565(ah)) { + /* + * AR9462 2.0 and AR9565 have an extra descriptor word + * (time based discard) compared to other chips. + */ REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0); wow_ka_data_word0 = AR_WOW_TXBUF(13); } else { @@ -99,7 +116,6 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah) for (i = 0; i < KAL_NUM_DATA_WORDS; i++) REG_WRITE(ah, (wow_ka_data_word0 + i*4), data_word[i]); - } int ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern, @@ -170,18 +186,17 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah) u32 val = 0, rval; /* - * read the WoW status register to know - * the wakeup reason + * Read the WoW status register to know + * the wakeup reason. */ rval = REG_READ(ah, AR_WOW_PATTERN); val = AR_WOW_STATUS(rval); /* - * mask only the WoW events that we have enabled. Sometimes + * Mask only the WoW events that we have enabled. Sometimes * we have spurious WoW events from the AR_WOW_PATTERN * register. This mask will clean it up. */ - val &= ah->wow.wow_event_mask; if (val) { @@ -195,6 +210,15 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah) wow_status |= AH_WOW_BEACON_MISS; } + rval = REG_READ(ah, AR_MAC_PCU_WOW4); + val = AR_WOW_STATUS2(rval); + val &= ah->wow.wow_event_mask2; + + if (val) { + if (AR_WOW2_PATTERN_FOUND(val)) + wow_status |= AH_WOW_USER_PATTERN_EN; + } + /* * set and clear WOW_PME_CLEAR registers for the chip to * generate next wow signal. @@ -206,10 +230,12 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah) AR_PMCTRL_PWR_STATE_D1D3); /* - * clear all events + * Clear all events. 
*/ REG_WRITE(ah, AR_WOW_PATTERN, AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN))); + REG_WRITE(ah, AR_MAC_PCU_WOW4, + AR_WOW_CLEAR_EVENTS2(REG_READ(ah, AR_MAC_PCU_WOW4))); /* * restore the beacon threshold to init value @@ -226,7 +252,15 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah) if (ah->is_pciexpress) ath9k_hw_configpcipowersave(ah, false); + if (AR_SREV_9462(ah) || AR_SREV_9565(ah) || AR_SREV_9485(ah)) { + u32 dc = REG_READ(ah, AR_DIRECT_CONNECT); + + if (!(dc & AR_DC_TSF2_ENABLE)) + ath9k_hw_gen_timer_start_tsf2(ah); + } + ah->wow.wow_event_mask = 0; + ah->wow.wow_event_mask2 = 0; return wow_status; } @@ -408,6 +442,9 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) ath9k_hw_wow_set_arwr_reg(ah); + if (ath9k_hw_mci_is_enabled(ah)) + REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2); + /* HW WoW */ REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, BIT(5)); diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 0f8e946..7e89236 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -645,6 +645,7 @@ void ath9k_calculate_iter_data(struct ath_softc *sc, struct ath9k_vif_iter_data *iter_data); void ath9k_calculate_summary_state(struct ath_softc *sc, struct ath_chanctx *ctx); +void ath9k_set_txpower(struct ath_softc *sc, struct ieee80211_vif *vif); /*******************/ /* Beacon Handling */ diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c index 3dfc2c7..5a084d9 100644 --- a/drivers/net/wireless/ath/ath9k/btcoex.c +++ b/drivers/net/wireless/ath/ath9k/btcoex.c @@ -103,7 +103,9 @@ void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah) return; } - if (AR_SREV_9300_20_OR_LATER(ah)) { + if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) { + btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI; + } else if (AR_SREV_9300_20_OR_LATER(ah)) { btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE; btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300; btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300; @@ -307,6 +309,18 @@ static void ath9k_hw_btcoex_enable_mci(struct ath_hw *ah) btcoex->enabled = true; } +static void ath9k_hw_btcoex_disable_mci(struct ath_hw *ah) +{ + struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; + int i; + + ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE); + + for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++) + REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i), + btcoex_hw->wlan_weight[i]); +} + void ath9k_hw_btcoex_enable(struct ath_hw *ah) { struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; @@ -318,17 +332,18 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah) ath9k_hw_btcoex_enable_2wire(ah); break; case ATH_BTCOEX_CFG_3WIRE: - if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { - ath9k_hw_btcoex_enable_mci(ah); - return; - } ath9k_hw_btcoex_enable_3wire(ah); break; + case ATH_BTCOEX_CFG_MCI: + ath9k_hw_btcoex_enable_mci(ah); + break; } - REG_RMW(ah, AR_GPIO_PDPU, - (0x2 << (btcoex_hw->btactive_gpio * 2)), - (0x3 << (btcoex_hw->btactive_gpio * 2))); + if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI) { + REG_RMW(ah, AR_GPIO_PDPU, + (0x2 << (btcoex_hw->btactive_gpio * 2)), + (0x3 << (btcoex_hw->btactive_gpio * 2))); + } ah->btcoex_hw.enabled = true; } @@ -340,14 +355,14 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah) int i; btcoex_hw->enabled = false; - if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { - ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE); - for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++) - REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i), - btcoex_hw->wlan_weight[i]); + + if 
(ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_MCI) { + ath9k_hw_btcoex_disable_mci(ah); return; } - ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0); + + if (!AR_SREV_9300_20_OR_LATER(ah)) + ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0); ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio, AR_GPIO_OUTPUT_MUX_AS_OUTPUT); diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h index 6de26ea..5fe62ff 100644 --- a/drivers/net/wireless/ath/ath9k/btcoex.h +++ b/drivers/net/wireless/ath/ath9k/btcoex.h @@ -58,6 +58,7 @@ enum ath_btcoex_scheme { ATH_BTCOEX_CFG_NONE, ATH_BTCOEX_CFG_2WIRE, ATH_BTCOEX_CFG_3WIRE, + ATH_BTCOEX_CFG_MCI, }; struct ath9k_hw_mci { diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 50a2e0a..dbf8f49 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -1156,7 +1156,10 @@ static ssize_t write_file_tpc(struct file *file, const char __user *user_buf, if (tpc_enabled != ah->tpc_enabled) { ah->tpc_enabled = tpc_enabled; - ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false); + + mutex_lock(&sc->mutex); + ath9k_set_txpower(sc, NULL); + mutex_unlock(&sc->mutex); } return count; diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c index da344b2..86d46c1 100644 --- a/drivers/net/wireless/ath/ath9k/gpio.c +++ b/drivers/net/wireless/ath/ath9k/gpio.c @@ -202,17 +202,16 @@ static void ath_btcoex_period_timer(unsigned long data) } spin_unlock_irqrestore(&sc->sc_pm_lock, flags); - ath9k_mci_update_rssi(sc); - ath9k_ps_wakeup(sc); + spin_lock_bh(&btcoex->btcoex_lock); - if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI)) - ath_detect_bt_priority(sc); - - if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) + if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) { + ath9k_mci_update_rssi(sc); ath_mci_ftp_adjust(sc); + } - spin_lock_bh(&btcoex->btcoex_lock); + if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI)) + ath_detect_bt_priority(sc); stomp_type = btcoex->bt_stomp_type; timer_period = btcoex->btcoex_no_stomp; @@ -252,9 +251,6 @@ static void ath_btcoex_no_stomp_timer(unsigned long arg) struct ath_softc *sc = (struct ath_softc *)arg; struct ath_hw *ah = sc->sc_ah; struct ath_btcoex *btcoex = &sc->btcoex; - struct ath_common *common = ath9k_hw_common(ah); - - ath_dbg(common, BTCOEX, "no stomp timer running\n"); ath9k_ps_wakeup(sc); spin_lock_bh(&btcoex->btcoex_lock); @@ -271,7 +267,7 @@ static void ath_btcoex_no_stomp_timer(unsigned long arg) ath9k_ps_restore(sc); } -static int ath_init_btcoex_timer(struct ath_softc *sc) +static void ath_init_btcoex_timer(struct ath_softc *sc) { struct ath_btcoex *btcoex = &sc->btcoex; @@ -280,6 +276,7 @@ static int ath_init_btcoex_timer(struct ath_softc *sc) btcoex->btcoex_period / 100; btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) * btcoex->btcoex_period / 100; + btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW; setup_timer(&btcoex->period_timer, ath_btcoex_period_timer, (unsigned long) sc); @@ -287,8 +284,6 @@ static int ath_init_btcoex_timer(struct ath_softc *sc) (unsigned long) sc); spin_lock_init(&btcoex->btcoex_lock); - - return 0; } /* @@ -299,6 +294,10 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc) struct ath_btcoex *btcoex = &sc->btcoex; struct ath_hw *ah = sc->sc_ah; + if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_3WIRE && + ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI) + return; + ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n"); /* make sure 
duty cycle timer is also stopped when resuming */ @@ -312,13 +311,19 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc) mod_timer(&btcoex->period_timer, jiffies); } - /* * Pause btcoex timer and bt duty cycle timer */ void ath9k_btcoex_timer_pause(struct ath_softc *sc) { struct ath_btcoex *btcoex = &sc->btcoex; + struct ath_hw *ah = sc->sc_ah; + + if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_3WIRE && + ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI) + return; + + ath_dbg(ath9k_hw_common(ah), BTCOEX, "Stopping btcoex timers\n"); del_timer_sync(&btcoex->period_timer); del_timer_sync(&btcoex->no_stomp_timer); @@ -356,33 +361,33 @@ void ath9k_start_btcoex(struct ath_softc *sc) { struct ath_hw *ah = sc->sc_ah; - if ((ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) && - !ah->btcoex_hw.enabled) { - if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI)) - ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, - AR_STOMP_LOW_WLAN_WGHT, 0); - else - ath9k_hw_btcoex_set_weight(ah, 0, 0, - ATH_BTCOEX_STOMP_NONE); - ath9k_hw_btcoex_enable(ah); + if (ah->btcoex_hw.enabled || + ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE) + return; - if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) - ath9k_btcoex_timer_resume(sc); - } + if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI)) + ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, + AR_STOMP_LOW_WLAN_WGHT, 0); + else + ath9k_hw_btcoex_set_weight(ah, 0, 0, + ATH_BTCOEX_STOMP_NONE); + ath9k_hw_btcoex_enable(ah); + ath9k_btcoex_timer_resume(sc); } void ath9k_stop_btcoex(struct ath_softc *sc) { struct ath_hw *ah = sc->sc_ah; - if (ah->btcoex_hw.enabled && - ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) { - if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) - ath9k_btcoex_timer_pause(sc); - ath9k_hw_btcoex_disable(ah); - if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) - ath_mci_flush_profile(&sc->btcoex.mci); - } + if (!ah->btcoex_hw.enabled || + ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE) + return; + + ath9k_btcoex_timer_pause(sc); + ath9k_hw_btcoex_disable(ah); + + if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) + ath_mci_flush_profile(&sc->btcoex.mci); } void ath9k_deinit_btcoex(struct ath_softc *sc) @@ -409,22 +414,20 @@ int ath9k_init_btcoex(struct ath_softc *sc) break; case ATH_BTCOEX_CFG_3WIRE: ath9k_hw_btcoex_init_3wire(sc->sc_ah); - r = ath_init_btcoex_timer(sc); - if (r) - return -1; + ath_init_btcoex_timer(sc); txq = sc->tx.txq_map[IEEE80211_AC_BE]; ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum); - sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW; - if (ath9k_hw_mci_is_enabled(ah)) { - sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE; - INIT_LIST_HEAD(&sc->btcoex.mci.info); + break; + case ATH_BTCOEX_CFG_MCI: + ath_init_btcoex_timer(sc); - r = ath_mci_setup(sc); - if (r) - return r; + sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE; + INIT_LIST_HEAD(&sc->btcoex.mci.info); + ath9k_hw_btcoex_init_mci(ah); - ath9k_hw_btcoex_init_mci(ah); - } + r = ath_mci_setup(sc); + if (r) + return r; break; default: diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 8e7153b..10c02f5 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -40,6 +40,7 @@ static struct usb_device_id ath9k_hif_usb_ids[] = { { USB_DEVICE(0x0cf3, 0xb003) }, /* Ubiquiti WifiStation Ext */ { USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */ { USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */ + { USB_DEVICE(0x0471, 0x209e) }, /* Philips 
(or NXP) PTA01 */ { USB_DEVICE(0x0cf3, 0x7015), .driver_info = AR9287_USB }, /* Atheros */ diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 92d5a6c..564923c 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -149,7 +149,7 @@ static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv, * when matching addresses. */ iter_data.hw_macaddr = NULL; - memset(&iter_data.mask, 0xff, ETH_ALEN); + eth_broadcast_addr(iter_data.mask); if (vif) ath9k_htc_bssid_iter(&iter_data, vif->addr, vif); diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index e82e570..29a25d9 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -27,6 +27,7 @@ #include "eeprom.h" #include "calib.h" #include "reg.h" +#include "reg_mci.h" #include "phy.h" #include "btcoex.h" #include "dynack.h" diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 9ede991..b0badef 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -994,7 +994,7 @@ void ath9k_calculate_iter_data(struct ath_softc *sc, * BSSID mask when matching addresses. */ memset(iter_data, 0, sizeof(*iter_data)); - memset(&iter_data->mask, 0xff, ETH_ALEN); + eth_broadcast_addr(iter_data->mask); iter_data->slottime = ATH9K_SLOT_TIME_9; list_for_each_entry(avp, &ctx->vifs, list) @@ -1139,7 +1139,7 @@ void ath9k_calculate_summary_state(struct ath_softc *sc, ctx->primary_sta = iter_data.primary_sta; } else { ctx->primary_sta = NULL; - memset(common->curbssid, 0, ETH_ALEN); + eth_zero_addr(common->curbssid); common->curaid = 0; ath9k_hw_write_associd(sc->sc_ah); if (ath9k_hw_mci_is_enabled(sc->sc_ah)) @@ -1172,6 +1172,38 @@ void ath9k_calculate_summary_state(struct ath_softc *sc, ath9k_ps_restore(sc); } +static void ath9k_tpc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) +{ + int *power = (int *)data; + + if (*power < vif->bss_conf.txpower) + *power = vif->bss_conf.txpower; +} + +/* Called with sc->mutex held. */ +void ath9k_set_txpower(struct ath_softc *sc, struct ieee80211_vif *vif) +{ + int power; + struct ath_hw *ah = sc->sc_ah; + struct ath_regulatory *reg = ath9k_hw_regulatory(ah); + + ath9k_ps_wakeup(sc); + if (ah->tpc_enabled) { + power = (vif) ? 
vif->bss_conf.txpower : -1; + ieee80211_iterate_active_interfaces_atomic( + sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL, + ath9k_tpc_vif_iter, &power); + if (power == -1) + power = sc->hw->conf.power_level; + } else { + power = sc->hw->conf.power_level; + } + sc->cur_chan->txpower = 2 * power; + ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false); + sc->cur_chan->cur_txpower = reg->max_power_level; + ath9k_ps_restore(sc); +} + static void ath9k_assign_hw_queues(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -1225,6 +1257,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, ath9k_assign_hw_queues(hw, vif); + ath9k_set_txpower(sc, vif); + an->sc = sc; an->sta = NULL; an->vif = vif; @@ -1265,6 +1299,8 @@ static int ath9k_change_interface(struct ieee80211_hw *hw, ath9k_assign_hw_queues(hw, vif); ath9k_calculate_summary_state(sc, avp->chanctx); + ath9k_set_txpower(sc, vif); + mutex_unlock(&sc->mutex); return 0; } @@ -1294,6 +1330,8 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, ath9k_calculate_summary_state(sc, avp->chanctx); + ath9k_set_txpower(sc, NULL); + mutex_unlock(&sc->mutex); } @@ -1397,14 +1435,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef); } - if (changed & IEEE80211_CONF_CHANGE_POWER) { - ath_dbg(common, CONFIG, "Set power: %d\n", conf->power_level); - sc->cur_chan->txpower = 2 * conf->power_level; - ath9k_cmn_update_txpow(ah, sc->cur_chan->cur_txpower, - sc->cur_chan->txpower, - &sc->cur_chan->cur_txpower); - } - mutex_unlock(&sc->mutex); ath9k_ps_restore(sc); @@ -1764,6 +1794,12 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, if (changed & CHECK_ANI) ath_check_ani(sc); + if (changed & BSS_CHANGED_TXPOWER) { + ath_dbg(common, CONFIG, "vif %pM power %d dbm power_type %d\n", + vif->addr, bss_conf->txpower, bss_conf->txpower_type); + ath9k_set_txpower(sc, vif); + } + mutex_unlock(&sc->mutex); ath9k_ps_restore(sc); diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c index 3f7a11e..66596b9 100644 --- a/drivers/net/wireless/ath/ath9k/mci.c +++ b/drivers/net/wireless/ath/ath9k/mci.c @@ -495,7 +495,7 @@ void ath_mci_intr(struct ath_softc *sc) ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg); if (ar9003_mci_state(ah, MCI_STATE_ENABLE) == 0) { - ar9003_mci_get_next_gpm_offset(ah, true, NULL); + ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET); return; } @@ -559,8 +559,7 @@ void ath_mci_intr(struct ath_softc *sc) return; pgpm = mci->gpm_buf.bf_addr; - offset = ar9003_mci_get_next_gpm_offset(ah, false, - &more_data); + offset = ar9003_mci_get_next_gpm_offset(ah, &more_data); if (offset == MCI_GPM_INVALID) break; diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h index 9587ec6..1234399 100644 --- a/drivers/net/wireless/ath/ath9k/reg.h +++ b/drivers/net/wireless/ath/ath9k/reg.h @@ -2044,279 +2044,4 @@ enum { #define AR_PHY_AGC_CONTROL_YCOK_MAX 0x000003c0 #define AR_PHY_AGC_CONTROL_YCOK_MAX_S 6 -/* MCI Registers */ - -#define AR_MCI_COMMAND0 0x1800 -#define AR_MCI_COMMAND0_HEADER 0xFF -#define AR_MCI_COMMAND0_HEADER_S 0 -#define AR_MCI_COMMAND0_LEN 0x1f00 -#define AR_MCI_COMMAND0_LEN_S 8 -#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP 0x2000 -#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP_S 13 - -#define AR_MCI_COMMAND1 0x1804 - -#define AR_MCI_COMMAND2 0x1808 -#define AR_MCI_COMMAND2_RESET_TX 0x01 -#define AR_MCI_COMMAND2_RESET_TX_S 0 -#define AR_MCI_COMMAND2_RESET_RX 0x02 -#define 
AR_MCI_COMMAND2_RESET_RX_S 1 -#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES 0x3FC -#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES_S 2 -#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP 0x400 -#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP_S 10 - -#define AR_MCI_RX_CTRL 0x180c - -#define AR_MCI_TX_CTRL 0x1810 -/* 0 = no division, 1 = divide by 2, 2 = divide by 4, 3 = divide by 8 */ -#define AR_MCI_TX_CTRL_CLK_DIV 0x03 -#define AR_MCI_TX_CTRL_CLK_DIV_S 0 -#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE 0x04 -#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE_S 2 -#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ 0xFFFFF8 -#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ_S 3 -#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM 0xF000000 -#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM_S 24 - -#define AR_MCI_MSG_ATTRIBUTES_TABLE 0x1814 -#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM 0xFFFF -#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM_S 0 -#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR 0xFFFF0000 -#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR_S 16 - -#define AR_MCI_SCHD_TABLE_0 0x1818 -#define AR_MCI_SCHD_TABLE_1 0x181c -#define AR_MCI_GPM_0 0x1820 -#define AR_MCI_GPM_1 0x1824 -#define AR_MCI_GPM_WRITE_PTR 0xFFFF0000 -#define AR_MCI_GPM_WRITE_PTR_S 16 -#define AR_MCI_GPM_BUF_LEN 0x0000FFFF -#define AR_MCI_GPM_BUF_LEN_S 0 - -#define AR_MCI_INTERRUPT_RAW 0x1828 -#define AR_MCI_INTERRUPT_EN 0x182c -#define AR_MCI_INTERRUPT_SW_MSG_DONE 0x00000001 -#define AR_MCI_INTERRUPT_SW_MSG_DONE_S 0 -#define AR_MCI_INTERRUPT_CPU_INT_MSG 0x00000002 -#define AR_MCI_INTERRUPT_CPU_INT_MSG_S 1 -#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL 0x00000004 -#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL_S 2 -#define AR_MCI_INTERRUPT_RX_INVALID_HDR 0x00000008 -#define AR_MCI_INTERRUPT_RX_INVALID_HDR_S 3 -#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL 0x00000010 -#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL_S 4 -#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL 0x00000020 -#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL_S 5 -#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL 0x00000080 -#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL_S 7 -#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL 0x00000100 -#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL_S 8 -#define AR_MCI_INTERRUPT_RX_MSG 0x00000200 -#define AR_MCI_INTERRUPT_RX_MSG_S 9 -#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE 0x00000400 -#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE_S 10 -#define AR_MCI_INTERRUPT_BT_PRI 0x07fff800 -#define AR_MCI_INTERRUPT_BT_PRI_S 11 -#define AR_MCI_INTERRUPT_BT_PRI_THRESH 0x08000000 -#define AR_MCI_INTERRUPT_BT_PRI_THRESH_S 27 -#define AR_MCI_INTERRUPT_BT_FREQ 0x10000000 -#define AR_MCI_INTERRUPT_BT_FREQ_S 28 -#define AR_MCI_INTERRUPT_BT_STOMP 0x20000000 -#define AR_MCI_INTERRUPT_BT_STOMP_S 29 -#define AR_MCI_INTERRUPT_BB_AIC_IRQ 0x40000000 -#define AR_MCI_INTERRUPT_BB_AIC_IRQ_S 30 -#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT 0x80000000 -#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT_S 31 - -#define AR_MCI_INTERRUPT_DEFAULT (AR_MCI_INTERRUPT_SW_MSG_DONE | \ - AR_MCI_INTERRUPT_RX_INVALID_HDR | \ - AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \ - AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \ - AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \ - AR_MCI_INTERRUPT_TX_SW_MSG_FAIL | \ - AR_MCI_INTERRUPT_RX_MSG | \ - AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE | \ - AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT) - -#define AR_MCI_INTERRUPT_MSG_FAIL_MASK (AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \ - AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \ - AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \ - AR_MCI_INTERRUPT_TX_SW_MSG_FAIL) - -#define AR_MCI_REMOTE_CPU_INT 0x1830 -#define AR_MCI_REMOTE_CPU_INT_EN 0x1834 -#define AR_MCI_INTERRUPT_RX_MSG_RAW 0x1838 -#define AR_MCI_INTERRUPT_RX_MSG_EN 
0x183c -#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001 -#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0 -#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002 -#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1 -#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004 -#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2 -#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008 -#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3 -#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010 -#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4 -#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020 -#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5 -#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040 -#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6 -#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100 -#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8 -#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200 -#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9 -#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400 -#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10 -#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800 -#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11 -#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000 -#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12 -#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \ - AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL| \ - AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \ - AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \ - AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \ - AR_MCI_INTERRUPT_RX_MSG_CONT_RST) - -#define AR_MCI_INTERRUPT_RX_MSG_DEFAULT (AR_MCI_INTERRUPT_RX_MSG_GPM | \ - AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET| \ - AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING | \ - AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING| \ - AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) - -#define AR_MCI_CPU_INT 0x1840 - -#define AR_MCI_RX_STATUS 0x1844 -#define AR_MCI_RX_LAST_SCHD_MSG_INDEX 0x00000F00 -#define AR_MCI_RX_LAST_SCHD_MSG_INDEX_S 8 -#define AR_MCI_RX_REMOTE_SLEEP 0x00001000 -#define AR_MCI_RX_REMOTE_SLEEP_S 12 -#define AR_MCI_RX_MCI_CLK_REQ 0x00002000 -#define AR_MCI_RX_MCI_CLK_REQ_S 13 - -#define AR_MCI_CONT_STATUS 0x1848 -#define AR_MCI_CONT_RSSI_POWER 0x000000FF -#define AR_MCI_CONT_RSSI_POWER_S 0 -#define AR_MCI_CONT_PRIORITY 0x0000FF00 -#define AR_MCI_CONT_PRIORITY_S 8 -#define AR_MCI_CONT_TXRX 0x00010000 -#define AR_MCI_CONT_TXRX_S 16 - -#define AR_MCI_BT_PRI0 0x184c -#define AR_MCI_BT_PRI1 0x1850 -#define AR_MCI_BT_PRI2 0x1854 -#define AR_MCI_BT_PRI3 0x1858 -#define AR_MCI_BT_PRI 0x185c -#define AR_MCI_WL_FREQ0 0x1860 -#define AR_MCI_WL_FREQ1 0x1864 -#define AR_MCI_WL_FREQ2 0x1868 -#define AR_MCI_GAIN 0x186c -#define AR_MCI_WBTIMER1 0x1870 -#define AR_MCI_WBTIMER2 0x1874 -#define AR_MCI_WBTIMER3 0x1878 -#define AR_MCI_WBTIMER4 0x187c -#define AR_MCI_MAXGAIN 0x1880 -#define AR_MCI_HW_SCHD_TBL_CTL 0x1884 -#define AR_MCI_HW_SCHD_TBL_D0 0x1888 -#define AR_MCI_HW_SCHD_TBL_D1 0x188c -#define AR_MCI_HW_SCHD_TBL_D2 0x1890 -#define AR_MCI_HW_SCHD_TBL_D3 0x1894 -#define AR_MCI_TX_PAYLOAD0 0x1898 -#define AR_MCI_TX_PAYLOAD1 0x189c -#define AR_MCI_TX_PAYLOAD2 0x18a0 -#define AR_MCI_TX_PAYLOAD3 0x18a4 -#define AR_BTCOEX_WBTIMER 0x18a8 - -#define AR_BTCOEX_CTRL 0x18ac -#define AR_BTCOEX_CTRL_AR9462_MODE 0x00000001 -#define AR_BTCOEX_CTRL_AR9462_MODE_S 0 -#define AR_BTCOEX_CTRL_WBTIMER_EN 0x00000002 -#define AR_BTCOEX_CTRL_WBTIMER_EN_S 1 -#define AR_BTCOEX_CTRL_MCI_MODE_EN 0x00000004 -#define AR_BTCOEX_CTRL_MCI_MODE_EN_S 2 -#define AR_BTCOEX_CTRL_LNA_SHARED 0x00000008 -#define AR_BTCOEX_CTRL_LNA_SHARED_S 3 -#define AR_BTCOEX_CTRL_PA_SHARED 0x00000010 -#define 
AR_BTCOEX_CTRL_PA_SHARED_S 4 -#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN 0x00000020 -#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN_S 5 -#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN 0x00000040 -#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN_S 6 -#define AR_BTCOEX_CTRL_NUM_ANTENNAS 0x00000180 -#define AR_BTCOEX_CTRL_NUM_ANTENNAS_S 7 -#define AR_BTCOEX_CTRL_RX_CHAIN_MASK 0x00000E00 -#define AR_BTCOEX_CTRL_RX_CHAIN_MASK_S 9 -#define AR_BTCOEX_CTRL_AGGR_THRESH 0x00007000 -#define AR_BTCOEX_CTRL_AGGR_THRESH_S 12 -#define AR_BTCOEX_CTRL_1_CHAIN_BCN 0x00080000 -#define AR_BTCOEX_CTRL_1_CHAIN_BCN_S 19 -#define AR_BTCOEX_CTRL_1_CHAIN_ACK 0x00100000 -#define AR_BTCOEX_CTRL_1_CHAIN_ACK_S 20 -#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN 0x1FE00000 -#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN_S 28 -#define AR_BTCOEX_CTRL_REDUCE_TXPWR 0x20000000 -#define AR_BTCOEX_CTRL_REDUCE_TXPWR_S 29 -#define AR_BTCOEX_CTRL_SPDT_ENABLE_10 0x40000000 -#define AR_BTCOEX_CTRL_SPDT_ENABLE_10_S 30 -#define AR_BTCOEX_CTRL_SPDT_POLARITY 0x80000000 -#define AR_BTCOEX_CTRL_SPDT_POLARITY_S 31 - -#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2)) -#define AR_BTCOEX_WL_LNA 0x1940 -#define AR_BTCOEX_RFGAIN_CTRL 0x1944 -#define AR_BTCOEX_WL_LNA_TIMEOUT 0x003FFFFF -#define AR_BTCOEX_WL_LNA_TIMEOUT_S 0 - -#define AR_BTCOEX_CTRL2 0x1948 -#define AR_BTCOEX_CTRL2_TXPWR_THRESH 0x0007F800 -#define AR_BTCOEX_CTRL2_TXPWR_THRESH_S 11 -#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK 0x00380000 -#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK_S 19 -#define AR_BTCOEX_CTRL2_RX_DEWEIGHT 0x00400000 -#define AR_BTCOEX_CTRL2_RX_DEWEIGHT_S 22 -#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL 0x00800000 -#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL_S 23 -#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL 0x01000000 -#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL_S 24 -#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE 0x02000000 -#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE_S 25 - -#define AR_BTCOEX_CTRL_SPDT_ENABLE 0x00000001 -#define AR_BTCOEX_CTRL_SPDT_ENABLE_S 0 -#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL 0x00000002 -#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL_S 1 -#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT 0x00000004 -#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT_S 2 -#define AR_GLB_WLAN_UART_INTF_EN 0x00020000 -#define AR_GLB_WLAN_UART_INTF_EN_S 17 -#define AR_GLB_DS_JTAG_DISABLE 0x00040000 -#define AR_GLB_DS_JTAG_DISABLE_S 18 - -#define AR_BTCOEX_RC 0x194c -#define AR_BTCOEX_MAX_RFGAIN(_x) (0x1950 + ((_x) << 2)) -#define AR_BTCOEX_DBG 0x1a50 -#define AR_MCI_LAST_HW_MSG_HDR 0x1a54 -#define AR_MCI_LAST_HW_MSG_BDY 0x1a58 - -#define AR_MCI_SCHD_TABLE_2 0x1a5c -#define AR_MCI_SCHD_TABLE_2_MEM_BASED 0x00000001 -#define AR_MCI_SCHD_TABLE_2_MEM_BASED_S 0 -#define AR_MCI_SCHD_TABLE_2_HW_BASED 0x00000002 -#define AR_MCI_SCHD_TABLE_2_HW_BASED_S 1 - -#define AR_BTCOEX_CTRL3 0x1a60 -#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT 0x00000fff -#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S 0 - -#define AR_GLB_SWREG_DISCONT_MODE 0x2002c -#define AR_GLB_SWREG_DISCONT_EN_BT_WLAN 0x3 - -#define AR_MCI_MISC 0x1a74 -#define AR_MCI_MISC_HW_FIX_EN 0x00000001 -#define AR_MCI_MISC_HW_FIX_EN_S 0 -#define AR_MCI_DBG_CNT_CTRL 0x1a78 -#define AR_MCI_DBG_CNT_CTRL_ENABLE 0x00000001 -#define AR_MCI_DBG_CNT_CTRL_ENABLE_S 0 - #endif diff --git a/drivers/net/wireless/ath/ath9k/reg_mci.h b/drivers/net/wireless/ath/ath9k/reg_mci.h new file mode 100644 index 0000000..6251310 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/reg_mci.h @@ -0,0 +1,310 @@ +/* + * Copyright (c) 2015 Qualcomm Atheros Inc. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef REG_MCI_H +#define REG_MCI_H + +#define AR_MCI_COMMAND0 0x1800 +#define AR_MCI_COMMAND0_HEADER 0xFF +#define AR_MCI_COMMAND0_HEADER_S 0 +#define AR_MCI_COMMAND0_LEN 0x1f00 +#define AR_MCI_COMMAND0_LEN_S 8 +#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP 0x2000 +#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP_S 13 + +#define AR_MCI_COMMAND1 0x1804 + +#define AR_MCI_COMMAND2 0x1808 +#define AR_MCI_COMMAND2_RESET_TX 0x01 +#define AR_MCI_COMMAND2_RESET_TX_S 0 +#define AR_MCI_COMMAND2_RESET_RX 0x02 +#define AR_MCI_COMMAND2_RESET_RX_S 1 +#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES 0x3FC +#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES_S 2 +#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP 0x400 +#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP_S 10 + +#define AR_MCI_RX_CTRL 0x180c + +#define AR_MCI_TX_CTRL 0x1810 +/* + * 0 = no division, + * 1 = divide by 2, + * 2 = divide by 4, + * 3 = divide by 8 + */ +#define AR_MCI_TX_CTRL_CLK_DIV 0x03 +#define AR_MCI_TX_CTRL_CLK_DIV_S 0 +#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE 0x04 +#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE_S 2 +#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ 0xFFFFF8 +#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ_S 3 +#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM 0xF000000 +#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM_S 24 + +#define AR_MCI_MSG_ATTRIBUTES_TABLE 0x1814 +#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM 0xFFFF +#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM_S 0 +#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR 0xFFFF0000 +#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR_S 16 + +#define AR_MCI_SCHD_TABLE_0 0x1818 +#define AR_MCI_SCHD_TABLE_1 0x181c +#define AR_MCI_GPM_0 0x1820 +#define AR_MCI_GPM_1 0x1824 +#define AR_MCI_GPM_WRITE_PTR 0xFFFF0000 +#define AR_MCI_GPM_WRITE_PTR_S 16 +#define AR_MCI_GPM_BUF_LEN 0x0000FFFF +#define AR_MCI_GPM_BUF_LEN_S 0 + +#define AR_MCI_INTERRUPT_RAW 0x1828 + +#define AR_MCI_INTERRUPT_EN 0x182c +#define AR_MCI_INTERRUPT_SW_MSG_DONE 0x00000001 +#define AR_MCI_INTERRUPT_SW_MSG_DONE_S 0 +#define AR_MCI_INTERRUPT_CPU_INT_MSG 0x00000002 +#define AR_MCI_INTERRUPT_CPU_INT_MSG_S 1 +#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL 0x00000004 +#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL_S 2 +#define AR_MCI_INTERRUPT_RX_INVALID_HDR 0x00000008 +#define AR_MCI_INTERRUPT_RX_INVALID_HDR_S 3 +#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL 0x00000010 +#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL_S 4 +#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL 0x00000020 +#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL_S 5 +#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL 0x00000080 +#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL_S 7 +#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL 0x00000100 +#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL_S 8 +#define AR_MCI_INTERRUPT_RX_MSG 0x00000200 +#define AR_MCI_INTERRUPT_RX_MSG_S 9 +#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE 0x00000400 +#define 
AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE_S 10 +#define AR_MCI_INTERRUPT_BT_PRI 0x07fff800 +#define AR_MCI_INTERRUPT_BT_PRI_S 11 +#define AR_MCI_INTERRUPT_BT_PRI_THRESH 0x08000000 +#define AR_MCI_INTERRUPT_BT_PRI_THRESH_S 27 +#define AR_MCI_INTERRUPT_BT_FREQ 0x10000000 +#define AR_MCI_INTERRUPT_BT_FREQ_S 28 +#define AR_MCI_INTERRUPT_BT_STOMP 0x20000000 +#define AR_MCI_INTERRUPT_BT_STOMP_S 29 +#define AR_MCI_INTERRUPT_BB_AIC_IRQ 0x40000000 +#define AR_MCI_INTERRUPT_BB_AIC_IRQ_S 30 +#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT 0x80000000 +#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT_S 31 + +#define AR_MCI_REMOTE_CPU_INT 0x1830 +#define AR_MCI_REMOTE_CPU_INT_EN 0x1834 +#define AR_MCI_INTERRUPT_RX_MSG_RAW 0x1838 +#define AR_MCI_INTERRUPT_RX_MSG_EN 0x183c +#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001 +#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0 +#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002 +#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1 +#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004 +#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2 +#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008 +#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3 +#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010 +#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4 +#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020 +#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5 +#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040 +#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6 +#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100 +#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8 +#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200 +#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9 +#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400 +#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10 +#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800 +#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11 +#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000 +#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12 + +#define AR_MCI_CPU_INT 0x1840 + +#define AR_MCI_RX_STATUS 0x1844 +#define AR_MCI_RX_LAST_SCHD_MSG_INDEX 0x00000F00 +#define AR_MCI_RX_LAST_SCHD_MSG_INDEX_S 8 +#define AR_MCI_RX_REMOTE_SLEEP 0x00001000 +#define AR_MCI_RX_REMOTE_SLEEP_S 12 +#define AR_MCI_RX_MCI_CLK_REQ 0x00002000 +#define AR_MCI_RX_MCI_CLK_REQ_S 13 + +#define AR_MCI_CONT_STATUS 0x1848 +#define AR_MCI_CONT_RSSI_POWER 0x000000FF +#define AR_MCI_CONT_RSSI_POWER_S 0 +#define AR_MCI_CONT_PRIORITY 0x0000FF00 +#define AR_MCI_CONT_PRIORITY_S 8 +#define AR_MCI_CONT_TXRX 0x00010000 +#define AR_MCI_CONT_TXRX_S 16 + +#define AR_MCI_BT_PRI0 0x184c +#define AR_MCI_BT_PRI1 0x1850 +#define AR_MCI_BT_PRI2 0x1854 +#define AR_MCI_BT_PRI3 0x1858 +#define AR_MCI_BT_PRI 0x185c +#define AR_MCI_WL_FREQ0 0x1860 +#define AR_MCI_WL_FREQ1 0x1864 +#define AR_MCI_WL_FREQ2 0x1868 +#define AR_MCI_GAIN 0x186c +#define AR_MCI_WBTIMER1 0x1870 +#define AR_MCI_WBTIMER2 0x1874 +#define AR_MCI_WBTIMER3 0x1878 +#define AR_MCI_WBTIMER4 0x187c +#define AR_MCI_MAXGAIN 0x1880 +#define AR_MCI_HW_SCHD_TBL_CTL 0x1884 +#define AR_MCI_HW_SCHD_TBL_D0 0x1888 +#define AR_MCI_HW_SCHD_TBL_D1 0x188c +#define AR_MCI_HW_SCHD_TBL_D2 0x1890 +#define AR_MCI_HW_SCHD_TBL_D3 0x1894 +#define AR_MCI_TX_PAYLOAD0 0x1898 +#define AR_MCI_TX_PAYLOAD1 0x189c +#define AR_MCI_TX_PAYLOAD2 0x18a0 +#define AR_MCI_TX_PAYLOAD3 0x18a4 +#define AR_BTCOEX_WBTIMER 0x18a8 + +#define AR_BTCOEX_CTRL 0x18ac +#define AR_BTCOEX_CTRL_AR9462_MODE 0x00000001 +#define AR_BTCOEX_CTRL_AR9462_MODE_S 0 +#define AR_BTCOEX_CTRL_WBTIMER_EN 0x00000002 +#define AR_BTCOEX_CTRL_WBTIMER_EN_S 1 
+#define AR_BTCOEX_CTRL_MCI_MODE_EN 0x00000004 +#define AR_BTCOEX_CTRL_MCI_MODE_EN_S 2 +#define AR_BTCOEX_CTRL_LNA_SHARED 0x00000008 +#define AR_BTCOEX_CTRL_LNA_SHARED_S 3 +#define AR_BTCOEX_CTRL_PA_SHARED 0x00000010 +#define AR_BTCOEX_CTRL_PA_SHARED_S 4 +#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN 0x00000020 +#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN_S 5 +#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN 0x00000040 +#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN_S 6 +#define AR_BTCOEX_CTRL_NUM_ANTENNAS 0x00000180 +#define AR_BTCOEX_CTRL_NUM_ANTENNAS_S 7 +#define AR_BTCOEX_CTRL_RX_CHAIN_MASK 0x00000E00 +#define AR_BTCOEX_CTRL_RX_CHAIN_MASK_S 9 +#define AR_BTCOEX_CTRL_AGGR_THRESH 0x00007000 +#define AR_BTCOEX_CTRL_AGGR_THRESH_S 12 +#define AR_BTCOEX_CTRL_1_CHAIN_BCN 0x00080000 +#define AR_BTCOEX_CTRL_1_CHAIN_BCN_S 19 +#define AR_BTCOEX_CTRL_1_CHAIN_ACK 0x00100000 +#define AR_BTCOEX_CTRL_1_CHAIN_ACK_S 20 +#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN 0x1FE00000 +#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN_S 28 +#define AR_BTCOEX_CTRL_REDUCE_TXPWR 0x20000000 +#define AR_BTCOEX_CTRL_REDUCE_TXPWR_S 29 +#define AR_BTCOEX_CTRL_SPDT_ENABLE_10 0x40000000 +#define AR_BTCOEX_CTRL_SPDT_ENABLE_10_S 30 +#define AR_BTCOEX_CTRL_SPDT_POLARITY 0x80000000 +#define AR_BTCOEX_CTRL_SPDT_POLARITY_S 31 + +#define AR_BTCOEX_WL_WEIGHTS0 0x18b0 +#define AR_BTCOEX_WL_WEIGHTS1 0x18b4 +#define AR_BTCOEX_WL_WEIGHTS2 0x18b8 +#define AR_BTCOEX_WL_WEIGHTS3 0x18bc + +#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2)) +#define AR_BTCOEX_WL_LNA 0x1940 +#define AR_BTCOEX_RFGAIN_CTRL 0x1944 +#define AR_BTCOEX_WL_LNA_TIMEOUT 0x003FFFFF +#define AR_BTCOEX_WL_LNA_TIMEOUT_S 0 + +#define AR_BTCOEX_CTRL2 0x1948 +#define AR_BTCOEX_CTRL2_TXPWR_THRESH 0x0007F800 +#define AR_BTCOEX_CTRL2_TXPWR_THRESH_S 11 +#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK 0x00380000 +#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK_S 19 +#define AR_BTCOEX_CTRL2_RX_DEWEIGHT 0x00400000 +#define AR_BTCOEX_CTRL2_RX_DEWEIGHT_S 22 +#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL 0x00800000 +#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL_S 23 +#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL 0x01000000 +#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL_S 24 +#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE 0x02000000 +#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE_S 25 + +#define AR_BTCOEX_CTRL_SPDT_ENABLE 0x00000001 +#define AR_BTCOEX_CTRL_SPDT_ENABLE_S 0 +#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL 0x00000002 +#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL_S 1 +#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT 0x00000004 +#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT_S 2 +#define AR_GLB_WLAN_UART_INTF_EN 0x00020000 +#define AR_GLB_WLAN_UART_INTF_EN_S 17 +#define AR_GLB_DS_JTAG_DISABLE 0x00040000 +#define AR_GLB_DS_JTAG_DISABLE_S 18 + +#define AR_BTCOEX_RC 0x194c +#define AR_BTCOEX_MAX_RFGAIN(_x) (0x1950 + ((_x) << 2)) +#define AR_BTCOEX_DBG 0x1a50 +#define AR_MCI_LAST_HW_MSG_HDR 0x1a54 +#define AR_MCI_LAST_HW_MSG_BDY 0x1a58 + +#define AR_MCI_SCHD_TABLE_2 0x1a5c +#define AR_MCI_SCHD_TABLE_2_MEM_BASED 0x00000001 +#define AR_MCI_SCHD_TABLE_2_MEM_BASED_S 0 +#define AR_MCI_SCHD_TABLE_2_HW_BASED 0x00000002 +#define AR_MCI_SCHD_TABLE_2_HW_BASED_S 1 + +#define AR_BTCOEX_CTRL3 0x1a60 +#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT 0x00000fff +#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S 0 + +#define AR_GLB_SWREG_DISCONT_MODE 0x2002c +#define AR_GLB_SWREG_DISCONT_EN_BT_WLAN 0x3 + +#define AR_MCI_MISC 0x1a74 +#define AR_MCI_MISC_HW_FIX_EN 0x00000001 +#define AR_MCI_MISC_HW_FIX_EN_S 0 + +#define AR_MCI_DBG_CNT_CTRL 0x1a78 +#define AR_MCI_DBG_CNT_CTRL_ENABLE 
0x00000001 +#define AR_MCI_DBG_CNT_CTRL_ENABLE_S 0 +#define AR_MCI_DBG_CNT_CTRL_BT_LINKID 0x000007f8 +#define AR_MCI_DBG_CNT_CTRL_BT_LINKID_S 3 + +#define MCI_STAT_ALL_BT_LINKID 0xffff + +#define AR_MCI_INTERRUPT_DEFAULT (AR_MCI_INTERRUPT_SW_MSG_DONE | \ + AR_MCI_INTERRUPT_RX_INVALID_HDR | \ + AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \ + AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \ + AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \ + AR_MCI_INTERRUPT_TX_SW_MSG_FAIL | \ + AR_MCI_INTERRUPT_RX_MSG | \ + AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE | \ + AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT) + +#define AR_MCI_INTERRUPT_MSG_FAIL_MASK (AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \ + AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \ + AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \ + AR_MCI_INTERRUPT_TX_SW_MSG_FAIL) + +#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \ + AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL | \ + AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \ + AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \ + AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \ + AR_MCI_INTERRUPT_RX_MSG_CONT_RST) + +#define AR_MCI_INTERRUPT_RX_MSG_DEFAULT (AR_MCI_INTERRUPT_RX_MSG_GPM | \ + AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET | \ + AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING | \ + AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING | \ + AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) + +#endif /* REG_MCI_H */ diff --git a/drivers/net/wireless/ath/ath9k/reg_wow.h b/drivers/net/wireless/ath/ath9k/reg_wow.h index 3abfca5..4530540 100644 --- a/drivers/net/wireless/ath/ath9k/reg_wow.h +++ b/drivers/net/wireless/ath/ath9k/reg_wow.h @@ -72,7 +72,7 @@ #define AR_WOW_MAC_INTR_EN 0x00040000 #define AR_WOW_MAGIC_EN 0x00010000 #define AR_WOW_PATTERN_EN(x) (x & 0xff) -#define AR_WOW_PAT_FOUND_SHIFT 8 +#define AR_WOW_PAT_FOUND_SHIFT 8 #define AR_WOW_PATTERN_FOUND(x) (x & (0xff << AR_WOW_PAT_FOUND_SHIFT)) #define AR_WOW_PATTERN_FOUND_MASK ((0xff) << AR_WOW_PAT_FOUND_SHIFT) #define AR_WOW_MAGIC_PAT_FOUND 0x00020000 @@ -90,6 +90,14 @@ AR_WOW_BEACON_FAIL | \ AR_WOW_KEEP_ALIVE_FAIL)) +#define AR_WOW2_PATTERN_EN(x) ((x & 0xff) << 0) +#define AR_WOW2_PATTERN_FOUND_SHIFT 8 +#define AR_WOW2_PATTERN_FOUND(x) (x & (0xff << AR_WOW2_PATTERN_FOUND_SHIFT)) +#define AR_WOW2_PATTERN_FOUND_MASK ((0xff) << AR_WOW2_PATTERN_FOUND_SHIFT) + +#define AR_WOW_STATUS2(x) (x & AR_WOW2_PATTERN_FOUND_MASK) +#define AR_WOW_CLEAR_EVENTS2(x) (x & ~(AR_WOW2_PATTERN_EN(0xff))) + #define AR_WOW_AIFS_CNT(x) (x & 0xff) #define AR_WOW_SLOT_CNT(x) ((x & 0xff) << 8) #define AR_WOW_KEEP_ALIVE_CNT(x) ((x & 0xff) << 16) diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 1b8e75c..0acd079 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1103,14 +1103,28 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf, struct sk_buff *skb; struct ath_frame_info *fi; struct ieee80211_tx_info *info; + struct ieee80211_vif *vif; struct ath_hw *ah = sc->sc_ah; if (sc->tx99_state || !ah->tpc_enabled) return MAX_RATE_POWER; skb = bf->bf_mpdu; - fi = get_frame_info(skb); info = IEEE80211_SKB_CB(skb); + vif = info->control.vif; + + if (!vif) { + max_power = sc->cur_chan->cur_txpower; + goto out; + } + + if (vif->bss_conf.txpower_type != NL80211_TX_POWER_LIMITED) { + max_power = min_t(u8, sc->cur_chan->cur_txpower, + 2 * vif->bss_conf.txpower); + goto out; + } + + fi = get_frame_info(skb); if (!AR_SREV_9300_20_OR_LATER(ah)) { int txpower = fi->tx_power; @@ -1147,25 +1161,25 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf, txpower -= 2; txpower = max(txpower, 0); - 
max_power = min_t(u8, ah->tx_power[rateidx], txpower); - - /* XXX: clamp minimum TX power at 1 for AR9160 since if - * max_power is set to 0, frames are transmitted at max - * TX power - */ - if (!max_power && !AR_SREV_9280_20_OR_LATER(ah)) - max_power = 1; + max_power = min_t(u8, ah->tx_power[rateidx], + 2 * vif->bss_conf.txpower); + max_power = min_t(u8, max_power, txpower); } else if (!bf->bf_state.bfs_paprd) { if (rateidx < 8 && (info->flags & IEEE80211_TX_CTL_STBC)) - max_power = min(ah->tx_power_stbc[rateidx], - fi->tx_power); + max_power = min_t(u8, ah->tx_power_stbc[rateidx], + 2 * vif->bss_conf.txpower); else - max_power = min(ah->tx_power[rateidx], fi->tx_power); + max_power = min_t(u8, ah->tx_power[rateidx], + 2 * vif->bss_conf.txpower); + max_power = min(max_power, fi->tx_power); } else { max_power = ah->paprd_training_power; } - - return max_power; +out: + /* XXX: clamp minimum TX power at 1 for AR9160 since if max_power + * is set to 0, frames are transmitted at max TX power + */ + return (!max_power && !AR_SREV_9280_20_OR_LATER(ah)) ? 1 : max_power; } static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 2d5ea21..4bd708c 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -387,11 +387,25 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, int ch; int rc = 0; + wil_print_connect_params(wil, sme); + if (test_bit(wil_status_fwconnecting, wil->status) || test_bit(wil_status_fwconnected, wil->status)) return -EALREADY; - wil_print_connect_params(wil, sme); + if (sme->ie_len > WMI_MAX_IE_LEN) { + wil_err(wil, "IE too large (%td bytes)\n", sme->ie_len); + return -ERANGE; + } + + rsn_eid = sme->ie ? + cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) : + NULL; + + if (sme->privacy && !rsn_eid) { + wil_err(wil, "Missing RSN IE for secure connection\n"); + return -EINVAL; + } bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid, sme->ssid, sme->ssid_len, @@ -407,17 +421,9 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, rc = -ENOENT; goto out; } + wil->privacy = sme->privacy; - rsn_eid = sme->ie ? 
- cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) : - NULL; - if (rsn_eid) { - if (sme->ie_len > WMI_MAX_IE_LEN) { - rc = -ERANGE; - wil_err(wil, "IE too large (%td bytes)\n", - sme->ie_len); - goto out; - } + if (wil->privacy) { /* For secure assoc, send WMI_DELETE_CIPHER_KEY_CMD */ rc = wmi_del_cipher_key(wil, 0, bss->bssid); if (rc) { @@ -450,7 +456,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, bss->capability); goto out; } - if (rsn_eid) { + if (wil->privacy) { conn.dot11_auth_mode = WMI_AUTH11_SHARED; conn.auth_mode = WMI_AUTH_WPA2_PSK; conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP; @@ -769,7 +775,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len, bcon->assocresp_ies); - wil->secure_pcp = info->privacy; + wil->privacy = info->privacy; netif_carrier_on(ndev); diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 45c3558e..3830cc2 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -29,6 +29,7 @@ static u32 mem_addr; static u32 dbg_txdesc_index; static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */ +u32 vring_idle_trsh = 16; /* HW fetches up to 16 descriptors at once */ enum dbg_off_type { doff_u32 = 0, @@ -102,23 +103,30 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data) % vring->size; int avail = vring->size - used - 1; char name[10]; + char sidle[10]; /* performance monitoring */ cycles_t now = get_cycles(); uint64_t idle = txdata->idle * 100; uint64_t total = now - txdata->begin; - do_div(idle, total); + if (total != 0) { + do_div(idle, total); + snprintf(sidle, sizeof(sidle), "%3d%%", + (int)idle); + } else { + snprintf(sidle, sizeof(sidle), "N/A"); + } txdata->begin = now; txdata->idle = 0ULL; snprintf(name, sizeof(name), "tx_%2d", i); seq_printf(s, - "\n%pM CID %d TID %d BACK([%d] %d TU A%s) [%3d|%3d] idle %3d%%\n", - wil->sta[cid].addr, cid, tid, - txdata->agg_wsize, txdata->agg_timeout, - txdata->agg_amsdu ? "+" : "-", - used, avail, (int)idle); + "\n%pM CID %d TID %d BACK([%d] %d TU A%s) [%3d|%3d] idle %s\n", + wil->sta[cid].addr, cid, tid, + txdata->agg_wsize, txdata->agg_timeout, + txdata->agg_amsdu ? 
"+" : "-", + used, avail, sidle); wil_print_vring(s, wil, name, vring, '_', 'H'); } @@ -549,7 +557,7 @@ static ssize_t wil_write_file_reset(struct file *file, const char __user *buf, dev_close(ndev); ndev->flags &= ~IFF_UP; rtnl_unlock(); - wil_reset(wil); + wil_reset(wil, true); return len; } @@ -618,7 +626,7 @@ static ssize_t wil_write_back(struct file *file, const char __user *buf, struct wil6210_priv *wil = file->private_data; int rc; char *kbuf = kmalloc(len + 1, GFP_KERNEL); - char cmd[8]; + char cmd[9]; int p1, p2, p3; if (!kbuf) @@ -1392,7 +1400,7 @@ static void wil6210_debugfs_init_isr(struct wil6210_priv *wil, /* fields in struct wil6210_priv */ static const struct dbg_off dbg_wil_off[] = { - WIL_FIELD(secure_pcp, S_IRUGO | S_IWUSR, doff_u32), + WIL_FIELD(privacy, S_IRUGO, doff_u32), WIL_FIELD(status[0], S_IRUGO | S_IWUSR, doff_ulong), WIL_FIELD(fw_version, S_IRUGO, doff_u32), WIL_FIELD(hw_version, S_IRUGO, doff_x32), @@ -1412,6 +1420,8 @@ static const struct dbg_off dbg_statics[] = { {"desc_index", S_IRUGO | S_IWUSR, (ulong)&dbg_txdesc_index, doff_u32}, {"vring_index", S_IRUGO | S_IWUSR, (ulong)&dbg_vring_index, doff_u32}, {"mem_addr", S_IRUGO | S_IWUSR, (ulong)&mem_addr, doff_u32}, + {"vring_idle_trsh", S_IRUGO | S_IWUSR, (ulong)&vring_idle_trsh, + doff_u32}, {}, }; diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c index 4c44a82..0ea695f 100644 --- a/drivers/net/wireless/ath/wil6210/ethtool.c +++ b/drivers/net/wireless/ath/wil6210/ethtool.c @@ -50,27 +50,19 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev, wil_dbg_misc(wil, "%s()\n", __func__); - if (test_bit(hw_capability_advanced_itr_moderation, - wil->hw_capabilities)) { - tx_itr_en = ioread32(wil->csr + - HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL)); - if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN) - tx_itr_val = - ioread32(wil->csr + - HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH)); - - rx_itr_en = ioread32(wil->csr + - HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL)); - if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN) - rx_itr_val = - ioread32(wil->csr + - HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH)); - } else { - rx_itr_en = ioread32(wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_CRL)); - if (rx_itr_en & BIT_DMA_ITR_CNT_CRL_EN) - rx_itr_val = ioread32(wil->csr + - HOSTADDR(RGF_DMA_ITR_CNT_TRSH)); - } + tx_itr_en = ioread32(wil->csr + + HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL)); + if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN) + tx_itr_val = + ioread32(wil->csr + + HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH)); + + rx_itr_en = ioread32(wil->csr + + HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL)); + if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN) + rx_itr_val = + ioread32(wil->csr + + HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH)); cp->tx_coalesce_usecs = tx_itr_val; cp->rx_coalesce_usecs = rx_itr_val; diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c index 93c5cc1..4428345 100644 --- a/drivers/net/wireless/ath/wil6210/fw.c +++ b/drivers/net/wireless/ath/wil6210/fw.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2015 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -20,6 +20,7 @@ #include "fw.h" MODULE_FIRMWARE(WIL_FW_NAME); +MODULE_FIRMWARE(WIL_FW2_NAME); /* target operations */ /* register read */ diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c index d4acf93..157f5ef 100644 --- a/drivers/net/wireless/ath/wil6210/fw_inc.c +++ b/drivers/net/wireless/ath/wil6210/fw_inc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2015 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -451,8 +451,6 @@ static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size) } return -EINVAL; } - /* Mark FW as loaded from host */ - S(RGF_USER_USAGE_6, 1); return rc; } diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index a6f9230..28ffc18 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -166,9 +166,16 @@ void wil_unmask_irq(struct wil6210_priv *wil) /* target write operation */ #define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0) -static -void wil_configure_interrupt_moderation_new(struct wil6210_priv *wil) +void wil_configure_interrupt_moderation(struct wil6210_priv *wil) { + wil_dbg_irq(wil, "%s()\n", __func__); + + /* disable interrupt moderation for monitor + * to get better timestamp precision + */ + if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) + return; + /* Disable and clear tx counter before (re)configuration */ W(RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR); W(RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration); @@ -206,42 +213,8 @@ void wil_configure_interrupt_moderation_new(struct wil6210_priv *wil) BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL); } -static -void wil_configure_interrupt_moderation_lgc(struct wil6210_priv *wil) -{ - /* disable, use usec resolution */ - W(RGF_DMA_ITR_CNT_CRL, BIT_DMA_ITR_CNT_CRL_CLR); - - wil_info(wil, "set ITR_TRSH = %d usec\n", wil->rx_max_burst_duration); - W(RGF_DMA_ITR_CNT_TRSH, wil->rx_max_burst_duration); - /* start it */ - W(RGF_DMA_ITR_CNT_CRL, - BIT_DMA_ITR_CNT_CRL_EN | BIT_DMA_ITR_CNT_CRL_EXT_TICK); -} - #undef W -void wil_configure_interrupt_moderation(struct wil6210_priv *wil) -{ - wil_dbg_irq(wil, "%s()\n", __func__); - - /* disable interrupt moderation for monitor - * to get better timestamp precision - */ - if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) - return; - - if (test_bit(hw_capability_advanced_itr_moderation, - wil->hw_capabilities)) - wil_configure_interrupt_moderation_new(wil); - else { - /* Advanced interrupt moderation is not available before - * Sparrow v2. Will use legacy interrupt moderation - */ - wil_configure_interrupt_moderation_lgc(wil); - } -} - static irqreturn_t wil6210_irq_rx(int irq, void *cookie) { struct wil6210_priv *wil = cookie; @@ -253,7 +226,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) trace_wil6210_irq_rx(isr); wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr); - if (!isr) { + if (unlikely(!isr)) { wil_err(wil, "spurious IRQ: RX\n"); return IRQ_NONE; } @@ -266,17 +239,18 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) * action is always the same - should empty the accumulated * packets from the RX ring. 
*/ - if (isr & (BIT_DMA_EP_RX_ICR_RX_DONE | BIT_DMA_EP_RX_ICR_RX_HTRSH)) { + if (likely(isr & (BIT_DMA_EP_RX_ICR_RX_DONE | + BIT_DMA_EP_RX_ICR_RX_HTRSH))) { wil_dbg_irq(wil, "RX done\n"); - if (isr & BIT_DMA_EP_RX_ICR_RX_HTRSH) + if (unlikely(isr & BIT_DMA_EP_RX_ICR_RX_HTRSH)) wil_err_ratelimited(wil, "Received \"Rx buffer is in risk of overflow\" interrupt\n"); isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE | BIT_DMA_EP_RX_ICR_RX_HTRSH); - if (test_bit(wil_status_reset_done, wil->status)) { - if (test_bit(wil_status_napi_en, wil->status)) { + if (likely(test_bit(wil_status_reset_done, wil->status))) { + if (likely(test_bit(wil_status_napi_en, wil->status))) { wil_dbg_txrx(wil, "NAPI(Rx) schedule\n"); need_unmask = false; napi_schedule(&wil->napi_rx); @@ -289,7 +263,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) } } - if (isr) + if (unlikely(isr)) wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr); /* Rx IRQ will be enabled when NAPI processing finished */ @@ -313,19 +287,19 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie) trace_wil6210_irq_tx(isr); wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr); - if (!isr) { + if (unlikely(!isr)) { wil_err(wil, "spurious IRQ: TX\n"); return IRQ_NONE; } wil6210_mask_irq_tx(wil); - if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) { + if (likely(isr & BIT_DMA_EP_TX_ICR_TX_DONE)) { wil_dbg_irq(wil, "TX done\n"); isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE; /* clear also all VRING interrupts */ isr &= ~(BIT(25) - 1UL); - if (test_bit(wil_status_reset_done, wil->status)) { + if (likely(test_bit(wil_status_reset_done, wil->status))) { wil_dbg_txrx(wil, "NAPI(Tx) schedule\n"); need_unmask = false; napi_schedule(&wil->napi_tx); @@ -334,7 +308,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie) } } - if (isr) + if (unlikely(isr)) wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr); /* Tx IRQ will be enabled when NAPI processing finished */ @@ -523,11 +497,11 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie) /** * pseudo_cause is Clear-On-Read, no need to ACK */ - if ((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff)) + if (unlikely((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff))) return IRQ_NONE; /* FIXME: IRQ mask debug */ - if (wil6210_debug_irq_mask(wil, pseudo_cause)) + if (unlikely(wil6210_debug_irq_mask(wil, pseudo_cause))) return IRQ_NONE; trace_wil6210_irq_pseudo(pseudo_cause); diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index b04e0af..db74e81 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -29,10 +29,6 @@ bool no_fw_recovery; module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery"); -static bool no_fw_load = true; -module_param(no_fw_load, bool, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(no_fw_load, " do not download FW, use one in on-card flash."); - /* if not set via modparam, will be set to default value of 1/8 of * rx ring size during init flow */ @@ -520,8 +516,6 @@ static int wil_target_reset(struct wil6210_priv *wil) { int delay = 0; u32 x; - bool is_reset_v2 = test_bit(hw_capability_reset_v2, - wil->hw_capabilities); wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name); @@ -532,82 +526,67 @@ static int wil_target_reset(struct wil6210_priv *wil) wil_halt_cpu(wil); + /* clear all boot loader "ready" bits */ + W(RGF_USER_BL + offsetof(struct RGF_BL, ready), 0); /* Clear Fw Download notification */ C(RGF_USER_USAGE_6, BIT(0)); - if (is_reset_v2) { - 
S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN); - /* XTAL stabilization should take about 3ms */ - usleep_range(5000, 7000); - x = R(RGF_CAF_PLL_LOCK_STATUS); - if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) { - wil_err(wil, "Xtal stabilization timeout\n" - "RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x); - return -ETIME; - } - /* switch 10k to XTAL*/ - C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF); - /* 40 MHz */ - C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL); - - W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f); - W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf); + S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN); + /* XTAL stabilization should take about 3ms */ + usleep_range(5000, 7000); + x = R(RGF_CAF_PLL_LOCK_STATUS); + if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) { + wil_err(wil, "Xtal stabilization timeout\n" + "RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x); + return -ETIME; } + /* switch 10k to XTAL*/ + C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF); + /* 40 MHz */ + C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL); + + W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f); + W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf); W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000); W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, - is_reset_v2 ? 0x000000f0 : 0x00000170); + W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0); W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00); - if (is_reset_v2) { - W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0); - W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0); - } + W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0); + W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0); W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0); W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0); W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0); W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); - if (is_reset_v2) { - W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003); - /* reset A2 PCIE AHB */ - W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000); - } else { - W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001); - W(RGF_PCIE_LOS_COUNTER_CTL, BIT(6) | BIT(8)); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000); - } + W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003); + W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000); /* reset A2 PCIE AHB */ - /* TODO: check order here!!! Erez code is different */ W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); - /* wait until device ready. typical time is 200..250 msec */ + /* wait until device ready. 
typical time is 20..80 msec */ do { msleep(RST_DELAY); - x = R(RGF_USER_HW_MACHINE_STATE); + x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready)); if (delay++ > RST_COUNT) { - wil_err(wil, "Reset not completed, hw_state 0x%08x\n", + wil_err(wil, "Reset not completed, bl.ready 0x%08x\n", x); return -ETIME; } - } while (x != HW_MACHINE_BOOT_DONE); - - if (!is_reset_v2) - W(RGF_PCIE_LOS_COUNTER_CTL, BIT(8)); + } while (!(x & BIT_BL_READY)); C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD); + /* enable fix for HW bug related to the SA/DA swap in AP Rx */ + S(RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN | + BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC); + wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY); return 0; } -#undef R -#undef W -#undef S -#undef C - void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r) { le32_to_cpus(&r->base); @@ -617,6 +596,32 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r) le32_to_cpus(&r->head); } +static int wil_get_bl_info(struct wil6210_priv *wil) +{ + struct net_device *ndev = wil_to_ndev(wil); + struct RGF_BL bl; + + wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL), sizeof(bl)); + le32_to_cpus(&bl.ready); + le32_to_cpus(&bl.version); + le32_to_cpus(&bl.rf_type); + le32_to_cpus(&bl.baseband_type); + + if (!is_valid_ether_addr(bl.mac_address)) { + wil_err(wil, "BL: Invalid MAC %pM\n", bl.mac_address); + return -EINVAL; + } + + ether_addr_copy(ndev->perm_addr, bl.mac_address); + if (!is_valid_ether_addr(ndev->dev_addr)) + ether_addr_copy(ndev->dev_addr, bl.mac_address); + wil_info(wil, + "Boot Loader: ver = %d MAC = %pM RF = 0x%08x bband = 0x%08x\n", + bl.version, bl.mac_address, bl.rf_type, bl.baseband_type); + + return 0; +} + static int wil_wait_for_fw_ready(struct wil6210_priv *wil) { ulong to = msecs_to_jiffies(1000); @@ -637,7 +642,7 @@ static int wil_wait_for_fw_ready(struct wil6210_priv *wil) * After calling this routine, you're expected to reload * the firmware. 
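 * (For reference, with the load_fw flag added below: callers that bring the
 * interface up use wil_reset(wil, true), which downloads both WIL_FW_NAME and
 * WIL_FW2_NAME and then waits for the FW-ready event, while __wil_down() and
 * the PCIe probe path, which only needs the boot-loader MAC address, call
 * wil_reset(wil, false) and skip the firmware download.)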
*/ -int wil_reset(struct wil6210_priv *wil) +int wil_reset(struct wil6210_priv *wil, bool load_fw) { int rc; @@ -675,30 +680,36 @@ int wil_reset(struct wil6210_priv *wil) if (rc) return rc; - if (!no_fw_load) { - wil_info(wil, "Use firmware <%s>\n", WIL_FW_NAME); + rc = wil_get_bl_info(wil); + if (rc) + return rc; + + if (load_fw) { + wil_info(wil, "Use firmware <%s> + board <%s>\n", WIL_FW_NAME, + WIL_FW2_NAME); + wil_halt_cpu(wil); /* Loading f/w from the file */ rc = wil_request_firmware(wil, WIL_FW_NAME); if (rc) return rc; + rc = wil_request_firmware(wil, WIL_FW2_NAME); + if (rc) + return rc; - /* clear any interrupts which on-card-firmware may have set */ + /* Mark FW as loaded from host */ + S(RGF_USER_USAGE_6, 1); + + /* clear any interrupts which on-card-firmware + * may have set + */ wil6210_clear_irq(wil); - { /* CAF_ICR - clear and mask */ - u32 a = HOSTADDR(RGF_CAF_ICR) + - offsetof(struct RGF_ICR, ICR); - u32 m = HOSTADDR(RGF_CAF_ICR) + - offsetof(struct RGF_ICR, IMV); - u32 icr = ioread32(wil->csr + a); - - iowrite32(icr, wil->csr + a); /* W1C */ - iowrite32(~0, wil->csr + m); - wmb(); /* wait for completion */ - } + /* CAF_ICR - clear and mask */ + /* it is W1C, clear by writing back same value */ + S(RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0); + W(RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0); + wil_release_cpu(wil); - } else { - wil_info(wil, "Use firmware from on-card flash\n"); } /* init after reset */ @@ -706,15 +717,22 @@ int wil_reset(struct wil6210_priv *wil) reinit_completion(&wil->wmi_ready); reinit_completion(&wil->wmi_call); - wil_configure_interrupt_moderation(wil); - wil_unmask_irq(wil); + if (load_fw) { + wil_configure_interrupt_moderation(wil); + wil_unmask_irq(wil); - /* we just started MAC, wait for FW ready */ - rc = wil_wait_for_fw_ready(wil); + /* we just started MAC, wait for FW ready */ + rc = wil_wait_for_fw_ready(wil); + } return rc; } +#undef R +#undef W +#undef S +#undef C + void wil_fw_error_recovery(struct wil6210_priv *wil) { wil_dbg_misc(wil, "starting fw error recovery\n"); @@ -730,7 +748,7 @@ int __wil_up(struct wil6210_priv *wil) WARN_ON(!mutex_is_locked(&wil->mutex)); - rc = wil_reset(wil); + rc = wil_reset(wil, true); if (rc) return rc; @@ -837,7 +855,7 @@ int __wil_down(struct wil6210_priv *wil) if (!iter) wil_err(wil, "timeout waiting for idle FW/HW\n"); - wil_rx_fini(wil); + wil_reset(wil, false); return 0; } diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index 3dd2670..25343cf 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -39,18 +39,6 @@ void wil_set_capabilities(struct wil6210_priv *wil) bitmap_zero(wil->hw_capabilities, hw_capability_last); switch (rev_id) { - case JTAG_DEV_ID_MARLON_B0: - wil->hw_name = "Marlon B0"; - wil->hw_version = HW_VER_MARLON_B0; - break; - case JTAG_DEV_ID_SPARROW_A0: - wil->hw_name = "Sparrow A0"; - wil->hw_version = HW_VER_SPARROW_A0; - break; - case JTAG_DEV_ID_SPARROW_A1: - wil->hw_name = "Sparrow A1"; - wil->hw_version = HW_VER_SPARROW_A1; - break; case JTAG_DEV_ID_SPARROW_B0: wil->hw_name = "Sparrow B0"; wil->hw_version = HW_VER_SPARROW_B0; @@ -62,13 +50,6 @@ void wil_set_capabilities(struct wil6210_priv *wil) } wil_info(wil, "Board hardware is %s\n", wil->hw_name); - - if (wil->hw_version >= HW_VER_SPARROW_A0) - set_bit(hw_capability_reset_v2, wil->hw_capabilities); - - if (wil->hw_version >= HW_VER_SPARROW_B0) - set_bit(hw_capability_advanced_itr_moderation, - 
wil->hw_capabilities); } void wil_disable_irq(struct wil6210_priv *wil) @@ -150,7 +131,7 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil) /* need reset here to obtain MAC */ mutex_lock(&wil->mutex); - rc = wil_reset(wil); + rc = wil_reset(wil, false); mutex_unlock(&wil->mutex); if (debug_fw) rc = 0; @@ -305,7 +286,6 @@ static void wil_pcie_remove(struct pci_dev *pdev) } static const struct pci_device_id wil6210_pcie_ids[] = { - { PCI_DEVICE(0x1ae9, 0x0301) }, { PCI_DEVICE(0x1ae9, 0x0310) }, { PCI_DEVICE(0x1ae9, 0x0302) }, /* same as above, firmware broken */ { /* end: all zeroes */ }, diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 8439f65..7f2f560 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -53,34 +53,38 @@ static inline int wil_vring_is_full(struct vring *vring) return wil_vring_next_tail(vring) == vring->swhead; } -/* - * Available space in Tx Vring - */ -static inline int wil_vring_avail_tx(struct vring *vring) +/* Used space in Tx Vring */ +static inline int wil_vring_used_tx(struct vring *vring) { u32 swhead = vring->swhead; u32 swtail = vring->swtail; - int used = (vring->size + swhead - swtail) % vring->size; + return (vring->size + swhead - swtail) % vring->size; +} - return vring->size - used - 1; +/* Available space in Tx Vring */ +static inline int wil_vring_avail_tx(struct vring *vring) +{ + return vring->size - wil_vring_used_tx(vring) - 1; } -/** - * wil_vring_wmark_low - low watermark for available descriptor space - */ +/* wil_vring_wmark_low - low watermark for available descriptor space */ static inline int wil_vring_wmark_low(struct vring *vring) { return vring->size/8; } -/** - * wil_vring_wmark_high - high watermark for available descriptor space - */ +/* wil_vring_wmark_high - high watermark for available descriptor space */ static inline int wil_vring_wmark_high(struct vring *vring) { return vring->size/4; } +/* wil_val_in_range - check if value in [min,max) */ +static inline bool wil_val_in_range(int val, int min, int max) +{ + return val >= min && val < max; +} + static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring) { struct device *dev = wil_to_dev(wil); @@ -98,8 +102,7 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring) vring->va = NULL; return -ENOMEM; } - /* - * vring->va should be aligned on its size rounded up to power of 2 + /* vring->va should be aligned on its size rounded up to power of 2 * This is granted by the dma_alloc_coherent */ vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL); @@ -346,27 +349,6 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil, } } -/* - * Fast swap in place between 2 registers - */ -static void wil_swap_u16(u16 *a, u16 *b) -{ - *a ^= *b; - *b ^= *a; - *a ^= *b; -} - -static void wil_swap_ethaddr(void *data) -{ - struct ethhdr *eth = data; - u16 *s = (u16 *)eth->h_source; - u16 *d = (u16 *)eth->h_dest; - - wil_swap_u16(s++, d++); - wil_swap_u16(s++, d++); - wil_swap_u16(s, d); -} - /** * reap 1 frame from @swhead * @@ -386,17 +368,16 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, unsigned int sz = mtu_max + ETH_HLEN; u16 dmalen; u8 ftype; - u8 ds_bits; int cid; struct wil_net_stats *stats; BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb)); - if (wil_vring_is_empty(vring)) + if (unlikely(wil_vring_is_empty(vring))) return NULL; _d = &vring->va[vring->swhead].rx; - if (!(_d->dma.status & 
RX_DMA_STATUS_DU)) { + if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) { /* it is not error, we just reached end of Rx done area */ return NULL; } @@ -416,7 +397,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4, (const void *)d, sizeof(*d), false); - if (dmalen > sz) { + if (unlikely(dmalen > sz)) { wil_err(wil, "Rx size too large: %d bytes!\n", dmalen); kfree_skb(skb); return NULL; @@ -445,14 +426,14 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, * in Rx descriptor. If type is not data, it is 802.11 frame as is */ ftype = wil_rxdesc_ftype(d) << 2; - if (ftype != IEEE80211_FTYPE_DATA) { + if (unlikely(ftype != IEEE80211_FTYPE_DATA)) { wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype); /* TODO: process it */ kfree_skb(skb); return NULL; } - if (skb->len < ETH_HLEN) { + if (unlikely(skb->len < ETH_HLEN)) { wil_err(wil, "Short frame, len = %d\n", skb->len); /* TODO: process it (i.e. BAR) */ kfree_skb(skb); @@ -463,9 +444,9 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, * and in case of error drop the packet * higher stack layers will handle retransmission (if required) */ - if (d->dma.status & RX_DMA_STATUS_L4I) { + if (likely(d->dma.status & RX_DMA_STATUS_L4I)) { /* L4 protocol identified, csum calculated */ - if ((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0) + if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0)) skb->ip_summed = CHECKSUM_UNNECESSARY; /* If HW reports bad checksum, let IP stack re-check it * For example, HW don't understand Microsoft IP stack that @@ -474,15 +455,6 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, */ } - ds_bits = wil_rxdesc_ds_bits(d); - if (ds_bits == 1) { - /* - * HW bug - in ToDS mode, i.e. Rx on AP side, - * addresses get swapped - */ - wil_swap_ethaddr(skb->data); - } - return skb; } @@ -503,7 +475,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count) (next_tail != v->swhead) && (count-- > 0); v->swtail = next_tail) { rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom); - if (rc) { + if (unlikely(rc)) { wil_err(wil, "Error %d in wil_rx_refill[%d]\n", rc, v->swtail); break; @@ -565,7 +537,7 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota) struct vring *v = &wil->vring_rx; struct sk_buff *skb; - if (!v->va) { + if (unlikely(!v->va)) { wil_err(wil, "Rx IRQ while Rx not yet initialized\n"); return; } @@ -952,13 +924,14 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index]; uint i = swhead; dma_addr_t pa; + int used; wil_dbg_txrx(wil, "%s()\n", __func__); if (unlikely(!txdata->enabled)) return -EINVAL; - if (avail < 1 + nr_frags) { + if (unlikely(avail < 1 + nr_frags)) { wil_err_ratelimited(wil, "Tx ring[%2d] full. 
No space for %d fragments\n", vring_index, 1 + nr_frags); @@ -979,7 +952,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, /* 1-st segment */ wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index); /* Process TCP/UDP checksum offloading */ - if (wil_tx_desc_offload_cksum_set(wil, d, skb)) { + if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) { wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n", vring_index); goto dma_error; @@ -1027,8 +1000,14 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, */ vring->ctx[i].skb = skb_get(skb); - if (wil_vring_is_empty(vring)) /* performance monitoring */ + /* performance monitoring */ + used = wil_vring_used_tx(vring); + if (wil_val_in_range(vring_idle_trsh, + used, used + nr_frags + 1)) { txdata->idle += get_cycles() - txdata->last_idle; + wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n", + vring_index, used, used + nr_frags + 1); + } /* advance swhead */ wil_vring_advance_head(vring, nr_frags + 1); @@ -1082,18 +1061,18 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) int rc; wil_dbg_txrx(wil, "%s()\n", __func__); - if (!test_bit(wil_status_fwready, wil->status)) { + if (unlikely(!test_bit(wil_status_fwready, wil->status))) { if (!pr_once_fw) { wil_err(wil, "FW not ready\n"); pr_once_fw = true; } goto drop; } - if (!test_bit(wil_status_fwconnected, wil->status)) { + if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) { wil_err(wil, "FW not connected\n"); goto drop; } - if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) { + if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) { wil_err(wil, "Xmit in monitor mode not supported\n"); goto drop; } @@ -1109,7 +1088,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) else vring = wil_tx_bcast(wil, skb); } - if (!vring) { + if (unlikely(!vring)) { wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest); goto drop; } @@ -1117,7 +1096,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) rc = wil_tx_vring(wil, vring, skb); /* do we still have enough room in the vring? 
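 * (For example, with the helpers introduced above: a ring of size 1024 with
 * swhead = 10 and swtail = 1000 has wil_vring_used_tx() =
 * (1024 + 10 - 1000) % 1024 = 34 descriptors in flight, and
 * wil_vring_avail_tx() = 1024 - 34 - 1 = 989 free slots; one slot is always
 * left unused so that swhead == swtail unambiguously means an empty ring.)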
*/ - if (wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring)) { + if (unlikely(wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring))) { netif_tx_stop_all_queues(wil_to_ndev(wil)); wil_dbg_txrx(wil, "netif_tx_stop : ring full\n"); } @@ -1172,19 +1151,23 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) int cid = wil->vring2cid_tid[ringid][0]; struct wil_net_stats *stats = &wil->sta[cid].stats; volatile struct vring_tx_desc *_d; + int used_before_complete; + int used_new; - if (!vring->va) { + if (unlikely(!vring->va)) { wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid); return 0; } - if (!txdata->enabled) { + if (unlikely(!txdata->enabled)) { wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid); return 0; } wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid); + used_before_complete = wil_vring_used_tx(vring); + while (!wil_vring_is_empty(vring)) { int new_swtail; struct wil_ctx *ctx = &vring->ctx[vring->swtail]; @@ -1196,7 +1179,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) /* TODO: check we are not past head */ _d = &vring->va[lf].tx; - if (!(_d->dma.status & TX_DMA_STATUS_DU)) + if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU))) break; new_swtail = (lf + 1) % vring->size; @@ -1224,7 +1207,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) wil_txdesc_unmap(dev, d, ctx); if (skb) { - if (d->dma.error == 0) { + if (likely(d->dma.error == 0)) { ndev->stats.tx_packets++; stats->tx_packets++; ndev->stats.tx_bytes += skb->len; @@ -1246,8 +1229,12 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) } } - if (wil_vring_is_empty(vring)) { /* performance monitoring */ - wil_dbg_txrx(wil, "Ring[%2d] empty\n", ringid); + /* performance monitoring */ + used_new = wil_vring_used_tx(vring); + if (wil_val_in_range(vring_idle_trsh, + used_new, used_before_complete)) { + wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n", + ringid, used_before_complete, used_new); txdata->last_idle = get_cycles(); } diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 9461156..b6e65c3 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -27,9 +27,11 @@ extern bool no_fw_recovery; extern unsigned int mtu_max; extern unsigned short rx_ring_overflow_thrsh; extern int agg_wsize; +extern u32 vring_idle_trsh; #define WIL_NAME "wil6210" -#define WIL_FW_NAME "wil6210.fw" +#define WIL_FW_NAME "wil6210.fw" /* code */ +#define WIL_FW2_NAME "wil6210.board" /* board & radio parameters */ #define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */ @@ -120,6 +122,16 @@ struct RGF_ICR { u32 IMC; /* Mask Clear, write 1 to clear */ } __packed; +struct RGF_BL { + u32 ready; /* 0x880A3C bit [0] */ +#define BIT_BL_READY BIT(0) + u32 version; /* 0x880A40 version of the BL struct */ + u32 rf_type; /* 0x880A44 ID of the connected RF */ + u32 baseband_type; /* 0x880A48 ID of the baseband */ + u8 mac_address[ETH_ALEN]; /* 0x880A4C permanent MAC */ + u8 pad[2]; +} __packed; + /* registers - FW addresses */ #define RGF_USER_USAGE_1 (0x880004) #define RGF_USER_USAGE_6 (0x880018) @@ -130,6 +142,7 @@ struct RGF_ICR { #define RGF_USER_MAC_CPU_0 (0x8801fc) #define BIT_USER_MAC_CPU_MAN_RST BIT(1) /* mac_cpu_man_rst */ #define RGF_USER_USER_SCRATCH_PAD (0x8802bc) +#define RGF_USER_BL (0x880A3C) /* Boot Loader */ #define RGF_USER_FW_REV_ID (0x880a8c) /* chip revision */ #define RGF_USER_CLKS_CTL_0 (0x880abc) #define BIT_USER_CLKS_CAR_AHB_SW_SEL BIT(1) /* ref clk/PLL */ @@ -169,6 +182,13 @@ struct 
RGF_ICR { #define BIT_DMA_ITR_CNT_CRL_CLR BIT(3) #define BIT_DMA_ITR_CNT_CRL_REACH_TRSH BIT(4) +/* Offload control (Sparrow B0+) */ +#define RGF_DMA_OFUL_NID_0 (0x881cd4) + #define BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN BIT(0) + #define BIT_DMA_OFUL_NID_0_TX_EXT_TR_EN BIT(1) + #define BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC BIT(2) + #define BIT_DMA_OFUL_NID_0_TX_EXT_A3_SRC BIT(3) + /* New (sparrow v2+) interrupt moderation control */ #define RGF_DMA_ITR_TX_DESQ_NO_MOD (0x881d40) #define RGF_DMA_ITR_TX_CNT_TRSH (0x881d34) @@ -229,16 +249,10 @@ struct RGF_ICR { #define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0) #define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */ - #define JTAG_DEV_ID_MARLON_B0 (0x0612072f) - #define JTAG_DEV_ID_SPARROW_A0 (0x0632072f) - #define JTAG_DEV_ID_SPARROW_A1 (0x1632072f) #define JTAG_DEV_ID_SPARROW_B0 (0x2632072f) enum { HW_VER_UNKNOWN, - HW_VER_MARLON_B0, /* JTAG_DEV_ID_MARLON_B0 */ - HW_VER_SPARROW_A0, /* JTAG_DEV_ID_SPARROW_A0 */ - HW_VER_SPARROW_A1, /* JTAG_DEV_ID_SPARROW_A1 */ HW_VER_SPARROW_B0, /* JTAG_DEV_ID_SPARROW_B0 */ }; @@ -482,8 +496,6 @@ enum { }; enum { - hw_capability_reset_v2 = 0, - hw_capability_advanced_itr_moderation = 1, hw_capability_last }; @@ -528,7 +540,7 @@ struct wil6210_priv { wait_queue_head_t wq; /* for all wait_event() use */ /* profile */ u32 monitor_flags; - u32 secure_pcp; /* create secure PCP? */ + u32 privacy; /* secure connection? */ int sinfo_gen; /* interrupt moderation */ u32 tx_max_burst_duration; @@ -658,7 +670,7 @@ int wil_if_add(struct wil6210_priv *wil); void wil_if_remove(struct wil6210_priv *wil); int wil_priv_init(struct wil6210_priv *wil); void wil_priv_deinit(struct wil6210_priv *wil); -int wil_reset(struct wil6210_priv *wil); +int wil_reset(struct wil6210_priv *wil, bool no_fw); void wil_fw_error_recovery(struct wil6210_priv *wil); void wil_set_recovery_state(struct wil6210_priv *wil, int state); int wil_up(struct wil6210_priv *wil); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 0f3e433..0213135 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -281,7 +281,6 @@ int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) /*=== Event handlers ===*/ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len) { - struct net_device *ndev = wil_to_ndev(wil); struct wireless_dev *wdev = wil->wdev; struct wmi_ready_event *evt = d; @@ -290,11 +289,7 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len) wil_info(wil, "FW ver. 
%d; MAC %pM; %d MID's\n", wil->fw_version, evt->mac, wil->n_mids); - - if (!is_valid_ether_addr(ndev->dev_addr)) { - memcpy(ndev->dev_addr, evt->mac, ETH_ALEN); - memcpy(ndev->perm_addr, evt->mac, ETH_ALEN); - } + /* ignore MAC address, we already have it from the boot loader */ snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version), "%d", wil->fw_version); } @@ -879,7 +874,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan) struct wmi_pcp_started_event evt; } __packed reply; - if (!wil->secure_pcp) + if (!wil->privacy) cmd.disable_sec = 1; if ((cmd.pcp_max_assoc_sta > WIL6210_MAX_CID) || diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c index 55db9f0..6a1f03c 100644 --- a/drivers/net/wireless/atmel.c +++ b/drivers/net/wireless/atmel.c @@ -1004,7 +1004,7 @@ static void frag_rx_path(struct atmel_private *priv, atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4); if ((crc ^ 0xffffffff) != netcrc) { priv->dev->stats.rx_crc_errors++; - memset(priv->frag_source, 0xff, ETH_ALEN); + eth_broadcast_addr(priv->frag_source); } } @@ -1022,7 +1022,7 @@ static void frag_rx_path(struct atmel_private *priv, atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4); if ((crc ^ 0xffffffff) != netcrc) { priv->dev->stats.rx_crc_errors++; - memset(priv->frag_source, 0xff, ETH_ALEN); + eth_broadcast_addr(priv->frag_source); more_frags = 1; /* don't send broken assembly */ } } @@ -1031,7 +1031,7 @@ static void frag_rx_path(struct atmel_private *priv, priv->frag_no++; if (!more_frags) { /* last one */ - memset(priv->frag_source, 0xff, ETH_ALEN); + eth_broadcast_addr(priv->frag_source); if (!(skb = dev_alloc_skb(priv->frag_len + 14))) { priv->dev->stats.rx_dropped++; } else { @@ -1127,7 +1127,7 @@ static void rx_done_irq(struct atmel_private *priv) atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size); /* we use the same buffer for frag reassembly and control packets */ - memset(priv->frag_source, 0xff, ETH_ALEN); + eth_broadcast_addr(priv->frag_source); if (priv->do_rx_crc) { /* last 4 octets is crc */ @@ -1379,7 +1379,7 @@ static int atmel_close(struct net_device *dev) wrqu.data.length = 0; wrqu.data.flags = 0; wrqu.ap_addr.sa_family = ARPHRD_ETHER; - memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); + eth_zero_addr(wrqu.ap_addr.sa_data); wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); } @@ -1555,7 +1555,7 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port, priv->last_qual = jiffies; priv->last_beacon_timestamp = 0; memset(priv->frag_source, 0xff, sizeof(priv->frag_source)); - memset(priv->BSSID, 0, ETH_ALEN); + eth_zero_addr(priv->BSSID); priv->CurrentBSSID[0] = 0xFF; /* Initialize to something invalid.... 
*/ priv->station_was_associated = 0; @@ -2760,7 +2760,7 @@ static void atmel_scan(struct atmel_private *priv, int specific_ssid) u8 SSID_size; } cmd; - memset(cmd.BSSID, 0xff, ETH_ALEN); + eth_broadcast_addr(cmd.BSSID); if (priv->fast_scan) { cmd.SSID_size = priv->SSID_size; @@ -4049,7 +4049,7 @@ static int reset_atmel_card(struct net_device *dev) wrqu.data.length = 0; wrqu.data.flags = 0; wrqu.ap_addr.sa_family = ARPHRD_ETHER; - memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); + eth_zero_addr(wrqu.ap_addr.sa_data); wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); } diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index ccbdb05..ac99798 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -4132,7 +4132,7 @@ static void b43_op_bss_info_changed(struct ieee80211_hw *hw, if (conf->bssid) memcpy(wl->bssid, conf->bssid, ETH_ALEN); else - memset(wl->bssid, 0, ETH_ALEN); + eth_zero_addr(wl->bssid); } if (b43_status(dev) >= B43_STAT_INITIALIZED) { @@ -4819,7 +4819,7 @@ static void b43_wireless_core_exit(struct b43_wldev *dev) switch (dev->dev->bus_type) { #ifdef CONFIG_B43_BCMA case B43_BUS_BCMA: - bcma_core_pci_down(dev->dev->bdev->bus); + bcma_host_pci_down(dev->dev->bdev->bus); break; #endif #ifdef CONFIG_B43_SSB @@ -4866,9 +4866,9 @@ static int b43_wireless_core_init(struct b43_wldev *dev) switch (dev->dev->bus_type) { #ifdef CONFIG_B43_BCMA case B43_BUS_BCMA: - bcma_core_pci_irq_ctl(&dev->dev->bdev->bus->drv_pci[0], + bcma_core_pci_irq_ctl(dev->dev->bdev->bus, dev->dev->bdev, true); - bcma_core_pci_up(dev->dev->bdev->bus); + bcma_host_pci_up(dev->dev->bdev->bus); break; #endif #ifdef CONFIG_B43_SSB @@ -5051,7 +5051,7 @@ static void b43_op_remove_interface(struct ieee80211_hw *hw, wl->operating = false; b43_adjust_opmode(dev); - memset(wl->mac_addr, 0, ETH_ALEN); + eth_zero_addr(wl->mac_addr); b43_upload_card_macaddress(dev); mutex_unlock(&wl->mutex); @@ -5067,8 +5067,8 @@ static int b43_op_start(struct ieee80211_hw *hw) /* Kill all old instance specific information to make sure * the card won't use it in the short timeframe between start * and mac80211 reconfiguring it. */ - memset(wl->bssid, 0, ETH_ALEN); - memset(wl->mac_addr, 0, ETH_ALEN); + eth_zero_addr(wl->bssid); + eth_zero_addr(wl->mac_addr); wl->filter_flags = 0; wl->radiotap_enabled = false; b43_qos_clear(wl); diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c index 4e58c00..c77b7f5 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/b43legacy/main.c @@ -2866,7 +2866,7 @@ static void b43legacy_op_bss_info_changed(struct ieee80211_hw *hw, if (conf->bssid) memcpy(wl->bssid, conf->bssid, ETH_ALEN); else - memset(wl->bssid, 0, ETH_ALEN); + eth_zero_addr(wl->bssid); } if (b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED) { @@ -3470,7 +3470,7 @@ static void b43legacy_op_remove_interface(struct ieee80211_hw *hw, spin_lock_irqsave(&wl->irq_lock, flags); b43legacy_adjust_opmode(dev); - memset(wl->mac_addr, 0, ETH_ALEN); + eth_zero_addr(wl->mac_addr); b43legacy_upload_card_macaddress(dev); spin_unlock_irqrestore(&wl->irq_lock, flags); @@ -3487,8 +3487,8 @@ static int b43legacy_op_start(struct ieee80211_hw *hw) /* Kill all old instance specific information to make sure * the card won't use it in the short timeframe between start * and mac80211 reconfiguring it. 
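 * (For reference, the eth_zero_addr()/eth_broadcast_addr() conversions used
 * throughout these hunks are the <linux/etherdevice.h> helpers equivalent to
 * memset(addr, 0x00, ETH_ALEN) and memset(addr, 0xff, ETH_ALEN) respectively.)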
*/ - memset(wl->bssid, 0, ETH_ALEN); - memset(wl->mac_addr, 0, ETH_ALEN); + eth_zero_addr(wl->bssid); + eth_zero_addr(wl->mac_addr); wl->filter_flags = 0; wl->beacon0_uploaded = false; wl->beacon1_uploaded = false; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c index 7944224..c438ccd 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c @@ -58,6 +58,14 @@ #define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */ #define BRCMF_DEFAULT_RXGLOM_SIZE 32 /* max rx frames in glom chain */ +struct brcmf_sdiod_freezer { + atomic_t freezing; + atomic_t thread_count; + u32 frozen_count; + wait_queue_head_t thread_freeze; + struct completion resumed; +}; + static int brcmf_sdiod_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE; module_param_named(txglomsz, brcmf_sdiod_txglomsz, int, 0); MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]"); @@ -197,6 +205,30 @@ int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev) return 0; } +void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev, + enum brcmf_sdiod_state state) +{ + if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM || + state == sdiodev->state) + return; + + brcmf_dbg(TRACE, "%d -> %d\n", sdiodev->state, state); + switch (sdiodev->state) { + case BRCMF_SDIOD_DATA: + /* any other state means bus interface is down */ + brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN); + break; + case BRCMF_SDIOD_DOWN: + /* transition from DOWN to DATA means bus interface is up */ + if (state == BRCMF_SDIOD_DATA) + brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_UP); + break; + default: + break; + } + sdiodev->state = state; +} + static inline int brcmf_sdiod_f0_writeb(struct sdio_func *func, uint regaddr, u8 byte) { @@ -269,12 +301,6 @@ static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn, return ret; } -static void brcmf_sdiod_nomedium_state(struct brcmf_sdio_dev *sdiodev) -{ - sdiodev->state = BRCMF_STATE_NOMEDIUM; - brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN); -} - static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 regsz, void *data, bool write) { @@ -282,7 +308,7 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr, s32 retry = 0; int ret; - if (sdiodev->state == BRCMF_STATE_NOMEDIUM) + if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM) return -ENOMEDIUM; /* @@ -308,7 +334,7 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr, retry++ < SDIOH_API_ACCESS_RETRY_LIMIT); if (ret == -ENOMEDIUM) - brcmf_sdiod_nomedium_state(sdiodev); + brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM); else if (ret != 0) { /* * SleepCSR register access can fail when @@ -331,7 +357,7 @@ brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address) int err = 0, i; u8 addr[3]; - if (sdiodev->state == BRCMF_STATE_NOMEDIUM) + if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM) return -ENOMEDIUM; addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK; @@ -460,7 +486,7 @@ static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn, err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr, req_sz); if (err == -ENOMEDIUM) - brcmf_sdiod_nomedium_state(sdiodev); + brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM); return err; } @@ -595,7 +621,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn, ret = mmc_cmd.error ? 
mmc_cmd.error : mmc_dat.error; if (ret == -ENOMEDIUM) { - brcmf_sdiod_nomedium_state(sdiodev); + brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM); break; } else if (ret != 0) { brcmf_err("CMD53 sg block %s failed %d\n", @@ -877,6 +903,87 @@ static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev) sdiodev->txglomsz = brcmf_sdiod_txglomsz; } +#ifdef CONFIG_PM_SLEEP +static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev) +{ + sdiodev->freezer = kzalloc(sizeof(*sdiodev->freezer), GFP_KERNEL); + if (!sdiodev->freezer) + return -ENOMEM; + atomic_set(&sdiodev->freezer->thread_count, 0); + atomic_set(&sdiodev->freezer->freezing, 0); + init_waitqueue_head(&sdiodev->freezer->thread_freeze); + init_completion(&sdiodev->freezer->resumed); + return 0; +} + +static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev) +{ + if (sdiodev->freezer) { + WARN_ON(atomic_read(&sdiodev->freezer->freezing)); + kfree(sdiodev->freezer); + } +} + +static int brcmf_sdiod_freezer_on(struct brcmf_sdio_dev *sdiodev) +{ + atomic_t *expect = &sdiodev->freezer->thread_count; + int res = 0; + + sdiodev->freezer->frozen_count = 0; + reinit_completion(&sdiodev->freezer->resumed); + atomic_set(&sdiodev->freezer->freezing, 1); + brcmf_sdio_trigger_dpc(sdiodev->bus); + wait_event(sdiodev->freezer->thread_freeze, + atomic_read(expect) == sdiodev->freezer->frozen_count); + sdio_claim_host(sdiodev->func[1]); + res = brcmf_sdio_sleep(sdiodev->bus, true); + sdio_release_host(sdiodev->func[1]); + return res; +} + +static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev) +{ + sdio_claim_host(sdiodev->func[1]); + brcmf_sdio_sleep(sdiodev->bus, false); + sdio_release_host(sdiodev->func[1]); + atomic_set(&sdiodev->freezer->freezing, 0); + complete_all(&sdiodev->freezer->resumed); +} + +bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev) +{ + return atomic_read(&sdiodev->freezer->freezing); +} + +void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev) +{ + if (!brcmf_sdiod_freezing(sdiodev)) + return; + sdiodev->freezer->frozen_count++; + wake_up(&sdiodev->freezer->thread_freeze); + wait_for_completion(&sdiodev->freezer->resumed); +} + +void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev) +{ + atomic_inc(&sdiodev->freezer->thread_count); +} + +void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev) +{ + atomic_dec(&sdiodev->freezer->thread_count); +} +#else +static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev) +{ + return 0; +} + +static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev) +{ +} +#endif /* CONFIG_PM_SLEEP */ + static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev) { if (sdiodev->bus) { @@ -884,6 +991,8 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev) sdiodev->bus = NULL; } + brcmf_sdiod_freezer_detach(sdiodev); + /* Disable Function 2 */ sdio_claim_host(sdiodev->func[2]); sdio_disable_func(sdiodev->func[2]); @@ -955,6 +1064,10 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev) */ brcmf_sdiod_sgtable_alloc(sdiodev); + ret = brcmf_sdiod_freezer_attach(sdiodev); + if (ret) + goto out; + /* try to attach to the target device */ sdiodev->bus = brcmf_sdio_probe(sdiodev); if (!sdiodev->bus) { @@ -1050,9 +1163,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, bus_if->wowl_supported = true; #endif - sdiodev->sleeping = false; - atomic_set(&sdiodev->suspend, false); - init_waitqueue_head(&sdiodev->idle_wait); + brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN); 
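/* Illustrative sketch (not part of this patch): the participation pattern the
 * freezer helpers above expect from a freezable bus thread, condensed from the
 * watchdog-thread changes further down. Names prefixed "example_" are
 * hypothetical; the brcmf_sdiod_* calls are the ones introduced here.
 */
static int example_bus_thread(void *data)
{
        struct brcmf_sdio_dev *sdiodev = data;

        brcmf_sdiod_freezer_count(sdiodev);      /* register as freezable */
        while (!kthread_should_stop()) {
                brcmf_sdiod_try_freeze(sdiodev); /* park here during suspend */
                /* ... perform one unit of bus work ... */
        }
        brcmf_sdiod_freezer_uncount(sdiodev);    /* deregister on exit */
        return 0;
}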
brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n"); err = brcmf_sdiod_probe(sdiodev); @@ -1114,24 +1225,22 @@ void brcmf_sdio_wowl_config(struct device *dev, bool enabled) #ifdef CONFIG_PM_SLEEP static int brcmf_ops_sdio_suspend(struct device *dev) { + struct sdio_func *func; struct brcmf_bus *bus_if; struct brcmf_sdio_dev *sdiodev; mmc_pm_flag_t sdio_flags; - brcmf_dbg(SDIO, "Enter\n"); + func = container_of(dev, struct sdio_func, dev); + brcmf_dbg(SDIO, "Enter: F%d\n", func->num); + if (func->num != SDIO_FUNC_1) + return 0; + bus_if = dev_get_drvdata(dev); sdiodev = bus_if->bus_priv.sdio; - /* wait for watchdog to go idle */ - if (wait_event_timeout(sdiodev->idle_wait, sdiodev->sleeping, - msecs_to_jiffies(3 * BRCMF_WD_POLL_MS)) == 0) { - brcmf_err("bus still active\n"); - return -EBUSY; - } - /* disable watchdog */ + brcmf_sdiod_freezer_on(sdiodev); brcmf_sdio_wd_timer(sdiodev->bus, 0); - atomic_set(&sdiodev->suspend, true); if (sdiodev->wowl_enabled) { sdio_flags = MMC_PM_KEEP_POWER; @@ -1149,12 +1258,13 @@ static int brcmf_ops_sdio_resume(struct device *dev) { struct brcmf_bus *bus_if = dev_get_drvdata(dev); struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + struct sdio_func *func = container_of(dev, struct sdio_func, dev); - brcmf_dbg(SDIO, "Enter\n"); - if (sdiodev->pdata && sdiodev->pdata->oob_irq_supported) - disable_irq_wake(sdiodev->pdata->oob_irq_nr); - brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS); - atomic_set(&sdiodev->suspend, false); + brcmf_dbg(SDIO, "Enter: F%d\n", func->num); + if (func->num != SDIO_FUNC_2) + return 0; + + brcmf_sdiod_freezer_off(sdiodev); return 0; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c index b59b8c6..9b805c9 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c @@ -700,7 +700,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, /* Do a scan abort to stop the driver's scan engine */ brcmf_dbg(SCAN, "ABORT scan in firmware\n"); memset(¶ms_le, 0, sizeof(params_le)); - memset(params_le.bssid, 0xFF, ETH_ALEN); + eth_broadcast_addr(params_le.bssid); params_le.bss_type = DOT11_BSSTYPE_ANY; params_le.scan_type = 0; params_le.channel_num = cpu_to_le32(1); @@ -866,7 +866,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg, char *ptr; struct brcmf_ssid_le ssid_le; - memset(params_le->bssid, 0xFF, ETH_ALEN); + eth_broadcast_addr(params_le->bssid); params_le->bss_type = DOT11_BSSTYPE_ANY; params_le->scan_type = 0; params_le->channel_num = 0; @@ -1050,10 +1050,6 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif, if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif) vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif; - /* Arm scan timeout timer */ - mod_timer(&cfg->escan_timeout, jiffies + - WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000); - escan_req = false; if (request) { /* scan bss */ @@ -1112,12 +1108,14 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif, } } + /* Arm scan timeout timer */ + mod_timer(&cfg->escan_timeout, jiffies + + WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000); + return 0; scan_out: clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status); - if (timer_pending(&cfg->escan_timeout)) - del_timer_sync(&cfg->escan_timeout); cfg->scan_request = NULL; return err; } @@ -1375,8 +1373,8 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev, BRCMF_ASSOC_PARAMS_FIXED_SIZE; memcpy(profile->bssid, 
params->bssid, ETH_ALEN); } else { - memset(join_params.params_le.bssid, 0xFF, ETH_ALEN); - memset(profile->bssid, 0, ETH_ALEN); + eth_broadcast_addr(join_params.params_le.bssid); + eth_zero_addr(profile->bssid); } /* Channel */ @@ -1850,7 +1848,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, if (sme->bssid) memcpy(&ext_join_params->assoc_le.bssid, sme->bssid, ETH_ALEN); else - memset(&ext_join_params->assoc_le.bssid, 0xFF, ETH_ALEN); + eth_broadcast_addr(ext_join_params->assoc_le.bssid); if (cfg->channel) { ext_join_params->assoc_le.chanspec_num = cpu_to_le32(1); @@ -1895,7 +1893,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, if (sme->bssid) memcpy(join_params.params_le.bssid, sme->bssid, ETH_ALEN); else - memset(join_params.params_le.bssid, 0xFF, ETH_ALEN); + eth_broadcast_addr(join_params.params_le.bssid); if (cfg->channel) { join_params.params_le.chanspec_list[0] = cpu_to_le16(chanspec); @@ -2252,7 +2250,6 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, if (key_idx >= BRCMF_MAX_DEFAULT_KEYS) { /* we ignore this key index in this case */ - brcmf_err("invalid key index (%d)\n", key_idx); return -EINVAL; } @@ -4272,7 +4269,7 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev, return -EIO; memcpy(&scbval.ea, params->mac, ETH_ALEN); - scbval.val = cpu_to_le32(WLAN_REASON_DEAUTH_LEAVING); + scbval.val = cpu_to_le32(params->reason_code); err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON, &scbval, sizeof(scbval)); if (err) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.c b/drivers/net/wireless/brcm80211/brcmfmac/core.c index 2d6e2cc..f8f47dc 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/core.c @@ -944,6 +944,34 @@ fail: return ret; } +static int brcmf_revinfo_read(struct seq_file *s, void *data) +{ + struct brcmf_bus *bus_if = dev_get_drvdata(s->private); + struct brcmf_rev_info *ri = &bus_if->drvr->revinfo; + char drev[BRCMU_DOTREV_LEN]; + char brev[BRCMU_BOARDREV_LEN]; + + seq_printf(s, "vendorid: 0x%04x\n", ri->vendorid); + seq_printf(s, "deviceid: 0x%04x\n", ri->deviceid); + seq_printf(s, "radiorev: %s\n", brcmu_dotrev_str(ri->radiorev, drev)); + seq_printf(s, "chipnum: %u (%x)\n", ri->chipnum, ri->chipnum); + seq_printf(s, "chiprev: %u\n", ri->chiprev); + seq_printf(s, "chippkg: %u\n", ri->chippkg); + seq_printf(s, "corerev: %u\n", ri->corerev); + seq_printf(s, "boardid: 0x%04x\n", ri->boardid); + seq_printf(s, "boardvendor: 0x%04x\n", ri->boardvendor); + seq_printf(s, "boardrev: %s\n", brcmu_boardrev_str(ri->boardrev, brev)); + seq_printf(s, "driverrev: %s\n", brcmu_dotrev_str(ri->driverrev, drev)); + seq_printf(s, "ucoderev: %u\n", ri->ucoderev); + seq_printf(s, "bus: %u\n", ri->bus); + seq_printf(s, "phytype: %u\n", ri->phytype); + seq_printf(s, "phyrev: %u\n", ri->phyrev); + seq_printf(s, "anarev: %u\n", ri->anarev); + seq_printf(s, "nvramrev: %08x\n", ri->nvramrev); + + return 0; +} + int brcmf_bus_start(struct device *dev) { int ret = -1; @@ -974,6 +1002,8 @@ int brcmf_bus_start(struct device *dev) if (ret < 0) goto fail; + brcmf_debugfs_add_entry(drvr, "revinfo", brcmf_revinfo_read); + /* assure we have chipid before feature attach */ if (!bus_if->chip) { bus_if->chip = drvr->revinfo.chipnum; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c index 910fbb5..eb13253 100644 --- 
a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c @@ -236,7 +236,7 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid) brcmf_flowring_block(flow, flowid, false); hash_idx = ring->hash_id; flow->hash[hash_idx].ifidx = BRCMF_FLOWRING_INVALID_IFIDX; - memset(flow->hash[hash_idx].mac, 0, ETH_ALEN); + eth_zero_addr(flow->hash[hash_idx].mac); flow->rings[flowid] = NULL; skb = skb_dequeue(&ring->skblist); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c index effb48e..98d82ec 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c @@ -697,7 +697,7 @@ static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans, else sparams->scan_type = 1; - memset(&sparams->bssid, 0xFF, ETH_ALEN); + eth_broadcast_addr(sparams->bssid); if (ssid.SSID_len) memcpy(sparams->ssid_le.SSID, ssid.SSID, ssid.SSID_len); sparams->ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c index faec35c..257ee70 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c @@ -515,6 +515,7 @@ struct brcmf_sdio { bool txoff; /* Transmit flow-controlled */ struct brcmf_sdio_count sdcnt; bool sr_enabled; /* SaveRestore enabled */ + bool sleeping; u8 tx_hdrlen; /* sdio bus header length for tx packet */ bool txglom; /* host tx glomming enable flag */ @@ -1013,12 +1014,12 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok) brcmf_dbg(SDIO, "Enter: request %s currently %s\n", (sleep ? "SLEEP" : "WAKE"), - (bus->sdiodev->sleeping ? "SLEEP" : "WAKE")); + (bus->sleeping ? "SLEEP" : "WAKE")); /* If SR is enabled control bus state with KSO */ if (bus->sr_enabled) { /* Done if we're already in the requested state */ - if (sleep == bus->sdiodev->sleeping) + if (sleep == bus->sleeping) goto end; /* Going to sleep */ @@ -1026,6 +1027,7 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok) /* Don't sleep if something is pending */ if (atomic_read(&bus->intstatus) || atomic_read(&bus->ipend) > 0 || + bus->ctrl_frame_stat || (!atomic_read(&bus->fcstate) && brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && data_ok(bus))) { @@ -1065,9 +1067,7 @@ end: } else { brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok); } - bus->sdiodev->sleeping = sleep; - if (sleep) - wake_up(&bus->sdiodev->idle_wait); + bus->sleeping = sleep; brcmf_dbg(SDIO, "new state %s\n", (sleep ? 
"SLEEP" : "WAKE")); done: @@ -1909,7 +1909,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) bus->rxpending = true; for (rd->seq_num = bus->rx_seq, rxleft = maxframes; - !bus->rxskip && rxleft && bus->sdiodev->state == BRCMF_STATE_DATA; + !bus->rxskip && rxleft && bus->sdiodev->state == BRCMF_SDIOD_DATA; rd->seq_num++, rxleft--) { /* Handle glomming separately */ @@ -2415,7 +2415,7 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes) } /* Deflow-control stack if needed */ - if ((bus->sdiodev->state == BRCMF_STATE_DATA) && + if ((bus->sdiodev->state == BRCMF_SDIOD_DATA) && bus->txoff && (pktq_len(&bus->txq) < TXLOW)) { bus->txoff = false; brcmf_txflowblock(bus->sdiodev->dev, false); @@ -2503,7 +2503,7 @@ static void brcmf_sdio_bus_stop(struct device *dev) bus->watchdog_tsk = NULL; } - if (sdiodev->state != BRCMF_STATE_NOMEDIUM) { + if (sdiodev->state != BRCMF_SDIOD_NOMEDIUM) { sdio_claim_host(sdiodev->func[1]); /* Enable clock for device interrupts */ @@ -2603,21 +2603,6 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus) return ret; } -static int brcmf_sdio_pm_resume_wait(struct brcmf_sdio_dev *sdiodev) -{ -#ifdef CONFIG_PM_SLEEP - int retry; - - /* Wait for possible resume to complete */ - retry = 0; - while ((atomic_read(&sdiodev->suspend)) && (retry++ != 50)) - msleep(20); - if (atomic_read(&sdiodev->suspend)) - return -EIO; -#endif - return 0; -} - static void brcmf_sdio_dpc(struct brcmf_sdio *bus) { u32 newstatus = 0; @@ -2628,9 +2613,6 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus) brcmf_dbg(TRACE, "Enter\n"); - if (brcmf_sdio_pm_resume_wait(bus->sdiodev)) - return; - sdio_claim_host(bus->sdiodev->func[1]); /* If waiting for HTAVAIL, check status */ @@ -2755,7 +2737,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus) brcmf_sdio_sendfromq(bus, framecnt); } - if ((bus->sdiodev->state != BRCMF_STATE_DATA) || (err != 0)) { + if ((bus->sdiodev->state != BRCMF_SDIOD_DATA) || (err != 0)) { brcmf_err("failed backplane access over SDIO, halting operation\n"); atomic_set(&bus->intstatus, 0); } else if (atomic_read(&bus->intstatus) || @@ -2862,11 +2844,7 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt) qcount[prec] = pktq_plen(&bus->txq, prec); #endif - if (atomic_read(&bus->dpc_tskcnt) == 0) { - atomic_inc(&bus->dpc_tskcnt); - queue_work(bus->brcmf_wq, &bus->datawork); - } - + brcmf_sdio_trigger_dpc(bus); return ret; } @@ -2964,11 +2942,8 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen) bus->ctrl_frame_buf = msg; bus->ctrl_frame_len = msglen; bus->ctrl_frame_stat = true; - if (atomic_read(&bus->dpc_tskcnt) == 0) { - atomic_inc(&bus->dpc_tskcnt); - queue_work(bus->brcmf_wq, &bus->datawork); - } + brcmf_sdio_trigger_dpc(bus); wait_event_interruptible_timeout(bus->ctrl_wait, !bus->ctrl_frame_stat, msecs_to_jiffies(CTL_DONE_TIMEOUT)); @@ -3411,7 +3386,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus, } /* Allow full data communication using DPC from now on. 
*/ - bus->sdiodev->state = BRCMF_STATE_DATA; + brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA); bcmerror = 0; err: @@ -3548,6 +3523,14 @@ done: return err; } +void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus) +{ + if (atomic_read(&bus->dpc_tskcnt) == 0) { + atomic_inc(&bus->dpc_tskcnt); + queue_work(bus->brcmf_wq, &bus->datawork); + } +} + void brcmf_sdio_isr(struct brcmf_sdio *bus) { brcmf_dbg(TRACE, "Enter\n"); @@ -3557,7 +3540,7 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus) return; } - if (bus->sdiodev->state != BRCMF_STATE_DATA) { + if (bus->sdiodev->state != BRCMF_SDIOD_DATA) { brcmf_err("bus is down. we have nothing to do\n"); return; } @@ -3602,9 +3585,8 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus) SDIO_CCCR_INTx, NULL); sdio_release_host(bus->sdiodev->func[1]); - intstatus = - devpend & (INTR_STATUS_FUNC1 | - INTR_STATUS_FUNC2); + intstatus = devpend & (INTR_STATUS_FUNC1 | + INTR_STATUS_FUNC2); } /* If there is something, make like the ISR and @@ -3623,7 +3605,7 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus) } #ifdef DEBUG /* Poll for console output periodically */ - if (bus->sdiodev->state == BRCMF_STATE_DATA && + if (bus->sdiodev->state == BRCMF_SDIOD_DATA && bus->console_interval != 0) { bus->console.count += BRCMF_WD_POLL_MS; if (bus->console.count >= bus->console_interval) { @@ -3667,6 +3649,11 @@ static void brcmf_sdio_dataworker(struct work_struct *work) atomic_set(&bus->dpc_tskcnt, 0); brcmf_sdio_dpc(bus); } + if (brcmf_sdiod_freezing(bus->sdiodev)) { + brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DOWN); + brcmf_sdiod_try_freeze(bus->sdiodev); + brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA); + } } static void @@ -3944,13 +3931,19 @@ static int brcmf_sdio_watchdog_thread(void *data) { struct brcmf_sdio *bus = (struct brcmf_sdio *)data; + int wait; allow_signal(SIGTERM); /* Run until signal received */ + brcmf_sdiod_freezer_count(bus->sdiodev); while (1) { if (kthread_should_stop()) break; - if (!wait_for_completion_interruptible(&bus->watchdog_wait)) { + brcmf_sdiod_freezer_uncount(bus->sdiodev); + wait = wait_for_completion_interruptible(&bus->watchdog_wait); + brcmf_sdiod_freezer_count(bus->sdiodev); + brcmf_sdiod_try_freeze(bus->sdiodev); + if (!wait) { brcmf_sdio_bus_watchdog(bus); /* Count the tick for reference */ bus->sdcnt.tickcnt++; @@ -3971,7 +3964,7 @@ brcmf_sdio_watchdog(unsigned long data) /* Reschedule the watchdog */ if (bus->wd_timer_valid) mod_timer(&bus->timer, - jiffies + BRCMF_WD_POLL_MS * HZ / 1000); + jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS)); } } @@ -4089,6 +4082,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) { int ret; struct brcmf_sdio *bus; + struct workqueue_struct *wq; brcmf_dbg(TRACE, "Enter\n"); @@ -4117,12 +4111,16 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) bus->sgentry_align = sdiodev->pdata->sd_sgentry_align; } - INIT_WORK(&bus->datawork, brcmf_sdio_dataworker); - bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq"); - if (bus->brcmf_wq == NULL) { + /* single-threaded workqueue */ + wq = alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM, + dev_name(&sdiodev->func[1]->dev)); + if (!wq) { brcmf_err("insufficient memory to create txworkqueue\n"); goto fail; } + brcmf_sdiod_freezer_count(sdiodev); + INIT_WORK(&bus->datawork, brcmf_sdio_dataworker); + bus->brcmf_wq = wq; /* attempt to attach to the dongle */ if (!(brcmf_sdio_probe_attach(bus))) { @@ -4143,7 +4141,8 @@ struct brcmf_sdio *brcmf_sdio_probe(struct 
brcmf_sdio_dev *sdiodev) /* Initialize watchdog thread */ init_completion(&bus->watchdog_wait); bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread, - bus, "brcmf_watchdog"); + bus, "brcmf_wdog/%s", + dev_name(&sdiodev->func[1]->dev)); if (IS_ERR(bus->watchdog_tsk)) { pr_warn("brcmf_watchdog thread failed to start\n"); bus->watchdog_tsk = NULL; @@ -4242,7 +4241,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus) destroy_workqueue(bus->brcmf_wq); if (bus->ci) { - if (bus->sdiodev->state != BRCMF_STATE_NOMEDIUM) { + if (bus->sdiodev->state != BRCMF_SDIOD_NOMEDIUM) { sdio_claim_host(bus->sdiodev->func[1]); brcmf_sdio_clkctl(bus, CLK_AVAIL, false); /* Leave the device in state where it is @@ -4277,7 +4276,7 @@ void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick) } /* don't start the wd until fw is loaded */ - if (bus->sdiodev->state != BRCMF_STATE_DATA) + if (bus->sdiodev->state != BRCMF_SDIOD_DATA) return; if (wdtick) { @@ -4290,16 +4289,28 @@ void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick) dynamically changed or in the first instance */ bus->timer.expires = - jiffies + BRCMF_WD_POLL_MS * HZ / 1000; + jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS); add_timer(&bus->timer); } else { /* Re arm the timer, at last watchdog period */ mod_timer(&bus->timer, - jiffies + BRCMF_WD_POLL_MS * HZ / 1000); + jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS)); } bus->wd_timer_valid = true; bus->save_ms = wdtick; } } + +int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep) +{ + int ret; + + sdio_claim_host(bus->sdiodev->func[1]); + ret = brcmf_sdio_bus_sleep(bus, sleep, false); + sdio_release_host(bus->sdiodev->func[1]); + + return ret; +} + diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio.h index ec2586a..7328478 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.h @@ -155,11 +155,17 @@ /* watchdog polling interval in ms */ #define BRCMF_WD_POLL_MS 10 -/* The state of the bus */ -enum brcmf_sdio_state { - BRCMF_STATE_DOWN, /* Device available, still initialising */ - BRCMF_STATE_DATA, /* Ready for data transfers, DPC enabled */ - BRCMF_STATE_NOMEDIUM /* No medium access to dongle possible */ +/** + * enum brcmf_sdiod_state - the state of the bus. + * + * @BRCMF_SDIOD_DOWN: Device can be accessed, no DPC. + * @BRCMF_SDIOD_DATA: Ready for data transfers, DPC enabled. + * @BRCMF_SDIOD_NOMEDIUM: No medium access to dongle possible. 
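 * (As implemented in brcmf_sdiod_change_state() earlier in these hunks: a
 * DOWN -> DATA transition signals BRCMF_BUS_UP on the bus interface, leaving
 * DATA signals BRCMF_BUS_DOWN, and once NOMEDIUM is reached further state
 * change requests are ignored.)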
+ */ +enum brcmf_sdiod_state { + BRCMF_SDIOD_DOWN, + BRCMF_SDIOD_DATA, + BRCMF_SDIOD_NOMEDIUM }; struct brcmf_sdreg { @@ -169,15 +175,13 @@ struct brcmf_sdreg { }; struct brcmf_sdio; +struct brcmf_sdiod_freezer; struct brcmf_sdio_dev { struct sdio_func *func[SDIO_MAX_FUNCS]; u8 num_funcs; /* Supported funcs on client */ u32 sbwad; /* Save backplane window address */ struct brcmf_sdio *bus; - atomic_t suspend; /* suspend flag */ - bool sleeping; - wait_queue_head_t idle_wait; struct device *dev; struct brcmf_bus *bus_if; struct brcmfmac_sdio_platform_data *pdata; @@ -194,7 +198,8 @@ struct brcmf_sdio_dev { char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN]; char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN]; bool wowl_enabled; - enum brcmf_sdio_state state; + enum brcmf_sdiod_state state; + struct brcmf_sdiod_freezer *freezer; }; /* sdio core registers */ @@ -337,6 +342,28 @@ int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address, /* Issue an abort to the specified function */ int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn); +void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev, + enum brcmf_sdiod_state state); +#ifdef CONFIG_PM_SLEEP +bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev); +void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev); +void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev); +void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev); +#else +static inline bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev) +{ + return false; +} +static inline void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev) +{ +} +static inline void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev) +{ +} +static inline void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev) +{ +} +#endif /* CONFIG_PM_SLEEP */ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev); void brcmf_sdio_remove(struct brcmf_sdio *bus); @@ -344,5 +371,7 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus); void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick); void brcmf_sdio_wowl_config(struct device *dev, bool enabled); +int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep); +void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus); #endif /* BRCMFMAC_SDIO_H */ diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c index eb8584a..c84af1d 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c @@ -4668,7 +4668,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core, brcms_c_coredisable(wlc_hw); /* Match driver "down" state */ - bcma_core_pci_down(wlc_hw->d11core->bus); + bcma_host_pci_down(wlc_hw->d11core->bus); /* turn off pll and xtal to match driver "down" state */ brcms_b_xtal(wlc_hw, OFF); @@ -4959,7 +4959,7 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw) * Configure pci/pcmcia here instead of in brcms_c_attach() * to allow mfg hotswap: down, hotswap (chip power cycle), up. 
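 * (For reference, the bcma changes in these brcmsmac/b43 hunks track the
 * renamed host-PCI API: bcma_core_pci_up()/bcma_core_pci_down() become
 * bcma_host_pci_up()/bcma_host_pci_down(), and bcma_core_pci_irq_ctl() now
 * takes the struct bcma_bus pointer directly instead of &bus->drv_pci[0].)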
*/ - bcma_core_pci_irq_ctl(&wlc_hw->d11core->bus->drv_pci[0], wlc_hw->d11core, + bcma_core_pci_irq_ctl(wlc_hw->d11core->bus, wlc_hw->d11core, true); /* @@ -4969,12 +4969,12 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw) */ if (brcms_b_radio_read_hwdisabled(wlc_hw)) { /* put SB PCI in down state again */ - bcma_core_pci_down(wlc_hw->d11core->bus); + bcma_host_pci_down(wlc_hw->d11core->bus); brcms_b_xtal(wlc_hw, OFF); return -ENOMEDIUM; } - bcma_core_pci_up(wlc_hw->d11core->bus); + bcma_host_pci_up(wlc_hw->d11core->bus); /* reset the d11 core */ brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS); @@ -5171,7 +5171,7 @@ static int brcms_b_down_finish(struct brcms_hardware *wlc_hw) /* turn off primary xtal and pll */ if (!wlc_hw->noreset) { - bcma_core_pci_down(wlc_hw->d11core->bus); + bcma_host_pci_down(wlc_hw->d11core->bus); brcms_b_xtal(wlc_hw, OFF); } } diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c index 084f18f..99dac9b 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c @@ -23041,10 +23041,7 @@ static void wlc_phy_rssi_cal_nphy_rev2(struct brcms_phy *pi, u8 rssi_type) else if (rssi_ctrl_state[0] == RADIO_2055_WBRSSI_G1_SEL) wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1, NPHY_RSSI_SEL_W1); - else if (rssi_ctrl_state[0] == RADIO_2055_WBRSSI_G2_SEL) - wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1, - NPHY_RSSI_SEL_W2); - else + else /* RADIO_2055_WBRSSI_G2_SEL */ wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1, NPHY_RSSI_SEL_W2); if (rssi_ctrl_state[1] == RADIO_2055_NBRSSI_SEL) @@ -23053,13 +23050,9 @@ static void wlc_phy_rssi_cal_nphy_rev2(struct brcms_phy *pi, u8 rssi_type) else if (rssi_ctrl_state[1] == RADIO_2055_WBRSSI_G1_SEL) wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2, NPHY_RSSI_SEL_W1); - else if (rssi_ctrl_state[1] == RADIO_2055_WBRSSI_G2_SEL) + else /* RADIO_2055_WBRSSI_G1_SEL */ wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2, NPHY_RSSI_SEL_W2); - else - wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2, - NPHY_RSSI_SEL_W2); - wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_OFF, rssi_type); write_phy_reg(pi, 0x91, rfctrlintc_state[0]); diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c index 4a47c7f..89bc18c 100644 --- a/drivers/net/wireless/cw1200/sta.c +++ b/drivers/net/wireless/cw1200/sta.c @@ -293,7 +293,7 @@ void cw1200_remove_interface(struct ieee80211_hw *dev, } priv->vif = NULL; priv->mode = NL80211_IFTYPE_MONITOR; - memset(priv->mac_addr, 0, ETH_ALEN); + eth_zero_addr(priv->mac_addr); memset(&priv->p2p_ps_modeinfo, 0, sizeof(priv->p2p_ps_modeinfo)); cw1200_free_keys(priv); cw1200_setup_mac(priv); diff --git a/drivers/net/wireless/cw1200/txrx.c b/drivers/net/wireless/cw1200/txrx.c index 0bd5411..d28bd49 100644 --- a/drivers/net/wireless/cw1200/txrx.c +++ b/drivers/net/wireless/cw1200/txrx.c @@ -1429,7 +1429,7 @@ void cw1200_link_id_gc_work(struct work_struct *work) priv->link_id_map &= ~mask; priv->sta_asleep_mask &= ~mask; priv->pspoll_mask &= ~mask; - memset(map_link.mac_addr, 0, ETH_ALEN); + eth_zero_addr(map_link.mac_addr); spin_unlock_bh(&priv->ps_state_lock); reset.link_id = i + 1; wsm_reset(priv, &reset); diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c index 8bde776..055e11d 100644 --- a/drivers/net/wireless/hostap/hostap_80211_tx.c +++ b/drivers/net/wireless/hostap/hostap_80211_tx.c @@ -174,8 +174,8 @@ 
netdev_tx_t hostap_data_start_xmit(struct sk_buff *skb, /* send broadcast and multicast frames to broadcast RA, if * configured; otherwise, use unicast RA of the WDS link */ if ((local->wds_type & HOSTAP_WDS_BROADCAST_RA) && - skb->data[0] & 0x01) - memset(&hdr.addr1, 0xff, ETH_ALEN); + is_multicast_ether_addr(skb->data)) + eth_broadcast_addr(hdr.addr1); else if (iface->type == HOSTAP_INTERFACE_WDS) memcpy(&hdr.addr1, iface->u.wds.remote_addr, ETH_ALEN); diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c index fd8d83d..c995ace 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/hostap/hostap_ap.c @@ -309,7 +309,7 @@ void hostap_deauth_all_stas(struct net_device *dev, struct ap_data *ap, int i; PDEBUG(DEBUG_AP, "%s: Deauthenticate all stations\n", dev->name); - memset(addr, 0xff, ETH_ALEN); + eth_broadcast_addr(addr); resp = cpu_to_le16(WLAN_REASON_PREV_AUTH_NOT_VALID); @@ -1015,8 +1015,8 @@ static void prism2_send_mgmt(struct net_device *dev, memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* SA */ } else if (ieee80211_is_ctl(hdr->frame_control)) { /* control:ACK does not have addr2 or addr3 */ - memset(hdr->addr2, 0, ETH_ALEN); - memset(hdr->addr3, 0, ETH_ALEN); + eth_zero_addr(hdr->addr2); + eth_zero_addr(hdr->addr3); } else { memcpy(hdr->addr2, dev->dev_addr, ETH_ALEN); /* SA */ memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* BSSID */ @@ -1601,7 +1601,7 @@ static void handle_assoc(local_info_t *local, struct sk_buff *skb, memcpy(prev_ap, pos, ETH_ALEN); pos++; pos++; pos++; left -= 6; } else - memset(prev_ap, 0, ETH_ALEN); + eth_zero_addr(prev_ap); if (left >= 2) { unsigned int ileft; diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c index de7c4ff..7635ac4 100644 --- a/drivers/net/wireless/hostap/hostap_info.c +++ b/drivers/net/wireless/hostap/hostap_info.c @@ -442,7 +442,7 @@ static void handle_info_queue_linkstatus(local_info_t *local) } else { netif_carrier_off(local->dev); netif_carrier_off(local->ddev); - memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); + eth_zero_addr(wrqu.ap_addr.sa_data); } wrqu.ap_addr.sa_family = ARPHRD_ETHER; diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c index 52919ad..01de1a3 100644 --- a/drivers/net/wireless/hostap/hostap_main.c +++ b/drivers/net/wireless/hostap/hostap_main.c @@ -224,7 +224,7 @@ int prism2_wds_del(local_info_t *local, u8 *remote_addr, if (selected) { if (do_not_remove) - memset(selected->u.wds.remote_addr, 0, ETH_ALEN); + eth_zero_addr(selected->u.wds.remote_addr); else { hostap_remove_interface(selected->dev, rtnl_locked, 0); local->wds_connections--; @@ -798,7 +798,6 @@ static void prism2_tx_timeout(struct net_device *dev) const struct header_ops hostap_80211_ops = { .create = eth_header, - .rebuild = eth_rebuild_header, .cache = eth_header_cache, .cache_update = eth_header_cache_update, .parse = hostap_80211_header_parse, @@ -1088,7 +1087,7 @@ int prism2_sta_deauth(local_info_t *local, u16 reason) ret = prism2_sta_send_mgmt(local, local->bssid, IEEE80211_STYPE_DEAUTH, (u8 *) &val, 2); - memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); + eth_zero_addr(wrqu.ap_addr.sa_data); wireless_send_event(local->dev, SIOCGIWAP, &wrqu, NULL); return ret; } diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h index 5790401..ca25283 100644 --- a/drivers/net/wireless/hostap/hostap_wlan.h +++ b/drivers/net/wireless/hostap/hostap_wlan.h @@ -4,6 +4,7 @@ 
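The hostap conversions above swap open-coded memset() calls and bit tests for the shared <linux/etherdevice.h> helpers. A short equivalence sketch, assuming nothing beyond what those helpers already guarantee:

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* Same effect as the code being replaced, just via the common helpers. */
static void example_addr_helpers(u8 *addr, const struct sk_buff *skb)
{
        eth_zero_addr(addr);                     /* memset(addr, 0, ETH_ALEN)    */
        eth_broadcast_addr(addr);                /* memset(addr, 0xff, ETH_ALEN) */

        if (is_multicast_ether_addr(skb->data))  /* skb->data[0] & 0x01          */
                eth_broadcast_addr(addr);
}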
#include <linux/interrupt.h> #include <linux/wireless.h> #include <linux/netdevice.h> +#include <linux/etherdevice.h> #include <linux/mutex.h> #include <net/iw_handler.h> #include <net/ieee80211_radiotap.h> @@ -85,16 +86,16 @@ struct hfa384x_rx_frame { /* 802.11 */ __le16 frame_control; __le16 duration_id; - u8 addr1[6]; - u8 addr2[6]; - u8 addr3[6]; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; __le16 seq_ctrl; - u8 addr4[6]; + u8 addr4[ETH_ALEN]; __le16 data_len; /* 802.3 */ - u8 dst_addr[6]; - u8 src_addr[6]; + u8 dst_addr[ETH_ALEN]; + u8 src_addr[ETH_ALEN]; __be16 len; /* followed by frame data; max 2304 bytes */ @@ -114,16 +115,16 @@ struct hfa384x_tx_frame { /* 802.11 */ __le16 frame_control; /* parts not used */ __le16 duration_id; - u8 addr1[6]; - u8 addr2[6]; /* filled by firmware */ - u8 addr3[6]; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; /* filled by firmware */ + u8 addr3[ETH_ALEN]; __le16 seq_ctrl; /* filled by firmware */ - u8 addr4[6]; + u8 addr4[ETH_ALEN]; __le16 data_len; /* 802.3 */ - u8 dst_addr[6]; - u8 src_addr[6]; + u8 dst_addr[ETH_ALEN]; + u8 src_addr[ETH_ALEN]; __be16 len; /* followed by frame data; max 2304 bytes */ @@ -156,7 +157,7 @@ struct hfa384x_hostscan_request { } __packed; struct hfa384x_join_request { - u8 bssid[6]; + u8 bssid[ETH_ALEN]; __le16 channel; } __packed; @@ -228,7 +229,7 @@ struct hfa384x_scan_result { __le16 chid; __le16 anl; __le16 sl; - u8 bssid[6]; + u8 bssid[ETH_ALEN]; __le16 beacon_interval; __le16 capability; __le16 ssid_len; @@ -241,7 +242,7 @@ struct hfa384x_hostscan_result { __le16 chid; __le16 anl; __le16 sl; - u8 bssid[6]; + u8 bssid[ETH_ALEN]; __le16 beacon_interval; __le16 capability; __le16 ssid_len; @@ -824,7 +825,7 @@ struct local_info { #define PRISM2_INFO_PENDING_SCANRESULTS 1 int prev_link_status; /* previous received LinkStatus info */ int prev_linkstatus_connected; - u8 preferred_ap[6]; /* use this AP if possible */ + u8 preferred_ap[ETH_ALEN]; /* use this AP if possible */ #ifdef PRISM2_CALLBACK void *callback_data; /* Can be used in callbacks; e.g., allocate diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 6fabea0..08eb229 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -2147,8 +2147,8 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status) return; } - memset(priv->bssid, 0, ETH_ALEN); - memset(priv->ieee->bssid, 0, ETH_ALEN); + eth_zero_addr(priv->bssid); + eth_zero_addr(priv->ieee->bssid); netif_carrier_off(priv->net_dev); netif_stop_queue(priv->net_dev); @@ -6956,7 +6956,7 @@ static int ipw2100_wx_get_wap(struct net_device *dev, wrqu->ap_addr.sa_family = ARPHRD_ETHER; memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN); } else - memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); + eth_zero_addr(wrqu->ap_addr.sa_data); IPW_DEBUG_WX("Getting WAP BSSID: %pM\n", wrqu->ap_addr.sa_data); return 0; @@ -8300,7 +8300,7 @@ static void ipw2100_wx_event_work(struct work_struct *work) priv->status & STATUS_RF_KILL_MASK || ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, &priv->bssid, &len)) { - memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); + eth_zero_addr(wrqu.ap_addr.sa_data); } else { /* We now have the BSSID, so can finish setting to the full * associated state */ diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index 67cad9b..39f3e6f 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -1964,7 
+1964,7 @@ static void notify_wx_assoc_event(struct ipw_priv *priv) if (priv->status & STATUS_ASSOCIATED) memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN); else - memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); + eth_zero_addr(wrqu.ap_addr.sa_data); wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); } @@ -7400,7 +7400,7 @@ static int ipw_associate_network(struct ipw_priv *priv, memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN); if (priv->ieee->iw_mode == IW_MODE_ADHOC) { - memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN); + eth_broadcast_addr(priv->assoc_request.dest); priv->assoc_request.atim_window = cpu_to_le16(network->atim_window); } else { memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN); @@ -8986,7 +8986,7 @@ static int ipw_wx_get_wap(struct net_device *dev, wrqu->ap_addr.sa_family = ARPHRD_ETHER; memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN); } else - memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); + eth_zero_addr(wrqu->ap_addr.sa_data); IPW_DEBUG_WX("Getting WAP BSSID: %pM\n", wrqu->ap_addr.sa_data); diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c index eaaeea1..bac60b2 100644 --- a/drivers/net/wireless/iwlegacy/4965-rs.c +++ b/drivers/net/wireless/iwlegacy/4965-rs.c @@ -1678,7 +1678,7 @@ il4965_rs_stay_in_table(struct il_lq_sta *lq_sta, bool force_search) lq_sta->total_success > lq_sta->max_success_limit || (!lq_sta->search_better_tbl && lq_sta->flush_timer && flush_interval_passed)) { - D_RATE("LQ: stay is expired %d %d %d\n:", + D_RATE("LQ: stay is expired %d %d %d\n", lq_sta->total_failed, lq_sta->total_success, flush_interval_passed); diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c index 2c4fa49..8871145 100644 --- a/drivers/net/wireless/iwlegacy/common.c +++ b/drivers/net/wireless/iwlegacy/common.c @@ -4634,7 +4634,7 @@ il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) il->vif = NULL; il->iw_mode = NL80211_IFTYPE_UNSPECIFIED; il_teardown_interface(il, vif); - memset(il->bssid, 0, ETH_ALEN); + eth_zero_addr(il->bssid); D_MAC80211("leave\n"); mutex_unlock(&il->mutex); diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c index c4d6dd7..234e30f 100644 --- a/drivers/net/wireless/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/iwlwifi/dvm/main.c @@ -1549,7 +1549,7 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv) table.blink1, table.blink2, table.ilink1, table.ilink2, table.bcon_time, table.gp1, table.gp2, table.gp3, table.ucode_ver, - table.hw_ver, table.brd_ver); + table.hw_ver, 0, table.brd_ver); IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id, desc_lookup(table.error_id)); IWL_ERR(priv, "0x%08X | uPc\n", table.pc); diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 97e38d2..0597a9c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -77,8 +77,8 @@ #define IWL3160_UCODE_API_OK 10 /* Lowest firmware API version supported */ -#define IWL7260_UCODE_API_MIN 9 -#define IWL3160_UCODE_API_MIN 9 +#define IWL7260_UCODE_API_MIN 10 +#define IWL3160_UCODE_API_MIN 10 /* NVM versions */ #define IWL7260_NVM_VERSION 0x0a1d diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c index 2f7fe81..d8dfa6d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/iwlwifi/iwl-8000.c @@ -75,7 +75,7 @@ #define IWL8000_UCODE_API_OK 10 /* Lowest 
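Raising the IWL7260/3160 (and, just below, IWL8000) UCODE_API_MIN from 9 to 10 only changes which firmware images pass the load-time range check. A reduced sketch of that check, hedged because the actual comparison lives outside these hunks:

/* Illustrative: firmware advertising an API version below the new minimum
 * is rejected before any ucode section is loaded. */
static bool example_api_in_range(u32 api_ver, u32 api_min, u32 api_max)
{
        return api_ver >= api_min && api_ver <= api_max;
}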
firmware API version supported */ -#define IWL8000_UCODE_API_MIN 9 +#define IWL8000_UCODE_API_MIN 10 /* NVM versions */ #define IWL8000_NVM_VERSION 0x0a1d diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h index 78bd41b..53555a0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h @@ -431,11 +431,11 @@ TRACE_EVENT(iwlwifi_dev_ucode_error, TP_PROTO(const struct device *dev, u32 desc, u32 tsf_low, u32 data1, u32 data2, u32 line, u32 blink1, u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time, - u32 gp1, u32 gp2, u32 gp3, u32 ucode_ver, u32 hw_ver, + u32 gp1, u32 gp2, u32 gp3, u32 major, u32 minor, u32 hw_ver, u32 brd_ver), TP_ARGS(dev, desc, tsf_low, data1, data2, line, blink1, blink2, ilink1, ilink2, bcon_time, gp1, gp2, - gp3, ucode_ver, hw_ver, brd_ver), + gp3, major, minor, hw_ver, brd_ver), TP_STRUCT__entry( DEV_ENTRY __field(u32, desc) @@ -451,7 +451,8 @@ TRACE_EVENT(iwlwifi_dev_ucode_error, __field(u32, gp1) __field(u32, gp2) __field(u32, gp3) - __field(u32, ucode_ver) + __field(u32, major) + __field(u32, minor) __field(u32, hw_ver) __field(u32, brd_ver) ), @@ -470,21 +471,22 @@ TRACE_EVENT(iwlwifi_dev_ucode_error, __entry->gp1 = gp1; __entry->gp2 = gp2; __entry->gp3 = gp3; - __entry->ucode_ver = ucode_ver; + __entry->major = major; + __entry->minor = minor; __entry->hw_ver = hw_ver; __entry->brd_ver = brd_ver; ), TP_printk("[%s] #%02d %010u data 0x%08X 0x%08X line %u, " "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X " - "bcon_tm %010u gp 0x%08X 0x%08X 0x%08X uCode 0x%08X " - "hw 0x%08X brd 0x%08X", + "bcon_tm %010u gp 0x%08X 0x%08X 0x%08X major 0x%08X " + "minor 0x%08X hw 0x%08X brd 0x%08X", __get_str(dev), __entry->desc, __entry->tsf_low, __entry->data1, __entry->data2, __entry->line, __entry->blink1, __entry->blink2, __entry->ilink1, __entry->ilink2, __entry->bcon_time, __entry->gp1, __entry->gp2, - __entry->gp3, __entry->ucode_ver, __entry->hw_ver, - __entry->brd_ver) + __entry->gp3, __entry->major, __entry->minor, + __entry->hw_ver, __entry->brd_ver) ); TRACE_EVENT(iwlwifi_dev_ucode_event, diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index 996e7f1..141331d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -175,6 +175,8 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv) kfree(drv->fw.dbg_dest_tlv); for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) kfree(drv->fw.dbg_conf_tlv[i]); + for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) + kfree(drv->fw.dbg_trigger_tlv[i]); for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) iwl_free_fw_img(drv, drv->fw.img + i); @@ -293,8 +295,10 @@ struct iwl_firmware_pieces { /* FW debug data parsed for driver usage */ struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; - struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX]; - size_t dbg_conf_tlv_len[FW_DBG_MAX]; + struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; + size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX]; + struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; + size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; }; /* @@ -842,6 +846,23 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, capa->n_scan_channels = le32_to_cpup((__le32 *)tlv_data); break; + case IWL_UCODE_TLV_FW_VERSION: { + __le32 *ptr = (void *)tlv_data; + u32 major, minor; + u8 local_comp; + + if (tlv_len != sizeof(u32) * 3) + goto invalid_tlv_len; + + major = le32_to_cpup(ptr++); + minor = le32_to_cpup(ptr++); + 
local_comp = le32_to_cpup(ptr); + + snprintf(drv->fw.fw_version, + sizeof(drv->fw.fw_version), "%u.%u.%u", + major, minor, local_comp); + break; + } case IWL_UCODE_TLV_FW_DBG_DEST: { struct iwl_fw_dbg_dest_tlv *dest = (void *)tlv_data; @@ -897,6 +918,31 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, pieces->dbg_conf_tlv_len[conf->id] = tlv_len; break; } + case IWL_UCODE_TLV_FW_DBG_TRIGGER: { + struct iwl_fw_dbg_trigger_tlv *trigger = + (void *)tlv_data; + u32 trigger_id = le32_to_cpu(trigger->id); + + if (trigger_id >= ARRAY_SIZE(drv->fw.dbg_trigger_tlv)) { + IWL_ERR(drv, + "Skip unknown trigger: %u\n", + trigger->id); + break; + } + + if (pieces->dbg_trigger_tlv[trigger_id]) { + IWL_ERR(drv, + "Ignore duplicate dbg trigger %u\n", + trigger->id); + break; + } + + IWL_INFO(drv, "Found debug trigger: %u\n", trigger->id); + + pieces->dbg_trigger_tlv[trigger_id] = trigger; + pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len; + break; + } case IWL_UCODE_TLV_SEC_RT_USNIFFER: usniffer_images = true; iwl_store_ucode_sec(pieces, tlv_data, @@ -1107,7 +1153,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) if (err) goto try_again; - api_ver = IWL_UCODE_API(drv->fw.ucode_ver); + if (drv->fw.ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION) + api_ver = drv->fw.ucode_ver; + else + api_ver = IWL_UCODE_API(drv->fw.ucode_ver); /* * api_ver should match the api version forming part of the @@ -1178,6 +1227,19 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) } } + for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) { + if (pieces->dbg_trigger_tlv[i]) { + drv->fw.dbg_trigger_tlv_len[i] = + pieces->dbg_trigger_tlv_len[i]; + drv->fw.dbg_trigger_tlv[i] = + kmemdup(pieces->dbg_trigger_tlv[i], + drv->fw.dbg_trigger_tlv_len[i], + GFP_KERNEL); + if (!drv->fw.dbg_trigger_tlv[i]) + goto out_free_fw; + } + } + /* Now that we can no longer fail, copy information */ /* diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h index 919a254..37b38a5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h @@ -82,6 +82,8 @@ * sections like this in a single file. * @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers * @IWL_FW_ERROR_DUMP_MEM: chunk of memory + * @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump. + * Structured as &struct iwl_fw_error_dump_trigger_desc. */ enum iwl_fw_error_dump_type { /* 0 is deprecated */ @@ -94,6 +96,7 @@ enum iwl_fw_error_dump_type { IWL_FW_ERROR_DUMP_TXF = 7, IWL_FW_ERROR_DUMP_FH_REGS = 8, IWL_FW_ERROR_DUMP_MEM = 9, + IWL_FW_ERROR_DUMP_ERROR_INFO = 10, IWL_FW_ERROR_DUMP_MAX, }; @@ -230,4 +233,47 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data) return (void *)(data->data + le32_to_cpu(data->len)); } +/** + * enum iwl_fw_dbg_trigger - triggers available + * + * @FW_DBG_TRIGGER_USER: trigger log collection by user + * This should not be defined as a trigger to the driver, but a value the + * driver should set to indicate that the trigger was initiated by the + * user. + * @FW_DBG_TRIGGER_FW_ASSERT: trigger log collection when the firmware asserts + * @FW_DBG_TRIGGER_MISSED_BEACONS: trigger log collection when beacons are + * missed. + * @FW_DBG_TRIGGER_CHANNEL_SWITCH: trigger log collection upon channel switch. + * @FW_DBG_TRIGGER_FW_NOTIF: trigger log collection when the firmware sends a + * command response or a notification. 
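With IWL_UCODE_TLV_API_NEW_VERSION set, the hunks above stop unpacking an API field from the packed ucode_ver word and take the version verbatim. A minimal check sketch, mirroring the test the driver itself performs on the parsed capability words:

/* Sketch: how the new-format flag is consulted; api[0] is the first
 * capability word filled during TLV parsing. */
static bool example_uses_new_version(const struct iwl_fw *fw)
{
        return fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION;
}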
+ * @FW_DB_TRIGGER_RESERVED: reserved + * @FW_DBG_TRIGGER_STATS: trigger log collection upon statistics threshold. + * @FW_DBG_TRIGGER_RSSI: trigger log collection when the rssi of the beacon + * goes below a threshold. + */ +enum iwl_fw_dbg_trigger { + FW_DBG_TRIGGER_INVALID = 0, + FW_DBG_TRIGGER_USER, + FW_DBG_TRIGGER_FW_ASSERT, + FW_DBG_TRIGGER_MISSED_BEACONS, + FW_DBG_TRIGGER_CHANNEL_SWITCH, + FW_DBG_TRIGGER_FW_NOTIF, + FW_DB_TRIGGER_RESERVED, + FW_DBG_TRIGGER_STATS, + FW_DBG_TRIGGER_RSSI, + + /* must be last */ + FW_DBG_TRIGGER_MAX, +}; + +/** + * struct iwl_fw_error_dump_trigger_desc - describes the trigger condition + * @type: %enum iwl_fw_dbg_trigger + * @data: raw data about what happened + */ +struct iwl_fw_error_dump_trigger_desc { + __le32 type; + u8 data[]; +}; + #endif /* __fw_error_dump_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index 016d913..5ea3818 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h @@ -66,6 +66,7 @@ #define __iwl_fw_file_h__ #include <linux/netdevice.h> +#include <linux/nl80211.h> /* v1/v2 uCode file layout */ struct iwl_ucode_header { @@ -133,8 +134,10 @@ enum iwl_ucode_tlv_type { IWL_UCODE_TLV_N_SCAN_CHANNELS = 31, IWL_UCODE_TLV_SEC_RT_USNIFFER = 34, IWL_UCODE_TLV_SDIO_ADMA_ADDR = 35, + IWL_UCODE_TLV_FW_VERSION = 36, IWL_UCODE_TLV_FW_DBG_DEST = 38, IWL_UCODE_TLV_FW_DBG_CONF = 39, + IWL_UCODE_TLV_FW_DBG_TRIGGER = 40, }; struct iwl_ucode_tlv { @@ -156,7 +159,8 @@ struct iwl_tlv_ucode_header { __le32 zero; __le32 magic; u8 human_readable[FW_VER_HUMAN_READABLE_SZ]; - __le32 ver; /* major/minor/API/serial */ + /* major/minor/API/serial or major in new format */ + __le32 ver; __le32 build; __le64 ignore; /* @@ -237,7 +241,6 @@ enum iwl_ucode_tlv_flag { * enum iwl_ucode_tlv_api - ucode api * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex * @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit. - * @IWL_UCODE_TLV_API_LMAC_SCAN: This ucode uses LMAC unified scan API. * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif. * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time * longer than the passive one, which is essential for fragmented scan. @@ -250,11 +253,12 @@ enum iwl_ucode_tlv_flag { * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too. * @IWL_UCODE_TLV_API_ASYNC_DTM: Async temperature notifications are supported. 
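The new IWL_FW_ERROR_DUMP_ERROR_INFO chunk carries a struct iwl_fw_error_dump_trigger_desc describing what fired the dump. A hypothetical fill helper, assuming only the two fields defined above (the function itself is not part of the patch):

#include <linux/string.h>

/* Illustrative: record the trigger type plus an optional free-form payload
 * in the flexible data[] tail. */
static void example_fill_trigger_desc(struct iwl_fw_error_dump_trigger_desc *desc,
                                      enum iwl_fw_dbg_trigger trig,
                                      const void *data, size_t len)
{
        desc->type = cpu_to_le32(trig);
        if (len)
                memcpy(desc->data, data, len);
}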
* @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params + * @IWL_UCODE_TLV_API_STATS_V10: uCode supports/uses statistics API version 10 + * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format */ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3), IWL_UCODE_TLV_API_DISABLE_STA_TX = BIT(5), - IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6), IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7), IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), IWL_UCODE_TLV_API_HDC_PHASE_0 = BIT(10), @@ -263,6 +267,8 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16), IWL_UCODE_TLV_API_ASYNC_DTM = BIT(17), IWL_UCODE_TLV_API_LQ_SS_PARAMS = BIT(18), + IWL_UCODE_TLV_API_STATS_V10 = BIT(19), + IWL_UCODE_TLV_API_NEW_VERSION = BIT(20), }; /** @@ -284,6 +290,8 @@ enum iwl_ucode_tlv_api { * which also implies support for the scheduler configuration command * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command + * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics + * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running */ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0), @@ -298,6 +306,8 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12), IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = BIT(13), IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18), + IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = BIT(22), + IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = BIT(28), }; /* The default calibrate table size if not specified by firmware file */ @@ -450,44 +460,129 @@ struct iwl_fw_dbg_conf_hcmd { } __packed; /** - * struct iwl_fw_dbg_trigger - a TLV that describes a debug configuration + * enum iwl_fw_dbg_trigger_mode - triggers functionalities * - * @enabled: is this trigger enabled - * @reserved: - * @len: length, in bytes, of the %trigger field - * @trigger: pointer to a trigger struct + * @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism + * @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data */ -struct iwl_fw_dbg_trigger { - u8 enabled; - u8 reserved; - u8 len; - u8 trigger[0]; +enum iwl_fw_dbg_trigger_mode { + IWL_FW_DBG_TRIGGER_START = BIT(0), + IWL_FW_DBG_TRIGGER_STOP = BIT(1), +}; + +/** + * enum iwl_fw_dbg_trigger_vif_type - define the VIF type for a trigger + * @IWL_FW_DBG_CONF_VIF_ANY: any vif type + * @IWL_FW_DBG_CONF_VIF_IBSS: IBSS mode + * @IWL_FW_DBG_CONF_VIF_STATION: BSS mode + * @IWL_FW_DBG_CONF_VIF_AP: AP mode + * @IWL_FW_DBG_CONF_VIF_P2P_CLIENT: P2P Client mode + * @IWL_FW_DBG_CONF_VIF_P2P_GO: P2P GO mode + * @IWL_FW_DBG_CONF_VIF_P2P_DEVICE: P2P device + */ +enum iwl_fw_dbg_trigger_vif_type { + IWL_FW_DBG_CONF_VIF_ANY = NL80211_IFTYPE_UNSPECIFIED, + IWL_FW_DBG_CONF_VIF_IBSS = NL80211_IFTYPE_ADHOC, + IWL_FW_DBG_CONF_VIF_STATION = NL80211_IFTYPE_STATION, + IWL_FW_DBG_CONF_VIF_AP = NL80211_IFTYPE_AP, + IWL_FW_DBG_CONF_VIF_P2P_CLIENT = NL80211_IFTYPE_P2P_CLIENT, + IWL_FW_DBG_CONF_VIF_P2P_GO = NL80211_IFTYPE_P2P_GO, + IWL_FW_DBG_CONF_VIF_P2P_DEVICE = NL80211_IFTYPE_P2P_DEVICE, +}; + +/** + * struct iwl_fw_dbg_trigger_tlv - a TLV that describes the trigger + * @id: %enum iwl_fw_dbg_trigger + * @vif_type: %enum iwl_fw_dbg_trigger_vif_type + * @stop_conf_ids: bitmap of configurations this trigger relates to. + * if the mode is %IWL_FW_DBG_TRIGGER_STOP, then if the bit corresponding + * to the currently running configuration is set, the data should be + * collected. 
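Each trigger TLV is scoped to an interface type via the NL80211-based enum above. A matching sketch, assuming IWL_FW_DBG_CONF_VIF_ANY acts as a wildcard and using the iwl_fw_dbg_trigger_tlv layout defined just below (the helper name is made up for illustration):

/* Illustrative: does this trigger apply to a vif of the given iftype? */
static bool example_trigger_matches_vif(const struct iwl_fw_dbg_trigger_tlv *trig,
                                        enum nl80211_iftype iftype)
{
        u32 vif_type = le32_to_cpu(trig->vif_type);

        return vif_type == IWL_FW_DBG_CONF_VIF_ANY || vif_type == iftype;
}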
+ * @stop_delay: how many milliseconds to wait before collecting the data + * after the STOP trigger fires. + * @mode: %enum iwl_fw_dbg_trigger_mode - can be stop / start of both + * @start_conf_id: if mode is %IWL_FW_DBG_TRIGGER_START, this defines what + * configuration should be applied when the triggers kicks in. + * @occurrences: number of occurrences. 0 means the trigger will never fire. + */ +struct iwl_fw_dbg_trigger_tlv { + __le32 id; + __le32 vif_type; + __le32 stop_conf_ids; + __le32 stop_delay; + u8 mode; + u8 start_conf_id; + __le16 occurrences; + __le32 reserved[2]; + + u8 data[0]; } __packed; +#define FW_DBG_START_FROM_ALIVE 0 +#define FW_DBG_CONF_MAX 32 +#define FW_DBG_INVALID 0xff + /** - * enum iwl_fw_dbg_conf - configurations available - * - * @FW_DBG_CUSTOM: take this configuration from alive - * Note that the trigger is NO-OP for this configuration + * struct iwl_fw_dbg_trigger_missed_bcon - configures trigger for missed beacons + * @stop_consec_missed_bcon: stop recording if threshold is crossed. + * @stop_consec_missed_bcon_since_rx: stop recording if threshold is crossed. + * @start_consec_missed_bcon: start recording if threshold is crossed. + * @start_consec_missed_bcon_since_rx: start recording if threshold is crossed. + * @reserved1: reserved + * @reserved2: reserved + */ +struct iwl_fw_dbg_trigger_missed_bcon { + __le32 stop_consec_missed_bcon; + __le32 stop_consec_missed_bcon_since_rx; + __le32 reserved2[2]; + __le32 start_consec_missed_bcon; + __le32 start_consec_missed_bcon_since_rx; + __le32 reserved1[2]; +} __packed; + +/** + * struct iwl_fw_dbg_trigger_cmd - configures trigger for messages from FW. + * cmds: the list of commands to trigger the collection on */ -enum iwl_fw_dbg_conf { - FW_DBG_CUSTOM = 0, +struct iwl_fw_dbg_trigger_cmd { + struct cmd { + u8 cmd_id; + u8 group_id; + } __packed cmds[16]; +} __packed; - /* must be last */ - FW_DBG_MAX, - FW_DBG_INVALID = 0xff, -}; +/** + * iwl_fw_dbg_trigger_stats - configures trigger for statistics + * @stop_offset: the offset of the value to be monitored + * @stop_threshold: the threshold above which to collect + * @start_offset: the offset of the value to be monitored + * @start_threshold: the threshold above which to start recording + */ +struct iwl_fw_dbg_trigger_stats { + __le32 stop_offset; + __le32 stop_threshold; + __le32 start_offset; + __le32 start_threshold; +} __packed; /** - * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration - * - * @id: %enum iwl_fw_dbg_conf + * struct iwl_fw_dbg_trigger_low_rssi - trigger for low beacon RSSI + * @rssi: RSSI value to trigger at + */ +struct iwl_fw_dbg_trigger_low_rssi { + __le32 rssi; +} __packed; + +/** + * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration. + * @id: conf id * @usniffer: should the uSniffer image be used * @num_of_hcmds: how many HCMDs to send are present here * @hcmd: a variable length host command to be sent to apply the configuration. * If there is more than one HCMD to send, they will appear one after the * other and be sent in the order that they appear in. - * This parses IWL_UCODE_TLV_FW_DBG_CONF + * This parses IWL_UCODE_TLV_FW_DBG_CONF. The user can add up-to + * %FW_DBG_CONF_MAX configuration per run. 
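stop_delay is carried in milliseconds inside the TLV and the data collection is deferred by that amount. A one-line conversion sketch matching how the fw.c hunks later in this diff consume it:

/* Illustrative: TLV milliseconds -> jiffies for the delayed dump work. */
static unsigned long example_trigger_delay(const struct iwl_fw_dbg_trigger_tlv *t)
{
        return msecs_to_jiffies(le32_to_cpu(t->stop_delay));
}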
*/ struct iwl_fw_dbg_conf_tlv { u8 id; @@ -495,8 +590,6 @@ struct iwl_fw_dbg_conf_tlv { u8 reserved; u8 num_of_hcmds; struct iwl_fw_dbg_conf_hcmd hcmd; - - /* struct iwl_fw_dbg_trigger sits after all variable length hcmds */ } __packed; #endif /* __iwl_fw_file_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h index ffd785c..cf75baf 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -68,6 +68,7 @@ #include <net/mac80211.h> #include "iwl-fw-file.h" +#include "iwl-fw-error-dump.h" /** * enum iwl_ucode_type @@ -157,6 +158,8 @@ struct iwl_fw_cscheme_list { * @dbg_dest_tlv: points to the destination TLV for debug * @dbg_conf_tlv: array of pointers to configuration TLVs for debug * @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries + * @dbg_trigger_tlv: array of pointers to triggers TLVs + * @dbg_trigger_tlv_len: lengths of the @dbg_trigger_tlv entries * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv */ struct iwl_fw { @@ -186,9 +189,10 @@ struct iwl_fw { u32 sdio_adma_addr; struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; - struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX]; - size_t dbg_conf_tlv_len[FW_DBG_MAX]; - + struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; + size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX]; + struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; + size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; u8 dbg_dest_reg_num; }; @@ -206,46 +210,29 @@ static inline const char *get_fw_dbg_mode_string(int mode) } } -static inline const struct iwl_fw_dbg_trigger * -iwl_fw_dbg_conf_get_trigger(const struct iwl_fw *fw, u8 id) +static inline bool +iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id) { const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id]; - u8 *ptr; - int i; if (!conf_tlv) - return NULL; - - ptr = (void *)&conf_tlv->hcmd; - for (i = 0; i < conf_tlv->num_of_hcmds; i++) { - ptr += sizeof(conf_tlv->hcmd); - ptr += le16_to_cpu(conf_tlv->hcmd.len); - } - - return (const struct iwl_fw_dbg_trigger *)ptr; -} - -static inline bool -iwl_fw_dbg_conf_enabled(const struct iwl_fw *fw, u8 id) -{ - const struct iwl_fw_dbg_trigger *trigger = - iwl_fw_dbg_conf_get_trigger(fw, id); - - if (!trigger) return false; - return trigger->enabled; + return conf_tlv->usniffer; } -static inline bool -iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id) -{ - const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id]; +#define iwl_fw_dbg_trigger_enabled(fw, id) ({ \ + void *__dbg_trigger = (fw)->dbg_trigger_tlv[(id)]; \ + unlikely(__dbg_trigger); \ +}) - if (!conf_tlv) - return false; +static inline struct iwl_fw_dbg_trigger_tlv* +iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, u8 id) +{ + if (WARN_ON(id >= ARRAY_SIZE(fw->dbg_trigger_tlv))) + return NULL; - return conf_tlv->usniffer; + return fw->dbg_trigger_tlv[id]; } #endif /* __iwl_fw_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c index d4fb5ca..e893c6e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c +++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c @@ -72,7 +72,7 @@ #include "iwl-trans.h" #define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */ -#define IWL_NUM_PAPD_CH_GROUPS 7 +#define IWL_NUM_PAPD_CH_GROUPS 9 #define IWL_NUM_TXP_CH_GROUPS 9 struct iwl_phy_db_entry { diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h index 6221e4d..6095088 100644 --- a/drivers/net/wireless/iwlwifi/iwl-prph.h 
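The iwl-fw.h helpers above (iwl_fw_dbg_trigger_enabled() and iwl_fw_dbg_get_trigger()) are meant to be used together. A usage sketch modeled on the missed-beacon path added later in this diff:

/* Illustrative caller: bail out cheaply if the firmware file carried no such
 * trigger, otherwise fetch it and kick off collection. */
static void example_maybe_collect(struct iwl_mvm *mvm)
{
        struct iwl_fw_dbg_trigger_tlv *trig;

        if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MISSED_BEACONS))
                return;

        trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MISSED_BEACONS);
        iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL, 0);
}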
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h @@ -370,7 +370,6 @@ enum secure_load_status_reg { #define MON_BUFF_CYCLE_CNT (0xa03c48) #define DBGC_IN_SAMPLE (0xa03c00) -#define DBGC_OUT_CTRL (0xa03c0c) /* FW chicken bits */ #define LMPM_CHICK 0xA01FF8 diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index a96bd8d..542a681 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -595,6 +595,7 @@ enum iwl_d0i3_mode { * @dflt_pwr_limit: default power limit fetched from the platform (ACPI) * @dbg_dest_tlv: points to the destination TLV for debug * @dbg_conf_tlv: array of pointers to configuration TLVs for debug + * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv */ struct iwl_trans { @@ -628,7 +629,8 @@ struct iwl_trans { u64 dflt_pwr_limit; const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; - const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX]; + const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; + struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv; u8 dbg_dest_reg_num; enum iwl_d0i3_mode d0i3_mode; diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index 1ec4d55..ce99572 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c @@ -611,7 +611,7 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm) bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED); - if (IWL_MVM_BT_COEX_CORUNNING) + if (iwl_mvm_bt_is_plcr_supported(mvm)) bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED); if (IWL_MVM_BT_COEX_MPLUT) { @@ -1234,7 +1234,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd); - if (!IWL_MVM_BT_COEX_CORUNNING) + if (!iwl_mvm_bt_is_plcr_supported(mvm)) return 0; lockdep_assert_held(&mvm->mutex); diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c index d530ef3..9717ee6 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c @@ -619,7 +619,7 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm) if (IWL_MVM_BT_COEX_SYNC2SCO) bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO); - if (IWL_MVM_BT_COEX_CORUNNING) { + if (iwl_mvm_bt_is_plcr_supported(mvm)) { bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 | BT_VALID_CORUN_LUT_40); bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING); @@ -1167,16 +1167,10 @@ bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm, return lut_type != BT_COEX_LOOSE_LUT; } -bool iwl_mvm_bt_coex_is_ant_avail_old(struct iwl_mvm *mvm, u8 ant) -{ - u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading); - return ag < BT_HIGH_TRAFFIC; -} - bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm) { u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading); - return ag == BT_OFF; + return ag < BT_HIGH_TRAFFIC; } bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm, @@ -1213,7 +1207,7 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm, .dataflags = { IWL_HCMD_DFL_NOCOPY, }, }; - if (!IWL_MVM_BT_COEX_CORUNNING) + if (!iwl_mvm_bt_is_plcr_supported(mvm)) return 0; lockdep_assert_held(&mvm->mutex); diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 14e8fd6..9bdfa95 
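The coex hunks above replace the compile-time IWL_MVM_BT_COEX_CORUNNING test with iwl_mvm_bt_is_plcr_supported(). Its definition is not shown in these hunks; a plausible shape, gated on the new IWL_UCODE_TLV_CAPA_BT_COEX_PLCR capability bit, would be:

/* Assumed implementation sketch only: packet-level co-running stays subject
 * to the build-time switch and additionally requires firmware support. */
static inline bool example_bt_is_plcr_supported(struct iwl_mvm *mvm)
{
        return IWL_MVM_BT_COEX_CORUNNING &&
               (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_PLCR);
}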
100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -1876,25 +1876,28 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) if (mvm->net_detect) { iwl_mvm_query_netdetect_reasons(mvm, vif); + /* has unlocked the mutex, so skip that */ + goto out; } else { keep = iwl_mvm_query_wakeup_reasons(mvm, vif); #ifdef CONFIG_IWLWIFI_DEBUGFS if (keep) mvm->keep_vif = vif; + /* has unlocked the mutex, so skip that */ + goto out_iterate; #endif } - /* has unlocked the mutex, so skip that */ - goto out; out_unlock: mutex_unlock(&mvm->mutex); - out: +out_iterate: if (!test) ieee80211_iterate_active_interfaces_rtnl(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_d3_disconnect_iter, keep ? vif : NULL); +out: /* return 1 to reconfigure the device */ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status); diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c index 5fe1459..5f37eab 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c @@ -545,6 +545,57 @@ static ssize_t iwl_dbgfs_uapsd_misbehaving_write(struct ieee80211_vif *vif, return ret ? count : -EINVAL; } +static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif *vif, char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + struct ieee80211_chanctx_conf *chanctx_conf; + struct iwl_mvm_phy_ctxt *phy_ctxt; + u16 value; + int ret; + + ret = kstrtou16(buf, 0, &value); + if (ret) + return ret; + + mutex_lock(&mvm->mutex); + rcu_read_lock(); + + chanctx_conf = rcu_dereference(vif->chanctx_conf); + /* make sure the channel context is assigned */ + if (!chanctx_conf) { + rcu_read_unlock(); + mutex_unlock(&mvm->mutex); + return -EINVAL; + } + + phy_ctxt = &mvm->phy_ctxts[*(u16 *)chanctx_conf->drv_priv]; + rcu_read_unlock(); + + mvm->dbgfs_rx_phyinfo = value; + + ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chanctx_conf->min_def, + chanctx_conf->rx_chains_static, + chanctx_conf->rx_chains_dynamic); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + char buf[8]; + + snprintf(buf, sizeof(buf), "0x%04x\n", mvmvif->mvm->dbgfs_rx_phyinfo); + + return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf)); +} + #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ @@ -560,6 +611,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32); MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256); MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10); void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { @@ -575,7 +627,6 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) return; mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir); - mvmvif->mvm = mvm; if (!mvmvif->dbgfs_dir) { IWL_ERR(mvm, "Failed to create debugfs directory under %s\n", @@ -595,6 +646,8 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) S_IRUSR | S_IWUSR); 
MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir, S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && mvmvif == mvm->bf_allowed_vif) diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c index 82c09d8..8cbe77d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c @@ -942,7 +942,7 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; - enum iwl_fw_dbg_conf conf; + int conf; char buf[8]; const size_t bufsz = sizeof(buf); int pos = 0; @@ -966,7 +966,7 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm, if (ret) return ret; - if (WARN_ON(conf_id >= FW_DBG_MAX)) + if (WARN_ON(conf_id >= FW_DBG_CONF_MAX)) return -EINVAL; mutex_lock(&mvm->mutex); @@ -985,7 +985,7 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, if (ret) return ret; - iwl_mvm_fw_dbg_collect(mvm); + iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, 0); iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE); diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h index c405cda..aabaedd 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h @@ -70,6 +70,7 @@ #define MAC_INDEX_AUX 4 #define MAC_INDEX_MIN_DRIVER 0 #define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX +#define NUM_MAC_INDEX (MAC_INDEX_AUX + 1) enum iwl_ac { AC_BK, diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index cfc0e65..a5fbbd6 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@ -70,55 +70,10 @@ /* Scan Commands, Responses, Notifications */ -/* Masks for iwl_scan_channel.type flags */ -#define SCAN_CHANNEL_TYPE_ACTIVE BIT(0) -#define SCAN_CHANNEL_NARROW_BAND BIT(22) - /* Max number of IEs for direct SSID scans in a command */ #define PROBE_OPTION_MAX 20 /** - * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table - * @channel: band is selected by iwl_scan_cmd "flags" field - * @tx_gain: gain for analog radio - * @dsp_atten: gain for DSP - * @active_dwell: dwell time for active scan in TU, typically 5-50 - * @passive_dwell: dwell time for passive scan in TU, typically 20-500 - * @type: type is broken down to these bits: - * bit 0: 0 = passive, 1 = active - * bits 1-20: SSID direct bit map. If any of these bits is set then - * the corresponding SSID IE is transmitted in probe request - * (bit i adds IE in position i to the probe request) - * bit 22: channel width, 0 = regular, 1 = TGj narrow channel - * - * @iteration_count: - * @iteration_interval: - * This struct is used once for each channel in the scan list. - * Each channel can independently select: - * 1) SSID for directed active scans - * 2) Txpower setting (for rate specified within Tx command) - * 3) How long to stay on-channel (behavior may be modified by quiet_time, - * quiet_plcp_th, good_CRC_th) - * - * To avoid uCode errors, make sure the following are true (see comments - * under struct iwl_scan_cmd about max_out_time and quiet_time): - * 1) If using passive_dwell (i.e. passive_dwell != 0): - * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0) - * 2) quiet_time <= active_dwell - * 3) If restricting off-channel time (i.e. 
max_out_time !=0): - * passive_dwell < max_out_time - * active_dwell < max_out_time - */ -struct iwl_scan_channel { - __le32 type; - __le16 channel; - __le16 iteration_count; - __le32 iteration_interval; - __le16 active_dwell; - __le16 passive_dwell; -} __packed; /* SCAN_CHANNEL_CONTROL_API_S_VER_1 */ - -/** * struct iwl_ssid_ie - directed scan network information element * * Up to 20 of these may appear in REPLY_SCAN_CMD, @@ -132,152 +87,6 @@ struct iwl_ssid_ie { u8 ssid[IEEE80211_MAX_SSID_LEN]; } __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */ -/** - * iwl_scan_flags - masks for scan command flags - *@SCAN_FLAGS_PERIODIC_SCAN: - *@SCAN_FLAGS_P2P_PUBLIC_ACTION_FRAME_TX: - *@SCAN_FLAGS_DELAYED_SCAN_LOWBAND: - *@SCAN_FLAGS_DELAYED_SCAN_HIGHBAND: - *@SCAN_FLAGS_FRAGMENTED_SCAN: - *@SCAN_FLAGS_PASSIVE2ACTIVE: use active scan on channels that was active - * in the past hour, even if they are marked as passive. - */ -enum iwl_scan_flags { - SCAN_FLAGS_PERIODIC_SCAN = BIT(0), - SCAN_FLAGS_P2P_PUBLIC_ACTION_FRAME_TX = BIT(1), - SCAN_FLAGS_DELAYED_SCAN_LOWBAND = BIT(2), - SCAN_FLAGS_DELAYED_SCAN_HIGHBAND = BIT(3), - SCAN_FLAGS_FRAGMENTED_SCAN = BIT(4), - SCAN_FLAGS_PASSIVE2ACTIVE = BIT(5), -}; - -/** - * enum iwl_scan_type - Scan types for scan command - * @SCAN_TYPE_FORCED: - * @SCAN_TYPE_BACKGROUND: - * @SCAN_TYPE_OS: - * @SCAN_TYPE_ROAMING: - * @SCAN_TYPE_ACTION: - * @SCAN_TYPE_DISCOVERY: - * @SCAN_TYPE_DISCOVERY_FORCED: - */ -enum iwl_scan_type { - SCAN_TYPE_FORCED = 0, - SCAN_TYPE_BACKGROUND = 1, - SCAN_TYPE_OS = 2, - SCAN_TYPE_ROAMING = 3, - SCAN_TYPE_ACTION = 4, - SCAN_TYPE_DISCOVERY = 5, - SCAN_TYPE_DISCOVERY_FORCED = 6, -}; /* SCAN_ACTIVITY_TYPE_E_VER_1 */ - -/** - * struct iwl_scan_cmd - scan request command - * ( SCAN_REQUEST_CMD = 0x80 ) - * @len: command length in bytes - * @scan_flags: scan flags from SCAN_FLAGS_* - * @channel_count: num of channels in channel list - * (1 - ucode_capa.n_scan_channels) - * @quiet_time: in msecs, dwell this time for active scan on quiet channels - * @quiet_plcp_th: quiet PLCP threshold (channel is quiet if less than - * this number of packets were received (typically 1) - * @passive2active: is auto switching from passive to active during scan allowed - * @rxchain_sel_flags: RXON_RX_CHAIN_* - * @max_out_time: in TUs, max out of serving channel time - * @suspend_time: how long to pause scan when returning to service channel: - * bits 0-19: beacon interal in TUs (suspend before executing) - * bits 20-23: reserved - * bits 24-31: number of beacons (suspend between channels) - * @rxon_flags: RXON_FLG_* - * @filter_flags: RXON_FILTER_* - * @tx_cmd: for active scans (zero for passive), w/o payload, - * no RS so specify TX rate - * @direct_scan: direct scan SSIDs - * @type: one of SCAN_TYPE_* - * @repeats: how many time to repeat the scan - */ -struct iwl_scan_cmd { - __le16 len; - u8 scan_flags; - u8 channel_count; - __le16 quiet_time; - __le16 quiet_plcp_th; - __le16 passive2active; - __le16 rxchain_sel_flags; - __le32 max_out_time; - __le32 suspend_time; - /* RX_ON_FLAGS_API_S_VER_1 */ - __le32 rxon_flags; - __le32 filter_flags; - struct iwl_tx_cmd tx_cmd; - struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; - __le32 type; - __le32 repeats; - - /* - * Probe request frame, followed by channel list. - * - * Size of probe request frame is specified by byte count in tx_cmd. - * Channel list follows immediately after probe request frame. - * Number of channels in list is specified by channel_count. 
- * Each channel in list is of type: - * - * struct iwl_scan_channel channels[0]; - * - * NOTE: Only one band of channels can be scanned per pass. You - * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait - * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION) - * before requesting another scan. - */ - u8 data[0]; -} __packed; /* SCAN_REQUEST_FIXED_PART_API_S_VER_5 */ - -/* Response to scan request contains only status with one of these values */ -#define SCAN_RESPONSE_OK 0x1 -#define SCAN_RESPONSE_ERROR 0x2 - -/* - * SCAN_ABORT_CMD = 0x81 - * When scan abort is requested, the command has no fields except the common - * header. The response contains only a status with one of these values. - */ -#define SCAN_ABORT_POSSIBLE 0x1 -#define SCAN_ABORT_IGNORED 0x2 /* no pending scans */ - -/* TODO: complete documentation */ -#define SCAN_OWNER_STATUS 0x1 -#define MEASURE_OWNER_STATUS 0x2 - -/** - * struct iwl_scan_start_notif - notifies start of scan in the device - * ( SCAN_START_NOTIFICATION = 0x82 ) - * @tsf_low: TSF timer (lower half) in usecs - * @tsf_high: TSF timer (higher half) in usecs - * @beacon_timer: structured as follows: - * bits 0:19 - beacon interval in usecs - * bits 20:23 - reserved (0) - * bits 24:31 - number of beacons - * @channel: which channel is scanned - * @band: 0 for 5.2 GHz, 1 for 2.4 GHz - * @status: one of *_OWNER_STATUS - */ -struct iwl_scan_start_notif { - __le32 tsf_low; - __le32 tsf_high; - __le32 beacon_timer; - u8 channel; - u8 band; - u8 reserved[2]; - __le32 status; -} __packed; /* SCAN_START_NTF_API_S_VER_1 */ - -/* scan results probe_status first bit indicates success */ -#define SCAN_PROBE_STATUS_OK 0 -#define SCAN_PROBE_STATUS_TX_FAILED BIT(0) -/* error statuses combined with TX_FAILED */ -#define SCAN_PROBE_STATUS_FAIL_TTL BIT(1) -#define SCAN_PROBE_STATUS_FAIL_BT BIT(2) - /* How many statistics are gathered for each channel */ #define SCAN_RESULTS_STATISTICS 1 diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h index 928168b..709e28d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h @@ -65,6 +65,7 @@ #ifndef __fw_api_stats_h__ #define __fw_api_stats_h__ +#include "fw-api-mac.h" struct mvm_statistics_dbg { __le32 burst_check; @@ -218,7 +219,7 @@ struct mvm_statistics_bt_activity { __le32 lo_priority_rx_denied_cnt; } __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */ -struct mvm_statistics_general { +struct mvm_statistics_general_v5 { __le32 radio_temperature; __le32 radio_voltage; struct mvm_statistics_dbg dbg; @@ -244,6 +245,39 @@ struct mvm_statistics_general { struct mvm_statistics_bt_activity bt_activity; } __packed; /* STATISTICS_GENERAL_API_S_VER_5 */ +struct mvm_statistics_general_v8 { + __le32 radio_temperature; + __le32 radio_voltage; + struct mvm_statistics_dbg dbg; + __le32 sleep_time; + __le32 slots_out; + __le32 slots_idle; + __le32 ttl_timestamp; + struct mvm_statistics_div slow_div; + __le32 rx_enable_counter; + /* + * num_of_sos_states: + * count the number of times we have to re-tune + * in order to get out of bad PHY status + */ + __le32 num_of_sos_states; + __le32 beacon_filtered; + __le32 missed_beacons; + __s8 beacon_filter_average_energy; + __s8 beacon_filter_reason; + __s8 beacon_filter_current_energy; + __s8 beacon_filter_reserved; + __le32 beacon_filter_delta_time; + struct mvm_statistics_bt_activity bt_activity; + __le64 rx_time; + __le64 on_time_rf; + __le64 on_time_scan; 
+ __le64 tx_time; + __le32 beacon_counter[NUM_MAC_INDEX]; + u8 beacon_average_energy[NUM_MAC_INDEX]; + u8 reserved[4 - (NUM_MAC_INDEX % 4)]; +} __packed; /* STATISTICS_GENERAL_API_S_VER_8 */ + struct mvm_statistics_rx { struct mvm_statistics_rx_phy ofdm; struct mvm_statistics_rx_phy cck; @@ -256,22 +290,28 @@ struct mvm_statistics_rx { * * By default, uCode issues this notification after receiving a beacon * while associated. To disable this behavior, set DISABLE_NOTIF flag in the - * REPLY_STATISTICS_CMD 0x9c, above. - * - * Statistics counters continue to increment beacon after beacon, but are - * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD - * 0x9c with CLEAR_STATS bit set (see above). - * - * uCode also issues this notification during scans. uCode clears statistics - * appropriately so that each notification contains statistics for only the - * one channel that has just been scanned. + * STATISTICS_CMD (0x9c), below. */ -struct iwl_notif_statistics { +struct iwl_notif_statistics_v8 { __le32 flag; struct mvm_statistics_rx rx; struct mvm_statistics_tx tx; - struct mvm_statistics_general general; + struct mvm_statistics_general_v5 general; } __packed; /* STATISTICS_NTFY_API_S_VER_8 */ +struct iwl_notif_statistics_v10 { + __le32 flag; + struct mvm_statistics_rx rx; + struct mvm_statistics_tx tx; + struct mvm_statistics_general_v8 general; +} __packed; /* STATISTICS_NTFY_API_S_VER_10 */ + +#define IWL_STATISTICS_FLG_CLEAR 0x1 +#define IWL_STATISTICS_FLG_DISABLE_NOTIF 0x2 + +struct iwl_statistics_cmd { + __le32 flags; +} __packed; /* STATISTICS_CMD_API_S_VER_1 */ + #endif /* __fw_api_stats_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index b56154f..d95b472 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -192,6 +192,7 @@ enum { BEACON_NOTIFICATION = 0x90, BEACON_TEMPLATE_CMD = 0x91, TX_ANT_CONFIGURATION_CMD = 0x98, + STATISTICS_CMD = 0x9c, STATISTICS_NOTIFICATION = 0x9d, EOSP_NOTIFICATION = 0x9e, REDUCE_TX_POWER_CMD = 0x9f, @@ -431,7 +432,7 @@ enum { #define IWL_ALIVE_FLG_RFKILL BIT(0) -struct mvm_alive_resp { +struct mvm_alive_resp_ver1 { __le16 status; __le16 flags; u8 ucode_minor; @@ -482,6 +483,30 @@ struct mvm_alive_resp_ver2 { __le32 dbg_print_buff_addr; } __packed; /* ALIVE_RES_API_S_VER_2 */ +struct mvm_alive_resp { + __le16 status; + __le16 flags; + __le32 ucode_minor; + __le32 ucode_major; + u8 ver_subtype; + u8 ver_type; + u8 mac; + u8 opt; + __le32 timestamp; + __le32 error_event_table_ptr; /* SRAM address for error log */ + __le32 log_event_table_ptr; /* SRAM address for LMAC event log */ + __le32 cpu_register_ptr; + __le32 dbgm_config_ptr; + __le32 alive_counter_ptr; + __le32 scd_base_ptr; /* SRAM address for SCD */ + __le32 st_fwrd_addr; /* pointer to Store and forward */ + __le32 st_fwrd_size; + __le32 umac_minor; /* UMAC version: minor */ + __le32 umac_major; /* UMAC version: major */ + __le32 error_info_addr; /* SRAM address for UMAC error log */ + __le32 dbg_print_buff_addr; +} __packed; /* ALIVE_RES_API_S_VER_3 */ + /* Error response/notification */ enum { FW_ERR_UNKNOWN_CMD = 0x0, diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index ca38e98..a81da4c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -112,25 +112,27 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, struct iwl_mvm *mvm = container_of(notif_wait, struct 
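Two statistics notification layouts now coexist; which one the firmware sends is advertised through IWL_UCODE_TLV_API_STATS_V10. A selection sketch, assuming the handler simply keys on that bit:

/* Illustrative: expected payload size of the STATISTICS_NOTIFICATION. */
static size_t example_stats_notif_size(const struct iwl_fw *fw)
{
        if (fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_STATS_V10)
                return sizeof(struct iwl_notif_statistics_v10);

        return sizeof(struct iwl_notif_statistics_v8);
}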
iwl_mvm, notif_wait); struct iwl_mvm_alive_data *alive_data = data; - struct mvm_alive_resp *palive; + struct mvm_alive_resp_ver1 *palive1; struct mvm_alive_resp_ver2 *palive2; + struct mvm_alive_resp *palive; - if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) { - palive = (void *)pkt->data; + if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) { + palive1 = (void *)pkt->data; mvm->support_umac_log = false; mvm->error_event_table = - le32_to_cpu(palive->error_event_table_ptr); - mvm->log_event_table = le32_to_cpu(palive->log_event_table_ptr); - alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr); + le32_to_cpu(palive1->error_event_table_ptr); + mvm->log_event_table = + le32_to_cpu(palive1->log_event_table_ptr); + alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr); - alive_data->valid = le16_to_cpu(palive->status) == + alive_data->valid = le16_to_cpu(palive1->status) == IWL_ALIVE_STATUS_OK; IWL_DEBUG_FW(mvm, "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n", - le16_to_cpu(palive->status), palive->ver_type, - palive->ver_subtype, palive->flags); - } else { + le16_to_cpu(palive1->status), palive1->ver_type, + palive1->ver_subtype, palive1->flags); + } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) { palive2 = (void *)pkt->data; mvm->error_event_table = @@ -156,6 +158,33 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, IWL_DEBUG_FW(mvm, "UMAC version: Major - 0x%x, Minor - 0x%x\n", palive2->umac_major, palive2->umac_minor); + } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) { + palive = (void *)pkt->data; + + mvm->error_event_table = + le32_to_cpu(palive->error_event_table_ptr); + mvm->log_event_table = + le32_to_cpu(palive->log_event_table_ptr); + alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr); + mvm->umac_error_event_table = + le32_to_cpu(palive->error_info_addr); + mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr); + mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size); + + alive_data->valid = le16_to_cpu(palive->status) == + IWL_ALIVE_STATUS_OK; + if (mvm->umac_error_event_table) + mvm->support_umac_log = true; + + IWL_DEBUG_FW(mvm, + "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n", + le16_to_cpu(palive->status), palive->ver_type, + palive->ver_subtype, palive->flags); + + IWL_DEBUG_FW(mvm, + "UMAC version: Major - 0x%x, Minor - 0x%x\n", + le32_to_cpu(palive->umac_major), + le32_to_cpu(palive->umac_minor)); } return true; @@ -188,8 +217,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, struct iwl_sf_region st_fwrd_space; if (ucode_type == IWL_UCODE_REGULAR && - iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_CUSTOM) && - iwl_fw_dbg_conf_enabled(mvm->fw, FW_DBG_CUSTOM)) + iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE)) fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER); else fw = iwl_get_ucode_image(mvm, ucode_type); @@ -451,20 +479,80 @@ exit: iwl_free_resp(&cmd); } -void iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm) +int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, + struct iwl_mvm_dump_desc *desc, + unsigned int delay) { + if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status)) + return -EBUSY; + + if (WARN_ON(mvm->fw_dump_desc)) + iwl_mvm_free_fw_dump_desc(mvm); + + IWL_WARN(mvm, "Collecting data: trigger %d fired.\n", + le32_to_cpu(desc->trig_desc.type)); + + mvm->fw_dump_desc = desc; + /* stop recording */ if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { iwl_set_bits_prph(mvm->trans, 
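The alive handler above now distinguishes three response layouts purely by payload length. The dispatch, reduced to its core (a sketch, not the driver function):

/* Illustrative: 1/2/3 for the ALIVE response version, 0 if unrecognised. */
static int example_alive_version(struct iwl_rx_packet *pkt)
{
        u32 len = iwl_rx_packet_payload_len(pkt);

        if (len == sizeof(struct mvm_alive_resp_ver1))
                return 1;
        if (len == sizeof(struct mvm_alive_resp_ver2))
                return 2;
        if (len == sizeof(struct mvm_alive_resp))
                return 3;

        return 0;
}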
MON_BUFF_SAMPLE_CTL, 0x100); } else { iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0); - iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0); + /* wait before we collect the data till the DBGC stop */ + udelay(100); } - schedule_work(&mvm->fw_error_dump_wk); + queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay); + + return 0; } -int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, enum iwl_fw_dbg_conf conf_id) +int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, + const char *str, size_t len, unsigned int delay) +{ + struct iwl_mvm_dump_desc *desc; + + desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC); + if (!desc) + return -ENOMEM; + + desc->len = len; + desc->trig_desc.type = cpu_to_le32(trig); + memcpy(desc->trig_desc.data, str, len); + + return iwl_mvm_fw_dbg_collect_desc(mvm, desc, delay); +} + +int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, + struct iwl_fw_dbg_trigger_tlv *trigger, + const char *str, size_t len) +{ + unsigned int delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay)); + u16 occurrences = le16_to_cpu(trigger->occurrences); + int ret; + + if (!occurrences) + return 0; + + ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), str, + len, delay); + if (ret) + return ret; + + trigger->occurrences = cpu_to_le16(occurrences - 1); + return 0; +} + +static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm) +{ + if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) + iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); + else + iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1); +} + +int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id) { u8 *ptr; int ret; @@ -474,6 +562,14 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, enum iwl_fw_dbg_conf conf_id) "Invalid configuration %d\n", conf_id)) return -EINVAL; + /* EARLY START - firmware's configuration is hard coded */ + if ((!mvm->fw->dbg_conf_tlv[conf_id] || + !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) && + conf_id == FW_DBG_START_FROM_ALIVE) { + iwl_mvm_restart_early_start(mvm); + return 0; + } + if (!mvm->fw->dbg_conf_tlv[conf_id]) return -EINVAL; @@ -583,7 +679,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm) IWL_ERR(mvm, "Failed to initialize Smart Fifo\n"); mvm->fw_dbg_conf = FW_DBG_INVALID; - iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_CUSTOM); + /* if we have a destination, assume EARLY START */ + if (mvm->fw->dbg_dest_tlv) + mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE; + iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE); ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); if (ret) diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c index 7bdc622..581b3b8 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c @@ -244,6 +244,7 @@ static void iwl_mvm_mac_sta_hw_queues_iter(void *_data, unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, struct ieee80211_vif *exclude_vif) { + u8 sta_id; struct iwl_mvm_hw_queues_iface_iterator_data data = { .exclude_vif = exclude_vif, .used_hw_queues = @@ -264,6 +265,13 @@ unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, iwl_mvm_mac_sta_hw_queues_iter, &data); + /* + * Some TDLS stations may be removed but are in the process of being + * drained. Don't touch their queues. 
+ */ + for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) + data.used_hw_queues |= mvm->tfd_drained[sta_id]; + return data.used_hw_queues; } @@ -1367,10 +1375,18 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac, { struct iwl_missed_beacons_notif *missed_beacons = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig; + struct iwl_fw_dbg_trigger_tlv *trigger; + u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx; + u32 rx_missed_bcon, rx_missed_bcon_since_rx; if (mvmvif->id != (u16)le32_to_cpu(missed_beacons->mac_id)) return; + rx_missed_bcon = le32_to_cpu(missed_beacons->consec_missed_beacons); + rx_missed_bcon_since_rx = + le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx); /* * TODO: the threshold should be adjusted based on latency conditions, * and/or in case of a CS flow on one of the other AP vifs. @@ -1378,6 +1394,26 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac, if (le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx) > IWL_MVM_MISSED_BEACONS_THRESHOLD) ieee80211_beacon_loss(vif); + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, + FW_DBG_TRIGGER_MISSED_BEACONS)) + return; + + trigger = iwl_fw_dbg_get_trigger(mvm->fw, + FW_DBG_TRIGGER_MISSED_BEACONS); + bcon_trig = (void *)trigger->data; + stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon); + stop_trig_missed_bcon_since_rx = + le32_to_cpu(bcon_trig->stop_consec_missed_bcon_since_rx); + + /* TODO: implement start trigger */ + + if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger)) + return; + + if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx || + rx_missed_bcon >= stop_trig_missed_bcon) + iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL, 0); } int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 1ff7ec0..2042554 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -339,13 +339,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) !iwlwifi_mod_params.sw_crypto) hw->flags |= IEEE80211_HW_MFP_CAPABLE; - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN || - mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) { - hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS; - hw->wiphy->features |= - NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | - NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; - } + hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS; + hw->wiphy->features |= + NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | + NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; hw->sta_data_size = sizeof(struct iwl_mvm_sta); hw->vif_data_size = sizeof(struct iwl_mvm_vif); @@ -889,12 +886,23 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, iwl_trans_release_nic_access(mvm->trans, &flags); } +void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm) +{ + if (mvm->fw_dump_desc == &iwl_mvm_dump_desc_assert || + !mvm->fw_dump_desc) + return; + + kfree(mvm->fw_dump_desc); + mvm->fw_dump_desc = NULL; +} + void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) { struct iwl_fw_error_dump_file *dump_file; struct iwl_fw_error_dump_data *dump_data; struct iwl_fw_error_dump_info *dump_info; struct iwl_fw_error_dump_mem *dump_mem; + struct iwl_fw_error_dump_trigger_desc *dump_trig; struct iwl_mvm_dump_ptrs *fw_error_dump; u32 sram_len, sram_ofs; u32 file_len, fifo_data_len = 0; @@ -964,6 +972,10 
@@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) fifo_data_len + sizeof(*dump_info); + if (mvm->fw_dump_desc) + file_len += sizeof(*dump_data) + sizeof(*dump_trig) + + mvm->fw_dump_desc->len; + /* Make room for the SMEM, if it exists */ if (smem_len) file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len; @@ -975,6 +987,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) dump_file = vzalloc(file_len); if (!dump_file) { kfree(fw_error_dump); + iwl_mvm_free_fw_dump_desc(mvm); return; } @@ -1003,6 +1016,19 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) iwl_mvm_dump_fifos(mvm, &dump_data); + if (mvm->fw_dump_desc) { + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO); + dump_data->len = cpu_to_le32(sizeof(*dump_trig) + + mvm->fw_dump_desc->len); + dump_trig = (void *)dump_data->data; + memcpy(dump_trig, &mvm->fw_dump_desc->trig_desc, + sizeof(*dump_trig) + mvm->fw_dump_desc->len); + + /* now we can free this copy */ + iwl_mvm_free_fw_dump_desc(mvm); + dump_data = iwl_fw_error_next_data(dump_data); + } + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); dump_mem = (void *)dump_data->data; @@ -1041,16 +1067,26 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0, GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump); + + clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status); } +struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert = { + .trig_desc = { + .type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT), + }, +}; + static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) { /* clear the D3 reconfig, we only need it to avoid dumping a * firmware coredump on reconfiguration, we shouldn't do that * on D3->D0 transition */ - if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) + if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) { + mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert; iwl_mvm_fw_error_dump(mvm); + } /* cleanup all stale references (scan, roc), but keep the * ucode_down ref until reconfig is complete @@ -1091,6 +1127,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) mvm->vif_count = 0; mvm->rx_ba_sessions = 0; + mvm->fw_dbg_conf = FW_DBG_INVALID; + + /* keep statistics ticking */ + iwl_mvm_accu_radio_stats(mvm); } int __iwl_mvm_mac_start(struct iwl_mvm *mvm) @@ -1213,6 +1253,11 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) { lockdep_assert_held(&mvm->mutex); + /* firmware counters are obviously reset now, but we shouldn't + * partially track so also clear the fw_reset_accu counters. + */ + memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats)); + /* * Disallow low power states when the FW is down by taking * the UCODE_DOWN ref. 
in case of ongoing hw restart the @@ -1252,7 +1297,8 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) flush_work(&mvm->d0i3_exit_work); flush_work(&mvm->async_handlers_wk); - flush_work(&mvm->fw_error_dump_wk); + cancel_delayed_work_sync(&mvm->fw_dump_wk); + iwl_mvm_free_fw_dump_desc(mvm); mutex_lock(&mvm->mutex); __iwl_mvm_mac_stop(mvm); @@ -1300,6 +1346,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; + mvmvif->mvm = mvm; + /* * make sure D0i3 exit is completed, otherwise a target access * during tx queue configuration could be done when still in @@ -1317,6 +1365,11 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); + /* make sure that beacon statistics don't go backwards with FW reset */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + mvmvif->beacon_stats.accu_num_beacons += + mvmvif->beacon_stats.num_beacons; + /* Allocate resources for the MAC context, and add it to the fw */ ret = iwl_mvm_mac_ctxt_init(mvm, vif); if (ret) @@ -1810,6 +1863,11 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (changes & BSS_CHANGED_ASSOC) { if (bss_conf->assoc) { + /* clear statistics to get clean beacon counter */ + iwl_mvm_request_statistics(mvm, true); + memset(&mvmvif->beacon_stats, 0, + sizeof(mvmvif->beacon_stats)); + /* add quota for this interface */ ret = iwl_mvm_update_quotas(mvm, NULL); if (ret) { @@ -2196,10 +2254,8 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) ret = iwl_mvm_scan_umac(mvm, vif, hw_req); - else if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) - ret = iwl_mvm_unified_scan_lmac(mvm, vif, hw_req); else - ret = iwl_mvm_scan_request(mvm, vif, req); + ret = iwl_mvm_unified_scan_lmac(mvm, vif, hw_req); if (ret) iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); @@ -2527,13 +2583,7 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); - /* Newest FW fixes sched scan while connected on another interface */ - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) { - if (!vif->bss_conf.idle) { - ret = -EBUSY; - goto out; - } - } else if (!iwl_mvm_is_idle(mvm)) { + if (!vif->bss_conf.idle) { ret = -EBUSY; goto out; } @@ -3433,6 +3483,9 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n", chsw->chandef.center_freq1); + iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH, + NULL, 0); + switch (vif->type) { case NL80211_IFTYPE_AP: csa_vif = @@ -3581,6 +3634,95 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, } } +static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, + struct survey_info *survey) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + memset(survey, 0, sizeof(*survey)); + + /* only support global statistics right now */ + if (idx != 0) + return -ENOENT; + + if (!(mvm->fw->ucode_capa.capa[0] & + IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) + return -ENOENT; + + mutex_lock(&mvm->mutex); + + if (mvm->ucode_loaded) { + ret = iwl_mvm_request_statistics(mvm, false); + if (ret) + goto out; + } + + survey->filled = SURVEY_INFO_TIME | + SURVEY_INFO_TIME_RX | + SURVEY_INFO_TIME_TX | + SURVEY_INFO_TIME_SCAN; + survey->time = mvm->accu_radio_stats.on_time_rf + + mvm->radio_stats.on_time_rf; + do_div(survey->time, USEC_PER_MSEC); + + survey->time_rx = mvm->accu_radio_stats.rx_time + + 
mvm->radio_stats.rx_time; + do_div(survey->time_rx, USEC_PER_MSEC); + + survey->time_tx = mvm->accu_radio_stats.tx_time + + mvm->radio_stats.tx_time; + do_div(survey->time_tx, USEC_PER_MSEC); + + survey->time_scan = mvm->accu_radio_stats.on_time_scan + + mvm->radio_stats.on_time_scan; + do_div(survey->time_scan, USEC_PER_MSEC); + + out: + mutex_unlock(&mvm->mutex); + return ret; +} + +static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + + if (!(mvm->fw->ucode_capa.capa[0] & + IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) + return; + + /* if beacon filtering isn't on mac80211 does it anyway */ + if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) + return; + + if (!vif->bss_conf.assoc) + return; + + mutex_lock(&mvm->mutex); + + if (mvmvif->ap_sta_id != mvmsta->sta_id) + goto unlock; + + if (iwl_mvm_request_statistics(mvm, false)) + goto unlock; + + sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons + + mvmvif->beacon_stats.accu_num_beacons; + sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX); + if (mvmvif->beacon_stats.avg_signal) { + /* firmware only reports a value after RXing a few beacons */ + sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal; + sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG); + } + unlock: + mutex_unlock(&mvm->mutex); +} + const struct ieee80211_ops iwl_mvm_hw_ops = { .tx = iwl_mvm_mac_tx, .ampdu_action = iwl_mvm_mac_ampdu_action, @@ -3647,4 +3789,6 @@ const struct ieee80211_ops iwl_mvm_hw_ops = { #endif .set_default_unicast_key = iwl_mvm_set_default_unicast_key, #endif + .get_survey = iwl_mvm_mac_get_survey, + .sta_statistics = iwl_mvm_mac_sta_statistics, }; diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 6c69d05..e10172d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -75,6 +75,7 @@ #include "iwl-trans.h" #include "iwl-notif-wait.h" #include "iwl-eeprom-parse.h" +#include "iwl-fw-file.h" #include "sta.h" #include "fw-api.h" #include "constants.h" @@ -145,6 +146,19 @@ struct iwl_mvm_dump_ptrs { u32 op_mode_len; }; +/** + * struct iwl_mvm_dump_desc - describes the dump + * @len: length of trig_desc->data + * @trig_desc: the description of the dump + */ +struct iwl_mvm_dump_desc { + size_t len; + /* must be last */ + struct iwl_fw_error_dump_trigger_desc trig_desc; +}; + +extern struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert; + struct iwl_mvm_phy_ctxt { u16 id; u16 color; @@ -337,8 +351,12 @@ struct iwl_mvm_vif_bf_data { * @beacon_skb: the skb used to hold the AP/GO beacon template * @smps_requests: the SMPS requests of differents parts of the driver, * combined on update to yield the overall request to mac80211. 
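Note on the survey/statistics hunks above: firmware restarts reset the on-air counters, so the driver keeps an accumulator that is folded in before a restart ("keep statistics ticking") and get_survey() reports accumulated-plus-current time converted from microseconds to milliseconds. A hedged userspace sketch of that bookkeeping, with illustrative field names:

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_MSEC 1000ULL

struct radio_stats {
	uint64_t rx_time, tx_time, on_time_rf, on_time_scan;	/* microseconds */
};

/* Fold the running firmware's counters into the accumulator before a
 * restart wipes them, as iwl_mvm_accu_radio_stats() does. */
static void accumulate(struct radio_stats *accu, const struct radio_stats *cur)
{
	accu->rx_time      += cur->rx_time;
	accu->tx_time      += cur->tx_time;
	accu->on_time_rf   += cur->on_time_rf;
	accu->on_time_scan += cur->on_time_scan;
}

/* Report accumulated + current airtime in milliseconds, like get_survey(). */
static uint64_t total_msec(uint64_t accu_usec, uint64_t cur_usec)
{
	return (accu_usec + cur_usec) / USEC_PER_MSEC;
}

int main(void)
{
	struct radio_stats accu = { 0 };
	struct radio_stats cur = { .rx_time = 2500000, .tx_time = 900000 };

	accumulate(&accu, &cur);		/* firmware restarts: counters reset */
	cur = (struct radio_stats){ .rx_time = 500000 };

	printf("rx %llu ms\n", (unsigned long long)total_msec(accu.rx_time, cur.rx_time));
	return 0;
}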
+ * @beacon_stats: beacon statistics, containing the # of received beacons, + * # of received beacons accumulated over FW restart, and the current + * average signal of beacons retrieved from the firmware */ struct iwl_mvm_vif { + struct iwl_mvm *mvm; u16 id; u16 color; u8 ap_sta_id; @@ -354,6 +372,11 @@ struct iwl_mvm_vif { bool ps_disabled; struct iwl_mvm_vif_bf_data bf_data; + struct { + u32 num_beacons, accu_num_beacons; + u8 avg_signal; + } beacon_stats; + u32 ap_beacon_time; enum iwl_tsf_id tsf_id; @@ -396,7 +419,6 @@ struct iwl_mvm_vif { #endif #ifdef CONFIG_IWLWIFI_DEBUGFS - struct iwl_mvm *mvm; struct dentry *dbgfs_dir; struct dentry *dbgfs_slink; struct iwl_dbgfs_pm dbgfs_pm; @@ -593,6 +615,13 @@ struct iwl_mvm { struct mvm_statistics_rx rx_stats; + struct { + u64 rx_time; + u64 tx_time; + u64 on_time_rf; + u64 on_time_scan; + } radio_stats, accu_radio_stats; + u8 queue_to_mac80211[IWL_MAX_HW_QUEUES]; atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES]; @@ -666,6 +695,7 @@ struct iwl_mvm { struct iwl_mvm_frame_stats drv_rx_stats; spinlock_t drv_stats_lock; + u16 dbgfs_rx_phyinfo; #endif struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX]; @@ -687,8 +717,9 @@ struct iwl_mvm { /* -1 for always, 0 for never, >0 for that many times */ s8 restart_fw; - struct work_struct fw_error_dump_wk; - enum iwl_fw_dbg_conf fw_dbg_conf; + u8 fw_dbg_conf; + struct delayed_work fw_dump_wk; + struct iwl_mvm_dump_desc *fw_dump_desc; #ifdef CONFIG_IWLWIFI_LEDS struct led_classdev led; @@ -824,6 +855,7 @@ enum iwl_mvm_status { IWL_MVM_STATUS_IN_D0I3, IWL_MVM_STATUS_ROC_AUX_RUNNING, IWL_MVM_STATUS_D3_RECONFIG, + IWL_MVM_STATUS_DUMPING_FW_LOG, }; static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm) @@ -883,6 +915,12 @@ static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm) return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SCD_CFG; } +static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm) +{ + return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) && + IWL_MVM_BT_COEX_CORUNNING; +} + extern const u8 iwl_mvm_ac_to_tx_fifo[]; struct iwl_rate_info { @@ -951,12 +989,13 @@ static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm) } /* Statistics */ -int iwl_mvm_rx_reply_statistics(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, + struct iwl_rx_packet *pkt); int iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd); +int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear); +void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm); /* NVM */ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic); @@ -1072,13 +1111,6 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, /* Scanning */ int iwl_mvm_scan_size(struct iwl_mvm *mvm); -int iwl_mvm_scan_request(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct cfg80211_scan_request *req); -int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); int iwl_mvm_cancel_scan(struct iwl_mvm *mvm); int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan); @@ -1089,14 +1121,8 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm, int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd); -int 
iwl_mvm_config_sched_scan(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct cfg80211_sched_scan_request *req, - struct ieee80211_scan_ies *ies); int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, struct cfg80211_sched_scan_request *req); -int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, - struct cfg80211_sched_scan_request *req); int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, @@ -1238,7 +1264,6 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_tx_info *info, u8 ac); -bool iwl_mvm_bt_coex_is_ant_avail_old(struct iwl_mvm *mvm, u8 ant); bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm); void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm); int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm); @@ -1352,9 +1377,6 @@ static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue, iwl_mvm_enable_txq(mvm, queue, ssn, &cfg, wdg_timeout); } -/* Assoc status */ -bool iwl_mvm_is_idle(struct iwl_mvm *mvm); - /* Thermal management and CT-kill */ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp); @@ -1405,7 +1427,62 @@ struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error); void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm); -int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, enum iwl_fw_dbg_conf id); -void iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm); +int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id); +int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, + const char *str, size_t len, unsigned int delay); +int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, + struct iwl_mvm_dump_desc *desc, + unsigned int delay); +void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm); +int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, + struct iwl_fw_dbg_trigger_tlv *trigger, + const char *str, size_t len); + +static inline bool +iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig, + struct ieee80211_vif *vif) +{ + u32 trig_vif = le32_to_cpu(trig->vif_type); + + return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || vif->type == trig_vif; +} + +static inline bool +iwl_fw_dbg_trigger_stop_conf_match(struct iwl_mvm *mvm, + struct iwl_fw_dbg_trigger_tlv *trig) +{ + return ((trig->mode & IWL_FW_DBG_TRIGGER_STOP) && + (mvm->fw_dbg_conf == FW_DBG_INVALID || + (BIT(mvm->fw_dbg_conf) & le32_to_cpu(trig->stop_conf_ids)))); +} + +static inline bool +iwl_fw_dbg_trigger_check_stop(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_fw_dbg_trigger_tlv *trig) +{ + if (vif && !iwl_fw_dbg_trigger_vif_match(trig, vif)) + return false; + + return iwl_fw_dbg_trigger_stop_conf_match(mvm, trig); +} + +static inline void +iwl_fw_dbg_trigger_simple_stop(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + enum iwl_fw_dbg_trigger trig, + const char *str, size_t len) +{ + struct iwl_fw_dbg_trigger_tlv *trigger; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, trig)) + return; + + trigger = iwl_fw_dbg_get_trigger(mvm->fw, trig); + if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger)) + return; + + iwl_mvm_fw_dbg_collect_trig(mvm, trigger, str, len); +} #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index 2dffc360..fe40922a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c 
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -237,8 +237,6 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false), - RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false), - RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, true), RX_HANDLER(SCAN_ITERATION_COMPLETE, iwl_mvm_rx_scan_offload_iter_complete_notif, false), RX_HANDLER(SCAN_OFFLOAD_COMPLETE, @@ -311,6 +309,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = { CMD(REPLY_RX_MPDU_CMD), CMD(BEACON_NOTIFICATION), CMD(BEACON_TEMPLATE_CMD), + CMD(STATISTICS_CMD), CMD(STATISTICS_NOTIFICATION), CMD(EOSP_NOTIFICATION), CMD(REDUCE_TX_POWER_CMD), @@ -456,7 +455,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk); INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work); - INIT_WORK(&mvm->fw_error_dump_wk, iwl_mvm_fw_error_dump_wk); + INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk); INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); spin_lock_init(&mvm->d0i3_tx_lock); @@ -504,6 +503,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num; memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv, sizeof(trans->dbg_conf_tlv)); + trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv; /* set up notification wait support */ iwl_notification_wait_init(&mvm->notif_wait); @@ -685,6 +685,38 @@ static void iwl_mvm_async_handlers_wk(struct work_struct *wk) mutex_unlock(&mvm->mutex); } +static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm, + struct iwl_rx_packet *pkt) +{ + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_cmd *cmds_trig; + char buf[32]; + int i; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF)) + return; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF); + cmds_trig = (void *)trig->data; + + if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) + return; + + for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) { + /* don't collect on CMD 0 */ + if (!cmds_trig->cmds[i].cmd_id) + break; + + if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd) + continue; + + memset(buf, 0, sizeof(buf)); + snprintf(buf, sizeof(buf), "CMD 0x%02x received", pkt->hdr.cmd); + iwl_mvm_fw_dbg_collect_trig(mvm, trig, buf, sizeof(buf)); + break; + } +} + static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd) @@ -693,6 +725,8 @@ static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); u8 i; + iwl_mvm_rx_check_trigger(mvm, pkt); + /* * Do the notification wait before RX handlers so * even if the RX handler consumes the RXB we have @@ -827,7 +861,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk) static void iwl_mvm_fw_error_dump_wk(struct work_struct *work) { struct iwl_mvm *mvm = - container_of(work, struct iwl_mvm, fw_error_dump_wk); + container_of(work, struct iwl_mvm, fw_dump_wk.work); if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT)) return; @@ -879,7 +913,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) * can't recover this since we're already half suspended. 
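Note on iwl_mvm_rx_check_trigger() above: it walks the fixed-size command list carried in the FW_DBG_TRIGGER_FW_NOTIF TLV, treats a zero command id as the end marker, and requests a debug data collection when the received command matches. A simplified standalone sketch (the array size and names are assumptions):

#include <stdint.h>
#include <stdio.h>

#define MAX_TRIG_CMDS 16	/* assumed size of the trigger's command list */

struct cmd_trigger {
	uint8_t cmd_id[MAX_TRIG_CMDS];	/* zero-terminated list of command ids */
};

/* Return 1 if the received command id should trigger a debug data dump. */
static int cmd_matches_trigger(const struct cmd_trigger *trig, uint8_t rx_cmd)
{
	for (int i = 0; i < MAX_TRIG_CMDS; i++) {
		if (!trig->cmd_id[i])		/* id 0 terminates the list */
			break;
		if (trig->cmd_id[i] == rx_cmd)
			return 1;
	}
	return 0;
}

int main(void)
{
	struct cmd_trigger trig = { .cmd_id = { 0x9c, 0xa1 } };

	if (cmd_matches_trigger(&trig, 0xa1))
		printf("CMD 0x%02x received\n", 0xa1);	/* string handed to the collect path */
	return 0;
}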
*/ if (!mvm->restart_fw && fw_error) { - schedule_work(&mvm->fw_error_dump_wk); + iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, 0); } else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_reprobe *reprobe; diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c index 5b43616..192b74b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c @@ -175,6 +175,10 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm, cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS); cmd->rxchain_info |= cpu_to_le32(active_cnt << PHY_RX_CHAIN_MIMO_CNT_POS); +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (unlikely(mvm->dbgfs_rx_phyinfo)) + cmd->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo); +#endif cmd->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); } diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c index 2620dd0..33bbdde 100644 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/iwlwifi/mvm/power.c @@ -66,6 +66,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> +#include <linux/etherdevice.h> #include <net/mac80211.h> @@ -491,7 +492,7 @@ void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (memcmp(vif->bss_conf.bssid, mvmvif->uapsd_misbehaving_bssid, ETH_ALEN)) - memset(mvmvif->uapsd_misbehaving_bssid, 0, ETH_ALEN); + eth_zero_addr(mvmvif->uapsd_misbehaving_bssid); } static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac, diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index 194bd1f..6578498 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -134,9 +134,12 @@ enum rs_column_mode { #define MAX_NEXT_COLUMNS 7 #define MAX_COLUMN_CHECKS 3 +struct rs_tx_column; + typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl); + struct iwl_scale_tbl_info *tbl, + const struct rs_tx_column *next_col); struct rs_tx_column { enum rs_column_mode mode; @@ -147,14 +150,19 @@ struct rs_tx_column { }; static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl) + struct iwl_scale_tbl_info *tbl, + const struct rs_tx_column *next_col) { - return iwl_mvm_bt_coex_is_ant_avail(mvm, tbl->rate.ant); + return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant); } static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl) + struct iwl_scale_tbl_info *tbl, + const struct rs_tx_column *next_col) { + struct iwl_mvm_sta *mvmsta; + struct iwl_mvm_vif *mvmvif; + if (!sta->ht_cap.ht_supported) return false; @@ -167,11 +175,17 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) return false; + mvmsta = iwl_mvm_sta_from_mac80211(sta); + mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); + if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p) + return false; + return true; } static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl) + struct iwl_scale_tbl_info *tbl, + const struct rs_tx_column *next_col) { if (!sta->ht_cap.ht_supported) return false; @@ -180,7 +194,8 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, } static bool rs_sgi_allow(struct 
iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl) + struct iwl_scale_tbl_info *tbl, + const struct rs_tx_column *next_col) { struct rs_rate *rate = &tbl->rate; struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; @@ -800,6 +815,8 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, rate->ldpc = true; if (ucode_rate & RATE_MCS_VHT_STBC_MSK) rate->stbc = true; + if (ucode_rate & RATE_MCS_BF_MSK) + rate->bfer = true; rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK; @@ -809,7 +826,9 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, if (nss == 1) { rate->type = LQ_HT_SISO; - WARN_ON_ONCE(!rate->stbc && num_of_ant != 1); + WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1, + "stbc %d bfer %d", + rate->stbc, rate->bfer); } else if (nss == 2) { rate->type = LQ_HT_MIMO2; WARN_ON_ONCE(num_of_ant != 2); @@ -822,7 +841,9 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, if (nss == 1) { rate->type = LQ_VHT_SISO; - WARN_ON_ONCE(!rate->stbc && num_of_ant != 1); + WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1, + "stbc %d bfer %d", + rate->stbc, rate->bfer); } else if (nss == 2) { rate->type = LQ_VHT_MIMO2; WARN_ON_ONCE(num_of_ant != 2); @@ -1001,13 +1022,41 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta, rs_get_lower_rate_in_column(lq_sta, rate); } -/* Simple function to compare two rate scale table types */ -static inline bool rs_rate_match(struct rs_rate *a, - struct rs_rate *b) +/* Check if both rates are identical + * allow_ant_mismatch enables matching a SISO rate on ANT_A or ANT_B + * with a rate indicating STBC/BFER and ANT_AB. + */ +static inline bool rs_rate_equal(struct rs_rate *a, + struct rs_rate *b, + bool allow_ant_mismatch) + +{ + bool ant_match = (a->ant == b->ant) && (a->stbc == b->stbc) && + (a->bfer == b->bfer); + + if (allow_ant_mismatch) { + if (a->stbc || a->bfer) { + WARN_ONCE(a->ant != ANT_AB, "stbc %d bfer %d ant %d", + a->stbc, a->bfer, a->ant); + ant_match |= (b->ant == ANT_A || b->ant == ANT_B); + } else if (b->stbc || b->bfer) { + WARN_ONCE(b->ant != ANT_AB, "stbc %d bfer %d ant %d", + b->stbc, b->bfer, b->ant); + ant_match |= (a->ant == ANT_A || a->ant == ANT_B); + } + } + + return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) && + (a->ldpc == b->ldpc) && (a->index == b->index) && ant_match; +} + +/* Check if both rates share the same column */ +static inline bool rs_rate_column_match(struct rs_rate *a, + struct rs_rate *b) { bool ant_match; - if (a->stbc) + if (a->stbc || a->bfer) ant_match = (b->ant == ANT_A || b->ant == ANT_B); else ant_match = (a->ant == b->ant); @@ -1016,18 +1065,6 @@ static inline bool rs_rate_match(struct rs_rate *a, && ant_match; } -static u32 rs_ch_width_from_mac_flags(enum mac80211_rate_control_flags flags) -{ - if (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) - return RATE_MCS_CHAN_WIDTH_40; - else if (flags & IEEE80211_TX_RC_80_MHZ_WIDTH) - return RATE_MCS_CHAN_WIDTH_80; - else if (flags & IEEE80211_TX_RC_160_MHZ_WIDTH) - return RATE_MCS_CHAN_WIDTH_160; - - return RATE_MCS_CHAN_WIDTH_20; -} - static u8 rs_get_tid(struct ieee80211_hdr *hdr) { u8 tid = IWL_MAX_TID_COUNT; @@ -1048,15 +1085,17 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, { int legacy_success; int retries; - int mac_index, i; + int i; struct iwl_lq_cmd *table; - enum mac80211_rate_control_flags mac_flags; - u32 ucode_rate; - struct rs_rate rate; + u32 lq_hwrate; + struct rs_rate lq_rate, tx_resp_rate; struct iwl_scale_tbl_info *curr_tbl, *other_tbl, 
*tmp_tbl; u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0]; + u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1]; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta; + bool allow_ant_mismatch = mvm->fw->ucode_capa.api[0] & + IWL_UCODE_TLV_API_LQ_SS_PARAMS; /* Treat uninitialized rate scaling data same as non-existing. */ if (!lq_sta) { @@ -1079,39 +1118,6 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, !(info->flags & IEEE80211_TX_STAT_AMPDU)) return; - /* - * Ignore this Tx frame response if its initial rate doesn't match - * that of latest Link Quality command. There may be stragglers - * from a previous Link Quality command, but we're no longer interested - * in those; they're either from the "active" mode while we're trying - * to check "search" mode, or a prior "search" mode after we've moved - * to a new "search" mode (which might become the new "active" mode). - */ - table = &lq_sta->lq; - ucode_rate = le32_to_cpu(table->rs_table[0]); - rs_rate_from_ucode_rate(ucode_rate, info->band, &rate); - if (info->band == IEEE80211_BAND_5GHZ) - rate.index -= IWL_FIRST_OFDM_RATE; - mac_flags = info->status.rates[0].flags; - mac_index = info->status.rates[0].idx; - /* For HT packets, map MCS to PLCP */ - if (mac_flags & IEEE80211_TX_RC_MCS) { - /* Remove # of streams */ - mac_index &= RATE_HT_MCS_RATE_CODE_MSK; - if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE)) - mac_index++; - /* - * mac80211 HT index is always zero-indexed; we need to move - * HT OFDM rates after CCK rates in 2.4 GHz band - */ - if (info->band == IEEE80211_BAND_2GHZ) - mac_index += IWL_FIRST_OFDM_RATE; - } else if (mac_flags & IEEE80211_TX_RC_VHT_MCS) { - mac_index &= RATE_VHT_MCS_RATE_CODE_MSK; - if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE)) - mac_index++; - } - if (time_after(jiffies, (unsigned long)(lq_sta->last_tx + (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) { @@ -1126,21 +1132,24 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, } lq_sta->last_tx = jiffies; + /* Ignore this Tx frame response if its initial rate doesn't match + * that of latest Link Quality command. There may be stragglers + * from a previous Link Quality command, but we're no longer interested + * in those; they're either from the "active" mode while we're trying + * to check "search" mode, or a prior "search" mode after we've moved + * to a new "search" mode (which might become the new "active" mode). 
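Note on the rate-scaling comparison above: rs_rate_equal() checks the two rates field by field, but when the firmware supports the single-stream parameters API the reported antenna may differ from the requested ANT_AB for STBC/BFER frames, so the antenna check is relaxed. An illustrative standalone version with simplified fields:

#include <stdbool.h>
#include <stdio.h>

enum { ANT_A = 1, ANT_B = 2, ANT_AB = ANT_A | ANT_B };

struct rate {
	int type, bw, index;
	bool sgi, ldpc, stbc, bfer;
	int ant;
};

/* allow_ant_mismatch lets a report on ANT_A/ANT_B match an STBC/BFER
 * rate that was requested on ANT_AB. */
static bool rate_equal(const struct rate *a, const struct rate *b,
		       bool allow_ant_mismatch)
{
	bool ant_match = a->ant == b->ant && a->stbc == b->stbc &&
			 a->bfer == b->bfer;

	if (allow_ant_mismatch) {
		if (a->stbc || a->bfer)
			ant_match |= (b->ant == ANT_A || b->ant == ANT_B);
		else if (b->stbc || b->bfer)
			ant_match |= (a->ant == ANT_A || a->ant == ANT_B);
	}

	return a->type == b->type && a->bw == b->bw && a->sgi == b->sgi &&
	       a->ldpc == b->ldpc && a->index == b->index && ant_match;
}

int main(void)
{
	struct rate lq = { .type = 1, .stbc = true, .ant = ANT_AB, .index = 7 };
	struct rate tx = { .type = 1, .stbc = true, .ant = ANT_A,  .index = 7 };

	printf("equal: %d\n", rate_equal(&tx, &lq, true));
	return 0;
}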
+ */ + table = &lq_sta->lq; + lq_hwrate = le32_to_cpu(table->rs_table[0]); + rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate); + rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate); + /* Here we actually compare this rate to the latest LQ command */ - if ((mac_index < 0) || - (rate.sgi != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) || - (rate.bw != rs_ch_width_from_mac_flags(mac_flags)) || - (rate.ant != info->status.antenna) || - (!!(ucode_rate & RATE_MCS_HT_MSK) != - !!(mac_flags & IEEE80211_TX_RC_MCS)) || - (!!(ucode_rate & RATE_MCS_VHT_MSK) != - !!(mac_flags & IEEE80211_TX_RC_VHT_MCS)) || - (!!(ucode_rate & RATE_HT_MCS_GF_MSK) != - !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) || - (rate.index != mac_index)) { + if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) { IWL_DEBUG_RATE(mvm, - "initial rate %d does not match %d (0x%x)\n", - mac_index, rate.index, ucode_rate); + "initial tx resp rate 0x%x does not match 0x%x\n", + tx_resp_hwrate, lq_hwrate); + /* * Since rates mis-match, the last LQ command may have failed. * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with @@ -1168,14 +1177,14 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); } - if (WARN_ON_ONCE(!rs_rate_match(&rate, &curr_tbl->rate))) { + if (WARN_ON_ONCE(!rs_rate_column_match(&lq_rate, &curr_tbl->rate))) { IWL_DEBUG_RATE(mvm, "Neither active nor search matches tx rate\n"); tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE"); tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH"); - rs_dump_rate(mvm, &rate, "ACTUAL"); + rs_dump_rate(mvm, &lq_rate, "ACTUAL"); /* * no matching table found, let's by-pass the data collection @@ -1200,9 +1209,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (info->status.ampdu_ack_len == 0) info->status.ampdu_len = 1; - ucode_rate = le32_to_cpu(table->rs_table[0]); - rs_rate_from_ucode_rate(ucode_rate, info->band, &rate); - rs_collect_tx_data(mvm, lq_sta, curr_tbl, rate.index, + rs_collect_tx_data(mvm, lq_sta, curr_tbl, lq_rate.index, info->status.ampdu_len, info->status.ampdu_ack_len, reduced_txp); @@ -1225,21 +1232,23 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK); /* Collect data for each rate used during failed TX attempts */ for (i = 0; i <= retries; ++i) { - ucode_rate = le32_to_cpu(table->rs_table[i]); - rs_rate_from_ucode_rate(ucode_rate, info->band, &rate); + lq_hwrate = le32_to_cpu(table->rs_table[i]); + rs_rate_from_ucode_rate(lq_hwrate, info->band, + &lq_rate); /* * Only collect stats if retried rate is in the same RS * table as active/search. */ - if (rs_rate_match(&rate, &curr_tbl->rate)) + if (rs_rate_column_match(&lq_rate, &curr_tbl->rate)) tmp_tbl = curr_tbl; - else if (rs_rate_match(&rate, &other_tbl->rate)) + else if (rs_rate_column_match(&lq_rate, + &other_tbl->rate)) tmp_tbl = other_tbl; else continue; - rs_collect_tx_data(mvm, lq_sta, tmp_tbl, rate.index, 1, - i < retries ? 0 : legacy_success, + rs_collect_tx_data(mvm, lq_sta, tmp_tbl, lq_rate.index, + 1, i < retries ? 
0 : legacy_success, reduced_txp); } @@ -1250,7 +1259,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, } } /* The last TX rate is cached in lq_sta; it's set in if/else above */ - lq_sta->last_rate_n_flags = ucode_rate; + lq_sta->last_rate_n_flags = lq_hwrate; IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp); done: /* See if there's a better rate or modulation mode to try. */ @@ -1590,7 +1599,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm, for (j = 0; j < MAX_COLUMN_CHECKS; j++) { allow_func = next_col->checks[j]; - if (allow_func && !allow_func(mvm, sta, tbl)) + if (allow_func && !allow_func(mvm, sta, tbl, next_col)) break; } @@ -2536,6 +2545,7 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, #ifdef CONFIG_MAC80211_DEBUGFS lq_sta->pers.dbg_fixed_rate = 0; lq_sta->pers.dbg_fixed_txp_reduction = TPC_INVALID; + lq_sta->pers.ss_force = RS_SS_FORCE_NONE; #endif lq_sta->pers.chains = 0; memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal)); @@ -3058,19 +3068,21 @@ static void rs_set_lq_ss_params(struct iwl_mvm *mvm, if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) goto out; +#ifdef CONFIG_MAC80211_DEBUGFS /* Check if forcing the decision is configured. * Note that SISO is forced by not allowing STBC or BFER */ - if (lq_sta->ss_force == RS_SS_FORCE_STBC) + if (lq_sta->pers.ss_force == RS_SS_FORCE_STBC) ss_params |= (LQ_SS_STBC_1SS_ALLOWED | LQ_SS_FORCE); - else if (lq_sta->ss_force == RS_SS_FORCE_BFER) + else if (lq_sta->pers.ss_force == RS_SS_FORCE_BFER) ss_params |= (LQ_SS_BFER_ALLOWED | LQ_SS_FORCE); - if (lq_sta->ss_force != RS_SS_FORCE_NONE) { + if (lq_sta->pers.ss_force != RS_SS_FORCE_NONE) { IWL_DEBUG_RATE(mvm, "Forcing single stream Tx decision %d\n", - lq_sta->ss_force); + lq_sta->pers.ss_force); goto out; } +#endif if (lq_sta->stbc_capable) ss_params |= LQ_SS_STBC_1SS_ALLOWED; @@ -3311,6 +3323,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, struct iwl_mvm *mvm; struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); struct rs_rate *rate = &tbl->rate; + u32 ss_params; mvm = lq_sta->pers.drv; buff = kmalloc(2048, GFP_KERNEL); if (!buff) @@ -3357,6 +3370,16 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, lq_sta->lq.agg_frame_cnt_limit); desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc); + ss_params = le32_to_cpu(lq_sta->lq.ss_params); + desc += sprintf(buff+desc, "single stream params: %s%s%s%s\n", + (ss_params & LQ_SS_PARAMS_VALID) ? + "VALID," : "INVALID", + (ss_params & LQ_SS_BFER_ALLOWED) ? + "BFER," : "", + (ss_params & LQ_SS_STBC_1SS_ALLOWED) ? + "STBC," : "", + (ss_params & LQ_SS_FORCE) ? 
+ "FORCE" : ""); desc += sprintf(buff+desc, "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n", lq_sta->lq.initial_rate_index[0], @@ -3533,7 +3556,7 @@ static ssize_t iwl_dbgfs_ss_force_read(struct file *file, }; pos += scnprintf(buf+pos, bufsz-pos, "%s\n", - ss_force_name[lq_sta->ss_force]); + ss_force_name[lq_sta->pers.ss_force]); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } @@ -3544,12 +3567,12 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf, int ret = 0; if (!strncmp("none", buf, 4)) { - lq_sta->ss_force = RS_SS_FORCE_NONE; + lq_sta->pers.ss_force = RS_SS_FORCE_NONE; } else if (!strncmp("siso", buf, 4)) { - lq_sta->ss_force = RS_SS_FORCE_SISO; + lq_sta->pers.ss_force = RS_SS_FORCE_SISO; } else if (!strncmp("stbc", buf, 4)) { if (lq_sta->stbc_capable) { - lq_sta->ss_force = RS_SS_FORCE_STBC; + lq_sta->pers.ss_force = RS_SS_FORCE_STBC; } else { IWL_ERR(mvm, "can't force STBC. peer doesn't support\n"); @@ -3557,7 +3580,7 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf, } } else if (!strncmp("bfer", buf, 4)) { if (lq_sta->bfer_capable) { - lq_sta->ss_force = RS_SS_FORCE_BFER; + lq_sta->pers.ss_force = RS_SS_FORCE_BFER; } else { IWL_ERR(mvm, "can't force BFER. peer doesn't support\n"); diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h index dc4ef3d..e4aa934 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/iwlwifi/mvm/rs.h @@ -170,6 +170,7 @@ struct rs_rate { bool sgi; bool ldpc; bool stbc; + bool bfer; }; @@ -331,14 +332,14 @@ struct iwl_lq_sta { /* tx power reduce for this sta */ int tpc_reduce; - /* force STBC/BFER/SISO for testing */ - enum rs_ss_force_opt ss_force; - /* persistent fields - initialized only once - keep last! */ struct lq_sta_pers { #ifdef CONFIG_MAC80211_DEBUGFS u32 dbg_fixed_rate; u8 dbg_fixed_txp_reduction; + + /* force STBC/BFER/SISO for testing */ + enum rs_ss_force_opt ss_force; #endif u8 chains; s8 chain_signal[IEEE80211_MAX_CHAINS]; diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index f922131..6177e24 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -345,6 +345,25 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_mvm_sta *mvmsta; mvmsta = iwl_mvm_sta_from_mac80211(sta); rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status); + + if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) && + ieee80211_is_beacon(hdr->frame_control)) { + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_low_rssi *rssi_trig; + bool trig_check; + s32 rssi; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, + FW_DBG_TRIGGER_RSSI); + rssi_trig = (void *)trig->data; + rssi = le32_to_cpu(rssi_trig->rssi); + + trig_check = + iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif, + trig); + if (trig_check && rx_status->signal < rssi) + iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL, 0); + } } rcu_read_unlock(); @@ -416,35 +435,43 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, } static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm, - struct iwl_notif_statistics *stats) + struct mvm_statistics_rx *rx_stats) { - /* - * NOTE FW aggregates the statistics - BUT the statistics are cleared - * when the driver issues REPLY_STATISTICS_CMD 0x9c with CLEAR_STATS - * bit set. - */ lockdep_assert_held(&mvm->mutex); - memcpy(&mvm->rx_stats, &stats->rx, sizeof(struct mvm_statistics_rx)); + + mvm->rx_stats = *rx_stats; } struct iwl_mvm_stat_data { - struct iwl_notif_statistics *stats; struct iwl_mvm *mvm; + __le32 mac_id; + __s8 beacon_filter_average_energy; + struct mvm_statistics_general_v8 *general; }; static void iwl_mvm_stat_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_stat_data *data = _data; - struct iwl_notif_statistics *stats = data->stats; struct iwl_mvm *mvm = data->mvm; - int sig = -stats->general.beacon_filter_average_energy; + int sig = -data->beacon_filter_average_energy; int last_event; int thold = vif->bss_conf.cqm_rssi_thold; int hyst = vif->bss_conf.cqm_rssi_hyst; - u16 id = le32_to_cpu(stats->rx.general.mac_id); + u16 id = le32_to_cpu(data->mac_id); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + /* This doesn't need the MAC ID check since it's not taking the + * data copied into the "data" struct, but rather the data from + * the notification directly. + */ + if (data->general) { + mvmvif->beacon_stats.num_beacons = + le32_to_cpu(data->general->beacon_counter[mvmvif->id]); + mvmvif->beacon_stats.avg_signal = + -data->general->beacon_average_energy[mvmvif->id]; + } + if (mvmvif->id != id) return; @@ -500,34 +527,101 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac, } } -/* - * iwl_mvm_rx_statistics - STATISTICS_NOTIFICATION handler - * - * TODO: This handler is implemented partially. 
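Note on the Rx path change above: the new FW_DBG_TRIGGER_RSSI check only looks at beacon frames, and only collects firmware debug data when the trigger's stop conditions match and the measured signal falls below the threshold carried in the trigger TLV. A small illustrative check with simplified types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Decide whether a beacon's signal should start a debug collection,
 * mirroring the check added to iwl_mvm_rx_rx_mpdu(). */
static bool low_rssi_trigger(bool trigger_enabled, bool stop_conf_matches,
			     bool is_beacon, int32_t signal_dbm,
			     int32_t threshold_dbm)
{
	if (!trigger_enabled || !stop_conf_matches || !is_beacon)
		return false;
	return signal_dbm < threshold_dbm;
}

int main(void)
{
	/* e.g. a -80 dBm threshold from the trigger TLV, beacon heard at -86 dBm */
	printf("collect: %d\n", low_rssi_trigger(true, true, true, -86, -80));
	return 0;
}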
- */ -int iwl_mvm_rx_statistics(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static inline void +iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_notif_statistics *stats = (void *)&pkt->data; + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_stats *trig_stats; + u32 trig_offset, trig_thold; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_STATS)) + return; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_STATS); + trig_stats = (void *)trig->data; + + if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) + return; + + trig_offset = le32_to_cpu(trig_stats->stop_offset); + trig_thold = le32_to_cpu(trig_stats->stop_threshold); + + if (WARN_ON_ONCE(trig_offset >= iwl_rx_packet_payload_len(pkt))) + return; + + if (le32_to_cpup((__le32 *) (pkt->data + trig_offset)) < trig_thold) + return; + + iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL, 0); +} + +void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, + struct iwl_rx_packet *pkt) +{ + size_t v8_len = sizeof(struct iwl_notif_statistics_v8); + size_t v10_len = sizeof(struct iwl_notif_statistics_v10); struct iwl_mvm_stat_data data = { - .stats = stats, .mvm = mvm, }; + u32 temperature; + + if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_STATS_V10) { + struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data; + + if (iwl_rx_packet_payload_len(pkt) != v10_len) + goto invalid; + + temperature = le32_to_cpu(stats->general.radio_temperature); + data.mac_id = stats->rx.general.mac_id; + data.beacon_filter_average_energy = + stats->general.beacon_filter_average_energy; + + iwl_mvm_update_rx_statistics(mvm, &stats->rx); + + mvm->radio_stats.rx_time = le64_to_cpu(stats->general.rx_time); + mvm->radio_stats.tx_time = le64_to_cpu(stats->general.tx_time); + mvm->radio_stats.on_time_rf = + le64_to_cpu(stats->general.on_time_rf); + mvm->radio_stats.on_time_scan = + le64_to_cpu(stats->general.on_time_scan); + + data.general = &stats->general; + } else { + struct iwl_notif_statistics_v8 *stats = (void *)&pkt->data; + + if (iwl_rx_packet_payload_len(pkt) != v8_len) + goto invalid; + + temperature = le32_to_cpu(stats->general.radio_temperature); + data.mac_id = stats->rx.general.mac_id; + data.beacon_filter_average_energy = + stats->general.beacon_filter_average_energy; + + iwl_mvm_update_rx_statistics(mvm, &stats->rx); + } + + iwl_mvm_rx_stats_check_trigger(mvm, pkt); /* Only handle rx statistics temperature changes if async temp * notifications are not supported */ if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_ASYNC_DTM)) - iwl_mvm_tt_temp_changed(mvm, - le32_to_cpu(stats->general.radio_temperature)); - - iwl_mvm_update_rx_statistics(mvm, stats); + iwl_mvm_tt_temp_changed(mvm, temperature); ieee80211_iterate_active_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_stat_iterator, &data); + return; + invalid: + IWL_ERR(mvm, "received invalid statistics size (%d)!\n", + iwl_rx_packet_payload_len(pkt)); +} + +int iwl_mvm_rx_statistics(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) +{ + iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb)); return 0; } diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 7e9aa3c..f0946b5 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -82,6 +82,7 @@ struct iwl_mvm_scan_params { struct _dwell { u16 passive; u16 active; + u16 
fragmented; } dwell[IEEE80211_NUM_BANDS]; }; @@ -191,101 +192,6 @@ static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm, return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10; } -static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd, - struct cfg80211_scan_request *req, - bool basic_ssid, - struct iwl_mvm_scan_params *params) -{ - struct iwl_scan_channel *chan = (struct iwl_scan_channel *) - (cmd->data + le16_to_cpu(cmd->tx_cmd.len)); - int i; - int type = BIT(req->n_ssids) - 1; - enum ieee80211_band band = req->channels[0]->band; - - if (!basic_ssid) - type |= BIT(req->n_ssids); - - for (i = 0; i < cmd->channel_count; i++) { - chan->channel = cpu_to_le16(req->channels[i]->hw_value); - chan->type = cpu_to_le32(type); - if (req->channels[i]->flags & IEEE80211_CHAN_NO_IR) - chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE); - chan->active_dwell = cpu_to_le16(params->dwell[band].active); - chan->passive_dwell = cpu_to_le16(params->dwell[band].passive); - chan->iteration_count = cpu_to_le16(1); - chan++; - } -} - -/* - * Fill in probe request with the following parameters: - * TA is our vif HW address, which mac80211 ensures we have. - * Packet is broadcasted, so this is both SA and DA. - * The probe request IE is made out of two: first comes the most prioritized - * SSID if a directed scan is requested. Second comes whatever extra - * information was given to us as the scan request IE. - */ -static u16 iwl_mvm_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta, - int n_ssids, const u8 *ssid, int ssid_len, - const u8 *band_ie, int band_ie_len, - const u8 *common_ie, int common_ie_len, - int left) -{ - int len = 0; - u8 *pos = NULL; - - /* Make sure there is enough space for the probe request, - * two mandatory IEs and the data */ - left -= 24; - if (left < 0) - return 0; - - frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); - eth_broadcast_addr(frame->da); - memcpy(frame->sa, ta, ETH_ALEN); - eth_broadcast_addr(frame->bssid); - frame->seq_ctrl = 0; - - len += 24; - - /* for passive scans, no need to fill anything */ - if (n_ssids == 0) - return (u16)len; - - /* points to the payload of the request */ - pos = &frame->u.probe_req.variable[0]; - - /* fill in our SSID IE */ - left -= ssid_len + 2; - if (left < 0) - return 0; - *pos++ = WLAN_EID_SSID; - *pos++ = ssid_len; - if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */ - memcpy(pos, ssid, ssid_len); - pos += ssid_len; - } - - len += ssid_len + 2; - - if (WARN_ON(left < band_ie_len + common_ie_len)) - return len; - - if (band_ie && band_ie_len) { - memcpy(pos, band_ie, band_ie_len); - pos += band_ie_len; - len += band_ie_len; - } - - if (common_ie && common_ie_len) { - memcpy(pos, common_ie, common_ie_len); - pos += common_ie_len; - len += common_ie_len; - } - - return (u16)len; -} - static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac, struct ieee80211_vif *vif) { @@ -325,7 +231,7 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm, * If there is more than one active interface make * passive scan more fragmented. */ - frag_passive_dwell = (global_cnt < 2) ? 
40 : 20; + frag_passive_dwell = 40; params->max_out_time = frag_passive_dwell; } else { params->suspend_time = 120; @@ -358,10 +264,10 @@ not_bound: for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { if (params->passive_fragmented) - params->dwell[band].passive = frag_passive_dwell; - else - params->dwell[band].passive = - iwl_mvm_get_passive_dwell(mvm, band); + params->dwell[band].fragmented = frag_passive_dwell; + + params->dwell[band].passive = iwl_mvm_get_passive_dwell(mvm, + band); params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band, n_ssids); } @@ -379,20 +285,11 @@ static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm, { int max_probe_len; - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) - max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE; - else - max_probe_len = mvm->fw->ucode_capa.max_probe_length; + max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE; /* we create the 802.11 header and SSID element */ max_probe_len -= 24 + 2; - /* basic ssid is added only for hw_scan with and old api */ - if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID) && - !(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) && - !is_sched_scan) - max_probe_len -= 32; - /* DS parameter set element is added on 2.4GHZ band if required */ if (iwl_mvm_rrm_scan_needed(mvm)) max_probe_len -= 3; @@ -404,9 +301,6 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan) { int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm, is_sched_scan); - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) - return max_ie_len; - /* TODO: [BUG] This function should return the maximum allowed size of * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs * in the same command. So the correct implementation of this function @@ -420,129 +314,6 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan) return max_ie_len; } -int iwl_mvm_scan_request(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct cfg80211_scan_request *req) -{ - struct iwl_host_cmd hcmd = { - .id = SCAN_REQUEST_CMD, - .len = { 0, }, - .data = { mvm->scan_cmd, }, - .dataflags = { IWL_HCMD_DFL_NOCOPY, }, - }; - struct iwl_scan_cmd *cmd = mvm->scan_cmd; - int ret; - u32 status; - int ssid_len = 0; - u8 *ssid = NULL; - bool basic_ssid = !(mvm->fw->ucode_capa.flags & - IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID); - struct iwl_mvm_scan_params params = {}; - - lockdep_assert_held(&mvm->mutex); - - /* we should have failed registration if scan_cmd was NULL */ - if (WARN_ON(mvm->scan_cmd == NULL)) - return -ENOMEM; - - IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n"); - mvm->scan_status = IWL_MVM_SCAN_OS; - memset(cmd, 0, ksize(cmd)); - - cmd->channel_count = (u8)req->n_channels; - cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME); - cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH); - cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm); - - iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, req->flags, ¶ms); - cmd->max_out_time = cpu_to_le32(params.max_out_time); - cmd->suspend_time = cpu_to_le32(params.suspend_time); - if (params.passive_fragmented) - cmd->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN; - - cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band); - cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP | - MAC_FILTER_IN_BEACON); - - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) - cmd->type = cpu_to_le32(SCAN_TYPE_DISCOVERY_FORCED); - else - cmd->type = cpu_to_le32(SCAN_TYPE_FORCED); - - cmd->repeats = cpu_to_le32(1); - - 
/* - * If the user asked for passive scan, don't change to active scan if - * you see any activity on the channel - remain passive. - */ - if (req->n_ssids > 0) { - cmd->passive2active = cpu_to_le16(1); - cmd->scan_flags |= SCAN_FLAGS_PASSIVE2ACTIVE; - if (basic_ssid) { - ssid = req->ssids[0].ssid; - ssid_len = req->ssids[0].ssid_len; - } - } else { - cmd->passive2active = 0; - cmd->scan_flags &= ~SCAN_FLAGS_PASSIVE2ACTIVE; - } - - iwl_mvm_scan_fill_ssids(cmd->direct_scan, req->ssids, req->n_ssids, - basic_ssid ? 1 : 0); - - cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL | - 3 << TX_CMD_FLG_BT_PRIO_POS); - - cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id; - cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); - cmd->tx_cmd.rate_n_flags = - iwl_mvm_scan_rate_n_flags(mvm, req->channels[0]->band, - req->no_cck); - - cmd->tx_cmd.len = - cpu_to_le16(iwl_mvm_fill_probe_req( - (struct ieee80211_mgmt *)cmd->data, - vif->addr, - req->n_ssids, ssid, ssid_len, - req->ie, req->ie_len, NULL, 0, - mvm->fw->ucode_capa.max_probe_length)); - - iwl_mvm_scan_fill_channels(cmd, req, basic_ssid, ¶ms); - - cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) + - le16_to_cpu(cmd->tx_cmd.len) + - (cmd->channel_count * sizeof(struct iwl_scan_channel))); - hcmd.len[0] = le16_to_cpu(cmd->len); - - status = SCAN_RESPONSE_OK; - ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &status); - if (!ret && status == SCAN_RESPONSE_OK) { - IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n"); - } else { - /* - * If the scan failed, it usually means that the FW was unable - * to allocate the time events. Warn on it, but maybe we - * should try to send the command again with different params. - */ - IWL_ERR(mvm, "Scan failed! status 0x%x ret %d\n", - status, ret); - mvm->scan_status = IWL_MVM_SCAN_NONE; - ret = -EIO; - } - return ret; -} - -int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_cmd_response *resp = (void *)pkt->data; - - IWL_DEBUG_SCAN(mvm, "Scan response received. 
status 0x%x\n", - le32_to_cpu(resp->status)); - return 0; -} - int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd) @@ -556,130 +327,25 @@ int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm, return 0; } -int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_scan_complete_notif *notif = (void *)pkt->data; - - lockdep_assert_held(&mvm->mutex); - - IWL_DEBUG_SCAN(mvm, "Scan complete: status=0x%x scanned channels=%d\n", - notif->status, notif->scanned_channels); - - if (mvm->scan_status == IWL_MVM_SCAN_OS) - mvm->scan_status = IWL_MVM_SCAN_NONE; - ieee80211_scan_completed(mvm->hw, notif->status != SCAN_COMP_STATUS_OK); - - iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); - - return 0; -} - int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd) { - struct iwl_rx_packet *pkt = rxb_addr(rxb); - - if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) && - !(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) { - struct iwl_sched_scan_results *notif = (void *)pkt->data; - - if (!(notif->client_bitmap & SCAN_CLIENT_SCHED_SCAN)) - return 0; - } - IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n"); ieee80211_sched_scan_results(mvm->hw); return 0; } -static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait, - struct iwl_rx_packet *pkt, void *data) -{ - struct iwl_mvm *mvm = - container_of(notif_wait, struct iwl_mvm, notif_wait); - struct iwl_scan_complete_notif *notif; - u32 *resp; - - switch (pkt->hdr.cmd) { - case SCAN_ABORT_CMD: - resp = (void *)pkt->data; - if (*resp == CAN_ABORT_STATUS) { - IWL_DEBUG_SCAN(mvm, - "Scan can be aborted, wait until completion\n"); - return false; - } - - /* - * If scan cannot be aborted, it means that we had a - * SCAN_COMPLETE_NOTIFICATION in the pipe and it called - * ieee80211_scan_completed already. 
- */ - IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n", - *resp); - return true; - - case SCAN_COMPLETE_NOTIFICATION: - notif = (void *)pkt->data; - IWL_DEBUG_SCAN(mvm, "Scan aborted: status 0x%x\n", - notif->status); - return true; - - default: - WARN_ON(1); - return false; - }; -} - -static int iwl_mvm_cancel_regular_scan(struct iwl_mvm *mvm) -{ - struct iwl_notification_wait wait_scan_abort; - static const u8 scan_abort_notif[] = { SCAN_ABORT_CMD, - SCAN_COMPLETE_NOTIFICATION }; - int ret; - - iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort, - scan_abort_notif, - ARRAY_SIZE(scan_abort_notif), - iwl_mvm_scan_abort_notif, NULL); - - ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, 0, 0, NULL); - if (ret) { - IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret); - /* mac80211's state will be cleaned in the nic_restart flow */ - goto out_remove_notif; - } - - return iwl_wait_notification(&mvm->notif_wait, &wait_scan_abort, HZ); - -out_remove_notif: - iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort); - return ret; -} - int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd) { struct iwl_rx_packet *pkt = rxb_addr(rxb); - u8 status, ebs_status; + struct iwl_periodic_scan_complete *scan_notif; - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) { - struct iwl_periodic_scan_complete *scan_notif; + scan_notif = (void *)pkt->data; - scan_notif = (void *)pkt->data; - status = scan_notif->status; - ebs_status = scan_notif->ebs_status; - } else { - struct iwl_scan_offload_complete *scan_notif; - - scan_notif = (void *)pkt->data; - status = scan_notif->status; - ebs_status = scan_notif->ebs_status; - } /* scan status must be locked for proper checking */ lockdep_assert_held(&mvm->mutex); @@ -687,9 +353,9 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm, "%s completed, status %s, EBS status %s\n", mvm->scan_status == IWL_MVM_SCAN_SCHED ? "Scheduled scan" : "Scan", - status == IWL_SCAN_OFFLOAD_COMPLETED ? + scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ? "completed" : "aborted", - ebs_status == IWL_SCAN_EBS_SUCCESS ? + scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ? 
"success" : "failed"); @@ -700,64 +366,16 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm, } else if (mvm->scan_status == IWL_MVM_SCAN_OS) { mvm->scan_status = IWL_MVM_SCAN_NONE; ieee80211_scan_completed(mvm->hw, - status == IWL_SCAN_OFFLOAD_ABORTED); + scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED); iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); } - if (ebs_status) + if (scan_notif->ebs_status) mvm->last_ebs_successful = false; return 0; } -static void iwl_scan_offload_build_tx_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_scan_ies *ies, - enum ieee80211_band band, - struct iwl_tx_cmd *cmd, - u8 *data) -{ - u16 cmd_len; - - cmd->tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL); - cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); - cmd->sta_id = mvm->aux_sta.sta_id; - - cmd->rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, band, false); - - cmd_len = iwl_mvm_fill_probe_req((struct ieee80211_mgmt *)data, - vif->addr, - 1, NULL, 0, - ies->ies[band], ies->len[band], - ies->common_ies, ies->common_ie_len, - SCAN_OFFLOAD_PROBE_REQ_SIZE); - cmd->len = cpu_to_le16(cmd_len); -} - -static void iwl_build_scan_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct cfg80211_sched_scan_request *req, - struct iwl_scan_offload_cmd *scan, - struct iwl_mvm_scan_params *params) -{ - scan->channel_count = req->n_channels; - scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME); - scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH); - scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT; - scan->rx_chain = iwl_mvm_scan_rx_chain(mvm); - - scan->max_out_time = cpu_to_le32(params->max_out_time); - scan->suspend_time = cpu_to_le32(params->suspend_time); - - scan->filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP | - MAC_FILTER_IN_BEACON); - scan->scan_type = cpu_to_le32(SCAN_TYPE_BACKGROUND); - scan->rep_count = cpu_to_le32(1); - - if (params->passive_fragmented) - scan->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN; -} - static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list) { int i; @@ -815,127 +433,6 @@ static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req, } } -static void iwl_build_channel_cfg(struct iwl_mvm *mvm, - struct cfg80211_sched_scan_request *req, - u8 *channels_buffer, - enum ieee80211_band band, - int *head, - u32 ssid_bitmap, - struct iwl_mvm_scan_params *params) -{ - u32 n_channels = mvm->fw->ucode_capa.n_scan_channels; - __le32 *type = (__le32 *)channels_buffer; - __le16 *channel_number = (__le16 *)(type + n_channels); - __le16 *iter_count = channel_number + n_channels; - __le32 *iter_interval = (__le32 *)(iter_count + n_channels); - u8 *active_dwell = (u8 *)(iter_interval + n_channels); - u8 *passive_dwell = active_dwell + n_channels; - int i, index = 0; - - for (i = 0; i < req->n_channels; i++) { - struct ieee80211_channel *chan = req->channels[i]; - - if (chan->band != band) - continue; - - index = *head; - (*head)++; - - channel_number[index] = cpu_to_le16(chan->hw_value); - active_dwell[index] = params->dwell[band].active; - passive_dwell[index] = params->dwell[band].passive; - - iter_count[index] = cpu_to_le16(1); - iter_interval[index] = 0; - - if (!(chan->flags & IEEE80211_CHAN_NO_IR)) - type[index] |= - cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE); - - type[index] |= cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL | - IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL); - - if (chan->flags & IEEE80211_CHAN_NO_HT40) - type[index] |= - cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW); - - /* scan for all SSIDs from req->ssids */ - 
type[index] |= cpu_to_le32(ssid_bitmap); - } -} - -int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct cfg80211_sched_scan_request *req, - struct ieee80211_scan_ies *ies) -{ - int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels; - int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; - int head = 0; - u32 ssid_bitmap; - int cmd_len; - int ret; - u8 *probes; - bool basic_ssid = !(mvm->fw->ucode_capa.flags & - IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID); - - struct iwl_scan_offload_cfg *scan_cfg; - struct iwl_host_cmd cmd = { - .id = SCAN_OFFLOAD_CONFIG_CMD, - }; - struct iwl_mvm_scan_params params = {}; - - lockdep_assert_held(&mvm->mutex); - - cmd_len = sizeof(struct iwl_scan_offload_cfg) + - mvm->fw->ucode_capa.n_scan_channels * IWL_SCAN_CHAN_SIZE + - 2 * SCAN_OFFLOAD_PROBE_REQ_SIZE; - - scan_cfg = kzalloc(cmd_len, GFP_KERNEL); - if (!scan_cfg) - return -ENOMEM; - - probes = scan_cfg->data + - mvm->fw->ucode_capa.n_scan_channels * IWL_SCAN_CHAN_SIZE; - - iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, 0, &params); - iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd, &params); - scan_cfg->scan_cmd.len = cpu_to_le16(cmd_len); - - iwl_scan_offload_build_ssid(req, scan_cfg->scan_cmd.direct_scan, - &ssid_bitmap, basic_ssid); - /* build tx frames for supported bands */ - if (band_2ghz) { - iwl_scan_offload_build_tx_cmd(mvm, vif, ies, - IEEE80211_BAND_2GHZ, - &scan_cfg->scan_cmd.tx_cmd[0], - probes); - iwl_build_channel_cfg(mvm, req, scan_cfg->data, - IEEE80211_BAND_2GHZ, &head, - ssid_bitmap, &params); - } - if (band_5ghz) { - iwl_scan_offload_build_tx_cmd(mvm, vif, ies, - IEEE80211_BAND_5GHZ, - &scan_cfg->scan_cmd.tx_cmd[1], - probes + - SCAN_OFFLOAD_PROBE_REQ_SIZE); - iwl_build_channel_cfg(mvm, req, scan_cfg->data, - IEEE80211_BAND_5GHZ, &head, - ssid_bitmap, &params); - } - - cmd.data[0] = scan_cfg; - cmd.len[0] = cmd_len; - cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; - - IWL_DEBUG_SCAN(mvm, "Sending scheduled scan config\n"); - - ret = iwl_mvm_send_cmd(mvm, &cmd); - kfree(scan_cfg); - return ret; -} - int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, struct cfg80211_sched_scan_request *req) { @@ -1018,33 +515,6 @@ static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm, return true; } -int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, - struct cfg80211_sched_scan_request *req) -{ - struct iwl_scan_offload_req scan_req = { - .watchdog = IWL_SCHED_SCAN_WATCHDOG, - - .schedule_line[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS, - .schedule_line[0].delay = cpu_to_le16(req->interval / 1000), - .schedule_line[0].full_scan_mul = 1, - - .schedule_line[1].iterations = 0xff, - .schedule_line[1].delay = cpu_to_le16(req->interval / 1000), - .schedule_line[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER, - }; - - if (iwl_mvm_scan_pass_all(mvm, req)) - scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL); - - if (mvm->last_ebs_successful && - mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) - scan_req.flags |= - cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE); - - return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, 0, - sizeof(scan_req), &scan_req); -} - int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, @@ -1057,21 +527,12 @@ int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm, if (ret) return ret; ret = iwl_mvm_sched_scan_umac(mvm, vif, req, ies); - } else if ((mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) { - mvm->scan_status = 
IWL_MVM_SCAN_SCHED; - ret = iwl_mvm_config_sched_scan_profiles(mvm, req); - if (ret) - return ret; - ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies); } else { mvm->scan_status = IWL_MVM_SCAN_SCHED; - ret = iwl_mvm_config_sched_scan(mvm, vif, req, ies); - if (ret) - return ret; ret = iwl_mvm_config_sched_scan_profiles(mvm, req); if (ret) return ret; - ret = iwl_mvm_sched_scan_start(mvm, req); + ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies); } return ret; @@ -1088,9 +549,7 @@ static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm) /* Exit instantly with error when device is not ready * to receive scan abort command or it does not perform * scheduled scan currently */ - if (mvm->scan_status != IWL_MVM_SCAN_SCHED && - (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) || - mvm->scan_status != IWL_MVM_SCAN_OS)) + if (mvm->scan_status == IWL_MVM_SCAN_NONE) return -EIO; ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status); @@ -1131,13 +590,6 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) if (iwl_mvm_is_radio_killed(mvm)) goto out; - if (mvm->scan_status != IWL_MVM_SCAN_SCHED && - (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) || - mvm->scan_status != IWL_MVM_SCAN_OS)) { - IWL_DEBUG_SCAN(mvm, "No scan to stop\n"); - return 0; - } - iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done, scan_done_notif, ARRAY_SIZE(scan_done_notif), @@ -1317,7 +769,7 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm, cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive; if (params->passive_fragmented) cmd->fragmented_dwell = - params->dwell[IEEE80211_BAND_2GHZ].passive; + params->dwell[IEEE80211_BAND_2GHZ].fragmented; cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm); cmd->max_out_time = cpu_to_le32(params->max_out_time); cmd->suspend_time = cpu_to_le32(params->suspend_time); @@ -1580,9 +1032,7 @@ int iwl_mvm_cancel_scan(struct iwl_mvm *mvm) return 0; } - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) - return iwl_mvm_scan_offload_stop(mvm, true); - return iwl_mvm_cancel_regular_scan(mvm); + return iwl_mvm_scan_offload_stop(mvm, true); } /* UMAC scan API */ @@ -1765,7 +1215,7 @@ iwl_mvm_build_generic_umac_scan_cmd(struct iwl_mvm *mvm, cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive; if (params->passive_fragmented) cmd->fragmented_dwell = - params->dwell[IEEE80211_BAND_2GHZ].passive; + params->dwell[IEEE80211_BAND_2GHZ].fragmented; cmd->max_out_time = cpu_to_le32(params->max_out_time); cmd->suspend_time = cpu_to_le32(params->suspend_time); cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); @@ -2159,14 +1609,8 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm) mvm->fw->ucode_capa.n_scan_channels + sizeof(struct iwl_scan_req_umac_tail); - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) - return sizeof(struct iwl_scan_req_unified_lmac) + - sizeof(struct iwl_scan_channel_cfg_lmac) * - mvm->fw->ucode_capa.n_scan_channels + - sizeof(struct iwl_scan_probe_req); - - return sizeof(struct iwl_scan_cmd) + - mvm->fw->ucode_capa.max_probe_length + - mvm->fw->ucode_capa.n_scan_channels * - sizeof(struct iwl_scan_channel); + return sizeof(struct iwl_scan_req_unified_lmac) + + sizeof(struct iwl_scan_channel_cfg_lmac) * + mvm->fw->ucode_capa.n_scan_channels + + sizeof(struct iwl_scan_probe_req); } diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 07304e1..7906b97 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ 
b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -664,6 +664,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, info->status.rates[0].count = tx_resp->failure_frame + 1; iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate), info); + info->status.status_driver_data[1] = + (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate); /* Single frame failure in an AMPDU queue => send BAR */ if (txq_id >= mvm->first_agg_queue && @@ -909,6 +911,8 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info, info->status.tx_time = tid_data->tx_time; info->status.status_driver_data[0] = (void *)(uintptr_t)tid_data->reduced_tpc; + info->status.status_driver_data[1] = + (void *)(uintptr_t)tid_data->rate_n_flags; } int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c index 8decf99..2b9de63 100644 --- a/drivers/net/wireless/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/iwlwifi/mvm/utils.c @@ -332,7 +332,7 @@ static const char *desc_lookup(u32 num) * read with u32-sized accesses, any members with a different size * need to be ordered correctly though! */ -struct iwl_error_event_table { +struct iwl_error_event_table_v1 { u32 valid; /* (nonzero) valid, (0) log is empty */ u32 error_id; /* type of error */ u32 pc; /* program counter */ @@ -377,7 +377,55 @@ struct iwl_error_event_table { u32 u_timestamp; /* indicate when the date and time of the * compilation */ u32 flow_handler; /* FH read/write pointers, RX credit */ -} __packed; +} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */; + +struct iwl_error_event_table { + u32 valid; /* (nonzero) valid, (0) log is empty */ + u32 error_id; /* type of error */ + u32 pc; /* program counter */ + u32 blink1; /* branch link */ + u32 blink2; /* branch link */ + u32 ilink1; /* interrupt link */ + u32 ilink2; /* interrupt link */ + u32 data1; /* error-specific data */ + u32 data2; /* error-specific data */ + u32 data3; /* error-specific data */ + u32 bcon_time; /* beacon timer */ + u32 tsf_low; /* network timestamp function timer */ + u32 tsf_hi; /* network timestamp function timer */ + u32 gp1; /* GP1 timer register */ + u32 gp2; /* GP2 timer register */ + u32 gp3; /* GP3 timer register */ + u32 major; /* uCode version major */ + u32 minor; /* uCode version minor */ + u32 hw_ver; /* HW Silicon version */ + u32 brd_ver; /* HW board version */ + u32 log_pc; /* log program counter */ + u32 frame_ptr; /* frame pointer */ + u32 stack_ptr; /* stack pointer */ + u32 hcmd; /* last host command header */ + u32 isr0; /* isr status register LMPM_NIC_ISR0: + * rxtx_flag */ + u32 isr1; /* isr status register LMPM_NIC_ISR1: + * host_flag */ + u32 isr2; /* isr status register LMPM_NIC_ISR2: + * enc_flag */ + u32 isr3; /* isr status register LMPM_NIC_ISR3: + * time_flag */ + u32 isr4; /* isr status register LMPM_NIC_ISR4: + * wico interrupt */ + u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */ + u32 wait_event; /* wait event() caller address */ + u32 l2p_control; /* L2pControlField */ + u32 l2p_duration; /* L2pDurationField */ + u32 l2p_mhvalid; /* L2pMhValidBits */ + u32 l2p_addr_match; /* L2pAddrMatchStat */ + u32 lmpm_pmg_sel; /* indicate which clocks are turned on + * (LMPM_PMG_SEL) */ + u32 u_timestamp; /* indicate when the date and time of the + * compilation */ + u32 flow_handler; /* FH read/write pointers, RX credit */ +} __packed /* LOG_ERROR_TABLE_API_S_VER_2 */; /* * UMAC error struct - relevant starting from family 8000 
chip. @@ -396,11 +444,11 @@ struct iwl_umac_error_event_table { u32 data1; /* error-specific data */ u32 data2; /* error-specific data */ u32 data3; /* error-specific data */ - u32 umac_fw_ver; /* UMAC version */ - u32 umac_fw_api_ver; /* UMAC FW API ver */ + u32 umac_major; + u32 umac_minor; u32 frame_pointer; /* core register 27*/ u32 stack_pointer; /* core register 28 */ - u32 cmd_header; /* latest host cmd sent to UMAC */ + u32 cmd_header; /* latest host cmd sent to UMAC */ u32 nic_isr_pref; /* ISR status register */ } __packed; @@ -441,18 +489,18 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1); IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2); IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3); - IWL_ERR(mvm, "0x%08X | umac version\n", table.umac_fw_ver); - IWL_ERR(mvm, "0x%08X | umac api version\n", table.umac_fw_api_ver); + IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major); + IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor); IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer); IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer); IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header); IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref); } -void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) +static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm) { struct iwl_trans *trans = mvm->trans; - struct iwl_error_event_table table; + struct iwl_error_event_table_v1 table; u32 base; base = mvm->error_event_table; @@ -489,7 +537,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) table.data1, table.data2, table.data3, table.blink1, table.blink2, table.ilink1, table.ilink2, table.bcon_time, table.gp1, - table.gp2, table.gp3, table.ucode_ver, + table.gp2, table.gp3, table.ucode_ver, 0, table.hw_ver, table.brd_ver); IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, desc_lookup(table.error_id)); @@ -530,6 +578,92 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) iwl_mvm_dump_umac_error_log(mvm); } +void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) +{ + struct iwl_trans *trans = mvm->trans; + struct iwl_error_event_table table; + u32 base; + + if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)) { + iwl_mvm_dump_nic_error_log_old(mvm); + return; + } + + base = mvm->error_event_table; + if (mvm->cur_ucode == IWL_UCODE_INIT) { + if (!base) + base = mvm->fw->init_errlog_ptr; + } else { + if (!base) + base = mvm->fw->inst_errlog_ptr; + } + + if (base < 0x800000) { + IWL_ERR(mvm, + "Not valid error log pointer 0x%08X for %s uCode\n", + base, + (mvm->cur_ucode == IWL_UCODE_INIT) + ? 
"Init" : "RT"); + return; + } + + iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); + + if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { + IWL_ERR(trans, "Start IWL Error Log Dump:\n"); + IWL_ERR(trans, "Status: 0x%08lX, count: %d\n", + mvm->status, table.valid); + } + + /* Do not change this output - scripts rely on it */ + + IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version); + + trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, + table.data1, table.data2, table.data3, + table.blink1, table.blink2, table.ilink1, + table.ilink2, table.bcon_time, table.gp1, + table.gp2, table.gp3, table.major, + table.minor, table.hw_ver, table.brd_ver); + IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, + desc_lookup(table.error_id)); + IWL_ERR(mvm, "0x%08X | uPc\n", table.pc); + IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1); + IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2); + IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1); + IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2); + IWL_ERR(mvm, "0x%08X | data1\n", table.data1); + IWL_ERR(mvm, "0x%08X | data2\n", table.data2); + IWL_ERR(mvm, "0x%08X | data3\n", table.data3); + IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time); + IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low); + IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi); + IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1); + IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2); + IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3); + IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major); + IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor); + IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver); + IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver); + IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd); + IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0); + IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1); + IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2); + IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3); + IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4); + IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref); + IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event); + IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control); + IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration); + IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid); + IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match); + IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); + IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp); + IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler); + + if (mvm->support_umac_log) + iwl_mvm_dump_umac_error_log(mvm); +} void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, unsigned int wdg_timeout) @@ -643,6 +777,40 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ieee80211_request_smps(vif, smps_mode); } +int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear) +{ + struct iwl_statistics_cmd scmd = { + .flags = clear ? 
cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0, + }; + struct iwl_host_cmd cmd = { + .id = STATISTICS_CMD, + .len[0] = sizeof(scmd), + .data[0] = &scmd, + .flags = CMD_WANT_SKB, + }; + int ret; + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) + return ret; + + iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt); + iwl_free_resp(&cmd); + + if (clear) + iwl_mvm_accu_radio_stats(mvm); + + return 0; +} + +void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm) +{ + mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time; + mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time; + mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf; + mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan; +} + static void iwl_mvm_diversity_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { @@ -717,25 +885,6 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm) return result; } -static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) -{ - bool *idle = _data; - - if (!vif->bss_conf.idle) - *idle = false; -} - -bool iwl_mvm_is_idle(struct iwl_mvm *mvm) -{ - bool idle = true; - - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_idle_iter, &idle); - - return idle; -} - struct iwl_bss_iter_data { struct ieee80211_vif *vif; bool error; diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 69935aa..f31a941 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -898,6 +898,9 @@ static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans, IWL_DEBUG_FW(trans, "working with %s CPU\n", image->is_dual_cpus ? "Dual" : "Single"); + if (trans->dbg_dest_tlv) + iwl_pcie_apply_destination(trans); + /* configure the ucode to be ready to get the secured image */ /* release CPU reset */ iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT); @@ -914,9 +917,6 @@ static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans, if (ret) return ret; - if (trans->dbg_dest_tlv) - iwl_pcie_apply_destination(trans); - /* wait for image verification to complete */ ret = iwl_poll_prph_bit(trans, LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0, LMPM_SECURE_BOOT_STATUS_SUCCESS, diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c index cc6a0a5..26cbf1d 100644 --- a/drivers/net/wireless/libertas/debugfs.c +++ b/drivers/net/wireless/libertas/debugfs.c @@ -742,8 +742,7 @@ void lbs_debugfs_init(void) void lbs_debugfs_remove(void) { - if (lbs_dir) - debugfs_remove(lbs_dir); + debugfs_remove(lbs_dir); } void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev) diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 569b64e..8079560 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -667,7 +667,7 @@ static int lbs_setup_firmware(struct lbs_private *priv) lbs_deb_enter(LBS_DEB_FW); /* Read MAC address from firmware */ - memset(priv->current_addr, 0xff, ETH_ALEN); + eth_broadcast_addr(priv->current_addr); ret = lbs_update_hw_spec(priv); if (ret) goto done; @@ -871,7 +871,7 @@ static int lbs_init_adapter(struct lbs_private *priv) lbs_deb_enter(LBS_DEB_MAIN); - memset(priv->current_addr, 0xff, ETH_ALEN); + eth_broadcast_addr(priv->current_addr); priv->connect_status = LBS_DISCONNECTED; priv->channel = DEFAULT_AD_HOC_CHANNEL; diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c 
index 25c5acc..ed02e4b 100644 --- a/drivers/net/wireless/libertas_tf/main.c +++ b/drivers/net/wireless/libertas_tf/main.c @@ -152,7 +152,7 @@ static int lbtf_setup_firmware(struct lbtf_private *priv) /* * Read priv address from HW */ - memset(priv->current_addr, 0xff, ETH_ALEN); + eth_broadcast_addr(priv->current_addr); ret = lbtf_update_hw_spec(priv); if (ret) { ret = -1; @@ -199,7 +199,7 @@ out: static int lbtf_init_adapter(struct lbtf_private *priv) { lbtf_deb_enter(LBTF_DEB_MAIN); - memset(priv->current_addr, 0xff, ETH_ALEN); + eth_broadcast_addr(priv->current_addr); mutex_init(&priv->lock); priv->vif = NULL; diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 8908be6..d56b785 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -1911,7 +1911,7 @@ static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw, printk(KERN_DEBUG "hwsim sw_scan_complete\n"); hwsim->scanning = false; - memset(hwsim->scan_addr, 0, ETH_ALEN); + eth_zero_addr(hwsim->scan_addr); mutex_unlock(&hwsim->mutex); } @@ -2267,7 +2267,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, skb_queue_head_init(&data->pending); SET_IEEE80211_DEV(hw, data->dev); - memset(addr, 0, ETH_ALEN); + eth_zero_addr(addr); addr[0] = 0x02; addr[3] = idx >> 8; addr[4] = idx; @@ -2600,7 +2600,7 @@ static void hwsim_mon_setup(struct net_device *dev) ether_setup(dev); dev->tx_queue_len = 0; dev->type = ARPHRD_IEEE80211_RADIOTAP; - memset(dev->dev_addr, 0, ETH_ALEN); + eth_zero_addr(dev->dev_addr); dev->dev_addr[0] = 0x12; } diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index 41c8e25..8e1f681 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -1563,7 +1563,7 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev, wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, params->mac); - memset(deauth_mac, 0, ETH_ALEN); + eth_zero_addr(deauth_mac); spin_lock_irqsave(&priv->sta_list_spinlock, flags); sta_node = mwifiex_get_sta_entry(priv, params->mac); @@ -1786,7 +1786,7 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, wiphy_dbg(wiphy, "info: successfully disconnected from %pM:" " reason code %d\n", priv->cfg_bssid, reason_code); - memset(priv->cfg_bssid, 0, ETH_ALEN); + eth_zero_addr(priv->cfg_bssid); priv->hs2_enabled = false; return 0; @@ -2046,7 +2046,7 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, dev_dbg(priv->adapter->dev, "info: association to bssid %pM failed\n", priv->cfg_bssid); - memset(priv->cfg_bssid, 0, ETH_ALEN); + eth_zero_addr(priv->cfg_bssid); if (ret > 0) cfg80211_connect_result(priv->netdev, priv->cfg_bssid, @@ -2194,7 +2194,7 @@ mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) if (mwifiex_deauthenticate(priv, NULL)) return -EFAULT; - memset(priv->cfg_bssid, 0, ETH_ALEN); + eth_zero_addr(priv->cfg_bssid); return 0; } @@ -2397,7 +2397,6 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info, ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; } -#define MWIFIEX_MAX_WQ_LEN 30 /* * create a new virtual interface with the given name */ @@ -2411,7 +2410,6 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, struct mwifiex_private *priv; struct net_device *dev; void *mdev_priv; - char dfs_cac_str[MWIFIEX_MAX_WQ_LEN], dfs_chsw_str[MWIFIEX_MAX_WQ_LEN]; if (!adapter) return ERR_PTR(-EFAULT); @@ -2576,12 
+2574,10 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, return ERR_PTR(-EFAULT); } - strcpy(dfs_cac_str, "MWIFIEX_DFS_CAC"); - strcat(dfs_cac_str, name); - priv->dfs_cac_workqueue = alloc_workqueue(dfs_cac_str, + priv->dfs_cac_workqueue = alloc_workqueue("MWIFIEX_DFS_CAC%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | - WQ_UNBOUND, 1); + WQ_UNBOUND, 1, name); if (!priv->dfs_cac_workqueue) { wiphy_err(wiphy, "cannot register virtual network device\n"); free_netdev(dev); @@ -2594,11 +2590,9 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, INIT_DELAYED_WORK(&priv->dfs_cac_work, mwifiex_dfs_cac_work_queue); - strcpy(dfs_chsw_str, "MWIFIEX_DFS_CHSW"); - strcat(dfs_chsw_str, name); - priv->dfs_chan_sw_workqueue = alloc_workqueue(dfs_chsw_str, + priv->dfs_chan_sw_workqueue = alloc_workqueue("MWIFIEX_DFS_CHSW%s", WQ_HIGHPRI | WQ_UNBOUND | - WQ_MEM_RECLAIM, 1); + WQ_MEM_RECLAIM, 1, name); if (!priv->dfs_chan_sw_workqueue) { wiphy_err(wiphy, "cannot register virtual network device\n"); free_netdev(dev); diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h index 88d0ead..cf2fa11 100644 --- a/drivers/net/wireless/mwifiex/decl.h +++ b/drivers/net/wireless/mwifiex/decl.h @@ -33,6 +33,7 @@ #define MWIFIEX_MAX_BSS_NUM (3) #define MWIFIEX_DMA_ALIGN_SZ 64 +#define MWIFIEX_RX_HEADROOM 64 #define MAX_TXPD_SZ 32 #define INTF_HDR_ALIGN 4 diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index b77ba74..0153ce6 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -76,7 +76,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv) u32 i; priv->media_connected = false; - memset(priv->curr_addr, 0xff, ETH_ALEN); + eth_broadcast_addr(priv->curr_addr); priv->pkt_tx_ctrl = 0; priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; @@ -296,10 +296,9 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter) memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter)); adapter->arp_filter_size = 0; adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX; - adapter->ext_scan = false; adapter->key_api_major_ver = 0; adapter->key_api_minor_ver = 0; - memset(adapter->perm_addr, 0xff, ETH_ALEN); + eth_broadcast_addr(adapter->perm_addr); adapter->iface_limit.sta_intf = MWIFIEX_MAX_STA_NUM; adapter->iface_limit.uap_intf = MWIFIEX_MAX_UAP_NUM; adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM; diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 7e74b4f..74488ab 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -190,14 +190,16 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter) /* Check if already processing */ if (adapter->mwifiex_processing) { + adapter->more_task_flag = true; spin_unlock_irqrestore(&adapter->main_proc_lock, flags); goto exit_main_proc; } else { adapter->mwifiex_processing = true; - spin_unlock_irqrestore(&adapter->main_proc_lock, flags); } process_start: do { + adapter->more_task_flag = false; + spin_unlock_irqrestore(&adapter->main_proc_lock, flags); if ((adapter->hw_status == MWIFIEX_HW_STATUS_CLOSING) || (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY)) break; @@ -238,6 +240,7 @@ process_start: adapter->pm_wakeup_fw_try = true; mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3)); adapter->if_ops.wakeup(adapter); + spin_lock_irqsave(&adapter->main_proc_lock, flags); continue; } @@ -295,8 +298,10 @@ process_start: if ((adapter->ps_state == PS_STATE_SLEEP) || (adapter->ps_state == 
PS_STATE_PRE_SLEEP) || (adapter->ps_state == PS_STATE_SLEEP_CFM) || - adapter->tx_lock_flag) + adapter->tx_lock_flag){ + spin_lock_irqsave(&adapter->main_proc_lock, flags); continue; + } if (!adapter->cmd_sent && !adapter->curr_cmd) { if (mwifiex_exec_next_cmd(adapter) == -1) { @@ -330,15 +335,12 @@ process_start: } break; } + spin_lock_irqsave(&adapter->main_proc_lock, flags); } while (true); spin_lock_irqsave(&adapter->main_proc_lock, flags); - if (!adapter->delay_main_work && - (adapter->int_status || IS_CARD_RX_RCVD(adapter))) { - spin_unlock_irqrestore(&adapter->main_proc_lock, flags); + if (adapter->more_task_flag) goto process_start; - } - adapter->mwifiex_processing = false; spin_unlock_irqrestore(&adapter->main_proc_lock, flags); diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index f0a6af1..16be45e 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -140,6 +140,9 @@ enum { #define MWIFIEX_DRV_INFO_SIZE_MAX 0x40000 +/* Address alignment */ +#define MWIFIEX_ALIGN_ADDR(p, a) (((long)(p) + (a) - 1) & ~((a) - 1)) + struct mwifiex_dbg { u32 num_cmd_host_to_card_failure; u32 num_cmd_sleep_cfm_host_to_card_failure; @@ -774,6 +777,7 @@ struct mwifiex_adapter { /* spin lock for main process */ spinlock_t main_proc_lock; u32 mwifiex_processing; + u8 more_task_flag; u16 tx_buf_size; u16 curr_tx_buf_size; u32 ioport; @@ -1417,6 +1421,7 @@ u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv, u8 rx_rate, u8 ht_info); void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter); +void *mwifiex_alloc_rx_buf(int rx_len, gfp_t flags); #ifdef CONFIG_DEBUG_FS void mwifiex_debugfs_init(void); diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index a5828da..4b463c3 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -203,7 +203,7 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev, card->pcie.reg = data->reg; card->pcie.blksz_fw_dl = data->blksz_fw_dl; card->pcie.tx_buf_size = data->tx_buf_size; - card->pcie.supports_fw_dump = data->supports_fw_dump; + card->pcie.can_dump_fw = data->can_dump_fw; card->pcie.can_ext_scan = data->can_ext_scan; } @@ -498,7 +498,8 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter) for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) { /* Allocate skb here so that firmware can DMA data from it */ - skb = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE); + skb = mwifiex_alloc_rx_buf(MWIFIEX_RX_DATA_BUF_SIZE, + GFP_KERNEL | GFP_DMA); if (!skb) { dev_err(adapter->dev, "Unable to allocate skb for RX ring.\n"); @@ -1297,7 +1298,8 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) } } - skb_tmp = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE); + skb_tmp = mwifiex_alloc_rx_buf(MWIFIEX_RX_DATA_BUF_SIZE, + GFP_KERNEL | GFP_DMA); if (!skb_tmp) { dev_err(adapter->dev, "Unable to allocate skb.\n"); @@ -2271,7 +2273,7 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter) int ret; static char *env[] = { "DRIVER=mwifiex_pcie", "EVENT=fw_dump", NULL }; - if (!card->pcie.supports_fw_dump) + if (!card->pcie.can_dump_fw) return; for (idx = 0; idx < ARRAY_SIZE(mem_type_mapping_tbl); idx++) { diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h index 666d40e..0e7ee8b 100644 --- a/drivers/net/wireless/mwifiex/pcie.h +++ b/drivers/net/wireless/mwifiex/pcie.h @@ -205,7 +205,7 @@ struct mwifiex_pcie_device { const struct mwifiex_pcie_card_reg *reg; u16 blksz_fw_dl; 
u16 tx_buf_size; - bool supports_fw_dump; + bool can_dump_fw; bool can_ext_scan; }; @@ -214,7 +214,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8766 = { .reg = &mwifiex_reg_8766, .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD, .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K, - .supports_fw_dump = false, + .can_dump_fw = false, .can_ext_scan = true, }; @@ -223,7 +223,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8897 = { .reg = &mwifiex_reg_8897, .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD, .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K, - .supports_fw_dump = true, + .can_dump_fw = true, .can_ext_scan = true, }; diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index 91e36cd..57d85ab 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -105,8 +105,8 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) card->tx_buf_size = data->tx_buf_size; card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size; card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size; - card->supports_fw_dump = data->supports_fw_dump; - card->auto_tdls = data->auto_tdls; + card->can_dump_fw = data->can_dump_fw; + card->can_auto_tdls = data->can_auto_tdls; card->can_ext_scan = data->can_ext_scan; } @@ -1357,7 +1357,7 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) return -1; rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE); - skb = dev_alloc_skb(rx_len); + skb = mwifiex_alloc_rx_buf(rx_len, GFP_KERNEL | GFP_DMA); if (!skb) return -1; @@ -1454,7 +1454,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) } rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE); - skb = dev_alloc_skb(rx_len); + skb = mwifiex_alloc_rx_buf(rx_len, + GFP_KERNEL | GFP_DMA); if (!skb) { dev_err(adapter->dev, "%s: failed to alloc skb", @@ -1887,7 +1888,7 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter) return -1; } - adapter->auto_tdls = card->auto_tdls; + adapter->auto_tdls = card->can_auto_tdls; adapter->ext_scan = card->can_ext_scan; return ret; } @@ -2032,7 +2033,7 @@ static void mwifiex_sdio_fw_dump_work(struct work_struct *work) mwifiex_dump_drv_info(adapter); - if (!card->supports_fw_dump) + if (!card->can_dump_fw) return; for (idx = 0; idx < ARRAY_SIZE(mem_type_mapping_tbl); idx++) { diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h index 957cca2..c636944 100644 --- a/drivers/net/wireless/mwifiex/sdio.h +++ b/drivers/net/wireless/mwifiex/sdio.h @@ -238,9 +238,6 @@ struct sdio_mmc_card { const struct mwifiex_sdio_card_reg *reg; u8 max_ports; u8 mp_agg_pkt_limit; - bool supports_sdio_new_mode; - bool has_control_mask; - bool supports_fw_dump; u16 tx_buf_size; u32 mp_tx_agg_buf_size; u32 mp_rx_agg_buf_size; @@ -255,7 +252,10 @@ struct sdio_mmc_card { u8 curr_wr_port; u8 *mp_regs; - u8 auto_tdls; + bool supports_sdio_new_mode; + bool has_control_mask; + bool can_dump_fw; + bool can_auto_tdls; bool can_ext_scan; struct mwifiex_sdio_mpa_tx mpa_tx; @@ -267,13 +267,13 @@ struct mwifiex_sdio_device { const struct mwifiex_sdio_card_reg *reg; u8 max_ports; u8 mp_agg_pkt_limit; - bool supports_sdio_new_mode; - bool has_control_mask; - bool supports_fw_dump; u16 tx_buf_size; u32 mp_tx_agg_buf_size; u32 mp_rx_agg_buf_size; - u8 auto_tdls; + bool supports_sdio_new_mode; + bool has_control_mask; + bool can_dump_fw; + bool can_auto_tdls; bool can_ext_scan; }; @@ -412,13 +412,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = { 
.reg = &mwifiex_reg_sd87xx, .max_ports = 16, .mp_agg_pkt_limit = 8, - .supports_sdio_new_mode = false, - .has_control_mask = true, .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K, .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K, .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K, - .supports_fw_dump = false, - .auto_tdls = false, + .supports_sdio_new_mode = false, + .has_control_mask = true, + .can_dump_fw = false, + .can_auto_tdls = false, .can_ext_scan = false, }; @@ -427,13 +427,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = { .reg = &mwifiex_reg_sd87xx, .max_ports = 16, .mp_agg_pkt_limit = 8, - .supports_sdio_new_mode = false, - .has_control_mask = true, .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K, .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K, .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K, - .supports_fw_dump = false, - .auto_tdls = false, + .supports_sdio_new_mode = false, + .has_control_mask = true, + .can_dump_fw = false, + .can_auto_tdls = false, .can_ext_scan = true, }; @@ -442,13 +442,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = { .reg = &mwifiex_reg_sd87xx, .max_ports = 16, .mp_agg_pkt_limit = 8, - .supports_sdio_new_mode = false, - .has_control_mask = true, .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K, .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K, .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K, - .supports_fw_dump = false, - .auto_tdls = false, + .supports_sdio_new_mode = false, + .has_control_mask = true, + .can_dump_fw = false, + .can_auto_tdls = false, .can_ext_scan = true, }; @@ -457,13 +457,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = { .reg = &mwifiex_reg_sd8897, .max_ports = 32, .mp_agg_pkt_limit = 16, - .supports_sdio_new_mode = true, - .has_control_mask = false, .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K, .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K, .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K, - .supports_fw_dump = true, - .auto_tdls = false, + .supports_sdio_new_mode = true, + .has_control_mask = false, + .can_dump_fw = true, + .can_auto_tdls = false, .can_ext_scan = true, }; @@ -472,13 +472,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = { .reg = &mwifiex_reg_sd8887, .max_ports = 32, .mp_agg_pkt_limit = 16, - .supports_sdio_new_mode = true, - .has_control_mask = false, - .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K, + .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K, .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K, .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K, - .supports_fw_dump = false, - .auto_tdls = true, + .supports_sdio_new_mode = true, + .has_control_mask = false, + .can_dump_fw = false, + .can_auto_tdls = true, .can_ext_scan = true, }; @@ -492,8 +492,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = { .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K, .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K, .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K, - .supports_fw_dump = false, - .auto_tdls = false, + .can_dump_fw = false, + .can_auto_tdls = false, .can_ext_scan = true, }; diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c index 80ffe741..64c4223 100644 --- a/drivers/net/wireless/mwifiex/sta_event.c +++ b/drivers/net/wireless/mwifiex/sta_event.c @@ -135,7 +135,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code) cfg80211_disconnected(priv->netdev, reason_code, NULL, 0, GFP_KERNEL); } - memset(priv->cfg_bssid, 0, ETH_ALEN); + eth_zero_addr(priv->cfg_bssid); 
mwifiex_stop_net_dev_queue(priv->netdev, adapter); if (netif_carrier_ok(priv->netdev)) diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c index ac93557..ea4549f 100644 --- a/drivers/net/wireless/mwifiex/txrx.c +++ b/drivers/net/wireless/mwifiex/txrx.c @@ -80,11 +80,13 @@ EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet); int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb, struct mwifiex_tx_param *tx_param) { - int ret = -1; + int hroom, ret = -1; struct mwifiex_adapter *adapter = priv->adapter; u8 *head_ptr; struct txpd *local_tx_pd = NULL; + hroom = (adapter->iface_type == MWIFIEX_USB) ? 0 : INTF_HEADER_LEN; + if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) head_ptr = mwifiex_process_uap_txpd(priv, skb); else @@ -92,11 +94,9 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb, if (head_ptr) { if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) - local_tx_pd = - (struct txpd *) (head_ptr + INTF_HEADER_LEN); + local_tx_pd = (struct txpd *)(head_ptr + hroom); if (adapter->iface_type == MWIFIEX_USB) { adapter->data_sent = true; - skb_pull(skb, INTF_HEADER_LEN); ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA, skb, NULL); diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c index 3085506..2148a57 100644 --- a/drivers/net/wireless/mwifiex/util.c +++ b/drivers/net/wireless/mwifiex/util.c @@ -367,6 +367,13 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv, if (!skb) return -1; + if (!priv->mgmt_frame_mask || + priv->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED) { + dev_dbg(priv->adapter->dev, + "do not receive mgmt frames on uninitialized intf"); + return -1; + } + rx_pd = (struct rxpd *)skb->data; skb_pull(skb, le16_to_cpu(rx_pd->rx_pkt_offset)); @@ -624,3 +631,26 @@ void mwifiex_hist_data_reset(struct mwifiex_private *priv) for (ix = 0; ix < MWIFIEX_MAX_SIG_STRENGTH; ix++) atomic_set(&phist_data->sig_str[ix], 0); } + +void *mwifiex_alloc_rx_buf(int rx_len, gfp_t flags) +{ + struct sk_buff *skb; + int buf_len, pad; + + buf_len = rx_len + MWIFIEX_RX_HEADROOM + MWIFIEX_DMA_ALIGN_SZ; + + skb = __dev_alloc_skb(buf_len, flags); + + if (!skb) + return NULL; + + skb_reserve(skb, MWIFIEX_RX_HEADROOM); + + pad = MWIFIEX_ALIGN_ADDR(skb->data, MWIFIEX_DMA_ALIGN_SZ) - + (long)skb->data; + + skb_reserve(skb, pad); + + return skb; +} +EXPORT_SYMBOL_GPL(mwifiex_alloc_rx_buf); diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index ef717ac..0cd4f6be 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c @@ -730,7 +730,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv, } else { memcpy(ra, skb->data, ETH_ALEN); if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb)) - memset(ra, 0xff, ETH_ALEN); + eth_broadcast_addr(ra); ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra); } diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index f9b1218..9592116 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -1277,7 +1277,7 @@ static inline void mwl8k_save_beacon(struct ieee80211_hw *hw, struct mwl8k_priv *priv = hw->priv; priv->capture_beacon = false; - memset(priv->capture_bssid, 0, ETH_ALEN); + eth_zero_addr(priv->capture_bssid); /* * Use GFP_ATOMIC as rxq_process is called from diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c index 6abdaf0..1d4dae4 100644 --- a/drivers/net/wireless/orinoco/wext.c +++ 
b/drivers/net/wireless/orinoco/wext.c @@ -168,7 +168,7 @@ static int orinoco_ioctl_setwap(struct net_device *dev, if (is_zero_ether_addr(ap_addr->sa_data) || is_broadcast_ether_addr(ap_addr->sa_data)) { priv->bssid_fixed = 0; - memset(priv->desired_bssid, 0, ETH_ALEN); + eth_zero_addr(priv->desired_bssid); /* "off" means keep existing connection */ if (ap_addr->sa_data[0] == 0) { diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c index 5367d51..275408e 100644 --- a/drivers/net/wireless/p54/fwio.c +++ b/drivers/net/wireless/p54/fwio.c @@ -671,7 +671,7 @@ int p54_upload_key(struct p54_common *priv, u8 algo, int slot, u8 idx, u8 len, if (addr) memcpy(rxkey->mac, addr, ETH_ALEN); else - memset(rxkey->mac, ~0, ETH_ALEN); + eth_broadcast_addr(rxkey->mac); switch (algo) { case P54_CRYPTO_WEP: diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c index b9250d75..e79674f 100644 --- a/drivers/net/wireless/p54/main.c +++ b/drivers/net/wireless/p54/main.c @@ -182,7 +182,7 @@ static int p54_start(struct ieee80211_hw *dev) if (err) goto out; - memset(priv->bssid, ~0, ETH_ALEN); + eth_broadcast_addr(priv->bssid); priv->mode = NL80211_IFTYPE_MONITOR; err = p54_setup_mac(priv); if (err) { @@ -274,8 +274,8 @@ static void p54_remove_interface(struct ieee80211_hw *dev, wait_for_completion_interruptible_timeout(&priv->beacon_comp, HZ); } priv->mode = NL80211_IFTYPE_MONITOR; - memset(priv->mac_addr, 0, ETH_ALEN); - memset(priv->bssid, 0, ETH_ALEN); + eth_zero_addr(priv->mac_addr); + eth_zero_addr(priv->bssid); p54_setup_mac(priv); mutex_unlock(&priv->conf_mutex); } @@ -794,7 +794,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len) init_completion(&priv->beacon_comp); INIT_DELAYED_WORK(&priv->work, p54_work); - memset(&priv->mc_maclist[0], ~0, ETH_ALEN); + eth_broadcast_addr(priv->mc_maclist[0]); priv->curchan = NULL; p54_reset_stats(priv); return dev; diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 8330fa3..477f863 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -808,7 +808,7 @@ static int ray_dev_init(struct net_device *dev) /* copy mac and broadcast addresses to linux device */ memcpy(dev->dev_addr, &local->sparm.b4.a_mac_addr, ADDRLEN); - memset(dev->broadcast, 0xff, ETH_ALEN); + eth_broadcast_addr(dev->broadcast); dev_dbg(&link->dev, "ray_dev_init ending\n"); return 0; diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 60d44ce..d72ff8e 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -199,13 +199,13 @@ enum ndis_80211_pmkid_cand_list_flag_bits { struct ndis_80211_auth_request { __le32 length; - u8 bssid[6]; + u8 bssid[ETH_ALEN]; u8 padding[2]; __le32 flags; } __packed; struct ndis_80211_pmkid_candidate { - u8 bssid[6]; + u8 bssid[ETH_ALEN]; u8 padding[2]; __le32 flags; } __packed; @@ -248,7 +248,7 @@ struct ndis_80211_conf { struct ndis_80211_bssid_ex { __le32 length; - u8 mac[6]; + u8 mac[ETH_ALEN]; u8 padding[2]; struct ndis_80211_ssid ssid; __le32 privacy; @@ -283,7 +283,7 @@ struct ndis_80211_key { __le32 size; __le32 index; __le32 length; - u8 bssid[6]; + u8 bssid[ETH_ALEN]; u8 padding[6]; u8 rsc[8]; u8 material[32]; @@ -292,7 +292,7 @@ struct ndis_80211_key { struct ndis_80211_remove_key { __le32 size; __le32 index; - u8 bssid[6]; + u8 bssid[ETH_ALEN]; u8 padding[2]; } __packed; @@ -310,7 +310,7 @@ struct ndis_80211_assoc_info { struct req_ie { __le16 capa; __le16 listen_interval; - u8 
cur_ap_address[6]; + u8 cur_ap_address[ETH_ALEN]; } req_ie; __le32 req_ie_length; __le32 offset_req_ies; @@ -338,7 +338,7 @@ struct ndis_80211_capability { } __packed; struct ndis_80211_bssid_info { - u8 bssid[6]; + u8 bssid[ETH_ALEN]; u8 pmkid[16]; } __packed; @@ -1037,7 +1037,7 @@ static int get_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN]) bssid, &len); if (ret != 0) - memset(bssid, 0, ETH_ALEN); + eth_zero_addr(bssid); return ret; } @@ -1391,7 +1391,7 @@ static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len, priv->encr_keys[index].len = key_len; priv->encr_keys[index].cipher = cipher; memcpy(&priv->encr_keys[index].material, key, key_len); - memset(&priv->encr_keys[index].bssid, 0xff, ETH_ALEN); + eth_broadcast_addr(priv->encr_keys[index].bssid); return 0; } @@ -1466,7 +1466,7 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len, } else { /* group key */ if (priv->infra_mode == NDIS_80211_INFRA_ADHOC) - memset(ndis_key.bssid, 0xff, ETH_ALEN); + eth_broadcast_addr(ndis_key.bssid); else get_bssid(usbdev, ndis_key.bssid); } @@ -1486,7 +1486,7 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len, if (flags & NDIS_80211_ADDKEY_PAIRWISE_KEY) memcpy(&priv->encr_keys[index].bssid, ndis_key.bssid, ETH_ALEN); else - memset(&priv->encr_keys[index].bssid, 0xff, ETH_ALEN); + eth_broadcast_addr(priv->encr_keys[index].bssid); if (flags & NDIS_80211_ADDKEY_TRANSMIT_KEY) priv->encr_tx_key_index = index; @@ -2280,7 +2280,7 @@ static int rndis_disconnect(struct wiphy *wiphy, struct net_device *dev, netdev_dbg(usbdev->net, "cfg80211.disconnect(%d)\n", reason_code); priv->connected = false; - memset(priv->bssid, 0, ETH_ALEN); + eth_zero_addr(priv->bssid); return deauthenticate(usbdev); } @@ -2392,7 +2392,7 @@ static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev) netdev_dbg(usbdev->net, "cfg80211.leave_ibss()\n"); priv->connected = false; - memset(priv->bssid, 0, ETH_ALEN); + eth_zero_addr(priv->bssid); return deauthenticate(usbdev); } @@ -2857,7 +2857,7 @@ static void rndis_wlan_do_link_down_work(struct usbnet *usbdev) if (priv->connected) { priv->connected = false; - memset(priv->bssid, 0, ETH_ALEN); + eth_zero_addr(priv->bssid); deauthenticate(usbdev); diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h index c6cb49c..dee4ac2 100644 --- a/drivers/net/wireless/rtlwifi/base.h +++ b/drivers/net/wireless/rtlwifi/base.h @@ -45,9 +45,6 @@ enum ap_peer { #define RTL_TX_DESC_SIZE 32 #define RTL_TX_HEADER_SIZE (RTL_TX_DESC_SIZE + RTL_TX_DUMMY_SIZE) -#define HT_AMSDU_SIZE_4K 3839 -#define HT_AMSDU_SIZE_8K 7935 - #define MAX_BIT_RATE_40MHZ_MCS15 300 /* Mbps */ #define MAX_BIT_RATE_40MHZ_MCS7 150 /* Mbps */ @@ -61,9 +58,6 @@ enum ap_peer { #define MAX_BIT_RATE_LONG_GI_1NSS_80MHZ_MCS9 390 /* Mbps */ #define MAX_BIT_RATE_LONG_GI_1NSS_80MHZ_MCS7 293 /* Mbps */ -#define RTL_RATE_COUNT_LEGACY 12 -#define RTL_CHANNEL_COUNT 14 - #define FRAME_OFFSET_FRAME_CONTROL 0 #define FRAME_OFFSET_DURATION 2 #define FRAME_OFFSET_ADDRESS1 4 diff --git a/drivers/net/wireless/rtlwifi/cam.h b/drivers/net/wireless/rtlwifi/cam.h index 3550808..e2e647d 100644 --- a/drivers/net/wireless/rtlwifi/cam.h +++ b/drivers/net/wireless/rtlwifi/cam.h @@ -28,13 +28,11 @@ #define CAM_CONTENT_COUNT 8 -#define CFG_DEFAULT_KEY BIT(5) #define CFG_VALID BIT(15) #define PAIRWISE_KEYIDX 0 #define CAM_PAIRWISE_KEY_POSITION 4 -#define CAM_CONFIG_USEDK 1 #define CAM_CONFIG_NO_USEDK 0 void rtl_cam_reset_all_entry(struct ieee80211_hw *hw); diff 
--git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c index a31a127..3b3a88b 100644 --- a/drivers/net/wireless/rtlwifi/core.c +++ b/drivers/net/wireless/rtlwifi/core.c @@ -195,7 +195,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw) if (!(support_remote_wakeup && rtlhal->enter_pnp_sleep)) { mac->link_state = MAC80211_NOLINK; - memset(mac->bssid, 0, 6); + eth_zero_addr(mac->bssid); mac->vendor = PEER_UNKNOWN; /* reset sec info */ @@ -357,7 +357,7 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw, mac->p2p = 0; mac->vif = NULL; mac->link_state = MAC80211_NOLINK; - memset(mac->bssid, 0, ETH_ALEN); + eth_zero_addr(mac->bssid); mac->vendor = PEER_UNKNOWN; mac->opmode = NL80211_IFTYPE_UNSPECIFIED; rtlpriv->cfg->ops->set_network_type(hw, mac->opmode); @@ -1157,7 +1157,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE) rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE); mac->link_state = MAC80211_NOLINK; - memset(mac->bssid, 0, ETH_ALEN); + eth_zero_addr(mac->bssid); mac->vendor = PEER_UNKNOWN; mac->mode = 0; diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/rtlwifi/core.h index 7b64e34..82733c6 100644 --- a/drivers/net/wireless/rtlwifi/core.h +++ b/drivers/net/wireless/rtlwifi/core.h @@ -33,8 +33,6 @@ FIF_FCSFAIL | \ FIF_BCN_PRBRESP_PROMISC) -#define RTL_SUPPORTED_CTRL_FILTER 0xFF - #define DM_DIG_THRESH_HIGH 40 #define DM_DIG_THRESH_LOW 35 #define DM_FALSEALARM_THRESH_LOW 400 diff --git a/drivers/net/wireless/rtlwifi/efuse.h b/drivers/net/wireless/rtlwifi/efuse.h index fdab824..be02e78 100644 --- a/drivers/net/wireless/rtlwifi/efuse.h +++ b/drivers/net/wireless/rtlwifi/efuse.h @@ -40,12 +40,6 @@ #define PG_STATE_WORD_3 0x10 #define PG_STATE_DATA 0x20 -#define PG_SWBYTE_H 0x01 -#define PG_SWBYTE_L 0x02 - -#define _POWERON_DELAY_ -#define _PRE_EXECUTE_READ_CMD_ - #define EFUSE_REPEAT_THRESHOLD_ 3 #define EFUSE_ERROE_HANDLE 1 diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/def.h b/drivers/net/wireless/rtlwifi/rtl8188ee/def.h index d9ea9d0..0532b98 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/def.h +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/def.h @@ -26,53 +26,12 @@ #ifndef __RTL92C_DEF_H__ #define __RTL92C_DEF_H__ -#define HAL_RETRY_LIMIT_INFRA 48 -#define HAL_RETRY_LIMIT_AP_ADHOC 7 - -#define RESET_DELAY_8185 20 - -#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER) -#define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK) - -#define NUM_OF_FIRMWARE_QUEUE 10 -#define NUM_OF_PAGES_IN_FW 0x100 -#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07 -#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x07 -#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07 -#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07 -#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0 -#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0 -#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02 -#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02 -#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2 -#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1 - -#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026 -#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048 -#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048 -#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026 -#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00 - -#define MAX_LINES_HWCONFIG_TXT 1000 -#define MAX_BYTES_LINE_HWCONFIG_TXT 256 - -#define SW_THREE_WIRE 0 -#define HW_THREE_WIRE 2 - -#define BT_DEMO_BOARD 0 -#define BT_QA_BOARD 1 -#define BT_FPGA 2 - #define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0 #define HAL_PRIME_CHNL_OFFSET_LOWER 1 #define HAL_PRIME_CHNL_OFFSET_UPPER 2 
-#define MAX_H2C_QUEUE_NUM 10 - #define RX_MPDU_QUEUE 0 #define RX_CMD_QUEUE 1 -#define RX_MAX_QUEUE 2 -#define AC2QUEUEID(_AC) (_AC) #define C2H_RX_CMD_HDR_LEN 8 #define GET_C2H_CMD_CMD_LEN(__prxhdr) \ diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c index f2b9713..edc2cbb 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c @@ -566,7 +566,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) acm_ctrl &= (~ACMHW_VIQEN); break; case AC3_VO: - acm_ctrl &= (~ACMHW_BEQEN); + acm_ctrl &= (~ACMHW_VOQEN); break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c index 3f6c59c..a2bb02c 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c @@ -452,9 +452,10 @@ static void handle_branch1(struct ieee80211_hw *hw, u16 arraylen, READ_NEXT_PAIR(v1, v2, i); while (v2 != 0xDEAD && v2 != 0xCDEF && - v2 != 0xCDCD && i < arraylen - 2) + v2 != 0xCDCD && i < arraylen - 2) { _rtl8188e_config_bb_reg(hw, v1, v2); READ_NEXT_PAIR(v1, v2, i); + } while (v2 != 0xDEAD && i < arraylen - 2) READ_NEXT_PAIR(v1, v2, i); diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/rf.h b/drivers/net/wireless/rtlwifi/rtl8188ee/rf.h index 5c1472d..0eca030 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/rf.h +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/rf.h @@ -27,7 +27,6 @@ #define __RTL92C_RF_H__ #define RF6052_MAX_TX_PWR 0x3F -#define RF6052_MAX_REG 0x3F void rtl88e_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth); diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h index 9b660df..690a7a1 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h @@ -30,59 +30,18 @@ #ifndef __RTL92C_DEF_H__ #define __RTL92C_DEF_H__ -#define HAL_RETRY_LIMIT_INFRA 48 -#define HAL_RETRY_LIMIT_AP_ADHOC 7 - #define PHY_RSSI_SLID_WIN_MAX 100 #define PHY_LINKQUALITY_SLID_WIN_MAX 20 #define PHY_BEACON_RSSI_SLID_WIN_MAX 10 -#define RESET_DELAY_8185 20 - -#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER) -#define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK) - -#define NUM_OF_FIRMWARE_QUEUE 10 -#define NUM_OF_PAGES_IN_FW 0x100 -#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07 -#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x07 -#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07 -#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07 -#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0 -#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0 -#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02 -#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02 -#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2 -#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1 - -#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026 -#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048 -#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048 -#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026 -#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00 - -#define MAX_LINES_HWCONFIG_TXT 1000 -#define MAX_BYTES_LINE_HWCONFIG_TXT 256 - -#define SW_THREE_WIRE 0 -#define HW_THREE_WIRE 2 - -#define BT_DEMO_BOARD 0 -#define BT_QA_BOARD 1 -#define BT_FPGA 2 - #define RX_SMOOTH_FACTOR 20 #define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0 #define HAL_PRIME_CHNL_OFFSET_LOWER 1 #define HAL_PRIME_CHNL_OFFSET_UPPER 2 -#define MAX_H2C_QUEUE_NUM 10 - #define RX_MPDU_QUEUE 0 #define RX_CMD_QUEUE 1 -#define RX_MAX_QUEUE 2 -#define 
AC2QUEUEID(_AC) (_AC) #define C2H_RX_CMD_HDR_LEN 8 #define GET_C2H_CMD_CMD_LEN(__prxhdr) \ diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c index 303b299..04eb5c3 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c @@ -363,7 +363,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) acm_ctrl &= (~AcmHw_ViqEn); break; case AC3_VO: - acm_ctrl &= (~AcmHw_BeqEn); + acm_ctrl &= (~AcmHw_VoqEn); break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h index d8fe68b..ebd72ca 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h @@ -31,7 +31,6 @@ #define __RTL92C_RF_H__ #define RF6052_MAX_TX_PWR 0x3F -#define RF6052_MAX_REG 0x3F #define RF6052_MAX_PATH 2 void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth); diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c index fe4b699..0c20dd7 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c @@ -1589,6 +1589,8 @@ void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) case HW_VAR_DATA_FILTER: *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP2); break; + case HAL_DEF_WOWLAN: + break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "switch case not processed\n"); @@ -1871,7 +1873,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) acm_ctrl &= (~AcmHw_ViqEn); break; case AC3_VO: - acm_ctrl &= (~AcmHw_BeqEn); + acm_ctrl &= (~AcmHw_VoqEn); break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h index c1e33b0..6758808 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h @@ -32,8 +32,6 @@ #define H2C_RA_MASK 6 -#define LLT_POLLING_LLT_THRESHOLD 20 -#define LLT_POLLING_READY_TIMEOUT_COUNT 100 #define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255 #define RX_PAGE_SIZE_REG_VALUE PBP_128 diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h index 11b439d..6f987de 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h @@ -31,7 +31,6 @@ #define __RTL92CU_RF_H__ #define RF6052_MAX_TX_PWR 0x3F -#define RF6052_MAX_REG 0x3F #define RF6052_MAX_PATH 2 void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth); diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/def.h b/drivers/net/wireless/rtlwifi/rtl8192de/def.h index 939c905..0a443ed 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/def.h +++ b/drivers/net/wireless/rtlwifi/rtl8192de/def.h @@ -35,61 +35,22 @@ #define MAX_MSS_DENSITY_1T 0x0A #define RF6052_MAX_TX_PWR 0x3F -#define RF6052_MAX_REG 0x3F #define RF6052_MAX_PATH 2 -#define HAL_RETRY_LIMIT_INFRA 48 -#define HAL_RETRY_LIMIT_AP_ADHOC 7 - #define PHY_RSSI_SLID_WIN_MAX 100 #define PHY_LINKQUALITY_SLID_WIN_MAX 20 #define PHY_BEACON_RSSI_SLID_WIN_MAX 10 -#define RESET_DELAY_8185 20 - -#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER) #define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK) -#define NUM_OF_FIRMWARE_QUEUE 10 -#define NUM_OF_PAGES_IN_FW 0x100 -#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07 -#define NUM_OF_PAGE_IN_FW_QUEUE_BE 
0x07 -#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07 -#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07 -#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0 -#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0 -#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02 -#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02 -#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2 -#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1 - -#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026 -#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048 -#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048 -#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026 -#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00 - -#define MAX_LINES_HWCONFIG_TXT 1000 -#define MAX_BYTES_LINE_HWCONFIG_TXT 256 - -#define SW_THREE_WIRE 0 -#define HW_THREE_WIRE 2 - -#define BT_DEMO_BOARD 0 -#define BT_QA_BOARD 1 -#define BT_FPGA 2 - #define RX_SMOOTH_FACTOR 20 #define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0 #define HAL_PRIME_CHNL_OFFSET_LOWER 1 #define HAL_PRIME_CHNL_OFFSET_UPPER 2 -#define MAX_H2C_QUEUE_NUM 10 - #define RX_MPDU_QUEUE 0 #define RX_CMD_QUEUE 1 -#define RX_MAX_QUEUE 2 #define C2H_RX_CMD_HDR_LEN 8 #define GET_C2H_CMD_CMD_LEN(__prxhdr) \ diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c index b461b31..db230a3 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c @@ -562,7 +562,7 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) acm_ctrl &= (~ACMHW_VIQEN); break; case AC3_VO: - acm_ctrl &= (~ACMHW_BEQEN); + acm_ctrl &= (~ACMHW_VOQEN); break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG, diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/rf.h b/drivers/net/wireless/rtlwifi/rtl8192ee/rf.h index 8bdeed3..039c013 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/rf.h +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/rf.h @@ -27,7 +27,6 @@ #define __RTL92E_RF_H__ #define RF6052_MAX_TX_PWR 0x3F -#define RF6052_MAX_REG 0x3F void rtl92ee_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth); diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/rtlwifi/rtl8192se/def.h index ef87c09..41466f9 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/def.h +++ b/drivers/net/wireless/rtlwifi/rtl8192se/def.h @@ -31,7 +31,6 @@ #define RX_MPDU_QUEUE 0 #define RX_CMD_QUEUE 1 -#define RX_MAX_QUEUE 2 #define SHORT_SLOT_TIME 9 #define NON_SHORT_SLOT_TIME 20 diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c index 5761d5b..dee88a8 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c @@ -293,7 +293,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) acm_ctrl &= (~AcmHw_ViqEn); break; case AC3_VO: - acm_ctrl &= (~AcmHw_BeqEn); + acm_ctrl &= (~AcmHw_VoqEn); break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/def.h b/drivers/net/wireless/rtlwifi/rtl8723ae/def.h index 94bdd4b..bcdf227 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/def.h +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/def.h @@ -26,53 +26,12 @@ #ifndef __RTL8723E_DEF_H__ #define __RTL8723E_DEF_H__ -#define HAL_RETRY_LIMIT_INFRA 48 -#define HAL_RETRY_LIMIT_AP_ADHOC 7 - -#define RESET_DELAY_8185 20 - -#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER) -#define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK) - -#define NUM_OF_FIRMWARE_QUEUE 10 -#define NUM_OF_PAGES_IN_FW 0x100 -#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07 -#define 
NUM_OF_PAGE_IN_FW_QUEUE_BE 0x07 -#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07 -#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07 -#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0 -#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0 -#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02 -#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02 -#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2 -#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1 - -#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026 -#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048 -#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048 -#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026 -#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00 - -#define MAX_LINES_HWCONFIG_TXT 1000 -#define MAX_BYTES_LINE_HWCONFIG_TXT 256 - -#define SW_THREE_WIRE 0 -#define HW_THREE_WIRE 2 - -#define BT_DEMO_BOARD 0 -#define BT_QA_BOARD 1 -#define BT_FPGA 2 - #define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0 #define HAL_PRIME_CHNL_OFFSET_LOWER 1 #define HAL_PRIME_CHNL_OFFSET_UPPER 2 -#define MAX_H2C_QUEUE_NUM 10 - #define RX_MPDU_QUEUE 0 #define RX_CMD_QUEUE 1 -#define RX_MAX_QUEUE 2 -#define AC2QUEUEID(_AC) (_AC) #define C2H_RX_CMD_HDR_LEN 8 #define GET_C2H_CMD_CMD_LEN(__prxhdr) \ diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c index aa08546..b3b0947 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c @@ -362,7 +362,7 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) acm_ctrl &= (~ACMHW_VIQEN); break; case AC3_VO: - acm_ctrl &= (~ACMHW_BEQEN); + acm_ctrl &= (~ACMHW_VOQEN); break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h index f3f45b1..7b44ebc 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h @@ -27,7 +27,6 @@ #define __RTL8723E_RF_H__ #define RF6052_MAX_TX_PWR 0x3F -#define RF6052_MAX_REG 0x3F void rtl8723e_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth); diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c index 6dad28e..b469983 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c @@ -603,7 +603,7 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) acm_ctrl &= (~ACMHW_VIQEN); break; case AC3_VO: - acm_ctrl &= (~ACMHW_BEQEN); + acm_ctrl &= (~ACMHW_VOQEN); break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/rf.h b/drivers/net/wireless/rtlwifi/rtl8723be/rf.h index a6fea10..f423e15 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/rf.h +++ b/drivers/net/wireless/rtlwifi/rtl8723be/rf.h @@ -27,7 +27,6 @@ #define __RTL8723BE_RF_H__ #define RF6052_MAX_TX_PWR 0x3F -#define RF6052_MAX_REG 0x3F void rtl8723be_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth); diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/def.h b/drivers/net/wireless/rtlwifi/rtl8821ae/def.h index ee7c208..dfbdf53 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/def.h +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/def.h @@ -118,55 +118,14 @@ #define WIFI_NAV_UPPER_US 30000 #define HAL_92C_NAV_UPPER_UNIT 128 -#define HAL_RETRY_LIMIT_INFRA 48 -#define HAL_RETRY_LIMIT_AP_ADHOC 7 - -#define RESET_DELAY_8185 20 - -#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER) -#define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK) - -#define NUM_OF_FIRMWARE_QUEUE 10 -#define 
NUM_OF_PAGES_IN_FW 0x100 -#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07 -#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x07 -#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07 -#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07 -#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0 -#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0 -#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02 -#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02 -#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2 -#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1 - -#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026 -#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048 -#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048 -#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026 -#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00 - #define MAX_RX_DMA_BUFFER_SIZE 0x3E80 -#define MAX_LINES_HWCONFIG_TXT 1000 -#define MAX_BYTES_LINE_HWCONFIG_TXT 256 - -#define SW_THREE_WIRE 0 -#define HW_THREE_WIRE 2 - -#define BT_DEMO_BOARD 0 -#define BT_QA_BOARD 1 -#define BT_FPGA 2 - #define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0 #define HAL_PRIME_CHNL_OFFSET_LOWER 1 #define HAL_PRIME_CHNL_OFFSET_UPPER 2 -#define MAX_H2C_QUEUE_NUM 10 - #define RX_MPDU_QUEUE 0 #define RX_CMD_QUEUE 1 -#define RX_MAX_QUEUE 2 -#define AC2QUEUEID(_AC) (_AC) #define MAX_RX_DMA_BUFFER_SIZE_8812 0x3E80 diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c index 8ec8200..2a0a71b 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c @@ -667,7 +667,7 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) acm_ctrl &= (~ACMHW_VIQEN); break; case AC3_VO: - acm_ctrl &= (~ACMHW_BEQEN); + acm_ctrl &= (~ACMHW_VOQEN); break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, @@ -1515,7 +1515,7 @@ static bool _rtl8821ae_dynamic_rqpn(struct ieee80211_hw *hw, u32 boundary, (u8 *)(&support_remote_wakeup)); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "boundary=0x%#X, NPQ_RQPNValue=0x%#X, RQPNValue=0x%#X\n", + "boundary=%#X, NPQ_RQPNValue=%#X, RQPNValue=%#X\n", boundary, npq_rqpn_value, rqpn_val); /* stop PCIe DMA diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/rf.h b/drivers/net/wireless/rtlwifi/rtl8821ae/rf.h index d9582ee..efd22bd 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/rf.h +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/rf.h @@ -27,7 +27,6 @@ #define __RTL8821AE_RF_H__ #define RF6052_MAX_TX_PWR 0x3F -#define RF6052_MAX_REG 0x3F void rtl8821ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth); diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index 46ee956..f0188c8 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c @@ -701,12 +701,18 @@ free: static void _rtl_usb_cleanup_rx(struct ieee80211_hw *hw) { + struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); struct urb *urb; usb_kill_anchored_urbs(&rtlusb->rx_submitted); tasklet_kill(&rtlusb->rx_work_tasklet); + cancel_work_sync(&rtlpriv->works.lps_change_work); + + flush_workqueue(rtlpriv->works.rtl_wq); + destroy_workqueue(rtlpriv->works.rtl_wq); + skb_queue_purge(&rtlusb->rx_queue); while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) { @@ -794,8 +800,6 @@ static void rtl_usb_cleanup(struct ieee80211_hw *hw) struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); struct ieee80211_tx_info *txinfo; - SET_USB_STOP(rtlusb); - /* clean up rx stuff. 
*/ _rtl_usb_cleanup_rx(hw); @@ -834,7 +838,6 @@ static void rtl_usb_stop(struct ieee80211_hw *hw) cancel_work_sync(&rtlpriv->works.fill_h2c_cmd); /* Enable software */ SET_USB_STOP(rtlusb); - rtl_usb_deinit(hw); rtlpriv->cfg->ops->hw_disable(hw); } @@ -1147,9 +1150,9 @@ void rtl_usb_disconnect(struct usb_interface *intf) if (unlikely(!rtlpriv)) return; - /* just in case driver is removed before firmware callback */ wait_for_completion(&rtlpriv->firmware_loading_complete); + clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status); /*ieee80211_unregister_hw will call ops_stop */ if (rtlmac->mac80211_registered == 1) { ieee80211_unregister_hw(hw); diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index d4ba009..d1e9a13 100644 --- a/drivers/net/wireless/ti/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c @@ -468,7 +468,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw) wl1251_tx_flush(wl); wl1251_power_off(wl); - memset(wl->bssid, 0, ETH_ALEN); + eth_zero_addr(wl->bssid); wl->listen_int = 1; wl->bss_type = MAX_BSS_TYPE; @@ -547,7 +547,7 @@ static void wl1251_op_remove_interface(struct ieee80211_hw *hw, mutex_lock(&wl->mutex); wl1251_debug(DEBUG_MAC80211, "mac80211 remove interface"); wl->vif = NULL; - memset(wl->bssid, 0, ETH_ALEN); + eth_zero_addr(wl->bssid); mutex_unlock(&wl->mutex); } diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index c26fc21..68919f8 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -367,7 +367,7 @@ void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid) wl->links[*hlid].allocated_pkts = 0; wl->links[*hlid].prev_freed_pkts = 0; wl->links[*hlid].ba_bitmap = 0; - memset(wl->links[*hlid].addr, 0, ETH_ALEN); + eth_zero_addr(wl->links[*hlid].addr); /* * At this point op_tx() will not add more packets to the queues. We @@ -1293,7 +1293,7 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif) hdr->frame_control = cpu_to_le16(fc); memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN); memcpy(hdr->addr2, vif->addr, ETH_ALEN); - memset(hdr->addr3, 0xff, ETH_ALEN); + eth_broadcast_addr(hdr->addr3); ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_ARP_RSP, skb->data, skb->len, 0, diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 3aa8648..d2ada7c 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -437,7 +437,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, * stolen by an Ethernet bridge for STP purposes. * (FE:FF:FF:FF:FF:FF) */ - memset(dev->dev_addr, 0xFF, ETH_ALEN); + eth_broadcast_addr(dev->dev_addr); dev->dev_addr[0] &= ~0x01; netif_carrier_off(dev); diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index f1b5111..b2837b1 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig @@ -57,17 +57,6 @@ config SMSGIUCV_EVENT To compile as a module, choose M. The module name is "smsgiucv_app". -config CLAW - def_tristate m - prompt "CLAW device support" - depends on CCW && NETDEVICES - help - This driver supports channel attached CLAW devices. - CLAW is Common Link Access for Workstation. Common devices - that use CLAW are RS/6000s, Cisco Routers (CIP) and 3172 devices. - To compile as a module, choose M. The module name is claw. - To compile into the kernel, choose Y. 
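Aside: the xen-netback hunk above still ends up with the long-standing dummy address FE:FF:FF:FF:FF:FF, now built with the helper plus one bit clear. A minimal sketch of the two steps (not part of this patch; the function name is illustrative):

#include <linux/etherdevice.h>

static void example_dummy_mac(u8 addr[ETH_ALEN])
{
	eth_broadcast_addr(addr);	/* FF:FF:FF:FF:FF:FF */
	addr[0] &= ~0x01;		/* clear the group/multicast bit -> FE:FF:FF:FF:FF:FF */
}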
- config QETH def_tristate y prompt "Gigabit Ethernet device support" @@ -106,6 +95,6 @@ config QETH_IPV6 config CCWGROUP tristate - default (LCS || CTCM || QETH || CLAW) + default (LCS || CTCM || QETH) endmenu diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile index d28f05d..c351b07 100644 --- a/drivers/s390/net/Makefile +++ b/drivers/s390/net/Makefile @@ -8,7 +8,6 @@ obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o obj-$(CONFIG_SMSGIUCV) += smsgiucv.o obj-$(CONFIG_SMSGIUCV_EVENT) += smsgiucv_app.o obj-$(CONFIG_LCS) += lcs.o -obj-$(CONFIG_CLAW) += claw.o qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o obj-$(CONFIG_QETH) += qeth.o qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c deleted file mode 100644 index d609ca0..0000000 --- a/drivers/s390/net/claw.c +++ /dev/null @@ -1,3377 +0,0 @@ -/* - * ESCON CLAW network driver - * - * Linux for zSeries version - * Copyright IBM Corp. 2002, 2009 - * Author(s) Original code written by: - * Kazuo Iimura <iimura@jp.ibm.com> - * Rewritten by - * Andy Richter <richtera@us.ibm.com> - * Marc Price <mwprice@us.ibm.com> - * - * sysfs parms: - * group x.x.rrrr,x.x.wwww - * read_buffer nnnnnnn - * write_buffer nnnnnn - * host_name aaaaaaaa - * adapter_name aaaaaaaa - * api_type aaaaaaaa - * - * eg. - * group 0.0.0200 0.0.0201 - * read_buffer 25 - * write_buffer 20 - * host_name LINUX390 - * adapter_name RS6K - * api_type TCPIP - * - * where - * - * The device id is decided by the order entries - * are added to the group the first is claw0 the second claw1 - * up to CLAW_MAX_DEV - * - * rrrr - the first of 2 consecutive device addresses used for the - * CLAW protocol. - * The specified address is always used as the input (Read) - * channel and the next address is used as the output channel. - * - * wwww - the second of 2 consecutive device addresses used for - * the CLAW protocol. - * The specified address is always used as the output - * channel and the previous address is used as the input channel. - * - * read_buffer - specifies number of input buffers to allocate. - * write_buffer - specifies number of output buffers to allocate. 
- * host_name - host name - * adaptor_name - adaptor name - * api_type - API type TCPIP or API will be sent and expected - * as ws_name - * - * Note the following requirements: - * 1) host_name must match the configured adapter_name on the remote side - * 2) adaptor_name must match the configured host name on the remote side - * - * Change History - * 1.00 Initial release shipped - * 1.10 Changes for Buffer allocation - * 1.15 Changed for 2.6 Kernel No longer compiles on 2.4 or lower - * 1.25 Added Packing support - * 1.5 - */ - -#define KMSG_COMPONENT "claw" - -#include <asm/ccwdev.h> -#include <asm/ccwgroup.h> -#include <asm/debug.h> -#include <asm/idals.h> -#include <asm/io.h> -#include <linux/bitops.h> -#include <linux/ctype.h> -#include <linux/delay.h> -#include <linux/errno.h> -#include <linux/if_arp.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/ip.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/proc_fs.h> -#include <linux/sched.h> -#include <linux/signal.h> -#include <linux/skbuff.h> -#include <linux/slab.h> -#include <linux/string.h> -#include <linux/tcp.h> -#include <linux/timer.h> -#include <linux/types.h> - -#include "claw.h" - -/* - CLAW uses the s390dbf file system see claw_trace and claw_setup -*/ - -static char version[] __initdata = "CLAW driver"; -static char debug_buffer[255]; -/** - * Debug Facility Stuff - */ -static debug_info_t *claw_dbf_setup; -static debug_info_t *claw_dbf_trace; - -/** - * CLAW Debug Facility functions - */ -static void -claw_unregister_debug_facility(void) -{ - debug_unregister(claw_dbf_setup); - debug_unregister(claw_dbf_trace); -} - -static int -claw_register_debug_facility(void) -{ - claw_dbf_setup = debug_register("claw_setup", 2, 1, 8); - claw_dbf_trace = debug_register("claw_trace", 2, 2, 8); - if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) { - claw_unregister_debug_facility(); - return -ENOMEM; - } - debug_register_view(claw_dbf_setup, &debug_hex_ascii_view); - debug_set_level(claw_dbf_setup, 2); - debug_register_view(claw_dbf_trace, &debug_hex_ascii_view); - debug_set_level(claw_dbf_trace, 2); - return 0; -} - -static inline void -claw_set_busy(struct net_device *dev) -{ - ((struct claw_privbk *)dev->ml_priv)->tbusy = 1; -} - -static inline void -claw_clear_busy(struct net_device *dev) -{ - clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy)); - netif_wake_queue(dev); -} - -static inline int -claw_check_busy(struct net_device *dev) -{ - return ((struct claw_privbk *) dev->ml_priv)->tbusy; -} - -static inline void -claw_setbit_busy(int nr,struct net_device *dev) -{ - netif_stop_queue(dev); - set_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy)); -} - -static inline void -claw_clearbit_busy(int nr,struct net_device *dev) -{ - clear_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy)); - netif_wake_queue(dev); -} - -static inline int -claw_test_and_setbit_busy(int nr,struct net_device *dev) -{ - netif_stop_queue(dev); - return test_and_set_bit(nr, - (void *)&(((struct claw_privbk *) dev->ml_priv)->tbusy)); -} - - -/* Functions for the DEV methods */ - -static int claw_probe(struct ccwgroup_device *cgdev); -static void claw_remove_device(struct ccwgroup_device *cgdev); -static void claw_purge_skb_queue(struct sk_buff_head *q); -static int claw_new_device(struct ccwgroup_device *cgdev); -static int claw_shutdown_device(struct ccwgroup_device *cgdev); -static int claw_tx(struct sk_buff 
*skb, struct net_device *dev); -static int claw_change_mtu( struct net_device *dev, int new_mtu); -static int claw_open(struct net_device *dev); -static void claw_irq_handler(struct ccw_device *cdev, - unsigned long intparm, struct irb *irb); -static void claw_irq_tasklet ( unsigned long data ); -static int claw_release(struct net_device *dev); -static void claw_write_retry ( struct chbk * p_ch ); -static void claw_write_next ( struct chbk * p_ch ); -static void claw_timer ( struct chbk * p_ch ); - -/* Functions */ -static int add_claw_reads(struct net_device *dev, - struct ccwbk* p_first, struct ccwbk* p_last); -static void ccw_check_return_code (struct ccw_device *cdev, int return_code); -static void ccw_check_unit_check (struct chbk * p_ch, unsigned char sense ); -static int find_link(struct net_device *dev, char *host_name, char *ws_name ); -static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid); -static int init_ccw_bk(struct net_device *dev); -static void probe_error( struct ccwgroup_device *cgdev); -static struct net_device_stats *claw_stats(struct net_device *dev); -static int pages_to_order_of_mag(int num_of_pages); -static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr); -/* sysfs Functions */ -static ssize_t claw_hname_show(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t claw_hname_write(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count); -static ssize_t claw_adname_show(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t claw_adname_write(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count); -static ssize_t claw_apname_show(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t claw_apname_write(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count); -static ssize_t claw_wbuff_show(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t claw_wbuff_write(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count); -static ssize_t claw_rbuff_show(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t claw_rbuff_write(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count); - -/* Functions for System Validate */ -static int claw_process_control( struct net_device *dev, struct ccwbk * p_ccw); -static int claw_send_control(struct net_device *dev, __u8 type, __u8 link, - __u8 correlator, __u8 rc , char *local_name, char *remote_name); -static int claw_snd_conn_req(struct net_device *dev, __u8 link); -static int claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl); -static int claw_snd_sys_validate_rsp(struct net_device *dev, - struct clawctl * p_ctl, __u32 return_code); -static int claw_strt_conn_req(struct net_device *dev ); -static void claw_strt_read(struct net_device *dev, int lock); -static void claw_strt_out_IO(struct net_device *dev); -static void claw_free_wrt_buf(struct net_device *dev); - -/* Functions for unpack reads */ -static void unpack_read(struct net_device *dev); - -static int claw_pm_prepare(struct ccwgroup_device *gdev) -{ - return -EPERM; -} - -/* the root device for claw group devices */ -static struct device *claw_root_dev; - -/* ccwgroup table */ - -static struct ccwgroup_driver claw_group_driver = { - .driver = { - .owner = THIS_MODULE, - .name = "claw", - }, - .setup = claw_probe, - .remove = claw_remove_device, - .set_online 
= claw_new_device, - .set_offline = claw_shutdown_device, - .prepare = claw_pm_prepare, -}; - -static struct ccw_device_id claw_ids[] = { - {CCW_DEVICE(0x3088, 0x61), .driver_info = claw_channel_type_claw}, - {}, -}; -MODULE_DEVICE_TABLE(ccw, claw_ids); - -static struct ccw_driver claw_ccw_driver = { - .driver = { - .owner = THIS_MODULE, - .name = "claw", - }, - .ids = claw_ids, - .probe = ccwgroup_probe_ccwdev, - .remove = ccwgroup_remove_ccwdev, - .int_class = IRQIO_CLW, -}; - -static ssize_t claw_driver_group_store(struct device_driver *ddrv, - const char *buf, size_t count) -{ - int err; - err = ccwgroup_create_dev(claw_root_dev, &claw_group_driver, 2, buf); - return err ? err : count; -} -static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store); - -static struct attribute *claw_drv_attrs[] = { - &driver_attr_group.attr, - NULL, -}; -static struct attribute_group claw_drv_attr_group = { - .attrs = claw_drv_attrs, -}; -static const struct attribute_group *claw_drv_attr_groups[] = { - &claw_drv_attr_group, - NULL, -}; - -/* -* Key functions -*/ - -/*-------------------------------------------------------------------* - * claw_tx * - *-------------------------------------------------------------------*/ - -static int -claw_tx(struct sk_buff *skb, struct net_device *dev) -{ - int rc; - struct claw_privbk *privptr = dev->ml_priv; - unsigned long saveflags; - struct chbk *p_ch; - - CLAW_DBF_TEXT(4, trace, "claw_tx"); - p_ch = &privptr->channel[WRITE_CHANNEL]; - spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags); - rc=claw_hw_tx( skb, dev, 1 ); - spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags); - CLAW_DBF_TEXT_(4, trace, "clawtx%d", rc); - if (rc) - rc = NETDEV_TX_BUSY; - else - rc = NETDEV_TX_OK; - return rc; -} /* end of claw_tx */ - -/*------------------------------------------------------------------* - * pack the collect queue into an skb and return it * - * If not packing just return the top skb from the queue * - *------------------------------------------------------------------*/ - -static struct sk_buff * -claw_pack_skb(struct claw_privbk *privptr) -{ - struct sk_buff *new_skb,*held_skb; - struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL]; - struct claw_env *p_env = privptr->p_env; - int pkt_cnt,pk_ind,so_far; - - new_skb = NULL; /* assume no dice */ - pkt_cnt = 0; - CLAW_DBF_TEXT(4, trace, "PackSKBe"); - if (!skb_queue_empty(&p_ch->collect_queue)) { - /* some data */ - held_skb = skb_dequeue(&p_ch->collect_queue); - if (held_skb) - dev_kfree_skb_any(held_skb); - else - return NULL; - if (p_env->packing != DO_PACKED) - return held_skb; - /* get a new SKB we will pack at least one */ - new_skb = dev_alloc_skb(p_env->write_size); - if (new_skb == NULL) { - atomic_inc(&held_skb->users); - skb_queue_head(&p_ch->collect_queue,held_skb); - return NULL; - } - /* we have packed packet and a place to put it */ - pk_ind = 1; - so_far = 0; - new_skb->cb[1] = 'P'; /* every skb on queue has pack header */ - while ((pk_ind) && (held_skb != NULL)) { - if (held_skb->len+so_far <= p_env->write_size-8) { - memcpy(skb_put(new_skb,held_skb->len), - held_skb->data,held_skb->len); - privptr->stats.tx_packets++; - so_far += held_skb->len; - pkt_cnt++; - dev_kfree_skb_any(held_skb); - held_skb = skb_dequeue(&p_ch->collect_queue); - if (held_skb) - atomic_dec(&held_skb->users); - } else { - pk_ind = 0; - atomic_inc(&held_skb->users); - skb_queue_head(&p_ch->collect_queue,held_skb); - } - } - } - CLAW_DBF_TEXT(4, trace, "PackSKBx"); - return new_skb; -} - 
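Aside: claw_pack_skb() above folds the write channel's collect queue into a single write-buffer-sized skb. A distilled sketch of that packing idea (not part of this patch; simplified, no pack header, names are illustrative):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static struct sk_buff *example_pack_queue(struct sk_buff_head *queue, int write_size)
{
	struct sk_buff *out, *skb;

	out = dev_alloc_skb(write_size);
	if (!out)
		return NULL;
	while ((skb = skb_dequeue(queue)) != NULL) {
		if (out->len + skb->len > write_size) {
			skb_queue_head(queue, skb);	/* does not fit: put it back */
			break;
		}
		memcpy(skb_put(out, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
	}
	return out;
}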
-/*-------------------------------------------------------------------* - * claw_change_mtu * - * * - *-------------------------------------------------------------------*/ - -static int -claw_change_mtu(struct net_device *dev, int new_mtu) -{ - struct claw_privbk *privptr = dev->ml_priv; - int buff_size; - CLAW_DBF_TEXT(4, trace, "setmtu"); - buff_size = privptr->p_env->write_size; - if ((new_mtu < 60) || (new_mtu > buff_size)) { - return -EINVAL; - } - dev->mtu = new_mtu; - return 0; -} /* end of claw_change_mtu */ - - -/*-------------------------------------------------------------------* - * claw_open * - * * - *-------------------------------------------------------------------*/ -static int -claw_open(struct net_device *dev) -{ - - int rc; - int i; - unsigned long saveflags=0; - unsigned long parm; - struct claw_privbk *privptr; - DECLARE_WAITQUEUE(wait, current); - struct timer_list timer; - struct ccwbk *p_buf; - - CLAW_DBF_TEXT(4, trace, "open"); - privptr = (struct claw_privbk *)dev->ml_priv; - /* allocate and initialize CCW blocks */ - if (privptr->buffs_alloc == 0) { - rc=init_ccw_bk(dev); - if (rc) { - CLAW_DBF_TEXT(2, trace, "openmem"); - return -ENOMEM; - } - } - privptr->system_validate_comp=0; - privptr->release_pend=0; - if(strncmp(privptr->p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) { - privptr->p_env->read_size=DEF_PACK_BUFSIZE; - privptr->p_env->write_size=DEF_PACK_BUFSIZE; - privptr->p_env->packing=PACKING_ASK; - } else { - privptr->p_env->packing=0; - privptr->p_env->read_size=CLAW_FRAME_SIZE; - privptr->p_env->write_size=CLAW_FRAME_SIZE; - } - claw_set_busy(dev); - tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet, - (unsigned long) &privptr->channel[READ_CHANNEL]); - for ( i = 0; i < 2; i++) { - CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i); - init_waitqueue_head(&privptr->channel[i].wait); - /* skb_queue_head_init(&p_ch->io_queue); */ - if (i == WRITE_CHANNEL) - skb_queue_head_init( - &privptr->channel[WRITE_CHANNEL].collect_queue); - privptr->channel[i].flag_a = 0; - privptr->channel[i].IO_active = 0; - privptr->channel[i].flag &= ~CLAW_TIMER; - init_timer(&timer); - timer.function = (void *)claw_timer; - timer.data = (unsigned long)(&privptr->channel[i]); - timer.expires = jiffies + 15*HZ; - add_timer(&timer); - spin_lock_irqsave(get_ccwdev_lock( - privptr->channel[i].cdev), saveflags); - parm = (unsigned long) &privptr->channel[i]; - privptr->channel[i].claw_state = CLAW_START_HALT_IO; - rc = 0; - add_wait_queue(&privptr->channel[i].wait, &wait); - rc = ccw_device_halt( - (struct ccw_device *)privptr->channel[i].cdev,parm); - set_current_state(TASK_INTERRUPTIBLE); - spin_unlock_irqrestore( - get_ccwdev_lock(privptr->channel[i].cdev), saveflags); - schedule(); - remove_wait_queue(&privptr->channel[i].wait, &wait); - if(rc != 0) - ccw_check_return_code(privptr->channel[i].cdev, rc); - if((privptr->channel[i].flag & CLAW_TIMER) == 0x00) - del_timer(&timer); - } - if ((((privptr->channel[READ_CHANNEL].last_dstat | - privptr->channel[WRITE_CHANNEL].last_dstat) & - ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) || - (((privptr->channel[READ_CHANNEL].flag | - privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) { - dev_info(&privptr->channel[READ_CHANNEL].cdev->dev, - "%s: remote side is not ready\n", dev->name); - CLAW_DBF_TEXT(2, trace, "notrdy"); - - for ( i = 0; i < 2; i++) { - spin_lock_irqsave( - get_ccwdev_lock(privptr->channel[i].cdev), - saveflags); - parm = (unsigned long) &privptr->channel[i]; - privptr->channel[i].claw_state = 
CLAW_STOP; - rc = ccw_device_halt( - (struct ccw_device *)&privptr->channel[i].cdev, - parm); - spin_unlock_irqrestore( - get_ccwdev_lock(privptr->channel[i].cdev), - saveflags); - if (rc != 0) { - ccw_check_return_code( - privptr->channel[i].cdev, rc); - } - } - free_pages((unsigned long)privptr->p_buff_ccw, - (int)pages_to_order_of_mag(privptr->p_buff_ccw_num)); - if (privptr->p_env->read_size < PAGE_SIZE) { - free_pages((unsigned long)privptr->p_buff_read, - (int)pages_to_order_of_mag( - privptr->p_buff_read_num)); - } - else { - p_buf=privptr->p_read_active_first; - while (p_buf!=NULL) { - free_pages((unsigned long)p_buf->p_buffer, - (int)pages_to_order_of_mag( - privptr->p_buff_pages_perread )); - p_buf=p_buf->next; - } - } - if (privptr->p_env->write_size < PAGE_SIZE ) { - free_pages((unsigned long)privptr->p_buff_write, - (int)pages_to_order_of_mag( - privptr->p_buff_write_num)); - } - else { - p_buf=privptr->p_write_active_first; - while (p_buf!=NULL) { - free_pages((unsigned long)p_buf->p_buffer, - (int)pages_to_order_of_mag( - privptr->p_buff_pages_perwrite )); - p_buf=p_buf->next; - } - } - privptr->buffs_alloc = 0; - privptr->channel[READ_CHANNEL].flag = 0x00; - privptr->channel[WRITE_CHANNEL].flag = 0x00; - privptr->p_buff_ccw=NULL; - privptr->p_buff_read=NULL; - privptr->p_buff_write=NULL; - claw_clear_busy(dev); - CLAW_DBF_TEXT(2, trace, "open EIO"); - return -EIO; - } - - /* Send SystemValidate command */ - - claw_clear_busy(dev); - CLAW_DBF_TEXT(4, trace, "openok"); - return 0; -} /* end of claw_open */ - -/*-------------------------------------------------------------------* -* * -* claw_irq_handler * -* * -*--------------------------------------------------------------------*/ -static void -claw_irq_handler(struct ccw_device *cdev, - unsigned long intparm, struct irb *irb) -{ - struct chbk *p_ch = NULL; - struct claw_privbk *privptr = NULL; - struct net_device *dev = NULL; - struct claw_env *p_env; - struct chbk *p_ch_r=NULL; - - CLAW_DBF_TEXT(4, trace, "clawirq"); - /* Bypass all 'unsolicited interrupts' */ - privptr = dev_get_drvdata(&cdev->dev); - if (!privptr) { - dev_warn(&cdev->dev, "An uninitialized CLAW device received an" - " IRQ, c-%02x d-%02x\n", - irb->scsw.cmd.cstat, irb->scsw.cmd.dstat); - CLAW_DBF_TEXT(2, trace, "badirq"); - return; - } - - /* Try to extract channel from driver data. */ - if (privptr->channel[READ_CHANNEL].cdev == cdev) - p_ch = &privptr->channel[READ_CHANNEL]; - else if (privptr->channel[WRITE_CHANNEL].cdev == cdev) - p_ch = &privptr->channel[WRITE_CHANNEL]; - else { - dev_warn(&cdev->dev, "The device is not a CLAW device\n"); - CLAW_DBF_TEXT(2, trace, "badchan"); - return; - } - CLAW_DBF_TEXT_(4, trace, "IRQCH=%d", p_ch->flag); - - dev = (struct net_device *) (p_ch->ndev); - p_env=privptr->p_env; - - /* Copy interruption response block. 
*/ - memcpy(p_ch->irb, irb, sizeof(struct irb)); - - /* Check for good subchannel return code, otherwise info message */ - if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) { - dev_info(&cdev->dev, - "%s: subchannel check for device: %04x -" - " Sch Stat %02x Dev Stat %02x CPA - %04x\n", - dev->name, p_ch->devno, - irb->scsw.cmd.cstat, irb->scsw.cmd.dstat, - irb->scsw.cmd.cpa); - CLAW_DBF_TEXT(2, trace, "chanchk"); - /* return; */ - } - - /* Check the reason-code of a unit check */ - if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) - ccw_check_unit_check(p_ch, irb->ecw[0]); - - /* State machine to bring the connection up, down and to restart */ - p_ch->last_dstat = irb->scsw.cmd.dstat; - - switch (p_ch->claw_state) { - case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */ - if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) || - (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) || - (p_ch->irb->scsw.cmd.stctl == - (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) - return; - wake_up(&p_ch->wait); /* wake up claw_release */ - CLAW_DBF_TEXT(4, trace, "stop"); - return; - case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open */ - if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) || - (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) || - (p_ch->irb->scsw.cmd.stctl == - (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { - CLAW_DBF_TEXT(4, trace, "haltio"); - return; - } - if (p_ch->flag == CLAW_READ) { - p_ch->claw_state = CLAW_START_READ; - wake_up(&p_ch->wait); /* wake claw_open (READ)*/ - } else if (p_ch->flag == CLAW_WRITE) { - p_ch->claw_state = CLAW_START_WRITE; - /* send SYSTEM_VALIDATE */ - claw_strt_read(dev, LOCK_NO); - claw_send_control(dev, - SYSTEM_VALIDATE_REQUEST, - 0, 0, 0, - p_env->host_name, - p_env->adapter_name); - } else { - dev_warn(&cdev->dev, "The CLAW device received" - " an unexpected IRQ, " - "c-%02x d-%02x\n", - irb->scsw.cmd.cstat, - irb->scsw.cmd.dstat); - return; - } - CLAW_DBF_TEXT(4, trace, "haltio"); - return; - case CLAW_START_READ: - CLAW_DBF_TEXT(4, trace, "ReadIRQ"); - if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { - clear_bit(0, (void *)&p_ch->IO_active); - if ((p_ch->irb->ecw[0] & 0x41) == 0x41 || - (p_ch->irb->ecw[0] & 0x40) == 0x40 || - (p_ch->irb->ecw[0]) == 0) { - privptr->stats.rx_errors++; - dev_info(&cdev->dev, - "%s: Restart is required after remote " - "side recovers \n", - dev->name); - } - CLAW_DBF_TEXT(4, trace, "notrdy"); - return; - } - if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) && - (p_ch->irb->scsw.cmd.dstat == 0)) { - if (test_and_set_bit(CLAW_BH_ACTIVE, - (void *)&p_ch->flag_a) == 0) - tasklet_schedule(&p_ch->tasklet); - else - CLAW_DBF_TEXT(4, trace, "PCINoBH"); - CLAW_DBF_TEXT(4, trace, "PCI_read"); - return; - } - if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) || - (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) || - (p_ch->irb->scsw.cmd.stctl == - (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { - CLAW_DBF_TEXT(4, trace, "SPend_rd"); - return; - } - clear_bit(0, (void *)&p_ch->IO_active); - claw_clearbit_busy(TB_RETRY, dev); - if (test_and_set_bit(CLAW_BH_ACTIVE, - (void *)&p_ch->flag_a) == 0) - tasklet_schedule(&p_ch->tasklet); - else - CLAW_DBF_TEXT(4, trace, "RdBHAct"); - CLAW_DBF_TEXT(4, trace, "RdIRQXit"); - return; - case CLAW_START_WRITE: - if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { - dev_info(&cdev->dev, - "%s: Unit Check Occurred in " - "write channel\n", dev->name); - clear_bit(0, (void *)&p_ch->IO_active); - if 
(p_ch->irb->ecw[0] & 0x80) { - dev_info(&cdev->dev, - "%s: Resetting Event " - "occurred:\n", dev->name); - init_timer(&p_ch->timer); - p_ch->timer.function = - (void *)claw_write_retry; - p_ch->timer.data = (unsigned long)p_ch; - p_ch->timer.expires = jiffies + 10*HZ; - add_timer(&p_ch->timer); - dev_info(&cdev->dev, - "%s: write connection " - "restarting\n", dev->name); - } - CLAW_DBF_TEXT(4, trace, "rstrtwrt"); - return; - } - if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) { - clear_bit(0, (void *)&p_ch->IO_active); - dev_info(&cdev->dev, - "%s: Unit Exception " - "occurred in write channel\n", - dev->name); - } - if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) || - (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) || - (p_ch->irb->scsw.cmd.stctl == - (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { - CLAW_DBF_TEXT(4, trace, "writeUE"); - return; - } - clear_bit(0, (void *)&p_ch->IO_active); - if (claw_test_and_setbit_busy(TB_TX, dev) == 0) { - claw_write_next(p_ch); - claw_clearbit_busy(TB_TX, dev); - claw_clear_busy(dev); - } - p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL]; - if (test_and_set_bit(CLAW_BH_ACTIVE, - (void *)&p_ch_r->flag_a) == 0) - tasklet_schedule(&p_ch_r->tasklet); - CLAW_DBF_TEXT(4, trace, "StWtExit"); - return; - default: - dev_warn(&cdev->dev, - "The CLAW device for %s received an unexpected IRQ\n", - dev->name); - CLAW_DBF_TEXT(2, trace, "badIRQ"); - return; - } - -} /* end of claw_irq_handler */ - - -/*-------------------------------------------------------------------* -* claw_irq_tasklet * -* * -*--------------------------------------------------------------------*/ -static void -claw_irq_tasklet ( unsigned long data ) -{ - struct chbk * p_ch; - struct net_device *dev; - - p_ch = (struct chbk *) data; - dev = (struct net_device *)p_ch->ndev; - CLAW_DBF_TEXT(4, trace, "IRQtask"); - unpack_read(dev); - clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a); - CLAW_DBF_TEXT(4, trace, "TskletXt"); - return; -} /* end of claw_irq_bh */ - -/*-------------------------------------------------------------------* -* claw_release * -* * -*--------------------------------------------------------------------*/ -static int -claw_release(struct net_device *dev) -{ - int rc; - int i; - unsigned long saveflags; - unsigned long parm; - struct claw_privbk *privptr; - DECLARE_WAITQUEUE(wait, current); - struct ccwbk* p_this_ccw; - struct ccwbk* p_buf; - - if (!dev) - return 0; - privptr = (struct claw_privbk *)dev->ml_priv; - if (!privptr) - return 0; - CLAW_DBF_TEXT(4, trace, "release"); - privptr->release_pend=1; - claw_setbit_busy(TB_STOP,dev); - for ( i = 1; i >=0 ; i--) { - spin_lock_irqsave( - get_ccwdev_lock(privptr->channel[i].cdev), saveflags); - /* del_timer(&privptr->channel[READ_CHANNEL].timer); */ - privptr->channel[i].claw_state = CLAW_STOP; - privptr->channel[i].IO_active = 0; - parm = (unsigned long) &privptr->channel[i]; - if (i == WRITE_CHANNEL) - claw_purge_skb_queue( - &privptr->channel[WRITE_CHANNEL].collect_queue); - rc = ccw_device_halt (privptr->channel[i].cdev, parm); - if (privptr->system_validate_comp==0x00) /* never opened? 
*/ - init_waitqueue_head(&privptr->channel[i].wait); - add_wait_queue(&privptr->channel[i].wait, &wait); - set_current_state(TASK_INTERRUPTIBLE); - spin_unlock_irqrestore( - get_ccwdev_lock(privptr->channel[i].cdev), saveflags); - schedule(); - remove_wait_queue(&privptr->channel[i].wait, &wait); - if (rc != 0) { - ccw_check_return_code(privptr->channel[i].cdev, rc); - } - } - if (privptr->pk_skb != NULL) { - dev_kfree_skb_any(privptr->pk_skb); - privptr->pk_skb = NULL; - } - if(privptr->buffs_alloc != 1) { - CLAW_DBF_TEXT(4, trace, "none2fre"); - return 0; - } - CLAW_DBF_TEXT(4, trace, "freebufs"); - if (privptr->p_buff_ccw != NULL) { - free_pages((unsigned long)privptr->p_buff_ccw, - (int)pages_to_order_of_mag(privptr->p_buff_ccw_num)); - } - CLAW_DBF_TEXT(4, trace, "freeread"); - if (privptr->p_env->read_size < PAGE_SIZE) { - if (privptr->p_buff_read != NULL) { - free_pages((unsigned long)privptr->p_buff_read, - (int)pages_to_order_of_mag(privptr->p_buff_read_num)); - } - } - else { - p_buf=privptr->p_read_active_first; - while (p_buf!=NULL) { - free_pages((unsigned long)p_buf->p_buffer, - (int)pages_to_order_of_mag( - privptr->p_buff_pages_perread )); - p_buf=p_buf->next; - } - } - CLAW_DBF_TEXT(4, trace, "freewrit"); - if (privptr->p_env->write_size < PAGE_SIZE ) { - free_pages((unsigned long)privptr->p_buff_write, - (int)pages_to_order_of_mag(privptr->p_buff_write_num)); - } - else { - p_buf=privptr->p_write_active_first; - while (p_buf!=NULL) { - free_pages((unsigned long)p_buf->p_buffer, - (int)pages_to_order_of_mag( - privptr->p_buff_pages_perwrite )); - p_buf=p_buf->next; - } - } - CLAW_DBF_TEXT(4, trace, "clearptr"); - privptr->buffs_alloc = 0; - privptr->p_buff_ccw=NULL; - privptr->p_buff_read=NULL; - privptr->p_buff_write=NULL; - privptr->system_validate_comp=0; - privptr->release_pend=0; - /* Remove any writes that were pending and reset all reads */ - p_this_ccw=privptr->p_read_active_first; - while (p_this_ccw!=NULL) { - p_this_ccw->header.length=0xffff; - p_this_ccw->header.opcode=0xff; - p_this_ccw->header.flag=0x00; - p_this_ccw=p_this_ccw->next; - } - - while (privptr->p_write_active_first!=NULL) { - p_this_ccw=privptr->p_write_active_first; - p_this_ccw->header.flag=CLAW_PENDING; - privptr->p_write_active_first=p_this_ccw->next; - p_this_ccw->next=privptr->p_write_free_chain; - privptr->p_write_free_chain=p_this_ccw; - ++privptr->write_free_count; - } - privptr->p_write_active_last=NULL; - privptr->mtc_logical_link = -1; - privptr->mtc_skipping = 1; - privptr->mtc_offset=0; - - if (((privptr->channel[READ_CHANNEL].last_dstat | - privptr->channel[WRITE_CHANNEL].last_dstat) & - ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) { - dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev, - "Deactivating %s completed with incorrect" - " subchannel status " - "(read %02x, write %02x)\n", - dev->name, - privptr->channel[READ_CHANNEL].last_dstat, - privptr->channel[WRITE_CHANNEL].last_dstat); - CLAW_DBF_TEXT(2, trace, "badclose"); - } - CLAW_DBF_TEXT(4, trace, "rlsexit"); - return 0; -} /* end of claw_release */ - -/*-------------------------------------------------------------------* -* claw_write_retry * -* * -*--------------------------------------------------------------------*/ - -static void -claw_write_retry ( struct chbk *p_ch ) -{ - - struct net_device *dev=p_ch->ndev; - - CLAW_DBF_TEXT(4, trace, "w_retry"); - if (p_ch->claw_state == CLAW_STOP) { - return; - } - claw_strt_out_IO( dev ); - CLAW_DBF_TEXT(4, trace, "rtry_xit"); - return; -} /* end of claw_write_retry */ - - 
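Aside: claw_open() and the interrupt handler above arm one-shot timers (claw_timer, claw_write_retry) with the old init_timer() interface. A minimal sketch of that pattern (not part of this patch; names are illustrative):

#include <linux/timer.h>
#include <linux/jiffies.h>

static void example_timeout(unsigned long data)
{
	/* 'data' carries the channel pointer in the driver above */
}

static void example_arm_timer(struct timer_list *timer, unsigned long arg)
{
	init_timer(timer);
	timer->function = example_timeout;
	timer->data = arg;
	timer->expires = jiffies + 10 * HZ;	/* fire in roughly ten seconds */
	add_timer(timer);
}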
-/*-------------------------------------------------------------------* -* claw_write_next * -* * -*--------------------------------------------------------------------*/ - -static void -claw_write_next ( struct chbk * p_ch ) -{ - - struct net_device *dev; - struct claw_privbk *privptr=NULL; - struct sk_buff *pk_skb; - - CLAW_DBF_TEXT(4, trace, "claw_wrt"); - if (p_ch->claw_state == CLAW_STOP) - return; - dev = (struct net_device *) p_ch->ndev; - privptr = (struct claw_privbk *) dev->ml_priv; - claw_free_wrt_buf( dev ); - if ((privptr->write_free_count > 0) && - !skb_queue_empty(&p_ch->collect_queue)) { - pk_skb = claw_pack_skb(privptr); - while (pk_skb != NULL) { - claw_hw_tx(pk_skb, dev, 1); - if (privptr->write_free_count > 0) { - pk_skb = claw_pack_skb(privptr); - } else - pk_skb = NULL; - } - } - if (privptr->p_write_active_first!=NULL) { - claw_strt_out_IO(dev); - } - return; -} /* end of claw_write_next */ - -/*-------------------------------------------------------------------* -* * -* claw_timer * -*--------------------------------------------------------------------*/ - -static void -claw_timer ( struct chbk * p_ch ) -{ - CLAW_DBF_TEXT(4, trace, "timer"); - p_ch->flag |= CLAW_TIMER; - wake_up(&p_ch->wait); - return; -} /* end of claw_timer */ - -/* -* -* functions -*/ - - -/*-------------------------------------------------------------------* -* * -* pages_to_order_of_mag * -* * -* takes a number of pages from 1 to 512 and returns the * -* log(num_pages)/log(2) get_free_pages() needs a base 2 order * -* of magnitude get_free_pages() has an upper order of 9 * -*--------------------------------------------------------------------*/ - -static int -pages_to_order_of_mag(int num_of_pages) -{ - int order_of_mag=1; /* assume 2 pages */ - int nump; - - CLAW_DBF_TEXT_(5, trace, "pages%d", num_of_pages); - if (num_of_pages == 1) {return 0; } /* magnitude of 0 = 1 page */ - /* 512 pages = 2Meg on 4k page systems */ - if (num_of_pages >= 512) {return 9; } - /* we have two or more pages order is at least 1 */ - for (nump=2 ;nump <= 512;nump*=2) { - if (num_of_pages <= nump) - break; - order_of_mag +=1; - } - if (order_of_mag > 9) { order_of_mag = 9; } /* I know it's paranoid */ - CLAW_DBF_TEXT_(5, trace, "mag%d", order_of_mag); - return order_of_mag; -} - -/*-------------------------------------------------------------------* -* * -* add_claw_reads * -* * -*--------------------------------------------------------------------*/ -static int -add_claw_reads(struct net_device *dev, struct ccwbk* p_first, - struct ccwbk* p_last) -{ - struct claw_privbk *privptr; - struct ccw1 temp_ccw; - struct endccw * p_end; - CLAW_DBF_TEXT(4, trace, "addreads"); - privptr = dev->ml_priv; - p_end = privptr->p_end_ccw; - - /* first CCW and last CCW contains a new set of read channel programs - * to apend the running channel programs - */ - if ( p_first==NULL) { - CLAW_DBF_TEXT(4, trace, "addexit"); - return 0; - } - - /* set up ending CCW sequence for this segment */ - if (p_end->read1) { - p_end->read1=0x00; /* second ending CCW is now active */ - /* reset ending CCWs and setup TIC CCWs */ - p_end->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF; - p_end->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; - p_last->r_TIC_1.cda =(__u32)__pa(&p_end->read2_nop1); - p_last->r_TIC_2.cda =(__u32)__pa(&p_end->read2_nop1); - p_end->read2_nop2.cda=0; - p_end->read2_nop2.count=1; - } - else { - p_end->read1=0x01; /* first ending CCW is now active */ - /* reset ending CCWs and setup TIC CCWs */ - p_end->read1_nop2.cmd_code = 
CCW_CLAW_CMD_READFF; - p_end->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; - p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read1_nop1); - p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read1_nop1); - p_end->read1_nop2.cda=0; - p_end->read1_nop2.count=1; - } - - if ( privptr-> p_read_active_first ==NULL ) { - privptr->p_read_active_first = p_first; /* set new first */ - privptr->p_read_active_last = p_last; /* set new last */ - } - else { - - /* set up TIC ccw */ - temp_ccw.cda= (__u32)__pa(&p_first->read); - temp_ccw.count=0; - temp_ccw.flags=0; - temp_ccw.cmd_code = CCW_CLAW_CMD_TIC; - - - if (p_end->read1) { - - /* first set of CCW's is chained to the new read */ - /* chain, so the second set is chained to the active chain. */ - /* Therefore modify the second set to point to the new */ - /* read chain set up TIC CCWs */ - /* make sure we update the CCW so channel doesn't fetch it */ - /* when it's only half done */ - memcpy( &p_end->read2_nop2, &temp_ccw , - sizeof(struct ccw1)); - privptr->p_read_active_last->r_TIC_1.cda= - (__u32)__pa(&p_first->read); - privptr->p_read_active_last->r_TIC_2.cda= - (__u32)__pa(&p_first->read); - } - else { - /* make sure we update the CCW so channel doesn't */ - /* fetch it when it is only half done */ - memcpy( &p_end->read1_nop2, &temp_ccw , - sizeof(struct ccw1)); - privptr->p_read_active_last->r_TIC_1.cda= - (__u32)__pa(&p_first->read); - privptr->p_read_active_last->r_TIC_2.cda= - (__u32)__pa(&p_first->read); - } - /* chain in new set of blocks */ - privptr->p_read_active_last->next = p_first; - privptr->p_read_active_last=p_last; - } /* end of if ( privptr-> p_read_active_first ==NULL) */ - CLAW_DBF_TEXT(4, trace, "addexit"); - return 0; -} /* end of add_claw_reads */ - -/*-------------------------------------------------------------------* - * ccw_check_return_code * - * * - *-------------------------------------------------------------------*/ - -static void -ccw_check_return_code(struct ccw_device *cdev, int return_code) -{ - CLAW_DBF_TEXT(4, trace, "ccwret"); - if (return_code != 0) { - switch (return_code) { - case -EBUSY: /* BUSY is a transient state no action needed */ - break; - case -ENODEV: - dev_err(&cdev->dev, "The remote channel adapter is not" - " available\n"); - break; - case -EINVAL: - dev_err(&cdev->dev, - "The status of the remote channel adapter" - " is not valid\n"); - break; - default: - dev_err(&cdev->dev, "The common device layer" - " returned error code %d\n", - return_code); - } - } - CLAW_DBF_TEXT(4, trace, "ccwret"); -} /* end of ccw_check_return_code */ - -/*-------------------------------------------------------------------* -* ccw_check_unit_check * -*--------------------------------------------------------------------*/ - -static void -ccw_check_unit_check(struct chbk * p_ch, unsigned char sense ) -{ - struct net_device *ndev = p_ch->ndev; - struct device *dev = &p_ch->cdev->dev; - - CLAW_DBF_TEXT(4, trace, "unitchek"); - dev_warn(dev, "The communication peer of %s disconnected\n", - ndev->name); - - if (sense & 0x40) { - if (sense & 0x01) { - dev_warn(dev, "The remote channel adapter for" - " %s has been reset\n", - ndev->name); - } - } else if (sense & 0x20) { - if (sense & 0x04) { - dev_warn(dev, "A data streaming timeout occurred" - " for %s\n", - ndev->name); - } else if (sense & 0x10) { - dev_warn(dev, "The remote channel adapter for %s" - " is faulty\n", - ndev->name); - } else { - dev_warn(dev, "A data transfer parity error occurred" - " for %s\n", - ndev->name); - } - } else if (sense & 0x10) { - dev_warn(dev, "A 
read data parity error occurred" - " for %s\n", - ndev->name); - } - -} /* end of ccw_check_unit_check */ - -/*-------------------------------------------------------------------* -* find_link * -*--------------------------------------------------------------------*/ -static int -find_link(struct net_device *dev, char *host_name, char *ws_name ) -{ - struct claw_privbk *privptr; - struct claw_env *p_env; - int rc=0; - - CLAW_DBF_TEXT(2, setup, "findlink"); - privptr = dev->ml_priv; - p_env=privptr->p_env; - switch (p_env->packing) - { - case PACKING_ASK: - if ((memcmp(WS_APPL_NAME_PACKED, host_name, 8)!=0) || - (memcmp(WS_APPL_NAME_PACKED, ws_name, 8)!=0 )) - rc = EINVAL; - break; - case DO_PACKED: - case PACK_SEND: - if ((memcmp(WS_APPL_NAME_IP_NAME, host_name, 8)!=0) || - (memcmp(WS_APPL_NAME_IP_NAME, ws_name, 8)!=0 )) - rc = EINVAL; - break; - default: - if ((memcmp(HOST_APPL_NAME, host_name, 8)!=0) || - (memcmp(p_env->api_type , ws_name, 8)!=0)) - rc = EINVAL; - break; - } - - return rc; -} /* end of find_link */ - -/*-------------------------------------------------------------------* - * claw_hw_tx * - * * - * * - *-------------------------------------------------------------------*/ - -static int -claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) -{ - int rc=0; - struct claw_privbk *privptr; - struct ccwbk *p_this_ccw; - struct ccwbk *p_first_ccw; - struct ccwbk *p_last_ccw; - __u32 numBuffers; - signed long len_of_data; - unsigned long bytesInThisBuffer; - unsigned char *pDataAddress; - struct endccw *pEnd; - struct ccw1 tempCCW; - struct claw_env *p_env; - struct clawph *pk_head; - struct chbk *ch; - - CLAW_DBF_TEXT(4, trace, "hw_tx"); - privptr = (struct claw_privbk *)(dev->ml_priv); - p_env =privptr->p_env; - claw_free_wrt_buf(dev); /* Clean up free chain if posible */ - /* scan the write queue to free any completed write packets */ - p_first_ccw=NULL; - p_last_ccw=NULL; - if ((p_env->packing >= PACK_SEND) && - (skb->cb[1] != 'P')) { - skb_push(skb,sizeof(struct clawph)); - pk_head=(struct clawph *)skb->data; - pk_head->len=skb->len-sizeof(struct clawph); - if (pk_head->len%4) { - pk_head->len+= 4-(pk_head->len%4); - skb_pad(skb,4-(pk_head->len%4)); - skb_put(skb,4-(pk_head->len%4)); - } - if (p_env->packing == DO_PACKED) - pk_head->link_num = linkid; - else - pk_head->link_num = 0; - pk_head->flag = 0x00; - skb_pad(skb,4); - skb->cb[1] = 'P'; - } - if (linkid == 0) { - if (claw_check_busy(dev)) { - if (privptr->write_free_count!=0) { - claw_clear_busy(dev); - } - else { - claw_strt_out_IO(dev ); - claw_free_wrt_buf( dev ); - if (privptr->write_free_count==0) { - ch = &privptr->channel[WRITE_CHANNEL]; - atomic_inc(&skb->users); - skb_queue_tail(&ch->collect_queue, skb); - goto Done; - } - else { - claw_clear_busy(dev); - } - } - } - /* tx lock */ - if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */ - ch = &privptr->channel[WRITE_CHANNEL]; - atomic_inc(&skb->users); - skb_queue_tail(&ch->collect_queue, skb); - claw_strt_out_IO(dev ); - rc=-EBUSY; - goto Done2; - } - } - /* See how many write buffers are required to hold this data */ - numBuffers = DIV_ROUND_UP(skb->len, privptr->p_env->write_size); - - /* If that number of buffers isn't available, give up for now */ - if (privptr->write_free_count < numBuffers || - privptr->p_write_free_chain == NULL ) { - - claw_setbit_busy(TB_NOBUFFER,dev); - ch = &privptr->channel[WRITE_CHANNEL]; - atomic_inc(&skb->users); - skb_queue_tail(&ch->collect_queue, skb); - CLAW_DBF_TEXT(2, trace, "clawbusy"); - goto 
Done2; - } - pDataAddress=skb->data; - len_of_data=skb->len; - - while (len_of_data > 0) { - p_this_ccw=privptr->p_write_free_chain; /* get a block */ - if (p_this_ccw == NULL) { /* lost the race */ - ch = &privptr->channel[WRITE_CHANNEL]; - atomic_inc(&skb->users); - skb_queue_tail(&ch->collect_queue, skb); - goto Done2; - } - privptr->p_write_free_chain=p_this_ccw->next; - p_this_ccw->next=NULL; - --privptr->write_free_count; /* -1 */ - if (len_of_data >= privptr->p_env->write_size) - bytesInThisBuffer = privptr->p_env->write_size; - else - bytesInThisBuffer = len_of_data; - memcpy( p_this_ccw->p_buffer,pDataAddress, bytesInThisBuffer); - len_of_data-=bytesInThisBuffer; - pDataAddress+=(unsigned long)bytesInThisBuffer; - /* setup write CCW */ - p_this_ccw->write.cmd_code = (linkid * 8) +1; - if (len_of_data>0) { - p_this_ccw->write.cmd_code+=MORE_to_COME_FLAG; - } - p_this_ccw->write.count=bytesInThisBuffer; - /* now add to end of this chain */ - if (p_first_ccw==NULL) { - p_first_ccw=p_this_ccw; - } - if (p_last_ccw!=NULL) { - p_last_ccw->next=p_this_ccw; - /* set up TIC ccws */ - p_last_ccw->w_TIC_1.cda= - (__u32)__pa(&p_this_ccw->write); - } - p_last_ccw=p_this_ccw; /* save new last block */ - } - - /* FirstCCW and LastCCW now contain a new set of write channel - * programs to append to the running channel program - */ - - if (p_first_ccw!=NULL) { - /* setup ending ccw sequence for this segment */ - pEnd=privptr->p_end_ccw; - if (pEnd->write1) { - pEnd->write1=0x00; /* second end ccw is now active */ - /* set up Tic CCWs */ - p_last_ccw->w_TIC_1.cda= - (__u32)__pa(&pEnd->write2_nop1); - pEnd->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF; - pEnd->write2_nop2.flags = - CCW_FLAG_SLI | CCW_FLAG_SKIP; - pEnd->write2_nop2.cda=0; - pEnd->write2_nop2.count=1; - } - else { /* end of if (pEnd->write1)*/ - pEnd->write1=0x01; /* first end ccw is now active */ - /* set up Tic CCWs */ - p_last_ccw->w_TIC_1.cda= - (__u32)__pa(&pEnd->write1_nop1); - pEnd->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF; - pEnd->write1_nop2.flags = - CCW_FLAG_SLI | CCW_FLAG_SKIP; - pEnd->write1_nop2.cda=0; - pEnd->write1_nop2.count=1; - } /* end if if (pEnd->write1) */ - - if (privptr->p_write_active_first==NULL ) { - privptr->p_write_active_first=p_first_ccw; - privptr->p_write_active_last=p_last_ccw; - } - else { - /* set up Tic CCWs */ - - tempCCW.cda=(__u32)__pa(&p_first_ccw->write); - tempCCW.count=0; - tempCCW.flags=0; - tempCCW.cmd_code=CCW_CLAW_CMD_TIC; - - if (pEnd->write1) { - - /* - * first set of ending CCW's is chained to the new write - * chain, so the second set is chained to the active chain - * Therefore modify the second set to point the new write chain. 
- * make sure we update the CCW atomically - * so channel does not fetch it when it's only half done - */ - memcpy( &pEnd->write2_nop2, &tempCCW , - sizeof(struct ccw1)); - privptr->p_write_active_last->w_TIC_1.cda= - (__u32)__pa(&p_first_ccw->write); - } - else { - - /*make sure we update the CCW atomically - *so channel does not fetch it when it's only half done - */ - memcpy(&pEnd->write1_nop2, &tempCCW , - sizeof(struct ccw1)); - privptr->p_write_active_last->w_TIC_1.cda= - (__u32)__pa(&p_first_ccw->write); - - } /* end if if (pEnd->write1) */ - - privptr->p_write_active_last->next=p_first_ccw; - privptr->p_write_active_last=p_last_ccw; - } - - } /* endif (p_first_ccw!=NULL) */ - dev_kfree_skb_any(skb); - claw_strt_out_IO(dev ); - /* if write free count is zero , set NOBUFFER */ - if (privptr->write_free_count==0) { - claw_setbit_busy(TB_NOBUFFER,dev); - } -Done2: - claw_clearbit_busy(TB_TX,dev); -Done: - return(rc); -} /* end of claw_hw_tx */ - -/*-------------------------------------------------------------------* -* * -* init_ccw_bk * -* * -*--------------------------------------------------------------------*/ - -static int -init_ccw_bk(struct net_device *dev) -{ - - __u32 ccw_blocks_required; - __u32 ccw_blocks_perpage; - __u32 ccw_pages_required; - __u32 claw_reads_perpage=1; - __u32 claw_read_pages; - __u32 claw_writes_perpage=1; - __u32 claw_write_pages; - void *p_buff=NULL; - struct ccwbk*p_free_chain; - struct ccwbk*p_buf; - struct ccwbk*p_last_CCWB; - struct ccwbk*p_first_CCWB; - struct endccw *p_endccw=NULL; - addr_t real_address; - struct claw_privbk *privptr = dev->ml_priv; - struct clawh *pClawH=NULL; - addr_t real_TIC_address; - int i,j; - CLAW_DBF_TEXT(4, trace, "init_ccw"); - - /* initialize statistics field */ - privptr->active_link_ID=0; - /* initialize ccwbk pointers */ - privptr->p_write_free_chain=NULL; /* pointer to free ccw chain*/ - privptr->p_write_active_first=NULL; /* pointer to the first write ccw*/ - privptr->p_write_active_last=NULL; /* pointer to the last write ccw*/ - privptr->p_read_active_first=NULL; /* pointer to the first read ccw*/ - privptr->p_read_active_last=NULL; /* pointer to the last read ccw */ - privptr->p_end_ccw=NULL; /* pointer to ending ccw */ - privptr->p_claw_signal_blk=NULL; /* pointer to signal block */ - privptr->buffs_alloc = 0; - memset(&privptr->end_ccw, 0x00, sizeof(struct endccw)); - memset(&privptr->ctl_bk, 0x00, sizeof(struct clawctl)); - /* initialize free write ccwbk counter */ - privptr->write_free_count=0; /* number of free bufs on write chain */ - p_last_CCWB = NULL; - p_first_CCWB= NULL; - /* - * We need 1 CCW block for each read buffer, 1 for each - * write buffer, plus 1 for ClawSignalBlock - */ - ccw_blocks_required = - privptr->p_env->read_buffers+privptr->p_env->write_buffers+1; - /* - * compute number of CCW blocks that will fit in a page - */ - ccw_blocks_perpage= PAGE_SIZE / CCWBK_SIZE; - ccw_pages_required= - DIV_ROUND_UP(ccw_blocks_required, ccw_blocks_perpage); - - /* - * read and write sizes are set by 2 constants in claw.h - * 4k and 32k. Unpacked values other than 4k are not going to - * provide good performance. With packing buffers support 32k - * buffers are used. 
- */ - if (privptr->p_env->read_size < PAGE_SIZE) { - claw_reads_perpage = PAGE_SIZE / privptr->p_env->read_size; - claw_read_pages = DIV_ROUND_UP(privptr->p_env->read_buffers, - claw_reads_perpage); - } - else { /* > or equal */ - privptr->p_buff_pages_perread = - DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE); - claw_read_pages = privptr->p_env->read_buffers * - privptr->p_buff_pages_perread; - } - if (privptr->p_env->write_size < PAGE_SIZE) { - claw_writes_perpage = - PAGE_SIZE / privptr->p_env->write_size; - claw_write_pages = DIV_ROUND_UP(privptr->p_env->write_buffers, - claw_writes_perpage); - - } - else { /* > or equal */ - privptr->p_buff_pages_perwrite = - DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE); - claw_write_pages = privptr->p_env->write_buffers * - privptr->p_buff_pages_perwrite; - } - /* - * allocate ccw_pages_required - */ - if (privptr->p_buff_ccw==NULL) { - privptr->p_buff_ccw= - (void *)__get_free_pages(__GFP_DMA, - (int)pages_to_order_of_mag(ccw_pages_required )); - if (privptr->p_buff_ccw==NULL) { - return -ENOMEM; - } - privptr->p_buff_ccw_num=ccw_pages_required; - } - memset(privptr->p_buff_ccw, 0x00, - privptr->p_buff_ccw_num * PAGE_SIZE); - - /* - * obtain ending ccw block address - * - */ - privptr->p_end_ccw = (struct endccw *)&privptr->end_ccw; - real_address = (__u32)__pa(privptr->p_end_ccw); - /* Initialize ending CCW block */ - p_endccw=privptr->p_end_ccw; - p_endccw->real=real_address; - p_endccw->write1=0x00; - p_endccw->read1=0x00; - - /* write1_nop1 */ - p_endccw->write1_nop1.cmd_code = CCW_CLAW_CMD_NOP; - p_endccw->write1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_endccw->write1_nop1.count = 1; - p_endccw->write1_nop1.cda = 0; - - /* write1_nop2 */ - p_endccw->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF; - p_endccw->write1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; - p_endccw->write1_nop2.count = 1; - p_endccw->write1_nop2.cda = 0; - - /* write2_nop1 */ - p_endccw->write2_nop1.cmd_code = CCW_CLAW_CMD_NOP; - p_endccw->write2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_endccw->write2_nop1.count = 1; - p_endccw->write2_nop1.cda = 0; - - /* write2_nop2 */ - p_endccw->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF; - p_endccw->write2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; - p_endccw->write2_nop2.count = 1; - p_endccw->write2_nop2.cda = 0; - - /* read1_nop1 */ - p_endccw->read1_nop1.cmd_code = CCW_CLAW_CMD_NOP; - p_endccw->read1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_endccw->read1_nop1.count = 1; - p_endccw->read1_nop1.cda = 0; - - /* read1_nop2 */ - p_endccw->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF; - p_endccw->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; - p_endccw->read1_nop2.count = 1; - p_endccw->read1_nop2.cda = 0; - - /* read2_nop1 */ - p_endccw->read2_nop1.cmd_code = CCW_CLAW_CMD_NOP; - p_endccw->read2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_endccw->read2_nop1.count = 1; - p_endccw->read2_nop1.cda = 0; - - /* read2_nop2 */ - p_endccw->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF; - p_endccw->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; - p_endccw->read2_nop2.count = 1; - p_endccw->read2_nop2.cda = 0; - - /* - * Build a chain of CCWs - * - */ - p_buff=privptr->p_buff_ccw; - - p_free_chain=NULL; - for (i=0 ; i < ccw_pages_required; i++ ) { - real_address = (__u32)__pa(p_buff); - p_buf=p_buff; - for (j=0 ; j < ccw_blocks_perpage ; j++) { - p_buf->next = p_free_chain; - p_free_chain = p_buf; - p_buf->real=(__u32)__pa(p_buf); - ++p_buf; - } - p_buff+=PAGE_SIZE; - } - /* - * Initialize ClawSignalBlock - * - */ - if 
(privptr->p_claw_signal_blk==NULL) { - privptr->p_claw_signal_blk=p_free_chain; - p_free_chain=p_free_chain->next; - pClawH=(struct clawh *)privptr->p_claw_signal_blk; - pClawH->length=0xffff; - pClawH->opcode=0xff; - pClawH->flag=CLAW_BUSY; - } - - /* - * allocate write_pages_required and add to free chain - */ - if (privptr->p_buff_write==NULL) { - if (privptr->p_env->write_size < PAGE_SIZE) { - privptr->p_buff_write= - (void *)__get_free_pages(__GFP_DMA, - (int)pages_to_order_of_mag(claw_write_pages )); - if (privptr->p_buff_write==NULL) { - privptr->p_buff_ccw=NULL; - return -ENOMEM; - } - /* - * Build CLAW write free chain - * - */ - - memset(privptr->p_buff_write, 0x00, - ccw_pages_required * PAGE_SIZE); - privptr->p_write_free_chain=NULL; - - p_buff=privptr->p_buff_write; - - for (i=0 ; i< privptr->p_env->write_buffers ; i++) { - p_buf = p_free_chain; /* get a CCW */ - p_free_chain = p_buf->next; - p_buf->next =privptr->p_write_free_chain; - privptr->p_write_free_chain = p_buf; - p_buf-> p_buffer = (struct clawbuf *)p_buff; - p_buf-> write.cda = (__u32)__pa(p_buff); - p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF; - p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_buf-> w_read_FF.count = 1; - p_buf-> w_read_FF.cda = - (__u32)__pa(&p_buf-> header.flag); - p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC; - p_buf-> w_TIC_1.flags = 0; - p_buf-> w_TIC_1.count = 0; - - if (((unsigned long)p_buff + - privptr->p_env->write_size) >= - ((unsigned long)(p_buff+2* - (privptr->p_env->write_size) - 1) & PAGE_MASK)) { - p_buff = p_buff+privptr->p_env->write_size; - } - } - } - else /* Buffers are => PAGE_SIZE. 1 buff per get_free_pages */ - { - privptr->p_write_free_chain=NULL; - for (i = 0; i< privptr->p_env->write_buffers ; i++) { - p_buff=(void *)__get_free_pages(__GFP_DMA, - (int)pages_to_order_of_mag( - privptr->p_buff_pages_perwrite) ); - if (p_buff==NULL) { - free_pages((unsigned long)privptr->p_buff_ccw, - (int)pages_to_order_of_mag( - privptr->p_buff_ccw_num)); - privptr->p_buff_ccw=NULL; - p_buf=privptr->p_buff_write; - while (p_buf!=NULL) { - free_pages((unsigned long) - p_buf->p_buffer, - (int)pages_to_order_of_mag( - privptr->p_buff_pages_perwrite)); - p_buf=p_buf->next; - } - return -ENOMEM; - } /* Error on get_pages */ - memset(p_buff, 0x00, privptr->p_env->write_size ); - p_buf = p_free_chain; - p_free_chain = p_buf->next; - p_buf->next = privptr->p_write_free_chain; - privptr->p_write_free_chain = p_buf; - privptr->p_buff_write = p_buf; - p_buf->p_buffer=(struct clawbuf *)p_buff; - p_buf-> write.cda = (__u32)__pa(p_buff); - p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF; - p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_buf-> w_read_FF.count = 1; - p_buf-> w_read_FF.cda = - (__u32)__pa(&p_buf-> header.flag); - p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC; - p_buf-> w_TIC_1.flags = 0; - p_buf-> w_TIC_1.count = 0; - } /* for all write_buffers */ - - } /* else buffers are PAGE_SIZE or bigger */ - - } - privptr->p_buff_write_num=claw_write_pages; - privptr->write_free_count=privptr->p_env->write_buffers; - - - /* - * allocate read_pages_required and chain to free chain - */ - if (privptr->p_buff_read==NULL) { - if (privptr->p_env->read_size < PAGE_SIZE) { - privptr->p_buff_read= - (void *)__get_free_pages(__GFP_DMA, - (int)pages_to_order_of_mag(claw_read_pages) ); - if (privptr->p_buff_read==NULL) { - free_pages((unsigned long)privptr->p_buff_ccw, - 
(int)pages_to_order_of_mag( - privptr->p_buff_ccw_num)); - /* free the write pages size is < page size */ - free_pages((unsigned long)privptr->p_buff_write, - (int)pages_to_order_of_mag( - privptr->p_buff_write_num)); - privptr->p_buff_ccw=NULL; - privptr->p_buff_write=NULL; - return -ENOMEM; - } - memset(privptr->p_buff_read, 0x00, claw_read_pages * PAGE_SIZE); - privptr->p_buff_read_num=claw_read_pages; - /* - * Build CLAW read free chain - * - */ - p_buff=privptr->p_buff_read; - for (i=0 ; i< privptr->p_env->read_buffers ; i++) { - p_buf = p_free_chain; - p_free_chain = p_buf->next; - - if (p_last_CCWB==NULL) { - p_buf->next=NULL; - real_TIC_address=0; - p_last_CCWB=p_buf; - } - else { - p_buf->next=p_first_CCWB; - real_TIC_address= - (__u32)__pa(&p_first_CCWB -> read ); - } - - p_first_CCWB=p_buf; - - p_buf->p_buffer=(struct clawbuf *)p_buff; - /* initialize read command */ - p_buf-> read.cmd_code = CCW_CLAW_CMD_READ; - p_buf-> read.cda = (__u32)__pa(p_buff); - p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_buf-> read.count = privptr->p_env->read_size; - - /* initialize read_h command */ - p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER; - p_buf-> read_h.cda = - (__u32)__pa(&(p_buf->header)); - p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_buf-> read_h.count = sizeof(struct clawh); - - /* initialize Signal command */ - p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD; - p_buf-> signal.cda = - (__u32)__pa(&(pClawH->flag)); - p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_buf-> signal.count = 1; - - /* initialize r_TIC_1 command */ - p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC; - p_buf-> r_TIC_1.cda = (__u32)real_TIC_address; - p_buf-> r_TIC_1.flags = 0; - p_buf-> r_TIC_1.count = 0; - - /* initialize r_read_FF command */ - p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF; - p_buf-> r_read_FF.cda = - (__u32)__pa(&(pClawH->flag)); - p_buf-> r_read_FF.flags = - CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI; - p_buf-> r_read_FF.count = 1; - - /* initialize r_TIC_2 */ - memcpy(&p_buf->r_TIC_2, - &p_buf->r_TIC_1, sizeof(struct ccw1)); - - /* initialize Header */ - p_buf->header.length=0xffff; - p_buf->header.opcode=0xff; - p_buf->header.flag=CLAW_PENDING; - - if (((unsigned long)p_buff+privptr->p_env->read_size) >= - ((unsigned long)(p_buff+2*(privptr->p_env->read_size) - -1) - & PAGE_MASK)) { - p_buff= p_buff+privptr->p_env->read_size; - } - else { - p_buff= - (void *)((unsigned long) - (p_buff+2*(privptr->p_env->read_size)-1) - & PAGE_MASK) ; - } - } /* for read_buffers */ - } /* read_size < PAGE_SIZE */ - else { /* read Size >= PAGE_SIZE */ - for (i=0 ; i< privptr->p_env->read_buffers ; i++) { - p_buff = (void *)__get_free_pages(__GFP_DMA, - (int)pages_to_order_of_mag( - privptr->p_buff_pages_perread)); - if (p_buff==NULL) { - free_pages((unsigned long)privptr->p_buff_ccw, - (int)pages_to_order_of_mag(privptr-> - p_buff_ccw_num)); - /* free the write pages */ - p_buf=privptr->p_buff_write; - while (p_buf!=NULL) { - free_pages( - (unsigned long)p_buf->p_buffer, - (int)pages_to_order_of_mag( - privptr->p_buff_pages_perwrite)); - p_buf=p_buf->next; - } - /* free any read pages already alloc */ - p_buf=privptr->p_buff_read; - while (p_buf!=NULL) { - free_pages( - (unsigned long)p_buf->p_buffer, - (int)pages_to_order_of_mag( - privptr->p_buff_pages_perread)); - p_buf=p_buf->next; - } - privptr->p_buff_ccw=NULL; - privptr->p_buff_write=NULL; - return -ENOMEM; - } - memset(p_buff, 0x00, privptr->p_env->read_size); - p_buf = p_free_chain; - privptr->p_buff_read = p_buf; - 
p_free_chain = p_buf->next; - - if (p_last_CCWB==NULL) { - p_buf->next=NULL; - real_TIC_address=0; - p_last_CCWB=p_buf; - } - else { - p_buf->next=p_first_CCWB; - real_TIC_address= - (addr_t)__pa( - &p_first_CCWB -> read ); - } - - p_first_CCWB=p_buf; - /* save buff address */ - p_buf->p_buffer=(struct clawbuf *)p_buff; - /* initialize read command */ - p_buf-> read.cmd_code = CCW_CLAW_CMD_READ; - p_buf-> read.cda = (__u32)__pa(p_buff); - p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_buf-> read.count = privptr->p_env->read_size; - - /* initialize read_h command */ - p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER; - p_buf-> read_h.cda = - (__u32)__pa(&(p_buf->header)); - p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_buf-> read_h.count = sizeof(struct clawh); - - /* initialize Signal command */ - p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD; - p_buf-> signal.cda = - (__u32)__pa(&(pClawH->flag)); - p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC; - p_buf-> signal.count = 1; - - /* initialize r_TIC_1 command */ - p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC; - p_buf-> r_TIC_1.cda = (__u32)real_TIC_address; - p_buf-> r_TIC_1.flags = 0; - p_buf-> r_TIC_1.count = 0; - - /* initialize r_read_FF command */ - p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF; - p_buf-> r_read_FF.cda = - (__u32)__pa(&(pClawH->flag)); - p_buf-> r_read_FF.flags = - CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI; - p_buf-> r_read_FF.count = 1; - - /* initialize r_TIC_2 */ - memcpy(&p_buf->r_TIC_2, &p_buf->r_TIC_1, - sizeof(struct ccw1)); - - /* initialize Header */ - p_buf->header.length=0xffff; - p_buf->header.opcode=0xff; - p_buf->header.flag=CLAW_PENDING; - - } /* For read_buffers */ - } /* read_size >= PAGE_SIZE */ - } /* pBuffread = NULL */ - add_claw_reads( dev ,p_first_CCWB , p_last_CCWB); - privptr->buffs_alloc = 1; - - return 0; -} /* end of init_ccw_bk */ - -/*-------------------------------------------------------------------* -* * -* probe_error * -* * -*--------------------------------------------------------------------*/ - -static void -probe_error( struct ccwgroup_device *cgdev) -{ - struct claw_privbk *privptr; - - CLAW_DBF_TEXT(4, trace, "proberr"); - privptr = dev_get_drvdata(&cgdev->dev); - if (privptr != NULL) { - dev_set_drvdata(&cgdev->dev, NULL); - kfree(privptr->p_env); - kfree(privptr->p_mtc_envelope); - kfree(privptr); - } -} /* probe_error */ - -/*-------------------------------------------------------------------* -* claw_process_control * -* * -* * -*--------------------------------------------------------------------*/ - -static int -claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) -{ - - struct clawbuf *p_buf; - struct clawctl ctlbk; - struct clawctl *p_ctlbk; - char temp_host_name[8]; - char temp_ws_name[8]; - struct claw_privbk *privptr; - struct claw_env *p_env; - struct sysval *p_sysval; - struct conncmd *p_connect=NULL; - int rc; - struct chbk *p_ch = NULL; - struct device *tdev; - CLAW_DBF_TEXT(2, setup, "clw_cntl"); - udelay(1000); /* Wait a ms for the control packets to - *catch up to each other */ - privptr = dev->ml_priv; - p_env=privptr->p_env; - tdev = &privptr->channel[READ_CHANNEL].cdev->dev; - memcpy( &temp_host_name, p_env->host_name, 8); - memcpy( &temp_ws_name, p_env->adapter_name , 8); - dev_info(tdev, "%s: CLAW device %.8s: " - "Received Control Packet\n", - dev->name, temp_ws_name); - if (privptr->release_pend==1) { - return 0; - } - p_buf=p_ccw->p_buffer; - p_ctlbk=&ctlbk; - if (p_env->packing == DO_PACKED) { /* packing in 
progress?*/ - memcpy(p_ctlbk, &p_buf->buffer[4], sizeof(struct clawctl)); - } else { - memcpy(p_ctlbk, p_buf, sizeof(struct clawctl)); - } - switch (p_ctlbk->command) - { - case SYSTEM_VALIDATE_REQUEST: - if (p_ctlbk->version != CLAW_VERSION_ID) { - claw_snd_sys_validate_rsp(dev, p_ctlbk, - CLAW_RC_WRONG_VERSION); - dev_warn(tdev, "The communication peer of %s" - " uses an incorrect API version %d\n", - dev->name, p_ctlbk->version); - } - p_sysval = (struct sysval *)&(p_ctlbk->data); - dev_info(tdev, "%s: Recv Sys Validate Request: " - "Vers=%d,link_id=%d,Corr=%d,WS name=%.8s," - "Host name=%.8s\n", - dev->name, p_ctlbk->version, - p_ctlbk->linkid, - p_ctlbk->correlator, - p_sysval->WS_name, - p_sysval->host_name); - if (memcmp(temp_host_name, p_sysval->host_name, 8)) { - claw_snd_sys_validate_rsp(dev, p_ctlbk, - CLAW_RC_NAME_MISMATCH); - CLAW_DBF_TEXT(2, setup, "HSTBAD"); - CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name); - CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name); - dev_warn(tdev, - "Host name %s for %s does not match the" - " remote adapter name %s\n", - p_sysval->host_name, - dev->name, - temp_host_name); - } - if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) { - claw_snd_sys_validate_rsp(dev, p_ctlbk, - CLAW_RC_NAME_MISMATCH); - CLAW_DBF_TEXT(2, setup, "WSNBAD"); - CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name); - CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name); - dev_warn(tdev, "Adapter name %s for %s does not match" - " the remote host name %s\n", - p_sysval->WS_name, - dev->name, - temp_ws_name); - } - if ((p_sysval->write_frame_size < p_env->write_size) && - (p_env->packing == 0)) { - claw_snd_sys_validate_rsp(dev, p_ctlbk, - CLAW_RC_HOST_RCV_TOO_SMALL); - dev_warn(tdev, - "The local write buffer is smaller than the" - " remote read buffer\n"); - CLAW_DBF_TEXT(2, setup, "wrtszbad"); - } - if ((p_sysval->read_frame_size < p_env->read_size) && - (p_env->packing == 0)) { - claw_snd_sys_validate_rsp(dev, p_ctlbk, - CLAW_RC_HOST_RCV_TOO_SMALL); - dev_warn(tdev, - "The local read buffer is smaller than the" - " remote write buffer\n"); - CLAW_DBF_TEXT(2, setup, "rdsizbad"); - } - claw_snd_sys_validate_rsp(dev, p_ctlbk, 0); - dev_info(tdev, - "CLAW device %.8s: System validate" - " completed.\n", temp_ws_name); - dev_info(tdev, - "%s: sys Validate Rsize:%d Wsize:%d\n", - dev->name, p_sysval->read_frame_size, - p_sysval->write_frame_size); - privptr->system_validate_comp = 1; - if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0) - p_env->packing = PACKING_ASK; - claw_strt_conn_req(dev); - break; - case SYSTEM_VALIDATE_RESPONSE: - p_sysval = (struct sysval *)&(p_ctlbk->data); - dev_info(tdev, - "Settings for %s validated (version=%d, " - "remote device=%d, rc=%d, adapter name=%.8s, " - "host name=%.8s)\n", - dev->name, - p_ctlbk->version, - p_ctlbk->correlator, - p_ctlbk->rc, - p_sysval->WS_name, - p_sysval->host_name); - switch (p_ctlbk->rc) { - case 0: - dev_info(tdev, "%s: CLAW device " - "%.8s: System validate completed.\n", - dev->name, temp_ws_name); - if (privptr->system_validate_comp == 0) - claw_strt_conn_req(dev); - privptr->system_validate_comp = 1; - break; - case CLAW_RC_NAME_MISMATCH: - dev_warn(tdev, "Validating %s failed because of" - " a host or adapter name mismatch\n", - dev->name); - break; - case CLAW_RC_WRONG_VERSION: - dev_warn(tdev, "Validating %s failed because of a" - " version conflict\n", - dev->name); - break; - case CLAW_RC_HOST_RCV_TOO_SMALL: - dev_warn(tdev, "Validating %s failed because of a" - " frame size conflict\n", - dev->name); - 
break; - default: - dev_warn(tdev, "The communication peer of %s rejected" - " the connection\n", - dev->name); - break; - } - break; - - case CONNECTION_REQUEST: - p_connect = (struct conncmd *)&(p_ctlbk->data); - dev_info(tdev, "%s: Recv Conn Req: Vers=%d,link_id=%d," - "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n", - dev->name, - p_ctlbk->version, - p_ctlbk->linkid, - p_ctlbk->correlator, - p_connect->host_name, - p_connect->WS_name); - if (privptr->active_link_ID != 0) { - claw_snd_disc(dev, p_ctlbk); - dev_info(tdev, "%s rejected a connection request" - " because it is already active\n", - dev->name); - } - if (p_ctlbk->linkid != 1) { - claw_snd_disc(dev, p_ctlbk); - dev_info(tdev, "%s rejected a request to open multiple" - " connections\n", - dev->name); - } - rc = find_link(dev, p_connect->host_name, p_connect->WS_name); - if (rc != 0) { - claw_snd_disc(dev, p_ctlbk); - dev_info(tdev, "%s rejected a connection request" - " because of a type mismatch\n", - dev->name); - } - claw_send_control(dev, - CONNECTION_CONFIRM, p_ctlbk->linkid, - p_ctlbk->correlator, - 0, p_connect->host_name, - p_connect->WS_name); - if (p_env->packing == PACKING_ASK) { - p_env->packing = PACK_SEND; - claw_snd_conn_req(dev, 0); - } - dev_info(tdev, "%s: CLAW device %.8s: Connection " - "completed link_id=%d.\n", - dev->name, temp_ws_name, - p_ctlbk->linkid); - privptr->active_link_ID = p_ctlbk->linkid; - p_ch = &privptr->channel[WRITE_CHANNEL]; - wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */ - break; - case CONNECTION_RESPONSE: - p_connect = (struct conncmd *)&(p_ctlbk->data); - dev_info(tdev, "%s: Recv Conn Resp: Vers=%d,link_id=%d," - "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n", - dev->name, - p_ctlbk->version, - p_ctlbk->linkid, - p_ctlbk->correlator, - p_ctlbk->rc, - p_connect->host_name, - p_connect->WS_name); - - if (p_ctlbk->rc != 0) { - dev_warn(tdev, "The communication peer of %s rejected" - " a connection request\n", - dev->name); - return 1; - } - rc = find_link(dev, - p_connect->host_name, p_connect->WS_name); - if (rc != 0) { - claw_snd_disc(dev, p_ctlbk); - dev_warn(tdev, "The communication peer of %s" - " rejected a connection " - "request because of a type mismatch\n", - dev->name); - } - /* should be until CONNECTION_CONFIRM */ - privptr->active_link_ID = -(p_ctlbk->linkid); - break; - case CONNECTION_CONFIRM: - p_connect = (struct conncmd *)&(p_ctlbk->data); - dev_info(tdev, - "%s: Recv Conn Confirm:Vers=%d,link_id=%d," - "Corr=%d,Host appl=%.8s,WS appl=%.8s\n", - dev->name, - p_ctlbk->version, - p_ctlbk->linkid, - p_ctlbk->correlator, - p_connect->host_name, - p_connect->WS_name); - if (p_ctlbk->linkid == -(privptr->active_link_ID)) { - privptr->active_link_ID = p_ctlbk->linkid; - if (p_env->packing > PACKING_ASK) { - dev_info(tdev, - "%s: Confirmed Now packing\n", dev->name); - p_env->packing = DO_PACKED; - } - p_ch = &privptr->channel[WRITE_CHANNEL]; - wake_up(&p_ch->wait); - } else { - dev_warn(tdev, "Activating %s failed because of" - " an incorrect link ID=%d\n", - dev->name, p_ctlbk->linkid); - claw_snd_disc(dev, p_ctlbk); - } - break; - case DISCONNECT: - dev_info(tdev, "%s: Disconnect: " - "Vers=%d,link_id=%d,Corr=%d\n", - dev->name, p_ctlbk->version, - p_ctlbk->linkid, p_ctlbk->correlator); - if ((p_ctlbk->linkid == 2) && - (p_env->packing == PACK_SEND)) { - privptr->active_link_ID = 1; - p_env->packing = DO_PACKED; - } else - privptr->active_link_ID = 0; - break; - case CLAW_ERROR: - dev_warn(tdev, "The communication peer of %s failed\n", - dev->name); - break; - default: - 
dev_warn(tdev, "The communication peer of %s sent" - " an unknown command code\n", - dev->name); - break; - } - - return 0; -} /* end of claw_process_control */ - - -/*-------------------------------------------------------------------* -* claw_send_control * -* * -*--------------------------------------------------------------------*/ - -static int -claw_send_control(struct net_device *dev, __u8 type, __u8 link, - __u8 correlator, __u8 rc, char *local_name, char *remote_name) -{ - struct claw_privbk *privptr; - struct clawctl *p_ctl; - struct sysval *p_sysval; - struct conncmd *p_connect; - struct sk_buff *skb; - - CLAW_DBF_TEXT(2, setup, "sndcntl"); - privptr = dev->ml_priv; - p_ctl=(struct clawctl *)&privptr->ctl_bk; - - p_ctl->command=type; - p_ctl->version=CLAW_VERSION_ID; - p_ctl->linkid=link; - p_ctl->correlator=correlator; - p_ctl->rc=rc; - - p_sysval=(struct sysval *)&p_ctl->data; - p_connect=(struct conncmd *)&p_ctl->data; - - switch (p_ctl->command) { - case SYSTEM_VALIDATE_REQUEST: - case SYSTEM_VALIDATE_RESPONSE: - memcpy(&p_sysval->host_name, local_name, 8); - memcpy(&p_sysval->WS_name, remote_name, 8); - if (privptr->p_env->packing > 0) { - p_sysval->read_frame_size = DEF_PACK_BUFSIZE; - p_sysval->write_frame_size = DEF_PACK_BUFSIZE; - } else { - /* how big is the biggest group of packets */ - p_sysval->read_frame_size = - privptr->p_env->read_size; - p_sysval->write_frame_size = - privptr->p_env->write_size; - } - memset(&p_sysval->reserved, 0x00, 4); - break; - case CONNECTION_REQUEST: - case CONNECTION_RESPONSE: - case CONNECTION_CONFIRM: - case DISCONNECT: - memcpy(&p_sysval->host_name, local_name, 8); - memcpy(&p_sysval->WS_name, remote_name, 8); - if (privptr->p_env->packing > 0) { - /* How big is the biggest packet */ - p_connect->reserved1[0]=CLAW_FRAME_SIZE; - p_connect->reserved1[1]=CLAW_FRAME_SIZE; - } else { - memset(&p_connect->reserved1, 0x00, 4); - memset(&p_connect->reserved2, 0x00, 4); - } - break; - default: - break; - } - - /* write Control Record to the device */ - - - skb = dev_alloc_skb(sizeof(struct clawctl)); - if (!skb) { - return -ENOMEM; - } - memcpy(skb_put(skb, sizeof(struct clawctl)), - p_ctl, sizeof(struct clawctl)); - if (privptr->p_env->packing >= PACK_SEND) - claw_hw_tx(skb, dev, 1); - else - claw_hw_tx(skb, dev, 0); - return 0; -} /* end of claw_send_control */ - -/*-------------------------------------------------------------------* -* claw_snd_conn_req * -* * -*--------------------------------------------------------------------*/ -static int -claw_snd_conn_req(struct net_device *dev, __u8 link) -{ - int rc; - struct claw_privbk *privptr = dev->ml_priv; - struct clawctl *p_ctl; - - CLAW_DBF_TEXT(2, setup, "snd_conn"); - rc = 1; - p_ctl=(struct clawctl *)&privptr->ctl_bk; - p_ctl->linkid = link; - if ( privptr->system_validate_comp==0x00 ) { - return rc; - } - if (privptr->p_env->packing == PACKING_ASK ) - rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0, - WS_APPL_NAME_PACKED, WS_APPL_NAME_PACKED); - if (privptr->p_env->packing == PACK_SEND) { - rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0, - WS_APPL_NAME_IP_NAME, WS_APPL_NAME_IP_NAME); - } - if (privptr->p_env->packing == 0) - rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0, - HOST_APPL_NAME, privptr->p_env->api_type); - return rc; - -} /* end of claw_snd_conn_req */ - - -/*-------------------------------------------------------------------* -* claw_snd_disc * -* * -*--------------------------------------------------------------------*/ - -static int -claw_snd_disc(struct 
net_device *dev, struct clawctl * p_ctl) -{ - int rc; - struct conncmd * p_connect; - - CLAW_DBF_TEXT(2, setup, "snd_dsc"); - p_connect=(struct conncmd *)&p_ctl->data; - - rc=claw_send_control(dev, DISCONNECT, p_ctl->linkid, - p_ctl->correlator, 0, - p_connect->host_name, p_connect->WS_name); - return rc; -} /* end of claw_snd_disc */ - - -/*-------------------------------------------------------------------* -* claw_snd_sys_validate_rsp * -* * -*--------------------------------------------------------------------*/ - -static int -claw_snd_sys_validate_rsp(struct net_device *dev, - struct clawctl *p_ctl, __u32 return_code) -{ - struct claw_env * p_env; - struct claw_privbk *privptr; - int rc; - - CLAW_DBF_TEXT(2, setup, "chkresp"); - privptr = dev->ml_priv; - p_env=privptr->p_env; - rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE, - p_ctl->linkid, - p_ctl->correlator, - return_code, - p_env->host_name, - p_env->adapter_name ); - return rc; -} /* end of claw_snd_sys_validate_rsp */ - -/*-------------------------------------------------------------------* -* claw_strt_conn_req * -* * -*--------------------------------------------------------------------*/ - -static int -claw_strt_conn_req(struct net_device *dev ) -{ - int rc; - - CLAW_DBF_TEXT(2, setup, "conn_req"); - rc=claw_snd_conn_req(dev, 1); - return rc; -} /* end of claw_strt_conn_req */ - - - -/*-------------------------------------------------------------------* - * claw_stats * - *-------------------------------------------------------------------*/ - -static struct -net_device_stats *claw_stats(struct net_device *dev) -{ - struct claw_privbk *privptr; - - CLAW_DBF_TEXT(4, trace, "stats"); - privptr = dev->ml_priv; - return &privptr->stats; -} /* end of claw_stats */ - - -/*-------------------------------------------------------------------* -* unpack_read * -* * -*--------------------------------------------------------------------*/ -static void -unpack_read(struct net_device *dev ) -{ - struct sk_buff *skb; - struct claw_privbk *privptr; - struct claw_env *p_env; - struct ccwbk *p_this_ccw; - struct ccwbk *p_first_ccw; - struct ccwbk *p_last_ccw; - struct clawph *p_packh; - void *p_packd; - struct clawctl *p_ctlrec=NULL; - struct device *p_dev; - - __u32 len_of_data; - __u32 pack_off; - __u8 link_num; - __u8 mtc_this_frm=0; - __u32 bytes_to_mov; - int i=0; - int p=0; - - CLAW_DBF_TEXT(4, trace, "unpkread"); - p_first_ccw=NULL; - p_last_ccw=NULL; - p_packh=NULL; - p_packd=NULL; - privptr = dev->ml_priv; - - p_dev = &privptr->channel[READ_CHANNEL].cdev->dev; - p_env = privptr->p_env; - p_this_ccw=privptr->p_read_active_first; - while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) { - pack_off = 0; - p = 0; - p_this_ccw->header.flag=CLAW_PENDING; - privptr->p_read_active_first=p_this_ccw->next; - p_this_ccw->next=NULL; - p_packh = (struct clawph *)p_this_ccw->p_buffer; - if ((p_env->packing == PACK_SEND) && - (p_packh->len == 32) && - (p_packh->link_num == 0)) { /* is it a packed ctl rec? 
*/ - p_packh++; /* peek past pack header */ - p_ctlrec = (struct clawctl *)p_packh; - p_packh--; /* un peek */ - if ((p_ctlrec->command == CONNECTION_RESPONSE) || - (p_ctlrec->command == CONNECTION_CONFIRM)) - p_env->packing = DO_PACKED; - } - if (p_env->packing == DO_PACKED) - link_num=p_packh->link_num; - else - link_num=p_this_ccw->header.opcode / 8; - if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) { - mtc_this_frm=1; - if (p_this_ccw->header.length!= - privptr->p_env->read_size ) { - dev_warn(p_dev, - "The communication peer of %s" - " sent a faulty" - " frame of length %02x\n", - dev->name, p_this_ccw->header.length); - } - } - - if (privptr->mtc_skipping) { - /* - * We're in the mode of skipping past a - * multi-frame message - * that we can't process for some reason or other. - * The first frame without the More-To-Come flag is - * the last frame of the skipped message. - */ - /* in case of More-To-Come not set in this frame */ - if (mtc_this_frm==0) { - privptr->mtc_skipping=0; /* Ok, the end */ - privptr->mtc_logical_link=-1; - } - goto NextFrame; - } - - if (link_num==0) { - claw_process_control(dev, p_this_ccw); - CLAW_DBF_TEXT(4, trace, "UnpkCntl"); - goto NextFrame; - } -unpack_next: - if (p_env->packing == DO_PACKED) { - if (pack_off > p_env->read_size) - goto NextFrame; - p_packd = p_this_ccw->p_buffer+pack_off; - p_packh = (struct clawph *) p_packd; - if ((p_packh->len == 0) || /* done with this frame? */ - (p_packh->flag != 0)) - goto NextFrame; - bytes_to_mov = p_packh->len; - pack_off += bytes_to_mov+sizeof(struct clawph); - p++; - } else { - bytes_to_mov=p_this_ccw->header.length; - } - if (privptr->mtc_logical_link<0) { - - /* - * if More-To-Come is set in this frame then we don't know - * length of entire message, and hence have to allocate - * large buffer */ - - /* We are starting a new envelope */ - privptr->mtc_offset=0; - privptr->mtc_logical_link=link_num; - } - - if (bytes_to_mov > (MAX_ENVELOPE_SIZE- privptr->mtc_offset) ) { - /* error */ - privptr->stats.rx_frame_errors++; - goto NextFrame; - } - if (p_env->packing == DO_PACKED) { - memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset, - p_packd+sizeof(struct clawph), bytes_to_mov); - - } else { - memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset, - p_this_ccw->p_buffer, bytes_to_mov); - } - if (mtc_this_frm==0) { - len_of_data=privptr->mtc_offset+bytes_to_mov; - skb=dev_alloc_skb(len_of_data); - if (skb) { - memcpy(skb_put(skb,len_of_data), - privptr->p_mtc_envelope, - len_of_data); - skb->dev=dev; - skb_reset_mac_header(skb); - skb->protocol=htons(ETH_P_IP); - skb->ip_summed=CHECKSUM_UNNECESSARY; - privptr->stats.rx_packets++; - privptr->stats.rx_bytes+=len_of_data; - netif_rx(skb); - } - else { - dev_info(p_dev, "Allocating a buffer for" - " incoming data failed\n"); - privptr->stats.rx_dropped++; - } - privptr->mtc_offset=0; - privptr->mtc_logical_link=-1; - } - else { - privptr->mtc_offset+=bytes_to_mov; - } - if (p_env->packing == DO_PACKED) - goto unpack_next; -NextFrame: - /* - * Remove ThisCCWblock from active read queue, and add it - * to queue of free blocks to be reused. 
- */ - i++; - p_this_ccw->header.length=0xffff; - p_this_ccw->header.opcode=0xff; - /* - * add this one to the free queue for later reuse - */ - if (p_first_ccw==NULL) { - p_first_ccw = p_this_ccw; - } - else { - p_last_ccw->next = p_this_ccw; - } - p_last_ccw = p_this_ccw; - /* - * chain to next block on active read queue - */ - p_this_ccw = privptr->p_read_active_first; - CLAW_DBF_TEXT_(4, trace, "rxpkt %d", p); - } /* end of while */ - - /* check validity */ - - CLAW_DBF_TEXT_(4, trace, "rxfrm %d", i); - add_claw_reads(dev, p_first_ccw, p_last_ccw); - claw_strt_read(dev, LOCK_YES); - return; -} /* end of unpack_read */ - -/*-------------------------------------------------------------------* -* claw_strt_read * -* * -*--------------------------------------------------------------------*/ -static void -claw_strt_read (struct net_device *dev, int lock ) -{ - int rc = 0; - __u32 parm; - unsigned long saveflags = 0; - struct claw_privbk *privptr = dev->ml_priv; - struct ccwbk*p_ccwbk; - struct chbk *p_ch; - struct clawh *p_clawh; - p_ch = &privptr->channel[READ_CHANNEL]; - - CLAW_DBF_TEXT(4, trace, "StRdNter"); - p_clawh=(struct clawh *)privptr->p_claw_signal_blk; - p_clawh->flag=CLAW_IDLE; /* 0x00 */ - - if ((privptr->p_write_active_first!=NULL && - privptr->p_write_active_first->header.flag!=CLAW_PENDING) || - (privptr->p_read_active_first!=NULL && - privptr->p_read_active_first->header.flag!=CLAW_PENDING )) { - p_clawh->flag=CLAW_BUSY; /* 0xff */ - } - if (lock==LOCK_YES) { - spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags); - } - if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) { - CLAW_DBF_TEXT(4, trace, "HotRead"); - p_ccwbk=privptr->p_read_active_first; - parm = (unsigned long) p_ch; - rc = ccw_device_start (p_ch->cdev, &p_ccwbk->read, parm, - 0xff, 0); - if (rc != 0) { - ccw_check_return_code(p_ch->cdev, rc); - } - } - else { - CLAW_DBF_TEXT(2, trace, "ReadAct"); - } - - if (lock==LOCK_YES) { - spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags); - } - CLAW_DBF_TEXT(4, trace, "StRdExit"); - return; -} /* end of claw_strt_read */ - -/*-------------------------------------------------------------------* -* claw_strt_out_IO * -* * -*--------------------------------------------------------------------*/ - -static void -claw_strt_out_IO( struct net_device *dev ) -{ - int rc = 0; - unsigned long parm; - struct claw_privbk *privptr; - struct chbk *p_ch; - struct ccwbk *p_first_ccw; - - if (!dev) { - return; - } - privptr = (struct claw_privbk *)dev->ml_priv; - p_ch = &privptr->channel[WRITE_CHANNEL]; - - CLAW_DBF_TEXT(4, trace, "strt_io"); - p_first_ccw=privptr->p_write_active_first; - - if (p_ch->claw_state == CLAW_STOP) - return; - if (p_first_ccw == NULL) { - return; - } - if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) { - parm = (unsigned long) p_ch; - CLAW_DBF_TEXT(2, trace, "StWrtIO"); - rc = ccw_device_start(p_ch->cdev, &p_first_ccw->write, parm, - 0xff, 0); - if (rc != 0) { - ccw_check_return_code(p_ch->cdev, rc); - } - } - dev->trans_start = jiffies; - return; -} /* end of claw_strt_out_IO */ - -/*-------------------------------------------------------------------* -* Free write buffers * -* * -*--------------------------------------------------------------------*/ - -static void -claw_free_wrt_buf( struct net_device *dev ) -{ - - struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv; - struct ccwbk*p_this_ccw; - struct ccwbk*p_next_ccw; - - CLAW_DBF_TEXT(4, trace, "freewrtb"); - /* scan the write queue to free any completed write 
packets */ - p_this_ccw=privptr->p_write_active_first; - while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING)) - { - p_next_ccw = p_this_ccw->next; - if (((p_next_ccw!=NULL) && - (p_next_ccw->header.flag!=CLAW_PENDING)) || - ((p_this_ccw == privptr->p_write_active_last) && - (p_this_ccw->header.flag!=CLAW_PENDING))) { - /* The next CCW is OK or this is */ - /* the last CCW...free it @A1A */ - privptr->p_write_active_first=p_this_ccw->next; - p_this_ccw->header.flag=CLAW_PENDING; - p_this_ccw->next=privptr->p_write_free_chain; - privptr->p_write_free_chain=p_this_ccw; - ++privptr->write_free_count; - privptr->stats.tx_bytes+= p_this_ccw->write.count; - p_this_ccw=privptr->p_write_active_first; - privptr->stats.tx_packets++; - } - else { - break; - } - } - if (privptr->write_free_count!=0) { - claw_clearbit_busy(TB_NOBUFFER,dev); - } - /* whole chain removed? */ - if (privptr->p_write_active_first==NULL) { - privptr->p_write_active_last=NULL; - } - CLAW_DBF_TEXT_(4, trace, "FWC=%d", privptr->write_free_count); - return; -} - -/*-------------------------------------------------------------------* -* claw free netdevice * -* * -*--------------------------------------------------------------------*/ -static void -claw_free_netdevice(struct net_device * dev, int free_dev) -{ - struct claw_privbk *privptr; - - CLAW_DBF_TEXT(2, setup, "free_dev"); - if (!dev) - return; - CLAW_DBF_TEXT_(2, setup, "%s", dev->name); - privptr = dev->ml_priv; - if (dev->flags & IFF_RUNNING) - claw_release(dev); - if (privptr) { - privptr->channel[READ_CHANNEL].ndev = NULL; /* say it's free */ - } - dev->ml_priv = NULL; -#ifdef MODULE - if (free_dev) { - free_netdev(dev); - } -#endif - CLAW_DBF_TEXT(2, setup, "free_ok"); -} - -/** - * Claw init netdevice - * Initialize everything of the net device except the name and the - * channel structs. - */ -static const struct net_device_ops claw_netdev_ops = { - .ndo_open = claw_open, - .ndo_stop = claw_release, - .ndo_get_stats = claw_stats, - .ndo_start_xmit = claw_tx, - .ndo_change_mtu = claw_change_mtu, -}; - -static void -claw_init_netdevice(struct net_device * dev) -{ - CLAW_DBF_TEXT(2, setup, "init_dev"); - CLAW_DBF_TEXT_(2, setup, "%s", dev->name); - dev->mtu = CLAW_DEFAULT_MTU_SIZE; - dev->hard_header_len = 0; - dev->addr_len = 0; - dev->type = ARPHRD_SLIP; - dev->tx_queue_len = 1300; - dev->flags = IFF_POINTOPOINT | IFF_NOARP; - dev->netdev_ops = &claw_netdev_ops; - CLAW_DBF_TEXT(2, setup, "initok"); - return; -} - -/** - * Init a new channel in the privptr->channel[i]. - * - * @param cdev The ccw_device to be added. - * - * @return 0 on success, !0 on error. - */ -static int -add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr) -{ - struct chbk *p_ch; - struct ccw_dev_id dev_id; - - CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cdev->dev)); - privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */ - p_ch = &privptr->channel[i]; - p_ch->cdev = cdev; - snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", dev_name(&cdev->dev)); - ccw_device_get_id(cdev, &dev_id); - p_ch->devno = dev_id.devno; - if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) { - return -ENOMEM; - } - return 0; -} - - -/** - * - * Setup an interface. - * - * @param cgdev Device to be setup. - * - * @returns 0 on success, !0 on failure. 
- */ -static int -claw_new_device(struct ccwgroup_device *cgdev) -{ - struct claw_privbk *privptr; - struct claw_env *p_env; - struct net_device *dev; - int ret; - struct ccw_dev_id dev_id; - - dev_info(&cgdev->dev, "add for %s\n", - dev_name(&cgdev->cdev[READ_CHANNEL]->dev)); - CLAW_DBF_TEXT(2, setup, "new_dev"); - privptr = dev_get_drvdata(&cgdev->dev); - dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr); - dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr); - if (!privptr) - return -ENODEV; - p_env = privptr->p_env; - ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id); - p_env->devno[READ_CHANNEL] = dev_id.devno; - ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id); - p_env->devno[WRITE_CHANNEL] = dev_id.devno; - ret = add_channel(cgdev->cdev[0],0,privptr); - if (ret == 0) - ret = add_channel(cgdev->cdev[1],1,privptr); - if (ret != 0) { - dev_warn(&cgdev->dev, "Creating a CLAW group device" - " failed with error code %d\n", ret); - goto out; - } - ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]); - if (ret != 0) { - dev_warn(&cgdev->dev, - "Setting the read subchannel online" - " failed with error code %d\n", ret); - goto out; - } - ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]); - if (ret != 0) { - dev_warn(&cgdev->dev, - "Setting the write subchannel online " - "failed with error code %d\n", ret); - goto out; - } - dev = alloc_netdev(0, "claw%d", NET_NAME_UNKNOWN, claw_init_netdevice); - if (!dev) { - dev_warn(&cgdev->dev, - "Activating the CLAW device failed\n"); - goto out; - } - dev->ml_priv = privptr; - dev_set_drvdata(&cgdev->dev, privptr); - dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr); - dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr); - /* sysfs magic */ - SET_NETDEV_DEV(dev, &cgdev->dev); - if (register_netdev(dev) != 0) { - claw_free_netdevice(dev, 1); - CLAW_DBF_TEXT(2, trace, "regfail"); - goto out; - } - dev->flags &=~IFF_RUNNING; - if (privptr->buffs_alloc == 0) { - ret=init_ccw_bk(dev); - if (ret !=0) { - unregister_netdev(dev); - claw_free_netdevice(dev,1); - CLAW_DBF_TEXT(2, trace, "ccwmem"); - goto out; - } - } - privptr->channel[READ_CHANNEL].ndev = dev; - privptr->channel[WRITE_CHANNEL].ndev = dev; - privptr->p_env->ndev = dev; - - dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d " - "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n", - dev->name, p_env->read_size, - p_env->write_size, p_env->read_buffers, - p_env->write_buffers, p_env->devno[READ_CHANNEL], - p_env->devno[WRITE_CHANNEL]); - dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name " - ":%.8s api_type: %.8s\n", - dev->name, p_env->host_name, - p_env->adapter_name , p_env->api_type); - return 0; -out: - ccw_device_set_offline(cgdev->cdev[1]); - ccw_device_set_offline(cgdev->cdev[0]); - return -ENODEV; -} - -static void -claw_purge_skb_queue(struct sk_buff_head *q) -{ - struct sk_buff *skb; - - CLAW_DBF_TEXT(4, trace, "purgque"); - while ((skb = skb_dequeue(q))) { - atomic_dec(&skb->users); - dev_kfree_skb_any(skb); - } -} - -/** - * Shutdown an interface. - * - * @param cgdev Device to be shut down. - * - * @returns 0 on success, !0 on failure. 
- */ -static int -claw_shutdown_device(struct ccwgroup_device *cgdev) -{ - struct claw_privbk *priv; - struct net_device *ndev; - int ret = 0; - - CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev)); - priv = dev_get_drvdata(&cgdev->dev); - if (!priv) - return -ENODEV; - ndev = priv->channel[READ_CHANNEL].ndev; - if (ndev) { - /* Close the device */ - dev_info(&cgdev->dev, "%s: shutting down\n", - ndev->name); - if (ndev->flags & IFF_RUNNING) - ret = claw_release(ndev); - ndev->flags &=~IFF_RUNNING; - unregister_netdev(ndev); - ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */ - claw_free_netdevice(ndev, 1); - priv->channel[READ_CHANNEL].ndev = NULL; - priv->channel[WRITE_CHANNEL].ndev = NULL; - priv->p_env->ndev = NULL; - } - ccw_device_set_offline(cgdev->cdev[1]); - ccw_device_set_offline(cgdev->cdev[0]); - return ret; -} - -static void -claw_remove_device(struct ccwgroup_device *cgdev) -{ - struct claw_privbk *priv; - - CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev)); - priv = dev_get_drvdata(&cgdev->dev); - dev_info(&cgdev->dev, " will be removed.\n"); - if (cgdev->state == CCWGROUP_ONLINE) - claw_shutdown_device(cgdev); - kfree(priv->p_mtc_envelope); - priv->p_mtc_envelope=NULL; - kfree(priv->p_env); - priv->p_env=NULL; - kfree(priv->channel[0].irb); - priv->channel[0].irb=NULL; - kfree(priv->channel[1].irb); - priv->channel[1].irb=NULL; - kfree(priv); - dev_set_drvdata(&cgdev->dev, NULL); - dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL); - dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL); - put_device(&cgdev->dev); - - return; -} - - -/* - * sysfs attributes - */ -static ssize_t -claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct claw_privbk *priv; - struct claw_env * p_env; - - priv = dev_get_drvdata(dev); - if (!priv) - return -ENODEV; - p_env = priv->p_env; - return sprintf(buf, "%s\n",p_env->host_name); -} - -static ssize_t -claw_hname_write(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct claw_privbk *priv; - struct claw_env * p_env; - - priv = dev_get_drvdata(dev); - if (!priv) - return -ENODEV; - p_env = priv->p_env; - if (count > MAX_NAME_LEN+1) - return -EINVAL; - memset(p_env->host_name, 0x20, MAX_NAME_LEN); - strncpy(p_env->host_name,buf, count); - p_env->host_name[count-1] = 0x20; /* clear extra 0x0a */ - p_env->host_name[MAX_NAME_LEN] = 0x00; - CLAW_DBF_TEXT(2, setup, "HstnSet"); - CLAW_DBF_TEXT_(2, setup, "%s", p_env->host_name); - - return count; -} - -static DEVICE_ATTR(host_name, 0644, claw_hname_show, claw_hname_write); - -static ssize_t -claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct claw_privbk *priv; - struct claw_env * p_env; - - priv = dev_get_drvdata(dev); - if (!priv) - return -ENODEV; - p_env = priv->p_env; - return sprintf(buf, "%s\n", p_env->adapter_name); -} - -static ssize_t -claw_adname_write(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct claw_privbk *priv; - struct claw_env * p_env; - - priv = dev_get_drvdata(dev); - if (!priv) - return -ENODEV; - p_env = priv->p_env; - if (count > MAX_NAME_LEN+1) - return -EINVAL; - memset(p_env->adapter_name, 0x20, MAX_NAME_LEN); - strncpy(p_env->adapter_name,buf, count); - p_env->adapter_name[count-1] = 0x20; /* clear extra 0x0a */ - p_env->adapter_name[MAX_NAME_LEN] = 0x00; - CLAW_DBF_TEXT(2, setup, "AdnSet"); - CLAW_DBF_TEXT_(2, setup, "%s", p_env->adapter_name); - - return count; -} - -static 
DEVICE_ATTR(adapter_name, 0644, claw_adname_show, claw_adname_write); - -static ssize_t -claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct claw_privbk *priv; - struct claw_env * p_env; - - priv = dev_get_drvdata(dev); - if (!priv) - return -ENODEV; - p_env = priv->p_env; - return sprintf(buf, "%s\n", - p_env->api_type); -} - -static ssize_t -claw_apname_write(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct claw_privbk *priv; - struct claw_env * p_env; - - priv = dev_get_drvdata(dev); - if (!priv) - return -ENODEV; - p_env = priv->p_env; - if (count > MAX_NAME_LEN+1) - return -EINVAL; - memset(p_env->api_type, 0x20, MAX_NAME_LEN); - strncpy(p_env->api_type,buf, count); - p_env->api_type[count-1] = 0x20; /* we get a loose 0x0a */ - p_env->api_type[MAX_NAME_LEN] = 0x00; - if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) { - p_env->read_size=DEF_PACK_BUFSIZE; - p_env->write_size=DEF_PACK_BUFSIZE; - p_env->packing=PACKING_ASK; - CLAW_DBF_TEXT(2, setup, "PACKING"); - } - else { - p_env->packing=0; - p_env->read_size=CLAW_FRAME_SIZE; - p_env->write_size=CLAW_FRAME_SIZE; - CLAW_DBF_TEXT(2, setup, "ApiSet"); - } - CLAW_DBF_TEXT_(2, setup, "%s", p_env->api_type); - return count; -} - -static DEVICE_ATTR(api_type, 0644, claw_apname_show, claw_apname_write); - -static ssize_t -claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct claw_privbk *priv; - struct claw_env * p_env; - - priv = dev_get_drvdata(dev); - if (!priv) - return -ENODEV; - p_env = priv->p_env; - return sprintf(buf, "%d\n", p_env->write_buffers); -} - -static ssize_t -claw_wbuff_write(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct claw_privbk *priv; - struct claw_env * p_env; - int nnn,max; - - priv = dev_get_drvdata(dev); - if (!priv) - return -ENODEV; - p_env = priv->p_env; - sscanf(buf, "%i", &nnn); - if (p_env->packing) { - max = 64; - } - else { - max = 512; - } - if ((nnn > max ) || (nnn < 2)) - return -EINVAL; - p_env->write_buffers = nnn; - CLAW_DBF_TEXT(2, setup, "Wbufset"); - CLAW_DBF_TEXT_(2, setup, "WB=%d", p_env->write_buffers); - return count; -} - -static DEVICE_ATTR(write_buffer, 0644, claw_wbuff_show, claw_wbuff_write); - -static ssize_t -claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct claw_privbk *priv; - struct claw_env * p_env; - - priv = dev_get_drvdata(dev); - if (!priv) - return -ENODEV; - p_env = priv->p_env; - return sprintf(buf, "%d\n", p_env->read_buffers); -} - -static ssize_t -claw_rbuff_write(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct claw_privbk *priv; - struct claw_env *p_env; - int nnn,max; - - priv = dev_get_drvdata(dev); - if (!priv) - return -ENODEV; - p_env = priv->p_env; - sscanf(buf, "%i", &nnn); - if (p_env->packing) { - max = 64; - } - else { - max = 512; - } - if ((nnn > max ) || (nnn < 2)) - return -EINVAL; - p_env->read_buffers = nnn; - CLAW_DBF_TEXT(2, setup, "Rbufset"); - CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers); - return count; -} -static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write); - -static struct attribute *claw_attr[] = { - &dev_attr_read_buffer.attr, - &dev_attr_write_buffer.attr, - &dev_attr_adapter_name.attr, - &dev_attr_api_type.attr, - &dev_attr_host_name.attr, - NULL, -}; -static struct attribute_group claw_attr_group = { - .attrs = claw_attr, -}; -static const struct 
attribute_group *claw_attr_groups[] = { - &claw_attr_group, - NULL, -}; -static const struct device_type claw_devtype = { - .name = "claw", - .groups = claw_attr_groups, -}; - -/*----------------------------------------------------------------* - * claw_probe * - * this function is called for each CLAW device. * - *----------------------------------------------------------------*/ -static int claw_probe(struct ccwgroup_device *cgdev) -{ - struct claw_privbk *privptr = NULL; - - CLAW_DBF_TEXT(2, setup, "probe"); - if (!get_device(&cgdev->dev)) - return -ENODEV; - privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL); - dev_set_drvdata(&cgdev->dev, privptr); - if (privptr == NULL) { - probe_error(cgdev); - put_device(&cgdev->dev); - CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM); - return -ENOMEM; - } - privptr->p_mtc_envelope = kzalloc(MAX_ENVELOPE_SIZE, GFP_KERNEL); - privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL); - if ((privptr->p_mtc_envelope == NULL) || (privptr->p_env == NULL)) { - probe_error(cgdev); - put_device(&cgdev->dev); - CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM); - return -ENOMEM; - } - memcpy(privptr->p_env->adapter_name, WS_NAME_NOT_DEF, 8); - memcpy(privptr->p_env->host_name, WS_NAME_NOT_DEF, 8); - memcpy(privptr->p_env->api_type, WS_NAME_NOT_DEF, 8); - privptr->p_env->packing = 0; - privptr->p_env->write_buffers = 5; - privptr->p_env->read_buffers = 5; - privptr->p_env->read_size = CLAW_FRAME_SIZE; - privptr->p_env->write_size = CLAW_FRAME_SIZE; - privptr->p_env->p_priv = privptr; - cgdev->cdev[0]->handler = claw_irq_handler; - cgdev->cdev[1]->handler = claw_irq_handler; - cgdev->dev.type = &claw_devtype; - CLAW_DBF_TEXT(2, setup, "prbext 0"); - - return 0; -} /* end of claw_probe */ - -/*--------------------------------------------------------------------* -* claw_init and cleanup * -*---------------------------------------------------------------------*/ - -static void __exit claw_cleanup(void) -{ - ccwgroup_driver_unregister(&claw_group_driver); - ccw_driver_unregister(&claw_ccw_driver); - root_device_unregister(claw_root_dev); - claw_unregister_debug_facility(); - pr_info("Driver unloaded\n"); -} - -/** - * Initialize module. - * This is called just after the module is loaded. - * - * @return 0 on success, !0 on error. - */ -static int __init claw_init(void) -{ - int ret = 0; - - pr_info("Loading %s\n", version); - ret = claw_register_debug_facility(); - if (ret) { - pr_err("Registering with the S/390 debug feature" - " failed with error code %d\n", ret); - goto out_err; - } - CLAW_DBF_TEXT(2, setup, "init_mod"); - claw_root_dev = root_device_register("claw"); - ret = PTR_ERR_OR_ZERO(claw_root_dev); - if (ret) - goto register_err; - ret = ccw_driver_register(&claw_ccw_driver); - if (ret) - goto ccw_err; - claw_group_driver.driver.groups = claw_drv_attr_groups; - ret = ccwgroup_driver_register(&claw_group_driver); - if (ret) - goto ccwgroup_err; - return 0; - -ccwgroup_err: - ccw_driver_unregister(&claw_ccw_driver); -ccw_err: - root_device_unregister(claw_root_dev); -register_err: - CLAW_DBF_TEXT(2, setup, "init_bad"); - claw_unregister_debug_facility(); -out_err: - pr_err("Initializing the claw device driver failed\n"); - return ret; -} - -module_init(claw_init); -module_exit(claw_cleanup); - -MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>"); -MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \ - "Copyright IBM Corp. 
2000, 2008\n"); -MODULE_LICENSE("GPL"); diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h deleted file mode 100644 index 3339b9b..0000000 --- a/drivers/s390/net/claw.h +++ /dev/null @@ -1,348 +0,0 @@ -/******************************************************* -* Define constants * -* * -********************************************************/ - -/*-----------------------------------------------------* -* CCW command codes for CLAW protocol * -*------------------------------------------------------*/ - -#define CCW_CLAW_CMD_WRITE 0x01 /* write - not including link */ -#define CCW_CLAW_CMD_READ 0x02 /* read */ -#define CCW_CLAW_CMD_NOP 0x03 /* NOP */ -#define CCW_CLAW_CMD_SENSE 0x04 /* Sense */ -#define CCW_CLAW_CMD_SIGNAL_SMOD 0x05 /* Signal Status Modifier */ -#define CCW_CLAW_CMD_TIC 0x08 /* TIC */ -#define CCW_CLAW_CMD_READHEADER 0x12 /* read header data */ -#define CCW_CLAW_CMD_READFF 0x22 /* read an FF */ -#define CCW_CLAW_CMD_SENSEID 0xe4 /* Sense ID */ - - -/*-----------------------------------------------------* -* CLAW Unique constants * -*------------------------------------------------------*/ - -#define MORE_to_COME_FLAG 0x04 /* OR with write CCW in case of m-t-c */ -#define CLAW_IDLE 0x00 /* flag to indicate CLAW is idle */ -#define CLAW_BUSY 0xff /* flag to indicate CLAW is busy */ -#define CLAW_PENDING 0x00 /* flag to indicate i/o is pending */ -#define CLAW_COMPLETE 0xff /* flag to indicate i/o completed */ - -/*-----------------------------------------------------* -* CLAW control command code * -*------------------------------------------------------*/ - -#define SYSTEM_VALIDATE_REQUEST 0x01 /* System Validate request */ -#define SYSTEM_VALIDATE_RESPONSE 0x02 /* System Validate response */ -#define CONNECTION_REQUEST 0x21 /* Connection request */ -#define CONNECTION_RESPONSE 0x22 /* Connection response */ -#define CONNECTION_CONFIRM 0x23 /* Connection confirm */ -#define DISCONNECT 0x24 /* Disconnect */ -#define CLAW_ERROR 0x41 /* CLAW error message */ -#define CLAW_VERSION_ID 2 /* CLAW version ID */ - -/*-----------------------------------------------------* -* CLAW adater sense bytes * -*------------------------------------------------------*/ - -#define CLAW_ADAPTER_SENSE_BYTE 0x41 /* Stop command issued to adapter */ - -/*-----------------------------------------------------* -* CLAW control command return codes * -*------------------------------------------------------*/ - -#define CLAW_RC_NAME_MISMATCH 166 /* names do not match */ -#define CLAW_RC_WRONG_VERSION 167 /* wrong CLAW version number */ -#define CLAW_RC_HOST_RCV_TOO_SMALL 180 /* Host maximum receive is */ - /* less than Linux on zSeries*/ - /* transmit size */ - -/*-----------------------------------------------------* -* CLAW Constants application name * -*------------------------------------------------------*/ - -#define HOST_APPL_NAME "TCPIP " -#define WS_APPL_NAME_IP_LINK "TCPIP " -#define WS_APPL_NAME_IP_NAME "IP " -#define WS_APPL_NAME_API_LINK "API " -#define WS_APPL_NAME_PACKED "PACKED " -#define WS_NAME_NOT_DEF "NOT_DEF " -#define PACKING_ASK 1 -#define PACK_SEND 2 -#define DO_PACKED 3 - -#define MAX_ENVELOPE_SIZE 65536 -#define CLAW_DEFAULT_MTU_SIZE 4096 -#define DEF_PACK_BUFSIZE 32768 -#define READ_CHANNEL 0 -#define WRITE_CHANNEL 1 - -#define TB_TX 0 /* sk buffer handling in process */ -#define TB_STOP 1 /* network device stop in process */ -#define TB_RETRY 2 /* retry in process */ -#define TB_NOBUFFER 3 /* no buffer on free queue */ -#define CLAW_MAX_LINK_ID 1 -#define 
CLAW_MAX_DEV 256 /* max claw devices */ -#define MAX_NAME_LEN 8 /* host name, adapter name length */ -#define CLAW_FRAME_SIZE 4096 -#define CLAW_ID_SIZE 20+3 - -/* state machine codes used in claw_irq_handler */ - -#define CLAW_STOP 0 -#define CLAW_START_HALT_IO 1 -#define CLAW_START_SENSEID 2 -#define CLAW_START_READ 3 -#define CLAW_START_WRITE 4 - -/*-----------------------------------------------------* -* Lock flag * -*------------------------------------------------------*/ -#define LOCK_YES 0 -#define LOCK_NO 1 - -/*-----------------------------------------------------* -* DBF Debug macros * -*------------------------------------------------------*/ -#define CLAW_DBF_TEXT(level, name, text) \ - do { \ - debug_text_event(claw_dbf_##name, level, text); \ - } while (0) - -#define CLAW_DBF_HEX(level,name,addr,len) \ -do { \ - debug_event(claw_dbf_##name,level,(void*)(addr),len); \ -} while (0) - -#define CLAW_DBF_TEXT_(level,name,text...) \ - do { \ - if (debug_level_enabled(claw_dbf_##name, level)) { \ - sprintf(debug_buffer, text); \ - debug_text_event(claw_dbf_##name, level, \ - debug_buffer); \ - } \ - } while (0) - -/** - * Enum for classifying detected devices. - */ -enum claw_channel_types { - /* Device is not a channel */ - claw_channel_type_none, - - /* Device is a CLAW channel device */ - claw_channel_type_claw -}; - - -/******************************************************* -* Define Control Blocks * -* * -********************************************************/ - -/*------------------------------------------------------*/ -/* CLAW header */ -/*------------------------------------------------------*/ - -struct clawh { - __u16 length; /* length of data read by preceding read CCW */ - __u8 opcode; /* equivalent read CCW */ - __u8 flag; /* flag of FF to indicate read was completed */ -}; - -/*------------------------------------------------------*/ -/* CLAW Packing header 4 bytes */ -/*------------------------------------------------------*/ -struct clawph { - __u16 len; /* Length of Packed Data Area */ - __u8 flag; /* Reserved not used */ - __u8 link_num; /* Link ID */ -}; - -/*------------------------------------------------------*/ -/* CLAW Ending struct ccwbk */ -/*------------------------------------------------------*/ -struct endccw { - __u32 real; /* real address of this block */ - __u8 write1; /* write 1 is active */ - __u8 read1; /* read 1 is active */ - __u16 reserved; /* reserved for future use */ - struct ccw1 write1_nop1; - struct ccw1 write1_nop2; - struct ccw1 write2_nop1; - struct ccw1 write2_nop2; - struct ccw1 read1_nop1; - struct ccw1 read1_nop2; - struct ccw1 read2_nop1; - struct ccw1 read2_nop2; -}; - -/*------------------------------------------------------*/ -/* CLAW struct ccwbk */ -/*------------------------------------------------------*/ -struct ccwbk { - void *next; /* pointer to next ccw block */ - __u32 real; /* real address of this ccw */ - void *p_buffer; /* virtual address of data */ - struct clawh header; /* claw header */ - struct ccw1 write; /* write CCW */ - struct ccw1 w_read_FF; /* read FF */ - struct ccw1 w_TIC_1; /* TIC */ - struct ccw1 read; /* read CCW */ - struct ccw1 read_h; /* read header */ - struct ccw1 signal; /* signal SMOD */ - struct ccw1 r_TIC_1; /* TIC1 */ - struct ccw1 r_read_FF; /* read FF */ - struct ccw1 r_TIC_2; /* TIC2 */ -}; - -/*------------------------------------------------------*/ -/* CLAW control block */ -/*------------------------------------------------------*/ -struct clawctl { - __u8 command; /* control 
command */ - __u8 version; /* CLAW protocol version */ - __u8 linkid; /* link ID */ - __u8 correlator; /* correlator */ - __u8 rc; /* return code */ - __u8 reserved1; /* reserved */ - __u8 reserved2; /* reserved */ - __u8 reserved3; /* reserved */ - __u8 data[24]; /* command specific fields */ -}; - -/*------------------------------------------------------*/ -/* Data for SYSTEMVALIDATE command */ -/*------------------------------------------------------*/ -struct sysval { - char WS_name[8]; /* Workstation System name */ - char host_name[8]; /* Host system name */ - __u16 read_frame_size; /* read frame size */ - __u16 write_frame_size; /* write frame size */ - __u8 reserved[4]; /* reserved */ -}; - -/*------------------------------------------------------*/ -/* Data for Connect command */ -/*------------------------------------------------------*/ -struct conncmd { - char WS_name[8]; /* Workstation application name */ - char host_name[8]; /* Host application name */ - __u16 reserved1[2]; /* read frame size */ - __u8 reserved2[4]; /* reserved */ -}; - -/*------------------------------------------------------*/ -/* Data for CLAW error */ -/*------------------------------------------------------*/ -struct clawwerror { - char reserved1[8]; /* reserved */ - char reserved2[8]; /* reserved */ - char reserved3[8]; /* reserved */ -}; - -/*------------------------------------------------------*/ -/* Data buffer for CLAW */ -/*------------------------------------------------------*/ -struct clawbuf { - char buffer[MAX_ENVELOPE_SIZE]; /* data buffer */ -}; - -/*------------------------------------------------------*/ -/* Channel control block for read and write channel */ -/*------------------------------------------------------*/ - -struct chbk { - unsigned int devno; - int irq; - char id[CLAW_ID_SIZE]; - __u32 IO_active; - __u8 claw_state; - struct irb *irb; - struct ccw_device *cdev; /* pointer to the channel device */ - struct net_device *ndev; - wait_queue_head_t wait; - struct tasklet_struct tasklet; - struct timer_list timer; - unsigned long flag_a; /* atomic flags */ -#define CLAW_BH_ACTIVE 0 - unsigned long flag_b; /* atomic flags */ -#define CLAW_WRITE_ACTIVE 0 - __u8 last_dstat; - __u8 flag; - struct sk_buff_head collect_queue; - spinlock_t collect_lock; -#define CLAW_WRITE 0x02 /* - Set if this is a write channel */ -#define CLAW_READ 0x01 /* - Set if this is a read channel */ -#define CLAW_TIMER 0x80 /* - Set if timer made the wake_up */ -}; - -/*--------------------------------------------------------------* -* CLAW environment block * -*---------------------------------------------------------------*/ - -struct claw_env { - unsigned int devno[2]; /* device number */ - char host_name[9]; /* Host name */ - char adapter_name [9]; /* adapter name */ - char api_type[9]; /* TCPIP, API or PACKED */ - void *p_priv; /* privptr */ - __u16 read_buffers; /* read buffer number */ - __u16 write_buffers; /* write buffer number */ - __u16 read_size; /* read buffer size */ - __u16 write_size; /* write buffer size */ - __u16 dev_id; /* device ident */ - __u8 packing; /* are we packing? 
*/ - __u8 in_use; /* device active flag */ - struct net_device *ndev; /* backward ptr to the net dev*/ -}; - -/*--------------------------------------------------------------* -* CLAW main control block * -*---------------------------------------------------------------*/ - -struct claw_privbk { - void *p_buff_ccw; - __u32 p_buff_ccw_num; - void *p_buff_read; - __u32 p_buff_read_num; - __u32 p_buff_pages_perread; - void *p_buff_write; - __u32 p_buff_write_num; - __u32 p_buff_pages_perwrite; - long active_link_ID; /* Active logical link ID */ - struct ccwbk *p_write_free_chain; /* pointer to free ccw chain */ - struct ccwbk *p_write_active_first; /* ptr to the first write ccw */ - struct ccwbk *p_write_active_last; /* ptr to the last write ccw */ - struct ccwbk *p_read_active_first; /* ptr to the first read ccw */ - struct ccwbk *p_read_active_last; /* ptr to the last read ccw */ - struct endccw *p_end_ccw; /*ptr to ending ccw */ - struct ccwbk *p_claw_signal_blk; /* ptr to signal block */ - __u32 write_free_count; /* number of free bufs for write */ - struct net_device_stats stats; /* device status */ - struct chbk channel[2]; /* Channel control blocks */ - __u8 mtc_skipping; - int mtc_offset; - int mtc_logical_link; - void *p_mtc_envelope; - struct sk_buff *pk_skb; /* packing buffer */ - int pk_cnt; - struct clawctl ctl_bk; - struct claw_env *p_env; - __u8 system_validate_comp; - __u8 release_pend; - __u8 checksum_received_ip_pkts; - __u8 buffs_alloc; - struct endccw end_ccw; - unsigned long tbusy; - -}; - - -/************************************************************/ -/* define global constants */ -/************************************************************/ - -#define CCWBK_SIZE sizeof(struct ccwbk) - - diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 642c77c..3466d3c 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -4218,7 +4218,7 @@ void qeth_setadp_promisc_mode(struct qeth_card *card) QETH_CARD_TEXT_(card, 4, "mode:%x", mode); iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, - sizeof(struct qeth_ipacmd_setadpparms)); + sizeof(struct qeth_ipacmd_setadpparms_hdr) + 8); if (!iob) return; cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); @@ -4290,7 +4290,8 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card) QETH_CARD_TEXT(card, 4, "chgmac"); iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, - sizeof(struct qeth_ipacmd_setadpparms)); + sizeof(struct qeth_ipacmd_setadpparms_hdr) + + sizeof(struct qeth_change_addr)); if (!iob) return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c index 1e180c4..a48a743 100644 --- a/drivers/ssb/main.c +++ b/drivers/ssb/main.c @@ -1135,6 +1135,8 @@ static u32 ssb_tmslow_reject_bitmask(struct ssb_device *dev) case SSB_IDLOW_SSBREV_25: /* TODO - find the proper REJECT bit */ case SSB_IDLOW_SSBREV_27: /* same here */ return SSB_TMSLOW_REJECT; /* this is a guess */ + case SSB_IDLOW_SSBREV: + break; default: WARN(1, KERN_INFO "ssb: Backplane Revision 0x%.8X\n", rev); } diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 2bbfc25e..18f05bf 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -390,7 +390,7 @@ static void handle_tx(struct vhost_net *net) ubufs = NULL; } /* TODO: Check specific error and bomb out unless ENOBUFS? 
*/ - err = sock->ops->sendmsg(NULL, sock, &msg, len); + err = sock->ops->sendmsg(sock, &msg, len); if (unlikely(err < 0)) { if (zcopy_used) { vhost_net_ubuf_put(ubufs); @@ -566,7 +566,7 @@ static void handle_rx(struct vhost_net *net) /* On overrun, truncate and discard */ if (unlikely(headcount > UIO_MAXIOV)) { iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1); - err = sock->ops->recvmsg(NULL, sock, &msg, + err = sock->ops->recvmsg(sock, &msg, 1, MSG_DONTWAIT | MSG_TRUNC); pr_debug("Discarded rx packet: len %zd\n", sock_len); continue; @@ -592,7 +592,7 @@ static void handle_rx(struct vhost_net *net) */ iov_iter_advance(&msg.msg_iter, vhost_hlen); } - err = sock->ops->recvmsg(NULL, sock, &msg, + err = sock->ops->recvmsg(sock, &msg, sock_len, MSG_DONTWAIT | MSG_TRUNC); /* Userspace might have consumed the packet meanwhile: * it's not supposed to do this usually, but might be hard diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 994739d..44057b4 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -434,6 +434,18 @@ static inline struct bcma_device *bcma_find_core(struct bcma_bus *bus, return bcma_find_core_unit(bus, coreid, 0); } +#ifdef CONFIG_BCMA_HOST_PCI +extern void bcma_host_pci_up(struct bcma_bus *bus); +extern void bcma_host_pci_down(struct bcma_bus *bus); +#else +static inline void bcma_host_pci_up(struct bcma_bus *bus) +{ +} +static inline void bcma_host_pci_down(struct bcma_bus *bus) +{ +} +#endif + extern bool bcma_core_is_enabled(struct bcma_device *core); extern void bcma_core_disable(struct bcma_device *core, u32 flags); extern int bcma_core_enable(struct bcma_device *core, u32 flags); diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index db6fa21..6cceedf 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -663,14 +663,6 @@ struct bcma_drv_cc_b { #define bcma_cc_maskset32(cc, offset, mask, set) \ bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set)) -extern void bcma_core_chipcommon_init(struct bcma_drv_cc *cc); -extern void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc); - -extern void bcma_chipco_suspend(struct bcma_drv_cc *cc); -extern void bcma_chipco_resume(struct bcma_drv_cc *cc); - -void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable); - extern u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks); extern u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc); @@ -690,9 +682,6 @@ u32 bcma_chipco_gpio_pullup(struct bcma_drv_cc *cc, u32 mask, u32 value); u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value); /* PMU support */ -extern void bcma_pmu_init(struct bcma_drv_cc *cc); -extern void bcma_pmu_early_init(struct bcma_drv_cc *cc); - extern void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value); extern void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset, diff --git a/include/linux/bcma/bcma_driver_gmac_cmn.h b/include/linux/bcma/bcma_driver_gmac_cmn.h index 4dd1f33..4354d4e 100644 --- a/include/linux/bcma/bcma_driver_gmac_cmn.h +++ b/include/linux/bcma/bcma_driver_gmac_cmn.h @@ -91,10 +91,4 @@ struct bcma_drv_gmac_cmn { #define gmac_cmn_write16(gc, offset, val) bcma_write16((gc)->core, offset, val) #define gmac_cmn_write32(gc, offset, val) bcma_write32((gc)->core, offset, val) -#ifdef CONFIG_BCMA_DRIVER_GMAC_CMN -extern void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc); -#else -static 
inline void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc) { } -#endif - #endif /* LINUX_BCMA_DRIVER_GMAC_CMN_H_ */ diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h index 0b3b32a..8eea7f9 100644 --- a/include/linux/bcma/bcma_driver_mips.h +++ b/include/linux/bcma/bcma_driver_mips.h @@ -39,21 +39,6 @@ struct bcma_drv_mips { u8 early_setup_done:1; }; -#ifdef CONFIG_BCMA_DRIVER_MIPS -extern void bcma_core_mips_init(struct bcma_drv_mips *mcore); -extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore); - -extern unsigned int bcma_core_mips_irq(struct bcma_device *dev); -#else -static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { } -static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { } - -static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev) -{ - return 0; -} -#endif - extern u32 bcma_cpu_clock(struct bcma_drv_mips *mcore); #endif /* LINUX_BCMA_DRIVER_MIPS_H_ */ diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h index 3f809ae..8e90004 100644 --- a/include/linux/bcma/bcma_driver_pci.h +++ b/include/linux/bcma/bcma_driver_pci.h @@ -238,12 +238,8 @@ struct bcma_drv_pci { #define pcicore_write16(pc, offset, val) bcma_write16((pc)->core, offset, val) #define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val) -extern void bcma_core_pci_early_init(struct bcma_drv_pci *pc); -extern void bcma_core_pci_init(struct bcma_drv_pci *pc); -extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, +extern int bcma_core_pci_irq_ctl(struct bcma_bus *bus, struct bcma_device *core, bool enable); -extern void bcma_core_pci_up(struct bcma_bus *bus); -extern void bcma_core_pci_down(struct bcma_bus *bus); extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up); extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev); diff --git a/include/linux/bcma/bcma_driver_pcie2.h b/include/linux/bcma/bcma_driver_pcie2.h index 5988b05..31e6d17 100644 --- a/include/linux/bcma/bcma_driver_pcie2.h +++ b/include/linux/bcma/bcma_driver_pcie2.h @@ -143,6 +143,8 @@ struct bcma_drv_pcie2 { struct bcma_device *core; + + u16 reqsize; }; #define pcie2_read16(pcie2, offset) bcma_read16((pcie2)->core, offset) @@ -153,6 +155,4 @@ struct bcma_drv_pcie2 { #define pcie2_set32(pcie2, offset, set) bcma_set32((pcie2)->core, offset, set) #define pcie2_mask32(pcie2, offset, mask) bcma_mask32((pcie2)->core, offset, mask) -void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2); - #endif /* LINUX_BCMA_DRIVER_PCIE2_H_ */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index bbfceb7..a884f5a 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -32,20 +32,16 @@ struct bpf_map { u32 key_size; u32 value_size; u32 max_entries; - struct bpf_map_ops *ops; + const struct bpf_map_ops *ops; struct work_struct work; }; struct bpf_map_type_list { struct list_head list_node; - struct bpf_map_ops *ops; + const struct bpf_map_ops *ops; enum bpf_map_type type; }; -void bpf_register_map_type(struct bpf_map_type_list *tl); -void bpf_map_put(struct bpf_map *map); -struct bpf_map *bpf_map_get(struct fd f); - /* function argument constraints */ enum bpf_arg_type { ARG_ANYTHING = 0, /* any argument is ok */ @@ -109,37 +105,51 @@ struct bpf_verifier_ops { struct bpf_prog_type_list { struct list_head list_node; - struct bpf_verifier_ops *ops; + const struct bpf_verifier_ops *ops; enum bpf_prog_type type; }; -void bpf_register_prog_type(struct bpf_prog_type_list *tl); 
- struct bpf_prog; struct bpf_prog_aux { atomic_t refcnt; - bool is_gpl_compatible; - enum bpf_prog_type prog_type; - struct bpf_verifier_ops *ops; - struct bpf_map **used_maps; u32 used_map_cnt; + const struct bpf_verifier_ops *ops; + struct bpf_map **used_maps; struct bpf_prog *prog; struct work_struct work; }; #ifdef CONFIG_BPF_SYSCALL -void bpf_prog_put(struct bpf_prog *prog); -#else -static inline void bpf_prog_put(struct bpf_prog *prog) {} -#endif +void bpf_register_prog_type(struct bpf_prog_type_list *tl); +void bpf_register_map_type(struct bpf_map_type_list *tl); + struct bpf_prog *bpf_prog_get(u32 ufd); +void bpf_prog_put(struct bpf_prog *prog); + +struct bpf_map *bpf_map_get(struct fd f); +void bpf_map_put(struct bpf_map *map); + /* verify correctness of eBPF program */ int bpf_check(struct bpf_prog *fp, union bpf_attr *attr); +#else +static inline void bpf_register_prog_type(struct bpf_prog_type_list *tl) +{ +} + +static inline struct bpf_prog *bpf_prog_get(u32 ufd) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void bpf_prog_put(struct bpf_prog *prog) +{ +} +#endif /* CONFIG_BPF_SYSCALL */ /* verifier prototypes for helper functions called from eBPF programs */ -extern struct bpf_func_proto bpf_map_lookup_elem_proto; -extern struct bpf_func_proto bpf_map_update_elem_proto; -extern struct bpf_func_proto bpf_map_delete_elem_proto; +extern const struct bpf_func_proto bpf_map_lookup_elem_proto; +extern const struct bpf_func_proto bpf_map_update_elem_proto; +extern const struct bpf_func_proto bpf_map_delete_elem_proto; #endif /* _LINUX_BPF_H */ diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 1d869d1..606563e 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -35,7 +35,6 @@ extern const struct header_ops eth_header_ops; int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len); -int eth_rebuild_header(struct sk_buff *skb); int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); diff --git a/include/linux/filter.h b/include/linux/filter.h index caac208..9ee8c67 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -145,8 +145,6 @@ struct bpf_prog_aux; .off = 0, \ .imm = ((__u64) (IMM)) >> 32 }) -#define BPF_PSEUDO_MAP_FD 1 - /* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */ #define BPF_LD_MAP_FD(DST, MAP_FD) \ BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD) @@ -310,9 +308,11 @@ struct bpf_binary_header { struct bpf_prog { u16 pages; /* Number of allocated pages */ bool jited; /* Is our filter JIT'ed? */ + bool gpl_compatible; /* Is our filter GPL compatible? 
*/ u32 len; /* Number of filter blocks */ - struct sock_fprog_kern *orig_prog; /* Original BPF program */ + enum bpf_prog_type type; /* Type of BPF program */ struct bpf_prog_aux *aux; /* Auxiliary fields */ + struct sock_fprog_kern *orig_prog; /* Original BPF program */ unsigned int (*bpf_func)(const struct sk_buff *skb, const struct bpf_insn *filter); /* Instructions for interpreter */ diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h index 6e82d88..40b0ab9 100644 --- a/include/linux/ieee802154.h +++ b/include/linux/ieee802154.h @@ -28,7 +28,8 @@ #include <asm/byteorder.h> #define IEEE802154_MTU 127 -#define IEEE802154_MIN_PSDU_LEN 5 +#define IEEE802154_ACK_PSDU_LEN 5 +#define IEEE802154_MIN_PSDU_LEN 9 #define IEEE802154_PAN_ID_BROADCAST 0xffff #define IEEE802154_ADDR_SHORT_BROADCAST 0xffff @@ -204,11 +205,18 @@ enum { /** * ieee802154_is_valid_psdu_len - check if psdu len is valid + * available lengths: + * 0-4 Reserved + * 5 MPDU (Acknowledgment) + * 6-8 Reserved + * 9-127 MPDU + * * @len: psdu len with (MHR + payload + MFR) */ static inline bool ieee802154_is_valid_psdu_len(const u8 len) { - return (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU); + return (len == IEEE802154_ACK_PSDU_LEN || + (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU)); } /** diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index a57bca2..dad8b00 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -44,6 +44,7 @@ struct br_ip_list { #define BR_PROMISC BIT(7) #define BR_PROXYARP BIT(8) #define BR_LEARNING_SYNC BIT(9) +#define BR_PROXYARP_WIFI BIT(10) extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index aff7ad8..66a7d76 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h @@ -19,6 +19,7 @@ #include <linux/netdevice.h> #include <linux/ppp_channel.h> #include <linux/skbuff.h> +#include <linux/workqueue.h> #include <uapi/linux/if_pppox.h> static inline struct pppoe_hdr *pppoe_hdr(const struct sk_buff *skb) @@ -32,6 +33,7 @@ struct pppoe_opt { struct pppoe_addr pa; /* what this socket is bound to*/ struct sockaddr_pppox relay; /* what socket data will be relayed to (PPPoE relaying) */ + struct work_struct padt_work;/* Work item for handling PADT */ }; struct pptp_opt { diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 2c677af..b5a6470 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -111,7 +111,9 @@ struct ip_mc_list { extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u16 proto); extern int igmp_rcv(struct sk_buff *); +extern int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr); extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr); +extern int __ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr); extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr); extern void ip_mc_drop_socket(struct sock *sk); extern int ip_mc_source(int add, int omode, struct sock *sk, diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 7b6d4e9..7299e95 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -163,6 +163,9 @@ enum { MLX4_QP_FLOW_STEERING_ATTACH = 0x65, MLX4_QP_FLOW_STEERING_DETACH = 0x66, MLX4_FLOW_STEERING_IB_UC_QP_RANGE = 0x64, + + /* Update and read QCN parameters */ + MLX4_CMD_CONGESTION_CTRL_OPCODE = 0x68, }; enum { @@ -233,6 +236,16 @@ struct mlx4_config_dev_params { u8 rx_csum_flags_port_2; }; 
+enum mlx4_en_congestion_control_algorithm { + MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT = 0, +}; + +enum mlx4_en_congestion_control_opmod { + MLX4_CONGESTION_CONTROL_GET_PARAMS, + MLX4_CONGESTION_CONTROL_GET_STATISTICS, + MLX4_CONGESTION_CONTROL_SET_PARAMS = 4, +}; + struct mlx4_dev; struct mlx4_cmd_mailbox { diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index e4ebff7..1cc5482 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -203,7 +203,8 @@ enum { MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18, MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19, MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20, - MLX4_DEV_CAP_FLAG2_PORT_REMAP = 1LL << 21 + MLX4_DEV_CAP_FLAG2_PORT_REMAP = 1LL << 21, + MLX4_DEV_CAP_FLAG2_QCN = 1LL << 22, }; enum { diff --git a/include/linux/net.h b/include/linux/net.h index 17d8339..e74114b 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -120,7 +120,6 @@ struct socket { struct vm_area_struct; struct page; -struct kiocb; struct sockaddr; struct msghdr; struct module; @@ -162,8 +161,8 @@ struct proto_ops { int (*compat_getsockopt)(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen); #endif - int (*sendmsg) (struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t total_len); + int (*sendmsg) (struct socket *sock, struct msghdr *m, + size_t total_len); /* Notes for implementing recvmsg: * =============================== * msg->msg_namelen should get updated by the recvmsg handlers @@ -172,9 +171,8 @@ struct proto_ops { * handlers can assume that msg.msg_name is either NULL or has * a minimum size of sizeof(struct sockaddr_storage). */ - int (*recvmsg) (struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t total_len, - int flags); + int (*recvmsg) (struct socket *sock, struct msghdr *m, + size_t total_len, int flags); int (*mmap) (struct file *file, struct socket *sock, struct vm_area_struct * vma); ssize_t (*sendpage) (struct socket *sock, struct page *page, diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 429d179..4541378 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -261,7 +261,6 @@ struct header_ops { unsigned short type, const void *daddr, const void *saddr, unsigned int len); int (*parse)(const struct sk_buff *skb, unsigned char *haddr); - int (*rebuild)(struct sk_buff *skb); int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); void (*cache_update)(struct hh_cache *hh, const struct net_device *dev, @@ -769,6 +768,8 @@ struct netdev_phys_item_id { typedef u16 (*select_queue_fallback_t)(struct net_device *dev, struct sk_buff *skb); +struct fib_info; + /* * This structure defines the management hooks for network devices. * The following hooks can be defined; unless noted otherwise, they are @@ -1032,6 +1033,14 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * int (*ndo_switch_port_stp_update)(struct net_device *dev, u8 state); * Called to notify switch device port of bridge port STP * state change. + * int (*ndo_sw_parent_fib_ipv4_add)(struct net_device *dev, __be32 dst, + * int dst_len, struct fib_info *fi, + * u8 tos, u8 type, u32 tb_id); + * Called to add/modify IPv4 route to switch device. + * int (*ndo_sw_parent_fib_ipv4_del)(struct net_device *dev, __be32 dst, + * int dst_len, struct fib_info *fi, + * u8 tos, u8 type, u32 tb_id); + * Called to delete IPv4 route from switch device. 
*/ struct net_device_ops { int (*ndo_init)(struct net_device *dev); @@ -1193,6 +1202,18 @@ struct net_device_ops { struct netdev_phys_item_id *psid); int (*ndo_switch_port_stp_update)(struct net_device *dev, u8 state); + int (*ndo_switch_fib_ipv4_add)(struct net_device *dev, + __be32 dst, + int dst_len, + struct fib_info *fi, + u8 tos, u8 type, + u32 tb_id); + int (*ndo_switch_fib_ipv4_del)(struct net_device *dev, + __be32 dst, + int dst_len, + struct fib_info *fi, + u8 tos, u8 type, + u32 tb_id); #endif }; @@ -1346,7 +1367,7 @@ enum netdev_priv_flags { * if one wants to override the ndo_*() functions * @ethtool_ops: Management operations * @fwd_ops: Management operations - * @header_ops: Includes callbacks for creating,parsing,rebuilding,etc + * @header_ops: Includes callbacks for creating,parsing,caching,etc * of Layer 2 headers. * * @flags: Interface flags (a la BSD) @@ -2400,15 +2421,6 @@ static inline int dev_parse_header(const struct sk_buff *skb, return dev->header_ops->parse(skb, haddr); } -static inline int dev_rebuild_header(struct sk_buff *skb) -{ - const struct net_device *dev = skb->dev; - - if (!dev->header_ops || !dev->header_ops->rebuild) - return 0; - return dev->header_ops->rebuild(skb); -} - typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); int register_gifconf(unsigned int family, gifconf_func_t *gifconf); static inline int unregister_gifconf(unsigned int family) diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index c755e49..bb39113 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h @@ -36,44 +36,6 @@ static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb) } } -static inline void nf_bridge_update_protocol(struct sk_buff *skb) -{ - if (skb->nf_bridge->mask & BRNF_8021Q) - skb->protocol = htons(ETH_P_8021Q); - else if (skb->nf_bridge->mask & BRNF_PPPoE) - skb->protocol = htons(ETH_P_PPP_SES); -} - -/* Fill in the header for fragmented IP packets handled by - * the IPv4 connection tracking code. 
- * - * Only used in br_forward.c - */ -static inline int nf_bridge_copy_header(struct sk_buff *skb) -{ - int err; - unsigned int header_size; - - nf_bridge_update_protocol(skb); - header_size = ETH_HLEN + nf_bridge_encap_header_len(skb); - err = skb_cow_head(skb, header_size); - if (err) - return err; - - skb_copy_to_linear_data_offset(skb, -header_size, - skb->nf_bridge->data, header_size); - __skb_push(skb, nf_bridge_encap_header_len(skb)); - return 0; -} - -static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb) -{ - if (skb->nf_bridge && - skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT)) - return nf_bridge_copy_header(skb); - return 0; -} - static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb) { if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE)) @@ -82,18 +44,6 @@ static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb) } int br_handle_frame_finish(struct sk_buff *skb); -/* Only used in br_device.c */ -static inline int br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb) -{ - struct nf_bridge_info *nf_bridge = skb->nf_bridge; - - skb_pull(skb, ETH_HLEN); - nf_bridge->mask ^= BRNF_BRIDGED_DNAT; - skb_copy_to_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), - skb->nf_bridge->data, ETH_HLEN-ETH_ALEN); - skb->dev = nf_bridge->physindev; - return br_handle_frame_finish(skb); -} /* This is called by the IP fragmenting code and it ensures there is * enough room for the encapsulating header (if there is one). */ @@ -119,7 +69,6 @@ static inline void br_drop_fake_rtable(struct sk_buff *skb) } #else -#define nf_bridge_maybe_copy_header(skb) (0) #define nf_bridge_pad(skb) (0) #define br_drop_fake_rtable(skb) do { } while (0) #endif /* CONFIG_BRIDGE_NETFILTER */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 30007af..bba1330 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -492,7 +492,6 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, * @napi_id: id of the NAPI struct this skb came from * @secmark: security marking * @mark: Generic packet mark - * @dropcount: total number of sk_receive_queue overflows * @vlan_proto: vlan encapsulation protocol * @vlan_tci: vlan tag control information * @inner_protocol: Protocol (encapsulation) @@ -641,7 +640,6 @@ struct sk_buff { #endif union { __u32 mark; - __u32 dropcount; __u32 reserved_tailroom; }; @@ -870,8 +868,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data, void skb_abort_seq_read(struct skb_seq_state *st); unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, - unsigned int to, struct ts_config *config, - struct ts_state *state); + unsigned int to, struct ts_config *config); /* * Packet hash types specify the type of hash in skb_set_hash. 
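[The skb_find_text() hunk just above drops the struct ts_state argument; the search state is now kept inside skb_find_text() itself. A minimal sketch of a caller after this change follows; the "foo" pattern, the helper name and the error handling are illustrative only and not part of the patch:

    #include <linux/err.h>
    #include <linux/skbuff.h>
    #include <linux/textsearch.h>

    static bool skb_contains_foo(struct sk_buff *skb)
    {
            struct ts_config *conf;
            unsigned int pos;

            /* Prepare a Knuth-Morris-Pratt textsearch config for "foo". */
            conf = textsearch_prepare("kmp", "foo", 3, GFP_KERNEL, TS_AUTOLOAD);
            if (IS_ERR(conf))
                    return false;

            /* No struct ts_state to declare or pass any more. */
            pos = skb_find_text(skb, 0, skb->len, conf);
            textsearch_destroy(conf);

            /* skb_find_text() returns UINT_MAX when the pattern is not found. */
            return pos != UINT_MAX;
    }
]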
diff --git a/include/linux/socket.h b/include/linux/socket.h index 5c19cba..fab4d0d 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -181,6 +181,7 @@ struct ucred { #define AF_WANPIPE 25 /* Wanpipe API Sockets */ #define AF_LLC 26 /* Linux LLC */ #define AF_IB 27 /* Native InfiniBand address */ +#define AF_MPLS 28 /* MPLS */ #define AF_CAN 29 /* Controller Area Network */ #define AF_TIPC 30 /* TIPC sockets */ #define AF_BLUETOOTH 31 /* Bluetooth sockets */ @@ -226,6 +227,7 @@ struct ucred { #define PF_WANPIPE AF_WANPIPE #define PF_LLC AF_LLC #define PF_IB AF_IB +#define PF_MPLS AF_MPLS #define PF_CAN AF_CAN #define PF_TIPC AF_TIPC #define PF_BLUETOOTH AF_BLUETOOTH diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h index cd519a1..b63fe6f 100644 --- a/include/linux/spi/at86rf230.h +++ b/include/linux/spi/at86rf230.h @@ -22,6 +22,7 @@ struct at86rf230_platform_data { int rstn; int slp_tr; int dig2; + u8 xtal_trim; }; #endif diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 1a7adb4..97dbf16 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -236,7 +236,6 @@ struct tcp_sock { u32 lost_out; /* Lost packets */ u32 sacked_out; /* SACK'd packets */ u32 fackets_out; /* FACK'd packets */ - u32 tso_deferred; /* from STCP, retrans queue hinting */ struct sk_buff* lost_skb_hint; diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index d9a4905..ff3fb2b 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -228,8 +228,20 @@ struct skb_data { /* skb->cb is one of these */ struct usbnet *dev; enum skb_state state; size_t length; + unsigned long packets; }; +/* Drivers that set FLAG_MULTI_PACKET must call this in their + * tx_fixup method before returning an skb. + */ +static inline void +usbnet_set_skb_tx_stats(struct sk_buff *skb, unsigned long packets) +{ + struct skb_data *entry = (struct skb_data *) skb->cb; + + entry->packets = packets; +} + extern int usbnet_open(struct net_device *net); extern int usbnet_stop(struct net_device *net); extern netdev_tx_t usbnet_start_xmit(struct sk_buff *skb, diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index 0d87674..172632d 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h @@ -100,8 +100,8 @@ struct vsock_transport { /* DGRAM. 
*/ int (*dgram_bind)(struct vsock_sock *, struct sockaddr_vm *); - int (*dgram_dequeue)(struct kiocb *kiocb, struct vsock_sock *vsk, - struct msghdr *msg, size_t len, int flags); + int (*dgram_dequeue)(struct vsock_sock *vsk, struct msghdr *msg, + size_t len, int flags); int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *, struct msghdr *, size_t len); bool (*dgram_allow)(u32 cid, u32 port); diff --git a/include/net/arp.h b/include/net/arp.h index 73c4986..5e0f891 100644 --- a/include/net/arp.h +++ b/include/net/arp.h @@ -9,28 +9,17 @@ extern struct neigh_table arp_tbl; -static inline u32 arp_hashfn(u32 key, const struct net_device *dev, u32 hash_rnd) +static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32 *hash_rnd) { + u32 key = *(const u32 *)pkey; u32 val = key ^ hash32_ptr(dev); - return val * hash_rnd; + return val * hash_rnd[0]; } static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key) { - struct neigh_hash_table *nht = rcu_dereference_bh(arp_tbl.nht); - struct neighbour *n; - u32 hash_val; - - hash_val = arp_hashfn(key, dev, nht->hash_rnd[0]) >> (32 - nht->hash_shift); - for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]); - n != NULL; - n = rcu_dereference_bh(n->next)) { - if (n->dev == dev && *(u32 *)n->primary_key == key) - return n; - } - - return NULL; + return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev); } static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key) @@ -47,7 +36,6 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 } void arp_init(void); -int arp_find(unsigned char *haddr, struct sk_buff *skb); int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg); void arp_send(int type, int ptype, __be32 dest_ip, struct net_device *dev, __be32 src_ip, diff --git a/include/net/ax25.h b/include/net/ax25.h index bf0396e..16a923a 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -12,6 +12,7 @@ #include <linux/list.h> #include <linux/slab.h> #include <linux/atomic.h> +#include <net/neighbour.h> #define AX25_T1CLAMPLO 1 #define AX25_T1CLAMPHI (30 * HZ) @@ -366,9 +367,7 @@ int ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); /* ax25_ip.c */ -int ax25_hard_header(struct sk_buff *, struct net_device *, unsigned short, - const void *, const void *, unsigned int); -int ax25_rebuild_header(struct sk_buff *); +netdev_tx_t ax25_ip_xmit(struct sk_buff *skb); extern const struct header_ops ax25_header_ops; /* ax25_out.c */ diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index e00455a..6bb97df 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -245,10 +245,10 @@ int bt_sock_register(int proto, const struct net_proto_family *ops); void bt_sock_unregister(int proto); void bt_sock_link(struct bt_sock_list *l, struct sock *s); void bt_sock_unlink(struct bt_sock_list *l, struct sock *s); -int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, int flags); -int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, int flags); +int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + int flags); +int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg, + size_t len, int flags); uint bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait); int bt_sock_ioctl(struct socket 
*sock, unsigned int cmd, unsigned long arg); int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo); @@ -275,21 +275,17 @@ struct hci_dev; typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode); -struct hci_req_ctrl { - bool start; - u8 event; - hci_req_complete_t complete; -}; - struct bt_skb_cb { __u8 pkt_type; - __u8 incoming; + __u8 force_active; __u16 opcode; __u16 expect; - __u8 force_active; + __u8 incoming:1; + __u8 req_start:1; + u8 req_event; + hci_req_complete_t req_complete; struct l2cap_chan *chan; struct l2cap_ctrl control; - struct hci_req_ctrl req; bdaddr_t bdaddr; __le16 psm; }; diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 52863c3..acec914 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -108,7 +108,7 @@ struct bt_uuid { struct smp_csrk { bdaddr_t bdaddr; u8 bdaddr_type; - u8 master; + u8 type; u8 val[16]; }; @@ -373,6 +373,7 @@ struct hci_dev { int (*close)(struct hci_dev *hdev); int (*flush)(struct hci_dev *hdev); int (*setup)(struct hci_dev *hdev); + int (*shutdown)(struct hci_dev *hdev); int (*send)(struct hci_dev *hdev, struct sk_buff *skb); void (*notify)(struct hci_dev *hdev, unsigned int evt); void (*hw_error)(struct hci_dev *hdev, u8 code); @@ -498,19 +499,14 @@ struct hci_conn_params { extern struct list_head hci_dev_list; extern struct list_head hci_cb_list; extern rwlock_t hci_dev_list_lock; -extern rwlock_t hci_cb_list_lock; +extern struct mutex hci_cb_list_lock; /* ----- HCI interface to upper protocols ----- */ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr); -void l2cap_connect_cfm(struct hci_conn *hcon, u8 status); int l2cap_disconn_ind(struct hci_conn *hcon); -void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason); -int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt); int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags); int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags); -void sco_connect_cfm(struct hci_conn *hcon, __u8 status); -void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason); int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb); /* ----- Inquiry cache ----- */ @@ -1050,28 +1046,6 @@ static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, } } -static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status) -{ - switch (conn->type) { - case ACL_LINK: - case LE_LINK: - l2cap_connect_cfm(conn, status); - break; - - case SCO_LINK: - case ESCO_LINK: - sco_connect_cfm(conn, status); - break; - - default: - BT_ERR("unknown link type %d", conn->type); - break; - } - - if (conn->connect_cfm_cb) - conn->connect_cfm_cb(conn, status); -} - static inline int hci_proto_disconn_ind(struct hci_conn *conn) { if (conn->type != ACL_LINK && conn->type != LE_LINK) @@ -1080,91 +1054,69 @@ static inline int hci_proto_disconn_ind(struct hci_conn *conn) return l2cap_disconn_ind(conn); } -static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason) -{ - switch (conn->type) { - case ACL_LINK: - case LE_LINK: - l2cap_disconn_cfm(conn, reason); - break; - - case SCO_LINK: - case ESCO_LINK: - sco_disconn_cfm(conn, reason); - break; - - /* L2CAP would be handled for BREDR chan */ - case AMP_LINK: - break; +/* ----- HCI callbacks ----- */ +struct hci_cb { + struct list_head list; - default: - BT_ERR("unknown link type %d", conn->type); - break; - } + char *name; - if (conn->disconn_cfm_cb) - 
conn->disconn_cfm_cb(conn, reason); -} + void (*connect_cfm) (struct hci_conn *conn, __u8 status); + void (*disconn_cfm) (struct hci_conn *conn, __u8 status); + void (*security_cfm) (struct hci_conn *conn, __u8 status, + __u8 encrypt); + void (*key_change_cfm) (struct hci_conn *conn, __u8 status); + void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role); +}; -static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status) +static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status) { - __u8 encrypt; - - if (conn->type != ACL_LINK && conn->type != LE_LINK) - return; - - if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) - return; + struct hci_cb *cb; - encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00; - l2cap_security_cfm(conn, status, encrypt); + mutex_lock(&hci_cb_list_lock); + list_for_each_entry(cb, &hci_cb_list, list) { + if (cb->connect_cfm) + cb->connect_cfm(conn, status); + } + mutex_unlock(&hci_cb_list_lock); - if (conn->security_cfm_cb) - conn->security_cfm_cb(conn, status); + if (conn->connect_cfm_cb) + conn->connect_cfm_cb(conn, status); } -static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, - __u8 encrypt) +static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason) { - if (conn->type != ACL_LINK && conn->type != LE_LINK) - return; + struct hci_cb *cb; - l2cap_security_cfm(conn, status, encrypt); + mutex_lock(&hci_cb_list_lock); + list_for_each_entry(cb, &hci_cb_list, list) { + if (cb->disconn_cfm) + cb->disconn_cfm(conn, reason); + } + mutex_unlock(&hci_cb_list_lock); - if (conn->security_cfm_cb) - conn->security_cfm_cb(conn, status); + if (conn->disconn_cfm_cb) + conn->disconn_cfm_cb(conn, reason); } -/* ----- HCI callbacks ----- */ -struct hci_cb { - struct list_head list; - - char *name; - - void (*security_cfm) (struct hci_conn *conn, __u8 status, - __u8 encrypt); - void (*key_change_cfm) (struct hci_conn *conn, __u8 status); - void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role); -}; - static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status) { struct hci_cb *cb; __u8 encrypt; - hci_proto_auth_cfm(conn, status); - if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) return; encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 
0x01 : 0x00; - read_lock(&hci_cb_list_lock); + mutex_lock(&hci_cb_list_lock); list_for_each_entry(cb, &hci_cb_list, list) { if (cb->security_cfm) cb->security_cfm(conn, status, encrypt); } - read_unlock(&hci_cb_list_lock); + mutex_unlock(&hci_cb_list_lock); + + if (conn->security_cfm_cb) + conn->security_cfm_cb(conn, status); } static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, @@ -1178,26 +1130,27 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, if (conn->pending_sec_level > conn->sec_level) conn->sec_level = conn->pending_sec_level; - hci_proto_encrypt_cfm(conn, status, encrypt); - - read_lock(&hci_cb_list_lock); + mutex_lock(&hci_cb_list_lock); list_for_each_entry(cb, &hci_cb_list, list) { if (cb->security_cfm) cb->security_cfm(conn, status, encrypt); } - read_unlock(&hci_cb_list_lock); + mutex_unlock(&hci_cb_list_lock); + + if (conn->security_cfm_cb) + conn->security_cfm_cb(conn, status); } static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status) { struct hci_cb *cb; - read_lock(&hci_cb_list_lock); + mutex_lock(&hci_cb_list_lock); list_for_each_entry(cb, &hci_cb_list, list) { if (cb->key_change_cfm) cb->key_change_cfm(conn, status); } - read_unlock(&hci_cb_list_lock); + mutex_unlock(&hci_cb_list_lock); } static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, @@ -1205,12 +1158,12 @@ static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, { struct hci_cb *cb; - read_lock(&hci_cb_list_lock); + mutex_lock(&hci_cb_list_lock); list_for_each_entry(cb, &hci_cb_list, list) { if (cb->role_switch_cfm) cb->role_switch_cfm(conn, status, role); } - read_unlock(&hci_cb_list_lock); + mutex_unlock(&hci_cb_list_lock); } static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type) @@ -1312,7 +1265,8 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode); /* ----- HCI Sockets ----- */ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb); -void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk); +void hci_send_to_channel(unsigned short channel, struct sk_buff *skb, + struct sock *skip_sk); void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb); void hci_sock_dev_event(struct hci_dev *hdev, int event); diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index e218a30..fe8eef0 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -647,9 +647,14 @@ struct mgmt_ev_new_irk { struct mgmt_irk_info irk; } __packed; +#define MGMT_CSRK_LOCAL_UNAUTHENTICATED 0x00 +#define MGMT_CSRK_REMOTE_UNAUTHENTICATED 0x01 +#define MGMT_CSRK_LOCAL_AUTHENTICATED 0x02 +#define MGMT_CSRK_REMOTE_AUTHENTICATED 0x03 + struct mgmt_csrk_info { struct mgmt_addr_info addr; - __u8 master; + __u8 type; __u8 val[16]; } __packed; diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h index f04cdbb..c2a40a17 100644 --- a/include/net/bond_3ad.h +++ b/include/net/bond_3ad.h @@ -82,6 +82,13 @@ typedef enum { AD_TRANSMIT /* tx Machine */ } tx_states_t; +/* churn machine states(43.4.17 in the 802.3ad standard) */ +typedef enum { + AD_CHURN_MONITOR, /* monitoring for churn */ + AD_CHURN, /* churn detected (error) */ + AD_NO_CHURN /* no churn (no error) */ +} churn_state_t; + /* rx indication types */ typedef enum { AD_TYPE_LACPDU = 1, /* type lacpdu */ @@ -229,6 +236,12 @@ typedef struct port { u16 sm_mux_timer_counter; /* state machine mux timer counter */ tx_states_t sm_tx_state; /* state machine tx state */ u16 sm_tx_timer_counter; /* state 
machine tx timer counter(allways on - enter to transmit state 3 time per second) */ + u16 sm_churn_actor_timer_counter; + u16 sm_churn_partner_timer_counter; + u32 churn_actor_count; + u32 churn_partner_count; + churn_state_t sm_churn_actor_state; + churn_state_t sm_churn_partner_state; struct slave *slave; /* pointer to the bond slave that this port belongs to */ struct aggregator *aggregator; /* pointer to an aggregator that this port related to */ struct port *next_port_in_aggregator; /* Next port on the linked list of the parent aggregator */ @@ -262,6 +275,22 @@ struct ad_slave_info { u16 id; }; +static inline const char *bond_3ad_churn_desc(churn_state_t state) +{ + static const char *const churn_description[] = { + "monitoring", + "churned", + "none", + "unknown" + }; + int max_size = sizeof(churn_description) / sizeof(churn_description[0]); + + if (state >= max_size) + state = max_size - 1; + + return churn_description[state]; +} + /* ========== AD Exported functions to the main bonding code ========== */ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution); void bond_3ad_bind_slave(struct slave *slave); diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h index 597b88a..207d9ba 100644 --- a/include/net/dcbnl.h +++ b/include/net/dcbnl.h @@ -49,6 +49,9 @@ struct dcbnl_rtnl_ops { int (*ieee_setets) (struct net_device *, struct ieee_ets *); int (*ieee_getmaxrate) (struct net_device *, struct ieee_maxrate *); int (*ieee_setmaxrate) (struct net_device *, struct ieee_maxrate *); + int (*ieee_getqcn) (struct net_device *, struct ieee_qcn *); + int (*ieee_setqcn) (struct net_device *, struct ieee_qcn *); + int (*ieee_getqcnstats) (struct net_device *, struct ieee_qcn_stats *); int (*ieee_getpfc) (struct net_device *, struct ieee_pfc *); int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *); int (*ieee_getapp) (struct net_device *, struct dcb_app *); diff --git a/include/net/dn_neigh.h b/include/net/dn_neigh.h index fac4e3f..0f26aa7 100644 --- a/include/net/dn_neigh.h +++ b/include/net/dn_neigh.h @@ -22,6 +22,7 @@ int dn_neigh_router_hello(struct sk_buff *skb); int dn_neigh_endnode_hello(struct sk_buff *skb); void dn_neigh_pointopoint_hello(struct sk_buff *skb); int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n); +int dn_to_neigh_output(struct sk_buff *skb); extern struct neigh_table dn_neigh_table; diff --git a/include/net/dsa.h b/include/net/dsa.h index ed3c34b..b525ac5 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -128,6 +128,11 @@ struct dsa_switch { int index; /* + * Tagging protocol understood by this switch + */ + enum dsa_tag_protocol tag_protocol; + + /* * Configuration data for this switch. 
*/ struct dsa_chip_data *pd; @@ -165,6 +170,11 @@ static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p) return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port); } +static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p) +{ + return ds->phys_port_mask & (1 << p) && ds->ports[p]; +} + static inline u8 dsa_upstream_port(struct dsa_switch *ds) { struct dsa_switch_tree *dst = ds->dst; @@ -275,6 +285,16 @@ struct dsa_switch_driver { int (*get_regs_len)(struct dsa_switch *ds, int port); void (*get_regs)(struct dsa_switch *ds, int port, struct ethtool_regs *regs, void *p); + + /* + * Bridge integration + */ + int (*port_join_bridge)(struct dsa_switch *ds, int port, + u32 br_port_mask); + int (*port_leave_bridge)(struct dsa_switch *ds, int port, + u32 br_port_mask); + int (*port_stp_update)(struct dsa_switch *ds, int port, + u8 state); }; void register_switch_driver(struct dsa_switch_driver *type); diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h index 1f99a1d..d642539 100644 --- a/include/net/dst_ops.h +++ b/include/net/dst_ops.h @@ -12,7 +12,6 @@ struct sock; struct dst_ops { unsigned short family; - __be16 protocol; unsigned int gc_thresh; int (*gc)(struct dst_ops *ops); diff --git a/include/net/inet_common.h b/include/net/inet_common.h index b2828a0..4a92423 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -21,12 +21,11 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags); int inet_accept(struct socket *sock, struct socket *newsock, int flags); -int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, - size_t size); +int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size); ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); -int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, - size_t size, int flags); +int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, + int flags); int inet_shutdown(struct socket *sock, int how); int inet_listen(struct socket *sock, int backlog); void inet_sock_destruct(struct sock *sk); diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 5976bde..b9a6b0a 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -126,6 +126,8 @@ struct inet_connection_sock { /* Information on the current probe. 
*/ int probe_size; + + u32 probe_timestamp; } icsk_mtup; u32 icsk_ca_priv[16]; u32 icsk_user_timeout; diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 5bd120e..1657604 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -136,7 +136,7 @@ struct fib_result { u32 tclassid; struct fib_info *fi; struct fib_table *table; - struct list_head *fa_head; + struct hlist_head *fa_head; }; struct fib_result_nl { @@ -185,6 +185,7 @@ struct fib_table { u32 tb_id; int tb_default; int tb_num_default; + struct rcu_head rcu; unsigned long tb_data[0]; }; @@ -195,6 +196,7 @@ int fib_table_delete(struct fib_table *, struct fib_config *); int fib_table_dump(struct fib_table *table, struct sk_buff *skb, struct netlink_callback *cb); int fib_table_flush(struct fib_table *table); +void fib_table_flush_external(struct fib_table *table); void fib_free_table(struct fib_table *tb); @@ -206,12 +208,16 @@ void fib_free_table(struct fib_table *tb); static inline struct fib_table *fib_get_table(struct net *net, u32 id) { + struct hlist_node *tb_hlist; struct hlist_head *ptr; ptr = id == RT_TABLE_LOCAL ? &net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX] : &net->ipv4.fib_table_hash[TABLE_MAIN_INDEX]; - return hlist_entry(ptr->first, struct fib_table, tb_hlist); + + tb_hlist = rcu_dereference_rtnl(hlist_first_rcu(ptr)); + + return hlist_entry(tb_hlist, struct fib_table, tb_hlist); } static inline struct fib_table *fib_new_table(struct net *net, u32 id) @@ -222,15 +228,19 @@ static inline struct fib_table *fib_new_table(struct net *net, u32 id) static inline int fib_lookup(struct net *net, const struct flowi4 *flp, struct fib_result *res) { - int err = -ENETUNREACH; + struct fib_table *tb; + int err; rcu_read_lock(); - if (!fib_table_lookup(fib_get_table(net, RT_TABLE_LOCAL), flp, res, - FIB_LOOKUP_NOREF) || - !fib_table_lookup(fib_get_table(net, RT_TABLE_MAIN), flp, res, - FIB_LOOKUP_NOREF)) - err = 0; + for (err = 0; !err; err = -ENETUNREACH) { + tb = fib_get_table(net, RT_TABLE_LOCAL); + if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF)) + break; + tb = fib_get_table(net, RT_TABLE_MAIN); + if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF)) + break; + } rcu_read_unlock(); @@ -249,28 +259,33 @@ int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res); static inline int fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res) { - if (!net->ipv4.fib_has_custom_rules) { - int err = -ENETUNREACH; - - rcu_read_lock(); - - res->tclassid = 0; - if ((net->ipv4.fib_local && - !fib_table_lookup(net->ipv4.fib_local, flp, res, - FIB_LOOKUP_NOREF)) || - (net->ipv4.fib_main && - !fib_table_lookup(net->ipv4.fib_main, flp, res, - FIB_LOOKUP_NOREF)) || - (net->ipv4.fib_default && - !fib_table_lookup(net->ipv4.fib_default, flp, res, - FIB_LOOKUP_NOREF))) - err = 0; - - rcu_read_unlock(); - - return err; + struct fib_table *tb; + int err; + + if (net->ipv4.fib_has_custom_rules) + return __fib_lookup(net, flp, res); + + rcu_read_lock(); + + res->tclassid = 0; + + for (err = 0; !err; err = -ENETUNREACH) { + tb = rcu_dereference_rtnl(net->ipv4.fib_local); + if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF)) + break; + + tb = rcu_dereference_rtnl(net->ipv4.fib_main); + if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF)) + break; + + tb = rcu_dereference_rtnl(net->ipv4.fib_default); + if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF)) + break; } - return __fib_lookup(net, flp, res); + + rcu_read_unlock(); + + return err; } #endif /* 
CONFIG_IP_MULTIPLE_TABLES */ @@ -294,6 +309,7 @@ static inline int fib_num_tclassid_users(struct net *net) return 0; } #endif +void fib_flush_external(struct net *net); /* Exported by fib_semantics.c */ int ip_fib_check_default(__be32 gw, struct net_device *dev); diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 615b20b..20fd233 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -365,15 +365,15 @@ struct ip_vs_seq { /* counters per cpu */ struct ip_vs_counters { - __u32 conns; /* connections scheduled */ - __u32 inpkts; /* incoming packets */ - __u32 outpkts; /* outgoing packets */ + __u64 conns; /* connections scheduled */ + __u64 inpkts; /* incoming packets */ + __u64 outpkts; /* outgoing packets */ __u64 inbytes; /* incoming bytes */ __u64 outbytes; /* outgoing bytes */ }; /* Stats per cpu */ struct ip_vs_cpu_stats { - struct ip_vs_counters ustats; + struct ip_vs_counters cnt; struct u64_stats_sync syncp; }; @@ -383,23 +383,40 @@ struct ip_vs_estimator { u64 last_inbytes; u64 last_outbytes; - u32 last_conns; - u32 last_inpkts; - u32 last_outpkts; - - u32 cps; - u32 inpps; - u32 outpps; - u32 inbps; - u32 outbps; + u64 last_conns; + u64 last_inpkts; + u64 last_outpkts; + + u64 cps; + u64 inpps; + u64 outpps; + u64 inbps; + u64 outbps; +}; + +/* + * IPVS statistics object, 64-bit kernel version of struct ip_vs_stats_user + */ +struct ip_vs_kstats { + u64 conns; /* connections scheduled */ + u64 inpkts; /* incoming packets */ + u64 outpkts; /* outgoing packets */ + u64 inbytes; /* incoming bytes */ + u64 outbytes; /* outgoing bytes */ + + u64 cps; /* current connection rate */ + u64 inpps; /* current in packet rate */ + u64 outpps; /* current out packet rate */ + u64 inbps; /* current in byte rate */ + u64 outbps; /* current out byte rate */ }; struct ip_vs_stats { - struct ip_vs_stats_user ustats; /* statistics */ + struct ip_vs_kstats kstats; /* kernel statistics */ struct ip_vs_estimator est; /* estimator */ struct ip_vs_cpu_stats __percpu *cpustats; /* per cpu counters */ spinlock_t lock; /* spin lock */ - struct ip_vs_stats_user ustats0; /* reset values */ + struct ip_vs_kstats kstats0; /* reset values */ }; struct dst_entry; @@ -924,6 +941,7 @@ struct netns_ipvs { int sysctl_nat_icmp_send; int sysctl_pmtu_disc; int sysctl_backup_only; + int sysctl_conn_reuse_mode; /* ip_vs_lblc */ int sysctl_lblc_expiration; @@ -1042,6 +1060,11 @@ static inline int sysctl_backup_only(struct netns_ipvs *ipvs) ipvs->sysctl_backup_only; } +static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_conn_reuse_mode; +} + #else static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs) @@ -1109,6 +1132,11 @@ static inline int sysctl_backup_only(struct netns_ipvs *ipvs) return 0; } +static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs) +{ + return 1; +} + #endif /* IPVS core functions @@ -1388,8 +1416,7 @@ void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts); void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats); void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats); void ip_vs_zero_estimator(struct ip_vs_stats *stats); -void ip_vs_read_estimator(struct ip_vs_stats_user *dst, - struct ip_vs_stats *stats); +void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats); /* Various IPVS packet transmitters (from ip_vs_xmit.c) */ int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 4c9fe22..b767306 100644 
--- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -940,4 +940,12 @@ int ipv6_sysctl_register(void); void ipv6_sysctl_unregister(void); #endif +int ipv6_sock_mc_join(struct sock *sk, int ifindex, + const struct in6_addr *addr); +int __ipv6_sock_mc_join(struct sock *sk, int ifindex, + const struct in6_addr *addr); +int ipv6_sock_mc_drop(struct sock *sk, int ifindex, + const struct in6_addr *addr); +int __ipv6_sock_mc_drop(struct sock *sk, int ifindex, + const struct in6_addr *addr); #endif /* _NET_IPV6_H */ diff --git a/include/net/mac802154.h b/include/net/mac802154.h index 8506478..fb4e8a3d 100644 --- a/include/net/mac802154.h +++ b/include/net/mac802154.h @@ -19,6 +19,7 @@ #include <net/af_ieee802154.h> #include <linux/ieee802154.h> #include <linux/skbuff.h> +#include <linux/unaligned/memmove.h> #include <net/cfg802154.h> @@ -233,9 +234,7 @@ struct ieee802154_ops { */ static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src) { - __le64 tmp = (__force __le64)swab64p(be64_src); - - memcpy(le64_dst, &tmp, IEEE802154_EXTENDED_ADDR_LEN); + __put_unaligned_memmove64(swab64p(be64_src), le64_dst); } /** @@ -245,9 +244,7 @@ static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src) */ static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src) { - __be64 tmp = (__force __be64)swab64p(le64_src); - - memcpy(be64_dst, &tmp, IEEE802154_EXTENDED_ADDR_LEN); + __put_unaligned_memmove64(swab64p(le64_src), be64_dst); } /* Basic interface to register ieee802154 hwice */ diff --git a/include/net/ndisc.h b/include/net/ndisc.h index 6bbda34..b3a7751 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -156,24 +156,7 @@ static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, _ static inline struct neighbour *__ipv6_neigh_lookup_noref(struct net_device *dev, const void *pkey) { - struct neigh_hash_table *nht; - const u32 *p32 = pkey; - struct neighbour *n; - u32 hash_val; - - nht = rcu_dereference_bh(nd_tbl.nht); - hash_val = ndisc_hashfn(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift); - for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]); - n != NULL; - n = rcu_dereference_bh(n->next)) { - u32 *n32 = (u32 *) n->primary_key; - if (n->dev == dev && - ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) | - (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0) - return n; - } - - return NULL; + return ___neigh_lookup_noref(&nd_tbl, neigh_key_eq128, ndisc_hashfn, pkey, dev); } static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, const void *pkey) diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 76f7084..d48b8ec 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -193,9 +193,11 @@ struct neigh_table { int family; int entry_size; int key_len; + __be16 protocol; __u32 (*hash)(const void *pkey, const struct net_device *dev, __u32 *hash_rnd); + bool (*key_eq)(const struct neighbour *, const void *pkey); int (*constructor)(struct neighbour *); int (*pconstructor)(struct pneigh_entry *); void (*pdestructor)(struct pneigh_entry *); @@ -224,6 +226,7 @@ enum { NEIGH_ND_TABLE = 1, NEIGH_DN_TABLE = 2, NEIGH_NR_TABLES, + NEIGH_LINK_TABLE = NEIGH_NR_TABLES /* Pseudo table for neigh_xmit */ }; static inline int neigh_parms_family(struct neigh_parms *p) @@ -246,6 +249,57 @@ static inline void *neighbour_priv(const struct neighbour *n) #define NEIGH_UPDATE_F_ISROUTER 0x40000000 #define NEIGH_UPDATE_F_ADMIN 0x80000000 + +static inline bool neigh_key_eq16(const struct neighbour *n, const 
void *pkey) +{ + return *(const u16 *)n->primary_key == *(const u16 *)pkey; +} + +static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey) +{ + return *(const u32 *)n->primary_key == *(const u32 *)pkey; +} + +static inline bool neigh_key_eq128(const struct neighbour *n, const void *pkey) +{ + const u32 *n32 = (const u32 *)n->primary_key; + const u32 *p32 = pkey; + + return ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) | + (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0; +} + +static inline struct neighbour *___neigh_lookup_noref( + struct neigh_table *tbl, + bool (*key_eq)(const struct neighbour *n, const void *pkey), + __u32 (*hash)(const void *pkey, + const struct net_device *dev, + __u32 *hash_rnd), + const void *pkey, + struct net_device *dev) +{ + struct neigh_hash_table *nht = rcu_dereference_bh(tbl->nht); + struct neighbour *n; + u32 hash_val; + + hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift); + for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]); + n != NULL; + n = rcu_dereference_bh(n->next)) { + if (n->dev == dev && key_eq(n, pkey)) + return n; + } + + return NULL; +} + +static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl, + const void *pkey, + struct net_device *dev) +{ + return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev); +} + void neigh_table_init(int index, struct neigh_table *tbl); int neigh_table_clear(int index, struct neigh_table *tbl); struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey, @@ -268,7 +322,6 @@ void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev); int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev); int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb); int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb); -int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb); int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb); struct neighbour *neigh_event_ns(struct neigh_table *tbl, u8 *lladdr, void *saddr, @@ -306,6 +359,7 @@ void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie); void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *)); +int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *); void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_entry *)); @@ -459,4 +513,6 @@ static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n, memcpy(dst, n->ha, dev->addr_len); } while (read_seqretry(&n->ha_lock, seq)); } + + #endif diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 36faf49..2cb9acb 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -26,6 +26,7 @@ #endif #include <net/netns/nftables.h> #include <net/netns/xfrm.h> +#include <net/netns/mpls.h> #include <linux/ns_common.h> struct user_namespace; @@ -130,6 +131,9 @@ struct net { #if IS_ENABLED(CONFIG_IP_VS) struct netns_ipvs *ipvs; #endif +#if IS_ENABLED(CONFIG_MPLS) + struct netns_mpls mpls; +#endif struct sock *diag_nlsk; atomic_t fnhe_genid; }; diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h index 03e928a..8641275 100644 --- a/include/net/netfilter/ipv4/nf_reject.h +++ b/include/net/netfilter/ipv4/nf_reject.h @@ -5,11 +5,7 @@ #include <net/ip.h> #include <net/icmp.h> -static inline void nf_send_unreach(struct sk_buff *skb_in, int code) -{ - icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0); -} 
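The neigh_key_eq128() helper added above keeps the old ndisc trick of comparing all four 32-bit words of the key without branching: the XOR of each word pair is OR-ed together, and the result is zero only when every word matches. A standalone sketch of that comparison follows (plain userspace C, not kernel code; the two sample keys are made up purely for the demonstration):

/* Standalone illustration of the branchless 128-bit key compare used by
 * neigh_key_eq128() above: OR-ing the XOR of each 32-bit word is zero
 * iff all four words match, so one test covers an IPv6-sized key.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int key_eq128(const void *a, const void *b)
{
	uint32_t n32[4], p32[4];

	/* copy out to avoid unaligned access / aliasing concerns */
	memcpy(n32, a, sizeof(n32));
	memcpy(p32, b, sizeof(p32));

	return ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
		(n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0;
}

int main(void)
{
	uint8_t k1[16] = { 0x20, 0x01, 0x0d, 0xb8, [15] = 0x01 };
	uint8_t k2[16] = { 0x20, 0x01, 0x0d, 0xb8, [15] = 0x02 };

	printf("k1==k1: %d, k1==k2: %d\n", key_eq128(k1, k1), key_eq128(k1, k2));
	return 0;
}

Built on its own this prints 1 for the matching pair and 0 for the mismatching one, which is the property ___neigh_lookup_noref() relies on when walking a hash bucket with the table's key_eq callback.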
- +void nf_send_unreach(struct sk_buff *skb_in, int code, int hook); void nf_send_reset(struct sk_buff *oldskb, int hook); const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb, diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h index 23216d4..0ae445d 100644 --- a/include/net/netfilter/ipv6/nf_reject.h +++ b/include/net/netfilter/ipv6/nf_reject.h @@ -3,15 +3,8 @@ #include <linux/icmpv6.h> -static inline void -nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code, - unsigned int hooknum) -{ - if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL) - skb_in->dev = net->loopback_dev; - - icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0); -} +void nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code, + unsigned int hooknum); void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook); diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index decb9a0..d756af5 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -409,74 +409,6 @@ struct nft_rule { __attribute__((aligned(__alignof__(struct nft_expr)))); }; -/** - * struct nft_trans - nf_tables object update in transaction - * - * @list: used internally - * @msg_type: message type - * @ctx: transaction context - * @data: internal information related to the transaction - */ -struct nft_trans { - struct list_head list; - int msg_type; - struct nft_ctx ctx; - char data[0]; -}; - -struct nft_trans_rule { - struct nft_rule *rule; -}; - -#define nft_trans_rule(trans) \ - (((struct nft_trans_rule *)trans->data)->rule) - -struct nft_trans_set { - struct nft_set *set; - u32 set_id; -}; - -#define nft_trans_set(trans) \ - (((struct nft_trans_set *)trans->data)->set) -#define nft_trans_set_id(trans) \ - (((struct nft_trans_set *)trans->data)->set_id) - -struct nft_trans_chain { - bool update; - char name[NFT_CHAIN_MAXNAMELEN]; - struct nft_stats __percpu *stats; - u8 policy; -}; - -#define nft_trans_chain_update(trans) \ - (((struct nft_trans_chain *)trans->data)->update) -#define nft_trans_chain_name(trans) \ - (((struct nft_trans_chain *)trans->data)->name) -#define nft_trans_chain_stats(trans) \ - (((struct nft_trans_chain *)trans->data)->stats) -#define nft_trans_chain_policy(trans) \ - (((struct nft_trans_chain *)trans->data)->policy) - -struct nft_trans_table { - bool update; - bool enable; -}; - -#define nft_trans_table_update(trans) \ - (((struct nft_trans_table *)trans->data)->update) -#define nft_trans_table_enable(trans) \ - (((struct nft_trans_table *)trans->data)->enable) - -struct nft_trans_elem { - struct nft_set *set; - struct nft_set_elem elem; -}; - -#define nft_trans_elem_set(trans) \ - (((struct nft_trans_elem *)trans->data)->set) -#define nft_trans_elem(trans) \ - (((struct nft_trans_elem *)trans->data)->elem) - static inline struct nft_expr *nft_expr_first(const struct nft_rule *rule) { return (struct nft_expr *)&rule->data[0]; @@ -544,6 +476,25 @@ enum nft_chain_type { NFT_CHAIN_T_MAX }; +/** + * struct nf_chain_type - nf_tables chain type info + * + * @name: name of the type + * @type: numeric identifier + * @family: address family + * @owner: module owner + * @hook_mask: mask of valid hooks + * @hooks: hookfn overrides + */ +struct nf_chain_type { + const char *name; + enum nft_chain_type type; + int family; + struct module *owner; + unsigned int hook_mask; + nf_hookfn *hooks[NF_MAX_HOOKS]; +}; + int nft_chain_validate_dependency(const struct nft_chain 
*chain, enum nft_chain_type type); int nft_chain_validate_hooks(const struct nft_chain *chain, @@ -600,7 +551,7 @@ struct nft_table { u64 hgenerator; u32 use; u16 flags; - char name[]; + char name[NFT_TABLE_MAXNAMELEN]; }; /** @@ -630,25 +581,6 @@ struct nft_af_info { int nft_register_afinfo(struct net *, struct nft_af_info *); void nft_unregister_afinfo(struct nft_af_info *); -/** - * struct nf_chain_type - nf_tables chain type info - * - * @name: name of the type - * @type: numeric identifier - * @family: address family - * @owner: module owner - * @hook_mask: mask of valid hooks - * @hooks: hookfn overrides - */ -struct nf_chain_type { - const char *name; - enum nft_chain_type type; - int family; - struct module *owner; - unsigned int hook_mask; - nf_hookfn *hooks[NF_MAX_HOOKS]; -}; - int nft_register_chain_type(const struct nf_chain_type *); void nft_unregister_chain_type(const struct nf_chain_type *); @@ -673,4 +605,72 @@ void nft_unregister_expr(struct nft_expr_type *); #define MODULE_ALIAS_NFT_SET() \ MODULE_ALIAS("nft-set") +/** + * struct nft_trans - nf_tables object update in transaction + * + * @list: used internally + * @msg_type: message type + * @ctx: transaction context + * @data: internal information related to the transaction + */ +struct nft_trans { + struct list_head list; + int msg_type; + struct nft_ctx ctx; + char data[0]; +}; + +struct nft_trans_rule { + struct nft_rule *rule; +}; + +#define nft_trans_rule(trans) \ + (((struct nft_trans_rule *)trans->data)->rule) + +struct nft_trans_set { + struct nft_set *set; + u32 set_id; +}; + +#define nft_trans_set(trans) \ + (((struct nft_trans_set *)trans->data)->set) +#define nft_trans_set_id(trans) \ + (((struct nft_trans_set *)trans->data)->set_id) + +struct nft_trans_chain { + bool update; + char name[NFT_CHAIN_MAXNAMELEN]; + struct nft_stats __percpu *stats; + u8 policy; +}; + +#define nft_trans_chain_update(trans) \ + (((struct nft_trans_chain *)trans->data)->update) +#define nft_trans_chain_name(trans) \ + (((struct nft_trans_chain *)trans->data)->name) +#define nft_trans_chain_stats(trans) \ + (((struct nft_trans_chain *)trans->data)->stats) +#define nft_trans_chain_policy(trans) \ + (((struct nft_trans_chain *)trans->data)->policy) + +struct nft_trans_table { + bool update; + bool enable; +}; + +#define nft_trans_table_update(trans) \ + (((struct nft_trans_table *)trans->data)->update) +#define nft_trans_table_enable(trans) \ + (((struct nft_trans_table *)trans->data)->enable) + +struct nft_trans_elem { + struct nft_set *set; + struct nft_set_elem elem; +}; + +#define nft_trans_elem_set(trans) \ + (((struct nft_trans_elem *)trans->data)->set) +#define nft_trans_elem(trans) \ + (((struct nft_trans_elem *)trans->data)->elem) + #endif /* _NET_NF_TABLES_H */ diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index dbe2254..8f3a1a1 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -7,6 +7,7 @@ #include <linux/uidgid.h> #include <net/inet_frag.h> +#include <linux/rcupdate.h> struct tcpm_hash_bucket; struct ctl_table_header; @@ -38,17 +39,19 @@ struct netns_ipv4 { #ifdef CONFIG_IP_MULTIPLE_TABLES struct fib_rules_ops *rules_ops; bool fib_has_custom_rules; - struct fib_table *fib_local; - struct fib_table *fib_main; - struct fib_table *fib_default; + struct fib_table __rcu *fib_local; + struct fib_table __rcu *fib_main; + struct fib_table __rcu *fib_default; #endif #ifdef CONFIG_IP_ROUTE_CLASSID int fib_num_tclassid_users; #endif struct hlist_head *fib_table_hash; + bool 
fib_offload_disabled; struct sock *fibnl; struct sock * __percpu *icmp_sk; + struct sock *mc_autojoin_sk; struct inet_peer_base *peers; struct tcpm_hash_bucket *tcp_metrics_hash; @@ -84,6 +87,8 @@ struct netns_ipv4 { int sysctl_tcp_fwmark_accept; int sysctl_tcp_mtu_probing; int sysctl_tcp_base_mss; + int sysctl_tcp_probe_threshold; + u32 sysctl_tcp_probe_interval; struct ping_group_range ping_group_range; diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index 69ae41f..ca0db12 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -67,6 +67,7 @@ struct netns_ipv6 { struct sock *ndisc_sk; struct sock *tcp_sk; struct sock *igmp_sk; + struct sock *mc_autojoin_sk; #ifdef CONFIG_IPV6_MROUTE #ifndef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES struct mr6_table *mrt6; diff --git a/include/net/netns/mpls.h b/include/net/netns/mpls.h new file mode 100644 index 0000000..d292036 --- /dev/null +++ b/include/net/netns/mpls.h @@ -0,0 +1,17 @@ +/* + * mpls in net namespaces + */ + +#ifndef __NETNS_MPLS_H__ +#define __NETNS_MPLS_H__ + +struct mpls_route; +struct ctl_table_header; + +struct netns_mpls { + size_t platform_labels; + struct mpls_route __rcu * __rcu *platform_label; + struct ctl_table_header *ctl; +}; + +#endif /* __NETNS_MPLS_H__ */ diff --git a/include/net/netns/x_tables.h b/include/net/netns/x_tables.h index c24060e..4d6597a 100644 --- a/include/net/netns/x_tables.h +++ b/include/net/netns/x_tables.h @@ -9,6 +9,7 @@ struct ebt_table; struct netns_xt { struct list_head tables[NFPROTO_NUMPROTO]; bool notrack_deprecated_warning; + bool clusterip_deprecated_warning; #if defined(CONFIG_BRIDGE_NF_EBTABLES) || \ defined(CONFIG_BRIDGE_NF_EBTABLES_MODULE) struct ebt_table *broute_table; diff --git a/include/net/ping.h b/include/net/ping.h index cc16d41..ac80cb4 100644 --- a/include/net/ping.h +++ b/include/net/ping.h @@ -75,12 +75,11 @@ void ping_err(struct sk_buff *skb, int offset, u32 info); int ping_getfrag(void *from, char *to, int offset, int fraglen, int odd, struct sk_buff *); -int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len, int noblock, int flags, int *addr_len); +int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, + int flags, int *addr_len); int ping_common_sendmsg(int family, struct msghdr *msg, size_t len, void *user_icmph, size_t icmph_len); -int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len); +int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); bool ping_rcv(struct sk_buff *skb); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index c605d30..6d778ef 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -213,7 +213,7 @@ struct tcf_proto_ops { const struct tcf_proto *, struct tcf_result *); int (*init)(struct tcf_proto*); - void (*destroy)(struct tcf_proto*); + bool (*destroy)(struct tcf_proto*, bool); unsigned long (*get)(struct tcf_proto*, u32 handle); int (*change)(struct net *net, struct sk_buff *, @@ -399,7 +399,7 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, const struct Qdisc_ops *ops, u32 parentid); void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab); -void tcf_destroy(struct tcf_proto *tp); +bool tcf_destroy(struct tcf_proto *tp, bool force); void tcf_destroy_chain(struct tcf_proto __rcu **fl); /* Reset all TX qdiscs greater then index of a device. 
*/ diff --git a/include/net/sock.h b/include/net/sock.h index ab186b1..250822c 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -958,10 +958,9 @@ struct proto { int (*compat_ioctl)(struct sock *sk, unsigned int cmd, unsigned long arg); #endif - int (*sendmsg)(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t len); - int (*recvmsg)(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, + int (*sendmsg)(struct sock *sk, struct msghdr *msg, + size_t len); + int (*recvmsg)(struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len); int (*sendpage)(struct sock *sk, struct page *page, @@ -1562,9 +1561,8 @@ int sock_no_listen(struct socket *, int); int sock_no_shutdown(struct socket *, int); int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *); int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int); -int sock_no_sendmsg(struct kiocb *, struct socket *, struct msghdr *, size_t); -int sock_no_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t, - int); +int sock_no_sendmsg(struct socket *, struct msghdr *, size_t); +int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int); int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma); ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, @@ -1576,8 +1574,8 @@ ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, */ int sock_common_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen); -int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, int flags); +int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, + int flags); int sock_common_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen); int compat_sock_common_getsockopt(struct socket *sock, int level, @@ -2078,6 +2076,29 @@ static inline int sock_intr_errno(long timeo) return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR; } +struct sock_skb_cb { + u32 dropcount; +}; + +/* Store sock_skb_cb at the end of skb->cb[] so protocol families + * using skb->cb[] would keep using it directly and utilize its + * alignement guarantee. 
+ */ +#define SOCK_SKB_CB_OFFSET ((FIELD_SIZEOF(struct sk_buff, cb) - \ + sizeof(struct sock_skb_cb))) + +#define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \ + SOCK_SKB_CB_OFFSET)) + +#define sock_skb_cb_check_size(size) \ + BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET) + +static inline void +sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb) +{ + SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops); +} + void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb); void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, diff --git a/include/net/switchdev.h b/include/net/switchdev.h index cfcdac2..933fac4 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -51,6 +51,12 @@ int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags); int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags); +int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi, + u8 tos, u8 type, u32 tb_id); +int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi, + u8 tos, u8 type, u32 tb_id); +void netdev_switch_fib_ipv4_abort(struct fib_info *fi); + #else static inline int netdev_switch_parent_id_get(struct net_device *dev, @@ -109,6 +115,24 @@ static inline int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device * return 0; } +static inline int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, + struct fib_info *fi, + u8 tos, u8 type, u32 tb_id) +{ + return 0; +} + +static inline int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, + struct fib_info *fi, + u8 tos, u8 type, u32 tb_id) +{ + return 0; +} + +static inline void netdev_switch_fib_ipv4_abort(struct fib_info *fi) +{ +} + #endif #endif /* _LINUX_SWITCHDEV_H_ */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 8d6b983..2e11e38 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -65,7 +65,13 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); #define TCP_MIN_MSS 88U /* The least MTU to use for probing */ -#define TCP_BASE_MSS 512 +#define TCP_BASE_MSS 1024 + +/* probing interval, default to 10 minutes as per RFC4821 */ +#define TCP_PROBE_INTERVAL 600 + +/* Specify interval when tcp mtu probing will stop */ +#define TCP_PROBE_THRESHOLD 8 /* After receiving this amount of duplicate ACKs fast retransmit starts. 
*/ #define TCP_FASTRETRANS_THRESH 3 @@ -349,8 +355,7 @@ void tcp_v4_early_demux(struct sk_buff *skb); int tcp_v4_rcv(struct sk_buff *skb); int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw); -int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t size); +int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags); void tcp_release_cb(struct sock *sk); @@ -430,8 +435,8 @@ int compat_tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); void tcp_set_keepalive(struct sock *sk, int val); void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req); -int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len, int nonblock, int flags, int *addr_len); +int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, + int flags, int *addr_len); void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx, int estab, struct tcp_fastopen_cookie *foc); diff --git a/include/net/udp.h b/include/net/udp.h index 07f9b70..6d4ed18 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -194,6 +194,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, int (*)(const struct sock *, const struct sock *), unsigned int hash2_nulladdr); +u32 udp_flow_hashrnd(void); + static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb, int min, int max, bool use_eth) { @@ -205,12 +207,19 @@ static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb, } hash = skb_get_hash(skb); - if (unlikely(!hash) && use_eth) { - /* Can't find a normal hash, caller has indicated an Ethernet - * packet so use that to compute a hash. - */ - hash = jhash(skb->data, 2 * ETH_ALEN, - (__force u32) skb->protocol); + if (unlikely(!hash)) { + if (use_eth) { + /* Can't find a normal hash, caller has indicated an + * Ethernet packet so use that to compute a hash. + */ + hash = jhash(skb->data, 2 * ETH_ALEN, + (__force u32) skb->protocol); + } else { + /* Can't derive any sort of hash for the packet, set + * to some consistent random value. 
+ */ + hash = udp_flow_hashrnd(); + } } /* Since this is being sent on the wire obfuscate hash a bit @@ -229,8 +238,7 @@ int udp_get_port(struct sock *sk, unsigned short snum, int (*saddr_cmp)(const struct sock *, const struct sock *)); void udp_err(struct sk_buff *, u32); -int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len); +int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); int udp_push_pending_frames(struct sock *sk); void udp_flush_pending_frames(struct sock *sk); void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 45da7ec..3fa1af8 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -118,8 +118,11 @@ enum bpf_map_type { enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC, BPF_PROG_TYPE_SOCKET_FILTER, + BPF_PROG_TYPE_SCHED_CLS, }; +#define BPF_PSEUDO_MAP_FD 1 + /* flags for BPF_MAP_UPDATE_ELEM command */ #define BPF_ANY 0 /* create new element or update existing */ #define BPF_NOEXIST 1 /* create new element if it didn't exist */ diff --git a/include/uapi/linux/dcbnl.h b/include/uapi/linux/dcbnl.h index e711f20..6497d79 100644 --- a/include/uapi/linux/dcbnl.h +++ b/include/uapi/linux/dcbnl.h @@ -78,6 +78,70 @@ struct ieee_maxrate { __u64 tc_maxrate[IEEE_8021QAZ_MAX_TCS]; }; +enum dcbnl_cndd_states { + DCB_CNDD_RESET = 0, + DCB_CNDD_EDGE, + DCB_CNDD_INTERIOR, + DCB_CNDD_INTERIOR_READY, +}; + +/* This structure contains the IEEE 802.1Qau QCN managed object. + * + *@rpg_enable: enable QCN RP + *@rppp_max_rps: maximum number of RPs allowed for this CNPV on this port + *@rpg_time_reset: time between rate increases if no CNMs received. + * given in u-seconds + *@rpg_byte_reset: transmitted data between rate increases if no CNMs received. + * given in Bytes + *@rpg_threshold: The number of times rpByteStage or rpTimeStage can count + * before RP rate control state machine advances states + *@rpg_max_rate: the maxinun rate, in Mbits per second, + * at which an RP can transmit + *@rpg_ai_rate: The rate, in Mbits per second, + * used to increase rpTargetRate in the RPR_ACTIVE_INCREASE + *@rpg_hai_rate: The rate, in Mbits per second, + * used to increase rpTargetRate in the RPR_HYPER_INCREASE state + *@rpg_gd: Upon CNM receive, flow rate is limited to (Fb/Gd)*CurrentRate. + * rpgGd is given as log2(Gd), where Gd may only be powers of 2 + *@rpg_min_dec_fac: The minimum factor by which the current transmit rate + * can be changed by reception of a CNM. + * value is given as percentage (1-100) + *@rpg_min_rate: The minimum value, in bits per second, for rate to limit + *@cndd_state_machine: The state of the congestion notification domain + * defense state machine, as defined by IEEE 802.3Qau + * section 32.1.1. In the interior ready state, + * the QCN capable hardware may add CN-TAG TLV to the + * outgoing traffic, to specifically identify outgoing + * flows. 
+ */ + +struct ieee_qcn { + __u8 rpg_enable[IEEE_8021QAZ_MAX_TCS]; + __u32 rppp_max_rps[IEEE_8021QAZ_MAX_TCS]; + __u32 rpg_time_reset[IEEE_8021QAZ_MAX_TCS]; + __u32 rpg_byte_reset[IEEE_8021QAZ_MAX_TCS]; + __u32 rpg_threshold[IEEE_8021QAZ_MAX_TCS]; + __u32 rpg_max_rate[IEEE_8021QAZ_MAX_TCS]; + __u32 rpg_ai_rate[IEEE_8021QAZ_MAX_TCS]; + __u32 rpg_hai_rate[IEEE_8021QAZ_MAX_TCS]; + __u32 rpg_gd[IEEE_8021QAZ_MAX_TCS]; + __u32 rpg_min_dec_fac[IEEE_8021QAZ_MAX_TCS]; + __u32 rpg_min_rate[IEEE_8021QAZ_MAX_TCS]; + __u32 cndd_state_machine[IEEE_8021QAZ_MAX_TCS]; +}; + +/* This structure contains the IEEE 802.1Qau QCN statistics. + * + *@rppp_rp_centiseconds: the number of RP-centiseconds accumulated + * by RPs at this priority level on this Port + *@rppp_created_rps: number of active RPs(flows) that react to CNMs + */ + +struct ieee_qcn_stats { + __u64 rppp_rp_centiseconds[IEEE_8021QAZ_MAX_TCS]; + __u32 rppp_created_rps[IEEE_8021QAZ_MAX_TCS]; +}; + /* This structure contains the IEEE 802.1Qaz PFC managed object * * @pfc_cap: Indicates the number of traffic classes on the local device @@ -334,6 +398,8 @@ enum ieee_attrs { DCB_ATTR_IEEE_PEER_PFC, DCB_ATTR_IEEE_PEER_APP, DCB_ATTR_IEEE_MAXRATE, + DCB_ATTR_IEEE_QCN, + DCB_ATTR_IEEE_QCN_STATS, __DCB_ATTR_IEEE_MAX }; #define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1) diff --git a/include/uapi/linux/if_addr.h b/include/uapi/linux/if_addr.h index dea10a8..40fdfea 100644 --- a/include/uapi/linux/if_addr.h +++ b/include/uapi/linux/if_addr.h @@ -50,6 +50,7 @@ enum { #define IFA_F_PERMANENT 0x80 #define IFA_F_MANAGETEMPADDR 0x100 #define IFA_F_NOPREFIXROUTE 0x200 +#define IFA_F_MCAUTOJOIN 0x400 struct ifa_cacheinfo { __u32 ifa_prefered; diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index dfd0bb2..756436e 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -247,6 +247,7 @@ enum { IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */ IFLA_BRPORT_PROXYARP, /* proxy ARP */ IFLA_BRPORT_LEARNING_SYNC, /* mac learning sync from device */ + IFLA_BRPORT_PROXYARP_WIFI, /* proxy ARP for Wi-Fi */ __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h index cabe95d..3199243 100644 --- a/include/uapi/linux/ip_vs.h +++ b/include/uapi/linux/ip_vs.h @@ -358,6 +358,8 @@ enum { IPVS_SVC_ATTR_PE_NAME, /* name of ct retriever */ + IPVS_SVC_ATTR_STATS64, /* nested attribute for service stats */ + __IPVS_SVC_ATTR_MAX, }; @@ -387,6 +389,8 @@ enum { IPVS_DEST_ATTR_ADDR_FAMILY, /* Address family of address */ + IPVS_DEST_ATTR_STATS64, /* nested attribute for dest stats */ + __IPVS_DEST_ATTR_MAX, }; @@ -410,7 +414,8 @@ enum { /* * Attributes used to describe service or destination entry statistics * - * Used inside nested attributes IPVS_SVC_ATTR_STATS and IPVS_DEST_ATTR_STATS + * Used inside nested attributes IPVS_SVC_ATTR_STATS, IPVS_DEST_ATTR_STATS, + * IPVS_SVC_ATTR_STATS64 and IPVS_DEST_ATTR_STATS64. 
*/ enum { IPVS_STATS_ATTR_UNSPEC = 0, diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 832bc46..b978393 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -1,6 +1,7 @@ #ifndef _LINUX_NF_TABLES_H #define _LINUX_NF_TABLES_H +#define NFT_TABLE_MAXNAMELEN 32 #define NFT_CHAIN_MAXNAMELEN 32 #define NFT_USERDATA_MAXLEN 256 diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 25731df..bf08e76 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -397,6 +397,8 @@ enum { TCA_BPF_CLASSID, TCA_BPF_OPS_LEN, TCA_BPF_OPS, + TCA_BPF_FD, + TCA_BPF_NAME, __TCA_BPF_MAX, }; diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 5cc5d66..c3722b0 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -303,6 +303,8 @@ enum rtattr_type_t { RTA_TABLE, RTA_MARK, RTA_MFC_STATS, + RTA_VIA, + RTA_NEWDST, __RTA_MAX }; @@ -332,6 +334,7 @@ struct rtnexthop { #define RTNH_F_DEAD 1 /* Nexthop is dead (used by multipath) */ #define RTNH_F_PERVASIVE 2 /* Do recursive gateway lookup */ #define RTNH_F_ONLINK 4 /* Gateway is forced on link */ +#define RTNH_F_EXTERNAL 8 /* Route installed externally */ /* Macros to handle hexthops */ @@ -344,6 +347,12 @@ struct rtnexthop { #define RTNH_SPACE(len) RTNH_ALIGN(RTNH_LENGTH(len)) #define RTNH_DATA(rtnh) ((struct rtattr*)(((char*)(rtnh)) + RTNH_LENGTH(0))) +/* RTA_VIA */ +struct rtvia { + __kernel_sa_family_t rtvia_family; + __u8 rtvia_addr[0]; +}; + /* RTM_CACHEINFO */ struct rta_cacheinfo { @@ -623,6 +632,8 @@ enum rtnetlink_groups { #define RTNLGRP_IPV6_NETCONF RTNLGRP_IPV6_NETCONF RTNLGRP_MDB, #define RTNLGRP_MDB RTNLGRP_MDB + RTNLGRP_MPLS_ROUTE, +#define RTNLGRP_MPLS_ROUTE RTNLGRP_MPLS_ROUTE __RTNLGRP_MAX }; #define RTNLGRP_MAX (__RTNLGRP_MAX - 1) diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h index 8d72382..d4c8f14 100644 --- a/include/uapi/linux/tipc_netlink.h +++ b/include/uapi/linux/tipc_netlink.h @@ -83,11 +83,20 @@ enum { TIPC_NLA_BEARER_NAME, /* string */ TIPC_NLA_BEARER_PROP, /* nest */ TIPC_NLA_BEARER_DOMAIN, /* u32 */ + TIPC_NLA_BEARER_UDP_OPTS, /* nest */ __TIPC_NLA_BEARER_MAX, TIPC_NLA_BEARER_MAX = __TIPC_NLA_BEARER_MAX - 1 }; +enum { + TIPC_NLA_UDP_UNSPEC, + TIPC_NLA_UDP_LOCAL, /* sockaddr_storage */ + TIPC_NLA_UDP_REMOTE, /* sockaddr_storage */ + + __TIPC_NLA_UDP_MAX, + TIPC_NLA_UDP_MAX = __TIPC_NLA_UDP_MAX - 1 +}; /* Socket info */ enum { TIPC_NLA_SOCK_UNSPEC, diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index a5ae60f..e6983be 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -1,5 +1,2 @@ obj-y := core.o obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o hashtab.o arraymap.o helpers.o -ifdef CONFIG_TEST_BPF -obj-$(CONFIG_BPF_SYSCALL) += test_stub.o -endif diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 9eb4d8a..8a66165 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -134,7 +134,7 @@ static void array_map_free(struct bpf_map *map) kvfree(array); } -static struct bpf_map_ops array_ops = { +static const struct bpf_map_ops array_ops = { .map_alloc = array_map_alloc, .map_free = array_map_free, .map_get_next_key = array_map_get_next_key, @@ -143,14 +143,14 @@ static struct bpf_map_ops array_ops = { .map_delete_elem = array_map_delete_elem, }; -static struct bpf_map_type_list tl = { +static struct bpf_map_type_list array_type __read_mostly = { .ops = &array_ops, .type 
= BPF_MAP_TYPE_ARRAY, }; static int __init register_array_map(void) { - bpf_register_map_type(&tl); + bpf_register_map_type(&array_type); return 0; } late_initcall(register_array_map); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index a64e7a2..50603ae 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -656,6 +656,11 @@ void bpf_prog_free(struct bpf_prog *fp) } EXPORT_SYMBOL_GPL(bpf_prog_free); +/* Weak definitions of helper functions in case we don't have bpf syscall. */ +const struct bpf_func_proto bpf_map_lookup_elem_proto __weak; +const struct bpf_func_proto bpf_map_update_elem_proto __weak; +const struct bpf_func_proto bpf_map_delete_elem_proto __weak; + /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call * skb_copy_bits(), so provide a weak definition of it for NET-less config. */ diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index b3ba436..83c209d 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -345,7 +345,7 @@ static void htab_map_free(struct bpf_map *map) kfree(htab); } -static struct bpf_map_ops htab_ops = { +static const struct bpf_map_ops htab_ops = { .map_alloc = htab_map_alloc, .map_free = htab_map_free, .map_get_next_key = htab_map_get_next_key, @@ -354,14 +354,14 @@ static struct bpf_map_ops htab_ops = { .map_delete_elem = htab_map_delete_elem, }; -static struct bpf_map_type_list tl = { +static struct bpf_map_type_list htab_type __read_mostly = { .ops = &htab_ops, .type = BPF_MAP_TYPE_HASH, }; static int __init register_htab_map(void) { - bpf_register_map_type(&tl); + bpf_register_map_type(&htab_type); return 0; } late_initcall(register_htab_map); diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 9e3414d..a3c7701 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -41,7 +41,7 @@ static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) return (unsigned long) value; } -struct bpf_func_proto bpf_map_lookup_elem_proto = { +const struct bpf_func_proto bpf_map_lookup_elem_proto = { .func = bpf_map_lookup_elem, .gpl_only = false, .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, @@ -60,7 +60,7 @@ static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) return map->ops->map_update_elem(map, key, value, r4); } -struct bpf_func_proto bpf_map_update_elem_proto = { +const struct bpf_func_proto bpf_map_update_elem_proto = { .func = bpf_map_update_elem, .gpl_only = false, .ret_type = RET_INTEGER, @@ -80,7 +80,7 @@ static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) return map->ops->map_delete_elem(map, key); } -struct bpf_func_proto bpf_map_delete_elem_proto = { +const struct bpf_func_proto bpf_map_delete_elem_proto = { .func = bpf_map_delete_elem, .gpl_only = false, .ret_type = RET_INTEGER, diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 536edc2..669719c 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -354,10 +354,11 @@ static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog) list_for_each_entry(tl, &bpf_prog_types, list_node) { if (tl->type == type) { prog->aux->ops = tl->ops; - prog->aux->prog_type = type; + prog->type = type; return 0; } } + return -EINVAL; } @@ -418,6 +419,7 @@ void bpf_prog_put(struct bpf_prog *prog) bpf_prog_free(prog); } } +EXPORT_SYMBOL_GPL(bpf_prog_put); static int bpf_prog_release(struct inode *inode, struct file *filp) { @@ -465,6 +467,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd) fdput(f); return prog; } +EXPORT_SYMBOL_GPL(bpf_prog_get); /* last field in 'union bpf_attr' used by this command */ 
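The renamed type lists above (array_type, htab_type) follow one registration pattern, now with const ops tables. For orientation only, a further map type would hook in the same way; this is a sketch, the sample_map_* callbacks do not exist in the tree, and a real implementation would add its own BPF_MAP_TYPE_* value to the uapi header rather than reuse an existing one:

/* Sketch only: registering a map type with a const ops table, mirroring
 * register_array_map()/register_htab_map() above.  The sample_map_*
 * callbacks are hypothetical placeholders.
 */
#include <linux/bpf.h>
#include <linux/init.h>

static const struct bpf_map_ops sample_map_ops = {
	.map_alloc	  = sample_map_alloc,
	.map_free	  = sample_map_free,
	.map_get_next_key = sample_map_get_next_key,
	.map_lookup_elem  = sample_map_lookup_elem,
	.map_update_elem  = sample_map_update_elem,
	.map_delete_elem  = sample_map_delete_elem,
};

static struct bpf_map_type_list sample_map_type __read_mostly = {
	.ops	= &sample_map_ops,
	.type	= BPF_MAP_TYPE_UNSPEC,	/* placeholder for a new enum value */
};

static int __init register_sample_map(void)
{
	bpf_register_map_type(&sample_map_type);
	return 0;
}
late_initcall(register_sample_map);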
#define BPF_PROG_LOAD_LAST_FIELD log_buf @@ -508,7 +511,7 @@ static int bpf_prog_load(union bpf_attr *attr) prog->jited = false; atomic_set(&prog->aux->refcnt, 1); - prog->aux->is_gpl_compatible = is_gpl; + prog->gpl_compatible = is_gpl; /* find program type: socket_filter vs tracing_filter */ err = find_prog_type(type, prog); @@ -517,7 +520,6 @@ static int bpf_prog_load(union bpf_attr *attr) /* run eBPF verifier */ err = bpf_check(prog, attr); - if (err < 0) goto free_used_maps; @@ -528,7 +530,6 @@ static int bpf_prog_load(union bpf_attr *attr) bpf_prog_select_runtime(prog); err = anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, O_RDWR | O_CLOEXEC); - if (err < 0) /* failed to allocate fd */ goto free_used_maps; diff --git a/kernel/bpf/test_stub.c b/kernel/bpf/test_stub.c deleted file mode 100644 index 0ceae1e..0000000 --- a/kernel/bpf/test_stub.c +++ /dev/null @@ -1,78 +0,0 @@ -/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - */ -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/slab.h> -#include <linux/err.h> -#include <linux/bpf.h> - -/* test stubs for BPF_MAP_TYPE_UNSPEC and for BPF_PROG_TYPE_UNSPEC - * to be used by user space verifier testsuite - */ -struct bpf_context { - u64 arg1; - u64 arg2; -}; - -static const struct bpf_func_proto *test_func_proto(enum bpf_func_id func_id) -{ - switch (func_id) { - case BPF_FUNC_map_lookup_elem: - return &bpf_map_lookup_elem_proto; - case BPF_FUNC_map_update_elem: - return &bpf_map_update_elem_proto; - case BPF_FUNC_map_delete_elem: - return &bpf_map_delete_elem_proto; - default: - return NULL; - } -} - -static const struct bpf_context_access { - int size; - enum bpf_access_type type; -} test_ctx_access[] = { - [offsetof(struct bpf_context, arg1)] = { - FIELD_SIZEOF(struct bpf_context, arg1), - BPF_READ - }, - [offsetof(struct bpf_context, arg2)] = { - FIELD_SIZEOF(struct bpf_context, arg2), - BPF_READ - }, -}; - -static bool test_is_valid_access(int off, int size, enum bpf_access_type type) -{ - const struct bpf_context_access *access; - - if (off < 0 || off >= ARRAY_SIZE(test_ctx_access)) - return false; - - access = &test_ctx_access[off]; - if (access->size == size && (access->type & type)) - return true; - - return false; -} - -static struct bpf_verifier_ops test_ops = { - .get_func_proto = test_func_proto, - .is_valid_access = test_is_valid_access, -}; - -static struct bpf_prog_type_list tl_prog = { - .ops = &test_ops, - .type = BPF_PROG_TYPE_UNSPEC, -}; - -static int __init register_test_ops(void) -{ - bpf_register_prog_type(&tl_prog); - return 0; -} -late_initcall(register_test_ops); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index a28e09c..bdf4192 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -852,7 +852,7 @@ static int check_call(struct verifier_env *env, int func_id) } /* eBPF programs must be GPL compatible to use GPL-ed functions */ - if (!env->prog->aux->is_gpl_compatible && fn->gpl_only) { + if (!env->prog->gpl_compatible && fn->gpl_only) { verbose("cannot call GPL only function from proprietary program\n"); return -EINVAL; } @@ -1172,6 +1172,17 @@ static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn) return 0; } +static bool may_access_skb(enum bpf_prog_type type) +{ + switch (type) { + case BPF_PROG_TYPE_SOCKET_FILTER: + case BPF_PROG_TYPE_SCHED_CLS: 
+ return true; + default: + return false; + } +} + /* verify safety of LD_ABS|LD_IND instructions: * - they can only appear in the programs where ctx == skb * - since they are wrappers of function calls, they scratch R1-R5 registers, @@ -1194,8 +1205,8 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn) struct reg_state *reg; int i, err; - if (env->prog->aux->prog_type != BPF_PROG_TYPE_SOCKET_FILTER) { - verbose("BPF_LD_ABS|IND instructions are only allowed in socket filters\n"); + if (!may_access_skb(env->prog->type)) { + verbose("BPF_LD_ABS|IND instructions not allowed for this program type\n"); return -EINVAL; } diff --git a/net/6lowpan/Kconfig b/net/6lowpan/Kconfig index e4a02ef..7fa0f38 100644 --- a/net/6lowpan/Kconfig +++ b/net/6lowpan/Kconfig @@ -1,6 +1,61 @@ -config 6LOWPAN +menuconfig 6LOWPAN tristate "6LoWPAN Support" depends on IPV6 ---help--- This enables IPv6 over Low power Wireless Personal Area Network - "6LoWPAN" which is supported by IEEE 802.15.4 or Bluetooth stacks. + +menuconfig 6LOWPAN_NHC + tristate "Next Header Compression Support" + depends on 6LOWPAN + default y + ---help--- + Support for next header compression. + +if 6LOWPAN_NHC + +config 6LOWPAN_NHC_DEST + tristate "Destination Options Header Support" + default y + ---help--- + 6LoWPAN IPv6 Destination Options Header compression according to + RFC6282. + +config 6LOWPAN_NHC_FRAGMENT + tristate "Fragment Header Support" + default y + ---help--- + 6LoWPAN IPv6 Fragment Header compression according to RFC6282. + +config 6LOWPAN_NHC_HOP + tristate "Hop-by-Hop Options Header Support" + default y + ---help--- + 6LoWPAN IPv6 Hop-by-Hop Options Header compression according to + RFC6282. + +config 6LOWPAN_NHC_IPV6 + tristate "IPv6 Header Support" + default y + ---help--- + 6LoWPAN IPv6 Header compression according to RFC6282. + +config 6LOWPAN_NHC_MOBILITY + tristate "Mobility Header Support" + default y + ---help--- + 6LoWPAN IPv6 Mobility Header compression according to RFC6282. + +config 6LOWPAN_NHC_ROUTING + tristate "Routing Header Support" + default y + ---help--- + 6LoWPAN IPv6 Routing Header compression according to RFC6282. + +config 6LOWPAN_NHC_UDP + tristate "UDP Header Support" + default y + ---help--- + 6LoWPAN IPv6 UDP Header compression according to RFC6282. + +endif diff --git a/net/6lowpan/Makefile b/net/6lowpan/Makefile index 415886b..eb8baa7 100644 --- a/net/6lowpan/Makefile +++ b/net/6lowpan/Makefile @@ -1,3 +1,12 @@ -obj-$(CONFIG_6LOWPAN) := 6lowpan.o +obj-$(CONFIG_6LOWPAN) += 6lowpan.o -6lowpan-y := iphc.o +6lowpan-y := iphc.o nhc.o + +#rfc6282 nhcs +obj-$(CONFIG_6LOWPAN_NHC_DEST) += nhc_dest.o +obj-$(CONFIG_6LOWPAN_NHC_FRAGMENT) += nhc_fragment.o +obj-$(CONFIG_6LOWPAN_NHC_HOP) += nhc_hop.o +obj-$(CONFIG_6LOWPAN_NHC_IPV6) += nhc_ipv6.o +obj-$(CONFIG_6LOWPAN_NHC_MOBILITY) += nhc_mobility.o +obj-$(CONFIG_6LOWPAN_NHC_ROUTING) += nhc_routing.o +obj-$(CONFIG_6LOWPAN_NHC_UDP) += nhc_udp.o diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c index 32ffec6..94a375c 100644 --- a/net/6lowpan/iphc.c +++ b/net/6lowpan/iphc.c @@ -54,6 +54,8 @@ #include <net/ipv6.h> #include <net/af_ieee802154.h> +#include "nhc.h" + /* Uncompress address function for source and * destination address(non-multicast). 
* @@ -224,77 +226,6 @@ static int lowpan_uncompress_multicast_daddr(struct sk_buff *skb, return 0; } -static int uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh) -{ - bool fail; - u8 tmp = 0, val = 0; - - fail = lowpan_fetch_skb(skb, &tmp, sizeof(tmp)); - - if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) { - pr_debug("UDP header uncompression\n"); - switch (tmp & LOWPAN_NHC_UDP_CS_P_11) { - case LOWPAN_NHC_UDP_CS_P_00: - fail |= lowpan_fetch_skb(skb, &uh->source, - sizeof(uh->source)); - fail |= lowpan_fetch_skb(skb, &uh->dest, - sizeof(uh->dest)); - break; - case LOWPAN_NHC_UDP_CS_P_01: - fail |= lowpan_fetch_skb(skb, &uh->source, - sizeof(uh->source)); - fail |= lowpan_fetch_skb(skb, &val, sizeof(val)); - uh->dest = htons(val + LOWPAN_NHC_UDP_8BIT_PORT); - break; - case LOWPAN_NHC_UDP_CS_P_10: - fail |= lowpan_fetch_skb(skb, &val, sizeof(val)); - uh->source = htons(val + LOWPAN_NHC_UDP_8BIT_PORT); - fail |= lowpan_fetch_skb(skb, &uh->dest, - sizeof(uh->dest)); - break; - case LOWPAN_NHC_UDP_CS_P_11: - fail |= lowpan_fetch_skb(skb, &val, sizeof(val)); - uh->source = htons(LOWPAN_NHC_UDP_4BIT_PORT + - (val >> 4)); - uh->dest = htons(LOWPAN_NHC_UDP_4BIT_PORT + - (val & 0x0f)); - break; - default: - pr_debug("ERROR: unknown UDP format\n"); - goto err; - } - - pr_debug("uncompressed UDP ports: src = %d, dst = %d\n", - ntohs(uh->source), ntohs(uh->dest)); - - /* checksum */ - if (tmp & LOWPAN_NHC_UDP_CS_C) { - pr_debug_ratelimited("checksum elided currently not supported\n"); - goto err; - } else { - fail |= lowpan_fetch_skb(skb, &uh->check, - sizeof(uh->check)); - } - - /* UDP length needs to be infered from the lower layers - * here, we obtain the hint from the remaining size of the - * frame - */ - uh->len = htons(skb->len + sizeof(struct udphdr)); - pr_debug("uncompressed UDP length: src = %d", ntohs(uh->len)); - } else { - pr_debug("ERROR: unsupported NH format\n"); - goto err; - } - - if (fail) - goto err; - - return 0; -err: - return -EINVAL; -} - /* TTL uncompression values */ static const u8 lowpan_ttl_values[] = { 0, 1, 64, 255 }; @@ -425,29 +356,11 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev, return -EINVAL; } - /* UDP data uncompression */ + /* Next header data uncompression */ if (iphc0 & LOWPAN_IPHC_NH_C) { - struct udphdr uh; - const int needed = sizeof(struct udphdr) + sizeof(hdr); - - if (uncompress_udp_header(skb, &uh)) - return -EINVAL; - - /* replace the compressed UDP head by the uncompressed UDP - * header - */ - err = skb_cow(skb, needed); - if (unlikely(err)) + err = lowpan_nhc_do_uncompression(skb, dev, &hdr); + if (err < 0) return err; - - skb_push(skb, sizeof(struct udphdr)); - skb_reset_transport_header(skb); - skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr)); - - raw_dump_table(__func__, "raw UDP header dump", - (u8 *)&uh, sizeof(uh)); - - hdr.nexthdr = UIP_PROTO_UDP; } else { err = skb_cow(skb, sizeof(hdr)); if (unlikely(err)) @@ -500,71 +413,6 @@ static u8 lowpan_compress_addr_64(u8 **hc_ptr, u8 shift, return rol8(val, shift); } -static void compress_udp_header(u8 **hc_ptr, struct sk_buff *skb) -{ - struct udphdr *uh; - u8 tmp; - - /* In the case of RAW sockets the transport header is not set by - * the ip6 stack so we must set it ourselves - */ - if (skb->transport_header == skb->network_header) - skb_set_transport_header(skb, sizeof(struct ipv6hdr)); - - uh = udp_hdr(skb); - - if (((ntohs(uh->source) & LOWPAN_NHC_UDP_4BIT_MASK) == - LOWPAN_NHC_UDP_4BIT_PORT) && - ((ntohs(uh->dest) & 
LOWPAN_NHC_UDP_4BIT_MASK) == - LOWPAN_NHC_UDP_4BIT_PORT)) { - pr_debug("UDP header: both ports compression to 4 bits\n"); - /* compression value */ - tmp = LOWPAN_NHC_UDP_CS_P_11; - lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp)); - /* source and destination port */ - tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_4BIT_PORT + - ((ntohs(uh->source) - LOWPAN_NHC_UDP_4BIT_PORT) << 4); - lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp)); - } else if ((ntohs(uh->dest) & LOWPAN_NHC_UDP_8BIT_MASK) == - LOWPAN_NHC_UDP_8BIT_PORT) { - pr_debug("UDP header: remove 8 bits of dest\n"); - /* compression value */ - tmp = LOWPAN_NHC_UDP_CS_P_01; - lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp)); - /* source port */ - lowpan_push_hc_data(hc_ptr, &uh->source, sizeof(uh->source)); - /* destination port */ - tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_8BIT_PORT; - lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp)); - } else if ((ntohs(uh->source) & LOWPAN_NHC_UDP_8BIT_MASK) == - LOWPAN_NHC_UDP_8BIT_PORT) { - pr_debug("UDP header: remove 8 bits of source\n"); - /* compression value */ - tmp = LOWPAN_NHC_UDP_CS_P_10; - lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp)); - /* source port */ - tmp = ntohs(uh->source) - LOWPAN_NHC_UDP_8BIT_PORT; - lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp)); - /* destination port */ - lowpan_push_hc_data(hc_ptr, &uh->dest, sizeof(uh->dest)); - } else { - pr_debug("UDP header: can't compress\n"); - /* compression value */ - tmp = LOWPAN_NHC_UDP_CS_P_00; - lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp)); - /* source port */ - lowpan_push_hc_data(hc_ptr, &uh->source, sizeof(uh->source)); - /* destination port */ - lowpan_push_hc_data(hc_ptr, &uh->dest, sizeof(uh->dest)); - } - - /* checksum is always inline */ - lowpan_push_hc_data(hc_ptr, &uh->check, sizeof(uh->check)); - - /* skip the UDP header */ - skb_pull(skb, sizeof(struct udphdr)); -} - int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *_daddr, const void *_saddr, unsigned int len) @@ -572,7 +420,7 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev, u8 tmp, iphc0, iphc1, *hc_ptr; struct ipv6hdr *hdr; u8 head[100] = {}; - int addr_type; + int ret, addr_type; if (type != ETH_P_IPV6) return -EINVAL; @@ -649,13 +497,12 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev, /* NOTE: payload length is always compressed */ - /* Next Header is compress if UDP */ - if (hdr->nexthdr == UIP_PROTO_UDP) - iphc0 |= LOWPAN_IPHC_NH_C; - - if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) - lowpan_push_hc_data(&hc_ptr, &hdr->nexthdr, - sizeof(hdr->nexthdr)); + /* Check if we provide the nhc format for nexthdr and compression + * functionality. If not nexthdr is handled inline and not compressed. 
+ */ + ret = lowpan_nhc_check_compression(skb, hdr, &hc_ptr, &iphc0); + if (ret < 0) + return ret; /* Hop limit * if 1: compress, encoding is 01 @@ -741,9 +588,12 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev, } } - /* UDP header compression */ - if (hdr->nexthdr == UIP_PROTO_UDP) - compress_udp_header(&hc_ptr, skb); + /* next header compression */ + if (iphc0 & LOWPAN_IPHC_NH_C) { + ret = lowpan_nhc_do_compression(skb, hdr, &hc_ptr); + if (ret < 0) + return ret; + } head[0] = iphc0; head[1] = iphc1; @@ -761,4 +611,18 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev, } EXPORT_SYMBOL_GPL(lowpan_header_compress); +static int __init lowpan_module_init(void) +{ + request_module_nowait("nhc_dest"); + request_module_nowait("nhc_fragment"); + request_module_nowait("nhc_hop"); + request_module_nowait("nhc_ipv6"); + request_module_nowait("nhc_mobility"); + request_module_nowait("nhc_routing"); + request_module_nowait("nhc_udp"); + + return 0; +} +module_init(lowpan_module_init); + MODULE_LICENSE("GPL"); diff --git a/net/6lowpan/nhc.c b/net/6lowpan/nhc.c new file mode 100644 index 0000000..fd20fc5 --- /dev/null +++ b/net/6lowpan/nhc.c @@ -0,0 +1,241 @@ +/* + * 6LoWPAN next header compression + * + * + * Authors: + * Alexander Aring <aar@pengutronix.de> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/netdevice.h> + +#include <net/ipv6.h> + +#include "nhc.h" + +static struct rb_root rb_root = RB_ROOT; +static struct lowpan_nhc *lowpan_nexthdr_nhcs[NEXTHDR_MAX]; +static DEFINE_SPINLOCK(lowpan_nhc_lock); + +static int lowpan_nhc_insert(struct lowpan_nhc *nhc) +{ + struct rb_node **new = &rb_root.rb_node, *parent = NULL; + + /* Figure out where to put new node */ + while (*new) { + struct lowpan_nhc *this = container_of(*new, struct lowpan_nhc, + node); + int result, len_dif, len; + + len_dif = nhc->idlen - this->idlen; + + if (nhc->idlen < this->idlen) + len = nhc->idlen; + else + len = this->idlen; + + result = memcmp(nhc->id, this->id, len); + if (!result) + result = len_dif; + + parent = *new; + if (result < 0) + new = &((*new)->rb_left); + else if (result > 0) + new = &((*new)->rb_right); + else + return -EEXIST; + } + + /* Add new node and rebalance tree. 
*/ + rb_link_node(&nhc->node, parent, new); + rb_insert_color(&nhc->node, &rb_root); + + return 0; +} + +static void lowpan_nhc_remove(struct lowpan_nhc *nhc) +{ + rb_erase(&nhc->node, &rb_root); +} + +static struct lowpan_nhc *lowpan_nhc_by_nhcid(const struct sk_buff *skb) +{ + struct rb_node *node = rb_root.rb_node; + const u8 *nhcid_skb_ptr = skb->data; + + while (node) { + struct lowpan_nhc *nhc = container_of(node, struct lowpan_nhc, + node); + u8 nhcid_skb_ptr_masked[LOWPAN_NHC_MAX_ID_LEN]; + int result, i; + + if (nhcid_skb_ptr + nhc->idlen > skb->data + skb->len) + return NULL; + + /* copy and mask afterwards the nhid value from skb */ + memcpy(nhcid_skb_ptr_masked, nhcid_skb_ptr, nhc->idlen); + for (i = 0; i < nhc->idlen; i++) + nhcid_skb_ptr_masked[i] &= nhc->idmask[i]; + + result = memcmp(nhcid_skb_ptr_masked, nhc->id, nhc->idlen); + if (result < 0) + node = node->rb_left; + else if (result > 0) + node = node->rb_right; + else + return nhc; + } + + return NULL; +} + +int lowpan_nhc_check_compression(struct sk_buff *skb, + const struct ipv6hdr *hdr, u8 **hc_ptr, + u8 *iphc0) +{ + struct lowpan_nhc *nhc; + + spin_lock_bh(&lowpan_nhc_lock); + + nhc = lowpan_nexthdr_nhcs[hdr->nexthdr]; + if (nhc && nhc->compress) + *iphc0 |= LOWPAN_IPHC_NH_C; + else + lowpan_push_hc_data(hc_ptr, &hdr->nexthdr, + sizeof(hdr->nexthdr)); + + spin_unlock_bh(&lowpan_nhc_lock); + + return 0; +} + +int lowpan_nhc_do_compression(struct sk_buff *skb, const struct ipv6hdr *hdr, + u8 **hc_ptr) +{ + int ret; + struct lowpan_nhc *nhc; + + spin_lock_bh(&lowpan_nhc_lock); + + nhc = lowpan_nexthdr_nhcs[hdr->nexthdr]; + /* check if the nhc module was removed in unlocked part. + * TODO: this is a workaround we should prevent unloading + * of nhc modules while unlocked part, this will always drop + * the lowpan packet but it's very unlikely. + * + * Solution isn't easy because we need to decide at + * lowpan_nhc_check_compression if we do a compression or not. + * Because the inline data which is added to skb, we can't move this + * handling. 
+ */ + if (unlikely(!nhc || !nhc->compress)) { + ret = -EINVAL; + goto out; + } + + /* In the case of RAW sockets the transport header is not set by + * the ip6 stack so we must set it ourselves + */ + if (skb->transport_header == skb->network_header) + skb_set_transport_header(skb, sizeof(struct ipv6hdr)); + + ret = nhc->compress(skb, hc_ptr); + if (ret < 0) + goto out; + + /* skip the transport header */ + skb_pull(skb, nhc->nexthdrlen); + +out: + spin_unlock_bh(&lowpan_nhc_lock); + + return ret; +} + +int lowpan_nhc_do_uncompression(struct sk_buff *skb, struct net_device *dev, + struct ipv6hdr *hdr) +{ + struct lowpan_nhc *nhc; + int ret; + + spin_lock_bh(&lowpan_nhc_lock); + + nhc = lowpan_nhc_by_nhcid(skb); + if (nhc) { + if (nhc->uncompress) { + ret = nhc->uncompress(skb, sizeof(struct ipv6hdr) + + nhc->nexthdrlen); + if (ret < 0) { + spin_unlock_bh(&lowpan_nhc_lock); + return ret; + } + } else { + spin_unlock_bh(&lowpan_nhc_lock); + netdev_warn(dev, "received nhc id for %s which is not implemented.\n", + nhc->name); + return -ENOTSUPP; + } + } else { + spin_unlock_bh(&lowpan_nhc_lock); + netdev_warn(dev, "received unknown nhc id which was not found.\n"); + return -ENOENT; + } + + hdr->nexthdr = nhc->nexthdr; + skb_reset_transport_header(skb); + raw_dump_table(__func__, "raw transport header dump", + skb_transport_header(skb), nhc->nexthdrlen); + + spin_unlock_bh(&lowpan_nhc_lock); + + return 0; +} + +int lowpan_nhc_add(struct lowpan_nhc *nhc) +{ + int ret; + + if (!nhc->idlen || !nhc->idsetup) + return -EINVAL; + + WARN_ONCE(nhc->idlen > LOWPAN_NHC_MAX_ID_LEN, + "LOWPAN_NHC_MAX_ID_LEN should be updated to %zd.\n", + nhc->idlen); + + nhc->idsetup(nhc); + + spin_lock_bh(&lowpan_nhc_lock); + + if (lowpan_nexthdr_nhcs[nhc->nexthdr]) { + ret = -EEXIST; + goto out; + } + + ret = lowpan_nhc_insert(nhc); + if (ret < 0) + goto out; + + lowpan_nexthdr_nhcs[nhc->nexthdr] = nhc; +out: + spin_unlock_bh(&lowpan_nhc_lock); + return ret; +} +EXPORT_SYMBOL(lowpan_nhc_add); + +void lowpan_nhc_del(struct lowpan_nhc *nhc) +{ + spin_lock_bh(&lowpan_nhc_lock); + + lowpan_nhc_remove(nhc); + lowpan_nexthdr_nhcs[nhc->nexthdr] = NULL; + + spin_unlock_bh(&lowpan_nhc_lock); + + synchronize_net(); +} +EXPORT_SYMBOL(lowpan_nhc_del); diff --git a/net/6lowpan/nhc.h b/net/6lowpan/nhc.h new file mode 100644 index 0000000..ed44938 --- /dev/null +++ b/net/6lowpan/nhc.h @@ -0,0 +1,146 @@ +#ifndef __6LOWPAN_NHC_H +#define __6LOWPAN_NHC_H + +#include <linux/skbuff.h> +#include <linux/rbtree.h> +#include <linux/module.h> + +#include <net/6lowpan.h> +#include <net/ipv6.h> + +#define LOWPAN_NHC_MAX_ID_LEN 1 + +/** + * LOWPAN_NHC - helper macro to generate nh id fields and lowpan_nhc struct + * + * @__nhc: variable name of the lowpan_nhc struct. + * @_name: const char * of common header compression name. + * @_nexthdr: ipv6 nexthdr field for the header compression. + * @_nexthdrlen: ipv6 nexthdr len for the reserved space. + * @_idsetup: callback to setup id and mask values. + * @_idlen: len for the next header id and mask, should be always the same. + * @_uncompress: callback for uncompression call. + * @_compress: callback for compression call. 
+ */ +#define LOWPAN_NHC(__nhc, _name, _nexthdr, \ + _hdrlen, _idsetup, _idlen, \ + _uncompress, _compress) \ +static u8 __nhc##_val[_idlen]; \ +static u8 __nhc##_mask[_idlen]; \ +static struct lowpan_nhc __nhc = { \ + .name = _name, \ + .nexthdr = _nexthdr, \ + .nexthdrlen = _hdrlen, \ + .id = __nhc##_val, \ + .idmask = __nhc##_mask, \ + .idlen = _idlen, \ + .idsetup = _idsetup, \ + .uncompress = _uncompress, \ + .compress = _compress, \ +} + +#define module_lowpan_nhc(__nhc) \ +static int __init __nhc##_init(void) \ +{ \ + return lowpan_nhc_add(&(__nhc)); \ +} \ +module_init(__nhc##_init); \ +static void __exit __nhc##_exit(void) \ +{ \ + lowpan_nhc_del(&(__nhc)); \ +} \ +module_exit(__nhc##_exit); + +/** + * struct lowpan_nhc - hold 6lowpan next hdr compression ifnformation + * + * @node: holder for the rbtree. + * @name: name of the specific next header compression + * @nexthdr: next header value of the protocol which should be compressed. + * @nexthdrlen: ipv6 nexthdr len for the reserved space. + * @id: array for nhc id. Note this need to be in network byteorder. + * @mask: array for nhc id mask. Note this need to be in network byteorder. + * @len: the length of the next header id and mask. + * @setup: callback to setup fill the next header id value and mask. + * @compress: callback to do the header compression. + * @uncompress: callback to do the header uncompression. + */ +struct lowpan_nhc { + struct rb_node node; + const char *name; + const u8 nexthdr; + const size_t nexthdrlen; + u8 *id; + u8 *idmask; + const size_t idlen; + + void (*idsetup)(struct lowpan_nhc *nhc); + int (*uncompress)(struct sk_buff *skb, size_t needed); + int (*compress)(struct sk_buff *skb, u8 **hc_ptr); +}; + +/** + * lowpan_nhc_by_nexthdr - return the 6lowpan nhc by ipv6 nexthdr. + * + * @nexthdr: ipv6 nexthdr value. + */ +struct lowpan_nhc *lowpan_nhc_by_nexthdr(u8 nexthdr); + +/** + * lowpan_nhc_check_compression - checks if we support compression format. If + * we support the nhc by nexthdr field, the 6LoWPAN iphc NHC bit will be + * set. If we don't support nexthdr will be added as inline data to the + * 6LoWPAN header. + * + * @skb: skb of 6LoWPAN header to read nhc and replace header. + * @hdr: ipv6hdr to check the nexthdr value + * @hc_ptr: pointer for 6LoWPAN header which should increment at the end of + * replaced header. + * @iphc0: iphc0 pointer to set the 6LoWPAN NHC bit + */ +int lowpan_nhc_check_compression(struct sk_buff *skb, + const struct ipv6hdr *hdr, u8 **hc_ptr, + u8 *iphc0); + +/** + * lowpan_nhc_do_compression - calling compress callback for nhc + * + * @skb: skb of 6LoWPAN header to read nhc and replace header. + * @hdr: ipv6hdr to set the nexthdr value + * @hc_ptr: pointer for 6LoWPAN header which should increment at the end of + * replaced header. + */ +int lowpan_nhc_do_compression(struct sk_buff *skb, const struct ipv6hdr *hdr, + u8 **hc_ptr); + +/** + * lowpan_nhc_do_uncompression - calling uncompress callback for nhc + * + * @nhc: 6LoWPAN nhc context, get by lowpan_nhc_by_ functions. + * @skb: skb of 6LoWPAN header, skb->data should be pointed to nhc id value. + * @dev: netdevice for print logging information. + * @hdr: ipv6hdr for setting nexthdr value. + */ +int lowpan_nhc_do_uncompression(struct sk_buff *skb, struct net_device *dev, + struct ipv6hdr *hdr); + +/** + * lowpan_nhc_add - register a next header compression to framework + * + * @nhc: nhc which should be add. 
+ */ +int lowpan_nhc_add(struct lowpan_nhc *nhc); + +/** + * lowpan_nhc_del - delete a next header compression from framework + * + * @nhc: nhc which should be delete. + */ +void lowpan_nhc_del(struct lowpan_nhc *nhc); + +/** + * lowpan_nhc_init - adding all default nhcs + */ +void lowpan_nhc_init(void); + +#endif /* __6LOWPAN_NHC_H */ diff --git a/net/6lowpan/nhc_dest.c b/net/6lowpan/nhc_dest.c new file mode 100644 index 0000000..0b292c9 --- /dev/null +++ b/net/6lowpan/nhc_dest.c @@ -0,0 +1,28 @@ +/* + * 6LoWPAN IPv6 Destination Options Header compression according to + * RFC6282 + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include "nhc.h" + +#define LOWPAN_NHC_DEST_IDLEN 1 +#define LOWPAN_NHC_DEST_ID_0 0xe6 +#define LOWPAN_NHC_DEST_MASK_0 0xfe + +static void dest_nhid_setup(struct lowpan_nhc *nhc) +{ + nhc->id[0] = LOWPAN_NHC_DEST_ID_0; + nhc->idmask[0] = LOWPAN_NHC_DEST_MASK_0; +} + +LOWPAN_NHC(nhc_dest, "RFC6282 Destination Options", NEXTHDR_DEST, 0, + dest_nhid_setup, LOWPAN_NHC_DEST_IDLEN, NULL, NULL); + +module_lowpan_nhc(nhc_dest); +MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Destination Options compression"); +MODULE_LICENSE("GPL"); diff --git a/net/6lowpan/nhc_fragment.c b/net/6lowpan/nhc_fragment.c new file mode 100644 index 0000000..473dbc5 --- /dev/null +++ b/net/6lowpan/nhc_fragment.c @@ -0,0 +1,27 @@ +/* + * 6LoWPAN IPv6 Fragment Header compression according to RFC6282 + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include "nhc.h" + +#define LOWPAN_NHC_FRAGMENT_IDLEN 1 +#define LOWPAN_NHC_FRAGMENT_ID_0 0xe4 +#define LOWPAN_NHC_FRAGMENT_MASK_0 0xfe + +static void fragment_nhid_setup(struct lowpan_nhc *nhc) +{ + nhc->id[0] = LOWPAN_NHC_FRAGMENT_ID_0; + nhc->idmask[0] = LOWPAN_NHC_FRAGMENT_MASK_0; +} + +LOWPAN_NHC(nhc_fragment, "RFC6282 Fragment", NEXTHDR_FRAGMENT, 0, + fragment_nhid_setup, LOWPAN_NHC_FRAGMENT_IDLEN, NULL, NULL); + +module_lowpan_nhc(nhc_fragment); +MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Fragment compression"); +MODULE_LICENSE("GPL"); diff --git a/net/6lowpan/nhc_hop.c b/net/6lowpan/nhc_hop.c new file mode 100644 index 0000000..1eb66be --- /dev/null +++ b/net/6lowpan/nhc_hop.c @@ -0,0 +1,27 @@ +/* + * 6LoWPAN IPv6 Hop-by-Hop Options Header compression according to RFC6282 + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include "nhc.h" + +#define LOWPAN_NHC_HOP_IDLEN 1 +#define LOWPAN_NHC_HOP_ID_0 0xe0 +#define LOWPAN_NHC_HOP_MASK_0 0xfe + +static void hop_nhid_setup(struct lowpan_nhc *nhc) +{ + nhc->id[0] = LOWPAN_NHC_HOP_ID_0; + nhc->idmask[0] = LOWPAN_NHC_HOP_MASK_0; +} + +LOWPAN_NHC(nhc_hop, "RFC6282 Hop-by-Hop Options", NEXTHDR_HOP, 0, + hop_nhid_setup, LOWPAN_NHC_HOP_IDLEN, NULL, NULL); + +module_lowpan_nhc(nhc_hop); +MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Hop-by-Hop Options compression"); +MODULE_LICENSE("GPL"); diff --git a/net/6lowpan/nhc_ipv6.c b/net/6lowpan/nhc_ipv6.c new file mode 100644 index 0000000..2313d16 --- /dev/null +++ b/net/6lowpan/nhc_ipv6.c @@ -0,0 +1,27 @@ +/* + * 6LoWPAN IPv6 Header compression according to RFC6282 + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include "nhc.h" + +#define LOWPAN_NHC_IPV6_IDLEN 1 +#define LOWPAN_NHC_IPV6_ID_0 0xee +#define LOWPAN_NHC_IPV6_MASK_0 0xfe + +static void ipv6_nhid_setup(struct lowpan_nhc *nhc) +{ + nhc->id[0] = LOWPAN_NHC_IPV6_ID_0; + nhc->idmask[0] = LOWPAN_NHC_IPV6_MASK_0; +} + +LOWPAN_NHC(nhc_ipv6, "RFC6282 IPv6", NEXTHDR_IPV6, 0, ipv6_nhid_setup, + LOWPAN_NHC_IPV6_IDLEN, NULL, NULL); + +module_lowpan_nhc(nhc_ipv6); +MODULE_DESCRIPTION("6LoWPAN next header RFC6282 IPv6 compression"); +MODULE_LICENSE("GPL"); diff --git a/net/6lowpan/nhc_mobility.c b/net/6lowpan/nhc_mobility.c new file mode 100644 index 0000000..60d3f38 --- /dev/null +++ b/net/6lowpan/nhc_mobility.c @@ -0,0 +1,27 @@ +/* + * 6LoWPAN IPv6 Mobility Header compression according to RFC6282 + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include "nhc.h" + +#define LOWPAN_NHC_MOBILITY_IDLEN 1 +#define LOWPAN_NHC_MOBILITY_ID_0 0xe8 +#define LOWPAN_NHC_MOBILITY_MASK_0 0xfe + +static void mobility_nhid_setup(struct lowpan_nhc *nhc) +{ + nhc->id[0] = LOWPAN_NHC_MOBILITY_ID_0; + nhc->idmask[0] = LOWPAN_NHC_MOBILITY_MASK_0; +} + +LOWPAN_NHC(nhc_mobility, "RFC6282 Mobility", NEXTHDR_MOBILITY, 0, + mobility_nhid_setup, LOWPAN_NHC_MOBILITY_IDLEN, NULL, NULL); + +module_lowpan_nhc(nhc_mobility); +MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Mobility compression"); +MODULE_LICENSE("GPL"); diff --git a/net/6lowpan/nhc_routing.c b/net/6lowpan/nhc_routing.c new file mode 100644 index 0000000..c393280 --- /dev/null +++ b/net/6lowpan/nhc_routing.c @@ -0,0 +1,27 @@ +/* + * 6LoWPAN IPv6 Routing Header compression according to RFC6282 + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include "nhc.h" + +#define LOWPAN_NHC_ROUTING_IDLEN 1 +#define LOWPAN_NHC_ROUTING_ID_0 0xe2 +#define LOWPAN_NHC_ROUTING_MASK_0 0xfe + +static void routing_nhid_setup(struct lowpan_nhc *nhc) +{ + nhc->id[0] = LOWPAN_NHC_ROUTING_ID_0; + nhc->idmask[0] = LOWPAN_NHC_ROUTING_MASK_0; +} + +LOWPAN_NHC(nhc_routing, "RFC6282 Routing", NEXTHDR_ROUTING, 0, + routing_nhid_setup, LOWPAN_NHC_ROUTING_IDLEN, NULL, NULL); + +module_lowpan_nhc(nhc_routing); +MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Routing compression"); +MODULE_LICENSE("GPL"); diff --git a/net/6lowpan/nhc_udp.c b/net/6lowpan/nhc_udp.c new file mode 100644 index 0000000..c6bcaeb --- /dev/null +++ b/net/6lowpan/nhc_udp.c @@ -0,0 +1,157 @@ +/* + * 6LoWPAN IPv6 UDP compression according to RFC6282 + * + * + * Authors: + * Alexander Aring <aar@pengutronix.de> + * + * Orignal written by: + * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> + * Jon Smirl <jonsmirl@gmail.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include "nhc.h" + +#define LOWPAN_NHC_UDP_IDLEN 1 + +static int udp_uncompress(struct sk_buff *skb, size_t needed) +{ + u8 tmp = 0, val = 0; + struct udphdr uh; + bool fail; + int err; + + fail = lowpan_fetch_skb(skb, &tmp, sizeof(tmp)); + + pr_debug("UDP header uncompression\n"); + switch (tmp & LOWPAN_NHC_UDP_CS_P_11) { + case LOWPAN_NHC_UDP_CS_P_00: + fail |= lowpan_fetch_skb(skb, &uh.source, sizeof(uh.source)); + fail |= lowpan_fetch_skb(skb, &uh.dest, sizeof(uh.dest)); + break; + case LOWPAN_NHC_UDP_CS_P_01: + fail |= lowpan_fetch_skb(skb, &uh.source, sizeof(uh.source)); + fail |= lowpan_fetch_skb(skb, &val, sizeof(val)); + uh.dest = htons(val + LOWPAN_NHC_UDP_8BIT_PORT); + break; + case LOWPAN_NHC_UDP_CS_P_10: + fail |= lowpan_fetch_skb(skb, &val, sizeof(val)); + uh.source = htons(val + LOWPAN_NHC_UDP_8BIT_PORT); + fail |= lowpan_fetch_skb(skb, &uh.dest, sizeof(uh.dest)); + break; + case LOWPAN_NHC_UDP_CS_P_11: + fail |= lowpan_fetch_skb(skb, &val, sizeof(val)); + uh.source = htons(LOWPAN_NHC_UDP_4BIT_PORT + (val >> 4)); + uh.dest = htons(LOWPAN_NHC_UDP_4BIT_PORT + (val & 0x0f)); + break; + default: + BUG(); + } + + pr_debug("uncompressed UDP ports: src = %d, dst = %d\n", + ntohs(uh.source), ntohs(uh.dest)); + + /* checksum */ + if (tmp & LOWPAN_NHC_UDP_CS_C) { + pr_debug_ratelimited("checksum elided currently not supported\n"); + fail = true; + } else { + fail |= lowpan_fetch_skb(skb, &uh.check, sizeof(uh.check)); + } + + if (fail) + return -EINVAL; + + /* UDP length needs to be infered from the lower layers + * here, we obtain the hint from the remaining size of the + * frame + */ + uh.len = htons(skb->len + sizeof(struct udphdr)); + pr_debug("uncompressed UDP length: src = %d", ntohs(uh.len)); + + /* replace the compressed UDP head by the uncompressed UDP + * header + */ + err = skb_cow(skb, needed); + if (unlikely(err)) + return err; + + skb_push(skb, sizeof(struct udphdr)); + skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr)); + + return 0; +} + +static int udp_compress(struct sk_buff *skb, u8 **hc_ptr) +{ + const struct udphdr *uh = udp_hdr(skb); + u8 tmp; + + if (((ntohs(uh->source) & LOWPAN_NHC_UDP_4BIT_MASK) == + LOWPAN_NHC_UDP_4BIT_PORT) && + ((ntohs(uh->dest) & LOWPAN_NHC_UDP_4BIT_MASK) == + LOWPAN_NHC_UDP_4BIT_PORT)) { + pr_debug("UDP header: both ports compression to 4 
bits\n"); + /* compression value */ + tmp = LOWPAN_NHC_UDP_CS_P_11; + lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp)); + /* source and destination port */ + tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_4BIT_PORT + + ((ntohs(uh->source) - LOWPAN_NHC_UDP_4BIT_PORT) << 4); + lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp)); + } else if ((ntohs(uh->dest) & LOWPAN_NHC_UDP_8BIT_MASK) == + LOWPAN_NHC_UDP_8BIT_PORT) { + pr_debug("UDP header: remove 8 bits of dest\n"); + /* compression value */ + tmp = LOWPAN_NHC_UDP_CS_P_01; + lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp)); + /* source port */ + lowpan_push_hc_data(hc_ptr, &uh->source, sizeof(uh->source)); + /* destination port */ + tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_8BIT_PORT; + lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp)); + } else if ((ntohs(uh->source) & LOWPAN_NHC_UDP_8BIT_MASK) == + LOWPAN_NHC_UDP_8BIT_PORT) { + pr_debug("UDP header: remove 8 bits of source\n"); + /* compression value */ + tmp = LOWPAN_NHC_UDP_CS_P_10; + lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp)); + /* source port */ + tmp = ntohs(uh->source) - LOWPAN_NHC_UDP_8BIT_PORT; + lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp)); + /* destination port */ + lowpan_push_hc_data(hc_ptr, &uh->dest, sizeof(uh->dest)); + } else { + pr_debug("UDP header: can't compress\n"); + /* compression value */ + tmp = LOWPAN_NHC_UDP_CS_P_00; + lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp)); + /* source port */ + lowpan_push_hc_data(hc_ptr, &uh->source, sizeof(uh->source)); + /* destination port */ + lowpan_push_hc_data(hc_ptr, &uh->dest, sizeof(uh->dest)); + } + + /* checksum is always inline */ + lowpan_push_hc_data(hc_ptr, &uh->check, sizeof(uh->check)); + + return 0; +} + +static void udp_nhid_setup(struct lowpan_nhc *nhc) +{ + nhc->id[0] = LOWPAN_NHC_UDP_ID; + nhc->idmask[0] = LOWPAN_NHC_UDP_MASK; +} + +LOWPAN_NHC(nhc_udp, "RFC6282 UDP", NEXTHDR_UDP, sizeof(struct udphdr), + udp_nhid_setup, LOWPAN_NHC_UDP_IDLEN, udp_uncompress, udp_compress); + +module_lowpan_nhc(nhc_udp); +MODULE_DESCRIPTION("6LoWPAN next header RFC6282 UDP compression"); +MODULE_LICENSE("GPL"); diff --git a/net/802/fc.c b/net/802/fc.c index 7c174b6..7b92190 100644 --- a/net/802/fc.c +++ b/net/802/fc.c @@ -75,29 +75,8 @@ static int fc_header(struct sk_buff *skb, struct net_device *dev, return -hdr_len; } -/* - * A neighbour discovery of some species (eg arp) has completed. We - * can now send the packet. - */ - -static int fc_rebuild_header(struct sk_buff *skb) -{ -#ifdef CONFIG_INET - struct fch_hdr *fch=(struct fch_hdr *)skb->data; - struct fcllc *fcllc=(struct fcllc *)(skb->data+sizeof(struct fch_hdr)); - if(fcllc->ethertype != htons(ETH_P_IP)) { - printk("fc_rebuild_header: Don't know how to resolve type %04X addresses ?\n", ntohs(fcllc->ethertype)); - return 0; - } - return arp_find(fch->daddr, skb); -#else - return 0; -#endif -} - static const struct header_ops fc_header_ops = { .create = fc_header, - .rebuild = fc_rebuild_header, }; static void fc_setup(struct net_device *dev) diff --git a/net/802/fddi.c b/net/802/fddi.c index 59e7346..7d3a0af 100644 --- a/net/802/fddi.c +++ b/net/802/fddi.c @@ -87,31 +87,6 @@ static int fddi_header(struct sk_buff *skb, struct net_device *dev, return -hl; } - -/* - * Rebuild the FDDI MAC header. This is called after an ARP - * (or in future other address resolution) has completed on - * this sk_buff. We now let ARP fill in the other fields. 
- */ - -static int fddi_rebuild_header(struct sk_buff *skb) -{ - struct fddihdr *fddi = (struct fddihdr *)skb->data; - -#ifdef CONFIG_INET - if (fddi->hdr.llc_snap.ethertype == htons(ETH_P_IP)) - /* Try to get ARP to resolve the header and fill destination address */ - return arp_find(fddi->daddr, skb); - else -#endif - { - printk("%s: Don't know how to resolve type %04X addresses.\n", - skb->dev->name, ntohs(fddi->hdr.llc_snap.ethertype)); - return 0; - } -} - - /* * Determine the packet's protocol ID and fill in skb fields. * This routine is called before an incoming packet is passed @@ -177,7 +152,6 @@ EXPORT_SYMBOL(fddi_change_mtu); static const struct header_ops fddi_header_ops = { .create = fddi_header, - .rebuild = fddi_rebuild_header, }; diff --git a/net/802/hippi.c b/net/802/hippi.c index 2e03f82..ade1a52 100644 --- a/net/802/hippi.c +++ b/net/802/hippi.c @@ -91,33 +91,6 @@ static int hippi_header(struct sk_buff *skb, struct net_device *dev, /* - * Rebuild the HIPPI MAC header. This is called after an ARP has - * completed on this sk_buff. We now let ARP fill in the other fields. - */ - -static int hippi_rebuild_header(struct sk_buff *skb) -{ - struct hippi_hdr *hip = (struct hippi_hdr *)skb->data; - - /* - * Only IP is currently supported - */ - - if(hip->snap.ethertype != htons(ETH_P_IP)) - { - printk(KERN_DEBUG "%s: unable to resolve type %X addresses.\n",skb->dev->name,ntohs(hip->snap.ethertype)); - return 0; - } - - /* - * We don't support dynamic ARP on HIPPI, but we use the ARP - * static ARP tables to hold the I-FIELDs. - */ - return arp_find(hip->le.daddr, skb); -} - - -/* * Determine the packet's protocol ID. */ @@ -186,7 +159,6 @@ EXPORT_SYMBOL(hippi_neigh_setup_dev); static const struct header_ops hippi_header_ops = { .create = hippi_header, - .rebuild = hippi_rebuild_header, }; diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 1189564..f196552 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -37,39 +37,6 @@ #include <linux/netpoll.h> /* - * Rebuild the Ethernet MAC header. This is called after an ARP - * (or in future other address resolution) has completed on this - * sk_buff. We now let ARP fill in the other fields. - * - * This routine CANNOT use cached dst->neigh! - * Really, it is used only when dst->neigh is wrong. - * - * TODO: This needs a checkup, I'm ignorant here. --BLG - */ -static int vlan_dev_rebuild_header(struct sk_buff *skb) -{ - struct net_device *dev = skb->dev; - struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data); - - switch (veth->h_vlan_encapsulated_proto) { -#ifdef CONFIG_INET - case htons(ETH_P_IP): - - /* TODO: Confirm this will work with VLAN headers... 
*/ - return arp_find(veth->h_dest, skb); -#endif - default: - pr_debug("%s: unable to resolve type %X addresses\n", - dev->name, ntohs(veth->h_vlan_encapsulated_proto)); - - ether_addr_copy(veth->h_source, dev->dev_addr); - break; - } - - return 0; -} - -/* * Create the VLAN header for an arbitrary protocol layer * * saddr=NULL means use device source address @@ -534,7 +501,6 @@ static int vlan_dev_get_lock_subclass(struct net_device *dev) static const struct header_ops vlan_header_ops = { .create = vlan_dev_hard_header, - .rebuild = vlan_dev_rebuild_header, .parse = eth_header_parse, }; @@ -554,7 +520,6 @@ static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev static const struct header_ops vlan_passthru_header_ops = { .create = vlan_passthru_hard_header, - .rebuild = dev_rebuild_header, .parse = eth_header_parse, }; @@ -827,5 +792,5 @@ void vlan_setup(struct net_device *dev) dev->destructor = vlan_dev_free; dev->ethtool_ops = &vlan_ethtool_ops; - memset(dev->broadcast, 0, ETH_ALEN); + eth_zero_addr(dev->broadcast); } diff --git a/net/Makefile b/net/Makefile index 38704bd..3995613 100644 --- a/net/Makefile +++ b/net/Makefile @@ -69,7 +69,7 @@ obj-$(CONFIG_BATMAN_ADV) += batman-adv/ obj-$(CONFIG_NFC) += nfc/ obj-$(CONFIG_OPENVSWITCH) += openvswitch/ obj-$(CONFIG_VSOCKETS) += vmw_vsock/ -obj-$(CONFIG_NET_MPLS_GSO) += mpls/ +obj-$(CONFIG_MPLS) += mpls/ obj-$(CONFIG_HSR) += hsr/ ifneq ($(CONFIG_NET_SWITCHDEV),) obj-y += switchdev/ diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c index d1c55d8..8ad3ec2 100644 --- a/net/appletalk/aarp.c +++ b/net/appletalk/aarp.c @@ -141,7 +141,7 @@ static void __aarp_send_query(struct aarp_entry *a) eah->pa_src_net = sat->s_net; eah->pa_src_node = sat->s_node; - memset(eah->hw_dst, '\0', ETH_ALEN); + eth_zero_addr(eah->hw_dst); eah->pa_dst_zero = 0; eah->pa_dst_net = a->target_addr.s_net; @@ -189,7 +189,7 @@ static void aarp_send_reply(struct net_device *dev, struct atalk_addr *us, eah->pa_src_node = us->s_node; if (!sha) - memset(eah->hw_dst, '\0', ETH_ALEN); + eth_zero_addr(eah->hw_dst); else ether_addr_copy(eah->hw_dst, sha); @@ -239,7 +239,7 @@ static void aarp_send_probe(struct net_device *dev, struct atalk_addr *us) eah->pa_src_net = us->s_net; eah->pa_src_node = us->s_node; - memset(eah->hw_dst, '\0', ETH_ALEN); + eth_zero_addr(eah->hw_dst); eah->pa_dst_zero = 0; eah->pa_dst_net = us->s_net; diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 0d0766e..3b7ad43 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -1559,8 +1559,7 @@ freeit: return 0; } -static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, - size_t len) +static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct atalk_sock *at = at_sk(sk); @@ -1728,8 +1727,8 @@ out: return err ? 
: len; } -static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, - size_t size, int flags) +static int atalk_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, + int flags) { struct sock *sk = sock->sk; struct ddpehdr *ddp; diff --git a/net/atm/common.c b/net/atm/common.c index b84057e..ed04666 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -523,8 +523,8 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci) return 0; } -int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, - size_t size, int flags) +int vcc_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, + int flags) { struct sock *sk = sock->sk; struct atm_vcc *vcc; @@ -569,8 +569,7 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, return copied; } -int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, - size_t size) +int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size) { struct sock *sk = sock->sk; DEFINE_WAIT(wait); diff --git a/net/atm/common.h b/net/atm/common.h index cc3c2da..4d6f5b2 100644 --- a/net/atm/common.h +++ b/net/atm/common.h @@ -13,10 +13,9 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int family); int vcc_release(struct socket *sock); int vcc_connect(struct socket *sock, int itf, short vpi, int vci); -int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, - size_t size, int flags); -int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, - size_t total_len); +int vcc_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, + int flags); +int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len); unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait); int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); diff --git a/net/atm/lec.c b/net/atm/lec.c index 4b98f89..cd3b379 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -2001,7 +2001,7 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data, if (entry == NULL) goto out; memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); - memset(entry->mac_addr, 0, ETH_ALEN); + eth_zero_addr(entry->mac_addr); entry->recv_vcc = vcc; entry->old_recv_push = old_push; entry->status = ESI_UNKNOWN; @@ -2086,7 +2086,7 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data, entry->vcc = vcc; entry->old_push = old_push; memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); - memset(entry->mac_addr, 0, ETH_ALEN); + eth_zero_addr(entry->mac_addr); entry->status = ESI_UNKNOWN; hlist_add_head(&entry->next, &priv->lec_arp_empty_ones); entry->timer.expires = jiffies + priv->vcc_timeout_period; diff --git a/net/atm/signaling.c b/net/atm/signaling.c index 523bce7..4fd6af4 100644 --- a/net/atm/signaling.c +++ b/net/atm/signaling.c @@ -19,36 +19,15 @@ #include "resources.h" #include "signaling.h" -#undef WAIT_FOR_DEMON /* #define this if system calls on SVC sockets - should block until the demon runs. - Danger: may cause nasty hangs if the demon - crashes. 
*/ - struct atm_vcc *sigd = NULL; -#ifdef WAIT_FOR_DEMON -static DECLARE_WAIT_QUEUE_HEAD(sigd_sleep); -#endif static void sigd_put_skb(struct sk_buff *skb) { -#ifdef WAIT_FOR_DEMON - DECLARE_WAITQUEUE(wait, current); - - add_wait_queue(&sigd_sleep, &wait); - while (!sigd) { - set_current_state(TASK_UNINTERRUPTIBLE); - pr_debug("atmsvc: waiting for signaling daemon...\n"); - schedule(); - } - current->state = TASK_RUNNING; - remove_wait_queue(&sigd_sleep, &wait); -#else if (!sigd) { pr_debug("atmsvc: no signaling daemon\n"); kfree_skb(skb); return; } -#endif atm_force_charge(sigd, skb->truesize); skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb); sk_atm(sigd)->sk_data_ready(sk_atm(sigd)); @@ -261,8 +240,5 @@ int sigd_attach(struct atm_vcc *vcc) vcc_insert_socket(sk_atm(vcc)); set_bit(ATM_VF_META, &vcc->flags); set_bit(ATM_VF_READY, &vcc->flags); -#ifdef WAIT_FOR_DEMON - wake_up(&sigd_sleep); -#endif return 0; } diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index ca049a7..330c1f4 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1432,8 +1432,7 @@ out: return err; } -static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int ax25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { DECLARE_SOCKADDR(struct sockaddr_ax25 *, usax, msg->msg_name); struct sock *sk = sock->sk; @@ -1599,8 +1598,8 @@ out: return err; } -static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, int flags) +static int ax25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, + int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c index 67de6b3..7c646bb 100644 --- a/net/ax25/ax25_ip.c +++ b/net/ax25/ax25_ip.c @@ -46,9 +46,9 @@ #ifdef CONFIG_INET -int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, const void *daddr, - const void *saddr, unsigned int len) +static int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, const void *daddr, + const void *saddr, unsigned int len) { unsigned char *buff; @@ -100,7 +100,7 @@ int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, return -AX25_HEADER_LEN; /* Unfinished header */ } -int ax25_rebuild_header(struct sk_buff *skb) +netdev_tx_t ax25_ip_xmit(struct sk_buff *skb) { struct sk_buff *ourskb; unsigned char *bp = skb->data; @@ -115,9 +115,6 @@ int ax25_rebuild_header(struct sk_buff *skb) dst = (ax25_address *)(bp + 1); src = (ax25_address *)(bp + 8); - if (arp_find(bp + 1, skb)) - return 1; - route = ax25_get_route(dst, NULL); if (route) { digipeat = route->digipeat; @@ -129,6 +126,7 @@ int ax25_rebuild_header(struct sk_buff *skb) dev = skb->dev; if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) { + kfree_skb(skb); goto put; } @@ -212,31 +210,29 @@ put: if (route) ax25_put_route(route); - return 1; + return NETDEV_TX_OK; } #else /* INET */ -int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, const void *daddr, - const void *saddr, unsigned int len) +static int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, const void *daddr, + const void *saddr, unsigned int len) { return -AX25_HEADER_LEN; } -int ax25_rebuild_header(struct sk_buff *skb) +netdev_tx_t ax25_ip_xmit(struct sk_buff *skb) { - return 1; + kfree_skb(skb); + return NETDEV_TX_OK; } - #endif const struct header_ops ax25_header_ops = { .create = ax25_hard_header, - .rebuild = 
ax25_rebuild_header, }; -EXPORT_SYMBOL(ax25_hard_header); -EXPORT_SYMBOL(ax25_rebuild_header); EXPORT_SYMBOL(ax25_header_ops); +EXPORT_SYMBOL(ax25_ip_xmit); diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 27649e8..090828c 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -592,15 +592,16 @@ static int batadv_write_buffer_text(struct batadv_priv *bat_priv, curr_gw = batadv_gw_get_selected_gw_node(bat_priv); - ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %u.%u/%u.%u MBit\n", - (curr_gw == gw_node ? "=>" : " "), - gw_node->orig_node->orig, - router_ifinfo->bat_iv.tq_avg, router->addr, - router->if_incoming->net_dev->name, - gw_node->bandwidth_down / 10, - gw_node->bandwidth_down % 10, - gw_node->bandwidth_up / 10, - gw_node->bandwidth_up % 10); + seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %u.%u/%u.%u MBit\n", + (curr_gw == gw_node ? "=>" : " "), + gw_node->orig_node->orig, + router_ifinfo->bat_iv.tq_avg, router->addr, + router->if_incoming->net_dev->name, + gw_node->bandwidth_down / 10, + gw_node->bandwidth_down % 10, + gw_node->bandwidth_up / 10, + gw_node->bandwidth_up % 10); + ret = seq_has_overflowed(seq) ? -1 : 0; if (curr_gw) batadv_gw_node_free_ref(curr_gw); diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig index 7de7463..b8c794b 100644 --- a/net/bluetooth/Kconfig +++ b/net/bluetooth/Kconfig @@ -91,4 +91,12 @@ config BT_SELFTEST_SMP Run test cases for SMP cryptographic functionality, including both legacy SMP as well as the Secure Connections features. +config BT_DEBUGFS + bool "Export Bluetooth internals in debugfs" + depends on BT && DEBUG_FS + default y + help + Provide extensive information about internal Bluetooth states + in debugfs. + source "drivers/bluetooth/Kconfig" diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile index 8e96e30..5d60879 100644 --- a/net/bluetooth/Makefile +++ b/net/bluetooth/Makefile @@ -13,8 +13,9 @@ bluetooth_6lowpan-y := 6lowpan.o bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \ - a2mp.o amp.o ecc.o hci_request.o hci_debugfs.o + a2mp.o amp.o ecc.o hci_request.o +bluetooth-$(CONFIG_BT_DEBUGFS) += hci_debugfs.o bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o subdir-ccflags-y += -D__CHECK_ENDIAN__ diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c index cedfbda..5a04eb1 100644 --- a/net/bluetooth/a2mp.c +++ b/net/bluetooth/a2mp.c @@ -19,9 +19,11 @@ #include "a2mp.h" #include "amp.h" +#define A2MP_FEAT_EXT 0x8000 + /* Global AMP Manager list */ -LIST_HEAD(amp_mgr_list); -DEFINE_MUTEX(amp_mgr_list_lock); +static LIST_HEAD(amp_mgr_list); +static DEFINE_MUTEX(amp_mgr_list_lock); /* A2MP build & send command helper functions */ static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data) @@ -43,7 +45,7 @@ static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data) return cmd; } -void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data) +static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data) { struct l2cap_chan *chan = mgr->a2mp_chan; struct a2mp_cmd *cmd; @@ -67,7 +69,7 @@ void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data) kfree(cmd); } -u8 __next_ident(struct amp_mgr *mgr) +static u8 __next_ident(struct amp_mgr *mgr) { if (++mgr->ident == 0) mgr->ident = 1; @@ -75,6 +77,23 @@ u8 __next_ident(struct amp_mgr *mgr) return mgr->ident; } +static struct amp_mgr 
*amp_mgr_lookup_by_state(u8 state) +{ + struct amp_mgr *mgr; + + mutex_lock(&amp_mgr_list_lock); + list_for_each_entry(mgr, &amp_mgr_list, list) { + if (test_and_clear_bit(state, &mgr->state)) { + amp_mgr_get(mgr); + mutex_unlock(&amp_mgr_list_lock); + return mgr; + } + } + mutex_unlock(&amp_mgr_list_lock); + + return NULL; +} + /* hci_dev_list shall be locked */ static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl) { @@ -860,23 +879,6 @@ struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn, return mgr->a2mp_chan; } -struct amp_mgr *amp_mgr_lookup_by_state(u8 state) -{ - struct amp_mgr *mgr; - - mutex_lock(&amp_mgr_list_lock); - list_for_each_entry(mgr, &amp_mgr_list, list) { - if (test_and_clear_bit(state, &mgr->state)) { - amp_mgr_get(mgr); - mutex_unlock(&amp_mgr_list_lock); - return mgr; - } - } - mutex_unlock(&amp_mgr_list_lock); - - return NULL; -} - void a2mp_send_getinfo_rsp(struct hci_dev *hdev) { struct amp_mgr *mgr; diff --git a/net/bluetooth/a2mp.h b/net/bluetooth/a2mp.h index 487b54c..296f665 100644 --- a/net/bluetooth/a2mp.h +++ b/net/bluetooth/a2mp.h @@ -17,8 +17,6 @@ #include <net/bluetooth/l2cap.h> -#define A2MP_FEAT_EXT 0x8000 - enum amp_mgr_state { READ_LOC_AMP_INFO, READ_LOC_AMP_ASSOC, @@ -131,16 +129,10 @@ struct a2mp_physlink_rsp { #define A2MP_STATUS_PHYS_LINK_EXISTS 0x05 #define A2MP_STATUS_SECURITY_VIOLATION 0x06 -extern struct list_head amp_mgr_list; -extern struct mutex amp_mgr_list_lock; - struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr); int amp_mgr_put(struct amp_mgr *mgr); -u8 __next_ident(struct amp_mgr *mgr); struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn, struct sk_buff *skb); -struct amp_mgr *amp_mgr_lookup_by_state(u8 state); -void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data); void a2mp_discover_amp(struct l2cap_chan *chan); void a2mp_send_getinfo_rsp(struct hci_dev *hdev); void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status); diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index ce22e0c..20a4698 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -210,8 +210,8 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock) } EXPORT_SYMBOL(bt_accept_dequeue); -int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, int flags) +int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + int flags) { int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; @@ -283,8 +283,8 @@ static long bt_sock_data_wait(struct sock *sk, long timeo) return timeo; } -int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, int flags) +int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg, + size_t size, int flags) { struct sock *sk = sock->sk; int err = 0; @@ -711,10 +711,9 @@ EXPORT_SYMBOL_GPL(bt_debugfs); static int __init bt_init(void) { - struct sk_buff *skb; int err; - BUILD_BUG_ON(sizeof(struct bt_skb_cb) > sizeof(skb->cb)); + sock_skb_cb_check_size(sizeof(struct bt_skb_cb)); BT_INFO("Core ver %s", VERSION); diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c index 4b488ec..6ceb5d3 100644 --- a/net/bluetooth/bnep/netdev.c +++ b/net/bluetooth/bnep/netdev.c @@ -218,7 +218,7 @@ static const struct net_device_ops bnep_netdev_ops = { void bnep_net_setup(struct net_device *dev) { - memset(dev->broadcast, 0xff, ETH_ALEN); + eth_broadcast_addr(dev->broadcast); dev->addr_len = ETH_ALEN; ether_setup(dev); diff --git
a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index c9b8fa5..91ebb9c 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -309,7 +309,7 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status) else hci_add_sco(sco, conn->handle); } else { - hci_proto_connect_cfm(sco, status); + hci_connect_cfm(sco, status); hci_conn_del(sco); } } @@ -618,7 +618,7 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status) mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type, status); - hci_proto_connect_cfm(conn, status); + hci_connect_cfm(conn, status); hci_conn_del(conn); @@ -733,6 +733,14 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, struct hci_request req; int err; + /* Let's make sure that le is enabled.*/ + if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { + if (lmp_le_capable(hdev)) + return ERR_PTR(-ECONNREFUSED); + + return ERR_PTR(-EOPNOTSUPP); + } + /* Some devices send ATT messages as soon as the physical link is * established. To be able to handle these ATT messages, the user- * space first establishes the connection and then starts the pairing @@ -856,8 +864,12 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, { struct hci_conn *acl; - if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) + if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { + if (lmp_bredr_capable(hdev)) + return ERR_PTR(-ECONNREFUSED); + return ERR_PTR(-EOPNOTSUPP); + } acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); if (!acl) { @@ -1139,7 +1151,7 @@ void hci_conn_hash_flush(struct hci_dev *hdev) list_for_each_entry_safe(c, n, &h->list, list) { c->state = BT_CLOSED; - hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM); + hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM); hci_conn_del(c); } } diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 3322d3f..bba4c34 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -51,7 +51,7 @@ DEFINE_RWLOCK(hci_dev_list_lock); /* HCI callback list */ LIST_HEAD(hci_cb_list); -DEFINE_RWLOCK(hci_cb_list_lock); +DEFINE_MUTEX(hci_cb_list_lock); /* HCI ID Numbering */ static DEFINE_IDA(hci_index_ida); @@ -390,7 +390,7 @@ static void bredr_init(struct hci_request *req) hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL); } -static void amp_init(struct hci_request *req) +static void amp_init1(struct hci_request *req) { req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED; @@ -400,9 +400,6 @@ static void amp_init(struct hci_request *req) /* Read Local Supported Commands */ hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); - /* Read Local Supported Features */ - hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); - /* Read Local AMP Info */ hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL); @@ -416,6 +413,16 @@ static void amp_init(struct hci_request *req) hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL); } +static void amp_init2(struct hci_request *req) +{ + /* Read Local Supported Features. Not all AMP controllers + * support this so it's placed conditionally in the second + * stage init. 
+ */ + if (req->hdev->commands[14] & 0x20) + hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); +} + static void hci_init1_req(struct hci_request *req, unsigned long opt) { struct hci_dev *hdev = req->hdev; @@ -432,7 +439,7 @@ static void hci_init1_req(struct hci_request *req, unsigned long opt) break; case HCI_AMP: - amp_init(req); + amp_init1(req); break; default: @@ -578,6 +585,9 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt) { struct hci_dev *hdev = req->hdev; + if (hdev->dev_type == HCI_AMP) + return amp_init2(req); + if (lmp_bredr_capable(hdev)) bredr_setup(req); else @@ -896,17 +906,17 @@ static int __hci_init(struct hci_dev *hdev) &dut_mode_fops); } + err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT); + if (err < 0) + return err; + /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode * BR/EDR/LE type controllers. AMP controllers only need the - * first stage init. + * first two stages of init. */ if (hdev->dev_type != HCI_BREDR) return 0; - err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT); - if (err < 0) - return err; - err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT); if (err < 0) return err; @@ -1591,6 +1601,12 @@ static int hci_dev_do_close(struct hci_dev *hdev) { BT_DBG("%s %p", hdev->name, hdev); + if (!test_bit(HCI_UNREGISTER, &hdev->dev_flags)) { + /* Execute vendor specific shutdown routine */ + if (hdev->shutdown) + hdev->shutdown(hdev); + } + cancel_delayed_work(&hdev->power_off); hci_req_cancel(hdev, ENODEV); @@ -3448,9 +3464,9 @@ int hci_register_cb(struct hci_cb *cb) { BT_DBG("%p name %s", cb, cb->name); - write_lock(&hci_cb_list_lock); - list_add(&cb->list, &hci_cb_list); - write_unlock(&hci_cb_list_lock); + mutex_lock(&hci_cb_list_lock); + list_add_tail(&cb->list, &hci_cb_list); + mutex_unlock(&hci_cb_list_lock); return 0; } @@ -3460,9 +3476,9 @@ int hci_unregister_cb(struct hci_cb *cb) { BT_DBG("%p name %s", cb, cb->name); - write_lock(&hci_cb_list_lock); + mutex_lock(&hci_cb_list_lock); list_del(&cb->list); - write_unlock(&hci_cb_list_lock); + mutex_unlock(&hci_cb_list_lock); return 0; } @@ -3517,7 +3533,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, /* Stand-alone HCI commands must be flagged as * single-command requests. */ - bt_cb(skb)->req.start = true; + bt_cb(skb)->req_start = 1; skb_queue_tail(&hdev->cmd_q, skb); queue_work(hdev->workqueue, &hdev->cmd_work); @@ -4195,7 +4211,7 @@ static bool hci_req_is_complete(struct hci_dev *hdev) if (!skb) return true; - return bt_cb(skb)->req.start; + return bt_cb(skb)->req_start; } static void hci_resend_last(struct hci_dev *hdev) @@ -4255,14 +4271,14 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status) * command queue (hdev->cmd_q). */ if (hdev->sent_cmd) { - req_complete = bt_cb(hdev->sent_cmd)->req.complete; + req_complete = bt_cb(hdev->sent_cmd)->req_complete; if (req_complete) { /* We must set the complete callback to NULL to * avoid calling the callback more than once if * this function gets called again. 
*/ - bt_cb(hdev->sent_cmd)->req.complete = NULL; + bt_cb(hdev->sent_cmd)->req_complete = NULL; goto call_complete; } @@ -4271,12 +4287,12 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status) /* Remove all pending commands belonging to this request */ spin_lock_irqsave(&hdev->cmd_q.lock, flags); while ((skb = __skb_dequeue(&hdev->cmd_q))) { - if (bt_cb(skb)->req.start) { + if (bt_cb(skb)->req_start) { __skb_queue_head(&hdev->cmd_q, skb); break; } - req_complete = bt_cb(skb)->req.complete; + req_complete = bt_cb(skb)->req_complete; kfree_skb(skb); } spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); diff --git a/net/bluetooth/hci_debugfs.h b/net/bluetooth/hci_debugfs.h index fb68efe..4444dc8 100644 --- a/net/bluetooth/hci_debugfs.h +++ b/net/bluetooth/hci_debugfs.h @@ -20,7 +20,29 @@ SOFTWARE IS DISCLAIMED. */ +#if IS_ENABLED(CONFIG_BT_DEBUGFS) + void hci_debugfs_create_common(struct hci_dev *hdev); void hci_debugfs_create_bredr(struct hci_dev *hdev); void hci_debugfs_create_le(struct hci_dev *hdev); void hci_debugfs_create_conn(struct hci_conn *conn); + +#else + +static inline void hci_debugfs_create_common(struct hci_dev *hdev) +{ +} + +static inline void hci_debugfs_create_bredr(struct hci_dev *hdev) +{ +} + +static inline void hci_debugfs_create_le(struct hci_dev *hdev) +{ +} + +static inline void hci_debugfs_create_conn(struct hci_conn *conn) +{ +} + +#endif diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index a3fb094..39653d4 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1537,7 +1537,7 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) if (conn && conn->state == BT_CONNECT) { if (status != 0x0c || conn->attempt > 2) { conn->state = BT_CLOSED; - hci_proto_connect_cfm(conn, status); + hci_connect_cfm(conn, status); hci_conn_del(conn); } else conn->state = BT_CONNECT2; @@ -1581,7 +1581,7 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) if (sco) { sco->state = BT_CLOSED; - hci_proto_connect_cfm(sco, status); + hci_connect_cfm(sco, status); hci_conn_del(sco); } } @@ -1608,7 +1608,7 @@ static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status) conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (conn) { if (conn->state == BT_CONFIG) { - hci_proto_connect_cfm(conn, status); + hci_connect_cfm(conn, status); hci_conn_drop(conn); } } @@ -1635,7 +1635,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status) conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (conn) { if (conn->state == BT_CONFIG) { - hci_proto_connect_cfm(conn, status); + hci_connect_cfm(conn, status); hci_conn_drop(conn); } } @@ -1811,7 +1811,7 @@ static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status) conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (conn) { if (conn->state == BT_CONFIG) { - hci_proto_connect_cfm(conn, status); + hci_connect_cfm(conn, status); hci_conn_drop(conn); } } @@ -1838,7 +1838,7 @@ static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status) conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (conn) { if (conn->state == BT_CONFIG) { - hci_proto_connect_cfm(conn, status); + hci_connect_cfm(conn, status); hci_conn_drop(conn); } } @@ -1873,7 +1873,7 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) if (sco) { sco->state = BT_CLOSED; - hci_proto_connect_cfm(sco, status); + hci_connect_cfm(sco, status); hci_conn_del(sco); } } @@ -2255,10 
+2255,10 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_sco_setup(conn, ev->status); if (ev->status) { - hci_proto_connect_cfm(conn, ev->status); + hci_connect_cfm(conn, ev->status); hci_conn_del(conn); } else if (ev->link_type != ACL_LINK) - hci_proto_connect_cfm(conn, ev->status); + hci_connect_cfm(conn, ev->status); unlock: hci_dev_unlock(hdev); @@ -2366,7 +2366,7 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) &cp); } else { conn->state = BT_CONNECT2; - hci_proto_connect_cfm(conn, 0); + hci_connect_cfm(conn, 0); } } @@ -2444,7 +2444,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) type = conn->type; - hci_proto_disconn_cfm(conn, ev->reason); + hci_disconn_cfm(conn, ev->reason); hci_conn_del(conn); /* Re-enable advertising if necessary, since it might @@ -2501,7 +2501,7 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) &cp); } else { conn->state = BT_CONNECTED; - hci_proto_connect_cfm(conn, ev->status); + hci_connect_cfm(conn, ev->status); hci_conn_drop(conn); } } else { @@ -2629,12 +2629,12 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && (!test_bit(HCI_CONN_AES_CCM, &conn->flags) || conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) { - hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE); + hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE); hci_conn_drop(conn); goto unlock; } - hci_proto_connect_cfm(conn, ev->status); + hci_connect_cfm(conn, ev->status); hci_conn_drop(conn); } else hci_encrypt_cfm(conn, ev->status, ev->encrypt); @@ -2707,7 +2707,7 @@ static void hci_remote_features_evt(struct hci_dev *hdev, if (!hci_outgoing_auth_needed(hdev, conn)) { conn->state = BT_CONNECTED; - hci_proto_connect_cfm(conn, ev->status); + hci_connect_cfm(conn, ev->status); hci_conn_drop(conn); } @@ -3106,7 +3106,7 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) cancel_delayed_work(&hdev->cmd_timer); if (ev->status || - (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event)) + (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req_event)) hci_req_cmd_complete(hdev, opcode, ev->status); if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) { @@ -3679,7 +3679,7 @@ static void hci_remote_ext_features_evt(struct hci_dev *hdev, if (!hci_outgoing_auth_needed(hdev, conn)) { conn->state = BT_CONNECTED; - hci_proto_connect_cfm(conn, ev->status); + hci_connect_cfm(conn, ev->status); hci_conn_drop(conn); } @@ -3738,7 +3738,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, break; } - hci_proto_connect_cfm(conn, ev->status); + hci_connect_cfm(conn, ev->status); if (ev->status) hci_conn_del(conn); @@ -3849,7 +3849,7 @@ static void hci_key_refresh_complete_evt(struct hci_dev *hdev, if (!ev->status) conn->state = BT_CONNECTED; - hci_proto_connect_cfm(conn, ev->status); + hci_connect_cfm(conn, ev->status); hci_conn_drop(conn); } else { hci_auth_cfm(conn, ev->status); @@ -4512,7 +4512,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_debugfs_create_conn(conn); hci_conn_add_sysfs(conn); - hci_proto_connect_cfm(conn, ev->status); + hci_connect_cfm(conn, ev->status); params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst, conn->dst_type); @@ -5039,7 +5039,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) skb_pull(skb, HCI_EVENT_HDR_SIZE); - if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) { + if 
(hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req_event == event) { struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data; u16 opcode = __le16_to_cpu(cmd_hdr->opcode); diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index b59f92c..f857e76 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -55,7 +55,7 @@ int hci_req_run(struct hci_request *req, hci_req_complete_t complete) return -ENODATA; skb = skb_peek_tail(&req->cmd_q); - bt_cb(skb)->req.complete = complete; + bt_cb(skb)->req_complete = complete; spin_lock_irqsave(&hdev->cmd_q.lock, flags); skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); @@ -116,9 +116,9 @@ void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, } if (skb_queue_empty(&req->cmd_q)) - bt_cb(skb)->req.start = true; + bt_cb(skb)->req_start = 1; - bt_cb(skb)->req.event = event; + bt_cb(skb)->req_event = event; skb_queue_tail(&req->cmd_q, skb); } diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 1d65c5b..cb4bc48 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -46,9 +46,9 @@ struct hci_pinfo { unsigned short channel; }; -static inline int hci_test_bit(int nr, void *addr) +static inline int hci_test_bit(int nr, const void *addr) { - return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31)); + return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31)); } /* Security filter */ @@ -183,12 +183,13 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) kfree_skb(skb_copy); } -/* Send frame to control socket */ -void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk) +/* Send frame to sockets with specific channel */ +void hci_send_to_channel(unsigned short channel, struct sk_buff *skb, + struct sock *skip_sk) { struct sock *sk; - BT_DBG("len %d", skb->len); + BT_DBG("channel %u len %d", channel, skb->len); read_lock(&hci_sk_list.lock); @@ -202,35 +203,7 @@ void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk) if (sk->sk_state != BT_BOUND) continue; - if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL) - continue; - - nskb = skb_clone(skb, GFP_ATOMIC); - if (!nskb) - continue; - - if (sock_queue_rcv_skb(sk, nskb)) - kfree_skb(nskb); - } - - read_unlock(&hci_sk_list.lock); -} - -static void queue_monitor_skb(struct sk_buff *skb) -{ - struct sock *sk; - - BT_DBG("len %d", skb->len); - - read_lock(&hci_sk_list.lock); - - sk_for_each(sk, &hci_sk_list.head) { - struct sk_buff *nskb; - - if (sk->sk_state != BT_BOUND) - continue; - - if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR) + if (hci_pi(sk)->channel != channel) continue; nskb = skb_clone(skb, GFP_ATOMIC); @@ -290,7 +263,7 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) hdr->index = cpu_to_le16(hdev->id); hdr->len = cpu_to_le16(skb->len); - queue_monitor_skb(skb_copy); + hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy, NULL); kfree_skb(skb_copy); } @@ -397,7 +370,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event) skb = create_monitor_event(hdev, event); if (skb) { - queue_monitor_skb(skb); + hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, NULL); kfree_skb(skb); } } @@ -826,8 +799,8 @@ static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, } } -static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, int flags) +static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + int flags) { int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; @@ -871,8 +844,8 
@@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock, return err ? : copied; } -static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) { struct sock *sk = sock->sk; struct hci_dev *hdev; @@ -965,7 +938,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, /* Stand-alone HCI commands must be flagged as * single-command requests. */ - bt_cb(skb)->req.start = true; + bt_cb(skb)->req_start = 1; skb_queue_tail(&hdev->cmd_q, skb); queue_work(hdev->workqueue, &hdev->cmd_work); diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 6ba33f9..91c6828 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -1244,6 +1244,13 @@ static void l2cap_move_done(struct l2cap_chan *chan) static void l2cap_chan_ready(struct l2cap_chan *chan) { + /* The channel may have already been flagged as connected in + * case of receiving data before the L2CAP info req/rsp + * procedure is complete. + */ + if (chan->state == BT_CONNECTED) + return; + /* This clears all conf flags, including CONF_NOT_COMPLETE */ chan->conf_state = 0; __clear_chan_timer(chan); @@ -6785,6 +6792,13 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid, BT_DBG("chan %p, len %d", chan, skb->len); + /* If we receive data on a fixed channel before the info req/rsp + * procdure is done simply assume that the channel is supported + * and mark it as ready. + */ + if (chan->chan_type == L2CAP_CHAN_FIXED) + l2cap_chan_ready(chan); + if (chan->state != BT_CONNECTED) goto drop; @@ -7238,13 +7252,16 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c, return NULL; } -void l2cap_connect_cfm(struct hci_conn *hcon, u8 status) +static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status) { struct hci_dev *hdev = hcon->hdev; struct l2cap_conn *conn; struct l2cap_chan *pchan; u8 dst_type; + if (hcon->type != ACL_LINK && hcon->type != LE_LINK) + return; + BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status); if (status) { @@ -7307,8 +7324,11 @@ int l2cap_disconn_ind(struct hci_conn *hcon) return conn->disc_reason; } -void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason) +static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason) { + if (hcon->type != ACL_LINK && hcon->type != LE_LINK) + return; + BT_DBG("hcon %p reason %d", hcon, reason); l2cap_conn_del(hcon, bt_to_errno(reason)); @@ -7331,13 +7351,13 @@ static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt) } } -int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) +static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) { struct l2cap_conn *conn = hcon->l2cap_data; struct l2cap_chan *chan; if (!conn) - return 0; + return; BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt); @@ -7420,8 +7440,6 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) } mutex_unlock(&conn->chan_lock); - - return 0; } int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) @@ -7529,6 +7547,13 @@ drop: return 0; } +static struct hci_cb l2cap_cb = { + .name = "L2CAP", + .connect_cfm = l2cap_connect_cfm, + .disconn_cfm = l2cap_disconn_cfm, + .security_cfm = l2cap_security_cfm, +}; + static int l2cap_debugfs_show(struct seq_file *f, void *p) { struct l2cap_chan *c; @@ -7570,6 +7595,8 @@ int __init l2cap_init(void) if (err < 0) return err; + 
hci_register_cb(&l2cap_cb); + if (IS_ERR_OR_NULL(bt_debugfs)) return 0; @@ -7587,6 +7614,7 @@ int __init l2cap_init(void) void l2cap_exit(void) { debugfs_remove(l2cap_debugfs); + hci_unregister_cb(&l2cap_cb); l2cap_cleanup_sockets(); } diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 60694f0..9070720e 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -944,8 +944,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, return err; } -static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int l2cap_sock_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) { struct sock *sk = sock->sk; struct l2cap_chan *chan = l2cap_pi(sk)->chan; @@ -976,8 +976,8 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, return err; } -static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, int flags) +static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg, + size_t len, int flags) { struct sock *sk = sock->sk; struct l2cap_pinfo *pi = l2cap_pi(sk); @@ -1004,9 +1004,9 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, release_sock(sk); if (sock->type == SOCK_STREAM) - err = bt_sock_stream_recvmsg(iocb, sock, msg, len, flags); + err = bt_sock_stream_recvmsg(sock, msg, len, flags); else - err = bt_sock_recvmsg(iocb, sock, msg, len, flags); + err = bt_sock_recvmsg(sock, msg, len, flags); if (pi->chan->mode != L2CAP_MODE_ERTM) return err; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 9ec5390..1e4635a 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -29,6 +29,7 @@ #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> +#include <net/bluetooth/hci_sock.h> #include <net/bluetooth/l2cap.h> #include <net/bluetooth/mgmt.h> @@ -242,7 +243,7 @@ static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len, /* Time stamp */ __net_timestamp(skb); - hci_send_to_control(skb, skip_sk); + hci_send_to_channel(HCI_CHANNEL_CONTROL, skb, skip_sk); kfree_skb(skb); return 0; @@ -2116,8 +2117,7 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) goto failed; } - if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) || - mgmt_pending_find(MGMT_OP_SET_HS, hdev)) { + if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) { err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, MGMT_STATUS_BUSY); goto failed; @@ -2176,6 +2176,12 @@ static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) hci_dev_lock(hdev); + if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) { + err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS, + MGMT_STATUS_BUSY); + goto unlock; + } + if (cp->val) { changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags); } else { @@ -3249,6 +3255,10 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, if (PTR_ERR(conn) == -EBUSY) status = MGMT_STATUS_BUSY; + else if (PTR_ERR(conn) == -EOPNOTSUPP) + status = MGMT_STATUS_NOT_SUPPORTED; + else if (PTR_ERR(conn) == -ECONNREFUSED) + status = MGMT_STATUS_REJECTED; else status = MGMT_STATUS_CONNECT_FAILED; @@ -6654,7 +6664,7 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk, bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr); ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type); - ev.key.master = csrk->master; + ev.key.type = csrk->type; memcpy(ev.key.val, csrk->val, sizeof(csrk->val)); mgmt_event(MGMT_EV_NEW_CSRK, 
hdev, &ev, sizeof(ev), NULL); diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 3c6d2c8..825e8fb 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -549,8 +549,8 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int * return 0; } -static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int rfcomm_sock_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) { struct sock *sk = sock->sk; struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; @@ -615,8 +615,8 @@ done: return sent; } -static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, int flags) +static int rfcomm_sock_recvmsg(struct socket *sock, struct msghdr *msg, + size_t size, int flags) { struct sock *sk = sock->sk; struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; @@ -627,7 +627,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock, return 0; } - len = bt_sock_stream_recvmsg(iocb, sock, msg, size, flags); + len = bt_sock_stream_recvmsg(sock, msg, size, flags); lock_sock(sk); if (!(flags & MSG_PEEK) && len > 0) diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 76321b5..54279ac 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -688,8 +688,8 @@ static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len return 0; } -static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) { struct sock *sk = sock->sk; int err; @@ -758,8 +758,8 @@ static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting) } } -static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, int flags) +static int sco_sock_recvmsg(struct socket *sock, struct msghdr *msg, + size_t len, int flags) { struct sock *sk = sock->sk; struct sco_pinfo *pi = sco_pi(sk); @@ -777,7 +777,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock, release_sock(sk); - return bt_sock_recvmsg(iocb, sock, msg, len, flags); + return bt_sock_recvmsg(sock, msg, len, flags); } static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) @@ -1083,9 +1083,13 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) return lm; } -void sco_connect_cfm(struct hci_conn *hcon, __u8 status) +static void sco_connect_cfm(struct hci_conn *hcon, __u8 status) { + if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) + return; + BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status); + if (!status) { struct sco_conn *conn; @@ -1096,8 +1100,11 @@ void sco_connect_cfm(struct hci_conn *hcon, __u8 status) sco_conn_del(hcon, bt_to_errno(status)); } -void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason) +static void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason) { + if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) + return; + BT_DBG("hcon %p reason %d", hcon, reason); sco_conn_del(hcon, bt_to_errno(reason)); @@ -1122,6 +1129,12 @@ drop: return 0; } +static struct hci_cb sco_cb = { + .name = "SCO", + .connect_cfm = sco_connect_cfm, + .disconn_cfm = sco_disconn_cfm, +}; + static int sco_debugfs_show(struct seq_file *f, void *p) { struct sock *sk; @@ -1203,6 +1216,8 @@ int __init sco_init(void) BT_INFO("SCO socket layer initialized"); + hci_register_cb(&sco_cb); + if 
(IS_ERR_OR_NULL(bt_debugfs)) return 0; @@ -1222,6 +1237,8 @@ void __exit sco_exit(void) debugfs_remove(sco_debugfs); + hci_unregister_cb(&sco_cb); + bt_sock_unregister(BTPROTO_SCO); proto_unregister(&sco_proto); diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index c09a821..c91c19b 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -1252,7 +1252,10 @@ static void smp_distribute_keys(struct smp_chan *smp) csrk = kzalloc(sizeof(*csrk), GFP_KERNEL); if (csrk) { - csrk->master = 0x00; + if (hcon->sec_level > BT_SECURITY_MEDIUM) + csrk->type = MGMT_CSRK_LOCAL_AUTHENTICATED; + else + csrk->type = MGMT_CSRK_LOCAL_UNAUTHENTICATED; memcpy(csrk->val, sign.csrk, sizeof(csrk->val)); } smp->slave_csrk = csrk; @@ -2352,7 +2355,10 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb) csrk = kzalloc(sizeof(*csrk), GFP_KERNEL); if (csrk) { - csrk->master = 0x01; + if (conn->hcon->sec_level > BT_SECURITY_MEDIUM) + csrk->type = MGMT_CSRK_REMOTE_AUTHENTICATED; + else + csrk->type = MGMT_CSRK_REMOTE_UNAUTHENTICATED; memcpy(csrk->val, rp->csrk, sizeof(csrk->val)); } smp->csrk = csrk; @@ -2951,24 +2957,14 @@ create_chan: l2cap_chan_set_defaults(chan); if (cid == L2CAP_CID_SMP) { - /* If usage of static address is forced or if the devices - * does not have a public address, then listen on the static - * address. - * - * In case BR/EDR has been disabled on a dual-mode controller - * and a static address has been configued, then listen on - * the static address instead. - */ - if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) || - !bacmp(&hdev->bdaddr, BDADDR_ANY) || - (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) && - bacmp(&hdev->static_addr, BDADDR_ANY))) { - bacpy(&chan->src, &hdev->static_addr); - chan->src_type = BDADDR_LE_RANDOM; - } else { - bacpy(&chan->src, &hdev->bdaddr); + u8 bdaddr_type; + + hci_copy_identity_address(hdev, &chan->src, &bdaddr_type); + + if (bdaddr_type == ADDR_LE_DEV_PUBLIC) chan->src_type = BDADDR_LE_PUBLIC; - } + else + chan->src_type = BDADDR_LE_RANDOM; } else { bacpy(&chan->src, &hdev->bdaddr); chan->src_type = BDADDR_BREDR; diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index ffd379d..294cbcc 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -36,13 +36,10 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) u16 vid = 0; rcu_read_lock(); -#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) - if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) { - br_nf_pre_routing_finish_bridge_slow(skb); + if (br_nf_prerouting_finish_bridge(skb)) { rcu_read_unlock(); return NETDEV_TX_OK; } -#endif u64_stats_update_begin(&brstats->syncp); brstats->tx_packets++; diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index f96933a..3304a54 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -37,9 +37,7 @@ static inline int should_deliver(const struct net_bridge_port *p, int br_dev_queue_push_xmit(struct sk_buff *skb) { - /* ip_fragment doesn't copy the MAC header */ - if (nf_bridge_maybe_copy_header(skb) || - !is_skb_forwardable(skb->dev, skb)) { + if (!is_skb_forwardable(skb->dev, skb)) { kfree_skb(skb); } else { skb_push(skb, ETH_HLEN); @@ -188,6 +186,9 @@ static void br_flood(struct net_bridge *br, struct sk_buff *skb, /* Do not flood to ports that enable proxy ARP */ if (p->flags & BR_PROXYARP) continue; + if ((p->flags & BR_PROXYARP_WIFI) && + BR_INPUT_SKB_CB(skb)->proxyarp_replied) + continue; prev = maybe_deliver(prev, p, skb, __packet_hook); if (IS_ERR(prev)) diff 
--git a/net/bridge/br_input.c b/net/bridge/br_input.c index e2aa7be..052c5eb 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -60,7 +60,7 @@ static int br_pass_frame_up(struct sk_buff *skb) } static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br, - u16 vid) + u16 vid, struct net_bridge_port *p) { struct net_device *dev = br->dev; struct neighbour *n; @@ -68,6 +68,8 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br, u8 *arpptr, *sha; __be32 sip, tip; + BR_INPUT_SKB_CB(skb)->proxyarp_replied = false; + if (dev->flags & IFF_NOARP) return; @@ -105,9 +107,12 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br, } f = __br_fdb_get(br, n->ha, vid); - if (f) + if (f && ((p->flags & BR_PROXYARP) || + (f->dst && (f->dst->flags & BR_PROXYARP_WIFI)))) { arp_send(ARPOP_REPLY, ETH_P_ARP, sip, skb->dev, tip, sha, n->ha, sha); + BR_INPUT_SKB_CB(skb)->proxyarp_replied = true; + } neigh_release(n); } @@ -153,12 +158,10 @@ int br_handle_frame_finish(struct sk_buff *skb) dst = NULL; - if (is_broadcast_ether_addr(dest)) { - if (IS_ENABLED(CONFIG_INET) && - p->flags & BR_PROXYARP && - skb->protocol == htons(ETH_P_ARP)) - br_do_proxy_arp(skb, br, vid); + if (IS_ENABLED(CONFIG_INET) && skb->protocol == htons(ETH_P_ARP)) + br_do_proxy_arp(skb, br, vid, p); + if (is_broadcast_ether_addr(dest)) { skb2 = skb; unicast = false; } else if (is_multicast_ether_addr(dest)) { diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 0ee453f..a8361c7 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -239,6 +239,14 @@ drop: return -1; } +static void nf_bridge_update_protocol(struct sk_buff *skb) +{ + if (skb->nf_bridge->mask & BRNF_8021Q) + skb->protocol = htons(ETH_P_8021Q); + else if (skb->nf_bridge->mask & BRNF_PPPoE) + skb->protocol = htons(ETH_P_PPP_SES); +} + /* PF_BRIDGE/PRE_ROUTING *********************************************/ /* Undo the changes made for ip6tables PREROUTING and continue the * bridge PRE_ROUTING hook. */ @@ -764,23 +772,53 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops, } #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) +static bool nf_bridge_copy_header(struct sk_buff *skb) +{ + int err; + unsigned int header_size; + + nf_bridge_update_protocol(skb); + header_size = ETH_HLEN + nf_bridge_encap_header_len(skb); + err = skb_cow_head(skb, header_size); + if (err) + return false; + + skb_copy_to_linear_data_offset(skb, -header_size, + skb->nf_bridge->data, header_size); + __skb_push(skb, nf_bridge_encap_header_len(skb)); + return true; +} + +static int br_nf_push_frag_xmit(struct sk_buff *skb) +{ + if (!nf_bridge_copy_header(skb)) { + kfree_skb(skb); + return 0; + } + + return br_dev_queue_push_xmit(skb); +} + static int br_nf_dev_queue_xmit(struct sk_buff *skb) { int ret; int frag_max_size; + unsigned int mtu_reserved; + if (skb_is_gso(skb) || skb->protocol != htons(ETH_P_IP)) + return br_dev_queue_push_xmit(skb); + + mtu_reserved = nf_bridge_mtu_reduction(skb); /* This is wrong! We should preserve the original fragment * boundaries by preserving frag_list rather than refragmenting. 
*/ - if (skb->protocol == htons(ETH_P_IP) && - skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu && - !skb_is_gso(skb)) { + if (skb->len + mtu_reserved > skb->dev->mtu) { frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size; if (br_parse_ip_options(skb)) /* Drop invalid packet */ return NF_DROP; IPCB(skb)->frag_max_size = frag_max_size; - ret = ip_fragment(skb, br_dev_queue_push_xmit); + ret = ip_fragment(skb, br_nf_push_frag_xmit); } else ret = br_dev_queue_push_xmit(skb); @@ -854,6 +892,38 @@ static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops, return NF_ACCEPT; } +/* This is called when br_netfilter has called into iptables/netfilter, + * and DNAT has taken place on a bridge-forwarded packet. + * + * neigh->output has created a new MAC header, with local br0 MAC + * as saddr. + * + * This restores the original MAC saddr of the bridged packet + * before invoking bridge forward logic to transmit the packet. + */ +static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb) +{ + struct nf_bridge_info *nf_bridge = skb->nf_bridge; + + skb_pull(skb, ETH_HLEN); + nf_bridge->mask &= ~BRNF_BRIDGED_DNAT; + + skb_copy_to_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), + skb->nf_bridge->data, ETH_HLEN-ETH_ALEN); + skb->dev = nf_bridge->physindev; + br_handle_frame_finish(skb); +} + +int br_nf_prerouting_finish_bridge(struct sk_buff *skb) +{ + if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) { + br_nf_pre_routing_finish_bridge_slow(skb); + return 1; + } + return 0; +} +EXPORT_SYMBOL_GPL(br_nf_prerouting_finish_bridge); + void br_netfilter_enable(void) { } diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 4fbcea0..8bc6b67 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -22,6 +22,85 @@ #include "br_private.h" #include "br_private_stp.h" +static int br_get_num_vlan_infos(const struct net_port_vlans *pv, + u32 filter_mask) +{ + u16 vid_range_start = 0, vid_range_end = 0; + u16 vid_range_flags = 0; + u16 pvid, vid, flags; + int num_vlans = 0; + + if (filter_mask & RTEXT_FILTER_BRVLAN) + return pv->num_vlans; + + if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) + return 0; + + /* Count number of vlan info's + */ + pvid = br_get_pvid(pv); + for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) { + flags = 0; + if (vid == pvid) + flags |= BRIDGE_VLAN_INFO_PVID; + + if (test_bit(vid, pv->untagged_bitmap)) + flags |= BRIDGE_VLAN_INFO_UNTAGGED; + + if (vid_range_start == 0) { + goto initvars; + } else if ((vid - vid_range_end) == 1 && + flags == vid_range_flags) { + vid_range_end = vid; + continue; + } else { + if ((vid_range_end - vid_range_start) > 0) + num_vlans += 2; + else + num_vlans += 1; + } +initvars: + vid_range_start = vid; + vid_range_end = vid; + vid_range_flags = flags; + } + + if (vid_range_start != 0) { + if ((vid_range_end - vid_range_start) > 0) + num_vlans += 2; + else + num_vlans += 1; + } + + return num_vlans; +} + +static size_t br_get_link_af_size_filtered(const struct net_device *dev, + u32 filter_mask) +{ + struct net_port_vlans *pv; + int num_vlan_infos; + + rcu_read_lock(); + if (br_port_exists(dev)) + pv = nbp_get_vlan_info(br_port_get_rcu(dev)); + else if (dev->priv_flags & IFF_EBRIDGE) + pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev)); + else + pv = NULL; + if (pv) + num_vlan_infos = br_get_num_vlan_infos(pv, filter_mask); + else + num_vlan_infos = 0; + rcu_read_unlock(); + + if (!num_vlan_infos) + return 0; + + /* Each VLAN is returned in bridge_vlan_info along with flags */ + return 
num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info)); +} + static inline size_t br_port_info_size(void) { return nla_total_size(1) /* IFLA_BRPORT_STATE */ @@ -36,7 +115,7 @@ static inline size_t br_port_info_size(void) + 0; } -static inline size_t br_nlmsg_size(void) +static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask) { return NLMSG_ALIGN(sizeof(struct ifinfomsg)) + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ @@ -45,7 +124,9 @@ static inline size_t br_nlmsg_size(void) + nla_total_size(4) /* IFLA_MTU */ + nla_total_size(4) /* IFLA_LINK */ + nla_total_size(1) /* IFLA_OPERSTATE */ - + nla_total_size(br_port_info_size()); /* IFLA_PROTINFO */ + + nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */ + + nla_total_size(br_get_link_af_size_filtered(dev, + filter_mask)); /* IFLA_AF_SPEC */ } static int br_port_fill_attrs(struct sk_buff *skb, @@ -62,7 +143,9 @@ static int br_port_fill_attrs(struct sk_buff *skb, nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)) || nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) || nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD)) || - nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP))) + nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) || + nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI, + !!(p->flags & BR_PROXYARP_WIFI))) return -EMSGSIZE; return 0; @@ -280,6 +363,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port) struct net *net; struct sk_buff *skb; int err = -ENOBUFS; + u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED; if (!port) return; @@ -288,11 +372,11 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port) br_debug(port->br, "port %u(%s) event %d\n", (unsigned int)port->port_no, port->dev->name, event); - skb = nlmsg_new(br_nlmsg_size(), GFP_ATOMIC); + skb = nlmsg_new(br_nlmsg_size(port->dev, filter), GFP_ATOMIC); if (skb == NULL) goto errout; - err = br_fill_ifinfo(skb, port, 0, 0, event, 0, 0, port->dev); + err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, port->dev); if (err < 0) { /* -EMSGSIZE implies BUG in br_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); @@ -471,6 +555,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING); br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD); br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP); + br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI); if (tb[IFLA_BRPORT_COST]) { err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST])); diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c index 387cb3b..20cbb72 100644 --- a/net/bridge/br_nf_core.c +++ b/net/bridge/br_nf_core.c @@ -54,7 +54,6 @@ static unsigned int fake_mtu(const struct dst_entry *dst) static struct dst_ops fake_dst_ops = { .family = AF_INET, - .protocol = cpu_to_be16(ETH_P_IP), .update_pmtu = fake_update_pmtu, .redirect = fake_redirect, .cow_metrics = fake_cow_metrics, diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index de09199..f0a0438 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -305,6 +305,7 @@ struct br_input_skb_cb { #endif u16 frag_max_size; + bool proxyarp_replied; #ifdef CONFIG_BRIDGE_VLAN_FILTERING bool vlan_filtered; @@ -764,10 +765,15 @@ static inline int br_vlan_enabled(struct net_bridge *br) /* br_netfilter.c */ #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) +int br_nf_prerouting_finish_bridge(struct sk_buff *skb); int 
br_nf_core_init(void); void br_nf_core_fini(void); void br_netfilter_rtable_init(struct net_bridge *); #else +static inline int br_nf_prerouting_finish_bridge(struct sk_buff *skb) +{ + return 0; +} static inline int br_nf_core_init(void) { return 0; } static inline void br_nf_core_fini(void) {} #define br_netfilter_rtable_init(x) diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index 2de5d91..4905845 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c @@ -171,6 +171,7 @@ BRPORT_ATTR_FLAG(root_block, BR_ROOT_BLOCK); BRPORT_ATTR_FLAG(learning, BR_LEARNING); BRPORT_ATTR_FLAG(unicast_flood, BR_FLOOD); BRPORT_ATTR_FLAG(proxyarp, BR_PROXYARP); +BRPORT_ATTR_FLAG(proxyarp_wifi, BR_PROXYARP_WIFI); #ifdef CONFIG_BRIDGE_IGMP_SNOOPING static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf) @@ -215,6 +216,7 @@ static const struct brport_attribute *brport_attrs[] = { &brport_attr_multicast_fast_leave, #endif &brport_attr_proxyarp, + &brport_attr_proxyarp_wifi, NULL }; diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c index 3244aea..5c6c965 100644 --- a/net/bridge/netfilter/nft_reject_bridge.c +++ b/net/bridge/netfilter/nft_reject_bridge.c @@ -21,6 +21,7 @@ #include <net/ip.h> #include <net/ip6_checksum.h> #include <linux/netfilter_bridge.h> +#include <linux/netfilter_ipv6.h> #include "../br_private.h" static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb, @@ -36,7 +37,12 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb, skb_pull(nskb, ETH_HLEN); } -static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook) +/* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT) + * or the bridge port (NF_BRIDGE PREROUTING). + */ +static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, + const struct net_device *dev, + int hook) { struct sk_buff *nskb; struct iphdr *niph; @@ -65,11 +71,12 @@ static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook) nft_reject_br_push_etherhdr(oldskb, nskb); - br_deliver(br_port_get_rcu(oldskb->dev), nskb); + br_deliver(br_port_get_rcu(dev), nskb); } -static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook, - u8 code) +static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, + const struct net_device *dev, + int hook, u8 code) { struct sk_buff *nskb; struct iphdr *niph; @@ -77,8 +84,9 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook, unsigned int len; void *payload; __wsum csum; + u8 proto; - if (!nft_bridge_iphdr_validate(oldskb)) + if (oldskb->csum_bad || !nft_bridge_iphdr_validate(oldskb)) return; /* IP header checks: fragment. 
*/ @@ -91,7 +99,17 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook, if (!pskb_may_pull(oldskb, len)) return; - if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), 0)) + if (pskb_trim_rcsum(oldskb, htons(ip_hdr(oldskb)->tot_len))) + return; + + if (ip_hdr(oldskb)->protocol == IPPROTO_TCP || + ip_hdr(oldskb)->protocol == IPPROTO_UDP) + proto = ip_hdr(oldskb)->protocol; + else + proto = 0; + + if (!skb_csum_unnecessary(oldskb) && + nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto)) return; nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) + @@ -120,11 +138,13 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook, nft_reject_br_push_etherhdr(oldskb, nskb); - br_deliver(br_port_get_rcu(oldskb->dev), nskb); + br_deliver(br_port_get_rcu(dev), nskb); } static void nft_reject_br_send_v6_tcp_reset(struct net *net, - struct sk_buff *oldskb, int hook) + struct sk_buff *oldskb, + const struct net_device *dev, + int hook) { struct sk_buff *nskb; const struct tcphdr *oth; @@ -152,12 +172,37 @@ static void nft_reject_br_send_v6_tcp_reset(struct net *net, nft_reject_br_push_etherhdr(oldskb, nskb); - br_deliver(br_port_get_rcu(oldskb->dev), nskb); + br_deliver(br_port_get_rcu(dev), nskb); +} + +static bool reject6_br_csum_ok(struct sk_buff *skb, int hook) +{ + const struct ipv6hdr *ip6h = ipv6_hdr(skb); + int thoff; + __be16 fo; + u8 proto = ip6h->nexthdr; + + if (skb->csum_bad) + return false; + + if (skb_csum_unnecessary(skb)) + return true; + + if (ip6h->payload_len && + pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h))) + return false; + + thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo); + if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0) + return false; + + return nf_ip6_checksum(skb, hook, thoff, proto) == 0; } static void nft_reject_br_send_v6_unreach(struct net *net, - struct sk_buff *oldskb, int hook, - u8 code) + struct sk_buff *oldskb, + const struct net_device *dev, + int hook, u8 code) { struct sk_buff *nskb; struct ipv6hdr *nip6h; @@ -176,6 +221,9 @@ static void nft_reject_br_send_v6_unreach(struct net *net, if (!pskb_may_pull(oldskb, len)) return; + if (!reject6_br_csum_ok(oldskb, hook)) + return; + nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) + LL_MAX_HEADER + len, GFP_ATOMIC); if (!nskb) @@ -205,7 +253,7 @@ static void nft_reject_br_send_v6_unreach(struct net *net, nft_reject_br_push_etherhdr(oldskb, nskb); - br_deliver(br_port_get_rcu(oldskb->dev), nskb); + br_deliver(br_port_get_rcu(dev), nskb); } static void nft_reject_bridge_eval(const struct nft_expr *expr, @@ -224,16 +272,16 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr, case htons(ETH_P_IP): switch (priv->type) { case NFT_REJECT_ICMP_UNREACH: - nft_reject_br_send_v4_unreach(pkt->skb, + nft_reject_br_send_v4_unreach(pkt->skb, pkt->in, pkt->ops->hooknum, priv->icmp_code); break; case NFT_REJECT_TCP_RST: - nft_reject_br_send_v4_tcp_reset(pkt->skb, + nft_reject_br_send_v4_tcp_reset(pkt->skb, pkt->in, pkt->ops->hooknum); break; case NFT_REJECT_ICMPX_UNREACH: - nft_reject_br_send_v4_unreach(pkt->skb, + nft_reject_br_send_v4_unreach(pkt->skb, pkt->in, pkt->ops->hooknum, nft_reject_icmp_code(priv->icmp_code)); break; @@ -242,16 +290,16 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr, case htons(ETH_P_IPV6): switch (priv->type) { case NFT_REJECT_ICMP_UNREACH: - nft_reject_br_send_v6_unreach(net, pkt->skb, + nft_reject_br_send_v6_unreach(net, pkt->skb, pkt->in, 
pkt->ops->hooknum, priv->icmp_code); break; case NFT_REJECT_TCP_RST: - nft_reject_br_send_v6_tcp_reset(net, pkt->skb, + nft_reject_br_send_v6_tcp_reset(net, pkt->skb, pkt->in, pkt->ops->hooknum); break; case NFT_REJECT_ICMPX_UNREACH: - nft_reject_br_send_v6_unreach(net, pkt->skb, + nft_reject_br_send_v6_unreach(net, pkt->skb, pkt->in, pkt->ops->hooknum, nft_reject_icmpv6_code(priv->icmp_code)); break; diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 769b185..b6bf51b 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -271,8 +271,8 @@ static void caif_check_flow_release(struct sock *sk) * Copied from unix_dgram_recvmsg, but removed credit checks, * changed locking, address handling and added MSG_TRUNC. */ -static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t len, int flags) +static int caif_seqpkt_recvmsg(struct socket *sock, struct msghdr *m, + size_t len, int flags) { struct sock *sk = sock->sk; @@ -343,9 +343,8 @@ static long caif_stream_data_wait(struct sock *sk, long timeo) * Copied from unix_stream_recvmsg, but removed credit checks, * changed locking calls, changed address handling. */ -static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, - int flags) +static int caif_stream_recvmsg(struct socket *sock, struct msghdr *msg, + size_t size, int flags) { struct sock *sk = sock->sk; int copied = 0; @@ -511,8 +510,8 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk, } /* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */ -static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int caif_seqpkt_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) { struct sock *sk = sock->sk; struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); @@ -586,8 +585,8 @@ err: * Changed removed permission handling and added waiting for flow on * and other minor adaptations. */ -static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int caif_stream_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) { struct sock *sk = sock->sk; struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); diff --git a/net/can/bcm.c b/net/can/bcm.c index ee9ffd9..b523453 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -328,7 +328,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head, * containing the interface index. 
*/ - BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can)); + sock_skb_cb_check_size(sizeof(struct sockaddr_can)); addr = (struct sockaddr_can *)skb->cb; memset(addr, 0, sizeof(*addr)); addr->can_family = AF_CAN; @@ -1231,8 +1231,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk) /* * bcm_sendmsg - process BCM commands (opcodes) from the userspace */ -static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size) +static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) { struct sock *sk = sock->sk; struct bcm_sock *bo = bcm_sk(sk); @@ -1535,8 +1534,8 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, return 0; } -static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, int flags) +static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, + int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; diff --git a/net/can/raw.c b/net/can/raw.c index 00c13ef..63ffdb0 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -95,8 +95,8 @@ struct raw_sock { */ static inline unsigned int *raw_flags(struct sk_buff *skb) { - BUILD_BUG_ON(sizeof(skb->cb) <= (sizeof(struct sockaddr_can) + - sizeof(unsigned int))); + sock_skb_cb_check_size(sizeof(struct sockaddr_can) + + sizeof(unsigned int)); /* return pointer after struct sockaddr_can */ return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]); @@ -135,7 +135,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data) * containing the interface index. */ - BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can)); + sock_skb_cb_check_size(sizeof(struct sockaddr_can)); addr = (struct sockaddr_can *)skb->cb; memset(addr, 0, sizeof(*addr)); addr->can_family = AF_CAN; @@ -658,8 +658,7 @@ static int raw_getsockopt(struct socket *sock, int level, int optname, return 0; } -static int raw_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size) +static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) { struct sock *sk = sock->sk; struct raw_sock *ro = raw_sk(sk); @@ -728,8 +727,8 @@ send_failed: return err; } -static int raw_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, int flags) +static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, + int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; diff --git a/net/compat.c b/net/compat.c index 94d3d5e..4784431 100644 --- a/net/compat.c +++ b/net/compat.c @@ -508,25 +508,25 @@ COMPAT_SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname, struct compat_group_req { __u32 gr_interface; struct __kernel_sockaddr_storage gr_group - __attribute__ ((aligned(4))); + __aligned(4); } __packed; struct compat_group_source_req { __u32 gsr_interface; struct __kernel_sockaddr_storage gsr_group - __attribute__ ((aligned(4))); + __aligned(4); struct __kernel_sockaddr_storage gsr_source - __attribute__ ((aligned(4))); + __aligned(4); } __packed; struct compat_group_filter { __u32 gf_interface; struct __kernel_sockaddr_storage gf_group - __attribute__ ((aligned(4))); + __aligned(4); __u32 gf_fmode; __u32 gf_numsrc; struct __kernel_sockaddr_storage gf_slist[1] - __attribute__ ((aligned(4))); + __aligned(4); } __packed; #define __COMPAT_GF0_SIZE (sizeof(struct compat_group_filter) - \ diff --git a/net/core/ethtool.c b/net/core/ethtool.c index aa378ec..1d00b89 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -790,7 +790,7 @@ 
static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, if (ops->get_rxfh_indir_size) dev_indir_size = ops->get_rxfh_indir_size(dev); if (ops->get_rxfh_key_size) - dev_key_size = dev->ethtool_ops->get_rxfh_key_size(dev); + dev_key_size = ops->get_rxfh_key_size(dev); if (copy_from_user(&rxfh, useraddr, sizeof(rxfh))) return -EFAULT; diff --git a/net/core/filter.c b/net/core/filter.c index f6bdc2b..7a4eb70 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -814,7 +814,7 @@ static void bpf_release_orig_filter(struct bpf_prog *fp) static void __bpf_prog_release(struct bpf_prog *prog) { - if (prog->aux->prog_type == BPF_PROG_TYPE_SOCKET_FILTER) { + if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) { bpf_prog_put(prog); } else { bpf_release_orig_filter(prog); @@ -1019,6 +1019,32 @@ void bpf_prog_destroy(struct bpf_prog *fp) } EXPORT_SYMBOL_GPL(bpf_prog_destroy); +static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk) +{ + struct sk_filter *fp, *old_fp; + + fp = kmalloc(sizeof(*fp), GFP_KERNEL); + if (!fp) + return -ENOMEM; + + fp->prog = prog; + atomic_set(&fp->refcnt, 0); + + if (!sk_filter_charge(sk, fp)) { + kfree(fp); + return -ENOMEM; + } + + old_fp = rcu_dereference_protected(sk->sk_filter, + sock_owned_by_user(sk)); + rcu_assign_pointer(sk->sk_filter, fp); + + if (old_fp) + sk_filter_uncharge(sk, old_fp); + + return 0; +} + /** * sk_attach_filter - attach a socket filter * @fprog: the filter program @@ -1031,7 +1057,6 @@ EXPORT_SYMBOL_GPL(bpf_prog_destroy); */ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) { - struct sk_filter *fp, *old_fp; unsigned int fsize = bpf_classic_proglen(fprog); unsigned int bpf_fsize = bpf_prog_size(fprog->len); struct bpf_prog *prog; @@ -1068,36 +1093,20 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) if (IS_ERR(prog)) return PTR_ERR(prog); - fp = kmalloc(sizeof(*fp), GFP_KERNEL); - if (!fp) { + err = __sk_attach_prog(prog, sk); + if (err < 0) { __bpf_prog_release(prog); - return -ENOMEM; - } - fp->prog = prog; - - atomic_set(&fp->refcnt, 0); - - if (!sk_filter_charge(sk, fp)) { - __sk_filter_release(fp); - return -ENOMEM; + return err; } - old_fp = rcu_dereference_protected(sk->sk_filter, - sock_owned_by_user(sk)); - rcu_assign_pointer(sk->sk_filter, fp); - - if (old_fp) - sk_filter_uncharge(sk, old_fp); - return 0; } EXPORT_SYMBOL_GPL(sk_attach_filter); -#ifdef CONFIG_BPF_SYSCALL int sk_attach_bpf(u32 ufd, struct sock *sk) { - struct sk_filter *fp, *old_fp; struct bpf_prog *prog; + int err; if (sock_flag(sk, SOCK_FILTER_LOCKED)) return -EPERM; @@ -1106,40 +1115,22 @@ int sk_attach_bpf(u32 ufd, struct sock *sk) if (IS_ERR(prog)) return PTR_ERR(prog); - if (prog->aux->prog_type != BPF_PROG_TYPE_SOCKET_FILTER) { - /* valid fd, but invalid program type */ + if (prog->type != BPF_PROG_TYPE_SOCKET_FILTER) { bpf_prog_put(prog); return -EINVAL; } - fp = kmalloc(sizeof(*fp), GFP_KERNEL); - if (!fp) { + err = __sk_attach_prog(prog, sk); + if (err < 0) { bpf_prog_put(prog); - return -ENOMEM; - } - fp->prog = prog; - - atomic_set(&fp->refcnt, 0); - - if (!sk_filter_charge(sk, fp)) { - __sk_filter_release(fp); - return -ENOMEM; + return err; } - old_fp = rcu_dereference_protected(sk->sk_filter, - sock_owned_by_user(sk)); - rcu_assign_pointer(sk->sk_filter, fp); - - if (old_fp) - sk_filter_uncharge(sk, old_fp); - return 0; } -/* allow socket filters to call - * bpf_map_lookup_elem(), bpf_map_update_elem(), bpf_map_delete_elem() - */ -static const struct bpf_func_proto *sock_filter_func_proto(enum 
bpf_func_id func_id) +static const struct bpf_func_proto * +sk_filter_func_proto(enum bpf_func_id func_id) { switch (func_id) { case BPF_FUNC_map_lookup_elem: @@ -1153,34 +1144,37 @@ static const struct bpf_func_proto *sock_filter_func_proto(enum bpf_func_id func } } -static bool sock_filter_is_valid_access(int off, int size, enum bpf_access_type type) +static bool sk_filter_is_valid_access(int off, int size, + enum bpf_access_type type) { /* skb fields cannot be accessed yet */ return false; } -static struct bpf_verifier_ops sock_filter_ops = { - .get_func_proto = sock_filter_func_proto, - .is_valid_access = sock_filter_is_valid_access, +static const struct bpf_verifier_ops sk_filter_ops = { + .get_func_proto = sk_filter_func_proto, + .is_valid_access = sk_filter_is_valid_access, }; -static struct bpf_prog_type_list tl = { - .ops = &sock_filter_ops, +static struct bpf_prog_type_list sk_filter_type __read_mostly = { + .ops = &sk_filter_ops, .type = BPF_PROG_TYPE_SOCKET_FILTER, }; -static int __init register_sock_filter_ops(void) +static struct bpf_prog_type_list sched_cls_type __read_mostly = { + .ops = &sk_filter_ops, + .type = BPF_PROG_TYPE_SCHED_CLS, +}; + +static int __init register_sk_filter_ops(void) { - bpf_register_prog_type(&tl); + bpf_register_prog_type(&sk_filter_type); + bpf_register_prog_type(&sched_cls_type); + return 0; } -late_initcall(register_sock_filter_ops); -#else -int sk_attach_bpf(u32 ufd, struct sock *sk) -{ - return -EOPNOTSUPP; -} -#endif +late_initcall(register_sk_filter_ops); + int sk_detach_filter(struct sock *sk) { int ret = -ENOENT; diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 70fe9e1..ad07990 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -397,25 +397,15 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev) { struct neighbour *n; - int key_len = tbl->key_len; - u32 hash_val; - struct neigh_hash_table *nht; NEIGH_CACHE_STAT_INC(tbl, lookups); rcu_read_lock_bh(); - nht = rcu_dereference_bh(tbl->nht); - hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift); - - for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]); - n != NULL; - n = rcu_dereference_bh(n->next)) { - if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) { - if (!atomic_inc_not_zero(&n->refcnt)) - n = NULL; - NEIGH_CACHE_STAT_INC(tbl, hits); - break; - } + n = __neigh_lookup_noref(tbl, pkey, dev); + if (n) { + if (!atomic_inc_not_zero(&n->refcnt)) + n = NULL; + NEIGH_CACHE_STAT_INC(tbl, hits); } rcu_read_unlock_bh(); @@ -1263,10 +1253,10 @@ struct neighbour *neigh_event_ns(struct neigh_table *tbl, EXPORT_SYMBOL(neigh_event_ns); /* called with read_lock_bh(&n->lock); */ -static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst) +static void neigh_hh_init(struct neighbour *n) { - struct net_device *dev = dst->dev; - __be16 prot = dst->ops->protocol; + struct net_device *dev = n->dev; + __be16 prot = n->tbl->protocol; struct hh_cache *hh = &n->hh; write_lock_bh(&n->lock); @@ -1280,43 +1270,19 @@ static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst) write_unlock_bh(&n->lock); } -/* This function can be used in contexts, where only old dev_queue_xmit - * worked, f.e. if you want to override normal output path (eql, shaper), - * but resolution is not made yet. 
- */ - -int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb) -{ - struct net_device *dev = skb->dev; - - __skb_pull(skb, skb_network_offset(skb)); - - if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, - skb->len) < 0 && - dev_rebuild_header(skb)) - return 0; - - return dev_queue_xmit(skb); -} -EXPORT_SYMBOL(neigh_compat_output); - /* Slow and careful. */ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb) { - struct dst_entry *dst = skb_dst(skb); int rc = 0; - if (!dst) - goto discard; - if (!neigh_event_send(neigh, skb)) { int err; struct net_device *dev = neigh->dev; unsigned int seq; if (dev->header_ops->cache && !neigh->hh.hh_len) - neigh_hh_init(neigh, dst); + neigh_hh_init(neigh); do { __skb_pull(skb, skb_network_offset(skb)); @@ -1332,8 +1298,6 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb) } out: return rc; -discard: - neigh_dbg(1, "%s: dst=%p neigh=%p\n", __func__, dst, neigh); out_kfree_skb: rc = -EINVAL; kfree_skb(skb); @@ -2427,6 +2391,40 @@ void __neigh_for_each_release(struct neigh_table *tbl, } EXPORT_SYMBOL(__neigh_for_each_release); +int neigh_xmit(int index, struct net_device *dev, + const void *addr, struct sk_buff *skb) +{ + int err = -EAFNOSUPPORT; + if (likely(index < NEIGH_NR_TABLES)) { + struct neigh_table *tbl; + struct neighbour *neigh; + + tbl = neigh_tables[index]; + if (!tbl) + goto out; + neigh = __neigh_lookup_noref(tbl, addr, dev); + if (!neigh) + neigh = __neigh_create(tbl, addr, dev, false); + err = PTR_ERR(neigh); + if (IS_ERR(neigh)) + goto out_kfree_skb; + err = neigh->output(neigh, skb); + } + else if (index == NEIGH_LINK_TABLE) { + err = dev_hard_header(skb, dev, ntohs(skb->protocol), + addr, NULL, skb->len); + if (err < 0) + goto out_kfree_skb; + err = dev_queue_xmit(skb); + } +out: + return err; +out_kfree_skb: + kfree_skb(skb); + goto out; +} +EXPORT_SYMBOL(neigh_xmit); + #ifdef CONFIG_PROC_FS static struct neighbour *neigh_get_first(struct seq_file *seq) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index f805078..47c3241 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2865,7 +2865,6 @@ static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) * @from: search offset * @to: search limit * @config: textsearch configuration - * @state: uninitialized textsearch state variable * * Finds a pattern in the skb data according to the specified * textsearch configuration. Use textsearch_next() to retrieve @@ -2873,17 +2872,17 @@ static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) * to the first occurrence or UINT_MAX if no match was found. */ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, - unsigned int to, struct ts_config *config, - struct ts_state *state) + unsigned int to, struct ts_config *config) { + struct ts_state state; unsigned int ret; config->get_next_block = skb_ts_get_next_block; config->finish = skb_ts_finish; - skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state)); + skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); - ret = textsearch_find(config, state); + ret = textsearch_find(config, &state); return (ret <= to - from ? 
ret : UINT_MAX); } EXPORT_SYMBOL(skb_find_text); @@ -3207,10 +3206,9 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); unsigned int offset = skb_gro_offset(skb); unsigned int headlen = skb_headlen(skb); - struct sk_buff *nskb, *lp, *p = *head; unsigned int len = skb_gro_len(skb); + struct sk_buff *lp, *p = *head; unsigned int delta_truesize; - unsigned int headroom; if (unlikely(p->len + len >= 65536)) return -E2BIG; @@ -3277,48 +3275,6 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; goto done; } - /* switch back to head shinfo */ - pinfo = skb_shinfo(p); - - if (pinfo->frag_list) - goto merge; - if (skb_gro_len(p) != pinfo->gso_size) - return -E2BIG; - - headroom = skb_headroom(p); - nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC); - if (unlikely(!nskb)) - return -ENOMEM; - - __copy_skb_header(nskb, p); - nskb->mac_len = p->mac_len; - - skb_reserve(nskb, headroom); - __skb_put(nskb, skb_gro_offset(p)); - - skb_set_mac_header(nskb, skb_mac_header(p) - p->data); - skb_set_network_header(nskb, skb_network_offset(p)); - skb_set_transport_header(nskb, skb_transport_offset(p)); - - __skb_pull(p, skb_gro_offset(p)); - memcpy(skb_mac_header(nskb), skb_mac_header(p), - p->data - skb_mac_header(p)); - - skb_shinfo(nskb)->frag_list = p; - skb_shinfo(nskb)->gso_size = pinfo->gso_size; - pinfo->gso_size = 0; - __skb_header_release(p); - NAPI_GRO_CB(nskb)->last = p; - - nskb->data_len += p->len; - nskb->truesize += p->truesize; - nskb->len += p->len; - - *head = nskb; - nskb->next = p->next; - p->next = NULL; - - p = nskb; merge: delta_truesize = skb->truesize; diff --git a/net/core/sock.c b/net/core/sock.c index 93c8b20..726e1f9 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -466,7 +466,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) skb_dst_force(skb); spin_lock_irqsave(&list->lock, flags); - skb->dropcount = atomic_read(&sk->sk_drops); + sock_skb_set_dropcount(sk, skb); __skb_queue_tail(list, skb); spin_unlock_irqrestore(&list->lock, flags); @@ -2163,15 +2163,14 @@ int sock_no_getsockopt(struct socket *sock, int level, int optname, } EXPORT_SYMBOL(sock_no_getsockopt); -int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, - size_t len) +int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_sendmsg); -int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, - size_t len, int flags) +int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len, + int flags) { return -EOPNOTSUPP; } @@ -2543,14 +2542,14 @@ int compat_sock_common_getsockopt(struct socket *sock, int level, int optname, EXPORT_SYMBOL(compat_sock_common_getsockopt); #endif -int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, int flags) +int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, + int flags) { struct sock *sk = sock->sk; int addr_len = 0; int err; - err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT, + err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT, flags & ~MSG_DONTWAIT, &addr_len); if (err >= 0) msg->msg_namelen = addr_len; diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 93ea801..5b21f6f 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -177,6 +177,8 @@ static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX 
+ 1] = { [DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)}, [DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED}, [DCB_ATTR_IEEE_MAXRATE] = {.len = sizeof(struct ieee_maxrate)}, + [DCB_ATTR_IEEE_QCN] = {.len = sizeof(struct ieee_qcn)}, + [DCB_ATTR_IEEE_QCN_STATS] = {.len = sizeof(struct ieee_qcn_stats)}, }; static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = { @@ -1030,7 +1032,7 @@ nla_put_failure: return err; } -/* Handle IEEE 802.1Qaz GET commands. */ +/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb GET commands. */ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) { struct nlattr *ieee, *app; @@ -1067,6 +1069,32 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) } } + if (ops->ieee_getqcn) { + struct ieee_qcn qcn; + + memset(&qcn, 0, sizeof(qcn)); + err = ops->ieee_getqcn(netdev, &qcn); + if (!err) { + err = nla_put(skb, DCB_ATTR_IEEE_QCN, + sizeof(qcn), &qcn); + if (err) + return -EMSGSIZE; + } + } + + if (ops->ieee_getqcnstats) { + struct ieee_qcn_stats qcn_stats; + + memset(&qcn_stats, 0, sizeof(qcn_stats)); + err = ops->ieee_getqcnstats(netdev, &qcn_stats); + if (!err) { + err = nla_put(skb, DCB_ATTR_IEEE_QCN_STATS, + sizeof(qcn_stats), &qcn_stats); + if (err) + return -EMSGSIZE; + } + } + if (ops->ieee_getpfc) { struct ieee_pfc pfc; memset(&pfc, 0, sizeof(pfc)); @@ -1379,8 +1407,9 @@ int dcbnl_cee_notify(struct net_device *dev, int event, int cmd, } EXPORT_SYMBOL(dcbnl_cee_notify); -/* Handle IEEE 802.1Qaz SET commands. If any requested operation can not - * be completed the entire msg is aborted and error value is returned. +/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb SET commands. + * If any requested operation can not be completed + * the entire msg is aborted and error value is returned. * No attempt is made to reconcile the case where only part of the * cmd can be completed. */ @@ -1417,6 +1446,15 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh, goto err; } + if (ieee[DCB_ATTR_IEEE_QCN] && ops->ieee_setqcn) { + struct ieee_qcn *qcn = + nla_data(ieee[DCB_ATTR_IEEE_QCN]); + + err = ops->ieee_setqcn(netdev, qcn); + if (err) + goto err; + } + if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) { struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]); err = ops->ieee_setpfc(netdev, pfc); diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index e4c144f..3b1d64d 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -310,11 +310,9 @@ int compat_dccp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); #endif int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg); -int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t size); -int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t len, int nonblock, int flags, - int *addr_len); +int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); +int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, + int flags, int *addr_len); void dccp_shutdown(struct sock *sk, int how); int inet_dccp_listen(struct socket *sock, int backlog); unsigned int dccp_poll(struct file *file, struct socket *sock, diff --git a/net/dccp/probe.c b/net/dccp/probe.c index 595ddf0..d8346d0 100644 --- a/net/dccp/probe.c +++ b/net/dccp/probe.c @@ -72,8 +72,7 @@ static void printl(const char *fmt, ...) 
wake_up(&dccpw.wait); } -static int jdccp_sendmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t size) +static int jdccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) { const struct inet_sock *inet = inet_sk(sk); struct ccid3_hc_tx_sock *hc = NULL; diff --git a/net/dccp/proto.c b/net/dccp/proto.c index e171b78..52a9401 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -741,8 +741,7 @@ static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb) return 0; } -int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len) +int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { const struct dccp_sock *dp = dccp_sk(sk); const int flags = msg->msg_flags; @@ -806,8 +805,8 @@ out_discard: EXPORT_SYMBOL_GPL(dccp_sendmsg); -int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len, int nonblock, int flags, int *addr_len) +int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, + int flags, int *addr_len) { const struct dccp_hdr *dh; long timeo; diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 8102286..754484b 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -1669,8 +1669,8 @@ static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int } -static int dn_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, int flags) +static int dn_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, + int flags) { struct sock *sk = sock->sk; struct dn_scp *scp = DN_SK(sk); @@ -1905,8 +1905,7 @@ static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk, return skb; } -static int dn_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size) +static int dn_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) { struct sock *sk = sock->sk; struct dn_scp *scp = DN_SK(sk); diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c index 7ca7c31..be1f08c 100644 --- a/net/decnet/dn_neigh.c +++ b/net/decnet/dn_neigh.c @@ -49,41 +49,17 @@ #include <net/dn_route.h> static int dn_neigh_construct(struct neighbour *); -static void dn_long_error_report(struct neighbour *, struct sk_buff *); -static void dn_short_error_report(struct neighbour *, struct sk_buff *); -static int dn_long_output(struct neighbour *, struct sk_buff *); -static int dn_short_output(struct neighbour *, struct sk_buff *); -static int dn_phase3_output(struct neighbour *, struct sk_buff *); - - -/* - * For talking to broadcast devices: Ethernet & PPP - */ -static const struct neigh_ops dn_long_ops = { - .family = AF_DECnet, - .error_report = dn_long_error_report, - .output = dn_long_output, - .connected_output = dn_long_output, -}; +static void dn_neigh_error_report(struct neighbour *, struct sk_buff *); +static int dn_neigh_output(struct neighbour *neigh, struct sk_buff *skb); /* - * For talking to pointopoint and multidrop devices: DDCMP and X.25 + * Operations for adding the link layer header. 
*/ -static const struct neigh_ops dn_short_ops = { +static const struct neigh_ops dn_neigh_ops = { .family = AF_DECnet, - .error_report = dn_short_error_report, - .output = dn_short_output, - .connected_output = dn_short_output, -}; - -/* - * For talking to DECnet phase III nodes - */ -static const struct neigh_ops dn_phase3_ops = { - .family = AF_DECnet, - .error_report = dn_short_error_report, /* Can use short version here */ - .output = dn_phase3_output, - .connected_output = dn_phase3_output, + .error_report = dn_neigh_error_report, + .output = dn_neigh_output, + .connected_output = dn_neigh_output, }; static u32 dn_neigh_hash(const void *pkey, @@ -93,11 +69,18 @@ static u32 dn_neigh_hash(const void *pkey, return jhash_2words(*(__u16 *)pkey, 0, hash_rnd[0]); } +static bool dn_key_eq(const struct neighbour *neigh, const void *pkey) +{ + return neigh_key_eq16(neigh, pkey); +} + struct neigh_table dn_neigh_table = { .family = PF_DECnet, .entry_size = NEIGH_ENTRY_SIZE(sizeof(struct dn_neigh)), .key_len = sizeof(__le16), + .protocol = cpu_to_be16(ETH_P_DNA_RT), .hash = dn_neigh_hash, + .key_eq = dn_key_eq, .constructor = dn_neigh_construct, .id = "dn_neigh_cache", .parms ={ @@ -146,16 +129,9 @@ static int dn_neigh_construct(struct neighbour *neigh) __neigh_parms_put(neigh->parms); neigh->parms = neigh_parms_clone(parms); - - if (dn_db->use_long) - neigh->ops = &dn_long_ops; - else - neigh->ops = &dn_short_ops; rcu_read_unlock(); - if (dn->flags & DN_NDFLAG_P3) - neigh->ops = &dn_phase3_ops; - + neigh->ops = &dn_neigh_ops; neigh->nud_state = NUD_NOARP; neigh->output = neigh->ops->connected_output; @@ -187,24 +163,16 @@ static int dn_neigh_construct(struct neighbour *neigh) return 0; } -static void dn_long_error_report(struct neighbour *neigh, struct sk_buff *skb) +static void dn_neigh_error_report(struct neighbour *neigh, struct sk_buff *skb) { - printk(KERN_DEBUG "dn_long_error_report: called\n"); + printk(KERN_DEBUG "dn_neigh_error_report: called\n"); kfree_skb(skb); } - -static void dn_short_error_report(struct neighbour *neigh, struct sk_buff *skb) -{ - printk(KERN_DEBUG "dn_short_error_report: called\n"); - kfree_skb(skb); -} - -static int dn_neigh_output_packet(struct sk_buff *skb) +static int dn_neigh_output(struct neighbour *neigh, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct dn_route *rt = (struct dn_route *)dst; - struct neighbour *neigh = rt->n; struct net_device *dev = neigh->dev; char mac_addr[ETH_ALEN]; unsigned int seq; @@ -226,6 +194,18 @@ static int dn_neigh_output_packet(struct sk_buff *skb) return err; } +static int dn_neigh_output_packet(struct sk_buff *skb) +{ + struct dst_entry *dst = skb_dst(skb); + struct dn_route *rt = (struct dn_route *)dst; + struct neighbour *neigh = rt->n; + + return neigh->output(neigh, skb); +} + +/* + * For talking to broadcast devices: Ethernet & PPP + */ static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb) { struct net_device *dev = neigh->dev; @@ -269,6 +249,9 @@ static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb) neigh->dev, dn_neigh_output_packet); } +/* + * For talking to pointopoint and multidrop devices: DDCMP and X.25 + */ static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb) { struct net_device *dev = neigh->dev; @@ -306,7 +289,8 @@ static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb) } /* - * Phase 3 output is the same is short output, execpt that + * For talking to DECnet phase III nodes + * Phase 3 output is the same as short 
output, execpt that * it clears the area bits before transmission. */ static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb) @@ -344,6 +328,32 @@ static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb) neigh->dev, dn_neigh_output_packet); } +int dn_to_neigh_output(struct sk_buff *skb) +{ + struct dst_entry *dst = skb_dst(skb); + struct dn_route *rt = (struct dn_route *) dst; + struct neighbour *neigh = rt->n; + struct dn_neigh *dn = (struct dn_neigh *)neigh; + struct dn_dev *dn_db; + bool use_long; + + rcu_read_lock(); + dn_db = rcu_dereference(neigh->dev->dn_ptr); + if (dn_db == NULL) { + rcu_read_unlock(); + return -EINVAL; + } + use_long = dn_db->use_long; + rcu_read_unlock(); + + if (dn->flags & DN_NDFLAG_P3) + return dn_phase3_output(neigh, skb); + if (use_long) + return dn_long_output(neigh, skb); + else + return dn_short_output(neigh, skb); +} + /* * Unfortunately, the neighbour code uses the device in its hash * function, so we don't get any advantage from it. This function diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 3b81092..9ab0c4b 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -136,7 +136,6 @@ int decnet_dst_gc_interval = 2; static struct dst_ops dn_dst_ops = { .family = PF_DECnet, - .protocol = cpu_to_be16(ETH_P_DNA_RT), .gc_thresh = 128, .gc = dn_dst_gc, .check = dn_dst_check, @@ -743,15 +742,6 @@ out: return NET_RX_DROP; } -static int dn_to_neigh_output(struct sk_buff *skb) -{ - struct dst_entry *dst = skb_dst(skb); - struct dn_route *rt = (struct dn_route *) dst; - struct neighbour *n = rt->n; - - return n->output(n, skb); -} - static int dn_output(struct sock *sk, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index 5f8ac40..b45206e 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -8,6 +8,7 @@ config NET_DSA tristate depends on HAVE_NET_DSA select PHYLIB + select NET_SWITCHDEV if NET_DSA diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 2173402..b40f11b 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -175,43 +175,14 @@ __ATTRIBUTE_GROUPS(dsa_hwmon); #endif /* CONFIG_NET_DSA_HWMON */ /* basic switch operations **************************************************/ -static struct dsa_switch * -dsa_switch_setup(struct dsa_switch_tree *dst, int index, - struct device *parent, struct device *host_dev) +static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) { - struct dsa_chip_data *pd = dst->pd->chip + index; - struct dsa_switch_driver *drv; - struct dsa_switch *ds; - int ret; - char *name; - int i; + struct dsa_switch_driver *drv = ds->drv; + struct dsa_switch_tree *dst = ds->dst; + struct dsa_chip_data *pd = ds->pd; bool valid_name_found = false; - - /* - * Probe for switch model. - */ - drv = dsa_switch_probe(host_dev, pd->sw_addr, &name); - if (drv == NULL) { - netdev_err(dst->master_netdev, "[%d]: could not detect attached switch\n", - index); - return ERR_PTR(-EINVAL); - } - netdev_info(dst->master_netdev, "[%d]: detected a %s switch\n", - index, name); - - - /* - * Allocate and initialise switch state. - */ - ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL); - if (ds == NULL) - return ERR_PTR(-ENOMEM); - - ds->dst = dst; - ds->index = index; - ds->pd = dst->pd->chip + index; - ds->drv = drv; - ds->master_dev = host_dev; + int index = ds->index; + int i, ret; /* * Validate supplied switch configuration. @@ -256,7 +227,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, * switch. 
*/ if (dst->cpu_switch == index) { - switch (drv->tag_protocol) { + switch (ds->tag_protocol) { #ifdef CONFIG_NET_DSA_TAG_DSA case DSA_TAG_PROTO_DSA: dst->rcv = dsa_netdev_ops.rcv; @@ -284,7 +255,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, goto out; } - dst->tag_protocol = drv->tag_protocol; + dst->tag_protocol = ds->tag_protocol; } /* @@ -314,19 +285,15 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, * Create network devices for physical switch ports. */ for (i = 0; i < DSA_MAX_PORTS; i++) { - struct net_device *slave_dev; - if (!(ds->phys_port_mask & (1 << i))) continue; - slave_dev = dsa_slave_create(ds, parent, i, pd->port_names[i]); - if (slave_dev == NULL) { + ret = dsa_slave_create(ds, parent, i, pd->port_names[i]); + if (ret < 0) { netdev_err(dst->master_netdev, "[%d]: can't create dsa slave device for port %d(%s)\n", index, i, pd->port_names[i]); - continue; + ret = 0; } - - ds->ports[i] = slave_dev; } #ifdef CONFIG_NET_DSA_HWMON @@ -354,13 +321,57 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, } #endif /* CONFIG_NET_DSA_HWMON */ - return ds; + return ret; out_free: mdiobus_free(ds->slave_mii_bus); out: kfree(ds); - return ERR_PTR(ret); + return ret; +} + +static struct dsa_switch * +dsa_switch_setup(struct dsa_switch_tree *dst, int index, + struct device *parent, struct device *host_dev) +{ + struct dsa_chip_data *pd = dst->pd->chip + index; + struct dsa_switch_driver *drv; + struct dsa_switch *ds; + int ret; + char *name; + + /* + * Probe for switch model. + */ + drv = dsa_switch_probe(host_dev, pd->sw_addr, &name); + if (drv == NULL) { + netdev_err(dst->master_netdev, "[%d]: could not detect attached switch\n", + index); + return ERR_PTR(-EINVAL); + } + netdev_info(dst->master_netdev, "[%d]: detected a %s switch\n", + index, name); + + + /* + * Allocate and initialise switch state. 
+ */ + ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL); + if (ds == NULL) + return NULL; + + ds->dst = dst; + ds->index = index; + ds->pd = pd; + ds->drv = drv; + ds->tag_protocol = drv->tag_protocol; + ds->master_dev = host_dev; + + ret = dsa_switch_setup_one(ds, parent); + if (ret) + return NULL; + + return ds; } static void dsa_switch_destroy(struct dsa_switch *ds) @@ -378,7 +389,7 @@ static int dsa_switch_suspend(struct dsa_switch *ds) /* Suspend slave network devices */ for (i = 0; i < DSA_MAX_PORTS; i++) { - if (!(ds->phys_port_mask & (1 << i))) + if (!dsa_is_port_initialized(ds, i)) continue; ret = dsa_slave_suspend(ds->ports[i]); @@ -404,7 +415,7 @@ static int dsa_switch_resume(struct dsa_switch *ds) /* Resume slave network devices */ for (i = 0; i < DSA_MAX_PORTS; i++) { - if (!(ds->phys_port_mask & (1 << i))) + if (!dsa_is_port_initialized(ds, i)) continue; ret = dsa_slave_resume(ds->ports[i]); @@ -567,9 +578,9 @@ static void dsa_of_free_platform_data(struct dsa_platform_data *pd) kfree(pd->chip); } -static int dsa_of_probe(struct platform_device *pdev) +static int dsa_of_probe(struct device *dev) { - struct device_node *np = pdev->dev.of_node; + struct device_node *np = dev->of_node; struct device_node *child, *mdio, *ethernet, *port, *link; struct mii_bus *mdio_bus; struct platform_device *ethernet_dev; @@ -587,7 +598,7 @@ static int dsa_of_probe(struct platform_device *pdev) mdio_bus = of_mdio_find_bus(mdio); if (!mdio_bus) - return -EINVAL; + return -EPROBE_DEFER; ethernet = of_parse_phandle(np, "dsa,ethernet", 0); if (!ethernet) @@ -595,13 +606,13 @@ static int dsa_of_probe(struct platform_device *pdev) ethernet_dev = of_find_device_by_node(ethernet); if (!ethernet_dev) - return -ENODEV; + return -EPROBE_DEFER; pd = kzalloc(sizeof(*pd), GFP_KERNEL); if (!pd) return -ENOMEM; - pdev->dev.platform_data = pd; + dev->platform_data = pd; pd->netdev = &ethernet_dev->dev; pd->nr_chips = of_get_available_child_count(np); if (pd->nr_chips > DSA_MAX_SWITCHES) @@ -674,43 +685,86 @@ out_free_chip: dsa_of_free_platform_data(pd); out_free: kfree(pd); - pdev->dev.platform_data = NULL; + dev->platform_data = NULL; return ret; } -static void dsa_of_remove(struct platform_device *pdev) +static void dsa_of_remove(struct device *dev) { - struct dsa_platform_data *pd = pdev->dev.platform_data; + struct dsa_platform_data *pd = dev->platform_data; - if (!pdev->dev.of_node) + if (!dev->of_node) return; dsa_of_free_platform_data(pd); kfree(pd); } #else -static inline int dsa_of_probe(struct platform_device *pdev) +static inline int dsa_of_probe(struct device *dev) { return 0; } -static inline void dsa_of_remove(struct platform_device *pdev) +static inline void dsa_of_remove(struct device *dev) { } #endif +static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev, + struct device *parent, struct dsa_platform_data *pd) +{ + int i; + + dst->pd = pd; + dst->master_netdev = dev; + dst->cpu_switch = -1; + dst->cpu_port = -1; + + for (i = 0; i < pd->nr_chips; i++) { + struct dsa_switch *ds; + + ds = dsa_switch_setup(dst, i, parent, pd->chip[i].host_dev); + if (IS_ERR(ds)) { + netdev_err(dev, "[%d]: couldn't create dsa switch instance (error %ld)\n", + i, PTR_ERR(ds)); + continue; + } + + dst->ds[i] = ds; + if (ds->drv->poll_link != NULL) + dst->link_poll_needed = 1; + } + + /* + * If we use a tagging format that doesn't have an ethertype + * field, make sure that all packets from this point on get + * sent to the tag format's receive function.
+ */ + wmb(); + dev->dsa_ptr = (void *)dst; + + if (dst->link_poll_needed) { + INIT_WORK(&dst->link_poll_work, dsa_link_poll_work); + init_timer(&dst->link_poll_timer); + dst->link_poll_timer.data = (unsigned long)dst; + dst->link_poll_timer.function = dsa_link_poll_timer; + dst->link_poll_timer.expires = round_jiffies(jiffies + HZ); + add_timer(&dst->link_poll_timer); + } +} + static int dsa_probe(struct platform_device *pdev) { struct dsa_platform_data *pd = pdev->dev.platform_data; struct net_device *dev; struct dsa_switch_tree *dst; - int i, ret; + int ret; pr_notice_once("Distributed Switch Architecture driver version %s\n", dsa_driver_version); if (pdev->dev.of_node) { - ret = dsa_of_probe(pdev); + ret = dsa_of_probe(&pdev->dev); if (ret) return ret; @@ -722,7 +776,7 @@ static int dsa_probe(struct platform_device *pdev) dev = dev_to_net_device(pd->netdev); if (dev == NULL) { - ret = -EINVAL; + ret = -EPROBE_DEFER; goto out; } @@ -741,54 +795,18 @@ static int dsa_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dst); - dst->pd = pd; - dst->master_netdev = dev; - dst->cpu_switch = -1; - dst->cpu_port = -1; - - for (i = 0; i < pd->nr_chips; i++) { - struct dsa_switch *ds; - - ds = dsa_switch_setup(dst, i, &pdev->dev, pd->chip[i].host_dev); - if (IS_ERR(ds)) { - netdev_err(dev, "[%d]: couldn't create dsa switch instance (error %ld)\n", - i, PTR_ERR(ds)); - continue; - } - - dst->ds[i] = ds; - if (ds->drv->poll_link != NULL) - dst->link_poll_needed = 1; - } - - /* - * If we use a tagging format that doesn't have an ethertype - * field, make sure that all packets from this point on get - * sent to the tag format's receive function. - */ - wmb(); - dev->dsa_ptr = (void *)dst; - - if (dst->link_poll_needed) { - INIT_WORK(&dst->link_poll_work, dsa_link_poll_work); - init_timer(&dst->link_poll_timer); - dst->link_poll_timer.data = (unsigned long)dst; - dst->link_poll_timer.function = dsa_link_poll_timer; - dst->link_poll_timer.expires = round_jiffies(jiffies + HZ); - add_timer(&dst->link_poll_timer); - } + dsa_setup_dst(dst, dev, &pdev->dev, pd); return 0; out: - dsa_of_remove(pdev); + dsa_of_remove(&pdev->dev); return ret; } -static int dsa_remove(struct platform_device *pdev) +static void dsa_remove_dst(struct dsa_switch_tree *dst) { - struct dsa_switch_tree *dst = platform_get_drvdata(pdev); int i; if (dst->link_poll_needed) @@ -802,8 +820,14 @@ static int dsa_remove(struct platform_device *pdev) if (ds != NULL) dsa_switch_destroy(ds); } +} + +static int dsa_remove(struct platform_device *pdev) +{ + struct dsa_switch_tree *dst = platform_get_drvdata(pdev); - dsa_of_remove(pdev); + dsa_remove_dst(dst); + dsa_of_remove(&pdev->dev); return 0; } @@ -830,6 +854,10 @@ static struct packet_type dsa_pack_type __read_mostly = { .func = dsa_switch_rcv, }; +static struct notifier_block dsa_netdevice_nb __read_mostly = { + .notifier_call = dsa_slave_netdevice_event, +}; + #ifdef CONFIG_PM_SLEEP static int dsa_suspend(struct device *d) { @@ -888,6 +916,8 @@ static int __init dsa_init_module(void) { int rc; + register_netdevice_notifier(&dsa_netdevice_nb); + rc = platform_driver_register(&dsa_driver); if (rc) return rc; @@ -900,6 +930,7 @@ module_init(dsa_init_module); static void __exit dsa_cleanup_module(void) { + unregister_netdevice_notifier(&dsa_netdevice_nb); dev_remove_pack(&dsa_pack_type); platform_driver_unregister(&dsa_driver); } diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index dc9756d..d5f1f9b8 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -45,6 +45,8 @@ 
struct dsa_slave_priv { int old_link; int old_pause; int old_duplex; + + struct net_device *bridge_dev; }; /* dsa.c */ @@ -53,11 +55,12 @@ extern char dsa_driver_version[]; /* slave.c */ extern const struct dsa_device_ops notag_netdev_ops; void dsa_slave_mii_bus_init(struct dsa_switch *ds); -struct net_device *dsa_slave_create(struct dsa_switch *ds, - struct device *parent, - int port, char *name); +int dsa_slave_create(struct dsa_switch *ds, struct device *parent, + int port, char *name); int dsa_slave_suspend(struct net_device *slave_dev); int dsa_slave_resume(struct net_device *slave_dev); +int dsa_slave_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr); /* tag_dsa.c */ extern const struct dsa_device_ops dsa_netdev_ops; diff --git a/net/dsa/slave.c b/net/dsa/slave.c index f23dead..a47305c 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -10,10 +10,13 @@ #include <linux/list.h> #include <linux/etherdevice.h> +#include <linux/netdevice.h> #include <linux/phy.h> #include <linux/phy_fixed.h> #include <linux/of_net.h> #include <linux/of_mdio.h> +#include <net/rtnetlink.h> +#include <linux/if_bridge.h> #include "dsa_priv.h" /* slave mii_bus handling ***************************************************/ @@ -60,11 +63,18 @@ static int dsa_slave_init(struct net_device *dev) return 0; } +static inline bool dsa_port_is_bridged(struct dsa_slave_priv *p) +{ + return !!p->bridge_dev; +} + static int dsa_slave_open(struct net_device *dev) { struct dsa_slave_priv *p = netdev_priv(dev); struct net_device *master = p->parent->dst->master_netdev; struct dsa_switch *ds = p->parent; + u8 stp_state = dsa_port_is_bridged(p) ? + BR_STATE_BLOCKING : BR_STATE_FORWARDING; int err; if (!(master->flags & IFF_UP)) @@ -93,6 +103,9 @@ static int dsa_slave_open(struct net_device *dev) goto clear_promisc; } + if (ds->drv->port_stp_update) + ds->drv->port_stp_update(ds, p->port, stp_state); + if (p->phy) phy_start(p->phy); @@ -133,6 +146,9 @@ static int dsa_slave_close(struct net_device *dev) if (ds->drv->port_disable) ds->drv->port_disable(ds, p->port, p->phy); + if (ds->drv->port_stp_update) + ds->drv->port_stp_update(ds, p->port, BR_STATE_DISABLED); + return 0; } @@ -194,6 +210,92 @@ static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EOPNOTSUPP; } +/* Return a bitmask of all ports being currently bridged within a given bridge + * device. Note that on leave, the mask will still return the bitmask of ports + * currently bridged, prior to port removal, and this is exactly what we want. 
+ */ +static u32 dsa_slave_br_port_mask(struct dsa_switch *ds, + struct net_device *bridge) +{ + struct dsa_slave_priv *p; + unsigned int port; + u32 mask = 0; + + for (port = 0; port < DSA_MAX_PORTS; port++) { + if (!dsa_is_port_initialized(ds, port)) + continue; + + p = netdev_priv(ds->ports[port]); + + if (ds->ports[port]->priv_flags & IFF_BRIDGE_PORT && + p->bridge_dev == bridge) + mask |= 1 << port; + } + + return mask; +} + +static int dsa_slave_stp_update(struct net_device *dev, u8 state) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + int ret = -EOPNOTSUPP; + + if (ds->drv->port_stp_update) + ret = ds->drv->port_stp_update(ds, p->port, state); + + return ret; +} + +static int dsa_slave_bridge_port_join(struct net_device *dev, + struct net_device *br) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + int ret = -EOPNOTSUPP; + + p->bridge_dev = br; + + if (ds->drv->port_join_bridge) + ret = ds->drv->port_join_bridge(ds, p->port, + dsa_slave_br_port_mask(ds, br)); + + return ret; +} + +static int dsa_slave_bridge_port_leave(struct net_device *dev) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + int ret = -EOPNOTSUPP; + + + if (ds->drv->port_leave_bridge) + ret = ds->drv->port_leave_bridge(ds, p->port, + dsa_slave_br_port_mask(ds, p->bridge_dev)); + + p->bridge_dev = NULL; + + /* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer, + * so allow it to be in BR_STATE_FORWARDING to be kept functional + */ + dsa_slave_stp_update(dev, BR_STATE_FORWARDING); + + return ret; +} + +static int dsa_slave_parent_id_get(struct net_device *dev, + struct netdev_phys_item_id *psid) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + + psid->id_len = sizeof(ds->index); + memcpy(&psid->id, &ds->index, psid->id_len); + + return 0; +} + static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev) { struct dsa_slave_priv *p = netdev_priv(dev); @@ -470,6 +572,8 @@ static const struct net_device_ops dsa_slave_netdev_ops = { .ndo_set_rx_mode = dsa_slave_set_rx_mode, .ndo_set_mac_address = dsa_slave_set_mac_address, .ndo_do_ioctl = dsa_slave_ioctl, + .ndo_switch_parent_id_get = dsa_slave_parent_id_get, + .ndo_switch_port_stp_update = dsa_slave_stp_update, }; static void dsa_slave_adjust_link(struct net_device *dev) @@ -605,9 +709,8 @@ int dsa_slave_resume(struct net_device *slave_dev) return 0; } -struct net_device * -dsa_slave_create(struct dsa_switch *ds, struct device *parent, - int port, char *name) +int dsa_slave_create(struct dsa_switch *ds, struct device *parent, + int port, char *name) { struct net_device *master = ds->dst->master_netdev; struct net_device *slave_dev; @@ -617,7 +720,7 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent, slave_dev = alloc_netdev(sizeof(struct dsa_slave_priv), name, NET_NAME_UNKNOWN, ether_setup); if (slave_dev == NULL) - return slave_dev; + return -ENOMEM; slave_dev->features = master->vlan_features; slave_dev->ethtool_ops = &dsa_slave_ethtool_ops; @@ -667,19 +770,63 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent, ret = dsa_slave_phy_setup(p, slave_dev); if (ret) { free_netdev(slave_dev); - return NULL; + return ret; } + ds->ports[port] = slave_dev; ret = register_netdev(slave_dev); if (ret) { netdev_err(master, "error %d registering interface %s\n", ret, slave_dev->name); phy_disconnect(p->phy); + ds->ports[port] = NULL; free_netdev(slave_dev); - 
return NULL; + return ret; } netif_carrier_off(slave_dev); - return slave_dev; + return 0; +} + +static bool dsa_slave_dev_check(struct net_device *dev) +{ + return dev->netdev_ops == &dsa_slave_netdev_ops; +} + +static int dsa_slave_master_changed(struct net_device *dev) +{ + struct net_device *master = netdev_master_upper_dev_get(dev); + int err = 0; + + if (master && master->rtnl_link_ops && + !strcmp(master->rtnl_link_ops->kind, "bridge")) + err = dsa_slave_bridge_port_join(dev, master); + else + err = dsa_slave_bridge_port_leave(dev); + + return err; +} + +int dsa_slave_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev; + int err = 0; + + switch (event) { + case NETDEV_CHANGEUPPER: + dev = netdev_notifier_info_to_dev(ptr); + if (!dsa_slave_dev_check(dev)) + goto out; + + err = dsa_slave_master_changed(dev); + if (err) + netdev_warn(dev, "failed to reflect master change\n"); + + break; + } + +out: + return NOTIFY_DONE; } diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 238f38d..f3bad41 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -104,7 +104,7 @@ int eth_header(struct sk_buff *skb, struct net_device *dev, */ if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) { - memset(eth->h_dest, 0, ETH_ALEN); + eth_zero_addr(eth->h_dest); return ETH_HLEN; } @@ -113,39 +113,6 @@ int eth_header(struct sk_buff *skb, struct net_device *dev, EXPORT_SYMBOL(eth_header); /** - * eth_rebuild_header- rebuild the Ethernet MAC header. - * @skb: socket buffer to update - * - * This is called after an ARP or IPV6 ndisc it's resolution on this - * sk_buff. We now let protocol (ARP) fill in the other fields. - * - * This routine CANNOT use cached dst->neigh! - * Really, it is used only when dst->neigh is wrong. 
- */ -int eth_rebuild_header(struct sk_buff *skb) -{ - struct ethhdr *eth = (struct ethhdr *)skb->data; - struct net_device *dev = skb->dev; - - switch (eth->h_proto) { -#ifdef CONFIG_INET - case htons(ETH_P_IP): - return arp_find(eth->h_dest, skb); -#endif - default: - netdev_dbg(dev, - "%s: unable to resolve type %X addresses.\n", - dev->name, ntohs(eth->h_proto)); - - memcpy(eth->h_source, dev->dev_addr, ETH_ALEN); - break; - } - - return 0; -} -EXPORT_SYMBOL(eth_rebuild_header); - -/** * eth_get_headlen - determine the the length of header for an ethernet frame * @data: pointer to start of frame * @len: total length of frame @@ -369,7 +336,6 @@ EXPORT_SYMBOL(eth_validate_addr); const struct header_ops eth_header_ops ____cacheline_aligned = { .create = eth_header, .parse = eth_header_parse, - .rebuild = eth_rebuild_header, .cache = eth_header_cache, .cache_update = eth_header_cache_update, }; @@ -391,7 +357,7 @@ void ether_setup(struct net_device *dev) dev->flags = IFF_BROADCAST|IFF_MULTICAST; dev->priv_flags |= IFF_TX_SKB_SHARING; - memset(dev->broadcast, 0xFF, ETH_ALEN); + eth_broadcast_addr(dev->broadcast); } EXPORT_SYMBOL(ether_setup); diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c index 055fbb7..dfd3c60 100644 --- a/net/ieee802154/6lowpan/core.c +++ b/net/ieee802154/6lowpan/core.c @@ -126,6 +126,7 @@ static void lowpan_setup(struct net_device *dev) dev->header_ops = &lowpan_header_ops; dev->ml_priv = &lowpan_mlme; dev->destructor = free_netdev; + dev->features |= NETIF_F_NETNS_LOCAL; } static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[]) @@ -148,10 +149,11 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev, pr_debug("adding new link\n"); - if (!tb[IFLA_LINK]) + if (!tb[IFLA_LINK] || + !net_eq(dev_net(dev), &init_net)) return -EINVAL; /* find and hold real wpan device */ - real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); + real_dev = dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK])); if (!real_dev) return -ENODEV; if (real_dev->type != ARPHRD_IEEE802154) { diff --git a/net/ieee802154/core.c b/net/ieee802154/core.c index 18bc7e7..888d099 100644 --- a/net/ieee802154/core.c +++ b/net/ieee802154/core.c @@ -225,6 +225,7 @@ static int cfg802154_netdev_notifier_call(struct notifier_block *nb, switch (state) { /* TODO NETDEV_DEVTYPE */ case NETDEV_REGISTER: + dev->features |= NETIF_F_NETNS_LOCAL; wpan_dev->identifier = ++rdev->wpan_dev_id; list_add_rcu(&wpan_dev->list, &rdev->wpan_dev_list); rdev->devlist_generation++; diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index 2878d8c..b60c65f 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -98,12 +98,12 @@ static int ieee802154_sock_release(struct socket *sock) return 0; } -static int ieee802154_sock_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int ieee802154_sock_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) { struct sock *sk = sock->sk; - return sk->sk_prot->sendmsg(iocb, sk, msg, len); + return sk->sk_prot->sendmsg(sk, msg, len); } static int ieee802154_sock_bind(struct socket *sock, struct sockaddr *uaddr, @@ -255,8 +255,7 @@ static int raw_disconnect(struct sock *sk, int flags) return 0; } -static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t size) +static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) { struct net_device *dev; unsigned int mtu; @@ -327,8 +326,8 @@ out: return err; } -static int 
raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len, int noblock, int flags, int *addr_len) +static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + int noblock, int flags, int *addr_len) { size_t copied = 0; int err = -EOPNOTSUPP; @@ -615,8 +614,7 @@ static int dgram_disconnect(struct sock *sk, int flags) return 0; } -static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t size) +static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) { struct net_device *dev; unsigned int mtu; @@ -715,9 +713,8 @@ out: return err; } -static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t len, int noblock, - int flags, int *addr_len) +static int dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + int noblock, int flags, int *addr_len) { size_t copied = 0; int err = -EOPNOTSUPP; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index d2e49ba..64a9c0f 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -716,8 +716,7 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr, } EXPORT_SYMBOL(inet_getname); -int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, - size_t size) +int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) { struct sock *sk = sock->sk; @@ -728,7 +727,7 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, inet_autobind(sk)) return -EAGAIN; - return sk->sk_prot->sendmsg(iocb, sk, msg, size); + return sk->sk_prot->sendmsg(sk, msg, size); } EXPORT_SYMBOL(inet_sendmsg); @@ -750,8 +749,8 @@ ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, } EXPORT_SYMBOL(inet_sendpage); -int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, - size_t size, int flags) +int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, + int flags) { struct sock *sk = sock->sk; int addr_len = 0; @@ -759,7 +758,7 @@ int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, sock_rps_record_flow(sk); - err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT, + err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT, flags & ~MSG_DONTWAIT, &addr_len); if (err >= 0) msg->msg_namelen = addr_len; @@ -1675,7 +1674,7 @@ static int __init inet_init(void) struct list_head *r; int rc = -EINVAL; - BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb)); + sock_skb_cb_check_size(sizeof(struct inet_skb_parm)); rc = proto_register(&tcp_prot, 1); if (rc) diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 205e147..5f5c674 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -122,6 +122,7 @@ * Interface to generic neighbour cache. 
*/ static u32 arp_hash(const void *pkey, const struct net_device *dev, __u32 *hash_rnd); +static bool arp_key_eq(const struct neighbour *n, const void *pkey); static int arp_constructor(struct neighbour *neigh); static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb); static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb); @@ -149,18 +150,12 @@ static const struct neigh_ops arp_direct_ops = { .connected_output = neigh_direct_output, }; -static const struct neigh_ops arp_broken_ops = { - .family = AF_INET, - .solicit = arp_solicit, - .error_report = arp_error_report, - .output = neigh_compat_output, - .connected_output = neigh_compat_output, -}; - struct neigh_table arp_tbl = { .family = AF_INET, .key_len = 4, + .protocol = cpu_to_be16(ETH_P_IP), .hash = arp_hash, + .key_eq = arp_key_eq, .constructor = arp_constructor, .proxy_redo = parp_redo, .id = "arp_cache", @@ -216,7 +211,12 @@ static u32 arp_hash(const void *pkey, const struct net_device *dev, __u32 *hash_rnd) { - return arp_hashfn(*(u32 *)pkey, dev, *hash_rnd); + return arp_hashfn(pkey, dev, hash_rnd); +} + +static bool arp_key_eq(const struct neighbour *neigh, const void *pkey) +{ + return neigh_key_eq32(neigh, pkey); } static int arp_constructor(struct neighbour *neigh) @@ -260,35 +260,6 @@ static int arp_constructor(struct neighbour *neigh) in old paradigm. */ -#if 1 - /* So... these "amateur" devices are hopeless. - The only thing, that I can say now: - It is very sad that we need to keep ugly obsolete - code to make them happy. - - They should be moved to more reasonable state, now - they use rebuild_header INSTEAD OF hard_start_xmit!!! - Besides that, they are sort of out of date - (a lot of redundant clones/copies, useless in 2.1), - I wonder why people believe that they work. - */ - switch (dev->type) { - default: - break; - case ARPHRD_ROSE: -#if IS_ENABLED(CONFIG_AX25) - case ARPHRD_AX25: -#if IS_ENABLED(CONFIG_NETROM) - case ARPHRD_NETROM: -#endif - neigh->ops = &arp_broken_ops; - neigh->output = neigh->ops->output; - return 0; -#else - break; -#endif - } -#endif if (neigh->type == RTN_MULTICAST) { neigh->nud_state = NUD_NOARP; arp_mc_map(addr, neigh->ha, dev, 1); @@ -433,71 +404,6 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev) return flag; } -/* OBSOLETE FUNCTIONS */ - -/* - * Find an arp mapping in the cache. If not found, post a request. - * - * It is very UGLY routine: it DOES NOT use skb->dst->neighbour, - * even if it exists. It is supposed that skb->dev was mangled - * by a virtual device (eql, shaper). Nobody but broken devices - * is allowed to use this function, it is scheduled to be removed. 
--ANK - */ - -static int arp_set_predefined(int addr_hint, unsigned char *haddr, - __be32 paddr, struct net_device *dev) -{ - switch (addr_hint) { - case RTN_LOCAL: - pr_debug("arp called for own IP address\n"); - memcpy(haddr, dev->dev_addr, dev->addr_len); - return 1; - case RTN_MULTICAST: - arp_mc_map(paddr, haddr, dev, 1); - return 1; - case RTN_BROADCAST: - memcpy(haddr, dev->broadcast, dev->addr_len); - return 1; - } - return 0; -} - - -int arp_find(unsigned char *haddr, struct sk_buff *skb) -{ - struct net_device *dev = skb->dev; - __be32 paddr; - struct neighbour *n; - - if (!skb_dst(skb)) { - pr_debug("arp_find is called with dst==NULL\n"); - kfree_skb(skb); - return 1; - } - - paddr = rt_nexthop(skb_rtable(skb), ip_hdr(skb)->daddr); - if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr, - paddr, dev)) - return 0; - - n = __neigh_lookup(&arp_tbl, &paddr, dev, 1); - - if (n) { - n->used = jiffies; - if (n->nud_state & NUD_VALID || neigh_event_send(n, skb) == 0) { - neigh_ha_snapshot(haddr, n, dev); - neigh_release(n); - return 0; - } - neigh_release(n); - } else - kfree_skb(skb); - return 1; -} -EXPORT_SYMBOL(arp_find); - -/* END OF OBSOLETE FUNCTIONS */ - /* * Check if we can use proxy ARP for this path */ diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 3a8985c..5105759 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -548,6 +548,26 @@ struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, return NULL; } +static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa) +{ + struct ip_mreqn mreq = { + .imr_multiaddr.s_addr = ifa->ifa_address, + .imr_ifindex = ifa->ifa_dev->dev->ifindex, + }; + int ret; + + ASSERT_RTNL(); + + lock_sock(sk); + if (join) + ret = __ip_mc_join_group(sk, &mreq); + else + ret = __ip_mc_leave_group(sk, &mreq); + release_sock(sk); + + return ret; +} + static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); @@ -584,6 +604,8 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh) !inet_ifa_match(nla_get_be32(tb[IFA_ADDRESS]), ifa))) continue; + if (ipv4_is_multicast(ifa->ifa_address)) + ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa); __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid); return 0; } @@ -838,6 +860,15 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh) * userspace already relies on not having to provide this. 
*/ set_ifa_lifetime(ifa, valid_lft, prefered_lft); + if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) { + int ret = ip_mc_config(net->ipv4.mc_autojoin_sk, + true, ifa); + + if (ret < 0) { + inet_free_ifa(ifa); + return ret; + } + } return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid); } else { inet_free_ifa(ifa); diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 57be71d..e067770 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -89,17 +89,14 @@ struct fib_table *fib_new_table(struct net *net, u32 id) switch (id) { case RT_TABLE_LOCAL: - net->ipv4.fib_local = tb; + rcu_assign_pointer(net->ipv4.fib_local, tb); break; - case RT_TABLE_MAIN: - net->ipv4.fib_main = tb; + rcu_assign_pointer(net->ipv4.fib_main, tb); break; - case RT_TABLE_DEFAULT: - net->ipv4.fib_default = tb; + rcu_assign_pointer(net->ipv4.fib_default, tb); break; - default: break; } @@ -132,13 +129,14 @@ struct fib_table *fib_get_table(struct net *net, u32 id) static void fib_flush(struct net *net) { int flushed = 0; - struct fib_table *tb; - struct hlist_head *head; unsigned int h; for (h = 0; h < FIB_TABLE_HASHSZ; h++) { - head = &net->ipv4.fib_table_hash[h]; - hlist_for_each_entry(tb, head, tb_hlist) + struct hlist_head *head = &net->ipv4.fib_table_hash[h]; + struct hlist_node *tmp; + struct fib_table *tb; + + hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) flushed += fib_table_flush(tb); } @@ -146,6 +144,19 @@ static void fib_flush(struct net *net) rt_cache_flush(net); } +void fib_flush_external(struct net *net) +{ + struct fib_table *tb; + struct hlist_head *head; + unsigned int h; + + for (h = 0; h < FIB_TABLE_HASHSZ; h++) { + head = &net->ipv4.fib_table_hash[h]; + hlist_for_each_entry(tb, head, tb_hlist) + fib_table_flush_external(tb); + } +} + /* * Find address type as if only "dev" was present in the system. If * on_dev is NULL then all interfaces are taken into consideration. @@ -665,10 +676,12 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) s_h = cb->args[0]; s_e = cb->args[1]; + rcu_read_lock(); + for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) { e = 0; head = &net->ipv4.fib_table_hash[h]; - hlist_for_each_entry(tb, head, tb_hlist) { + hlist_for_each_entry_rcu(tb, head, tb_hlist) { if (e < s_e) goto next; if (dumped) @@ -682,6 +695,8 @@ next: } } out: + rcu_read_unlock(); + cb->args[1] = e; cb->args[0] = h; @@ -1117,14 +1132,34 @@ static void ip_fib_net_exit(struct net *net) rtnl_lock(); for (i = 0; i < FIB_TABLE_HASHSZ; i++) { - struct fib_table *tb; - struct hlist_head *head; + struct hlist_head *head = &net->ipv4.fib_table_hash[i]; struct hlist_node *tmp; + struct fib_table *tb; + + /* this is done in two passes as flushing the table could + * cause it to be reallocated in order to accommodate new + * tnodes at the root as the table shrinks. 
+ */ + hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) + fib_table_flush(tb); - head = &net->ipv4.fib_table_hash[i]; hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) { +#ifdef CONFIG_IP_MULTIPLE_TABLES + switch (tb->tb_id) { + case RT_TABLE_LOCAL: + RCU_INIT_POINTER(net->ipv4.fib_local, NULL); + break; + case RT_TABLE_MAIN: + RCU_INIT_POINTER(net->ipv4.fib_main, NULL); + break; + case RT_TABLE_DEFAULT: + RCU_INIT_POINTER(net->ipv4.fib_default, NULL); + break; + default: + break; + } +#endif hlist_del(&tb->tb_hlist); - fib_table_flush(tb); fib_free_table(tb); } } diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h index 825981b1..ae2e6ee 100644 --- a/net/ipv4/fib_lookup.h +++ b/net/ipv4/fib_lookup.h @@ -6,11 +6,12 @@ #include <net/ip_fib.h> struct fib_alias { - struct list_head fa_list; + struct hlist_node fa_list; struct fib_info *fa_info; u8 fa_tos; u8 fa_type; u8 fa_state; + u8 fa_slen; struct rcu_head rcu; }; diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index d3db718..190d0d0 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -209,6 +209,8 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb, rule4->tos = frh->tos; net->ipv4.fib_has_custom_rules = true; + fib_flush_external(rule->fr_net); + err = 0; errout: return err; @@ -224,6 +226,7 @@ static void fib4_rule_delete(struct fib_rule *rule) net->ipv4.fib_num_tclassid_users--; #endif net->ipv4.fib_has_custom_rules = true; + fib_flush_external(rule->fr_net); } static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 1e2090e..c6d2674 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -1163,12 +1163,12 @@ int fib_sync_down_dev(struct net_device *dev, int force) void fib_select_default(struct fib_result *res) { struct fib_info *fi = NULL, *last_resort = NULL; - struct list_head *fa_head = res->fa_head; + struct hlist_head *fa_head = res->fa_head; struct fib_table *tb = res->table; int order = -1, last_idx = -1; struct fib_alias *fa; - list_for_each_entry_rcu(fa, fa_head, fa_list) { + hlist_for_each_entry_rcu(fa, fa_head, fa_list) { struct fib_info *next_fi = fa->fa_info; if (next_fi->fib_scope != res->scope || diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 3daf022..9095545 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -79,6 +79,7 @@ #include <net/tcp.h> #include <net/sock.h> #include <net/ip_fib.h> +#include <net/switchdev.h> #include "fib_lookup.h" #define MAX_STAT_DEPTH 32 @@ -88,38 +89,35 @@ typedef unsigned int t_key; -#define IS_TNODE(n) ((n)->bits) -#define IS_LEAF(n) (!(n)->bits) +#define IS_TRIE(n) ((n)->pos >= KEYLENGTH) +#define IS_TNODE(n) ((n)->bits) +#define IS_LEAF(n) (!(n)->bits) -#define get_index(_key, _kv) (((_key) ^ (_kv)->key) >> (_kv)->pos) - -struct tnode { +struct key_vector { t_key key; - unsigned char bits; /* 2log(KEYLENGTH) bits needed */ unsigned char pos; /* 2log(KEYLENGTH) bits needed */ + unsigned char bits; /* 2log(KEYLENGTH) bits needed */ unsigned char slen; - struct tnode __rcu *parent; - struct rcu_head rcu; union { - /* The fields in this struct are valid if bits > 0 (TNODE) */ - struct { - t_key empty_children; /* KEYLENGTH bits needed */ - t_key full_children; /* KEYLENGTH bits needed */ - struct tnode __rcu *child[0]; - }; - /* This list pointer if valid if bits == 0 (LEAF) */ - struct hlist_head list; + /* This list pointer if valid if (pos | bits) == 0 (LEAF) */ + struct hlist_head leaf; + /* This 
array is valid if (pos | bits) > 0 (TNODE) */ + struct key_vector __rcu *tnode[0]; }; }; -struct leaf_info { - struct hlist_node hlist; - int plen; - u32 mask_plen; /* ntohl(inet_make_mask(plen)) */ - struct list_head falh; +struct tnode { struct rcu_head rcu; + t_key empty_children; /* KEYLENGTH bits needed */ + t_key full_children; /* KEYLENGTH bits needed */ + struct key_vector __rcu *parent; + struct key_vector kv[1]; +#define tn_bits kv[0].bits }; +#define TNODE_SIZE(n) offsetof(struct tnode, kv[0].tnode[n]) +#define LEAF_SIZE TNODE_SIZE(1) + #ifdef CONFIG_IP_FIB_TRIE_STATS struct trie_use_stats { unsigned int gets; @@ -142,13 +140,13 @@ struct trie_stat { }; struct trie { - struct tnode __rcu *trie; + struct key_vector kv[1]; #ifdef CONFIG_IP_FIB_TRIE_STATS struct trie_use_stats __percpu *stats; #endif }; -static void resize(struct trie *t, struct tnode *tn); +static struct key_vector *resize(struct trie *t, struct key_vector *tn); static size_t tnode_free_size; /* @@ -161,41 +159,46 @@ static const int sync_pages = 128; static struct kmem_cache *fn_alias_kmem __read_mostly; static struct kmem_cache *trie_leaf_kmem __read_mostly; +static inline struct tnode *tn_info(struct key_vector *kv) +{ + return container_of(kv, struct tnode, kv[0]); +} + /* caller must hold RTNL */ -#define node_parent(n) rtnl_dereference((n)->parent) +#define node_parent(tn) rtnl_dereference(tn_info(tn)->parent) +#define get_child(tn, i) rtnl_dereference((tn)->tnode[i]) /* caller must hold RCU read lock or RTNL */ -#define node_parent_rcu(n) rcu_dereference_rtnl((n)->parent) +#define node_parent_rcu(tn) rcu_dereference_rtnl(tn_info(tn)->parent) +#define get_child_rcu(tn, i) rcu_dereference_rtnl((tn)->tnode[i]) /* wrapper for rcu_assign_pointer */ -static inline void node_set_parent(struct tnode *n, struct tnode *tp) +static inline void node_set_parent(struct key_vector *n, struct key_vector *tp) { if (n) - rcu_assign_pointer(n->parent, tp); + rcu_assign_pointer(tn_info(n)->parent, tp); } -#define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER((n)->parent, p) +#define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER(tn_info(n)->parent, p) /* This provides us with the number of children in this node, in the case of a * leaf this will return 0 meaning none of the children are accessible. 
*/ -static inline unsigned long tnode_child_length(const struct tnode *tn) +static inline unsigned long child_length(const struct key_vector *tn) { return (1ul << tn->bits) & ~(1ul); } -/* caller must hold RTNL */ -static inline struct tnode *tnode_get_child(const struct tnode *tn, - unsigned long i) -{ - return rtnl_dereference(tn->child[i]); -} +#define get_cindex(key, kv) (((key) ^ (kv)->key) >> (kv)->pos) -/* caller must hold RCU read lock or RTNL */ -static inline struct tnode *tnode_get_child_rcu(const struct tnode *tn, - unsigned long i) +static inline unsigned long get_index(t_key key, struct key_vector *kv) { - return rcu_dereference_rtnl(tn->child[i]); + unsigned long index = key ^ kv->key; + + if ((BITS_PER_LONG <= KEYLENGTH) && (KEYLENGTH == kv->pos)) + return 0; + + return index >> kv->pos; } /* To understand this stuff, an understanding of keys and all their bits is @@ -274,106 +277,104 @@ static inline void alias_free_mem_rcu(struct fib_alias *fa) } #define TNODE_KMALLOC_MAX \ - ilog2((PAGE_SIZE - sizeof(struct tnode)) / sizeof(struct tnode *)) + ilog2((PAGE_SIZE - TNODE_SIZE(0)) / sizeof(struct key_vector *)) +#define TNODE_VMALLOC_MAX \ + ilog2((SIZE_MAX - TNODE_SIZE(0)) / sizeof(struct key_vector *)) static void __node_free_rcu(struct rcu_head *head) { struct tnode *n = container_of(head, struct tnode, rcu); - if (IS_LEAF(n)) + if (!n->tn_bits) kmem_cache_free(trie_leaf_kmem, n); - else if (n->bits <= TNODE_KMALLOC_MAX) + else if (n->tn_bits <= TNODE_KMALLOC_MAX) kfree(n); else vfree(n); } -#define node_free(n) call_rcu(&n->rcu, __node_free_rcu) +#define node_free(n) call_rcu(&tn_info(n)->rcu, __node_free_rcu) -static inline void free_leaf_info(struct leaf_info *leaf) +static struct tnode *tnode_alloc(int bits) { - kfree_rcu(leaf, rcu); -} + size_t size; + + /* verify bits is within bounds */ + if (bits > TNODE_VMALLOC_MAX) + return NULL; + + /* determine size and verify it is non-zero and didn't overflow */ + size = TNODE_SIZE(1ul << bits); -static struct tnode *tnode_alloc(size_t size) -{ if (size <= PAGE_SIZE) return kzalloc(size, GFP_KERNEL); else return vzalloc(size); } -static inline void empty_child_inc(struct tnode *n) +static inline void empty_child_inc(struct key_vector *n) { - ++n->empty_children ? : ++n->full_children; + ++tn_info(n)->empty_children ? : ++tn_info(n)->full_children; } -static inline void empty_child_dec(struct tnode *n) +static inline void empty_child_dec(struct key_vector *n) { - n->empty_children-- ? : n->full_children--; + tn_info(n)->empty_children-- ? 
: tn_info(n)->full_children--; } -static struct tnode *leaf_new(t_key key) +static struct key_vector *leaf_new(t_key key, struct fib_alias *fa) { - struct tnode *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL); - if (l) { - l->parent = NULL; - /* set key and pos to reflect full key value - * any trailing zeros in the key should be ignored - * as the nodes are searched - */ - l->key = key; - l->slen = 0; - l->pos = 0; - /* set bits to 0 indicating we are not a tnode */ - l->bits = 0; + struct tnode *kv = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL); + struct key_vector *l = kv->kv; - INIT_HLIST_HEAD(&l->list); - } - return l; -} + if (!kv) + return NULL; -static struct leaf_info *leaf_info_new(int plen) -{ - struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL); - if (li) { - li->plen = plen; - li->mask_plen = ntohl(inet_make_mask(plen)); - INIT_LIST_HEAD(&li->falh); - } - return li; + /* initialize key vector */ + l->key = key; + l->pos = 0; + l->bits = 0; + l->slen = fa->fa_slen; + + /* link leaf to fib alias */ + INIT_HLIST_HEAD(&l->leaf); + hlist_add_head(&fa->fa_list, &l->leaf); + + return l; } -static struct tnode *tnode_new(t_key key, int pos, int bits) +static struct key_vector *tnode_new(t_key key, int pos, int bits) { - size_t sz = offsetof(struct tnode, child[1ul << bits]); - struct tnode *tn = tnode_alloc(sz); + struct tnode *tnode = tnode_alloc(bits); unsigned int shift = pos + bits; + struct key_vector *tn = tnode->kv; /* verify bits and pos their msb bits clear and values are valid */ BUG_ON(!bits || (shift > KEYLENGTH)); - if (tn) { - tn->parent = NULL; - tn->slen = pos; - tn->pos = pos; - tn->bits = bits; - tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0; - if (bits == KEYLENGTH) - tn->full_children = 1; - else - tn->empty_children = 1ul << bits; - } + pr_debug("AT %p s=%zu %zu\n", tnode, TNODE_SIZE(0), + sizeof(struct key_vector *) << bits); + + if (!tnode) + return NULL; + + if (bits == KEYLENGTH) + tnode->full_children = 1; + else + tnode->empty_children = 1ul << bits; + + tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0; + tn->pos = pos; + tn->bits = bits; + tn->slen = pos; - pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode), - sizeof(struct tnode *) << bits); return tn; } /* Check whether a tnode 'n' is "full", i.e. it is an internal node * and no bits are skipped. See discussion in dyntree paper p. 6 */ -static inline int tnode_full(const struct tnode *tn, const struct tnode *n) +static inline int tnode_full(struct key_vector *tn, struct key_vector *n) { return n && ((n->pos + n->bits) == tn->pos) && IS_TNODE(n); } @@ -381,12 +382,13 @@ static inline int tnode_full(const struct tnode *tn, const struct tnode *n) /* Add a child at position i overwriting the old value. * Update the value of full_children and empty_children. 
*/ -static void put_child(struct tnode *tn, unsigned long i, struct tnode *n) +static void put_child(struct key_vector *tn, unsigned long i, + struct key_vector *n) { - struct tnode *chi = tnode_get_child(tn, i); + struct key_vector *chi = get_child(tn, i); int isfull, wasfull; - BUG_ON(i >= tnode_child_length(tn)); + BUG_ON(i >= child_length(tn)); /* update emptyChildren, overflow into fullChildren */ if (n == NULL && chi != NULL) @@ -399,23 +401,23 @@ static void put_child(struct tnode *tn, unsigned long i, struct tnode *n) isfull = tnode_full(tn, n); if (wasfull && !isfull) - tn->full_children--; + tn_info(tn)->full_children--; else if (!wasfull && isfull) - tn->full_children++; + tn_info(tn)->full_children++; if (n && (tn->slen < n->slen)) tn->slen = n->slen; - rcu_assign_pointer(tn->child[i], n); + rcu_assign_pointer(tn->tnode[i], n); } -static void update_children(struct tnode *tn) +static void update_children(struct key_vector *tn) { unsigned long i; /* update all of the child parent pointers */ - for (i = tnode_child_length(tn); i;) { - struct tnode *inode = tnode_get_child(tn, --i); + for (i = child_length(tn); i;) { + struct key_vector *inode = get_child(tn, --i); if (!inode) continue; @@ -431,36 +433,37 @@ static void update_children(struct tnode *tn) } } -static inline void put_child_root(struct tnode *tp, struct trie *t, - t_key key, struct tnode *n) +static inline void put_child_root(struct key_vector *tp, t_key key, + struct key_vector *n) { - if (tp) - put_child(tp, get_index(key, tp), n); + if (IS_TRIE(tp)) + rcu_assign_pointer(tp->tnode[0], n); else - rcu_assign_pointer(t->trie, n); + put_child(tp, get_index(key, tp), n); } -static inline void tnode_free_init(struct tnode *tn) +static inline void tnode_free_init(struct key_vector *tn) { - tn->rcu.next = NULL; + tn_info(tn)->rcu.next = NULL; } -static inline void tnode_free_append(struct tnode *tn, struct tnode *n) +static inline void tnode_free_append(struct key_vector *tn, + struct key_vector *n) { - n->rcu.next = tn->rcu.next; - tn->rcu.next = &n->rcu; + tn_info(n)->rcu.next = tn_info(tn)->rcu.next; + tn_info(tn)->rcu.next = &tn_info(n)->rcu; } -static void tnode_free(struct tnode *tn) +static void tnode_free(struct key_vector *tn) { - struct callback_head *head = &tn->rcu; + struct callback_head *head = &tn_info(tn)->rcu; while (head) { head = head->next; - tnode_free_size += offsetof(struct tnode, child[1 << tn->bits]); + tnode_free_size += TNODE_SIZE(1ul << tn->bits); node_free(tn); - tn = container_of(head, struct tnode, rcu); + tn = container_of(head, struct tnode, rcu)->kv; } if (tnode_free_size >= PAGE_SIZE * sync_pages) { @@ -469,14 +472,16 @@ static void tnode_free(struct tnode *tn) } } -static void replace(struct trie *t, struct tnode *oldtnode, struct tnode *tn) +static struct key_vector *replace(struct trie *t, + struct key_vector *oldtnode, + struct key_vector *tn) { - struct tnode *tp = node_parent(oldtnode); + struct key_vector *tp = node_parent(oldtnode); unsigned long i; /* setup the parent pointer out of and back into this node */ NODE_INIT_PARENT(tn, tp); - put_child_root(tp, t, tn->key, tn); + put_child_root(tp, tn->key, tn); /* update all of the child parent pointers */ update_children(tn); @@ -485,18 +490,21 @@ static void replace(struct trie *t, struct tnode *oldtnode, struct tnode *tn) tnode_free(oldtnode); /* resize children now that oldtnode is freed */ - for (i = tnode_child_length(tn); i;) { - struct tnode *inode = tnode_get_child(tn, --i); + for (i = child_length(tn); i;) { + struct 
key_vector *inode = get_child(tn, --i); /* resize child node */ if (tnode_full(tn, inode)) - resize(t, inode); + tn = resize(t, inode); } + + return tp; } -static int inflate(struct trie *t, struct tnode *oldtnode) +static struct key_vector *inflate(struct trie *t, + struct key_vector *oldtnode) { - struct tnode *tn; + struct key_vector *tn; unsigned long i; t_key m; @@ -504,7 +512,7 @@ static int inflate(struct trie *t, struct tnode *oldtnode) tn = tnode_new(oldtnode->key, oldtnode->pos - 1, oldtnode->bits + 1); if (!tn) - return -ENOMEM; + goto notnode; /* prepare oldtnode to be freed */ tnode_free_init(oldtnode); @@ -514,9 +522,9 @@ static int inflate(struct trie *t, struct tnode *oldtnode) * point to existing tnodes and the links between our allocated * nodes. */ - for (i = tnode_child_length(oldtnode), m = 1u << tn->pos; i;) { - struct tnode *inode = tnode_get_child(oldtnode, --i); - struct tnode *node0, *node1; + for (i = child_length(oldtnode), m = 1u << tn->pos; i;) { + struct key_vector *inode = get_child(oldtnode, --i); + struct key_vector *node0, *node1; unsigned long j, k; /* An empty child */ @@ -534,8 +542,8 @@ static int inflate(struct trie *t, struct tnode *oldtnode) /* An internal node with two children */ if (inode->bits == 1) { - put_child(tn, 2 * i + 1, tnode_get_child(inode, 1)); - put_child(tn, 2 * i, tnode_get_child(inode, 0)); + put_child(tn, 2 * i + 1, get_child(inode, 1)); + put_child(tn, 2 * i, get_child(inode, 0)); continue; } @@ -564,11 +572,11 @@ static int inflate(struct trie *t, struct tnode *oldtnode) tnode_free_append(tn, node0); /* populate child pointers in new nodes */ - for (k = tnode_child_length(inode), j = k / 2; j;) { - put_child(node1, --j, tnode_get_child(inode, --k)); - put_child(node0, j, tnode_get_child(inode, j)); - put_child(node1, --j, tnode_get_child(inode, --k)); - put_child(node0, j, tnode_get_child(inode, j)); + for (k = child_length(inode), j = k / 2; j;) { + put_child(node1, --j, get_child(inode, --k)); + put_child(node0, j, get_child(inode, j)); + put_child(node1, --j, get_child(inode, --k)); + put_child(node0, j, get_child(inode, j)); } /* link new nodes to parent */ @@ -581,25 +589,25 @@ static int inflate(struct trie *t, struct tnode *oldtnode) } /* setup the parent pointers into and out of this node */ - replace(t, oldtnode, tn); - - return 0; + return replace(t, oldtnode, tn); nomem: /* all pointers should be clean so we are done */ tnode_free(tn); - return -ENOMEM; +notnode: + return NULL; } -static int halve(struct trie *t, struct tnode *oldtnode) +static struct key_vector *halve(struct trie *t, + struct key_vector *oldtnode) { - struct tnode *tn; + struct key_vector *tn; unsigned long i; pr_debug("In halve\n"); tn = tnode_new(oldtnode->key, oldtnode->pos + 1, oldtnode->bits - 1); if (!tn) - return -ENOMEM; + goto notnode; /* prepare oldtnode to be freed */ tnode_free_init(oldtnode); @@ -609,10 +617,10 @@ static int halve(struct trie *t, struct tnode *oldtnode) * point to existing tnodes and the links between our allocated * nodes. 
*/ - for (i = tnode_child_length(oldtnode); i;) { - struct tnode *node1 = tnode_get_child(oldtnode, --i); - struct tnode *node0 = tnode_get_child(oldtnode, --i); - struct tnode *inode; + for (i = child_length(oldtnode); i;) { + struct key_vector *node1 = get_child(oldtnode, --i); + struct key_vector *node0 = get_child(oldtnode, --i); + struct key_vector *inode; /* At least one of the children is empty */ if (!node1 || !node0) { @@ -622,10 +630,8 @@ static int halve(struct trie *t, struct tnode *oldtnode) /* Two nonempty children */ inode = tnode_new(node0->key, oldtnode->pos, 1); - if (!inode) { - tnode_free(tn); - return -ENOMEM; - } + if (!inode) + goto nomem; tnode_free_append(tn, inode); /* initialize pointers out of node */ @@ -638,30 +644,36 @@ static int halve(struct trie *t, struct tnode *oldtnode) } /* setup the parent pointers into and out of this node */ - replace(t, oldtnode, tn); - - return 0; + return replace(t, oldtnode, tn); +nomem: + /* all pointers should be clean so we are done */ + tnode_free(tn); +notnode: + return NULL; } -static void collapse(struct trie *t, struct tnode *oldtnode) +static struct key_vector *collapse(struct trie *t, + struct key_vector *oldtnode) { - struct tnode *n, *tp; + struct key_vector *n, *tp; unsigned long i; /* scan the tnode looking for that one child that might still exist */ - for (n = NULL, i = tnode_child_length(oldtnode); !n && i;) - n = tnode_get_child(oldtnode, --i); + for (n = NULL, i = child_length(oldtnode); !n && i;) + n = get_child(oldtnode, --i); /* compress one level */ tp = node_parent(oldtnode); - put_child_root(tp, t, oldtnode->key, n); + put_child_root(tp, oldtnode->key, n); node_set_parent(n, tp); /* drop dead node */ node_free(oldtnode); + + return tp; } -static unsigned char update_suffix(struct tnode *tn) +static unsigned char update_suffix(struct key_vector *tn) { unsigned char slen = tn->pos; unsigned long stride, i; @@ -671,8 +683,8 @@ static unsigned char update_suffix(struct tnode *tn) * why we start with a stride of 2 since a stride of 1 would * represent the nodes with suffix length equal to tn->pos */ - for (i = 0, stride = 0x2ul ; i < tnode_child_length(tn); i += stride) { - struct tnode *n = tnode_get_child(tn, i); + for (i = 0, stride = 0x2ul ; i < child_length(tn); i += stride) { + struct key_vector *n = get_child(tn, i); if (!n || (n->slen <= slen)) continue; @@ -704,12 +716,12 @@ static unsigned char update_suffix(struct tnode *tn) * * 'high' in this instance is the variable 'inflate_threshold'. It * is expressed as a percentage, so we multiply it with - * tnode_child_length() and instead of multiplying by 2 (since the + * child_length() and instead of multiplying by 2 (since the * child array will be doubled by inflate()) and multiplying * the left-hand side by 100 (to handle the percentage thing) we * multiply the left-hand side by 50. * - * The left-hand side may look a bit weird: tnode_child_length(tn) + * The left-hand side may look a bit weird: child_length(tn) * - tn->empty_children is of course the number of non-null children * in the current node. tn->full_children is the number of "full" * children, that is non-null tnodes with a skip value of 0. 
@@ -719,10 +731,10 @@ static unsigned char update_suffix(struct tnode *tn) * A clearer way to write this would be: * * to_be_doubled = tn->full_children; - * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children - + * not_to_be_doubled = child_length(tn) - tn->empty_children - * tn->full_children; * - * new_child_length = tnode_child_length(tn) * 2; + * new_child_length = child_length(tn) * 2; * * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) / * new_child_length; @@ -739,57 +751,57 @@ static unsigned char update_suffix(struct tnode *tn) * inflate_threshold * new_child_length * * expand not_to_be_doubled and to_be_doubled, and shorten: - * 100 * (tnode_child_length(tn) - tn->empty_children + + * 100 * (child_length(tn) - tn->empty_children + * tn->full_children) >= inflate_threshold * new_child_length * * expand new_child_length: - * 100 * (tnode_child_length(tn) - tn->empty_children + + * 100 * (child_length(tn) - tn->empty_children + * tn->full_children) >= - * inflate_threshold * tnode_child_length(tn) * 2 + * inflate_threshold * child_length(tn) * 2 * * shorten again: - * 50 * (tn->full_children + tnode_child_length(tn) - + * 50 * (tn->full_children + child_length(tn) - * tn->empty_children) >= inflate_threshold * - * tnode_child_length(tn) + * child_length(tn) * */ -static bool should_inflate(const struct tnode *tp, const struct tnode *tn) +static inline bool should_inflate(struct key_vector *tp, struct key_vector *tn) { - unsigned long used = tnode_child_length(tn); + unsigned long used = child_length(tn); unsigned long threshold = used; /* Keep root node larger */ - threshold *= tp ? inflate_threshold : inflate_threshold_root; - used -= tn->empty_children; - used += tn->full_children; + threshold *= IS_TRIE(tp) ? inflate_threshold_root : inflate_threshold; + used -= tn_info(tn)->empty_children; + used += tn_info(tn)->full_children; /* if bits == KEYLENGTH then pos = 0, and will fail below */ return (used > 1) && tn->pos && ((50 * used) >= threshold); } -static bool should_halve(const struct tnode *tp, const struct tnode *tn) +static inline bool should_halve(struct key_vector *tp, struct key_vector *tn) { - unsigned long used = tnode_child_length(tn); + unsigned long used = child_length(tn); unsigned long threshold = used; /* Keep root node larger */ - threshold *= tp ? halve_threshold : halve_threshold_root; - used -= tn->empty_children; + threshold *= IS_TRIE(tp) ? 
halve_threshold_root : halve_threshold; + used -= tn_info(tn)->empty_children; /* if bits == KEYLENGTH then used = 100% on wrap, and will fail below */ return (used > 1) && (tn->bits > 1) && ((100 * used) < threshold); } -static bool should_collapse(const struct tnode *tn) +static inline bool should_collapse(struct key_vector *tn) { - unsigned long used = tnode_child_length(tn); + unsigned long used = child_length(tn); - used -= tn->empty_children; + used -= tn_info(tn)->empty_children; /* account for bits == KEYLENGTH case */ - if ((tn->bits == KEYLENGTH) && tn->full_children) + if ((tn->bits == KEYLENGTH) && tn_info(tn)->full_children) used -= KEY_MAX; /* One child or none, time to drop us from the trie */ @@ -797,10 +809,13 @@ static bool should_collapse(const struct tnode *tn) } #define MAX_WORK 10 -static void resize(struct trie *t, struct tnode *tn) +static struct key_vector *resize(struct trie *t, struct key_vector *tn) { - struct tnode *tp = node_parent(tn); - struct tnode __rcu **cptr; +#ifdef CONFIG_IP_FIB_TRIE_STATS + struct trie_use_stats __percpu *stats = t->stats; +#endif + struct key_vector *tp = node_parent(tn); + unsigned long cindex = get_index(tn->key, tp); int max_work = MAX_WORK; pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n", @@ -810,183 +825,125 @@ static void resize(struct trie *t, struct tnode *tn) * doing it ourselves. This way we can let RCU fully do its * thing without us interfering */ - cptr = tp ? &tp->child[get_index(tn->key, tp)] : &t->trie; - BUG_ON(tn != rtnl_dereference(*cptr)); + BUG_ON(tn != get_child(tp, cindex)); /* Double as long as the resulting node has a number of * nonempty nodes that are above the threshold. */ while (should_inflate(tp, tn) && max_work) { - if (inflate(t, tn)) { + tp = inflate(t, tn); + if (!tp) { #ifdef CONFIG_IP_FIB_TRIE_STATS - this_cpu_inc(t->stats->resize_node_skipped); + this_cpu_inc(stats->resize_node_skipped); #endif break; } max_work--; - tn = rtnl_dereference(*cptr); + tn = get_child(tp, cindex); } /* Return if at least one inflate is run */ if (max_work != MAX_WORK) - return; + return node_parent(tn); /* Halve as long as the number of empty children in this * node is above threshold. 
*/ while (should_halve(tp, tn) && max_work) { - if (halve(t, tn)) { + tp = halve(t, tn); + if (!tp) { #ifdef CONFIG_IP_FIB_TRIE_STATS - this_cpu_inc(t->stats->resize_node_skipped); + this_cpu_inc(stats->resize_node_skipped); #endif break; } max_work--; - tn = rtnl_dereference(*cptr); + tn = get_child(tp, cindex); } /* Only one child remains */ - if (should_collapse(tn)) { - collapse(t, tn); - return; - } + if (should_collapse(tn)) + return collapse(t, tn); + + /* update parent in case inflate or halve failed */ + tp = node_parent(tn); /* Return if at least one deflate was run */ if (max_work != MAX_WORK) - return; + return tp; /* push the suffix length to the parent node */ if (tn->slen > tn->pos) { unsigned char slen = update_suffix(tn); - if (tp && (slen > tp->slen)) + if (slen > tp->slen) tp->slen = slen; } -} - -/* readside must use rcu_read_lock currently dump routines - via get_fa_head and dump */ - -static struct leaf_info *find_leaf_info(struct tnode *l, int plen) -{ - struct hlist_head *head = &l->list; - struct leaf_info *li; - hlist_for_each_entry_rcu(li, head, hlist) - if (li->plen == plen) - return li; - - return NULL; + return tp; } -static inline struct list_head *get_fa_head(struct tnode *l, int plen) +static void leaf_pull_suffix(struct key_vector *tp, struct key_vector *l) { - struct leaf_info *li = find_leaf_info(l, plen); - - if (!li) - return NULL; - - return &li->falh; -} - -static void leaf_pull_suffix(struct tnode *l) -{ - struct tnode *tp = node_parent(l); - - while (tp && (tp->slen > tp->pos) && (tp->slen > l->slen)) { + while ((tp->slen > tp->pos) && (tp->slen > l->slen)) { if (update_suffix(tp) > l->slen) break; tp = node_parent(tp); } } -static void leaf_push_suffix(struct tnode *l) +static void leaf_push_suffix(struct key_vector *tn, struct key_vector *l) { - struct tnode *tn = node_parent(l); - /* if this is a new leaf then tn will be NULL and we can sort * out parent suffix lengths as a part of trie_rebalance */ - while (tn && (tn->slen < l->slen)) { + while (tn->slen < l->slen) { tn->slen = l->slen; tn = node_parent(tn); } } -static void remove_leaf_info(struct tnode *l, struct leaf_info *old) -{ - /* record the location of the previous list_info entry */ - struct hlist_node **pprev = old->hlist.pprev; - struct leaf_info *li = hlist_entry(pprev, typeof(*li), hlist.next); - - /* remove the leaf info from the list */ - hlist_del_rcu(&old->hlist); - - /* only access li if it is pointing at the last valid hlist_node */ - if (hlist_empty(&l->list) || (*pprev)) - return; - - /* update the trie with the latest suffix length */ - l->slen = KEYLENGTH - li->plen; - leaf_pull_suffix(l); -} - -static void insert_leaf_info(struct tnode *l, struct leaf_info *new) +/* rcu_read_lock needs to be hold by caller from readside */ +static struct key_vector *fib_find_node(struct trie *t, + struct key_vector **tp, u32 key) { - struct hlist_head *head = &l->list; - struct leaf_info *li = NULL, *last = NULL; + struct key_vector *pn, *n = t->kv; + unsigned long index = 0; - if (hlist_empty(head)) { - hlist_add_head_rcu(&new->hlist, head); - } else { - hlist_for_each_entry(li, head, hlist) { - if (new->plen > li->plen) - break; - - last = li; - } - if (last) - hlist_add_behind_rcu(&new->hlist, &last->hlist); - else - hlist_add_before_rcu(&new->hlist, &li->hlist); - } - - /* if we added to the tail node then we need to update slen */ - if (l->slen < (KEYLENGTH - new->plen)) { - l->slen = KEYLENGTH - new->plen; - leaf_push_suffix(l); - } -} + do { + pn = n; + n = get_child_rcu(n, 
index); -/* rcu_read_lock needs to be hold by caller from readside */ -static struct tnode *fib_find_node(struct trie *t, u32 key) -{ - struct tnode *n = rcu_dereference_rtnl(t->trie); + if (!n) + break; - while (n) { - unsigned long index = get_index(key, n); + index = get_cindex(key, n); /* This bit of code is a bit tricky but it combines multiple * checks into a single check. The prefix consists of the * prefix plus zeros for the bits in the cindex. The index * is the difference between the key and this value. From * this we can actually derive several pieces of data. - * if (index & (~0ul << bits)) + * if (index >= (1ul << bits)) * we have a mismatch in skip bits and failed * else * we know the value is cindex + * + * This check is safe even if bits == KEYLENGTH due to the + * fact that we can only allocate a node with 32 bits if a + * long is greater than 32 bits. */ - if (index & (~0ul << n->bits)) - return NULL; - - /* we have found a leaf. Prefixes have already been compared */ - if (IS_LEAF(n)) + if (index >= (1ul << n->bits)) { + n = NULL; break; + } - n = tnode_get_child_rcu(n, index); - } + /* keep searching until we find a perfect match leaf or NULL */ + } while (IS_TNODE(n)); + + *tp = pn; return n; } @@ -994,14 +951,19 @@ static struct tnode *fib_find_node(struct trie *t, u32 key) /* Return the first fib alias matching TOS with * priority less than or equal to PRIO. */ -static struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio) +static struct fib_alias *fib_find_alias(struct hlist_head *fah, u8 slen, + u8 tos, u32 prio) { struct fib_alias *fa; if (!fah) return NULL; - list_for_each_entry(fa, fah, fa_list) { + hlist_for_each_entry(fa, fah, fa_list) { + if (fa->fa_slen < slen) + continue; + if (fa->fa_slen != slen) + break; if (fa->fa_tos > tos) continue; if (fa->fa_info->fib_priority >= prio || fa->fa_tos < tos) @@ -1011,77 +973,23 @@ static struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio) return NULL; } -static void trie_rebalance(struct trie *t, struct tnode *tn) +static void trie_rebalance(struct trie *t, struct key_vector *tn) { - struct tnode *tp; - - while ((tp = node_parent(tn)) != NULL) { - resize(t, tn); - tn = tp; - } - - /* Handle last (top) tnode */ - if (IS_TNODE(tn)) - resize(t, tn); + while (!IS_TRIE(tn)) + tn = resize(t, tn); } -/* only used from updater-side */ - -static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen) +static int fib_insert_node(struct trie *t, struct key_vector *tp, + struct fib_alias *new, t_key key) { - struct list_head *fa_head = NULL; - struct tnode *l, *n, *tp = NULL; - struct leaf_info *li; - - li = leaf_info_new(plen); - if (!li) - return NULL; - fa_head = &li->falh; - - n = rtnl_dereference(t->trie); - - /* If we point to NULL, stop. Either the tree is empty and we should - * just put a new leaf in if, or we have reached an empty child slot, - * and we should just put our new leaf in that. - * - * If we hit a node with a key that does't match then we should stop - * and create a new tnode to replace that node and insert ourselves - * and the other node into the new tnode. - */ - while (n) { - unsigned long index = get_index(key, n); - - /* This bit of code is a bit tricky but it combines multiple - * checks into a single check. The prefix consists of the - * prefix plus zeros for the "bits" in the prefix. The index - * is the difference between the key and this value. From - * this we can actually derive several pieces of data. 
- * if !(index >> bits) - * we know the value is child index - * else - * we have a mismatch in skip bits and failed - */ - if (index >> n->bits) - break; + struct key_vector *n, *l; - /* we have found a leaf. Prefixes have already been compared */ - if (IS_LEAF(n)) { - /* Case 1: n is a leaf, and prefixes match*/ - insert_leaf_info(n, li); - return fa_head; - } - - tp = n; - n = tnode_get_child_rcu(n, index); - } - - l = leaf_new(key); - if (!l) { - free_leaf_info(li); - return NULL; - } + l = leaf_new(key, new); + if (!l) + goto noleaf; - insert_leaf_info(l, li); + /* retrieve child from parent node */ + n = get_child(tp, get_index(key, tp)); /* Case 2: n is a LEAF or a TNODE and the key doesn't match. * @@ -1090,21 +998,18 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen) * leaves us in position for handling as case 3 */ if (n) { - struct tnode *tn; + struct key_vector *tn; tn = tnode_new(key, __fls(key ^ n->key), 1); - if (!tn) { - free_leaf_info(li); - node_free(l); - return NULL; - } + if (!tn) + goto notnode; /* initialize routes out of node */ NODE_INIT_PARENT(tn, tp); put_child(tn, get_index(key, tn) ^ 1, n); /* start adding routes into the node */ - put_child_root(tp, t, key, tn); + put_child_root(tp, key, tn); node_set_parent(n, tn); /* parent now has a NULL spot where the leaf can go */ @@ -1112,69 +1017,89 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen) } /* Case 3: n is NULL, and will just insert a new leaf */ - if (tp) { - NODE_INIT_PARENT(l, tp); - put_child(tp, get_index(key, tp), l); - trie_rebalance(t, tp); + NODE_INIT_PARENT(l, tp); + put_child_root(tp, key, l); + trie_rebalance(t, tp); + + return 0; +notnode: + node_free(l); +noleaf: + return -ENOMEM; +} + +static int fib_insert_alias(struct trie *t, struct key_vector *tp, + struct key_vector *l, struct fib_alias *new, + struct fib_alias *fa, t_key key) +{ + if (!l) + return fib_insert_node(t, tp, new, key); + + if (fa) { + hlist_add_before_rcu(&new->fa_list, &fa->fa_list); } else { - rcu_assign_pointer(t->trie, l); + struct fib_alias *last; + + hlist_for_each_entry(last, &l->leaf, fa_list) { + if (new->fa_slen < last->fa_slen) + break; + fa = last; + } + + if (fa) + hlist_add_behind_rcu(&new->fa_list, &fa->fa_list); + else + hlist_add_head_rcu(&new->fa_list, &l->leaf); + } + + /* if we added to the tail node then we need to update slen */ + if (l->slen < new->fa_slen) { + l->slen = new->fa_slen; + leaf_push_suffix(tp, l); } - return fa_head; + return 0; } -/* - * Caller must hold RTNL. - */ +/* Caller must hold RTNL. 
*/ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg) { - struct trie *t = (struct trie *) tb->tb_data; + struct trie *t = (struct trie *)tb->tb_data; struct fib_alias *fa, *new_fa; - struct list_head *fa_head = NULL; + struct key_vector *l, *tp; struct fib_info *fi; - int plen = cfg->fc_dst_len; + u8 plen = cfg->fc_dst_len; + u8 slen = KEYLENGTH - plen; u8 tos = cfg->fc_tos; - u32 key, mask; + u32 key; int err; - struct tnode *l; - if (plen > 32) + if (plen > KEYLENGTH) return -EINVAL; key = ntohl(cfg->fc_dst); pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen); - mask = ntohl(inet_make_mask(plen)); - - if (key & ~mask) + if ((plen < KEYLENGTH) && (key << plen)) return -EINVAL; - key = key & mask; - fi = fib_create_info(cfg); if (IS_ERR(fi)) { err = PTR_ERR(fi); goto err; } - l = fib_find_node(t, key); - fa = NULL; - - if (l) { - fa_head = get_fa_head(l, plen); - fa = fib_find_alias(fa_head, tos, fi->fib_priority); - } + l = fib_find_node(t, &tp, key); + fa = l ? fib_find_alias(&l->leaf, slen, tos, fi->fib_priority) : NULL; /* Now fa, if non-NULL, points to the first fib alias * with the same keys [prefix,tos,priority], if such key already * exists or to the node before which we will insert new one. * * If fa is NULL, we will need to allocate a new one and - * insert to the head of f. - * - * If f is NULL, no fib node matched the destination key - * and we need to allocate a new one of those as well. + * insert to the tail of the section matching the suffix length + * of the new alias. */ if (fa && fa->fa_tos == tos && @@ -1192,9 +1117,8 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg) */ fa_match = NULL; fa_first = fa; - fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list); - list_for_each_entry_continue(fa, fa_head, fa_list) { - if (fa->fa_tos != tos) + hlist_for_each_entry_from(fa, fa_list) { + if ((fa->fa_slen != slen) || (fa->fa_tos != tos)) break; if (fa->fa_info->fib_priority != fi->fib_priority) break; @@ -1226,8 +1150,20 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg) new_fa->fa_type = cfg->fc_type; state = fa->fa_state; new_fa->fa_state = state & ~FA_S_ACCESSED; + new_fa->fa_slen = fa->fa_slen; + + err = netdev_switch_fib_ipv4_add(key, plen, fi, + new_fa->fa_tos, + cfg->fc_type, + tb->tb_id); + if (err) { + netdev_switch_fib_ipv4_abort(fi); + kmem_cache_free(fn_alias_kmem, new_fa); + goto out; + } + + hlist_replace_rcu(&fa->fa_list, &new_fa->fa_list); - list_replace_rcu(&fa->fa_list, &new_fa->fa_list); alias_free_mem_rcu(fa); fib_release_info(fi_drop); @@ -1261,30 +1197,32 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg) new_fa->fa_tos = tos; new_fa->fa_type = cfg->fc_type; new_fa->fa_state = 0; - /* - * Insert new entry to the list. - */ - - if (!fa_head) { - fa_head = fib_insert_node(t, key, plen); - if (unlikely(!fa_head)) { - err = -ENOMEM; - goto out_free_new_fa; - } + new_fa->fa_slen = slen; + + /* (Optionally) offload fib entry to switch hardware. */ + err = netdev_switch_fib_ipv4_add(key, plen, fi, tos, + cfg->fc_type, tb->tb_id); + if (err) { + netdev_switch_fib_ipv4_abort(fi); + goto out_free_new_fa; } + /* Insert new entry to the list. */ + err = fib_insert_alias(t, tp, l, new_fa, fa, key); + if (err) + goto out_sw_fib_del; + if (!plen) tb->tb_num_default++; - list_add_tail_rcu(&new_fa->fa_list, - (fa ? 
&fa->fa_list : fa_head)); - rt_cache_flush(cfg->fc_nlinfo.nl_net); rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, &cfg->fc_nlinfo, 0); succeeded: return 0; +out_sw_fib_del: + netdev_switch_fib_ipv4_del(key, plen, fi, tos, cfg->fc_type, tb->tb_id); out_free_new_fa: kmem_cache_free(fn_alias_kmem, new_fa); out: @@ -1293,7 +1231,7 @@ err: return err; } -static inline t_key prefix_mismatch(t_key key, struct tnode *n) +static inline t_key prefix_mismatch(t_key key, struct key_vector *n) { t_key prefix = n->key; @@ -1309,11 +1247,15 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, struct trie_use_stats __percpu *stats = t->stats; #endif const t_key key = ntohl(flp->daddr); - struct tnode *n, *pn; - struct leaf_info *li; + struct key_vector *n, *pn; + struct fib_alias *fa; + unsigned long index; t_key cindex; - n = rcu_dereference(t->trie); + pn = t->kv; + cindex = 0; + + n = get_child_rcu(pn, cindex); if (!n) return -EAGAIN; @@ -1321,24 +1263,25 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, this_cpu_inc(stats->gets); #endif - pn = n; - cindex = 0; - /* Step 1: Travel to the longest prefix match in the trie */ for (;;) { - unsigned long index = get_index(key, n); + index = get_cindex(key, n); /* This bit of code is a bit tricky but it combines multiple * checks into a single check. The prefix consists of the * prefix plus zeros for the "bits" in the prefix. The index * is the difference between the key and this value. From * this we can actually derive several pieces of data. - * if (index & (~0ul << bits)) + * if (index >= (1ul << bits)) * we have a mismatch in skip bits and failed * else * we know the value is cindex + * + * This check is safe even if bits == KEYLENGTH due to the + * fact that we can only allocate a node with 32 bits if a + * long is greater than 32 bits. */ - if (index & (~0ul << n->bits)) + if (index >= (1ul << n->bits)) break; /* we have found a leaf. Prefixes have already been compared */ @@ -1353,7 +1296,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, cindex = index; } - n = tnode_get_child_rcu(n, index); + n = get_child_rcu(n, index); if (unlikely(!n)) goto backtrace; } @@ -1361,7 +1304,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, /* Step 2: Sort out leaves and begin backtracing for longest prefix */ for (;;) { /* record the pointer where our next node pointer is stored */ - struct tnode __rcu **cptr = n->child; + struct key_vector __rcu **cptr = n->tnode; /* This test verifies that none of the bits that differ * between the key and the prefix exist in the region of @@ -1393,13 +1336,17 @@ backtrace: while (!cindex) { t_key pkey = pn->key; - pn = node_parent_rcu(pn); - if (unlikely(!pn)) + /* If we don't have a parent then there is + * nothing for us to do as we do not have any + * further nodes to parse. 
+ */ + if (IS_TRIE(pn)) return -EAGAIN; #ifdef CONFIG_IP_FIB_TRIE_STATS this_cpu_inc(stats->backtrack); #endif /* Get Child's index */ + pn = node_parent_rcu(pn); cindex = get_index(pkey, pn); } @@ -1407,138 +1354,132 @@ backtrace: cindex &= cindex - 1; /* grab pointer for next child node */ - cptr = &pn->child[cindex]; + cptr = &pn->tnode[cindex]; } } found: + /* this line carries forward the xor from earlier in the function */ + index = key ^ n->key; + /* Step 3: Process the leaf, if that fails fall back to backtracing */ - hlist_for_each_entry_rcu(li, &n->list, hlist) { - struct fib_alias *fa; + hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) { + struct fib_info *fi = fa->fa_info; + int nhsel, err; - if ((key ^ n->key) & li->mask_plen) + if ((index >= (1ul << fa->fa_slen)) && + ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen != KEYLENGTH))) continue; - - list_for_each_entry_rcu(fa, &li->falh, fa_list) { - struct fib_info *fi = fa->fa_info; - int nhsel, err; - - if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos) - continue; - if (fi->fib_dead) - continue; - if (fa->fa_info->fib_scope < flp->flowi4_scope) - continue; - fib_alias_accessed(fa); - err = fib_props[fa->fa_type].error; - if (unlikely(err < 0)) { + if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos) + continue; + if (fi->fib_dead) + continue; + if (fa->fa_info->fib_scope < flp->flowi4_scope) + continue; + fib_alias_accessed(fa); + err = fib_props[fa->fa_type].error; + if (unlikely(err < 0)) { #ifdef CONFIG_IP_FIB_TRIE_STATS - this_cpu_inc(stats->semantic_match_passed); + this_cpu_inc(stats->semantic_match_passed); #endif - return err; - } - if (fi->fib_flags & RTNH_F_DEAD) + return err; + } + if (fi->fib_flags & RTNH_F_DEAD) + continue; + for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) { + const struct fib_nh *nh = &fi->fib_nh[nhsel]; + + if (nh->nh_flags & RTNH_F_DEAD) + continue; + if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif) continue; - for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) { - const struct fib_nh *nh = &fi->fib_nh[nhsel]; - - if (nh->nh_flags & RTNH_F_DEAD) - continue; - if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif) - continue; - - if (!(fib_flags & FIB_LOOKUP_NOREF)) - atomic_inc(&fi->fib_clntref); - - res->prefixlen = li->plen; - res->nh_sel = nhsel; - res->type = fa->fa_type; - res->scope = fi->fib_scope; - res->fi = fi; - res->table = tb; - res->fa_head = &li->falh; + + if (!(fib_flags & FIB_LOOKUP_NOREF)) + atomic_inc(&fi->fib_clntref); + + res->prefixlen = KEYLENGTH - fa->fa_slen; + res->nh_sel = nhsel; + res->type = fa->fa_type; + res->scope = fi->fib_scope; + res->fi = fi; + res->table = tb; + res->fa_head = &n->leaf; #ifdef CONFIG_IP_FIB_TRIE_STATS - this_cpu_inc(stats->semantic_match_passed); + this_cpu_inc(stats->semantic_match_passed); #endif - return err; - } + return err; } - + } #ifdef CONFIG_IP_FIB_TRIE_STATS - this_cpu_inc(stats->semantic_match_miss); + this_cpu_inc(stats->semantic_match_miss); #endif - } goto backtrace; } EXPORT_SYMBOL_GPL(fib_table_lookup); -/* - * Remove the leaf and return parent. 
- */ -static void trie_leaf_remove(struct trie *t, struct tnode *l) +static void fib_remove_alias(struct trie *t, struct key_vector *tp, + struct key_vector *l, struct fib_alias *old) { - struct tnode *tp = node_parent(l); + /* record the location of the previous list_info entry */ + struct hlist_node **pprev = old->fa_list.pprev; + struct fib_alias *fa = hlist_entry(pprev, typeof(*fa), fa_list.next); - pr_debug("entering trie_leaf_remove(%p)\n", l); + /* remove the fib_alias from the list */ + hlist_del_rcu(&old->fa_list); - if (tp) { - put_child(tp, get_index(l->key, tp), NULL); + /* if we emptied the list this leaf will be freed and we can sort + * out parent suffix lengths as a part of trie_rebalance + */ + if (hlist_empty(&l->leaf)) { + put_child_root(tp, l->key, NULL); + node_free(l); trie_rebalance(t, tp); - } else { - RCU_INIT_POINTER(t->trie, NULL); + return; } - node_free(l); + /* only access fa if it is pointing at the last valid hlist_node */ + if (*pprev) + return; + + /* update the trie with the latest suffix length */ + l->slen = fa->fa_slen; + leaf_pull_suffix(tp, l); } -/* - * Caller must hold RTNL. - */ +/* Caller must hold RTNL. */ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg) { struct trie *t = (struct trie *) tb->tb_data; - u32 key, mask; - int plen = cfg->fc_dst_len; - u8 tos = cfg->fc_tos; struct fib_alias *fa, *fa_to_delete; - struct list_head *fa_head; - struct tnode *l; - struct leaf_info *li; + struct key_vector *l, *tp; + u8 plen = cfg->fc_dst_len; + u8 slen = KEYLENGTH - plen; + u8 tos = cfg->fc_tos; + u32 key; - if (plen > 32) + if (plen > KEYLENGTH) return -EINVAL; key = ntohl(cfg->fc_dst); - mask = ntohl(inet_make_mask(plen)); - if (key & ~mask) + if ((plen < KEYLENGTH) && (key << plen)) return -EINVAL; - key = key & mask; - l = fib_find_node(t, key); - + l = fib_find_node(t, &tp, key); if (!l) return -ESRCH; - li = find_leaf_info(l, plen); - - if (!li) - return -ESRCH; - - fa_head = &li->falh; - fa = fib_find_alias(fa_head, tos, 0); - + fa = fib_find_alias(&l->leaf, slen, tos, 0); if (!fa) return -ESRCH; pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t); fa_to_delete = NULL; - fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list); - list_for_each_entry_continue(fa, fa_head, fa_list) { + hlist_for_each_entry_from(fa, fa_list) { struct fib_info *fi = fa->fa_info; - if (fa->fa_tos != tos) + if ((fa->fa_slen != slen) || (fa->fa_tos != tos)) break; if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) && @@ -1557,172 +1498,215 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg) if (!fa_to_delete) return -ESRCH; - fa = fa_to_delete; - rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id, - &cfg->fc_nlinfo, 0); + netdev_switch_fib_ipv4_del(key, plen, fa_to_delete->fa_info, tos, + cfg->fc_type, tb->tb_id); - list_del_rcu(&fa->fa_list); + rtmsg_fib(RTM_DELROUTE, htonl(key), fa_to_delete, plen, tb->tb_id, + &cfg->fc_nlinfo, 0); if (!plen) tb->tb_num_default--; - if (list_empty(fa_head)) { - remove_leaf_info(l, li); - free_leaf_info(li); - } - - if (hlist_empty(&l->list)) - trie_leaf_remove(t, l); + fib_remove_alias(t, tp, l, fa_to_delete); - if (fa->fa_state & FA_S_ACCESSED) + if (fa_to_delete->fa_state & FA_S_ACCESSED) rt_cache_flush(cfg->fc_nlinfo.nl_net); - fib_release_info(fa->fa_info); - alias_free_mem_rcu(fa); + fib_release_info(fa_to_delete->fa_info); + alias_free_mem_rcu(fa_to_delete); return 0; } -static int trie_flush_list(struct list_head *head) +/* Scan for the next leaf starting at the provided 
key value */ +static struct key_vector *leaf_walk_rcu(struct key_vector **tn, t_key key) { - struct fib_alias *fa, *fa_node; - int found = 0; + struct key_vector *pn, *n = *tn; + unsigned long cindex; - list_for_each_entry_safe(fa, fa_node, head, fa_list) { - struct fib_info *fi = fa->fa_info; + /* this loop is meant to try and find the key in the trie */ + do { + /* record parent and next child index */ + pn = n; + cindex = get_index(key, pn); - if (fi && (fi->fib_flags & RTNH_F_DEAD)) { - list_del_rcu(&fa->fa_list); - fib_release_info(fa->fa_info); - alias_free_mem_rcu(fa); - found++; - } - } - return found; -} + if (cindex >> pn->bits) + break; -static int trie_flush_leaf(struct tnode *l) -{ - int found = 0; - struct hlist_head *lih = &l->list; - struct hlist_node *tmp; - struct leaf_info *li = NULL; - unsigned char plen = KEYLENGTH; + /* descend into the next child */ + n = get_child_rcu(pn, cindex++); + if (!n) + break; + + /* guarantee forward progress on the keys */ + if (IS_LEAF(n) && (n->key >= key)) + goto found; + } while (IS_TNODE(n)); - hlist_for_each_entry_safe(li, tmp, lih, hlist) { - found += trie_flush_list(&li->falh); + /* this loop will search for the next leaf with a greater key */ + while (!IS_TRIE(pn)) { + /* if we exhausted the parent node we will need to climb */ + if (cindex >= (1ul << pn->bits)) { + t_key pkey = pn->key; - if (list_empty(&li->falh)) { - hlist_del_rcu(&li->hlist); - free_leaf_info(li); + pn = node_parent_rcu(pn); + cindex = get_index(pkey, pn) + 1; continue; } - plen = li->plen; - } + /* grab the next available node */ + n = get_child_rcu(pn, cindex++); + if (!n) + continue; - l->slen = KEYLENGTH - plen; + /* no need to compare keys since we bumped the index */ + if (IS_LEAF(n)) + goto found; - return found; + /* Rescan start scanning in new node */ + pn = n; + cindex = 0; + } + + *tn = pn; + return NULL; /* Root of trie */ +found: + /* if we are at the limit for keys just return NULL for the tnode */ + *tn = pn; + return n; } -/* - * Scan for the next right leaf starting at node p->child[idx] - * Since we have back pointer, no recursion necessary. - */ -static struct tnode *leaf_walk_rcu(struct tnode *p, struct tnode *c) +/* Caller must hold RTNL */ +void fib_table_flush_external(struct fib_table *tb) { - do { - unsigned long idx = c ? 
idx = get_index(c->key, p) + 1 : 0; + struct trie *t = (struct trie *)tb->tb_data; + struct key_vector *pn = t->kv; + unsigned long cindex = 1; + struct hlist_node *tmp; + struct fib_alias *fa; - while (idx < tnode_child_length(p)) { - c = tnode_get_child_rcu(p, idx++); - if (!c) - continue; + /* walk trie in reverse order */ + for (;;) { + struct key_vector *n; + + if (!(cindex--)) { + t_key pkey = pn->key; - if (IS_LEAF(c)) - return c; + /* cannot resize the trie vector */ + if (IS_TRIE(pn)) + break; + + /* no need to resize like in flush below */ + pn = node_parent(pn); + cindex = get_index(pkey, pn); - /* Rescan start scanning in new node */ - p = c; - idx = 0; + continue; } - /* Node empty, walk back up to parent */ - c = p; - } while ((p = node_parent_rcu(c)) != NULL); + /* grab the next available node */ + n = get_child(pn, cindex); + if (!n) + continue; - return NULL; /* Root of trie */ -} + if (IS_TNODE(n)) { + /* record pn and cindex for leaf walking */ + pn = n; + cindex = 1ul << n->bits; -static struct tnode *trie_firstleaf(struct trie *t) -{ - struct tnode *n = rcu_dereference_rtnl(t->trie); + continue; + } - if (!n) - return NULL; + hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { + struct fib_info *fi = fa->fa_info; - if (IS_LEAF(n)) /* trie is just a leaf */ - return n; + if (!fi || !(fi->fib_flags & RTNH_F_EXTERNAL)) + continue; - return leaf_walk_rcu(n, NULL); + netdev_switch_fib_ipv4_del(n->key, + KEYLENGTH - fa->fa_slen, + fi, fa->fa_tos, + fa->fa_type, tb->tb_id); + } + } } -static struct tnode *trie_nextleaf(struct tnode *l) +/* Caller must hold RTNL. */ +int fib_table_flush(struct fib_table *tb) { - struct tnode *p = node_parent_rcu(l); + struct trie *t = (struct trie *)tb->tb_data; + struct key_vector *pn = t->kv; + unsigned long cindex = 1; + struct hlist_node *tmp; + struct fib_alias *fa; + int found = 0; - if (!p) - return NULL; /* trie with just one leaf */ + /* walk trie in reverse order */ + for (;;) { + unsigned char slen = 0; + struct key_vector *n; - return leaf_walk_rcu(p, l); -} + if (!(cindex--)) { + t_key pkey = pn->key; -static struct tnode *trie_leafindex(struct trie *t, int index) -{ - struct tnode *l = trie_firstleaf(t); + /* cannot resize the trie vector */ + if (IS_TRIE(pn)) + break; - while (l && index-- > 0) - l = trie_nextleaf(l); + /* resize completed node */ + pn = resize(t, pn); + cindex = get_index(pkey, pn); - return l; -} + continue; + } + /* grab the next available node */ + n = get_child(pn, cindex); + if (!n) + continue; -/* - * Caller must hold RTNL. 
- */ -int fib_table_flush(struct fib_table *tb) -{ - struct trie *t = (struct trie *) tb->tb_data; - struct tnode *l, *ll = NULL; - int found = 0; + if (IS_TNODE(n)) { + /* record pn and cindex for leaf walking */ + pn = n; + cindex = 1ul << n->bits; - for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) { - found += trie_flush_leaf(l); + continue; + } - if (ll) { - if (hlist_empty(&ll->list)) - trie_leaf_remove(t, ll); - else - leaf_pull_suffix(ll); + hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { + struct fib_info *fi = fa->fa_info; + + if (!fi || !(fi->fib_flags & RTNH_F_DEAD)) { + slen = fa->fa_slen; + continue; + } + + netdev_switch_fib_ipv4_del(n->key, + KEYLENGTH - fa->fa_slen, + fi, fa->fa_tos, + fa->fa_type, tb->tb_id); + hlist_del_rcu(&fa->fa_list); + fib_release_info(fa->fa_info); + alias_free_mem_rcu(fa); + found++; } - ll = l; - } + /* update leaf slen */ + n->slen = slen; - if (ll) { - if (hlist_empty(&ll->list)) - trie_leaf_remove(t, ll); - else - leaf_pull_suffix(ll); + if (hlist_empty(&n->leaf)) { + put_child_root(pn, n->key, NULL); + node_free(n); + } else { + leaf_pull_suffix(pn, n); + } } pr_debug("trie_flush found=%d\n", found); return found; } -void fib_free_table(struct fib_table *tb) +static void __trie_free_rcu(struct rcu_head *head) { + struct fib_table *tb = container_of(head, struct fib_table, rcu); #ifdef CONFIG_IP_FIB_TRIE_STATS struct trie *t = (struct trie *)tb->tb_data; @@ -1731,20 +1715,23 @@ void fib_free_table(struct fib_table *tb) kfree(tb); } -static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah, - struct fib_table *tb, - struct sk_buff *skb, struct netlink_callback *cb) +void fib_free_table(struct fib_table *tb) +{ + call_rcu(&tb->rcu, __trie_free_rcu); +} + +static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb, + struct sk_buff *skb, struct netlink_callback *cb) { - int i, s_i; + __be32 xkey = htonl(l->key); struct fib_alias *fa; - __be32 xkey = htonl(key); + int i, s_i; - s_i = cb->args[5]; + s_i = cb->args[4]; i = 0; /* rcu_read_lock is hold by caller */ - - list_for_each_entry_rcu(fa, fah, fa_list) { + hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { if (i < s_i) { i++; continue; @@ -1756,41 +1743,9 @@ static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah, tb->tb_id, fa->fa_type, xkey, - plen, + KEYLENGTH - fa->fa_slen, fa->fa_tos, fa->fa_info, NLM_F_MULTI) < 0) { - cb->args[5] = i; - return -1; - } - i++; - } - cb->args[5] = i; - return skb->len; -} - -static int fn_trie_dump_leaf(struct tnode *l, struct fib_table *tb, - struct sk_buff *skb, struct netlink_callback *cb) -{ - struct leaf_info *li; - int i, s_i; - - s_i = cb->args[4]; - i = 0; - - /* rcu_read_lock is hold by caller */ - hlist_for_each_entry_rcu(li, &l->list, hlist) { - if (i < s_i) { - i++; - continue; - } - - if (i > s_i) - cb->args[5] = 0; - - if (list_empty(&li->falh)) - continue; - - if (fn_trie_dump_fa(l->key, li->plen, &li->falh, tb, skb, cb) < 0) { cb->args[4] = i; return -1; } @@ -1801,44 +1756,38 @@ static int fn_trie_dump_leaf(struct tnode *l, struct fib_table *tb, return skb->len; } +/* rcu_read_lock needs to be hold by caller from readside */ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb, struct netlink_callback *cb) { - struct tnode *l; - struct trie *t = (struct trie *) tb->tb_data; - t_key key = cb->args[2]; - int count = cb->args[3]; - - rcu_read_lock(); + struct trie *t = (struct trie *)tb->tb_data; + struct key_vector *l, *tp = t->kv; /* Dump starting at last key. 
* Note: 0.0.0.0/0 (ie default) is first key. */ - if (count == 0) - l = trie_firstleaf(t); - else { - /* Normally, continue from last key, but if that is missing - * fallback to using slow rescan - */ - l = fib_find_node(t, key); - if (!l) - l = trie_leafindex(t, count); - } + int count = cb->args[2]; + t_key key = cb->args[3]; - while (l) { - cb->args[2] = l->key; + while ((l = leaf_walk_rcu(&tp, key)) != NULL) { if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) { - cb->args[3] = count; - rcu_read_unlock(); + cb->args[3] = key; + cb->args[2] = count; return -1; } ++count; - l = trie_nextleaf(l); + key = l->key + 1; + memset(&cb->args[4], 0, sizeof(cb->args) - 4*sizeof(cb->args[0])); + + /* stop loop if key wrapped back to 0 */ + if (key < l->key) + break; } - cb->args[3] = count; - rcu_read_unlock(); + + cb->args[3] = key; + cb->args[2] = count; return skb->len; } @@ -1850,19 +1799,16 @@ void __init fib_trie_init(void) 0, SLAB_PANIC, NULL); trie_leaf_kmem = kmem_cache_create("ip_fib_trie", - max(sizeof(struct tnode), - sizeof(struct leaf_info)), + LEAF_SIZE, 0, SLAB_PANIC, NULL); } - struct fib_table *fib_trie_table(u32 id) { struct fib_table *tb; struct trie *t; - tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie), - GFP_KERNEL); + tb = kzalloc(sizeof(*tb) + sizeof(struct trie), GFP_KERNEL); if (tb == NULL) return NULL; @@ -1871,7 +1817,8 @@ struct fib_table *fib_trie_table(u32 id) tb->tb_num_default = 0; t = (struct trie *) tb->tb_data; - RCU_INIT_POINTER(t->trie, NULL); + t->kv[0].pos = KEYLENGTH; + t->kv[0].slen = KEYLENGTH; #ifdef CONFIG_IP_FIB_TRIE_STATS t->stats = alloc_percpu(struct trie_use_stats); if (!t->stats) { @@ -1888,65 +1835,63 @@ struct fib_table *fib_trie_table(u32 id) struct fib_trie_iter { struct seq_net_private p; struct fib_table *tb; - struct tnode *tnode; + struct key_vector *tnode; unsigned int index; unsigned int depth; }; -static struct tnode *fib_trie_get_next(struct fib_trie_iter *iter) +static struct key_vector *fib_trie_get_next(struct fib_trie_iter *iter) { unsigned long cindex = iter->index; - struct tnode *tn = iter->tnode; - struct tnode *p; - - /* A single entry routing table */ - if (!tn) - return NULL; + struct key_vector *pn = iter->tnode; + t_key pkey; pr_debug("get_next iter={node=%p index=%d depth=%d}\n", iter->tnode, iter->index, iter->depth); -rescan: - while (cindex < tnode_child_length(tn)) { - struct tnode *n = tnode_get_child_rcu(tn, cindex); - if (n) { + while (!IS_TRIE(pn)) { + while (cindex < child_length(pn)) { + struct key_vector *n = get_child_rcu(pn, cindex++); + + if (!n) + continue; + if (IS_LEAF(n)) { - iter->tnode = tn; - iter->index = cindex + 1; + iter->tnode = pn; + iter->index = cindex; } else { /* push down one level */ iter->tnode = n; iter->index = 0; ++iter->depth; } + return n; } - ++cindex; - } - - /* Current node exhausted, pop back up */ - p = node_parent_rcu(tn); - if (p) { - cindex = get_index(tn->key, p) + 1; - tn = p; + /* Current node exhausted, pop back up */ + pkey = pn->key; + pn = node_parent_rcu(pn); + cindex = get_index(pkey, pn) + 1; --iter->depth; - goto rescan; } - /* got root? 
*/ + /* record root node so further searches know we are done */ + iter->tnode = pn; + iter->index = 0; + return NULL; } -static struct tnode *fib_trie_get_first(struct fib_trie_iter *iter, - struct trie *t) +static struct key_vector *fib_trie_get_first(struct fib_trie_iter *iter, + struct trie *t) { - struct tnode *n; + struct key_vector *n, *pn = t->kv; if (!t) return NULL; - n = rcu_dereference(t->trie); + n = rcu_dereference(pn->tnode[0]); if (!n) return NULL; @@ -1955,7 +1900,7 @@ static struct tnode *fib_trie_get_first(struct fib_trie_iter *iter, iter->index = 0; iter->depth = 1; } else { - iter->tnode = NULL; + iter->tnode = pn; iter->index = 0; iter->depth = 0; } @@ -1965,7 +1910,7 @@ static struct tnode *fib_trie_get_first(struct fib_trie_iter *iter, static void trie_collect_stats(struct trie *t, struct trie_stat *s) { - struct tnode *n; + struct key_vector *n; struct fib_trie_iter iter; memset(s, 0, sizeof(*s)); @@ -1973,20 +1918,20 @@ static void trie_collect_stats(struct trie *t, struct trie_stat *s) rcu_read_lock(); for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) { if (IS_LEAF(n)) { - struct leaf_info *li; + struct fib_alias *fa; s->leaves++; s->totdepth += iter.depth; if (iter.depth > s->maxdepth) s->maxdepth = iter.depth; - hlist_for_each_entry_rcu(li, &n->list, hlist) + hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) ++s->prefixes; } else { s->tnodes++; if (n->bits < MAX_STAT_DEPTH) s->nodesizes[n->bits]++; - s->nullpointers += n->empty_children; + s->nullpointers += tn_info(n)->empty_children; } } rcu_read_unlock(); @@ -2009,13 +1954,13 @@ static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat) seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth); seq_printf(seq, "\tLeaves: %u\n", stat->leaves); - bytes = sizeof(struct tnode) * stat->leaves; + bytes = LEAF_SIZE * stat->leaves; seq_printf(seq, "\tPrefixes: %u\n", stat->prefixes); - bytes += sizeof(struct leaf_info) * stat->prefixes; + bytes += sizeof(struct fib_alias) * stat->prefixes; seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes); - bytes += sizeof(struct tnode) * stat->tnodes; + bytes += TNODE_SIZE(0) * stat->tnodes; max = MAX_STAT_DEPTH; while (max > 0 && stat->nodesizes[max-1] == 0) @@ -2030,7 +1975,7 @@ static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat) seq_putc(seq, '\n'); seq_printf(seq, "\tPointers: %u\n", pointers); - bytes += sizeof(struct tnode *) * pointers; + bytes += sizeof(struct key_vector *) * pointers; seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers); seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024); } @@ -2084,7 +2029,7 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "Basic info: size of leaf:" " %Zd bytes, size of tnode: %Zd bytes.\n", - sizeof(struct tnode), sizeof(struct tnode)); + LEAF_SIZE, TNODE_SIZE(0)); for (h = 0; h < FIB_TABLE_HASHSZ; h++) { struct hlist_head *head = &net->ipv4.fib_table_hash[h]; @@ -2123,7 +2068,7 @@ static const struct file_operations fib_triestat_fops = { .release = single_release_net, }; -static struct tnode *fib_trie_get_idx(struct seq_file *seq, loff_t pos) +static struct key_vector *fib_trie_get_idx(struct seq_file *seq, loff_t pos) { struct fib_trie_iter *iter = seq->private; struct net *net = seq_file_net(seq); @@ -2135,7 +2080,7 @@ static struct tnode *fib_trie_get_idx(struct seq_file *seq, loff_t pos) struct fib_table *tb; hlist_for_each_entry_rcu(tb, head, tb_hlist) { - struct tnode *n; + struct key_vector *n; for (n = 
fib_trie_get_first(iter, (struct trie *) tb->tb_data); @@ -2164,7 +2109,7 @@ static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos) struct fib_table *tb = iter->tb; struct hlist_node *tb_node; unsigned int h; - struct tnode *n; + struct key_vector *n; ++*pos; /* next node in same table */ @@ -2250,9 +2195,9 @@ static inline const char *rtn_type(char *buf, size_t len, unsigned int t) static int fib_trie_seq_show(struct seq_file *seq, void *v) { const struct fib_trie_iter *iter = seq->private; - struct tnode *n = v; + struct key_vector *n = v; - if (!node_parent_rcu(n)) + if (IS_TRIE(node_parent_rcu(n))) fib_table_print(seq, iter->tb); if (IS_TNODE(n)) { @@ -2261,30 +2206,28 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v) seq_indent(seq, iter->depth-1); seq_printf(seq, " +-- %pI4/%zu %u %u %u\n", &prf, KEYLENGTH - n->pos - n->bits, n->bits, - n->full_children, n->empty_children); + tn_info(n)->full_children, + tn_info(n)->empty_children); } else { - struct leaf_info *li; __be32 val = htonl(n->key); + struct fib_alias *fa; seq_indent(seq, iter->depth); seq_printf(seq, " |-- %pI4\n", &val); - hlist_for_each_entry_rcu(li, &n->list, hlist) { - struct fib_alias *fa; - - list_for_each_entry_rcu(fa, &li->falh, fa_list) { - char buf1[32], buf2[32]; - - seq_indent(seq, iter->depth+1); - seq_printf(seq, " /%d %s %s", li->plen, - rtn_scope(buf1, sizeof(buf1), - fa->fa_info->fib_scope), - rtn_type(buf2, sizeof(buf2), - fa->fa_type)); - if (fa->fa_tos) - seq_printf(seq, " tos=%d", fa->fa_tos); - seq_putc(seq, '\n'); - } + hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) { + char buf1[32], buf2[32]; + + seq_indent(seq, iter->depth + 1); + seq_printf(seq, " /%zu %s %s", + KEYLENGTH - fa->fa_slen, + rtn_scope(buf1, sizeof(buf1), + fa->fa_info->fib_scope), + rtn_type(buf2, sizeof(buf2), + fa->fa_type)); + if (fa->fa_tos) + seq_printf(seq, " tos=%d", fa->fa_tos); + seq_putc(seq, '\n'); } } @@ -2314,31 +2257,47 @@ static const struct file_operations fib_trie_fops = { struct fib_route_iter { struct seq_net_private p; - struct trie *main_trie; + struct fib_table *main_tb; + struct key_vector *tnode; loff_t pos; t_key key; }; -static struct tnode *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos) +static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter, + loff_t pos) { - struct tnode *l = NULL; - struct trie *t = iter->main_trie; + struct fib_table *tb = iter->main_tb; + struct key_vector *l, **tp = &iter->tnode; + struct trie *t; + t_key key; - /* use cache location of last found key */ - if (iter->pos > 0 && pos >= iter->pos && (l = fib_find_node(t, iter->key))) + /* use cache location of next-to-find key */ + if (iter->pos > 0 && pos >= iter->pos) { pos -= iter->pos; - else { + key = iter->key; + } else { + t = (struct trie *)tb->tb_data; + iter->tnode = t->kv; iter->pos = 0; - l = trie_firstleaf(t); + key = 0; } - while (l && pos-- > 0) { + while ((l = leaf_walk_rcu(tp, key)) != NULL) { + key = l->key + 1; iter->pos++; - l = trie_nextleaf(l); + + if (pos-- <= 0) + break; + + l = NULL; + + /* handle unlikely case of a key wrap */ + if (!key) + break; } if (l) - iter->key = pos; /* remember it */ + iter->key = key; /* remember it */ else iter->pos = 0; /* forget it */ @@ -2350,37 +2309,46 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos) { struct fib_route_iter *iter = seq->private; struct fib_table *tb; + struct trie *t; rcu_read_lock(); + tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN); if (!tb) return NULL; - iter->main_trie 
= (struct trie *) tb->tb_data; - if (*pos == 0) - return SEQ_START_TOKEN; - else - return fib_route_get_idx(iter, *pos - 1); + iter->main_tb = tb; + + if (*pos != 0) + return fib_route_get_idx(iter, *pos); + + t = (struct trie *)tb->tb_data; + iter->tnode = t->kv; + iter->pos = 0; + iter->key = 0; + + return SEQ_START_TOKEN; } static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct fib_route_iter *iter = seq->private; - struct tnode *l = v; + struct key_vector *l = NULL; + t_key key = iter->key; ++*pos; - if (v == SEQ_START_TOKEN) { - iter->pos = 0; - l = trie_firstleaf(iter->main_trie); - } else { + + /* only allow key of 0 for start of sequence */ + if ((v == SEQ_START_TOKEN) || key) + l = leaf_walk_rcu(&iter->tnode, key); + + if (l) { + iter->key = l->key + 1; iter->pos++; - l = trie_nextleaf(l); + } else { + iter->pos = 0; } - if (l) - iter->key = l->key; - else - iter->pos = 0; return l; } @@ -2412,8 +2380,9 @@ static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info */ static int fib_route_seq_show(struct seq_file *seq, void *v) { - struct tnode *l = v; - struct leaf_info *li; + struct fib_alias *fa; + struct key_vector *l = v; + __be32 prefix; if (v == SEQ_START_TOKEN) { seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway " @@ -2422,45 +2391,40 @@ static int fib_route_seq_show(struct seq_file *seq, void *v) return 0; } - hlist_for_each_entry_rcu(li, &l->list, hlist) { - struct fib_alias *fa; - __be32 mask, prefix; + prefix = htonl(l->key); - mask = inet_make_mask(li->plen); - prefix = htonl(l->key); + hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { + const struct fib_info *fi = fa->fa_info; + __be32 mask = inet_make_mask(KEYLENGTH - fa->fa_slen); + unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi); - list_for_each_entry_rcu(fa, &li->falh, fa_list) { - const struct fib_info *fi = fa->fa_info; - unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi); + if ((fa->fa_type == RTN_BROADCAST) || + (fa->fa_type == RTN_MULTICAST)) + continue; - if (fa->fa_type == RTN_BROADCAST - || fa->fa_type == RTN_MULTICAST) - continue; + seq_setwidth(seq, 127); + + if (fi) + seq_printf(seq, + "%s\t%08X\t%08X\t%04X\t%d\t%u\t" + "%d\t%08X\t%d\t%u\t%u", + fi->fib_dev ? fi->fib_dev->name : "*", + prefix, + fi->fib_nh->nh_gw, flags, 0, 0, + fi->fib_priority, + mask, + (fi->fib_advmss ? + fi->fib_advmss + 40 : 0), + fi->fib_window, + fi->fib_rtt >> 3); + else + seq_printf(seq, + "*\t%08X\t%08X\t%04X\t%d\t%u\t" + "%d\t%08X\t%d\t%u\t%u", + prefix, 0, flags, 0, 0, 0, + mask, 0, 0, 0); - seq_setwidth(seq, 127); - - if (fi) - seq_printf(seq, - "%s\t%08X\t%08X\t%04X\t%d\t%u\t" - "%d\t%08X\t%d\t%u\t%u", - fi->fib_dev ? fi->fib_dev->name : "*", - prefix, - fi->fib_nh->nh_gw, flags, 0, 0, - fi->fib_priority, - mask, - (fi->fib_advmss ? 
- fi->fib_advmss + 40 : 0), - fi->fib_window, - fi->fib_rtt >> 3); - else - seq_printf(seq, - "*\t%08X\t%08X\t%04X\t%d\t%u\t" - "%d\t%08X\t%d\t%u\t%u", - prefix, 0, flags, 0, 0, 0, - mask, 0, 0, 0); - - seq_pad(seq, '\n'); - } + seq_pad(seq, '\n'); } return 0; diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 666cf36..5cb1ef4 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -97,6 +97,7 @@ #include <net/route.h> #include <net/sock.h> #include <net/checksum.h> +#include <net/inet_common.h> #include <linux/netfilter_ipv4.h> #ifdef CONFIG_IP_MROUTE #include <linux/mroute.h> @@ -1849,30 +1850,25 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc) pmc->sfcount[MCAST_EXCLUDE] = 1; } - -/* - * Join a multicast group - */ -int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr) +int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr) { - int err; __be32 addr = imr->imr_multiaddr.s_addr; - struct ip_mc_socklist *iml = NULL, *i; + struct ip_mc_socklist *iml, *i; struct in_device *in_dev; struct inet_sock *inet = inet_sk(sk); struct net *net = sock_net(sk); int ifindex; int count = 0; + int err; + + ASSERT_RTNL(); if (!ipv4_is_multicast(addr)) return -EINVAL; - rtnl_lock(); - in_dev = ip_mc_find_dev(net, imr); if (!in_dev) { - iml = NULL; err = -ENODEV; goto done; } @@ -1900,9 +1896,22 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr) ip_mc_inc_group(in_dev, addr); err = 0; done: - rtnl_unlock(); return err; } +EXPORT_SYMBOL(__ip_mc_join_group); + +/* Join a multicast group + */ +int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr) +{ + int ret; + + rtnl_lock(); + ret = __ip_mc_join_group(sk, imr); + rtnl_unlock(); + + return ret; +} EXPORT_SYMBOL(ip_mc_join_group); static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml, @@ -1925,11 +1934,7 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml, return err; } -/* - * Ask a socket to leave a group. 
- */ - -int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) +int __ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) { struct inet_sock *inet = inet_sk(sk); struct ip_mc_socklist *iml; @@ -1940,7 +1945,8 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) u32 ifindex; int ret = -EADDRNOTAVAIL; - rtnl_lock(); + ASSERT_RTNL(); + in_dev = ip_mc_find_dev(net, imr); if (!in_dev) { ret = -ENODEV; @@ -1964,14 +1970,25 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) *imlp = iml->next_rcu; ip_mc_dec_group(in_dev, group); - rtnl_unlock(); + /* decrease mem now to avoid the memleak warning */ atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); kfree_rcu(iml, rcu); return 0; } out: + return ret; +} +EXPORT_SYMBOL(__ip_mc_leave_group); + +int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) +{ + int ret; + + rtnl_lock(); + ret = __ip_mc_leave_group(sk, imr); rtnl_unlock(); + return ret; } EXPORT_SYMBOL(ip_mc_leave_group); @@ -2724,6 +2741,7 @@ static const struct file_operations igmp_mcf_seq_fops = { static int __net_init igmp_net_init(struct net *net) { struct proc_dir_entry *pde; + int err; pde = proc_create("igmp", S_IRUGO, net->proc_net, &igmp_mc_seq_fops); if (!pde) @@ -2732,8 +2750,18 @@ static int __net_init igmp_net_init(struct net *net) &igmp_mcf_seq_fops); if (!pde) goto out_mcfilter; + err = inet_ctl_sock_create(&net->ipv4.mc_autojoin_sk, AF_INET, + SOCK_DGRAM, 0, net); + if (err < 0) { + pr_err("Failed to initialize the IGMP autojoin socket (err %d)\n", + err); + goto out_sock; + } + return 0; +out_sock: + remove_proc_entry("mcfilter", net->proc_net); out_mcfilter: remove_proc_entry("igmp", net->proc_net); out_igmp: @@ -2744,6 +2772,7 @@ static void __net_exit igmp_net_exit(struct net *net) { remove_proc_entry("mcfilter", net->proc_net); remove_proc_entry("igmp", net->proc_net); + inet_ctl_sock_destroy(net->ipv4.mc_autojoin_sk); } static struct pernet_operations igmp_net_ops = { diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 81751f1..0c974d3 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -508,7 +508,7 @@ int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk) } entry.sport = inet->inet_num; entry.dport = ntohs(inet->inet_dport); - entry.userlocks = sk->sk_userlocks; + entry.userlocks = (sk->sk_state != TCP_TIME_WAIT) ? 
sk->sk_userlocks : 0; return inet_diag_bc_run(bc, &entry); } @@ -642,37 +642,44 @@ static int inet_csk_diag_dump(struct sock *sk, cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); } +static void twsk_build_assert(void) +{ + BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_family) != + offsetof(struct sock, sk_family)); + + BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_num) != + offsetof(struct inet_sock, inet_num)); + + BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_dport) != + offsetof(struct inet_sock, inet_dport)); + + BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_rcv_saddr) != + offsetof(struct inet_sock, inet_rcv_saddr)); + + BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_daddr) != + offsetof(struct inet_sock, inet_daddr)); + +#if IS_ENABLED(CONFIG_IPV6) + BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_rcv_saddr) != + offsetof(struct sock, sk_v6_rcv_saddr)); + + BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_daddr) != + offsetof(struct sock, sk_v6_daddr)); +#endif +} + static int inet_twsk_diag_dump(struct sock *sk, struct sk_buff *skb, struct netlink_callback *cb, struct inet_diag_req_v2 *r, const struct nlattr *bc) { - struct inet_timewait_sock *tw = inet_twsk(sk); + twsk_build_assert(); - if (bc != NULL) { - struct inet_diag_entry entry; - - entry.family = tw->tw_family; -#if IS_ENABLED(CONFIG_IPV6) - if (tw->tw_family == AF_INET6) { - entry.saddr = tw->tw_v6_rcv_saddr.s6_addr32; - entry.daddr = tw->tw_v6_daddr.s6_addr32; - } else -#endif - { - entry.saddr = &tw->tw_rcv_saddr; - entry.daddr = &tw->tw_daddr; - } - entry.sport = tw->tw_num; - entry.dport = ntohs(tw->tw_dport); - entry.userlocks = 0; - - if (!inet_diag_bc_run(bc, &entry)) - return 0; - } + if (!inet_diag_bc_sk(bc, sk)) + return 0; - return inet_twsk_diag_fill(tw, skb, r, + return inet_twsk_diag_fill(inet_twsk(sk), skb, r, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); } diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index 59f883d..fb20f36 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig @@ -36,24 +36,16 @@ config NF_CONNTRACK_PROC_COMPAT If unsure, say Y. -config NF_LOG_ARP - tristate "ARP packet logging" - default m if NETFILTER_ADVANCED=n - select NF_LOG_COMMON - -config NF_LOG_IPV4 - tristate "IPv4 packet logging" - default m if NETFILTER_ADVANCED=n - select NF_LOG_COMMON +if NF_TABLES config NF_TABLES_IPV4 - depends on NF_TABLES tristate "IPv4 nf_tables support" help This option enables the IPv4 support for nf_tables. +if NF_TABLES_IPV4 + config NFT_CHAIN_ROUTE_IPV4 - depends on NF_TABLES_IPV4 tristate "IPv4 nf_tables route chain support" help This option enables the "route" chain for IPv4 in nf_tables. This @@ -61,22 +53,34 @@ config NFT_CHAIN_ROUTE_IPV4 fields such as the source, destination, type of service and the packet mark. -config NF_REJECT_IPV4 - tristate "IPv4 packet rejection" - default m if NETFILTER_ADVANCED=n - config NFT_REJECT_IPV4 - depends on NF_TABLES_IPV4 select NF_REJECT_IPV4 default NFT_REJECT tristate +endif # NF_TABLES_IPV4 + config NF_TABLES_ARP - depends on NF_TABLES tristate "ARP nf_tables support" help This option enables the ARP support for nf_tables. 
+endif # NF_TABLES + +config NF_LOG_ARP + tristate "ARP packet logging" + default m if NETFILTER_ADVANCED=n + select NF_LOG_COMMON + +config NF_LOG_IPV4 + tristate "IPv4 packet logging" + default m if NETFILTER_ADVANCED=n + select NF_LOG_COMMON + +config NF_REJECT_IPV4 + tristate "IPv4 packet rejection" + default m if NETFILTER_ADVANCED=n + config NF_NAT_IPV4 tristate "IPv4 NAT" depends on NF_CONNTRACK_IPV4 diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index e90f83a..f75e9df 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -418,6 +418,13 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par) if (ret < 0) pr_info("cannot load conntrack support for proto=%u\n", par->family); + + if (!par->net->xt.clusterip_deprecated_warning) { + pr_info("ipt_CLUSTERIP is deprecated and it will removed soon, " + "use xt_cluster instead\n"); + par->net->xt.clusterip_deprecated_warning = true; + } + return ret; } diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c index 8f48f55..87907d4 100644 --- a/net/ipv4/netfilter/ipt_REJECT.c +++ b/net/ipv4/netfilter/ipt_REJECT.c @@ -34,31 +34,32 @@ static unsigned int reject_tg(struct sk_buff *skb, const struct xt_action_param *par) { const struct ipt_reject_info *reject = par->targinfo; + int hook = par->hooknum; switch (reject->with) { case IPT_ICMP_NET_UNREACHABLE: - nf_send_unreach(skb, ICMP_NET_UNREACH); + nf_send_unreach(skb, ICMP_NET_UNREACH, hook); break; case IPT_ICMP_HOST_UNREACHABLE: - nf_send_unreach(skb, ICMP_HOST_UNREACH); + nf_send_unreach(skb, ICMP_HOST_UNREACH, hook); break; case IPT_ICMP_PROT_UNREACHABLE: - nf_send_unreach(skb, ICMP_PROT_UNREACH); + nf_send_unreach(skb, ICMP_PROT_UNREACH, hook); break; case IPT_ICMP_PORT_UNREACHABLE: - nf_send_unreach(skb, ICMP_PORT_UNREACH); + nf_send_unreach(skb, ICMP_PORT_UNREACH, hook); break; case IPT_ICMP_NET_PROHIBITED: - nf_send_unreach(skb, ICMP_NET_ANO); + nf_send_unreach(skb, ICMP_NET_ANO, hook); break; case IPT_ICMP_HOST_PROHIBITED: - nf_send_unreach(skb, ICMP_HOST_ANO); + nf_send_unreach(skb, ICMP_HOST_ANO, hook); break; case IPT_ICMP_ADMIN_PROHIBITED: - nf_send_unreach(skb, ICMP_PKT_FILTERED); + nf_send_unreach(skb, ICMP_PKT_FILTERED, hook); break; case IPT_TCP_RESET: - nf_send_reset(skb, par->hooknum); + nf_send_reset(skb, hook); case IPT_ICMP_ECHOREPLY: /* Doesn't happen. 
*/ break; diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c index 536da7b..b7405eb 100644 --- a/net/ipv4/netfilter/nf_reject_ipv4.c +++ b/net/ipv4/netfilter/nf_reject_ipv4.c @@ -164,4 +164,27 @@ void nf_send_reset(struct sk_buff *oldskb, int hook) } EXPORT_SYMBOL_GPL(nf_send_reset); +void nf_send_unreach(struct sk_buff *skb_in, int code, int hook) +{ + struct iphdr *iph = ip_hdr(skb_in); + u8 proto; + + if (skb_in->csum_bad || iph->frag_off & htons(IP_OFFSET)) + return; + + if (skb_csum_unnecessary(skb_in)) { + icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0); + return; + } + + if (iph->protocol == IPPROTO_TCP || iph->protocol == IPPROTO_UDP) + proto = iph->protocol; + else + proto = 0; + + if (nf_ip_checksum(skb_in, hook, ip_hdrlen(skb_in), proto) == 0) + icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0); +} +EXPORT_SYMBOL_GPL(nf_send_unreach); + MODULE_LICENSE("GPL"); diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c index d729542..16a5d4d 100644 --- a/net/ipv4/netfilter/nft_reject_ipv4.c +++ b/net/ipv4/netfilter/nft_reject_ipv4.c @@ -27,7 +27,8 @@ static void nft_reject_ipv4_eval(const struct nft_expr *expr, switch (priv->type) { case NFT_REJECT_ICMP_UNREACH: - nf_send_unreach(pkt->skb, priv->icmp_code); + nf_send_unreach(pkt->skb, priv->icmp_code, + pkt->ops->hooknum); break; case NFT_REJECT_TCP_RST: nf_send_reset(pkt->skb, pkt->ops->hooknum); diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 208d543..fd88f86 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -692,8 +692,7 @@ int ping_common_sendmsg(int family, struct msghdr *msg, size_t len, } EXPORT_SYMBOL_GPL(ping_common_sendmsg); -static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len) +static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { struct net *net = sock_net(sk); struct flowi4 fl4; @@ -849,8 +848,8 @@ do_confirm: goto out; } -int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len, int noblock, int flags, int *addr_len) +int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, + int flags, int *addr_len) { struct inet_sock *isk = inet_sk(sk); int family = sk->sk_family; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index f027a70..923cf53 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -481,8 +481,7 @@ static int raw_getfrag(void *from, char *to, int offset, int len, int odd, return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb); } -static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len) +static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { struct inet_sock *inet = inet_sk(sk); struct ipcm_cookie ipc; @@ -709,8 +708,8 @@ out: return ret; * we return it, otherwise we block. 
*/ -static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len, int noblock, int flags, int *addr_len) +static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); size_t copied = 0; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index ad50643..649c8a3 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -152,7 +152,6 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, static struct dst_ops ipv4_dst_ops = { .family = AF_INET, - .protocol = cpu_to_be16(ETH_P_IP), .check = ipv4_dst_check, .default_advmss = ipv4_default_advmss, .mtu = ipv4_mtu, @@ -2225,7 +2224,6 @@ static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst, static struct dst_ops ipv4_dst_blackhole_ops = { .family = AF_INET, - .protocol = cpu_to_be16(ETH_P_IP), .check = ipv4_blackhole_dst_check, .mtu = ipv4_blackhole_mtu, .default_advmss = ipv4_default_advmss, diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index d151539..fdf8991 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -883,6 +883,20 @@ static struct ctl_table ipv4_net_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "tcp_probe_threshold", + .data = &init_net.ipv4.sysctl_tcp_probe_threshold, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "tcp_probe_interval", + .data = &init_net.ipv4.sysctl_tcp_probe_interval, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, { } }; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 995a225..62f3842 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1060,8 +1060,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, return err; } -int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t size) +int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; @@ -1539,8 +1538,8 @@ EXPORT_SYMBOL(tcp_read_sock); * Probably, code can be easily improved even more. 
*/ -int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len, int nonblock, int flags, int *addr_len) +int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, + int flags, int *addr_len) { struct tcp_sock *tp = tcp_sk(sk); int copied = 0; @@ -3001,12 +3000,11 @@ static void __init tcp_init_mem(void) void __init tcp_init(void) { - struct sk_buff *skb = NULL; unsigned long limit; int max_rshare, max_wshare, cnt; unsigned int i; - BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb)); + sock_skb_cb_check_size(sizeof(struct tcp_skb_cb)); percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL); percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL); diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index d694088..d4c3a5e 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -83,7 +83,7 @@ int tcp_register_congestion_control(struct tcp_congestion_ops *ca) ret = -EEXIST; } else { list_add_tail_rcu(&ca->list, &tcp_cong_list); - pr_info("%s registered\n", ca->name); + pr_debug("%s registered\n", ca->name); } spin_unlock(&tcp_cong_list_lock); diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index ea82fd4..fe77417 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -221,7 +221,6 @@ static bool tcp_fastopen_create_child(struct sock *sk, WARN_ON(req->sk == NULL); return true; } -EXPORT_SYMBOL(tcp_fastopen_create_child); static bool tcp_fastopen_queue_check(struct sock *sk) { diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 5a2dfed..f0c6fc3 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2460,6 +2460,8 @@ static int __net_init tcp_sk_init(struct net *net) } net->ipv4.sysctl_tcp_ecn = 2; net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS; + net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD; + net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL; return 0; fail: diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index 9d7930b..3f7c2fc 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -29,8 +29,8 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq, } } -struct sk_buff *tcp4_gso_segment(struct sk_buff *skb, - netdev_features_t features) +static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb, + netdev_features_t features) { if (!pskb_may_pull(skb, sizeof(struct tcphdr))) return ERR_PTR(-EINVAL); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index a2a796c..5a73ad5 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1354,6 +1354,8 @@ void tcp_mtup_init(struct sock *sk) icsk->icsk_af_ops->net_header_len; icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss); icsk->icsk_mtup.probe_size = 0; + if (icsk->icsk_mtup.enabled) + icsk->icsk_mtup.probe_timestamp = tcp_time_stamp; } EXPORT_SYMBOL(tcp_mtup_init); @@ -1752,20 +1754,23 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, bool *is_cwnd_limited, u32 max_segs) { - struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); - u32 send_win, cong_win, limit, in_flight; + u32 age, send_win, cong_win, limit, in_flight; + struct tcp_sock *tp = tcp_sk(sk); + struct skb_mstamp now; + struct sk_buff *head; int win_divisor; if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) goto send_now; - if (icsk->icsk_ca_state != TCP_CA_Open) + if (!((1 << icsk->icsk_ca_state) & (TCPF_CA_Open | TCPF_CA_CWR))) goto 
send_now; - /* Defer for less than two clock ticks. */ - if (tp->tso_deferred && - (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1) + /* Avoid bursty behavior by allowing defer + * only if the last write was recent. + */ + if ((s32)(tcp_time_stamp - tp->lsndtime) > 0) goto send_now; in_flight = tcp_packets_in_flight(tp); @@ -1807,11 +1812,14 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, goto send_now; } - /* Ok, it looks like it is advisable to defer. - * Do not rearm the timer if already set to not break TCP ACK clocking. - */ - if (!tp->tso_deferred) - tp->tso_deferred = 1 | (jiffies << 1); + head = tcp_write_queue_head(sk); + skb_mstamp_get(&now); + age = skb_mstamp_us_delta(&now, &head->skb_mstamp); + /* If next ACK is likely to come too late (half srtt), do not defer */ + if (age < (tp->srtt_us >> 4)) + goto send_now; + + /* Ok, it looks like it is advisable to defer. */ if (cong_win < send_win && cong_win < skb->len) *is_cwnd_limited = true; @@ -1819,10 +1827,34 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, return true; send_now: - tp->tso_deferred = 0; return false; } +static inline void tcp_mtu_check_reprobe(struct sock *sk) +{ + struct inet_connection_sock *icsk = inet_csk(sk); + struct tcp_sock *tp = tcp_sk(sk); + struct net *net = sock_net(sk); + u32 interval; + s32 delta; + + interval = net->ipv4.sysctl_tcp_probe_interval; + delta = tcp_time_stamp - icsk->icsk_mtup.probe_timestamp; + if (unlikely(delta >= interval * HZ)) { + int mss = tcp_current_mss(sk); + + /* Update current search range */ + icsk->icsk_mtup.probe_size = 0; + icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + + sizeof(struct tcphdr) + + icsk->icsk_af_ops->net_header_len; + icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); + + /* Update probe time stamp */ + icsk->icsk_mtup.probe_timestamp = tcp_time_stamp; + } +} + /* Create a new MTU probe if we are ready. * MTU probe is regularly attempting to increase the path MTU by * deliberately sending larger packets. This discovers routing @@ -1837,11 +1869,13 @@ static int tcp_mtu_probe(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); struct sk_buff *skb, *nskb, *next; + struct net *net = sock_net(sk); int len; int probe_size; int size_needed; int copy; int mss_now; + int interval; /* Not currently probing/verifying, * not in recovery, @@ -1854,12 +1888,25 @@ static int tcp_mtu_probe(struct sock *sk) tp->rx_opt.num_sacks || tp->rx_opt.dsack) return -1; - /* Very simple search strategy: just double the MSS. */ + /* Use binary search for probe_size between tcp_mss_base + * and current mss_clamp. If (search_high - search_low) is + * smaller than a threshold, back off from probing. + */ mss_now = tcp_current_mss(sk); - probe_size = 2 * tp->mss_cache; + probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high + + icsk->icsk_mtup.search_low) >> 1); size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; - if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) { - /* TODO: set timer for probe_converge_event */ + interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low; + /* When misfortune happens, we are reprobing actively, + * and then the reprobe timer has expired. We stick with the current + * probing process by not resetting the search range to its original.
+ */ + if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) || + interval < net->ipv4.sysctl_tcp_probe_threshold) { + /* Check whether enough time has elapsed for + * another round of probing. + */ + tcp_mtu_check_reprobe(sk); return -1; } diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 0732b78..1550593 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -107,6 +107,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk) if (net->ipv4.sysctl_tcp_mtu_probing) { if (!icsk->icsk_mtup.enabled) { icsk->icsk_mtup.enabled = 1; + icsk->icsk_mtup.probe_timestamp = tcp_time_stamp; tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); } else { struct net *net = sock_net(sk); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 97ef1f8b..f27556e 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -873,8 +873,7 @@ out: } EXPORT_SYMBOL(udp_push_pending_frames); -int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len) +int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { struct inet_sock *inet = inet_sk(sk); struct udp_sock *up = udp_sk(sk); @@ -1136,7 +1135,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset, * sendpage interface can't pass. * This will succeed only when the socket is connected. */ - ret = udp_sendmsg(NULL, sk, &msg, 0); + ret = udp_sendmsg(sk, &msg, 0); if (ret < 0) return ret; } @@ -1254,8 +1253,8 @@ EXPORT_SYMBOL(udp_ioctl); * return it, otherwise we block. */ -int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len, int noblock, int flags, int *addr_len) +int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, + int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name); @@ -2525,6 +2524,16 @@ void __init udp_table_init(struct udp_table *table, const char *name) } } +u32 udp_flow_hashrnd(void) +{ + static u32 hashrnd __read_mostly; + + net_get_random_once(&hashrnd, sizeof(hashrnd)); + + return hashrnd; +} +EXPORT_SYMBOL(udp_flow_hashrnd); + void __init udp_init(void) { unsigned long limit; diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h index f3c2789..7e0fe4b 100644 --- a/net/ipv4/udp_impl.h +++ b/net/ipv4/udp_impl.h @@ -21,8 +21,8 @@ int compat_udp_setsockopt(struct sock *sk, int level, int optname, int compat_udp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); #endif -int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len, int noblock, int flags, int *addr_len); +int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, + int flags, int *addr_len); int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags); int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 6156f68..c224c85 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -232,7 +232,6 @@ static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev, static struct dst_ops xfrm4_dst_ops = { .family = AF_INET, - .protocol = cpu_to_be16(ETH_P_IP), .gc = xfrm4_garbage_collect, .update_pmtu = xfrm4_update_pmtu, .redirect = xfrm4_redirect, diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index b603002..88d2cf0 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2464,6 +2464,23 @@ err_exit: return err; } +static int ipv6_mc_config(struct sock *sk,
bool join, + const struct in6_addr *addr, int ifindex) +{ + int ret; + + ASSERT_RTNL(); + + lock_sock(sk); + if (join) + ret = __ipv6_sock_mc_join(sk, ifindex, addr); + else + ret = __ipv6_sock_mc_drop(sk, ifindex, addr); + release_sock(sk); + + return ret; +} + /* * Manual configuration of address on an interface */ @@ -2476,10 +2493,10 @@ static int inet6_addr_add(struct net *net, int ifindex, struct inet6_ifaddr *ifp; struct inet6_dev *idev; struct net_device *dev; + unsigned long timeout; + clock_t expires; int scope; u32 flags; - clock_t expires; - unsigned long timeout; ASSERT_RTNL(); @@ -2501,6 +2518,14 @@ static int inet6_addr_add(struct net *net, int ifindex, if (IS_ERR(idev)) return PTR_ERR(idev); + if (ifa_flags & IFA_F_MCAUTOJOIN) { + int ret = ipv6_mc_config(net->ipv6.mc_autojoin_sk, + true, pfx, ifindex); + + if (ret < 0) + return ret; + } + scope = ipv6_addr_scope(pfx); timeout = addrconf_timeout_fixup(valid_lft, HZ); @@ -2542,6 +2567,9 @@ static int inet6_addr_add(struct net *net, int ifindex, in6_ifa_put(ifp); addrconf_verify_rtnl(); return 0; + } else if (ifa_flags & IFA_F_MCAUTOJOIN) { + ipv6_mc_config(net->ipv6.mc_autojoin_sk, + false, pfx, ifindex); } return PTR_ERR(ifp); @@ -2578,6 +2606,10 @@ static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags, jiffies); ipv6_del_addr(ifp); addrconf_verify_rtnl(); + if (ipv6_addr_is_multicast(pfx)) { + ipv6_mc_config(net->ipv6.mc_autojoin_sk, + false, pfx, dev->ifindex); + } return 0; } } @@ -3945,7 +3977,7 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh) /* We ignore other flags so far. */ ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR | - IFA_F_NOPREFIXROUTE; + IFA_F_NOPREFIXROUTE | IFA_F_MCAUTOJOIN; ifa = ipv6_get_ifaddr(net, pfx, dev, 1); if (ifa == NULL) { diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index e8c4400..6bafcc2 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -824,7 +824,7 @@ static int __init inet6_init(void) struct list_head *r; int err = 0; - BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb)); + sock_skb_cb_check_size(sizeof(struct inet6_skb_parm)); /* Register the socket-side information for inet6_create. */ for (r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r) diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 266a264..88300d4 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -64,12 +64,6 @@ MODULE_LICENSE("GPL"); MODULE_ALIAS_RTNL_LINK("ip6tnl"); MODULE_ALIAS_NETDEV("ip6tnl0"); -#ifdef IP6_TNL_DEBUG -#define IP6_TNL_TRACE(x...) pr_debug("%s:" x "\n", __func__) -#else -#define IP6_TNL_TRACE(x...) do {;} while(0) -#endif - #define HASH_SIZE_SHIFT 5 #define HASH_SIZE (1 << HASH_SIZE_SHIFT) diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 5ce107c..1dd1fed 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -132,7 +132,7 @@ static int unsolicited_report_interval(struct inet6_dev *idev) return iv > 0 ? 
iv : 1; } -int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) +int __ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) { struct net_device *dev = NULL; struct ipv6_mc_socklist *mc_lst; @@ -140,6 +140,8 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) struct net *net = sock_net(sk); int err; + ASSERT_RTNL(); + if (!ipv6_addr_is_multicast(addr)) return -EINVAL; @@ -161,7 +163,6 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) mc_lst->next = NULL; mc_lst->addr = *addr; - rtnl_lock(); if (ifindex == 0) { struct rt6_info *rt; rt = rt6_lookup(net, addr, NULL, 0, 0); @@ -173,7 +174,6 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) dev = __dev_get_by_index(net, ifindex); if (dev == NULL) { - rtnl_unlock(); sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); return -ENODEV; } @@ -190,7 +190,6 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) err = ipv6_dev_mc_inc(dev, addr); if (err) { - rtnl_unlock(); sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); return err; } @@ -198,25 +197,37 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) mc_lst->next = np->ipv6_mc_list; rcu_assign_pointer(np->ipv6_mc_list, mc_lst); + return 0; +} +EXPORT_SYMBOL(__ipv6_sock_mc_join); + +int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) +{ + int ret; + + rtnl_lock(); + ret = __ipv6_sock_mc_join(sk, ifindex, addr); rtnl_unlock(); - return 0; + return ret; } +EXPORT_SYMBOL(ipv6_sock_mc_join); /* * socket leave on multicast group */ -int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) +int __ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_mc_socklist *mc_lst; struct ipv6_mc_socklist __rcu **lnk; struct net *net = sock_net(sk); + ASSERT_RTNL(); + if (!ipv6_addr_is_multicast(addr)) return -EINVAL; - rtnl_lock(); for (lnk = &np->ipv6_mc_list; (mc_lst = rtnl_dereference(*lnk)) != NULL; lnk = &mc_lst->next) { @@ -235,17 +246,28 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) __ipv6_dev_mc_dec(idev, &mc_lst->addr); } else (void) ip6_mc_leave_src(sk, mc_lst, NULL); - rtnl_unlock(); atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); kfree_rcu(mc_lst, rcu); return 0; } } - rtnl_unlock(); return -EADDRNOTAVAIL; } +EXPORT_SYMBOL(__ipv6_sock_mc_drop); + +int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) +{ + int ret; + + rtnl_lock(); + ret = __ipv6_sock_mc_drop(sk, ifindex, addr); + rtnl_unlock(); + + return ret; +} +EXPORT_SYMBOL(ipv6_sock_mc_drop); /* called with rcu_read_lock() */ static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net, @@ -2907,20 +2929,32 @@ static int __net_init igmp6_net_init(struct net *net) inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1; + err = inet_ctl_sock_create(&net->ipv6.mc_autojoin_sk, PF_INET6, + SOCK_RAW, IPPROTO_ICMPV6, net); + if (err < 0) { + pr_err("Failed to initialize the IGMP6 autojoin socket (err %d)\n", + err); + goto out_sock_create; + } + err = igmp6_proc_init(net); if (err) - goto out_sock_create; -out: - return err; + goto out_sock_create_autojoin; + return 0; + +out_sock_create_autojoin: + inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk); out_sock_create: inet_ctl_sock_destroy(net->ipv6.igmp_sk); - goto out; +out: + return err; } static void __net_exit igmp6_net_exit(struct 
net *net) { inet_ctl_sock_destroy(net->ipv6.igmp_sk); + inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk); igmp6_proc_exit(net); } diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 471ed24..247ad7c 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -84,6 +84,7 @@ do { \ static u32 ndisc_hash(const void *pkey, const struct net_device *dev, __u32 *hash_rnd); +static bool ndisc_key_eq(const struct neighbour *neigh, const void *pkey); static int ndisc_constructor(struct neighbour *neigh); static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb); static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb); @@ -117,7 +118,9 @@ static const struct neigh_ops ndisc_direct_ops = { struct neigh_table nd_tbl = { .family = AF_INET6, .key_len = sizeof(struct in6_addr), + .protocol = cpu_to_be16(ETH_P_IPV6), .hash = ndisc_hash, + .key_eq = ndisc_key_eq, .constructor = ndisc_constructor, .pconstructor = pndisc_constructor, .pdestructor = pndisc_destructor, @@ -294,6 +297,11 @@ static u32 ndisc_hash(const void *pkey, return ndisc_hashfn(pkey, dev, hash_rnd); } +static bool ndisc_key_eq(const struct neighbour *n, const void *pkey) +{ + return neigh_key_eq128(n, pkey); +} + static int ndisc_constructor(struct neighbour *neigh) { struct in6_addr *addr = (struct in6_addr *)&neigh->primary_key; diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index a069822..ca69983 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig @@ -25,14 +25,16 @@ config NF_CONNTRACK_IPV6 To compile it as a module, choose M here. If unsure, say N. +if NF_TABLES + config NF_TABLES_IPV6 - depends on NF_TABLES tristate "IPv6 nf_tables support" help This option enables the IPv6 support for nf_tables. +if NF_TABLES_IPV6 + config NFT_CHAIN_ROUTE_IPV6 - depends on NF_TABLES_IPV6 tristate "IPv6 nf_tables route chain support" help This option enables the "route" chain for IPv6 in nf_tables. This @@ -40,16 +42,18 @@ config NFT_CHAIN_ROUTE_IPV6 fields such as the source, destination, flowlabel, hop-limit and the packet mark. 
-config NF_REJECT_IPV6 - tristate "IPv6 packet rejection" - default m if NETFILTER_ADVANCED=n - config NFT_REJECT_IPV6 - depends on NF_TABLES_IPV6 select NF_REJECT_IPV6 default NFT_REJECT tristate +endif # NF_TABLES_IPV6 +endif # NF_TABLES + +config NF_REJECT_IPV6 + tristate "IPv6 packet rejection" + default m if NETFILTER_ADVANCED=n + config NF_LOG_IPV6 tristate "IPv6 packet logging" default m if NETFILTER_ADVANCED=n diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c index d05b364..68e0bb4 100644 --- a/net/ipv6/netfilter/nf_reject_ipv6.c +++ b/net/ipv6/netfilter/nf_reject_ipv6.c @@ -208,4 +208,39 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) } EXPORT_SYMBOL_GPL(nf_send_reset6); +static bool reject6_csum_ok(struct sk_buff *skb, int hook) +{ + const struct ipv6hdr *ip6h = ipv6_hdr(skb); + int thoff; + __be16 fo; + u8 proto; + + if (skb->csum_bad) + return false; + + if (skb_csum_unnecessary(skb)) + return true; + + proto = ip6h->nexthdr; + thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo); + + if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0) + return false; + + return nf_ip6_checksum(skb, hook, thoff, proto) == 0; +} + +void nf_send_unreach6(struct net *net, struct sk_buff *skb_in, + unsigned char code, unsigned int hooknum) +{ + if (!reject6_csum_ok(skb_in, hooknum)) + return; + + if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL) + skb_in->dev = net->loopback_dev; + + icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0); +} +EXPORT_SYMBOL_GPL(nf_send_unreach6); + MODULE_LICENSE("GPL"); diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index a2dfff6..263a516 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -77,8 +77,7 @@ static int dummy_ipv6_chk_addr(struct net *net, const struct in6_addr *addr, return 0; } -int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len) +int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index dae7f1a..a5287b3 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -32,7 +32,7 @@ #include <linux/netfilter_ipv6.h> #include <linux/skbuff.h> #include <linux/compat.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <asm/ioctls.h> #include <net/net_namespace.h> @@ -456,9 +456,8 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb) * we return it, otherwise we block. 
*/ -static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t len, - int noblock, int flags, int *addr_len) +static int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + int noblock, int flags, int *addr_len) { struct ipv6_pinfo *np = inet6_sk(sk); DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); @@ -730,8 +729,7 @@ static int raw6_getfrag(void *from, char *to, int offset, int len, int odd, return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb); } -static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t len) +static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { struct ipv6_txoptions opt_space; DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 4688bd4..06fa819 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -194,7 +194,6 @@ static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, static struct dst_ops ip6_dst_ops_template = { .family = AF_INET6, - .protocol = cpu_to_be16(ETH_P_IPV6), .gc = ip6_dst_gc, .gc_thresh = 1024, .check = ip6_dst_check, @@ -236,7 +235,6 @@ static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst, static struct dst_ops ip6_dst_blackhole_ops = { .family = AF_INET6, - .protocol = cpu_to_be16(ETH_P_IPV6), .destroy = ip6_dst_destroy, .check = ip6_dst_check, .mtu = ip6_blackhole_mtu, diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c index c1ab771..d883c92 100644 --- a/net/ipv6/tcpv6_offload.c +++ b/net/ipv6/tcpv6_offload.c @@ -41,8 +41,8 @@ static int tcp6_gro_complete(struct sk_buff *skb, int thoff) return tcp_gro_complete(skb); } -struct sk_buff *tcp6_gso_segment(struct sk_buff *skb, - netdev_features_t features) +static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb, + netdev_features_t features) { struct tcphdr *th; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index d048d46..70568a4 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -391,8 +391,7 @@ EXPORT_SYMBOL_GPL(udp6_lib_lookup); * return it, otherwise we block. */ -int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t len, +int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct ipv6_pinfo *np = inet6_sk(sk); @@ -1101,8 +1100,7 @@ out: return err; } -int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t len) +int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { struct ipv6_txoptions opt_space; struct udp_sock *up = udp_sk(sk); @@ -1164,12 +1162,12 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, do_udp_sendmsg: if (__ipv6_only_sock(sk)) return -ENETUNREACH; - return udp_sendmsg(iocb, sk, msg, len); + return udp_sendmsg(sk, msg, len); } } if (up->pending == AF_INET) - return udp_sendmsg(iocb, sk, msg, len); + return udp_sendmsg(sk, msg, len); /* Rough check on arithmetic overflow, better check is made in ip6_append_data(). 
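The conversions above in ping, raw, tcp and udp (and their IPv6 counterparts) all follow one pattern: the unused struct kiocb * argument is dropped from the sendmsg/recvmsg paths and nothing else changes. As a summary only, an illustrative sketch rather than text from the patch, the struct sock level handlers end up with prototypes of this shape:

#include <linux/types.h>

/* Sketch of the post-conversion handler shapes; names here are illustrative,
 * not taken from the diff. */
struct sock;
struct msghdr;

typedef int (*sketch_sendmsg_t)(struct sock *sk, struct msghdr *msg,
				size_t len);
typedef int (*sketch_recvmsg_t)(struct sock *sk, struct msghdr *msg,
				size_t len, int noblock, int flags,
				int *addr_len);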
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h index c779c3c..0682c03 100644 --- a/net/ipv6/udp_impl.h +++ b/net/ipv6/udp_impl.h @@ -23,10 +23,9 @@ int compat_udpv6_setsockopt(struct sock *sk, int level, int optname, int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); #endif -int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len); -int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len, int noblock, int flags, int *addr_len); +int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); +int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, + int flags, int *addr_len); int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); void udpv6_destroy_sock(struct sock *sk); diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 48bf5a0..8ddf2b5 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -292,7 +292,6 @@ static void xfrm6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, static struct dst_ops xfrm6_dst_ops = { .family = AF_INET6, - .protocol = cpu_to_be16(ETH_P_IPV6), .gc = xfrm6_garbage_collect, .update_pmtu = xfrm6_update_pmtu, .redirect = xfrm6_redirect, diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index f11ad1d..4ea5d74 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c @@ -1688,8 +1688,7 @@ out: return rc; } -static int ipx_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int ipx_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct ipx_sock *ipxs = ipx_sk(sk); @@ -1754,8 +1753,8 @@ out: } -static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, int flags) +static int ipx_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, + int flags) { struct sock *sk = sock->sk; struct ipx_sock *ipxs = ipx_sk(sk); diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 568edc7..ee0ea25 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -1256,14 +1256,13 @@ static int irda_release(struct socket *sock) } /* - * Function irda_sendmsg (iocb, sock, msg, len) + * Function irda_sendmsg (sock, msg, len) * * Send message down to TinyTP. This function is used for both STREAM and * SEQPACK services. This is possible since it forces the client to * fragment the message if necessary */ -static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int irda_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct irda_sock *self; @@ -1348,13 +1347,13 @@ out: } /* - * Function irda_recvmsg_dgram (iocb, sock, msg, size, flags) + * Function irda_recvmsg_dgram (sock, msg, size, flags) * * Try to receive message and copy it to user. 
The frame is discarded * after being read, regardless of how much the user actually read */ -static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, int flags) +static int irda_recvmsg_dgram(struct socket *sock, struct msghdr *msg, + size_t size, int flags) { struct sock *sk = sock->sk; struct irda_sock *self = irda_sk(sk); @@ -1398,10 +1397,10 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, } /* - * Function irda_recvmsg_stream (iocb, sock, msg, size, flags) + * Function irda_recvmsg_stream (sock, msg, size, flags) */ -static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, int flags) +static int irda_recvmsg_stream(struct socket *sock, struct msghdr *msg, + size_t size, int flags) { struct sock *sk = sock->sk; struct irda_sock *self = irda_sk(sk); @@ -1515,14 +1514,14 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock, } /* - * Function irda_sendmsg_dgram (iocb, sock, msg, len) + * Function irda_sendmsg_dgram (sock, msg, len) * * Send message down to TinyTP for the unreliable sequenced * packet service... * */ -static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int irda_sendmsg_dgram(struct socket *sock, struct msghdr *msg, + size_t len) { struct sock *sk = sock->sk; struct irda_sock *self; @@ -1594,14 +1593,14 @@ out: } /* - * Function irda_sendmsg_ultra (iocb, sock, msg, len) + * Function irda_sendmsg_ultra (sock, msg, len) * * Send message down to IrLMP for the unreliable Ultra * packet service... */ #ifdef CONFIG_IRDA_ULTRA -static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int irda_sendmsg_ultra(struct socket *sock, struct msghdr *msg, + size_t len) { struct sock *sk = sock->sk; struct irda_sock *self; diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 2e9953b..94b4c89 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -1026,8 +1026,8 @@ static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg, (void *) prmdata, 8); } -static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) { struct sock *sk = sock->sk; struct iucv_sock *iucv = iucv_sk(sk); @@ -1317,8 +1317,8 @@ static void iucv_process_message_q(struct sock *sk) } } -static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, int flags) +static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg, + size_t len, int flags) { int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; diff --git a/net/key/af_key.c b/net/key/af_key.c index f8ac939..9255fd9 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -3588,8 +3588,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, } #endif -static int pfkey_sendmsg(struct kiocb *kiocb, - struct socket *sock, struct msghdr *msg, size_t len) +static int pfkey_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct sk_buff *skb = NULL; @@ -3630,8 +3629,7 @@ out: return err ? 
: len; } -static int pfkey_recvmsg(struct kiocb *kiocb, - struct socket *sock, struct msghdr *msg, size_t len, +static int pfkey_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 781b3a2..4b55287 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -74,7 +74,7 @@ static int l2tp_eth_dev_init(struct net_device *dev) priv->dev = dev; eth_hw_addr_random(dev); - memset(&dev->broadcast[0], 0xff, 6); + eth_broadcast_addr(dev->broadcast); dev->qdisc_tx_busylock = &l2tp_eth_tx_busylock; return 0; } diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 05dfc8a..7964993 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -385,7 +385,7 @@ drop: /* Userspace will call sendmsg() on the tunnel socket to send L2TP * control frames. */ -static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len) +static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { struct sk_buff *skb; int rc; @@ -506,7 +506,7 @@ no_route: goto out; } -static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, +static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 8611f1b..d1ded37 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -480,8 +480,7 @@ out: /* Userspace will call sendmsg() on the tunnel socket to send L2TP * control frames. */ -static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t len) +static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { struct ipv6_txoptions opt_space; DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name); @@ -643,9 +642,8 @@ do_confirm: goto done; } -static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t len, int noblock, - int flags, int *addr_len) +static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + int noblock, int flags, int *addr_len) { struct ipv6_pinfo *np = inet6_sk(sk); DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name); diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index cc7a828..e9b0dec 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -185,9 +185,8 @@ static int pppol2tp_recv_payload_hook(struct sk_buff *skb) /* Receive message. This is the recvmsg for the PPPoL2TP socket. */ -static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, - int flags) +static int pppol2tp_recvmsg(struct socket *sock, struct msghdr *msg, + size_t len, int flags) { int err; struct sk_buff *skb; @@ -295,7 +294,7 @@ static void pppol2tp_session_sock_put(struct l2tp_session *session) * when a user application does a sendmsg() on the session socket. L2TP and * PPP headers must be inserted into the user's data. */ -static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, +static int pppol2tp_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) { static const unsigned char ppph[2] = { 0xff, 0x03 }; diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 2c0b83c..17a8dff 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -704,8 +704,8 @@ out: * Copy received data to the socket user. * Returns non-negative upon success, negative otherwise. 
*/ -static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, int flags) +static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + int flags) { DECLARE_SOCKADDR(struct sockaddr_llc *, uaddr, msg->msg_name); const int nonblock = flags & MSG_DONTWAIT; @@ -878,8 +878,7 @@ copy_uaddr: * Transmit data provided by the socket user. * Returns non-negative upon success, negative otherwise. */ -static int llc_ui_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index dd4ff36..74f509c 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1488,7 +1488,7 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop, if (next_hop_sta) memcpy(next_hop, next_hop_sta->sta.addr, ETH_ALEN); else - memset(next_hop, 0, ETH_ALEN); + eth_zero_addr(next_hop); memset(pinfo, 0, sizeof(*pinfo)); diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index b606b53..f9b0758 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -1742,7 +1742,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) ieee80211_ibss_disconnect(sdata); ifibss->ssid_len = 0; - memset(ifibss->bssid, 0, ETH_ALEN); + eth_zero_addr(ifibss->bssid); /* remove beacon */ kfree(sdata->u.ibss.ie); diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 0c8b2a7..49a44bc 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -520,7 +520,7 @@ int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, } else { *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); /* RA TA DA SA */ - memset(hdr->addr1, 0, ETH_ALEN); /* RA is resolved later */ + eth_zero_addr(hdr->addr1); /* RA is resolved later */ memcpy(hdr->addr2, meshsa, ETH_ALEN); memcpy(hdr->addr3, meshda, ETH_ALEN); memcpy(hdr->addr4, meshsa, ETH_ALEN); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 10ac632..9f6f356 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2033,7 +2033,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, ieee80211_flush_queues(local, sdata, false); /* clear bssid only after building the needed mgmt frames */ - memset(ifmgd->bssid, 0, ETH_ALEN); + eth_zero_addr(ifmgd->bssid); /* remove AP and TDLS peers */ sta_info_flush(sdata); @@ -2464,7 +2464,7 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata, del_timer_sync(&sdata->u.mgd.timer); sta_info_destroy_addr(sdata, auth_data->bss->bssid); - memset(sdata->u.mgd.bssid, 0, ETH_ALEN); + eth_zero_addr(sdata->u.mgd.bssid); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); sdata->u.mgd.flags = 0; mutex_lock(&sdata->local->mtx); @@ -2777,7 +2777,7 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata, del_timer_sync(&sdata->u.mgd.timer); sta_info_destroy_addr(sdata, assoc_data->bss->bssid); - memset(sdata->u.mgd.bssid, 0, ETH_ALEN); + eth_zero_addr(sdata->u.mgd.bssid); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); sdata->u.mgd.flags = 0; mutex_lock(&sdata->local->mtx); @@ -4474,7 +4474,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, return 0; err_clear: - memset(ifmgd->bssid, 0, ETH_ALEN); + eth_zero_addr(ifmgd->bssid); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); ifmgd->auth_data = NULL; err_free: @@ -4817,7 +4817,7 
@@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, return 0; err_clear: - memset(ifmgd->bssid, 0, ETH_ALEN); + eth_zero_addr(ifmgd->bssid); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); ifmgd->assoc_data = NULL; err_free: diff --git a/net/mpls/Kconfig b/net/mpls/Kconfig index 37421db..dfca485 100644 --- a/net/mpls/Kconfig +++ b/net/mpls/Kconfig @@ -1,9 +1,30 @@ # # MPLS configuration # + +menuconfig MPLS + tristate "MultiProtocol Label Switching" + default n + ---help--- + MultiProtocol Label Switching routes packets through logical + circuits. Originally conceived as a way of routing packets at + hardware speeds (before hardware was capable of routing ipv4 packets), + MPLS remains a simple way of making tunnels. + + If you have not heard of MPLS you probably want to say N here. + +if MPLS + config NET_MPLS_GSO - tristate "MPLS: GSO support" + bool "MPLS: GSO support" help This is a helper module to allow segmentation of non-MPLS GSO packets that have had MPLS stack entries pushed onto them and thus become MPLS GSO packets. + +config MPLS_ROUTING + bool "MPLS: routing support" + help + Add support for forwarding of mpls packets. + +endif # MPLS diff --git a/net/mpls/Makefile b/net/mpls/Makefile index 6dec088..60af15f1 100644 --- a/net/mpls/Makefile +++ b/net/mpls/Makefile @@ -2,3 +2,4 @@ # Makefile for MPLS. # obj-$(CONFIG_NET_MPLS_GSO) += mpls_gso.o +obj-$(CONFIG_MPLS_ROUTING) += af_mpls.o diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c new file mode 100644 index 0000000..0ad8f7141 --- /dev/null +++ b/net/mpls/af_mpls.c @@ -0,0 +1,1008 @@ +#include <linux/types.h> +#include <linux/skbuff.h> +#include <linux/socket.h> +#include <linux/sysctl.h> +#include <linux/net.h> +#include <linux/module.h> +#include <linux/if_arp.h> +#include <linux/ipv6.h> +#include <linux/mpls.h> +#include <linux/vmalloc.h> +#include <net/ip.h> +#include <net/dst.h> +#include <net/sock.h> +#include <net/arp.h> +#include <net/ip_fib.h> +#include <net/netevent.h> +#include <net/netns/generic.h> +#include "internal.h" + +#define LABEL_NOT_SPECIFIED (1<<20) +#define MAX_NEW_LABELS 2 + +/* This maximum ha length is copied from the definition of struct neighbour */ +#define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))) + +struct mpls_route { /* next hop label forwarding entry */ + struct net_device __rcu *rt_dev; + struct rcu_head rt_rcu; + u32 rt_label[MAX_NEW_LABELS]; + u8 rt_protocol; /* routing protocol that set this entry */ + u8 rt_labels; + u8 rt_via_alen; + u8 rt_via_table; + u8 rt_via[0]; +}; + +static int zero = 0; +static int label_limit = (1 << 20) - 1; + +static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt, + struct nlmsghdr *nlh, struct net *net, u32 portid, + unsigned int nlm_flags); + +static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index) +{ + struct mpls_route *rt = NULL; + + if (index < net->mpls.platform_labels) { + struct mpls_route __rcu **platform_label = + rcu_dereference(net->mpls.platform_label); + rt = rcu_dereference(platform_label[index]); + } + return rt; +} + +static bool mpls_output_possible(const struct net_device *dev) +{ + return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev); +} + +static unsigned int mpls_rt_header_size(const struct mpls_route *rt) +{ + /* The size of the layer 2.5 labels to be added for this route */ + return rt->rt_labels * sizeof(struct mpls_shim_hdr); +} + +static unsigned int mpls_dev_mtu(const struct net_device *dev) +{ + /* The amount of data the layer 2 frame can hold */ +
return dev->mtu; +} + +static bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu) +{ + if (skb->len <= mtu) + return false; + + if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) + return false; + + return true; +} + +static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb, + struct mpls_entry_decoded dec) +{ + /* RFC4385 and RFC5586 encode other packets in mpls such that + * they don't conflict with the ip version number, making + * decoding by examining the ip version correct in everything + * except for the strangest cases. + * + * The strange cases if we choose to support them will require + * manual configuration. + */ + struct iphdr *hdr4 = ip_hdr(skb); + bool success = true; + + if (hdr4->version == 4) { + skb->protocol = htons(ETH_P_IP); + csum_replace2(&hdr4->check, + htons(hdr4->ttl << 8), + htons(dec.ttl << 8)); + hdr4->ttl = dec.ttl; + } + else if (hdr4->version == 6) { + struct ipv6hdr *hdr6 = ipv6_hdr(skb); + skb->protocol = htons(ETH_P_IPV6); + hdr6->hop_limit = dec.ttl; + } + else + /* version 0 and version 1 are used by pseudo wires */ + success = false; + return success; +} + +static int mpls_forward(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev) +{ + struct net *net = dev_net(dev); + struct mpls_shim_hdr *hdr; + struct mpls_route *rt; + struct mpls_entry_decoded dec; + struct net_device *out_dev; + unsigned int hh_len; + unsigned int new_header_size; + unsigned int mtu; + int err; + + /* Careful this entire function runs inside of an rcu critical section */ + + if (skb->pkt_type != PACKET_HOST) + goto drop; + + if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) + goto drop; + + if (!pskb_may_pull(skb, sizeof(*hdr))) + goto drop; + + /* Read and decode the label */ + hdr = mpls_hdr(skb); + dec = mpls_entry_decode(hdr); + + /* Pop the label */ + skb_pull(skb, sizeof(*hdr)); + skb_reset_network_header(skb); + + skb_orphan(skb); + + rt = mpls_route_input_rcu(net, dec.label); + if (!rt) + goto drop; + + /* Find the output device */ + out_dev = rcu_dereference(rt->rt_dev); + if (!mpls_output_possible(out_dev)) + goto drop; + + if (skb_warn_if_lro(skb)) + goto drop; + + skb_forward_csum(skb); + + /* Verify ttl is valid */ + if (dec.ttl <= 1) + goto drop; + dec.ttl -= 1; + + /* Verify the destination can hold the packet */ + new_header_size = mpls_rt_header_size(rt); + mtu = mpls_dev_mtu(out_dev); + if (mpls_pkt_too_big(skb, mtu - new_header_size)) + goto drop; + + hh_len = LL_RESERVED_SPACE(out_dev); + if (!out_dev->header_ops) + hh_len = 0; + + /* Ensure there is enough space for the headers in the skb */ + if (skb_cow(skb, hh_len + new_header_size)) + goto drop; + + skb->dev = out_dev; + skb->protocol = htons(ETH_P_MPLS_UC); + + if (unlikely(!new_header_size && dec.bos)) { + /* Penultimate hop popping */ + if (!mpls_egress(rt, skb, dec)) + goto drop; + } else { + bool bos; + int i; + skb_push(skb, new_header_size); + skb_reset_network_header(skb); + /* Push the new labels */ + hdr = mpls_hdr(skb); + bos = dec.bos; + for (i = rt->rt_labels - 1; i >= 0; i--) { + hdr[i] = mpls_entry_encode(rt->rt_label[i], dec.ttl, 0, bos); + bos = false; + } + } + + err = neigh_xmit(rt->rt_via_table, out_dev, rt->rt_via, skb); + if (err) + net_dbg_ratelimited("%s: packet transmission failed: %d\n", + __func__, err); + return 0; + +drop: + kfree_skb(skb); + return NET_RX_DROP; +} + +static struct packet_type mpls_packet_type __read_mostly = { + .type = cpu_to_be16(ETH_P_MPLS_UC), + .func = mpls_forward, +}; 
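/*
 * Illustrative sketch, not part of the patch above: mpls_forward() relies on
 * mpls_entry_decode()/mpls_entry_encode() from net/mpls/internal.h, which this
 * diff does not show. Assuming the standard RFC 3032 label stack entry layout
 * (label:20, TC:3, bottom-of-stack:1, TTL:8, big-endian on the wire), an
 * encoder along those lines would look roughly like this:
 */
static inline __be32 sketch_mpls_entry_encode(u32 label, u8 ttl, u8 tc, bool bos)
{
	/* label in bits 31..12, TC in bits 11..9, S flag in bit 8, TTL in bits 7..0 */
	return cpu_to_be32((label << 12) | ((u32)tc << 9) |
			   (bos ? (1 << 8) : 0) | ttl);
}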
+ +static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = { + [RTA_DST] = { .type = NLA_U32 }, + [RTA_OIF] = { .type = NLA_U32 }, +}; + +struct mpls_route_config { + u32 rc_protocol; + u32 rc_ifindex; + u16 rc_via_table; + u16 rc_via_alen; + u8 rc_via[MAX_VIA_ALEN]; + u32 rc_label; + u32 rc_output_labels; + u32 rc_output_label[MAX_NEW_LABELS]; + u32 rc_nlflags; + struct nl_info rc_nlinfo; +}; + +static struct mpls_route *mpls_rt_alloc(size_t alen) +{ + struct mpls_route *rt; + + rt = kzalloc(sizeof(*rt) + alen, GFP_KERNEL); + if (rt) + rt->rt_via_alen = alen; + return rt; +} + +static void mpls_rt_free(struct mpls_route *rt) +{ + if (rt) + kfree_rcu(rt, rt_rcu); +} + +static void mpls_notify_route(struct net *net, unsigned index, + struct mpls_route *old, struct mpls_route *new, + const struct nl_info *info) +{ + struct nlmsghdr *nlh = info ? info->nlh : NULL; + unsigned portid = info ? info->portid : 0; + int event = new ? RTM_NEWROUTE : RTM_DELROUTE; + struct mpls_route *rt = new ? new : old; + unsigned nlm_flags = (old && new) ? NLM_F_REPLACE : 0; + /* Ignore reserved labels for now */ + if (rt && (index >= 16)) + rtmsg_lfib(event, index, rt, nlh, net, portid, nlm_flags); +} + +static void mpls_route_update(struct net *net, unsigned index, + struct net_device *dev, struct mpls_route *new, + const struct nl_info *info) +{ + struct mpls_route __rcu **platform_label; + struct mpls_route *rt, *old = NULL; + + ASSERT_RTNL(); + + platform_label = rtnl_dereference(net->mpls.platform_label); + rt = rtnl_dereference(platform_label[index]); + if (!dev || (rt && (rtnl_dereference(rt->rt_dev) == dev))) { + rcu_assign_pointer(platform_label[index], new); + old = rt; + } + + mpls_notify_route(net, index, old, new, info); + + /* If we removed a route free it now */ + mpls_rt_free(old); +} + +static unsigned find_free_label(struct net *net) +{ + struct mpls_route __rcu **platform_label; + size_t platform_labels; + unsigned index; + + platform_label = rtnl_dereference(net->mpls.platform_label); + platform_labels = net->mpls.platform_labels; + for (index = 16; index < platform_labels; index++) { + if (!rtnl_dereference(platform_label[index])) + return index; + } + return LABEL_NOT_SPECIFIED; +} + +static int mpls_route_add(struct mpls_route_config *cfg) +{ + struct mpls_route __rcu **platform_label; + struct net *net = cfg->rc_nlinfo.nl_net; + struct net_device *dev = NULL; + struct mpls_route *rt, *old; + unsigned index; + int i; + int err = -EINVAL; + + index = cfg->rc_label; + + /* If a label was not specified during insert pick one */ + if ((index == LABEL_NOT_SPECIFIED) && + (cfg->rc_nlflags & NLM_F_CREATE)) { + index = find_free_label(net); + } + + /* The first 16 labels are reserved, and may not be set */ + if (index < 16) + goto errout; + + /* The full 20 bit range may not be supported. 
*/ + if (index >= net->mpls.platform_labels) + goto errout; + + /* Ensure only a supported number of labels are present */ + if (cfg->rc_output_labels > MAX_NEW_LABELS) + goto errout; + + err = -ENODEV; + dev = dev_get_by_index(net, cfg->rc_ifindex); + if (!dev) + goto errout; + + /* For now just support ethernet devices */ + err = -EINVAL; + if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK)) + goto errout; + + err = -EINVAL; + if ((cfg->rc_via_table == NEIGH_LINK_TABLE) && + (dev->addr_len != cfg->rc_via_alen)) + goto errout; + + /* Append makes no sense with mpls */ + err = -EOPNOTSUPP; + if (cfg->rc_nlflags & NLM_F_APPEND) + goto errout; + + err = -EEXIST; + platform_label = rtnl_dereference(net->mpls.platform_label); + old = rtnl_dereference(platform_label[index]); + if ((cfg->rc_nlflags & NLM_F_EXCL) && old) + goto errout; + + err = -EEXIST; + if (!(cfg->rc_nlflags & NLM_F_REPLACE) && old) + goto errout; + + err = -ENOENT; + if (!(cfg->rc_nlflags & NLM_F_CREATE) && !old) + goto errout; + + err = -ENOMEM; + rt = mpls_rt_alloc(cfg->rc_via_alen); + if (!rt) + goto errout; + + rt->rt_labels = cfg->rc_output_labels; + for (i = 0; i < rt->rt_labels; i++) + rt->rt_label[i] = cfg->rc_output_label[i]; + rt->rt_protocol = cfg->rc_protocol; + RCU_INIT_POINTER(rt->rt_dev, dev); + rt->rt_via_table = cfg->rc_via_table; + memcpy(rt->rt_via, cfg->rc_via, cfg->rc_via_alen); + + mpls_route_update(net, index, NULL, rt, &cfg->rc_nlinfo); + + dev_put(dev); + return 0; + +errout: + if (dev) + dev_put(dev); + return err; +} + +static int mpls_route_del(struct mpls_route_config *cfg) +{ + struct net *net = cfg->rc_nlinfo.nl_net; + unsigned index; + int err = -EINVAL; + + index = cfg->rc_label; + + /* The first 16 labels are reserved, and may not be removed */ + if (index < 16) + goto errout; + + /* The full 20 bit range may not be supported */ + if (index >= net->mpls.platform_labels) + goto errout; + + mpls_route_update(net, index, NULL, NULL, &cfg->rc_nlinfo); + + err = 0; +errout: + return err; +} + +static void mpls_ifdown(struct net_device *dev) +{ + struct mpls_route __rcu **platform_label; + struct net *net = dev_net(dev); + unsigned index; + + platform_label = rtnl_dereference(net->mpls.platform_label); + for (index = 0; index < net->mpls.platform_labels; index++) { + struct mpls_route *rt = rtnl_dereference(platform_label[index]); + if (!rt) + continue; + if (rtnl_dereference(rt->rt_dev) != dev) + continue; + rt->rt_dev = NULL; + } +} + +static int mpls_dev_notify(struct notifier_block *this, unsigned long event, + void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + + switch(event) { + case NETDEV_UNREGISTER: + mpls_ifdown(dev); + break; + } + return NOTIFY_OK; +} + +static struct notifier_block mpls_dev_notifier = { + .notifier_call = mpls_dev_notify, +}; + +static int nla_put_via(struct sk_buff *skb, + u8 table, const void *addr, int alen) +{ + static const int table_to_family[NEIGH_NR_TABLES + 1] = { + AF_INET, AF_INET6, AF_DECnet, AF_PACKET, + }; + struct nlattr *nla; + struct rtvia *via; + int family = AF_UNSPEC; + + nla = nla_reserve(skb, RTA_VIA, alen + 2); + if (!nla) + return -EMSGSIZE; + + if (table <= NEIGH_NR_TABLES) + family = table_to_family[table]; + + via = nla_data(nla); + via->rtvia_family = family; + memcpy(via->rtvia_addr, addr, alen); + return 0; +} + +int nla_put_labels(struct sk_buff *skb, int attrtype, + u8 labels, const u32 label[]) +{ + struct nlattr *nla; + struct mpls_shim_hdr *nla_label; + bool bos; + int i; + nla = 
nla_reserve(skb, attrtype, labels*4); + if (!nla) + return -EMSGSIZE; + + nla_label = nla_data(nla); + bos = true; + for (i = labels - 1; i >= 0; i--) { + nla_label[i] = mpls_entry_encode(label[i], 0, 0, bos); + bos = false; + } + + return 0; +} + +int nla_get_labels(const struct nlattr *nla, + u32 max_labels, u32 *labels, u32 label[]) +{ + unsigned len = nla_len(nla); + unsigned nla_labels; + struct mpls_shim_hdr *nla_label; + bool bos; + int i; + + /* len needs to be an even multiple of 4 (the label size) */ + if (len & 3) + return -EINVAL; + + /* Limit the number of new labels allowed */ + nla_labels = len/4; + if (nla_labels > max_labels) + return -EINVAL; + + nla_label = nla_data(nla); + bos = true; + for (i = nla_labels - 1; i >= 0; i--, bos = false) { + struct mpls_entry_decoded dec; + dec = mpls_entry_decode(nla_label + i); + + /* Ensure the bottom of stack flag is properly set + * and ttl and tc are both clear. + */ + if ((dec.bos != bos) || dec.ttl || dec.tc) + return -EINVAL; + + label[i] = dec.label; + } + *labels = nla_labels; + return 0; +} + +static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh, + struct mpls_route_config *cfg) +{ + struct rtmsg *rtm; + struct nlattr *tb[RTA_MAX+1]; + int index; + int err; + + err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_mpls_policy); + if (err < 0) + goto errout; + + err = -EINVAL; + rtm = nlmsg_data(nlh); + memset(cfg, 0, sizeof(*cfg)); + + if (rtm->rtm_family != AF_MPLS) + goto errout; + if (rtm->rtm_dst_len != 20) + goto errout; + if (rtm->rtm_src_len != 0) + goto errout; + if (rtm->rtm_tos != 0) + goto errout; + if (rtm->rtm_table != RT_TABLE_MAIN) + goto errout; + /* Any value is acceptable for rtm_protocol */ + + /* As mpls uses destination specific addresses + * (or source specific address in the case of multicast) + * all addresses have universal scope. 
+ */ + if (rtm->rtm_scope != RT_SCOPE_UNIVERSE) + goto errout; + if (rtm->rtm_type != RTN_UNICAST) + goto errout; + if (rtm->rtm_flags != 0) + goto errout; + + cfg->rc_label = LABEL_NOT_SPECIFIED; + cfg->rc_protocol = rtm->rtm_protocol; + cfg->rc_nlflags = nlh->nlmsg_flags; + cfg->rc_nlinfo.portid = NETLINK_CB(skb).portid; + cfg->rc_nlinfo.nlh = nlh; + cfg->rc_nlinfo.nl_net = sock_net(skb->sk); + + for (index = 0; index <= RTA_MAX; index++) { + struct nlattr *nla = tb[index]; + if (!nla) + continue; + + switch(index) { + case RTA_OIF: + cfg->rc_ifindex = nla_get_u32(nla); + break; + case RTA_NEWDST: + if (nla_get_labels(nla, MAX_NEW_LABELS, + &cfg->rc_output_labels, + cfg->rc_output_label)) + goto errout; + break; + case RTA_DST: + { + u32 label_count; + if (nla_get_labels(nla, 1, &label_count, + &cfg->rc_label)) + goto errout; + + /* The first 16 labels are reserved, and may not be set */ + if (cfg->rc_label < 16) + goto errout; + + break; + } + case RTA_VIA: + { + struct rtvia *via = nla_data(nla); + if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr)) + goto errout; + cfg->rc_via_alen = nla_len(nla) - + offsetof(struct rtvia, rtvia_addr); + if (cfg->rc_via_alen > MAX_VIA_ALEN) + goto errout; + + /* Validate the address family */ + switch(via->rtvia_family) { + case AF_PACKET: + cfg->rc_via_table = NEIGH_LINK_TABLE; + break; + case AF_INET: + cfg->rc_via_table = NEIGH_ARP_TABLE; + if (cfg->rc_via_alen != 4) + goto errout; + break; + case AF_INET6: + cfg->rc_via_table = NEIGH_ND_TABLE; + if (cfg->rc_via_alen != 16) + goto errout; + break; + default: + /* Unsupported address family */ + goto errout; + } + + memcpy(cfg->rc_via, via->rtvia_addr, cfg->rc_via_alen); + break; + } + default: + /* Unsupported attribute */ + goto errout; + } + } + + err = 0; +errout: + return err; +} + +static int mpls_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh) +{ + struct mpls_route_config cfg; + int err; + + err = rtm_to_route_config(skb, nlh, &cfg); + if (err < 0) + return err; + + return mpls_route_del(&cfg); +} + + +static int mpls_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh) +{ + struct mpls_route_config cfg; + int err; + + err = rtm_to_route_config(skb, nlh, &cfg); + if (err < 0) + return err; + + return mpls_route_add(&cfg); +} + +static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event, + u32 label, struct mpls_route *rt, int flags) +{ + struct net_device *dev; + struct nlmsghdr *nlh; + struct rtmsg *rtm; + + nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags); + if (nlh == NULL) + return -EMSGSIZE; + + rtm = nlmsg_data(nlh); + rtm->rtm_family = AF_MPLS; + rtm->rtm_dst_len = 20; + rtm->rtm_src_len = 0; + rtm->rtm_tos = 0; + rtm->rtm_table = RT_TABLE_MAIN; + rtm->rtm_protocol = rt->rt_protocol; + rtm->rtm_scope = RT_SCOPE_UNIVERSE; + rtm->rtm_type = RTN_UNICAST; + rtm->rtm_flags = 0; + + if (rt->rt_labels && + nla_put_labels(skb, RTA_NEWDST, rt->rt_labels, rt->rt_label)) + goto nla_put_failure; + if (nla_put_via(skb, rt->rt_via_table, rt->rt_via, rt->rt_via_alen)) + goto nla_put_failure; + dev = rtnl_dereference(rt->rt_dev); + if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex)) + goto nla_put_failure; + if (nla_put_labels(skb, RTA_DST, 1, &label)) + goto nla_put_failure; + + nlmsg_end(skb, nlh); + return 0; + +nla_put_failure: + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; +} + +static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct net *net = sock_net(skb->sk); + struct mpls_route __rcu **platform_label; + size_t 
platform_labels; + unsigned int index; + + ASSERT_RTNL(); + + index = cb->args[0]; + if (index < 16) + index = 16; + + platform_label = rtnl_dereference(net->mpls.platform_label); + platform_labels = net->mpls.platform_labels; + for (; index < platform_labels; index++) { + struct mpls_route *rt; + rt = rtnl_dereference(platform_label[index]); + if (!rt) + continue; + + if (mpls_dump_route(skb, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, RTM_NEWROUTE, + index, rt, NLM_F_MULTI) < 0) + break; + } + cb->args[0] = index; + + return skb->len; +} + +static inline size_t lfib_nlmsg_size(struct mpls_route *rt) +{ + size_t payload = + NLMSG_ALIGN(sizeof(struct rtmsg)) + + nla_total_size(2 + rt->rt_via_alen) /* RTA_VIA */ + + nla_total_size(4); /* RTA_DST */ + if (rt->rt_labels) /* RTA_NEWDST */ + payload += nla_total_size(rt->rt_labels * 4); + if (rt->rt_dev) /* RTA_OIF */ + payload += nla_total_size(4); + return payload; +} + +static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt, + struct nlmsghdr *nlh, struct net *net, u32 portid, + unsigned int nlm_flags) +{ + struct sk_buff *skb; + u32 seq = nlh ? nlh->nlmsg_seq : 0; + int err = -ENOBUFS; + + skb = nlmsg_new(lfib_nlmsg_size(rt), GFP_KERNEL); + if (skb == NULL) + goto errout; + + err = mpls_dump_route(skb, portid, seq, event, label, rt, nlm_flags); + if (err < 0) { + /* -EMSGSIZE implies BUG in lfib_nlmsg_size */ + WARN_ON(err == -EMSGSIZE); + kfree_skb(skb); + goto errout; + } + rtnl_notify(skb, net, portid, RTNLGRP_MPLS_ROUTE, nlh, GFP_KERNEL); + + return; +errout: + if (err < 0) + rtnl_set_sk_err(net, RTNLGRP_MPLS_ROUTE, err); +} + +static int resize_platform_label_table(struct net *net, size_t limit) +{ + size_t size = sizeof(struct mpls_route *) * limit; + size_t old_limit; + size_t cp_size; + struct mpls_route __rcu **labels = NULL, **old; + struct mpls_route *rt0 = NULL, *rt2 = NULL; + unsigned index; + + if (size) { + labels = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); + if (!labels) + labels = vzalloc(size); + + if (!labels) + goto nolabels; + } + + /* In case the predefined labels need to be populated */ + if (limit > LABEL_IPV4_EXPLICIT_NULL) { + struct net_device *lo = net->loopback_dev; + rt0 = mpls_rt_alloc(lo->addr_len); + if (!rt0) + goto nort0; + RCU_INIT_POINTER(rt0->rt_dev, lo); + rt0->rt_protocol = RTPROT_KERNEL; + rt0->rt_via_table = NEIGH_LINK_TABLE; + memcpy(rt0->rt_via, lo->dev_addr, lo->addr_len); + } + if (limit > LABEL_IPV6_EXPLICIT_NULL) { + struct net_device *lo = net->loopback_dev; + rt2 = mpls_rt_alloc(lo->addr_len); + if (!rt2) + goto nort2; + RCU_INIT_POINTER(rt2->rt_dev, lo); + rt2->rt_protocol = RTPROT_KERNEL; + rt2->rt_via_table = NEIGH_LINK_TABLE; + memcpy(rt2->rt_via, lo->dev_addr, lo->addr_len); + } + + rtnl_lock(); + /* Remember the original table */ + old = rtnl_dereference(net->mpls.platform_label); + old_limit = net->mpls.platform_labels; + + /* Free any labels beyond the new table */ + for (index = limit; index < old_limit; index++) + mpls_route_update(net, index, NULL, NULL, NULL); + + /* Copy over the old labels */ + cp_size = size; + if (old_limit < limit) + cp_size = old_limit * sizeof(struct mpls_route *); + + memcpy(labels, old, cp_size); + + /* If needed set the predefined labels */ + if ((old_limit <= LABEL_IPV6_EXPLICIT_NULL) && + (limit > LABEL_IPV6_EXPLICIT_NULL)) { + RCU_INIT_POINTER(labels[LABEL_IPV6_EXPLICIT_NULL], rt2); + rt2 = NULL; + } + + if ((old_limit <= LABEL_IPV4_EXPLICIT_NULL) && + (limit > LABEL_IPV4_EXPLICIT_NULL)) { + 
RCU_INIT_POINTER(labels[LABEL_IPV4_EXPLICIT_NULL], rt0); + rt0 = NULL; + } + + /* Update the global pointers */ + net->mpls.platform_labels = limit; + rcu_assign_pointer(net->mpls.platform_label, labels); + + rtnl_unlock(); + + mpls_rt_free(rt2); + mpls_rt_free(rt0); + + if (old) { + synchronize_rcu(); + kvfree(old); + } + return 0; + +nort2: + mpls_rt_free(rt0); +nort0: + kvfree(labels); +nolabels: + return -ENOMEM; +} + +static int mpls_platform_labels(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct net *net = table->data; + int platform_labels = net->mpls.platform_labels; + int ret; + struct ctl_table tmp = { + .procname = table->procname, + .data = &platform_labels, + .maxlen = sizeof(int), + .mode = table->mode, + .extra1 = &zero, + .extra2 = &label_limit, + }; + + ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); + + if (write && ret == 0) + ret = resize_platform_label_table(net, platform_labels); + + return ret; +} + +static struct ctl_table mpls_table[] = { + { + .procname = "platform_labels", + .data = NULL, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = mpls_platform_labels, + }, + { } +}; + +static int mpls_net_init(struct net *net) +{ + struct ctl_table *table; + + net->mpls.platform_labels = 0; + net->mpls.platform_label = NULL; + + table = kmemdup(mpls_table, sizeof(mpls_table), GFP_KERNEL); + if (table == NULL) + return -ENOMEM; + + table[0].data = net; + net->mpls.ctl = register_net_sysctl(net, "net/mpls", table); + if (net->mpls.ctl == NULL) + return -ENOMEM; + + return 0; +} + +static void mpls_net_exit(struct net *net) +{ + struct mpls_route __rcu **platform_label; + size_t platform_labels; + struct ctl_table *table; + unsigned int index; + + table = net->mpls.ctl->ctl_table_arg; + unregister_net_sysctl_table(net->mpls.ctl); + kfree(table); + + /* An rcu grace period has passed since there was a device in + * the network namespace (and thus the last in flight packet) + * left this network namespace. This is because + * unregister_netdevice_many and netdev_run_todo has completed + * for each network device that was in this network namespace. + * + * As such no additional rcu synchronization is necessary when + * freeing the platform_label table. 
+ */ + rtnl_lock(); + platform_label = rtnl_dereference(net->mpls.platform_label); + platform_labels = net->mpls.platform_labels; + for (index = 0; index < platform_labels; index++) { + struct mpls_route *rt = rtnl_dereference(platform_label[index]); + RCU_INIT_POINTER(platform_label[index], NULL); + mpls_rt_free(rt); + } + rtnl_unlock(); + + kvfree(platform_label); +} + +static struct pernet_operations mpls_net_ops = { + .init = mpls_net_init, + .exit = mpls_net_exit, +}; + +static int __init mpls_init(void) +{ + int err; + + BUILD_BUG_ON(sizeof(struct mpls_shim_hdr) != 4); + + err = register_pernet_subsys(&mpls_net_ops); + if (err) + goto out; + + err = register_netdevice_notifier(&mpls_dev_notifier); + if (err) + goto out_unregister_pernet; + + dev_add_pack(&mpls_packet_type); + + rtnl_register(PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, NULL); + rtnl_register(PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, NULL); + rtnl_register(PF_MPLS, RTM_GETROUTE, NULL, mpls_dump_routes, NULL); + err = 0; +out: + return err; + +out_unregister_pernet: + unregister_pernet_subsys(&mpls_net_ops); + goto out; +} +module_init(mpls_init); + +static void __exit mpls_exit(void) +{ + rtnl_unregister_all(PF_MPLS); + dev_remove_pack(&mpls_packet_type); + unregister_netdevice_notifier(&mpls_dev_notifier); + unregister_pernet_subsys(&mpls_net_ops); +} +module_exit(mpls_exit); + +MODULE_DESCRIPTION("MultiProtocol Label Switching"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_NETPROTO(PF_MPLS); diff --git a/net/mpls/internal.h b/net/mpls/internal.h new file mode 100644 index 0000000..fb6de920 --- /dev/null +++ b/net/mpls/internal.h @@ -0,0 +1,59 @@ +#ifndef MPLS_INTERNAL_H +#define MPLS_INTERNAL_H + +#define LABEL_IPV4_EXPLICIT_NULL 0 /* RFC3032 */ +#define LABEL_ROUTER_ALERT_LABEL 1 /* RFC3032 */ +#define LABEL_IPV6_EXPLICIT_NULL 2 /* RFC3032 */ +#define LABEL_IMPLICIT_NULL 3 /* RFC3032 */ +#define LABEL_ENTROPY_INDICATOR 7 /* RFC6790 */ +#define LABEL_GAL 13 /* RFC5586 */ +#define LABEL_OAM_ALERT 14 /* RFC3429 */ +#define LABEL_EXTENSION 15 /* RFC7274 */ + + +struct mpls_shim_hdr { + __be32 label_stack_entry; +}; + +struct mpls_entry_decoded { + u32 label; + u8 ttl; + u8 tc; + u8 bos; +}; + +struct sk_buff; + +static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb) +{ + return (struct mpls_shim_hdr *)skb_network_header(skb); +} + +static inline struct mpls_shim_hdr mpls_entry_encode(u32 label, unsigned ttl, unsigned tc, bool bos) +{ + struct mpls_shim_hdr result; + result.label_stack_entry = + cpu_to_be32((label << MPLS_LS_LABEL_SHIFT) | + (tc << MPLS_LS_TC_SHIFT) | + (bos ? 
(1 << MPLS_LS_S_SHIFT) : 0) | + (ttl << MPLS_LS_TTL_SHIFT)); + return result; +} + +static inline struct mpls_entry_decoded mpls_entry_decode(struct mpls_shim_hdr *hdr) +{ + struct mpls_entry_decoded result; + unsigned entry = be32_to_cpu(hdr->label_stack_entry); + + result.label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT; + result.ttl = (entry & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT; + result.tc = (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT; + result.bos = (entry & MPLS_LS_S_MASK) >> MPLS_LS_S_SHIFT; + + return result; +} + +int nla_put_labels(struct sk_buff *skb, int attrtype, u8 labels, const u32 label[]); +int nla_get_labels(const struct nlattr *nla, u32 max_labels, u32 *labels, u32 label[]); + +#endif /* MPLS_INTERNAL_H */ diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index b02660f..971cd75 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -438,8 +438,10 @@ config NF_TABLES To compile it as a module, choose M here. +if NF_TABLES + config NF_TABLES_INET - depends on NF_TABLES && IPV6 + depends on IPV6 select NF_TABLES_IPV4 select NF_TABLES_IPV6 tristate "Netfilter nf_tables mixed IPv4/IPv6 tables support" @@ -447,21 +449,18 @@ config NF_TABLES_INET This option enables support for a mixed IPv4/IPv6 "inet" table. config NFT_EXTHDR - depends on NF_TABLES tristate "Netfilter nf_tables IPv6 exthdr module" help This option adds the "exthdr" expression that you can use to match IPv6 extension headers. config NFT_META - depends on NF_TABLES tristate "Netfilter nf_tables meta module" help This option adds the "meta" expression that you can use to match and to set packet metainformation such as the packet mark. config NFT_CT - depends on NF_TABLES depends on NF_CONNTRACK tristate "Netfilter nf_tables conntrack module" help @@ -469,42 +468,36 @@ config NFT_CT connection tracking information such as the flow state. config NFT_RBTREE - depends on NF_TABLES tristate "Netfilter nf_tables rbtree set module" help This option adds the "rbtree" set type (Red Black tree) that is used to build interval-based sets. config NFT_HASH - depends on NF_TABLES tristate "Netfilter nf_tables hash set module" help This option adds the "hash" set type that is used to build one-way mappings between matchings and actions. config NFT_COUNTER - depends on NF_TABLES tristate "Netfilter nf_tables counter module" help This option adds the "counter" expression that you can use to include packet and byte counters in a rule. config NFT_LOG - depends on NF_TABLES tristate "Netfilter nf_tables log module" help This option adds the "log" expression that you can use to log packets matching some criteria. config NFT_LIMIT - depends on NF_TABLES tristate "Netfilter nf_tables limit module" help This option adds the "limit" expression that you can use to ratelimit rule matchings. config NFT_MASQ - depends on NF_TABLES depends on NF_CONNTRACK depends on NF_NAT tristate "Netfilter nf_tables masquerade support" @@ -513,7 +506,6 @@ config NFT_MASQ to perform NAT in the masquerade flavour. config NFT_REDIR - depends on NF_TABLES depends on NF_CONNTRACK depends on NF_NAT tristate "Netfilter nf_tables redirect support" @@ -522,7 +514,6 @@ config NFT_REDIR to perform NAT in the redirect flavour. config NFT_NAT - depends on NF_TABLES depends on NF_CONNTRACK select NF_NAT tristate "Netfilter nf_tables nat module" @@ -531,7 +522,6 @@ config NFT_NAT typical Network Address Translation (NAT) packet transformations. 
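Illustrative sketch, not part of the patch: the new net/mpls/internal.h above packs each 32-bit MPLS label stack entry as label (20 bits) | TC (3) | bottom-of-stack (1) | TTL (8) before converting it to network byte order. The standalone C program below mirrors that layout; the shift values are assumed to match include/uapi/linux/mpls.h.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit positions of the MPLS label stack entry fields (host order). */
#define LS_LABEL_SHIFT 12   /* 20-bit label in bits 31..12 */
#define LS_TC_SHIFT     9   /* 3-bit traffic class in bits 11..9 */
#define LS_S_SHIFT      8   /* bottom-of-stack flag in bit 8 */
#define LS_TTL_SHIFT    0   /* 8-bit TTL in bits 7..0 */

/* Mirror of mpls_entry_encode(), minus the cpu_to_be32() step. */
static uint32_t lse_pack(uint32_t label, uint32_t tc, bool bos, uint32_t ttl)
{
	return (label << LS_LABEL_SHIFT) | (tc << LS_TC_SHIFT) |
	       ((bos ? 1u : 0u) << LS_S_SHIFT) | (ttl << LS_TTL_SHIFT);
}

int main(void)
{
	/* Label 100, TC 0, bottom of stack, TTL 64 -> 0x00064140 */
	uint32_t lse = lse_pack(100, 0, true, 64);

	printf("lse=0x%08x label=%u bos=%u ttl=%u\n", lse,
	       lse >> LS_LABEL_SHIFT, (lse >> LS_S_SHIFT) & 1, lse & 0xff);
	return 0;
}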
config NFT_QUEUE - depends on NF_TABLES depends on NETFILTER_XTABLES depends on NETFILTER_NETLINK_QUEUE tristate "Netfilter nf_tables queue module" @@ -540,7 +530,6 @@ config NFT_QUEUE infrastructure (also known as NFQUEUE) from nftables. config NFT_REJECT - depends on NF_TABLES default m if NETFILTER_ADVANCED=n tristate "Netfilter nf_tables reject support" help @@ -554,7 +543,6 @@ config NFT_REJECT_INET tristate config NFT_COMPAT - depends on NF_TABLES depends on NETFILTER_XTABLES tristate "Netfilter x_tables over nf_tables module" help @@ -562,6 +550,8 @@ config NFT_COMPAT x_tables match/target extensions over the nf_tables framework. +endif # NF_TABLES + config NETFILTER_XTABLES tristate "Netfilter Xtables support (required for ip_tables)" default m if NETFILTER_ADVANCED=n @@ -951,7 +941,7 @@ comment "Xtables matches" config NETFILTER_XT_MATCH_ADDRTYPE tristate '"addrtype" address type match support' - depends on NETFILTER_ADVANCED + default m if NETFILTER_ADVANCED=n ---help--- This option allows you to match what routing thinks of an address, eg. UNICAST, LOCAL, BROADCAST, ... diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index b87ca32..04dbd9c 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -119,24 +119,24 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb) struct ip_vs_service *svc; s = this_cpu_ptr(dest->stats.cpustats); - s->ustats.inpkts++; u64_stats_update_begin(&s->syncp); - s->ustats.inbytes += skb->len; + s->cnt.inpkts++; + s->cnt.inbytes += skb->len; u64_stats_update_end(&s->syncp); rcu_read_lock(); svc = rcu_dereference(dest->svc); s = this_cpu_ptr(svc->stats.cpustats); - s->ustats.inpkts++; u64_stats_update_begin(&s->syncp); - s->ustats.inbytes += skb->len; + s->cnt.inpkts++; + s->cnt.inbytes += skb->len; u64_stats_update_end(&s->syncp); rcu_read_unlock(); s = this_cpu_ptr(ipvs->tot_stats.cpustats); - s->ustats.inpkts++; u64_stats_update_begin(&s->syncp); - s->ustats.inbytes += skb->len; + s->cnt.inpkts++; + s->cnt.inbytes += skb->len; u64_stats_update_end(&s->syncp); } } @@ -153,24 +153,24 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb) struct ip_vs_service *svc; s = this_cpu_ptr(dest->stats.cpustats); - s->ustats.outpkts++; u64_stats_update_begin(&s->syncp); - s->ustats.outbytes += skb->len; + s->cnt.outpkts++; + s->cnt.outbytes += skb->len; u64_stats_update_end(&s->syncp); rcu_read_lock(); svc = rcu_dereference(dest->svc); s = this_cpu_ptr(svc->stats.cpustats); - s->ustats.outpkts++; u64_stats_update_begin(&s->syncp); - s->ustats.outbytes += skb->len; + s->cnt.outpkts++; + s->cnt.outbytes += skb->len; u64_stats_update_end(&s->syncp); rcu_read_unlock(); s = this_cpu_ptr(ipvs->tot_stats.cpustats); - s->ustats.outpkts++; u64_stats_update_begin(&s->syncp); - s->ustats.outbytes += skb->len; + s->cnt.outpkts++; + s->cnt.outbytes += skb->len; u64_stats_update_end(&s->syncp); } } @@ -183,13 +183,19 @@ ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc) struct ip_vs_cpu_stats *s; s = this_cpu_ptr(cp->dest->stats.cpustats); - s->ustats.conns++; + u64_stats_update_begin(&s->syncp); + s->cnt.conns++; + u64_stats_update_end(&s->syncp); s = this_cpu_ptr(svc->stats.cpustats); - s->ustats.conns++; + u64_stats_update_begin(&s->syncp); + s->cnt.conns++; + u64_stats_update_end(&s->syncp); s = this_cpu_ptr(ipvs->tot_stats.cpustats); - s->ustats.conns++; + u64_stats_update_begin(&s->syncp); + s->cnt.conns++; + u64_stats_update_end(&s->syncp); } @@ -1046,6 +1052,26 @@ static inline 
bool is_new_conn(const struct sk_buff *skb, } } +static inline bool is_new_conn_expected(const struct ip_vs_conn *cp, + int conn_reuse_mode) +{ + /* Controlled (FTP DATA or persistence)? */ + if (cp->control) + return false; + + switch (cp->protocol) { + case IPPROTO_TCP: + return (cp->state == IP_VS_TCP_S_TIME_WAIT) || + ((conn_reuse_mode & 2) && + (cp->state == IP_VS_TCP_S_FIN_WAIT) && + (cp->flags & IP_VS_CONN_F_NOOUTPUT)); + case IPPROTO_SCTP: + return cp->state == IP_VS_SCTP_S_CLOSED; + default: + return false; + } +} + /* Handle response packets: rewrite addresses and send away... */ static unsigned int @@ -1585,6 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) struct ip_vs_conn *cp; int ret, pkts; struct netns_ipvs *ipvs; + int conn_reuse_mode; /* Already marked as IPVS request or reply? */ if (skb->ipvs_property) @@ -1653,10 +1680,14 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) */ cp = pp->conn_in_get(af, skb, &iph, 0); - if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp && cp->dest && - unlikely(!atomic_read(&cp->dest->weight)) && !iph.fragoffs && - is_new_conn(skb, &iph)) { - ip_vs_conn_expire_now(cp); + conn_reuse_mode = sysctl_conn_reuse_mode(ipvs); + if (conn_reuse_mode && !iph.fragoffs && + is_new_conn(skb, &iph) && cp && + ((unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest && + unlikely(!atomic_read(&cp->dest->weight))) || + unlikely(is_new_conn_expected(cp, conn_reuse_mode)))) { + if (!atomic_read(&cp->n_control)) + ip_vs_conn_expire_now(cp); __ip_vs_conn_put(cp); cp = NULL; } diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index ed99448..4953267 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -729,9 +729,9 @@ static void ip_vs_trash_cleanup(struct net *net) } static void -ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src) +ip_vs_copy_stats(struct ip_vs_kstats *dst, struct ip_vs_stats *src) { -#define IP_VS_SHOW_STATS_COUNTER(c) dst->c = src->ustats.c - src->ustats0.c +#define IP_VS_SHOW_STATS_COUNTER(c) dst->c = src->kstats.c - src->kstats0.c spin_lock_bh(&src->lock); @@ -747,13 +747,28 @@ ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src) } static void +ip_vs_export_stats_user(struct ip_vs_stats_user *dst, struct ip_vs_kstats *src) +{ + dst->conns = (u32)src->conns; + dst->inpkts = (u32)src->inpkts; + dst->outpkts = (u32)src->outpkts; + dst->inbytes = src->inbytes; + dst->outbytes = src->outbytes; + dst->cps = (u32)src->cps; + dst->inpps = (u32)src->inpps; + dst->outpps = (u32)src->outpps; + dst->inbps = (u32)src->inbps; + dst->outbps = (u32)src->outbps; +} + +static void ip_vs_zero_stats(struct ip_vs_stats *stats) { spin_lock_bh(&stats->lock); /* get current counters as zero point, rates are zeroed */ -#define IP_VS_ZERO_STATS_COUNTER(c) stats->ustats0.c = stats->ustats.c +#define IP_VS_ZERO_STATS_COUNTER(c) stats->kstats0.c = stats->kstats.c IP_VS_ZERO_STATS_COUNTER(conns); IP_VS_ZERO_STATS_COUNTER(inpkts); @@ -1808,6 +1823,12 @@ static struct ctl_table vs_vars[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "conn_reuse_mode", + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, #ifdef CONFIG_IP_VS_DEBUG { .procname = "debug_level", @@ -2044,7 +2065,7 @@ static const struct file_operations ip_vs_info_fops = { static int ip_vs_stats_show(struct seq_file *seq, void *v) { struct net *net = seq_file_single_net(seq); - struct ip_vs_stats_user show; + struct ip_vs_kstats 
show; /* 01234567 01234567 01234567 0123456701234567 0123456701234567 */ seq_puts(seq, @@ -2053,17 +2074,22 @@ static int ip_vs_stats_show(struct seq_file *seq, void *v) " Conns Packets Packets Bytes Bytes\n"); ip_vs_copy_stats(&show, &net_ipvs(net)->tot_stats); - seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", show.conns, - show.inpkts, show.outpkts, - (unsigned long long) show.inbytes, - (unsigned long long) show.outbytes); - -/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */ + seq_printf(seq, "%8LX %8LX %8LX %16LX %16LX\n\n", + (unsigned long long)show.conns, + (unsigned long long)show.inpkts, + (unsigned long long)show.outpkts, + (unsigned long long)show.inbytes, + (unsigned long long)show.outbytes); + +/* 01234567 01234567 01234567 0123456701234567 0123456701234567*/ seq_puts(seq, - " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n"); - seq_printf(seq, "%8X %8X %8X %16X %16X\n", - show.cps, show.inpps, show.outpps, - show.inbps, show.outbps); + " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n"); + seq_printf(seq, "%8LX %8LX %8LX %16LX %16LX\n", + (unsigned long long)show.cps, + (unsigned long long)show.inpps, + (unsigned long long)show.outpps, + (unsigned long long)show.inbps, + (unsigned long long)show.outbps); return 0; } @@ -2086,7 +2112,7 @@ static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v) struct net *net = seq_file_single_net(seq); struct ip_vs_stats *tot_stats = &net_ipvs(net)->tot_stats; struct ip_vs_cpu_stats __percpu *cpustats = tot_stats->cpustats; - struct ip_vs_stats_user rates; + struct ip_vs_kstats kstats; int i; /* 01234567 01234567 01234567 0123456701234567 0123456701234567 */ @@ -2098,41 +2124,41 @@ static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v) for_each_possible_cpu(i) { struct ip_vs_cpu_stats *u = per_cpu_ptr(cpustats, i); unsigned int start; - __u64 inbytes, outbytes; + u64 conns, inpkts, outpkts, inbytes, outbytes; do { start = u64_stats_fetch_begin_irq(&u->syncp); - inbytes = u->ustats.inbytes; - outbytes = u->ustats.outbytes; + conns = u->cnt.conns; + inpkts = u->cnt.inpkts; + outpkts = u->cnt.outpkts; + inbytes = u->cnt.inbytes; + outbytes = u->cnt.outbytes; } while (u64_stats_fetch_retry_irq(&u->syncp, start)); - seq_printf(seq, "%3X %8X %8X %8X %16LX %16LX\n", - i, u->ustats.conns, u->ustats.inpkts, - u->ustats.outpkts, (__u64)inbytes, - (__u64)outbytes); + seq_printf(seq, "%3X %8LX %8LX %8LX %16LX %16LX\n", + i, (u64)conns, (u64)inpkts, + (u64)outpkts, (u64)inbytes, + (u64)outbytes); } - spin_lock_bh(&tot_stats->lock); - - seq_printf(seq, " ~ %8X %8X %8X %16LX %16LX\n\n", - tot_stats->ustats.conns, tot_stats->ustats.inpkts, - tot_stats->ustats.outpkts, - (unsigned long long) tot_stats->ustats.inbytes, - (unsigned long long) tot_stats->ustats.outbytes); - - ip_vs_read_estimator(&rates, tot_stats); + ip_vs_copy_stats(&kstats, tot_stats); - spin_unlock_bh(&tot_stats->lock); + seq_printf(seq, " ~ %8LX %8LX %8LX %16LX %16LX\n\n", + (unsigned long long)kstats.conns, + (unsigned long long)kstats.inpkts, + (unsigned long long)kstats.outpkts, + (unsigned long long)kstats.inbytes, + (unsigned long long)kstats.outbytes); -/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */ +/* ... 
01234567 01234567 01234567 0123456701234567 0123456701234567 */ seq_puts(seq, - " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n"); - seq_printf(seq, " %8X %8X %8X %16X %16X\n", - rates.cps, - rates.inpps, - rates.outpps, - rates.inbps, - rates.outbps); + " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n"); + seq_printf(seq, " %8LX %8LX %8LX %16LX %16LX\n", + kstats.cps, + kstats.inpps, + kstats.outpps, + kstats.inbps, + kstats.outbps); return 0; } @@ -2400,6 +2426,7 @@ static void ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src) { struct ip_vs_scheduler *sched; + struct ip_vs_kstats kstats; sched = rcu_dereference_protected(src->scheduler, 1); dst->protocol = src->protocol; @@ -2411,7 +2438,8 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src) dst->timeout = src->timeout / HZ; dst->netmask = src->netmask; dst->num_dests = src->num_dests; - ip_vs_copy_stats(&dst->stats, &src->stats); + ip_vs_copy_stats(&kstats, &src->stats); + ip_vs_export_stats_user(&dst->stats, &kstats); } static inline int @@ -2485,6 +2513,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get, int count = 0; struct ip_vs_dest *dest; struct ip_vs_dest_entry entry; + struct ip_vs_kstats kstats; memset(&entry, 0, sizeof(entry)); list_for_each_entry(dest, &svc->destinations, n_list) { @@ -2506,7 +2535,8 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get, entry.activeconns = atomic_read(&dest->activeconns); entry.inactconns = atomic_read(&dest->inactconns); entry.persistconns = atomic_read(&dest->persistconns); - ip_vs_copy_stats(&entry.stats, &dest->stats); + ip_vs_copy_stats(&kstats, &dest->stats); + ip_vs_export_stats_user(&entry.stats, &kstats); if (copy_to_user(&uptr->entrytable[count], &entry, sizeof(entry))) { ret = -EFAULT; @@ -2798,25 +2828,51 @@ static const struct nla_policy ip_vs_dest_policy[IPVS_DEST_ATTR_MAX + 1] = { }; static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type, - struct ip_vs_stats *stats) + struct ip_vs_kstats *kstats) { - struct ip_vs_stats_user ustats; struct nlattr *nl_stats = nla_nest_start(skb, container_type); + if (!nl_stats) return -EMSGSIZE; - ip_vs_copy_stats(&ustats, stats); - - if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns) || - nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts) || - nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts) || - nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes) || - nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes) || - nla_put_u32(skb, IPVS_STATS_ATTR_CPS, ustats.cps) || - nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps) || - nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps) || - nla_put_u32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps) || - nla_put_u32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps)) + if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, (u32)kstats->conns) || + nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, (u32)kstats->inpkts) || + nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, (u32)kstats->outpkts) || + nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes) || + nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes) || + nla_put_u32(skb, IPVS_STATS_ATTR_CPS, (u32)kstats->cps) || + nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, (u32)kstats->inpps) || + nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, (u32)kstats->outpps) || + nla_put_u32(skb, IPVS_STATS_ATTR_INBPS, (u32)kstats->inbps) || + nla_put_u32(skb, IPVS_STATS_ATTR_OUTBPS, (u32)kstats->outbps)) + goto nla_put_failure; + nla_nest_end(skb, 
nl_stats); + + return 0; + +nla_put_failure: + nla_nest_cancel(skb, nl_stats); + return -EMSGSIZE; +} + +static int ip_vs_genl_fill_stats64(struct sk_buff *skb, int container_type, + struct ip_vs_kstats *kstats) +{ + struct nlattr *nl_stats = nla_nest_start(skb, container_type); + + if (!nl_stats) + return -EMSGSIZE; + + if (nla_put_u64(skb, IPVS_STATS_ATTR_CONNS, kstats->conns) || + nla_put_u64(skb, IPVS_STATS_ATTR_INPKTS, kstats->inpkts) || + nla_put_u64(skb, IPVS_STATS_ATTR_OUTPKTS, kstats->outpkts) || + nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes) || + nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes) || + nla_put_u64(skb, IPVS_STATS_ATTR_CPS, kstats->cps) || + nla_put_u64(skb, IPVS_STATS_ATTR_INPPS, kstats->inpps) || + nla_put_u64(skb, IPVS_STATS_ATTR_OUTPPS, kstats->outpps) || + nla_put_u64(skb, IPVS_STATS_ATTR_INBPS, kstats->inbps) || + nla_put_u64(skb, IPVS_STATS_ATTR_OUTBPS, kstats->outbps)) goto nla_put_failure; nla_nest_end(skb, nl_stats); @@ -2835,6 +2891,7 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb, struct nlattr *nl_service; struct ip_vs_flags flags = { .flags = svc->flags, .mask = ~0 }; + struct ip_vs_kstats kstats; nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE); if (!nl_service) @@ -2860,7 +2917,10 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb, nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) || nla_put_be32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask)) goto nla_put_failure; - if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats)) + ip_vs_copy_stats(&kstats, &svc->stats); + if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &kstats)) + goto nla_put_failure; + if (ip_vs_genl_fill_stats64(skb, IPVS_SVC_ATTR_STATS64, &kstats)) goto nla_put_failure; nla_nest_end(skb, nl_service); @@ -3032,6 +3092,7 @@ static struct ip_vs_service *ip_vs_genl_find_service(struct net *net, static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest) { struct nlattr *nl_dest; + struct ip_vs_kstats kstats; nl_dest = nla_nest_start(skb, IPVS_CMD_ATTR_DEST); if (!nl_dest) @@ -3054,7 +3115,10 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest) atomic_read(&dest->persistconns)) || nla_put_u16(skb, IPVS_DEST_ATTR_ADDR_FAMILY, dest->af)) goto nla_put_failure; - if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats)) + ip_vs_copy_stats(&kstats, &dest->stats); + if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &kstats)) + goto nla_put_failure; + if (ip_vs_genl_fill_stats64(skb, IPVS_DEST_ATTR_STATS64, &kstats)) goto nla_put_failure; nla_nest_end(skb, nl_dest); @@ -3732,6 +3796,8 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net) ipvs->sysctl_pmtu_disc = 1; tbl[idx++].data = &ipvs->sysctl_pmtu_disc; tbl[idx++].data = &ipvs->sysctl_backup_only; + ipvs->sysctl_conn_reuse_mode = 1; + tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode; ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl); diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c index 1425e9a..ef0eb0a 100644 --- a/net/netfilter/ipvs/ip_vs_est.c +++ b/net/netfilter/ipvs/ip_vs_est.c @@ -45,17 +45,19 @@ NOTES. - * The stored value for average bps is scaled by 2^5, so that maximal - rate is ~2.15Gbits/s, average pps and cps are scaled by 2^10. + * Average bps is scaled by 2^5, while average pps and cps are scaled by 2^10. 
- * A lot code is taken from net/sched/estimator.c + * Netlink users can see 64-bit values but sockopt users are restricted + to 32-bit values for conns, packets, bps, cps and pps. + + * A lot of code is taken from net/core/gen_estimator.c */ /* * Make a summary from each cpu */ -static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum, +static void ip_vs_read_cpu_stats(struct ip_vs_kstats *sum, struct ip_vs_cpu_stats __percpu *stats) { int i; @@ -64,27 +66,31 @@ static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum, for_each_possible_cpu(i) { struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i); unsigned int start; - __u64 inbytes, outbytes; + u64 conns, inpkts, outpkts, inbytes, outbytes; + if (add) { - sum->conns += s->ustats.conns; - sum->inpkts += s->ustats.inpkts; - sum->outpkts += s->ustats.outpkts; do { start = u64_stats_fetch_begin(&s->syncp); - inbytes = s->ustats.inbytes; - outbytes = s->ustats.outbytes; + conns = s->cnt.conns; + inpkts = s->cnt.inpkts; + outpkts = s->cnt.outpkts; + inbytes = s->cnt.inbytes; + outbytes = s->cnt.outbytes; } while (u64_stats_fetch_retry(&s->syncp, start)); + sum->conns += conns; + sum->inpkts += inpkts; + sum->outpkts += outpkts; sum->inbytes += inbytes; sum->outbytes += outbytes; } else { add = true; - sum->conns = s->ustats.conns; - sum->inpkts = s->ustats.inpkts; - sum->outpkts = s->ustats.outpkts; do { start = u64_stats_fetch_begin(&s->syncp); - sum->inbytes = s->ustats.inbytes; - sum->outbytes = s->ustats.outbytes; + sum->conns = s->cnt.conns; + sum->inpkts = s->cnt.inpkts; + sum->outpkts = s->cnt.outpkts; + sum->inbytes = s->cnt.inbytes; + sum->outbytes = s->cnt.outbytes; } while (u64_stats_fetch_retry(&s->syncp, start)); } } @@ -95,10 +101,7 @@ static void estimation_timer(unsigned long arg) { struct ip_vs_estimator *e; struct ip_vs_stats *s; - u32 n_conns; - u32 n_inpkts, n_outpkts; - u64 n_inbytes, n_outbytes; - u32 rate; + u64 rate; struct net *net = (struct net *)arg; struct netns_ipvs *ipvs; @@ -108,33 +111,29 @@ static void estimation_timer(unsigned long arg) s = container_of(e, struct ip_vs_stats, est); spin_lock(&s->lock); - ip_vs_read_cpu_stats(&s->ustats, s->cpustats); - n_conns = s->ustats.conns; - n_inpkts = s->ustats.inpkts; - n_outpkts = s->ustats.outpkts; - n_inbytes = s->ustats.inbytes; - n_outbytes = s->ustats.outbytes; + ip_vs_read_cpu_stats(&s->kstats, s->cpustats); /* scaled by 2^10, but divided 2 seconds */ - rate = (n_conns - e->last_conns) << 9; - e->last_conns = n_conns; - e->cps += ((long)rate - (long)e->cps) >> 2; - - rate = (n_inpkts - e->last_inpkts) << 9; - e->last_inpkts = n_inpkts; - e->inpps += ((long)rate - (long)e->inpps) >> 2; - - rate = (n_outpkts - e->last_outpkts) << 9; - e->last_outpkts = n_outpkts; - e->outpps += ((long)rate - (long)e->outpps) >> 2; - - rate = (n_inbytes - e->last_inbytes) << 4; - e->last_inbytes = n_inbytes; - e->inbps += ((long)rate - (long)e->inbps) >> 2; - - rate = (n_outbytes - e->last_outbytes) << 4; - e->last_outbytes = n_outbytes; - e->outbps += ((long)rate - (long)e->outbps) >> 2; + rate = (s->kstats.conns - e->last_conns) << 9; + e->last_conns = s->kstats.conns; + e->cps += ((s64)rate - (s64)e->cps) >> 2; + + rate = (s->kstats.inpkts - e->last_inpkts) << 9; + e->last_inpkts = s->kstats.inpkts; + e->inpps += ((s64)rate - (s64)e->inpps) >> 2; + + rate = (s->kstats.outpkts - e->last_outpkts) << 9; + e->last_outpkts = s->kstats.outpkts; + e->outpps += ((s64)rate - (s64)e->outpps) >> 2; + + /* scaled by 2^5, but divided 2 seconds */ + rate = (s->kstats.inbytes - 
e->last_inbytes) << 4; + e->last_inbytes = s->kstats.inbytes; + e->inbps += ((s64)rate - (s64)e->inbps) >> 2; + + rate = (s->kstats.outbytes - e->last_outbytes) << 4; + e->last_outbytes = s->kstats.outbytes; + e->outbps += ((s64)rate - (s64)e->outbps) >> 2; spin_unlock(&s->lock); } spin_unlock(&ipvs->est_lock); @@ -166,14 +165,14 @@ void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats) void ip_vs_zero_estimator(struct ip_vs_stats *stats) { struct ip_vs_estimator *est = &stats->est; - struct ip_vs_stats_user *u = &stats->ustats; + struct ip_vs_kstats *k = &stats->kstats; /* reset counters, caller must hold the stats->lock lock */ - est->last_inbytes = u->inbytes; - est->last_outbytes = u->outbytes; - est->last_conns = u->conns; - est->last_inpkts = u->inpkts; - est->last_outpkts = u->outpkts; + est->last_inbytes = k->inbytes; + est->last_outbytes = k->outbytes; + est->last_conns = k->conns; + est->last_inpkts = k->inpkts; + est->last_outpkts = k->outpkts; est->cps = 0; est->inpps = 0; est->outpps = 0; @@ -182,8 +181,7 @@ void ip_vs_zero_estimator(struct ip_vs_stats *stats) } /* Get decoded rates */ -void ip_vs_read_estimator(struct ip_vs_stats_user *dst, - struct ip_vs_stats *stats) +void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats) { struct ip_vs_estimator *e = &stats->est; diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index d93ceeb..08d9555 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -845,10 +845,27 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, struct ip_vs_conn *cp; struct netns_ipvs *ipvs = net_ipvs(net); - if (!(flags & IP_VS_CONN_F_TEMPLATE)) + if (!(flags & IP_VS_CONN_F_TEMPLATE)) { cp = ip_vs_conn_in_get(param); - else + if (cp && ((cp->dport != dport) || + !ip_vs_addr_equal(cp->daf, &cp->daddr, daddr))) { + if (!(flags & IP_VS_CONN_F_INACTIVE)) { + ip_vs_conn_expire_now(cp); + __ip_vs_conn_put(cp); + cp = NULL; + } else { + /* This is the expiration message for the + * connection that was already replaced, so we + * just ignore it. 
+ */ + __ip_vs_conn_put(cp); + kfree(param->pe_data); + return; + } + } + } else { cp = ip_vs_ct_in_get(param); + } if (cp) { /* Free pe_data */ diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c index b8b95f4..57a26cc 100644 --- a/net/netfilter/nf_conntrack_amanda.c +++ b/net/netfilter/nf_conntrack_amanda.c @@ -88,7 +88,6 @@ static int amanda_help(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo) { - struct ts_state ts; struct nf_conntrack_expect *exp; struct nf_conntrack_tuple *tuple; unsigned int dataoff, start, stop, off, i; @@ -113,23 +112,20 @@ static int amanda_help(struct sk_buff *skb, return NF_ACCEPT; } - memset(&ts, 0, sizeof(ts)); start = skb_find_text(skb, dataoff, skb->len, - search[SEARCH_CONNECT].ts, &ts); + search[SEARCH_CONNECT].ts); if (start == UINT_MAX) goto out; start += dataoff + search[SEARCH_CONNECT].len; - memset(&ts, 0, sizeof(ts)); stop = skb_find_text(skb, start, skb->len, - search[SEARCH_NEWLINE].ts, &ts); + search[SEARCH_NEWLINE].ts); if (stop == UINT_MAX) goto out; stop += start; for (i = SEARCH_DATA; i <= SEARCH_INDEX; i++) { - memset(&ts, 0, sizeof(ts)); - off = skb_find_text(skb, start, stop, search[i].ts, &ts); + off = skb_find_text(skb, start, stop, search[i].ts); if (off == UINT_MAX) continue; off += start + search[i].len; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 6ab7779..ea51833 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -401,7 +401,8 @@ nf_tables_chain_type_lookup(const struct nft_af_info *afi, } static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = { - [NFTA_TABLE_NAME] = { .type = NLA_STRING }, + [NFTA_TABLE_NAME] = { .type = NLA_STRING, + .len = NFT_TABLE_MAXNAMELEN - 1 }, [NFTA_TABLE_FLAGS] = { .type = NLA_U32 }, }; @@ -686,13 +687,13 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb, if (!try_module_get(afi->owner)) return -EAFNOSUPPORT; - table = kzalloc(sizeof(*table) + nla_len(name), GFP_KERNEL); + table = kzalloc(sizeof(*table), GFP_KERNEL); if (table == NULL) { module_put(afi->owner); return -ENOMEM; } - nla_strlcpy(table->name, name, nla_len(name)); + nla_strlcpy(table->name, name, NFT_TABLE_MAXNAMELEN); INIT_LIST_HEAD(&table->chains); INIT_LIST_HEAD(&table->sets); table->flags = flags; diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index 3b90eb2..77165bf 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c @@ -21,6 +21,48 @@ #include <net/netfilter/nf_tables.h> #include <net/netfilter/nf_log.h> +enum nft_trace { + NFT_TRACE_RULE, + NFT_TRACE_RETURN, + NFT_TRACE_POLICY, +}; + +static const char *const comments[] = { + [NFT_TRACE_RULE] = "rule", + [NFT_TRACE_RETURN] = "return", + [NFT_TRACE_POLICY] = "policy", +}; + +static struct nf_loginfo trace_loginfo = { + .type = NF_LOG_TYPE_LOG, + .u = { + .log = { + .level = 4, + .logflags = NF_LOG_MASK, + }, + }, +}; + +static void __nft_trace_packet(const struct nft_pktinfo *pkt, + const struct nft_chain *chain, + int rulenum, enum nft_trace type) +{ + struct net *net = dev_net(pkt->in ? 
pkt->in : pkt->out); + + nf_log_packet(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in, + pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ", + chain->table->name, chain->name, comments[type], + rulenum); +} + +static inline void nft_trace_packet(const struct nft_pktinfo *pkt, + const struct nft_chain *chain, + int rulenum, enum nft_trace type) +{ + if (unlikely(pkt->skb->nf_trace)) + __nft_trace_packet(pkt, chain, rulenum, type); +} + static void nft_cmp_fast_eval(const struct nft_expr *expr, struct nft_data data[NFT_REG_MAX + 1]) { @@ -66,40 +108,6 @@ struct nft_jumpstack { int rulenum; }; -enum nft_trace { - NFT_TRACE_RULE, - NFT_TRACE_RETURN, - NFT_TRACE_POLICY, -}; - -static const char *const comments[] = { - [NFT_TRACE_RULE] = "rule", - [NFT_TRACE_RETURN] = "return", - [NFT_TRACE_POLICY] = "policy", -}; - -static struct nf_loginfo trace_loginfo = { - .type = NF_LOG_TYPE_LOG, - .u = { - .log = { - .level = 4, - .logflags = NF_LOG_MASK, - }, - }, -}; - -static void nft_trace_packet(const struct nft_pktinfo *pkt, - const struct nft_chain *chain, - int rulenum, enum nft_trace type) -{ - struct net *net = dev_net(pkt->in ? pkt->in : pkt->out); - - nf_log_packet(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in, - pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ", - chain->table->name, chain->name, comments[type], - rulenum); -} - unsigned int nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops) { @@ -146,8 +154,7 @@ next_rule: data[NFT_REG_VERDICT].verdict = NFT_CONTINUE; continue; case NFT_CONTINUE: - if (unlikely(pkt->skb->nf_trace)) - nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE); + nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE); continue; } break; @@ -157,37 +164,28 @@ next_rule: case NF_ACCEPT: case NF_DROP: case NF_QUEUE: - if (unlikely(pkt->skb->nf_trace)) - nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE); - + nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE); return data[NFT_REG_VERDICT].verdict; } switch (data[NFT_REG_VERDICT].verdict) { case NFT_JUMP: - if (unlikely(pkt->skb->nf_trace)) - nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE); - BUG_ON(stackptr >= NFT_JUMP_STACK_SIZE); jumpstack[stackptr].chain = chain; jumpstack[stackptr].rule = rule; jumpstack[stackptr].rulenum = rulenum; stackptr++; - chain = data[NFT_REG_VERDICT].chain; - goto do_chain; + /* fall through */ case NFT_GOTO: - if (unlikely(pkt->skb->nf_trace)) - nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE); + nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE); chain = data[NFT_REG_VERDICT].chain; goto do_chain; - case NFT_RETURN: - if (unlikely(pkt->skb->nf_trace)) - nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN); - break; case NFT_CONTINUE: - if (unlikely(pkt->skb->nf_trace && !(chain->flags & NFT_BASE_CHAIN))) - nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN); + rulenum++; + /* fall through */ + case NFT_RETURN: + nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN); break; default: WARN_ON(1); @@ -201,8 +199,7 @@ next_rule: goto next_rule; } - if (unlikely(pkt->skb->nf_trace)) - nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY); + nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY); rcu_read_lock_bh(); stats = this_cpu_ptr(rcu_dereference(nft_base_chain(basechain)->stats)); diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 213584c..0b0fd4e 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -20,6 +20,7 @@ #include <linux/netfilter_ipv4/ip_tables.h> #include 
<linux/netfilter_ipv6/ip6_tables.h> #include <linux/netfilter_bridge/ebtables.h> +#include <linux/netfilter_arp/arp_tables.h> #include <net/netfilter/nf_tables.h> static int nft_compat_chain_validate_dependency(const char *tablename, @@ -42,6 +43,7 @@ union nft_entry { struct ipt_entry e4; struct ip6t_entry e6; struct ebt_entry ebt; + struct arpt_entry arp; }; static inline void @@ -140,6 +142,8 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par, entry->ebt.ethproto = (__force __be16)proto; entry->ebt.invflags = inv ? EBT_IPROTO : 0; break; + case NFPROTO_ARP: + break; } par->entryinfo = entry; par->target = target; @@ -351,6 +355,8 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, entry->ebt.ethproto = (__force __be16)proto; entry->ebt.invflags = inv ? EBT_IPROTO : 0; break; + case NFPROTO_ARP: + break; } par->entryinfo = entry; par->match = match; @@ -537,6 +543,9 @@ nfnl_compat_get(struct sock *nfnl, struct sk_buff *skb, case NFPROTO_BRIDGE: fmt = "ebt_%s"; break; + case NFPROTO_ARP: + fmt = "arpt_%s"; + break; default: pr_err("nft_compat: unsupported protocol %d\n", nfmsg->nfgen_family); diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c index 7b5f9d5..9287711 100644 --- a/net/netfilter/nft_reject_inet.c +++ b/net/netfilter/nft_reject_inet.c @@ -28,14 +28,16 @@ static void nft_reject_inet_eval(const struct nft_expr *expr, case NFPROTO_IPV4: switch (priv->type) { case NFT_REJECT_ICMP_UNREACH: - nf_send_unreach(pkt->skb, priv->icmp_code); + nf_send_unreach(pkt->skb, priv->icmp_code, + pkt->ops->hooknum); break; case NFT_REJECT_TCP_RST: nf_send_reset(pkt->skb, pkt->ops->hooknum); break; case NFT_REJECT_ICMPX_UNREACH: nf_send_unreach(pkt->skb, - nft_reject_icmp_code(priv->icmp_code)); + nft_reject_icmp_code(priv->icmp_code), + pkt->ops->hooknum); break; } break; diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c index 0d47afe..8904598 100644 --- a/net/netfilter/xt_set.c +++ b/net/netfilter/xt_set.c @@ -193,7 +193,7 @@ set_match_v3(const struct sk_buff *skb, struct xt_action_param *par) return ret; if (!match_counter0(opt.ext.packets, &info->packets)) - return 0; + return false; return match_counter0(opt.ext.bytes, &info->bytes); } @@ -239,7 +239,7 @@ set_match_v4(const struct sk_buff *skb, struct xt_action_param *par) return ret; if (!match_counter(opt.ext.packets, &info->packets)) - return 0; + return false; return match_counter(opt.ext.bytes, &info->bytes); } diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c index 5699adb..0bc3460 100644 --- a/net/netfilter/xt_string.c +++ b/net/netfilter/xt_string.c @@ -26,13 +26,12 @@ static bool string_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_string_info *conf = par->matchinfo; - struct ts_state state; bool invert; invert = conf->u.v1.flags & XT_STRING_FLAG_INVERT; return (skb_find_text((struct sk_buff *)skb, conf->from_offset, - conf->to_offset, conf->config, &state) + conf->to_offset, conf->config) != UINT_MAX) ^ invert; } diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 05919bf..6b0f219 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2256,8 +2256,7 @@ static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb) put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info); } -static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t 
len) { struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); @@ -2346,8 +2345,7 @@ out: return err; } -static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, - struct msghdr *msg, size_t len, +static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct scm_cookie scm; diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 69f1d5e..b987fd5 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -1023,8 +1023,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev) return 1; } -static int nr_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int nr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); @@ -1133,8 +1132,8 @@ out: return err; } -static int nr_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, int flags) +static int nr_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, + int flags) { struct sock *sk = sock->sk; DECLARE_SOCKADDR(struct sockaddr_ax25 *, sax, msg->msg_name); diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c index 6ae063c..988f542 100644 --- a/net/netrom/nr_dev.c +++ b/net/netrom/nr_dev.c @@ -65,36 +65,6 @@ int nr_rx_ip(struct sk_buff *skb, struct net_device *dev) return 1; } -#ifdef CONFIG_INET - -static int nr_rebuild_header(struct sk_buff *skb) -{ - unsigned char *bp = skb->data; - - if (arp_find(bp + 7, skb)) - return 1; - - bp[6] &= ~AX25_CBIT; - bp[6] &= ~AX25_EBIT; - bp[6] |= AX25_SSSID_SPARE; - bp += AX25_ADDR_LEN; - - bp[6] &= ~AX25_CBIT; - bp[6] |= AX25_EBIT; - bp[6] |= AX25_SSSID_SPARE; - - return 0; -} - -#else - -static int nr_rebuild_header(struct sk_buff *skb) -{ - return 1; -} - -#endif - static int nr_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned int len) @@ -188,7 +158,6 @@ static netdev_tx_t nr_xmit(struct sk_buff *skb, struct net_device *dev) static const struct header_ops nr_header_ops = { .create = nr_header, - .rebuild= nr_rebuild_header, }; static const struct net_device_ops nr_netdev_ops = { diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index e181e29..9578bd6 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -750,8 +750,8 @@ error: return ret; } -static int llcp_sock_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int llcp_sock_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) { struct sock *sk = sock->sk; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); @@ -793,8 +793,8 @@ static int llcp_sock_sendmsg(struct kiocb *iocb, struct socket *sock, return nfc_llcp_send_i_frame(llcp_sock, msg, len); } -static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, int flags) +static int llcp_sock_recvmsg(struct socket *sock, struct msghdr *msg, + size_t len, int flags) { int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c index 373e138..82b4e80 100644 --- a/net/nfc/rawsock.c +++ b/net/nfc/rawsock.c @@ -211,8 +211,7 @@ static void rawsock_tx_work(struct work_struct *work) } } -static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int rawsock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct nfc_dev *dev = nfc_rawsock(sk)->dev; @@ -248,8 
+247,8 @@ static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock, return len; } -static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, int flags) +static int rawsock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + int flags) { int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig index b7d818c..ed6b0f8 100644 --- a/net/openvswitch/Kconfig +++ b/net/openvswitch/Kconfig @@ -6,6 +6,7 @@ config OPENVSWITCH tristate "Open vSwitch" depends on INET select LIBCRC32C + select MPLS select NET_MPLS_GSO ---help--- Open vSwitch is a multilayer Ethernet switch targeted at virtualized diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index f8db706..b91ac59 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -216,10 +216,16 @@ static void prb_fill_vlan_info(struct tpacket_kbdq_core *, static void packet_flush_mclist(struct sock *sk); struct packet_skb_cb { - unsigned int origlen; union { struct sockaddr_pkt pkt; - struct sockaddr_ll ll; + union { + /* Trick: alias skb original length with + * ll.sll_family and ll.protocol in order + * to save room. + */ + unsigned int origlen; + struct sockaddr_ll ll; + }; } sa; }; @@ -1608,8 +1614,8 @@ oom: * protocol layers and you must therefore supply it with a complete frame */ -static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg, + size_t len) { struct sock *sk = sock->sk; DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name); @@ -1818,13 +1824,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, skb = nskb; } - BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 > - sizeof(skb->cb)); + sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8); sll = &PACKET_SKB_CB(skb)->sa.ll; - sll->sll_family = AF_PACKET; sll->sll_hatype = dev->type; - sll->sll_protocol = skb->protocol; sll->sll_pkttype = skb->pkt_type; if (unlikely(po->origdev)) sll->sll_ifindex = orig_dev->ifindex; @@ -1833,7 +1836,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, sll->sll_halen = dev_parse_header(skb, sll->sll_addr); - PACKET_SKB_CB(skb)->origlen = skb->len; + /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg(). + * Use their space for storing the original skb length. + */ + PACKET_SKB_CB(skb)->sa.origlen = skb->len; if (pskb_trim(skb, snaplen)) goto drop_n_acct; @@ -1847,7 +1853,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, spin_lock(&sk->sk_receive_queue.lock); po->stats.stats1.tp_packets++; - skb->dropcount = atomic_read(&sk->sk_drops); + sock_skb_set_dropcount(sk, skb); __skb_queue_tail(&sk->sk_receive_queue, skb); spin_unlock(&sk->sk_receive_queue.lock); sk->sk_data_ready(sk); @@ -2603,8 +2609,7 @@ out: return err; } -static int packet_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); @@ -2884,13 +2889,14 @@ out: * If necessary we block. 
*/ -static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, int flags) +static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int copied, err; int vnet_hdr_len = 0; + unsigned int origlen = 0; err = -EINVAL; if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) @@ -2990,6 +2996,15 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, if (err) goto out_free; + if (sock->type != SOCK_PACKET) { + struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; + + /* Original length was stored in sockaddr_ll fields */ + origlen = PACKET_SKB_CB(skb)->sa.origlen; + sll->sll_family = AF_PACKET; + sll->sll_protocol = skb->protocol; + } + sock_recv_ts_and_drops(msg, sk, skb); if (msg->msg_name) { @@ -3001,6 +3016,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, msg->msg_namelen = sizeof(struct sockaddr_pkt); } else { struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; + msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr); } @@ -3014,7 +3030,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, aux.tp_status = TP_STATUS_USER; if (skb->ip_summed == CHECKSUM_PARTIAL) aux.tp_status |= TP_STATUS_CSUMNOTREADY; - aux.tp_len = PACKET_SKB_CB(skb)->origlen; + aux.tp_len = origlen; aux.tp_snaplen = skb->len; aux.tp_mac = 0; aux.tp_net = skb_network_offset(skb); diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c index 26054b4..5e71043 100644 --- a/net/phonet/datagram.c +++ b/net/phonet/datagram.c @@ -83,8 +83,7 @@ static int pn_init(struct sock *sk) return 0; } -static int pn_sendmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t len) +static int pn_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { DECLARE_SOCKADDR(struct sockaddr_pn *, target, msg->msg_name); struct sk_buff *skb; @@ -125,9 +124,8 @@ static int pn_sendmsg(struct kiocb *iocb, struct sock *sk, return (err >= 0) ? 
len : err; } -static int pn_recvmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t len, int noblock, - int flags, int *addr_len) +static int pn_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + int noblock, int flags, int *addr_len) { struct sk_buff *skb = NULL; struct sockaddr_pn sa; diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 5d3f2b7..6de2aeb9 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -1118,8 +1118,7 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb) } -static int pep_sendmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t len) +static int pep_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { struct pep_sock *pn = pep_sk(sk); struct sk_buff *skb; @@ -1246,9 +1245,8 @@ struct sk_buff *pep_read(struct sock *sk) return skb; } -static int pep_recvmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t len, int noblock, - int flags, int *addr_len) +static int pep_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + int noblock, int flags, int *addr_len) { struct sk_buff *skb; int err; diff --git a/net/phonet/socket.c b/net/phonet/socket.c index 008214a..d575ef4 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c @@ -425,15 +425,15 @@ out: return err; } -static int pn_socket_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t total_len) +static int pn_socket_sendmsg(struct socket *sock, struct msghdr *m, + size_t total_len) { struct sock *sk = sock->sk; if (pn_socket_autobind(sock)) return -EAGAIN; - return sk->sk_prot->sendmsg(iocb, sk, m, total_len); + return sk->sk_prot->sendmsg(sk, m, total_len); } const struct proto_ops phonet_dgram_ops = { diff --git a/net/rds/rds.h b/net/rds/rds.h index c2a5eef..c3f2855 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -702,8 +702,8 @@ void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn, void rds_inc_put(struct rds_incoming *inc); void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr, struct rds_incoming *inc, gfp_t gfp); -int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, - size_t size, int msg_flags); +int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, + int msg_flags); void rds_clear_recv_queue(struct rds_sock *rs); int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg); void rds_inc_info_copy(struct rds_incoming *inc, @@ -711,8 +711,7 @@ void rds_inc_info_copy(struct rds_incoming *inc, __be32 saddr, __be32 daddr, int flip); /* send.c */ -int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, - size_t payload_len); +int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len); void rds_send_reset(struct rds_connection *conn); int rds_send_xmit(struct rds_connection *conn); struct sockaddr_in; diff --git a/net/rds/recv.c b/net/rds/recv.c index f9ec1ac..a00462b 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -395,8 +395,8 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg) return 0; } -int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, - size_t size, int msg_flags) +int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, + int msg_flags) { struct sock *sk = sock->sk; struct rds_sock *rs = rds_sk_to_rs(sk); diff --git a/net/rds/send.c b/net/rds/send.c index 42f65d4..44672be 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -920,8 +920,7 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message 
*rm, return ret; } -int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, - size_t payload_len) +int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) { struct sock *sk = sock->sk; struct rds_sock *rs = rds_sk_to_rs(sk); diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 43bac7c..8ae6030 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -1046,8 +1046,7 @@ int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct ros return 1; } -static int rose_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int rose_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); @@ -1211,8 +1210,8 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock, } -static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, int flags) +static int rose_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, + int flags) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c index 5000588..369ca81 100644 --- a/net/rose/rose_dev.c +++ b/net/rose/rose_dev.c @@ -41,6 +41,9 @@ static int rose_header(struct sk_buff *skb, struct net_device *dev, { unsigned char *buff = skb_push(skb, ROSE_MIN_LEN + 2); + if (daddr) + memcpy(buff + 7, daddr, dev->addr_len); + *buff++ = ROSE_GFI | ROSE_Q_BIT; *buff++ = 0x00; *buff++ = ROSE_DATA; @@ -53,43 +56,6 @@ static int rose_header(struct sk_buff *skb, struct net_device *dev, return -37; } -static int rose_rebuild_header(struct sk_buff *skb) -{ -#ifdef CONFIG_INET - struct net_device *dev = skb->dev; - struct net_device_stats *stats = &dev->stats; - unsigned char *bp = (unsigned char *)skb->data; - struct sk_buff *skbn; - unsigned int len; - - if (arp_find(bp + 7, skb)) { - return 1; - } - - if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { - kfree_skb(skb); - return 1; - } - - if (skb->sk != NULL) - skb_set_owner_w(skbn, skb->sk); - - kfree_skb(skb); - - len = skbn->len; - - if (!rose_route_frame(skbn, NULL)) { - kfree_skb(skbn); - stats->tx_errors++; - return 1; - } - - stats->tx_packets++; - stats->tx_bytes += len; -#endif - return 1; -} - static int rose_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *sa = addr; @@ -134,19 +100,26 @@ static int rose_close(struct net_device *dev) static netdev_tx_t rose_xmit(struct sk_buff *skb, struct net_device *dev) { struct net_device_stats *stats = &dev->stats; + unsigned int len = skb->len; if (!netif_running(dev)) { printk(KERN_ERR "ROSE: rose_xmit - called when iface is down\n"); return NETDEV_TX_BUSY; } - dev_kfree_skb(skb); - stats->tx_errors++; + + if (!rose_route_frame(skb, NULL)) { + dev_kfree_skb(skb); + stats->tx_errors++; + return NETDEV_TX_OK; + } + + stats->tx_packets++; + stats->tx_bytes += len; return NETDEV_TX_OK; } static const struct header_ops rose_header_ops = { .create = rose_header, - .rebuild = rose_rebuild_header, }; static const struct net_device_ops rose_netdev_ops = { diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 7b16704..0095b9a 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -441,8 +441,7 @@ static int rxrpc_connect(struct socket *sock, struct sockaddr *addr, * - sends a call data packet * - may send an abort (abort code in control data) */ -static int rxrpc_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t len) +static int 
rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len) { struct rxrpc_transport *trans; struct rxrpc_sock *rx = rxrpc_sk(sock->sk); @@ -482,7 +481,7 @@ static int rxrpc_sendmsg(struct kiocb *iocb, struct socket *sock, switch (rx->sk.sk_state) { case RXRPC_SERVER_LISTENING: if (!m->msg_name) { - ret = rxrpc_server_sendmsg(iocb, rx, m, len); + ret = rxrpc_server_sendmsg(rx, m, len); break; } case RXRPC_SERVER_BOUND: @@ -492,7 +491,7 @@ static int rxrpc_sendmsg(struct kiocb *iocb, struct socket *sock, break; } case RXRPC_CLIENT_CONNECTED: - ret = rxrpc_client_sendmsg(iocb, rx, trans, m, len); + ret = rxrpc_client_sendmsg(rx, trans, m, len); break; default: ret = -ENOTCONN; diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index ba9fd36..2fc1e65 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -548,10 +548,9 @@ int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time_t, extern unsigned rxrpc_resend_timeout; int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *); -int rxrpc_client_sendmsg(struct kiocb *, struct rxrpc_sock *, - struct rxrpc_transport *, struct msghdr *, size_t); -int rxrpc_server_sendmsg(struct kiocb *, struct rxrpc_sock *, struct msghdr *, - size_t); +int rxrpc_client_sendmsg(struct rxrpc_sock *, struct rxrpc_transport *, + struct msghdr *, size_t); +int rxrpc_server_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t); /* * ar-peer.c @@ -572,8 +571,7 @@ extern const struct file_operations rxrpc_connection_seq_fops; * ar-recvmsg.c */ void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *); -int rxrpc_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t, - int); +int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int); /* * ar-security.c diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c index 8331c95..09f5845 100644 --- a/net/rxrpc/ar-output.c +++ b/net/rxrpc/ar-output.c @@ -23,8 +23,7 @@ */ unsigned rxrpc_resend_timeout = 4 * HZ; -static int rxrpc_send_data(struct kiocb *iocb, - struct rxrpc_sock *rx, +static int rxrpc_send_data(struct rxrpc_sock *rx, struct rxrpc_call *call, struct msghdr *msg, size_t len); @@ -129,9 +128,8 @@ static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code) * - caller holds the socket locked * - the socket may be either a client socket or a server socket */ -int rxrpc_client_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx, - struct rxrpc_transport *trans, struct msghdr *msg, - size_t len) +int rxrpc_client_sendmsg(struct rxrpc_sock *rx, struct rxrpc_transport *trans, + struct msghdr *msg, size_t len) { struct rxrpc_conn_bundle *bundle; enum rxrpc_command cmd; @@ -191,7 +189,7 @@ int rxrpc_client_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx, /* request phase complete for this client call */ ret = -EPROTO; } else { - ret = rxrpc_send_data(iocb, rx, call, msg, len); + ret = rxrpc_send_data(rx, call, msg, len); } rxrpc_put_call(call); @@ -232,7 +230,7 @@ int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg, call->state != RXRPC_CALL_SERVER_SEND_REPLY) { ret = -EPROTO; /* request phase complete for this client call */ } else { - ret = rxrpc_send_data(NULL, call->socket, call, msg, len); + ret = rxrpc_send_data(call->socket, call, msg, len); } release_sock(&call->socket->sk); @@ -271,8 +269,7 @@ EXPORT_SYMBOL(rxrpc_kernel_abort_call); * send a message through a server socket * - caller holds the socket locked */ -int rxrpc_server_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx, - struct msghdr *msg, 
size_t len) +int rxrpc_server_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) { enum rxrpc_command cmd; struct rxrpc_call *call; @@ -313,7 +310,7 @@ int rxrpc_server_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx, break; } - ret = rxrpc_send_data(iocb, rx, call, msg, len); + ret = rxrpc_send_data(rx, call, msg, len); break; case RXRPC_CMD_SEND_ABORT: @@ -520,8 +517,7 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb, * - must be called in process context * - caller holds the socket locked */ -static int rxrpc_send_data(struct kiocb *iocb, - struct rxrpc_sock *rx, +static int rxrpc_send_data(struct rxrpc_sock *rx, struct rxrpc_call *call, struct msghdr *msg, size_t len) { diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c index 4575485..a4f883e 100644 --- a/net/rxrpc/ar-recvmsg.c +++ b/net/rxrpc/ar-recvmsg.c @@ -43,8 +43,8 @@ void rxrpc_remove_user_ID(struct rxrpc_sock *rx, struct rxrpc_call *call) * - we need to be careful about two or more threads calling recvmsg * simultaneously */ -int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, int flags) +int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + int flags) { struct rxrpc_skb_priv *sp; struct rxrpc_call *call = NULL, *continue_call = NULL; @@ -150,7 +150,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock, &call->conn->trans->peer->srx, len); msg->msg_namelen = len; } - sock_recv_ts_and_drops(msg, &rx->sk, skb); + sock_recv_timestamp(msg, &rx->sk, skb); } /* receive the message */ diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index baef987..8b0470e 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -286,7 +286,7 @@ replay: RCU_INIT_POINTER(*back, next); tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER); - tcf_destroy(tp); + tcf_destroy(tp, true); err = 0; goto errout; } @@ -301,14 +301,20 @@ replay: err = -EEXIST; if (n->nlmsg_flags & NLM_F_EXCL) { if (tp_created) - tcf_destroy(tp); + tcf_destroy(tp, true); goto errout; } break; case RTM_DELTFILTER: err = tp->ops->delete(tp, fh); - if (err == 0) + if (err == 0) { tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER); + if (tcf_destroy(tp, false)) { + struct tcf_proto *next = rtnl_dereference(tp->next); + + RCU_INIT_POINTER(*back, next); + } + } goto errout; case RTM_GETTFILTER: err = tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER); @@ -329,7 +335,7 @@ replay: tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER); } else { if (tp_created) - tcf_destroy(tp); + tcf_destroy(tp, true); } errout: diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index fc399db..0b8c3ac 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c @@ -96,11 +96,14 @@ static void basic_delete_filter(struct rcu_head *head) kfree(f); } -static void basic_destroy(struct tcf_proto *tp) +static bool basic_destroy(struct tcf_proto *tp, bool force) { struct basic_head *head = rtnl_dereference(tp->root); struct basic_filter *f, *n; + if (!force && !list_empty(&head->flist)) + return false; + list_for_each_entry_safe(f, n, &head->flist, link) { list_del_rcu(&f->link); tcf_unbind_filter(tp, &f->res); @@ -108,6 +111,7 @@ static void basic_destroy(struct tcf_proto *tp) } RCU_INIT_POINTER(tp->root, NULL); kfree_rcu(head, rcu); + return true; } static int basic_delete(struct tcf_proto *tp, unsigned long arg) diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 5f3ee9e..243c9f2 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -16,6 +16,8 @@ 
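The cls_bpf changes that follow let a filter reference an already-loaded eBPF program by file descriptor (TCA_BPF_FD, optionally TCA_BPF_NAME) instead of carrying a classic BPF opcode array (TCA_BPF_OPS_LEN/TCA_BPF_OPS); at runtime cls_bpf_is_ebpf() tells the two apart by bpf_ops being NULL. A small sketch of the mutual-exclusion check performed in cls_bpf_modify_existing(), with plain booleans standing in for the netlink attribute pointers:

#include <stdbool.h>

/* -1 stands in for -EINVAL; 0 = classic BPF opcodes, 1 = eBPF by fd */
static int validate_cls_bpf_attrs(bool has_ops_len, bool has_ops,
				  bool has_fd, bool has_classid)
{
	bool is_bpf  = has_ops_len && has_ops;  /* TCA_BPF_OPS_LEN + TCA_BPF_OPS */
	bool is_ebpf = has_fd;                  /* TCA_BPF_FD                    */

	/* exactly one of the two program forms, plus a classid, is required */
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf) || !has_classid)
		return -1;

	return is_ebpf ? 1 : 0;
}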
#include <linux/types.h> #include <linux/skbuff.h> #include <linux/filter.h> +#include <linux/bpf.h> + #include <net/rtnetlink.h> #include <net/pkt_cls.h> #include <net/sock.h> @@ -24,6 +26,8 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>"); MODULE_DESCRIPTION("TC BPF based classifier"); +#define CLS_BPF_NAME_LEN 256 + struct cls_bpf_head { struct list_head plist; u32 hgen; @@ -32,18 +36,24 @@ struct cls_bpf_head { struct cls_bpf_prog { struct bpf_prog *filter; - struct sock_filter *bpf_ops; - struct tcf_exts exts; - struct tcf_result res; struct list_head link; + struct tcf_result res; + struct tcf_exts exts; u32 handle; - u16 bpf_num_ops; + union { + u32 bpf_fd; + u16 bpf_num_ops; + }; + struct sock_filter *bpf_ops; + const char *bpf_name; struct tcf_proto *tp; struct rcu_head rcu; }; static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = { [TCA_BPF_CLASSID] = { .type = NLA_U32 }, + [TCA_BPF_FD] = { .type = NLA_U32 }, + [TCA_BPF_NAME] = { .type = NLA_NUL_STRING, .len = CLS_BPF_NAME_LEN }, [TCA_BPF_OPS_LEN] = { .type = NLA_U16 }, [TCA_BPF_OPS] = { .type = NLA_BINARY, .len = sizeof(struct sock_filter) * BPF_MAXINSNS }, @@ -76,6 +86,11 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, return -1; } +static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog) +{ + return !prog->bpf_ops; +} + static int cls_bpf_init(struct tcf_proto *tp) { struct cls_bpf_head *head; @@ -94,8 +109,12 @@ static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog) { tcf_exts_destroy(&prog->exts); - bpf_prog_destroy(prog->filter); + if (cls_bpf_is_ebpf(prog)) + bpf_prog_put(prog->filter); + else + bpf_prog_destroy(prog->filter); + kfree(prog->bpf_name); kfree(prog->bpf_ops); kfree(prog); } @@ -114,14 +133,18 @@ static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg) list_del_rcu(&prog->link); tcf_unbind_filter(tp, &prog->res); call_rcu(&prog->rcu, __cls_bpf_delete_prog); + return 0; } -static void cls_bpf_destroy(struct tcf_proto *tp) +static bool cls_bpf_destroy(struct tcf_proto *tp, bool force) { struct cls_bpf_head *head = rtnl_dereference(tp->root); struct cls_bpf_prog *prog, *tmp; + if (!force && !list_empty(&head->plist)) + return false; + list_for_each_entry_safe(prog, tmp, &head->plist, link) { list_del_rcu(&prog->link); tcf_unbind_filter(tp, &prog->res); @@ -130,6 +153,7 @@ static void cls_bpf_destroy(struct tcf_proto *tp) RCU_INIT_POINTER(tp->root, NULL); kfree_rcu(head, rcu); + return true; } static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle) @@ -151,69 +175,121 @@ static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle) return ret; } -static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp, - struct cls_bpf_prog *prog, - unsigned long base, struct nlattr **tb, - struct nlattr *est, bool ovr) +static int cls_bpf_prog_from_ops(struct nlattr **tb, + struct cls_bpf_prog *prog, u32 classid) { struct sock_filter *bpf_ops; - struct tcf_exts exts; - struct sock_fprog_kern tmp; + struct sock_fprog_kern fprog_tmp; struct bpf_prog *fp; u16 bpf_size, bpf_num_ops; - u32 classid; int ret; - if (!tb[TCA_BPF_OPS_LEN] || !tb[TCA_BPF_OPS] || !tb[TCA_BPF_CLASSID]) - return -EINVAL; - - tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE); - ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr); - if (ret < 0) - return ret; - - classid = nla_get_u32(tb[TCA_BPF_CLASSID]); bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]); - if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0) { - ret = 
-EINVAL; - goto errout; - } + if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0) + return -EINVAL; bpf_size = bpf_num_ops * sizeof(*bpf_ops); - if (bpf_size != nla_len(tb[TCA_BPF_OPS])) { - ret = -EINVAL; - goto errout; - } + if (bpf_size != nla_len(tb[TCA_BPF_OPS])) + return -EINVAL; bpf_ops = kzalloc(bpf_size, GFP_KERNEL); - if (bpf_ops == NULL) { - ret = -ENOMEM; - goto errout; - } + if (bpf_ops == NULL) + return -ENOMEM; memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size); - tmp.len = bpf_num_ops; - tmp.filter = bpf_ops; + fprog_tmp.len = bpf_num_ops; + fprog_tmp.filter = bpf_ops; - ret = bpf_prog_create(&fp, &tmp); - if (ret) - goto errout_free; + ret = bpf_prog_create(&fp, &fprog_tmp); + if (ret < 0) { + kfree(bpf_ops); + return ret; + } - prog->bpf_num_ops = bpf_num_ops; prog->bpf_ops = bpf_ops; + prog->bpf_num_ops = bpf_num_ops; + prog->bpf_name = NULL; + + prog->filter = fp; + prog->res.classid = classid; + + return 0; +} + +static int cls_bpf_prog_from_efd(struct nlattr **tb, + struct cls_bpf_prog *prog, u32 classid) +{ + struct bpf_prog *fp; + char *name = NULL; + u32 bpf_fd; + + bpf_fd = nla_get_u32(tb[TCA_BPF_FD]); + + fp = bpf_prog_get(bpf_fd); + if (IS_ERR(fp)) + return PTR_ERR(fp); + + if (fp->type != BPF_PROG_TYPE_SCHED_CLS) { + bpf_prog_put(fp); + return -EINVAL; + } + + if (tb[TCA_BPF_NAME]) { + name = kmemdup(nla_data(tb[TCA_BPF_NAME]), + nla_len(tb[TCA_BPF_NAME]), + GFP_KERNEL); + if (!name) { + bpf_prog_put(fp); + return -ENOMEM; + } + } + + prog->bpf_ops = NULL; + prog->bpf_fd = bpf_fd; + prog->bpf_name = name; + prog->filter = fp; prog->res.classid = classid; + return 0; +} + +static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp, + struct cls_bpf_prog *prog, + unsigned long base, struct nlattr **tb, + struct nlattr *est, bool ovr) +{ + struct tcf_exts exts; + bool is_bpf, is_ebpf; + u32 classid; + int ret; + + is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS]; + is_ebpf = tb[TCA_BPF_FD]; + + if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf) || + !tb[TCA_BPF_CLASSID]) + return -EINVAL; + + tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE); + ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr); + if (ret < 0) + return ret; + + classid = nla_get_u32(tb[TCA_BPF_CLASSID]); + + ret = is_bpf ? 
cls_bpf_prog_from_ops(tb, prog, classid) : + cls_bpf_prog_from_efd(tb, prog, classid); + if (ret < 0) { + tcf_exts_destroy(&exts); + return ret; + } + tcf_bind_filter(tp, &prog->res, base); tcf_exts_change(tp, &prog->exts, &exts); return 0; -errout_free: - kfree(bpf_ops); -errout: - tcf_exts_destroy(&exts); - return ret; } static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp, @@ -297,11 +373,43 @@ errout: return ret; } +static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog, + struct sk_buff *skb) +{ + struct nlattr *nla; + + if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops)) + return -EMSGSIZE; + + nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops * + sizeof(struct sock_filter)); + if (nla == NULL) + return -EMSGSIZE; + + memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla)); + + return 0; +} + +static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog, + struct sk_buff *skb) +{ + if (nla_put_u32(skb, TCA_BPF_FD, prog->bpf_fd)) + return -EMSGSIZE; + + if (prog->bpf_name && + nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name)) + return -EMSGSIZE; + + return 0; +} + static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *tm) { struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh; - struct nlattr *nest, *nla; + struct nlattr *nest; + int ret; if (prog == NULL) return skb->len; @@ -314,16 +422,14 @@ static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid)) goto nla_put_failure; - if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops)) - goto nla_put_failure; - nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops * - sizeof(struct sock_filter)); - if (nla == NULL) + if (cls_bpf_is_ebpf(prog)) + ret = cls_bpf_dump_ebpf_info(prog, skb); + else + ret = cls_bpf_dump_bpf_info(prog, skb); + if (ret) goto nla_put_failure; - memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla)); - if (tcf_exts_dump(skb, &prog->exts) < 0) goto nla_put_failure; diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index 221697a..ea611b21 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c @@ -143,14 +143,18 @@ errout: return err; } -static void cls_cgroup_destroy(struct tcf_proto *tp) +static bool cls_cgroup_destroy(struct tcf_proto *tp, bool force) { struct cls_cgroup_head *head = rtnl_dereference(tp->root); + if (!force) + return false; + if (head) { RCU_INIT_POINTER(tp->root, NULL); call_rcu(&head->rcu, cls_cgroup_destroy_rcu); } + return true; } static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg) diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 4614103..a620c4e 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -557,17 +557,21 @@ static int flow_init(struct tcf_proto *tp) return 0; } -static void flow_destroy(struct tcf_proto *tp) +static bool flow_destroy(struct tcf_proto *tp, bool force) { struct flow_head *head = rtnl_dereference(tp->root); struct flow_filter *f, *next; + if (!force && !list_empty(&head->filters)) + return false; + list_for_each_entry_safe(f, next, &head->filters, list) { list_del_rcu(&f->list); call_rcu(&f->rcu, flow_destroy_filter); } RCU_INIT_POINTER(tp->root, NULL); kfree_rcu(head, rcu); + return true; } static unsigned long flow_get(struct tcf_proto *tp, u32 handle) diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index a5269f7..715e01e 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -33,6 +33,7 @@ struct fw_head { u32 mask; + bool mask_set; 
struct fw_filter __rcu *ht[HTSIZE]; struct rcu_head rcu; }; @@ -113,6 +114,14 @@ static unsigned long fw_get(struct tcf_proto *tp, u32 handle) static int fw_init(struct tcf_proto *tp) { + struct fw_head *head; + + head = kzalloc(sizeof(struct fw_head), GFP_KERNEL); + if (head == NULL) + return -ENOBUFS; + + head->mask_set = false; + rcu_assign_pointer(tp->root, head); return 0; } @@ -124,14 +133,20 @@ static void fw_delete_filter(struct rcu_head *head) kfree(f); } -static void fw_destroy(struct tcf_proto *tp) +static bool fw_destroy(struct tcf_proto *tp, bool force) { struct fw_head *head = rtnl_dereference(tp->root); struct fw_filter *f; int h; if (head == NULL) - return; + return true; + + if (!force) { + for (h = 0; h < HTSIZE; h++) + if (rcu_access_pointer(head->ht[h])) + return false; + } for (h = 0; h < HTSIZE; h++) { while ((f = rtnl_dereference(head->ht[h])) != NULL) { @@ -143,6 +158,7 @@ static void fw_destroy(struct tcf_proto *tp) } RCU_INIT_POINTER(tp->root, NULL); kfree_rcu(head, rcu); + return true; } static int fw_delete(struct tcf_proto *tp, unsigned long arg) @@ -286,17 +302,11 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, if (!handle) return -EINVAL; - if (head == NULL) { - u32 mask = 0xFFFFFFFF; + if (!head->mask_set) { + head->mask = 0xFFFFFFFF; if (tb[TCA_FW_MASK]) - mask = nla_get_u32(tb[TCA_FW_MASK]); - - head = kzalloc(sizeof(struct fw_head), GFP_KERNEL); - if (head == NULL) - return -ENOBUFS; - head->mask = mask; - - rcu_assign_pointer(tp->root, head); + head->mask = nla_get_u32(tb[TCA_FW_MASK]); + head->mask_set = true; } f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL); diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index 2ecd246..08a3b0a 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -258,6 +258,13 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle) static int route4_init(struct tcf_proto *tp) { + struct route4_head *head; + + head = kzalloc(sizeof(struct route4_head), GFP_KERNEL); + if (head == NULL) + return -ENOBUFS; + + rcu_assign_pointer(tp->root, head); return 0; } @@ -270,13 +277,20 @@ route4_delete_filter(struct rcu_head *head) kfree(f); } -static void route4_destroy(struct tcf_proto *tp) +static bool route4_destroy(struct tcf_proto *tp, bool force) { struct route4_head *head = rtnl_dereference(tp->root); int h1, h2; if (head == NULL) - return; + return true; + + if (!force) { + for (h1 = 0; h1 <= 256; h1++) { + if (rcu_access_pointer(head->table[h1])) + return false; + } + } for (h1 = 0; h1 <= 256; h1++) { struct route4_bucket *b; @@ -301,6 +315,7 @@ static void route4_destroy(struct tcf_proto *tp) } RCU_INIT_POINTER(tp->root, NULL); kfree_rcu(head, rcu); + return true; } static int route4_delete(struct tcf_proto *tp, unsigned long arg) @@ -484,13 +499,6 @@ static int route4_change(struct net *net, struct sk_buff *in_skb, return -EINVAL; err = -ENOBUFS; - if (head == NULL) { - head = kzalloc(sizeof(struct route4_head), GFP_KERNEL); - if (head == NULL) - goto errout; - rcu_assign_pointer(tp->root, head); - } - f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL); if (!f) goto errout; diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index edd8ade..02fa827 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h @@ -291,13 +291,20 @@ rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f) kfree_rcu(f, rcu); } -static void rsvp_destroy(struct tcf_proto *tp) +static bool rsvp_destroy(struct tcf_proto *tp, bool force) { struct rsvp_head *data = rtnl_dereference(tp->root); 
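A common thread in the classifier hunks above and below: ->destroy() now takes a force flag and returns bool, so a tcf_proto that still holds filters can refuse destruction unless forced. A minimal sketch of that contract, with a singly linked list standing in for the kernel's RCU-protected filter list (types are illustrative, not the kernel ones):

#include <stdbool.h>
#include <stddef.h>

struct filter { struct filter *next; };
struct head   { struct filter *flist; };

static bool example_destroy(struct head *head, bool force)
{
	struct filter *f, *next;

	if (!force && head->flist != NULL)
		return false;           /* filters remain: keep the tcf_proto */

	for (f = head->flist; f != NULL; f = next) {
		next = f->next;
		/* the kernel versions tcf_unbind_filter() and free via RCU here */
	}
	head->flist = NULL;
	return true;                    /* caller may now kfree_rcu() the proto */
}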
int h1, h2; if (data == NULL) - return; + return true; + + if (!force) { + for (h1 = 0; h1 < 256; h1++) { + if (rcu_access_pointer(data->ht[h1])) + return false; + } + } RCU_INIT_POINTER(tp->root, NULL); @@ -319,6 +326,7 @@ static void rsvp_destroy(struct tcf_proto *tp) } } kfree_rcu(data, rcu); + return true; } static int rsvp_delete(struct tcf_proto *tp, unsigned long arg) diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index bd49bf5..a557dba 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -468,11 +468,14 @@ static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker) } } -static void tcindex_destroy(struct tcf_proto *tp) +static bool tcindex_destroy(struct tcf_proto *tp, bool force) { struct tcindex_data *p = rtnl_dereference(tp->root); struct tcf_walker walker; + if (!force) + return false; + pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p); walker.count = 0; walker.skip = 0; @@ -481,6 +484,7 @@ static void tcindex_destroy(struct tcf_proto *tp) RCU_INIT_POINTER(tp->root, NULL); call_rcu(&p->rcu, __tcindex_destroy); + return true; } diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 09487af..375e51b 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -460,13 +460,35 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht) return -ENOENT; } -static void u32_destroy(struct tcf_proto *tp) +static bool ht_empty(struct tc_u_hnode *ht) +{ + unsigned int h; + + for (h = 0; h <= ht->divisor; h++) + if (rcu_access_pointer(ht->ht[h])) + return false; + + return true; +} + +static bool u32_destroy(struct tcf_proto *tp, bool force) { struct tc_u_common *tp_c = tp->data; struct tc_u_hnode *root_ht = rtnl_dereference(tp->root); WARN_ON(root_ht == NULL); + if (!force) { + if (root_ht) { + if (root_ht->refcnt > 1) + return false; + if (root_ht->refcnt == 1) { + if (!ht_empty(root_ht)) + return false; + } + } + } + if (root_ht && --root_ht->refcnt == 0) u32_destroy_hnode(tp, root_ht); @@ -491,6 +513,7 @@ static void u32_destroy(struct tcf_proto *tp) } tp->data = NULL; + return true; } static int u32_delete(struct tcf_proto *tp, unsigned long arg) diff --git a/net/sched/em_text.c b/net/sched/em_text.c index f03c3de..73e2ed5 100644 --- a/net/sched/em_text.c +++ b/net/sched/em_text.c @@ -34,7 +34,6 @@ static int em_text_match(struct sk_buff *skb, struct tcf_ematch *m, { struct text_match *tm = EM_TEXT_PRIV(m); int from, to; - struct ts_state state; from = tcf_get_base_ptr(skb, tm->from_layer) - skb->data; from += tm->from_offset; @@ -42,7 +41,7 @@ static int em_text_match(struct sk_buff *skb, struct tcf_ematch *m, to = tcf_get_base_ptr(skb, tm->to_layer) - skb->data; to += tm->to_offset; - return skb_find_text(skb, from, to, tm->config, &state) != UINT_MAX; + return skb_find_text(skb, from, to, tm->config) != UINT_MAX; } static int em_text_change(struct net *net, void *data, int len, diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 243b7d1..ad9eed7 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1858,11 +1858,15 @@ reclassify: } EXPORT_SYMBOL(tc_classify); -void tcf_destroy(struct tcf_proto *tp) +bool tcf_destroy(struct tcf_proto *tp, bool force) { - tp->ops->destroy(tp); - module_put(tp->ops->owner); - kfree_rcu(tp, rcu); + if (tp->ops->destroy(tp, force)) { + module_put(tp->ops->owner); + kfree_rcu(tp, rcu); + return true; + } + + return false; } void tcf_destroy_chain(struct tcf_proto __rcu **fl) @@ -1871,7 +1875,7 @@ void tcf_destroy_chain(struct tcf_proto __rcu **fl) while ((tp = 
rtnl_dereference(*fl)) != NULL) { RCU_INIT_POINTER(*fl, tp->next); - tcf_destroy(tp); + tcf_destroy(tp, true); } } EXPORT_SYMBOL(tcf_destroy_chain); diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 8f34b27..53b7acd 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -1322,8 +1322,7 @@ static __init int sctp_init(void) int max_share; int order; - BUILD_BUG_ON(sizeof(struct sctp_ulpevent) > - sizeof(((struct sk_buff *) 0)->cb)); + sock_skb_cb_check_size(sizeof(struct sctp_ulpevent)); /* Allocate bind_bucket and chunk caches. */ status = -ENOBUFS; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index aafe94b..f1a6539 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1586,8 +1586,7 @@ static int sctp_error(struct sock *sk, int flags, int err) static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *); -static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t msg_len) +static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) { struct net *net = sock_net(sk); struct sctp_sock *sp; @@ -2066,9 +2065,8 @@ static int sctp_skb_pull(struct sk_buff *skb, int len) * flags - flags sent or received with the user message, see Section * 5 for complete description of the flags. */ -static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t len, int noblock, - int flags, int *addr_len) +static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + int noblock, int flags, int *addr_len) { struct sctp_ulpevent *event = NULL; struct sctp_sock *sp = sctp_sk(sk); diff --git a/net/socket.c b/net/socket.c index bbedbfc..95d3085 100644 --- a/net/socket.c +++ b/net/socket.c @@ -610,45 +610,20 @@ void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags) } EXPORT_SYMBOL(__sock_tx_timestamp); -static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size) +static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg, + size_t size) { - return sock->ops->sendmsg(iocb, sock, msg, size); + return sock->ops->sendmsg(sock, msg, size); } -static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size) +int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) { int err = security_socket_sendmsg(sock, msg, size); - return err ?: __sock_sendmsg_nosec(iocb, sock, msg, size); -} - -static int do_sock_sendmsg(struct socket *sock, struct msghdr *msg, - size_t size, bool nosec) -{ - struct kiocb iocb; - int ret; - - init_sync_kiocb(&iocb, NULL); - ret = nosec ? 
__sock_sendmsg_nosec(&iocb, sock, msg, size) : - __sock_sendmsg(&iocb, sock, msg, size); - if (-EIOCBQUEUED == ret) - ret = wait_on_sync_kiocb(&iocb); - return ret; -} - -int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) -{ - return do_sock_sendmsg(sock, msg, size, false); + return err ?: sock_sendmsg_nosec(sock, msg, size); } EXPORT_SYMBOL(sock_sendmsg); -static int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size) -{ - return do_sock_sendmsg(sock, msg, size, true); -} - int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t size) { @@ -731,9 +706,9 @@ EXPORT_SYMBOL_GPL(__sock_recv_wifi_status); static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { - if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && skb->dropcount) + if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && SOCK_SKB_CB(skb)->dropcount) put_cmsg(msg, SOL_SOCKET, SO_RXQ_OVFL, - sizeof(__u32), &skb->dropcount); + sizeof(__u32), &SOCK_SKB_CB(skb)->dropcount); } void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, @@ -744,47 +719,21 @@ void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, } EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops); -static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, int flags) +static inline int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, + size_t size, int flags) { - return sock->ops->recvmsg(iocb, sock, msg, size, flags); + return sock->ops->recvmsg(sock, msg, size, flags); } -static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, int flags) +int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, + int flags) { int err = security_socket_recvmsg(sock, msg, size, flags); - return err ?: __sock_recvmsg_nosec(iocb, sock, msg, size, flags); -} - -int sock_recvmsg(struct socket *sock, struct msghdr *msg, - size_t size, int flags) -{ - struct kiocb iocb; - int ret; - - init_sync_kiocb(&iocb, NULL); - ret = __sock_recvmsg(&iocb, sock, msg, size, flags); - if (-EIOCBQUEUED == ret) - ret = wait_on_sync_kiocb(&iocb); - return ret; + return err ?: sock_recvmsg_nosec(sock, msg, size, flags); } EXPORT_SYMBOL(sock_recvmsg); -static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, - size_t size, int flags) -{ - struct kiocb iocb; - int ret; - - init_sync_kiocb(&iocb, NULL); - ret = __sock_recvmsg_nosec(&iocb, sock, msg, size, flags); - if (-EIOCBQUEUED == ret) - ret = wait_on_sync_kiocb(&iocb); - return ret; -} - /** * kernel_recvmsg - Receive a message from a socket (kernel space) * @sock: The socket to receive the message from @@ -861,8 +810,7 @@ static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to) if (iocb->ki_nbytes == 0) /* Match SYS5 behaviour */ return 0; - res = __sock_recvmsg(iocb, sock, &msg, - iocb->ki_nbytes, msg.msg_flags); + res = sock_recvmsg(sock, &msg, iocb->ki_nbytes, msg.msg_flags); *to = msg.msg_iter; return res; } @@ -883,7 +831,7 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from) if (sock->type == SOCK_SEQPACKET) msg.msg_flags |= MSG_EOR; - res = __sock_sendmsg(iocb, sock, &msg, iocb->ki_nbytes); + res = sock_sendmsg(sock, &msg, iocb->ki_nbytes); *from = msg.msg_iter; return res; } diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 8c1e558..aba6aa2 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -14,6 +14,7 @@ 
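The net/socket.c hunks above remove the kiocb round-trip from the synchronous send/receive path. Paraphrasing the diff (kernel context assumed, so this is not a stand-alone program), the send side boils down to:

/*
 * Old path (lines removed above): a stack kiocb let a handler complete
 * asynchronously and return -EIOCBQUEUED:
 *
 *	struct kiocb iocb;
 *
 *	init_sync_kiocb(&iocb, NULL);
 *	ret = sock->ops->sendmsg(&iocb, sock, msg, size);
 *	if (ret == -EIOCBQUEUED)
 *		ret = wait_on_sync_kiocb(&iocb);
 *
 * New path (as added above): the security hook runs first, then the
 * protocol handler is invoked directly and must complete synchronously.
 */
int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	int err = security_socket_sendmsg(sock, msg, size);

	return err ?: sock->ops->sendmsg(sock, msg, size);
}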
#include <linux/mutex.h> #include <linux/notifier.h> #include <linux/netdevice.h> +#include <net/ip_fib.h> #include <net/switchdev.h> /** @@ -32,7 +33,7 @@ int netdev_switch_parent_id_get(struct net_device *dev, return -EOPNOTSUPP; return ops->ndo_switch_parent_id_get(dev, psid); } -EXPORT_SYMBOL(netdev_switch_parent_id_get); +EXPORT_SYMBOL_GPL(netdev_switch_parent_id_get); /** * netdev_switch_port_stp_update - Notify switch device port of STP @@ -51,7 +52,7 @@ int netdev_switch_port_stp_update(struct net_device *dev, u8 state) WARN_ON(!ops->ndo_switch_parent_id_get); return ops->ndo_switch_port_stp_update(dev, state); } -EXPORT_SYMBOL(netdev_switch_port_stp_update); +EXPORT_SYMBOL_GPL(netdev_switch_port_stp_update); static DEFINE_MUTEX(netdev_switch_mutex); static RAW_NOTIFIER_HEAD(netdev_switch_notif_chain); @@ -73,7 +74,7 @@ int register_netdev_switch_notifier(struct notifier_block *nb) mutex_unlock(&netdev_switch_mutex); return err; } -EXPORT_SYMBOL(register_netdev_switch_notifier); +EXPORT_SYMBOL_GPL(register_netdev_switch_notifier); /** * unregister_netdev_switch_notifier - Unregister nofifier @@ -91,7 +92,7 @@ int unregister_netdev_switch_notifier(struct notifier_block *nb) mutex_unlock(&netdev_switch_mutex); return err; } -EXPORT_SYMBOL(unregister_netdev_switch_notifier); +EXPORT_SYMBOL_GPL(unregister_netdev_switch_notifier); /** * call_netdev_switch_notifiers - Call nofifiers @@ -114,7 +115,7 @@ int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev, mutex_unlock(&netdev_switch_mutex); return err; } -EXPORT_SYMBOL(call_netdev_switch_notifiers); +EXPORT_SYMBOL_GPL(call_netdev_switch_notifiers); /** * netdev_switch_port_bridge_setlink - Notify switch device port of bridge @@ -139,7 +140,7 @@ int netdev_switch_port_bridge_setlink(struct net_device *dev, return ops->ndo_bridge_setlink(dev, nlh, flags); } -EXPORT_SYMBOL(netdev_switch_port_bridge_setlink); +EXPORT_SYMBOL_GPL(netdev_switch_port_bridge_setlink); /** * netdev_switch_port_bridge_dellink - Notify switch device port of bridge @@ -164,7 +165,7 @@ int netdev_switch_port_bridge_dellink(struct net_device *dev, return ops->ndo_bridge_dellink(dev, nlh, flags); } -EXPORT_SYMBOL(netdev_switch_port_bridge_dellink); +EXPORT_SYMBOL_GPL(netdev_switch_port_bridge_dellink); /** * ndo_dflt_netdev_switch_port_bridge_setlink - default ndo bridge setlink @@ -194,7 +195,7 @@ int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev, return ret; } -EXPORT_SYMBOL(ndo_dflt_netdev_switch_port_bridge_setlink); +EXPORT_SYMBOL_GPL(ndo_dflt_netdev_switch_port_bridge_setlink); /** * ndo_dflt_netdev_switch_port_bridge_dellink - default ndo bridge dellink @@ -224,4 +225,168 @@ int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev, return ret; } -EXPORT_SYMBOL(ndo_dflt_netdev_switch_port_bridge_dellink); +EXPORT_SYMBOL_GPL(ndo_dflt_netdev_switch_port_bridge_dellink); + +static struct net_device *netdev_switch_get_lowest_dev(struct net_device *dev) +{ + const struct net_device_ops *ops = dev->netdev_ops; + struct net_device *lower_dev; + struct net_device *port_dev; + struct list_head *iter; + + /* Recusively search down until we find a sw port dev. + * (A sw port dev supports ndo_switch_parent_id_get). 
+ */ + + if (dev->features & NETIF_F_HW_SWITCH_OFFLOAD && + ops->ndo_switch_parent_id_get) + return dev; + + netdev_for_each_lower_dev(dev, lower_dev, iter) { + port_dev = netdev_switch_get_lowest_dev(lower_dev); + if (port_dev) + return port_dev; + } + + return NULL; +} + +static struct net_device *netdev_switch_get_dev_by_nhs(struct fib_info *fi) +{ + struct netdev_phys_item_id psid; + struct netdev_phys_item_id prev_psid; + struct net_device *dev = NULL; + int nhsel; + + /* For this route, all nexthop devs must be on the same switch. */ + + for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) { + const struct fib_nh *nh = &fi->fib_nh[nhsel]; + + if (!nh->nh_dev) + return NULL; + + dev = netdev_switch_get_lowest_dev(nh->nh_dev); + if (!dev) + return NULL; + + if (netdev_switch_parent_id_get(dev, &psid)) + return NULL; + + if (nhsel > 0) { + if (prev_psid.id_len != psid.id_len) + return NULL; + if (memcmp(prev_psid.id, psid.id, psid.id_len)) + return NULL; + } + + prev_psid = psid; + } + + return dev; +} + +/** + * netdev_switch_fib_ipv4_add - Add IPv4 route entry to switch + * + * @dst: route's IPv4 destination address + * @dst_len: destination address length (prefix length) + * @fi: route FIB info structure + * @tos: route TOS + * @type: route type + * @tb_id: route table ID + * + * Add IPv4 route entry to switch device. + */ +int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi, + u8 tos, u8 type, u32 tb_id) +{ + struct net_device *dev; + const struct net_device_ops *ops; + int err = 0; + + /* Don't offload route if using custom ip rules or if + * IPv4 FIB offloading has been disabled completely. + */ + +#ifdef CONFIG_IP_MULTIPLE_TABLES + if (fi->fib_net->ipv4.fib_has_custom_rules) + return 0; +#endif + + if (fi->fib_net->ipv4.fib_offload_disabled) + return 0; + + dev = netdev_switch_get_dev_by_nhs(fi); + if (!dev) + return 0; + ops = dev->netdev_ops; + + if (ops->ndo_switch_fib_ipv4_add) { + err = ops->ndo_switch_fib_ipv4_add(dev, htonl(dst), dst_len, + fi, tos, type, tb_id); + if (!err) + fi->fib_flags |= RTNH_F_EXTERNAL; + } + + return err; +} +EXPORT_SYMBOL_GPL(netdev_switch_fib_ipv4_add); + +/** + * netdev_switch_fib_ipv4_del - Delete IPv4 route entry from switch + * + * @dst: route's IPv4 destination address + * @dst_len: destination address length (prefix length) + * @fi: route FIB info structure + * @tos: route TOS + * @type: route type + * @tb_id: route table ID + * + * Delete IPv4 route entry from switch device. + */ +int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi, + u8 tos, u8 type, u32 tb_id) +{ + struct net_device *dev; + const struct net_device_ops *ops; + int err = 0; + + if (!(fi->fib_flags & RTNH_F_EXTERNAL)) + return 0; + + dev = netdev_switch_get_dev_by_nhs(fi); + if (!dev) + return 0; + ops = dev->netdev_ops; + + if (ops->ndo_switch_fib_ipv4_del) { + err = ops->ndo_switch_fib_ipv4_del(dev, htonl(dst), dst_len, + fi, tos, type, tb_id); + if (!err) + fi->fib_flags &= ~RTNH_F_EXTERNAL; + } + + return err; +} +EXPORT_SYMBOL_GPL(netdev_switch_fib_ipv4_del); + +/** + * netdev_switch_fib_ipv4_abort - Abort an IPv4 FIB operation + * + * @fi: route FIB info structure + */ +void netdev_switch_fib_ipv4_abort(struct fib_info *fi) +{ + /* There was a problem installing this route to the offload + * device. 
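The switchdev additions above let the IPv4 FIB push routes down to a switch ASIC: netdev_switch_fib_ipv4_add() resolves the route's nexthop devices to a single switch port and invokes its ndo_switch_fib_ipv4_add, marking offloaded routes with RTNH_F_EXTERNAL. A hypothetical driver-side hook might look like the sketch below; the "exampleswitch" names are invented, and the parameter list is inferred from the call site in this patch rather than from the (unshown) netdevice.h prototype.

/* Hypothetical driver hook for ndo_switch_fib_ipv4_add as called by
 * netdev_switch_fib_ipv4_add() above.
 */
static int exampleswitch_fib_ipv4_add(struct net_device *dev,
				      __be32 dst, int dst_len,
				      struct fib_info *fi,
				      u8 tos, u8 type, u32 tb_id)
{
	/* A real driver would program an LPM entry and nexthops here.
	 * Returning 0 lets the core mark the route RTNH_F_EXTERNAL; an
	 * error is propagated back to the FIB code, which may fall back
	 * to software or call netdev_switch_fib_ipv4_abort().
	 */
	return 0;
}

/* wired up in the driver's net_device_ops (illustrative):
 *	.ndo_switch_fib_ipv4_add = exampleswitch_fib_ipv4_add,
 */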
For now, until we come up with more refined + * policy handling, abruptly end IPv4 fib offloading for + * for entire net by flushing offload device(s) of all + * IPv4 routes, and mark IPv4 fib offloading broken from + * this point forward. + */ + + fib_flush_external(fi->fib_net); + fi->fib_net->ipv4.fib_offload_disabled = true; +} +EXPORT_SYMBOL_GPL(netdev_switch_fib_ipv4_abort); diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig index 91c8a8e..c25a3a1 100644 --- a/net/tipc/Kconfig +++ b/net/tipc/Kconfig @@ -26,3 +26,11 @@ config TIPC_MEDIA_IB help Saying Y here will enable support for running TIPC on IP-over-InfiniBand devices. +config TIPC_MEDIA_UDP + bool "IP/UDP media type support" + depends on TIPC + select NET_UDP_TUNNEL + help + Saying Y here will enable support for running TIPC over IP/UDP + bool + default y diff --git a/net/tipc/Makefile b/net/tipc/Makefile index 599b1a5..57e460b 100644 --- a/net/tipc/Makefile +++ b/net/tipc/Makefile @@ -10,5 +10,6 @@ tipc-y += addr.o bcast.o bearer.o \ netlink.o netlink_compat.o node.o socket.o eth_media.o \ server.o socket.o +tipc-$(CONFIG_TIPC_MEDIA_UDP) += udp_media.o tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o tipc-$(CONFIG_SYSCTL) += sysctl.o diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 48852c2..840db89 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -48,6 +48,9 @@ static struct tipc_media * const media_info_array[] = { #ifdef CONFIG_TIPC_MEDIA_IB &ib_media_info, #endif +#ifdef CONFIG_TIPC_MEDIA_UDP + &udp_media_info, +#endif NULL }; @@ -216,7 +219,8 @@ void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest) * tipc_enable_bearer - enable bearer with the given name */ static int tipc_enable_bearer(struct net *net, const char *name, - u32 disc_domain, u32 priority) + u32 disc_domain, u32 priority, + struct nlattr *attr[]) { struct tipc_net *tn = net_generic(net, tipc_net_id); struct tipc_bearer *b_ptr; @@ -304,7 +308,7 @@ restart: strcpy(b_ptr->name, name); b_ptr->media = m_ptr; - res = m_ptr->enable_media(net, b_ptr); + res = m_ptr->enable_media(net, b_ptr, attr); if (res) { pr_warn("Bearer <%s> rejected, enable failure (%d)\n", name, -res); @@ -372,7 +376,8 @@ static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr, kfree_rcu(b_ptr, rcu); } -int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b) +int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b, + struct nlattr *attr[]) { struct net_device *dev; char *driver_name = strchr((const char *)b->name, ':') + 1; @@ -742,7 +747,7 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info) return -EINVAL; } - bearer_disable(net, bearer, false); + bearer_disable(net, bearer, true); rtnl_unlock(); return 0; @@ -791,7 +796,7 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info) } rtnl_lock(); - err = tipc_enable_bearer(net, bearer, domain, prio); + err = tipc_enable_bearer(net, bearer, domain, prio, attrs); if (err) { rtnl_unlock(); return err; diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index 6b17795..5cad243 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h @@ -41,7 +41,7 @@ #include <net/genetlink.h> #define MAX_BEARERS 2 -#define MAX_MEDIA 2 +#define MAX_MEDIA 3 #define MAX_NODES 4096 #define WSIZE 32 @@ -50,14 +50,16 @@ * - the field's actual content and length is defined per media * - remaining unused bytes in the field are set to zero */ -#define TIPC_MEDIA_ADDR_SIZE 32 +#define TIPC_MEDIA_INFO_SIZE 32 #define TIPC_MEDIA_TYPE_OFFSET 3 +#define TIPC_MEDIA_ADDR_OFFSET 4 /* * 
Identifiers of supported TIPC media types */ #define TIPC_MEDIA_TYPE_ETH 1 #define TIPC_MEDIA_TYPE_IB 2 +#define TIPC_MEDIA_TYPE_UDP 3 /** * struct tipc_node_map - set of node identifiers @@ -76,7 +78,7 @@ struct tipc_node_map { * @broadcast: non-zero if address is a broadcast address */ struct tipc_media_addr { - u8 value[TIPC_MEDIA_ADDR_SIZE]; + u8 value[TIPC_MEDIA_INFO_SIZE]; u8 media_id; u8 broadcast; }; @@ -103,7 +105,8 @@ struct tipc_media { int (*send_msg)(struct net *net, struct sk_buff *buf, struct tipc_bearer *b_ptr, struct tipc_media_addr *dest); - int (*enable_media)(struct net *net, struct tipc_bearer *b_ptr); + int (*enable_media)(struct net *net, struct tipc_bearer *b_ptr, + struct nlattr *attr[]); void (*disable_media)(struct tipc_bearer *b_ptr); int (*addr2str)(struct tipc_media_addr *addr, char *strbuf, @@ -182,6 +185,9 @@ extern struct tipc_media eth_media_info; #ifdef CONFIG_TIPC_MEDIA_IB extern struct tipc_media ib_media_info; #endif +#ifdef CONFIG_TIPC_MEDIA_UDP +extern struct tipc_media udp_media_info; +#endif int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info); int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info); @@ -196,7 +202,8 @@ int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info); int tipc_media_set_priority(const char *name, u32 new_value); int tipc_media_set_window(const char *name, u32 new_value); void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a); -int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b); +int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b, + struct nlattr *attrs[]); void tipc_disable_l2_media(struct tipc_bearer *b); int tipc_l2_send_msg(struct net *net, struct sk_buff *buf, struct tipc_bearer *b, struct tipc_media_addr *dest); diff --git a/net/tipc/discover.c b/net/tipc/discover.c index feef375..5967506 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c @@ -86,7 +86,7 @@ static void tipc_disc_init_msg(struct net *net, struct sk_buff *buf, u32 type, msg = buf_msg(buf); tipc_msg_init(tn->own_addr, msg, LINK_CONFIG, type, - INT_H_SIZE, dest_domain); + MAX_H_SIZE, dest_domain); msg_set_non_seq(msg, 1); msg_set_node_sig(msg, tn->random); msg_set_dest_domain(msg, dest_domain); @@ -249,7 +249,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *buf, /* Send response, if necessary */ if (respond && (mtyp == DSC_REQ_MSG)) { - rbuf = tipc_buf_acquire(INT_H_SIZE); + rbuf = tipc_buf_acquire(MAX_H_SIZE); if (rbuf) { tipc_disc_init_msg(net, rbuf, DSC_RESP_MSG, bearer); tipc_bearer_send(net, bearer->identity, rbuf, &maddr); @@ -359,8 +359,7 @@ int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr, req = kmalloc(sizeof(*req), GFP_ATOMIC); if (!req) return -ENOMEM; - - req->buf = tipc_buf_acquire(INT_H_SIZE); + req->buf = tipc_buf_acquire(MAX_H_SIZE); if (!req->buf) { kfree(req); return -ENOMEM; diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c index 5e1426f..f69a2fd 100644 --- a/net/tipc/eth_media.c +++ b/net/tipc/eth_media.c @@ -37,8 +37,6 @@ #include "core.h" #include "bearer.h" -#define ETH_ADDR_OFFSET 4 /* MAC addr position inside address field */ - /* Convert Ethernet address (media address format) to string */ static int tipc_eth_addr2str(struct tipc_media_addr *addr, char *strbuf, int bufsz) @@ -53,9 +51,9 @@ static int tipc_eth_addr2str(struct tipc_media_addr *addr, /* Convert from media address format to discovery message addr format */ static int tipc_eth_addr2msg(char *msg, struct tipc_media_addr *addr) { - memset(msg, 
0, TIPC_MEDIA_ADDR_SIZE); + memset(msg, 0, TIPC_MEDIA_INFO_SIZE); msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH; - memcpy(msg + ETH_ADDR_OFFSET, addr->value, ETH_ALEN); + memcpy(msg + TIPC_MEDIA_ADDR_OFFSET, addr->value, ETH_ALEN); return 0; } @@ -79,7 +77,7 @@ static int tipc_eth_msg2addr(struct tipc_bearer *b, char *msg) { /* Skip past preamble: */ - msg += ETH_ADDR_OFFSET; + msg += TIPC_MEDIA_ADDR_OFFSET; return tipc_eth_raw2addr(b, addr, msg); } diff --git a/net/tipc/ib_media.c b/net/tipc/ib_media.c index 8522eef..e8c1671 100644 --- a/net/tipc/ib_media.c +++ b/net/tipc/ib_media.c @@ -57,7 +57,7 @@ static int tipc_ib_addr2str(struct tipc_media_addr *a, char *str_buf, /* Convert from media address format to discovery message addr format */ static int tipc_ib_addr2msg(char *msg, struct tipc_media_addr *addr) { - memset(msg, 0, TIPC_MEDIA_ADDR_SIZE); + memset(msg, 0, TIPC_MEDIA_INFO_SIZE); memcpy(msg, addr->value, INFINIBAND_ALEN); return 0; } diff --git a/net/tipc/msg.h b/net/tipc/msg.h index 9ace47f..fa16784 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -76,7 +76,7 @@ struct plist; #define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE) -#define TIPC_MEDIA_ADDR_OFFSET 5 +#define TIPC_MEDIA_INFO_OFFSET 5 /** * TIPC message buffer code @@ -87,7 +87,7 @@ struct plist; * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields * are word aligned for quicker access */ -#define BUF_HEADROOM LL_MAX_HEADER +#define BUF_HEADROOM (LL_MAX_HEADER + 48) struct tipc_skb_cb { void *handle; @@ -688,7 +688,7 @@ static inline void msg_set_redundant_link(struct tipc_msg *m, u32 r) static inline char *msg_media_addr(struct tipc_msg *m) { - return (char *)&m->hdr[TIPC_MEDIA_ADDR_OFFSET]; + return (char *)&m->hdr[TIPC_MEDIA_INFO_OFFSET]; } /* diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index fcb0791..506aaa5 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -98,7 +98,7 @@ void named_cluster_distribute(struct net *net, struct sk_buff *skb) continue; if (!tipc_node_active_links(node)) continue; - oskb = skb_copy(skb, GFP_ATOMIC); + oskb = pskb_copy(skb, GFP_ATOMIC); if (!oskb) break; msg_set_destnode(buf_msg(oskb), dnode); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index b4d4467..934947f 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -114,6 +114,9 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid); static int tipc_sk_insert(struct tipc_sock *tsk); static void tipc_sk_remove(struct tipc_sock *tsk); +static int __tipc_send_stream(struct socket *sock, struct msghdr *m, + size_t dsz); +static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz); static const struct proto_ops packet_ops; static const struct proto_ops stream_ops; @@ -892,7 +895,6 @@ static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p) /** * tipc_sendmsg - send message in connectionless manner - * @iocb: if NULL, indicates that socket lock is already held * @sock: socket structure * @m: message to send * @dsz: amount of user data to be sent @@ -904,9 +906,21 @@ static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p) * * Returns the number of bytes sent on success, or errno otherwise */ -static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock, +static int tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz) { + struct sock *sk = sock->sk; + int ret; + + lock_sock(sk); + ret = __tipc_sendmsg(sock, m, dsz); + release_sock(sk); + + 
return ret; +} + +static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz) +{ DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); @@ -931,22 +945,13 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock, if (dsz > TIPC_MAX_USER_MSG_SIZE) return -EMSGSIZE; - if (iocb) - lock_sock(sk); - if (unlikely(sock->state != SS_READY)) { - if (sock->state == SS_LISTENING) { - rc = -EPIPE; - goto exit; - } - if (sock->state != SS_UNCONNECTED) { - rc = -EISCONN; - goto exit; - } - if (tsk->published) { - rc = -EOPNOTSUPP; - goto exit; - } + if (sock->state == SS_LISTENING) + return -EPIPE; + if (sock->state != SS_UNCONNECTED) + return -EISCONN; + if (tsk->published) + return -EOPNOTSUPP; if (dest->addrtype == TIPC_ADDR_NAME) { tsk->conn_type = dest->addr.name.name.type; tsk->conn_instance = dest->addr.name.name.instance; @@ -956,8 +961,7 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock, timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); if (dest->addrtype == TIPC_ADDR_MCAST) { - rc = tipc_sendmcast(sock, seq, m, dsz, timeo); - goto exit; + return tipc_sendmcast(sock, seq, m, dsz, timeo); } else if (dest->addrtype == TIPC_ADDR_NAME) { u32 type = dest->addr.name.name.type; u32 inst = dest->addr.name.name.instance; @@ -972,10 +976,8 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock, dport = tipc_nametbl_translate(net, type, inst, &dnode); msg_set_destnode(mhdr, dnode); msg_set_destport(mhdr, dport); - if (unlikely(!dport && !dnode)) { - rc = -EHOSTUNREACH; - goto exit; - } + if (unlikely(!dport && !dnode)) + return -EHOSTUNREACH; } else if (dest->addrtype == TIPC_ADDR_ID) { dnode = dest->addr.id.node; msg_set_type(mhdr, TIPC_DIRECT_MSG); @@ -990,7 +992,7 @@ new_mtu: mtu = tipc_node_get_mtu(net, dnode, tsk->portid); rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, pktchain); if (rc < 0) - goto exit; + return rc; do { skb = skb_peek(pktchain); @@ -1013,9 +1015,6 @@ new_mtu: if (rc) __skb_queue_purge(pktchain); } while (!rc); -exit: - if (iocb) - release_sock(sk); return rc; } @@ -1052,7 +1051,6 @@ static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p) /** * tipc_send_stream - send stream-oriented data - * @iocb: (unused) * @sock: socket structure * @m: data to send * @dsz: total length of data to be transmitted @@ -1062,8 +1060,19 @@ static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p) * Returns the number of bytes sent on success (or partial success), * or errno if no data sent */ -static int tipc_send_stream(struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t dsz) +static int tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz) +{ + struct sock *sk = sock->sk; + int ret; + + lock_sock(sk); + ret = __tipc_send_stream(sock, m, dsz); + release_sock(sk); + + return ret; +} + +static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz) { struct sock *sk = sock->sk; struct net *net = sock_net(sk); @@ -1080,7 +1089,7 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock, /* Handle implied connection establishment */ if (unlikely(dest)) { - rc = tipc_sendmsg(iocb, sock, m, dsz); + rc = __tipc_sendmsg(sock, m, dsz); if (dsz && (dsz == rc)) tsk->sent_unacked = 1; return rc; @@ -1088,15 +1097,11 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock, if (dsz > (uint)INT_MAX) return -EMSGSIZE; - if (iocb) - lock_sock(sk); - if (unlikely(sock->state != SS_CONNECTED)) { 
if (sock->state == SS_DISCONNECTING) - rc = -EPIPE; + return -EPIPE; else - rc = -ENOTCONN; - goto exit; + return -ENOTCONN; } timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); @@ -1108,7 +1113,7 @@ next: send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE); rc = tipc_msg_build(mhdr, m, sent, send, mtu, pktchain); if (unlikely(rc < 0)) - goto exit; + return rc; do { if (likely(!tsk_conn_cong(tsk))) { rc = tipc_link_xmit(net, pktchain, dnode, portid); @@ -1133,15 +1138,12 @@ next: if (rc) __skb_queue_purge(pktchain); } while (!rc); -exit: - if (iocb) - release_sock(sk); + return sent ? sent : rc; } /** * tipc_send_packet - send a connection-oriented message - * @iocb: if NULL, indicates that socket lock is already held * @sock: socket structure * @m: message to send * @dsz: length of data to be transmitted @@ -1150,13 +1152,12 @@ exit: * * Returns the number of bytes sent on success, or errno otherwise */ -static int tipc_send_packet(struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t dsz) +static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz) { if (dsz > TIPC_MAX_USER_MSG_SIZE) return -EMSGSIZE; - return tipc_send_stream(iocb, sock, m, dsz); + return tipc_send_stream(sock, m, dsz); } /* tipc_sk_finish_conn - complete the setup of a connection @@ -1317,12 +1318,12 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) err = 0; if (!skb_queue_empty(&sk->sk_receive_queue)) break; - err = sock_intr_errno(timeo); - if (signal_pending(current)) - break; err = -EAGAIN; if (!timeo) break; + err = sock_intr_errno(timeo); + if (signal_pending(current)) + break; } finish_wait(sk_sleep(sk), &wait); *timeop = timeo; @@ -1331,7 +1332,6 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) /** * tipc_recvmsg - receive packet-oriented message - * @iocb: (unused) * @m: descriptor for message info * @buf_len: total size of user buffer area * @flags: receive flags @@ -1341,8 +1341,8 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) * * Returns size of returned message data, errno otherwise */ -static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t buf_len, int flags) +static int tipc_recvmsg(struct socket *sock, struct msghdr *m, size_t buf_len, + int flags) { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); @@ -1426,7 +1426,6 @@ exit: /** * tipc_recv_stream - receive stream-oriented data - * @iocb: (unused) * @m: descriptor for message info * @buf_len: total size of user buffer area * @flags: receive flags @@ -1436,8 +1435,8 @@ exit: * * Returns size of returned message data, errno otherwise */ -static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t buf_len, int flags) +static int tipc_recv_stream(struct socket *sock, struct msghdr *m, + size_t buf_len, int flags) { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); @@ -1947,7 +1946,7 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest, if (!timeout) m.msg_flags = MSG_DONTWAIT; - res = tipc_sendmsg(NULL, sock, &m, 0); + res = __tipc_sendmsg(sock, &m, 0); if ((res < 0) && (res != -EWOULDBLOCK)) goto exit; @@ -2027,12 +2026,12 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo) err = -EINVAL; if (sock->state != SS_LISTENING) break; - err = sock_intr_errno(timeo); - if (signal_pending(current)) - break; err = -EAGAIN; if (!timeo) break; + err = sock_intr_errno(timeo); + if (signal_pending(current)) + break; } 
finish_wait(sk_sleep(sk), &wait); return err; @@ -2103,7 +2102,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags) struct msghdr m = {NULL,}; tsk_advance_rx_queue(sk); - tipc_send_packet(NULL, new_sock, &m, 0); + __tipc_send_stream(new_sock, &m, 0); } else { __skb_dequeue(&sk->sk_receive_queue); __skb_queue_head(&new_sk->sk_receive_queue, buf); diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 72c339e..1c147c86 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c @@ -162,19 +162,6 @@ static void subscr_del(struct tipc_subscription *sub) atomic_dec(&tn->subscription_count); } -/** - * subscr_terminate - terminate communication with a subscriber - * - * Note: Must call it in process context since it might sleep. - */ -static void subscr_terminate(struct tipc_subscription *sub) -{ - struct tipc_subscriber *subscriber = sub->subscriber; - struct tipc_net *tn = net_generic(sub->net, tipc_net_id); - - tipc_conn_terminate(tn->topsrv, subscriber->conid); -} - static void subscr_release(struct tipc_subscriber *subscriber) { struct tipc_subscription *sub; @@ -312,16 +299,14 @@ static void subscr_conn_msg_event(struct net *net, int conid, { struct tipc_subscriber *subscriber = usr_data; struct tipc_subscription *sub = NULL; + struct tipc_net *tn = net_generic(net, tipc_net_id); spin_lock_bh(&subscriber->lock); - if (subscr_subscribe(net, (struct tipc_subscr *)buf, subscriber, - &sub) < 0) { - spin_unlock_bh(&subscriber->lock); - subscr_terminate(sub); - return; - } + subscr_subscribe(net, (struct tipc_subscr *)buf, subscriber, &sub); if (sub) tipc_nametbl_subscribe(sub); + else + tipc_conn_terminate(tn->topsrv, subscriber->conid); spin_unlock_bh(&subscriber->lock); } diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c new file mode 100644 index 0000000..fc2fb11 --- /dev/null +++ b/net/tipc/udp_media.c @@ -0,0 +1,442 @@ +/* net/tipc/udp_media.c: IP bearer support for TIPC + * + * Copyright (c) 2015, Ericsson AB + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/socket.h> +#include <linux/ip.h> +#include <linux/udp.h> +#include <linux/inet.h> +#include <linux/inetdevice.h> +#include <linux/igmp.h> +#include <linux/kernel.h> +#include <linux/workqueue.h> +#include <linux/list.h> +#include <net/sock.h> +#include <net/ip.h> +#include <net/udp_tunnel.h> +#include <linux/tipc_netlink.h> +#include "core.h" +#include "bearer.h" + +/* IANA assigned UDP port */ +#define UDP_PORT_DEFAULT 6118 + +static const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = { + [TIPC_NLA_UDP_UNSPEC] = {.type = NLA_UNSPEC}, + [TIPC_NLA_UDP_LOCAL] = {.type = NLA_BINARY, + .len = sizeof(struct sockaddr_storage)}, + [TIPC_NLA_UDP_REMOTE] = {.type = NLA_BINARY, + .len = sizeof(struct sockaddr_storage)}, +}; + +/** + * struct udp_media_addr - IP/UDP addressing information + * + * This is the bearer level originating address used in neighbor discovery + * messages, and all fields should be in network byte order + */ +struct udp_media_addr { + __be16 proto; + __be16 udp_port; + union { + struct in_addr ipv4; + struct in6_addr ipv6; + }; +}; + +/** + * struct udp_bearer - ip/udp bearer data structure + * @bearer: associated generic tipc bearer + * @ubsock: bearer associated socket + * @ifindex: local address scope + * @work: used to schedule deferred work on a bearer + */ +struct udp_bearer { + struct tipc_bearer __rcu *bearer; + struct socket *ubsock; + u32 ifindex; + struct work_struct work; +}; + +/* udp_media_addr_set - convert a ip/udp address to a TIPC media address */ +static void tipc_udp_media_addr_set(struct tipc_media_addr *addr, + struct udp_media_addr *ua) +{ + memset(addr, 0, sizeof(struct tipc_media_addr)); + addr->media_id = TIPC_MEDIA_TYPE_UDP; + memcpy(addr->value, ua, sizeof(struct udp_media_addr)); + if (ntohs(ua->proto) == ETH_P_IP) { + if (ipv4_is_multicast(ua->ipv4.s_addr)) + addr->broadcast = 1; + } else if (ntohs(ua->proto) == ETH_P_IPV6) { + if (ipv6_addr_type(&ua->ipv6) & IPV6_ADDR_MULTICAST) + addr->broadcast = 1; + } else { + pr_err("Invalid UDP media address\n"); + } +} + +/* tipc_udp_addr2str - convert ip/udp address to string */ +static int tipc_udp_addr2str(struct tipc_media_addr *a, char *buf, int size) +{ + struct udp_media_addr *ua = (struct udp_media_addr *)&a->value; + + if (ntohs(ua->proto) == ETH_P_IP) + snprintf(buf, size, "%pI4:%u", &ua->ipv4, ntohs(ua->udp_port)); + else if (ntohs(ua->proto) == ETH_P_IPV6) + snprintf(buf, size, "%pI6:%u", &ua->ipv6, ntohs(ua->udp_port)); + else + pr_err("Invalid UDP media address\n"); + return 0; +} + +/* tipc_udp_msg2addr - extract an ip/udp address from a TIPC ndisc message */ +static int tipc_udp_msg2addr(struct tipc_bearer *b, struct tipc_media_addr *a, + char *msg) +{ + struct udp_media_addr *ua; + + ua = (struct udp_media_addr *) (msg + TIPC_MEDIA_ADDR_OFFSET); + if (msg[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_UDP) + return -EINVAL; + tipc_udp_media_addr_set(a, ua); + return 0; +} + +/* tipc_udp_addr2msg - write an ip/udp address to 
a TIPC ndisc message */ +static int tipc_udp_addr2msg(char *msg, struct tipc_media_addr *a) +{ + memset(msg, 0, TIPC_MEDIA_INFO_SIZE); + msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_UDP; + memcpy(msg + TIPC_MEDIA_ADDR_OFFSET, a->value, + sizeof(struct udp_media_addr)); + return 0; +} + +/* tipc_send_msg - enqueue a send request */ +static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb, + struct tipc_bearer *b, + struct tipc_media_addr *dest) +{ + int ttl, err = 0; + struct udp_bearer *ub; + struct udp_media_addr *dst = (struct udp_media_addr *)&dest->value; + struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value; + struct sk_buff *clone; + struct rtable *rt; + + clone = skb_clone(skb, GFP_ATOMIC); + skb_set_inner_protocol(clone, htons(ETH_P_TIPC)); + ub = rcu_dereference_rtnl(b->media_ptr); + if (!ub) { + err = -ENODEV; + goto tx_error; + } + if (dst->proto == htons(ETH_P_IP)) { + struct flowi4 fl = { + .daddr = dst->ipv4.s_addr, + .saddr = src->ipv4.s_addr, + .flowi4_mark = clone->mark, + .flowi4_proto = IPPROTO_UDP + }; + rt = ip_route_output_key(net, &fl); + if (IS_ERR(rt)) { + err = PTR_ERR(rt); + goto tx_error; + } + ttl = ip4_dst_hoplimit(&rt->dst); + err = udp_tunnel_xmit_skb(rt, clone, src->ipv4.s_addr, + dst->ipv4.s_addr, 0, ttl, 0, + src->udp_port, dst->udp_port, + false, true); + if (err < 0) { + ip_rt_put(rt); + goto tx_error; + } +#if IS_ENABLED(CONFIG_IPV6) + } else { + struct dst_entry *ndst; + struct flowi6 fl6 = { + .flowi6_oif = ub->ifindex, + .daddr = dst->ipv6, + .saddr = src->ipv6, + .flowi6_proto = IPPROTO_UDP + }; + err = ip6_dst_lookup(ub->ubsock->sk, &ndst, &fl6); + if (err) + goto tx_error; + ttl = ip6_dst_hoplimit(ndst); + err = udp_tunnel6_xmit_skb(ndst, clone, ndst->dev, &src->ipv6, + &dst->ipv6, 0, ttl, src->udp_port, + dst->udp_port, false); +#endif + } + return err; + +tx_error: + kfree_skb(clone); + return err; +} + +/* tipc_udp_recv - read data from bearer socket */ +static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb) +{ + struct udp_bearer *ub; + struct tipc_bearer *b; + + ub = rcu_dereference_sk_user_data(sk); + if (!ub) { + pr_err_ratelimited("Failed to get UDP bearer reference"); + kfree_skb(skb); + return 0; + } + + skb_pull(skb, sizeof(struct udphdr)); + rcu_read_lock(); + b = rcu_dereference_rtnl(ub->bearer); + + if (b) { + tipc_rcv(sock_net(sk), skb, b); + rcu_read_unlock(); + return 0; + } + rcu_read_unlock(); + kfree_skb(skb); + return 0; +} + +static int enable_mcast(struct udp_bearer *ub, struct udp_media_addr *remote) +{ + int err = 0; + struct ip_mreqn mreqn; + struct sock *sk = ub->ubsock->sk; + + if (ntohs(remote->proto) == ETH_P_IP) { + if (!ipv4_is_multicast(remote->ipv4.s_addr)) + return 0; + mreqn.imr_multiaddr = remote->ipv4; + mreqn.imr_ifindex = ub->ifindex; + err = __ip_mc_join_group(sk, &mreqn); + } else { + if (!ipv6_addr_is_multicast(&remote->ipv6)) + return 0; + err = __ipv6_sock_mc_join(sk, ub->ifindex, &remote->ipv6); + } + return err; +} + +/** + * parse_options - build local/remote addresses from configuration + * @attrs: netlink config data + * @ub: UDP bearer instance + * @local: local bearer IP address/port + * @remote: peer or multicast IP/port + */ +static int parse_options(struct nlattr *attrs[], struct udp_bearer *ub, + struct udp_media_addr *local, + struct udp_media_addr *remote) +{ + struct nlattr *opts[TIPC_NLA_UDP_MAX + 1]; + struct sockaddr_storage *sa_local, *sa_remote; + + if (!attrs[TIPC_NLA_BEARER_UDP_OPTS]) + goto err; + if (nla_parse_nested(opts, TIPC_NLA_UDP_MAX, + 
attrs[TIPC_NLA_BEARER_UDP_OPTS], + tipc_nl_udp_policy)) + goto err; + if (opts[TIPC_NLA_UDP_LOCAL] && opts[TIPC_NLA_UDP_REMOTE]) { + sa_local = nla_data(opts[TIPC_NLA_UDP_LOCAL]); + sa_remote = nla_data(opts[TIPC_NLA_UDP_REMOTE]); + } else { +err: + pr_err("Invalid UDP bearer configuration"); + return -EINVAL; + } + if ((sa_local->ss_family & sa_remote->ss_family) == AF_INET) { + struct sockaddr_in *ip4; + + ip4 = (struct sockaddr_in *)sa_local; + local->proto = htons(ETH_P_IP); + local->udp_port = ip4->sin_port; + local->ipv4.s_addr = ip4->sin_addr.s_addr; + + ip4 = (struct sockaddr_in *)sa_remote; + remote->proto = htons(ETH_P_IP); + remote->udp_port = ip4->sin_port; + remote->ipv4.s_addr = ip4->sin_addr.s_addr; + return 0; + +#if IS_ENABLED(CONFIG_IPV6) + } else if ((sa_local->ss_family & sa_remote->ss_family) == AF_INET6) { + struct sockaddr_in6 *ip6; + + ip6 = (struct sockaddr_in6 *)sa_local; + local->proto = htons(ETH_P_IPV6); + local->udp_port = ip6->sin6_port; + local->ipv6 = ip6->sin6_addr; + ub->ifindex = ip6->sin6_scope_id; + + ip6 = (struct sockaddr_in6 *)sa_remote; + remote->proto = htons(ETH_P_IPV6); + remote->udp_port = ip6->sin6_port; + remote->ipv6 = ip6->sin6_addr; + return 0; +#endif + } + return -EADDRNOTAVAIL; +} + +/** + * tipc_udp_enable - callback to create a new udp bearer instance + * @net: network namespace + * @b: pointer to generic tipc_bearer + * @attrs: netlink bearer configuration + * + * validate the bearer parameters and initialize the udp bearer + * rtnl_lock should be held + */ +static int tipc_udp_enable(struct net *net, struct tipc_bearer *b, + struct nlattr *attrs[]) +{ + int err = -EINVAL; + struct udp_bearer *ub; + struct udp_media_addr *remote; + struct udp_media_addr local = {0}; + struct udp_port_cfg udp_conf = {0}; + struct udp_tunnel_sock_cfg tuncfg = {NULL}; + + ub = kzalloc(sizeof(*ub), GFP_ATOMIC); + if (!ub) + return -ENOMEM; + + remote = (struct udp_media_addr *)&b->bcast_addr.value; + memset(remote, 0, sizeof(struct udp_media_addr)); + err = parse_options(attrs, ub, &local, remote); + if (err) + goto err; + + b->bcast_addr.media_id = TIPC_MEDIA_TYPE_UDP; + b->bcast_addr.broadcast = 1; + rcu_assign_pointer(b->media_ptr, ub); + rcu_assign_pointer(ub->bearer, b); + tipc_udp_media_addr_set(&b->addr, &local); + if (local.proto == htons(ETH_P_IP)) { + struct net_device *dev; + + dev = __ip_dev_find(net, local.ipv4.s_addr, false); + if (!dev) { + err = -ENODEV; + goto err; + } + udp_conf.family = AF_INET; + udp_conf.local_ip.s_addr = htonl(INADDR_ANY); + udp_conf.use_udp_checksums = false; + ub->ifindex = dev->ifindex; + b->mtu = dev->mtu - sizeof(struct iphdr) + - sizeof(struct udphdr); +#if IS_ENABLED(CONFIG_IPV6) + } else if (local.proto == htons(ETH_P_IPV6)) { + udp_conf.family = AF_INET6; + udp_conf.use_udp6_tx_checksums = true; + udp_conf.use_udp6_rx_checksums = true; + udp_conf.local_ip6 = in6addr_any; + b->mtu = 1280; +#endif + } else { + err = -EAFNOSUPPORT; + goto err; + } + udp_conf.local_udp_port = local.udp_port; + err = udp_sock_create(net, &udp_conf, &ub->ubsock); + if (err) + goto err; + tuncfg.sk_user_data = ub; + tuncfg.encap_type = 1; + tuncfg.encap_rcv = tipc_udp_recv; + tuncfg.encap_destroy = NULL; + setup_udp_tunnel_sock(net, ub->ubsock, &tuncfg); + + if (enable_mcast(ub, remote)) + goto err; + return 0; +err: + kfree(ub); + return err; +} + +/* cleanup_bearer - break the socket/bearer association */ +static void cleanup_bearer(struct work_struct *work) +{ + struct udp_bearer *ub = container_of(work, struct udp_bearer, 
work); + + if (ub->ubsock) + udp_tunnel_sock_release(ub->ubsock); + synchronize_net(); + kfree(ub); +} + +/* tipc_udp_disable - detach bearer from socket */ +static void tipc_udp_disable(struct tipc_bearer *b) +{ + struct udp_bearer *ub; + + ub = rcu_dereference_rtnl(b->media_ptr); + if (!ub) { + pr_err("UDP bearer instance not found\n"); + return; + } + if (ub->ubsock) + sock_set_flag(ub->ubsock->sk, SOCK_DEAD); + RCU_INIT_POINTER(b->media_ptr, NULL); + RCU_INIT_POINTER(ub->bearer, NULL); + + /* sock_release need to be done outside of rtnl lock */ + INIT_WORK(&ub->work, cleanup_bearer); + schedule_work(&ub->work); +} + +struct tipc_media udp_media_info = { + .send_msg = tipc_udp_send_msg, + .enable_media = tipc_udp_enable, + .disable_media = tipc_udp_disable, + .addr2str = tipc_udp_addr2str, + .addr2msg = tipc_udp_addr2msg, + .msg2addr = tipc_udp_msg2addr, + .priority = TIPC_DEF_LINK_PRI, + .tolerance = TIPC_DEF_LINK_TOL, + .window = TIPC_DEF_LINK_WIN, + .type_id = TIPC_MEDIA_TYPE_UDP, + .hwaddr_len = 0, + .name = "udp" +}; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 526b6ed..433f287 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -516,20 +516,15 @@ static unsigned int unix_dgram_poll(struct file *, struct socket *, poll_table *); static int unix_ioctl(struct socket *, unsigned int, unsigned long); static int unix_shutdown(struct socket *, int); -static int unix_stream_sendmsg(struct kiocb *, struct socket *, - struct msghdr *, size_t); -static int unix_stream_recvmsg(struct kiocb *, struct socket *, - struct msghdr *, size_t, int); -static int unix_dgram_sendmsg(struct kiocb *, struct socket *, - struct msghdr *, size_t); -static int unix_dgram_recvmsg(struct kiocb *, struct socket *, - struct msghdr *, size_t, int); +static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t); +static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int); +static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t); +static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int); static int unix_dgram_connect(struct socket *, struct sockaddr *, int, int); -static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *, - struct msghdr *, size_t); -static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *, - struct msghdr *, size_t, int); +static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t); +static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t, + int); static int unix_set_peek_off(struct sock *sk, int val) { @@ -1442,8 +1437,8 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock, * Send AF_UNIX data. */ -static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) { struct sock *sk = sock->sk; struct net *net = sock_net(sk); @@ -1622,8 +1617,8 @@ out: */ #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768)) -static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) { struct sock *sk = sock->sk; struct sock *other = NULL; @@ -1725,8 +1720,8 @@ out_err: return sent ? 
: err; } -static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) { int err; struct sock *sk = sock->sk; @@ -1741,19 +1736,18 @@ static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock, if (msg->msg_namelen) msg->msg_namelen = 0; - return unix_dgram_sendmsg(kiocb, sock, msg, len); + return unix_dgram_sendmsg(sock, msg, len); } -static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, - int flags) +static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg, + size_t size, int flags) { struct sock *sk = sock->sk; if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; - return unix_dgram_recvmsg(iocb, sock, msg, size, flags); + return unix_dgram_recvmsg(sock, msg, size, flags); } static void unix_copy_addr(struct msghdr *msg, struct sock *sk) @@ -1766,9 +1760,8 @@ static void unix_copy_addr(struct msghdr *msg, struct sock *sk) } } -static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, - int flags) +static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, + size_t size, int flags) { struct scm_cookie scm; struct sock *sk = sock->sk; @@ -1900,9 +1893,8 @@ static unsigned int unix_skb_len(const struct sk_buff *skb) return skb->len - UNIXCB(skb).consumed; } -static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, - int flags) +static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg, + size_t size, int flags) { struct scm_cookie scm; struct sock *sk = sock->sk; diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 1d0e39c..2ec86e6 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -949,8 +949,8 @@ static unsigned int vsock_poll(struct file *file, struct socket *sock, return mask; } -static int vsock_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) { int err; struct sock *sk; @@ -1062,11 +1062,10 @@ out: return err; } -static int vsock_dgram_recvmsg(struct kiocb *kiocb, struct socket *sock, - struct msghdr *msg, size_t len, int flags) +static int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg, + size_t len, int flags) { - return transport->dgram_dequeue(kiocb, vsock_sk(sock->sk), msg, len, - flags); + return transport->dgram_dequeue(vsock_sk(sock->sk), msg, len, flags); } static const struct proto_ops vsock_dgram_ops = { @@ -1505,8 +1504,8 @@ static int vsock_stream_getsockopt(struct socket *sock, return 0; } -static int vsock_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) { struct sock *sk; struct vsock_sock *vsk; @@ -1644,9 +1643,8 @@ out: static int -vsock_stream_recvmsg(struct kiocb *kiocb, - struct socket *sock, - struct msghdr *msg, size_t len, int flags) +vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + int flags) { struct sock *sk; struct vsock_sock *vsk; diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index 7f32550..c294da0 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -1730,8 +1730,7 @@ static int vmci_transport_dgram_enqueue( return 
err - sizeof(*dg); } -static int vmci_transport_dgram_dequeue(struct kiocb *kiocb, - struct vsock_sock *vsk, +static int vmci_transport_dgram_dequeue(struct vsock_sock *vsk, struct msghdr *msg, size_t len, int flags) { diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index e24fc58..6309b9c 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c @@ -533,7 +533,7 @@ int cfg80211_ibss_wext_giwap(struct net_device *dev, else if (wdev->wext.ibss.bssid) memcpy(ap_addr->sa_data, wdev->wext.ibss.bssid, ETH_ALEN); else - memset(ap_addr->sa_data, 0, ETH_ALEN); + eth_zero_addr(ap_addr->sa_data); wdev_unlock(wdev); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index be25015..864b782 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -5683,8 +5683,8 @@ static int nl80211_parse_random_mac(struct nlattr **attrs, int i; if (!attrs[NL80211_ATTR_MAC] && !attrs[NL80211_ATTR_MAC_MASK]) { - memset(mac_addr, 0, ETH_ALEN); - memset(mac_addr_mask, 0, ETH_ALEN); + eth_zero_addr(mac_addr); + eth_zero_addr(mac_addr_mask); mac_addr[0] = 0x2; mac_addr_mask[0] = 0x3; diff --git a/net/wireless/trace.h b/net/wireless/trace.h index b17b369..a00ee88 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -7,6 +7,7 @@ #include <linux/tracepoint.h> #include <linux/rtnetlink.h> +#include <linux/etherdevice.h> #include <net/cfg80211.h> #include "core.h" @@ -15,7 +16,7 @@ if (given_mac) \ memcpy(__entry->entry_mac, given_mac, ETH_ALEN); \ else \ - memset(__entry->entry_mac, 0, ETH_ALEN); \ + eth_zero_addr(__entry->entry_mac); \ } while (0) #define MAC_PR_FMT "%pM" #define MAC_PR_ARG(entry_mac) (__entry->entry_mac) @@ -1077,7 +1078,7 @@ TRACE_EVENT(rdev_auth, if (req->bss) MAC_ASSIGN(bssid, req->bss->bssid); else - memset(__entry->bssid, 0, ETH_ALEN); + eth_zero_addr(__entry->bssid); __entry->auth_type = req->auth_type; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", auth type: %d, bssid: " MAC_PR_FMT, @@ -1103,7 +1104,7 @@ TRACE_EVENT(rdev_assoc, if (req->bss) MAC_ASSIGN(bssid, req->bss->bssid); else - memset(__entry->bssid, 0, ETH_ALEN); + eth_zero_addr(__entry->bssid); MAC_ASSIGN(prev_bssid, req->prev_bssid); __entry->use_mfp = req->use_mfp; __entry->flags = req->flags; @@ -1153,7 +1154,7 @@ TRACE_EVENT(rdev_disassoc, if (req->bss) MAC_ASSIGN(bssid, req->bss->bssid); else - memset(__entry->bssid, 0, ETH_ALEN); + eth_zero_addr(__entry->bssid); __entry->reason_code = req->reason_code; __entry->local_state_change = req->local_state_change; ), diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c index 368611c..a4e8af3 100644 --- a/net/wireless/wext-sme.c +++ b/net/wireless/wext-sme.c @@ -322,7 +322,7 @@ int cfg80211_mgd_wext_giwap(struct net_device *dev, if (wdev->current_bss) memcpy(ap_addr->sa_data, wdev->current_bss->pub.bssid, ETH_ALEN); else - memset(ap_addr->sa_data, 0, ETH_ALEN); + eth_zero_addr(ap_addr->sa_data); wdev_unlock(wdev); return 0; diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index d9149b6..c3ab230 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -1077,8 +1077,7 @@ out_clear_request: goto out; } -static int x25_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len) +static int x25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); @@ -1252,8 +1251,7 @@ out_kfree_skb: } -static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, +static int x25_recvmsg(struct socket *sock, struct msghdr *msg, size_t 
size, int flags) { struct sock *sk = sock->sk; diff --git a/samples/bpf/libbpf.h b/samples/bpf/libbpf.h index 58c5fe1..a6bb7e9c 100644 --- a/samples/bpf/libbpf.h +++ b/samples/bpf/libbpf.h @@ -92,7 +92,9 @@ extern char bpf_log_buf[LOG_BUF_SIZE]; .off = 0, \ .imm = ((__u64) (IMM)) >> 32 }) -#define BPF_PSEUDO_MAP_FD 1 +#ifndef BPF_PSEUDO_MAP_FD +# define BPF_PSEUDO_MAP_FD 1 +#endif /* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */ #define BPF_LD_MAP_FD(DST, MAP_FD) \ diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c index b96175e..7b56b59 100644 --- a/samples/bpf/test_verifier.c +++ b/samples/bpf/test_verifier.c @@ -288,7 +288,8 @@ static struct bpf_test tests[] = { BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8), /* should be able to access R0 = *(R2 + 8) */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), + /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */ + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), BPF_EXIT_INSN(), }, .result = ACCEPT, @@ -687,7 +688,7 @@ static int test(void) } printf("#%d %s ", i, tests[i].descr); - prog_fd = bpf_prog_load(BPF_PROG_TYPE_UNSPEC, prog, + prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog, prog_len * sizeof(struct bpf_insn), "GPL"); diff --git a/samples/pktgen/pktgen.conf-1-1 b/samples/pktgen/pktgen.conf-1-1 new file mode 100755 index 0000000..f91daad --- /dev/null +++ b/samples/pktgen/pktgen.conf-1-1 @@ -0,0 +1,59 @@ +#!/bin/bash + +#modprobe pktgen + + +function pgset() { + local result + + echo $1 > $PGDEV + + result=`cat $PGDEV | fgrep "Result: OK:"` + if [ "$result" = "" ]; then + cat $PGDEV | fgrep Result: + fi +} + +# Config Start Here ----------------------------------------------------------- + + +# thread config +# Each CPU has its own thread. One CPU example. We add eth1. + +PGDEV=/proc/net/pktgen/kpktgend_0 + echo "Removing all devices" + pgset "rem_device_all" + echo "Adding eth1" + pgset "add_device eth1" + + +# device config +# delay 0 means maximum speed. + +CLONE_SKB="clone_skb 1000000" +# NIC adds 4 bytes CRC +PKT_SIZE="pkt_size 60" + +# COUNT 0 means forever +#COUNT="count 0" +COUNT="count 10000000" +DELAY="delay 0" + +PGDEV=/proc/net/pktgen/eth1 + echo "Configuring $PGDEV" + pgset "$COUNT" + pgset "$CLONE_SKB" + pgset "$PKT_SIZE" + pgset "$DELAY" + pgset "dst 10.10.11.2" + pgset "dst_mac 00:04:23:08:91:dc" + + +# Time to run +PGDEV=/proc/net/pktgen/pgctrl + + echo "Running... ctrl^C to stop" + trap true INT + pgset "start" + echo "Done" + cat /proc/net/pktgen/eth1 diff --git a/samples/pktgen/pktgen.conf-1-1-flows b/samples/pktgen/pktgen.conf-1-1-flows new file mode 100755 index 0000000..081749c --- /dev/null +++ b/samples/pktgen/pktgen.conf-1-1-flows @@ -0,0 +1,67 @@ +#!/bin/bash + +#modprobe pktgen + + +function pgset() { + local result + + echo $1 > $PGDEV + + result=`cat $PGDEV | fgrep "Result: OK:"` + if [ "$result" = "" ]; then + cat $PGDEV | fgrep Result: + fi +} + +# Config Start Here ----------------------------------------------------------- + + +# thread config +# Each CPU has its own thread. One CPU example. We add eth1. + +PGDEV=/proc/net/pktgen/kpktgend_0 + echo "Removing all devices" + pgset "rem_device_all" + echo "Adding eth1" + pgset "add_device eth1" + + +# device config +# delay 0 +# We need to do alloc for every skb since we cannot clone here. 
+ +CLONE_SKB="clone_skb 0" +# NIC adds 4 bytes CRC +PKT_SIZE="pkt_size 60" + +# COUNT 0 means forever +#COUNT="count 0" +COUNT="count 10000000" +DELAY="delay 0" + +PGDEV=/proc/net/pktgen/eth1 + echo "Configuring $PGDEV" + pgset "$COUNT" + pgset "$CLONE_SKB" + pgset "$PKT_SIZE" + pgset "$DELAY" + # Random address within the min-max range + pgset "flag IPDST_RND" + pgset "dst_min 10.0.0.0" + pgset "dst_max 10.255.255.255" + + # 8k Concurrent flows at 4 pkts + pgset "flows 8192" + pgset "flowlen 4" + + pgset "dst_mac 00:04:23:08:91:dc" + +# Time to run +PGDEV=/proc/net/pktgen/pgctrl + + echo "Running... ctrl^C to stop" + trap true INT + pgset "start" + echo "Done" + cat /proc/net/pktgen/eth1
diff --git a/samples/pktgen/pktgen.conf-1-1-ip6 b/samples/pktgen/pktgen.conf-1-1-ip6 new file mode 100755 index 0000000..0b9ffd4 --- /dev/null +++ b/samples/pktgen/pktgen.conf-1-1-ip6 @@ -0,0 +1,60 @@ +#!/bin/bash + +#modprobe pktgen + + +function pgset() { + local result + + echo $1 > $PGDEV + + result=`cat $PGDEV | fgrep "Result: OK:"` + if [ "$result" = "" ]; then + cat $PGDEV | fgrep Result: + fi +} + +# Config Start Here ----------------------------------------------------------- + + +# thread config +# Each CPU has its own thread. One CPU example. We add eth1. +# IPv6. Note increase in minimal packet length + +PGDEV=/proc/net/pktgen/kpktgend_0 + echo "Removing all devices" + pgset "rem_device_all" + echo "Adding eth1" + pgset "add_device eth1" + + +# device config +# delay 0 + +CLONE_SKB="clone_skb 1000000" +# NIC adds 4 bytes CRC +PKT_SIZE="pkt_size 66" + +# COUNT 0 means forever +#COUNT="count 0" +COUNT="count 10000000" +DELAY="delay 0" + +PGDEV=/proc/net/pktgen/eth1 + echo "Configuring $PGDEV" + pgset "$COUNT" + pgset "$CLONE_SKB" + pgset "$PKT_SIZE" + pgset "$DELAY" + pgset "dst6 fec0::1" + pgset "src6 fec0::2" + pgset "dst_mac 00:04:23:08:91:dc" + +# Time to run +PGDEV=/proc/net/pktgen/pgctrl + + echo "Running... ctrl^C to stop" + trap true INT + pgset "start" + echo "Done" + cat /proc/net/pktgen/eth1
diff --git a/samples/pktgen/pktgen.conf-1-1-ip6-rdos b/samples/pktgen/pktgen.conf-1-1-ip6-rdos new file mode 100755 index 0000000..ad98e5f --- /dev/null +++ b/samples/pktgen/pktgen.conf-1-1-ip6-rdos @@ -0,0 +1,63 @@ +#!/bin/bash + +#modprobe pktgen + + +function pgset() { + local result + + echo $1 > $PGDEV + + result=`cat $PGDEV | fgrep "Result: OK:"` + if [ "$result" = "" ]; then + cat $PGDEV | fgrep Result: + fi +} + +# Config Start Here ----------------------------------------------------------- + + +# thread config +# Each CPU has its own thread. One CPU example. We add eth1. +# IPv6. Note increase in minimal packet length + +PGDEV=/proc/net/pktgen/kpktgend_0 + echo "Removing all devices" + pgset "rem_device_all" + echo "Adding eth1" + pgset "add_device eth1" + + +# device config +# delay 0 means maximum speed. + +# We need to do alloc for every skb since we cannot clone here. +CLONE_SKB="clone_skb 0" + +# NIC adds 4 bytes CRC +PKT_SIZE="pkt_size 66" + +# COUNT 0 means forever +#COUNT="count 0" +COUNT="count 10000000" +DELAY="delay 0" + +PGDEV=/proc/net/pktgen/eth1 + echo "Configuring $PGDEV" + pgset "$COUNT" + pgset "$CLONE_SKB" + pgset "$PKT_SIZE" + pgset "$DELAY" + pgset "dst6_min fec0::1" + pgset "dst6_max fec0::FFFF:FFFF" + + pgset "dst_mac 00:04:23:08:91:dc" + +# Time to run +PGDEV=/proc/net/pktgen/pgctrl + + echo "Running... ctrl^C to stop" + trap true INT + pgset "start" + echo "Done" + cat /proc/net/pktgen/eth1
diff --git a/samples/pktgen/pktgen.conf-1-1-rdos b/samples/pktgen/pktgen.conf-1-1-rdos new file mode 100755 index 0000000..c7553be --- /dev/null +++ b/samples/pktgen/pktgen.conf-1-1-rdos @@ -0,0 +1,64 @@ +#!/bin/bash + +#modprobe pktgen + + +function pgset() { + local result + + echo $1 > $PGDEV + + result=`cat $PGDEV | fgrep "Result: OK:"` + if [ "$result" = "" ]; then + cat $PGDEV | fgrep Result: + fi +} + +# Config Start Here ----------------------------------------------------------- + + +# thread config +# Each CPU has its own thread. One CPU example. We add eth1. + +PGDEV=/proc/net/pktgen/kpktgend_0 + echo "Removing all devices" + pgset "rem_device_all" + echo "Adding eth1" + pgset "add_device eth1" + + +# device config +# delay 0 + +# We need to do alloc for every skb since we cannot clone here. + +CLONE_SKB="clone_skb 0" +# NIC adds 4 bytes CRC +PKT_SIZE="pkt_size 60" + +# COUNT 0 means forever +#COUNT="count 0" +COUNT="count 10000000" +DELAY="delay 0" + +PGDEV=/proc/net/pktgen/eth1 + echo "Configuring $PGDEV" + pgset "$COUNT" + pgset "$CLONE_SKB" + pgset "$PKT_SIZE" + pgset "$DELAY" + # Random address within the min-max range + pgset "flag IPDST_RND" + pgset "dst_min 10.0.0.0" + pgset "dst_max 10.255.255.255" + + pgset "dst_mac 00:04:23:08:91:dc" + +# Time to run +PGDEV=/proc/net/pktgen/pgctrl + + echo "Running... ctrl^C to stop" + trap true INT + pgset "start" + echo "Done" + cat /proc/net/pktgen/eth1
diff --git a/samples/pktgen/pktgen.conf-1-2 b/samples/pktgen/pktgen.conf-1-2 new file mode 100755 index 0000000..ba4eb26 --- /dev/null +++ b/samples/pktgen/pktgen.conf-1-2 @@ -0,0 +1,69 @@ +#!/bin/bash + +#modprobe pktgen + + +function pgset() { + local result + + echo $1 > $PGDEV + + result=`cat $PGDEV | fgrep "Result: OK:"` + if [ "$result" = "" ]; then + cat $PGDEV | fgrep Result: + fi +} + +# Config Start Here ----------------------------------------------------------- + + +# thread config +# One CPU means one thread. One CPU example. We add eth1, eth2 respectively. + +PGDEV=/proc/net/pktgen/kpktgend_0 + echo "Removing all devices" + pgset "rem_device_all" + echo "Adding eth1" + pgset "add_device eth1" + echo "Adding eth2" + pgset "add_device eth2" + + +# device config +# delay 0 means maximum speed. + +CLONE_SKB="clone_skb 1000000" +# NIC adds 4 bytes CRC +PKT_SIZE="pkt_size 60" + +# COUNT 0 means forever +#COUNT="count 0" +COUNT="count 10000000" +DELAY="delay 0" + +PGDEV=/proc/net/pktgen/eth1 + echo "Configuring $PGDEV" + pgset "$COUNT" + pgset "$CLONE_SKB" + pgset "$PKT_SIZE" + pgset "$DELAY" + pgset "dst 10.10.11.2" + pgset "dst_mac 00:04:23:08:91:dc" + +PGDEV=/proc/net/pktgen/eth2 + echo "Configuring $PGDEV" + pgset "$COUNT" + pgset "$CLONE_SKB" + pgset "$PKT_SIZE" + pgset "$DELAY" + pgset "dst 192.168.2.2" + pgset "dst_mac 00:04:23:08:91:de" + +# Time to run +PGDEV=/proc/net/pktgen/pgctrl + + echo "Running... ctrl^C to stop" + trap true INT + pgset "start" + echo "Done" + cat /proc/net/pktgen/eth1 /proc/net/pktgen/eth2
diff --git a/samples/pktgen/pktgen.conf-2-1 b/samples/pktgen/pktgen.conf-2-1 new file mode 100755 index 0000000..e108e97 --- /dev/null +++ b/samples/pktgen/pktgen.conf-2-1 @@ -0,0 +1,66 @@ +#!/bin/bash + +#modprobe pktgen + + +function pgset() { + local result + + echo $1 > $PGDEV + + result=`cat $PGDEV | fgrep "Result: OK:"` + if [ "$result" = "" ]; then + cat $PGDEV | fgrep Result: + fi +} + +# Config Start Here ----------------------------------------------------------- + + +# thread config +# Each CPU has its own thread. Two CPU example. We add eth1 to the first +# and leave the second idle. + +PGDEV=/proc/net/pktgen/kpktgend_0 + echo "Removing all devices" + pgset "rem_device_all" + echo "Adding eth1" + pgset "add_device eth1" + +# We need to remove old config since we don't use this thread. We can only +# have one NIC on one CPU due to affinity reasons. + +PGDEV=/proc/net/pktgen/kpktgend_1 + echo "Removing all devices" + pgset "rem_device_all" + +# device config +# delay 0 means maximum speed. + +CLONE_SKB="clone_skb 1000000" +# NIC adds 4 bytes CRC +PKT_SIZE="pkt_size 60" + +# COUNT 0 means forever +#COUNT="count 0" +COUNT="count 10000000" +DELAY="delay 0" + +PGDEV=/proc/net/pktgen/eth1 + echo "Configuring $PGDEV" + pgset "$COUNT" + pgset "$CLONE_SKB" + pgset "$PKT_SIZE" + pgset "$DELAY" + pgset "dst 10.10.11.2" + pgset "dst_mac 00:04:23:08:91:dc" + + +# Time to run +PGDEV=/proc/net/pktgen/pgctrl + + echo "Running... ctrl^C to stop" + trap true INT + pgset "start" + echo "Done" + cat /proc/net/pktgen/eth1
diff --git a/samples/pktgen/pktgen.conf-2-2 b/samples/pktgen/pktgen.conf-2-2 new file mode 100755 index 0000000..acea155 --- /dev/null +++ b/samples/pktgen/pktgen.conf-2-2 @@ -0,0 +1,73 @@ +#!/bin/bash + +#modprobe pktgen + + +function pgset() { + local result + + echo $1 > $PGDEV + + result=`cat $PGDEV | fgrep "Result: OK:"` + if [ "$result" = "" ]; then + cat $PGDEV | fgrep Result: + fi +} + +# Config Start Here ----------------------------------------------------------- + + +# thread config +# Each CPU has its own thread. Two CPU example. We add eth1, eth2 respectively. + +PGDEV=/proc/net/pktgen/kpktgend_0 + echo "Removing all devices" + pgset "rem_device_all" + echo "Adding eth1" + pgset "add_device eth1" + +PGDEV=/proc/net/pktgen/kpktgend_1 + echo "Removing all devices" + pgset "rem_device_all" + echo "Adding eth2" + pgset "add_device eth2" + + +# device config +# delay 0 means maximum speed. + +CLONE_SKB="clone_skb 1000000" +# NIC adds 4 bytes CRC +PKT_SIZE="pkt_size 60" + +# COUNT 0 means forever +#COUNT="count 0" +COUNT="count 10000000" +DELAY="delay 0" + +PGDEV=/proc/net/pktgen/eth1 + echo "Configuring $PGDEV" + pgset "$COUNT" + pgset "$CLONE_SKB" + pgset "$PKT_SIZE" + pgset "$DELAY" + pgset "dst 10.10.11.2" + pgset "dst_mac 00:04:23:08:91:dc" + +PGDEV=/proc/net/pktgen/eth2 + echo "Configuring $PGDEV" + pgset "$COUNT" + pgset "$CLONE_SKB" + pgset "$PKT_SIZE" + pgset "$DELAY" + pgset "dst 192.168.2.2" + pgset "dst_mac 00:04:23:08:91:de" + +# Time to run +PGDEV=/proc/net/pktgen/pgctrl + + echo "Running... ctrl^C to stop" + trap true INT + pgset "start" + echo "Done" + cat /proc/net/pktgen/eth1 /proc/net/pktgen/eth2
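
All of the pktgen sample configs added above follow the same pattern: load the pktgen module, attach a device to a kpktgend_N thread, push the per-device parameters through pgset, and finally write "start" to /proc/net/pktgen/pgctrl, which blocks until the requested count has been sent (or until ctrl^C). A minimal session with the first sample might look like the sketch below; it assumes the scripts are run from a kernel tree on a host where eth1 is a real, up interface, and the device name, dst 10.10.11.2 and dst_mac values are placeholders hard-coded in each script that have to be edited to match the actual test setup first.

# minimal sketch, assuming CONFIG_NET_PKTGEN=m and an interface named eth1
modprobe pktgen
./samples/pktgen/pktgen.conf-1-1   # binds eth1 to kpktgend_0, then blocks while sending
cat /proc/net/pktgen/eth1          # parameters, counters and the final "Result: OK:" line

The *-rdos and *-flows variants set clone_skb 0 because every packet gets a fresh random destination, so they allocate a new skb per packet and will generally run slower than the clone_skb 1000000 configs.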
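
Similarly, the new net/tipc/udp_media.c bearer earlier in this diff is configured over netlink, with TIPC_NLA_UDP_LOCAL and TIPC_NLA_UDP_REMOTE each carrying a sockaddr_storage; in practice that means the tipc utility from iproute2. A rough sketch of enabling it between two hosts is shown below; the bearer name UDP1 and the addresses are invented, and the option names follow tipc-bearer(8) from a tool recent enough to know the udp media type, so treat the exact syntax as an assumption rather than something this patch defines.

# on host A (192.168.0.1), assuming the TIPC node address is already set
tipc bearer enable media udp name UDP1 localip 192.168.0.1 remoteip 192.168.0.2
# on host B (192.168.0.2)
tipc bearer enable media udp name UDP1 localip 192.168.0.2 remoteip 192.168.0.1

When no port is given the tool is expected to fall back to 6118, the IANA-assigned TIPC port the new file defines as UDP_PORT_DEFAULT; and if remoteip is a multicast group, enable_mcast() joins it so that neighbor discovery behaves much as it does on the Ethernet media.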