Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/acpi.h13
-rw-r--r--include/linux/arm-smccc.h52
-rw-r--r--include/linux/async_tx.h2
-rw-r--r--include/linux/audit.h32
-rw-r--r--include/linux/backing-dev-defs.h2
-rw-r--r--include/linux/backing-dev.h12
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h3
-rw-r--r--include/linux/bitfield.h13
-rw-r--r--include/linux/blk-mq.h9
-rw-r--r--include/linux/blk_types.h38
-rw-r--r--include/linux/blkdev.h143
-rw-r--r--include/linux/blktrace_api.h18
-rw-r--r--include/linux/bpf-cgroup.h13
-rw-r--r--include/linux/bpf.h27
-rw-r--r--include/linux/bpf_trace.h7
-rw-r--r--include/linux/brcmphy.h19
-rw-r--r--include/linux/bsg-lib.h5
-rw-r--r--include/linux/buffer_head.h4
-rw-r--r--include/linux/can/core.h7
-rw-r--r--include/linux/can/dev.h8
-rw-r--r--include/linux/can/rx-offload.h59
-rw-r--r--include/linux/cdrom.h5
-rw-r--r--include/linux/clockchips.h9
-rw-r--r--include/linux/clocksource.h3
-rw-r--r--include/linux/compat.h20
-rw-r--r--include/linux/compiler.h6
-rw-r--r--include/linux/console.h4
-rw-r--r--include/linux/coredump.h1
-rw-r--r--include/linux/cpufreq.h7
-rw-r--r--include/linux/cpuhotplug.h6
-rw-r--r--include/linux/cpuidle.h1
-rw-r--r--include/linux/cpumask.h15
-rw-r--r--include/linux/cputime.h7
-rw-r--r--include/linux/cryptohash.h2
-rw-r--r--include/linux/debugfs.h11
-rw-r--r--include/linux/delay.h11
-rw-r--r--include/linux/delayacct.h1
-rw-r--r--include/linux/devfreq.h3
-rw-r--r--include/linux/device-mapper.h3
-rw-r--r--include/linux/device.h11
-rw-r--r--include/linux/dma-iommu.h10
-rw-r--r--include/linux/dma-mapping.h7
-rw-r--r--include/linux/dma/dw.h2
-rw-r--r--include/linux/dmaengine.h11
-rw-r--r--include/linux/edac.h4
-rw-r--r--include/linux/efi-bgrt.h11
-rw-r--r--include/linux/efi.h58
-rw-r--r--include/linux/elevator.h63
-rw-r--r--include/linux/etherdevice.h65
-rw-r--r--include/linux/export.h17
-rw-r--r--include/linux/extcon.h71
-rw-r--r--include/linux/extcon/extcon-adc-jack.h2
-rw-r--r--include/linux/filter.h131
-rw-r--r--include/linux/fpga/fpga-mgr.h5
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/fscache-cache.h1
-rw-r--r--include/linux/fscrypt_common.h146
-rw-r--r--include/linux/fscrypt_notsupp.h168
-rw-r--r--include/linux/fscrypt_supp.h66
-rw-r--r--include/linux/fscrypto.h345
-rw-r--r--include/linux/fsi.h50
-rw-r--r--include/linux/fsl_ifc.h8
-rw-r--r--include/linux/fsnotify_backend.h2
-rw-r--r--include/linux/genhd.h17
-rw-r--r--include/linux/gfp.h22
-rw-r--r--include/linux/gpio/driver.h107
-rw-r--r--include/linux/hid-sensor-hub.h4
-rw-r--r--include/linux/hid-sensor-ids.h4
-rw-r--r--include/linux/hrtimer.h11
-rw-r--r--include/linux/hyperv.h160
-rw-r--r--include/linux/i2c.h4
-rw-r--r--include/linux/ide.h58
-rw-r--r--include/linux/ieee80211.h51
-rw-r--r--include/linux/if_bridge.h2
-rw-r--r--include/linux/if_frad.h2
-rw-r--r--include/linux/if_macvlan.h17
-rw-r--r--include/linux/if_tap.h75
-rw-r--r--include/linux/iio/buffer.h160
-rw-r--r--include/linux/iio/buffer_impl.h162
-rw-r--r--include/linux/iio/common/st_sensors.h12
-rw-r--r--include/linux/iio/common/st_sensors_i2c.h9
-rw-r--r--include/linux/iio/kfifo_buf.h5
-rw-r--r--include/linux/iio/timer/stm32-timer-trigger.h62
-rw-r--r--include/linux/init.h4
-rw-r--r--include/linux/init_task.h40
-rw-r--r--include/linux/intel-iommu.h17
-rw-r--r--include/linux/intel_pmic_gpio.h15
-rw-r--r--include/linux/iommu.h138
-rw-r--r--include/linux/ipv6.h1
-rw-r--r--include/linux/irq.h36
-rw-r--r--include/linux/irqchip/arm-gic-v3.h5
-rw-r--r--include/linux/irqdomain.h36
-rw-r--r--include/linux/jiffies.h2
-rw-r--r--include/linux/jump_label.h4
-rw-r--r--include/linux/jump_label_ratelimit.h5
-rw-r--r--include/linux/kernel.h4
-rw-r--r--include/linux/kernel_stat.h14
-rw-r--r--include/linux/kmod.h7
-rw-r--r--include/linux/kprobes.h30
-rw-r--r--include/linux/kref.h78
-rw-r--r--include/linux/leds.h16
-rw-r--r--include/linux/libata.h10
-rw-r--r--include/linux/lightnvm.h138
-rw-r--r--include/linux/list.h13
-rw-r--r--include/linux/llist.h37
-rw-r--r--include/linux/log2.h13
-rw-r--r--include/linux/lsm_hooks.h25
-rw-r--r--include/linux/marvell_phy.h7
-rw-r--r--include/linux/math64.h26
-rw-r--r--include/linux/mdev.h56
-rw-r--r--include/linux/mdio.h26
-rw-r--r--include/linux/memcontrol.h26
-rw-r--r--include/linux/memory_hotplug.h7
-rw-r--r--include/linux/mfd/axp20x.h31
-rw-r--r--include/linux/mfd/cros_ec_commands.h3
-rw-r--r--include/linux/mfd/lpc_ich.h3
-rw-r--r--include/linux/mfd/stm32-timers.h71
-rw-r--r--include/linux/mfd/tmio.h6
-rw-r--r--include/linux/micrel_phy.h2
-rw-r--r--include/linux/miscdevice.h1
-rw-r--r--include/linux/mlx4/device.h10
-rw-r--r--include/linux/mlx5/cq.h5
-rw-r--r--include/linux/mlx5/device.h108
-rw-r--r--include/linux/mlx5/doorbell.h6
-rw-r--r--include/linux/mlx5/driver.h197
-rw-r--r--include/linux/mlx5/mlx5_ifc.h271
-rw-r--r--include/linux/mlx5/qp.h92
-rw-r--r--include/linux/mlx5/vport.h1
-rw-r--r--include/linux/mm.h8
-rw-r--r--include/linux/mm_inline.h2
-rw-r--r--include/linux/mmc/boot.h7
-rw-r--r--include/linux/mmc/card.h246
-rw-r--r--include/linux/mmc/core.h86
-rw-r--r--include/linux/mmc/dw_mmc.h274
-rw-r--r--include/linux/mmc/host.h84
-rw-r--r--include/linux/mmc/mmc.h63
-rw-r--r--include/linux/mmc/sdio_ids.h8
-rw-r--r--include/linux/mmc/sh_mmcif.h5
-rw-r--r--include/linux/mmc/slot-gpio.h3
-rw-r--r--include/linux/mmzone.h6
-rw-r--r--include/linux/mod_devicetable.h1
-rw-r--r--include/linux/module.h17
-rw-r--r--include/linux/mroute.h59
-rw-r--r--include/linux/mroute6.h2
-rw-r--r--include/linux/msi.h11
-rw-r--r--include/linux/mtd/fsmc.h156
-rw-r--r--include/linux/mtd/mtd.h16
-rw-r--r--include/linux/mtd/nand.h9
-rw-r--r--include/linux/mtd/partitions.h1
-rw-r--r--include/linux/mtd/spi-nor.h34
-rw-r--r--include/linux/mutex.h9
-rw-r--r--include/linux/netdev_features.h2
-rw-r--r--include/linux/netdevice.h117
-rw-r--r--include/linux/netfilter/nfnetlink.h1
-rw-r--r--include/linux/netfilter/x_tables.h9
-rw-r--r--include/linux/nfs4.h3
-rw-r--r--include/linux/nmi.h1
-rw-r--r--include/linux/nvme.h3
-rw-r--r--include/linux/of.h1
-rw-r--r--include/linux/of_device.h6
-rw-r--r--include/linux/of_iommu.h11
-rw-r--r--include/linux/parman.h76
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/percpu-refcount.h4
-rw-r--r--include/linux/percpu-rwsem.h8
-rw-r--r--include/linux/perf_event.h5
-rw-r--r--include/linux/phy.h40
-rw-r--r--include/linux/phy_led_triggers.h4
-rw-r--r--include/linux/pinctrl/consumer.h6
-rw-r--r--include/linux/pinctrl/pinconf-generic.h52
-rw-r--r--include/linux/pinctrl/pinctrl.h15
-rw-r--r--include/linux/platform_data/dma-dw.h2
-rw-r--r--include/linux/platform_data/intel-spi.h31
-rw-r--r--include/linux/platform_data/media/ir-rx51.h6
-rw-r--r--include/linux/platform_data/mmc-mxcmmc.h1
-rw-r--r--include/linux/platform_data/spi-ep93xx.h17
-rw-r--r--include/linux/platform_data/ti-aemif.h23
-rw-r--r--include/linux/pm_domain.h3
-rw-r--r--include/linux/pm_opp.h72
-rw-r--r--include/linux/pm_qos.h1
-rw-r--r--include/linux/poison.h1
-rw-r--r--include/linux/posix-timers.h14
-rw-r--r--include/linux/power/bq27xxx_battery.h12
-rw-r--r--include/linux/property.h19
-rw-r--r--include/linux/ptr_ring.h36
-rw-r--r--include/linux/pxa2xx_ssp.h14
-rw-r--r--include/linux/qed/common_hsi.h43
-rw-r--r--include/linux/qed/eth_common.h32
-rw-r--r--include/linux/qed/fcoe_common.h715
-rw-r--r--include/linux/qed/iscsi_common.h32
-rw-r--r--include/linux/qed/qed_chain.h34
-rw-r--r--include/linux/qed/qed_eth_if.h56
-rw-r--r--include/linux/qed/qed_fcoe_if.h145
-rw-r--r--include/linux/qed/qed_if.h76
-rw-r--r--include/linux/qed/qed_iov_if.h34
-rw-r--r--include/linux/qed/qed_iscsi_if.h32
-rw-r--r--include/linux/qed/qed_ll2_if.h31
-rw-r--r--include/linux/qed/qed_roce_if.h2
-rw-r--r--include/linux/qed/qede_roce.h2
-rw-r--r--include/linux/qed/rdma_common.h32
-rw-r--r--include/linux/qed/roce_common.h32
-rw-r--r--include/linux/qed/storage_common.h32
-rw-r--r--include/linux/qed/tcp_common.h32
-rw-r--r--include/linux/radix-tree.h4
-rw-r--r--include/linux/rcupdate.h16
-rw-r--r--include/linux/rcutiny.h6
-rw-r--r--include/linux/rcuwait.h63
-rw-r--r--include/linux/refcount.h294
-rw-r--r--include/linux/regmap.h115
-rw-r--r--include/linux/remoteproc.h4
-rw-r--r--include/linux/rfkill-regulator.h48
-rw-r--r--include/linux/rhashtable.h78
-rw-r--r--include/linux/sbitmap.h30
-rw-r--r--include/linux/sched.h140
-rw-r--r--include/linux/sched/sysctl.h1
-rw-r--r--include/linux/sctp.h143
-rw-r--r--include/linux/security.h10
-rw-r--r--include/linux/sed-opal.h70
-rw-r--r--include/linux/serdev.h262
-rw-r--r--include/linux/serial_core.h2
-rw-r--r--include/linux/serial_sci.h15
-rw-r--r--include/linux/siphash.h140
-rw-r--r--include/linux/skbuff.h66
-rw-r--r--include/linux/slab.h4
-rw-r--r--include/linux/soc/qcom/smem_state.h2
-rw-r--r--include/linux/soc/samsung/exynos-pmu.h10
-rw-r--r--include/linux/soc/ti/knav_dma.h2
-rw-r--r--include/linux/socket.h13
-rw-r--r--include/linux/spinlock.h8
-rw-r--r--include/linux/spinlock_api_smp.h2
-rw-r--r--include/linux/spinlock_api_up.h1
-rw-r--r--include/linux/sram.h27
-rw-r--r--include/linux/srcu.h10
-rw-r--r--include/linux/stmmac.h8
-rw-r--r--include/linux/sunrpc/cache.h2
-rw-r--r--include/linux/sunrpc/clnt.h1
-rw-r--r--include/linux/sunrpc/svc_xprt.h1
-rw-r--r--include/linux/suspend.h2
-rw-r--r--include/linux/swap.h3
-rw-r--r--include/linux/swiotlb.h11
-rw-r--r--include/linux/tcp.h27
-rw-r--r--include/linux/timer.h45
-rw-r--r--include/linux/timerfd.h20
-rw-r--r--include/linux/trace_events.h3
-rw-r--r--include/linux/tty.h12
-rw-r--r--include/linux/usb/chipidea.h9
-rw-r--r--include/linux/uuid.h24
-rw-r--r--include/linux/virtio.h4
-rw-r--r--include/linux/virtio_net.h6
-rw-r--r--include/linux/vme.h1
-rw-r--r--include/linux/vmw_vmci_defs.h7
-rw-r--r--include/linux/vtime.h7
-rw-r--r--include/linux/ww_mutex.h32
253 files changed, 6228 insertions, 3114 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 5b36974..673acda 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -291,7 +291,8 @@ bool acpi_processor_validate_proc_id(int proc_id);
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
-int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu);
+int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
+ int *pcpu);
int acpi_unmap_cpu(int cpu);
int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
@@ -1153,4 +1154,14 @@ int parse_spcr(bool earlycon);
static inline int parse_spcr(bool earlycon) { return 0; }
#endif
+#if IS_ENABLED(CONFIG_ACPI_GENERIC_GSI)
+int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res);
+#else
+static inline
+int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res)
+{
+ return -EINVAL;
+}
+#endif
+
#endif /*_LINUX_ACPI_H*/
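
A hedged usage sketch of the new acpi_irq_get() helper from a platform driver probe path; my_probe(), the dev_info() reporting and the surrounding error handling are illustrative assumptions, only acpi_irq_get() itself comes from this patch.

#include <linux/acpi.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
	struct resource res;
	int ret;

	if (!adev)
		return -ENODEV;

	/* Resolve interrupt index 0 of the ACPI device into a resource. */
	ret = acpi_irq_get(adev->handle, 0, &res);
	if (ret)
		return ret;	/* -EINVAL when CONFIG_ACPI_GENERIC_GSI is disabled */

	dev_info(&pdev->dev, "IRQ resource: %pR\n", &res);
	return 0;
}
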
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index b5abfda..4c5bca38 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -14,9 +14,6 @@
#ifndef __LINUX_ARM_SMCCC_H
#define __LINUX_ARM_SMCCC_H
-#include <linux/linkage.h>
-#include <linux/types.h>
-
/*
* This file provides common defines for ARM SMC Calling Convention as
* specified in
@@ -60,6 +57,13 @@
#define ARM_SMCCC_OWNER_TRUSTED_OS 50
#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63
+#define ARM_SMCCC_QUIRK_NONE 0
+#define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/linkage.h>
+#include <linux/types.h>
/**
* struct arm_smccc_res - Result from SMC/HVC call
* @a0-a3 result values from registers 0 to 3
@@ -72,33 +76,59 @@ struct arm_smccc_res {
};
/**
- * arm_smccc_smc() - make SMC calls
+ * struct arm_smccc_quirk - Contains quirk information
+ * @id: quirk identification
+ * @state: quirk specific information
+ * @a6: Qualcomm quirk entry for returning post-smc call contents of a6
+ */
+struct arm_smccc_quirk {
+ int id;
+ union {
+ unsigned long a6;
+ } state;
+};
+
+/**
+ * __arm_smccc_smc() - make SMC calls
* @a0-a7: arguments passed in registers 0 to 7
* @res: result values from registers 0 to 3
+ * @quirk: points to an arm_smccc_quirk, or NULL when no quirks are required.
*
* This function is used to make SMC calls following SMC Calling Convention.
* The content of the supplied param are copied to registers 0 to 7 prior
* to the SMC instruction. The return values are updated with the content
- * from register 0 to 3 on return from the SMC instruction.
+ * from register 0 to 3 on return from the SMC instruction. An optional
+ * quirk structure provides vendor specific behavior.
*/
-asmlinkage void arm_smccc_smc(unsigned long a0, unsigned long a1,
+asmlinkage void __arm_smccc_smc(unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3, unsigned long a4,
unsigned long a5, unsigned long a6, unsigned long a7,
- struct arm_smccc_res *res);
+ struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
/**
- * arm_smccc_hvc() - make HVC calls
+ * __arm_smccc_hvc() - make HVC calls
* @a0-a7: arguments passed in registers 0 to 7
* @res: result values from registers 0 to 3
+ * @quirk: points to an arm_smccc_quirk, or NULL when no quirks are required.
*
* This function is used to make HVC calls following SMC Calling
* Convention. The content of the supplied param are copied to registers 0
* to 7 prior to the HVC instruction. The return values are updated with
- * the content from register 0 to 3 on return from the HVC instruction.
+ * the content from register 0 to 3 on return from the HVC instruction. An
+ * optional quirk structure provides vendor specific behavior.
*/
-asmlinkage void arm_smccc_hvc(unsigned long a0, unsigned long a1,
+asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3, unsigned long a4,
unsigned long a5, unsigned long a6, unsigned long a7,
- struct arm_smccc_res *res);
+ struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
+
+#define arm_smccc_smc(...) __arm_smccc_smc(__VA_ARGS__, NULL)
+
+#define arm_smccc_smc_quirk(...) __arm_smccc_smc(__VA_ARGS__)
+
+#define arm_smccc_hvc(...) __arm_smccc_hvc(__VA_ARGS__, NULL)
+
+#define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__)
+#endif /*__ASSEMBLY__*/
#endif /*__LINUX_ARM_SMCCC_H*/
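
A minimal sketch of the renamed entry points and the quirk plumbing; MY_FNID and my_firmware_call() are made-up placeholders, while arm_smccc_smc(), arm_smccc_smc_quirk(), struct arm_smccc_res and struct arm_smccc_quirk are the interfaces introduced above.

#include <linux/arm-smccc.h>

#define MY_FNID	0x82000001	/* illustrative SMC function ID */

static unsigned long my_firmware_call(unsigned long arg)
{
	struct arm_smccc_res res;
	struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };

	/* Plain call: the macro appends a NULL quirk pointer. */
	arm_smccc_smc(MY_FNID, arg, 0, 0, 0, 0, 0, 0, &res);

	/* Quirk-aware call: a6 is preserved and handed back in quirk.state.a6. */
	arm_smccc_smc_quirk(MY_FNID, arg, 0, 0, 0, 0, 0, 0, &res, &quirk);

	return res.a0;
}
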
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 388574e..28e3cf1 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -87,7 +87,7 @@ struct async_submit_ctl {
void *scribble;
};
-#ifdef CONFIG_DMA_ENGINE
+#if defined(CONFIG_DMA_ENGINE) && !defined(CONFIG_ASYNC_TX_CHANNEL_SWITCH)
#define async_tx_issue_pending_all dma_issue_pending_all
/**
diff --git a/include/linux/audit.h b/include/linux/audit.h
index f51fca8d..504e784 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -360,6 +360,7 @@ extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
const struct cred *old);
extern void __audit_log_capset(const struct cred *new, const struct cred *old);
extern void __audit_mmap_fd(int fd, int flags);
+extern void __audit_log_kern_module(char *name);
static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
{
@@ -387,6 +388,20 @@ static inline int audit_socketcall(int nargs, unsigned long *args)
return __audit_socketcall(nargs, args);
return 0;
}
+
+static inline int audit_socketcall_compat(int nargs, u32 *args)
+{
+ unsigned long a[AUDITSC_ARGS];
+ int i;
+
+ if (audit_dummy_context())
+ return 0;
+
+ for (i = 0; i < nargs; i++)
+ a[i] = (unsigned long)args[i];
+ return __audit_socketcall(nargs, a);
+}
+
static inline int audit_sockaddr(int len, void *addr)
{
if (unlikely(!audit_dummy_context()))
@@ -436,6 +451,12 @@ static inline void audit_mmap_fd(int fd, int flags)
__audit_mmap_fd(fd, flags);
}
+static inline void audit_log_kern_module(char *name)
+{
+ if (!audit_dummy_context())
+ __audit_log_kern_module(name);
+}
+
extern int audit_n_rules;
extern int audit_signals;
#else /* CONFIG_AUDITSYSCALL */
@@ -513,6 +534,12 @@ static inline int audit_socketcall(int nargs, unsigned long *args)
{
return 0;
}
+
+static inline int audit_socketcall_compat(int nargs, u32 *args)
+{
+ return 0;
+}
+
static inline void audit_fd_pair(int fd1, int fd2)
{ }
static inline int audit_sockaddr(int len, void *addr)
@@ -541,6 +568,11 @@ static inline void audit_log_capset(const struct cred *new,
{ }
static inline void audit_mmap_fd(int fd, int flags)
{ }
+
+static inline void audit_log_kern_module(char *name)
+{
+}
+
static inline void audit_ptrace(struct task_struct *t)
{ }
#define audit_n_rules 0
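
audit_socketcall_compat() is meant for the compat socketcall path, where the user-supplied arguments are 32-bit; a brief sketch of the intended call site (the handler itself is an assumption):

#include <linux/audit.h>

/* Illustrative compat handler: args[] holds up to AUDITSC_ARGS u32 values. */
static long my_compat_socketcall(int call, u32 *args, int nargs)
{
	int err;

	/* Widens each u32 argument before logging; returns 0 on dummy contexts. */
	err = audit_socketcall_compat(nargs, args);
	if (err)
		return err;

	/* ... dispatch the real socket call here ... */
	return 0;
}
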
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index e850e76..ad95581 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -10,6 +10,7 @@
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+#include <linux/kref.h>
struct page;
struct device;
@@ -144,6 +145,7 @@ struct backing_dev_info {
char *name;
+ struct kref refcnt; /* Reference counter for the structure */
unsigned int capabilities; /* Device capabilities */
unsigned int min_ratio;
unsigned int max_ratio, max_prop_frac;
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 43b93a9..c52a48c 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -18,7 +18,14 @@
#include <linux/slab.h>
int __must_check bdi_init(struct backing_dev_info *bdi);
-void bdi_exit(struct backing_dev_info *bdi);
+
+static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
+{
+ kref_get(&bdi->refcnt);
+ return bdi;
+}
+
+void bdi_put(struct backing_dev_info *bdi);
__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
@@ -29,6 +36,7 @@ void bdi_unregister(struct backing_dev_info *bdi);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void bdi_destroy(struct backing_dev_info *bdi);
+struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
bool range_cyclic, enum wb_reason reason);
@@ -183,7 +191,7 @@ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
sb = inode->i_sb;
#ifdef CONFIG_BLOCK
if (sb_is_blkdev_sb(sb))
- return blk_get_backing_dev_info(I_BDEV(inode));
+ return I_BDEV(inode)->bd_bdi;
#endif
return sb->s_bdi;
}
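
With the bdi now kref-backed, code that keeps a backing_dev_info pointer beyond its original lifetime is expected to pair bdi_get() with bdi_put(); a small illustrative sketch (the saved_bdi holder is an assumption):

#include <linux/backing-dev.h>

static struct backing_dev_info *saved_bdi;

static void my_save_bdi(struct backing_dev_info *bdi)
{
	saved_bdi = bdi_get(bdi);	/* takes a kref on the structure */
}

static void my_drop_bdi(void)
{
	bdi_put(saved_bdi);		/* releases it; may free the bdi */
	saved_bdi = NULL;
}
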
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index b20e3d5..2f1c690 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -593,9 +593,6 @@ struct bcma_sflash {
u32 blocksize;
u16 numblocks;
u32 size;
-
- struct mtd_info *mtd;
- void *priv;
};
#endif
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index f6505d8..8b9d6ff 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -63,6 +63,19 @@
})
/**
+ * FIELD_FIT() - check if value fits in the field
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to test against the field
+ *
+ * Return: true if @_val can fit inside @_mask, false if @_val is too big.
+ */
+#define FIELD_FIT(_mask, _val) \
+ ({ \
+ __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \
+ !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
+ })
+
+/**
* FIELD_PREP() - prepare a bitfield element
* @_mask: shifted mask defining the field's length and position
* @_val: value to put in the field
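
FIELD_FIT() slots in next to FIELD_PREP()/FIELD_GET(); a short sketch against a made-up register field (MYDEV_CTRL_SPEED is an assumption, the macros are the ones defined here):

#include <linux/bitfield.h>
#include <linux/bitops.h>

#define MYDEV_CTRL_SPEED	GENMASK(6, 4)	/* illustrative 3-bit field */

static int mydev_encode_speed(u32 *ctrl, u32 speed)
{
	/* Reject values too large for bits 6:4 instead of silently truncating. */
	if (!FIELD_FIT(MYDEV_CTRL_SPEED, speed))
		return -EINVAL;

	*ctrl &= ~MYDEV_CTRL_SPEED;
	*ctrl |= FIELD_PREP(MYDEV_CTRL_SPEED, speed);
	return 0;
}
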
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 4a2ab5d9..8e4df3d 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -22,6 +22,7 @@ struct blk_mq_hw_ctx {
unsigned long flags; /* BLK_MQ_F_* flags */
+ void *sched_data;
struct request_queue *queue;
struct blk_flush_queue *fq;
@@ -35,6 +36,7 @@ struct blk_mq_hw_ctx {
atomic_t wait_index;
struct blk_mq_tags *tags;
+ struct blk_mq_tags *sched_tags;
struct srcu_struct queue_rq_srcu;
@@ -60,7 +62,7 @@ struct blk_mq_hw_ctx {
struct blk_mq_tag_set {
unsigned int *mq_map;
- struct blk_mq_ops *ops;
+ const struct blk_mq_ops *ops;
unsigned int nr_hw_queues;
unsigned int queue_depth; /* max hw supported */
unsigned int reserved_tags;
@@ -151,11 +153,13 @@ enum {
BLK_MQ_F_SG_MERGE = 1 << 2,
BLK_MQ_F_DEFER_ISSUE = 1 << 4,
BLK_MQ_F_BLOCKING = 1 << 5,
+ BLK_MQ_F_NO_SCHED = 1 << 6,
BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
BLK_MQ_F_ALLOC_POLICY_BITS = 1,
BLK_MQ_S_STOPPED = 0,
BLK_MQ_S_TAG_ACTIVE = 1,
+ BLK_MQ_S_SCHED_RESTART = 2,
BLK_MQ_MAX_DEPTH = 10240,
@@ -179,14 +183,13 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
-void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_free_request(struct request *rq);
-void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
enum {
BLK_MQ_REQ_NOWAIT = (1 << 0), /* return when out of requests */
BLK_MQ_REQ_RESERVED = (1 << 1), /* allocate from reserved pool */
+ BLK_MQ_REQ_INTERNAL = (1 << 2), /* allocate internal/sched tag */
};
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 519ea2c..d703acb 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -162,6 +162,13 @@ enum req_opf {
/* write the zero filled sector many times */
REQ_OP_WRITE_ZEROES = 8,
+ /* SCSI passthrough using struct scsi_request */
+ REQ_OP_SCSI_IN = 32,
+ REQ_OP_SCSI_OUT = 33,
+ /* Driver private requests */
+ REQ_OP_DRV_IN = 34,
+ REQ_OP_DRV_OUT = 35,
+
REQ_OP_LAST,
};
@@ -221,6 +228,15 @@ static inline bool op_is_write(unsigned int op)
}
/*
+ * Check if the bio or request is one that needs special treatment in the
+ * flush state machine.
+ */
+static inline bool op_is_flush(unsigned int op)
+{
+ return op & (REQ_FUA | REQ_PREFLUSH);
+}
+
+/*
* Reads are always treated as synchronous, as are requests with the FUA or
* PREFLUSH flag. Other operations may be marked as synchronous using the
* REQ_SYNC flag.
@@ -232,22 +248,29 @@ static inline bool op_is_sync(unsigned int op)
}
typedef unsigned int blk_qc_t;
-#define BLK_QC_T_NONE -1U
-#define BLK_QC_T_SHIFT 16
+#define BLK_QC_T_NONE -1U
+#define BLK_QC_T_SHIFT 16
+#define BLK_QC_T_INTERNAL (1U << 31)
static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
return cookie != BLK_QC_T_NONE;
}
-static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
+static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num,
+ bool internal)
{
- return tag | (queue_num << BLK_QC_T_SHIFT);
+ blk_qc_t ret = tag | (queue_num << BLK_QC_T_SHIFT);
+
+ if (internal)
+ ret |= BLK_QC_T_INTERNAL;
+
+ return ret;
}
static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
- return cookie >> BLK_QC_T_SHIFT;
+ return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}
static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
@@ -255,6 +278,11 @@ static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}
+static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
+{
+ return (cookie & BLK_QC_T_INTERNAL) != 0;
+}
+
struct blk_issue_stat {
u64 time;
};
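
The widened cookie encoding can be checked directly; an illustrative round-trip of a tag, a hardware queue number and the new internal (scheduler tag) bit through the helpers above (the chosen values are arbitrary):

#include <linux/blk_types.h>
#include <linux/bug.h>

static void demo_qc_cookie(void)
{
	/* Tag 7 on hardware queue 2, allocated from the scheduler tags. */
	blk_qc_t cookie = blk_tag_to_qc_t(7, 2, true);

	WARN_ON(!blk_qc_t_valid(cookie));
	WARN_ON(blk_qc_t_to_tag(cookie) != 7);
	WARN_ON(blk_qc_t_to_queue_num(cookie) != 2);
	WARN_ON(!blk_qc_t_is_internal(cookie));
}
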
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8369564..aecca0e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -71,15 +71,6 @@ struct request_list {
};
/*
- * request command types
- */
-enum rq_cmd_type_bits {
- REQ_TYPE_FS = 1, /* fs request */
- REQ_TYPE_BLOCK_PC, /* scsi command */
- REQ_TYPE_DRV_PRIV, /* driver defined types from here */
-};
-
-/*
* request flags */
typedef __u32 __bitwise req_flags_t;
@@ -128,8 +119,6 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_NOMERGE_FLAGS \
(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
-#define BLK_MAX_CDB 16
-
/*
* Try to put the fields that are referenced together in the same cacheline.
*
@@ -147,13 +136,16 @@ struct request {
struct blk_mq_ctx *mq_ctx;
int cpu;
- unsigned cmd_type;
unsigned int cmd_flags; /* op and common flags */
req_flags_t rq_flags;
+
+ int internal_tag;
+
unsigned long atomic_flags;
/* the following two fields are internal, NEVER access directly */
unsigned int __data_len; /* total data len */
+ int tag;
sector_t __sector; /* sector cursor */
struct bio *bio;
@@ -222,20 +214,9 @@ struct request {
void *special; /* opaque pointer available for LLD use */
- int tag;
int errors;
- /*
- * when request is used as a packet command carrier
- */
- unsigned char __cmd[BLK_MAX_CDB];
- unsigned char *cmd;
- unsigned short cmd_len;
-
unsigned int extra_len; /* length of alignment and padding */
- unsigned int sense_len;
- unsigned int resid_len; /* residual count */
- void *sense;
unsigned long deadline;
struct list_head timeout_list;
@@ -252,6 +233,21 @@ struct request {
struct request *next_rq;
};
+static inline bool blk_rq_is_scsi(struct request *rq)
+{
+ return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT;
+}
+
+static inline bool blk_rq_is_private(struct request *rq)
+{
+ return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT;
+}
+
+static inline bool blk_rq_is_passthrough(struct request *rq)
+{
+ return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
+}
+
static inline unsigned short req_get_ioprio(struct request *req)
{
return req->ioprio;
@@ -271,6 +267,8 @@ typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);
+typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t);
+typedef void (exit_rq_fn)(struct request_queue *, struct request *);
enum blk_eh_timer_return {
BLK_EH_NOT_HANDLED,
@@ -333,6 +331,7 @@ struct queue_limits {
unsigned short logical_block_size;
unsigned short max_segments;
unsigned short max_integrity_segments;
+ unsigned short max_discard_segments;
unsigned char misaligned;
unsigned char discard_misaligned;
@@ -406,8 +405,10 @@ struct request_queue {
rq_timed_out_fn *rq_timed_out_fn;
dma_drain_needed_fn *dma_drain_needed;
lld_busy_fn *lld_busy_fn;
+ init_rq_fn *init_rq_fn;
+ exit_rq_fn *exit_rq_fn;
- struct blk_mq_ops *mq_ops;
+ const struct blk_mq_ops *mq_ops;
unsigned int *mq_map;
@@ -432,7 +433,8 @@ struct request_queue {
*/
struct delayed_work delay_work;
- struct backing_dev_info backing_dev_info;
+ struct backing_dev_info *backing_dev_info;
+ struct disk_devt *disk_devt;
/*
* The queue owner gets to use this for whatever they like.
@@ -569,7 +571,15 @@ struct request_queue {
struct list_head tag_set_list;
struct bio_set *bio_split;
+#ifdef CONFIG_BLK_DEBUG_FS
+ struct dentry *debugfs_dir;
+ struct dentry *mq_debugfs_dir;
+#endif
+
bool mq_sysfs_init_done;
+
+ size_t cmd_size;
+ void *rq_alloc_data;
};
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
@@ -600,6 +610,7 @@ struct request_queue {
#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */
#define QUEUE_FLAG_DAX 26 /* device supports DAX */
#define QUEUE_FLAG_STATS 27 /* track rq completion times */
+#define QUEUE_FLAG_RESTART 28 /* queue needs restart at completion */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -695,9 +706,10 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
REQ_FAILFAST_DRIVER))
-#define blk_account_rq(rq) \
- (((rq)->rq_flags & RQF_STARTED) && \
- ((rq)->cmd_type == REQ_TYPE_FS))
+static inline bool blk_account_rq(struct request *rq)
+{
+ return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
+}
#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
@@ -739,7 +751,7 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
}
}
-static inline unsigned int blk_queue_zone_size(struct request_queue *q)
+static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
{
return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}
@@ -772,7 +784,7 @@ static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
static inline bool rq_mergeable(struct request *rq)
{
- if (rq->cmd_type != REQ_TYPE_FS)
+ if (blk_rq_is_passthrough(rq))
return false;
if (req_op(rq) == REQ_OP_FLUSH)
@@ -910,7 +922,6 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
-extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
@@ -1000,6 +1011,19 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
return blk_rq_cur_bytes(rq) >> 9;
}
+/*
+ * Some commands like WRITE SAME have a payload or data transfer size which
+ * is different from the size of the request. Any driver that supports such
+ * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
+ * calculate the data transfer size.
+ */
+static inline unsigned int blk_rq_payload_bytes(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return rq->special_vec.bv_len;
+ return blk_rq_bytes(rq);
+}
+
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
int op)
{
@@ -1034,7 +1058,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
{
struct request_queue *q = rq->q;
- if (unlikely(rq->cmd_type != REQ_TYPE_FS))
+ if (blk_rq_is_passthrough(rq))
return q->limits.max_hw_sectors;
if (!q->limits.chunk_sectors ||
@@ -1116,14 +1140,15 @@ extern void blk_unprep_request(struct request *);
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
-extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
- request_fn_proc *, spinlock_t *);
+extern int blk_init_allocated_queue(struct request_queue *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_discard_segments(struct request_queue *,
+ unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
unsigned int max_discard_sectors);
@@ -1166,8 +1191,16 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
-extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
+/*
+ * Number of physical segments as sent to the device.
+ *
+ * Normally this is the number of discontiguous data segments sent by the
+ * submitter. But for data-less command like discard we might have no
+ * actual data segments submitted, but the driver might have to add its
+ * own special payload. In that case we still return 1 here so that this
+ * special payload will be mapped.
+ */
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
@@ -1175,6 +1208,15 @@ static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
return rq->nr_phys_segments;
}
+/*
+ * Number of discard segments (or ranges) the driver needs to fill in.
+ * Each discard bio merged into a request is counted as one segment.
+ */
+static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
+{
+ return max_t(unsigned short, rq->nr_phys_segments, 1);
+}
+
extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);
@@ -1363,6 +1405,11 @@ static inline unsigned short queue_max_segments(struct request_queue *q)
return q->limits.max_segments;
}
+static inline unsigned short queue_max_discard_segments(struct request_queue *q)
+{
+ return q->limits.max_discard_segments;
+}
+
static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
return q->limits.max_segment_size;
@@ -1536,12 +1583,12 @@ static inline bool bdev_is_zoned(struct block_device *bdev)
return false;
}
-static inline unsigned int bdev_zone_size(struct block_device *bdev)
+static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
if (q)
- return blk_queue_zone_size(q);
+ return blk_queue_zone_sectors(q);
return 0;
}
@@ -1607,6 +1654,25 @@ static inline bool bvec_gap_to_prev(struct request_queue *q,
return __bvec_gap_to_prev(q, bprv, offset);
}
+/*
+ * Check if the two bvecs from two bios can be merged to one segment.
+ * If yes, no need to check gap between the two bios since the 1st bio
+ * and the 1st bvec in the 2nd bio can be handled in one segment.
+ */
+static inline bool bios_segs_mergeable(struct request_queue *q,
+ struct bio *prev, struct bio_vec *prev_last_bv,
+ struct bio_vec *next_first_bv)
+{
+ if (!BIOVEC_PHYS_MERGEABLE(prev_last_bv, next_first_bv))
+ return false;
+ if (!BIOVEC_SEG_BOUNDARY(q, prev_last_bv, next_first_bv))
+ return false;
+ if (prev->bi_seg_back_size + next_first_bv->bv_len >
+ queue_max_segment_size(q))
+ return false;
+ return true;
+}
+
static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
struct bio *next)
{
@@ -1616,7 +1682,8 @@ static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
bio_get_last_bvec(prev, &pb);
bio_get_first_bvec(next, &nb);
- return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
+ if (!bios_segs_mergeable(q, prev, &pb, &nb))
+ return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}
return false;
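
A hedged sketch of how a driver's prep/accounting path might use the new request classification and segment/payload helpers; mydrv_prep_request() and the pr_debug() reporting are assumptions, the helpers are the ones added above.

#include <linux/blkdev.h>

static void mydrv_prep_request(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return;	/* SCSI or driver-private command, no fs-style handling */

	if (req_op(rq) == REQ_OP_DISCARD)
		/* One range per merged discard bio, at least one. */
		pr_debug("discard ranges: %u\n", blk_rq_nr_discard_segments(rq));
	else
		/* With RQF_SPECIAL_PAYLOAD (e.g. WRITE SAME) this differs from blk_rq_bytes(). */
		pr_debug("payload: %u bytes\n", blk_rq_payload_bytes(rq));
}
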
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index e417f08..d2e9085 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -30,9 +30,6 @@ struct blk_trace {
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(struct request_queue *);
-extern int do_blk_trace_setup(struct request_queue *q, char *name,
- dev_t dev, struct block_device *bdev,
- struct blk_user_trace_setup *buts);
extern __printf(2, 3)
void __trace_note_message(struct blk_trace *, const char *fmt, ...);
@@ -80,7 +77,6 @@ extern struct attribute_group blk_trace_attr_group;
#else /* !CONFIG_BLK_DEV_IO_TRACE */
# define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
# define blk_trace_shutdown(q) do { } while (0)
-# define do_blk_trace_setup(q, name, dev, bdev, buts) (-ENOTTY)
# define blk_add_driver_data(q, rq, data, len) do {} while (0)
# define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY)
# define blk_trace_startstop(q, start) (-ENOTTY)
@@ -110,16 +106,16 @@ struct compat_blk_user_trace_setup {
#endif
-#if defined(CONFIG_EVENT_TRACING) && defined(CONFIG_BLOCK)
+extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);
-static inline int blk_cmd_buf_len(struct request *rq)
+static inline sector_t blk_rq_trace_sector(struct request *rq)
{
- return (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? rq->cmd_len * 3 : 1;
+ return blk_rq_is_passthrough(rq) ? 0 : blk_rq_pos(rq);
}
-extern void blk_dump_cmd(char *buf, struct request *rq);
-extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);
-
-#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
+static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq)
+{
+ return blk_rq_is_passthrough(rq) ? 0 : blk_rq_sectors(rq);
+}
#endif
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 92bc89a..c970a25 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -21,20 +21,19 @@ struct cgroup_bpf {
*/
struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE];
+ bool disallow_override[MAX_BPF_ATTACH_TYPE];
};
void cgroup_bpf_put(struct cgroup *cgrp);
void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);
-void __cgroup_bpf_update(struct cgroup *cgrp,
- struct cgroup *parent,
- struct bpf_prog *prog,
- enum bpf_attach_type type);
+int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
+ struct bpf_prog *prog, enum bpf_attach_type type,
+ bool overridable);
/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
-void cgroup_bpf_update(struct cgroup *cgrp,
- struct bpf_prog *prog,
- enum bpf_attach_type type);
+int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
+ enum bpf_attach_type type, bool overridable);
int __cgroup_bpf_run_filter_skb(struct sock *sk,
struct sk_buff *skb,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f74ae68..909fc03 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -8,10 +8,12 @@
#define _LINUX_BPF_H 1
#include <uapi/linux/bpf.h>
+
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
+#include <linux/rbtree_latch.h>
struct perf_event;
struct bpf_map;
@@ -69,14 +71,14 @@ enum bpf_arg_type {
/* the following constraints used to prototype bpf_memcmp() and other
* functions that access data on eBPF program stack
*/
- ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
- ARG_PTR_TO_RAW_STACK, /* any pointer to eBPF program stack, area does not
- * need to be initialized, helper function must fill
- * all bytes or clear them in error case.
+ ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */
+ ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized,
+ * helper function must fill all bytes or clear
+ * them in error case.
*/
- ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
- ARG_CONST_STACK_SIZE_OR_ZERO, /* number of bytes accessed from stack or 0 */
+ ARG_CONST_SIZE, /* number of bytes accessed from memory */
+ ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */
ARG_PTR_TO_CTX, /* pointer to context */
ARG_ANYTHING, /* any (initialized) argument is ok */
@@ -161,9 +163,10 @@ struct bpf_verifier_ops {
enum bpf_reg_type *reg_type);
int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
const struct bpf_prog *prog);
- u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
- int src_reg, int ctx_off,
- struct bpf_insn *insn, struct bpf_prog *prog);
+ u32 (*convert_ctx_access)(enum bpf_access_type type,
+ const struct bpf_insn *src,
+ struct bpf_insn *dst,
+ struct bpf_prog *prog);
};
struct bpf_prog_type_list {
@@ -176,6 +179,8 @@ struct bpf_prog_aux {
atomic_t refcnt;
u32 used_map_cnt;
u32 max_ctx_offset;
+ struct latch_tree_node ksym_tnode;
+ struct list_head ksym_lnode;
const struct bpf_verifier_ops *ops;
struct bpf_map **used_maps;
struct bpf_prog *prog;
@@ -216,7 +221,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
-int bpf_prog_calc_digest(struct bpf_prog *fp);
+int bpf_prog_calc_tag(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
@@ -247,6 +252,8 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
+void *bpf_map_area_alloc(size_t size);
+void bpf_map_area_free(void *base);
extern int sysctl_unprivileged_bpf_disabled;
diff --git a/include/linux/bpf_trace.h b/include/linux/bpf_trace.h
new file mode 100644
index 0000000..b22efbd
--- /dev/null
+++ b/include/linux/bpf_trace.h
@@ -0,0 +1,7 @@
+#ifndef __LINUX_BPF_TRACE_H__
+#define __LINUX_BPF_TRACE_H__
+
+#include <trace/events/bpf.h>
+#include <trace/events/xdp.h>
+
+#endif /* __LINUX_BPF_TRACE_H__ */
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 4f7d8be..55e5171 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -17,6 +17,7 @@
#define PHY_ID_BCM5482 0x0143bcb0
#define PHY_ID_BCM5411 0x00206070
#define PHY_ID_BCM5421 0x002060e0
+#define PHY_ID_BCM54210E 0x600d84a0
#define PHY_ID_BCM5464 0x002060b0
#define PHY_ID_BCM5461 0x002060c0
#define PHY_ID_BCM54612E 0x03625e60
@@ -24,6 +25,7 @@
#define PHY_ID_BCM57780 0x03625d90
#define PHY_ID_BCM7250 0xae025280
+#define PHY_ID_BCM7278 0xae0251a0
#define PHY_ID_BCM7364 0xae025260
#define PHY_ID_BCM7366 0x600d8490
#define PHY_ID_BCM7346 0x600d8650
@@ -31,6 +33,7 @@
#define PHY_ID_BCM7425 0x600d86b0
#define PHY_ID_BCM7429 0x600d8730
#define PHY_ID_BCM7435 0x600d8750
+#define PHY_ID_BCM74371 0xae0252e0
#define PHY_ID_BCM7439 0x600d8480
#define PHY_ID_BCM7439_2 0xae025080
#define PHY_ID_BCM7445 0x600d8510
@@ -103,19 +106,17 @@
/*
* AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18)
*/
-#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
+#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x00
#define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400
#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800
-#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
-#define MII_BCM54XX_AUXCTL_MISC_RXD_RXC_SKEW 0x0100
-#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
-#define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000
-#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007
-#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12
-#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN (1 << 8)
-#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN (1 << 4)
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x07
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN 0x0010
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN 0x0100
+#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
+#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
+#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12
#define MII_BCM54XX_AUXCTL_SHDWSEL_MASK 0x0007
/*
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index 657a718..e34dde2 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -66,9 +66,8 @@ struct bsg_job {
void bsg_job_done(struct bsg_job *job, int result,
unsigned int reply_payload_rcv_len);
-int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name,
- bsg_job_fn *job_fn, int dd_job_size);
-void bsg_request_fn(struct request_queue *q);
+struct request_queue *bsg_setup_queue(struct device *dev, char *name,
+ bsg_job_fn *job_fn, int dd_job_size);
void bsg_job_put(struct bsg_job *job);
int __must_check bsg_job_get(struct bsg_job *job);
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index d67ab83..79591c3 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -243,12 +243,10 @@ static inline int block_page_mkwrite_return(int err)
{
if (err == 0)
return VM_FAULT_LOCKED;
- if (err == -EFAULT)
+ if (err == -EFAULT || err == -EAGAIN)
return VM_FAULT_NOPAGE;
if (err == -ENOMEM)
return VM_FAULT_OOM;
- if (err == -EAGAIN)
- return VM_FAULT_RETRY;
/* -ENOSPC, -EDQUOT, -EIO ... */
return VM_FAULT_SIGBUS;
}
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index a087500..df08a41 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -45,10 +45,9 @@ struct can_proto {
extern int can_proto_register(const struct can_proto *cp);
extern void can_proto_unregister(const struct can_proto *cp);
-extern int can_rx_register(struct net_device *dev, canid_t can_id,
- canid_t mask,
- void (*func)(struct sk_buff *, void *),
- void *data, char *ident);
+int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
+ void (*func)(struct sk_buff *, void *),
+ void *data, char *ident, struct sock *sk);
extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
canid_t mask,
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 5f52709..141b05a 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -38,6 +38,13 @@ struct can_priv {
struct can_bittiming bittiming, data_bittiming;
const struct can_bittiming_const *bittiming_const,
*data_bittiming_const;
+ const u16 *termination_const;
+ unsigned int termination_const_cnt;
+ u16 termination;
+ const u32 *bitrate_const;
+ unsigned int bitrate_const_cnt;
+ const u32 *data_bitrate_const;
+ unsigned int data_bitrate_const_cnt;
struct can_clock clock;
enum can_state state;
@@ -53,6 +60,7 @@ struct can_priv {
int (*do_set_bittiming)(struct net_device *dev);
int (*do_set_data_bittiming)(struct net_device *dev);
int (*do_set_mode)(struct net_device *dev, enum can_mode mode);
+ int (*do_set_termination)(struct net_device *dev, u16 term);
int (*do_get_state)(const struct net_device *dev,
enum can_state *state);
int (*do_get_berr_counter)(const struct net_device *dev,
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
new file mode 100644
index 0000000..cb31683
--- /dev/null
+++ b/include/linux/can/rx-offload.h
@@ -0,0 +1,59 @@
+/*
+ * linux/can/rx-offload.h
+ *
+ * Copyright (c) 2014 David Jander, Protonic Holland
+ * Copyright (c) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAN_RX_OFFLOAD_H
+#define _CAN_RX_OFFLOAD_H
+
+#include <linux/netdevice.h>
+#include <linux/can.h>
+
+struct can_rx_offload {
+ struct net_device *dev;
+
+ unsigned int (*mailbox_read)(struct can_rx_offload *offload, struct can_frame *cf,
+ u32 *timestamp, unsigned int mb);
+
+ struct sk_buff_head skb_queue;
+ u32 skb_queue_len_max;
+
+ unsigned int mb_first;
+ unsigned int mb_last;
+
+ struct napi_struct napi;
+
+ bool inc;
+};
+
+int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload);
+int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight);
+int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg);
+int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
+int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb);
+void can_rx_offload_reset(struct can_rx_offload *offload);
+void can_rx_offload_del(struct can_rx_offload *offload);
+void can_rx_offload_enable(struct can_rx_offload *offload);
+
+static inline void can_rx_offload_schedule(struct can_rx_offload *offload)
+{
+ napi_schedule(&offload->napi);
+}
+
+static inline void can_rx_offload_disable(struct can_rx_offload *offload)
+{
+ napi_disable(&offload->napi);
+}
+
+#endif /* !_CAN_RX_OFFLOAD_H */
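
A rough sketch of how a CAN controller driver might adopt the FIFO flavour of the new rx-offload helper; the mycan_* symbols, the queue weight and the private-struct layout are assumptions, only the can_rx_offload_*() calls come from this header.

#include <linux/interrupt.h>
#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

struct mycan_priv {
	struct can_priv can;			/* must stay first */
	struct can_rx_offload offload;
};

static unsigned int mycan_mailbox_read(struct can_rx_offload *offload,
				       struct can_frame *cf,
				       u32 *timestamp, unsigned int mb)
{
	/* Read one frame from mailbox @mb into @cf; return 1 if a frame was read. */
	return 0;
}

static int mycan_open(struct net_device *dev)
{
	struct mycan_priv *priv = netdev_priv(dev);
	int err;

	priv->offload.mailbox_read = mycan_mailbox_read;
	err = can_rx_offload_add_fifo(dev, &priv->offload, 64);
	if (err)
		return err;

	can_rx_offload_enable(&priv->offload);
	return 0;
}

static irqreturn_t mycan_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct mycan_priv *priv = netdev_priv(dev);

	/* Drain the RX FIFO from hard-IRQ context; frames complete via NAPI. */
	can_rx_offload_irq_offload_fifo(&priv->offload);
	return IRQ_HANDLED;
}
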
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index 8609d57..6e8f209 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -36,7 +36,7 @@ struct packet_command
/* Uniform cdrom data structures for cdrom.c */
struct cdrom_device_info {
- struct cdrom_device_ops *ops; /* link to device_ops */
+ const struct cdrom_device_ops *ops; /* link to device_ops */
struct list_head list; /* linked list of all device_info */
struct gendisk *disk; /* matching block layer disk */
void *handle; /* driver-dependent data */
@@ -87,7 +87,6 @@ struct cdrom_device_ops {
/* driver specifications */
const int capability; /* capability flags */
- int n_minors; /* number of active minor devices */
/* handle uniform packets for scsi type devices (scsi,atapi) */
int (*generic_packet) (struct cdrom_device_info *,
struct packet_command *);
@@ -123,6 +122,8 @@ extern int cdrom_mode_sense(struct cdrom_device_info *cdi,
int page_code, int page_control);
extern void init_cdrom_command(struct packet_command *cgc,
void *buffer, int len, int type);
+extern int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi,
+ struct packet_command *cgc);
/* The SCSI spec says there could be 256 slots. */
#define CDROM_MAX_SLOTS 256
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 0d442e3..5d3053c 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -224,4 +224,13 @@ static inline void tick_setup_hrtimer_broadcast(void) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
+#define CLOCKEVENT_OF_DECLARE(name, compat, fn) \
+ OF_DECLARE_1_RET(clkevt, name, compat, fn)
+
+#ifdef CONFIG_CLKEVT_PROBE
+extern int clockevent_probe(void);
+#else
+static inline int clockevent_probe(void) { return 0; }
+#endif
+
#endif /* _LINUX_CLOCKCHIPS_H */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index e315d04..cfc7584 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -62,6 +62,8 @@ struct module;
* @archdata: arch-specific data
* @suspend: suspend function for the clocksource, if necessary
* @resume: resume function for the clocksource, if necessary
+ * @mark_unstable: Optional function to inform the clocksource driver that
+ * the watchdog marked the clocksource unstable
* @owner: module reference, must be set by clocksource in modules
*
* Note: This struct is not used in hotpathes of the timekeeping code
@@ -93,6 +95,7 @@ struct clocksource {
unsigned long flags;
void (*suspend)(struct clocksource *cs);
void (*resume)(struct clocksource *cs);
+ void (*mark_unstable)(struct clocksource *cs);
/* private: */
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 6360939..9e40be5 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -731,7 +731,25 @@ asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
static inline bool in_compat_syscall(void) { return is_compat_task(); }
#endif
-#else
+/**
+ * ns_to_compat_timeval - Compat version of ns_to_timeval
+ * @nsec: the nanoseconds value to be converted
+ *
+ * Returns the compat_timeval representation of the nsec parameter.
+ */
+static inline struct compat_timeval ns_to_compat_timeval(s64 nsec)
+{
+ struct timeval tv;
+ struct compat_timeval ctv;
+
+ tv = ns_to_timeval(nsec);
+ ctv.tv_sec = tv.tv_sec;
+ ctv.tv_usec = tv.tv_usec;
+
+ return ctv;
+}
+
+#else /* !CONFIG_COMPAT */
#define is_compat_task() (0)
static inline bool in_compat_syscall(void) { return false; }
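
ns_to_compat_timeval() is a thin conversion; an illustrative use when copying a nanosecond counter out to a 32-bit user structure (the ioctl-style helper around it is an assumption):

#include <linux/compat.h>
#include <linux/uaccess.h>

static int my_put_compat_runtime(struct compat_timeval __user *utv, s64 runtime_ns)
{
	struct compat_timeval ctv = ns_to_compat_timeval(runtime_ns);

	return copy_to_user(utv, &ctv, sizeof(ctv)) ? -EFAULT : 0;
}
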
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index cf0fa5d..91c30cb 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -27,7 +27,11 @@ extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
#else /* __CHECKER__ */
-# define __user
+# ifdef STRUCTLEAK_PLUGIN
+# define __user __attribute__((user))
+# else
+# define __user
+# endif
# define __kernel
# define __safe
# define __force
diff --git a/include/linux/console.h b/include/linux/console.h
index 9c26c66..5949d18 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -73,6 +73,10 @@ struct consw {
u16 *(*con_screen_pos)(struct vc_data *, int);
unsigned long (*con_getxy)(struct vc_data *, unsigned long, int *, int *);
/*
+ * Flush the video console driver's scrollback buffer
+ */
+ void (*con_flush_scrollback)(struct vc_data *);
+ /*
* Prepare the console for the debugger. This includes, but is not
* limited to, unblanking the console, loading an appropriate
* palette, and allowing debugger generated output.
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index d016a12..28ffa94 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -14,6 +14,7 @@ struct coredump_params;
extern int dump_skip(struct coredump_params *cprm, size_t nr);
extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
extern int dump_align(struct coredump_params *cprm, int align);
+extern void dump_truncate(struct coredump_params *cprm);
#ifdef CONFIG_COREDUMP
extern void do_coredump(const siginfo_t *siginfo);
#else
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 7e05c5e..87165f0 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -31,7 +31,7 @@
#define CPUFREQ_ETERNAL (-1)
#define CPUFREQ_NAME_LEN 16
-/* Print length for names. Extra 1 space for accomodating '\n' in prints */
+/* Print length for names. Extra 1 space for accommodating '\n' in prints */
#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
struct cpufreq_governor;
@@ -115,7 +115,7 @@ struct cpufreq_policy {
* guarantee that frequency can be changed on any CPU sharing the
* policy and that the change will affect all of the policy CPUs then.
* - fast_switch_enabled is to be set by governors that support fast
- * freqnency switching with the help of cpufreq_enable_fast_switch().
+ * frequency switching with the help of cpufreq_enable_fast_switch().
*/
bool fast_switch_possible;
bool fast_switch_enabled;
@@ -415,9 +415,6 @@ static inline void cpufreq_resume(void) {}
/* Policy Notifiers */
#define CPUFREQ_ADJUST (0)
#define CPUFREQ_NOTIFY (1)
-#define CPUFREQ_START (2)
-#define CPUFREQ_CREATE_POLICY (3)
-#define CPUFREQ_REMOVE_POLICY (4)
#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 20bfefb..bb790c4 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -8,9 +8,7 @@ enum cpuhp_state {
CPUHP_CREATE_THREADS,
CPUHP_PERF_PREPARE,
CPUHP_PERF_X86_PREPARE,
- CPUHP_PERF_X86_UNCORE_PREP,
CPUHP_PERF_X86_AMD_UNCORE_PREP,
- CPUHP_PERF_X86_RAPL_PREP,
CPUHP_PERF_BFIN,
CPUHP_PERF_POWER,
CPUHP_PERF_SUPERH,
@@ -74,6 +72,8 @@ enum cpuhp_state {
CPUHP_ZCOMP_PREPARE,
CPUHP_TIMERS_DEAD,
CPUHP_MIPS_SOC_PREPARE,
+ CPUHP_BP_PREPARE_DYN,
+ CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
CPUHP_BRINGUP_CPU,
CPUHP_AP_IDLE_DEAD,
CPUHP_AP_OFFLINE,
@@ -84,7 +84,6 @@ enum cpuhp_state {
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
- CPUHP_AP_PERF_X86_UNCORE_STARTING,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
@@ -138,6 +137,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_ARM_CCI_ONLINE,
CPUHP_AP_PERF_ARM_CCN_ONLINE,
CPUHP_AP_PERF_ARM_L2X0_ONLINE,
+ CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
CPUHP_AP_ONLINE_DYN,
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index da346f2..fc1e5d7 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -62,6 +62,7 @@ struct cpuidle_state {
};
/* Idle State Flags */
+#define CPUIDLE_FLAG_NONE (0x00)
#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index c717f5e..96f1e88 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -560,7 +560,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
static inline int cpumask_parse_user(const char __user *buf, int len,
struct cpumask *dstp)
{
- return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids);
+ return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}
/**
@@ -575,7 +575,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
struct cpumask *dstp)
{
return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
- nr_cpu_ids);
+ nr_cpumask_bits);
}
/**
@@ -590,7 +590,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
char *nl = strchr(buf, '\n');
unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
- return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids);
+ return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}
/**
@@ -602,7 +602,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
*/
static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
- return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids);
+ return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
}
/**
@@ -649,11 +649,15 @@ static inline size_t cpumask_size(void)
* used. Please use this_cpu_cpumask_var_t in those cases. The direct use
* of this_cpu_ptr() or this_cpu_read() will lead to failures when the
* other type of cpumask_var_t implementation is configured.
+ *
+ * Please also note that __cpumask_var_read_mostly can be used to declare
+ * a cpumask_var_t variable itself (not its content) as read mostly.
*/
#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;
-#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)
+#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)
+#define __cpumask_var_read_mostly __read_mostly
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
@@ -667,6 +671,7 @@ void free_bootmem_cpumask_var(cpumask_var_t mask);
typedef struct cpumask cpumask_var_t[1];
#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
+#define __cpumask_var_read_mostly
static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
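A quick sketch (not from this patch) of the new __cpumask_var_read_mostly annotation together with the list parser; the variable name and the "0-3" default are made up.

static cpumask_var_t tracked_cpus __cpumask_var_read_mostly;

static int __init tracked_init(void)
{
	if (!zalloc_cpumask_var(&tracked_cpus, GFP_KERNEL))
		return -ENOMEM;

	/* parsing is now bounded by nr_cpumask_bits rather than nr_cpu_ids */
	return cpulist_parse("0-3", tracked_cpus);
}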
diff --git a/include/linux/cputime.h b/include/linux/cputime.h
index f2eb2ee..a691dc4 100644
--- a/include/linux/cputime.h
+++ b/include/linux/cputime.h
@@ -1,6 +1,7 @@
#ifndef __LINUX_CPUTIME_H
#define __LINUX_CPUTIME_H
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/cputime.h>
#ifndef cputime_to_nsecs
@@ -8,9 +9,5 @@
(cputime_to_usecs(__ct) * NSEC_PER_USEC)
#endif
-#ifndef nsecs_to_cputime
-# define nsecs_to_cputime(__nsecs) \
- usecs_to_cputime((__nsecs) / NSEC_PER_USEC)
-#endif
-
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#endif /* __LINUX_CPUTIME_H */
diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h
index f475428..3252799 100644
--- a/include/linux/cryptohash.h
+++ b/include/linux/cryptohash.h
@@ -15,6 +15,4 @@ void sha_transform(__u32 *digest, const char *data, __u32 *W);
void md5_transform(__u32 *hash, __u32 const *in);
-__u32 half_md4_transform(__u32 buf[4], __u32 const in[8]);
-
#endif
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 014cc56..9d571ac 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -52,8 +52,7 @@ extern struct srcu_struct debugfs_srcu;
* Must only be called under the protection established by
* debugfs_use_file_start().
*/
-static inline const struct file_operations *
-debugfs_real_fops(const struct file *filp)
+static inline const struct file_operations *debugfs_real_fops(const struct file *filp)
__must_hold(&debugfs_srcu)
{
/*
@@ -80,6 +79,8 @@ static const struct file_operations __fops = { \
#if defined(CONFIG_DEBUG_FS)
+struct dentry *debugfs_lookup(const char *name, struct dentry *parent);
+
struct dentry *debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops);
@@ -181,6 +182,12 @@ ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf,
* want to duplicate the design decision mistakes of procfs and devfs again.
*/
+static inline struct dentry *debugfs_lookup(const char *name,
+ struct dentry *parent)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline struct dentry *debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops)
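For illustration (directory name hypothetical): the new debugfs_lookup() lets a driver check for an existing entry before creating it; on success it returns a referenced dentry.

static void mydriver_debugfs_init(void)
{
	struct dentry *dir = debugfs_lookup("mydriver", NULL);

	if (!IS_ERR_OR_NULL(dir)) {
		/* already created elsewhere; drop the reference the lookup took */
		dput(dir);
		return;
	}

	debugfs_create_dir("mydriver", NULL);
}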
diff --git a/include/linux/delay.h b/include/linux/delay.h
index a6ecb34..2ecb3c4 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -5,6 +5,17 @@
* Copyright (C) 1993 Linus Torvalds
*
* Delay routines, using a pre-computed "loops_per_jiffy" value.
+ *
+ * Please note that ndelay(), udelay() and mdelay() may return early for
+ * several reasons:
+ * 1. computed loops_per_jiffy too low (due to the time taken to
+ * execute the timer interrupt.)
+ * 2. cache behaviour affecting the time it takes to execute the
+ * loop function.
+ * 3. CPU clock rate changes.
+ *
+ * Please see this thread:
+ * http://lists.openwall.net/linux-kernel/2011/01/09/56
*/
#include <linux/kernel.h>
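A small sketch of what the new comment implies in practice; TCONV_US is a hypothetical device timing, not taken from the patch.

#define TCONV_US	10	/* hypothetical conversion time in microseconds */

static void wait_for_conversion(void)
{
	/* add ~20% slack because udelay() may return slightly early */
	udelay(TCONV_US + TCONV_US / 5);
}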
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 6cee17c..00e60f7 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -17,6 +17,7 @@
#ifndef _LINUX_DELAYACCT_H
#define _LINUX_DELAYACCT_H
+#include <uapi/linux/taskstats.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 2de4e2e..e0acb0e 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -104,6 +104,8 @@ struct devfreq_dev_profile {
* struct devfreq_governor - Devfreq policy governor
* @node: list node - contains registered devfreq governors
* @name: Governor's name
+ * @immutable: Immutable flag for governor. If the value is 1,
+ *			this governor is never changeable to another governor.
* @get_target_freq: Returns desired operating frequency for the device.
* Basically, get_target_freq will run
* devfreq_dev_profile.get_dev_status() to get the
@@ -121,6 +123,7 @@ struct devfreq_governor {
struct list_head node;
const char name[DEVFREQ_NAME_LEN];
+ const unsigned int immutable;
int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
int (*event_handler)(struct devfreq *devfreq,
unsigned int event, void *data);
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index ef7962e..a7e6903 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -55,8 +55,6 @@ typedef void (*dm_dtr_fn) (struct dm_target *ti);
* = 2: The target wants to push back the io
*/
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
-typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
- union map_info *map_context);
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
struct request *rq,
union map_info *map_context,
@@ -163,7 +161,6 @@ struct target_type {
dm_ctr_fn ctr;
dm_dtr_fn dtr;
dm_map_fn map;
- dm_map_request_fn map_rq;
dm_clone_and_map_request_fn clone_and_map_rq;
dm_release_clone_request_fn release_clone_rq;
dm_endio_fn end_io;
diff --git a/include/linux/device.h b/include/linux/device.h
index 491b4c0c..bd684fc 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -88,6 +88,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
*
* @suspend: Called when a device on this bus wants to go to sleep mode.
* @resume: Called to bring a device on this bus out of sleep mode.
+ * @num_vf: Called to find out how many virtual functions a device on this
+ * bus supports.
* @pm: Power management operations of this bus, callback the specific
* device driver's pm-ops.
* @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
@@ -127,6 +129,8 @@ struct bus_type {
int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev);
+ int (*num_vf)(struct device *dev);
+
const struct dev_pm_ops *pm;
const struct iommu_ops *iommu_ops;
@@ -1140,6 +1144,13 @@ extern int device_online(struct device *dev);
extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+static inline int dev_num_vf(struct device *dev)
+{
+ if (dev->bus && dev->bus->num_vf)
+ return dev->bus->num_vf(dev);
+ return 0;
+}
+
/*
* Root device objects for grouping under /sys/devices
*/
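A sketch (all names invented) of a bus providing the new callback; callers then go through dev_num_vf(), which falls back to 0 when the bus does not implement it.

static int foo_bus_num_vf(struct device *dev)
{
	struct foo_device *fdev = to_foo_device(dev);	/* assumed helper */

	return fdev->num_vfs;				/* assumed field */
}

static struct bus_type foo_bus_type = {
	.name	= "foo",
	.num_vf	= foo_bus_num_vf,
};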
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 7f7e9a7..5725c94 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -27,6 +27,7 @@ int iommu_dma_init(void);
/* Domain management interface for IOMMU drivers */
int iommu_get_dma_cookie(struct iommu_domain *domain);
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
void iommu_put_dma_cookie(struct iommu_domain *domain);
/* Setup call for arch DMA mapping code */
@@ -34,7 +35,8 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
u64 size, struct device *dev);
/* General helpers for DMA-API <-> IOMMU-API interaction */
-int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);
+int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
+ unsigned long attrs);
/*
* These implement the bulk of the relevant DMA mapping callbacks, but require
@@ -65,7 +67,6 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs);
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs);
-int iommu_dma_supported(struct device *dev, u64 mask);
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
/* The DMA API isn't _quite_ the whole story, though... */
@@ -86,6 +87,11 @@ static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
return -ENODEV;
}
+static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
+{
+ return -ENODEV;
+}
+
static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
{
}
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 10c5a17..c24721a 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -63,6 +63,13 @@
#define DMA_ATTR_NO_WARN (1UL << 8)
/*
+ * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
+ * accessible at an elevated privilege level (and ideally inaccessible or
+ * at least read-only at lesser-privileged levels).
+ */
+#define DMA_ATTR_PRIVILEGED (1UL << 9)
+
+/*
* A dma_addr_t can hold any valid DMA or bus address for the platform.
* It can be given to a device to use as a DMA source or target. A CPU cannot
* reference a dma_addr_t directly because there may be translation between
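For illustration only: passing the new attribute through the existing dma_alloc_attrs() interface; the buffer name and size are arbitrary.

static int alloc_privileged_queue(struct device *dev)
{
	void *queue;
	dma_addr_t queue_dma;

	queue = dma_alloc_attrs(dev, 4096, &queue_dma, GFP_KERNEL,
				DMA_ATTR_PRIVILEGED);
	if (!queue)
		return -ENOMEM;

	/* ... hand queue/queue_dma to the device ... */
	return 0;
}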
diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h
index ccfd0c3..b63b258 100644
--- a/include/linux/dma/dw.h
+++ b/include/linux/dma/dw.h
@@ -23,6 +23,7 @@ struct dw_dma;
/**
* struct dw_dma_chip - representation of DesignWare DMA controller hardware
* @dev: struct device of the DMA controller
+ * @id: instance ID
* @irq: irq line
* @regs: memory mapped I/O space
* @clk: hclk clock
@@ -31,6 +32,7 @@ struct dw_dma;
*/
struct dw_dma_chip {
struct device *dev;
+ int id;
int irq;
void __iomem *regs;
struct clk *clk;
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index feee6ec..5336808 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -894,6 +894,17 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
len, flags);
}
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy)
+ return NULL;
+
+ return chan->device->device_prep_dma_memcpy(chan, dest, src,
+ len, flags);
+}
+
static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
struct dma_chan *chan,
struct scatterlist *dst_sg, unsigned int dst_nents,
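A sketch of driving the new dmaengine_prep_dma_memcpy() wrapper; the channel and DMA addresses are assumed to have been set up elsewhere.

static int start_memcpy(struct dma_chan *chan, dma_addr_t dst_dma,
			dma_addr_t src_dma, size_t len)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -EIO;	/* channel or driver cannot do memcpy */

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}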
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 07c52c0..5b6adf9 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -190,8 +190,8 @@ static inline char *mc_event_error_type(const unsigned int err_type)
* part of the memory details to the memory controller.
* @MEM_RMBS: Rambus DRAM, used on a few Pentium III/IV controllers.
* @MEM_DDR2: DDR2 RAM, as described at JEDEC JESD79-2F.
- * Those memories are labed as "PC2-" instead of "PC" to
- * differenciate from DDR.
+ * Those memories are labeled as "PC2-" instead of "PC" to
+ * differentiate from DDR.
* @MEM_FB_DDR2: Fully-Buffered DDR2, as described at JEDEC Std No. 205
* and JESD206.
* Those memories are accessed per DIMM slot, and not by
diff --git a/include/linux/efi-bgrt.h b/include/linux/efi-bgrt.h
index 051b21f..2fd3993 100644
--- a/include/linux/efi-bgrt.h
+++ b/include/linux/efi-bgrt.h
@@ -1,20 +1,19 @@
#ifndef _LINUX_EFI_BGRT_H
#define _LINUX_EFI_BGRT_H
-#ifdef CONFIG_ACPI_BGRT
-
#include <linux/acpi.h>
-void efi_bgrt_init(void);
+#ifdef CONFIG_ACPI_BGRT
+
+void efi_bgrt_init(struct acpi_table_header *table);
/* The BGRT data itself; only valid if bgrt_image != NULL. */
-extern void *bgrt_image;
extern size_t bgrt_image_size;
-extern struct acpi_table_bgrt *bgrt_tab;
+extern struct acpi_table_bgrt bgrt_tab;
#else /* !CONFIG_ACPI_BGRT */
-static inline void efi_bgrt_init(void) {}
+static inline void efi_bgrt_init(struct acpi_table_header *table) {}
#endif /* !CONFIG_ACPI_BGRT */
diff --git a/include/linux/efi.h b/include/linux/efi.h
index a07a476..94d34e0 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -103,6 +103,7 @@ typedef struct {
#define EFI_PAGE_SHIFT 12
#define EFI_PAGE_SIZE (1UL << EFI_PAGE_SHIFT)
+#define EFI_PAGES_MAX (U64_MAX >> EFI_PAGE_SHIFT)
typedef struct {
u32 type;
@@ -508,24 +509,6 @@ typedef struct {
u64 query_variable_info;
} efi_runtime_services_64_t;
-typedef struct {
- efi_table_hdr_t hdr;
- void *get_time;
- void *set_time;
- void *get_wakeup_time;
- void *set_wakeup_time;
- void *set_virtual_address_map;
- void *convert_pointer;
- void *get_variable;
- void *get_next_variable;
- void *set_variable;
- void *get_next_high_mono_count;
- void *reset_system;
- void *update_capsule;
- void *query_capsule_caps;
- void *query_variable_info;
-} efi_runtime_services_t;
-
typedef efi_status_t efi_get_time_t (efi_time_t *tm, efi_time_cap_t *tc);
typedef efi_status_t efi_set_time_t (efi_time_t *tm);
typedef efi_status_t efi_get_wakeup_time_t (efi_bool_t *enabled, efi_bool_t *pending,
@@ -560,6 +543,24 @@ typedef efi_status_t efi_query_variable_store_t(u32 attributes,
unsigned long size,
bool nonblocking);
+typedef struct {
+ efi_table_hdr_t hdr;
+ efi_get_time_t *get_time;
+ efi_set_time_t *set_time;
+ efi_get_wakeup_time_t *get_wakeup_time;
+ efi_set_wakeup_time_t *set_wakeup_time;
+ efi_set_virtual_address_map_t *set_virtual_address_map;
+ void *convert_pointer;
+ efi_get_variable_t *get_variable;
+ efi_get_next_variable_t *get_next_variable;
+ efi_set_variable_t *set_variable;
+ efi_get_next_high_mono_count_t *get_next_high_mono_count;
+ efi_reset_system_t *reset_system;
+ efi_update_capsule_t *update_capsule;
+ efi_query_capsule_caps_t *query_capsule_caps;
+ efi_query_variable_info_t *query_variable_info;
+} efi_runtime_services_t;
+
void efi_native_runtime_setup(void);
/*
@@ -610,6 +611,9 @@ void efi_native_runtime_setup(void);
#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0)
+#define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f)
+#define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23)
+
/*
* This GUID is used to pass to the kernel proper the struct screen_info
* structure that was populated by the stub based on the GOP protocol instance
@@ -950,6 +954,7 @@ static inline efi_status_t efi_query_variable_store(u32 attributes,
#endif
extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
+extern phys_addr_t __init efi_memmap_alloc(unsigned int num_entries);
extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
extern void __init efi_memmap_unmap(void);
@@ -1063,6 +1068,7 @@ extern int __init efi_setup_pcdp_console(char *);
#define EFI_ARCH_1 7 /* First arch-specific bit */
#define EFI_DBG 8 /* Print additional debug info at runtime */
#define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */
+#define EFI_MEM_ATTR 10 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */
#ifdef CONFIG_EFI
/*
@@ -1238,17 +1244,17 @@ struct efivar_entry {
bool deleting;
};
-struct efi_simple_text_output_protocol_32 {
+typedef struct {
u32 reset;
u32 output_string;
u32 test_string;
-};
+} efi_simple_text_output_protocol_32_t;
-struct efi_simple_text_output_protocol_64 {
+typedef struct {
u64 reset;
u64 output_string;
u64 test_string;
-};
+} efi_simple_text_output_protocol_64_t;
struct efi_simple_text_output_protocol {
void *reset;
@@ -1474,6 +1480,14 @@ efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
bool efi_runtime_disabled(void);
extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
+enum efi_secureboot_mode {
+ efi_secureboot_mode_unset,
+ efi_secureboot_mode_unknown,
+ efi_secureboot_mode_disabled,
+ efi_secureboot_mode_enabled,
+};
+enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table);
+
/*
* Arch code can implement the following three template macros, avoiding
 * repetition for the void/non-void return cases of {__,}efi_call_virt():
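A rough sketch (EFI stub context, helper name invented) of consuming the new efi_get_secureboot() interface.

static bool insecure_options_allowed(efi_system_table_t *sys_table)
{
	/* treat "unknown" the same as "enabled", i.e. stay locked down */
	return efi_get_secureboot(sys_table) == efi_secureboot_mode_disabled;
}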
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index b276e9e..aebecc4 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -9,12 +9,22 @@
struct io_cq;
struct elevator_type;
-typedef int (elevator_merge_fn) (struct request_queue *, struct request **,
+/*
+ * Return values from elevator merger
+ */
+enum elv_merge {
+ ELEVATOR_NO_MERGE = 0,
+ ELEVATOR_FRONT_MERGE = 1,
+ ELEVATOR_BACK_MERGE = 2,
+ ELEVATOR_DISCARD_MERGE = 3,
+};
+
+typedef enum elv_merge (elevator_merge_fn) (struct request_queue *, struct request **,
struct bio *);
typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *);
-typedef void (elevator_merged_fn) (struct request_queue *, struct request *, int);
+typedef void (elevator_merged_fn) (struct request_queue *, struct request *, enum elv_merge);
typedef int (elevator_allow_bio_merge_fn) (struct request_queue *,
struct request *, struct bio *);
@@ -77,6 +87,34 @@ struct elevator_ops
elevator_registered_fn *elevator_registered_fn;
};
+struct blk_mq_alloc_data;
+struct blk_mq_hw_ctx;
+
+struct elevator_mq_ops {
+ int (*init_sched)(struct request_queue *, struct elevator_type *);
+ void (*exit_sched)(struct elevator_queue *);
+
+ bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
+ bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
+ int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
+ void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
+ void (*requests_merged)(struct request_queue *, struct request *, struct request *);
+ struct request *(*get_request)(struct request_queue *, unsigned int, struct blk_mq_alloc_data *);
+ void (*put_request)(struct request *);
+ void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
+ struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
+ bool (*has_work)(struct blk_mq_hw_ctx *);
+ void (*completed_request)(struct blk_mq_hw_ctx *, struct request *);
+ void (*started_request)(struct request *);
+ void (*requeue_request)(struct request *);
+ struct request *(*former_request)(struct request_queue *, struct request *);
+ struct request *(*next_request)(struct request_queue *, struct request *);
+ int (*get_rq_priv)(struct request_queue *, struct request *, struct bio *);
+ void (*put_rq_priv)(struct request_queue *, struct request *);
+ void (*init_icq)(struct io_cq *);
+ void (*exit_icq)(struct io_cq *);
+};
+
#define ELV_NAME_MAX (16)
struct elv_fs_entry {
@@ -94,12 +132,16 @@ struct elevator_type
struct kmem_cache *icq_cache;
/* fields provided by elevator implementation */
- struct elevator_ops ops;
+ union {
+ struct elevator_ops sq;
+ struct elevator_mq_ops mq;
+ } ops;
size_t icq_size; /* see iocontext.h */
size_t icq_align; /* ditto */
struct elv_fs_entry *elevator_attrs;
char elevator_name[ELV_NAME_MAX];
struct module *elevator_owner;
+ bool uses_mq;
/* managed by elevator core */
char icq_cache_name[ELV_NAME_MAX + 5]; /* elvname + "_io_cq" */
@@ -123,6 +165,7 @@ struct elevator_queue
struct kobject kobj;
struct mutex sysfs_lock;
unsigned int registered:1;
+ unsigned int uses_mq:1;
DECLARE_HASHTABLE(hash, ELV_HASH_BITS);
};
@@ -133,12 +176,15 @@ extern void elv_dispatch_sort(struct request_queue *, struct request *);
extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
extern void elv_add_request(struct request_queue *, struct request *, int);
extern void __elv_add_request(struct request_queue *, struct request *, int);
-extern int elv_merge(struct request_queue *, struct request **, struct bio *);
+extern enum elv_merge elv_merge(struct request_queue *, struct request **,
+ struct bio *);
extern void elv_merge_requests(struct request_queue *, struct request *,
struct request *);
-extern void elv_merged_request(struct request_queue *, struct request *, int);
+extern void elv_merged_request(struct request_queue *, struct request *,
+ enum elv_merge);
extern void elv_bio_merged(struct request_queue *q, struct request *,
struct bio *);
+extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
extern void elv_requeue_request(struct request_queue *, struct request *);
extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *);
@@ -185,13 +231,6 @@ extern void elv_rb_del(struct rb_root *, struct request *);
extern struct request *elv_rb_find(struct rb_root *, sector_t);
/*
- * Return values from elevator merger
- */
-#define ELEVATOR_NO_MERGE 0
-#define ELEVATOR_FRONT_MERGE 1
-#define ELEVATOR_BACK_MERGE 2
-
-/*
* Insertion selection
*/
#define ELEVATOR_INSERT_FRONT 1
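For illustration: the merge outcome is now a typed enum, so a caller can switch over it instead of comparing raw integers (the handling is only sketched as comments).

static void handle_merge(struct request_queue *q, struct bio *bio)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		/* append the bio's data to rq */
		break;
	case ELEVATOR_FRONT_MERGE:
		/* prepend the bio's data to rq */
		break;
	case ELEVATOR_DISCARD_MERGE:
		/* merge adjacent discard ranges */
		break;
	case ELEVATOR_NO_MERGE:
		/* allocate a fresh request for the bio */
		break;
	}
}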
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 6fec9e8..c62b709 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -54,6 +54,11 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
+struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
+ unsigned int txqs,
+ unsigned int rxqs);
+#define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1)
+
struct sk_buff **eth_gro_receive(struct sk_buff **head,
struct sk_buff *skb);
int eth_gro_complete(struct sk_buff *skb, int nhoff);
@@ -397,6 +402,66 @@ static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2,
}
/**
+ * ether_addr_to_u64 - Convert an Ethernet address into a u64 value.
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Return a u64 value of the address
+ */
+static inline u64 ether_addr_to_u64(const u8 *addr)
+{
+ u64 u = 0;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ u = u << 8 | addr[i];
+
+ return u;
+}
+
+/**
+ * u64_to_ether_addr - Convert a u64 to an Ethernet address.
+ * @u: u64 to convert to an Ethernet MAC address
+ * @addr: Pointer to a six-byte array to contain the Ethernet address
+ */
+static inline void u64_to_ether_addr(u64 u, u8 *addr)
+{
+ int i;
+
+ for (i = ETH_ALEN - 1; i >= 0; i--) {
+ addr[i] = u & 0xff;
+ u = u >> 8;
+ }
+}
+
+/**
+ * eth_addr_dec - Decrement the given MAC address
+ *
+ * @addr: Pointer to a six-byte array containing Ethernet address to decrement
+ */
+static inline void eth_addr_dec(u8 *addr)
+{
+ u64 u = ether_addr_to_u64(addr);
+
+ u--;
+ u64_to_ether_addr(u, addr);
+}
+
+/**
+ * ether_addr_greater - Compare two Ethernet addresses
+ * @addr1: Pointer to a six-byte array containing the Ethernet address
+ * @addr2: Pointer to another six-byte array containing the Ethernet address
+ *
+ * Compare two Ethernet addresses, returns true if addr1 is greater than addr2
+ */
+static inline bool ether_addr_greater(const u8 *addr1, const u8 *addr2)
+{
+ u64 u1 = ether_addr_to_u64(addr1);
+ u64 u2 = ether_addr_to_u64(addr2);
+
+ return u1 > u2;
+}
+
+/**
* is_etherdev_addr - Tell if given Ethernet address belongs to the device.
* @dev: Pointer to a device structure
* @addr: Pointer to a six-byte array containing the Ethernet address
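A short sketch of the new MAC/u64 helpers, e.g. deriving a second port address from a base MAC; the base address here is just randomly generated.

static void derive_port1_mac(void)
{
	u8 base[ETH_ALEN], port1[ETH_ALEN];

	eth_random_addr(base);		/* stand-in for a real base MAC */
	u64_to_ether_addr(ether_addr_to_u64(base) + 1, port1);

	if (!ether_addr_greater(port1, base))
		pr_warn("48-bit address wrapped around\n");
}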
diff --git a/include/linux/export.h b/include/linux/export.h
index 2a0f61f..1a1dfdb 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -43,12 +43,19 @@ extern struct module __this_module;
#ifdef CONFIG_MODVERSIONS
/* Mark the CRC weak since genksyms apparently decides not to
 * generate a checksum for some symbols */
+#if defined(CONFIG_MODULE_REL_CRCS)
#define __CRC_SYMBOL(sym, sec) \
- extern __visible void *__crc_##sym __attribute__((weak)); \
- static const unsigned long __kcrctab_##sym \
- __used \
- __attribute__((section("___kcrctab" sec "+" #sym), used)) \
- = (unsigned long) &__crc_##sym;
+ asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
+ " .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
+ " .long " VMLINUX_SYMBOL_STR(__crc_##sym) " - . \n" \
+ " .previous \n");
+#else
+#define __CRC_SYMBOL(sym, sec) \
+ asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
+ " .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
+ " .long " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
+ " .previous \n");
+#endif
#else
#define __CRC_SYMBOL(sym, sec)
#endif
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index b871c0c..7010fb0 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -46,7 +46,18 @@
#define EXTCON_USB 1
#define EXTCON_USB_HOST 2
-/* Charging external connector */
+/*
+ * Charging external connector
+ *
+ * When an SDP charger connector is reported, the USB connector should
+ * also be reported, which means EXTCON_CHG_USB_SDP should always
+ * appear together with EXTCON_USB. Likewise, the ACA charger connector
+ * EXTCON_CHG_USB_ACA would normally appear with EXTCON_USB_HOST.
+ *
+ * The EXTCON_CHG_USB_SLOW connector can provide at least 500mA of
+ * current at 5V. The EXTCON_CHG_USB_FAST connector can provide at
+ * least 1A of current at 5V.
+ */
#define EXTCON_CHG_USB_SDP 5 /* Standard Downstream Port */
#define EXTCON_CHG_USB_DCP 6 /* Dedicated Charging Port */
#define EXTCON_CHG_USB_CDP 7 /* Charging Downstream Port */
@@ -54,6 +65,7 @@
#define EXTCON_CHG_USB_FAST 9
#define EXTCON_CHG_USB_SLOW 10
#define EXTCON_CHG_WPT 11 /* Wireless Power Transfer */
+#define EXTCON_CHG_USB_PD 12 /* USB Power Delivery */
/* Jack external connector */
#define EXTCON_JACK_MICROPHONE 20
@@ -160,62 +172,7 @@ union extcon_property_value {
};
struct extcon_cable;
-
-/**
- * struct extcon_dev - An extcon device represents one external connector.
- * @name: The name of this extcon device. Parent device name is
- * used if NULL.
- * @supported_cable: Array of supported cable names ending with EXTCON_NONE.
- * If supported_cable is NULL, cable name related APIs
- * are disabled.
- * @mutually_exclusive: Array of mutually exclusive set of cables that cannot
- * be attached simultaneously. The array should be
- * ending with NULL or be NULL (no mutually exclusive
- * cables). For example, if it is { 0x7, 0x30, 0}, then,
- * {0, 1}, {0, 1, 2}, {0, 2}, {1, 2}, or {4, 5} cannot
- * be attached simulataneously. {0x7, 0} is equivalent to
- * {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
- * can be no simultaneous connections.
- * @dev: Device of this extcon.
- * @state: Attach/detach state of this extcon. Do not provide at
- * register-time.
- * @nh: Notifier for the state change events from this extcon
- * @entry: To support list of extcon devices so that users can
- * search for extcon devices based on the extcon name.
- * @lock:
- * @max_supported: Internal value to store the number of cables.
- * @extcon_dev_type: Device_type struct to provide attribute_groups
- * customized for each extcon device.
- * @cables: Sysfs subdirectories. Each represents one cable.
- *
- * In most cases, users only need to provide "User initializing data" of
- * this struct when registering an extcon. In some exceptional cases,
- * optional callbacks may be needed. However, the values in "internal data"
- * are overwritten by register function.
- */
-struct extcon_dev {
- /* Optional user initializing data */
- const char *name;
- const unsigned int *supported_cable;
- const u32 *mutually_exclusive;
-
- /* Internal data. Please do not set. */
- struct device dev;
- struct raw_notifier_head *nh;
- struct list_head entry;
- int max_supported;
- spinlock_t lock; /* could be called by irq handler */
- u32 state;
-
- /* /sys/class/extcon/.../cable.n/... */
- struct device_type extcon_dev_type;
- struct extcon_cable *cables;
-
- /* /sys/class/extcon/.../mutually_exclusive/... */
- struct attribute_group attr_g_muex;
- struct attribute **attrs_muex;
- struct device_attribute *d_attrs_muex;
-};
+struct extcon_dev;
#if IS_ENABLED(CONFIG_EXTCON)
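Per the comment added above, a sketch of how a driver might report an SDP charger; edev is assumed to come from the usual extcon allocation/registration helpers.

static void report_sdp_attached(struct extcon_dev *edev)
{
	extcon_set_state_sync(edev, EXTCON_CHG_USB_SDP, true);
	extcon_set_state_sync(edev, EXTCON_USB, true);
}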
diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h
index a0e03b1..2aa3207 100644
--- a/include/linux/extcon/extcon-adc-jack.h
+++ b/include/linux/extcon/extcon-adc-jack.h
@@ -59,7 +59,7 @@ struct adc_jack_pdata {
const char *name;
const char *consumer_channel;
- const enum extcon *cable_names;
+ const unsigned int *cable_names;
/* The last entry's state should be 0 */
struct adc_jack_cond *adc_conditions;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a0934e6..0c167fd 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -54,9 +54,17 @@ struct bpf_prog_aux;
#define BPF_REG_AX MAX_BPF_REG
#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1)
+/* As per nm, we expose JITed images as text (code) section for
+ * kallsyms. That way, tools like perf can find it to match
+ * addresses.
+ */
+#define BPF_SYM_ELF_TYPE 't'
+
/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK 512
+#define BPF_TAG_SIZE 8
+
/* Helper macros for filter block array initializers. */
/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
@@ -408,7 +416,7 @@ struct bpf_prog {
kmemcheck_bitfield_end(meta);
enum bpf_prog_type type; /* Type of BPF program */
u32 len; /* Number of filter blocks */
- u32 digest[SHA_DIGEST_WORDS]; /* Program digest */
+ u8 tag[BPF_TAG_SIZE];
struct bpf_prog_aux *aux; /* Auxiliary fields */
struct sock_fprog_kern *orig_prog; /* Original BPF program */
unsigned int (*bpf_func)(const void *ctx,
@@ -519,7 +527,7 @@ static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
return prog->len * sizeof(struct bpf_insn);
}
-static inline u32 bpf_prog_digest_scratch_size(const struct bpf_prog *prog)
+static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
{
return round_up(bpf_prog_insn_size(prog) +
sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
@@ -543,7 +551,7 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
-#ifdef CONFIG_DEBUG_SET_MODULE_RONX
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
set_memory_ro((unsigned long)fp, fp->pages);
@@ -553,6 +561,16 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
set_memory_rw((unsigned long)fp, fp->pages);
}
+
+static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
+{
+ set_memory_ro((unsigned long)hdr, hdr->pages);
+}
+
+static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
+{
+ set_memory_rw((unsigned long)hdr, hdr->pages);
+}
#else
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
@@ -561,7 +579,24 @@ static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
}
-#endif /* CONFIG_DEBUG_SET_MODULE_RONX */
+
+static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
+{
+}
+
+static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
+{
+}
+#endif /* CONFIG_ARCH_HAS_SET_MEMORY */
+
+static inline struct bpf_binary_header *
+bpf_jit_binary_hdr(const struct bpf_prog *fp)
+{
+ unsigned long real_start = (unsigned long)fp->bpf_func;
+ unsigned long addr = real_start & PAGE_MASK;
+
+ return (void *)addr;
+}
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
@@ -605,6 +640,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
+void bpf_jit_compile(struct bpf_prog *prog);
bool bpf_helper_changes_pkt_data(void *func);
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
@@ -614,6 +650,7 @@ void bpf_warn_invalid_xdp_action(u32 act);
#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
extern int bpf_jit_harden;
+extern int bpf_jit_kallsyms;
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
@@ -623,7 +660,6 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);
-void bpf_jit_compile(struct bpf_prog *fp);
void bpf_jit_free(struct bpf_prog *fp);
struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
@@ -649,6 +685,11 @@ static inline bool bpf_jit_is_ebpf(void)
# endif
}
+static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
+{
+ return fp->jited && bpf_jit_is_ebpf();
+}
+
static inline bool bpf_jit_blinding_enabled(void)
{
/* These are the prerequisites, should someone ever have the
@@ -666,15 +707,91 @@ static inline bool bpf_jit_blinding_enabled(void)
return true;
}
-#else
-static inline void bpf_jit_compile(struct bpf_prog *fp)
+
+static inline bool bpf_jit_kallsyms_enabled(void)
+{
+ /* There are a couple of corner cases where kallsyms should
+ * not be enabled f.e. on hardening.
+ */
+ if (bpf_jit_harden)
+ return false;
+ if (!bpf_jit_kallsyms)
+ return false;
+ if (bpf_jit_kallsyms == 1)
+ return true;
+
+ return false;
+}
+
+const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
+ unsigned long *off, char *sym);
+bool is_bpf_text_address(unsigned long addr);
+int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+ char *sym);
+
+static inline const char *
+bpf_address_lookup(unsigned long addr, unsigned long *size,
+ unsigned long *off, char **modname, char *sym)
+{
+ const char *ret = __bpf_address_lookup(addr, size, off, sym);
+
+ if (ret && modname)
+ *modname = NULL;
+ return ret;
+}
+
+void bpf_prog_kallsyms_add(struct bpf_prog *fp);
+void bpf_prog_kallsyms_del(struct bpf_prog *fp);
+
+#else /* CONFIG_BPF_JIT */
+
+static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
+ return false;
}
static inline void bpf_jit_free(struct bpf_prog *fp)
{
bpf_prog_unlock_free(fp);
}
+
+static inline bool bpf_jit_kallsyms_enabled(void)
+{
+ return false;
+}
+
+static inline const char *
+__bpf_address_lookup(unsigned long addr, unsigned long *size,
+ unsigned long *off, char *sym)
+{
+ return NULL;
+}
+
+static inline bool is_bpf_text_address(unsigned long addr)
+{
+ return false;
+}
+
+static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
+ char *type, char *sym)
+{
+ return -ERANGE;
+}
+
+static inline const char *
+bpf_address_lookup(unsigned long addr, unsigned long *size,
+ unsigned long *off, char **modname, char *sym)
+{
+ return NULL;
+}
+
+static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
+{
+}
+
+static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
+{
+}
#endif /* CONFIG_BPF_JIT */
#define BPF_ANC BIT(15)
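A rough sketch, assuming <linux/kallsyms.h> is available, of how a symbol resolver can fold in JITed BPF images via the new helpers; the function name is invented.

static const char *resolve_addr(unsigned long addr, unsigned long *size,
				unsigned long *off, char **modname, char *sym)
{
	if (is_bpf_text_address(addr))
		return bpf_address_lookup(addr, size, off, modname, sym);

	return kallsyms_lookup(addr, size, off, modname, sym);
}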
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index 16551d5..57beb5d 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -22,6 +22,7 @@
#define _LINUX_FPGA_MGR_H
struct fpga_manager;
+struct sg_table;
/**
* enum fpga_mgr_states - fpga framework states
@@ -88,6 +89,7 @@ struct fpga_image_info {
* @state: returns an enum value of the FPGA's state
 * @write_init: prepare the FPGA to receive configuration data
* @write: write count bytes of configuration data to the FPGA
+ * @write_sg: write the scatter list of configuration data to the FPGA
* @write_complete: set FPGA to operating state after writing is done
* @fpga_remove: optional: Set FPGA into a specific state during driver remove
*
@@ -102,6 +104,7 @@ struct fpga_manager_ops {
struct fpga_image_info *info,
const char *buf, size_t count);
int (*write)(struct fpga_manager *mgr, const char *buf, size_t count);
+ int (*write_sg)(struct fpga_manager *mgr, struct sg_table *sgt);
int (*write_complete)(struct fpga_manager *mgr,
struct fpga_image_info *info);
void (*fpga_remove)(struct fpga_manager *mgr);
@@ -129,6 +132,8 @@ struct fpga_manager {
int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
const char *buf, size_t count);
+int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, struct fpga_image_info *info,
+ struct sg_table *sgt);
int fpga_mgr_firmware_load(struct fpga_manager *mgr,
struct fpga_image_info *info,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 2ba0743..c930cbc 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -423,6 +423,7 @@ struct block_device {
int bd_invalidated;
struct gendisk * bd_disk;
struct request_queue * bd_queue;
+ struct backing_dev_info *bd_bdi;
struct list_head bd_list;
/*
* Private data. You must have bd_claim'ed the block_device
@@ -2342,6 +2343,7 @@ extern struct kmem_cache *names_cachep;
#ifdef CONFIG_BLOCK
extern int register_blkdev(unsigned int, const char *);
extern void unregister_blkdev(unsigned int, const char *);
+extern void bdev_unhash_inode(dev_t dev);
extern struct block_device *bdget(dev_t);
extern struct block_device *bdgrab(struct block_device *bdev);
extern void bd_set_size(struct block_device *, loff_t size);
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 13ba552..4c467ef 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -360,6 +360,7 @@ struct fscache_object {
#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */
#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */
#define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */
+#define FSCACHE_OBJECT_RUN_AFTER_DEAD 8 /* T if object has been dispatched after death */
struct list_head cache_link; /* link in cache->object_list */
struct hlist_node cookie_link; /* link in cookie->backing_objects */
diff --git a/include/linux/fscrypt_common.h b/include/linux/fscrypt_common.h
new file mode 100644
index 0000000..547f815
--- /dev/null
+++ b/include/linux/fscrypt_common.h
@@ -0,0 +1,146 @@
+/*
+ * fscrypt_common.h: common declarations for per-file encryption
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * Written by Michael Halcrow, 2015.
+ * Modified by Jaegeuk Kim, 2015.
+ */
+
+#ifndef _LINUX_FSCRYPT_COMMON_H
+#define _LINUX_FSCRYPT_COMMON_H
+
+#include <linux/key.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/bio.h>
+#include <linux/dcache.h>
+#include <crypto/skcipher.h>
+#include <uapi/linux/fs.h>
+
+#define FS_CRYPTO_BLOCK_SIZE 16
+
+struct fscrypt_info;
+
+struct fscrypt_ctx {
+ union {
+ struct {
+ struct page *bounce_page; /* Ciphertext page */
+ struct page *control_page; /* Original page */
+ } w;
+ struct {
+ struct bio *bio;
+ struct work_struct work;
+ } r;
+ struct list_head free_list; /* Free list */
+ };
+ u8 flags; /* Flags */
+};
+
+/**
+ * For encrypted symlinks, the ciphertext length is stored at the beginning
+ * of the string in little-endian format.
+ */
+struct fscrypt_symlink_data {
+ __le16 len;
+ char encrypted_path[1];
+} __packed;
+
+/**
+ * This function is used to calculate the disk space required to
+ * store a filename of length l in encrypted symlink format.
+ */
+static inline u32 fscrypt_symlink_data_len(u32 l)
+{
+ if (l < FS_CRYPTO_BLOCK_SIZE)
+ l = FS_CRYPTO_BLOCK_SIZE;
+ return (l + sizeof(struct fscrypt_symlink_data) - 1);
+}
+
+struct fscrypt_str {
+ unsigned char *name;
+ u32 len;
+};
+
+struct fscrypt_name {
+ const struct qstr *usr_fname;
+ struct fscrypt_str disk_name;
+ u32 hash;
+ u32 minor_hash;
+ struct fscrypt_str crypto_buf;
+};
+
+#define FSTR_INIT(n, l) { .name = n, .len = l }
+#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len)
+#define fname_name(p) ((p)->disk_name.name)
+#define fname_len(p) ((p)->disk_name.len)
+
+/*
+ * fscrypt superblock flags
+ */
+#define FS_CFLG_OWN_PAGES (1U << 1)
+
+/*
+ * crypto operations for filesystems
+ */
+struct fscrypt_operations {
+ unsigned int flags;
+ const char *key_prefix;
+ int (*get_context)(struct inode *, void *, size_t);
+ int (*prepare_context)(struct inode *);
+ int (*set_context)(struct inode *, const void *, size_t, void *);
+ int (*dummy_context)(struct inode *);
+ bool (*is_encrypted)(struct inode *);
+ bool (*empty_dir)(struct inode *);
+ unsigned (*max_namelen)(struct inode *);
+};
+
+static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
+{
+ if (inode->i_sb->s_cop->dummy_context &&
+ inode->i_sb->s_cop->dummy_context(inode))
+ return true;
+ return false;
+}
+
+static inline bool fscrypt_valid_contents_enc_mode(u32 mode)
+{
+ return (mode == FS_ENCRYPTION_MODE_AES_256_XTS);
+}
+
+static inline bool fscrypt_valid_filenames_enc_mode(u32 mode)
+{
+ return (mode == FS_ENCRYPTION_MODE_AES_256_CTS);
+}
+
+static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
+{
+ if (str->len == 1 && str->name[0] == '.')
+ return true;
+
+ if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
+ return true;
+
+ return false;
+}
+
+static inline struct page *fscrypt_control_page(struct page *page)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
+#else
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-EINVAL);
+#endif
+}
+
+static inline int fscrypt_has_encryption_key(const struct inode *inode)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ return (inode->i_crypt_info != NULL);
+#else
+ return 0;
+#endif
+}
+
+#endif /* _LINUX_FSCRYPT_COMMON_H */
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
new file mode 100644
index 0000000..3511ca7
--- /dev/null
+++ b/include/linux/fscrypt_notsupp.h
@@ -0,0 +1,168 @@
+/*
+ * fscrypt_notsupp.h
+ *
+ * This stubs out the fscrypt functions for filesystems configured without
+ * encryption support.
+ */
+
+#ifndef _LINUX_FSCRYPT_NOTSUPP_H
+#define _LINUX_FSCRYPT_NOTSUPP_H
+
+#include <linux/fscrypt_common.h>
+
+/* crypto.c */
+static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
+ gfp_t gfp_flags)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
+{
+ return;
+}
+
+static inline struct page *fscrypt_encrypt_page(const struct inode *inode,
+ struct page *page,
+ unsigned int len,
+ unsigned int offs,
+ u64 lblk_num, gfp_t gfp_flags)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int fscrypt_decrypt_page(const struct inode *inode,
+ struct page *page,
+ unsigned int len, unsigned int offs,
+ u64 lblk_num)
+{
+ return -EOPNOTSUPP;
+}
+
+
+static inline void fscrypt_restore_control_page(struct page *page)
+{
+ return;
+}
+
+static inline void fscrypt_set_d_op(struct dentry *dentry)
+{
+ return;
+}
+
+static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
+{
+ return;
+}
+
+/* policy.c */
+static inline int fscrypt_ioctl_set_policy(struct file *filp,
+ const void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_has_permitted_context(struct inode *parent,
+ struct inode *child)
+{
+ return 0;
+}
+
+static inline int fscrypt_inherit_context(struct inode *parent,
+ struct inode *child,
+ void *fs_data, bool preload)
+{
+ return -EOPNOTSUPP;
+}
+
+/* keyinfo.c */
+static inline int fscrypt_get_encryption_info(struct inode *inode)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_put_encryption_info(struct inode *inode,
+ struct fscrypt_info *ci)
+{
+ return;
+}
+
+ /* fname.c */
+static inline int fscrypt_setup_filename(struct inode *dir,
+ const struct qstr *iname,
+ int lookup, struct fscrypt_name *fname)
+{
+ if (dir->i_sb->s_cop->is_encrypted(dir))
+ return -EOPNOTSUPP;
+
+ memset(fname, 0, sizeof(struct fscrypt_name));
+ fname->usr_fname = iname;
+ fname->disk_name.name = (unsigned char *)iname->name;
+ fname->disk_name.len = iname->len;
+ return 0;
+}
+
+static inline void fscrypt_free_filename(struct fscrypt_name *fname)
+{
+ return;
+}
+
+static inline u32 fscrypt_fname_encrypted_size(const struct inode *inode,
+ u32 ilen)
+{
+ /* never happens */
+ WARN_ON(1);
+ return 0;
+}
+
+static inline int fscrypt_fname_alloc_buffer(const struct inode *inode,
+ u32 ilen,
+ struct fscrypt_str *crypto_str)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
+{
+ return;
+}
+
+static inline int fscrypt_fname_disk_to_usr(struct inode *inode,
+ u32 hash, u32 minor_hash,
+ const struct fscrypt_str *iname,
+ struct fscrypt_str *oname)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_fname_usr_to_disk(struct inode *inode,
+ const struct qstr *iname,
+ struct fscrypt_str *oname)
+{
+ return -EOPNOTSUPP;
+}
+
+/* bio.c */
+static inline void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx,
+ struct bio *bio)
+{
+ return;
+}
+
+static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)
+{
+ return;
+}
+
+static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
+ sector_t pblk, unsigned int len)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* _LINUX_FSCRYPT_NOTSUPP_H */
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
new file mode 100644
index 0000000..a140f47
--- /dev/null
+++ b/include/linux/fscrypt_supp.h
@@ -0,0 +1,66 @@
+/*
+ * fscrypt_supp.h
+ *
+ * This is included by filesystems configured with encryption support.
+ */
+
+#ifndef _LINUX_FSCRYPT_SUPP_H
+#define _LINUX_FSCRYPT_SUPP_H
+
+#include <linux/fscrypt_common.h>
+
+/* crypto.c */
+extern struct kmem_cache *fscrypt_info_cachep;
+extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
+extern void fscrypt_release_ctx(struct fscrypt_ctx *);
+extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
+ unsigned int, unsigned int,
+ u64, gfp_t);
+extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
+ unsigned int, u64);
+extern void fscrypt_restore_control_page(struct page *);
+
+extern const struct dentry_operations fscrypt_d_ops;
+
+static inline void fscrypt_set_d_op(struct dentry *dentry)
+{
+ d_set_d_op(dentry, &fscrypt_d_ops);
+}
+
+static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
+{
+ spin_lock(&dentry->d_lock);
+ dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY;
+ spin_unlock(&dentry->d_lock);
+}
+
+/* policy.c */
+extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
+extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
+extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
+extern int fscrypt_inherit_context(struct inode *, struct inode *,
+ void *, bool);
+/* keyinfo.c */
+extern int fscrypt_get_encryption_info(struct inode *);
+extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
+
+/* fname.c */
+extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
+ int lookup, struct fscrypt_name *);
+extern void fscrypt_free_filename(struct fscrypt_name *);
+extern u32 fscrypt_fname_encrypted_size(const struct inode *, u32);
+extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
+ struct fscrypt_str *);
+extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
+extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
+ const struct fscrypt_str *, struct fscrypt_str *);
+extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
+ struct fscrypt_str *);
+
+/* bio.c */
+extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
+extern void fscrypt_pullback_bio_page(struct page **, bool);
+extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
+ unsigned int);
+
+#endif /* _LINUX_FSCRYPT_SUPP_H */
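A sketch of the include selection a filesystem's private header is expected to make between the two new headers; CONFIG_MYFS_FS_ENCRYPTION stands in for the filesystem's own Kconfig symbol.

#ifdef CONFIG_MYFS_FS_ENCRYPTION
#include <linux/fscrypt_supp.h>
#else
#include <linux/fscrypt_notsupp.h>
#endif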
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
deleted file mode 100644
index c074b67..0000000
--- a/include/linux/fscrypto.h
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * General per-file encryption definition
- *
- * Copyright (C) 2015, Google, Inc.
- *
- * Written by Michael Halcrow, 2015.
- * Modified by Jaegeuk Kim, 2015.
- */
-
-#ifndef _LINUX_FSCRYPTO_H
-#define _LINUX_FSCRYPTO_H
-
-#include <linux/key.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/bio.h>
-#include <linux/dcache.h>
-#include <crypto/skcipher.h>
-#include <uapi/linux/fs.h>
-
-#define FS_CRYPTO_BLOCK_SIZE 16
-
-struct fscrypt_info;
-
-struct fscrypt_ctx {
- union {
- struct {
- struct page *bounce_page; /* Ciphertext page */
- struct page *control_page; /* Original page */
- } w;
- struct {
- struct bio *bio;
- struct work_struct work;
- } r;
- struct list_head free_list; /* Free list */
- };
- u8 flags; /* Flags */
- u8 mode; /* Encryption mode for tfm */
-};
-
-/**
- * For encrypted symlinks, the ciphertext length is stored at the beginning
- * of the string in little-endian format.
- */
-struct fscrypt_symlink_data {
- __le16 len;
- char encrypted_path[1];
-} __packed;
-
-/**
- * This function is used to calculate the disk space required to
- * store a filename of length l in encrypted symlink format.
- */
-static inline u32 fscrypt_symlink_data_len(u32 l)
-{
- if (l < FS_CRYPTO_BLOCK_SIZE)
- l = FS_CRYPTO_BLOCK_SIZE;
- return (l + sizeof(struct fscrypt_symlink_data) - 1);
-}
-
-struct fscrypt_str {
- unsigned char *name;
- u32 len;
-};
-
-struct fscrypt_name {
- const struct qstr *usr_fname;
- struct fscrypt_str disk_name;
- u32 hash;
- u32 minor_hash;
- struct fscrypt_str crypto_buf;
-};
-
-#define FSTR_INIT(n, l) { .name = n, .len = l }
-#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len)
-#define fname_name(p) ((p)->disk_name.name)
-#define fname_len(p) ((p)->disk_name.len)
-
-/*
- * fscrypt superblock flags
- */
-#define FS_CFLG_OWN_PAGES (1U << 1)
-
-/*
- * crypto opertions for filesystems
- */
-struct fscrypt_operations {
- unsigned int flags;
- int (*get_context)(struct inode *, void *, size_t);
- int (*key_prefix)(struct inode *, u8 **);
- int (*prepare_context)(struct inode *);
- int (*set_context)(struct inode *, const void *, size_t, void *);
- int (*dummy_context)(struct inode *);
- bool (*is_encrypted)(struct inode *);
- bool (*empty_dir)(struct inode *);
- unsigned (*max_namelen)(struct inode *);
-};
-
-static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
-{
- if (inode->i_sb->s_cop->dummy_context &&
- inode->i_sb->s_cop->dummy_context(inode))
- return true;
- return false;
-}
-
-static inline bool fscrypt_valid_contents_enc_mode(u32 mode)
-{
- return (mode == FS_ENCRYPTION_MODE_AES_256_XTS);
-}
-
-static inline bool fscrypt_valid_filenames_enc_mode(u32 mode)
-{
- return (mode == FS_ENCRYPTION_MODE_AES_256_CTS);
-}
-
-static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
-{
- if (str->len == 1 && str->name[0] == '.')
- return true;
-
- if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
- return true;
-
- return false;
-}
-
-static inline struct page *fscrypt_control_page(struct page *page)
-{
-#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
- return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
-#else
- WARN_ON_ONCE(1);
- return ERR_PTR(-EINVAL);
-#endif
-}
-
-static inline int fscrypt_has_encryption_key(const struct inode *inode)
-{
-#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
- return (inode->i_crypt_info != NULL);
-#else
- return 0;
-#endif
-}
-
-static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
-{
-#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
- spin_lock(&dentry->d_lock);
- dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY;
- spin_unlock(&dentry->d_lock);
-#endif
-}
-
-#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
-extern const struct dentry_operations fscrypt_d_ops;
-#endif
-
-static inline void fscrypt_set_d_op(struct dentry *dentry)
-{
-#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
- d_set_d_op(dentry, &fscrypt_d_ops);
-#endif
-}
-
-#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
-/* crypto.c */
-extern struct kmem_cache *fscrypt_info_cachep;
-extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
-extern void fscrypt_release_ctx(struct fscrypt_ctx *);
-extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
- unsigned int, unsigned int,
- u64, gfp_t);
-extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
- unsigned int, u64);
-extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
-extern void fscrypt_pullback_bio_page(struct page **, bool);
-extern void fscrypt_restore_control_page(struct page *);
-extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
- unsigned int);
-/* policy.c */
-extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
-extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
-extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
-extern int fscrypt_inherit_context(struct inode *, struct inode *,
- void *, bool);
-/* keyinfo.c */
-extern int fscrypt_get_encryption_info(struct inode *);
-extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
-
-/* fname.c */
-extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
- int lookup, struct fscrypt_name *);
-extern void fscrypt_free_filename(struct fscrypt_name *);
-extern u32 fscrypt_fname_encrypted_size(const struct inode *, u32);
-extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
- struct fscrypt_str *);
-extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
-extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
- const struct fscrypt_str *, struct fscrypt_str *);
-extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
- struct fscrypt_str *);
-#endif
-
-/* crypto.c */
-static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(const struct inode *i,
- gfp_t f)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void fscrypt_notsupp_release_ctx(struct fscrypt_ctx *c)
-{
- return;
-}
-
-static inline struct page *fscrypt_notsupp_encrypt_page(const struct inode *i,
- struct page *p,
- unsigned int len,
- unsigned int offs,
- u64 lblk_num, gfp_t f)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline int fscrypt_notsupp_decrypt_page(const struct inode *i, struct page *p,
- unsigned int len, unsigned int offs,
- u64 lblk_num)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void fscrypt_notsupp_decrypt_bio_pages(struct fscrypt_ctx *c,
- struct bio *b)
-{
- return;
-}
-
-static inline void fscrypt_notsupp_pullback_bio_page(struct page **p, bool b)
-{
- return;
-}
-
-static inline void fscrypt_notsupp_restore_control_page(struct page *p)
-{
- return;
-}
-
-static inline int fscrypt_notsupp_zeroout_range(const struct inode *i, pgoff_t p,
- sector_t s, unsigned int f)
-{
- return -EOPNOTSUPP;
-}
-
-/* policy.c */
-static inline int fscrypt_notsupp_ioctl_set_policy(struct file *f,
- const void __user *arg)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int fscrypt_notsupp_ioctl_get_policy(struct file *f,
- void __user *arg)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int fscrypt_notsupp_has_permitted_context(struct inode *p,
- struct inode *i)
-{
- return 0;
-}
-
-static inline int fscrypt_notsupp_inherit_context(struct inode *p,
- struct inode *i, void *v, bool b)
-{
- return -EOPNOTSUPP;
-}
-
-/* keyinfo.c */
-static inline int fscrypt_notsupp_get_encryption_info(struct inode *i)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void fscrypt_notsupp_put_encryption_info(struct inode *i,
- struct fscrypt_info *f)
-{
- return;
-}
-
- /* fname.c */
-static inline int fscrypt_notsupp_setup_filename(struct inode *dir,
- const struct qstr *iname,
- int lookup, struct fscrypt_name *fname)
-{
- if (dir->i_sb->s_cop->is_encrypted(dir))
- return -EOPNOTSUPP;
-
- memset(fname, 0, sizeof(struct fscrypt_name));
- fname->usr_fname = iname;
- fname->disk_name.name = (unsigned char *)iname->name;
- fname->disk_name.len = iname->len;
- return 0;
-}
-
-static inline void fscrypt_notsupp_free_filename(struct fscrypt_name *fname)
-{
- return;
-}
-
-static inline u32 fscrypt_notsupp_fname_encrypted_size(struct inode *i, u32 s)
-{
- /* never happens */
- WARN_ON(1);
- return 0;
-}
-
-static inline int fscrypt_notsupp_fname_alloc_buffer(struct inode *inode,
- u32 ilen, struct fscrypt_str *crypto_str)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void fscrypt_notsupp_fname_free_buffer(struct fscrypt_str *c)
-{
- return;
-}
-
-static inline int fscrypt_notsupp_fname_disk_to_usr(struct inode *inode,
- u32 hash, u32 minor_hash,
- const struct fscrypt_str *iname,
- struct fscrypt_str *oname)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int fscrypt_notsupp_fname_usr_to_disk(struct inode *inode,
- const struct qstr *iname,
- struct fscrypt_str *oname)
-{
- return -EOPNOTSUPP;
-}
-#endif /* _LINUX_FSCRYPTO_H */
diff --git a/include/linux/fsi.h b/include/linux/fsi.h
new file mode 100644
index 0000000..273cbf6
--- /dev/null
+++ b/include/linux/fsi.h
@@ -0,0 +1,50 @@
+/* FSI device & driver interfaces
+ *
+ * Copyright (C) IBM Corporation 2016
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef LINUX_FSI_H
+#define LINUX_FSI_H
+
+#include <linux/device.h>
+
+struct fsi_device {
+ struct device dev;
+ u8 engine_type;
+ u8 version;
+};
+
+struct fsi_device_id {
+ u8 engine_type;
+ u8 version;
+};
+
+#define FSI_VERSION_ANY 0
+
+#define FSI_DEVICE(t) \
+ .engine_type = (t), .version = FSI_VERSION_ANY,
+
+#define FSI_DEVICE_VERSIONED(t, v) \
+ .engine_type = (t), .version = (v),
+
+
+struct fsi_driver {
+ struct device_driver drv;
+ const struct fsi_device_id *id_table;
+};
+
+#define to_fsi_dev(devp) container_of(devp, struct fsi_device, dev)
+#define to_fsi_drv(drvp) container_of(drvp, struct fsi_driver, drv)
+
+extern struct bus_type fsi_bus_type;
+
+#endif /* LINUX_FSI_H */
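As an aside on how this new bus interface is meant to be consumed (not part of the patch itself), a client driver would describe the engines it handles with the FSI_DEVICE()/FSI_DEVICE_VERSIONED() helpers and hang the id_table off a struct fsi_driver, roughly as below; the engine type 0x10 and all names are invented for the sketch, and the registration call itself is outside this header.

/* Hypothetical FSI client matching any version of engine type 0x10. */
static const struct fsi_device_id example_fsi_ids[] = {
	{ FSI_DEVICE(0x10) },
	{ 0 },
};

static struct fsi_driver example_fsi_driver = {
	.id_table = example_fsi_ids,
	.drv = {
		.name = "example-fsi-client",
		.bus = &fsi_bus_type,
	},
};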
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index 3f9778c..c332f0a 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -733,8 +733,12 @@ struct fsl_ifc_nand {
__be32 nand_erattr1;
u32 res19[0x10];
__be32 nand_fsr;
- u32 res20[0x3];
- __be32 nand_eccstat[6];
+ u32 res20;
+ /* The V1 nand_eccstat is actually 4 words that overlap the
+ * V2 nand_eccstat.
+ */
+ __be32 v1_nand_eccstat[2];
+ __be32 v2_nand_eccstat[6];
u32 res21[0x1c];
__be32 nanndcr;
u32 res22[0x2];
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 0cf34d6..4872465 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -323,8 +323,6 @@ extern void fsnotify_init_mark(struct fsnotify_mark *mark, void (*free_mark)(str
extern struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group, struct inode *inode);
/* find (and take a reference) to a mark associated with group and vfsmount */
extern struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt);
-/* copy the values from old into new */
-extern void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old);
/* set the ignored_mask of a mark */
extern void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask);
/* set the mask of a mark (might pin the object into memory */
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index e0341af..a999d28 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -146,15 +146,6 @@ enum {
DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
};
-#define BLK_SCSI_MAX_CMDS (256)
-#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
-
-struct blk_scsi_cmd_filter {
- unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
- unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
- struct kobject kobj;
-};
-
struct disk_part_tbl {
struct rcu_head rcu_head;
int len;
@@ -176,6 +167,13 @@ struct blk_integrity {
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */
+struct disk_devt {
+ atomic_t count;
+ void (*release)(struct disk_devt *disk_devt);
+};
+
+void put_disk_devt(struct disk_devt *disk_devt);
+void get_disk_devt(struct disk_devt *disk_devt);
struct gendisk {
/* major, first_minor and minors are input parameters only,
@@ -185,6 +183,7 @@ struct gendisk {
int first_minor;
int minors; /* maximum number of minors, =1 for
* disks that can't be partitioned. */
+ struct disk_devt *disk_devt;
char disk_name[DISK_NAME_LEN]; /* name of major driver */
char *(*devnode)(struct gendisk *gd, umode_t *mode);
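The new disk_devt object is a small reference-counted handle: count is the refcount, release is called when the last reference is dropped, and get_disk_devt()/put_disk_devt() adjust it. A hedged sketch of the expected embedding pattern follows; the example_ctrl container and the initialisation details are assumptions, not part of this patch.

/* Sketch: embed disk_devt and free the container from its release hook. */
struct example_ctrl {
	struct disk_devt disk_devt;
	/* ... driver state ... */
};

static void example_ctrl_release(struct disk_devt *disk_devt)
{
	struct example_ctrl *ctrl =
		container_of(disk_devt, struct example_ctrl, disk_devt);

	kfree(ctrl);
}

/* At setup, presumably: initialise count to 1, set .release to
 * example_ctrl_release, point gendisk->disk_devt at &ctrl->disk_devt,
 * and balance later references with get_disk_devt()/put_disk_devt(). */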
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 4175dca..0fe0b62 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -38,9 +38,8 @@ struct vm_area_struct;
#define ___GFP_ACCOUNT 0x100000u
#define ___GFP_NOTRACK 0x200000u
#define ___GFP_DIRECT_RECLAIM 0x400000u
-#define ___GFP_OTHER_NODE 0x800000u
-#define ___GFP_WRITE 0x1000000u
-#define ___GFP_KSWAPD_RECLAIM 0x2000000u
+#define ___GFP_WRITE 0x800000u
+#define ___GFP_KSWAPD_RECLAIM 0x1000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
/*
@@ -172,11 +171,6 @@ struct vm_area_struct;
* __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
* distinguishing in the source between false positives and allocations that
* cannot be supported (e.g. page tables).
- *
- * __GFP_OTHER_NODE is for allocations that are on a remote node but that
- * should not be accounted for as a remote allocation in vmstat. A
- * typical user would be khugepaged collapsing a huge page on a remote
- * node.
*/
#define __GFP_COLD ((__force gfp_t)___GFP_COLD)
#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
@@ -184,10 +178,9 @@ struct vm_area_struct;
#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK)
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
-#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
/* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT 26
+#define __GFP_BITS_SHIFT 25
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/*
@@ -506,11 +499,10 @@ extern void free_hot_cold_page(struct page *page, bool cold);
extern void free_hot_cold_page_list(struct list_head *list, bool cold);
struct page_frag_cache;
-extern void __page_frag_drain(struct page *page, unsigned int order,
- unsigned int count);
-extern void *__alloc_page_frag(struct page_frag_cache *nc,
- unsigned int fragsz, gfp_t gfp_mask);
-extern void __free_page_frag(void *addr);
+extern void __page_frag_cache_drain(struct page *page, unsigned int count);
+extern void *page_frag_alloc(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask);
+extern void page_frag_free(void *addr);
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
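The page-fragment helpers are renamed here with unchanged calling conventions for alloc/free, so callers translate mechanically; a small sketch of the new spellings (where the cache lives, typically per CPU, is the caller's business and merely assumed here):

/* Sketch: the renamed page-fragment allocator API. */
static struct page_frag_cache example_frag_cache;

static void *example_alloc_frag(unsigned int size)
{
	/* was __alloc_page_frag() */
	return page_frag_alloc(&example_frag_cache, size, GFP_ATOMIC);
}

static void example_free_frag(void *data)
{
	/* was __free_page_frag() */
	page_frag_free(data);
}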
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index c2748ac..846f3b9 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -8,6 +8,7 @@
#include <linux/irqdomain.h>
#include <linux/lockdep.h>
#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf-generic.h>
struct gpio_desc;
struct of_phandle_args;
@@ -19,18 +20,6 @@ struct module;
#ifdef CONFIG_GPIOLIB
/**
- * enum single_ended_mode - mode for single ended operation
- * @LINE_MODE_PUSH_PULL: normal mode for a GPIO line, drive actively high/low
- * @LINE_MODE_OPEN_DRAIN: set line to be open drain
- * @LINE_MODE_OPEN_SOURCE: set line to be open source
- */
-enum single_ended_mode {
- LINE_MODE_PUSH_PULL,
- LINE_MODE_OPEN_DRAIN,
- LINE_MODE_OPEN_SOURCE,
-};
-
-/**
* struct gpio_chip - abstract a GPIO controller
* @label: a functional name for the GPIO device, such as a part
* number or the name of the SoC IP-block implementing it.
@@ -48,16 +37,8 @@ enum single_ended_mode {
* @get: returns value for signal "offset", 0=low, 1=high, or negative error
* @set: assigns output value for signal "offset"
* @set_multiple: assigns output values for multiple signals defined by "mask"
- * @set_debounce: optional hook for setting debounce time for specified gpio in
- * interrupt triggered gpio chips
- * @set_single_ended: optional hook for setting a line as open drain, open
- * source, or non-single ended (restore from open drain/source to normal
- * push-pull mode) this should be implemented if the hardware supports
- * open drain or open source settings. The GPIOlib will otherwise try
- * to emulate open drain/source by not actively driving lines high/low
- * if a consumer request this. The driver may return -ENOTSUPP if e.g.
- * it supports just open drain but not open source and is called
- * with LINE_MODE_OPEN_SOURCE as mode argument.
+ * @set_config: optional hook for all kinds of settings. Uses the same
+ * packed config format as generic pinconf.
* @to_irq: optional hook supporting non-static gpio_to_irq() mappings;
* implementation may not sleep
* @dbg_show: optional routine to show contents in debugfs; default code
@@ -150,13 +131,9 @@ struct gpio_chip {
void (*set_multiple)(struct gpio_chip *chip,
unsigned long *mask,
unsigned long *bits);
- int (*set_debounce)(struct gpio_chip *chip,
- unsigned offset,
- unsigned debounce);
- int (*set_single_ended)(struct gpio_chip *chip,
- unsigned offset,
- enum single_ended_mode mode);
-
+ int (*set_config)(struct gpio_chip *chip,
+ unsigned offset,
+ unsigned long config);
int (*to_irq)(struct gpio_chip *chip,
unsigned offset);
@@ -274,42 +251,74 @@ void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
int parent_irq);
-int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ unsigned int first_irq,
+ irq_flow_handler_t handler,
+ unsigned int type,
+ bool nested,
+ struct lock_class_key *lock_key);
+
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * Lockdep requires that each irqchip instance be created with a
+ * unique key so as to avoid unnecessary warnings. These upfront
+ * boilerplate static inlines provide such a key for each
+ * unique instance.
+ */
+static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ unsigned int first_irq,
+ irq_flow_handler_t handler,
+ unsigned int type)
+{
+ static struct lock_class_key key;
+
+ return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+ handler, type, false, &key);
+}
+
+static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
- unsigned int type,
- bool nested,
- struct lock_class_key *lock_key);
+ unsigned int type)
+{
+
+ static struct lock_class_key key;
+
+ return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+ handler, type, true, &key);
+}
+#else
+static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ unsigned int first_irq,
+ irq_flow_handler_t handler,
+ unsigned int type)
+{
+ return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+ handler, type, false, NULL);
+}
-/* FIXME: I assume threaded IRQchips do not have the lockdep problem */
static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type)
{
- return _gpiochip_irqchip_add(gpiochip, irqchip, first_irq,
- handler, type, true, NULL);
+ return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+ handler, type, true, NULL);
}
-
-#ifdef CONFIG_LOCKDEP
-#define gpiochip_irqchip_add(...) \
-( \
- ({ \
- static struct lock_class_key _key; \
- _gpiochip_irqchip_add(__VA_ARGS__, false, &_key); \
- }) \
-)
-#else
-#define gpiochip_irqchip_add(...) \
- _gpiochip_irqchip_add(__VA_ARGS__, false, NULL)
-#endif
+#endif /* CONFIG_LOCKDEP */
#endif /* CONFIG_GPIOLIB_IRQCHIP */
int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset);
void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset);
+int gpiochip_generic_config(struct gpio_chip *chip, unsigned offset,
+ unsigned long config);
#ifdef CONFIG_PINCTRL
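Drivers that previously implemented set_debounce() or set_single_ended() now express both through the single set_config() hook, decoding the packed generic-pinconf value; a rough sketch under the assumption that the chip has hardware support for debounce and open drain (the mychip_* helpers are invented):

/* Sketch of a set_config() implementation using the generic pinconf
 * packing helpers from <linux/pinctrl/pinconf-generic.h>. */
static int mychip_set_config(struct gpio_chip *chip, unsigned offset,
			     unsigned long config)
{
	enum pin_config_param param = pinconf_to_config_param(config);
	u32 arg = pinconf_to_config_argument(config);

	switch (param) {
	case PIN_CONFIG_INPUT_DEBOUNCE:
		return mychip_set_debounce(chip, offset, arg);	/* hypothetical */
	case PIN_CONFIG_DRIVE_OPEN_DRAIN:
		return mychip_set_open_drain(chip, offset);	/* hypothetical */
	default:
		return -ENOTSUPP;
	}
}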
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index dd85f35..7ef111d 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -232,6 +232,7 @@ struct hid_sensor_common {
atomic_t data_ready;
atomic_t user_requested_state;
struct iio_trigger *trigger;
+ int timestamp_ns_scale;
struct hid_sensor_hub_attribute_info poll;
struct hid_sensor_hub_attribute_info report_state;
struct hid_sensor_hub_attribute_info power_state;
@@ -271,4 +272,7 @@ int hid_sensor_format_scale(u32 usage_id,
s32 hid_sensor_read_poll_value(struct hid_sensor_common *st);
+int64_t hid_sensor_convert_timestamp(struct hid_sensor_common *st,
+ int64_t raw_value);
+
#endif
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index f2ee90a..30c7dc4 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -52,6 +52,9 @@
#define HID_USAGE_SENSOR_ANGL_VELOCITY_Y_AXIS 0x200458
#define HID_USAGE_SENSOR_ANGL_VELOCITY_Z_AXIS 0x200459
+/* Gravity vector */
+#define HID_USAGE_SENSOR_GRAVITY_VECTOR 0x20007B
+
/* ORIENTATION: Compass 3D: (200083) */
#define HID_USAGE_SENSOR_COMPASS_3D 0x200083
#define HID_USAGE_SENSOR_DATA_ORIENTATION 0x200470
@@ -95,6 +98,7 @@
#define HID_USAGE_SENSOR_TIME_HOUR 0x200525
#define HID_USAGE_SENSOR_TIME_MINUTE 0x200526
#define HID_USAGE_SENSOR_TIME_SECOND 0x200527
+#define HID_USAGE_SENSOR_TIME_TIMESTAMP 0x200529
/* Units */
#define HID_USAGE_SENSOR_UNITS_NOT_SPECIFIED 0x00
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index cdab81b..e52b427 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -88,12 +88,6 @@ enum hrtimer_restart {
* @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above)
* @is_rel: Set if the timer was armed relative
- * @start_pid: timer statistics field to store the pid of the task which
- * started the timer
- * @start_site: timer statistics field to store the site where the timer
- * was started
- * @start_comm: timer statistics field to store the name of the process which
- * started the timer
*
* The hrtimer structure must be initialized by hrtimer_init()
*/
@@ -104,11 +98,6 @@ struct hrtimer {
struct hrtimer_clock_base *base;
u8 state;
u8 is_rel;
-#ifdef CONFIG_TIMER_STATS
- int start_pid;
- void *start_site;
- char start_comm[16];
-#endif
};
/**
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 42fe43f..62bbf3c 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -32,11 +32,10 @@
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/timer.h>
-#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
-
+#include <linux/interrupt.h>
#define MAX_PAGE_BUFFER_COUNT 32
#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */
@@ -128,6 +127,7 @@ struct hv_ring_buffer_info {
u32 ring_data_startoffset;
u32 priv_write_index;
u32 priv_read_index;
+ u32 cached_read_index;
};
/*
@@ -138,8 +138,8 @@ struct hv_ring_buffer_info {
* for the specified ring buffer
*/
static inline void
-hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
- u32 *read, u32 *write)
+hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
+ u32 *read, u32 *write)
{
u32 read_loc, write_loc, dsize;
@@ -153,7 +153,7 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
*read = dsize - *write;
}
-static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi)
+static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
{
u32 read_loc, write_loc, dsize, read;
@@ -167,7 +167,7 @@ static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi)
return read;
}
-static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi)
+static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
{
u32 read_loc, write_loc, dsize, write;
@@ -180,6 +180,19 @@ static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi)
return write;
}
+static inline u32 hv_get_cached_bytes_to_write(
+ const struct hv_ring_buffer_info *rbi)
+{
+ u32 read_loc, write_loc, dsize, write;
+
+ dsize = rbi->ring_datasize;
+ read_loc = rbi->cached_read_index;
+ write_loc = rbi->ring_buffer->write_index;
+
+ write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
+ read_loc - write_loc;
+ return write;
+}
/*
* VMBUS version is 32 bit entity broken up into
* two 16 bit quantities: major_number. minor_number.
@@ -627,6 +640,7 @@ struct vmbus_channel_msginfo {
/* Synchronize the request/response if needed */
struct completion waitevent;
+ struct vmbus_channel *waiting_channel;
union {
struct vmbus_channel_version_supported version_supported;
struct vmbus_channel_open_result open_result;
@@ -669,11 +683,6 @@ struct hv_input_signal_event_buffer {
struct hv_input_signal_event event;
};
-enum hv_signal_policy {
- HV_SIGNAL_POLICY_DEFAULT = 0,
- HV_SIGNAL_POLICY_EXPLICIT,
-};
-
enum hv_numa_policy {
HV_BALANCED = 0,
HV_LOCALIZED,
@@ -733,26 +742,27 @@ struct vmbus_channel {
struct vmbus_close_msg close_msg;
- /* Channel callback are invoked in this workqueue context */
- /* HANDLE dataWorkQueue; */
-
+ /* Channel callbacks are invoked in softirq context */
+ struct tasklet_struct callback_event;
void (*onchannel_callback)(void *context);
void *channel_callback_context;
/*
- * A channel can be marked for efficient (batched)
- * reading:
- * If batched_reading is set to "true", we read until the
- * channel is empty and hold off interrupts from the host
- * during the entire read process.
- * If batched_reading is set to "false", the client is not
- * going to perform batched reading.
- *
- * By default we will enable batched reading; specific
- * drivers that don't want this behavior can turn it off.
+ * A channel can be marked for one of three modes of reading:
+ * BATCHED - callback called from tasklet and should read
+ * channel until empty. Interrupts from the host
+ * are masked while read is in process (default).
+ * DIRECT - callback called from tasklet (softirq).
+ * ISR - callback called in interrupt context and must
+ * invoke its own deferred processing.
+ * Host interrupts are disabled and must be re-enabled
+ * when ring is empty.
*/
-
- bool batched_reading;
+ enum hv_callback_mode {
+ HV_CALL_BATCHED,
+ HV_CALL_DIRECT,
+ HV_CALL_ISR
+ } callback_mode;
bool is_dedicated_interrupt;
struct hv_input_signal_event_buffer sig_buf;
@@ -836,23 +846,6 @@ struct vmbus_channel {
*/
struct list_head percpu_list;
/*
- * Host signaling policy: The default policy will be
- * based on the ring buffer state. We will also support
- * a policy where the client driver can have explicit
- * signaling control.
- */
- enum hv_signal_policy signal_policy;
- /*
- * On the channel send side, many of the VMBUS
- * device drivers explicity serialize access to the
- * outgoing ring buffer. Give more control to the
- * VMBUS device drivers in terms how to serialize
- * accesss to the outgoing ring buffer.
- * The default behavior will be to aquire the
- * ring lock to preserve the current behavior.
- */
- bool acquire_ring_lock;
- /*
* For performance critical channels (storage, networking
* etc,), Hyper-V has a mechanism to enhance the throughput
* at the expense of latency:
@@ -892,32 +885,22 @@ struct vmbus_channel {
};
-static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
-{
- c->acquire_ring_lock = state;
-}
-
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
return !!(c->offermsg.offer.chn_flags &
VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
}
-static inline void set_channel_signal_state(struct vmbus_channel *c,
- enum hv_signal_policy policy)
-{
- c->signal_policy = policy;
-}
-
static inline void set_channel_affinity_state(struct vmbus_channel *c,
enum hv_numa_policy policy)
{
c->affinity_policy = policy;
}
-static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
+static inline void set_channel_read_mode(struct vmbus_channel *c,
+ enum hv_callback_mode mode)
{
- c->batched_reading = state;
+ c->callback_mode = mode;
}
static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
@@ -1040,8 +1023,7 @@ extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
u32 bufferLen,
u64 requestid,
enum vmbus_packet_type type,
- u32 flags,
- bool kick_q);
+ u32 flags);
extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
struct hv_page_buffer pagebuffers[],
@@ -1056,8 +1038,7 @@ extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
void *buffer,
u32 bufferlen,
u64 requestid,
- u32 flags,
- bool kick_q);
+ u32 flags);
extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
struct hv_multipage_buffer *mpb,
@@ -1444,9 +1425,10 @@ struct hyperv_service_callback {
};
#define MAX_SRV_VER 0x7ffffff
-extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
- struct icmsg_negotiate *, u8 *, int,
- int);
+extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
+ const int *fw_version, int fw_vercnt,
+ const int *srv_version, int srv_vercnt,
+ int *nego_fw_version, int *nego_srv_version);
void hv_event_tasklet_disable(struct vmbus_channel *channel);
void hv_event_tasklet_enable(struct vmbus_channel *channel);
@@ -1466,9 +1448,9 @@ void vmbus_set_event(struct vmbus_channel *channel);
/* Get the start of the ring buffer. */
static inline void *
-hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
+hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
{
- return (void *)ring_info->ring_buffer->buffer;
+ return ring_info->ring_buffer->buffer;
}
/*
@@ -1488,7 +1470,7 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
static inline void hv_signal_on_read(struct vmbus_channel *channel)
{
- u32 cur_write_sz;
+ u32 cur_write_sz, cached_write_sz;
u32 pending_sz;
struct hv_ring_buffer_info *rbi = &channel->inbound;
@@ -1512,12 +1494,54 @@ static inline void hv_signal_on_read(struct vmbus_channel *channel)
cur_write_sz = hv_get_bytes_to_write(rbi);
- if (cur_write_sz >= pending_sz)
+ if (cur_write_sz < pending_sz)
+ return;
+
+ cached_write_sz = hv_get_cached_bytes_to_write(rbi);
+ if (cached_write_sz < pending_sz)
vmbus_setevent(channel);
return;
}
+static inline void
+init_cached_read_index(struct vmbus_channel *channel)
+{
+ struct hv_ring_buffer_info *rbi = &channel->inbound;
+
+ rbi->cached_read_index = rbi->ring_buffer->read_index;
+}
+
+/*
+ * Mask off host interrupt callback notifications
+ */
+static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
+{
+ rbi->ring_buffer->interrupt_mask = 1;
+
+ /* make sure mask update is not reordered */
+ virt_mb();
+}
+
+/*
+ * Re-enable host callback and return number of outstanding bytes
+ */
+static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
+{
+
+ rbi->ring_buffer->interrupt_mask = 0;
+
+ /* make sure mask update is not reordered */
+ virt_mb();
+
+ /*
+ * Now check to see if the ring buffer is still empty.
+ * If it is not, we raced and we need to process new
+ * incoming messages.
+ */
+ return hv_get_bytes_to_read(rbi);
+}
+
/*
* An API to support in-place processing of incoming VMBUS packets.
*/
@@ -1569,6 +1593,8 @@ static inline void put_pkt_raw(struct vmbus_channel *channel,
* This call commits the read index and potentially signals the host.
* Here is the pattern for using the "in-place" consumption APIs:
*
+ * init_cached_read_index();
+ *
* while (get_next_pkt_raw() {
* process the packet "in-place";
* put_pkt_raw();
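Putting the new pieces together, the documented in-place consumption loop with the cached read index looks roughly like this; get_next_pkt_raw(), put_pkt_raw() and commit_rd_index() are the existing in-place helpers in this header, while example_process() is a stand-in:

/* Sketch of the in-place read pattern described above. */
static void example_drain_channel(struct vmbus_channel *channel)
{
	struct vmpacket_descriptor *desc;

	init_cached_read_index(channel);

	while ((desc = get_next_pkt_raw(channel)) != NULL) {
		example_process(desc);		/* hypothetical */
		put_pkt_raw(channel, desc);
	}

	/* commits the read index and potentially signals the host */
	commit_rd_index(channel);
}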
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index b2109c5..7b23a33 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -51,6 +51,7 @@ enum i2c_slave_event;
typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *);
struct module;
+struct property_entry;
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
/*
@@ -299,6 +300,7 @@ static inline int i2c_slave_event(struct i2c_client *client,
* @archdata: copied into i2c_client.dev.archdata
* @of_node: pointer to OpenFirmware device node
* @fwnode: device node supplied by the platform firmware
+ * @properties: additional device properties for the device
* @irq: stored in i2c_client.irq
*
* I2C doesn't actually support hardware probing, although controllers and
@@ -320,6 +322,7 @@ struct i2c_board_info {
struct dev_archdata *archdata;
struct device_node *of_node;
struct fwnode_handle *fwnode;
+ const struct property_entry *properties;
int irq;
};
@@ -665,6 +668,7 @@ i2c_unlock_adapter(struct i2c_adapter *adapter)
#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
/* Must equal I2C_M_TEN below */
#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */
+#define I2C_CLIENT_HOST_NOTIFY 0x40 /* We want to use I2C host notify */
#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
/* Must match I2C_M_STOP|IGNORE_NAK */
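The new properties pointer lets board code hand generic device properties to the client at instantiation time; a minimal hedged example (device name, address and property are invented, and <linux/property.h> supplies property_entry and its helpers):

/* Hypothetical board-info snippet using the new @properties field. */
static const struct property_entry example_props[] = {
	PROPERTY_ENTRY_U32("rotation", 270),
	{ }
};

static const struct i2c_board_info example_board_info = {
	I2C_BOARD_INFO("example-sensor", 0x28),
	.properties = example_props,
};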
diff --git a/include/linux/ide.h b/include/linux/ide.h
index a633898..2f51c17 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -20,6 +20,7 @@
#include <linux/mutex.h>
/* for request_sense */
#include <linux/cdrom.h>
+#include <scsi/scsi_cmnd.h>
#include <asm/byteorder.h>
#include <asm/io.h>
@@ -39,18 +40,53 @@
struct device;
-/* IDE-specific values for req->cmd_type */
-enum ata_cmd_type_bits {
- REQ_TYPE_ATA_TASKFILE = REQ_TYPE_DRV_PRIV + 1,
- REQ_TYPE_ATA_PC,
- REQ_TYPE_ATA_SENSE, /* sense request */
- REQ_TYPE_ATA_PM_SUSPEND,/* suspend request */
- REQ_TYPE_ATA_PM_RESUME, /* resume request */
+/* values for ide_request.type */
+enum ata_priv_type {
+ ATA_PRIV_MISC,
+ ATA_PRIV_TASKFILE,
+ ATA_PRIV_PC,
+ ATA_PRIV_SENSE, /* sense request */
+ ATA_PRIV_PM_SUSPEND, /* suspend request */
+ ATA_PRIV_PM_RESUME, /* resume request */
};
-#define ata_pm_request(rq) \
- ((rq)->cmd_type == REQ_TYPE_ATA_PM_SUSPEND || \
- (rq)->cmd_type == REQ_TYPE_ATA_PM_RESUME)
+struct ide_request {
+ struct scsi_request sreq;
+ u8 sense[SCSI_SENSE_BUFFERSIZE];
+ u8 type;
+};
+
+static inline struct ide_request *ide_req(struct request *rq)
+{
+ return blk_mq_rq_to_pdu(rq);
+}
+
+static inline bool ata_misc_request(struct request *rq)
+{
+ return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_MISC;
+}
+
+static inline bool ata_taskfile_request(struct request *rq)
+{
+ return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_TASKFILE;
+}
+
+static inline bool ata_pc_request(struct request *rq)
+{
+ return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_PC;
+}
+
+static inline bool ata_sense_request(struct request *rq)
+{
+ return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_SENSE;
+}
+
+static inline bool ata_pm_request(struct request *rq)
+{
+ return blk_rq_is_private(rq) &&
+ (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND ||
+ ide_req(rq)->type == ATA_PRIV_PM_RESUME);
+}
/* Error codes returned in rq->errors to the higher part of the driver. */
enum {
@@ -579,7 +615,7 @@ struct ide_drive_s {
/* current sense rq and buffer */
bool sense_rq_armed;
- struct request sense_rq;
+ struct request *sense_rq;
struct request_sense sense_data;
};
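With the request type moved into the driver-private pdu, IDE code tags a request once and classifies it through the new helpers instead of through rq->cmd_type; an illustrative sketch, with the surrounding request allocation and dispatch assumed:

/* Sketch: the ATA-private type now lives in the request pdu. */
static void example_mark_and_check(struct request *rq)
{
	ide_req(rq)->type = ATA_PRIV_TASKFILE;

	if (ata_taskfile_request(rq)) {
		/* dispatch the taskfile command */
	}
}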
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index fe84932..0dd9498 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -185,6 +185,8 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
/* number of user priorities 802.11 uses */
#define IEEE80211_NUM_UPS 8
+/* number of ACs */
+#define IEEE80211_NUM_ACS 4
#define IEEE80211_QOS_CTL_LEN 2
/* 1d tag mask */
@@ -1041,8 +1043,9 @@ struct ieee80211_mgmt {
} u;
} __packed __aligned(2);
-/* Supported Rates value encodings in 802.11n-2009 7.3.2.2 */
+/* Supported rates membership selectors */
#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127
+#define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126
/* mgmt header + 1 byte category code */
#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
@@ -2322,31 +2325,33 @@ enum ieee80211_sa_query_action {
};
+#define SUITE(oui, id) (((oui) << 8) | (id))
+
/* cipher suite selectors */
-#define WLAN_CIPHER_SUITE_USE_GROUP 0x000FAC00
-#define WLAN_CIPHER_SUITE_WEP40 0x000FAC01
-#define WLAN_CIPHER_SUITE_TKIP 0x000FAC02
-/* reserved: 0x000FAC03 */
-#define WLAN_CIPHER_SUITE_CCMP 0x000FAC04
-#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05
-#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06
-#define WLAN_CIPHER_SUITE_GCMP 0x000FAC08
-#define WLAN_CIPHER_SUITE_GCMP_256 0x000FAC09
-#define WLAN_CIPHER_SUITE_CCMP_256 0x000FAC0A
-#define WLAN_CIPHER_SUITE_BIP_GMAC_128 0x000FAC0B
-#define WLAN_CIPHER_SUITE_BIP_GMAC_256 0x000FAC0C
-#define WLAN_CIPHER_SUITE_BIP_CMAC_256 0x000FAC0D
-
-#define WLAN_CIPHER_SUITE_SMS4 0x00147201
+#define WLAN_CIPHER_SUITE_USE_GROUP SUITE(0x000FAC, 0)
+#define WLAN_CIPHER_SUITE_WEP40 SUITE(0x000FAC, 1)
+#define WLAN_CIPHER_SUITE_TKIP SUITE(0x000FAC, 2)
+/* reserved: SUITE(0x000FAC, 3) */
+#define WLAN_CIPHER_SUITE_CCMP SUITE(0x000FAC, 4)
+#define WLAN_CIPHER_SUITE_WEP104 SUITE(0x000FAC, 5)
+#define WLAN_CIPHER_SUITE_AES_CMAC SUITE(0x000FAC, 6)
+#define WLAN_CIPHER_SUITE_GCMP SUITE(0x000FAC, 8)
+#define WLAN_CIPHER_SUITE_GCMP_256 SUITE(0x000FAC, 9)
+#define WLAN_CIPHER_SUITE_CCMP_256 SUITE(0x000FAC, 10)
+#define WLAN_CIPHER_SUITE_BIP_GMAC_128 SUITE(0x000FAC, 11)
+#define WLAN_CIPHER_SUITE_BIP_GMAC_256 SUITE(0x000FAC, 12)
+#define WLAN_CIPHER_SUITE_BIP_CMAC_256 SUITE(0x000FAC, 13)
+
+#define WLAN_CIPHER_SUITE_SMS4 SUITE(0x001472, 1)
/* AKM suite selectors */
-#define WLAN_AKM_SUITE_8021X 0x000FAC01
-#define WLAN_AKM_SUITE_PSK 0x000FAC02
-#define WLAN_AKM_SUITE_8021X_SHA256 0x000FAC05
-#define WLAN_AKM_SUITE_PSK_SHA256 0x000FAC06
-#define WLAN_AKM_SUITE_TDLS 0x000FAC07
-#define WLAN_AKM_SUITE_SAE 0x000FAC08
-#define WLAN_AKM_SUITE_FT_OVER_SAE 0x000FAC09
+#define WLAN_AKM_SUITE_8021X SUITE(0x000FAC, 1)
+#define WLAN_AKM_SUITE_PSK SUITE(0x000FAC, 2)
+#define WLAN_AKM_SUITE_8021X_SHA256 SUITE(0x000FAC, 5)
+#define WLAN_AKM_SUITE_PSK_SHA256 SUITE(0x000FAC, 6)
+#define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7)
+#define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8)
+#define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9)
#define WLAN_MAX_KEY_LEN 32
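Since SUITE() is just the 24-bit OUI shifted left by eight bits with the selector in the low byte, the rewritten constants keep their old numeric values; a compile-time spot check (sketch only, needs <linux/bug.h>):

/* Compile-time check that the macro rewrite is value-preserving. */
static inline void wlan_suite_selftest(void)
{
	BUILD_BUG_ON(WLAN_CIPHER_SUITE_CCMP != 0x000FAC04);
	BUILD_BUG_ON(WLAN_AKM_SUITE_PSK != 0x000FAC02);
	BUILD_BUG_ON(WLAN_CIPHER_SUITE_SMS4 != 0x00147201);
}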
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index c6587c0..c5847dc 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -46,6 +46,8 @@ struct br_ip_list {
#define BR_LEARNING_SYNC BIT(9)
#define BR_PROXYARP_WIFI BIT(10)
#define BR_MCAST_FLOOD BIT(11)
+#define BR_MULTICAST_TO_UNICAST BIT(12)
+#define BR_VLAN_TUNNEL BIT(13)
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h
index 4316aa1..46df7e5 100644
--- a/include/linux/if_frad.h
+++ b/include/linux/if_frad.h
@@ -66,8 +66,6 @@ struct dlci_local
struct frad_local
{
- struct net_device_stats stats;
-
/* devices which this FRAD is slaved to */
struct net_device *master[CONFIG_DLCI_MAX];
short dlci[CONFIG_DLCI_MAX];
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index a4ccc31..c9ec134 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -9,19 +9,6 @@
#include <net/netlink.h>
#include <linux/u64_stats_sync.h>
-#if IS_ENABLED(CONFIG_MACVTAP)
-struct socket *macvtap_get_socket(struct file *);
-#else
-#include <linux/err.h>
-#include <linux/errno.h>
-struct file;
-struct socket;
-static inline struct socket *macvtap_get_socket(struct file *f)
-{
- return ERR_PTR(-EINVAL);
-}
-#endif /* CONFIG_MACVTAP */
-
struct macvlan_port;
struct macvtap_queue;
@@ -29,7 +16,7 @@ struct macvtap_queue;
* Maximum times a macvtap device can be opened. This can be used to
* configure the number of receive queue, e.g. for multiqueue virtio.
*/
-#define MAX_MACVTAP_QUEUES 256
+#define MAX_TAP_QUEUES 256
#define MACVLAN_MC_FILTER_BITS 8
#define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS)
@@ -49,7 +36,7 @@ struct macvlan_dev {
enum macvlan_mode mode;
u16 flags;
/* This array tracks active taps. */
- struct macvtap_queue __rcu *taps[MAX_MACVTAP_QUEUES];
+ struct tap_queue __rcu *taps[MAX_TAP_QUEUES];
/* This list tracks all taps (both enabled and disabled) */
struct list_head queue_list;
int numvtaps;
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h
new file mode 100644
index 0000000..3482c3c
--- /dev/null
+++ b/include/linux/if_tap.h
@@ -0,0 +1,75 @@
+#ifndef _LINUX_IF_TAP_H_
+#define _LINUX_IF_TAP_H_
+
+#if IS_ENABLED(CONFIG_TAP)
+struct socket *tap_get_socket(struct file *);
+#else
+#include <linux/err.h>
+#include <linux/errno.h>
+struct file;
+struct socket;
+static inline struct socket *tap_get_socket(struct file *f)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif /* CONFIG_TAP */
+
+#include <net/sock.h>
+#include <linux/skb_array.h>
+
+#define MAX_TAP_QUEUES 256
+
+struct tap_queue;
+
+struct tap_dev {
+ struct net_device *dev;
+ u16 flags;
+ /* This array tracks active taps. */
+ struct tap_queue __rcu *taps[MAX_TAP_QUEUES];
+ /* This list tracks all taps (both enabled and disabled) */
+ struct list_head queue_list;
+ int numvtaps;
+ int numqueues;
+ netdev_features_t tap_features;
+ int minor;
+
+ void (*update_features)(struct tap_dev *tap, netdev_features_t features);
+ void (*count_tx_dropped)(struct tap_dev *tap);
+ void (*count_rx_dropped)(struct tap_dev *tap);
+};
+
+/*
+ * A tap queue is the central object of the tap module; it connects
+ * an open character device to a virtual interface. There can be
+ * multiple queues on one interface, which map back to queues
+ * implemented in hardware on the underlying device.
+ *
+ * tap_proto is used to allocate queues through the sock allocation
+ * mechanism.
+ *
+ */
+
+struct tap_queue {
+ struct sock sk;
+ struct socket sock;
+ struct socket_wq wq;
+ int vnet_hdr_sz;
+ struct tap_dev __rcu *tap;
+ struct file *file;
+ unsigned int flags;
+ u16 queue_index;
+ bool enabled;
+ struct list_head next;
+ struct skb_array skb_array;
+};
+
+rx_handler_result_t tap_handle_frame(struct sk_buff **pskb);
+void tap_del_queues(struct tap_dev *tap);
+int tap_get_minor(dev_t major, struct tap_dev *tap);
+void tap_free_minor(dev_t major, struct tap_dev *tap);
+int tap_queue_resize(struct tap_dev *tap);
+int tap_create_cdev(struct cdev *tap_cdev,
+ dev_t *tap_major, const char *device_name);
+void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev);
+
+#endif /*_LINUX_IF_TAP_H_*/
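A device that wants to expose tap queues (macvtap being the existing user this interface is carved out of) embeds a struct tap_dev and drives the character-device plumbing with the helpers declared above; a rough, hypothetical registration sketch with error handling omitted and all example_* names made up:

/* Hypothetical skeleton of a tap user. */
static struct cdev example_tap_cdev;
static dev_t example_tap_major;

static int __init example_tap_init(void)
{
	return tap_create_cdev(&example_tap_cdev, &example_tap_major,
			       "exampletap");
}

static int example_tap_attach(struct tap_dev *tap)
{
	/* reserve a minor under our major for this tap_dev */
	return tap_get_minor(example_tap_major, tap);
}

static void __exit example_tap_exit(void)
{
	tap_destroy_cdev(example_tap_major, &example_tap_cdev);
}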
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index 70a5164..48767c7 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -11,139 +11,15 @@
#define _IIO_BUFFER_GENERIC_H_
#include <linux/sysfs.h>
#include <linux/iio/iio.h>
-#include <linux/kref.h>
-
-#ifdef CONFIG_IIO_BUFFER
struct iio_buffer;
-/**
- * INDIO_BUFFER_FLAG_FIXED_WATERMARK - Watermark level of the buffer can not be
- * configured. It has a fixed value which will be buffer specific.
- */
-#define INDIO_BUFFER_FLAG_FIXED_WATERMARK BIT(0)
-
-/**
- * struct iio_buffer_access_funcs - access functions for buffers.
- * @store_to: actually store stuff to the buffer
- * @read_first_n: try to get a specified number of bytes (must exist)
- * @data_available: indicates how much data is available for reading from
- * the buffer.
- * @request_update: if a parameter change has been marked, update underlying
- * storage.
- * @set_bytes_per_datum:set number of bytes per datum
- * @set_length: set number of datums in buffer
- * @enable: called if the buffer is attached to a device and the
- * device starts sampling. Calls are balanced with
- * @disable.
- * @disable: called if the buffer is attached to a device and the
- * device stops sampling. Calles are balanced with @enable.
- * @release: called when the last reference to the buffer is dropped,
- * should free all resources allocated by the buffer.
- * @modes: Supported operating modes by this buffer type
- * @flags: A bitmask combination of INDIO_BUFFER_FLAG_*
- *
- * The purpose of this structure is to make the buffer element
- * modular as event for a given driver, different usecases may require
- * different buffer designs (space efficiency vs speed for example).
- *
- * It is worth noting that a given buffer implementation may only support a
- * small proportion of these functions. The core code 'should' cope fine with
- * any of them not existing.
- **/
-struct iio_buffer_access_funcs {
- int (*store_to)(struct iio_buffer *buffer, const void *data);
- int (*read_first_n)(struct iio_buffer *buffer,
- size_t n,
- char __user *buf);
- size_t (*data_available)(struct iio_buffer *buffer);
-
- int (*request_update)(struct iio_buffer *buffer);
-
- int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
- int (*set_length)(struct iio_buffer *buffer, int length);
-
- int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
- int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
-
- void (*release)(struct iio_buffer *buffer);
-
- unsigned int modes;
- unsigned int flags;
-};
-
-/**
- * struct iio_buffer - general buffer structure
- * @length: [DEVICE] number of datums in buffer
- * @bytes_per_datum: [DEVICE] size of individual datum including timestamp
- * @scan_el_attrs: [DRIVER] control of scan elements if that scan mode
- * control method is used
- * @scan_mask: [INTERN] bitmask used in masking scan mode elements
- * @scan_timestamp: [INTERN] does the scan mode include a timestamp
- * @access: [DRIVER] buffer access functions associated with the
- * implementation.
- * @scan_el_dev_attr_list:[INTERN] list of scan element related attributes.
- * @buffer_group: [INTERN] attributes of the buffer group
- * @scan_el_group: [DRIVER] attribute group for those attributes not
- * created from the iio_chan_info array.
- * @pollq: [INTERN] wait queue to allow for polling on the buffer.
- * @stufftoread: [INTERN] flag to indicate new data.
- * @attrs: [INTERN] standard attributes of the buffer
- * @demux_list: [INTERN] list of operations required to demux the scan.
- * @demux_bounce: [INTERN] buffer for doing gather from incoming scan.
- * @buffer_list: [INTERN] entry in the devices list of current buffers.
- * @ref: [INTERN] reference count of the buffer.
- * @watermark: [INTERN] number of datums to wait for poll/read.
- */
-struct iio_buffer {
- int length;
- int bytes_per_datum;
- struct attribute_group *scan_el_attrs;
- long *scan_mask;
- bool scan_timestamp;
- const struct iio_buffer_access_funcs *access;
- struct list_head scan_el_dev_attr_list;
- struct attribute_group buffer_group;
- struct attribute_group scan_el_group;
- wait_queue_head_t pollq;
- bool stufftoread;
- const struct attribute **attrs;
- struct list_head demux_list;
- void *demux_bounce;
- struct list_head buffer_list;
- struct kref ref;
- unsigned int watermark;
-};
-
-/**
- * iio_update_buffers() - add or remove buffer from active list
- * @indio_dev: device to add buffer to
- * @insert_buffer: buffer to insert
- * @remove_buffer: buffer_to_remove
- *
- * Note this will tear down the all buffering and build it up again
- */
-int iio_update_buffers(struct iio_dev *indio_dev,
- struct iio_buffer *insert_buffer,
- struct iio_buffer *remove_buffer);
-
-/**
- * iio_buffer_init() - Initialize the buffer structure
- * @buffer: buffer to be initialized
- **/
-void iio_buffer_init(struct iio_buffer *buffer);
+void iio_buffer_set_attrs(struct iio_buffer *buffer,
+ const struct attribute **attrs);
-int iio_scan_mask_query(struct iio_dev *indio_dev,
- struct iio_buffer *buffer, int bit);
-
-/**
- * iio_push_to_buffers() - push to a registered buffer.
- * @indio_dev: iio_dev structure for device.
- * @data: Full scan.
- */
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data);
-/*
+/**
* iio_push_to_buffers_with_timestamp() - push data and timestamp to buffers
* @indio_dev: iio_dev structure for device.
* @data: sample data
@@ -168,34 +44,10 @@ static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev,
return iio_push_to_buffers(indio_dev, data);
}
-int iio_update_demux(struct iio_dev *indio_dev);
-
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
- const unsigned long *mask);
-
-struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer);
-void iio_buffer_put(struct iio_buffer *buffer);
-
-/**
- * iio_device_attach_buffer - Attach a buffer to a IIO device
- * @indio_dev: The device the buffer should be attached to
- * @buffer: The buffer to attach to the device
- *
- * This function attaches a buffer to a IIO device. The buffer stays attached to
- * the device until the device is freed. The function should only be called at
- * most once per device.
- */
-static inline void iio_device_attach_buffer(struct iio_dev *indio_dev,
- struct iio_buffer *buffer)
-{
- indio_dev->buffer = iio_buffer_get(buffer);
-}
-
-#else /* CONFIG_IIO_BUFFER */
-
-static inline void iio_buffer_get(struct iio_buffer *buffer) {}
-static inline void iio_buffer_put(struct iio_buffer *buffer) {}
+ const unsigned long *mask);
-#endif /* CONFIG_IIO_BUFFER */
+void iio_device_attach_buffer(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer);
#endif /* _IIO_BUFFER_GENERIC_H_ */
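For driver authors the consumer-facing surface is now just allocating a buffer, optionally decorating it with attributes, attaching it, and pushing scans; a small sketch assuming the kfifo allocator from <linux/iio/kfifo_buf.h>:

/* Sketch: attach a kfifo-backed buffer and push a scan with timestamp. */
static int example_setup_buffer(struct iio_dev *indio_dev,
				const struct attribute **extra_attrs)
{
	struct iio_buffer *buffer = iio_kfifo_allocate();

	if (!buffer)
		return -ENOMEM;

	iio_buffer_set_attrs(buffer, extra_attrs);
	iio_device_attach_buffer(indio_dev, buffer);
	return 0;
}

static int example_push(struct iio_dev *indio_dev, void *scan, s64 timestamp)
{
	return iio_push_to_buffers_with_timestamp(indio_dev, scan, timestamp);
}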
diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h
new file mode 100644
index 0000000..8daba19
--- /dev/null
+++ b/include/linux/iio/buffer_impl.h
@@ -0,0 +1,162 @@
+#ifndef _IIO_BUFFER_GENERIC_IMPL_H_
+#define _IIO_BUFFER_GENERIC_IMPL_H_
+#include <linux/sysfs.h>
+#include <linux/kref.h>
+
+#ifdef CONFIG_IIO_BUFFER
+
+struct iio_dev;
+struct iio_buffer;
+
+/**
+ * INDIO_BUFFER_FLAG_FIXED_WATERMARK - Watermark level of the buffer can not be
+ * configured. It has a fixed value which will be buffer specific.
+ */
+#define INDIO_BUFFER_FLAG_FIXED_WATERMARK BIT(0)
+
+/**
+ * struct iio_buffer_access_funcs - access functions for buffers.
+ * @store_to: actually store stuff to the buffer
+ * @read_first_n: try to get a specified number of bytes (must exist)
+ * @data_available: indicates how much data is available for reading from
+ * the buffer.
+ * @request_update: if a parameter change has been marked, update underlying
+ * storage.
+ * @set_bytes_per_datum:set number of bytes per datum
+ * @set_length: set number of datums in buffer
+ * @enable: called if the buffer is attached to a device and the
+ * device starts sampling. Calls are balanced with
+ * @disable.
+ * @disable: called if the buffer is attached to a device and the
+ * device stops sampling. Calls are balanced with @enable.
+ * @release: called when the last reference to the buffer is dropped,
+ * should free all resources allocated by the buffer.
+ * @modes: Supported operating modes by this buffer type
+ * @flags: A bitmask combination of INDIO_BUFFER_FLAG_*
+ *
+ * The purpose of this structure is to make the buffer element
+ * modular as, even for a given driver, different use cases may require
+ * different buffer designs (space efficiency vs speed for example).
+ *
+ * It is worth noting that a given buffer implementation may only support a
+ * small proportion of these functions. The core code 'should' cope fine with
+ * any of them not existing.
+ **/
+struct iio_buffer_access_funcs {
+ int (*store_to)(struct iio_buffer *buffer, const void *data);
+ int (*read_first_n)(struct iio_buffer *buffer,
+ size_t n,
+ char __user *buf);
+ size_t (*data_available)(struct iio_buffer *buffer);
+
+ int (*request_update)(struct iio_buffer *buffer);
+
+ int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
+ int (*set_length)(struct iio_buffer *buffer, int length);
+
+ int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
+ int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
+
+ void (*release)(struct iio_buffer *buffer);
+
+ unsigned int modes;
+ unsigned int flags;
+};
+
+/**
+ * struct iio_buffer - general buffer structure
+ *
+ * Note that the internals of this structure should only be of interest to
+ * those writing new buffer implementations.
+ */
+struct iio_buffer {
+ /** @length: Number of datums in buffer. */
+ int length;
+
+ /** @bytes_per_datum: Size of individual datum including timestamp. */
+ int bytes_per_datum;
+
+ /**
+ * @access: Buffer access functions associated with the
+ * implementation.
+ */
+ const struct iio_buffer_access_funcs *access;
+
+ /** @scan_mask: Bitmask used in masking scan mode elements. */
+ long *scan_mask;
+
+ /** @demux_list: List of operations required to demux the scan. */
+ struct list_head demux_list;
+
+ /** @pollq: Wait queue to allow for polling on the buffer. */
+ wait_queue_head_t pollq;
+
+ /** @watermark: Number of datums to wait for poll/read. */
+ unsigned int watermark;
+
+ /* private: */
+ /*
+ * @scan_el_attrs: Control of scan elements if that scan mode
+ * control method is used.
+ */
+ struct attribute_group *scan_el_attrs;
+
+ /* @scan_timestamp: Does the scan mode include a timestamp. */
+ bool scan_timestamp;
+
+ /* @scan_el_dev_attr_list: List of scan element related attributes. */
+ struct list_head scan_el_dev_attr_list;
+
+ /* @buffer_group: Attributes of the buffer group. */
+ struct attribute_group buffer_group;
+
+ /*
+ * @scan_el_group: Attribute group for those attributes not
+ * created from the iio_chan_info array.
+ */
+ struct attribute_group scan_el_group;
+
+ /* @stufftoread: Flag to indicate new data. */
+ bool stufftoread;
+
+ /* @attrs: Standard attributes of the buffer. */
+ const struct attribute **attrs;
+
+ /* @demux_bounce: Buffer for doing gather from incoming scan. */
+ void *demux_bounce;
+
+ /* @buffer_list: Entry in the devices list of current buffers. */
+ struct list_head buffer_list;
+
+ /* @ref: Reference count of the buffer. */
+ struct kref ref;
+};
+
+/**
+ * iio_update_buffers() - add or remove buffer from active list
+ * @indio_dev: device to add buffer to
+ * @insert_buffer: buffer to insert
+ * @remove_buffer: buffer_to_remove
+ *
+ * Note this will tear down the all buffering and build it up again
+ */
+int iio_update_buffers(struct iio_dev *indio_dev,
+ struct iio_buffer *insert_buffer,
+ struct iio_buffer *remove_buffer);
+
+/**
+ * iio_buffer_init() - Initialize the buffer structure
+ * @buffer: buffer to be initialized
+ **/
+void iio_buffer_init(struct iio_buffer *buffer);
+
+struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer);
+void iio_buffer_put(struct iio_buffer *buffer);
+
+#else /* CONFIG_IIO_BUFFER */
+
+static inline void iio_buffer_get(struct iio_buffer *buffer) {}
+static inline void iio_buffer_put(struct iio_buffer *buffer) {}
+
+#endif /* CONFIG_IIO_BUFFER */
+#endif /* _IIO_BUFFER_GENERIC_IMPL_H_ */
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 228bd44..497f2b3 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -116,6 +116,16 @@ struct st_sensor_bdu {
};
/**
+ * struct st_sensor_das - ST sensor device data alignment selection
+ * @addr: address of the register.
+ * @mask: mask to write the das flag for left alignment.
+ */
+struct st_sensor_das {
+ u8 addr;
+ u8 mask;
+};
+
+/**
* struct st_sensor_data_ready_irq - ST sensor device data-ready interrupt
* @addr: address of the register.
* @mask_int1: mask to enable/disable IRQ on INT1 pin.
@@ -185,6 +195,7 @@ struct st_sensor_transfer_function {
* @enable_axis: Enable one or more axis of the sensor.
* @fs: Full scale register and full scale list available.
* @bdu: Block data update register.
+ * @das: Data Alignment Selection register.
* @drdy_irq: Data ready register of the sensor.
* @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read.
* @bootime: samples to discard when sensor passing from power-down to power-up.
@@ -200,6 +211,7 @@ struct st_sensor_settings {
struct st_sensor_axis enable_axis;
struct st_sensor_fullscale fs;
struct st_sensor_bdu bdu;
+ struct st_sensor_das das;
struct st_sensor_data_ready_irq drdy_irq;
bool multi_read_bit;
unsigned int bootime;
diff --git a/include/linux/iio/common/st_sensors_i2c.h b/include/linux/iio/common/st_sensors_i2c.h
index 1796af0..254de3c 100644
--- a/include/linux/iio/common/st_sensors_i2c.h
+++ b/include/linux/iio/common/st_sensors_i2c.h
@@ -28,4 +28,13 @@ static inline void st_sensors_of_i2c_probe(struct i2c_client *client,
}
#endif
+#ifdef CONFIG_ACPI
+int st_sensors_match_acpi_device(struct device *dev);
+#else
+static inline int st_sensors_match_acpi_device(struct device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
#endif /* ST_SENSORS_I2C_H */
diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h
index 1683bc7..027cfa9 100644
--- a/include/linux/iio/kfifo_buf.h
+++ b/include/linux/iio/kfifo_buf.h
@@ -1,9 +1,8 @@
#ifndef __LINUX_IIO_KFIFO_BUF_H__
#define __LINUX_IIO_KFIFO_BUF_H__
-#include <linux/kfifo.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/buffer.h>
+struct iio_buffer;
+struct device;
struct iio_buffer *iio_kfifo_allocate(void);
void iio_kfifo_free(struct iio_buffer *r);
diff --git a/include/linux/iio/timer/stm32-timer-trigger.h b/include/linux/iio/timer/stm32-timer-trigger.h
new file mode 100644
index 0000000..55535ae
--- /dev/null
+++ b/include/linux/iio/timer/stm32-timer-trigger.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) STMicroelectronics 2016
+ *
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com>
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STM32_TIMER_TRIGGER_H_
+#define _STM32_TIMER_TRIGGER_H_
+
+#define TIM1_TRGO "tim1_trgo"
+#define TIM1_CH1 "tim1_ch1"
+#define TIM1_CH2 "tim1_ch2"
+#define TIM1_CH3 "tim1_ch3"
+#define TIM1_CH4 "tim1_ch4"
+
+#define TIM2_TRGO "tim2_trgo"
+#define TIM2_CH1 "tim2_ch1"
+#define TIM2_CH2 "tim2_ch2"
+#define TIM2_CH3 "tim2_ch3"
+#define TIM2_CH4 "tim2_ch4"
+
+#define TIM3_TRGO "tim3_trgo"
+#define TIM3_CH1 "tim3_ch1"
+#define TIM3_CH2 "tim3_ch2"
+#define TIM3_CH3 "tim3_ch3"
+#define TIM3_CH4 "tim3_ch4"
+
+#define TIM4_TRGO "tim4_trgo"
+#define TIM4_CH1 "tim4_ch1"
+#define TIM4_CH2 "tim4_ch2"
+#define TIM4_CH3 "tim4_ch3"
+#define TIM4_CH4 "tim4_ch4"
+
+#define TIM5_TRGO "tim5_trgo"
+#define TIM5_CH1 "tim5_ch1"
+#define TIM5_CH2 "tim5_ch2"
+#define TIM5_CH3 "tim5_ch3"
+#define TIM5_CH4 "tim5_ch4"
+
+#define TIM6_TRGO "tim6_trgo"
+
+#define TIM7_TRGO "tim7_trgo"
+
+#define TIM8_TRGO "tim8_trgo"
+#define TIM8_CH1 "tim8_ch1"
+#define TIM8_CH2 "tim8_ch2"
+#define TIM8_CH3 "tim8_ch3"
+#define TIM8_CH4 "tim8_ch4"
+
+#define TIM9_TRGO "tim9_trgo"
+#define TIM9_CH1 "tim9_ch1"
+#define TIM9_CH2 "tim9_ch2"
+
+#define TIM12_TRGO "tim12_trgo"
+#define TIM12_CH1 "tim12_ch1"
+#define TIM12_CH2 "tim12_ch2"
+
+bool is_stm32_timer_trigger(struct iio_trigger *trig);
+
+#endif
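Consumers (an STM32 ADC driver, say) are expected to use is_stm32_timer_trigger() to restrict which triggers they accept; a hedged sketch of a validate_trigger callback, with the surrounding iio plumbing assumed:

/* Sketch: accept only STM32 timer triggers in an iio_info callback. */
static int example_validate_trigger(struct iio_dev *indio_dev,
				    struct iio_trigger *trig)
{
	if (!is_stm32_timer_trigger(trig))
		return -EINVAL;

	return 0;
}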
diff --git a/include/linux/init.h b/include/linux/init.h
index 885c3e6..79af096 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -126,10 +126,10 @@ void prepare_namespace(void);
void __init load_default_modules(void);
int __init init_rootfs(void);
-#if defined(CONFIG_DEBUG_RODATA) || defined(CONFIG_DEBUG_SET_MODULE_RONX)
+#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
extern bool rodata_enabled;
#endif
-#ifdef CONFIG_DEBUG_RODATA
+#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void);
#endif
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 325f649..3a85d61 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -42,6 +42,27 @@ extern struct fs_struct init_fs;
#define INIT_PREV_CPUTIME(x)
#endif
+#ifdef CONFIG_POSIX_TIMERS
+#define INIT_POSIX_TIMERS(s) \
+ .posix_timers = LIST_HEAD_INIT(s.posix_timers),
+#define INIT_CPU_TIMERS(s) \
+ .cpu_timers = { \
+ LIST_HEAD_INIT(s.cpu_timers[0]), \
+ LIST_HEAD_INIT(s.cpu_timers[1]), \
+ LIST_HEAD_INIT(s.cpu_timers[2]), \
+ },
+#define INIT_CPUTIMER(s) \
+ .cputimer = { \
+ .cputime_atomic = INIT_CPUTIME_ATOMIC, \
+ .running = false, \
+ .checking_timer = false, \
+ },
+#else
+#define INIT_POSIX_TIMERS(s)
+#define INIT_CPU_TIMERS(s)
+#define INIT_CPUTIMER(s)
+#endif
+
#define INIT_SIGNALS(sig) { \
.nr_threads = 1, \
.thread_head = LIST_HEAD_INIT(init_task.thread_node), \
@@ -49,14 +70,10 @@ extern struct fs_struct init_fs;
.shared_pending = { \
.list = LIST_HEAD_INIT(sig.shared_pending.list), \
.signal = {{0}}}, \
- .posix_timers = LIST_HEAD_INIT(sig.posix_timers), \
- .cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \
+ INIT_POSIX_TIMERS(sig) \
+ INIT_CPU_TIMERS(sig) \
.rlim = INIT_RLIMITS, \
- .cputimer = { \
- .cputime_atomic = INIT_CPUTIME_ATOMIC, \
- .running = false, \
- .checking_timer = false, \
- }, \
+ INIT_CPUTIMER(sig) \
INIT_PREV_CPUTIME(sig) \
.cred_guard_mutex = \
__MUTEX_INITIALIZER(sig.cred_guard_mutex), \
@@ -247,7 +264,7 @@ extern struct task_group root_task_group;
.blocked = {{0}}, \
.alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
.journal_info = NULL, \
- .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
+ INIT_CPU_TIMERS(tsk) \
.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
.pids = { \
@@ -274,13 +291,6 @@ extern struct task_group root_task_group;
}
-#define INIT_CPU_TIMERS(cpu_timers) \
-{ \
- LIST_HEAD_INIT(cpu_timers[0]), \
- LIST_HEAD_INIT(cpu_timers[1]), \
- LIST_HEAD_INIT(cpu_timers[2]), \
-}
-
/* Attach to the init_task data structure for proper alignment */
#define __init_task_data __attribute__((__section__(".data..init_task")))
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index d49e26c..c573a52 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -29,6 +29,7 @@
#include <linux/dma_remapping.h>
#include <linux/mmu_notifier.h>
#include <linux/list.h>
+#include <linux/iommu.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -153,8 +154,8 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
-#define DMA_TLB_IIRG(type) ((type >> 60) & 7)
-#define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
+#define DMA_TLB_IIRG(type) ((type >> 60) & 3)
+#define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
@@ -164,9 +165,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
/* INVALID_DESC */
#define DMA_CCMD_INVL_GRANU_OFFSET 61
-#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3)
-#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3)
-#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3)
+#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4)
+#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 4)
+#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 4)
#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7)
#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16)))
@@ -316,8 +317,8 @@ enum {
#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
#define QI_DEV_EIOTLB_GLOB(g) ((u64)g)
#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
-#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
-#define QI_DEV_EIOTLB_QDEP(qd) (((qd) & 0x1f) << 16)
+#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
+#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
#define QI_DEV_EIOTLB_MAX_INVS 32
#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
@@ -439,7 +440,7 @@ struct intel_iommu {
struct irq_domain *ir_domain;
struct irq_domain *ir_msi_domain;
#endif
- struct device *iommu_dev; /* IOMMU-sysfs device */
+ struct iommu_device iommu; /* IOMMU core code handle */
int node;
u32 flags; /* Software defined flags */
};
diff --git a/include/linux/intel_pmic_gpio.h b/include/linux/intel_pmic_gpio.h
deleted file mode 100644
index 920109a..0000000
--- a/include/linux/intel_pmic_gpio.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef LINUX_INTEL_PMIC_H
-#define LINUX_INTEL_PMIC_H
-
-struct intel_pmic_gpio_platform_data {
- /* the first IRQ of the chip */
- unsigned irq_base;
- /* number assigned to the first GPIO */
- unsigned gpio_base;
- /* sram address for gpiointr register, the langwell chip will map
- * the PMIC spi GPIO expander's GPIOINTR register in sram.
- */
- unsigned gpiointr;
-};
-
-#endif
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 0ff5111..6a6de18 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -31,6 +31,13 @@
#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC (1 << 3)
#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */
+/*
+ * This is to make the IOMMU API setup privileged
+ * mapppings accessible by the master only at higher
+ * privileged execution level and inaccessible at
+ * less privileged levels.
+ */
+#define IOMMU_PRIV (1 << 5)
struct iommu_ops;
struct iommu_group;
@@ -117,18 +124,25 @@ enum iommu_attr {
DOMAIN_ATTR_MAX,
};
+/* These are the possible reserved region types */
+#define IOMMU_RESV_DIRECT (1 << 0)
+#define IOMMU_RESV_RESERVED (1 << 1)
+#define IOMMU_RESV_MSI (1 << 2)
+
/**
- * struct iommu_dm_region - descriptor for a direct mapped memory region
+ * struct iommu_resv_region - descriptor for a reserved memory region
* @list: Linked list pointers
* @start: System physical start address of the region
* @length: Length of the region in bytes
* @prot: IOMMU Protection flags (READ/WRITE/...)
+ * @type: Type of the reserved region
*/
-struct iommu_dm_region {
+struct iommu_resv_region {
struct list_head list;
phys_addr_t start;
size_t length;
int prot;
+ int type;
};
#ifdef CONFIG_IOMMU_API
@@ -150,9 +164,9 @@ struct iommu_dm_region {
* @device_group: find iommu group for a particular device
* @domain_get_attr: Query domain attributes
* @domain_set_attr: Change domain attributes
- * @get_dm_regions: Request list of direct mapping requirements for a device
- * @put_dm_regions: Free list of direct mapping requirements for a device
- * @apply_dm_region: Temporary helper call-back for iova reserved ranges
+ * @get_resv_regions: Request list of reserved regions for a device
+ * @put_resv_regions: Free list of reserved regions for a device
+ * @apply_resv_region: Temporary helper call-back for iova reserved ranges
* @domain_window_enable: Configure and enable a particular window for a domain
* @domain_window_disable: Disable a particular window for a domain
* @domain_set_windows: Set the number of windows for a domain
@@ -184,11 +198,12 @@ struct iommu_ops {
int (*domain_set_attr)(struct iommu_domain *domain,
enum iommu_attr attr, void *data);
- /* Request/Free a list of direct mapping requirements for a device */
- void (*get_dm_regions)(struct device *dev, struct list_head *list);
- void (*put_dm_regions)(struct device *dev, struct list_head *list);
- void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain,
- struct iommu_dm_region *region);
+ /* Request/Free a list of reserved regions for a device */
+ void (*get_resv_regions)(struct device *dev, struct list_head *list);
+ void (*put_resv_regions)(struct device *dev, struct list_head *list);
+ void (*apply_resv_region)(struct device *dev,
+ struct iommu_domain *domain,
+ struct iommu_resv_region *region);
/* Window handling functions */
int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
@@ -204,6 +219,42 @@ struct iommu_ops {
unsigned long pgsize_bitmap;
};
+/**
+ * struct iommu_device - IOMMU core representation of one IOMMU hardware
+ * instance
+ * @list: Used by the iommu-core to keep a list of registered iommus
+ * @ops: iommu-ops for talking to this iommu
+ * @dev: struct device for sysfs handling
+ */
+struct iommu_device {
+ struct list_head list;
+ const struct iommu_ops *ops;
+ struct fwnode_handle *fwnode;
+ struct device dev;
+};
+
+int iommu_device_register(struct iommu_device *iommu);
+void iommu_device_unregister(struct iommu_device *iommu);
+int iommu_device_sysfs_add(struct iommu_device *iommu,
+ struct device *parent,
+ const struct attribute_group **groups,
+ const char *fmt, ...) __printf(4, 5);
+void iommu_device_sysfs_remove(struct iommu_device *iommu);
+int iommu_device_link(struct iommu_device *iommu, struct device *link);
+void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
+
+static inline void iommu_device_set_ops(struct iommu_device *iommu,
+ const struct iommu_ops *ops)
+{
+ iommu->ops = ops;
+}
+
+static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
+ struct fwnode_handle *fwnode)
+{
+ iommu->fwnode = fwnode;
+}
+
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
@@ -233,9 +284,13 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
-extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
-extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
+extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
+extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern int iommu_request_dm_for_dev(struct device *dev);
+extern struct iommu_resv_region *
+iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, int type);
+extern int iommu_get_group_resv_regions(struct iommu_group *group,
+ struct list_head *head);
extern int iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group);
@@ -267,12 +322,6 @@ extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
void *data);
extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
void *data);
-struct device *iommu_device_create(struct device *parent, void *drvdata,
- const struct attribute_group **groups,
- const char *fmt, ...) __printf(4, 5);
-void iommu_device_destroy(struct device *dev);
-int iommu_device_link(struct device *dev, struct device *link);
-void iommu_device_unlink(struct device *dev, struct device *link);
/* Window handling function prototypes */
extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
@@ -352,15 +401,14 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
-void iommu_register_instance(struct fwnode_handle *fwnode,
- const struct iommu_ops *ops);
-const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode);
+const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
+struct iommu_device {};
static inline bool iommu_present(struct bus_type *bus)
{
@@ -443,16 +491,22 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
{
}
-static inline void iommu_get_dm_regions(struct device *dev,
+static inline void iommu_get_resv_regions(struct device *dev,
struct list_head *list)
{
}
-static inline void iommu_put_dm_regions(struct device *dev,
+static inline void iommu_put_resv_regions(struct device *dev,
struct list_head *list)
{
}
+static inline int iommu_get_group_resv_regions(struct iommu_group *group,
+ struct list_head *head)
+{
+ return -ENODEV;
+}
+
static inline int iommu_request_dm_for_dev(struct device *dev)
{
return -ENODEV;
@@ -546,15 +600,34 @@ static inline int iommu_domain_set_attr(struct iommu_domain *domain,
return -EINVAL;
}
-static inline struct device *iommu_device_create(struct device *parent,
- void *drvdata,
- const struct attribute_group **groups,
- const char *fmt, ...)
+static inline int iommu_device_register(struct iommu_device *iommu)
+{
+ return -ENODEV;
+}
+
+static inline void iommu_device_set_ops(struct iommu_device *iommu,
+ const struct iommu_ops *ops)
+{
+}
+
+static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
+ struct fwnode_handle *fwnode)
+{
+}
+
+static inline void iommu_device_unregister(struct iommu_device *iommu)
{
- return ERR_PTR(-ENODEV);
}
-static inline void iommu_device_destroy(struct device *dev)
+static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
+ struct device *parent,
+ const struct attribute_group **groups,
+ const char *fmt, ...)
+{
+ return -ENODEV;
+}
+
+static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}
@@ -584,13 +657,8 @@ static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
return -ENODEV;
}
-static inline void iommu_register_instance(struct fwnode_handle *fwnode,
- const struct iommu_ops *ops)
-{
-}
-
static inline
-const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
+const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
return NULL;
}
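
A minimal sketch of how a driver might adopt the iommu_device handle that replaces iommu_device_create()/iommu_device_destroy(), and how it could advertise a reserved MSI window through the renamed callbacks. All names here ("my_iommu", the ops table, the doorbell address) are assumptions for illustration, not part of this patch.

#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>

struct my_iommu {
        void __iomem            *regs;
        struct iommu_device     iommu;          /* core handle, replaces iommu_dev */
};

static const struct iommu_ops my_iommu_ops;     /* assumed to be filled in elsewhere */

static int my_iommu_probe(struct platform_device *pdev)
{
        struct my_iommu *im;
        int ret;

        im = devm_kzalloc(&pdev->dev, sizeof(*im), GFP_KERNEL);
        if (!im)
                return -ENOMEM;

        /* sysfs node first, then hand the instance to the IOMMU core */
        ret = iommu_device_sysfs_add(&im->iommu, &pdev->dev, NULL,
                                     "%s", dev_name(&pdev->dev));
        if (ret)
                return ret;

        iommu_device_set_ops(&im->iommu, &my_iommu_ops);
        iommu_device_set_fwnode(&im->iommu, pdev->dev.fwnode);

        ret = iommu_device_register(&im->iommu);
        if (ret)
                iommu_device_sysfs_remove(&im->iommu);
        return ret;
}

/* ->get_resv_regions() callback: report a (made-up) MSI doorbell window */
static void my_iommu_get_resv_regions(struct device *dev,
                                      struct list_head *head)
{
        struct iommu_resv_region *region;

        region = iommu_alloc_resv_region(0x08000000, SZ_1M,
                                         IOMMU_WRITE | IOMMU_MMIO,
                                         IOMMU_RESV_MSI);
        if (region)
                list_add_tail(&region->list, head);
}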
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 671d014..71be5b3 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -69,6 +69,7 @@ struct ipv6_devconf {
__s32 seg6_require_hmac;
#endif
__u32 enhanced_dad;
+ __u32 addr_gen_mode;
struct ctl_table_header *sysctl_header;
};
diff --git a/include/linux/irq.h b/include/linux/irq.h
index e798755..f887351 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -184,6 +184,7 @@ struct irq_data {
*
* IRQD_TRIGGER_MASK - Mask for the trigger type bits
* IRQD_SETAFFINITY_PENDING - Affinity setting is pending
+ * IRQD_ACTIVATED - Interrupt has already been activated
* IRQD_NO_BALANCING - Balancing disabled for this IRQ
* IRQD_PER_CPU - Interrupt is per cpu
* IRQD_AFFINITY_SET - Interrupt affinity was set
@@ -202,6 +203,7 @@ struct irq_data {
enum {
IRQD_TRIGGER_MASK = 0xf,
IRQD_SETAFFINITY_PENDING = (1 << 8),
+ IRQD_ACTIVATED = (1 << 9),
IRQD_NO_BALANCING = (1 << 10),
IRQD_PER_CPU = (1 << 11),
IRQD_AFFINITY_SET = (1 << 12),
@@ -312,6 +314,21 @@ static inline bool irqd_affinity_is_managed(struct irq_data *d)
return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
}
+static inline bool irqd_is_activated(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_ACTIVATED;
+}
+
+static inline void irqd_set_activated(struct irq_data *d)
+{
+ __irqd_to_state(d) |= IRQD_ACTIVATED;
+}
+
+static inline void irqd_clr_activated(struct irq_data *d)
+{
+ __irqd_to_state(d) &= ~IRQD_ACTIVATED;
+}
+
#undef __irqd_to_state
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
@@ -715,6 +732,10 @@ unsigned int arch_dynirq_lower_bound(unsigned int from);
int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
struct module *owner, const struct cpumask *affinity);
+int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
+ unsigned int cnt, int node, struct module *owner,
+ const struct cpumask *affinity);
+
/* use macros to avoid needing export.h for THIS_MODULE */
#define irq_alloc_descs(irq, from, cnt, node) \
__irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)
@@ -731,6 +752,21 @@ int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
#define irq_alloc_descs_from(from, cnt, node) \
irq_alloc_descs(-1, from, cnt, node)
+#define devm_irq_alloc_descs(dev, irq, from, cnt, node) \
+ __devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL)
+
+#define devm_irq_alloc_desc(dev, node) \
+ devm_irq_alloc_descs(dev, -1, 0, 1, node)
+
+#define devm_irq_alloc_desc_at(dev, at, node) \
+ devm_irq_alloc_descs(dev, at, at, 1, node)
+
+#define devm_irq_alloc_desc_from(dev, from, node) \
+ devm_irq_alloc_descs(dev, -1, from, 1, node)
+
+#define devm_irq_alloc_descs_from(dev, from, cnt, node) \
+ devm_irq_alloc_descs(dev, -1, from, cnt, node)
+
void irq_free_descs(unsigned int irq, unsigned int cnt);
static inline void irq_free_desc(unsigned int irq)
{
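
A brief sketch of the new devres variant in use: a driver can allocate a block of interrupt descriptors whose lifetime is bound to the device, so no irq_free_descs() is needed on the error or removal paths. "my_chip" and the count of 16 are assumptions.

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int my_chip_probe(struct platform_device *pdev)
{
        int irq_base;

        /* 16 consecutive descriptors, dynamically placed, on the device's node */
        irq_base = devm_irq_alloc_descs(&pdev->dev, -1, 0, 16,
                                        dev_to_node(&pdev->dev));
        if (irq_base < 0)
                return irq_base;

        dev_info(&pdev->dev, "virq base %d\n", irq_base);
        return 0;       /* descriptors are released automatically on detach */
}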
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index e808f8a..725e86b 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -73,7 +73,6 @@
#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1)
#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32)
-#define GICD_TYPER_LPIS (1U << 17)
#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
#define GICD_IROUTER_SPI_MODE_ANY (1U << 31)
@@ -306,7 +305,7 @@
#define GITS_BASER_TYPE_NONE 0
#define GITS_BASER_TYPE_DEVICE 1
#define GITS_BASER_TYPE_VCPU 2
-#define GITS_BASER_TYPE_CPU 3
+#define GITS_BASER_TYPE_RESERVED3 3
#define GITS_BASER_TYPE_COLLECTION 4
#define GITS_BASER_TYPE_RESERVED5 5
#define GITS_BASER_TYPE_RESERVED6 6
@@ -320,8 +319,6 @@
#define GITS_CMD_MAPD 0x08
#define GITS_CMD_MAPC 0x09
#define GITS_CMD_MAPTI 0x0a
-/* older GIC documentation used MAPVI for this command */
-#define GITS_CMD_MAPVI GITS_CMD_MAPTI
#define GITS_CMD_MAPI 0x0b
#define GITS_CMD_MOVI 0x01
#define GITS_CMD_DISCARD 0x0f
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index ffb8460..188eced 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -183,6 +183,12 @@ enum {
/* Irq domain is an IPI domain with single virq */
IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3),
+ /* Irq domain implements MSIs */
+ IRQ_DOMAIN_FLAG_MSI = (1 << 4),
+
+ /* Irq domain implements MSI remapping */
+ IRQ_DOMAIN_FLAG_MSI_REMAP = (1 << 5),
+
/*
* Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
* for implementation specific purposes and ignored by the
@@ -216,6 +222,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
void *host_data);
extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
enum irq_domain_bus_token bus_token);
+extern bool irq_domain_check_msi_remap(void);
extern void irq_set_default_host(struct irq_domain *host);
extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
irq_hw_number_t hwirq, int node,
@@ -446,6 +453,19 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
{
return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE;
}
+
+static inline bool irq_domain_is_msi(struct irq_domain *domain)
+{
+ return domain->flags & IRQ_DOMAIN_FLAG_MSI;
+}
+
+static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
+{
+ return domain->flags & IRQ_DOMAIN_FLAG_MSI_REMAP;
+}
+
+extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain);
+
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
static inline void irq_domain_activate_irq(struct irq_data *data) { }
static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
@@ -477,6 +497,22 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
{
return false;
}
+
+static inline bool irq_domain_is_msi(struct irq_domain *domain)
+{
+ return false;
+}
+
+static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
+{
+ return false;
+}
+
+static inline bool
+irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
+{
+ return false;
+}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
#else /* CONFIG_IRQ_DOMAIN */
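
The new MSI flags let a consumer ask whether MSIs are remapped somewhere on the interrupt path rather than only at the IOMMU. A hedged sketch of the kind of check this enables; the VFIO-style policy shown is illustrative, not part of this patch.

#include <linux/iommu.h>
#include <linux/irqdomain.h>

/*
 * MSI injection is considered safe if either the IOMMU remaps interrupts
 * or the MSI irqdomains in the system provide remapping themselves.
 */
static bool my_msi_isolation_present(struct bus_type *bus)
{
        return iommu_capable(bus, IOMMU_CAP_INTR_REMAP) ||
               irq_domain_check_msi_remap();
}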
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 589d14e..624215c 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -293,6 +293,8 @@ static inline u64 jiffies_to_nsecs(const unsigned long j)
return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
}
+extern u64 jiffies64_to_nsecs(u64 j);
+
extern unsigned long __msecs_to_jiffies(const unsigned int m);
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
/*
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index a0547c5..b63d6b7 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -402,6 +402,6 @@ extern bool ____wrong_branch_error(void);
#define static_branch_enable(x) static_key_enable(&(x)->key)
#define static_branch_disable(x) static_key_disable(&(x)->key)
-#endif /* _LINUX_JUMP_LABEL_H */
-
#endif /* __ASSEMBLY__ */
+
+#endif /* _LINUX_JUMP_LABEL_H */
diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
index 089f70f..23da3af 100644
--- a/include/linux/jump_label_ratelimit.h
+++ b/include/linux/jump_label_ratelimit.h
@@ -14,6 +14,7 @@ struct static_key_deferred {
#ifdef HAVE_JUMP_LABEL
extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
+extern void static_key_deferred_flush(struct static_key_deferred *key);
extern void
jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
@@ -26,6 +27,10 @@ static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
STATIC_KEY_CHECK_USE();
static_key_slow_dec(&key->key);
}
+static inline void static_key_deferred_flush(struct static_key_deferred *key)
+{
+ STATIC_KEY_CHECK_USE();
+}
static inline void
jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 56aec84..cb09238 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -514,8 +514,8 @@ extern enum system_states {
#define TAINT_FLAGS_COUNT 16
struct taint_flag {
- char true; /* character printed when tainted */
- char false; /* character printed when not tainted */
+ char c_true; /* character printed when tainted */
+ char c_false; /* character printed when not tainted */
bool module; /* also show as a per-module taint flag */
};
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 00f7768..66be8b6 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -9,7 +9,6 @@
#include <linux/sched.h>
#include <linux/vtime.h>
#include <asm/irq.h>
-#include <linux/cputime.h>
/*
* 'kernel_stat.h' contains the definitions needed for doing
@@ -78,15 +77,18 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
return kstat_cpu(cpu).irqs_sum;
}
-extern void account_user_time(struct task_struct *, cputime_t);
-extern void account_system_time(struct task_struct *, int, cputime_t);
-extern void account_steal_time(cputime_t);
-extern void account_idle_time(cputime_t);
+extern void account_user_time(struct task_struct *, u64);
+extern void account_guest_time(struct task_struct *, u64);
+extern void account_system_time(struct task_struct *, int, u64);
+extern void account_system_index_time(struct task_struct *, u64,
+ enum cpu_usage_stat);
+extern void account_steal_time(u64);
+extern void account_idle_time(u64);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
static inline void account_process_tick(struct task_struct *tsk, int user)
{
- vtime_account_user(tsk);
+ vtime_flush(tsk);
}
#else
extern void account_process_tick(struct task_struct *, int user);
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index fcfd2bf..c4e441e 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -56,7 +56,7 @@ struct file;
struct subprocess_info {
struct work_struct work;
struct completion *complete;
- char *path;
+ const char *path;
char **argv;
char **envp;
int wait;
@@ -67,10 +67,11 @@ struct subprocess_info {
};
extern int
-call_usermodehelper(char *path, char **argv, char **envp, int wait);
+call_usermodehelper(const char *path, char **argv, char **envp, int wait);
extern struct subprocess_info *
-call_usermodehelper_setup(char *path, char **argv, char **envp, gfp_t gfp_mask,
+call_usermodehelper_setup(const char *path, char **argv, char **envp,
+ gfp_t gfp_mask,
int (*init)(struct subprocess_info *info, struct cred *new),
void (*cleanup)(struct subprocess_info *), void *data);
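
With the constified @path, callers no longer need to cast string literals. A minimal sketch of the common synchronous pattern; the helper path and arguments are made up.

#include <linux/kmod.h>

static int my_run_helper(char *event)
{
        char *argv[] = { "/sbin/my-helper", event, NULL };
        char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };

        /* Blocks until the helper exits and returns its exit status. */
        return call_usermodehelper("/sbin/my-helper", argv, envp, UMH_WAIT_PROC);
}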
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 8f68490..16ddfb8 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -278,9 +278,13 @@ struct kprobe_insn_cache {
int nr_garbage;
};
+#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
extern kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c);
extern void __free_insn_slot(struct kprobe_insn_cache *c,
kprobe_opcode_t *slot, int dirty);
+/* sleep-less address checking routine */
+extern bool __is_insn_slot_addr(struct kprobe_insn_cache *c,
+ unsigned long addr);
#define DEFINE_INSN_CACHE_OPS(__name) \
extern struct kprobe_insn_cache kprobe_##__name##_slots; \
@@ -294,6 +298,18 @@ static inline void free_##__name##_slot(kprobe_opcode_t *slot, int dirty)\
{ \
__free_insn_slot(&kprobe_##__name##_slots, slot, dirty); \
} \
+ \
+static inline bool is_kprobe_##__name##_slot(unsigned long addr) \
+{ \
+ return __is_insn_slot_addr(&kprobe_##__name##_slots, addr); \
+}
+#else /* __ARCH_WANT_KPROBES_INSN_SLOT */
+#define DEFINE_INSN_CACHE_OPS(__name) \
+static inline bool is_kprobe_##__name##_slot(unsigned long addr) \
+{ \
+ return 0; \
+}
+#endif
DEFINE_INSN_CACHE_OPS(insn);
@@ -330,7 +346,6 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
int write, void __user *buffer,
size_t *length, loff_t *ppos);
#endif
-
#endif /* CONFIG_OPTPROBES */
#ifdef CONFIG_KPROBES_ON_FTRACE
extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
@@ -481,6 +496,19 @@ static inline int enable_jprobe(struct jprobe *jp)
return enable_kprobe(&jp->kp);
}
+#ifndef CONFIG_KPROBES
+static inline bool is_kprobe_insn_slot(unsigned long addr)
+{
+ return false;
+}
+#endif
+#ifndef CONFIG_OPTPROBES
+static inline bool is_kprobe_optinsn_slot(unsigned long addr)
+{
+ return false;
+}
+#endif
+
#ifdef CONFIG_KPROBES
/*
 * Blacklist generating macro. Specify functions which are not probed
diff --git a/include/linux/kref.h b/include/linux/kref.h
index e15828f..f4156f8 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -15,22 +15,27 @@
#ifndef _KREF_H_
#define _KREF_H_
-#include <linux/bug.h>
-#include <linux/atomic.h>
-#include <linux/kernel.h>
-#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/refcount.h>
struct kref {
- atomic_t refcount;
+ refcount_t refcount;
};
+#define KREF_INIT(n) { .refcount = REFCOUNT_INIT(n), }
+
/**
* kref_init - initialize object.
* @kref: object in question.
*/
static inline void kref_init(struct kref *kref)
{
- atomic_set(&kref->refcount, 1);
+ refcount_set(&kref->refcount, 1);
+}
+
+static inline unsigned int kref_read(const struct kref *kref)
+{
+ return refcount_read(&kref->refcount);
}
/**
@@ -39,17 +44,12 @@ static inline void kref_init(struct kref *kref)
*/
static inline void kref_get(struct kref *kref)
{
- /* If refcount was 0 before incrementing then we have a race
- * condition when this kref is freeing by some other thread right now.
- * In this case one should use kref_get_unless_zero()
- */
- WARN_ON_ONCE(atomic_inc_return(&kref->refcount) < 2);
+ refcount_inc(&kref->refcount);
}
/**
- * kref_sub - subtract a number of refcounts for object.
+ * kref_put - decrement refcount for object.
* @kref: object.
- * @count: Number of recounts to subtract.
* @release: pointer to the function that will clean up the object when the
* last reference to the object is released.
* This pointer is required, and it is not acceptable to pass kfree
@@ -58,57 +58,43 @@ static inline void kref_get(struct kref *kref)
* maintainer, and anyone else who happens to notice it. You have
* been warned.
*
- * Subtract @count from the refcount, and if 0, call release().
+ * Decrement the refcount, and if 0, call release().
* Return 1 if the object was removed, otherwise return 0. Beware, if this
* function returns 0, you still can not count on the kref from remaining in
* memory. Only use the return value if you want to see if the kref is now
* gone, not present.
*/
-static inline int kref_sub(struct kref *kref, unsigned int count,
- void (*release)(struct kref *kref))
+static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
WARN_ON(release == NULL);
- if (atomic_sub_and_test((int) count, &kref->refcount)) {
+ if (refcount_dec_and_test(&kref->refcount)) {
release(kref);
return 1;
}
return 0;
}
-/**
- * kref_put - decrement refcount for object.
- * @kref: object.
- * @release: pointer to the function that will clean up the object when the
- * last reference to the object is released.
- * This pointer is required, and it is not acceptable to pass kfree
- * in as this function. If the caller does pass kfree to this
- * function, you will be publicly mocked mercilessly by the kref
- * maintainer, and anyone else who happens to notice it. You have
- * been warned.
- *
- * Decrement the refcount, and if 0, call release().
- * Return 1 if the object was removed, otherwise return 0. Beware, if this
- * function returns 0, you still can not count on the kref from remaining in
- * memory. Only use the return value if you want to see if the kref is now
- * gone, not present.
- */
-static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
-{
- return kref_sub(kref, 1, release);
-}
-
static inline int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
struct mutex *lock)
{
WARN_ON(release == NULL);
- if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
- mutex_lock(lock);
- if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
- mutex_unlock(lock);
- return 0;
- }
+
+ if (refcount_dec_and_mutex_lock(&kref->refcount, lock)) {
+ release(kref);
+ return 1;
+ }
+ return 0;
+}
+
+static inline int kref_put_lock(struct kref *kref,
+ void (*release)(struct kref *kref),
+ spinlock_t *lock)
+{
+ WARN_ON(release == NULL);
+
+ if (refcount_dec_and_lock(&kref->refcount, lock)) {
release(kref);
return 1;
}
@@ -133,6 +119,6 @@ static inline int kref_put_mutex(struct kref *kref,
*/
static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
- return atomic_add_unless(&kref->refcount, 1, 0);
+ return refcount_inc_not_zero(&kref->refcount);
}
#endif /* _KREF_H_ */
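
A minimal usage sketch of the refcount_t-backed kref ("my_obj" is assumed): the init/get/put flow is unchanged, kref_read() replaces peeking at the counter directly, and former kref_sub() users now drop references one at a time.

#include <linux/kref.h>
#include <linux/slab.h>

struct my_obj {
        struct kref     ref;
        int             payload;
};

static void my_obj_release(struct kref *kref)
{
        struct my_obj *obj = container_of(kref, struct my_obj, ref);

        kfree(obj);
}

static struct my_obj *my_obj_alloc(void)
{
        struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (obj)
                kref_init(&obj->ref);           /* refcount starts at 1 */
        return obj;
}

static void my_obj_put(struct my_obj *obj)
{
        pr_debug("my_obj refcount before put: %u\n", kref_read(&obj->ref));
        kref_put(&obj->ref, my_obj_release);    /* releases on the final put */
}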
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 569cb53..38c0bd7 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -13,6 +13,7 @@
#define __LINUX_LEDS_H_INCLUDED
#include <linux/device.h>
+#include <linux/kernfs.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
@@ -27,6 +28,7 @@ struct device;
enum led_brightness {
LED_OFF = 0,
+ LED_ON = 1,
LED_HALF = 127,
LED_FULL = 255,
};
@@ -46,6 +48,7 @@ struct led_classdev {
#define LED_DEV_CAP_FLASH (1 << 18)
#define LED_HW_PLUGGABLE (1 << 19)
#define LED_PANIC_INDICATOR (1 << 20)
+#define LED_BRIGHT_HW_CHANGED (1 << 21)
/* set_brightness_work / blink_timer flags, atomic, private. */
unsigned long work_flags;
@@ -110,6 +113,11 @@ struct led_classdev {
bool activated;
#endif
+#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
+ int brightness_hw_changed;
+ struct kernfs_node *brightness_hw_changed_kn;
+#endif
+
/* Ensures consistent access to the LED Flash Class device */
struct mutex led_access;
};
@@ -422,4 +430,12 @@ static inline void ledtrig_cpu(enum cpu_led_event evt)
}
#endif
+#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
+extern void led_classdev_notify_brightness_hw_changed(
+ struct led_classdev *led_cdev, enum led_brightness brightness);
+#else
+static inline void led_classdev_notify_brightness_hw_changed(
+ struct led_classdev *led_cdev, enum led_brightness brightness) { }
+#endif
+
#endif /* __LINUX_LEDS_H_INCLUDED */
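
A short sketch of how a driver might use the new notifier when the hardware changes brightness on its own (for example via a hardware trigger or button). "my_led" and the brightness register layout are assumptions.

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/leds.h>

struct my_led {
        struct led_classdev cdev;
        void __iomem *regs;
};

static irqreturn_t my_led_irq(int irq, void *data)
{
        struct my_led *led = data;
        /* Hypothetical register holding the brightness the hardware picked. */
        enum led_brightness b = readl(led->regs) & 0xff;

        led_classdev_notify_brightness_hw_changed(&led->cdev, b);
        return IRQ_HANDLED;
}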
diff --git a/include/linux/libata.h b/include/linux/libata.h
index c170be5..c9a69fc 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -968,7 +968,7 @@ struct ata_port_operations {
void (*sff_tf_read)(struct ata_port *ap, struct ata_taskfile *tf);
void (*sff_exec_command)(struct ata_port *ap,
const struct ata_taskfile *tf);
- unsigned int (*sff_data_xfer)(struct ata_device *dev,
+ unsigned int (*sff_data_xfer)(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw);
void (*sff_irq_on)(struct ata_port *);
bool (*sff_irq_check)(struct ata_port *);
@@ -1130,6 +1130,7 @@ extern int ata_sas_port_start(struct ata_port *ap);
extern void ata_sas_port_stop(struct ata_port *ap);
extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
+extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
extern int sata_scr_valid(struct ata_link *link);
extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
@@ -1355,6 +1356,7 @@ extern struct device_attribute *ata_common_sdev_attrs[];
.proc_name = drv_name, \
.slave_configure = ata_scsi_slave_config, \
.slave_destroy = ata_scsi_slave_destroy, \
+ .eh_timed_out = ata_scsi_timed_out, \
.bios_param = ata_std_bios_param, \
.unlock_native_capacity = ata_scsi_unlock_native_capacity, \
.sdev_attrs = ata_common_sdev_attrs
@@ -1823,11 +1825,11 @@ extern void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
extern void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
extern void ata_sff_exec_command(struct ata_port *ap,
const struct ata_taskfile *tf);
-extern unsigned int ata_sff_data_xfer(struct ata_device *dev,
+extern unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw);
-extern unsigned int ata_sff_data_xfer32(struct ata_device *dev,
+extern unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw);
-extern unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev,
+extern unsigned int ata_sff_data_xfer_noirq(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw);
extern void ata_sff_irq_on(struct ata_port *ap);
extern void ata_sff_irq_clear(struct ata_port *ap);
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 7c273bb..ca45e4a 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -80,8 +80,6 @@ struct nvm_dev_ops {
unsigned int max_phys_sect;
};
-
-
#ifdef CONFIG_NVM
#include <linux/blkdev.h>
@@ -109,6 +107,7 @@ enum {
NVM_RSP_ERR_FAILWRITE = 0x40ff,
NVM_RSP_ERR_EMPTYPAGE = 0x42ff,
NVM_RSP_ERR_FAILECC = 0x4281,
+ NVM_RSP_ERR_FAILCRC = 0x4004,
NVM_RSP_WARN_HIGHECC = 0x4700,
/* Device opcodes */
@@ -202,11 +201,10 @@ struct nvm_addr_format {
struct nvm_id {
u8 ver_id;
u8 vmnt;
- u8 cgrps;
u32 cap;
u32 dom;
struct nvm_addr_format ppaf;
- struct nvm_id_group groups[4];
+ struct nvm_id_group grp;
} __packed;
struct nvm_target {
@@ -216,10 +214,6 @@ struct nvm_target {
struct gendisk *disk;
};
-struct nvm_tgt_instance {
- struct nvm_tgt_type *tt;
-};
-
#define ADDR_EMPTY (~0ULL)
#define NVM_VERSION_MAJOR 1
@@ -230,7 +224,6 @@ struct nvm_rq;
typedef void (nvm_end_io_fn)(struct nvm_rq *);
struct nvm_rq {
- struct nvm_tgt_instance *ins;
struct nvm_tgt_dev *dev;
struct bio *bio;
@@ -254,6 +247,8 @@ struct nvm_rq {
u64 ppa_status; /* ppa media status */
int error;
+
+ void *private;
};
static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
@@ -272,15 +267,6 @@ enum {
NVM_BLK_ST_BAD = 0x8, /* Bad block */
};
-/* system block cpu representation */
-struct nvm_sb_info {
- unsigned long seqnr;
- unsigned long erase_cnt;
- unsigned int version;
- char mmtype[NVM_MMTYPE_LEN];
- struct ppa_addr fs_ppa;
-};
-
/* Device generic information */
struct nvm_geo {
int nr_chnls;
@@ -308,6 +294,7 @@ struct nvm_geo {
int sec_per_lun;
};
+/* sub-device structure */
struct nvm_tgt_dev {
/* Device information */
struct nvm_geo geo;
@@ -329,17 +316,10 @@ struct nvm_dev {
struct list_head devices;
- /* Media manager */
- struct nvmm_type *mt;
- void *mp;
-
- /* System blocks */
- struct nvm_sb_info sb;
-
/* Device information */
struct nvm_geo geo;
- /* lower page table */
+ /* lower page table */
int lps_per_blk;
int *lptbl;
@@ -359,6 +339,10 @@ struct nvm_dev {
struct mutex mlock;
spinlock_t lock;
+
+ /* target management */
+ struct list_head area_list;
+ struct list_head targets;
};
static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo,
@@ -391,10 +375,10 @@ static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo,
return l;
}
-static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
- struct ppa_addr r)
+static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev,
+ struct ppa_addr r)
{
- struct nvm_geo *geo = &dev->geo;
+ struct nvm_geo *geo = &tgt_dev->geo;
struct ppa_addr l;
l.ppa = ((u64)r.g.blk) << geo->ppaf.blk_offset;
@@ -407,10 +391,10 @@ static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
return l;
}
-static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
- struct ppa_addr r)
+static inline struct ppa_addr dev_to_generic_addr(struct nvm_tgt_dev *tgt_dev,
+ struct ppa_addr r)
{
- struct nvm_geo *geo = &dev->geo;
+ struct nvm_geo *geo = &tgt_dev->geo;
struct ppa_addr l;
l.ppa = 0;
@@ -452,15 +436,12 @@ static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2)
(ppa1.g.blk == ppa2.g.blk));
}
-static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg)
-{
- return dev->lptbl[slc_pg];
-}
-
typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *);
typedef void (nvm_tgt_exit_fn)(void *);
+typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
+typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);
struct nvm_tgt_type {
const char *name;
@@ -469,12 +450,15 @@ struct nvm_tgt_type {
/* target entry points */
nvm_tgt_make_rq_fn *make_rq;
nvm_tgt_capacity_fn *capacity;
- nvm_end_io_fn *end_io;
/* module-specific init/teardown */
nvm_tgt_init_fn *init;
nvm_tgt_exit_fn *exit;
+ /* sysfs */
+ nvm_tgt_sysfs_init_fn *sysfs_init;
+ nvm_tgt_sysfs_exit_fn *sysfs_exit;
+
/* For internal use */
struct list_head list;
};
@@ -487,103 +471,29 @@ extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);
extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);
-typedef int (nvmm_register_fn)(struct nvm_dev *);
-typedef void (nvmm_unregister_fn)(struct nvm_dev *);
-
-typedef int (nvmm_create_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_create *);
-typedef int (nvmm_remove_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_remove *);
-typedef int (nvmm_submit_io_fn)(struct nvm_tgt_dev *, struct nvm_rq *);
-typedef int (nvmm_erase_blk_fn)(struct nvm_tgt_dev *, struct ppa_addr *, int);
-typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
-typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t);
-typedef struct ppa_addr (nvmm_trans_ppa_fn)(struct nvm_tgt_dev *,
- struct ppa_addr, int);
-typedef void (nvmm_part_to_tgt_fn)(struct nvm_dev *, sector_t*, int);
-
-enum {
- TRANS_TGT_TO_DEV = 0x0,
- TRANS_DEV_TO_TGT = 0x1,
-};
-
-struct nvmm_type {
- const char *name;
- unsigned int version[3];
-
- nvmm_register_fn *register_mgr;
- nvmm_unregister_fn *unregister_mgr;
-
- nvmm_create_tgt_fn *create_tgt;
- nvmm_remove_tgt_fn *remove_tgt;
-
- nvmm_submit_io_fn *submit_io;
- nvmm_erase_blk_fn *erase_blk;
-
- nvmm_get_area_fn *get_area;
- nvmm_put_area_fn *put_area;
-
- nvmm_trans_ppa_fn *trans_ppa;
- nvmm_part_to_tgt_fn *part_to_tgt;
-
- struct list_head list;
-};
-
-extern int nvm_register_mgr(struct nvmm_type *);
-extern void nvm_unregister_mgr(struct nvmm_type *);
-
extern struct nvm_dev *nvm_alloc_dev(int);
extern int nvm_register(struct nvm_dev *);
extern void nvm_unregister(struct nvm_dev *);
-extern int nvm_set_bb_tbl(struct nvm_dev *, struct ppa_addr *, int, int);
extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
int, int);
extern int nvm_max_phys_sects(struct nvm_tgt_dev *);
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
-extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
-extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
const struct ppa_addr *, int, int);
extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
-extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int, int);
extern int nvm_erase_blk(struct nvm_tgt_dev *, struct ppa_addr *, int);
extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *,
void *);
extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t);
extern void nvm_put_area(struct nvm_tgt_dev *, sector_t);
-extern void nvm_end_io(struct nvm_rq *, int);
-extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int,
- void *, int);
-extern int nvm_submit_ppa_list(struct nvm_dev *, struct ppa_addr *, int, int,
- int, void *, int);
+extern void nvm_end_io(struct nvm_rq *);
extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
-extern int nvm_get_bb_tbl(struct nvm_dev *, struct ppa_addr, u8 *);
extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *);
-/* sysblk.c */
-#define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */
-
-/* system block on disk representation */
-struct nvm_system_block {
- __be32 magic; /* magic signature */
- __be32 seqnr; /* sequence number */
- __be32 erase_cnt; /* erase count */
- __be16 version; /* version number */
- u8 mmtype[NVM_MMTYPE_LEN]; /* media manager name */
- __be64 fs_ppa; /* PPA for media manager
- * superblock */
-};
-
-extern int nvm_get_sysblock(struct nvm_dev *, struct nvm_sb_info *);
-extern int nvm_update_sysblock(struct nvm_dev *, struct nvm_sb_info *);
-extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *);
-
extern int nvm_dev_factory(struct nvm_dev *, int flags);
-#define nvm_for_each_lun_ppa(geo, ppa, chid, lunid) \
- for ((chid) = 0, (ppa).ppa = 0; (chid) < (geo)->nr_chnls; \
- (chid)++, (ppa).g.ch = (chid)) \
- for ((lunid) = 0; (lunid) < (geo)->luns_per_chnl; \
- (lunid)++, (ppa).g.lun = (lunid))
+extern void nvm_part_to_tgt(struct nvm_dev *, sector_t *, int);
#else /* CONFIG_NVM */
struct nvm_dev_ops;
diff --git a/include/linux/list.h b/include/linux/list.h
index d1039ec..ae537fa 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -527,6 +527,19 @@ static inline void list_splice_tail_init(struct list_head *list,
pos = list_next_entry(pos, member))
/**
+ * list_for_each_entry_from_reverse - iterate backwards over list of given type
+ * from the current point
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Iterate backwards over list of given type, continuing from current position.
+ */
+#define list_for_each_entry_from_reverse(pos, head, member) \
+ for (; &pos->member != (head); \
+ pos = list_prev_entry(pos, member))
+
+/**
* list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
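
The new iterator is handy for unwind paths: it walks backwards starting at the current entry itself, unlike list_for_each_entry_continue_reverse(), which starts at the previous one. A sketch with assumed "my_item"/"my_revert" names:

#include <linux/list.h>
#include <linux/printk.h>

struct my_item {
        struct list_head node;
        int id;
};

static void my_revert(struct my_item *item)
{
        pr_debug("reverting item %d\n", item->id);      /* assumed undo helper */
}

/* Revert @item and every entry before it, newest first. */
static void my_revert_from(struct my_item *item, struct list_head *head)
{
        list_for_each_entry_from_reverse(item, head, node)
                my_revert(item);
}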
diff --git a/include/linux/llist.h b/include/linux/llist.h
index fd4ca0b..171baa9 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -3,28 +3,33 @@
/*
* Lock-less NULL terminated single linked list
*
- * If there are multiple producers and multiple consumers, llist_add
- * can be used in producers and llist_del_all can be used in
- * consumers. They can work simultaneously without lock. But
- * llist_del_first can not be used here. Because llist_del_first
- * depends on list->first->next does not changed if list->first is not
- * changed during its operation, but llist_del_first, llist_add,
- * llist_add (or llist_del_all, llist_add, llist_add) sequence in
- * another consumer may violate that.
- *
- * If there are multiple producers and one consumer, llist_add can be
- * used in producers and llist_del_all or llist_del_first can be used
- * in the consumer.
- *
- * This can be summarized as follow:
+ * Cases where locking is not needed:
+ * If there are multiple producers and multiple consumers, llist_add can be
+ * used in producers and llist_del_all can be used in consumers simultaneously
+ * without locking. Also a single consumer can use llist_del_first while
+ * multiple producers simultaneously use llist_add, without any locking.
+ *
+ * Cases where locking is needed:
+ * If we have multiple consumers with llist_del_first used in one consumer, and
+ * llist_del_first or llist_del_all used in other consumers, then a lock is
+ * needed. This is because llist_del_first depends on list->first->next not
+ * changing, but without lock protection, there's no way to be sure about that
+ * if a preemption happens in the middle of the delete operation and on being
+ * preempted back, the list->first is the same as before causing the cmpxchg in
+ * llist_del_first to succeed. For example, while a llist_del_first operation
+ * is in progress in one consumer, then a llist_del_first, llist_add,
+ * llist_add (or llist_del_all, llist_add, llist_add) sequence in another
+ * consumer may cause violations.
+ *
+ * This can be summarized as follows:
*
* | add | del_first | del_all
* add | - | - | -
* del_first | | L | L
* del_all | | | -
*
- * Where "-" stands for no lock is needed, while "L" stands for lock
- * is needed.
+ * Where a particular row's operation can happen concurrently with a column's
+ * operation: "-" means no lock is needed, while "L" means a lock is needed.
*
* The list entries deleted via llist_del_all can be traversed with
* traversing function such as llist_for_each etc. But the list
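
A minimal sketch of the lock-free case described above: any number of producers add entries (llist_add() is safe from IRQ and NMI context), while a single consumer drains the whole list at once. Names are illustrative.

#include <linux/llist.h>
#include <linux/slab.h>

struct my_event {
        struct llist_node node;
        int code;
};

static LLIST_HEAD(my_pending);

/* Producer side: may run concurrently on any CPU, no lock taken. */
static void my_queue_event(struct my_event *ev)
{
        llist_add(&ev->node, &my_pending);
}

/* Single consumer: detach everything atomically, then process at leisure. */
static void my_drain_events(void)
{
        struct llist_node *batch = llist_del_all(&my_pending);
        struct my_event *ev, *tmp;

        llist_for_each_entry_safe(ev, tmp, batch, node)
                kfree(ev);
}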
diff --git a/include/linux/log2.h b/include/linux/log2.h
index fd7ff3d..ef3d4f6 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -203,6 +203,17 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
* ... and so on.
*/
-#define order_base_2(n) ilog2(roundup_pow_of_two(n))
+static inline __attribute_const__
+int __order_base_2(unsigned long n)
+{
+ return n > 1 ? ilog2(n - 1) + 1 : 0;
+}
+#define order_base_2(n) \
+( \
+ __builtin_constant_p(n) ? ( \
+ ((n) == 0 || (n) == 1) ? 0 : \
+ ilog2((n) - 1) + 1) : \
+ __order_base_2(n) \
+)
#endif /* _LINUX_LOG2_H */
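
For reference, a few values of the reworked macro, which now behaves the same whether the argument is a compile-time constant or a runtime variable: order_base_2(1) == 0, order_base_2(4) == 2, order_base_2(5) == 3. A small, assumed usage sketch:

#include <linux/log2.h>

/* Smallest power-of-two order able to hold nr_slots ring entries. */
static unsigned int my_ring_order(unsigned long nr_slots)
{
        return order_base_2(nr_slots);
}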
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 558adfa..e29d4c6 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -352,8 +352,7 @@
* Return 0 if permission is granted.
* @inode_getattr:
* Check permission before obtaining file attributes.
- * @mnt is the vfsmount where the dentry was looked up
- * @dentry contains the dentry structure for the file.
+ * @path contains the path structure for the file.
* Return 0 if permission is granted.
* @inode_setxattr:
* Check permission before setting the extended attributes
@@ -666,11 +665,6 @@
* @sig contains the signal value.
* @secid contains the sid of the process where the signal originated
* Return 0 if permission is granted.
- * @task_wait:
- * Check permission before allowing a process to reap a child process @p
- * and collect its status information.
- * @p contains the task_struct for process.
- * Return 0 if permission is granted.
* @task_prctl:
* Check permission before performing a process control operation on the
* current process.
@@ -1507,7 +1501,6 @@ union security_list_options {
int (*task_movememory)(struct task_struct *p);
int (*task_kill)(struct task_struct *p, struct siginfo *info,
int sig, u32 secid);
- int (*task_wait)(struct task_struct *p);
int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
void (*task_to_inode)(struct task_struct *p, struct inode *inode);
@@ -1547,8 +1540,7 @@ union security_list_options {
void (*d_instantiate)(struct dentry *dentry, struct inode *inode);
int (*getprocattr)(struct task_struct *p, char *name, char **value);
- int (*setprocattr)(struct task_struct *p, char *name, void *value,
- size_t size);
+ int (*setprocattr)(const char *name, void *value, size_t size);
int (*ismaclabel)(const char *name);
int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen);
int (*secctx_to_secid)(const char *secdata, u32 seclen, u32 *secid);
@@ -1768,7 +1760,6 @@ struct security_hook_heads {
struct list_head task_getscheduler;
struct list_head task_movememory;
struct list_head task_kill;
- struct list_head task_wait;
struct list_head task_prctl;
struct list_head task_to_inode;
struct list_head ipc_permission;
@@ -1876,6 +1867,7 @@ struct security_hook_list {
struct list_head list;
struct list_head *head;
union security_list_options hook;
+ char *lsm;
};
/*
@@ -1888,15 +1880,10 @@ struct security_hook_list {
{ .head = &security_hook_heads.HEAD, .hook = { .HEAD = HOOK } }
extern struct security_hook_heads security_hook_heads;
+extern char *lsm_names;
-static inline void security_add_hooks(struct security_hook_list *hooks,
- int count)
-{
- int i;
-
- for (i = 0; i < count; i++)
- list_add_tail_rcu(&hooks[i].list, hooks[i].head);
-}
+extern void security_add_hooks(struct security_hook_list *hooks, int count,
+ char *lsm);
#ifdef CONFIG_SECURITY_SELINUX_DISABLE
/*
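
A sketch of how a security module registers its hooks with the new third argument, which records the module's name for the lsm_names list. The "mylsm" module and its two trivial hooks are assumptions.

#include <linux/init.h>
#include <linux/lsm_hooks.h>

static int mylsm_inode_getattr(const struct path *path)
{
        return 0;       /* always allow; placeholder policy */
}

static int mylsm_task_kill(struct task_struct *p, struct siginfo *info,
                           int sig, u32 secid)
{
        return 0;
}

static struct security_hook_list mylsm_hooks[] = {
        LSM_HOOK_INIT(inode_getattr, mylsm_inode_getattr),
        LSM_HOOK_INIT(task_kill, mylsm_task_kill),
};

static int __init mylsm_init(void)
{
        security_add_hooks(mylsm_hooks, ARRAY_SIZE(mylsm_hooks), "mylsm");
        return 0;
}
security_initcall(mylsm_init);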
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index a57f0df..4055cf8 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -17,8 +17,15 @@
#define MARVELL_PHY_ID_88E1116R 0x01410e40
#define MARVELL_PHY_ID_88E1510 0x01410dd0
#define MARVELL_PHY_ID_88E1540 0x01410eb0
+#define MARVELL_PHY_ID_88E1545 0x01410ea0
#define MARVELL_PHY_ID_88E3016 0x01410e60
+/* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do
+ * not have a model ID. So the switch driver traps reads to the ID2
+ * register and returns the switch family ID
+ */
+#define MARVELL_PHY_ID_88E6390 0x01410f90
+
/* struct phy_device dev_flags definitions */
#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001
#define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 6e8b5b2..80690c9 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -133,6 +133,16 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
return ret;
}
+#ifndef mul_u32_u32
+/*
+ * Many a GCC version messes this up and generates a 64x64 mult :-(
+ */
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+ return (u64)a * b;
+}
+#endif
+
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
#ifndef mul_u64_u32_shr
@@ -160,9 +170,9 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
al = a;
ah = a >> 32;
- ret = ((u64)al * mul) >> shift;
+ ret = mul_u32_u32(al, mul) >> shift;
if (ah)
- ret += ((u64)ah * mul) << (32 - shift);
+ ret += mul_u32_u32(ah, mul) << (32 - shift);
return ret;
}
@@ -186,10 +196,10 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
a0.ll = a;
b0.ll = b;
- rl.ll = (u64)a0.l.low * b0.l.low;
- rm.ll = (u64)a0.l.low * b0.l.high;
- rn.ll = (u64)a0.l.high * b0.l.low;
- rh.ll = (u64)a0.l.high * b0.l.high;
+ rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
+ rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
+ rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
+ rh.ll = mul_u32_u32(a0.l.high, b0.l.high);
/*
* Each of these lines computes a 64-bit intermediate result into "c",
@@ -229,8 +239,8 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
} u, rl, rh;
u.ll = a;
- rl.ll = (u64)u.l.low * mul;
- rh.ll = (u64)u.l.high * mul + rl.l.high;
+ rl.ll = mul_u32_u32(u.l.low, mul);
+ rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;
/* Bits 32-63 of the result will be in rh.l.low. */
rl.l.high = do_div(rh.ll, divisor);
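
The point of routing every 32x32->64 multiply through mul_u32_u32() is that an architecture can override just that primitive when its compiler insists on a full 64x64 multiplication. A hedged sketch of what such an override can look like on 32-bit x86; the "my_" name is illustrative and any real override belongs under arch/.

#ifdef CONFIG_X86_32
static inline u64 my_mul_u32_u32(u32 a, u32 b)
{
        u32 high, low;

        /* One 32x32->64 MUL: the result lands in edx:eax. */
        asm ("mull %[b]" : "=a" (low), "=d" (high)
                         : [a] "a" (a), [b] "rm" (b));

        return low | ((u64)high) << 32;
}
#endif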
diff --git a/include/linux/mdev.h b/include/linux/mdev.h
index ec819e9..b6e048e 100644
--- a/include/linux/mdev.h
+++ b/include/linux/mdev.h
@@ -13,34 +13,10 @@
#ifndef MDEV_H
#define MDEV_H
-/* Parent device */
-struct parent_device {
- struct device *dev;
- const struct parent_ops *ops;
-
- /* internal */
- struct kref ref;
- struct mutex lock;
- struct list_head next;
- struct kset *mdev_types_kset;
- struct list_head type_list;
-};
-
-/* Mediated device */
-struct mdev_device {
- struct device dev;
- struct parent_device *parent;
- uuid_le uuid;
- void *driver_data;
-
- /* internal */
- struct kref ref;
- struct list_head next;
- struct kobject *type_kobj;
-};
+struct mdev_device;
/**
- * struct parent_ops - Structure to be registered for each parent device to
+ * struct mdev_parent_ops - Structure to be registered for each parent device to
* register the device to mdev module.
*
* @owner: The module owner.
@@ -86,10 +62,9 @@ struct mdev_device {
* @mdev: mediated device structure
* @vma: vma structure
 * Parent devices that support mediated devices should be registered with the

- * module with parent_ops structure.
+ * module with mdev_parent_ops structure.
**/
-
-struct parent_ops {
+struct mdev_parent_ops {
struct module *owner;
const struct attribute_group **dev_attr_groups;
const struct attribute_group **mdev_attr_groups;
@@ -103,7 +78,7 @@ struct parent_ops {
size_t count, loff_t *ppos);
ssize_t (*write)(struct mdev_device *mdev, const char __user *buf,
size_t count, loff_t *ppos);
- ssize_t (*ioctl)(struct mdev_device *mdev, unsigned int cmd,
+ long (*ioctl)(struct mdev_device *mdev, unsigned int cmd,
unsigned long arg);
int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma);
};
@@ -142,27 +117,22 @@ struct mdev_driver {
};
#define to_mdev_driver(drv) container_of(drv, struct mdev_driver, driver)
-#define to_mdev_device(dev) container_of(dev, struct mdev_device, dev)
-
-static inline void *mdev_get_drvdata(struct mdev_device *mdev)
-{
- return mdev->driver_data;
-}
-static inline void mdev_set_drvdata(struct mdev_device *mdev, void *data)
-{
- mdev->driver_data = data;
-}
+extern void *mdev_get_drvdata(struct mdev_device *mdev);
+extern void mdev_set_drvdata(struct mdev_device *mdev, void *data);
+extern uuid_le mdev_uuid(struct mdev_device *mdev);
extern struct bus_type mdev_bus_type;
-#define dev_is_mdev(d) ((d)->bus == &mdev_bus_type)
-
extern int mdev_register_device(struct device *dev,
- const struct parent_ops *ops);
+ const struct mdev_parent_ops *ops);
extern void mdev_unregister_device(struct device *dev);
extern int mdev_register_driver(struct mdev_driver *drv, struct module *owner);
extern void mdev_unregister_driver(struct mdev_driver *drv);
+extern struct device *mdev_parent_dev(struct mdev_device *mdev);
+extern struct device *mdev_dev(struct mdev_device *mdev);
+extern struct mdev_device *mdev_from_dev(struct device *dev);
+
#endif /* MDEV_H */
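
With struct mdev_device and the parent structure now opaque, vendor drivers go through the accessors instead of touching members. A rough sketch; the names, the omitted type groups, and the remaining callbacks are assumptions.

#include <linux/device.h>
#include <linux/mdev.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uuid.h>

struct my_mdev_state {
        uuid_le uuid;
        void *vconfig;
};

static int my_create(struct kobject *kobj, struct mdev_device *mdev)
{
        struct my_mdev_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

        if (!state)
                return -ENOMEM;

        state->uuid = mdev_uuid(mdev);
        mdev_set_drvdata(mdev, state);
        dev_info(mdev_dev(mdev), "created under parent %s\n",
                 dev_name(mdev_parent_dev(mdev)));
        return 0;
}

static long my_ioctl(struct mdev_device *mdev, unsigned int cmd,
                     unsigned long arg)
{
        struct my_mdev_state *state = mdev_get_drvdata(mdev);

        (void)state;            /* placeholder: dispatch on cmd here */
        return -ENOTTY;
}

static const struct mdev_parent_ops my_mdev_ops = {
        .owner          = THIS_MODULE,
        /* .supported_type_groups = my_type_groups,  (assumed, omitted) */
        .create         = my_create,
        .ioctl          = my_ioctl,     /* note: now returns long */
};

/*
 * Registration from the physical device's probe():
 *      mdev_register_device(dev, &my_mdev_ops);
 */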
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index bf9d1d7..ca08ab1 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -10,6 +10,7 @@
#define __LINUX_MDIO_H__
#include <uapi/linux/mdio.h>
+#include <linux/mod_devicetable.h>
struct mii_bus;
@@ -29,6 +30,7 @@ struct mdio_device {
const struct dev_pm_ops *pm_ops;
struct mii_bus *bus;
+ char modalias[MDIO_NAME_SIZE];
int (*bus_match)(struct device *dev, struct device_driver *drv);
void (*device_free)(struct mdio_device *mdiodev);
@@ -71,6 +73,7 @@ int mdio_device_register(struct mdio_device *mdiodev);
void mdio_device_remove(struct mdio_device *mdiodev);
int mdio_driver_register(struct mdio_driver *drv);
void mdio_driver_unregister(struct mdio_driver *drv);
+int mdio_device_bus_match(struct device *dev, struct device_driver *drv);
static inline bool mdio_phy_id_is_c45(int phy_id)
{
@@ -130,6 +133,10 @@ extern int mdio45_nway_restart(const struct mdio_if_info *mdio);
extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
struct ethtool_cmd *ecmd,
u32 npage_adv, u32 npage_lpa);
+extern void
+mdio45_ethtool_ksettings_get_npage(const struct mdio_if_info *mdio,
+ struct ethtool_link_ksettings *cmd,
+ u32 npage_adv, u32 npage_lpa);
/**
* mdio45_ethtool_gset - get settings for ETHTOOL_GSET
@@ -147,6 +154,23 @@ static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio,
mdio45_ethtool_gset_npage(mdio, ecmd, 0, 0);
}
+/**
+ * mdio45_ethtool_ksettings_get - get settings for ETHTOOL_GLINKSETTINGS
+ * @mdio: MDIO interface
+ * @cmd: Ethtool request structure
+ *
+ * Since the CSRs for auto-negotiation using next pages are not fully
+ * standardised, this function does not attempt to decode them. Use
+ * mdio45_ethtool_ksettings_get_npage() to specify advertisement bits
+ * from next pages.
+ */
+static inline void
+mdio45_ethtool_ksettings_get(const struct mdio_if_info *mdio,
+ struct ethtool_link_ksettings *cmd)
+{
+ mdio45_ethtool_ksettings_get_npage(mdio, cmd, 0, 0);
+}
+
extern int mdio_mii_ioctl(const struct mdio_if_info *mdio,
struct mii_ioctl_data *mii_data, int cmd);
@@ -244,7 +268,7 @@ bool mdiobus_is_registered_device(struct mii_bus *bus, int addr);
struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr);
/**
- * module_mdio_driver() - Helper macro for registering mdio drivers
+ * mdio_module_driver() - Helper macro for registering mdio drivers
*
* Helper macro for MDIO drivers which do not do anything special in module
* init/exit. Each module may only use this macro once, and calling it
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 61d20c1..2546988 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -120,7 +120,7 @@ struct mem_cgroup_reclaim_iter {
*/
struct mem_cgroup_per_node {
struct lruvec lruvec;
- unsigned long lru_size[NR_LRU_LISTS];
+ unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
@@ -432,7 +432,7 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
- int nr_pages);
+ int zid, int nr_pages);
unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
int nid, unsigned int lru_mask);
@@ -441,9 +441,23 @@ static inline
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
struct mem_cgroup_per_node *mz;
+ unsigned long nr_pages = 0;
+ int zid;
mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- return mz->lru_size[lru];
+ for (zid = 0; zid < MAX_NR_ZONES; zid++)
+ nr_pages += mz->lru_zone_size[zid][lru];
+ return nr_pages;
+}
+
+static inline
+unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
+ enum lru_list lru, int zone_idx)
+{
+ struct mem_cgroup_per_node *mz;
+
+ mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+ return mz->lru_zone_size[zone_idx][lru];
}
void mem_cgroup_handle_over_high(void);
@@ -671,6 +685,12 @@ mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
return 0;
}
+static inline
+unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
+ enum lru_list lru, int zone_idx)
+{
+ return 0;
+}
static inline unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 01033fa..134a2f6 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -85,7 +85,8 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long, unsigned long, int);
-extern int test_pages_in_a_zone(unsigned long, unsigned long);
+extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
+ unsigned long *valid_start, unsigned long *valid_end);
extern void __offline_isolated_pages(unsigned long, unsigned long);
typedef void (*online_page_callback_t)(struct page *page);
@@ -284,7 +285,7 @@ extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
unsigned long map_offset);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);
-extern int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
- enum zone_type target);
+extern bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
+ enum zone_type target, int *zone_shift);
#endif /* __LINUX_MEMORY_HOTPLUG_H */
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index a4860bc..f848ee8 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -13,7 +13,7 @@
#include <linux/regmap.h>
-enum {
+enum axp20x_variants {
AXP152_ID = 0,
AXP202_ID,
AXP209_ID,
@@ -532,35 +532,6 @@ struct axp20x_dev {
const struct regmap_irq_chip *regmap_irq_chip;
};
-#define BATTID_LEN 64
-#define OCV_CURVE_SIZE 32
-#define MAX_THERM_CURVE_SIZE 25
-#define PD_DEF_MIN_TEMP 0
-#define PD_DEF_MAX_TEMP 55
-
-struct axp20x_fg_pdata {
- char battid[BATTID_LEN + 1];
- int design_cap;
- int min_volt;
- int max_volt;
- int max_temp;
- int min_temp;
- int cap1;
- int cap0;
- int rdc1;
- int rdc0;
- int ocv_curve[OCV_CURVE_SIZE];
- int tcsz;
- int thermistor_curve[MAX_THERM_CURVE_SIZE][2];
-};
-
-struct axp20x_chrg_pdata {
- int max_cc;
- int max_cv;
- int def_cc;
- int def_cv;
-};
-
struct axp288_extcon_pdata {
/* GPIO pin control to switch D+/D- lines b/w PMIC and SOC */
struct gpio_desc *gpio_mux_cntl;
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 1683003..098c350 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -1441,7 +1441,8 @@ enum motionsensor_type {
MOTIONSENSE_TYPE_PROX = 3,
MOTIONSENSE_TYPE_LIGHT = 4,
MOTIONSENSE_TYPE_ACTIVITY = 5,
- MOTIONSENSE_TYPE_MAX
+ MOTIONSENSE_TYPE_BARO = 6,
+ MOTIONSENSE_TYPE_MAX,
};
/* List of motion sensor locations. */
diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h
index 2b300b4..fba8fcb 100644
--- a/include/linux/mfd/lpc_ich.h
+++ b/include/linux/mfd/lpc_ich.h
@@ -20,6 +20,8 @@
#ifndef LPC_ICH_H
#define LPC_ICH_H
+#include <linux/platform_data/intel-spi.h>
+
/* GPIO resources */
#define ICH_RES_GPIO 0
#define ICH_RES_GPE0 1
@@ -40,6 +42,7 @@ struct lpc_ich_info {
char name[32];
unsigned int iTCO_version;
unsigned int gpio_version;
+ enum intel_spi_type spi_type;
u8 use_gpio;
};
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
new file mode 100644
index 0000000..d030004
--- /dev/null
+++ b/include/linux/mfd/stm32-timers.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) STMicroelectronics 2016
+ *
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com>
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _LINUX_STM32_GPTIMER_H_
+#define _LINUX_STM32_GPTIMER_H_
+
+#include <linux/clk.h>
+#include <linux/regmap.h>
+
+#define TIM_CR1 0x00 /* Control Register 1 */
+#define TIM_CR2 0x04 /* Control Register 2 */
+#define TIM_SMCR 0x08 /* Slave mode control reg */
+#define TIM_DIER 0x0C /* DMA/interrupt register */
+#define TIM_SR 0x10 /* Status register */
+#define TIM_EGR 0x14 /* Event Generation Reg */
+#define TIM_CCMR1 0x18 /* Capt/Comp 1 Mode Reg */
+#define TIM_CCMR2 0x1C /* Capt/Comp 2 Mode Reg */
+#define TIM_CCER 0x20 /* Capt/Comp Enable Reg */
+#define TIM_PSC 0x28 /* Prescaler */
+#define TIM_ARR 0x2c /* Auto-Reload Register */
+#define TIM_CCR1 0x34 /* Capt/Comp Register 1 */
+#define TIM_CCR2 0x38 /* Capt/Comp Register 2 */
+#define TIM_CCR3 0x3C /* Capt/Comp Register 3 */
+#define TIM_CCR4 0x40 /* Capt/Comp Register 4 */
+#define TIM_BDTR 0x44 /* Break and Dead-Time Reg */
+
+#define TIM_CR1_CEN BIT(0) /* Counter Enable */
+#define TIM_CR1_ARPE BIT(7) /* Auto-reload Preload Ena */
+#define TIM_CR2_MMS (BIT(4) | BIT(5) | BIT(6)) /* Master mode selection */
+#define TIM_SMCR_SMS (BIT(0) | BIT(1) | BIT(2)) /* Slave mode selection */
+#define TIM_SMCR_TS (BIT(4) | BIT(5) | BIT(6)) /* Trigger selection */
+#define TIM_DIER_UIE BIT(0) /* Update interrupt */
+#define TIM_SR_UIF BIT(0) /* Update interrupt flag */
+#define TIM_EGR_UG BIT(0) /* Update Generation */
+#define TIM_CCMR_PE BIT(3) /* Channel Preload Enable */
+#define TIM_CCMR_M1 (BIT(6) | BIT(5)) /* Channel PWM Mode 1 */
+#define TIM_CCER_CC1E BIT(0) /* Capt/Comp 1 out Ena */
+#define TIM_CCER_CC1P BIT(1) /* Capt/Comp 1 Polarity */
+#define TIM_CCER_CC1NE BIT(2) /* Capt/Comp 1N out Ena */
+#define TIM_CCER_CC1NP BIT(3) /* Capt/Comp 1N Polarity */
+#define TIM_CCER_CC2E BIT(4) /* Capt/Comp 2 out Ena */
+#define TIM_CCER_CC3E BIT(8) /* Capt/Comp 3 out Ena */
+#define TIM_CCER_CC4E BIT(12) /* Capt/Comp 4 out Ena */
+#define TIM_CCER_CCXE (BIT(0) | BIT(4) | BIT(8) | BIT(12))
+#define TIM_BDTR_BKE BIT(12) /* Break input enable */
+#define TIM_BDTR_BKP BIT(13) /* Break input polarity */
+#define TIM_BDTR_AOE BIT(14) /* Automatic Output Enable */
+#define TIM_BDTR_MOE BIT(15) /* Main Output Enable */
+#define TIM_BDTR_BKF (BIT(16) | BIT(17) | BIT(18) | BIT(19))
+#define TIM_BDTR_BK2F (BIT(20) | BIT(21) | BIT(22) | BIT(23))
+#define TIM_BDTR_BK2E BIT(24) /* Break 2 input enable */
+#define TIM_BDTR_BK2P BIT(25) /* Break 2 input polarity */
+
+#define MAX_TIM_PSC 0xFFFF
+#define TIM_CR2_MMS_SHIFT 4
+#define TIM_SMCR_TS_SHIFT 4
+#define TIM_BDTR_BKF_MASK 0xF
+#define TIM_BDTR_BKF_SHIFT 16
+#define TIM_BDTR_BK2F_SHIFT 20
+
+struct stm32_timers {
+ struct clk *clk;
+ struct regmap *regmap;
+ u32 max_arr;
+};
+#endif
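/*
 * Illustrative sketch (assumed child driver, not part of this diff): the
 * clock and regmap shared through struct stm32_timers above are used with
 * the register/bit definitions from this header. The 1 kHz update rate and
 * the >= 1 MHz timer clock are made-up example values.
 */
static int stm32_timer_start_1khz(struct stm32_timers *ddata)
{
	unsigned long rate = clk_get_rate(ddata->clk);
	u32 psc = rate / 1000000 - 1;	/* count at 1 MHz */
	u32 arr = 1000 - 1;		/* update event every 1000 counts */

	if (arr > ddata->max_arr)
		return -EINVAL;

	regmap_write(ddata->regmap, TIM_PSC, psc);
	regmap_write(ddata->regmap, TIM_ARR, arr);
	regmap_write(ddata->regmap, TIM_EGR, TIM_EGR_UG); /* latch PSC/ARR */
	return regmap_update_bits(ddata->regmap, TIM_CR1,
				  TIM_CR1_CEN, TIM_CR1_CEN);
}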
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index fba44ab..a1520d8 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -94,10 +94,8 @@
*/
#define TMIO_MMC_HAVE_CMD12_CTRL (1 << 7)
-/*
- * Some controllers needs to set 1 on SDIO status reserved bits
- */
-#define TMIO_MMC_SDIO_STATUS_QUIRK (1 << 8)
+/* Controller has some SDIO status bits which must be 1 */
+#define TMIO_MMC_SDIO_STATUS_SETBITS (1 << 8)
/*
* Some controllers have a 32-bit wide data port register
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 257173e..f541da6 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -35,6 +35,8 @@
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
+#define PHY_ID_KSZ8795 0x00221550
+
/* struct phy_device dev_flags definitions */
#define MICREL_PHY_50MHZ_CLK 0x00000001
#define MICREL_PHY_FXEN 0x00000002
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index ed30d5d..0590263 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -22,6 +22,7 @@
/*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */
#define WATCHDOG_MINOR 130 /* Watchdog timer */
#define TEMP_MINOR 131 /* Temperature Sensor */
+#define APM_MINOR_DEV 134
#define RTC_MINOR 135
#define EFI_RTC_MINOR 136 /* EFI Time services */
#define VHCI_MINOR 137
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 93bdb34..7e66e4f 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1374,6 +1374,7 @@ int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port);
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
+int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu);
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
u8 promisc);
int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
@@ -1384,6 +1385,8 @@ int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
bool *vlan_offload_disabled);
+void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
+ struct _rule_hw *eth_header);
int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
@@ -1537,8 +1540,13 @@ enum mlx4_ptys_proto {
MLX4_PTYS_EN = 1<<2,
};
+enum mlx4_ptys_flags {
+ MLX4_PTYS_AN_DISABLE_CAP = 1 << 5,
+ MLX4_PTYS_AN_DISABLE_ADMIN = 1 << 6,
+};
+
struct mlx4_ptys_reg {
- u8 resrvd1;
+ u8 flags;
u8 local_port;
u8 resrvd2;
u8 proto_mask;
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 7c3c0d3..9589884 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -42,13 +42,13 @@ struct mlx5_core_cq {
int cqe_sz;
__be32 *set_ci_db;
__be32 *arm_db;
+ struct mlx5_uars_page *uar;
atomic_t refcount;
struct completion free;
unsigned vector;
unsigned int irqn;
void (*comp) (struct mlx5_core_cq *);
void (*event) (struct mlx5_core_cq *, enum mlx5_event);
- struct mlx5_uar *uar;
u32 cons_index;
unsigned arm_sn;
struct mlx5_rsc_debug *dbg;
@@ -144,7 +144,6 @@ enum {
static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
void __iomem *uar_page,
- spinlock_t *doorbell_lock,
u32 cons_index)
{
__be32 doorbell[2];
@@ -164,7 +163,7 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
doorbell[1] = cpu_to_be32(cq->cqn);
- mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, doorbell_lock);
+ mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, NULL);
}
int mlx5_init_cq_table(struct mlx5_core_dev *dev);
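/*
 * Illustrative sketch (assumed caller, not part of this diff): with the CQ
 * now carrying a struct mlx5_uars_page pointer and mlx5_write64() accepting
 * a NULL lock, arming no longer needs a shared doorbell spinlock.
 * MLX5_CQ_DB_REQ_NOT is the pre-existing arm command and is assumed here.
 */
static void example_arm_cq(struct mlx5_core_cq *cq, u32 cons_index)
{
	mlx5_cq_arm(cq, MLX5_CQ_DB_REQ_NOT, cq->uar->map, cons_index);
}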
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 9f48936..dd9a263 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -67,10 +67,11 @@
/* insert a value to a struct */
#define MLX5_SET(typ, p, fld, v) do { \
+ u32 _v = v; \
BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
- (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
+ (~__mlx5_dw_mask(typ, fld))) | (((_v) & __mlx5_mask(typ, fld)) \
<< __mlx5_dw_bit_off(typ, fld))); \
} while (0)
@@ -212,10 +213,20 @@ enum {
};
enum {
- MLX5_BF_REGS_PER_PAGE = 4,
- MLX5_MAX_UAR_PAGES = 1 << 8,
- MLX5_NON_FP_BF_REGS_PER_PAGE = 2,
- MLX5_MAX_UUARS = MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE,
+ MLX5_ADAPTER_PAGE_SHIFT = 12,
+ MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT,
+};
+
+enum {
+ MLX5_BFREGS_PER_UAR = 4,
+ MLX5_MAX_UARS = 1 << 8,
+ MLX5_NON_FP_BFREGS_PER_UAR = 2,
+ MLX5_FP_BFREGS_PER_UAR = MLX5_BFREGS_PER_UAR -
+ MLX5_NON_FP_BFREGS_PER_UAR,
+ MLX5_MAX_BFREGS = MLX5_MAX_UARS *
+ MLX5_NON_FP_BFREGS_PER_UAR,
+ MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
+ MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
};
enum {
@@ -279,6 +290,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16,
MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
+ MLX5_EVENT_TYPE_PPS_EVENT = 0x25,
MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
MLX5_EVENT_TYPE_STALL_EVENT = 0x1b,
@@ -389,11 +401,6 @@ enum {
};
enum {
- MLX5_ADAPTER_PAGE_SHIFT = 12,
- MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT,
-};
-
-enum {
MLX5_CAP_OFF_CMDIF_CSUM = 46,
};
@@ -534,7 +541,9 @@ struct mlx5_eqe_page_fault {
__be16 wqe_index;
u16 reserved2;
__be16 packet_length;
- u8 reserved3[12];
+ __be32 token;
+ u8 reserved4[8];
+ __be32 pftype_wq;
} __packed wqe;
struct {
__be32 r_key;
@@ -542,9 +551,9 @@ struct mlx5_eqe_page_fault {
__be16 packet_length;
__be32 rdma_op_len;
__be64 rdma_va;
+ __be32 pftype_token;
} __packed rdma;
} __packed;
- __be32 flags_qpn;
} __packed;
struct mlx5_eqe_vport_change {
@@ -562,6 +571,22 @@ struct mlx5_eqe_port_module {
u8 error_type;
} __packed;
+struct mlx5_eqe_pps {
+ u8 rsvd0[3];
+ u8 pin;
+ u8 rsvd1[4];
+ union {
+ struct {
+ __be32 time_sec;
+ __be32 time_nsec;
+ };
+ struct {
+ __be64 time_stamp;
+ };
+ };
+ u8 rsvd2[12];
+} __packed;
+
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
@@ -576,6 +601,7 @@ union ev_data {
struct mlx5_eqe_page_fault page_fault;
struct mlx5_eqe_vport_change vport_change;
struct mlx5_eqe_port_module port_module;
+ struct mlx5_eqe_pps pps;
} __packed;
struct mlx5_eqe {
@@ -945,38 +971,54 @@ enum mlx5_cap_type {
MLX5_CAP_NUM
};
+enum mlx5_pcam_reg_groups {
+ MLX5_PCAM_REGS_5000_TO_507F = 0x0,
+};
+
+enum mlx5_pcam_feature_groups {
+ MLX5_PCAM_FEATURE_ENHANCED_FEATURES = 0x0,
+};
+
+enum mlx5_mcam_reg_groups {
+ MLX5_MCAM_REGS_FIRST_128 = 0x0,
+};
+
+enum mlx5_mcam_feature_groups {
+ MLX5_MCAM_FEATURE_ENHANCED_FEATURES = 0x0,
+};
+
/* GET Dev Caps macros */
#define MLX5_CAP_GEN(mdev, cap) \
- MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
+ MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
#define MLX5_CAP_GEN_MAX(mdev, cap) \
- MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
+ MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)
#define MLX5_CAP_ETH(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+ mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
#define MLX5_CAP_ETH_MAX(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+ mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
#define MLX5_CAP_ROCE(mdev, cap) \
- MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
+ MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap)
#define MLX5_CAP_ROCE_MAX(mdev, cap) \
- MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
+ MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap)
#define MLX5_CAP_ATOMIC(mdev, cap) \
- MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
+ MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap)
#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
- MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
+ MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap)
#define MLX5_CAP_FLOWTABLE(mdev, cap) \
- MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
+ MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
- MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
+ MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap)
#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
@@ -998,11 +1040,11 @@ enum mlx5_cap_type {
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
- mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+ mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
- mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+ mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
@@ -1024,21 +1066,27 @@ enum mlx5_cap_type {
#define MLX5_CAP_ESW(mdev, cap) \
MLX5_GET(e_switch_cap, \
- mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
+ mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap)
#define MLX5_CAP_ESW_MAX(mdev, cap) \
MLX5_GET(e_switch_cap, \
- mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap)
+ mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap)
#define MLX5_CAP_ODP(mdev, cap)\
- MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
+ MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap)
#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
MLX5_GET(vector_calc_cap, \
- mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap)
+ mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap)
#define MLX5_CAP_QOS(mdev, cap)\
- MLX5_GET(qos_cap, mdev->hca_caps_cur[MLX5_CAP_QOS], cap)
+ MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)
+
+#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
+ MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
+
+#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
+ MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
enum {
MLX5_CMD_STAT_OK = 0x0,
@@ -1068,12 +1116,12 @@ enum {
MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
+ MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
};
enum {
MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0,
- MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2,
};
static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
diff --git a/include/linux/mlx5/doorbell.h b/include/linux/mlx5/doorbell.h
index afc78a3..0787de2 100644
--- a/include/linux/mlx5/doorbell.h
+++ b/include/linux/mlx5/doorbell.h
@@ -68,10 +68,12 @@ static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
{
unsigned long flags;
- spin_lock_irqsave(doorbell_lock, flags);
+ if (doorbell_lock)
+ spin_lock_irqsave(doorbell_lock, flags);
__raw_writel((__force u32) val[0], dest);
__raw_writel((__force u32) val[1], dest + 4);
- spin_unlock_irqrestore(doorbell_lock, flags);
+ if (doorbell_lock)
+ spin_unlock_irqrestore(doorbell_lock, flags);
}
#endif
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 0ae5536..1bc4641 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -42,6 +42,7 @@
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
+#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/mlx5/device.h>
@@ -83,6 +84,7 @@ enum {
MLX5_EQ_VEC_PAGES = 0,
MLX5_EQ_VEC_CMD = 1,
MLX5_EQ_VEC_ASYNC = 2,
+ MLX5_EQ_VEC_PFAULT = 3,
MLX5_EQ_VEC_COMP_BASE,
};
@@ -119,11 +121,15 @@ enum {
MLX5_REG_PVLC = 0x500f,
MLX5_REG_PCMR = 0x5041,
MLX5_REG_PMLP = 0x5002,
+ MLX5_REG_PCAM = 0x507f,
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
MLX5_REG_MCIA = 0x9014,
MLX5_REG_MLCR = 0x902b,
MLX5_REG_MPCNT = 0x9051,
+ MLX5_REG_MTPPS = 0x9053,
+ MLX5_REG_MTPPSE = 0x9054,
+ MLX5_REG_MCAM = 0x907f,
};
enum mlx5_dcbx_oper_mode {
@@ -171,6 +177,7 @@ enum mlx5_dev_event {
MLX5_DEV_EVENT_PKEY_CHANGE,
MLX5_DEV_EVENT_GUID_CHANGE,
MLX5_DEV_EVENT_CLIENT_REREG,
+ MLX5_DEV_EVENT_PPS,
};
enum mlx5_port_status {
@@ -178,36 +185,26 @@ enum mlx5_port_status {
MLX5_PORT_DOWN = 2,
};
-struct mlx5_uuar_info {
- struct mlx5_uar *uars;
- int num_uars;
- int num_low_latency_uuars;
- unsigned long *bitmap;
+enum mlx5_eq_type {
+ MLX5_EQ_TYPE_COMP,
+ MLX5_EQ_TYPE_ASYNC,
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ MLX5_EQ_TYPE_PF,
+#endif
+};
+
+struct mlx5_bfreg_info {
+ u32 *sys_pages;
+ int num_low_latency_bfregs;
unsigned int *count;
- struct mlx5_bf *bfs;
/*
- * protect uuar allocation data structs
+ * protect bfreg allocation data structs
*/
struct mutex lock;
u32 ver;
-};
-
-struct mlx5_bf {
- void __iomem *reg;
- void __iomem *regreg;
- int buf_size;
- struct mlx5_uar *uar;
- unsigned long offset;
- int need_lock;
- /* protect blue flame buffer selection when needed
- */
- spinlock_t lock;
-
- /* serialize 64 bit writes when done as two 32 bit accesses
- */
- spinlock_t lock32;
- int uuarn;
+ bool lib_uar_4k;
+ u32 num_sys_pages;
};
struct mlx5_cmd_first {
@@ -333,6 +330,14 @@ struct mlx5_eq_tasklet {
spinlock_t lock;
};
+struct mlx5_eq_pagefault {
+ struct work_struct work;
+ /* Pagefaults lock */
+ spinlock_t lock;
+ struct workqueue_struct *wq;
+ mempool_t *pool;
+};
+
struct mlx5_eq {
struct mlx5_core_dev *dev;
__be32 __iomem *doorbell;
@@ -346,7 +351,13 @@ struct mlx5_eq {
struct list_head list;
int index;
struct mlx5_rsc_debug *dbg;
- struct mlx5_eq_tasklet tasklet_ctx;
+ enum mlx5_eq_type type;
+ union {
+ struct mlx5_eq_tasklet tasklet_ctx;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ struct mlx5_eq_pagefault pf_ctx;
+#endif
+ };
};
struct mlx5_core_psv {
@@ -370,13 +381,21 @@ struct mlx5_core_sig_ctx {
u32 sigerr_count;
};
+enum {
+ MLX5_MKEY_MR = 1,
+ MLX5_MKEY_MW,
+};
+
struct mlx5_core_mkey {
u64 iova;
u64 size;
u32 key;
u32 pd;
+ u32 type;
};
+#define MLX5_24BIT_MASK ((1 << 24) - 1)
+
enum mlx5_res_type {
MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP,
MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ,
@@ -411,20 +430,47 @@ struct mlx5_eq_table {
struct mlx5_eq pages_eq;
struct mlx5_eq async_eq;
struct mlx5_eq cmd_eq;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ struct mlx5_eq pfault_eq;
+#endif
int num_comp_vectors;
/* protect EQs list
*/
spinlock_t lock;
};
-struct mlx5_uar {
- u32 index;
- struct list_head bf_list;
- unsigned free_bf_bmap;
- void __iomem *bf_map;
+struct mlx5_uars_page {
void __iomem *map;
+ bool wc;
+ u32 index;
+ struct list_head list;
+ unsigned int bfregs;
+ unsigned long *reg_bitmap; /* for non fast path bf regs */
+ unsigned long *fp_bitmap;
+ unsigned int reg_avail;
+ unsigned int fp_avail;
+ struct kref ref_count;
+ struct mlx5_core_dev *mdev;
+};
+
+struct mlx5_bfreg_head {
+ /* protect blue flame registers allocations */
+ struct mutex lock;
+ struct list_head list;
+};
+
+struct mlx5_bfreg_data {
+ struct mlx5_bfreg_head reg_head;
+ struct mlx5_bfreg_head wc_head;
};
+struct mlx5_sq_bfreg {
+ void __iomem *map;
+ struct mlx5_uars_page *up;
+ bool wc;
+ u32 index;
+ unsigned int offset;
+};
struct mlx5_core_health {
struct health_buffer __iomem *health;
@@ -497,6 +543,7 @@ struct mlx5_fc_stats {
struct mlx5_eswitch;
struct mlx5_lag;
+struct mlx5_pagefault;
struct mlx5_rl_entry {
u32 rate;
@@ -543,8 +590,6 @@ struct mlx5_priv {
struct mlx5_eq_table eq_table;
struct msix_entry *msix_arr;
struct mlx5_irq_info *irq_info;
- struct mlx5_uuar_info uuari;
- MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
/* pages stuff */
struct workqueue_struct *pg_wq;
@@ -601,6 +646,16 @@ struct mlx5_priv {
struct mlx5_rl_table rl_table;
struct mlx5_port_module_event_stats pme_stats;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ void (*pfault)(struct mlx5_core_dev *dev,
+ void *context,
+ struct mlx5_pagefault *pfault);
+ void *pfault_ctx;
+ struct srcu_struct pfault_srcu;
+#endif
+ struct mlx5_bfreg_data bfregs;
+ struct mlx5_uars_page *uar;
};
enum mlx5_device_state {
@@ -619,13 +674,56 @@ enum mlx5_pci_status {
MLX5_PCI_STATUS_ENABLED,
};
+enum mlx5_pagefault_type_flags {
+ MLX5_PFAULT_REQUESTOR = 1 << 0,
+ MLX5_PFAULT_WRITE = 1 << 1,
+ MLX5_PFAULT_RDMA = 1 << 2,
+};
+
+/* Contains the details of a pagefault. */
+struct mlx5_pagefault {
+ u32 bytes_committed;
+ u32 token;
+ u8 event_subtype;
+ u8 type;
+ union {
+ /* Initiator or send message responder pagefault details. */
+ struct {
+ /* Received packet size, only valid for responders. */
+ u32 packet_size;
+ /*
+ * Number of resource holding WQE, depends on type.
+ */
+ u32 wq_num;
+ /*
+ * WQE index. Refers to either the send queue or
+ * receive queue, according to event_subtype.
+ */
+ u16 wqe_index;
+ } wqe;
+ /* RDMA responder pagefault details */
+ struct {
+ u32 r_key;
+ /*
+ * Received packet size, minimal size page fault
+ * resolution required for forward progress.
+ */
+ u32 packet_size;
+ u32 rdma_op_len;
+ u64 rdma_va;
+ } rdma;
+ };
+
+ struct mlx5_eq *eq;
+ struct work_struct work;
+};
+
struct mlx5_td {
struct list_head tirs_list;
u32 tdn;
};
struct mlx5e_resources {
- struct mlx5_uar cq_uar;
u32 pdn;
struct mlx5_td td;
struct mlx5_core_mkey mkey;
@@ -640,8 +738,12 @@ struct mlx5_core_dev {
char board_id[MLX5_BOARD_ID_LEN];
struct mlx5_cmd cmd;
struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
- u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
- u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ struct {
+ u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
+ u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
+ } caps;
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
enum mlx5_device_state state;
@@ -815,11 +917,6 @@ void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
-int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
-int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
-int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
- bool map_wc);
-void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
@@ -879,15 +976,13 @@ void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
-#endif
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
- int nent, u64 mask, const char *name, struct mlx5_uar *uar);
+ int nent, u64 mask, const char *name,
+ enum mlx5_eq_type type);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
@@ -926,12 +1021,19 @@ int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
u8 port_num, void *out, size_t sz);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
+ u32 wq_num, u8 type, int error);
+#endif
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
+int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
+ bool map_wc, bool fast_path);
+void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
@@ -959,7 +1061,7 @@ enum {
};
enum {
- MAX_MR_CACHE_ENTRIES = 16,
+ MAX_MR_CACHE_ENTRIES = 21,
};
enum {
@@ -974,6 +1076,9 @@ struct mlx5_interface {
void (*detach)(struct mlx5_core_dev *dev, void *context);
void (*event)(struct mlx5_core_dev *dev, void *context,
enum mlx5_dev_event event, unsigned long param);
+ void (*pfault)(struct mlx5_core_dev *dev,
+ void *context,
+ struct mlx5_pagefault *pfault);
void * (*get_dev)(void *context);
int protocol;
struct list_head list;
@@ -988,6 +1093,8 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
+struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
+void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
struct mlx5_profile {
u64 mask;
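/*
 * Illustrative sketch (assumed caller, not part of this diff): the bfreg
 * API declared above replaces the removed mlx5_alloc_uuars()/
 * mlx5_alloc_map_uar() pair; a send-queue user allocates a blue-flame
 * register, uses its mapping, and releases it again.
 */
static int example_use_bfreg(struct mlx5_core_dev *mdev)
{
	struct mlx5_sq_bfreg bfreg;
	int err;

	err = mlx5_alloc_bfreg(mdev, &bfreg, false, false); /* no WC, not fast path */
	if (err)
		return err;

	/* ... ring doorbells through bfreg.map at bfreg.offset ... */

	mlx5_free_bfreg(mdev, &bfreg);
	return 0;
}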
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 57bec54..afcd473 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -328,7 +328,7 @@ struct mlx5_ifc_odp_per_transport_service_cap_bits {
u8 receive[0x1];
u8 write[0x1];
u8 read[0x1];
- u8 reserved_at_4[0x1];
+ u8 atomic[0x1];
u8 srq_receive[0x1];
u8 reserved_at_6[0x1a];
};
@@ -365,8 +365,8 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 ip_protocol[0x8];
u8 ip_dscp[0x6];
u8 ip_ecn[0x2];
- u8 vlan_tag[0x1];
- u8 reserved_at_91[0x1];
+ u8 cvlan_tag[0x1];
+ u8 svlan_tag[0x1];
u8 frag[0x1];
u8 reserved_at_93[0x4];
u8 tcp_flags[0x9];
@@ -398,9 +398,11 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 inner_second_cfi[0x1];
u8 inner_second_vid[0xc];
- u8 outer_second_vlan_tag[0x1];
- u8 inner_second_vlan_tag[0x1];
- u8 reserved_at_62[0xe];
+ u8 outer_second_cvlan_tag[0x1];
+ u8 inner_second_cvlan_tag[0x1];
+ u8 outer_second_svlan_tag[0x1];
+ u8 inner_second_svlan_tag[0x1];
+ u8 reserved_at_64[0xc];
u8 gre_protocol[0x10];
u8 gre_key_h[0x18];
@@ -545,7 +547,9 @@ struct mlx5_ifc_e_switch_cap_bits {
struct mlx5_ifc_qos_cap_bits {
u8 packet_pacing[0x1];
u8 esw_scheduling[0x1];
- u8 reserved_at_2[0x1e];
+ u8 esw_bw_share[0x1];
+ u8 esw_rate_limit[0x1];
+ u8 reserved_at_4[0x1c];
u8 reserved_at_20[0x20];
@@ -573,7 +577,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 lro_cap[0x1];
u8 lro_psh_flag[0x1];
u8 lro_time_stamp[0x1];
- u8 reserved_at_5[0x3];
+ u8 reserved_at_5[0x2];
+ u8 wqe_vlan_insert[0x1];
u8 self_lb_en_modifiable[0x1];
u8 reserved_at_9[0x2];
u8 max_lso_cap[0x5];
@@ -782,11 +787,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_eq[0x4];
u8 max_indirection[0x8];
- u8 reserved_at_108[0x1];
+ u8 fixed_buffer_size[0x1];
u8 log_max_mrw_sz[0x7];
u8 reserved_at_110[0x2];
u8 log_max_bsf_list_size[0x6];
- u8 reserved_at_118[0x2];
+ u8 umr_extended_translation_offset[0x1];
+ u8 null_mkey[0x1];
u8 log_max_klm_list_size[0x6];
u8 reserved_at_120[0xa];
@@ -799,10 +805,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_150[0xa];
u8 log_max_ra_res_qp[0x6];
- u8 pad_cap[0x1];
+ u8 end_pad[0x1];
u8 cc_query_allowed[0x1];
u8 cc_modify_allowed[0x1];
- u8 reserved_at_163[0xd];
+ u8 start_pad[0x1];
+ u8 cache_line_128byte[0x1];
+ u8 reserved_at_163[0xb];
u8 gid_table_size[0x10];
u8 out_of_seq_cnt[0x1];
@@ -823,18 +831,21 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 nic_flow_table[0x1];
u8 eswitch_flow_table[0x1];
u8 early_vf_enable[0x1];
- u8 reserved_at_1a9[0x2];
+ u8 mcam_reg[0x1];
+ u8 pcam_reg[0x1];
u8 local_ca_ack_delay[0x5];
u8 port_module_event[0x1];
- u8 reserved_at_1b0[0x1];
+ u8 reserved_at_1b1[0x1];
u8 ports_check[0x1];
- u8 reserved_at_1b2[0x1];
+ u8 reserved_at_1b3[0x1];
u8 disable_link_up[0x1];
u8 beacon_led[0x1];
u8 port_type[0x2];
u8 num_ports[0x8];
- u8 reserved_at_1c0[0x3];
+ u8 reserved_at_1c0[0x1];
+ u8 pps[0x1];
+ u8 pps_modify[0x1];
u8 log_max_msg[0x5];
u8 reserved_at_1c8[0x4];
u8 max_tc[0x4];
@@ -858,7 +869,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 compact_address_vector[0x1];
u8 striding_rq[0x1];
- u8 reserved_at_201[0x2];
+ u8 reserved_at_202[0x2];
u8 ipoib_basic_offloads[0x1];
u8 reserved_at_205[0xa];
u8 drain_sigerr[0x1];
@@ -904,7 +915,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 uc[0x1];
u8 rc[0x1];
- u8 reserved_at_240[0xa];
+ u8 uar_4k[0x1];
+ u8 reserved_at_241[0x9];
u8 uar_sz[0x6];
u8 reserved_at_250[0x8];
u8 log_pg_sz[0x8];
@@ -996,7 +1008,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 device_frequency_mhz[0x20];
u8 device_frequency_khz[0x20];
- u8 reserved_at_500[0x80];
+ u8 reserved_at_500[0x20];
+ u8 num_of_uars_per_page[0x20];
+ u8 reserved_at_540[0x40];
u8 reserved_at_580[0x3f];
u8 cqe_compression[0x1];
@@ -1009,10 +1023,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 rndv_offload_rc[0x1];
u8 rndv_offload_dc[0x1];
u8 log_tag_matching_list_sz[0x5];
- u8 reserved_at_5e8[0x3];
+ u8 reserved_at_5f8[0x3];
u8 log_max_xrq[0x5];
- u8 reserved_at_5f0[0x200];
+ u8 reserved_at_600[0x200];
};
enum mlx5_flow_destination_type {
@@ -1375,6 +1389,42 @@ struct mlx5_ifc_phys_layer_cntrs_bits {
u8 reserved_at_640[0x180];
};
+struct mlx5_ifc_phys_layer_statistical_cntrs_bits {
+ u8 time_since_last_clear_high[0x20];
+
+ u8 time_since_last_clear_low[0x20];
+
+ u8 phy_received_bits_high[0x20];
+
+ u8 phy_received_bits_low[0x20];
+
+ u8 phy_symbol_errors_high[0x20];
+
+ u8 phy_symbol_errors_low[0x20];
+
+ u8 phy_corrected_bits_high[0x20];
+
+ u8 phy_corrected_bits_low[0x20];
+
+ u8 phy_corrected_bits_lane0_high[0x20];
+
+ u8 phy_corrected_bits_lane0_low[0x20];
+
+ u8 phy_corrected_bits_lane1_high[0x20];
+
+ u8 phy_corrected_bits_lane1_low[0x20];
+
+ u8 phy_corrected_bits_lane2_high[0x20];
+
+ u8 phy_corrected_bits_lane2_low[0x20];
+
+ u8 phy_corrected_bits_lane3_high[0x20];
+
+ u8 phy_corrected_bits_lane3_low[0x20];
+
+ u8 reserved_at_200[0x5c0];
+};
+
struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits {
u8 symbol_error_counter[0x10];
@@ -1781,56 +1831,6 @@ struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits {
u8 reserved_at_140[0x680];
};
-struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits {
- u8 life_time_counter_high[0x20];
-
- u8 life_time_counter_low[0x20];
-
- u8 time_to_boot_image_start[0x20];
-
- u8 time_to_link_image[0x20];
-
- u8 calibration_time[0x20];
-
- u8 time_to_first_perst[0x20];
-
- u8 time_to_detect_state[0x20];
-
- u8 time_to_l0[0x20];
-
- u8 time_to_crs_en[0x20];
-
- u8 time_to_plastic_image_start[0x20];
-
- u8 time_to_iron_image_start[0x20];
-
- u8 perst_handler[0x20];
-
- u8 times_in_l1[0x20];
-
- u8 times_in_l23[0x20];
-
- u8 dl_down[0x20];
-
- u8 config_cycle1usec[0x20];
-
- u8 config_cycle2to7usec[0x20];
-
- u8 config_cycle_8to15usec[0x20];
-
- u8 config_cycle_16_to_63usec[0x20];
-
- u8 config_cycle_64usec[0x20];
-
- u8 correctable_err_msg_sent[0x20];
-
- u8 non_fatal_err_msg_sent[0x20];
-
- u8 fatal_err_msg_sent[0x20];
-
- u8 reserved_at_2e0[0x4e0];
-};
-
struct mlx5_ifc_cmd_inter_comp_event_bits {
u8 command_completion_vector[0x20];
@@ -2569,6 +2569,7 @@ enum {
MLX5_MKC_ACCESS_MODE_PA = 0x0,
MLX5_MKC_ACCESS_MODE_MTT = 0x1,
MLX5_MKC_ACCESS_MODE_KLMS = 0x2,
+ MLX5_MKC_ACCESS_MODE_KSM = 0x3,
};
struct mlx5_ifc_mkc_bits {
@@ -2992,12 +2993,12 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout;
struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
+ struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs;
u8 reserved_at_0[0x7c0];
};
union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits {
struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout;
- struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits pcie_tas_cntrs_grp_data_layout;
u8 reserved_at_0[0x7c0];
};
@@ -3677,6 +3678,10 @@ struct mlx5_ifc_query_special_contexts_out_bits {
u8 dump_fill_mkey[0x20];
u8 resd_lkey[0x20];
+
+ u8 null_mkey[0x20];
+
+ u8 reserved_at_a0[0x60];
};
struct mlx5_ifc_query_special_contexts_in_bits {
@@ -4769,12 +4774,11 @@ struct mlx5_ifc_page_fault_resume_in_bits {
u8 error[0x1];
u8 reserved_at_41[0x4];
- u8 rdma[0x1];
- u8 read_write[0x1];
- u8 req_res[0x1];
- u8 qpn[0x18];
+ u8 page_fault_type[0x3];
+ u8 wq_number[0x18];
- u8 reserved_at_60[0x20];
+ u8 reserved_at_60[0x8];
+ u8 token[0x18];
};
struct mlx5_ifc_nop_out_bits {
@@ -7561,6 +7565,63 @@ struct mlx5_ifc_peir_reg_bits {
u8 error_type[0x8];
};
+struct mlx5_ifc_pcam_enhanced_features_bits {
+ u8 reserved_at_0[0x7e];
+
+ u8 ppcnt_discard_group[0x1];
+ u8 ppcnt_statistical_group[0x1];
+};
+
+struct mlx5_ifc_pcam_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 feature_group[0x8];
+ u8 reserved_at_10[0x8];
+ u8 access_reg_group[0x8];
+
+ u8 reserved_at_20[0x20];
+
+ union {
+ u8 reserved_at_0[0x80];
+ } port_access_reg_cap_mask;
+
+ u8 reserved_at_c0[0x80];
+
+ union {
+ struct mlx5_ifc_pcam_enhanced_features_bits enhanced_features;
+ u8 reserved_at_0[0x80];
+ } feature_cap_mask;
+
+ u8 reserved_at_1c0[0xc0];
+};
+
+struct mlx5_ifc_mcam_enhanced_features_bits {
+ u8 reserved_at_0[0x7f];
+
+ u8 pcie_performance_group[0x1];
+};
+
+struct mlx5_ifc_mcam_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 feature_group[0x8];
+ u8 reserved_at_10[0x8];
+ u8 access_reg_group[0x8];
+
+ u8 reserved_at_20[0x20];
+
+ union {
+ u8 reserved_at_0[0x80];
+ } mng_access_reg_cap_mask;
+
+ u8 reserved_at_c0[0x80];
+
+ union {
+ struct mlx5_ifc_mcam_enhanced_features_bits enhanced_features;
+ u8 reserved_at_0[0x80];
+ } mng_feature_cap_mask;
+
+ u8 reserved_at_1c0[0x80];
+};
+
struct mlx5_ifc_pcap_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
@@ -7905,6 +7966,60 @@ struct mlx5_ifc_initial_seg_bits {
u8 reserved_at_80a0[0x17fc0];
};
+struct mlx5_ifc_mtpps_reg_bits {
+ u8 reserved_at_0[0xc];
+ u8 cap_number_of_pps_pins[0x4];
+ u8 reserved_at_10[0x4];
+ u8 cap_max_num_of_pps_in_pins[0x4];
+ u8 reserved_at_18[0x4];
+ u8 cap_max_num_of_pps_out_pins[0x4];
+
+ u8 reserved_at_20[0x24];
+ u8 cap_pin_3_mode[0x4];
+ u8 reserved_at_48[0x4];
+ u8 cap_pin_2_mode[0x4];
+ u8 reserved_at_50[0x4];
+ u8 cap_pin_1_mode[0x4];
+ u8 reserved_at_58[0x4];
+ u8 cap_pin_0_mode[0x4];
+
+ u8 reserved_at_60[0x4];
+ u8 cap_pin_7_mode[0x4];
+ u8 reserved_at_68[0x4];
+ u8 cap_pin_6_mode[0x4];
+ u8 reserved_at_70[0x4];
+ u8 cap_pin_5_mode[0x4];
+ u8 reserved_at_78[0x4];
+ u8 cap_pin_4_mode[0x4];
+
+ u8 reserved_at_80[0x80];
+
+ u8 enable[0x1];
+ u8 reserved_at_101[0xb];
+ u8 pattern[0x4];
+ u8 reserved_at_110[0x4];
+ u8 pin_mode[0x4];
+ u8 pin[0x8];
+
+ u8 reserved_at_120[0x20];
+
+ u8 time_stamp[0x40];
+
+ u8 out_pulse_duration[0x10];
+ u8 out_periodic_adjustment[0x10];
+
+ u8 reserved_at_1a0[0x60];
+};
+
+struct mlx5_ifc_mtppse_reg_bits {
+ u8 reserved_at_0[0x18];
+ u8 pin[0x8];
+ u8 event_arm[0x1];
+ u8 reserved_at_21[0x1b];
+ u8 event_generation_mode[0x4];
+ u8 reserved_at_40[0x40];
+};
+
union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
@@ -7950,6 +8065,8 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
struct mlx5_ifc_slrg_reg_bits slrg_reg;
struct mlx5_ifc_sltp_reg_bits sltp_reg;
+ struct mlx5_ifc_mtpps_reg_bits mtpps_reg;
+ struct mlx5_ifc_mtppse_reg_bits mtppse_reg;
u8 reserved_at_0[0x60e0];
};
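/*
 * Illustrative sketch (assumed caller, not part of this diff): the PCAM
 * feature bits defined above are read through the MLX5_CAP_PCAM_FEATURE()
 * helper added to driver.h earlier in this diff.
 */
static bool ppcnt_statistical_supported(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group);
}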
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 0aacb2a..3096370 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -50,9 +50,6 @@
#define MLX5_BSF_APPTAG_ESCAPE 0x1
#define MLX5_BSF_APPREF_ESCAPE 0x2
-#define MLX5_QPN_BITS 24
-#define MLX5_QPN_MASK ((1 << MLX5_QPN_BITS) - 1)
-
enum mlx5_qp_optpar {
MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
MLX5_QP_OPTPAR_RRE = 1 << 1,
@@ -215,6 +212,7 @@ struct mlx5_wqe_ctrl_seg {
#define MLX5_WQE_CTRL_OPCODE_MASK 0xff
#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
+#define MLX5_WQE_AV_EXT 0x80000000
enum {
MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
@@ -223,14 +221,26 @@ enum {
MLX5_ETH_WQE_L4_CSUM = 1 << 7,
};
+enum {
+ MLX5_ETH_WQE_INSERT_VLAN = 1 << 15,
+};
+
struct mlx5_wqe_eth_seg {
u8 rsvd0[4];
u8 cs_flags;
u8 rsvd1;
__be16 mss;
__be32 rsvd2;
- __be16 inline_hdr_sz;
- u8 inline_hdr_start[2];
+ union {
+ struct {
+ __be16 sz;
+ u8 start[2];
+ } inline_hdr;
+ struct {
+ __be16 type;
+ __be16 vlan_tci;
+ } insert;
+ };
};
struct mlx5_wqe_xrc_seg {
@@ -245,6 +255,23 @@ struct mlx5_wqe_masked_atomic_seg {
__be64 compare_mask;
};
+struct mlx5_base_av {
+ union {
+ struct {
+ __be32 qkey;
+ __be32 reserved;
+ } qkey;
+ __be64 dc_key;
+ } key;
+ __be32 dqp_dct;
+ u8 stat_rate_sl;
+ u8 fl_mlid;
+ union {
+ __be16 rlid;
+ __be16 udp_sport;
+ };
+};
+
struct mlx5_av {
union {
struct {
@@ -292,10 +319,14 @@ struct mlx5_wqe_data_seg {
struct mlx5_wqe_umr_ctrl_seg {
u8 flags;
u8 rsvd0[3];
- __be16 klm_octowords;
- __be16 bsf_octowords;
+ __be16 xlt_octowords;
+ union {
+ __be16 xlt_offset;
+ __be16 bsf_octowords;
+ };
__be64 mkey_mask;
- u8 rsvd1[32];
+ __be32 xlt_offset_47_16;
+ u8 rsvd1[28];
};
struct mlx5_seg_set_psv {
@@ -389,6 +420,10 @@ struct mlx5_bsf {
struct mlx5_bsf_inl m_inl;
};
+struct mlx5_mtt {
+ __be64 ptag;
+};
+
struct mlx5_klm {
__be32 bcount;
__be32 key;
@@ -410,46 +445,9 @@ struct mlx5_stride_block_ctrl_seg {
__be16 num_entries;
};
-enum mlx5_pagefault_flags {
- MLX5_PFAULT_REQUESTOR = 1 << 0,
- MLX5_PFAULT_WRITE = 1 << 1,
- MLX5_PFAULT_RDMA = 1 << 2,
-};
-
-/* Contains the details of a pagefault. */
-struct mlx5_pagefault {
- u32 bytes_committed;
- u8 event_subtype;
- enum mlx5_pagefault_flags flags;
- union {
- /* Initiator or send message responder pagefault details. */
- struct {
- /* Received packet size, only valid for responders. */
- u32 packet_size;
- /*
- * WQE index. Refers to either the send queue or
- * receive queue, according to event_subtype.
- */
- u16 wqe_index;
- } wqe;
- /* RDMA responder pagefault details */
- struct {
- u32 r_key;
- /*
- * Received packet size, minimal size page fault
- * resolution required for forward progress.
- */
- u32 packet_size;
- u32 rdma_op_len;
- u64 rdma_va;
- } rdma;
- };
-};
-
struct mlx5_core_qp {
struct mlx5_core_rsc_common common; /* must be first */
void (*event) (struct mlx5_core_qp *, int);
- void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *);
int qpn;
struct mlx5_rsc_debug *dbg;
int pid;
@@ -549,10 +547,6 @@ void mlx5_init_qp_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
- u8 context, int error);
-#endif
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
struct mlx5_core_qp *rq);
void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
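/*
 * Illustrative sketch (assumed transmit-path use, not part of this diff):
 * the new union in struct mlx5_wqe_eth_seg lets a sender request hardware
 * VLAN insertion instead of copying an inline header. The helper name and
 * the caller-supplied TCI are assumptions.
 */
static void example_insert_vlan(struct mlx5_wqe_eth_seg *eseg, u16 vlan_tci)
{
	eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
	eseg->insert.vlan_tci = cpu_to_be16(vlan_tci);
}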
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index ec35157..656c70b 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -51,6 +51,7 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr);
int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 *min_inline);
+void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline);
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 min_inline);
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index fe6b403..6ff66d6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -76,6 +76,10 @@ extern int mmap_rnd_compat_bits __read_mostly;
#define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x)))
#endif
+#ifndef lm_alias
+#define lm_alias(x) __va(__pa_symbol(x))
+#endif
+
/*
* To prevent common memory management code establishing
* a zero page mapping on a read fault.
@@ -1210,8 +1214,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows);
-int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp,
- spinlock_t **ptlp);
+int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+ pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
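/*
 * Illustrative sketch (assumed use, not part of this diff): lm_alias()
 * above returns the linear-map alias of a kernel-image symbol, which
 * matters on architectures that map the kernel image outside the linear
 * map; _stext is taken from <asm/sections.h> and is assumed here.
 */
static void *stext_linear_alias(void)
{
	return lm_alias(_stext);
}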
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 71613e8..41d376e 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -39,7 +39,7 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
{
__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
- mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
+ mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}
diff --git a/include/linux/mmc/boot.h b/include/linux/mmc/boot.h
deleted file mode 100644
index 23acc3b..0000000
--- a/include/linux/mmc/boot.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef LINUX_MMC_BOOT_H
-#define LINUX_MMC_BOOT_H
-
-enum { MMC_PROGRESS_ENTER, MMC_PROGRESS_INIT,
- MMC_PROGRESS_LOAD, MMC_PROGRESS_DONE };
-
-#endif /* LINUX_MMC_BOOT_H */
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 95d69d4..77e61e0 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -11,7 +11,6 @@
#define LINUX_MMC_CARD_H
#include <linux/device.h>
-#include <linux/mmc/core.h>
#include <linux/mod_devicetable.h>
struct mmc_cid {
@@ -84,6 +83,7 @@ struct mmc_ext_csd {
unsigned int hpi_cmd; /* cmd used as HPI */
bool bkops; /* background support bit */
bool man_bkops_en; /* manual bkops enable bit */
+ bool auto_bkops_en; /* auto bkops enable bit */
unsigned int data_sector_size; /* 512 bytes or 4KB */
unsigned int data_tag_unit_size; /* DATA TAG UNIT size */
unsigned int boot_ro_lock; /* ro lock support */
@@ -121,6 +121,9 @@ struct mmc_ext_csd {
u8 raw_pwr_cl_ddr_200_360; /* 253 */
u8 raw_bkops_status; /* 246 */
u8 raw_sectors[4]; /* 212 - 4 bytes */
+ u8 pre_eol_info; /* 267 */
+ u8 device_life_time_est_typ_a; /* 268 */
+ u8 device_life_time_est_typ_b; /* 269 */
unsigned int feature_support;
#define MMC_DISCARD_FEATURE BIT(0) /* CMD38 feature */
@@ -203,7 +206,6 @@ struct sdio_cis {
};
struct mmc_host;
-struct mmc_ios;
struct sdio_func;
struct sdio_func_tuple;
@@ -247,13 +249,6 @@ struct mmc_card {
#define MMC_TYPE_SDIO 2 /* SDIO card */
#define MMC_TYPE_SD_COMBO 3 /* SD combo (IO+mem) card */
unsigned int state; /* (our) card state */
-#define MMC_STATE_PRESENT (1<<0) /* present in sysfs */
-#define MMC_STATE_READONLY (1<<1) /* card is read-only */
-#define MMC_STATE_BLOCKADDR (1<<2) /* card uses block-addressing */
-#define MMC_CARD_SDXC (1<<3) /* card is SDXC */
-#define MMC_CARD_REMOVED (1<<4) /* card has been removed */
-#define MMC_STATE_DOING_BKOPS (1<<5) /* card is doing BKOPS */
-#define MMC_STATE_SUSPENDED (1<<6) /* card is suspended */
unsigned int quirks; /* card quirks */
#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */
#define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */
@@ -272,7 +267,6 @@ struct mmc_card {
#define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */
#define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */
-
unsigned int erase_size; /* erase size in sectors */
unsigned int erase_shift; /* if erase unit is power 2 */
unsigned int pref_erase; /* in sectors */
@@ -308,245 +302,13 @@ struct mmc_card {
unsigned int nr_parts;
};
-/*
- * This function fill contents in mmc_part.
- */
-static inline void mmc_part_add(struct mmc_card *card, unsigned int size,
- unsigned int part_cfg, char *name, int idx, bool ro,
- int area_type)
-{
- card->part[card->nr_parts].size = size;
- card->part[card->nr_parts].part_cfg = part_cfg;
- sprintf(card->part[card->nr_parts].name, name, idx);
- card->part[card->nr_parts].force_ro = ro;
- card->part[card->nr_parts].area_type = area_type;
- card->nr_parts++;
-}
-
static inline bool mmc_large_sector(struct mmc_card *card)
{
return card->ext_csd.data_sector_size == 4096;
}
-/*
- * The world is not perfect and supplies us with broken mmc/sdio devices.
- * For at least some of these bugs we need a work-around.
- */
-
-struct mmc_fixup {
- /* CID-specific fields. */
- const char *name;
-
- /* Valid revision range */
- u64 rev_start, rev_end;
-
- unsigned int manfid;
- unsigned short oemid;
-
- /* SDIO-specfic fields. You can use SDIO_ANY_ID here of course */
- u16 cis_vendor, cis_device;
-
- /* for MMC cards */
- unsigned int ext_csd_rev;
-
- void (*vendor_fixup)(struct mmc_card *card, int data);
- int data;
-};
-
-#define CID_MANFID_ANY (-1u)
-#define CID_OEMID_ANY ((unsigned short) -1)
-#define CID_NAME_ANY (NULL)
-
-#define EXT_CSD_REV_ANY (-1u)
-
-#define CID_MANFID_SANDISK 0x2
-#define CID_MANFID_TOSHIBA 0x11
-#define CID_MANFID_MICRON 0x13
-#define CID_MANFID_SAMSUNG 0x15
-#define CID_MANFID_KINGSTON 0x70
-#define CID_MANFID_HYNIX 0x90
-
-#define END_FIXUP { NULL }
-
-#define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end, \
- _cis_vendor, _cis_device, \
- _fixup, _data, _ext_csd_rev) \
- { \
- .name = (_name), \
- .manfid = (_manfid), \
- .oemid = (_oemid), \
- .rev_start = (_rev_start), \
- .rev_end = (_rev_end), \
- .cis_vendor = (_cis_vendor), \
- .cis_device = (_cis_device), \
- .vendor_fixup = (_fixup), \
- .data = (_data), \
- .ext_csd_rev = (_ext_csd_rev), \
- }
-
-#define MMC_FIXUP_REV(_name, _manfid, _oemid, _rev_start, _rev_end, \
- _fixup, _data, _ext_csd_rev) \
- _FIXUP_EXT(_name, _manfid, \
- _oemid, _rev_start, _rev_end, \
- SDIO_ANY_ID, SDIO_ANY_ID, \
- _fixup, _data, _ext_csd_rev) \
-
-#define MMC_FIXUP(_name, _manfid, _oemid, _fixup, _data) \
- MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data, \
- EXT_CSD_REV_ANY)
-
-#define MMC_FIXUP_EXT_CSD_REV(_name, _manfid, _oemid, _fixup, _data, \
- _ext_csd_rev) \
- MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data, \
- _ext_csd_rev)
-
-#define SDIO_FIXUP(_vendor, _device, _fixup, _data) \
- _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY, \
- CID_OEMID_ANY, 0, -1ull, \
- _vendor, _device, \
- _fixup, _data, EXT_CSD_REV_ANY) \
-
-#define cid_rev(hwrev, fwrev, year, month) \
- (((u64) hwrev) << 40 | \
- ((u64) fwrev) << 32 | \
- ((u64) year) << 16 | \
- ((u64) month))
-
-#define cid_rev_card(card) \
- cid_rev(card->cid.hwrev, \
- card->cid.fwrev, \
- card->cid.year, \
- card->cid.month)
-
-/*
- * Unconditionally quirk add/remove.
- */
-
-static inline void __maybe_unused add_quirk(struct mmc_card *card, int data)
-{
- card->quirks |= data;
-}
-
-static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data)
-{
- card->quirks &= ~data;
-}
-
#define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC)
#define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD)
#define mmc_card_sdio(c) ((c)->type == MMC_TYPE_SDIO)
-#define mmc_card_present(c) ((c)->state & MMC_STATE_PRESENT)
-#define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY)
-#define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR)
-#define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC)
-#define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED))
-#define mmc_card_doing_bkops(c) ((c)->state & MMC_STATE_DOING_BKOPS)
-#define mmc_card_suspended(c) ((c)->state & MMC_STATE_SUSPENDED)
-
-#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
-#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
-#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
-#define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC)
-#define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED)
-#define mmc_card_set_doing_bkops(c) ((c)->state |= MMC_STATE_DOING_BKOPS)
-#define mmc_card_clr_doing_bkops(c) ((c)->state &= ~MMC_STATE_DOING_BKOPS)
-#define mmc_card_set_suspended(c) ((c)->state |= MMC_STATE_SUSPENDED)
-#define mmc_card_clr_suspended(c) ((c)->state &= ~MMC_STATE_SUSPENDED)
-
-/*
- * Quirk add/remove for MMC products.
- */
-
-static inline void __maybe_unused add_quirk_mmc(struct mmc_card *card, int data)
-{
- if (mmc_card_mmc(card))
- card->quirks |= data;
-}
-
-static inline void __maybe_unused remove_quirk_mmc(struct mmc_card *card,
- int data)
-{
- if (mmc_card_mmc(card))
- card->quirks &= ~data;
-}
-
-/*
- * Quirk add/remove for SD products.
- */
-
-static inline void __maybe_unused add_quirk_sd(struct mmc_card *card, int data)
-{
- if (mmc_card_sd(card))
- card->quirks |= data;
-}
-
-static inline void __maybe_unused remove_quirk_sd(struct mmc_card *card,
- int data)
-{
- if (mmc_card_sd(card))
- card->quirks &= ~data;
-}
-
-static inline int mmc_card_lenient_fn0(const struct mmc_card *c)
-{
- return c->quirks & MMC_QUIRK_LENIENT_FN0;
-}
-
-static inline int mmc_blksz_for_byte_mode(const struct mmc_card *c)
-{
- return c->quirks & MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
-}
-
-static inline int mmc_card_disable_cd(const struct mmc_card *c)
-{
- return c->quirks & MMC_QUIRK_DISABLE_CD;
-}
-
-static inline int mmc_card_nonstd_func_interface(const struct mmc_card *c)
-{
- return c->quirks & MMC_QUIRK_NONSTD_FUNC_IF;
-}
-
-static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c)
-{
- return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512;
-}
-
-static inline int mmc_card_long_read_time(const struct mmc_card *c)
-{
- return c->quirks & MMC_QUIRK_LONG_READ_TIME;
-}
-
-static inline int mmc_card_broken_irq_polling(const struct mmc_card *c)
-{
- return c->quirks & MMC_QUIRK_BROKEN_IRQ_POLLING;
-}
-
-static inline int mmc_card_broken_hpi(const struct mmc_card *c)
-{
- return c->quirks & MMC_QUIRK_BROKEN_HPI;
-}
-
-#define mmc_card_name(c) ((c)->cid.prod_name)
-#define mmc_card_id(c) (dev_name(&(c)->dev))
-
-#define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev)
-
-/*
- * MMC device driver (e.g., Flash card, I/O card...)
- */
-struct mmc_driver {
- struct device_driver drv;
- int (*probe)(struct mmc_card *);
- void (*remove)(struct mmc_card *);
- void (*shutdown)(struct mmc_card *);
-};
-
-extern int mmc_register_driver(struct mmc_driver *);
-extern void mmc_unregister_driver(struct mmc_driver *);
-
-extern void mmc_fixup_device(struct mmc_card *card,
- const struct mmc_fixup *table);
-
#endif /* LINUX_MMC_CARD_H */
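/*
 * Illustrative sketch (assumed debug helper, not part of this diff): the
 * three new EXT_CSD health fields parsed above can be reported straight
 * from struct mmc_card.
 */
static void example_report_health(struct mmc_card *card)
{
	pr_info("pre_eol_info %u, device life time est. A %u / B %u\n",
		card->ext_csd.pre_eol_info,
		card->ext_csd.device_life_time_est_typ_a,
		card->ext_csd.device_life_time_est_typ_b);
}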
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index e33cc74..a0c63ea 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -8,10 +8,9 @@
#ifndef LINUX_MMC_CORE_H
#define LINUX_MMC_CORE_H
-#include <linux/interrupt.h>
#include <linux/completion.h>
+#include <linux/types.h>
-struct request;
struct mmc_data;
struct mmc_request;
@@ -159,79 +158,14 @@ struct mmc_request {
struct mmc_card;
struct mmc_async_req;
-extern int mmc_stop_bkops(struct mmc_card *);
-extern int mmc_read_bkops_status(struct mmc_card *);
-extern struct mmc_async_req *mmc_start_req(struct mmc_host *,
- struct mmc_async_req *,
- enum mmc_blk_status *);
-extern int mmc_interrupt_hpi(struct mmc_card *);
-extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *);
-extern void mmc_wait_for_req_done(struct mmc_host *host,
- struct mmc_request *mrq);
-extern bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
-extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int);
-extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *);
-extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
- struct mmc_command *, int);
-extern void mmc_start_bkops(struct mmc_card *card, bool from_exception);
-extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
-extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
-extern int mmc_abort_tuning(struct mmc_host *host, u32 opcode);
-extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
-
-#define MMC_ERASE_ARG 0x00000000
-#define MMC_SECURE_ERASE_ARG 0x80000000
-#define MMC_TRIM_ARG 0x00000001
-#define MMC_DISCARD_ARG 0x00000003
-#define MMC_SECURE_TRIM1_ARG 0x80000001
-#define MMC_SECURE_TRIM2_ARG 0x80008000
-
-#define MMC_SECURE_ARGS 0x80000000
-#define MMC_TRIM_ARGS 0x00008001
-
-extern int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
- unsigned int arg);
-extern int mmc_can_erase(struct mmc_card *card);
-extern int mmc_can_trim(struct mmc_card *card);
-extern int mmc_can_discard(struct mmc_card *card);
-extern int mmc_can_sanitize(struct mmc_card *card);
-extern int mmc_can_secure_erase_trim(struct mmc_card *card);
-extern int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
- unsigned int nr);
-extern unsigned int mmc_calc_max_discard(struct mmc_card *card);
-
-extern int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen);
-extern int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
- bool is_rel_write);
-extern int mmc_hw_reset(struct mmc_host *host);
-extern int mmc_can_reset(struct mmc_card *card);
-
-extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *);
-extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int);
-
-extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort);
-extern void mmc_release_host(struct mmc_host *host);
-
-extern void mmc_get_card(struct mmc_card *card);
-extern void mmc_put_card(struct mmc_card *card);
-
-extern int mmc_flush_cache(struct mmc_card *);
-
-extern int mmc_detect_card_removed(struct mmc_host *host);
-
-/**
- * mmc_claim_host - exclusively claim a host
- * @host: mmc host to claim
- *
- * Claim a host for a set of operations.
- */
-static inline void mmc_claim_host(struct mmc_host *host)
-{
- __mmc_claim_host(host, NULL);
-}
-
-struct device_node;
-extern u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max);
-extern int mmc_of_parse_voltage(struct device_node *np, u32 *mask);
+struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
+ struct mmc_async_req *areq,
+ enum mmc_blk_status *ret_stat);
+void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq);
+int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd,
+ int retries);
+
+int mmc_hw_reset(struct mmc_host *host);
+void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card);
#endif /* LINUX_MMC_CORE_H */
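/*
 * Illustrative sketch (assumed caller, not part of this diff): of the much
 * smaller public API that remains above, mmc_wait_for_cmd() is still how a
 * bare command is issued; MMC_SEND_STATUS, MMC_RSP_R1 and MMC_CMD_AC come
 * from the existing mmc headers and are assumed here.
 */
static int example_send_status(struct mmc_host *host, u32 rca)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	cmd.arg = rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, 3);	/* up to 3 retries */
}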
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
deleted file mode 100644
index 15db6f8..0000000
--- a/include/linux/mmc/dw_mmc.h
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * Synopsys DesignWare Multimedia Card Interface driver
- * (Based on NXP driver for lpc 31xx)
- *
- * Copyright (C) 2009 NXP Semiconductors
- * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef LINUX_MMC_DW_MMC_H
-#define LINUX_MMC_DW_MMC_H
-
-#include <linux/scatterlist.h>
-#include <linux/mmc/core.h>
-#include <linux/dmaengine.h>
-#include <linux/reset.h>
-
-#define MAX_MCI_SLOTS 2
-
-enum dw_mci_state {
- STATE_IDLE = 0,
- STATE_SENDING_CMD,
- STATE_SENDING_DATA,
- STATE_DATA_BUSY,
- STATE_SENDING_STOP,
- STATE_DATA_ERROR,
- STATE_SENDING_CMD11,
- STATE_WAITING_CMD11_DONE,
-};
-
-enum {
- EVENT_CMD_COMPLETE = 0,
- EVENT_XFER_COMPLETE,
- EVENT_DATA_COMPLETE,
- EVENT_DATA_ERROR,
-};
-
-enum dw_mci_cookie {
- COOKIE_UNMAPPED,
- COOKIE_PRE_MAPPED, /* mapped by pre_req() of dwmmc */
- COOKIE_MAPPED, /* mapped by prepare_data() of dwmmc */
-};
-
-struct mmc_data;
-
-enum {
- TRANS_MODE_PIO = 0,
- TRANS_MODE_IDMAC,
- TRANS_MODE_EDMAC
-};
-
-struct dw_mci_dma_slave {
- struct dma_chan *ch;
- enum dma_transfer_direction direction;
-};
-
-/**
- * struct dw_mci - MMC controller state shared between all slots
- * @lock: Spinlock protecting the queue and associated data.
- * @irq_lock: Spinlock protecting the INTMASK setting.
- * @regs: Pointer to MMIO registers.
- * @fifo_reg: Pointer to MMIO registers for data FIFO
- * @sg: Scatterlist entry currently being processed by PIO code, if any.
- * @sg_miter: PIO mapping scatterlist iterator.
- * @cur_slot: The slot which is currently using the controller.
- * @mrq: The request currently being processed on @cur_slot,
- * or NULL if the controller is idle.
- * @cmd: The command currently being sent to the card, or NULL.
- * @data: The data currently being transferred, or NULL if no data
- * transfer is in progress.
- * @stop_abort: The command currently prepared for stoping transfer.
- * @prev_blksz: The former transfer blksz record.
- * @timing: Record of current ios timing.
- * @use_dma: Whether DMA channel is initialized or not.
- * @using_dma: Whether DMA is in use for the current transfer.
- * @dma_64bit_address: Whether DMA supports 64-bit address mode or not.
- * @sg_dma: Bus address of DMA buffer.
- * @sg_cpu: Virtual address of DMA buffer.
- * @dma_ops: Pointer to platform-specific DMA callbacks.
- * @cmd_status: Snapshot of SR taken upon completion of the current
- * @ring_size: Buffer size for idma descriptors.
- * command. Only valid when EVENT_CMD_COMPLETE is pending.
- * @dms: structure of slave-dma private data.
- * @phy_regs: physical address of controller's register map
- * @data_status: Snapshot of SR taken upon completion of the current
- * data transfer. Only valid when EVENT_DATA_COMPLETE or
- * EVENT_DATA_ERROR is pending.
- * @stop_cmdr: Value to be loaded into CMDR when the stop command is
- * to be sent.
- * @dir_status: Direction of current transfer.
- * @tasklet: Tasklet running the request state machine.
- * @pending_events: Bitmask of events flagged by the interrupt handler
- * to be processed by the tasklet.
- * @completed_events: Bitmask of events which the state machine has
- * processed.
- * @state: Tasklet state.
- * @queue: List of slots waiting for access to the controller.
- * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
- * rate and timeout calculations.
- * @current_speed: Configured rate of the controller.
- * @num_slots: Number of slots available.
- * @fifoth_val: The value of FIFOTH register.
- * @verid: Denote Version ID.
- * @dev: Device associated with the MMC controller.
- * @pdata: Platform data associated with the MMC controller.
- * @drv_data: Driver specific data for identified variant of the controller
- * @priv: Implementation defined private data.
- * @biu_clk: Pointer to bus interface unit clock instance.
- * @ciu_clk: Pointer to card interface unit clock instance.
- * @slot: Slots sharing this MMC controller.
- * @fifo_depth: depth of FIFO.
- * @data_shift: log2 of FIFO item size.
- * @part_buf_start: Start index in part_buf.
- * @part_buf_count: Bytes of partial data in part_buf.
- * @part_buf: Simple buffer for partial fifo reads/writes.
- * @push_data: Pointer to FIFO push function.
- * @pull_data: Pointer to FIFO pull function.
- * @vqmmc_enabled: Status of vqmmc, should be true or false.
- * @irq_flags: The flags to be passed to request_irq.
- * @irq: The irq value to be passed to request_irq.
- * @sdio_id0: Number of slot0 in the SDIO interrupt registers.
- * @cmd11_timer: Timer for SD3.0 voltage switch over scheme.
- * @dto_timer: Timer for broken data transfer over scheme.
- *
- * Locking
- * =======
- *
- * @lock is a softirq-safe spinlock protecting @queue as well as
- * @cur_slot, @mrq and @state. These must always be updated
- * at the same time while holding @lock.
- *
- * @irq_lock is an irq-safe spinlock protecting the INTMASK register
- * to allow the interrupt handler to modify it directly. Held for only long
- * enough to read-modify-write INTMASK and no other locks are grabbed when
- * holding this one.
- *
- * The @mrq field of struct dw_mci_slot is also protected by @lock,
- * and must always be written at the same time as the slot is added to
- * @queue.
- *
- * @pending_events and @completed_events are accessed using atomic bit
- * operations, so they don't need any locking.
- *
- * None of the fields touched by the interrupt handler need any
- * locking. However, ordering is important: Before EVENT_DATA_ERROR or
- * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
- * interrupts must be disabled and @data_status updated with a
- * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
- * CMDRDY interrupt must be disabled and @cmd_status updated with a
- * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
- * bytes_xfered field of @data must be written. This is ensured by
- * using barriers.
- */
-struct dw_mci {
- spinlock_t lock;
- spinlock_t irq_lock;
- void __iomem *regs;
- void __iomem *fifo_reg;
-
- struct scatterlist *sg;
- struct sg_mapping_iter sg_miter;
-
- struct dw_mci_slot *cur_slot;
- struct mmc_request *mrq;
- struct mmc_command *cmd;
- struct mmc_data *data;
- struct mmc_command stop_abort;
- unsigned int prev_blksz;
- unsigned char timing;
-
- /* DMA interface members*/
- int use_dma;
- int using_dma;
- int dma_64bit_address;
-
- dma_addr_t sg_dma;
- void *sg_cpu;
- const struct dw_mci_dma_ops *dma_ops;
- /* For idmac */
- unsigned int ring_size;
-
- /* For edmac */
- struct dw_mci_dma_slave *dms;
-	/* Registers' physical base address */
- resource_size_t phy_regs;
-
- u32 cmd_status;
- u32 data_status;
- u32 stop_cmdr;
- u32 dir_status;
- struct tasklet_struct tasklet;
- unsigned long pending_events;
- unsigned long completed_events;
- enum dw_mci_state state;
- struct list_head queue;
-
- u32 bus_hz;
- u32 current_speed;
- u32 num_slots;
- u32 fifoth_val;
- u16 verid;
- struct device *dev;
- struct dw_mci_board *pdata;
- const struct dw_mci_drv_data *drv_data;
- void *priv;
- struct clk *biu_clk;
- struct clk *ciu_clk;
- struct dw_mci_slot *slot[MAX_MCI_SLOTS];
-
- /* FIFO push and pull */
- int fifo_depth;
- int data_shift;
- u8 part_buf_start;
- u8 part_buf_count;
- union {
- u16 part_buf16;
- u32 part_buf32;
- u64 part_buf;
- };
- void (*push_data)(struct dw_mci *host, void *buf, int cnt);
- void (*pull_data)(struct dw_mci *host, void *buf, int cnt);
-
- bool vqmmc_enabled;
- unsigned long irq_flags; /* IRQ flags */
- int irq;
-
- int sdio_id0;
-
- struct timer_list cmd11_timer;
- struct timer_list dto_timer;
-};
-
-/* DMA ops for Internal/External DMAC interface */
-struct dw_mci_dma_ops {
- /* DMA Ops */
- int (*init)(struct dw_mci *host);
- int (*start)(struct dw_mci *host, unsigned int sg_len);
- void (*complete)(void *host);
- void (*stop)(struct dw_mci *host);
- void (*cleanup)(struct dw_mci *host);
- void (*exit)(struct dw_mci *host);
-};
-
-struct dma_pdata;
-
-/* Board platform data */
-struct dw_mci_board {
- u32 num_slots;
-
- unsigned int bus_hz; /* Clock speed at the cclk_in pad */
-
- u32 caps; /* Capabilities */
- u32 caps2; /* More capabilities */
- u32 pm_caps; /* PM capabilities */
- /*
- * Override fifo depth. If 0, autodetect it from the FIFOTH register,
- * but note that this may not be reliable after a bootloader has used
- * it.
- */
- unsigned int fifo_depth;
-
- /* delay in mS before detecting cards after interrupt */
- u32 detect_delay_ms;
-
- struct reset_control *rstc;
- struct dw_mci_dma_ops *dma_ops;
- struct dma_pdata *data;
-};
-
-#endif /* LINUX_MMC_DW_MMC_H */
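
The locking notes in the removed kernel-doc above still describe the driver's event flow (the header simply moves out of include/linux in this series). A hedged sketch of that flow, with example_dw_mci_cmd_done() as an illustrative name only:

/* Per the doc: disable the command-ready interrupt, snapshot SR into
 * @cmd_status, then publish EVENT_CMD_COMPLETE with an atomic bit op and
 * let the tasklet finish the request under @lock.
 */
static void example_dw_mci_cmd_done(struct dw_mci *host, u32 status)
{
	host->cmd_status = status;	/* snapshot of SR */
	smp_wmb();			/* order the snapshot before the event bit */
	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}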
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 8bc8841..83f1c4a 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -10,16 +10,12 @@
#ifndef LINUX_MMC_HOST_H
#define LINUX_MMC_HOST_H
-#include <linux/leds.h>
-#include <linux/mutex.h>
-#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/fault-inject.h>
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
-#include <linux/mmc/mmc.h>
#include <linux/mmc/pm.h>
struct mmc_ios {
@@ -82,6 +78,8 @@ struct mmc_ios {
bool enhanced_strobe; /* hs400es selection */
};
+struct mmc_host;
+
struct mmc_host_ops {
/*
* It is optional for the host to implement pre_req and post_req in
@@ -162,9 +160,6 @@ struct mmc_host_ops {
unsigned int direction, int blk_size);
};
-struct mmc_card;
-struct device;
-
struct mmc_async_req {
/* active mmc request */
struct mmc_request *mrq;
@@ -264,17 +259,16 @@ struct mmc_host {
#define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. eMMC */
#define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */
#define MMC_CAP_ERASE (1 << 10) /* Allow erase/trim commands */
-#define MMC_CAP_1_8V_DDR (1 << 11) /* can support */
- /* DDR mode at 1.8V */
-#define MMC_CAP_1_2V_DDR (1 << 12) /* can support */
- /* DDR mode at 1.2V */
-#define MMC_CAP_POWER_OFF_CARD (1 << 13) /* Can power off after boot */
-#define MMC_CAP_BUS_WIDTH_TEST (1 << 14) /* CMD14/CMD19 bus width ok */
-#define MMC_CAP_UHS_SDR12 (1 << 15) /* Host supports UHS SDR12 mode */
-#define MMC_CAP_UHS_SDR25 (1 << 16) /* Host supports UHS SDR25 mode */
-#define MMC_CAP_UHS_SDR50 (1 << 17) /* Host supports UHS SDR50 mode */
-#define MMC_CAP_UHS_SDR104 (1 << 18) /* Host supports UHS SDR104 mode */
-#define MMC_CAP_UHS_DDR50 (1 << 19) /* Host supports UHS DDR50 mode */
+#define MMC_CAP_3_3V_DDR (1 << 11) /* Host supports eMMC DDR 3.3V */
+#define MMC_CAP_1_8V_DDR (1 << 12) /* Host supports eMMC DDR 1.8V */
+#define MMC_CAP_1_2V_DDR (1 << 13) /* Host supports eMMC DDR 1.2V */
+#define MMC_CAP_POWER_OFF_CARD (1 << 14) /* Can power off after boot */
+#define MMC_CAP_BUS_WIDTH_TEST (1 << 15) /* CMD14/CMD19 bus width ok */
+#define MMC_CAP_UHS_SDR12 (1 << 16) /* Host supports UHS SDR12 mode */
+#define MMC_CAP_UHS_SDR25 (1 << 17) /* Host supports UHS SDR25 mode */
+#define MMC_CAP_UHS_SDR50 (1 << 18) /* Host supports UHS SDR50 mode */
+#define MMC_CAP_UHS_SDR104 (1 << 19) /* Host supports UHS SDR104 mode */
+#define MMC_CAP_UHS_DDR50 (1 << 20) /* Host supports UHS DDR50 mode */
#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
@@ -397,11 +391,14 @@ struct mmc_host {
unsigned long private[0] ____cacheline_aligned;
};
+struct device_node;
+
struct mmc_host *mmc_alloc_host(int extra, struct device *);
int mmc_add_host(struct mmc_host *);
void mmc_remove_host(struct mmc_host *);
void mmc_free_host(struct mmc_host *);
int mmc_of_parse(struct mmc_host *host);
+int mmc_of_parse_voltage(struct device_node *np, u32 *mask);
static inline void *mmc_priv(struct mmc_host *host)
{
@@ -457,6 +454,7 @@ static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc,
}
#endif
+u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max);
int mmc_regulator_get_supply(struct mmc_host *mmc);
static inline int mmc_card_is_removable(struct mmc_host *host)
@@ -474,56 +472,20 @@ static inline int mmc_card_wake_sdio_irq(struct mmc_host *host)
return host->pm_flags & MMC_PM_WAKE_SDIO_IRQ;
}
-static inline int mmc_host_cmd23(struct mmc_host *host)
-{
- return host->caps & MMC_CAP_CMD23;
-}
-
-static inline int mmc_boot_partition_access(struct mmc_host *host)
-{
- return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC);
-}
-
-static inline int mmc_host_uhs(struct mmc_host *host)
-{
- return host->caps &
- (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
- MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
- MMC_CAP_UHS_DDR50);
-}
-
+/* TODO: Move to private header */
static inline int mmc_card_hs(struct mmc_card *card)
{
return card->host->ios.timing == MMC_TIMING_SD_HS ||
card->host->ios.timing == MMC_TIMING_MMC_HS;
}
+/* TODO: Move to private header */
static inline int mmc_card_uhs(struct mmc_card *card)
{
return card->host->ios.timing >= MMC_TIMING_UHS_SDR12 &&
card->host->ios.timing <= MMC_TIMING_UHS_DDR50;
}
-static inline bool mmc_card_hs200(struct mmc_card *card)
-{
- return card->host->ios.timing == MMC_TIMING_MMC_HS200;
-}
-
-static inline bool mmc_card_ddr52(struct mmc_card *card)
-{
- return card->host->ios.timing == MMC_TIMING_MMC_DDR52;
-}
-
-static inline bool mmc_card_hs400(struct mmc_card *card)
-{
- return card->host->ios.timing == MMC_TIMING_MMC_HS400;
-}
-
-static inline bool mmc_card_hs400es(struct mmc_card *card)
-{
- return card->host->ios.enhanced_strobe;
-}
-
void mmc_retune_timer_stop(struct mmc_host *host);
static inline void mmc_retune_needed(struct mmc_host *host)
@@ -532,18 +494,12 @@ static inline void mmc_retune_needed(struct mmc_host *host)
host->need_retune = 1;
}
-static inline void mmc_retune_recheck(struct mmc_host *host)
-{
- if (host->hold_retune <= 1)
- host->retune_now = 1;
-}
-
static inline bool mmc_can_retune(struct mmc_host *host)
{
return host->can_retune == 1;
}
-void mmc_retune_pause(struct mmc_host *host);
-void mmc_retune_unpause(struct mmc_host *host);
+int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
+int mmc_abort_tuning(struct mmc_host *host, u32 opcode);
#endif /* LINUX_MMC_HOST_H */
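
mmc_send_tuning() and mmc_abort_tuning() replace the retune pause/unpause prototypes in the public header. A hedged sketch of a host driver's ->execute_tuning() callback built on mmc_send_tuning(); the phase loop bound and example_set_sample_phase() are assumptions for illustration.

static int example_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	int phase, err;

	for (phase = 0; phase < 8; phase++) {
		example_set_sample_phase(mmc, phase);	/* hypothetical helper */
		err = mmc_send_tuning(mmc, opcode, NULL);
		if (!err)
			return 0;	/* this phase received the tuning block intact */
	}

	return -EIO;	/* no working sample phase found */
}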
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 672730a..3ffc27a 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -24,6 +24,8 @@
#ifndef LINUX_MMC_MMC_H
#define LINUX_MMC_MMC_H
+#include <linux/types.h>
+
/* Standard MMC commands (4.1) type argument response */
/* class 1 */
#define MMC_GO_IDLE_STATE 0 /* bc */
@@ -182,50 +184,6 @@ static inline bool mmc_op_multi(u32 opcode)
#define R2_SPI_OUT_OF_RANGE (1 << 15) /* or CSD overwrite */
#define R2_SPI_CSD_OVERWRITE R2_SPI_OUT_OF_RANGE
-/* These are unpacked versions of the actual responses */
-
-struct _mmc_csd {
- u8 csd_structure;
- u8 spec_vers;
- u8 taac;
- u8 nsac;
- u8 tran_speed;
- u16 ccc;
- u8 read_bl_len;
- u8 read_bl_partial;
- u8 write_blk_misalign;
- u8 read_blk_misalign;
- u8 dsr_imp;
- u16 c_size;
- u8 vdd_r_curr_min;
- u8 vdd_r_curr_max;
- u8 vdd_w_curr_min;
- u8 vdd_w_curr_max;
- u8 c_size_mult;
- union {
- struct { /* MMC system specification version 3.1 */
- u8 erase_grp_size;
- u8 erase_grp_mult;
- } v31;
- struct { /* MMC system specification version 2.2 */
- u8 sector_size;
- u8 erase_grp_size;
- } v22;
- } erase;
- u8 wp_grp_size;
- u8 wp_grp_enable;
- u8 default_ecc;
- u8 r2w_factor;
- u8 write_bl_len;
- u8 write_bl_partial;
- u8 file_format_grp;
- u8 copy;
- u8 perm_write_protect;
- u8 tmp_write_protect;
- u8 file_format;
- u8 ecc;
-};
-
/*
* OCR bits are mostly in host.h
*/
@@ -339,6 +297,9 @@ struct _mmc_csd {
#define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */
#define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */
#define EXT_CSD_FIRMWARE_VERSION 254 /* RO, 8 bytes */
+#define EXT_CSD_PRE_EOL_INFO 267 /* RO */
+#define EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A 268 /* RO */
+#define EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B 269 /* RO */
#define EXT_CSD_CMDQ_DEPTH 307 /* RO */
#define EXT_CSD_CMDQ_SUPPORT 308 /* RO */
#define EXT_CSD_SUPPORTED_MODE 493 /* RO */
@@ -446,6 +407,7 @@ struct _mmc_csd {
* BKOPS modes
*/
#define EXT_CSD_MANUAL_BKOPS_MASK 0x01
+#define EXT_CSD_AUTO_BKOPS_MASK 0x02
/*
* Command Queue
@@ -457,12 +419,23 @@ struct _mmc_csd {
/*
* MMC_SWITCH access modes
*/
-
#define MMC_SWITCH_MODE_CMD_SET 0x00 /* Change the command set */
#define MMC_SWITCH_MODE_SET_BITS 0x01 /* Set bits which are 1 in value */
#define MMC_SWITCH_MODE_CLEAR_BITS 0x02 /* Clear bits which are 1 in value */
#define MMC_SWITCH_MODE_WRITE_BYTE 0x03 /* Set target to value */
+/*
+ * Erase/trim/discard
+ */
+#define MMC_ERASE_ARG 0x00000000
+#define MMC_SECURE_ERASE_ARG 0x80000000
+#define MMC_TRIM_ARG 0x00000001
+#define MMC_DISCARD_ARG 0x00000003
+#define MMC_SECURE_TRIM1_ARG 0x80000001
+#define MMC_SECURE_TRIM2_ARG 0x80008000
+#define MMC_SECURE_ARGS 0x80000000
+#define MMC_TRIM_ARGS 0x00008001
+
#define mmc_driver_type_mask(n) (1 << (n))
#endif /* LINUX_MMC_MMC_H */
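
The erase/trim/discard argument encodings move into this public header. A hedged sketch of how a caller selects between them; mmc_can_trim() and mmc_erase() are existing core helpers whose declarations may live in a core-private header after this series.

static int example_discard_range(struct mmc_card *card,
				 unsigned int from, unsigned int nr)
{
	unsigned int arg = MMC_ERASE_ARG;	/* erase-group granularity */

	if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;		/* per write-block granularity */

	return mmc_erase(card, from, nr, arg);
}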
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index d43ef96..b733eb4 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -36,6 +36,7 @@
#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345
+#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
#define SDIO_DEVICE_ID_BROADCOM_4356 0x4356
@@ -51,6 +52,7 @@
#define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103
#define SDIO_DEVICE_ID_MARVELL_8688WLAN 0x9104
#define SDIO_DEVICE_ID_MARVELL_8688BT 0x9105
+#define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128
#define SDIO_VENDOR_ID_SIANO 0x039a
#define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201
@@ -60,4 +62,10 @@
#define SDIO_DEVICE_ID_SIANO_NOVA_A0 0x1100
#define SDIO_DEVICE_ID_SIANO_STELLAR 0x5347
+#define SDIO_VENDOR_ID_TI 0x0097
+#define SDIO_DEVICE_ID_TI_WL1271 0x4076
+
+#define SDIO_VENDOR_ID_STE 0x0020
+#define SDIO_DEVICE_ID_STE_CW1200 0x2280
+
#endif /* LINUX_MMC_SDIO_IDS_H */
diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h
index ccd8fb2..a7baa29 100644
--- a/include/linux/mmc/sh_mmcif.h
+++ b/include/linux/mmc/sh_mmcif.h
@@ -32,13 +32,8 @@
*/
struct sh_mmcif_plat_data {
- int (*get_cd)(struct platform_device *pdef);
unsigned int slave_id_tx; /* embedded slave_id_[tr]x */
unsigned int slave_id_rx;
- bool use_cd_gpio : 1;
- bool ccs_unsupported : 1;
- bool clk_ctrl2_present : 1;
- unsigned int cd_gpio;
u8 sup_pclk; /* 1 :SH7757, 0: SH7724/SH7372 */
unsigned long caps;
u32 ocr;
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index a7972cd..82f0d28 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -11,6 +11,9 @@
#ifndef MMC_SLOT_GPIO_H
#define MMC_SLOT_GPIO_H
+#include <linux/types.h>
+#include <linux/irqreturn.h>
+
struct mmc_host;
int mmc_gpio_get_ro(struct mmc_host *host);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 36d9896..f4aac87 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -972,12 +972,16 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
* @zonelist - The zonelist to search for a suitable zone
* @highest_zoneidx - The zone index of the highest zone to return
* @nodes - An optional nodemask to filter the zonelist with
- * @zone - The first suitable zone found is returned via this parameter
+ * @return - Zoneref pointer for the first suitable zone found (see below)
*
* This function returns the first zone at or below a given zone index that is
* within the allowed nodemask. The zoneref returned is a cursor that can be
* used to iterate the zonelist with next_zones_zonelist by advancing it by
* one before calling.
+ *
+ * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
+ * never NULL). This may happen either genuinely, or because of a concurrent
+ * nodemask update caused by a cpuset modification.
*/
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
enum zone_type highest_zoneidx,
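
A hedged sketch of the calling convention the updated kernel-doc describes: the returned zoneref is never NULL, so callers check z->zone instead. example_has_eligible_zone() is an illustrative name only.

static bool example_has_eligible_zone(struct zonelist *zonelist,
				      enum zone_type highest_zoneidx,
				      nodemask_t *nodes)
{
	struct zoneref *z = first_zones_zonelist(zonelist, highest_zoneidx, nodes);

	return z->zone != NULL;		/* NULL zone means no suitable zone was found */
}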
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 8a57f0b..8850fca 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -501,6 +501,7 @@ struct platform_device_id {
kernel_ulong_t driver_data;
};
+#define MDIO_NAME_SIZE 32
#define MDIO_MODULE_PREFIX "mdio:"
#define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d"
diff --git a/include/linux/module.h b/include/linux/module.h
index 4e09f46..0297c5c 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -18,7 +18,6 @@
#include <linux/moduleparam.h>
#include <linux/jump_label.h>
#include <linux/export.h>
-#include <linux/extable.h> /* only as arch move module.h -> extable.h */
#include <linux/rbtree_latch.h>
#include <linux/percpu.h>
@@ -344,7 +343,7 @@ struct module {
/* Exported symbols */
const struct kernel_symbol *syms;
- const unsigned long *crcs;
+ const s32 *crcs;
unsigned int num_syms;
/* Kernel parameters. */
@@ -357,18 +356,18 @@ struct module {
/* GPL-only exported symbols. */
unsigned int num_gpl_syms;
const struct kernel_symbol *gpl_syms;
- const unsigned long *gpl_crcs;
+ const s32 *gpl_crcs;
#ifdef CONFIG_UNUSED_SYMBOLS
/* unused exported symbols. */
const struct kernel_symbol *unused_syms;
- const unsigned long *unused_crcs;
+ const s32 *unused_crcs;
unsigned int num_unused_syms;
/* GPL-only, unused exported symbols. */
unsigned int num_unused_gpl_syms;
const struct kernel_symbol *unused_gpl_syms;
- const unsigned long *unused_gpl_crcs;
+ const s32 *unused_gpl_crcs;
#endif
#ifdef CONFIG_MODULE_SIG
@@ -380,7 +379,7 @@ struct module {
/* symbols that will be GPL-only in the near future. */
const struct kernel_symbol *gpl_future_syms;
- const unsigned long *gpl_future_crcs;
+ const s32 *gpl_future_crcs;
unsigned int num_gpl_future_syms;
/* Exception table */
@@ -521,7 +520,7 @@ struct module *find_module(const char *name);
struct symsearch {
const struct kernel_symbol *start, *stop;
- const unsigned long *crcs;
+ const s32 *crcs;
enum {
NOT_GPL_ONLY,
GPL_ONLY,
@@ -537,7 +536,7 @@ struct symsearch {
*/
const struct kernel_symbol *find_symbol(const char *name,
struct module **owner,
- const unsigned long **crc,
+ const s32 **crc,
bool gplok,
bool warn);
@@ -762,7 +761,7 @@ extern int module_sysfs_initialized;
#define __MODULE_STRING(x) __stringify(x)
-#ifdef CONFIG_DEBUG_SET_MODULE_RONX
+#ifdef CONFIG_STRICT_MODULE_RWX
extern void set_all_modules_text_rw(void);
extern void set_all_modules_text_ro(void);
extern void module_enable_ro(const struct module *mod, bool after_init);
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index e5fb813..d7f6333 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -3,6 +3,7 @@
#include <linux/in.h>
#include <linux/pim.h>
+#include <linux/rhashtable.h>
#include <net/sock.h>
#include <uapi/linux/mroute.h>
@@ -60,7 +61,6 @@ struct vif_device {
#define VIFF_STATIC 0x8000
#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)
-#define MFC_LINES 64
struct mr_table {
struct list_head list;
@@ -69,8 +69,9 @@ struct mr_table {
struct sock __rcu *mroute_sk;
struct timer_list ipmr_expire_timer;
struct list_head mfc_unres_queue;
- struct list_head mfc_cache_array[MFC_LINES];
struct vif_device vif_table[MAXVIFS];
+ struct rhltable mfc_hash;
+ struct list_head mfc_cache_list;
int maxvif;
atomic_t cache_resolve_queue_len;
bool mroute_do_assert;
@@ -85,17 +86,48 @@ enum {
MFC_STATIC = BIT(0),
};
+struct mfc_cache_cmp_arg {
+ __be32 mfc_mcastgrp;
+ __be32 mfc_origin;
+};
+
+/**
+ * struct mfc_cache - multicast routing entries
+ * @mnode: rhashtable list
+ * @mfc_mcastgrp: destination multicast group address
+ * @mfc_origin: source address
+ * @cmparg: used for rhashtable comparisons
+ * @mfc_parent: source interface (iif)
+ * @mfc_flags: entry flags
+ * @expires: unresolved entry expire time
+ * @unresolved: unresolved cached skbs
+ * @last_assert: time of last assert
+ * @minvif: minimum VIF id
+ * @maxvif: maximum VIF id
+ * @bytes: bytes that have passed for this entry
+ * @pkt: packets that have passed for this entry
+ * @wrong_if: number of wrong source interface hits
+ * @lastuse: time of last use of the group (traffic or update)
+ * @ttls: OIF TTL threshold array
+ * @list: global entry list
+ * @rcu: used for entry destruction
+ */
struct mfc_cache {
- struct list_head list;
- __be32 mfc_mcastgrp; /* Group the entry belongs to */
- __be32 mfc_origin; /* Source of packet */
- vifi_t mfc_parent; /* Source interface */
- int mfc_flags; /* Flags on line */
+ struct rhlist_head mnode;
+ union {
+ struct {
+ __be32 mfc_mcastgrp;
+ __be32 mfc_origin;
+ };
+ struct mfc_cache_cmp_arg cmparg;
+ };
+ vifi_t mfc_parent;
+ int mfc_flags;
union {
struct {
unsigned long expires;
- struct sk_buff_head unresolved; /* Unresolved buffers */
+ struct sk_buff_head unresolved;
} unres;
struct {
unsigned long last_assert;
@@ -105,20 +137,15 @@ struct mfc_cache {
unsigned long pkt;
unsigned long wrong_if;
unsigned long lastuse;
- unsigned char ttls[MAXVIFS]; /* TTL thresholds */
+ unsigned char ttls[MAXVIFS];
} res;
} mfc_un;
+ struct list_head list;
struct rcu_head rcu;
};
-#ifdef __BIG_ENDIAN
-#define MFC_HASH(a,b) (((((__force u32)(__be32)a)>>24)^(((__force u32)(__be32)b)>>26))&(MFC_LINES-1))
-#else
-#define MFC_HASH(a,b) ((((__force u32)(__be32)a)^(((__force u32)(__be32)b)>>2))&(MFC_LINES-1))
-#endif
-
struct rtmsg;
int ipmr_get_route(struct net *net, struct sk_buff *skb,
__be32 saddr, __be32 daddr,
- struct rtmsg *rtm, int nowait, u32 portid);
+ struct rtmsg *rtm, u32 portid);
#endif
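
With mfc_cache_array[] replaced by an rhltable, lookups key on struct mfc_cache_cmp_arg, which aliases the group/origin pair inside struct mfc_cache. A hedged sketch of such a lookup; example_mfc_params is illustrative only, the real table parameters live in net/ipv4/ipmr.c.

static const struct rhashtable_params example_mfc_params = {
	.head_offset		= offsetof(struct mfc_cache, mnode),
	.key_offset		= offsetof(struct mfc_cache, cmparg),
	.key_len		= sizeof(struct mfc_cache_cmp_arg),
	.automatic_shrinking	= true,
};

static struct rhlist_head *example_mfc_find(struct mr_table *mrt,
					    __be32 origin, __be32 mcastgrp)
{
	struct mfc_cache_cmp_arg arg = {
		.mfc_mcastgrp = mcastgrp,
		.mfc_origin = origin,
	};

	return rhltable_lookup(&mrt->mfc_hash, &arg, example_mfc_params);
}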
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 19a1c0c..ce44e3e 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -116,7 +116,7 @@ struct mfc6_cache {
struct rtmsg;
extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
- struct rtmsg *rtm, int nowait, u32 portid);
+ struct rtmsg *rtm, u32 portid);
#ifdef CONFIG_IPV6_MROUTE
extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 0db320b..a83b84f 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -17,7 +17,13 @@ struct msi_desc;
struct pci_dev;
struct platform_msi_priv_data;
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+#ifdef CONFIG_GENERIC_MSI_IRQ
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
+#else
+static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
+{
+}
+#endif
typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
struct msi_msg *msg);
@@ -116,11 +122,15 @@ struct msi_desc {
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
+void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
#else /* CONFIG_PCI_MSI */
static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
return NULL;
}
+static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
+{
+}
#endif /* CONFIG_PCI_MSI */
struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
@@ -128,7 +138,6 @@ struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
void free_msi_entry(struct msi_desc *entry);
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
-void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h
deleted file mode 100644
index ad3c348..0000000
--- a/include/linux/mtd/fsmc.h
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * include/linux/mtd/fsmc.h
- *
- * ST Microelectronics
- * Flexible Static Memory Controller (FSMC)
- * platform data interface and header file
- *
- * Copyright © 2010 ST Microelectronics
- * Vipin Kumar <vipin.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __MTD_FSMC_H
-#define __MTD_FSMC_H
-
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/physmap.h>
-#include <linux/types.h>
-#include <linux/mtd/partitions.h>
-#include <asm/param.h>
-
-#define FSMC_NAND_BW8 1
-#define FSMC_NAND_BW16 2
-
-#define FSMC_MAX_NOR_BANKS 4
-#define FSMC_MAX_NAND_BANKS 4
-
-#define FSMC_FLASH_WIDTH8 1
-#define FSMC_FLASH_WIDTH16 2
-
-/* fsmc controller registers for NOR flash */
-#define CTRL 0x0
- /* ctrl register definitions */
- #define BANK_ENABLE (1 << 0)
- #define MUXED (1 << 1)
- #define NOR_DEV (2 << 2)
- #define WIDTH_8 (0 << 4)
- #define WIDTH_16 (1 << 4)
- #define RSTPWRDWN (1 << 6)
- #define WPROT (1 << 7)
- #define WRT_ENABLE (1 << 12)
- #define WAIT_ENB (1 << 13)
-
-#define CTRL_TIM 0x4
- /* ctrl_tim register definitions */
-
-#define FSMC_NOR_BANK_SZ 0x8
-#define FSMC_NOR_REG_SIZE 0x40
-
-#define FSMC_NOR_REG(base, bank, reg) (base + \
- FSMC_NOR_BANK_SZ * (bank) + \
- reg)
-
-/* fsmc controller registers for NAND flash */
-#define PC 0x00
- /* pc register definitions */
- #define FSMC_RESET (1 << 0)
- #define FSMC_WAITON (1 << 1)
- #define FSMC_ENABLE (1 << 2)
- #define FSMC_DEVTYPE_NAND (1 << 3)
- #define FSMC_DEVWID_8 (0 << 4)
- #define FSMC_DEVWID_16 (1 << 4)
- #define FSMC_ECCEN (1 << 6)
- #define FSMC_ECCPLEN_512 (0 << 7)
- #define FSMC_ECCPLEN_256 (1 << 7)
- #define FSMC_TCLR_1 (1)
- #define FSMC_TCLR_SHIFT (9)
- #define FSMC_TCLR_MASK (0xF)
- #define FSMC_TAR_1 (1)
- #define FSMC_TAR_SHIFT (13)
- #define FSMC_TAR_MASK (0xF)
-#define STS 0x04
- /* sts register definitions */
- #define FSMC_CODE_RDY (1 << 15)
-#define COMM 0x08
- /* comm register definitions */
- #define FSMC_TSET_0 0
- #define FSMC_TSET_SHIFT 0
- #define FSMC_TSET_MASK 0xFF
- #define FSMC_TWAIT_6 6
- #define FSMC_TWAIT_SHIFT 8
- #define FSMC_TWAIT_MASK 0xFF
- #define FSMC_THOLD_4 4
- #define FSMC_THOLD_SHIFT 16
- #define FSMC_THOLD_MASK 0xFF
- #define FSMC_THIZ_1 1
- #define FSMC_THIZ_SHIFT 24
- #define FSMC_THIZ_MASK 0xFF
-#define ATTRIB 0x0C
-#define IOATA 0x10
-#define ECC1 0x14
-#define ECC2 0x18
-#define ECC3 0x1C
-#define FSMC_NAND_BANK_SZ 0x20
-
-#define FSMC_NAND_REG(base, bank, reg) (base + FSMC_NOR_REG_SIZE + \
- (FSMC_NAND_BANK_SZ * (bank)) + \
- reg)
-
-#define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
-
-struct fsmc_nand_timings {
- uint8_t tclr;
- uint8_t tar;
- uint8_t thiz;
- uint8_t thold;
- uint8_t twait;
- uint8_t tset;
-};
-
-enum access_mode {
- USE_DMA_ACCESS = 1,
- USE_WORD_ACCESS,
-};
-
-/**
- * fsmc_nand_platform_data - platform specific NAND controller config
- * @nand_timings: timing setup for the physical NAND interface
- * @partitions: partition table for the platform, use a default fallback
- * if this is NULL
- * @nr_partitions: the number of partitions in the previous entry
- * @options: different options for the driver
- * @width: bus width
- * @bank: default bank
- * @select_bank: callback to select a certain bank, this is
- * platform-specific. If the controller only supports one bank
- * this may be set to NULL
- */
-struct fsmc_nand_platform_data {
- struct fsmc_nand_timings *nand_timings;
- struct mtd_partition *partitions;
- unsigned int nr_partitions;
- unsigned int options;
- unsigned int width;
- unsigned int bank;
-
- enum access_mode mode;
-
- void (*select_bank)(uint32_t bank, uint32_t busw);
-
- /* priv structures for dma accesses */
- void *read_dma_priv;
- void *write_dma_priv;
-};
-
-extern int __init fsmc_nor_init(struct platform_device *pdev,
- unsigned long base, uint32_t bank, uint32_t width);
-extern void __init fsmc_init_board_info(struct platform_device *pdev,
- struct mtd_partition *partitions, unsigned int nr_partitions,
- unsigned int width);
-
-#endif /* __MTD_FSMC_H */
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 13f8052..eebdc63 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -24,6 +24,7 @@
#include <linux/uio.h>
#include <linux/notifier.h>
#include <linux/device.h>
+#include <linux/of.h>
#include <mtd/mtd-abi.h>
@@ -322,6 +323,7 @@ struct mtd_info {
int (*_block_isreserved) (struct mtd_info *mtd, loff_t ofs);
int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
+ int (*_max_bad_blocks) (struct mtd_info *mtd, loff_t ofs, size_t len);
int (*_suspend) (struct mtd_info *mtd);
void (*_resume) (struct mtd_info *mtd);
void (*_reboot) (struct mtd_info *mtd);
@@ -385,6 +387,8 @@ static inline void mtd_set_of_node(struct mtd_info *mtd,
struct device_node *np)
{
mtd->dev.of_node = np;
+ if (!mtd->name)
+ of_property_read_string(np, "label", &mtd->name);
}
static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
@@ -397,6 +401,18 @@ static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
}
+static inline int mtd_max_bad_blocks(struct mtd_info *mtd,
+ loff_t ofs, size_t len)
+{
+ if (!mtd->_max_bad_blocks)
+ return -ENOTSUPP;
+
+ if (mtd->size < (len + ofs) || ofs < 0)
+ return -EINVAL;
+
+ return mtd->_max_bad_blocks(mtd, ofs, len);
+}
+
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
struct mtd_pairing_info *info);
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
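
A hedged sketch of a caller using the new mtd_max_bad_blocks() wrapper to size the spares reserved for a region; example_reserve_spares() and its policy are assumptions for illustration.

static int example_reserve_spares(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int max_bad = mtd_max_bad_blocks(mtd, ofs, len);

	if (max_bad == -ENOTSUPP)
		return 0;		/* device cannot estimate; reserve nothing extra */
	if (max_bad < 0)
		return max_bad;		/* e.g. -EINVAL for an out-of-range region */

	return max_bad;			/* reserve this many spare blocks */
}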
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index c5f3a01..9591e0f 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -615,7 +615,7 @@ struct nand_buffers {
* @tALS_min: ALE setup time
* @tAR_min: ALE to RE# delay
* @tCEA_max: CE# access time
- * @tCEH_min:
+ * @tCEH_min: CE# high hold time
* @tCH_min: CE# hold time
* @tCHZ_max: CE# high to output hi-Z
* @tCLH_min: CLE hold time
@@ -801,6 +801,10 @@ nand_get_sdr_timings(const struct nand_data_interface *conf)
* supported, 0 otherwise.
* @jedec_params: [INTERN] holds the JEDEC parameter page when JEDEC is
* supported, 0 otherwise.
+ * @max_bb_per_die: [INTERN] the maximum number of bad blocks each die of
+ *			this NAND device may encounter over its lifetime.
+ * @blocks_per_die: [INTERN] The number of PEBs in a die
+ * @data_interface: [INTERN] NAND interface timing information
* @read_retries: [INTERN] the number of read retry modes supported
* @onfi_set_features: [REPLACEABLE] set the features for ONFI nand
* @onfi_get_features: [REPLACEABLE] get the features for ONFI nand
@@ -883,6 +887,8 @@ struct nand_chip {
struct nand_onfi_params onfi_params;
struct nand_jedec_params jedec_params;
};
+ u16 max_bb_per_die;
+ u32 blocks_per_die;
struct nand_data_interface *data_interface;
@@ -958,6 +964,7 @@ static inline void nand_set_controller_data(struct nand_chip *chip, void *priv)
#define NAND_MFR_SANDISK 0x45
#define NAND_MFR_INTEL 0x89
#define NAND_MFR_ATO 0x9b
+#define NAND_MFR_WINBOND 0xef
/* The maximum expected count of bytes in the NAND ID sequence */
#define NAND_MAX_ID_LEN 8
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 70736e1..06df1e0 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -41,6 +41,7 @@ struct mtd_partition {
uint64_t size; /* partition size */
uint64_t offset; /* offset within the master MTD space */
uint32_t mask_flags; /* master MTD flags to mask out for this partition */
+ struct device_node *of_node;
};
#define MTDPART_OFS_RETAIN (-3)
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index c425c7b..f2a7180 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -43,9 +43,13 @@
#define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */
#define SPINOR_OP_READ 0x03 /* Read data bytes (low frequency) */
#define SPINOR_OP_READ_FAST 0x0b /* Read data bytes (high frequency) */
-#define SPINOR_OP_READ_1_1_2 0x3b /* Read data bytes (Dual SPI) */
-#define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad SPI) */
+#define SPINOR_OP_READ_1_1_2 0x3b /* Read data bytes (Dual Output SPI) */
+#define SPINOR_OP_READ_1_2_2 0xbb /* Read data bytes (Dual I/O SPI) */
+#define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad Output SPI) */
+#define SPINOR_OP_READ_1_4_4 0xeb /* Read data bytes (Quad I/O SPI) */
#define SPINOR_OP_PP 0x02 /* Page program (up to 256 bytes) */
+#define SPINOR_OP_PP_1_1_4 0x32 /* Quad page program */
+#define SPINOR_OP_PP_1_4_4 0x38 /* Quad page program */
#define SPINOR_OP_BE_4K 0x20 /* Erase 4KiB block */
#define SPINOR_OP_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */
#define SPINOR_OP_BE_32K 0x52 /* Erase 32KiB block */
@@ -56,11 +60,17 @@
#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
-#define SPINOR_OP_READ4 0x13 /* Read data bytes (low frequency) */
-#define SPINOR_OP_READ4_FAST 0x0c /* Read data bytes (high frequency) */
-#define SPINOR_OP_READ4_1_1_2 0x3c /* Read data bytes (Dual SPI) */
-#define SPINOR_OP_READ4_1_1_4 0x6c /* Read data bytes (Quad SPI) */
+#define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */
+#define SPINOR_OP_READ_FAST_4B 0x0c /* Read data bytes (high frequency) */
+#define SPINOR_OP_READ_1_1_2_4B 0x3c /* Read data bytes (Dual Output SPI) */
+#define SPINOR_OP_READ_1_2_2_4B 0xbc /* Read data bytes (Dual I/O SPI) */
+#define SPINOR_OP_READ_1_1_4_4B 0x6c /* Read data bytes (Quad Output SPI) */
+#define SPINOR_OP_READ_1_4_4_4B 0xec /* Read data bytes (Quad I/O SPI) */
#define SPINOR_OP_PP_4B 0x12 /* Page program (up to 256 bytes) */
+#define SPINOR_OP_PP_1_1_4_4B 0x34 /* Quad page program */
+#define SPINOR_OP_PP_1_4_4_4B 0x3e /* Quad page program */
+#define SPINOR_OP_BE_4K_4B 0x21 /* Erase 4KiB block */
+#define SPINOR_OP_BE_32K_4B 0x5c /* Erase 32KiB block */
#define SPINOR_OP_SE_4B 0xdc /* Sector erase (usually 64KiB) */
/* Used for SST flashes only. */
@@ -68,6 +78,15 @@
#define SPINOR_OP_WRDI 0x04 /* Write disable */
#define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */
+/* Used for S3AN flashes only */
+#define SPINOR_OP_XSE 0x50 /* Sector erase */
+#define SPINOR_OP_XPP 0x82 /* Page program */
+#define SPINOR_OP_XRDSR 0xd7 /* Read status register */
+
+#define XSR_PAGESIZE BIT(0) /* Page size in Po2 or Linear */
+#define XSR_RDY BIT(7) /* Ready */
+
+
/* Used for Macronix and Winbond flashes. */
#define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */
#define SPINOR_OP_EX4B 0xe9 /* Exit 4-byte mode */
@@ -119,6 +138,9 @@ enum spi_nor_ops {
enum spi_nor_option_flags {
SNOR_F_USE_FSR = BIT(0),
SNOR_F_HAS_SR_TB = BIT(1),
+ SNOR_F_NO_OP_CHIP_ERASE = BIT(2),
+ SNOR_F_S3AN_ADDR_DEFAULT = BIT(3),
+ SNOR_F_READY_XSR_RDY = BIT(4),
};
/**
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index b97870f..1127fe3 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -20,6 +20,8 @@
#include <linux/osq_lock.h>
#include <linux/debug_locks.h>
+struct ww_acquire_ctx;
+
/*
* Simple, straightforward mutexes with strict semantics:
*
@@ -65,7 +67,7 @@ struct mutex {
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
- return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x03);
+ return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07);
}
/*
@@ -75,6 +77,7 @@ static inline struct task_struct *__mutex_owner(struct mutex *lock)
struct mutex_waiter {
struct list_head list;
struct task_struct *task;
+ struct ww_acquire_ctx *ww_ctx;
#ifdef CONFIG_DEBUG_MUTEXES
void *magic;
#endif
@@ -156,10 +159,12 @@ extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
unsigned int subclass);
extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
unsigned int subclass);
+extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);
#define mutex_lock(lock) mutex_lock_nested(lock, 0)
#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
+#define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0)
#define mutex_lock_nest_lock(lock, nest_lock) \
do { \
@@ -171,11 +176,13 @@ do { \
extern void mutex_lock(struct mutex *lock);
extern int __must_check mutex_lock_interruptible(struct mutex *lock);
extern int __must_check mutex_lock_killable(struct mutex *lock);
+extern void mutex_lock_io(struct mutex *lock);
# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
+# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock)
#endif
/*
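
mutex_lock_io() follows the same locking rules as mutex_lock() but accounts the time a task spends blocked on the mutex as I/O wait. A hedged sketch; the structure and helper names are assumptions for illustration.

struct example_dev {
	struct mutex io_lock;
	/* ... */
};

static void example_submit(struct example_dev *edev)
{
	mutex_lock_io(&edev->io_lock);	/* blocking here is charged as iowait */
	/* issue and wait for the I/O while holding the lock */
	mutex_unlock(&edev->io_lock);
}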
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 9c6c8ef..9a04195 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -71,7 +71,6 @@ enum {
NETIF_F_HW_VLAN_STAG_RX_BIT, /* Receive VLAN STAG HW acceleration */
NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */
NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */
- NETIF_F_BUSY_POLL_BIT, /* Busy poll */
NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */
@@ -134,7 +133,6 @@ enum {
#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
-#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL)
#define NETIF_F_HW_TC __NETIF_F(HW_TC)
#define for_each_netdev_feature(mask_addr, bit) \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 994f742..f40f0ab 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -352,6 +352,7 @@ enum gro_result {
GRO_HELD,
GRO_NORMAL,
GRO_DROP,
+ GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;
@@ -463,7 +464,6 @@ static inline bool napi_reschedule(struct napi_struct *napi)
return false;
}
-bool __napi_complete(struct napi_struct *n);
bool napi_complete_done(struct napi_struct *n, int work_done);
/**
* napi_complete - NAPI processing complete
@@ -866,11 +866,15 @@ struct netdev_xdp {
* of useless work if you return NETDEV_TX_BUSY.
* Required; cannot be NULL.
*
- * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
- * netdev_features_t features);
- * Adjusts the requested feature flags according to device-specific
- * constraints, and returns the resulting flags. Must not modify
- * the device state.
+ * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
+ *					       struct net_device *dev,
+ * netdev_features_t features);
+ * Called by core transmit path to determine if device is capable of
+ * performing offload operations on a given packet. This is to give
+ * the device an opportunity to implement any restrictions that cannot
+ * be otherwise expressed by feature flags. The check is called with
+ * the set of features that the stack has calculated and it returns
+ * those the driver believes to be appropriate.
*
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
* void *accel_priv, select_queue_fallback_t fallback);
@@ -913,8 +917,8 @@ struct netdev_xdp {
* Callback used when the transmitter has not made any progress
* for dev->watchdog ticks.
*
- * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
- * struct rtnl_link_stats64 *storage);
+ * void (*ndo_get_stats64)(struct net_device *dev,
+ * struct rtnl_link_stats64 *storage);
* struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
* Called when a user wants to get the network device usage
* statistics. Drivers must do one of the following:
@@ -964,11 +968,12 @@ struct netdev_xdp {
* with PF and querying it may introduce a theoretical security risk.
* int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
* int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
- * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
- * Called to setup 'tc' number of traffic classes in the net device. This
- * is always called from the stack with the rtnl lock held and netif tx
- * queues stopped. This allows the netdevice to perform queue management
- * safely.
+ * int (*ndo_setup_tc)(struct net_device *dev, u32 handle,
+ * __be16 protocol, struct tc_to_netdev *tc);
+ * Called to setup any 'tc' scheduler, classifier or action on @dev.
+ * This is always called from the stack with the rtnl lock held and netif
+ * tx queues stopped. This allows the netdevice to perform queue
+ * management safely.
*
* Fiber Channel over Ethernet (FCoE) offload functions.
* int (*ndo_fcoe_enable)(struct net_device *dev);
@@ -1028,6 +1033,12 @@ struct netdev_xdp {
* Called to release previously enslaved netdev.
*
* Feature/offload setting functions.
+ * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
+ * netdev_features_t features);
+ * Adjusts the requested feature flags according to device-specific
+ * constraints, and returns the resulting flags. Must not modify
+ * the device state.
+ *
* int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
* Called to update device configuration to new features. Passed
* feature set might be less than what was returned by ndo_fix_features()).
@@ -1100,15 +1111,6 @@ struct netdev_xdp {
* Callback to use for xmit over the accelerated station. This
* is used in place of ndo_start_xmit on accelerated net
* devices.
- * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
- * struct net_device *dev
- * netdev_features_t features);
- * Called by core transmit path to determine if device is capable of
- * performing offload operations on a given packet. This is to give
- * the device an opportunity to implement any restrictions that cannot
- * be otherwise expressed by feature flags. The check is called with
- * the set of features that the stack has calculated and it returns
- * those the driver believes to be appropriate.
* int (*ndo_set_tx_maxrate)(struct net_device *dev,
* int queue_index, u32 maxrate);
* Called when a user wants to set a max-rate limitation of specific
@@ -1165,8 +1167,8 @@ struct net_device_ops {
struct neigh_parms *);
void (*ndo_tx_timeout) (struct net_device *dev);
- struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
- struct rtnl_link_stats64 *storage);
+ void (*ndo_get_stats64)(struct net_device *dev,
+ struct rtnl_link_stats64 *storage);
bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
int (*ndo_get_offload_stats)(int attr_id,
const struct net_device *dev,
@@ -1183,9 +1185,6 @@ struct net_device_ops {
struct netpoll_info *info);
void (*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
- int (*ndo_busy_poll)(struct napi_struct *dev);
-#endif
int (*ndo_set_vf_mac)(struct net_device *dev,
int queue, u8 *mac);
int (*ndo_set_vf_vlan)(struct net_device *dev,
@@ -1510,6 +1509,7 @@ enum netdev_priv_flags {
* @max_mtu: Interface Maximum MTU value
* @type: Interface hardware type
* @hard_header_len: Maximum hardware header length.
+ * @min_header_len: Minimum hardware header length
*
* @needed_headroom: Extra headroom the hardware may need, but not in all
* cases can this be guaranteed
@@ -1551,7 +1551,6 @@ enum netdev_priv_flags {
* @ax25_ptr: AX.25 specific data
* @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
*
- * @last_rx: Time of last Rx
* @dev_addr: Hw address (before bcast,
* because most packets are unicast)
*
@@ -1727,6 +1726,7 @@ struct net_device {
unsigned int max_mtu;
unsigned short type;
unsigned short hard_header_len;
+ unsigned short min_header_len;
unsigned short needed_headroom;
unsigned short needed_tailroom;
@@ -1777,8 +1777,6 @@ struct net_device {
/*
* Cache lines mostly used on receive path (including eth_type_trans())
*/
- unsigned long last_rx;
-
/* Interface address info used in eth_type_trans() */
unsigned char *dev_addr;
@@ -1868,8 +1866,12 @@ struct net_device {
struct pcpu_vstats __percpu *vstats;
};
+#if IS_ENABLED(CONFIG_GARP)
struct garp_port __rcu *garp_port;
+#endif
+#if IS_ENABLED(CONFIG_MRP)
struct mrp_port __rcu *mrp_port;
+#endif
struct device dev;
const struct attribute_group *sysfs_groups[4];
@@ -2477,14 +2479,19 @@ static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
return NAPI_GRO_CB(skb)->frag0_len < hlen;
}
+static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
+{
+ NAPI_GRO_CB(skb)->frag0 = NULL;
+ NAPI_GRO_CB(skb)->frag0_len = 0;
+}
+
static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
unsigned int offset)
{
if (!pskb_may_pull(skb, hlen))
return NULL;
- NAPI_GRO_CB(skb)->frag0 = NULL;
- NAPI_GRO_CB(skb)->frag0_len = 0;
+ skb_gro_frag0_invalidate(skb);
return skb->data + offset;
}
@@ -2661,6 +2668,19 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
remcsum_unadjust((__sum16 *)ptr, grc->delta);
}
+#ifdef CONFIG_XFRM_OFFLOAD
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+{
+ if (PTR_ERR(pp) != -EINPROGRESS)
+ NAPI_GRO_CB(skb)->flush |= flush;
+}
+#else
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+{
+ NAPI_GRO_CB(skb)->flush |= flush;
+}
+#endif
+
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr,
@@ -2688,6 +2708,8 @@ static inline bool dev_validate_header(const struct net_device *dev,
{
if (likely(len >= dev->hard_header_len))
return true;
+ if (len < dev->min_header_len)
+ return false;
if (capable(CAP_SYS_RAWIO)) {
memset(ll_header + len, 0, dev->hard_header_len - len);
@@ -3101,7 +3123,19 @@ static inline bool netif_subqueue_stopped(const struct net_device *dev,
return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
-void netif_wake_subqueue(struct net_device *dev, u16 queue_index);
+/**
+ * netif_wake_subqueue - allow sending packets on subqueue
+ * @dev: network device
+ * @queue_index: sub queue index
+ *
+ * Resume individual transmit queue of a device with multiple transmit queues.
+ */
+static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
+{
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+
+ netif_tx_wake_queue(txq);
+}
#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
@@ -3795,6 +3829,10 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
extern int netdev_max_backlog;
extern int netdev_tstamp_prequeue;
extern int weight_p;
+extern int dev_weight_rx_bias;
+extern int dev_weight_tx_bias;
+extern int dev_rx_weight;
+extern int dev_tx_weight;
bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
@@ -3872,10 +3910,6 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
struct net_device *lower_dev);
void netdev_lower_state_changed(struct net_device *lower_dev,
void *lower_state_info);
-int netdev_default_l2upper_neigh_construct(struct net_device *dev,
- struct neighbour *n);
-void netdev_default_l2upper_neigh_destroy(struct net_device *dev,
- struct neighbour *n);
/* RSS keys are 40 or 52 bytes long */
#define NETDEV_RSS_KEY_LEN 52
@@ -4328,6 +4362,15 @@ do { \
})
#endif
+/* if @cond then downgrade to debug, else print at @level */
+#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \
+ do { \
+ if (cond) \
+ netif_dbg(priv, type, netdev, fmt, ##args); \
+ else \
+ netif_ ## level(priv, type, netdev, fmt, ##args); \
+ } while (0)
+
#if defined(VERBOSE_DEBUG)
#define netif_vdbg netif_dbg
#else
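
A hedged sketch of the new ndo_get_stats64 calling convention shown above: the callback now fills the caller-provided storage and returns void instead of returning a stats pointer. example_netdev_priv and its counters are assumptions for illustration.

static void example_get_stats64(struct net_device *dev,
				struct rtnl_link_stats64 *storage)
{
	struct example_netdev_priv *priv = netdev_priv(dev);

	storage->rx_packets = priv->rx_packets;
	storage->tx_packets = priv->tx_packets;
	storage->rx_bytes   = priv->rx_bytes;
	storage->tx_bytes   = priv->tx_bytes;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_get_stats64	= example_get_stats64,
	/* remaining callbacks omitted */
};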
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 1d82dd5..1b49209 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -28,6 +28,7 @@ struct nfnetlink_subsystem {
const struct nfnl_callback *cb; /* callback for individual types */
int (*commit)(struct net *net, struct sk_buff *skb);
int (*abort)(struct net *net, struct sk_buff *skb);
+ bool (*valid_genid)(struct net *net, u32 genid);
};
int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 5117e4d..be378cf 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -167,6 +167,7 @@ struct xt_match {
const char *table;
unsigned int matchsize;
+ unsigned int usersize;
#ifdef CONFIG_COMPAT
unsigned int compatsize;
#endif
@@ -207,6 +208,7 @@ struct xt_target {
const char *table;
unsigned int targetsize;
+ unsigned int usersize;
#ifdef CONFIG_COMPAT
unsigned int compatsize;
#endif
@@ -287,6 +289,13 @@ int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
bool inv_proto);
+int xt_match_to_user(const struct xt_entry_match *m,
+ struct xt_entry_match __user *u);
+int xt_target_to_user(const struct xt_entry_target *t,
+ struct xt_entry_target __user *u);
+int xt_data_to_user(void __user *dst, const void *src,
+ int usersize, int size);
+
void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
struct xt_counters_info *info, bool compat);
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index bca5363..1b1ca04 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -282,7 +282,7 @@ enum nfsstat4 {
static inline bool seqid_mutating_err(u32 err)
{
- /* rfc 3530 section 8.1.5: */
+ /* See RFC 7530, section 9.1.7 */
switch (err) {
case NFS4ERR_STALE_CLIENTID:
case NFS4ERR_STALE_STATEID:
@@ -291,6 +291,7 @@ static inline bool seqid_mutating_err(u32 err)
case NFS4ERR_BADXDR:
case NFS4ERR_RESOURCE:
case NFS4ERR_NOFILEHANDLE:
+ case NFS4ERR_MOVED:
return false;
};
return true;
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index aacca82..0a3fadc 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -110,6 +110,7 @@ extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;
extern unsigned long *watchdog_cpumask_bits;
+extern atomic_t watchdog_park_in_progress;
#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 3d1c6f1..0b676a0 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -244,6 +244,7 @@ enum {
NVME_CTRL_ONCS_DSM = 1 << 2,
NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3,
NVME_CTRL_VWC_PRESENT = 1 << 0,
+ NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
};
struct nvme_lbaf {
@@ -553,6 +554,8 @@ enum {
NVME_DSMGMT_AD = 1 << 2,
};
+#define NVME_DSM_MAX_RANGES 256
+
struct nvme_dsm_range {
__le32 cattr;
__le32 nlb;
diff --git a/include/linux/of.h b/include/linux/of.h
index d72f010..21e6323 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -280,6 +280,7 @@ extern struct device_node *of_get_child_by_name(const struct device_node *node,
/* cache lookup */
extern struct device_node *of_find_next_cache_node(const struct device_node *);
+extern int of_find_last_cache_level(unsigned int cpu);
extern struct device_node *of_find_node_with_property(
struct device_node *from, const char *prop_name);
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index cc7dd687..e9afbcc 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -37,6 +37,7 @@ extern const void *of_device_get_match_data(const struct device *dev);
extern ssize_t of_device_get_modalias(struct device *dev,
char *str, ssize_t len);
+extern int of_device_request_module(struct device *dev);
extern void of_device_uevent(struct device *dev, struct kobj_uevent_env *env);
extern int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env);
@@ -78,6 +79,11 @@ static inline int of_device_get_modalias(struct device *dev,
return -ENODEV;
}
+static inline int of_device_request_module(struct device *dev)
+{
+ return -ENODEV;
+}
+
static inline int of_device_uevent_modalias(struct device *dev,
struct kobj_uevent_env *env)
{
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index 6a7fc50..13394ac 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -31,17 +31,6 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
#endif /* CONFIG_OF_IOMMU */
-static inline void of_iommu_set_ops(struct device_node *np,
- const struct iommu_ops *ops)
-{
- iommu_register_instance(&np->fwnode, ops);
-}
-
-static inline const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
-{
- return iommu_get_instance(&np->fwnode);
-}
-
extern struct of_device_id __iommu_of_table;
typedef int (*of_iommu_init_fn)(struct device_node *);
diff --git a/include/linux/parman.h b/include/linux/parman.h
new file mode 100644
index 0000000..3c8cccc
--- /dev/null
+++ b/include/linux/parman.h
@@ -0,0 +1,76 @@
+/*
+ * include/linux/parman.h - Manager for linear priority array areas
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PARMAN_H
+#define _PARMAN_H
+
+#include <linux/list.h>
+
+enum parman_algo_type {
+ PARMAN_ALGO_TYPE_LSORT,
+};
+
+struct parman_item {
+ struct list_head list;
+ unsigned long index;
+};
+
+struct parman_prio {
+ struct list_head list;
+ struct list_head item_list;
+ unsigned long priority;
+};
+
+struct parman_ops {
+ unsigned long base_count;
+ unsigned long resize_step;
+ int (*resize)(void *priv, unsigned long new_count);
+ void (*move)(void *priv, unsigned long from_index,
+ unsigned long to_index, unsigned long count);
+ enum parman_algo_type algo;
+};
+
+struct parman;
+
+struct parman *parman_create(const struct parman_ops *ops, void *priv);
+void parman_destroy(struct parman *parman);
+void parman_prio_init(struct parman *parman, struct parman_prio *prio,
+ unsigned long priority);
+void parman_prio_fini(struct parman_prio *prio);
+int parman_item_add(struct parman *parman, struct parman_prio *prio,
+ struct parman_item *item);
+void parman_item_remove(struct parman *parman, struct parman_prio *prio,
+ struct parman_item *item);
+
+#endif
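
A minimal consumer sketch of the parman API introduced above, built only from the declarations in this new header; the "foo" names and entry layout are hypothetical placeholders, not part of the patch.

/* Hypothetical parman consumer: a linear table whose occupied region
 * parman keeps sorted by priority. */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/parman.h>

struct foo_entry {
	u32 key;
	u32 action;
};

struct foo_table {
	struct parman *parman;
	struct foo_entry *entries;	/* indexed by parman_item.index */
	unsigned long count;
};

static int foo_parman_resize(void *priv, unsigned long new_count)
{
	struct foo_table *tbl = priv;
	struct foo_entry *e;

	/* krealloc(NULL, ...) behaves like kmalloc, so this also covers
	 * the first grow from an empty table. */
	e = krealloc(tbl->entries, new_count * sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;
	tbl->entries = e;
	tbl->count = new_count;
	return 0;
}

static void foo_parman_move(void *priv, unsigned long from_index,
			    unsigned long to_index, unsigned long count)
{
	struct foo_table *tbl = priv;

	memmove(&tbl->entries[to_index], &tbl->entries[from_index],
		count * sizeof(*tbl->entries));
}

static const struct parman_ops foo_parman_ops = {
	.base_count	= 16,
	.resize_step	= 16,
	.resize		= foo_parman_resize,
	.move		= foo_parman_move,
	.algo		= PARMAN_ALGO_TYPE_LSORT,
};

/* Usage sketch:
 *	tbl->parman = parman_create(&foo_parman_ops, tbl);
 *	parman_prio_init(tbl->parman, &prio, priority);
 *	err = parman_item_add(tbl->parman, &prio, &item);
 *	// on success, item.index is the slot to program in tbl->entries
 */
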
diff --git a/include/linux/pci.h b/include/linux/pci.h
index e2d1a12..adbc859 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -885,7 +885,6 @@ void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
void pci_sort_breadthfirst(void);
#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
#define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
-#define dev_num_vf(d) ((dev_is_pci(d) ? pci_num_vf(to_pci_dev(d)) : 0))
/* Generic PCI functions exported to card drivers */
@@ -1630,7 +1629,6 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#define dev_is_pci(d) (false)
#define dev_is_pf(d) (false)
-#define dev_num_vf(d) (0)
#endif /* CONFIG_PCI */
/* Include architecture-dependent settings and functions */
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 1c7eec0..3a481a4 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -204,7 +204,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
unsigned long __percpu *percpu_count;
- int ret;
+ bool ret;
rcu_read_lock_sched();
@@ -238,7 +238,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
unsigned long __percpu *percpu_count;
- int ret = false;
+ bool ret = false;
rcu_read_lock_sched();
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 5b2e615..93664f0 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -4,15 +4,15 @@
#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
-#include <linux/wait.h>
+#include <linux/rcuwait.h>
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>
struct percpu_rw_semaphore {
struct rcu_sync rss;
unsigned int __percpu *read_count;
- struct rw_semaphore rw_sem;
- wait_queue_head_t writer;
+ struct rw_semaphore rw_sem; /* slowpath */
+ struct rcuwait writer; /* blocked writer */
int readers_block;
};
@@ -22,7 +22,7 @@ static struct percpu_rw_semaphore name = { \
.rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC), \
.read_count = &__percpu_rwsem_rc_##name, \
.rw_sem = __RWSEM_INITIALIZER(name.rw_sem), \
- .writer = __WAIT_QUEUE_HEAD_INITIALIZER(name.writer), \
+ .writer = __RCUWAIT_INITIALIZER(name.writer), \
}
extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
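
The switch from a wait queue to rcuwait above only affects the writer slowpath; the consumer-facing API is unchanged. A small usage sketch, assuming the usual DEFINE_STATIC_PERCPU_RWSEM() helper from this header:

#include <linux/percpu-rwsem.h>

static DEFINE_STATIC_PERCPU_RWSEM(foo_rwsem);

static void foo_reader(void)
{
	percpu_down_read(&foo_rwsem);	/* per-CPU counter in the fast path */
	/* ... read-side critical section ... */
	percpu_up_read(&foo_rwsem);
}

static void foo_writer(void)
{
	/* The writer now blocks via rcu_sync plus the rcuwait field above. */
	percpu_down_write(&foo_rwsem);
	/* ... exclusive section ... */
	percpu_up_write(&foo_rwsem);
}
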
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 4741ecd..000fdb2 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -482,6 +482,7 @@ struct perf_addr_filter {
* @list: list of filters for this event
* @lock: spinlock that serializes accesses to the @list and event's
* (and its children's) filter generations.
+ * @nr_file_filters: number of file-based filters
*
* A child event will use parent's @list (and therefore @lock), so they are
* bundled together; see perf_event_addr_filters().
@@ -489,6 +490,7 @@ struct perf_addr_filter {
struct perf_addr_filters_head {
struct list_head list;
raw_spinlock_t lock;
+ unsigned int nr_file_filters;
};
/**
@@ -785,9 +787,9 @@ struct perf_cpu_context {
ktime_t hrtimer_interval;
unsigned int hrtimer_active;
- struct pmu *unique_pmu;
#ifdef CONFIG_CGROUP_PERF
struct perf_cgroup *cgrp;
+ struct list_head cgrp_cpuctx_entry;
#endif
struct list_head sched_cb_entry;
@@ -1259,6 +1261,7 @@ extern void perf_event_disable(struct perf_event *event);
extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_disable_inatomic(struct perf_event *event);
extern void perf_event_task_tick(void);
+extern int perf_event_account_interrupt(struct perf_event *event);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
diff --git a/include/linux/phy.h b/include/linux/phy.h
index f7d95f6..7724760 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -25,7 +25,6 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mod_devicetable.h>
-#include <linux/phy_led_triggers.h>
#include <linux/atomic.h>
@@ -82,6 +81,9 @@ typedef enum {
PHY_INTERFACE_MODE_MOCA,
PHY_INTERFACE_MODE_QSGMII,
PHY_INTERFACE_MODE_TRGMII,
+ PHY_INTERFACE_MODE_1000BASEX,
+ PHY_INTERFACE_MODE_2500BASEX,
+ PHY_INTERFACE_MODE_RXAUI,
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
@@ -142,6 +144,12 @@ static inline const char *phy_modes(phy_interface_t interface)
return "qsgmii";
case PHY_INTERFACE_MODE_TRGMII:
return "trgmii";
+ case PHY_INTERFACE_MODE_1000BASEX:
+ return "1000base-x";
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return "2500base-x";
+ case PHY_INTERFACE_MODE_RXAUI:
+ return "rxaui";
default:
return "unknown";
}
@@ -158,11 +166,7 @@ static inline const char *phy_modes(phy_interface_t interface)
/* Used when trying to connect to a specific phy (mii bus id:phy device id) */
#define PHY_ID_FMT "%s:%02x"
-/*
- * Need to be a little smaller than phydev->dev.bus_id to leave room
- * for the ":%02x"
- */
-#define MII_BUS_ID_SIZE (20 - 3)
+#define MII_BUS_ID_SIZE 61
/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */
@@ -632,7 +636,7 @@ struct phy_driver {
/* A Structure for boards to register fixups with the PHY Lib */
struct phy_fixup {
struct list_head list;
- char bus_id[20];
+ char bus_id[MII_BUS_ID_SIZE + 3];
u32 phy_uid;
u32 phy_uid_mask;
int (*run)(struct phy_device *phydev);
@@ -803,6 +807,9 @@ int phy_stop_interrupts(struct phy_device *phydev);
static inline int phy_read_status(struct phy_device *phydev)
{
+ if (!phydev->drv)
+ return -EIO;
+
return phydev->drv->read_status(phydev);
}
@@ -882,6 +889,25 @@ void mdio_bus_exit(void);
extern struct bus_type mdio_bus_type;
+struct mdio_board_info {
+ const char *bus_id;
+ char modalias[MDIO_NAME_SIZE];
+ int mdio_addr;
+ const void *platform_data;
+};
+
+#if IS_ENABLED(CONFIG_PHYLIB)
+int mdiobus_register_board_info(const struct mdio_board_info *info,
+ unsigned int n);
+#else
+static inline int mdiobus_register_board_info(const struct mdio_board_info *i,
+ unsigned int n)
+{
+ return 0;
+}
+#endif
+
+
/**
* module_phy_driver() - Helper macro for registering PHY drivers
* @__phy_drivers: array of PHY drivers to register
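
A board-code sketch for the new mdiobus_register_board_info() and struct mdio_board_info above; the bus id, modalias, address and platform data are made-up placeholders, and the assumption is that the call runs before the MDIO bus in question is registered so the entries are picked up during the bus scan.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/phy.h>

struct foo_phy_pdata {			/* driver-specific, hypothetical */
	int led_mode;
};

static const struct foo_phy_pdata foo_pdata = { .led_mode = 1 };

static const struct mdio_board_info foo_mdio_info[] = {
	{
		.bus_id		= "foo-mdio-0",	/* hypothetical MDIO bus id */
		.modalias	= "foo-phy",	/* matched against MDIO drivers */
		.mdio_addr	= 3,
		.platform_data	= &foo_pdata,
	},
};

static int __init foo_board_init(void)
{
	/* Returns 0 (a no-op) when CONFIG_PHYLIB is disabled, per the stub. */
	return mdiobus_register_board_info(foo_mdio_info,
					   ARRAY_SIZE(foo_mdio_info));
}
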
diff --git a/include/linux/phy_led_triggers.h b/include/linux/phy_led_triggers.h
index a2daea0..b37b05b 100644
--- a/include/linux/phy_led_triggers.h
+++ b/include/linux/phy_led_triggers.h
@@ -18,11 +18,11 @@ struct phy_device;
#ifdef CONFIG_LED_TRIGGER_PHY
#include <linux/leds.h>
+#include <linux/phy.h>
#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10
-#define PHY_MII_BUS_ID_SIZE (20 - 3)
-#define PHY_LINK_LED_TRIGGER_NAME_SIZE (PHY_MII_BUS_ID_SIZE + \
+#define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \
FIELD_SIZEOF(struct mdio_device, addr)+\
PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE)
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index d7e5d60..a0f2aba 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -29,6 +29,7 @@ extern int pinctrl_request_gpio(unsigned gpio);
extern void pinctrl_free_gpio(unsigned gpio);
extern int pinctrl_gpio_direction_input(unsigned gpio);
extern int pinctrl_gpio_direction_output(unsigned gpio);
+extern int pinctrl_gpio_set_config(unsigned gpio, unsigned long config);
extern struct pinctrl * __must_check pinctrl_get(struct device *dev);
extern void pinctrl_put(struct pinctrl *p);
@@ -80,6 +81,11 @@ static inline int pinctrl_gpio_direction_output(unsigned gpio)
return 0;
}
+static inline int pinctrl_gpio_set_config(unsigned gpio, unsigned long config)
+{
+ return 0;
+}
+
static inline struct pinctrl * __must_check pinctrl_get(struct device *dev)
{
return NULL;
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index 12343ca..7620eb1 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -12,12 +12,6 @@
#ifndef __LINUX_PINCTRL_PINCONF_GENERIC_H
#define __LINUX_PINCTRL_PINCONF_GENERIC_H
-/*
- * You shouldn't even be able to compile with these enums etc unless you're
- * using generic pin config. That is why this is defined out.
- */
-#ifdef CONFIG_GENERIC_PINCONF
-
/**
* enum pin_config_param - possible pin configuration parameters
* @PIN_CONFIG_BIAS_BUS_HOLD: the pin will be set to weakly latch so that it
@@ -92,6 +86,8 @@
* @PIN_CONFIG_END: this is the last enumerator for pin configurations, if
* you need to pass in custom configurations to the pin controller, use
* PIN_CONFIG_END+1 as the base offset.
+ * @PIN_CONFIG_MAX: this is the maximum configuration value that can be
+ * presented using the packed format.
*/
enum pin_config_param {
PIN_CONFIG_BIAS_BUS_HOLD,
@@ -112,49 +108,53 @@ enum pin_config_param {
PIN_CONFIG_OUTPUT,
PIN_CONFIG_POWER_SOURCE,
PIN_CONFIG_SLEW_RATE,
- PIN_CONFIG_END = 0x7FFF,
-};
-
-#ifdef CONFIG_DEBUG_FS
-#define PCONFDUMP(a, b, c, d) { .param = a, .display = b, .format = c, \
- .has_arg = d }
-
-struct pin_config_item {
- const enum pin_config_param param;
- const char * const display;
- const char * const format;
- bool has_arg;
+ PIN_CONFIG_END = 0x7F,
+ PIN_CONFIG_MAX = 0xFF,
};
-#endif /* CONFIG_DEBUG_FS */
/*
* Helpful configuration macro to be used in tables etc.
*/
-#define PIN_CONF_PACKED(p, a) ((a << 16) | ((unsigned long) p & 0xffffUL))
+#define PIN_CONF_PACKED(p, a) ((a << 8) | ((unsigned long) p & 0xffUL))
/*
* The following inlines stuffs a configuration parameter and data value
* into and out of an unsigned long argument, as used by the generic pin config
- * system. We put the parameter in the lower 16 bits and the argument in the
- * upper 16 bits.
+ * system. We put the parameter in the lower 8 bits and the argument in the
+ * upper 24 bits.
*/
static inline enum pin_config_param pinconf_to_config_param(unsigned long config)
{
- return (enum pin_config_param) (config & 0xffffUL);
+ return (enum pin_config_param) (config & 0xffUL);
}
-static inline u16 pinconf_to_config_argument(unsigned long config)
+static inline u32 pinconf_to_config_argument(unsigned long config)
{
- return (enum pin_config_param) ((config >> 16) & 0xffffUL);
+ return (u32) ((config >> 8) & 0xffffffUL);
}
static inline unsigned long pinconf_to_config_packed(enum pin_config_param param,
- u16 argument)
+ u32 argument)
{
return PIN_CONF_PACKED(param, argument);
}
+#ifdef CONFIG_GENERIC_PINCONF
+
+#ifdef CONFIG_DEBUG_FS
+#define PCONFDUMP(a, b, c, d) { \
+ .param = a, .display = b, .format = c, .has_arg = d \
+ }
+
+struct pin_config_item {
+ const enum pin_config_param param;
+ const char * const display;
+ const char * const format;
+ bool has_arg;
+};
+#endif /* CONFIG_DEBUG_FS */
+
#ifdef CONFIG_OF
#include <linux/device.h>
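
A sketch of the reworked packed-config format above (parameter in the low 8 bits, argument in the upper 24 bits, so arguments up to about 16M now fit), wired to the pinctrl_gpio_set_config() consumer entry point added earlier in consumer.h. PIN_CONFIG_INPUT_DEBOUNCE is assumed to be one of the generic parameters not visible in this hunk.

#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/consumer.h>

static int foo_set_gpio_debounce(unsigned gpio, u32 debounce_us)
{
	unsigned long config;

	/* Pack: param -> bits 7:0, argument -> bits 31:8. */
	config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE,
					  debounce_us);

	/* A pin controller's .pin_config_set() would unpack it with
	 * pinconf_to_config_param() / pinconf_to_config_argument(),
	 * the latter now returning a u32. */

	return pinctrl_gpio_set_config(gpio, config);
}
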
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index a42e57d..8ce2d87 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -141,12 +141,27 @@ struct pinctrl_desc {
};
/* External interface to pin controller */
+
+extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
+ struct device *dev, void *driver_data,
+ struct pinctrl_dev **pctldev);
+
+/* Please use pinctrl_register_and_init() instead */
extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
struct device *dev, void *driver_data);
+
extern void pinctrl_unregister(struct pinctrl_dev *pctldev);
+
+extern int devm_pinctrl_register_and_init(struct device *dev,
+ struct pinctrl_desc *pctldesc,
+ void *driver_data,
+ struct pinctrl_dev **pctldev);
+
+/* Please use devm_pinctrl_register_and_init() instead */
extern struct pinctrl_dev *devm_pinctrl_register(struct device *dev,
struct pinctrl_desc *pctldesc,
void *driver_data);
+
extern void devm_pinctrl_unregister(struct device *dev,
struct pinctrl_dev *pctldev);
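
A hedged probe-path sketch for the new devm_pinctrl_register_and_init() above, which returns an error code and hands the pinctrl_dev back through its last argument instead of returning an ERR_PTR; the "foo" driver structure and descriptor contents are placeholders.

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>

struct foo_pinctrl {
	struct pinctrl_desc desc;
	struct pinctrl_dev *pctldev;
};

static int foo_pinctrl_probe(struct platform_device *pdev)
{
	struct foo_pinctrl *fpc;
	int ret;

	fpc = devm_kzalloc(&pdev->dev, sizeof(*fpc), GFP_KERNEL);
	if (!fpc)
		return -ENOMEM;

	/* ... fill fpc->desc with pins, npins and pinctrl ops ... */

	ret = devm_pinctrl_register_and_init(&pdev->dev, &fpc->desc, fpc,
					     &fpc->pctldev);
	if (ret)
		dev_err(&pdev->dev, "failed to register pinctrl: %d\n", ret);
	return ret;
}
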
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
index e69e415..896cb71 100644
--- a/include/linux/platform_data/dma-dw.h
+++ b/include/linux/platform_data/dma-dw.h
@@ -41,6 +41,7 @@ struct dw_dma_slave {
* @is_private: The device channels should be marked as private and not for
* by the general purpose DMA channel allocator.
* @is_memcpy: The device channels do support memory-to-memory transfers.
+ * @is_idma32: The type of the DMA controller is iDMA32
* @chan_allocation_order: Allocate channels starting from 0 or 7
* @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
* @block_size: Maximum block size supported by the controller
@@ -53,6 +54,7 @@ struct dw_dma_platform_data {
unsigned int nr_channels;
bool is_private;
bool is_memcpy;
+ bool is_idma32;
#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
unsigned char chan_allocation_order;
diff --git a/include/linux/platform_data/intel-spi.h b/include/linux/platform_data/intel-spi.h
new file mode 100644
index 0000000..942b0c3
--- /dev/null
+++ b/include/linux/platform_data/intel-spi.h
@@ -0,0 +1,31 @@
+/*
+ * Intel PCH/PCU SPI flash driver.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef INTEL_SPI_PDATA_H
+#define INTEL_SPI_PDATA_H
+
+enum intel_spi_type {
+ INTEL_SPI_BYT = 1,
+ INTEL_SPI_LPT,
+ INTEL_SPI_BXT,
+};
+
+/**
+ * struct intel_spi_boardinfo - Board specific data for Intel SPI driver
+ * @type: Type which this controller is compatible with
+ * @writeable: The chip is writeable
+ */
+struct intel_spi_boardinfo {
+ enum intel_spi_type type;
+ bool writeable;
+};
+
+#endif /* INTEL_SPI_PDATA_H */
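
A small sketch of board data for the new header; how the intel-spi driver consumes it (for example as platform data attached to its platform or PCI device) is an assumption here, not something this header shows.

#include <linux/platform_data/intel-spi.h>

static const struct intel_spi_boardinfo foo_spi_info = {
	.type		= INTEL_SPI_LPT,
	.writeable	= false,	/* keep the SPI flash read-only */
};
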
diff --git a/include/linux/platform_data/media/ir-rx51.h b/include/linux/platform_data/media/ir-rx51.h
index 812d873..2c94ab5 100644
--- a/include/linux/platform_data/media/ir-rx51.h
+++ b/include/linux/platform_data/media/ir-rx51.h
@@ -1,7 +1,7 @@
-#ifndef _LIRC_RX51_H
-#define _LIRC_RX51_H
+#ifndef _IR_RX51_H
+#define _IR_RX51_H
-struct lirc_rx51_platform_data {
+struct ir_rx51_platform_data {
int(*set_max_mpu_wakeup_lat)(struct device *dev, long t);
};
diff --git a/include/linux/platform_data/mmc-mxcmmc.h b/include/linux/platform_data/mmc-mxcmmc.h
index 29115f4..b0fdaa9 100644
--- a/include/linux/platform_data/mmc-mxcmmc.h
+++ b/include/linux/platform_data/mmc-mxcmmc.h
@@ -1,6 +1,7 @@
#ifndef ASMARM_ARCH_MMC_H
#define ASMARM_ARCH_MMC_H
+#include <linux/interrupt.h>
#include <linux/mmc/host.h>
struct device;
diff --git a/include/linux/platform_data/spi-ep93xx.h b/include/linux/platform_data/spi-ep93xx.h
index 9bb63ac..171a271 100644
--- a/include/linux/platform_data/spi-ep93xx.h
+++ b/include/linux/platform_data/spi-ep93xx.h
@@ -5,25 +5,14 @@ struct spi_device;
/**
* struct ep93xx_spi_info - EP93xx specific SPI descriptor
- * @num_chipselect: number of chip selects on this board, must be
- * at least one
+ * @chipselect: array of gpio numbers to use as chip selects
+ * @num_chipselect: ARRAY_SIZE(chipselect)
* @use_dma: use DMA for the transfers
*/
struct ep93xx_spi_info {
+ int *chipselect;
int num_chipselect;
bool use_dma;
};
-/**
- * struct ep93xx_spi_chip_ops - operation callbacks for SPI slave device
- * @setup: setup the chip select mechanism
- * @cleanup: cleanup the chip select mechanism
- * @cs_control: control the device chip select
- */
-struct ep93xx_spi_chip_ops {
- int (*setup)(struct spi_device *spi);
- void (*cleanup)(struct spi_device *spi);
- void (*cs_control)(struct spi_device *spi, int value);
-};
-
#endif /* __ASM_MACH_EP93XX_SPI_H */
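
A board-file sketch for the reworked ep93xx_spi_info: chip selects are now plain GPIO numbers handed to the core instead of per-device chip-select callbacks. The GPIO numbers below are placeholders.

#include <linux/kernel.h>
#include <linux/platform_data/spi-ep93xx.h>

static int foo_spi_cs_gpios[] = { 17, 18 };	/* hypothetical CS GPIOs */

static struct ep93xx_spi_info foo_spi_info = {
	.chipselect	= foo_spi_cs_gpios,
	.num_chipselect	= ARRAY_SIZE(foo_spi_cs_gpios),
	.use_dma	= true,
};
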
diff --git a/include/linux/platform_data/ti-aemif.h b/include/linux/platform_data/ti-aemif.h
new file mode 100644
index 0000000..ac72e11
--- /dev/null
+++ b/include/linux/platform_data/ti-aemif.h
@@ -0,0 +1,23 @@
+/*
+ * TI DaVinci AEMIF platform glue.
+ *
+ * Copyright (C) 2017 BayLibre SAS
+ *
+ * Author:
+ * Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __TI_DAVINCI_AEMIF_DATA_H__
+#define __TI_DAVINCI_AEMIF_DATA_H__
+
+#include <linux/of_platform.h>
+
+struct aemif_platform_data {
+ struct of_dev_auxdata *dev_lookup;
+};
+
+#endif /* __TI_DAVINCI_AEMIF_DATA_H__ */
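
A sketch of how a DaVinci board might hand an auxdata lookup table to the AEMIF driver through the new aemif_platform_data; the compatible string and the attached platform data are placeholders, and OF_DEV_AUXDATA() is the existing helper from of_platform.h, which this header already includes.

#include <linux/platform_data/ti-aemif.h>

struct foo_nand_pdata {			/* driver-specific, hypothetical */
	int ecc_mode;
};

static struct foo_nand_pdata foo_nand_data;

static struct of_dev_auxdata foo_aemif_auxdata[] = {
	OF_DEV_AUXDATA("ti,davinci-nand", 0, NULL, &foo_nand_data),
	{ }
};

static struct aemif_platform_data foo_aemif_pdata = {
	.dev_lookup = foo_aemif_auxdata,
};
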
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 81ece61..5339ed5 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -182,6 +182,9 @@ static inline int pm_genpd_remove(struct generic_pm_domain *genpd)
{
return -ENOTSUPP;
}
+
+#define simple_qos_governor (*(struct dev_power_governor *)(NULL))
+#define pm_domain_always_on_gov (*(struct dev_power_governor *)(NULL))
#endif
static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 0edd88f..a6685b3 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -78,6 +78,9 @@ struct dev_pm_set_opp_data {
#if defined(CONFIG_PM_OPP)
+struct opp_table *dev_pm_opp_get_opp_table(struct device *dev);
+void dev_pm_opp_put_opp_table(struct opp_table *opp_table);
+
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
@@ -88,7 +91,7 @@ int dev_pm_opp_get_opp_count(struct device *dev);
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev);
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev);
-struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev);
+unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev);
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq,
@@ -99,6 +102,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq);
+void dev_pm_opp_put(struct dev_pm_opp *opp);
int dev_pm_opp_add(struct device *dev, unsigned long freq,
unsigned long u_volt);
@@ -108,22 +112,30 @@ int dev_pm_opp_enable(struct device *dev, unsigned long freq);
int dev_pm_opp_disable(struct device *dev, unsigned long freq);
-struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev);
-int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
- unsigned int count);
-void dev_pm_opp_put_supported_hw(struct device *dev);
-int dev_pm_opp_set_prop_name(struct device *dev, const char *name);
-void dev_pm_opp_put_prop_name(struct device *dev);
+int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb);
+int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb);
+
+struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int count);
+void dev_pm_opp_put_supported_hw(struct opp_table *opp_table);
+struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name);
+void dev_pm_opp_put_prop_name(struct opp_table *opp_table);
struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count);
void dev_pm_opp_put_regulators(struct opp_table *opp_table);
-int dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
-void dev_pm_opp_register_put_opp_helper(struct device *dev);
+struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
+void dev_pm_opp_register_put_opp_helper(struct opp_table *opp_table);
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
void dev_pm_opp_remove_table(struct device *dev);
void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask);
#else
+static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
+static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {}
+
static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
return 0;
@@ -159,9 +171,9 @@ static inline unsigned long dev_pm_opp_get_max_transition_latency(struct device
return 0;
}
-static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
+static inline unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
{
- return NULL;
+ return 0;
}
static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
@@ -182,6 +194,8 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
return ERR_PTR(-ENOTSUPP);
}
+static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {}
+
static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
unsigned long u_volt)
{
@@ -202,35 +216,39 @@ static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq)
return 0;
}
-static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
- struct device *dev)
+static inline int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
{
- return ERR_PTR(-ENOTSUPP);
+ return -ENOTSUPP;
}
-static inline int dev_pm_opp_set_supported_hw(struct device *dev,
- const u32 *versions,
- unsigned int count)
+static inline int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb)
{
return -ENOTSUPP;
}
-static inline void dev_pm_opp_put_supported_hw(struct device *dev) {}
+static inline struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
+ const u32 *versions,
+ unsigned int count)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
-static inline int dev_pm_opp_register_set_opp_helper(struct device *dev,
+static inline void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) {}
+
+static inline struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
int (*set_opp)(struct dev_pm_set_opp_data *data))
{
- return -ENOTSUPP;
+ return ERR_PTR(-ENOTSUPP);
}
-static inline void dev_pm_opp_register_put_opp_helper(struct device *dev) {}
+static inline void dev_pm_opp_register_put_opp_helper(struct opp_table *opp_table) {}
-static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
+static inline struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
- return -ENOTSUPP;
+ return ERR_PTR(-ENOTSUPP);
}
-static inline void dev_pm_opp_put_prop_name(struct device *dev) {}
+static inline void dev_pm_opp_put_prop_name(struct opp_table *opp_table) {}
static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count)
{
@@ -270,6 +288,7 @@ void dev_pm_opp_of_remove_table(struct device *dev);
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask);
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
+struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
#else
static inline int dev_pm_opp_of_add_table(struct device *dev)
{
@@ -293,6 +312,11 @@ static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct
{
return -ENOTSUPP;
}
+
+static inline struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
+{
+ return NULL;
+}
#endif
#endif /* __LINUX_OPP_H__ */
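
A sketch of the reworked OPP API above: helpers that used to key off the device now return a reference-counted struct opp_table that must be released with the matching put helper, and individual OPPs gained an explicit dev_pm_opp_put(). Error handling is abbreviated and the "foo" function is a placeholder.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pm_opp.h>

static int foo_setup_opp(struct device *dev)
{
	const u32 versions[] = { 1 };
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;
	unsigned long freq = 0;

	opp_table = dev_pm_opp_set_supported_hw(dev, versions,
						ARRAY_SIZE(versions));
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	/* Per-OPP references are explicit as well now. */
	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
	if (!IS_ERR(opp)) {
		unsigned long volt = dev_pm_opp_get_voltage(opp);

		dev_pm_opp_put(opp);
		dev_info(dev, "lowest OPP: %lu Hz, %lu uV\n", freq, volt);
	}

	/* ... use the OPPs ... */

	dev_pm_opp_put_supported_hw(opp_table);	/* drop the table reference */
	return 0;
}
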
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 0f65d36..d4d3479 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -6,7 +6,6 @@
*/
#include <linux/plist.h>
#include <linux/notifier.h>
-#include <linux/miscdevice.h>
#include <linux/device.h>
#include <linux/workqueue.h>
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 51334ed..a395403 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -80,6 +80,7 @@
/********** kernel/mutexes **********/
#define MUTEX_DEBUG_INIT 0x11
#define MUTEX_DEBUG_FREE 0x22
+#define MUTEX_POISON_WW_CTX ((void *) 0x500 + POISON_POINTER_DELTA)
/********** lib/flex_array.c **********/
#define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 62d44c1..64aa189 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -8,19 +8,9 @@
#include <linux/alarmtimer.h>
-static inline unsigned long long cputime_to_expires(cputime_t expires)
-{
- return (__force unsigned long long)expires;
-}
-
-static inline cputime_t expires_to_cputime(unsigned long long expires)
-{
- return (__force cputime_t)expires;
-}
-
struct cpu_timer_list {
struct list_head entry;
- unsigned long long expires, incr;
+ u64 expires, incr;
struct task_struct *task;
int firing;
};
@@ -129,7 +119,7 @@ void run_posix_cpu_timers(struct task_struct *task);
void posix_cpu_timers_exit(struct task_struct *task);
void posix_cpu_timers_exit_group(struct task_struct *task);
void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
- cputime_t *newval, cputime_t *oldval);
+ u64 *newval, u64 *oldval);
long clock_nanosleep_restart(struct restart_block *restart_block);
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index bed9557..b312bce 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -4,8 +4,16 @@
enum bq27xxx_chip {
BQ27000 = 1, /* bq27000, bq27200 */
BQ27010, /* bq27010, bq27210 */
- BQ27500, /* bq27500 */
- BQ27510, /* bq27510, bq27520 */
+ BQ2750X, /* bq27500 deprecated alias */
+ BQ2751X, /* bq27510, bq27520 deprecated alias */
+ BQ27500, /* bq27500/1 */
+ BQ27510G1, /* bq27510G1 */
+ BQ27510G2, /* bq27510G2 */
+ BQ27510G3, /* bq27510G3 */
+ BQ27520G1, /* bq27520G1 */
+ BQ27520G2, /* bq27520G2 */
+ BQ27520G3, /* bq27520G3 */
+ BQ27520G4, /* bq27520G4 */
BQ27530, /* bq27530, bq27531 */
BQ27541, /* bq27541, bq27542, bq27546, bq27742 */
BQ27545, /* bq27545 */
diff --git a/include/linux/property.h b/include/linux/property.h
index 856e50b..64e3a9c 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -160,12 +160,12 @@ struct property_entry {
bool is_string;
union {
union {
- void *raw_data;
- u8 *u8_data;
- u16 *u16_data;
- u32 *u32_data;
- u64 *u64_data;
- const char **str;
+ const void *raw_data;
+ const u8 *u8_data;
+ const u16 *u16_data;
+ const u32 *u32_data;
+ const u64 *u64_data;
+ const char * const *str;
} pointer;
union {
unsigned long long raw_data;
@@ -241,8 +241,13 @@ struct property_entry {
.name = _name_, \
}
+struct property_entry *
+property_entries_dup(const struct property_entry *properties);
+
+void property_entries_free(const struct property_entry *properties);
+
int device_add_properties(struct device *dev,
- struct property_entry *properties);
+ const struct property_entry *properties);
void device_remove_properties(struct device *dev);
bool device_dma_supported(struct device *dev);
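
A sketch of the constified property API above: entry tables can now live in rodata, device_add_properties() takes them as const, and property_entries_dup()/property_entries_free() let callers deep-copy a table before handing it on. The PROPERTY_ENTRY_* initializers are assumed to be the existing helpers from this header (not visible in the hunk), and the property names are placeholders.

#include <linux/property.h>

static const struct property_entry foo_props[] = {
	PROPERTY_ENTRY_U32("linux,foo-delay-ms", 20),
	PROPERTY_ENTRY_STRING("label", "foo-device"),
	{ }
};

static int foo_attach_props(struct device *dev)
{
	/* Takes the const table directly. */
	return device_add_properties(dev, foo_props);
}

static struct property_entry *foo_copy_props(void)
{
	/* Returns a writable deep copy, later released with
	 * property_entries_free(). */
	return property_entries_dup(foo_props);
}
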
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 2052011..6c70444 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -111,6 +111,11 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
return 0;
}
+/*
+ * Note: resize (below) nests producer lock within consumer lock, so if you
+ * consume in interrupt or BH context, you must disable interrupts/BH when
+ * calling this.
+ */
static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
int ret;
@@ -242,6 +247,11 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r)
return ptr;
}
+/*
+ * Note: resize (below) nests producer lock within consumer lock, so if you
+ * call this in interrupt or BH context, you must disable interrupts/BH when
+ * producing.
+ */
static inline void *ptr_ring_consume(struct ptr_ring *r)
{
void *ptr;
@@ -357,7 +367,7 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
void **old;
void *ptr;
- while ((ptr = ptr_ring_consume(r)))
+ while ((ptr = __ptr_ring_consume(r)))
if (producer < size)
queue[producer++] = ptr;
else if (destroy)
@@ -372,6 +382,12 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
return old;
}
+/*
+ * Note: producer lock is nested within consumer lock, so if you
+ * resize you must make sure all uses nest correctly.
+ * In particular if you consume ring in interrupt or BH context, you must
+ * disable interrupts/BH when doing so.
+ */
static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
void (*destroy)(void *))
{
@@ -382,17 +398,25 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
if (!queue)
return -ENOMEM;
- spin_lock_irqsave(&(r)->producer_lock, flags);
+ spin_lock_irqsave(&(r)->consumer_lock, flags);
+ spin_lock(&(r)->producer_lock);
old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy);
- spin_unlock_irqrestore(&(r)->producer_lock, flags);
+ spin_unlock(&(r)->producer_lock);
+ spin_unlock_irqrestore(&(r)->consumer_lock, flags);
kfree(old);
return 0;
}
+/*
+ * Note: producer lock is nested within consumer lock, so if you
+ * resize you must make sure all uses nest correctly.
+ * In particular if you consume ring in interrupt or BH context, you must
+ * disable interrupts/BH when doing so.
+ */
static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
int size,
gfp_t gfp, void (*destroy)(void *))
@@ -412,10 +436,12 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
}
for (i = 0; i < nrings; ++i) {
- spin_lock_irqsave(&(rings[i])->producer_lock, flags);
+ spin_lock_irqsave(&(rings[i])->consumer_lock, flags);
+ spin_lock(&(rings[i])->producer_lock);
queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
size, gfp, destroy);
- spin_unlock_irqrestore(&(rings[i])->producer_lock, flags);
+ spin_unlock(&(rings[i])->producer_lock);
+ spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags);
}
for (i = 0; i < nrings; ++i)
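
A sketch of the locking rule spelled out by the new comments above: because resize now takes the consumer lock outside the producer lock, a ring that is consumed from BH context must also be produced to (and resized) with BH disabled. The _bh variant is assumed to be one of the existing helpers in this header; the "foo" wrappers are placeholders.

#include <linux/ptr_ring.h>
#include <linux/slab.h>

static int foo_queue_packet(struct ptr_ring *ring, void *pkt)
{
	/* Producer side: disable BH because the consumer runs in BH. */
	return ptr_ring_produce_bh(ring, pkt);
}

static void *foo_dequeue_packet(struct ptr_ring *ring)
{
	/* Consumer side, e.g. from NAPI poll: BH is already disabled,
	 * so the plain spinlock variant is sufficient. */
	return ptr_ring_consume(ring);
}

static int foo_grow_ring(struct ptr_ring *ring, int new_size,
			 void (*destroy)(void *))
{
	/* Resize from process context; it takes consumer_lock with IRQs
	 * disabled and then producer_lock, matching the nesting above. */
	return ptr_ring_resize(ring, new_size, GFP_KERNEL, destroy);
}
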
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 2d6f0c39..a052232 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -90,9 +90,9 @@
#define SSSR_RFL_MASK (0xf << 12) /* Receive FIFO Level mask */
#define SSCR1_TFT (0x000003c0) /* Transmit FIFO Threshold (mask) */
-#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
+#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
#define SSCR1_RFT (0x00003c00) /* Receive FIFO Threshold (mask) */
-#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
+#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
#define RX_THRESH_CE4100_DFLT 2
#define TX_THRESH_CE4100_DFLT 2
@@ -106,9 +106,9 @@
#define CE4100_SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */
/* QUARK_X1000 SSCR0 bit definition */
-#define QUARK_X1000_SSCR0_DSS (0x1F) /* Data Size Select (mask) */
-#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */
-#define QUARK_X1000_SSCR0_FRF (0x3 << 5) /* FRame Format (mask) */
+#define QUARK_X1000_SSCR0_DSS (0x1F << 0) /* Data Size Select (mask) */
+#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */
+#define QUARK_X1000_SSCR0_FRF (0x3 << 5) /* FRame Format (mask) */
#define QUARK_X1000_SSCR0_Motorola (0x0 << 5) /* Motorola's Serial Peripheral Interface (SPI) */
#define RX_THRESH_QUARK_X1000_DFLT 1
@@ -121,8 +121,8 @@
#define QUARK_X1000_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..32] */
#define QUARK_X1000_SSCR1_RFT (0x1F << 11) /* Receive FIFO Threshold (mask) */
#define QUARK_X1000_SSCR1_RxTresh(x) (((x) - 1) << 11) /* level [1..32] */
-#define QUARK_X1000_SSCR1_STRF (1 << 17) /* Select FIFO or EFWR */
-#define QUARK_X1000_SSCR1_EFWR (1 << 16) /* Enable FIFO Write/Read */
+#define QUARK_X1000_SSCR1_STRF (1 << 17) /* Select FIFO or EFWR */
+#define QUARK_X1000_SSCR1_EFWR (1 << 16) /* Enable FIFO Write/Read */
/* extra bits in PXA255, PXA26x and PXA27x SSP ports */
#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 734deb0..52966b9 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -1,10 +1,35 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2016 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
+
#ifndef _COMMON_HSI_H
#define _COMMON_HSI_H
#include <linux/types.h>
@@ -37,6 +62,7 @@
#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64
#define ISCSI_CDU_TASK_SEG_TYPE 0
+#define FCOE_CDU_TASK_SEG_TYPE 0
#define RDMA_CDU_TASK_SEG_TYPE 1
#define FW_ASSERT_GENERAL_ATTN_IDX 32
@@ -180,6 +206,9 @@
#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5
#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
@@ -236,6 +265,7 @@
#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
@@ -266,6 +296,9 @@
#define DQ_TCM_AGG_FLG_SHIFT_CF6 6
#define DQ_TCM_AGG_FLG_SHIFT_CF7 7
/* TCM agg counter flag selection (FW) */
+#define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2)
+#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
#define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
@@ -703,7 +736,7 @@ enum mf_mode {
/* Per-protocol connection types */
enum protocol_type {
PROTOCOLID_ISCSI,
- PROTOCOLID_RESERVED2,
+ PROTOCOLID_FCOE,
PROTOCOLID_ROCE,
PROTOCOLID_CORE,
PROTOCOLID_ETH,
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index 1aa0727..4b402fb 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef __ETH_COMMON__
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h
new file mode 100644
index 0000000..2e417a4
--- /dev/null
+++ b/include/linux/qed/fcoe_common.h
@@ -0,0 +1,715 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __FCOE_COMMON__
+#define __FCOE_COMMON__
+/*********************/
+/* FCOE FW CONSTANTS */
+/*********************/
+
+#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12
+#define FCOE_MAX_SIZE_FCP_DATA_SUPER (8600)
+
+struct fcoe_abts_pkt {
+ __le32 abts_rsp_fc_payload_lo;
+ __le16 abts_rsp_rx_id;
+ u8 abts_rsp_rctl;
+ u8 reserved2;
+};
+
+/* FCoE additional WQE (Sq/XferQ) information */
+union fcoe_additional_info_union {
+ __le32 previous_tid;
+ __le32 parent_tid;
+ __le32 burst_length;
+ __le32 seq_rec_updated_offset;
+};
+
+struct fcoe_exp_ro {
+ __le32 data_offset;
+ __le32 reserved;
+};
+
+union fcoe_cleanup_addr_exp_ro_union {
+ struct regpair abts_rsp_fc_payload_hi;
+ struct fcoe_exp_ro exp_ro;
+};
+
+/* FCoE Ramrod Command IDs */
+enum fcoe_completion_status {
+ FCOE_COMPLETION_STATUS_SUCCESS,
+ FCOE_COMPLETION_STATUS_FCOE_VER_ERR,
+ FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR,
+ MAX_FCOE_COMPLETION_STATUS
+};
+
+struct fc_addr_nw {
+ u8 addr_lo;
+ u8 addr_mid;
+ u8 addr_hi;
+};
+
+/* FCoE connection offload */
+struct fcoe_conn_offload_ramrod_data {
+ struct regpair sq_pbl_addr;
+ struct regpair sq_curr_page_addr;
+ struct regpair sq_next_page_addr;
+ struct regpair xferq_pbl_addr;
+ struct regpair xferq_curr_page_addr;
+ struct regpair xferq_next_page_addr;
+ struct regpair respq_pbl_addr;
+ struct regpair respq_curr_page_addr;
+ struct regpair respq_next_page_addr;
+ __le16 dst_mac_addr_lo;
+ __le16 dst_mac_addr_mid;
+ __le16 dst_mac_addr_hi;
+ __le16 src_mac_addr_lo;
+ __le16 src_mac_addr_mid;
+ __le16 src_mac_addr_hi;
+ __le16 tx_max_fc_pay_len;
+ __le16 e_d_tov_timer_val;
+ __le16 rx_max_fc_pay_len;
+ __le16 vlan_tag;
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13
+ __le16 physical_q0;
+ __le16 rec_rr_tov_timer_val;
+ struct fc_addr_nw s_id;
+ u8 max_conc_seqs_c3;
+ struct fc_addr_nw d_id;
+ u8 flags;
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 4
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x3
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 6
+ __le16 conn_id;
+ u8 def_q_idx;
+ u8 reserved[5];
+};
+
+/* FCoE terminate connection request */
+struct fcoe_conn_terminate_ramrod_data {
+ struct regpair terminate_params_addr;
+};
+
+struct fcoe_fast_sgl_ctx {
+ struct regpair sgl_start_addr;
+ __le32 sgl_byte_offset;
+ __le16 task_reuse_cnt;
+ __le16 init_offset_in_first_sge;
+};
+
+struct fcoe_slow_sgl_ctx {
+ struct regpair base_sgl_addr;
+ __le16 curr_sge_off;
+ __le16 remainder_num_sges;
+ __le16 curr_sgl_index;
+ __le16 reserved;
+};
+
+struct fcoe_sge {
+ struct regpair sge_addr;
+ __le16 size;
+ __le16 reserved0;
+ u8 reserved1[3];
+ u8 is_valid_sge;
+};
+
+union fcoe_data_desc_ctx {
+ struct fcoe_fast_sgl_ctx fast;
+ struct fcoe_slow_sgl_ctx slow;
+ struct fcoe_sge single_sge;
+};
+
+union fcoe_dix_desc_ctx {
+ struct fcoe_slow_sgl_ctx dix_sgl;
+ struct fcoe_sge cached_dix_sge;
+};
+
+struct fcoe_fcp_cmd_payload {
+ __le32 opaque[8];
+};
+
+struct fcoe_fcp_rsp_payload {
+ __le32 opaque[6];
+};
+
+struct fcoe_fcp_xfer_payload {
+ __le32 opaque[3];
+};
+
+/* FCoE firmware function init */
+struct fcoe_init_func_ramrod_data {
+ struct scsi_init_func_params func_params;
+ struct scsi_init_func_queues q_params;
+ __le16 mtu;
+ __le16 sq_num_pages_in_pbl;
+ __le32 reserved;
+};
+
+/* FCoE: Mode of the connection: Target or Initiator or both */
+enum fcoe_mode_type {
+ FCOE_INITIATOR_MODE = 0x0,
+ FCOE_TARGET_MODE = 0x1,
+ FCOE_BOTH_OR_NOT_CHOSEN = 0x3,
+ MAX_FCOE_MODE_TYPE
+};
+
+struct fcoe_mstorm_fcoe_task_st_ctx_fp {
+ __le16 flags;
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_MASK 0x7FFF
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_SHIFT 0
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_MASK 0x1
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_SHIFT 15
+ __le16 difDataResidue;
+ __le16 parent_id;
+ __le16 single_sge_saved_offset;
+ __le32 data_2_trns_rem;
+ __le32 offset_in_io;
+ union fcoe_dix_desc_ctx dix_desc;
+ union fcoe_data_desc_ctx data_desc;
+};
+
+struct fcoe_mstorm_fcoe_task_st_ctx_non_fp {
+ __le16 flags;
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_MASK 0x3
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_SHIFT 0
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_MASK 0x1
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_SHIFT 2
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_MASK 0x1
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_SHIFT 3
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_MASK 0xF
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_SHIFT 4
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_MASK 0x3
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_SHIFT 8
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_MASK 0x1
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_SHIFT 10
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_MASK 0x1
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_SHIFT 11
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_MASK 0x1
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_SHIFT 12
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_MASK 0x1
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_SHIFT 13
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_MASK 0x1
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_SHIFT 14
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_MASK 0x1
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_SHIFT 15
+ u8 tx_rx_sgl_mode;
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_MASK 0x7
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_SHIFT 0
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_MASK 0x7
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_SHIFT 3
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_MASK 0x3
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_SHIFT 6
+ u8 rsrv2;
+ __le32 num_prm_zero_read;
+ struct regpair rsp_buf_addr;
+};
+
+struct fcoe_rx_stat {
+ struct regpair fcoe_rx_byte_cnt;
+ struct regpair fcoe_rx_data_pkt_cnt;
+ struct regpair fcoe_rx_xfer_pkt_cnt;
+ struct regpair fcoe_rx_other_pkt_cnt;
+ __le32 fcoe_silent_drop_pkt_cmdq_full_cnt;
+ __le32 fcoe_silent_drop_pkt_rq_full_cnt;
+ __le32 fcoe_silent_drop_pkt_crc_error_cnt;
+ __le32 fcoe_silent_drop_pkt_task_invalid_cnt;
+ __le32 fcoe_silent_drop_total_pkt_cnt;
+ __le32 rsrv;
+};
+
+enum fcoe_sgl_mode {
+ FCOE_SLOW_SGL,
+ FCOE_SINGLE_FAST_SGE,
+ FCOE_2_FAST_SGE,
+ FCOE_3_FAST_SGE,
+ FCOE_4_FAST_SGE,
+ FCOE_MUL_FAST_SGES,
+ MAX_FCOE_SGL_MODE
+};
+
+struct fcoe_stat_ramrod_data {
+ struct regpair stat_params_addr;
+};
+
+struct protection_info_ctx {
+ __le16 flags;
+#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3
+#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0
+#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1
+#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3
+#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF
+#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8
+#define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F
+#define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9
+ u8 dix_block_size;
+ u8 dst_size;
+};
+
+union protection_info_union_ctx {
+ struct protection_info_ctx info;
+ __le32 value;
+};
+
+struct fcp_rsp_payload_padded {
+ struct fcoe_fcp_rsp_payload rsp_payload;
+ __le32 reserved[2];
+};
+
+struct fcp_xfer_payload_padded {
+ struct fcoe_fcp_xfer_payload xfer_payload;
+ __le32 reserved[5];
+};
+
+struct fcoe_tx_data_params {
+ __le32 data_offset;
+ __le32 offset_in_io;
+ u8 flags;
+#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1
+#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0
+#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1
+#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1
+#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1
+#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2
+#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F
+#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3
+ u8 dif_residual;
+ __le16 seq_cnt;
+ __le16 single_sge_saved_offset;
+ __le16 next_dif_offset;
+ __le16 seq_id;
+ __le16 reserved3;
+};
+
+struct fcoe_tx_mid_path_params {
+ __le32 parameter;
+ u8 r_ctl;
+ u8 type;
+ u8 cs_ctl;
+ u8 df_ctl;
+ __le16 rx_id;
+ __le16 ox_id;
+};
+
+struct fcoe_tx_params {
+ struct fcoe_tx_data_params data;
+ struct fcoe_tx_mid_path_params mid_path;
+};
+
+union fcoe_tx_info_union_ctx {
+ struct fcoe_fcp_cmd_payload fcp_cmd_payload;
+ struct fcp_rsp_payload_padded fcp_rsp_payload;
+ struct fcp_xfer_payload_padded fcp_xfer_payload;
+ struct fcoe_tx_params tx_params;
+};
+
+struct ystorm_fcoe_task_st_ctx {
+ u8 task_type;
+ u8 sgl_mode;
+#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x7
+#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x1F
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 3
+ u8 cached_dix_sge;
+ u8 expect_first_xfer;
+ __le32 num_pbf_zero_write;
+ union protection_info_union_ctx protection_info_union;
+ __le32 data_2_trns_rem;
+ union fcoe_tx_info_union_ctx tx_info_union;
+ union fcoe_dix_desc_ctx dix_desc;
+ union fcoe_data_desc_ctx data_desc;
+ __le16 ox_id;
+ __le16 rx_id;
+ __le32 task_rety_identifier;
+ __le32 reserved1[2];
+};
+
+struct ystorm_fcoe_task_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ __le16 word0;
+ u8 flags0;
+#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 byte2;
+ __le32 reg0;
+ u8 byte3;
+ u8 byte4;
+ __le16 rx_id;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le16 word5;
+ __le32 reg1;
+ __le32 reg2;
+};
+
+struct tstorm_fcoe_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 icid;
+ u8 flags0;
+#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7
+ u8 flags1;
+#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6
+ u8 flags2;
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6
+ u8 flags3;
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3
+#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7
+ u8 cleanup_state;
+ __le16 last_sent_tid;
+ __le32 rec_rr_tov_exp_timeout;
+ u8 byte3;
+ u8 byte4;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 data_offset_end_of_seq;
+ __le32 data_offset_next;
+};
+
+struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
+ union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union;
+ __le16 flags;
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x7
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 3
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 4
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 5
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 6
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 7
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 8
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0x3F
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 10
+ __le16 seq_cnt;
+ u8 seq_id;
+ u8 ooo_rx_seq_id;
+ __le16 rx_id;
+ struct fcoe_abts_pkt abts_data;
+ __le32 e_d_tov_exp_timeout_val;
+ __le16 ooo_rx_seq_cnt;
+ __le16 reserved1;
+};
+
+struct fcoe_tstorm_fcoe_task_st_ctx_read_only {
+ u8 task_type;
+ u8 dev_type;
+ u8 conf_supported;
+ u8 glbl_q_num;
+ __le32 cid;
+ __le32 fcp_cmd_trns_size;
+ __le32 rsrv;
+};
+
+struct tstorm_fcoe_task_st_ctx {
+ struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write;
+ struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only;
+};
+
+struct mstorm_fcoe_task_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ __le16 icid;
+ u8 flags0;
+#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5
+#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 cleanup_state;
+ __le32 received_bytes;
+ u8 byte3;
+ u8 glbl_q_num;
+ __le16 word1;
+ __le16 tid_to_xfer;
+ __le16 word3;
+ __le16 word4;
+ __le16 word5;
+ __le32 expected_bytes;
+ __le32 reg2;
+};
+
+struct mstorm_fcoe_task_st_ctx {
+ struct fcoe_mstorm_fcoe_task_st_ctx_non_fp non_fp;
+ struct fcoe_mstorm_fcoe_task_st_ctx_fp fp;
+};
+
+struct ustorm_fcoe_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 icid;
+ u8 flags0;
+#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+ u8 flags2;
+#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5
+#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7
+ u8 flags3;
+#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+ __le32 dif_err_intervals;
+ __le32 dif_error_1st_interval;
+ __le32 global_cq_num;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+};
+
+struct fcoe_task_context {
+ struct ystorm_fcoe_task_st_ctx ystorm_st_context;
+ struct tdif_task_context tdif_context;
+ struct ystorm_fcoe_task_ag_ctx ystorm_ag_context;
+ struct tstorm_fcoe_task_ag_ctx tstorm_ag_context;
+ struct timers_context timer_context;
+ struct tstorm_fcoe_task_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2];
+ struct mstorm_fcoe_task_ag_ctx mstorm_ag_context;
+ struct mstorm_fcoe_task_st_ctx mstorm_st_context;
+ struct ustorm_fcoe_task_ag_ctx ustorm_ag_context;
+ struct rdif_task_context rdif_context;
+};
+
+struct fcoe_tx_stat {
+ struct regpair fcoe_tx_byte_cnt;
+ struct regpair fcoe_tx_data_pkt_cnt;
+ struct regpair fcoe_tx_xfer_pkt_cnt;
+ struct regpair fcoe_tx_other_pkt_cnt;
+};
+
+struct fcoe_wqe {
+ __le16 task_id;
+ __le16 flags;
+#define FCOE_WQE_REQ_TYPE_MASK 0xF
+#define FCOE_WQE_REQ_TYPE_SHIFT 0
+#define FCOE_WQE_SGL_MODE_MASK 0x7
+#define FCOE_WQE_SGL_MODE_SHIFT 4
+#define FCOE_WQE_CONTINUATION_MASK 0x1
+#define FCOE_WQE_CONTINUATION_SHIFT 7
+#define FCOE_WQE_INVALIDATE_PTU_MASK 0x1
+#define FCOE_WQE_INVALIDATE_PTU_SHIFT 8
+#define FCOE_WQE_SUPER_IO_MASK 0x1
+#define FCOE_WQE_SUPER_IO_SHIFT 9
+#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1
+#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 10
+#define FCOE_WQE_RESERVED0_MASK 0x1F
+#define FCOE_WQE_RESERVED0_SHIFT 11
+ union fcoe_additional_info_union additional_info_union;
+};
+
+struct xfrqe_prot_flags {
+ u8 flags;
+#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
+#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
+#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1
+#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4
+#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3
+#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5
+#define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1
+#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7
+};
+
+struct fcoe_db_data {
+ u8 params;
+#define FCOE_DB_DATA_DEST_MASK 0x3
+#define FCOE_DB_DATA_DEST_SHIFT 0
+#define FCOE_DB_DATA_AGG_CMD_MASK 0x3
+#define FCOE_DB_DATA_AGG_CMD_SHIFT 2
+#define FCOE_DB_DATA_BYPASS_EN_MASK 0x1
+#define FCOE_DB_DATA_BYPASS_EN_SHIFT 4
+#define FCOE_DB_DATA_RESERVED_MASK 0x1
+#define FCOE_DB_DATA_RESERVED_SHIFT 5
+#define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags;
+ __le16 sq_prod;
+};
+#endif /* __FCOE_COMMON__ */
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 8f64b122..4c5747b 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef __ISCSI_COMMON__
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 37dfba1..5cd7a46 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_CHAIN_H
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 7a52f7c..4cd1f0c 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_ETH_IF_H
@@ -53,7 +77,7 @@ struct qed_dev_eth_info {
};
struct qed_update_vport_rss_params {
- u16 rss_ind_table[128];
+ void *rss_ind_table[128];
u32 rss_key[10];
u8 rss_caps;
};
@@ -72,6 +96,7 @@ struct qed_update_vport_params {
struct qed_start_vport_params {
bool remove_inner_vlan;
+ bool handle_ptp_pkts;
bool gro_enable;
bool drop_ttl0;
u8 vport_id;
@@ -135,6 +160,15 @@ struct qed_eth_cb_ops {
void (*force_mac) (void *dev, u8 *mac, bool forced);
};
+#define QED_MAX_PHC_DRIFT_PPB 291666666
+
+enum qed_ptp_filter_type {
+ QED_PTP_FILTER_L2,
+ QED_PTP_FILTER_IPV4,
+ QED_PTP_FILTER_IPV4_IPV6,
+ QED_PTP_FILTER_L2_IPV4_IPV6
+};
+
#ifdef CONFIG_DCB
/* Prototype declaration of qed_eth_dcbnl_ops should match with the declaration
* of dcbnl_rtnl_ops structure.
@@ -194,6 +228,17 @@ struct qed_eth_dcbnl_ops {
};
#endif
+struct qed_eth_ptp_ops {
+ int (*hwtstamp_tx_on)(struct qed_dev *);
+ int (*cfg_rx_filters)(struct qed_dev *, enum qed_ptp_filter_type);
+ int (*read_rx_ts)(struct qed_dev *, u64 *);
+ int (*read_tx_ts)(struct qed_dev *, u64 *);
+ int (*read_cc)(struct qed_dev *, u64 *);
+ int (*disable)(struct qed_dev *);
+ int (*adjfreq)(struct qed_dev *, s32);
+ int (*enable)(struct qed_dev *);
+};
+
struct qed_eth_ops {
const struct qed_common_ops *common;
#ifdef CONFIG_QED_SRIOV
@@ -202,6 +247,7 @@ struct qed_eth_ops {
#ifdef CONFIG_DCB
const struct qed_eth_dcbnl_ops *dcb;
#endif
+ const struct qed_eth_ptp_ops *ptp;
int (*fill_dev_info)(struct qed_dev *cdev,
struct qed_dev_eth_info *info);
diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h
new file mode 100644
index 0000000..bd6bcb8
--- /dev/null
+++ b/include/linux/qed/qed_fcoe_if.h
@@ -0,0 +1,145 @@
+#ifndef _QED_FCOE_IF_H
+#define _QED_FCOE_IF_H
+#include <linux/types.h>
+#include <linux/qed/qed_if.h>
+struct qed_fcoe_stats {
+ u64 fcoe_rx_byte_cnt;
+ u64 fcoe_rx_data_pkt_cnt;
+ u64 fcoe_rx_xfer_pkt_cnt;
+ u64 fcoe_rx_other_pkt_cnt;
+ u32 fcoe_silent_drop_pkt_cmdq_full_cnt;
+ u32 fcoe_silent_drop_pkt_rq_full_cnt;
+ u32 fcoe_silent_drop_pkt_crc_error_cnt;
+ u32 fcoe_silent_drop_pkt_task_invalid_cnt;
+ u32 fcoe_silent_drop_total_pkt_cnt;
+
+ u64 fcoe_tx_byte_cnt;
+ u64 fcoe_tx_data_pkt_cnt;
+ u64 fcoe_tx_xfer_pkt_cnt;
+ u64 fcoe_tx_other_pkt_cnt;
+};
+
+struct qed_dev_fcoe_info {
+ struct qed_dev_info common;
+
+ void __iomem *primary_dbq_rq_addr;
+ void __iomem *secondary_bdq_rq_addr;
+};
+
+struct qed_fcoe_params_offload {
+ dma_addr_t sq_pbl_addr;
+ dma_addr_t sq_curr_page_addr;
+ dma_addr_t sq_next_page_addr;
+
+ u8 src_mac[ETH_ALEN];
+ u8 dst_mac[ETH_ALEN];
+
+ u16 tx_max_fc_pay_len;
+ u16 e_d_tov_timer_val;
+ u16 rec_tov_timer_val;
+ u16 rx_max_fc_pay_len;
+ u16 vlan_tag;
+
+ struct fc_addr_nw s_id;
+ u8 max_conc_seqs_c3;
+ struct fc_addr_nw d_id;
+ u8 flags;
+ u8 def_q_idx;
+};
+
+#define MAX_TID_BLOCKS_FCOE (512)
+struct qed_fcoe_tid {
+ u32 size; /* In bytes per task */
+ u32 num_tids_per_block;
+ u8 *blocks[MAX_TID_BLOCKS_FCOE];
+};
+
+struct qed_fcoe_cb_ops {
+ struct qed_common_cb_ops common;
+ u32 (*get_login_failures)(void *cookie);
+};
+
+void qed_fcoe_set_pf_params(struct qed_dev *cdev,
+ struct qed_fcoe_pf_params *params);
+
+/**
+ * struct qed_fcoe_ops - qed FCoE operations.
+ * @common: common operations pointer
+ * @fill_dev_info: fills FCoE specific information
+ * @param cdev
+ * @param info
+ * @return 0 on success, otherwise error value.
+ * @register_ops: register FCoE operations
+ * @param cdev
+ * @param ops - specified using qed_fcoe_cb_ops
+ * @param cookie - driver private
+ * @ll2: light L2 operations pointer
+ * @start: starts fcoe in FW
+ * @param cdev
+ * @param tasks - qed will fill information about tasks
+ * return 0 on success, otherwise error value.
+ * @stop: stops fcoe in FW
+ * @param cdev
+ * return 0 on success, otherwise error value.
+ * @acquire_conn: acquire a new fcoe connection
+ * @param cdev
+ * @param handle - qed will fill handle that should be
+ * used henceforth as identifier of the
+ * connection.
+ * @param p_doorbell - qed will fill the address of the
+ * doorbell.
+ * return 0 on success, otherwise error value.
+ * @release_conn: release a previously acquired fcoe connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * return 0 on success, otherwise error value.
+ * @offload_conn: configures an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @param conn_info - the configuration to use for the
+ * offload.
+ * return 0 on success, otherwise error value.
+ * @destroy_conn: stops an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @param terminate_params
+ * return 0 on success, otherwise error value.
+ * @get_stats: gets FCoE related statistics
+ * @param cdev
+ * @param stats - pointer to struct that will be filled
+ * with stats
+ * return 0 on success, error otherwise.
+ */
+struct qed_fcoe_ops {
+ const struct qed_common_ops *common;
+
+ int (*fill_dev_info)(struct qed_dev *cdev,
+ struct qed_dev_fcoe_info *info);
+
+ void (*register_ops)(struct qed_dev *cdev,
+ struct qed_fcoe_cb_ops *ops, void *cookie);
+
+ const struct qed_ll2_ops *ll2;
+
+ int (*start)(struct qed_dev *cdev, struct qed_fcoe_tid *tasks);
+
+ int (*stop)(struct qed_dev *cdev);
+
+ int (*acquire_conn)(struct qed_dev *cdev,
+ u32 *handle,
+ u32 *fw_cid, void __iomem **p_doorbell);
+
+ int (*release_conn)(struct qed_dev *cdev, u32 handle);
+
+ int (*offload_conn)(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_fcoe_params_offload *conn_info);
+ int (*destroy_conn)(struct qed_dev *cdev,
+ u32 handle, dma_addr_t terminate_params);
+
+ int (*get_stats)(struct qed_dev *cdev, struct qed_fcoe_stats *stats);
+};
+
+const struct qed_fcoe_ops *qed_get_fcoe_ops(void);
+void qed_put_fcoe_ops(void);
+#endif
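
For orientation, a minimal consumer sketch of the new qed_fcoe_ops interface (not part of the patch); the probe function, the empty callback table and the cookie choice are hypothetical, only the ops calls themselves come from this header:

#include <linux/errno.h>
#include <linux/qed/qed_fcoe_if.h>

/* Hypothetical driver-private callback table; fields would be filled in. */
static struct qed_fcoe_cb_ops qedf_cb_ops;

static int qedf_probe_sketch(struct qed_dev *cdev)
{
	const struct qed_fcoe_ops *ops = qed_get_fcoe_ops();
	struct qed_dev_fcoe_info dev_info;
	struct qed_fcoe_tid tasks;
	void __iomem *db;
	u32 handle, fw_cid;
	int rc;

	if (!ops)
		return -ENODEV;

	rc = ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto out_put;

	ops->register_ops(cdev, &qedf_cb_ops, cdev /* cookie */);

	rc = ops->start(cdev, &tasks);		/* qed fills the TID blocks */
	if (rc)
		goto out_put;

	/* One connection: qed returns the handle and its doorbell address. */
	rc = ops->acquire_conn(cdev, &handle, &fw_cid, &db);
	if (rc)
		ops->stop(cdev);

out_put:
	if (rc)
		qed_put_fcoe_ops();
	return rc;
}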
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 4b454f4..fde56c4 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -1,10 +1,33 @@
/* QLogic qed NIC Driver
- *
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_IF_H
@@ -36,7 +59,6 @@ enum dcbx_protocol_type {
#define QED_ROCE_PROTOCOL_INDEX (3)
-#ifdef CONFIG_DCB
#define QED_LLDP_CHASSIS_ID_STAT_LEN 4
#define QED_LLDP_PORT_ID_STAT_LEN 4
#define QED_DCBX_MAX_APP_PROTOCOL 32
@@ -132,7 +154,6 @@ struct qed_dcbx_get {
struct qed_dcbx_remote_params remote;
struct qed_dcbx_admin_params local;
};
-#endif
enum qed_led_mode {
QED_LED_MODE_OFF,
@@ -159,6 +180,38 @@ struct qed_eth_pf_params {
u16 num_cons;
};
+struct qed_fcoe_pf_params {
+ /* The following parameters are used during protocol-init */
+ u64 glbl_q_params_addr;
+ u64 bdq_pbl_base_addr[2];
+
+ /* The following parameters are used during HW-init
+ * and these parameters need to be passed as arguments
+ * to update_pf_params routine invoked before slowpath start
+ */
+ u16 num_cons;
+ u16 num_tasks;
+
+ /* The following parameters are used during protocol-init */
+ u16 sq_num_pbl_pages;
+
+ u16 cq_num_entries;
+ u16 cmdq_num_entries;
+ u16 rq_buffer_log_size;
+ u16 mtu;
+ u16 dummy_icid;
+ u16 bdq_xoff_threshold[2];
+ u16 bdq_xon_threshold[2];
+ u16 rq_buffer_size;
+ u8 num_cqs; /* num of global CQs */
+ u8 log_page_size;
+ u8 gl_rq_pi;
+ u8 gl_cmd_pi;
+ u8 debug_mode;
+ u8 is_target;
+ u8 bdq_pbl_num_entries[2];
+};
+
/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
struct qed_iscsi_pf_params {
u64 glbl_q_params_addr;
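
As the qed_fcoe_pf_params comments above say, num_cons and num_tasks must be handed to qed before the slowpath starts. A hedged sketch using the qed_fcoe_set_pf_params() helper declared in qed_fcoe_if.h; all sizing values are placeholders:

#include <linux/qed/qed_if.h>
#include <linux/qed/qed_fcoe_if.h>

static void qedf_set_pf_params_sketch(struct qed_dev *cdev)
{
	struct qed_fcoe_pf_params pf_params = {
		/* HW-init sizing: must be set before slowpath start. */
		.num_cons	  = 64,
		.num_tasks	  = 2048,
		/* Protocol-init parameters (illustrative values). */
		.sq_num_pbl_pages = 8,
		.cq_num_entries	  = 128,
		.mtu		  = 9000,
		.is_target	  = 0,
	};

	/* Hands the FCoE PF params to qed ahead of slowpath start. */
	qed_fcoe_set_pf_params(cdev, &pf_params);
}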
@@ -222,6 +275,7 @@ struct qed_rdma_pf_params {
struct qed_pf_params {
struct qed_eth_pf_params eth_pf_params;
+ struct qed_fcoe_pf_params fcoe_pf_params;
struct qed_iscsi_pf_params iscsi_pf_params;
struct qed_rdma_pf_params rdma_pf_params;
};
@@ -282,6 +336,7 @@ enum qed_sb_type {
enum qed_protocol {
QED_PROTOCOL_ETH,
QED_PROTOCOL_ISCSI,
+ QED_PROTOCOL_FCOE,
};
enum qed_link_mode_bits {
@@ -368,6 +423,7 @@ struct qed_int_info {
struct qed_common_cb_ops {
void (*link_update)(void *dev,
struct qed_link_output *link);
+ void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
};
struct qed_selftest_ops {
@@ -471,6 +527,10 @@ struct qed_common_ops {
void (*simd_handler_clean)(struct qed_dev *cdev,
int index);
+ int (*dbg_grc)(struct qed_dev *cdev,
+ void *buffer, u32 *num_dumped_bytes);
+
+ int (*dbg_grc_size)(struct qed_dev *cdev);
int (*dbg_all_data) (struct qed_dev *cdev, void *buffer);
diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h
index 5a4f8d0..ac2e6a3 100644
--- a/include/linux/qed/qed_iov_if.h
+++ b/include/linux/qed/qed_iov_if.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_IOV_IF_H
@@ -29,6 +53,8 @@ struct qed_iov_hv_ops {
int (*set_rate) (struct qed_dev *cdev, int vfid,
u32 min_rate, u32 max_rate);
+
+ int (*set_trust) (struct qed_dev *cdev, int vfid, bool trust);
};
#endif
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
index d279124..f70bb81 100644
--- a/include/linux/qed/qed_iscsi_if.h
+++ b/include/linux/qed/qed_iscsi_if.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_ISCSI_IF_H
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
index fd75c26..4fb4666 100644
--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -1,10 +1,33 @@
/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * Copyright (c) 2015 QLogic Corporation
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_LL2_IF_H
diff --git a/include/linux/qed/qed_roce_if.h b/include/linux/qed/qed_roce_if.h
index 53047d3..f742d43 100644
--- a/include/linux/qed/qed_roce_if.h
+++ b/include/linux/qed/qed_roce_if.h
@@ -1,5 +1,5 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015-2016 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h
index f48d64b..3b8dd55 100644
--- a/include/linux/qed/qede_roce.h
+++ b/include/linux/qed/qede_roce.h
@@ -1,5 +1,5 @@
/* QLogic qedr NIC Driver
- * Copyright (c) 2015-2016 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index 7663725..f773aa5 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef __RDMA_COMMON__
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
index 2eeaf3d..bad02df 100644
--- a/include/linux/qed/roce_common.h
+++ b/include/linux/qed/roce_common.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef __ROCE_COMMON__
diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h
index 3b8e1ef..03f3e37 100644
--- a/include/linux/qed/storage_common.h
+++ b/include/linux/qed/storage_common.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef __STORAGE_COMMON__
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
index dc3889d..46fe785 100644
--- a/include/linux/qed/tcp_common.h
+++ b/include/linux/qed/tcp_common.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef __TCP_COMMON__
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 5dea8f6..52bda85 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -306,7 +306,9 @@ void radix_tree_iter_replace(struct radix_tree_root *,
void radix_tree_replace_slot(struct radix_tree_root *root,
void **slot, void *item);
void __radix_tree_delete_node(struct radix_tree_root *root,
- struct radix_tree_node *node);
+ struct radix_tree_node *node,
+ radix_tree_update_node_t update_node,
+ void *private);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
void radix_tree_clear_tags(struct radix_tree_root *root,
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 321f9ed..6ade6a5 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -444,6 +444,10 @@ bool __rcu_is_watching(void);
#error "Unknown RCU implementation specified to kernel configuration"
#endif
+#define RCU_SCHEDULER_INACTIVE 0
+#define RCU_SCHEDULER_INIT 1
+#define RCU_SCHEDULER_RUNNING 2
+
/*
* init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
* initialization and destruction of rcu_head on the stack. rcu_head structures
@@ -1157,5 +1161,17 @@ do { \
ftrace_dump(oops_dump_mode); \
} while (0)
+/*
+ * Place this after a lock-acquisition primitive to guarantee that
+ * an UNLOCK+LOCK pair acts as a full barrier. This guarantee applies
+ * if the UNLOCK and LOCK are executed by the same CPU or if the
+ * UNLOCK and LOCK operate on the same lock variable.
+ */
+#ifdef CONFIG_PPC
+#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */
+#else /* #ifdef CONFIG_PPC */
+#define smp_mb__after_unlock_lock() do { } while (0)
+#endif /* #else #ifdef CONFIG_PPC */
+
#endif /* __LINUX_RCUPDATE_H */
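
A small illustration, under the stated guarantee, of where smp_mb__after_unlock_lock() sits: placed after the LOCK, it makes the preceding UNLOCK+LOCK pair on the same CPU behave as a full barrier (a no-op everywhere but PPC). The locks and variables are invented:

#include <linux/spinlock.h>
#include <linux/rcupdate.h>

static DEFINE_SPINLOCK(a);
static DEFINE_SPINLOCK(b);
static int x, y;

static void unlock_lock_barrier_sketch(void)
{
	spin_lock(&a);
	WRITE_ONCE(x, 1);
	spin_unlock(&a);

	spin_lock(&b);
	/* UNLOCK(a) followed by LOCK(b) on this CPU now orders like smp_mb(). */
	smp_mb__after_unlock_lock();
	WRITE_ONCE(y, 1);
	spin_unlock(&b);
}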
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index ac81e40..4f9b2fa 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -27,6 +27,12 @@
#include <linux/cache.h>
+struct rcu_dynticks;
+static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
+{
+ return 0;
+}
+
static inline unsigned long get_state_synchronize_rcu(void)
{
return 0;
diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h
new file mode 100644
index 0000000..a4ede51
--- /dev/null
+++ b/include/linux/rcuwait.h
@@ -0,0 +1,63 @@
+#ifndef _LINUX_RCUWAIT_H_
+#define _LINUX_RCUWAIT_H_
+
+#include <linux/rcupdate.h>
+
+/*
+ * rcuwait provides a way of blocking and waking up a single
+ * task in an rcu-safe manner; where it is forbidden to use
+ * after exit_notify(). task_struct is not properly rcu protected,
+ * unless dealing with rcu-aware lists, ie: find_task_by_*().
+ *
+ * Alternatively we have task_rcu_dereference(), but the return
+ * semantics have different implications which would break the
+ * wakeup side. The only time @task is non-nil is when a user is
+ * blocked (or checking if it needs to) on a condition, and reset
+ * as soon as we know that the condition has succeeded and are
+ * awoken.
+ */
+struct rcuwait {
+ struct task_struct *task;
+};
+
+#define __RCUWAIT_INITIALIZER(name) \
+ { .task = NULL, }
+
+static inline void rcuwait_init(struct rcuwait *w)
+{
+ w->task = NULL;
+}
+
+extern void rcuwait_wake_up(struct rcuwait *w);
+
+/*
+ * The caller is responsible for locking around rcuwait_wait_event(),
+ * such that writes to @task are properly serialized.
+ */
+#define rcuwait_wait_event(w, condition) \
+({ \
+ /* \
+ * Complain if we are called after do_exit()/exit_notify(), \
+ * as we cannot rely on the rcu critical region for the \
+ * wakeup side. \
+ */ \
+ WARN_ON(current->exit_state); \
+ \
+ rcu_assign_pointer((w)->task, current); \
+ for (;;) { \
+ /* \
+ * Implicit barrier (A) pairs with (B) in \
+ * rcuwait_wake_up(). \
+ */ \
+ set_current_state(TASK_UNINTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ \
+ schedule(); \
+ } \
+ \
+ WRITE_ONCE((w)->task, NULL); \
+ __set_current_state(TASK_RUNNING); \
+})
+
+#endif /* _LINUX_RCUWAIT_H_ */
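
A hedged usage sketch of the rcuwait API: the waiter blocks on a condition via rcuwait_wait_event() under its own serialization, the waker publishes the condition and then calls rcuwait_wake_up(). The done flag is illustrative:

#include <linux/rcuwait.h>
#include <linux/sched.h>

static struct rcuwait w = __RCUWAIT_INITIALIZER(w);
static bool done;

/* Waiter: the caller provides its own serialization around the wait. */
static void waiter_sketch(void)
{
	rcuwait_wait_event(&w, READ_ONCE(done));
}

/* Waker: publish the condition, then wake the single waiter. */
static void waker_sketch(void)
{
	WRITE_ONCE(done, true);
	rcuwait_wake_up(&w);	/* pairs with the barrier in rcuwait_wait_event() */
}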
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
new file mode 100644
index 0000000..600aadf
--- /dev/null
+++ b/include/linux/refcount.h
@@ -0,0 +1,294 @@
+#ifndef _LINUX_REFCOUNT_H
+#define _LINUX_REFCOUNT_H
+
+/*
+ * Variant of atomic_t specialized for reference counts.
+ *
+ * The interface matches the atomic_t interface (to aid in porting) but only
+ * provides the few functions one should use for reference counting.
+ *
+ * It differs in that the counter saturates at UINT_MAX and will not move once
+ * there. This avoids wrapping the counter and causing 'spurious'
+ * use-after-free issues.
+ *
+ * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
+ * and provide only what is strictly required for refcounts.
+ *
+ * The increments are fully relaxed; these will not provide ordering. The
+ * rationale is that whatever is used to obtain the object we're increasing the
+ * reference count on will provide the ordering. For locked data structures,
+ * it's the lock acquire, for RCU/lockless data structures it's the dependent
+ * load.
+ *
+ * Do note that inc_not_zero() provides a control dependency which will order
+ * future stores against the inc, this ensures we'll never modify the object
+ * if we did not in fact acquire a reference.
+ *
+ * The decrements will provide release order, such that all the prior loads and
+ * stores will be issued before, it also provides a control dependency, which
+ * will order us against the subsequent free().
+ *
+ * The control dependency is against the load of the cmpxchg (ll/sc) that
+ * succeeded. This means the stores aren't fully ordered, but this is fine
+ * because the 1->0 transition indicates no concurrency.
+ *
+ * Note that the allocator is responsible for ordering things between free()
+ * and alloc().
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_DEBUG_REFCOUNT
+#define REFCOUNT_WARN(cond, str) WARN_ON(cond)
+#define __refcount_check __must_check
+#else
+#define REFCOUNT_WARN(cond, str) (void)(cond)
+#define __refcount_check
+#endif
+
+typedef struct refcount_struct {
+ atomic_t refs;
+} refcount_t;
+
+#define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
+
+static inline void refcount_set(refcount_t *r, unsigned int n)
+{
+ atomic_set(&r->refs, n);
+}
+
+static inline unsigned int refcount_read(const refcount_t *r)
+{
+ return atomic_read(&r->refs);
+}
+
+static inline __refcount_check
+bool refcount_add_not_zero(unsigned int i, refcount_t *r)
+{
+ unsigned int old, new, val = atomic_read(&r->refs);
+
+ for (;;) {
+ if (!val)
+ return false;
+
+ if (unlikely(val == UINT_MAX))
+ return true;
+
+ new = val + i;
+ if (new < val)
+ new = UINT_MAX;
+ old = atomic_cmpxchg_relaxed(&r->refs, val, new);
+ if (old == val)
+ break;
+
+ val = old;
+ }
+
+ REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+
+ return true;
+}
+
+static inline void refcount_add(unsigned int i, refcount_t *r)
+{
+ REFCOUNT_WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+}
+
+/*
+ * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_inc_not_zero(refcount_t *r)
+{
+ unsigned int old, new, val = atomic_read(&r->refs);
+
+ for (;;) {
+ new = val + 1;
+
+ if (!val)
+ return false;
+
+ if (unlikely(!new))
+ return true;
+
+ old = atomic_cmpxchg_relaxed(&r->refs, val, new);
+ if (old == val)
+ break;
+
+ val = old;
+ }
+
+ REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+
+ return true;
+}
+
+/*
+ * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller already has a
+ * reference on the object, will WARN when this is not so.
+ */
+static inline void refcount_inc(refcount_t *r)
+{
+ REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+}
+
+/*
+ * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
+ * decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+{
+ unsigned int old, new, val = atomic_read(&r->refs);
+
+ for (;;) {
+ if (unlikely(val == UINT_MAX))
+ return false;
+
+ new = val - i;
+ if (new > val) {
+ REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+ return false;
+ }
+
+ old = atomic_cmpxchg_release(&r->refs, val, new);
+ if (old == val)
+ break;
+
+ val = old;
+ }
+
+ return !new;
+}
+
+static inline __refcount_check
+bool refcount_dec_and_test(refcount_t *r)
+{
+ return refcount_sub_and_test(1, r);
+}
+
+/*
+ * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
+ * when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before.
+ */
+static inline
+void refcount_dec(refcount_t *r)
+{
+ REFCOUNT_WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
+}
+
+/*
+ * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
+ * success thereof.
+ *
+ * Like all decrement operations, it provides release memory order and provides
+ * a control dependency.
+ *
+ * It can be used like a try-delete operator; this explicit case is provided
+ * and not cmpxchg in generic, because that would allow implementing unsafe
+ * operations.
+ */
+static inline __refcount_check
+bool refcount_dec_if_one(refcount_t *r)
+{
+ return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
+}
+
+/*
+ * No atomic_t counterpart, it decrements unless the value is 1, in which case
+ * it will return false.
+ *
+ * Was often done like: atomic_add_unless(&var, -1, 1)
+ */
+static inline __refcount_check
+bool refcount_dec_not_one(refcount_t *r)
+{
+ unsigned int old, new, val = atomic_read(&r->refs);
+
+ for (;;) {
+ if (unlikely(val == UINT_MAX))
+ return true;
+
+ if (val == 1)
+ return false;
+
+ new = val - 1;
+ if (new > val) {
+ REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+ return true;
+ }
+
+ old = atomic_cmpxchg_release(&r->refs, val, new);
+ if (old == val)
+ break;
+
+ val = old;
+ }
+
+ return true;
+}
+
+/*
+ * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
+ * to decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
+{
+ if (refcount_dec_not_one(r))
+ return false;
+
+ mutex_lock(lock);
+ if (!refcount_dec_and_test(r)) {
+ mutex_unlock(lock);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
+ * decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
+{
+ if (refcount_dec_not_one(r))
+ return false;
+
+ spin_lock(lock);
+ if (!refcount_dec_and_test(r)) {
+ spin_unlock(lock);
+ return false;
+ }
+
+ return true;
+}
+
+#endif /* _LINUX_REFCOUNT_H */
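
A short sketch of the get/put pattern these comments describe, using only the functions defined above; the foo object is invented:

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t ref;
	/* payload ... */
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		refcount_set(&f->ref, 1);	/* caller holds the first reference */
	return f;
}

/* Lookup path: only succeed if the object is still live (count != 0). */
static struct foo *foo_get(struct foo *f)
{
	return refcount_inc_not_zero(&f->ref) ? f : NULL;
}

static void foo_put(struct foo *f)
{
	/* Release ordering: prior stores to *f happen before the free. */
	if (refcount_dec_and_test(&f->ref))
		kfree(f);
}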
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index f667313..e886492 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -40,12 +40,13 @@ enum regcache_type {
};
/**
- * Default value for a register. We use an array of structs rather
- * than a simple array as many modern devices have very sparse
- * register maps.
+ * struct reg_default - Default value for a register.
*
* @reg: Register address.
* @def: Register default value.
+ *
+ * We use an array of structs rather than a simple array as many modern devices
+ * have very sparse register maps.
*/
struct reg_default {
unsigned int reg;
@@ -53,12 +54,14 @@ struct reg_default {
};
/**
- * Register/value pairs for sequences of writes with an optional delay in
- * microseconds to be applied after each write.
+ * struct reg_sequence - An individual write from a sequence of writes.
*
* @reg: Register address.
* @def: Register value.
* @delay_us: Delay to be applied after the register write in microseconds
+ *
+ * Register/value pairs for sequences of writes with an optional delay in
+ * microseconds to be applied after each write.
*/
struct reg_sequence {
unsigned int reg;
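
An illustrative reg_sequence in use (registers, values and the delay are invented), applied through regmap_multi_reg_write():

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Power-up sequence: the second write is followed by a settle delay. */
static const struct reg_sequence foo_powerup_seq[] = {
	{ .reg = 0x01, .def = 0x80 },
	{ .reg = 0x02, .def = 0x3f, .delay_us = 100 },	/* wait for PLL lock */
	{ .reg = 0x03, .def = 0x01 },
};

static int foo_power_up(struct regmap *map)
{
	return regmap_multi_reg_write(map, foo_powerup_seq,
				      ARRAY_SIZE(foo_powerup_seq));
}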
@@ -98,6 +101,7 @@ struct reg_sequence {
/**
* regmap_read_poll_timeout - Poll until a condition is met or a timeout occurs
+ *
* @map: Regmap to read from
* @addr: Address to poll
* @val: Unsigned integer variable to read the value into
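
An illustrative call of the polling macro (the status register and ready bit are invented): poll every 1 ms, give up after 100 ms, return 0 once the condition holds or a negative error otherwise:

#include <linux/bitops.h>
#include <linux/regmap.h>

#define FOO_STATUS	0x10		/* hypothetical status register */
#define FOO_READY	BIT(0)

static int foo_wait_ready(struct regmap *map)
{
	unsigned int val;

	/* Reads FOO_STATUS until FOO_READY is set: 1 ms poll, 100 ms timeout. */
	return regmap_read_poll_timeout(map, FOO_STATUS, val,
					val & FOO_READY, 1000, 100000);
}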
@@ -146,8 +150,8 @@ enum regmap_endian {
};
/**
- * A register range, used for access related checks
- * (readable/writeable/volatile/precious checks)
+ * struct regmap_range - A register range, used for access related checks
+ * (readable/writeable/volatile/precious checks)
*
* @range_min: address of first register
* @range_max: address of last register
@@ -159,16 +163,18 @@ struct regmap_range {
#define regmap_reg_range(low, high) { .range_min = low, .range_max = high, }
-/*
- * A table of ranges including some yes ranges and some no ranges.
- * If a register belongs to a no_range, the corresponding check function
- * will return false. If a register belongs to a yes range, the corresponding
- * check function will return true. "no_ranges" are searched first.
+/**
+ * struct regmap_access_table - A table of register ranges for access checks
*
* @yes_ranges : pointer to an array of regmap ranges used as "yes ranges"
* @n_yes_ranges: size of the above array
* @no_ranges: pointer to an array of regmap ranges used as "no ranges"
* @n_no_ranges: size of the above array
+ *
+ * A table of ranges including some yes ranges and some no ranges.
+ * If a register belongs to a no_range, the corresponding check function
+ * will return false. If a register belongs to a yes range, the corresponding
+ * check function will return true. "no_ranges" are searched first.
*/
struct regmap_access_table {
const struct regmap_range *yes_ranges;
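
A hedged example of the yes/no-range semantics just described (addresses invented): writes to 0x00-0x0f are allowed except 0x08, which sits in a no-range and is checked first:

#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct regmap_range foo_wr_yes_ranges[] = {
	regmap_reg_range(0x00, 0x0f),
};

static const struct regmap_range foo_wr_no_ranges[] = {
	regmap_reg_range(0x08, 0x08),	/* searched first, so 0x08 is denied */
};

static const struct regmap_access_table foo_wr_table = {
	.yes_ranges   = foo_wr_yes_ranges,
	.n_yes_ranges = ARRAY_SIZE(foo_wr_yes_ranges),
	.no_ranges    = foo_wr_no_ranges,
	.n_no_ranges  = ARRAY_SIZE(foo_wr_no_ranges),
};

/* Typically hooked up through the wr_table field of regmap_config. */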
@@ -181,7 +187,7 @@ typedef void (*regmap_lock)(void *);
typedef void (*regmap_unlock)(void *);
/**
- * Configuration for the register map of a device.
+ * struct regmap_config - Configuration for the register map of a device.
*
* @name: Optional name of the regmap. Useful when a device has multiple
* register regions.
@@ -314,22 +320,24 @@ struct regmap_config {
};
/**
- * Configuration for indirectly accessed or paged registers.
- * Registers, mapped to this virtual range, are accessed in two steps:
- * 1. page selector register update;
- * 2. access through data window registers.
+ * struct regmap_range_cfg - Configuration for indirectly accessed or paged
+ * registers.
*
* @name: Descriptive name for diagnostics
*
* @range_min: Address of the lowest register address in virtual range.
* @range_max: Address of the highest register in virtual range.
*
- * @page_sel_reg: Register with selector field.
- * @page_sel_mask: Bit shift for selector value.
- * @page_sel_shift: Bit mask for selector value.
+ * @selector_reg: Register with selector field.
+ * @selector_mask: Bit mask for selector value.
+ * @selector_shift: Bit shift for selector value.
*
* @window_start: Address of first (lowest) register in data window.
* @window_len: Number of registers in data window.
+ *
+ * Registers, mapped to this virtual range, are accessed in two steps:
+ * 1. page selector register update;
+ * 2. access through data window registers.
*/
struct regmap_range_cfg {
const char *name;
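
A sketch of the two-step paged access being documented here, with invented numbers: virtual registers 0x100-0x1ff are reached through a selector at 0x0f and an 8-register window at 0x10:

#include <linux/regmap.h>

static const struct regmap_range_cfg foo_ranges[] = {
	{
		.name		= "pages",
		.range_min	= 0x100,
		.range_max	= 0x1ff,
		.selector_reg	= 0x0f,
		.selector_mask	= 0x3f,
		.selector_shift	= 0,
		.window_start	= 0x10,
		.window_len	= 8,
	},
};

/* Referenced from regmap_config as .ranges = foo_ranges and
 * .num_ranges = ARRAY_SIZE(foo_ranges); regmap then performs the
 * selector write and the window access transparently for reads and
 * writes in the 0x100..0x1ff range.
 */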
@@ -372,7 +380,8 @@ typedef struct regmap_async *(*regmap_hw_async_alloc)(void);
typedef void (*regmap_hw_free_context)(void *context);
/**
- * Description of a hardware bus for the register map infrastructure.
+ * struct regmap_bus - Description of a hardware bus for the register map
+ * infrastructure.
*
* @fast_io: Register IO is fast. Use a spinlock instead of a mutex
* to perform locking. This field is ignored if custom lock/unlock
@@ -385,6 +394,10 @@ typedef void (*regmap_hw_free_context)(void *context);
* must serialise with respect to non-async I/O.
* @reg_write: Write a single register value to the given register address. This
* write operation has to complete when returning from the function.
+ * @reg_update_bits: Update bits operation to be used against volatile
+ * registers, intended for devices supporting some mechanism
+ * for setting/clearing bits without having to
+ * read/modify/write.
* @read: Read operation. Data is returned in the buffer used to transmit
* data.
* @reg_read: Read a single register value from a given register address.
@@ -514,7 +527,7 @@ struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
#endif
/**
- * regmap_init(): Initialise register map
+ * regmap_init() - Initialise register map
*
* @dev: Device that will be interacted with
* @bus: Bus-specific callbacks to use with device
@@ -532,7 +545,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
const struct regmap_config *config);
/**
- * regmap_init_i2c(): Initialise register map
+ * regmap_init_i2c() - Initialise register map
*
* @i2c: Device that will be interacted with
* @config: Configuration for register map
@@ -545,9 +558,9 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
i2c, config)
/**
- * regmap_init_spi(): Initialise register map
+ * regmap_init_spi() - Initialise register map
*
- * @spi: Device that will be interacted with
+ * @dev: Device that will be interacted with
* @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer to
@@ -558,8 +571,9 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
dev, config)
/**
- * regmap_init_spmi_base(): Create regmap for the Base register space
- * @sdev: SPMI device that will be interacted with
+ * regmap_init_spmi_base() - Create regmap for the Base register space
+ *
+ * @dev: SPMI device that will be interacted with
* @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer to
@@ -570,8 +584,9 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
dev, config)
/**
- * regmap_init_spmi_ext(): Create regmap for Ext register space
- * @sdev: Device that will be interacted with
+ * regmap_init_spmi_ext() - Create regmap for Ext register space
+ *
+ * @dev: Device that will be interacted with
* @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer to
@@ -582,7 +597,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
dev, config)
/**
- * regmap_init_mmio_clk(): Initialise register map with register clock
+ * regmap_init_mmio_clk() - Initialise register map with register clock
*
* @dev: Device that will be interacted with
* @clk_id: register clock consumer ID
@@ -597,7 +612,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
dev, clk_id, regs, config)
/**
- * regmap_init_mmio(): Initialise register map
+ * regmap_init_mmio() - Initialise register map
*
* @dev: Device that will be interacted with
* @regs: Pointer to memory-mapped IO region
@@ -610,7 +625,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
regmap_init_mmio_clk(dev, NULL, regs, config)
/**
- * regmap_init_ac97(): Initialise AC'97 register map
+ * regmap_init_ac97() - Initialise AC'97 register map
*
* @ac97: Device that will be interacted with
* @config: Configuration for register map
@@ -624,7 +639,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
/**
- * devm_regmap_init(): Initialise managed register map
+ * devm_regmap_init() - Initialise managed register map
*
* @dev: Device that will be interacted with
* @bus: Bus-specific callbacks to use with device
@@ -641,7 +656,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
dev, bus, bus_context, config)
/**
- * devm_regmap_init_i2c(): Initialise managed register map
+ * devm_regmap_init_i2c() - Initialise managed register map
*
* @i2c: Device that will be interacted with
* @config: Configuration for register map
@@ -655,9 +670,9 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
i2c, config)
/**
- * devm_regmap_init_spi(): Initialise register map
+ * devm_regmap_init_spi() - Initialise register map
*
- * @spi: Device that will be interacted with
+ * @dev: Device that will be interacted with
* @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer
@@ -669,8 +684,9 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
dev, config)
/**
- * devm_regmap_init_spmi_base(): Create managed regmap for Base register space
- * @sdev: SPMI device that will be interacted with
+ * devm_regmap_init_spmi_base() - Create managed regmap for Base register space
+ *
+ * @dev: SPMI device that will be interacted with
* @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer
@@ -682,8 +698,9 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
dev, config)
/**
- * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space
- * @sdev: SPMI device that will be interacted with
+ * devm_regmap_init_spmi_ext() - Create managed regmap for Ext register space
+ *
+ * @dev: SPMI device that will be interacted with
* @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer
@@ -695,7 +712,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
dev, config)
/**
- * devm_regmap_init_mmio_clk(): Initialise managed register map with clock
+ * devm_regmap_init_mmio_clk() - Initialise managed register map with clock
*
* @dev: Device that will be interacted with
* @clk_id: register clock consumer ID
@@ -711,7 +728,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
dev, clk_id, regs, config)
/**
- * devm_regmap_init_mmio(): Initialise managed register map
+ * devm_regmap_init_mmio() - Initialise managed register map
*
* @dev: Device that will be interacted with
* @regs: Pointer to memory-mapped IO region
@@ -725,7 +742,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
devm_regmap_init_mmio_clk(dev, NULL, regs, config)
/**
- * devm_regmap_init_ac97(): Initialise AC'97 register map
+ * devm_regmap_init_ac97() - Initialise AC'97 register map
*
* @ac97: Device that will be interacted with
* @config: Configuration for register map
@@ -800,7 +817,7 @@ bool regmap_reg_in_ranges(unsigned int reg,
unsigned int nranges);
/**
- * Description of an register field
+ * struct reg_field - Description of a register field
*
* @reg: Offset of the register within the regmap bank
* @lsb: lsb of the register field.
@@ -841,7 +858,7 @@ int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
bool *change, bool async, bool force);
/**
- * Description of an IRQ for the generic regmap irq_chip.
+ * struct regmap_irq - Description of an IRQ for the generic regmap irq_chip.
*
* @reg_offset: Offset of the status/mask register within the bank
* @mask: Mask used to flag/control the register.
@@ -861,9 +878,7 @@ struct regmap_irq {
[_irq] = { .reg_offset = (_off), .mask = (_mask) }
/**
- * Description of a generic regmap irq_chip. This is not intended to
- * handle every possible interrupt controller, but it should handle a
- * substantial proportion of those that are found in the wild.
+ * struct regmap_irq_chip - Description of a generic regmap irq_chip.
*
* @name: Descriptive name for IRQ controller.
*
@@ -897,6 +912,10 @@ struct regmap_irq {
* after handling the interrupts in regmap_irq_handler().
* @irq_drv_data: Driver specific IRQ data which is passed as parameter when
* driver specific pre/post interrupt handler is called.
+ *
+ * This is not intended to handle every possible interrupt controller, but
+ * it should handle a substantial proportion of those that are found in the
+ * wild.
*/
struct regmap_irq_chip {
const char *name;
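
A minimal sketch (assuming an I2C client and an 8-bit register layout; the my_chip names are illustrative, not taken from the patch) of how a driver might create one of the managed maps documented above:

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config my_chip_regmap_config = {
	.reg_bits = 8,		/* 8-bit register addresses (assumed) */
	.val_bits = 8,		/* 8-bit register values (assumed) */
	.max_register = 0x7f,
};

/* Called from the client driver's probe path. */
static int my_chip_init_regmap(struct i2c_client *i2c)
{
	struct regmap *map;

	map = devm_regmap_init_i2c(i2c, &my_chip_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);	/* ERR_PTR() convention noted above */

	/* set bit 0 of register 0x01 through the map */
	return regmap_update_bits(map, 0x01, BIT(0), BIT(0));
}
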
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index e2f3a32..8265d35 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -408,7 +408,8 @@ enum rproc_crash_type {
* @crash_comp: completion used to sync crash handler and the rproc reload
* @recovery_disabled: flag that states if recovery was disabled
* @max_notifyid: largest allocated notify id.
- * @table_ptr: our copy of the resource table
+ * @table_ptr: pointer to the resource table in effect
+ * @cached_table: copy of the resource table
* @has_iommu: flag to indicate if remote processor is behind an MMU
*/
struct rproc {
@@ -440,6 +441,7 @@ struct rproc {
bool recovery_disabled;
int max_notifyid;
struct resource_table *table_ptr;
+ struct resource_table *cached_table;
bool has_iommu;
bool auto_boot;
};
diff --git a/include/linux/rfkill-regulator.h b/include/linux/rfkill-regulator.h
deleted file mode 100644
index aca36bc..0000000
--- a/include/linux/rfkill-regulator.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * rfkill-regulator.c - Regulator consumer driver for rfkill
- *
- * Copyright (C) 2009 Guiming Zhuo <gmzhuo@gmail.com>
- * Copyright (C) 2011 Antonio Ospite <ospite@studenti.unina.it>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#ifndef __LINUX_RFKILL_REGULATOR_H
-#define __LINUX_RFKILL_REGULATOR_H
-
-/*
- * Use "vrfkill" as supply id when declaring the regulator consumer:
- *
- * static struct regulator_consumer_supply pcap_regulator_V6_consumers [] = {
- * { .dev_name = "rfkill-regulator.0", .supply = "vrfkill" },
- * };
- *
- * If you have several regulator driven rfkill, you can append a numerical id to
- * .dev_name as done above, and use the same id when declaring the platform
- * device:
- *
- * static struct rfkill_regulator_platform_data ezx_rfkill_bt_data = {
- * .name = "ezx-bluetooth",
- * .type = RFKILL_TYPE_BLUETOOTH,
- * };
- *
- * static struct platform_device a910_rfkill = {
- * .name = "rfkill-regulator",
- * .id = 0,
- * .dev = {
- * .platform_data = &ezx_rfkill_bt_data,
- * },
- * };
- */
-
-#include <linux/rfkill.h>
-
-struct rfkill_regulator_platform_data {
- char *name; /* the name for the rfkill switch */
- enum rfkill_type type; /* the type as specified in rfkill.h */
-};
-
-#endif /* __LINUX_RFKILL_REGULATOR_H */
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 5c132d3..f2e12a8 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -61,6 +61,7 @@ struct rhlist_head {
/**
* struct bucket_table - Table of hash buckets
* @size: Number of hash buckets
+ * @nest: Number of bits of first-level nested table.
* @rehash: Current bucket being rehashed
* @hash_rnd: Random seed to fold into hash
* @locks_mask: Mask to apply before accessing locks[]
@@ -68,10 +69,12 @@ struct rhlist_head {
* @walkers: List of active walkers
* @rcu: RCU structure for freeing the table
* @future_tbl: Table under construction during rehashing
+ * @ntbl: Nested table used when out of memory.
* @buckets: size * hash buckets
*/
struct bucket_table {
unsigned int size;
+ unsigned int nest;
unsigned int rehash;
u32 hash_rnd;
unsigned int locks_mask;
@@ -81,7 +84,7 @@ struct bucket_table {
struct bucket_table __rcu *future_tbl;
- struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
+ struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
/**
@@ -374,6 +377,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
void *arg);
void rhashtable_destroy(struct rhashtable *ht);
+struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
+ unsigned int hash);
+struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
+ struct bucket_table *tbl,
+ unsigned int hash);
+
#define rht_dereference(p, ht) \
rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -389,6 +398,27 @@ void rhashtable_destroy(struct rhashtable *ht);
#define rht_entry(tpos, pos, member) \
({ tpos = container_of(pos, typeof(*tpos), member); 1; })
+static inline struct rhash_head __rcu *const *rht_bucket(
+ const struct bucket_table *tbl, unsigned int hash)
+{
+ return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
+ &tbl->buckets[hash];
+}
+
+static inline struct rhash_head __rcu **rht_bucket_var(
+ struct bucket_table *tbl, unsigned int hash)
+{
+ return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
+ &tbl->buckets[hash];
+}
+
+static inline struct rhash_head __rcu **rht_bucket_insert(
+ struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
+{
+ return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
+ &tbl->buckets[hash];
+}
+
/**
* rht_for_each_continue - continue iterating over hash chain
* @pos: the &struct rhash_head to use as a loop cursor.
@@ -408,7 +438,7 @@ void rhashtable_destroy(struct rhashtable *ht);
* @hash: the hash value / bucket index
*/
#define rht_for_each(pos, tbl, hash) \
- rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
+ rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
/**
* rht_for_each_entry_continue - continue iterating over hash chain
@@ -433,7 +463,7 @@ void rhashtable_destroy(struct rhashtable *ht);
* @member: name of the &struct rhash_head within the hashable struct.
*/
#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
- rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \
+ rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash), \
tbl, hash, member)
/**
@@ -448,13 +478,13 @@ void rhashtable_destroy(struct rhashtable *ht);
* This hash chain list-traversal primitive allows for the looped code to
* remove the loop cursor from the list.
*/
-#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
- for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
- next = !rht_is_a_nulls(pos) ? \
- rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
- (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
- pos = next, \
- next = !rht_is_a_nulls(pos) ? \
+#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
+ for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \
+ next = !rht_is_a_nulls(pos) ? \
+ rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
+ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
+ pos = next, \
+ next = !rht_is_a_nulls(pos) ? \
rht_dereference_bucket(pos->next, tbl, hash) : NULL)
/**
@@ -485,7 +515,7 @@ void rhashtable_destroy(struct rhashtable *ht);
* traversal is guarded by rcu_read_lock().
*/
#define rht_for_each_rcu(pos, tbl, hash) \
- rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
+ rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
/**
* rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
@@ -518,8 +548,8 @@ void rhashtable_destroy(struct rhashtable *ht);
* the _rcu mutation primitives such as rhashtable_insert() as long as the
* traversal is guarded by rcu_read_lock().
*/
-#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
- rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
+#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
+ rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \
tbl, hash, member)
/**
@@ -565,7 +595,7 @@ static inline struct rhash_head *__rhashtable_lookup(
.ht = ht,
.key = key,
};
- const struct bucket_table *tbl;
+ struct bucket_table *tbl;
struct rhash_head *he;
unsigned int hash;
@@ -697,8 +727,12 @@ slow_path:
}
elasticity = ht->elasticity;
- pprev = &tbl->buckets[hash];
- rht_for_each(head, tbl, hash) {
+ pprev = rht_bucket_insert(ht, tbl, hash);
+ data = ERR_PTR(-ENOMEM);
+ if (!pprev)
+ goto out;
+
+ rht_for_each_continue(head, *pprev, tbl, hash) {
struct rhlist_head *plist;
struct rhlist_head *list;
@@ -736,7 +770,7 @@ slow_path:
if (unlikely(rht_grow_above_100(ht, tbl)))
goto slow_path;
- head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
+ head = rht_dereference_bucket(*pprev, tbl, hash);
RCU_INIT_POINTER(obj->next, head);
if (rhlist) {
@@ -746,7 +780,7 @@ slow_path:
RCU_INIT_POINTER(list->next, NULL);
}
- rcu_assign_pointer(tbl->buckets[hash], obj);
+ rcu_assign_pointer(*pprev, obj);
atomic_inc(&ht->nelems);
if (rht_grow_above_75(ht, tbl))
@@ -955,8 +989,8 @@ static inline int __rhashtable_remove_fast_one(
spin_lock_bh(lock);
- pprev = &tbl->buckets[hash];
- rht_for_each(he, tbl, hash) {
+ pprev = rht_bucket_var(tbl, hash);
+ rht_for_each_continue(he, *pprev, tbl, hash) {
struct rhlist_head *list;
list = container_of(he, struct rhlist_head, rhead);
@@ -1107,8 +1141,8 @@ static inline int __rhashtable_replace_fast(
spin_lock_bh(lock);
- pprev = &tbl->buckets[hash];
- rht_for_each(he, tbl, hash) {
+ pprev = rht_bucket_var(tbl, hash);
+ rht_for_each_continue(he, *pprev, tbl, hash) {
if (he != obj_old) {
pprev = &he->next;
continue;
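
A usage sketch, assuming a caller-defined object and key (the my_* names are illustrative): ordinary insert/lookup callers are unchanged by the nested-table support above, which the rht_bucket*() helpers apply transparently inside the iteration macros.

#include <linux/kernel.h>
#include <linux/rhashtable.h>

struct my_obj {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params my_params = {
	.key_len     = sizeof(u32),
	.key_offset  = offsetof(struct my_obj, key),
	.head_offset = offsetof(struct my_obj, node),
	.automatic_shrinking = true,
};

/* The table itself is set up once with rhashtable_init(ht, &my_params). */

static int my_add(struct rhashtable *ht, struct my_obj *obj)
{
	return rhashtable_insert_fast(ht, &obj->node, my_params);
}

static struct my_obj *my_find(struct rhashtable *ht, u32 key)
{
	return rhashtable_lookup_fast(ht, &key, my_params);
}
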
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index f017fd6..d4e0a20 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -259,6 +259,26 @@ static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
unsigned int sbitmap_weight(const struct sbitmap *sb);
/**
+ * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
+ * @sb: Bitmap to show.
+ * @m: struct seq_file to write to.
+ *
+ * This is intended for debugging. The format may change at any time.
+ */
+void sbitmap_show(struct sbitmap *sb, struct seq_file *m);
+
+/**
+ * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
+ * seq_file.
+ * @sb: Bitmap to show.
+ * @m: struct seq_file to write to.
+ *
+ * This is intended for debugging. The output isn't guaranteed to be internally
+ * consistent.
+ */
+void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m);
+
+/**
* sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific
* memory node.
* @sbq: Bitmap queue to initialize.
@@ -370,4 +390,14 @@ static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
*/
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);
+/**
+ * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
+ * seq_file.
+ * @sbq: Bitmap queue to show.
+ * @m: struct seq_file to write to.
+ *
+ * This is intended for debugging. The format may change at any time.
+ */
+void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);
+
#endif /* __LINUX_SCALE_BITMAP_H */
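
A sketch of how the new dump helpers might be wired to a debugfs file through the standard seq_file interface; the sbitmap_queue pointer handed to the file and the fops name are assumptions.

#include <linux/debugfs.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

static int my_sbq_show(struct seq_file *m, void *unused)
{
	struct sbitmap_queue *sbq = m->private;	/* set by single_open() below */

	sbitmap_queue_show(sbq, m);
	return 0;
}

static int my_sbq_open(struct inode *inode, struct file *file)
{
	return single_open(file, my_sbq_show, inode->i_private);
}

static const struct file_operations my_sbq_fops = {
	.open    = my_sbq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* e.g. debugfs_create_file("sbitmap_queue", 0400, parent, sbq, &my_sbq_fops); */
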
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4d19052..c8e519d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -29,7 +29,6 @@ struct sched_param {
#include <asm/page.h>
#include <asm/ptrace.h>
-#include <linux/cputime.h>
#include <linux/smp.h>
#include <linux/sem.h>
@@ -227,7 +226,7 @@ extern void proc_sched_set_task(struct task_struct *p);
extern char ___assert_task_state[1 - 2*!!(
sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
-/* Convenience macros for the sake of set_task_state */
+/* Convenience macros for the sake of set_current_state */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
@@ -254,17 +253,6 @@ extern char ___assert_task_state[1 - 2*!!(
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-#define __set_task_state(tsk, state_value) \
- do { \
- (tsk)->task_state_change = _THIS_IP_; \
- (tsk)->state = (state_value); \
- } while (0)
-#define set_task_state(tsk, state_value) \
- do { \
- (tsk)->task_state_change = _THIS_IP_; \
- smp_store_mb((tsk)->state, (state_value)); \
- } while (0)
-
#define __set_current_state(state_value) \
do { \
current->task_state_change = _THIS_IP_; \
@@ -277,20 +265,6 @@ extern char ___assert_task_state[1 - 2*!!(
} while (0)
#else
-
-/*
- * @tsk had better be current, or you get to keep the pieces.
- *
- * The only reason is that computing current can be more expensive than
- * using a pointer that's already available.
- *
- * Therefore, see set_current_state().
- */
-#define __set_task_state(tsk, state_value) \
- do { (tsk)->state = (state_value); } while (0)
-#define set_task_state(tsk, state_value) \
- smp_store_mb((tsk)->state, (state_value))
-
/*
* set_current_state() includes a barrier so that the write of current->state
* is correctly serialised wrt the caller's subsequent test of whether to
@@ -461,12 +435,10 @@ extern signed long schedule_timeout_idle(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
+extern int __must_check io_schedule_prepare(void);
+extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
-
-static inline void io_schedule(void)
-{
- io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
-}
+extern void io_schedule(void);
void __noreturn do_task_dead(void);
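
A sketch of the pattern the new prepare/finish pair enables, assuming a caller-supplied mutex: any sleeping primitive can be bracketed so the sleep is accounted as I/O wait, roughly what io_schedule() itself can now be built from.

#include <linux/mutex.h>
#include <linux/sched.h>

/* Sketch: take a mutex while accounting the sleep as I/O wait. */
static void my_mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();	/* mark the task as waiting on I/O */
	mutex_lock(lock);		/* any sleeping call fits here */
	io_schedule_finish(token);	/* restore the previous iowait state */
}
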
@@ -565,15 +537,13 @@ struct pacct_struct {
int ac_flag;
long ac_exitcode;
unsigned long ac_mem;
- cputime_t ac_utime, ac_stime;
+ u64 ac_utime, ac_stime;
unsigned long ac_minflt, ac_majflt;
};
struct cpu_itimer {
- cputime_t expires;
- cputime_t incr;
- u32 error;
- u32 incr_error;
+ u64 expires;
+ u64 incr;
};
/**
@@ -587,8 +557,8 @@ struct cpu_itimer {
*/
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- cputime_t utime;
- cputime_t stime;
+ u64 utime;
+ u64 stime;
raw_spinlock_t lock;
#endif
};
@@ -603,8 +573,8 @@ static inline void prev_cputime_init(struct prev_cputime *prev)
/**
* struct task_cputime - collected CPU time counts
- * @utime: time spent in user mode, in &cputime_t units
- * @stime: time spent in kernel mode, in &cputime_t units
+ * @utime: time spent in user mode, in nanoseconds
+ * @stime: time spent in kernel mode, in nanoseconds
* @sum_exec_runtime: total time spent on the CPU, in nanoseconds
*
* This structure groups together three kinds of CPU time that are tracked for
@@ -612,8 +582,8 @@ static inline void prev_cputime_init(struct prev_cputime *prev)
* these counts together and treat all three of them in parallel.
*/
struct task_cputime {
- cputime_t utime;
- cputime_t stime;
+ u64 utime;
+ u64 stime;
unsigned long long sum_exec_runtime;
};
@@ -622,13 +592,6 @@ struct task_cputime {
#define prof_exp stime
#define sched_exp sum_exec_runtime
-#define INIT_CPUTIME \
- (struct task_cputime) { \
- .utime = 0, \
- .stime = 0, \
- .sum_exec_runtime = 0, \
- }
-
/*
* This is the atomic variant of task_cputime, which can be used for
* storing and updating task_cputime statistics without locking.
@@ -734,13 +697,14 @@ struct signal_struct {
unsigned int is_child_subreaper:1;
unsigned int has_child_subreaper:1;
+#ifdef CONFIG_POSIX_TIMERS
+
/* POSIX.1b Interval Timers */
int posix_timer_id;
struct list_head posix_timers;
/* ITIMER_REAL timer for the process */
struct hrtimer real_timer;
- struct pid *leader_pid;
ktime_t it_real_incr;
/*
@@ -759,12 +723,16 @@ struct signal_struct {
/* Earliest-expiration cache. */
struct task_cputime cputime_expires;
+ struct list_head cpu_timers[3];
+
+#endif
+
+ struct pid *leader_pid;
+
#ifdef CONFIG_NO_HZ_FULL
atomic_t tick_dep_mask;
#endif
- struct list_head cpu_timers[3];
-
struct pid *tty_old_pgrp;
/* boolean value for session group leader */
@@ -782,9 +750,9 @@ struct signal_struct {
* in __exit_signal, except for the group leader.
*/
seqlock_t stats_lock;
- cputime_t utime, stime, cutime, cstime;
- cputime_t gtime;
- cputime_t cgtime;
+ u64 utime, stime, cutime, cstime;
+ u64 gtime;
+ u64 cgtime;
struct prev_cputime prev_cputime;
unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
@@ -854,6 +822,16 @@ struct signal_struct {
#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */
+#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
+ SIGNAL_STOP_CONTINUED)
+
+static inline void signal_set_stop_flags(struct signal_struct *sig,
+ unsigned int flags)
+{
+ WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
+ sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
+}
+
/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
@@ -1015,8 +993,8 @@ enum cpu_idle_type {
*
* The DEFINE_WAKE_Q macro declares and initializes the list head.
* wake_up_q() does NOT reinitialize the list; it's expected to be
- * called near the end of a function, where the fact that the queue is
- * not used again will be easy to see by inspection.
+ * called near the end of a function. Otherwise, the list can be
+ * re-initialized for later re-use by wake_q_init().
*
* Note that this can cause spurious wakeups. schedule() callers
* must ensure the call is done inside a loop, confirming that the
@@ -1036,6 +1014,12 @@ struct wake_q_head {
#define DEFINE_WAKE_Q(name) \
struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
+static inline void wake_q_init(struct wake_q_head *head)
+{
+ head->first = WAKE_Q_TAIL;
+ head->lastp = &head->first;
+}
+
extern void wake_q_add(struct wake_q_head *head,
struct task_struct *task);
extern void wake_up_q(struct wake_q_head *head);
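
A sketch of the usual wake_q pattern with the new initializer, assuming a caller-defined lock and waiter list (the my_* types are illustrative): wakeups are queued under the lock and issued after it is dropped, and wake_q_init() lets the same head be reused for a later pass.

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct my_waiter {
	struct list_head list;
	struct task_struct *task;
};

struct my_lock {
	spinlock_t lock;
	struct list_head waiters;
};

static void my_wake_waiters(struct my_lock *l)
{
	DEFINE_WAKE_Q(wake_q);
	struct my_waiter *w;

	spin_lock(&l->lock);
	list_for_each_entry(w, &l->waiters, list)
		wake_q_add(&wake_q, w->task);
	spin_unlock(&l->lock);

	wake_up_q(&wake_q);	/* actual wakeups happen outside the lock */
	wake_q_init(&wake_q);	/* head can now be reused for another round */
}
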
@@ -1653,11 +1637,11 @@ struct task_struct {
int __user *set_child_tid; /* CLONE_CHILD_SETTID */
int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
- cputime_t utime, stime;
+ u64 utime, stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
- cputime_t utimescaled, stimescaled;
+ u64 utimescaled, stimescaled;
#endif
- cputime_t gtime;
+ u64 gtime;
struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
seqcount_t vtime_seqcount;
@@ -1681,8 +1665,10 @@ struct task_struct {
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
unsigned long min_flt, maj_flt;
+#ifdef CONFIG_POSIX_TIMERS
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
+#endif
/* process credentials */
const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
@@ -1807,7 +1793,7 @@ struct task_struct {
#if defined(CONFIG_TASK_XACCT)
u64 acct_rss_mem1; /* accumulated rss usage */
u64 acct_vm_mem1; /* accumulated virtual memory usage */
- cputime_t acct_timexpd; /* stime + utime since last update */
+ u64 acct_timexpd; /* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
nodemask_t mems_allowed; /* Protected by alloc_lock */
@@ -2252,17 +2238,17 @@ struct task_struct *try_get_task_struct(struct task_struct **ptask);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
- cputime_t *utime, cputime_t *stime);
-extern cputime_t task_gtime(struct task_struct *t);
+ u64 *utime, u64 *stime);
+extern u64 task_gtime(struct task_struct *t);
#else
static inline void task_cputime(struct task_struct *t,
- cputime_t *utime, cputime_t *stime)
+ u64 *utime, u64 *stime)
{
*utime = t->utime;
*stime = t->stime;
}
-static inline cputime_t task_gtime(struct task_struct *t)
+static inline u64 task_gtime(struct task_struct *t)
{
return t->gtime;
}
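
A sketch of a caller after the conversion (the report format is illustrative): the accessors now hand back plain nanoseconds, so the ordinary time helpers do the scaling.

#include <linux/math64.h>
#include <linux/sched.h>
#include <linux/time64.h>

static void my_report_cputime(struct task_struct *t)
{
	u64 utime, stime;

	task_cputime(t, &utime, &stime);
	pr_info("%s: user %llu ms, system %llu ms\n", t->comm,
		div_u64(utime, NSEC_PER_MSEC),
		div_u64(stime, NSEC_PER_MSEC));
}
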
@@ -2270,23 +2256,23 @@ static inline cputime_t task_gtime(struct task_struct *t)
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
static inline void task_cputime_scaled(struct task_struct *t,
- cputime_t *utimescaled,
- cputime_t *stimescaled)
+ u64 *utimescaled,
+ u64 *stimescaled)
{
*utimescaled = t->utimescaled;
*stimescaled = t->stimescaled;
}
#else
static inline void task_cputime_scaled(struct task_struct *t,
- cputime_t *utimescaled,
- cputime_t *stimescaled)
+ u64 *utimescaled,
+ u64 *stimescaled)
{
task_cputime(t, utimescaled, stimescaled);
}
#endif
-extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
-extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
+extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
+extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
/*
* Per process flags
@@ -2505,10 +2491,18 @@ extern u64 sched_clock_cpu(int cpu);
extern void sched_clock_init(void);
#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+static inline void sched_clock_init_late(void)
+{
+}
+
static inline void sched_clock_tick(void)
{
}
+static inline void clear_sched_clock_stable(void)
+{
+}
+
static inline void sched_clock_idle_sleep_event(void)
{
}
@@ -2527,6 +2521,7 @@ static inline u64 local_clock(void)
return sched_clock();
}
#else
+extern void sched_clock_init_late(void);
/*
* Architectures can set this to 1 if they have specified
* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
@@ -2534,7 +2529,6 @@ static inline u64 local_clock(void)
* is reliable after all:
*/
extern int sched_clock_stable(void);
-extern void set_sched_clock_stable(void);
extern void clear_sched_clock_stable(void);
extern void sched_clock_tick(void);
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 4411453..49308e1 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -59,6 +59,7 @@ extern unsigned int sysctl_sched_cfs_bandwidth_slice;
extern unsigned int sysctl_sched_autogroup_enabled;
#endif
+extern int sysctl_sched_rr_timeslice;
extern int sched_rr_timeslice;
extern int sched_rr_handler(struct ctl_table *table, int write,
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index fcb4c36..7a4804c 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -62,7 +62,7 @@ typedef struct sctphdr {
__be16 dest;
__be32 vtag;
__le32 checksum;
-} __packed sctp_sctphdr_t;
+} sctp_sctphdr_t;
static inline struct sctphdr *sctp_hdr(const struct sk_buff *skb)
{
@@ -74,7 +74,7 @@ typedef struct sctp_chunkhdr {
__u8 type;
__u8 flags;
__be16 length;
-} __packed sctp_chunkhdr_t;
+} sctp_chunkhdr_t;
/* Section 3.2. Chunk Type Values.
@@ -108,6 +108,7 @@ typedef enum {
/* Use hex, as defined in ADDIP sec. 3.1 */
SCTP_CID_ASCONF = 0xC1,
SCTP_CID_ASCONF_ACK = 0x80,
+ SCTP_CID_RECONF = 0x82,
} sctp_cid_t; /* enum */
@@ -164,7 +165,7 @@ enum { SCTP_CHUNK_FLAG_T = 0x01 };
typedef struct sctp_paramhdr {
__be16 type;
__be16 length;
-} __packed sctp_paramhdr_t;
+} sctp_paramhdr_t;
typedef enum {
@@ -199,6 +200,13 @@ typedef enum {
SCTP_PARAM_SUCCESS_REPORT = cpu_to_be16(0xc005),
SCTP_PARAM_ADAPTATION_LAYER_IND = cpu_to_be16(0xc006),
+ /* RE-CONFIG. Section 4 */
+ SCTP_PARAM_RESET_OUT_REQUEST = cpu_to_be16(0x000d),
+ SCTP_PARAM_RESET_IN_REQUEST = cpu_to_be16(0x000e),
+ SCTP_PARAM_RESET_TSN_REQUEST = cpu_to_be16(0x000f),
+ SCTP_PARAM_RESET_RESPONSE = cpu_to_be16(0x0010),
+ SCTP_PARAM_RESET_ADD_OUT_STREAMS = cpu_to_be16(0x0011),
+ SCTP_PARAM_RESET_ADD_IN_STREAMS = cpu_to_be16(0x0012),
} sctp_param_t; /* enum */
@@ -225,12 +233,12 @@ typedef struct sctp_datahdr {
__be16 ssn;
__be32 ppid;
__u8 payload[0];
-} __packed sctp_datahdr_t;
+} sctp_datahdr_t;
typedef struct sctp_data_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_datahdr_t data_hdr;
-} __packed sctp_data_chunk_t;
+} sctp_data_chunk_t;
/* DATA Chuck Specific Flags */
enum {
@@ -256,78 +264,78 @@ typedef struct sctp_inithdr {
__be16 num_inbound_streams;
__be32 initial_tsn;
__u8 params[0];
-} __packed sctp_inithdr_t;
+} sctp_inithdr_t;
typedef struct sctp_init_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_inithdr_t init_hdr;
-} __packed sctp_init_chunk_t;
+} sctp_init_chunk_t;
/* Section 3.3.2.1. IPv4 Address Parameter (5) */
typedef struct sctp_ipv4addr_param {
sctp_paramhdr_t param_hdr;
struct in_addr addr;
-} __packed sctp_ipv4addr_param_t;
+} sctp_ipv4addr_param_t;
/* Section 3.3.2.1. IPv6 Address Parameter (6) */
typedef struct sctp_ipv6addr_param {
sctp_paramhdr_t param_hdr;
struct in6_addr addr;
-} __packed sctp_ipv6addr_param_t;
+} sctp_ipv6addr_param_t;
/* Section 3.3.2.1 Cookie Preservative (9) */
typedef struct sctp_cookie_preserve_param {
sctp_paramhdr_t param_hdr;
__be32 lifespan_increment;
-} __packed sctp_cookie_preserve_param_t;
+} sctp_cookie_preserve_param_t;
/* Section 3.3.2.1 Host Name Address (11) */
typedef struct sctp_hostname_param {
sctp_paramhdr_t param_hdr;
uint8_t hostname[0];
-} __packed sctp_hostname_param_t;
+} sctp_hostname_param_t;
/* Section 3.3.2.1 Supported Address Types (12) */
typedef struct sctp_supported_addrs_param {
sctp_paramhdr_t param_hdr;
__be16 types[0];
-} __packed sctp_supported_addrs_param_t;
+} sctp_supported_addrs_param_t;
/* Appendix A. ECN Capable (32768) */
typedef struct sctp_ecn_capable_param {
sctp_paramhdr_t param_hdr;
-} __packed sctp_ecn_capable_param_t;
+} sctp_ecn_capable_param_t;
/* ADDIP Section 3.2.6 Adaptation Layer Indication */
typedef struct sctp_adaptation_ind_param {
struct sctp_paramhdr param_hdr;
__be32 adaptation_ind;
-} __packed sctp_adaptation_ind_param_t;
+} sctp_adaptation_ind_param_t;
/* ADDIP Section 4.2.7 Supported Extensions Parameter */
typedef struct sctp_supported_ext_param {
struct sctp_paramhdr param_hdr;
__u8 chunks[0];
-} __packed sctp_supported_ext_param_t;
+} sctp_supported_ext_param_t;
/* AUTH Section 3.1 Random */
typedef struct sctp_random_param {
sctp_paramhdr_t param_hdr;
__u8 random_val[0];
-} __packed sctp_random_param_t;
+} sctp_random_param_t;
/* AUTH Section 3.2 Chunk List */
typedef struct sctp_chunks_param {
sctp_paramhdr_t param_hdr;
__u8 chunks[0];
-} __packed sctp_chunks_param_t;
+} sctp_chunks_param_t;
/* AUTH Section 3.3 HMAC Algorithm */
typedef struct sctp_hmac_algo_param {
sctp_paramhdr_t param_hdr;
__be16 hmac_ids[0];
-} __packed sctp_hmac_algo_param_t;
+} sctp_hmac_algo_param_t;
/* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2):
* The INIT ACK chunk is used to acknowledge the initiation of an SCTP
@@ -339,13 +347,13 @@ typedef sctp_init_chunk_t sctp_initack_chunk_t;
typedef struct sctp_cookie_param {
sctp_paramhdr_t p;
__u8 body[0];
-} __packed sctp_cookie_param_t;
+} sctp_cookie_param_t;
/* Section 3.3.3.1 Unrecognized Parameters (8) */
typedef struct sctp_unrecognized_param {
sctp_paramhdr_t param_hdr;
sctp_paramhdr_t unrecognized;
-} __packed sctp_unrecognized_param_t;
+} sctp_unrecognized_param_t;
@@ -360,7 +368,7 @@ typedef struct sctp_unrecognized_param {
typedef struct sctp_gap_ack_block {
__be16 start;
__be16 end;
-} __packed sctp_gap_ack_block_t;
+} sctp_gap_ack_block_t;
typedef __be32 sctp_dup_tsn_t;
@@ -375,12 +383,12 @@ typedef struct sctp_sackhdr {
__be16 num_gap_ack_blocks;
__be16 num_dup_tsns;
sctp_sack_variable_t variable[0];
-} __packed sctp_sackhdr_t;
+} sctp_sackhdr_t;
typedef struct sctp_sack_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_sackhdr_t sack_hdr;
-} __packed sctp_sack_chunk_t;
+} sctp_sack_chunk_t;
/* RFC 2960. Section 3.3.5 Heartbeat Request (HEARTBEAT) (4):
@@ -392,12 +400,12 @@ typedef struct sctp_sack_chunk {
typedef struct sctp_heartbeathdr {
sctp_paramhdr_t info;
-} __packed sctp_heartbeathdr_t;
+} sctp_heartbeathdr_t;
typedef struct sctp_heartbeat_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_heartbeathdr_t hb_hdr;
-} __packed sctp_heartbeat_chunk_t;
+} sctp_heartbeat_chunk_t;
/* For the abort and shutdown ACK we must carry the init tag in the
@@ -406,7 +414,7 @@ typedef struct sctp_heartbeat_chunk {
*/
typedef struct sctp_abort_chunk {
sctp_chunkhdr_t uh;
-} __packed sctp_abort_chunk_t;
+} sctp_abort_chunk_t;
/* For the graceful shutdown we must carry the tag (in common header)
@@ -414,12 +422,12 @@ typedef struct sctp_abort_chunk {
*/
typedef struct sctp_shutdownhdr {
__be32 cum_tsn_ack;
-} __packed sctp_shutdownhdr_t;
+} sctp_shutdownhdr_t;
struct sctp_shutdown_chunk_t {
sctp_chunkhdr_t chunk_hdr;
sctp_shutdownhdr_t shutdown_hdr;
-} __packed;
+};
/* RFC 2960. Section 3.3.10 Operation Error (ERROR) (9) */
@@ -427,12 +435,12 @@ typedef struct sctp_errhdr {
__be16 cause;
__be16 length;
__u8 variable[0];
-} __packed sctp_errhdr_t;
+} sctp_errhdr_t;
typedef struct sctp_operr_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_errhdr_t err_hdr;
-} __packed sctp_operr_chunk_t;
+} sctp_operr_chunk_t;
/* RFC 2960 3.3.10 - Operation Error
*
@@ -522,7 +530,7 @@ typedef struct sctp_ecnehdr {
typedef struct sctp_ecne_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_ecnehdr_t ence_hdr;
-} __packed sctp_ecne_chunk_t;
+} sctp_ecne_chunk_t;
/* RFC 2960. Appendix A. Explicit Congestion Notification.
* Congestion Window Reduced (CWR) (13)
@@ -534,7 +542,7 @@ typedef struct sctp_cwrhdr {
typedef struct sctp_cwr_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_cwrhdr_t cwr_hdr;
-} __packed sctp_cwr_chunk_t;
+} sctp_cwr_chunk_t;
/* PR-SCTP
* 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN)
@@ -585,17 +593,17 @@ typedef struct sctp_cwr_chunk {
struct sctp_fwdtsn_skip {
__be16 stream;
__be16 ssn;
-} __packed;
+};
struct sctp_fwdtsn_hdr {
__be32 new_cum_tsn;
struct sctp_fwdtsn_skip skip[0];
-} __packed;
+};
struct sctp_fwdtsn_chunk {
struct sctp_chunkhdr chunk_hdr;
struct sctp_fwdtsn_hdr fwdtsn_hdr;
-} __packed;
+};
/* ADDIP
@@ -633,17 +641,17 @@ struct sctp_fwdtsn_chunk {
typedef struct sctp_addip_param {
sctp_paramhdr_t param_hdr;
__be32 crr_id;
-} __packed sctp_addip_param_t;
+} sctp_addip_param_t;
typedef struct sctp_addiphdr {
__be32 serial;
__u8 params[0];
-} __packed sctp_addiphdr_t;
+} sctp_addiphdr_t;
typedef struct sctp_addip_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_addiphdr_t addip_hdr;
-} __packed sctp_addip_chunk_t;
+} sctp_addip_chunk_t;
/* AUTH
* Section 4.1 Authentication Chunk (AUTH)
@@ -698,16 +706,71 @@ typedef struct sctp_authhdr {
__be16 shkey_id;
__be16 hmac_id;
__u8 hmac[0];
-} __packed sctp_authhdr_t;
+} sctp_authhdr_t;
typedef struct sctp_auth_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_authhdr_t auth_hdr;
-} __packed sctp_auth_chunk_t;
+} sctp_auth_chunk_t;
struct sctp_infox {
struct sctp_info *sctpinfo;
struct sctp_association *asoc;
};
+struct sctp_reconf_chunk {
+ sctp_chunkhdr_t chunk_hdr;
+ __u8 params[0];
+};
+
+struct sctp_strreset_outreq {
+ sctp_paramhdr_t param_hdr;
+ __u32 request_seq;
+ __u32 response_seq;
+ __u32 send_reset_at_tsn;
+ __u16 list_of_streams[0];
+};
+
+struct sctp_strreset_inreq {
+ sctp_paramhdr_t param_hdr;
+ __u32 request_seq;
+ __u16 list_of_streams[0];
+};
+
+struct sctp_strreset_tsnreq {
+ sctp_paramhdr_t param_hdr;
+ __u32 request_seq;
+};
+
+struct sctp_strreset_addstrm {
+ sctp_paramhdr_t param_hdr;
+ __u32 request_seq;
+ __u16 number_of_streams;
+ __u16 reserved;
+};
+
+enum {
+ SCTP_STRRESET_NOTHING_TO_DO = 0x00,
+ SCTP_STRRESET_PERFORMED = 0x01,
+ SCTP_STRRESET_DENIED = 0x02,
+ SCTP_STRRESET_ERR_WRONG_SSN = 0x03,
+ SCTP_STRRESET_ERR_IN_PROGRESS = 0x04,
+ SCTP_STRRESET_ERR_BAD_SEQNO = 0x05,
+ SCTP_STRRESET_IN_PROGRESS = 0x06,
+};
+
+struct sctp_strreset_resp {
+ sctp_paramhdr_t param_hdr;
+ __u32 response_seq;
+ __u32 result;
+};
+
+struct sctp_strreset_resptsn {
+ sctp_paramhdr_t param_hdr;
+ __u32 response_seq;
+ __u32 result;
+ __u32 senders_next_tsn;
+ __u32 receivers_next_tsn;
+};
+
#endif /* __LINUX_SCTP_H__ */
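
A sketch (the helper name and the debug print are assumptions) of walking a chunk's TLV parameters through sctp_paramhdr_t; the headers above are naturally aligned, so dropping __packed leaves their on-wire layout unchanged.

#include <linux/kernel.h>
#include <linux/sctp.h>

static void my_walk_params(const __u8 *params, size_t len)
{
	const sctp_paramhdr_t *p = (const sctp_paramhdr_t *)params;

	while (len >= sizeof(*p)) {
		size_t plen = ntohs(p->length);

		/* stop on a malformed or truncated parameter */
		if (plen < sizeof(*p) || ALIGN(plen, 4) > len)
			break;
		pr_debug("param type %#x, len %zu\n", ntohs(p->type), plen);
		len -= ALIGN(plen, 4);
		p = (const void *)p + ALIGN(plen, 4);
	}
}
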
diff --git a/include/linux/security.h b/include/linux/security.h
index c2125e90..d3868f2 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -332,7 +332,6 @@ int security_task_getscheduler(struct task_struct *p);
int security_task_movememory(struct task_struct *p);
int security_task_kill(struct task_struct *p, struct siginfo *info,
int sig, u32 secid);
-int security_task_wait(struct task_struct *p);
int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
void security_task_to_inode(struct task_struct *p, struct inode *inode);
@@ -361,7 +360,7 @@ int security_sem_semop(struct sem_array *sma, struct sembuf *sops,
unsigned nsops, int alter);
void security_d_instantiate(struct dentry *dentry, struct inode *inode);
int security_getprocattr(struct task_struct *p, char *name, char **value);
-int security_setprocattr(struct task_struct *p, char *name, void *value, size_t size);
+int security_setprocattr(const char *name, void *value, size_t size);
int security_netlink_send(struct sock *sk, struct sk_buff *skb);
int security_ismaclabel(const char *name);
int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
@@ -980,11 +979,6 @@ static inline int security_task_kill(struct task_struct *p,
return 0;
}
-static inline int security_task_wait(struct task_struct *p)
-{
- return 0;
-}
-
static inline int security_task_prctl(int option, unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
@@ -1106,7 +1100,7 @@ static inline int security_getprocattr(struct task_struct *p, char *name, char *
return -EINVAL;
}
-static inline int security_setprocattr(struct task_struct *p, char *name, void *value, size_t size)
+static inline int security_setprocattr(char *name, void *value, size_t size)
{
return -EINVAL;
}
diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h
new file mode 100644
index 0000000..deee23d
--- /dev/null
+++ b/include/linux/sed-opal.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Authors:
+ * Rafael Antognolli <rafael.antognolli@intel.com>
+ * Scott Bauer <scott.bauer@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef LINUX_OPAL_H
+#define LINUX_OPAL_H
+
+#include <uapi/linux/sed-opal.h>
+#include <linux/kernel.h>
+
+struct opal_dev;
+
+typedef int (sec_send_recv)(void *data, u16 spsp, u8 secp, void *buffer,
+ size_t len, bool send);
+
+#ifdef CONFIG_BLK_SED_OPAL
+bool opal_unlock_from_suspend(struct opal_dev *dev);
+struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv);
+int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *ioctl_ptr);
+
+static inline bool is_sed_ioctl(unsigned int cmd)
+{
+ switch (cmd) {
+ case IOC_OPAL_SAVE:
+ case IOC_OPAL_LOCK_UNLOCK:
+ case IOC_OPAL_TAKE_OWNERSHIP:
+ case IOC_OPAL_ACTIVATE_LSP:
+ case IOC_OPAL_SET_PW:
+ case IOC_OPAL_ACTIVATE_USR:
+ case IOC_OPAL_REVERT_TPR:
+ case IOC_OPAL_LR_SETUP:
+ case IOC_OPAL_ADD_USR_TO_LR:
+ case IOC_OPAL_ENABLE_DISABLE_MBR:
+ case IOC_OPAL_ERASE_LR:
+ case IOC_OPAL_SECURE_ERASE_LR:
+ return true;
+ }
+ return false;
+}
+#else
+static inline bool is_sed_ioctl(unsigned int cmd)
+{
+ return false;
+}
+
+static inline int sed_ioctl(struct opal_dev *dev, unsigned int cmd,
+ void __user *ioctl_ptr)
+{
+ return 0;
+}
+static inline bool opal_unlock_from_suspend(struct opal_dev *dev)
+{
+ return false;
+}
+#define init_opal_dev(data, send_recv) NULL
+#endif /* CONFIG_BLK_SED_OPAL */
+#endif /* LINUX_OPAL_H */
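
A sketch of how a block driver's ioctl path might route the Opal commands, assuming a driver-private structure that holds the opal_dev created earlier with init_opal_dev():

#include <linux/blkdev.h>
#include <linux/sed-opal.h>

struct my_dev {
	struct opal_dev *opal_dev;	/* from init_opal_dev() at probe time */
	/* ... */
};

static int my_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct my_dev *dev = bdev->bd_disk->private_data;

	if (is_sed_ioctl(cmd))
		return sed_ioctl(dev->opal_dev, cmd, (void __user *)arg);

	return -ENOTTY;	/* fall back to the driver's other ioctls */
}
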
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
new file mode 100644
index 0000000..9519da6
--- /dev/null
+++ b/include/linux/serdev.h
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2016-2017 Linaro Ltd., Rob Herring <robh@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _LINUX_SERDEV_H
+#define _LINUX_SERDEV_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+struct serdev_controller;
+struct serdev_device;
+
+/*
+ * serdev device structures
+ */
+
+/**
+ * struct serdev_device_ops - Callback operations for a serdev device
+ * @receive_buf: Function called with data received from device.
+ * @write_wakeup: Function called when ready to transmit more data.
+ */
+struct serdev_device_ops {
+ int (*receive_buf)(struct serdev_device *, const unsigned char *, size_t);
+ void (*write_wakeup)(struct serdev_device *);
+};
+
+/**
+ * struct serdev_device - Basic representation of a serdev device
+ * @dev: Driver model representation of the device.
+ * @nr: Device number on serdev bus.
+ * @ctrl: serdev controller managing this device.
+ * @ops: Device operations.
+ */
+struct serdev_device {
+ struct device dev;
+ int nr;
+ struct serdev_controller *ctrl;
+ const struct serdev_device_ops *ops;
+};
+
+static inline struct serdev_device *to_serdev_device(struct device *d)
+{
+ return container_of(d, struct serdev_device, dev);
+}
+
+/**
+ * struct serdev_device_driver - serdev slave device driver
+ * @driver: serdev device drivers should initialize the name field of this
+ * structure.
+ * @probe: binds this driver to a serdev device.
+ * @remove: unbinds this driver from the serdev device.
+ */
+struct serdev_device_driver {
+ struct device_driver driver;
+ int (*probe)(struct serdev_device *);
+ void (*remove)(struct serdev_device *);
+};
+
+static inline struct serdev_device_driver *to_serdev_device_driver(struct device_driver *d)
+{
+ return container_of(d, struct serdev_device_driver, driver);
+}
+
+/*
+ * serdev controller structures
+ */
+struct serdev_controller_ops {
+ int (*write_buf)(struct serdev_controller *, const unsigned char *, size_t);
+ void (*write_flush)(struct serdev_controller *);
+ int (*write_room)(struct serdev_controller *);
+ int (*open)(struct serdev_controller *);
+ void (*close)(struct serdev_controller *);
+ void (*set_flow_control)(struct serdev_controller *, bool);
+ unsigned int (*set_baudrate)(struct serdev_controller *, unsigned int);
+};
+
+/**
+ * struct serdev_controller - interface to the serdev controller
+ * @dev: Driver model representation of the device.
+ * @nr: number identifier for this controller/bus.
+ * @serdev: Pointer to slave device for this controller.
+ * @ops: Controller operations.
+ */
+struct serdev_controller {
+ struct device dev;
+ unsigned int nr;
+ struct serdev_device *serdev;
+ const struct serdev_controller_ops *ops;
+};
+
+static inline struct serdev_controller *to_serdev_controller(struct device *d)
+{
+ return container_of(d, struct serdev_controller, dev);
+}
+
+static inline void *serdev_device_get_drvdata(const struct serdev_device *serdev)
+{
+ return dev_get_drvdata(&serdev->dev);
+}
+
+static inline void serdev_device_set_drvdata(struct serdev_device *serdev, void *data)
+{
+ dev_set_drvdata(&serdev->dev, data);
+}
+
+/**
+ * serdev_device_put() - decrement serdev device refcount
+ * @serdev: serdev device.
+ */
+static inline void serdev_device_put(struct serdev_device *serdev)
+{
+ if (serdev)
+ put_device(&serdev->dev);
+}
+
+static inline void serdev_device_set_client_ops(struct serdev_device *serdev,
+ const struct serdev_device_ops *ops)
+{
+ serdev->ops = ops;
+}
+
+static inline
+void *serdev_controller_get_drvdata(const struct serdev_controller *ctrl)
+{
+ return ctrl ? dev_get_drvdata(&ctrl->dev) : NULL;
+}
+
+static inline void serdev_controller_set_drvdata(struct serdev_controller *ctrl,
+ void *data)
+{
+ dev_set_drvdata(&ctrl->dev, data);
+}
+
+/**
+ * serdev_controller_put() - decrement controller refcount
+ * @ctrl: serdev controller.
+ */
+static inline void serdev_controller_put(struct serdev_controller *ctrl)
+{
+ if (ctrl)
+ put_device(&ctrl->dev);
+}
+
+struct serdev_device *serdev_device_alloc(struct serdev_controller *);
+int serdev_device_add(struct serdev_device *);
+void serdev_device_remove(struct serdev_device *);
+
+struct serdev_controller *serdev_controller_alloc(struct device *, size_t);
+int serdev_controller_add(struct serdev_controller *);
+void serdev_controller_remove(struct serdev_controller *);
+
+static inline void serdev_controller_write_wakeup(struct serdev_controller *ctrl)
+{
+ struct serdev_device *serdev = ctrl->serdev;
+
+ if (!serdev || !serdev->ops->write_wakeup)
+ return;
+
+ serdev->ops->write_wakeup(ctrl->serdev);
+}
+
+static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
+ const unsigned char *data,
+ size_t count)
+{
+ struct serdev_device *serdev = ctrl->serdev;
+
+ if (!serdev || !serdev->ops->receive_buf)
+ return -EINVAL;
+
+ return serdev->ops->receive_buf(ctrl->serdev, data, count);
+}
+
+#if IS_ENABLED(CONFIG_SERIAL_DEV_BUS)
+
+int serdev_device_open(struct serdev_device *);
+void serdev_device_close(struct serdev_device *);
+unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int);
+void serdev_device_set_flow_control(struct serdev_device *, bool);
+int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t);
+void serdev_device_write_flush(struct serdev_device *);
+int serdev_device_write_room(struct serdev_device *);
+
+/*
+ * serdev device driver functions
+ */
+int __serdev_device_driver_register(struct serdev_device_driver *, struct module *);
+#define serdev_device_driver_register(sdrv) \
+ __serdev_device_driver_register(sdrv, THIS_MODULE)
+
+/**
+ * serdev_device_driver_unregister() - unregister a serdev client driver
+ * @sdrv: the driver to unregister
+ */
+static inline void serdev_device_driver_unregister(struct serdev_device_driver *sdrv)
+{
+ if (sdrv)
+ driver_unregister(&sdrv->driver);
+}
+
+#define module_serdev_device_driver(__serdev_device_driver) \
+ module_driver(__serdev_device_driver, serdev_device_driver_register, \
+ serdev_device_driver_unregister)
+
+#else
+
+static inline int serdev_device_open(struct serdev_device *sdev)
+{
+ return -ENODEV;
+}
+static inline void serdev_device_close(struct serdev_device *sdev) {}
+static inline unsigned int serdev_device_set_baudrate(struct serdev_device *sdev, unsigned int baudrate)
+{
+ return 0;
+}
+static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {}
+static inline int serdev_device_write_buf(struct serdev_device *sdev, const unsigned char *buf, size_t count)
+{
+ return -ENODEV;
+}
+static inline void serdev_device_write_flush(struct serdev_device *sdev) {}
+static inline int serdev_device_write_room(struct serdev_device *sdev)
+{
+ return 0;
+}
+
+#define serdev_device_driver_register(x)
+#define serdev_device_driver_unregister(x)
+
+#endif /* CONFIG_SERIAL_DEV_BUS */
+
+/*
+ * serdev hooks into TTY core
+ */
+struct tty_port;
+struct tty_driver;
+
+#ifdef CONFIG_SERIAL_DEV_CTRL_TTYPORT
+struct device *serdev_tty_port_register(struct tty_port *port,
+ struct device *parent,
+ struct tty_driver *drv, int idx);
+void serdev_tty_port_unregister(struct tty_port *port);
+#else
+static inline struct device *serdev_tty_port_register(struct tty_port *port,
+ struct device *parent,
+ struct tty_driver *drv, int idx)
+{
+ return ERR_PTR(-ENODEV);
+}
+static inline void serdev_tty_port_unregister(struct tty_port *port) {}
+#endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */
+
+#endif /*_LINUX_SERDEV_H */
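
A sketch of a minimal client driver on top of these interfaces; the driver name, the baud rate, the probe-time write and the my_* identifiers are assumptions, and the usual of_match_table is omitted.

#include <linux/module.h>
#include <linux/serdev.h>

static int my_receive_buf(struct serdev_device *serdev,
			  const unsigned char *data, size_t count)
{
	/* consume 'count' bytes; return how many were handled */
	return count;
}

static const struct serdev_device_ops my_serdev_ops = {
	.receive_buf = my_receive_buf,
	/* write_wakeup may stay NULL if the driver does not need it */
};

static int my_probe(struct serdev_device *serdev)
{
	static const unsigned char cmd[] = "AT\r";
	int ret;

	serdev_device_set_client_ops(serdev, &my_serdev_ops);

	ret = serdev_device_open(serdev);
	if (ret)
		return ret;

	serdev_device_set_baudrate(serdev, 115200);
	ret = serdev_device_write_buf(serdev, cmd, sizeof(cmd) - 1);
	return ret < 0 ? ret : 0;
}

static void my_remove(struct serdev_device *serdev)
{
	serdev_device_close(serdev);
}

static struct serdev_device_driver my_driver = {
	.driver = {
		.name = "my-serdev-client",
	},
	.probe  = my_probe,
	.remove = my_remove,
};
module_serdev_device_driver(my_driver);

MODULE_LICENSE("GPL v2");
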
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 5def8e8..58484fb 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -450,7 +450,7 @@ extern void uart_handle_cts_change(struct uart_port *uport,
extern void uart_insert_char(struct uart_port *port, unsigned int status,
unsigned int overrun, unsigned int ch, unsigned int flag);
-#ifdef SUPPORT_SYSRQ
+#if defined(SUPPORT_SYSRQ) && defined(CONFIG_MAGIC_SYSRQ_SERIAL)
static inline int
uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
{
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index 9f2bfd0..e598eae 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -9,8 +9,6 @@
* Generic header for SuperH (H)SCI(F) (used by sh/sh64 and related parts)
*/
-#define SCIx_NOT_SUPPORTED (-1)
-
/* Serial Control Register (@ = not supported by all parts) */
#define SCSCR_TIE BIT(7) /* Transmit Interrupt Enable */
#define SCSCR_RIE BIT(6) /* Receive Interrupt Enable */
@@ -41,24 +39,16 @@ enum {
SCIx_NR_REGTYPES,
};
-struct device;
-
struct plat_sci_port_ops {
void (*init_pins)(struct uart_port *, unsigned int cflag);
};
/*
- * Port-specific capabilities
- */
-#define SCIx_HAVE_RTSCTS BIT(0)
-
-/*
* Platform device specific platform_data struct
*/
struct plat_sci_port {
unsigned int type; /* SCI / SCIF / IRDA / HSCIF */
upf_t flags; /* UPF_* flags */
- unsigned long capabilities; /* Port features/capabilities */
unsigned int sampling_rate;
unsigned int scscr; /* SCSCR initialization */
@@ -66,14 +56,9 @@ struct plat_sci_port {
/*
* Platform overrides if necessary, defaults otherwise.
*/
- int port_reg;
- unsigned char regshift;
unsigned char regtype;
struct plat_sci_port_ops *ops;
-
- unsigned int dma_slave_tx;
- unsigned int dma_slave_rx;
};
#endif /* __LINUX_SERIAL_SCI_H */
diff --git a/include/linux/siphash.h b/include/linux/siphash.h
new file mode 100644
index 0000000..fa7a6b9
--- /dev/null
+++ b/include/linux/siphash.h
@@ -0,0 +1,140 @@
+/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.
+ *
+ * SipHash: a fast short-input PRF
+ * https://131002.net/siphash/
+ *
+ * This implementation is specifically for SipHash2-4 for a secure PRF
+ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
+ * hashtables.
+ */
+
+#ifndef _LINUX_SIPHASH_H
+#define _LINUX_SIPHASH_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#define SIPHASH_ALIGNMENT __alignof__(u64)
+typedef struct {
+ u64 key[2];
+} siphash_key_t;
+
+u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
+#endif
+
+u64 siphash_1u64(const u64 a, const siphash_key_t *key);
+u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
+u64 siphash_3u64(const u64 a, const u64 b, const u64 c,
+ const siphash_key_t *key);
+u64 siphash_4u64(const u64 a, const u64 b, const u64 c, const u64 d,
+ const siphash_key_t *key);
+u64 siphash_1u32(const u32 a, const siphash_key_t *key);
+u64 siphash_3u32(const u32 a, const u32 b, const u32 c,
+ const siphash_key_t *key);
+
+static inline u64 siphash_2u32(const u32 a, const u32 b,
+ const siphash_key_t *key)
+{
+ return siphash_1u64((u64)b << 32 | a, key);
+}
+static inline u64 siphash_4u32(const u32 a, const u32 b, const u32 c,
+ const u32 d, const siphash_key_t *key)
+{
+ return siphash_2u64((u64)b << 32 | a, (u64)d << 32 | c, key);
+}
+
+
+static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
+ const siphash_key_t *key)
+{
+ if (__builtin_constant_p(len) && len == 4)
+ return siphash_1u32(le32_to_cpup((const __le32 *)data), key);
+ if (__builtin_constant_p(len) && len == 8)
+ return siphash_1u64(le64_to_cpu(data[0]), key);
+ if (__builtin_constant_p(len) && len == 16)
+ return siphash_2u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+ key);
+ if (__builtin_constant_p(len) && len == 24)
+ return siphash_3u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+ le64_to_cpu(data[2]), key);
+ if (__builtin_constant_p(len) && len == 32)
+ return siphash_4u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+ le64_to_cpu(data[2]), le64_to_cpu(data[3]),
+ key);
+ return __siphash_aligned(data, len, key);
+}
+
+/**
+ * siphash - compute 64-bit siphash PRF value
+ * @data: buffer to hash
+ * @len: length of @data
+ * @key: the siphash key
+ */
+static inline u64 siphash(const void *data, size_t len,
+ const siphash_key_t *key)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
+ return __siphash_unaligned(data, len, key);
+#endif
+ return ___siphash_aligned(data, len, key);
+}
+
+#define HSIPHASH_ALIGNMENT __alignof__(unsigned long)
+typedef struct {
+ unsigned long key[2];
+} hsiphash_key_t;
+
+u32 __hsiphash_aligned(const void *data, size_t len,
+ const hsiphash_key_t *key);
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u32 __hsiphash_unaligned(const void *data, size_t len,
+ const hsiphash_key_t *key);
+#endif
+
+u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
+u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
+u32 hsiphash_3u32(const u32 a, const u32 b, const u32 c,
+ const hsiphash_key_t *key);
+u32 hsiphash_4u32(const u32 a, const u32 b, const u32 c, const u32 d,
+ const hsiphash_key_t *key);
+
+static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
+ const hsiphash_key_t *key)
+{
+ if (__builtin_constant_p(len) && len == 4)
+ return hsiphash_1u32(le32_to_cpu(data[0]), key);
+ if (__builtin_constant_p(len) && len == 8)
+ return hsiphash_2u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+ key);
+ if (__builtin_constant_p(len) && len == 12)
+ return hsiphash_3u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+ le32_to_cpu(data[2]), key);
+ if (__builtin_constant_p(len) && len == 16)
+ return hsiphash_4u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+ le32_to_cpu(data[2]), le32_to_cpu(data[3]),
+ key);
+ return __hsiphash_aligned(data, len, key);
+}
+
+/**
+ * hsiphash - compute 32-bit hsiphash PRF value
+ * @data: buffer to hash
+ * @len: length of @data
+ * @key: the hsiphash key
+ */
+static inline u32 hsiphash(const void *data, size_t len,
+ const hsiphash_key_t *key)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
+ return __hsiphash_unaligned(data, len, key);
+#endif
+ return ___hsiphash_aligned(data, len, key);
+}
+
+#endif /* _LINUX_SIPHASH_H */
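
A usage sketch, assuming a 10-bit bucket index and the my_* names: the key must be random, secret and fixed for the table's lifetime, and the fixed-width helpers are the fast path for short, constant-size inputs.

#include <linux/cache.h>
#include <linux/random.h>
#include <linux/siphash.h>

static siphash_key_t my_hash_key __read_mostly;

static void my_hash_init(void)
{
	get_random_bytes(&my_hash_key, sizeof(my_hash_key));
}

static u32 my_bucket(const void *buf, size_t len)
{
	/* full-strength 64-bit PRF, folded to a 10-bit bucket index */
	return siphash(buf, len, &my_hash_key) & ((1 << 10) - 1);
}

static u64 my_id_hash(u32 src_id, u32 dst_id)
{
	/* fixed-width fast path for short, constant-size inputs */
	return siphash_2u32(src_id, dst_id, &my_hash_key);
}
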
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index b53c0cf..69ccd26 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -585,20 +585,22 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
* @cloned: Head may be cloned (check refcnt to be sure)
* @ip_summed: Driver fed us an IP checksum
* @nohdr: Payload reference only, must not modify header
- * @nfctinfo: Relationship of this skb to the connection
* @pkt_type: Packet class
* @fclone: skbuff clone status
* @ipvs_property: skbuff is owned by ipvs
+ * @tc_skip_classify: do not classify packet. Set by IFB device
+ * @tc_at_ingress: used within tc_classify to distinguish in/egress
+ * @tc_redirected: packet was redirected by a tc action
+ * @tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect
* @peeked: this packet has been seen already, so stats have been
* done for it, don't do them again
* @nf_trace: netfilter packet trace flag
* @protocol: Packet protocol from driver
* @destructor: Destruct function
- * @nfct: Associated connection, if any
+ * @_nfct: Associated connection, if any (with nfctinfo bits)
* @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
* @skb_iif: ifindex of device we arrived on
* @tc_index: Traffic control index
- * @tc_verd: traffic control verdict
* @hash: the packet hash
* @queue_mapping: Queue mapping for multiqueue devices
* @xmit_more: More SKBs are pending for this queue
@@ -610,6 +612,7 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
* @wifi_acked_valid: wifi_acked was set
* @wifi_acked: whether frame was acked on wifi or not
* @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
+ * @dst_pending_confirm: need to confirm neighbour
* @napi_id: id of the NAPI struct this skb came from
* @secmark: security marking
* @mark: Generic packet mark
@@ -668,7 +671,7 @@ struct sk_buff {
struct sec_path *sp;
#endif
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
- struct nf_conntrack *nfct;
+ unsigned long _nfct;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info *nf_bridge;
@@ -721,7 +724,6 @@ struct sk_buff {
__u8 pkt_type:3;
__u8 pfmemalloc:1;
__u8 ignore_df:1;
- __u8 nfctinfo:3;
__u8 nf_trace:1;
__u8 ip_summed:2;
@@ -740,6 +742,7 @@ struct sk_buff {
__u8 csum_level:2;
__u8 csum_bad:1;
+ __u8 dst_pending_confirm:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
__u8 ndisc_nodetype:2;
#endif
@@ -749,13 +752,15 @@ struct sk_buff {
#ifdef CONFIG_NET_SWITCHDEV
__u8 offload_fwd_mark:1;
#endif
- /* 2, 4 or 5 bit hole */
+#ifdef CONFIG_NET_CLS_ACT
+ __u8 tc_skip_classify:1;
+ __u8 tc_at_ingress:1;
+ __u8 tc_redirected:1;
+ __u8 tc_from_ingress:1;
+#endif
#ifdef CONFIG_NET_SCHED
__u16 tc_index; /* traffic control index */
-#ifdef CONFIG_NET_CLS_ACT
- __u16 tc_verd; /* traffic control verdict */
-#endif
#endif
union {
@@ -836,6 +841,7 @@ static inline bool skb_pfmemalloc(const struct sk_buff *skb)
#define SKB_DST_NOREF 1UL
#define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
+#define SKB_NFCT_PTRMASK ~(7UL)
/**
* skb_dst - returns skb dst_entry
* @skb: buffer
@@ -2178,6 +2184,11 @@ static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
return skb->head + skb->mac_header;
}
+static inline int skb_mac_offset(const struct sk_buff *skb)
+{
+ return skb_mac_header(skb) - skb->data;
+}
+
static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
return skb->mac_header != (typeof(skb->mac_header))~0U;
@@ -2480,7 +2491,7 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
static inline void skb_free_frag(void *addr)
{
- __free_page_frag(addr);
+ page_frag_free(addr);
}
void *napi_alloc_frag(unsigned int fragsz);
@@ -3553,6 +3564,15 @@ static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
skb->csum = csum_add(skb->csum, delta);
}
+static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ return (void *)(skb->_nfct & SKB_NFCT_PTRMASK);
+#else
+ return NULL;
+#endif
+}
+
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
@@ -3581,8 +3601,8 @@ static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
- nf_conntrack_put(skb->nfct);
- skb->nfct = NULL;
+ nf_conntrack_put(skb_nfct(skb));
+ skb->_nfct = 0;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
nf_bridge_put(skb->nf_bridge);
@@ -3602,10 +3622,8 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
bool copy)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
- dst->nfct = src->nfct;
- nf_conntrack_get(src->nfct);
- if (copy)
- dst->nfctinfo = src->nfctinfo;
+ dst->_nfct = src->_nfct;
+ nf_conntrack_get(skb_nfct(src));
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
dst->nf_bridge = src->nf_bridge;
@@ -3620,7 +3638,7 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
- nf_conntrack_put(dst->nfct);
+ nf_conntrack_put(skb_nfct(dst));
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
nf_bridge_put(dst->nf_bridge);
@@ -3652,9 +3670,7 @@ static inline bool skb_irq_freeable(const struct sk_buff *skb)
#if IS_ENABLED(CONFIG_XFRM)
!skb->sp &&
#endif
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
- !skb->nfct &&
-#endif
+ !skb_nfct(skb) &&
!skb->_skb_refdst &&
!skb_has_frag_list(skb);
}
@@ -3689,6 +3705,16 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
return skb->queue_mapping != 0;
}
+static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
+{
+ skb->dst_pending_confirm = val;
+}
+
+static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
+{
+ return skb->dst_pending_confirm != 0;
+}
+
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
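
As a reading aid for the _nfct hunks above: the conntrack pointer and the old nfctinfo bits now share a single word, which is why skb_nfct() masks with SKB_NFCT_PTRMASK. A rough sketch of the packing; the demo_* helpers are illustrative only (the real setter lives in the netfilter conntrack headers, not in skbuff.h):

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
/* low 3 bits carry the ctinfo, the remaining bits the nf_conntrack pointer */
static inline void demo_set_nfct(struct sk_buff *skb, struct nf_conntrack *ct,
				 unsigned long ctinfo)
{
	skb->_nfct = (unsigned long)ct | ctinfo;
}

static inline unsigned long demo_get_ctinfo(const struct sk_buff *skb)
{
	return skb->_nfct & ~SKB_NFCT_PTRMASK;	/* i.e. & 7UL */
}
#endif
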
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 084b12b..4c53635 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -226,7 +226,7 @@ static inline const char *__check_heap_object(const void *ptr,
* (PAGE_SIZE*2). Larger requests are passed to the page allocator.
*/
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
+#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
@@ -239,7 +239,7 @@ static inline const char *__check_heap_object(const void *ptr,
* be allocated from the same page.
*/
#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
-#define KMALLOC_SHIFT_MAX 30
+#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
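
The KMALLOC_SHIFT_MAX change caps kmalloc() at what the buddy allocator can actually deliver. Written out with the common (architecture-dependent) values PAGE_SHIFT = 12 and MAX_ORDER = 11:

	largest buddy allocation = 2^(MAX_ORDER - 1) pages
	                         = 2^(MAX_ORDER - 1 + PAGE_SHIFT) bytes
	KMALLOC_SHIFT_MAX        = MAX_ORDER + PAGE_SHIFT - 1 = 22  ->  2^22 = 4 MiB

The old "+ PAGE_SHIFT" form (and the hard-coded 30 in the second branch) promised at least one order more than the page allocator can hand out.
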
diff --git a/include/linux/soc/qcom/smem_state.h b/include/linux/soc/qcom/smem_state.h
index 7b88697..b8478ee 100644
--- a/include/linux/soc/qcom/smem_state.h
+++ b/include/linux/soc/qcom/smem_state.h
@@ -1,7 +1,7 @@
#ifndef __QCOM_SMEM_STATE__
#define __QCOM_SMEM_STATE__
-#include <linux/errno.h>
+#include <linux/err.h>
struct device_node;
struct qcom_smem_state;
diff --git a/include/linux/soc/samsung/exynos-pmu.h b/include/linux/soc/samsung/exynos-pmu.h
index e2e9de1..e57eb4b 100644
--- a/include/linux/soc/samsung/exynos-pmu.h
+++ b/include/linux/soc/samsung/exynos-pmu.h
@@ -12,6 +12,8 @@
#ifndef __LINUX_SOC_EXYNOS_PMU_H
#define __LINUX_SOC_EXYNOS_PMU_H
+struct regmap;
+
enum sys_powerdown {
SYS_AFTR,
SYS_LPA,
@@ -20,5 +22,13 @@ enum sys_powerdown {
};
extern void exynos_sys_powerdown_conf(enum sys_powerdown mode);
+#ifdef CONFIG_EXYNOS_PMU
+extern struct regmap *exynos_get_pmu_regmap(void);
+#else
+static inline struct regmap *exynos_get_pmu_regmap(void)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
#endif /* __LINUX_SOC_EXYNOS_PMU_H */
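
A consumer of the new exynos_get_pmu_regmap() accessor would look roughly like this; EXYNOS_DEMO_REG and the bit being set are placeholders for whatever the driver actually programs:

#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/soc/samsung/exynos-pmu.h>

#define EXYNOS_DEMO_REG		0x0700	/* placeholder register offset */

static int demo_pmu_poke(void)
{
	struct regmap *pmu = exynos_get_pmu_regmap();

	if (IS_ERR(pmu))
		return PTR_ERR(pmu);	/* -ENODEV when CONFIG_EXYNOS_PMU=n */

	return regmap_update_bits(pmu, EXYNOS_DEMO_REG, 0x1, 0x1);
}
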
diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h
index 35cb926..2b78826 100644
--- a/include/linux/soc/ti/knav_dma.h
+++ b/include/linux/soc/ti/knav_dma.h
@@ -41,6 +41,8 @@
#define KNAV_DMA_DESC_RETQ_SHIFT 0
#define KNAV_DMA_DESC_RETQ_MASK MASK(14)
#define KNAV_DMA_DESC_BUF_LEN_MASK MASK(22)
+#define KNAV_DMA_DESC_EFLAGS_MASK MASK(4)
+#define KNAV_DMA_DESC_EFLAGS_SHIFT 20
#define KNAV_DMA_NUM_EPIB_WORDS 4
#define KNAV_DMA_NUM_PS_WORDS 16
diff --git a/include/linux/socket.h b/include/linux/socket.h
index b5cc5a6..0820274 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -92,9 +92,9 @@ struct cmsghdr {
#define CMSG_ALIGN(len) ( ((len)+sizeof(long)-1) & ~(sizeof(long)-1) )
-#define CMSG_DATA(cmsg) ((void *)((char *)(cmsg) + CMSG_ALIGN(sizeof(struct cmsghdr))))
-#define CMSG_SPACE(len) (CMSG_ALIGN(sizeof(struct cmsghdr)) + CMSG_ALIGN(len))
-#define CMSG_LEN(len) (CMSG_ALIGN(sizeof(struct cmsghdr)) + (len))
+#define CMSG_DATA(cmsg) ((void *)((char *)(cmsg) + sizeof(struct cmsghdr)))
+#define CMSG_SPACE(len) (sizeof(struct cmsghdr) + CMSG_ALIGN(len))
+#define CMSG_LEN(len) (sizeof(struct cmsghdr) + (len))
#define __CMSG_FIRSTHDR(ctl,len) ((len) >= sizeof(struct cmsghdr) ? \
(struct cmsghdr *)(ctl) : \
@@ -202,8 +202,12 @@ struct ucred {
#define AF_VSOCK 40 /* vSockets */
#define AF_KCM 41 /* Kernel Connection Multiplexor*/
#define AF_QIPCRTR 42 /* Qualcomm IPC Router */
+#define AF_SMC 43 /* smc sockets: reserve number for
+ * PF_SMC protocol family that
+ * reuses AF_INET address family
+ */
-#define AF_MAX 43 /* For now.. */
+#define AF_MAX 44 /* For now.. */
/* Protocol families, same as address families. */
#define PF_UNSPEC AF_UNSPEC
@@ -251,6 +255,7 @@ struct ucred {
#define PF_VSOCK AF_VSOCK
#define PF_KCM AF_KCM
#define PF_QIPCRTR AF_QIPCRTR
+#define PF_SMC AF_SMC
#define PF_MAX AF_MAX
/* Maximum queue length specifiable by listen. */
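
A quick way to see why dropping CMSG_ALIGN() around sizeof(struct cmsghdr) is safe: cmsghdr is { __kernel_size_t cmsg_len; int cmsg_level; int cmsg_type; }, i.e. 16 bytes on 64-bit and 12 bytes on 32-bit, both already multiples of sizeof(long). An illustrative compile-time check, not part of the patch, that could sit in any function:

	BUILD_BUG_ON(sizeof(struct cmsghdr) & (sizeof(long) - 1));
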
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 47dd0ce..59248dc 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -180,8 +180,6 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
_raw_spin_lock_nested(lock, subclass)
-# define raw_spin_lock_bh_nested(lock, subclass) \
- _raw_spin_lock_bh_nested(lock, subclass)
# define raw_spin_lock_nest_lock(lock, nest_lock) \
do { \
@@ -197,7 +195,6 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
# define raw_spin_lock_nested(lock, subclass) \
_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
-# define raw_spin_lock_bh_nested(lock, subclass) _raw_spin_lock_bh(lock)
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -317,11 +314,6 @@ do { \
raw_spin_lock_nested(spinlock_check(lock), subclass); \
} while (0)
-#define spin_lock_bh_nested(lock, subclass) \
-do { \
- raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
-} while (0)
-
#define spin_lock_nest_lock(lock, nest_lock) \
do { \
raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 5344268..42dfab8 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -22,8 +22,6 @@ int in_lock_functions(unsigned long addr);
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
__acquires(lock);
-void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass)
- __acquires(lock);
void __lockfunc
_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
__acquires(lock);
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index d3afef9..d0d1888 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -57,7 +57,6 @@
#define _raw_spin_lock(lock) __LOCK(lock)
#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
-#define _raw_spin_lock_bh_nested(lock, subclass) __LOCK(lock)
#define _raw_read_lock(lock) __LOCK(lock)
#define _raw_write_lock(lock) __LOCK(lock)
#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
diff --git a/include/linux/sram.h b/include/linux/sram.h
new file mode 100644
index 0000000..c97dcbe
--- /dev/null
+++ b/include/linux/sram.h
@@ -0,0 +1,27 @@
+/*
+ * Generic SRAM Driver Interface
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __LINUX_SRAM_H__
+#define __LINUX_SRAM_H__
+
+struct gen_pool;
+
+#ifdef CONFIG_SRAM_EXEC
+int sram_exec_copy(struct gen_pool *pool, void *dst, void *src, size_t size);
+#else
+static inline int sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
+ size_t size)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_SRAM_EXEC */
+#endif /* __LINUX_SRAM_H__ */
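
A caller copies code into an executable SRAM pool roughly as below; sram_exec_pool, fn_text and fn_size stand in for whatever the platform code already has on hand:

#include <linux/genalloc.h>
#include <linux/sram.h>

static void *demo_load_to_sram(struct gen_pool *sram_exec_pool,
			       void *fn_text, size_t fn_size)
{
	unsigned long vaddr = gen_pool_alloc(sram_exec_pool, fn_size);

	if (!vaddr)
		return NULL;

	/* copies fn_text and, with CONFIG_SRAM_EXEC, handles the page
	 * permission fix-ups; returns -ENODEV otherwise
	 */
	if (sram_exec_copy(sram_exec_pool, (void *)vaddr, fn_text, fn_size)) {
		gen_pool_free(sram_exec_pool, vaddr, fn_size);
		return NULL;
	}

	return (void *)vaddr;
}
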
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index dc8eb63..a598cf3 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -33,9 +33,9 @@
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
-struct srcu_struct_array {
- unsigned long c[2];
- unsigned long seq[2];
+struct srcu_array {
+ unsigned long lock_count[2];
+ unsigned long unlock_count[2];
};
struct rcu_batch {
@@ -46,7 +46,7 @@ struct rcu_batch {
struct srcu_struct {
unsigned long completed;
- struct srcu_struct_array __percpu *per_cpu_ref;
+ struct srcu_array __percpu *per_cpu_ref;
spinlock_t queue_lock; /* protect ->batch_queue, ->running */
bool running;
/* callbacks just queued */
@@ -118,7 +118,7 @@ void process_srcu(struct work_struct *work);
* See include/linux/percpu-defs.h for the rules on per-CPU variables.
*/
#define __DEFINE_SRCU(name, is_static) \
- static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
+ static DEFINE_PER_CPU(struct srcu_array, name##_srcu_array);\
is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 266dab9..fc273e9 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -103,7 +103,6 @@ struct stmmac_axi {
u32 axi_wr_osr_lmt;
u32 axi_rd_osr_lmt;
bool axi_kbbe;
- bool axi_axi_all;
u32 axi_blen[AXI_BLEN];
bool axi_fb;
bool axi_mb;
@@ -135,13 +134,18 @@ struct plat_stmmacenet_data {
int tx_fifo_size;
int rx_fifo_size;
void (*fix_mac_speed)(void *priv, unsigned int speed);
- void (*bus_setup)(void __iomem *ioaddr);
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
void *bsp_priv;
+ struct clk *stmmac_clk;
+ struct clk *pclk;
+ struct clk *clk_ptp_ref;
+ unsigned int clk_ptp_rate;
+ struct reset_control *stmmac_rst;
struct stmmac_axi *axi;
int has_gmac4;
bool tso_en;
int mac_port_sel_speed;
+ bool en_tx_lpi_clockgating;
};
#endif
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 62a60ee..8a511c0 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -198,7 +198,7 @@ static inline struct cache_head *cache_get(struct cache_head *h)
static inline void cache_put(struct cache_head *h, struct cache_detail *cd)
{
- if (atomic_read(&h->ref.refcount) <= 2 &&
+ if (kref_read(&h->ref) <= 2 &&
h->expiry_time < cd->nextcheck)
cd->nextcheck = h->expiry_time;
kref_put(&h->ref, cd->cache_put);
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 85cc819..333ad11 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -216,5 +216,6 @@ void rpc_clnt_xprt_switch_put(struct rpc_clnt *);
void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *);
bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
const struct sockaddr *sap);
+void rpc_cleanup_clids(void);
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_CLNT_H */
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index e5d1934..7440290 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -66,6 +66,7 @@ struct svc_xprt {
#define XPT_LISTENER 10 /* listening endpoint */
#define XPT_CACHE_AUTH 11 /* cache auth info */
#define XPT_LOCAL 12 /* connection from loopback interface */
+#define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */
struct svc_serv *xpt_server; /* service for transport */
atomic_t xpt_reserved; /* space on outq that is rsvd */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 0c729c3..d971837 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -194,8 +194,6 @@ struct platform_freeze_ops {
};
#ifdef CONFIG_SUSPEND
-extern suspend_state_t mem_sleep_default;
-
/**
* suspend_set_ops - set platform dependent suspend operations
* @ops: The new suspend operations to set.
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 09f4be1..7f47b70 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -150,8 +150,9 @@ enum {
SWP_FILE = (1 << 7), /* set after swap_activate success */
SWP_AREA_DISCARD = (1 << 8), /* single-time swap area discards */
SWP_PAGE_DISCARD = (1 << 9), /* freed swap page-cluster discards */
+ SWP_STABLE_WRITES = (1 << 10), /* no overwrite PG_writeback pages */
/* add others here before... */
- SWP_SCANNING = (1 << 10), /* refcount in scan_swap_map */
+ SWP_SCANNING = (1 << 11), /* refcount in scan_swap_map */
};
#define SWAP_CLUSTER_MAX 32UL
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 183f37c..4ee479f 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -9,7 +9,13 @@ struct device;
struct page;
struct scatterlist;
-extern int swiotlb_force;
+enum swiotlb_force {
+ SWIOTLB_NORMAL, /* Default - depending on HW DMA mask etc. */
+ SWIOTLB_FORCE, /* swiotlb=force */
+ SWIOTLB_NO_FORCE, /* swiotlb=noforce */
+};
+
+extern enum swiotlb_force swiotlb_force;
/*
* Maximum allowable number of contiguous slabs to map,
@@ -108,11 +114,14 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask);
#ifdef CONFIG_SWIOTLB
extern void __init swiotlb_free(void);
+unsigned int swiotlb_max_segment(void);
#else
static inline void swiotlb_free(void) { }
+static inline unsigned int swiotlb_max_segment(void) { return 0; }
#endif
extern void swiotlb_print_info(void);
extern int is_swiotlb_buffer(phys_addr_t paddr);
+extern void swiotlb_set_max_segment(unsigned int);
#endif /* __LINUX_SWIOTLB_H */
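
Callers that used to test the old int now compare against the enum; a sketch of the typical decision (demo_needs_bounce() and the mask comparison are illustrative):

#include <linux/device.h>
#include <linux/swiotlb.h>

static bool demo_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return false;				/* swiotlb=noforce */
	if (swiotlb_force == SWIOTLB_FORCE)
		return true;				/* swiotlb=force */

	/* SWIOTLB_NORMAL: bounce only if the device's DMA mask can't reach it */
	return dev->dma_mask && (addr + size - 1 > *dev->dma_mask);
}
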
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index fc5848d..cfc2d95 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -62,8 +62,13 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
/* TCP Fast Open Cookie as stored in memory */
struct tcp_fastopen_cookie {
+ union {
+ u8 val[TCP_FASTOPEN_COOKIE_MAX];
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr addr;
+#endif
+ };
s8 len;
- u8 val[TCP_FASTOPEN_COOKIE_MAX];
bool exp; /* In RFC6994 experimental option format */
};
@@ -207,6 +212,8 @@ struct tcp_sock {
/* Information of the most recently (s)acked skb */
struct tcp_rack {
struct skb_mstamp mstamp; /* (Re)sent time of the skb */
+ u32 rtt_us; /* Associated RTT */
+ u32 end_seq; /* Ending TCP sequence of the skb */
u8 advanced; /* mstamp advanced since last lost marking */
u8 reord; /* reordering detected */
} rack;
@@ -215,15 +222,15 @@ struct tcp_sock {
u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
u8 chrono_type:2, /* current chronograph type */
rate_app_limited:1, /* rate_{delivered,interval_us} limited? */
- unused:5;
+ fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
+ unused:4;
u8 nonagle : 4,/* Disable Nagle algorithm? */
thin_lto : 1,/* Use linear timeouts for thin streams */
- thin_dupack : 1,/* Fast retransmit on first dupack */
+ unused1 : 1,
repair : 1,
frto : 1;/* F-RTO (RFC5682) activated in CA_Loss */
u8 repair_queue;
- u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */
- syn_data:1, /* SYN includes data */
+ u8 syn_data:1, /* SYN includes data */
syn_fastopen:1, /* SYN includes Fast Open option */
syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
@@ -305,7 +312,6 @@ struct tcp_sock {
*/
int lost_cnt_hint;
- u32 retransmit_high; /* L-bits may be on up to this seqno */
u32 prior_ssthresh; /* ssthresh saved at recovery start */
u32 high_seq; /* snd_nxt at onset of congestion */
@@ -439,4 +445,13 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp)
struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk);
+static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
+{
+ /* We use READ_ONCE() here because socket might not be locked.
+ * This happens for listeners.
+ */
+ u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);
+
+ return (user_mss && user_mss < mss) ? user_mss : mss;
+}
#endif /* _LINUX_TCP_H */
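
The new tcp_mss_clamp() helper centralises the user_mss clamp that request and route code used to open-code. Roughly, on a path that picks the MSS to advertise (the dst plumbing here is schematic):

#include <net/dst.h>
#include <net/tcp.h>

static u16 demo_advertised_mss(const struct sock *sk, struct dst_entry *dst)
{
	/* clamp the path-derived MSS against any user-set TCP_MAXSEG,
	 * without taking the socket lock (READ_ONCE inside the helper)
	 */
	return tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
}
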
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 51d601f..5a209b8 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -20,11 +20,6 @@ struct timer_list {
unsigned long data;
u32 flags;
-#ifdef CONFIG_TIMER_STATS
- int start_pid;
- void *start_site;
- char start_comm[16];
-#endif
#ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map;
#endif
@@ -197,46 +192,6 @@ extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
*/
#define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1)
-/*
- * Timer-statistics info:
- */
-#ifdef CONFIG_TIMER_STATS
-
-extern int timer_stats_active;
-
-extern void init_timer_stats(void);
-
-extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
- void *timerf, char *comm, u32 flags);
-
-extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
- void *addr);
-
-static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
-{
- if (likely(!timer_stats_active))
- return;
- __timer_stats_timer_set_start_info(timer, __builtin_return_address(0));
-}
-
-static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
-{
- timer->start_site = NULL;
-}
-#else
-static inline void init_timer_stats(void)
-{
-}
-
-static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
-{
-}
-
-static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
-{
-}
-#endif
-
extern void add_timer(struct timer_list *timer);
extern int try_to_del_timer_sync(struct timer_list *timer);
diff --git a/include/linux/timerfd.h b/include/linux/timerfd.h
index bd36ce4..bab0b1a 100644
--- a/include/linux/timerfd.h
+++ b/include/linux/timerfd.h
@@ -8,23 +8,7 @@
#ifndef _LINUX_TIMERFD_H
#define _LINUX_TIMERFD_H
-/* For O_CLOEXEC and O_NONBLOCK */
-#include <linux/fcntl.h>
-
-/* For _IO helpers */
-#include <linux/ioctl.h>
-
-/*
- * CAREFUL: Check include/asm-generic/fcntl.h when defining
- * new flags, since they might collide with O_* ones. We want
- * to re-use O_* flags that couldn't possibly have a meaning
- * from eventfd, in order to leave a free define-space for
- * shared O_* flags.
- */
-#define TFD_TIMER_ABSTIME (1 << 0)
-#define TFD_TIMER_CANCEL_ON_SET (1 << 1)
-#define TFD_CLOEXEC O_CLOEXEC
-#define TFD_NONBLOCK O_NONBLOCK
+#include <uapi/linux/timerfd.h>
#define TFD_SHARED_FCNTL_FLAGS (TFD_CLOEXEC | TFD_NONBLOCK)
/* Flags for timerfd_create. */
@@ -32,6 +16,4 @@
/* Flags for timerfd_settime. */
#define TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET)
-#define TFD_IOC_SET_TICKS _IOW('T', 0, u64)
-
#endif /* _LINUX_TIMERFD_H */
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index be00761..0f16550 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -33,7 +33,8 @@ const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
unsigned int bitmask_size);
const char *trace_print_hex_seq(struct trace_seq *p,
- const unsigned char *buf, int len);
+ const unsigned char *buf, int len,
+ bool concatenate);
const char *trace_print_array_seq(struct trace_seq *p,
const void *buf, int count,
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 40144f3..1017e904 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -217,12 +217,18 @@ struct tty_port_operations {
/* Called on the final put of a port */
void (*destruct)(struct tty_port *port);
};
-
+
+struct tty_port_client_operations {
+ int (*receive_buf)(struct tty_port *port, const unsigned char *, const unsigned char *, size_t);
+ void (*write_wakeup)(struct tty_port *port);
+};
+
struct tty_port {
struct tty_bufhead buf; /* Locked internally */
struct tty_struct *tty; /* Back pointer */
struct tty_struct *itty; /* internal back ptr */
const struct tty_port_operations *ops; /* Port operations */
+ const struct tty_port_client_operations *client_ops; /* Port client operations */
spinlock_t lock; /* Lock protecting tty field */
int blocked_open; /* Waiting to open */
int count; /* Usage count */
@@ -241,6 +247,7 @@ struct tty_port {
based drain is needed else
set to size of fifo */
struct kref kref; /* Ref counter */
+ void *client_data;
};
/* tty_port::iflags bits -- use atomic bit ops */
@@ -528,6 +535,7 @@ extern int tty_alloc_file(struct file *file);
extern void tty_add_file(struct tty_struct *tty, struct file *file);
extern void tty_free_file(struct file *file);
extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx);
+extern void tty_release_struct(struct tty_struct *tty, int idx);
extern int tty_release(struct inode *inode, struct file *filp);
extern void tty_init_termios(struct tty_struct *tty);
extern int tty_standard_install(struct tty_driver *driver,
@@ -656,7 +664,7 @@ extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
extern void tty_ldisc_release(struct tty_struct *tty);
extern void tty_ldisc_init(struct tty_struct *tty);
extern void tty_ldisc_deinit(struct tty_struct *tty);
-extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
+extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
char *f, int count);
/* n_tty.c */
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index 5dd75fa..c5fdfcf9 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -12,16 +12,18 @@ struct ci_hdrc;
/**
* struct ci_hdrc_cable - structure for external connector cable state tracking
- * @state: current state of the line
+ * @connected: true if cable is connected, false otherwise
* @changed: set to true when extcon event happen
+ * @enabled: set to true if we've enabled the vbus or id interrupt
* @edev: device which generate events
* @ci: driver state of the chipidea device
* @nb: hold event notification callback
* @conn: used for notification registration
*/
struct ci_hdrc_cable {
- bool state;
+ bool connected;
bool changed;
+ bool enabled;
struct extcon_dev *edev;
struct ci_hdrc *ci;
struct notifier_block nb;
@@ -55,10 +57,11 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_OVERRIDE_AHB_BURST BIT(9)
#define CI_HDRC_OVERRIDE_TX_BURST BIT(10)
#define CI_HDRC_OVERRIDE_RX_BURST BIT(11)
+#define CI_HDRC_OVERRIDE_PHY_CONTROL BIT(12) /* Glue layer manages phy */
enum usb_dr_mode dr_mode;
#define CI_HDRC_CONTROLLER_RESET_EVENT 0
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
- void (*notify_event) (struct ci_hdrc *ci, unsigned event);
+ int (*notify_event) (struct ci_hdrc *ci, unsigned event);
struct regulator *reg_vbus;
struct usb_otg_caps ci_otg_caps;
bool tpl_support;
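
Since notify_event() now returns int, a glue layer's handler becomes a status-returning callback; a skeleton under the new signature (the actual fix-ups are platform-specific and only hinted at in comments):

static int demo_ci_notify_event(struct ci_hdrc *ci, unsigned event)
{
	switch (event) {
	case CI_HDRC_CONTROLLER_RESET_EVENT:
		/* reapply glue-specific settings lost across the core reset */
		return 0;
	case CI_HDRC_CONTROLLER_STOPPED_EVENT:
		return 0;
	default:
		return 0;	/* ignore events this glue does not care about */
	}
}

It is then wired up through the .notify_event field of struct ci_hdrc_platform_data.
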
diff --git a/include/linux/uuid.h b/include/linux/uuid.h
index 2d095fc..4dff73a 100644
--- a/include/linux/uuid.h
+++ b/include/linux/uuid.h
@@ -19,6 +19,30 @@
#include <uapi/linux/uuid.h>
/*
+ * V1 (time-based) UUID definition [RFC 4122].
+ * - the timestamp is a 60-bit value, split 32/16/12, and goes in 100ns
+ * increments since midnight 15th October 1582
+ * - add UUID_TO_UNIX_TIME to convert unix time in 100ns units to UUID
+ * time
+ * - the clock sequence is a 14-bit counter to avoid duplicate times
+ */
+struct uuid_v1 {
+ __be32 time_low; /* low part of timestamp */
+ __be16 time_mid; /* mid part of timestamp */
+ __be16 time_hi_and_version; /* high part of timestamp and version */
+#define UUID_TO_UNIX_TIME 0x01b21dd213814000ULL
+#define UUID_TIMEHI_MASK 0x0fff
+#define UUID_VERSION_TIME 0x1000 /* time-based UUID */
+#define UUID_VERSION_NAME 0x3000 /* name-based UUID */
+#define UUID_VERSION_RANDOM 0x4000 /* (pseudo-)random generated UUID */
+ u8 clock_seq_hi_and_reserved; /* clock seq hi and variant */
+#define UUID_CLOCKHI_MASK 0x3f
+#define UUID_VARIANT_STD 0x80
+ u8 clock_seq_low; /* clock seq low */
+ u8 node[6]; /* spatially unique node ID (MAC addr) */
+};
+
+/*
* The length of a UUID string ("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")
* not including trailing NUL.
*/
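
How the 32/16/12 timestamp split and UUID_TO_UNIX_TIME fit together when filling in a struct uuid_v1; demo_fill_uuid_time() is a sketch modelled on what a client might do, not part of the header:

#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/math64.h>
#include <linux/uuid.h>

static void demo_fill_uuid_time(struct uuid_v1 *u)
{
	/* current time in 100ns units since the UUID epoch (15 Oct 1582) */
	u64 t = div_u64(ktime_to_ns(ktime_get_real()), 100) + UUID_TO_UNIX_TIME;

	u->time_low            = cpu_to_be32((u32)t);
	u->time_mid            = cpu_to_be16((u16)(t >> 32));
	u->time_hi_and_version = cpu_to_be16(((u16)(t >> 48) & UUID_TIMEHI_MASK) |
					     UUID_VERSION_TIME);
}
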
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index d5eb547..04b0d3f 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -132,12 +132,16 @@ static inline struct virtio_device *dev_to_virtio(struct device *_dev)
return container_of(_dev, struct virtio_device, dev);
}
+void virtio_add_status(struct virtio_device *dev, unsigned int status);
int register_virtio_device(struct virtio_device *dev);
void unregister_virtio_device(struct virtio_device *dev);
void virtio_break_device(struct virtio_device *dev);
void virtio_config_changed(struct virtio_device *dev);
+void virtio_config_disable(struct virtio_device *dev);
+void virtio_config_enable(struct virtio_device *dev);
+int virtio_finalize_features(struct virtio_device *dev);
#ifdef CONFIG_PM_SLEEP
int virtio_device_freeze(struct virtio_device *dev);
int virtio_device_restore(struct virtio_device *dev);
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 6620400..5209b5e 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -56,7 +56,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
struct virtio_net_hdr *hdr,
- bool little_endian)
+ bool little_endian,
+ bool has_data_valid)
{
memset(hdr, 0, sizeof(*hdr)); /* no info leak */
@@ -91,7 +92,8 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
skb_checksum_start_offset(skb));
hdr->csum_offset = __cpu_to_virtio16(little_endian,
skb->csum_offset);
- } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ } else if (has_data_valid &&
+ skb->ip_summed == CHECKSUM_UNNECESSARY) {
hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
} /* else everything is zero */
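
virtio_net_hdr_from_skb() callers now state explicitly whether DATA_VALID may be reported: paths that hand packets to userspace or a guest pass true, while virtio-net's own transmit path passes false. A sketch (little_endian comes from wherever the caller already tracks endianness):

#include <linux/virtio_net.h>

static int demo_build_vnet_hdr(const struct sk_buff *skb,
			       struct virtio_net_hdr *hdr,
			       bool little_endian)
{
	/* returns 0 on success, a negative errno if the skb's GSO setup
	 * cannot be described in a virtio_net_hdr
	 */
	return virtio_net_hdr_from_skb(skb, hdr, little_endian, true);
}
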
diff --git a/include/linux/vme.h b/include/linux/vme.h
index 8c58917..ec5e8bf 100644
--- a/include/linux/vme.h
+++ b/include/linux/vme.h
@@ -108,7 +108,6 @@ struct vme_dev {
};
struct vme_driver {
- struct list_head node;
const char *name;
int (*match)(struct vme_dev *);
int (*probe)(struct vme_dev *);
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index 1bd31a3..b724ef7 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -54,13 +54,6 @@
#define VMCI_IMR_DATAGRAM 0x1
#define VMCI_IMR_NOTIFICATION 0x2
-/* Interrupt type. */
-enum {
- VMCI_INTR_TYPE_INTX = 0,
- VMCI_INTR_TYPE_MSI = 1,
- VMCI_INTR_TYPE_MSIX = 2,
-};
-
/* Maximum MSI/MSI-X interrupt vectors in the device. */
#define VMCI_MAX_INTRS 2
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index aa9bfea..0681fe2 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -58,27 +58,28 @@ static inline void vtime_task_switch(struct task_struct *prev)
extern void vtime_account_system(struct task_struct *tsk);
extern void vtime_account_idle(struct task_struct *tsk);
-extern void vtime_account_user(struct task_struct *tsk);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
static inline void vtime_task_switch(struct task_struct *prev) { }
static inline void vtime_account_system(struct task_struct *tsk) { }
-static inline void vtime_account_user(struct task_struct *tsk) { }
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void arch_vtime_task_switch(struct task_struct *tsk);
+extern void vtime_account_user(struct task_struct *tsk);
extern void vtime_user_enter(struct task_struct *tsk);
static inline void vtime_user_exit(struct task_struct *tsk)
{
vtime_account_user(tsk);
}
+
extern void vtime_guest_enter(struct task_struct *tsk);
extern void vtime_guest_exit(struct task_struct *tsk);
extern void vtime_init_idle(struct task_struct *tsk, int cpu);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
+static inline void vtime_account_user(struct task_struct *tsk) { }
static inline void vtime_user_enter(struct task_struct *tsk) { }
static inline void vtime_user_exit(struct task_struct *tsk) { }
static inline void vtime_guest_enter(struct task_struct *tsk) { }
@@ -93,9 +94,11 @@ static inline void vtime_account_irq_exit(struct task_struct *tsk)
/* On hard|softirq exit we always account to hard|softirq cputime */
vtime_account_system(tsk);
}
+extern void vtime_flush(struct task_struct *tsk);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
static inline void vtime_account_irq_exit(struct task_struct *tsk) { }
+static inline void vtime_flush(struct task_struct *tsk) { }
#endif
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 7b00668..5dd9a76 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -51,10 +51,10 @@ struct ww_mutex {
};
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \
- , .ww_class = &ww_class
+# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \
+ , .ww_class = class
#else
-# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class)
+# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class)
#endif
#define __WW_CLASS_INITIALIZER(ww_class) \
@@ -63,7 +63,7 @@ struct ww_mutex {
, .mutex_name = #ww_class "_mutex" }
#define __WW_MUTEX_INITIALIZER(lockname, class) \
- { .base = { \__MUTEX_INITIALIZER(lockname) } \
+ { .base = __MUTEX_INITIALIZER(lockname.base) \
__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
#define DEFINE_WW_CLASS(classname) \
@@ -186,11 +186,6 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
#endif
}
-extern int __must_check __ww_mutex_lock(struct ww_mutex *lock,
- struct ww_acquire_ctx *ctx);
-extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
- struct ww_acquire_ctx *ctx);
-
/**
* ww_mutex_lock - acquire the w/w mutex
* @lock: the mutex to be acquired
@@ -220,14 +215,7 @@ extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
*
* A mutex acquired with this function must be released with ww_mutex_unlock.
*/
-static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
-{
- if (ctx)
- return __ww_mutex_lock(lock, ctx);
-
- mutex_lock(&lock->base);
- return 0;
-}
+extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx);
/**
* ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
@@ -259,14 +247,8 @@ static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ct
*
* A mutex acquired with this function must be released with ww_mutex_unlock.
*/
-static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
- struct ww_acquire_ctx *ctx)
-{
- if (ctx)
- return __ww_mutex_lock_interruptible(lock, ctx);
- else
- return mutex_lock_interruptible(&lock->base);
-}
+extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
+ struct ww_acquire_ctx *ctx);
/**
* ww_mutex_lock_slow - slowpath acquiring of the w/w mutex